aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorDmitry Torokhov <dmitry.torokhov@gmail.com>2015-01-15 12:46:14 -0500
committerDmitry Torokhov <dmitry.torokhov@gmail.com>2015-01-15 12:46:14 -0500
commit0c49cd295d42d0032af11d55e2140dbec11dc8d0 (patch)
tree1e7d0e50b6b6d6e4de1fb6bb0b6d856c3932da58 /include/linux
parent0c3e99437a66e4c869c60c2398449e6d98f3a988 (diff)
parenteaa27f34e91a14cdceed26ed6c6793ec1d186115 (diff)
Merge tag 'v3.19-rc4' into next
Merge with mainline to bring in the latest thermal and other changes.
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/acpi.h175
-rw-r--r--include/linux/aer.h2
-rw-r--r--include/linux/ahci_platform.h13
-rw-r--r--include/linux/amba/bus.h16
-rw-r--r--include/linux/ata_platform.h5
-rw-r--r--include/linux/ath9k_platform.h3
-rw-r--r--include/linux/atmel-mci.h4
-rw-r--r--include/linux/atmel_tc.h13
-rw-r--r--include/linux/atomic.h36
-rw-r--r--include/linux/audit.h40
-rw-r--r--include/linux/backing-dev.h4
-rw-r--r--include/linux/balloon_compaction.h169
-rw-r--r--include/linux/bcma/bcma.h8
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h8
-rw-r--r--include/linux/bcma/bcma_driver_mips.h4
-rw-r--r--include/linux/bcma/bcma_regs.h5
-rw-r--r--include/linux/bcma/bcma_soc.h1
-rw-r--r--include/linux/binfmts.h4
-rw-r--r--include/linux/bio.h70
-rw-r--r--include/linux/bitmap.h39
-rw-r--r--include/linux/bitops.h27
-rw-r--r--include/linux/blk-mq.h48
-rw-r--r--include/linux/blk_types.h18
-rw-r--r--include/linux/blkdev.h84
-rw-r--r--include/linux/bootmem.h1
-rw-r--r--include/linux/bpf.h145
-rw-r--r--include/linux/brcmphy.h137
-rw-r--r--include/linux/buffer_head.h47
-rw-r--r--include/linux/cacheinfo.h100
-rw-r--r--include/linux/can/dev.h9
-rw-r--r--include/linux/ceph/auth.h26
-rw-r--r--include/linux/ceph/buffer.h3
-rw-r--r--include/linux/ceph/ceph_features.h1
-rw-r--r--include/linux/ceph/ceph_fs.h10
-rw-r--r--include/linux/ceph/libceph.h3
-rw-r--r--include/linux/ceph/messenger.h9
-rw-r--r--include/linux/ceph/msgr.h11
-rw-r--r--include/linux/ceph/osd_client.h13
-rw-r--r--include/linux/ceph/pagelist.h7
-rw-r--r--include/linux/ceph/rados.h225
-rw-r--r--include/linux/cgroup.h60
-rw-r--r--include/linux/clk-private.h2
-rw-r--r--include/linux/clk-provider.h42
-rw-r--r--include/linux/clk.h31
-rw-r--r--include/linux/clk/at91_pmc.h1
-rw-r--r--include/linux/clk/ti.h16
-rw-r--r--include/linux/clock_cooling.h65
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/cma.h8
-rw-r--r--include/linux/com20020.h29
-rw-r--r--include/linux/compaction.h30
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/compiler-gcc4.h1
-rw-r--r--include/linux/compiler-gcc5.h65
-rw-r--r--include/linux/compiler.h74
-rw-r--r--include/linux/coresight.h263
-rw-r--r--include/linux/cpu.h6
-rw-r--r--include/linux/cpu_cooling.h6
-rw-r--r--include/linux/cpufreq-dt.h22
-rw-r--r--include/linux/cpufreq.h57
-rw-r--r--include/linux/cpuidle.h3
-rw-r--r--include/linux/cpumask.h28
-rw-r--r--include/linux/cpuset.h40
-rw-r--r--include/linux/crash_dump.h15
-rw-r--r--include/linux/crc-t10dif.h5
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/crypto.h1112
-rw-r--r--include/linux/cycx_x25.h125
-rw-r--r--include/linux/dcache.h18
-rw-r--r--include/linux/debugfs.h24
-rw-r--r--include/linux/devcoredump.h35
-rw-r--r--include/linux/devfreq.h2
-rw-r--r--include/linux/device-mapper.h2
-rw-r--r--include/linux/device.h55
-rw-r--r--include/linux/dma-mapping.h39
-rw-r--r--include/linux/dma/dw.h64
-rw-r--r--include/linux/dmaengine.h42
-rw-r--r--include/linux/dmar.h58
-rw-r--r--include/linux/drbd.h2
-rw-r--r--include/linux/dw_dmac.h111
-rw-r--r--include/linux/dynamic_debug.h12
-rw-r--r--include/linux/dynamic_queue_limits.h12
-rw-r--r--include/linux/edac.h4
-rw-r--r--include/linux/eeprom_93cx6.h4
-rw-r--r--include/linux/efi.h23
-rw-r--r--include/linux/elf.h5
-rw-r--r--include/linux/etherdevice.h13
-rw-r--r--include/linux/ethtool.h46
-rw-r--r--include/linux/extcon/extcon-gpio.h4
-rw-r--r--include/linux/extcon/sm5502.h287
-rw-r--r--include/linux/f2fs_fs.h33
-rw-r--r--include/linux/fault-inject.h17
-rw-r--r--include/linux/fence.h4
-rw-r--r--include/linux/file.h1
-rw-r--r--include/linux/filter.h197
-rw-r--r--include/linux/flex_proportions.h5
-rw-r--r--include/linux/font.h4
-rw-r--r--include/linux/freezer.h50
-rw-r--r--include/linux/fs.h166
-rw-r--r--include/linux/fs_enet_pd.h1
-rw-r--r--include/linux/fsl_ifc.h23
-rw-r--r--include/linux/fsldma.h13
-rw-r--r--include/linux/fsnotify_backend.h31
-rw-r--r--include/linux/ftrace.h53
-rw-r--r--include/linux/ftrace_event.h11
-rw-r--r--include/linux/genalloc.h7
-rw-r--r--include/linux/genl_magic_func.h4
-rw-r--r--include/linux/gfp.h13
-rw-r--r--include/linux/gpio.h7
-rw-r--r--include/linux/gpio/consumer.h47
-rw-r--r--include/linux/gpio/driver.h20
-rw-r--r--include/linux/gpio_keys.h3
-rw-r--r--include/linux/hash.h35
-rw-r--r--include/linux/hdmi.h21
-rw-r--r--include/linux/hid.h46
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/hugetlb.h49
-rw-r--r--include/linux/hugetlb_cgroup.h1
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/i2c.h31
-rw-r--r--include/linux/i2c/pmbus.h4
-rw-r--r--include/linux/i2c/twl.h2
-rw-r--r--include/linux/i82593.h229
-rw-r--r--include/linux/ieee80211.h144
-rw-r--r--include/linux/ieee802154.h242
-rw-r--r--include/linux/if_bridge.h31
-rw-r--r--include/linux/if_macvlan.h1
-rw-r--r--include/linux/if_vlan.h107
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/iio/common/st_sensors.h10
-rw-r--r--include/linux/iio/events.h2
-rw-r--r--include/linux/iio/iio.h8
-rw-r--r--include/linux/ima.h4
-rw-r--r--include/linux/inetdevice.h2
-rw-r--r--include/linux/init_task.h24
-rw-r--r--include/linux/integrity.h6
-rw-r--r--include/linux/interrupt.h11
-rw-r--r--include/linux/io.h4
-rw-r--r--include/linux/iommu.h59
-rw-r--r--include/linux/ioport.h5
-rw-r--r--include/linux/ipack.h24
-rw-r--r--include/linux/ipc_namespace.h23
-rw-r--r--include/linux/ipmi.h6
-rw-r--r--include/linux/ipmi_smi.h10
-rw-r--r--include/linux/ipv6.h11
-rw-r--r--include/linux/irq.h73
-rw-r--r--include/linux/irq_work.h3
-rw-r--r--include/linux/irqchip/arm-gic-v3.h128
-rw-r--r--include/linux/irqchip/arm-gic.h20
-rw-r--r--include/linux/irqchip/irq-omap-intc.h32
-rw-r--r--include/linux/irqchip/mips-gic.h249
-rw-r--r--include/linux/irqdesc.h29
-rw-r--r--include/linux/irqdomain.h101
-rw-r--r--include/linux/irqhandler.h14
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/jump_label.h17
-rw-r--r--include/linux/kcmp.h17
-rw-r--r--include/linux/kdb.h62
-rw-r--r--include/linux/kern_levels.h13
-rw-r--r--include/linux/kernel.h71
-rw-r--r--include/linux/kernel_stat.h10
-rw-r--r--include/linux/kernelcapi.h2
-rw-r--r--include/linux/kernfs.h8
-rw-r--r--include/linux/kexec.h1
-rw-r--r--include/linux/key-type.h34
-rw-r--r--include/linux/kfifo.h2
-rw-r--r--include/linux/kgdb.h2
-rw-r--r--include/linux/khugepaged.h17
-rw-r--r--include/linux/kmemleak.h2
-rw-r--r--include/linux/kprobes.h1
-rw-r--r--include/linux/kvm_host.h140
-rw-r--r--include/linux/kvm_types.h41
-rw-r--r--include/linux/leds.h59
-rw-r--r--include/linux/libata.h22
-rw-r--r--include/linux/list.h35
-rw-r--r--include/linux/lockd/debug.h6
-rw-r--r--include/linux/lockd/lockd.h1
-rw-r--r--include/linux/lockdep.h8
-rw-r--r--include/linux/mailbox_client.h49
-rw-r--r--include/linux/mailbox_controller.h133
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/mbus.h1
-rw-r--r--include/linux/mei_cl_bus.h1
-rw-r--r--include/linux/memcontrol.h129
-rw-r--r--include/linux/memory.h2
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/mempolicy.h7
-rw-r--r--include/linux/mfd/abx500/ab8500-sysctrl.h1
-rw-r--r--include/linux/mfd/arizona/core.h1
-rw-r--r--include/linux/mfd/arizona/registers.h79
-rw-r--r--include/linux/mfd/atmel-hlcdc.h85
-rw-r--r--include/linux/mfd/axp20x.h59
-rw-r--r--include/linux/mfd/core.h10
-rw-r--r--include/linux/mfd/cros_ec.h24
-rw-r--r--include/linux/mfd/cros_ec_commands.h3
-rw-r--r--include/linux/mfd/da9052/da9052.h2
-rw-r--r--include/linux/mfd/davinci_voicecodec.h9
-rw-r--r--include/linux/mfd/dln2.h103
-rw-r--r--include/linux/mfd/hi6421-pmic.h41
-rw-r--r--include/linux/mfd/max14577-private.h95
-rw-r--r--include/linux/mfd/max14577.h30
-rw-r--r--include/linux/mfd/max77686.h7
-rw-r--r--include/linux/mfd/max77693-private.h69
-rw-r--r--include/linux/mfd/max77693.h40
-rw-r--r--include/linux/mfd/rk808.h196
-rw-r--r--include/linux/mfd/rn5t618.h228
-rw-r--r--include/linux/mfd/rtsx_pci.h37
-rw-r--r--include/linux/mfd/samsung/core.h23
-rw-r--r--include/linux/mfd/samsung/s2mpa01.h12
-rw-r--r--include/linux/mfd/samsung/s2mps11.h9
-rw-r--r--include/linux/mfd/samsung/s2mps13.h186
-rw-r--r--include/linux/mfd/samsung/s2mps14.h10
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h39
-rw-r--r--include/linux/mfd/tc3589x.h8
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h1
-rw-r--r--include/linux/mfd/ti_ssp.h93
-rw-r--r--include/linux/mfd/tmio.h25
-rw-r--r--include/linux/mfd/tps65217.h2
-rw-r--r--include/linux/migrate.h24
-rw-r--r--include/linux/mlx4/cmd.h31
-rw-r--r--include/linux/mlx4/device.h140
-rw-r--r--include/linux/mlx4/qp.h4
-rw-r--r--include/linux/mlx5/device.h222
-rw-r--r--include/linux/mlx5/driver.h140
-rw-r--r--include/linux/mlx5/mlx5_ifc.h349
-rw-r--r--include/linux/mlx5/qp.h103
-rw-r--r--include/linux/mm.h110
-rw-r--r--include/linux/mm_types.h21
-rw-r--r--include/linux/mmc/card.h33
-rw-r--r--include/linux/mmc/core.h3
-rw-r--r--include/linux/mmc/dw_mmc.h11
-rw-r--r--include/linux/mmc/host.h13
-rw-r--r--include/linux/mmc/mmc.h10
-rw-r--r--include/linux/mmc/sdhci.h21
-rw-r--r--include/linux/mmc/sdio_func.h2
-rw-r--r--include/linux/mmc/slot-gpio.h5
-rw-r--r--include/linux/mmdebug.h20
-rw-r--r--include/linux/mmu_notifier.h114
-rw-r--r--include/linux/mmzone.h74
-rw-r--r--include/linux/mod_devicetable.h2
-rw-r--r--include/linux/module.h16
-rw-r--r--include/linux/moduleparam.h52
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/msi.h193
-rw-r--r--include/linux/mtd/cfi.h22
-rw-r--r--include/linux/mtd/nand.h30
-rw-r--r--include/linux/mtd/spi-nor.h32
-rw-r--r--include/linux/mutex.h4
-rw-r--r--include/linux/namei.h25
-rw-r--r--include/linux/netdev_features.h9
-rw-r--r--include/linux/netdevice.h396
-rw-r--r--include/linux/netfilter/ipset/ip_set.h60
-rw-r--r--include/linux/netfilter/ipset/ip_set_list.h1
-rw-r--r--include/linux/netfilter_bridge.h50
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/nfs4.h31
-rw-r--r--include/linux/nfs_fs.h49
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/nfs_xdr.h61
-rw-r--r--include/linux/nl802154.h4
-rw-r--r--include/linux/nmi.h13
-rw-r--r--include/linux/ns_common.h12
-rw-r--r--include/linux/nvme.h18
-rw-r--r--include/linux/of.h252
-rw-r--r--include/linux/of_address.h31
-rw-r--r--include/linux/of_iommu.h23
-rw-r--r--include/linux/of_pci.h25
-rw-r--r--include/linux/of_pdt.h3
-rw-r--r--include/linux/of_platform.h6
-rw-r--r--include/linux/of_reserved_mem.h9
-rw-r--r--include/linux/omap-dma.h37
-rw-r--r--include/linux/omap-gpmc.h199
-rw-r--r--include/linux/omap-mailbox.h16
-rw-r--r--include/linux/oom.h14
-rw-r--r--include/linux/page-debug-flags.h32
-rw-r--r--include/linux/page-isolation.h8
-rw-r--r--include/linux/page_cgroup.h105
-rw-r--r--include/linux/page_counter.h51
-rw-r--r--include/linux/page_ext.h84
-rw-r--r--include/linux/page_owner.h38
-rw-r--r--include/linux/pagemap.h45
-rw-r--r--include/linux/pci-acpi.h7
-rw-r--r--include/linux/pci.h66
-rw-r--r--include/linux/pci_hotplug.h3
-rw-r--r--include/linux/pci_ids.h23
-rw-r--r--include/linux/percpu-defs.h5
-rw-r--r--include/linux/percpu-refcount.h169
-rw-r--r--include/linux/percpu.h17
-rw-r--r--include/linux/percpu_counter.h10
-rw-r--r--include/linux/perf_event.h59
-rw-r--r--include/linux/perf_regs.h16
-rw-r--r--include/linux/phonedev.h25
-rw-r--r--include/linux/phy.h53
-rw-r--r--include/linux/phy/phy.h52
-rw-r--r--include/linux/phy_fixed.h33
-rw-r--r--include/linux/pid_namespace.h3
-rw-r--r--include/linux/pinctrl/pinconf-generic.h2
-rw-r--r--include/linux/pinctrl/pinmux.h7
-rw-r--r--include/linux/pl320-ipc.h (renamed from include/linux/mailbox.h)0
-rw-r--r--include/linux/platform_data/asoc-s3c.h1
-rw-r--r--include/linux/platform_data/bcmgenet.h18
-rw-r--r--include/linux/platform_data/dma-dw.h59
-rw-r--r--include/linux/platform_data/dma-imx.h1
-rw-r--r--include/linux/platform_data/dwc3-exynos.h24
-rw-r--r--include/linux/platform_data/elm.h16
-rw-r--r--include/linux/platform_data/gpio-dwapb.h32
-rw-r--r--include/linux/platform_data/hsmmc-omap.h90
-rw-r--r--include/linux/platform_data/i2c-designware.h21
-rw-r--r--include/linux/platform_data/isl9305.h30
-rw-r--r--include/linux/platform_data/lp855x.h2
-rw-r--r--include/linux/platform_data/mmc-atmel-mci.h22
-rw-r--r--include/linux/platform_data/mmc-omap.h27
-rw-r--r--include/linux/platform_data/mtd-nand-omap2.h1
-rw-r--r--include/linux/platform_data/pxa_sdhci.h5
-rw-r--r--include/linux/platform_data/rcar-du.h74
-rw-r--r--include/linux/platform_data/samsung-usbphy.h27
-rw-r--r--include/linux/platform_data/serial-omap.h3
-rw-r--r--include/linux/platform_data/st21nfca.h1
-rw-r--r--include/linux/platform_data/st21nfcb.h1
-rw-r--r--include/linux/platform_data/tegra_emc.h34
-rw-r--r--include/linux/platform_device.h12
-rw-r--r--include/linux/plist.h10
-rw-r--r--include/linux/pm.h24
-rw-r--r--include/linux/pm_clock.h8
-rw-r--r--include/linux/pm_domain.h157
-rw-r--r--include/linux/pm_opp.h12
-rw-r--r--include/linux/pm_qos.h43
-rw-r--r--include/linux/pm_runtime.h27
-rw-r--r--include/linux/pnfs_osd_xdr.h2
-rw-r--r--include/linux/power/charger-manager.h3
-rw-r--r--include/linux/power_supply.h14
-rw-r--r--include/linux/printk.h3
-rw-r--r--include/linux/prio_heap.h58
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/proc_ns.h43
-rw-r--r--include/linux/property.h143
-rw-r--r--include/linux/proportions.h5
-rw-r--r--include/linux/pstore_ram.h4
-rw-r--r--include/linux/ptrace.h2
-rw-r--r--include/linux/pxa168_eth.h3
-rw-r--r--include/linux/pxa2xx_ssp.h20
-rw-r--r--include/linux/quota.h5
-rw-r--r--include/linux/quotaops.h8
-rw-r--r--include/linux/random.h4
-rw-r--r--include/linux/ratelimit.h12
-rw-r--r--include/linux/rbtree_augmented.h10
-rw-r--r--include/linux/rculist.h17
-rw-r--r--include/linux/rcupdate.h136
-rw-r--r--include/linux/rcutiny.h4
-rw-r--r--include/linux/rcutree.h6
-rw-r--r--include/linux/reboot.h3
-rw-r--r--include/linux/regmap.h7
-rw-r--r--include/linux/regulator/consumer.h28
-rw-r--r--include/linux/regulator/da9211.h9
-rw-r--r--include/linux/regulator/driver.h20
-rw-r--r--include/linux/regulator/max1586.h2
-rw-r--r--include/linux/regulator/of_regulator.h9
-rw-r--r--include/linux/res_counter.h223
-rw-r--r--include/linux/reset-controller.h2
-rw-r--r--include/linux/reset.h7
-rw-r--r--include/linux/rhashtable.h17
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/rmap.h12
-rw-r--r--include/linux/rtc.h21
-rw-r--r--include/linux/rtnetlink.h24
-rw-r--r--include/linux/rwsem.h2
-rw-r--r--include/linux/sched.h154
-rw-r--r--include/linux/screen_info.h8
-rw-r--r--include/linux/seccomp.h25
-rw-r--r--include/linux/security.h10
-rw-r--r--include/linux/seq_buf.h136
-rw-r--r--include/linux/seq_file.h15
-rw-r--r--include/linux/seqlock.h19
-rw-r--r--include/linux/serial_8250.h3
-rw-r--r--include/linux/serial_bcm63xx.h2
-rw-r--r--include/linux/serial_core.h85
-rw-r--r--include/linux/shrinker.h2
-rw-r--r--include/linux/signal.h29
-rw-r--r--include/linux/skbuff.h544
-rw-r--r--include/linux/slab.h70
-rw-r--r--include/linux/slab_def.h20
-rw-r--r--include/linux/smp.h2
-rw-r--r--include/linux/soc/ti/knav_dma.h175
-rw-r--r--include/linux/soc/ti/knav_qmss.h90
-rw-r--r--include/linux/socket.h26
-rw-r--r--include/linux/spi/mcp23s08.h18
-rw-r--r--include/linux/spi/pxa2xx_spi.h9
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/spinlock.h8
-rw-r--r--include/linux/spmi.h3
-rw-r--r--include/linux/stacktrace.h5
-rw-r--r--include/linux/string.h7
-rw-r--r--include/linux/string_helpers.h65
-rw-r--r--include/linux/sunrpc/auth.h2
-rw-r--r--include/linux/sunrpc/clnt.h4
-rw-r--r--include/linux/sunrpc/debug.h64
-rw-r--r--include/linux/sunrpc/metrics.h3
-rw-r--r--include/linux/sunrpc/sched.h8
-rw-r--r--include/linux/sunrpc/svc.h35
-rw-r--r--include/linux/sunrpc/svc_xprt.h7
-rw-r--r--include/linux/sunrpc/xprt.h4
-rw-r--r--include/linux/sunrpc/xprtsock.h59
-rw-r--r--include/linux/suspend.h6
-rw-r--r--include/linux/swap.h30
-rw-r--r--include/linux/swap_cgroup.h42
-rw-r--r--include/linux/syscalls.h12
-rw-r--r--include/linux/sysfs.h9
-rw-r--r--include/linux/syslog.h9
-rw-r--r--include/linux/t10-pi.h22
-rw-r--r--include/linux/tcp.h18
-rw-r--r--include/linux/thermal.h79
-rw-r--r--include/linux/ti_wilink_st.h2
-rw-r--r--include/linux/tick.h4
-rw-r--r--include/linux/time.h17
-rw-r--r--include/linux/timekeeper_internal.h2
-rw-r--r--include/linux/timekeeping.h51
-rw-r--r--include/linux/topology.h17
-rw-r--r--include/linux/torture.h5
-rw-r--r--include/linux/trace_seq.h77
-rw-r--r--include/linux/tracepoint.h11
-rw-r--r--include/linux/tty.h43
-rw-r--r--include/linux/tty_driver.h4
-rw-r--r--include/linux/udp.h16
-rw-r--r--include/linux/uio.h15
-rw-r--r--include/linux/uio_driver.h14
-rw-r--r--include/linux/uprobes.h14
-rw-r--r--include/linux/usb.h14
-rw-r--r--include/linux/usb/chipidea.h6
-rw-r--r--include/linux/usb/composite.h7
-rw-r--r--include/linux/usb/ehci-dbgp.h83
-rw-r--r--include/linux/usb/ehci_def.h65
-rw-r--r--include/linux/usb/gadget.h31
-rw-r--r--include/linux/usb/hcd.h14
-rw-r--r--include/linux/usb/of.h5
-rw-r--r--include/linux/usb/otg.h7
-rw-r--r--include/linux/usb/phy.h6
-rw-r--r--include/linux/usb/quirks.h22
-rw-r--r--include/linux/usb/renesas_usbhs.h4
-rw-r--r--include/linux/usb/usbnet.h4
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/linux/user_namespace.h15
-rw-r--r--include/linux/utsname.h3
-rw-r--r--include/linux/vexpress.h19
-rw-r--r--include/linux/virtio.h26
-rw-r--r--include/linux/virtio_byteorder.h59
-rw-r--r--include/linux/virtio_config.h149
-rw-r--r--include/linux/virtio_scsi.h162
-rw-r--r--include/linux/vm_event_item.h8
-rw-r--r--include/linux/vmw_vmci_api.h5
-rw-r--r--include/linux/vringh.h37
-rw-r--r--include/linux/wait.h101
-rw-r--r--include/linux/watchdog.h9
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/linux/zsmalloc.h2
454 files changed, 13185 insertions, 5123 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 807cbc46d73e..d459cd17b477 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -28,6 +28,7 @@
28#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/ioport.h> /* for struct resource */ 29#include <linux/ioport.h> /* for struct resource */
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/property.h>
31 32
32#ifndef _LINUX 33#ifndef _LINUX
33#define _LINUX 34#define _LINUX
@@ -123,6 +124,10 @@ int acpi_numa_init (void);
123 124
124int acpi_table_init (void); 125int acpi_table_init (void);
125int acpi_table_parse(char *id, acpi_tbl_table_handler handler); 126int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
127int __init acpi_parse_entries(char *id, unsigned long table_size,
128 acpi_tbl_entry_handler handler,
129 struct acpi_table_header *table_header,
130 int entry_id, unsigned int max_entries);
126int __init acpi_table_parse_entries(char *id, unsigned long table_size, 131int __init acpi_table_parse_entries(char *id, unsigned long table_size,
127 int entry_id, 132 int entry_id,
128 acpi_tbl_entry_handler handler, 133 acpi_tbl_entry_handler handler,
@@ -142,12 +147,13 @@ void acpi_numa_arch_fixup(void);
142 147
143#ifdef CONFIG_ACPI_HOTPLUG_CPU 148#ifdef CONFIG_ACPI_HOTPLUG_CPU
144/* Arch dependent functions for cpu hotplug support */ 149/* Arch dependent functions for cpu hotplug support */
145int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu); 150int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
146int acpi_unmap_lsapic(int cpu); 151int acpi_unmap_cpu(int cpu);
147#endif /* CONFIG_ACPI_HOTPLUG_CPU */ 152#endif /* CONFIG_ACPI_HOTPLUG_CPU */
148 153
149int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); 154int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
150int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base); 155int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
156int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
151void acpi_irq_stats_init(void); 157void acpi_irq_stats_init(void);
152extern u32 acpi_irq_handled; 158extern u32 acpi_irq_handled;
153extern u32 acpi_irq_not_handled; 159extern u32 acpi_irq_not_handled;
@@ -423,15 +429,13 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
423const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, 429const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
424 const struct device *dev); 430 const struct device *dev);
425 431
426static inline bool acpi_driver_match_device(struct device *dev, 432extern bool acpi_driver_match_device(struct device *dev,
427 const struct device_driver *drv) 433 const struct device_driver *drv);
428{
429 return !!acpi_match_device(drv->acpi_match_table, dev);
430}
431
432int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); 434int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
433int acpi_device_modalias(struct device *, char *, int); 435int acpi_device_modalias(struct device *, char *, int);
436void acpi_walk_dep_device_list(acpi_handle handle);
434 437
438struct platform_device *acpi_create_platform_device(struct acpi_device *);
435#define ACPI_PTR(_ptr) (_ptr) 439#define ACPI_PTR(_ptr) (_ptr)
436 440
437#else /* !CONFIG_ACPI */ 441#else /* !CONFIG_ACPI */
@@ -442,6 +446,23 @@ int acpi_device_modalias(struct device *, char *, int);
442#define ACPI_COMPANION_SET(dev, adev) do { } while (0) 446#define ACPI_COMPANION_SET(dev, adev) do { } while (0)
443#define ACPI_HANDLE(dev) (NULL) 447#define ACPI_HANDLE(dev) (NULL)
444 448
449struct fwnode_handle;
450
451static inline bool is_acpi_node(struct fwnode_handle *fwnode)
452{
453 return false;
454}
455
456static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode)
457{
458 return NULL;
459}
460
461static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
462{
463 return NULL;
464}
465
445static inline const char *acpi_dev_name(struct acpi_device *adev) 466static inline const char *acpi_dev_name(struct acpi_device *adev)
446{ 467{
447 return NULL; 468 return NULL;
@@ -552,16 +573,26 @@ static inline void arch_reserve_mem_area(acpi_physical_address addr,
552#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0) 573#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
553#endif 574#endif
554 575
555#if defined(CONFIG_ACPI) && defined(CONFIG_PM_RUNTIME) 576#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
556int acpi_dev_runtime_suspend(struct device *dev); 577int acpi_dev_runtime_suspend(struct device *dev);
557int acpi_dev_runtime_resume(struct device *dev); 578int acpi_dev_runtime_resume(struct device *dev);
558int acpi_subsys_runtime_suspend(struct device *dev); 579int acpi_subsys_runtime_suspend(struct device *dev);
559int acpi_subsys_runtime_resume(struct device *dev); 580int acpi_subsys_runtime_resume(struct device *dev);
581struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
582int acpi_dev_pm_attach(struct device *dev, bool power_on);
560#else 583#else
561static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; } 584static inline int acpi_dev_runtime_suspend(struct device *dev) { return 0; }
562static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; } 585static inline int acpi_dev_runtime_resume(struct device *dev) { return 0; }
563static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; } 586static inline int acpi_subsys_runtime_suspend(struct device *dev) { return 0; }
564static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; } 587static inline int acpi_subsys_runtime_resume(struct device *dev) { return 0; }
588static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
589{
590 return NULL;
591}
592static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
593{
594 return -ENODEV;
595}
565#endif 596#endif
566 597
567#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP) 598#if defined(CONFIG_ACPI) && defined(CONFIG_PM_SLEEP)
@@ -584,22 +615,6 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
584static inline int acpi_subsys_freeze(struct device *dev) { return 0; } 615static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
585#endif 616#endif
586 617
587#if defined(CONFIG_ACPI) && defined(CONFIG_PM)
588struct acpi_device *acpi_dev_pm_get_node(struct device *dev);
589int acpi_dev_pm_attach(struct device *dev, bool power_on);
590void acpi_dev_pm_detach(struct device *dev, bool power_off);
591#else
592static inline struct acpi_device *acpi_dev_pm_get_node(struct device *dev)
593{
594 return NULL;
595}
596static inline int acpi_dev_pm_attach(struct device *dev, bool power_on)
597{
598 return -ENODEV;
599}
600static inline void acpi_dev_pm_detach(struct device *dev, bool power_off) {}
601#endif
602
603#ifdef CONFIG_ACPI 618#ifdef CONFIG_ACPI
604__printf(3, 4) 619__printf(3, 4)
605void acpi_handle_printk(const char *level, acpi_handle handle, 620void acpi_handle_printk(const char *level, acpi_handle handle,
@@ -660,4 +675,114 @@ do { \
660#endif 675#endif
661#endif 676#endif
662 677
678struct acpi_gpio_params {
679 unsigned int crs_entry_index;
680 unsigned int line_index;
681 bool active_low;
682};
683
684struct acpi_gpio_mapping {
685 const char *name;
686 const struct acpi_gpio_params *data;
687 unsigned int size;
688};
689
690#if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB)
691int acpi_dev_add_driver_gpios(struct acpi_device *adev,
692 const struct acpi_gpio_mapping *gpios);
693
694static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
695{
696 if (adev)
697 adev->driver_gpios = NULL;
698}
699#else
700static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
701 const struct acpi_gpio_mapping *gpios)
702{
703 return -ENXIO;
704}
705static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
706#endif
707
708/* Device properties */
709
710#define MAX_ACPI_REFERENCE_ARGS 8
711struct acpi_reference_args {
712 struct acpi_device *adev;
713 size_t nargs;
714 u64 args[MAX_ACPI_REFERENCE_ARGS];
715};
716
717#ifdef CONFIG_ACPI
718int acpi_dev_get_property(struct acpi_device *adev, const char *name,
719 acpi_object_type type, const union acpi_object **obj);
720int acpi_dev_get_property_array(struct acpi_device *adev, const char *name,
721 acpi_object_type type,
722 const union acpi_object **obj);
723int acpi_dev_get_property_reference(struct acpi_device *adev,
724 const char *name, size_t index,
725 struct acpi_reference_args *args);
726
727int acpi_dev_prop_get(struct acpi_device *adev, const char *propname,
728 void **valptr);
729int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
730 enum dev_prop_type proptype, void *val);
731int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
732 enum dev_prop_type proptype, void *val, size_t nval);
733
734struct acpi_device *acpi_get_next_child(struct device *dev,
735 struct acpi_device *child);
736#else
737static inline int acpi_dev_get_property(struct acpi_device *adev,
738 const char *name, acpi_object_type type,
739 const union acpi_object **obj)
740{
741 return -ENXIO;
742}
743static inline int acpi_dev_get_property_array(struct acpi_device *adev,
744 const char *name,
745 acpi_object_type type,
746 const union acpi_object **obj)
747{
748 return -ENXIO;
749}
750static inline int acpi_dev_get_property_reference(struct acpi_device *adev,
751 const char *name, const char *cells_name,
752 size_t index, struct acpi_reference_args *args)
753{
754 return -ENXIO;
755}
756
757static inline int acpi_dev_prop_get(struct acpi_device *adev,
758 const char *propname,
759 void **valptr)
760{
761 return -ENXIO;
762}
763
764static inline int acpi_dev_prop_read_single(struct acpi_device *adev,
765 const char *propname,
766 enum dev_prop_type proptype,
767 void *val)
768{
769 return -ENXIO;
770}
771
772static inline int acpi_dev_prop_read(struct acpi_device *adev,
773 const char *propname,
774 enum dev_prop_type proptype,
775 void *val, size_t nval)
776{
777 return -ENXIO;
778}
779
780static inline struct acpi_device *acpi_get_next_child(struct device *dev,
781 struct acpi_device *child)
782{
783 return NULL;
784}
785
786#endif
787
663#endif /*_LINUX_ACPI_H*/ 788#endif /*_LINUX_ACPI_H*/
diff --git a/include/linux/aer.h b/include/linux/aer.h
index c826d1c28f9c..4fef65e57023 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,8 @@
7#ifndef _AER_H_ 7#ifndef _AER_H_
8#define _AER_H_ 8#define _AER_H_
9 9
10#include <linux/types.h>
11
10#define AER_NONFATAL 0 12#define AER_NONFATAL 0
11#define AER_FATAL 1 13#define AER_FATAL 1
12#define AER_CORRECTABLE 2 14#define AER_CORRECTABLE 2
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 09a947e8bc87..642d6ae4030c 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -22,19 +22,6 @@ struct ata_port_info;
22struct ahci_host_priv; 22struct ahci_host_priv;
23struct platform_device; 23struct platform_device;
24 24
25/*
26 * Note ahci_platform_data is deprecated, it is only kept around for use
27 * by the old da850 and spear13xx ahci code.
28 * New drivers should instead declare their own platform_driver struct, and
29 * use ahci_platform* functions in their own probe, suspend and resume methods.
30 */
31struct ahci_platform_data {
32 int (*init)(struct device *dev, void __iomem *addr);
33 void (*exit)(struct device *dev);
34 int (*suspend)(struct device *dev);
35 int (*resume)(struct device *dev);
36};
37
38int ahci_platform_enable_clks(struct ahci_host_priv *hpriv); 25int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
39void ahci_platform_disable_clks(struct ahci_host_priv *hpriv); 26void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
40int ahci_platform_enable_resources(struct ahci_host_priv *hpriv); 27int ahci_platform_enable_resources(struct ahci_host_priv *hpriv);
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index fdd7e1b61f60..2afc618b15ce 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -23,6 +23,7 @@
23 23
24#define AMBA_NR_IRQS 9 24#define AMBA_NR_IRQS 9
25#define AMBA_CID 0xb105f00d 25#define AMBA_CID 0xb105f00d
26#define CORESIGHT_CID 0xb105900d
26 27
27struct clk; 28struct clk;
28 29
@@ -44,10 +45,15 @@ struct amba_driver {
44 const struct amba_id *id_table; 45 const struct amba_id *id_table;
45}; 46};
46 47
48/*
49 * Constants for the designer field of the Peripheral ID register. When bit 7
50 * is set to '1', bits [6:0] should be the JEP106 manufacturer identity code.
51 */
47enum amba_vendor { 52enum amba_vendor {
48 AMBA_VENDOR_ARM = 0x41, 53 AMBA_VENDOR_ARM = 0x41,
49 AMBA_VENDOR_ST = 0x80, 54 AMBA_VENDOR_ST = 0x80,
50 AMBA_VENDOR_QCOM = 0x51, 55 AMBA_VENDOR_QCOM = 0x51,
56 AMBA_VENDOR_LSI = 0xb6,
51}; 57};
52 58
53extern struct bus_type amba_bustype; 59extern struct bus_type amba_bustype;
@@ -92,6 +98,16 @@ void amba_release_regions(struct amba_device *);
92#define amba_pclk_disable(d) \ 98#define amba_pclk_disable(d) \
93 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) 99 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
94 100
101static inline int amba_pclk_prepare(struct amba_device *dev)
102{
103 return clk_prepare(dev->pclk);
104}
105
106static inline void amba_pclk_unprepare(struct amba_device *dev)
107{
108 clk_unprepare(dev->pclk);
109}
110
95/* Some drivers don't use the struct amba_device */ 111/* Some drivers don't use the struct amba_device */
96#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) 112#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
97#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) 113#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h
index b9fde17f767c..5c618a084225 100644
--- a/include/linux/ata_platform.h
+++ b/include/linux/ata_platform.h
@@ -8,11 +8,6 @@ struct pata_platform_info {
8 * spacing used by ata_std_ports(). 8 * spacing used by ata_std_ports().
9 */ 9 */
10 unsigned int ioport_shift; 10 unsigned int ioport_shift;
11 /*
12 * Indicate platform specific irq types and initial
13 * IRQ flags when call request_irq()
14 */
15 unsigned int irq_flags;
16}; 11};
17 12
18extern int __pata_platform_probe(struct device *dev, 13extern int __pata_platform_probe(struct device *dev,
diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h
index a495a959e8a7..33eb274cd0e6 100644
--- a/include/linux/ath9k_platform.h
+++ b/include/linux/ath9k_platform.h
@@ -31,8 +31,11 @@ struct ath9k_platform_data {
31 u32 gpio_mask; 31 u32 gpio_mask;
32 u32 gpio_val; 32 u32 gpio_val;
33 33
34 bool endian_check;
34 bool is_clk_25mhz; 35 bool is_clk_25mhz;
35 bool tx_gain_buffalo; 36 bool tx_gain_buffalo;
37 bool disable_2ghz;
38 bool disable_5ghz;
36 39
37 int (*get_mac_revision)(void); 40 int (*get_mac_revision)(void);
38 int (*external_reset)(void); 41 int (*external_reset)(void);
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 4c7a4b2104bf..9177947bf032 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -1,6 +1,8 @@
1#ifndef __LINUX_ATMEL_MCI_H 1#ifndef __LINUX_ATMEL_MCI_H
2#define __LINUX_ATMEL_MCI_H 2#define __LINUX_ATMEL_MCI_H
3 3
4#include <linux/types.h>
5
4#define ATMCI_MAX_NR_SLOTS 2 6#define ATMCI_MAX_NR_SLOTS 2
5 7
6/** 8/**
@@ -9,6 +11,7 @@
9 * @detect_pin: GPIO pin wired to the card detect switch 11 * @detect_pin: GPIO pin wired to the card detect switch
10 * @wp_pin: GPIO pin wired to the write protect sensor 12 * @wp_pin: GPIO pin wired to the write protect sensor
11 * @detect_is_active_high: The state of the detect pin when it is active 13 * @detect_is_active_high: The state of the detect pin when it is active
14 * @non_removable: The slot is not removable, only detect once
12 * 15 *
13 * If a given slot is not present on the board, @bus_width should be 16 * If a given slot is not present on the board, @bus_width should be
14 * set to 0. The other fields are ignored in this case. 17 * set to 0. The other fields are ignored in this case.
@@ -24,6 +27,7 @@ struct mci_slot_pdata {
24 int detect_pin; 27 int detect_pin;
25 int wp_pin; 28 int wp_pin;
26 bool detect_is_active_high; 29 bool detect_is_active_high;
30 bool non_removable;
27}; 31};
28 32
29/** 33/**
diff --git a/include/linux/atmel_tc.h b/include/linux/atmel_tc.h
index 89a931babecf..b87c1c7c242a 100644
--- a/include/linux/atmel_tc.h
+++ b/include/linux/atmel_tc.h
@@ -44,12 +44,13 @@ struct atmel_tcb_config {
44/** 44/**
45 * struct atmel_tc - information about a Timer/Counter Block 45 * struct atmel_tc - information about a Timer/Counter Block
46 * @pdev: physical device 46 * @pdev: physical device
47 * @iomem: resource associated with the I/O register
48 * @regs: mapping through which the I/O registers can be accessed 47 * @regs: mapping through which the I/O registers can be accessed
48 * @id: block id
49 * @tcb_config: configuration data from SoC 49 * @tcb_config: configuration data from SoC
50 * @irq: irq for each of the three channels 50 * @irq: irq for each of the three channels
51 * @clk: internal clock source for each of the three channels 51 * @clk: internal clock source for each of the three channels
52 * @node: list node, for tclib internal use 52 * @node: list node, for tclib internal use
53 * @allocated: if already used, for tclib internal use
53 * 54 *
54 * On some platforms, each TC channel has its own clocks and IRQs, 55 * On some platforms, each TC channel has its own clocks and IRQs,
55 * while on others, all TC channels share the same clock and IRQ. 56 * while on others, all TC channels share the same clock and IRQ.
@@ -61,15 +62,16 @@ struct atmel_tcb_config {
61 */ 62 */
62struct atmel_tc { 63struct atmel_tc {
63 struct platform_device *pdev; 64 struct platform_device *pdev;
64 struct resource *iomem;
65 void __iomem *regs; 65 void __iomem *regs;
66 int id;
66 const struct atmel_tcb_config *tcb_config; 67 const struct atmel_tcb_config *tcb_config;
67 int irq[3]; 68 int irq[3];
68 struct clk *clk[3]; 69 struct clk *clk[3];
69 struct list_head node; 70 struct list_head node;
71 bool allocated;
70}; 72};
71 73
72extern struct atmel_tc *atmel_tc_alloc(unsigned block, const char *name); 74extern struct atmel_tc *atmel_tc_alloc(unsigned block);
73extern void atmel_tc_free(struct atmel_tc *tc); 75extern void atmel_tc_free(struct atmel_tc *tc);
74 76
75/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */ 77/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
@@ -258,5 +260,10 @@ extern const u8 atmel_tc_divisors[5];
258#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */ 260#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
259#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */ 261#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
260#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */ 262#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
263#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
264 ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
265 ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
266 ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
267 /* all IRQs */
261 268
262#endif 269#endif
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index fef3a809e7cf..5b08a8540ecf 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -3,42 +3,6 @@
3#define _LINUX_ATOMIC_H 3#define _LINUX_ATOMIC_H
4#include <asm/atomic.h> 4#include <asm/atomic.h>
5 5
6/*
7 * Provide __deprecated wrappers for the new interface, avoid flag day changes.
8 * We need the ugly external functions to break header recursion hell.
9 */
10#ifndef smp_mb__before_atomic_inc
11static inline void __deprecated smp_mb__before_atomic_inc(void)
12{
13 extern void __smp_mb__before_atomic(void);
14 __smp_mb__before_atomic();
15}
16#endif
17
18#ifndef smp_mb__after_atomic_inc
19static inline void __deprecated smp_mb__after_atomic_inc(void)
20{
21 extern void __smp_mb__after_atomic(void);
22 __smp_mb__after_atomic();
23}
24#endif
25
26#ifndef smp_mb__before_atomic_dec
27static inline void __deprecated smp_mb__before_atomic_dec(void)
28{
29 extern void __smp_mb__before_atomic(void);
30 __smp_mb__before_atomic();
31}
32#endif
33
34#ifndef smp_mb__after_atomic_dec
35static inline void __deprecated smp_mb__after_atomic_dec(void)
36{
37 extern void __smp_mb__after_atomic(void);
38 __smp_mb__after_atomic();
39}
40#endif
41
42/** 6/**
43 * atomic_add_unless - add unless the number is already a given value 7 * atomic_add_unless - add unless the number is already a given value
44 * @v: pointer of type atomic_t 8 * @v: pointer of type atomic_t
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 22cfddb75566..af84234e1f6e 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -47,6 +47,7 @@ struct sk_buff;
47 47
48struct audit_krule { 48struct audit_krule {
49 int vers_ops; 49 int vers_ops;
50 u32 pflags;
50 u32 flags; 51 u32 flags;
51 u32 listnr; 52 u32 listnr;
52 u32 action; 53 u32 action;
@@ -64,14 +65,21 @@ struct audit_krule {
64 u64 prio; 65 u64 prio;
65}; 66};
66 67
68/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
69#define AUDIT_LOGINUID_LEGACY 0x1
70
67struct audit_field { 71struct audit_field {
68 u32 type; 72 u32 type;
69 u32 val; 73 union {
70 kuid_t uid; 74 u32 val;
71 kgid_t gid; 75 kuid_t uid;
76 kgid_t gid;
77 struct {
78 char *lsm_str;
79 void *lsm_rule;
80 };
81 };
72 u32 op; 82 u32 op;
73 char *lsm_str;
74 void *lsm_rule;
75}; 83};
76 84
77extern int is_audit_feature_set(int which); 85extern int is_audit_feature_set(int which);
@@ -86,7 +94,7 @@ extern unsigned compat_dir_class[];
86extern unsigned compat_chattr_class[]; 94extern unsigned compat_chattr_class[];
87extern unsigned compat_signal_class[]; 95extern unsigned compat_signal_class[];
88 96
89extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall); 97extern int audit_classify_compat_syscall(int abi, unsigned syscall);
90 98
91/* audit_names->type values */ 99/* audit_names->type values */
92#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */ 100#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
@@ -109,12 +117,13 @@ extern void audit_log_session_info(struct audit_buffer *ab);
109#endif 117#endif
110 118
111#ifdef CONFIG_AUDITSYSCALL 119#ifdef CONFIG_AUDITSYSCALL
120#include <asm/syscall.h> /* for syscall_get_arch() */
121
112/* These are defined in auditsc.c */ 122/* These are defined in auditsc.c */
113 /* Public API */ 123 /* Public API */
114extern int audit_alloc(struct task_struct *task); 124extern int audit_alloc(struct task_struct *task);
115extern void __audit_free(struct task_struct *task); 125extern void __audit_free(struct task_struct *task);
116extern void __audit_syscall_entry(int arch, 126extern void __audit_syscall_entry(int major, unsigned long a0, unsigned long a1,
117 int major, unsigned long a0, unsigned long a1,
118 unsigned long a2, unsigned long a3); 127 unsigned long a2, unsigned long a3);
119extern void __audit_syscall_exit(int ret_success, long ret_value); 128extern void __audit_syscall_exit(int ret_success, long ret_value);
120extern struct filename *__audit_reusename(const __user char *uptr); 129extern struct filename *__audit_reusename(const __user char *uptr);
@@ -125,6 +134,7 @@ extern void audit_putname(struct filename *name);
125#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ 134#define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */
126extern void __audit_inode(struct filename *name, const struct dentry *dentry, 135extern void __audit_inode(struct filename *name, const struct dentry *dentry,
127 unsigned int flags); 136 unsigned int flags);
137extern void __audit_file(const struct file *);
128extern void __audit_inode_child(const struct inode *parent, 138extern void __audit_inode_child(const struct inode *parent,
129 const struct dentry *dentry, 139 const struct dentry *dentry,
130 const unsigned char type); 140 const unsigned char type);
@@ -141,12 +151,12 @@ static inline void audit_free(struct task_struct *task)
141 if (unlikely(task->audit_context)) 151 if (unlikely(task->audit_context))
142 __audit_free(task); 152 __audit_free(task);
143} 153}
144static inline void audit_syscall_entry(int arch, int major, unsigned long a0, 154static inline void audit_syscall_entry(int major, unsigned long a0,
145 unsigned long a1, unsigned long a2, 155 unsigned long a1, unsigned long a2,
146 unsigned long a3) 156 unsigned long a3)
147{ 157{
148 if (unlikely(current->audit_context)) 158 if (unlikely(current->audit_context))
149 __audit_syscall_entry(arch, major, a0, a1, a2, a3); 159 __audit_syscall_entry(major, a0, a1, a2, a3);
150} 160}
151static inline void audit_syscall_exit(void *pt_regs) 161static inline void audit_syscall_exit(void *pt_regs)
152{ 162{
@@ -178,6 +188,11 @@ static inline void audit_inode(struct filename *name,
178 __audit_inode(name, dentry, flags); 188 __audit_inode(name, dentry, flags);
179 } 189 }
180} 190}
191static inline void audit_file(struct file *file)
192{
193 if (unlikely(!audit_dummy_context()))
194 __audit_file(file);
195}
181static inline void audit_inode_parent_hidden(struct filename *name, 196static inline void audit_inode_parent_hidden(struct filename *name,
182 const struct dentry *dentry) 197 const struct dentry *dentry)
183{ 198{
@@ -322,7 +337,7 @@ static inline int audit_alloc(struct task_struct *task)
322} 337}
323static inline void audit_free(struct task_struct *task) 338static inline void audit_free(struct task_struct *task)
324{ } 339{ }
325static inline void audit_syscall_entry(int arch, int major, unsigned long a0, 340static inline void audit_syscall_entry(int major, unsigned long a0,
326 unsigned long a1, unsigned long a2, 341 unsigned long a1, unsigned long a2,
327 unsigned long a3) 342 unsigned long a3)
328{ } 343{ }
@@ -352,6 +367,9 @@ static inline void audit_inode(struct filename *name,
352 const struct dentry *dentry, 367 const struct dentry *dentry,
353 unsigned int parent) 368 unsigned int parent)
354{ } 369{ }
370static inline void audit_file(struct file *file)
371{
372}
355static inline void audit_inode_parent_hidden(struct filename *name, 373static inline void audit_inode_parent_hidden(struct filename *name,
356 const struct dentry *dentry) 374 const struct dentry *dentry)
357{ } 375{ }
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index e488e9459a93..5da6012b7a14 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -28,12 +28,10 @@ struct dentry;
28 * Bits in backing_dev_info.state 28 * Bits in backing_dev_info.state
29 */ 29 */
30enum bdi_state { 30enum bdi_state {
31 BDI_wb_alloc, /* Default embedded wb allocated */
32 BDI_async_congested, /* The async (write) queue is getting full */ 31 BDI_async_congested, /* The async (write) queue is getting full */
33 BDI_sync_congested, /* The sync queue is getting full */ 32 BDI_sync_congested, /* The sync queue is getting full */
34 BDI_registered, /* bdi_register() was done */ 33 BDI_registered, /* bdi_register() was done */
35 BDI_writeback_running, /* Writeback is in progress */ 34 BDI_writeback_running, /* Writeback is in progress */
36 BDI_unused, /* Available bits start here */
37}; 35};
38 36
39typedef int (congested_fn)(void *, int); 37typedef int (congested_fn)(void *, int);
@@ -50,7 +48,6 @@ enum bdi_stat_item {
50 48
51struct bdi_writeback { 49struct bdi_writeback {
52 struct backing_dev_info *bdi; /* our parent bdi */ 50 struct backing_dev_info *bdi; /* our parent bdi */
53 unsigned int nr;
54 51
55 unsigned long last_old_flush; /* last old data flush */ 52 unsigned long last_old_flush; /* last old data flush */
56 53
@@ -124,7 +121,6 @@ void bdi_start_background_writeback(struct backing_dev_info *bdi);
124void bdi_writeback_workfn(struct work_struct *work); 121void bdi_writeback_workfn(struct work_struct *work);
125int bdi_has_dirty_io(struct backing_dev_info *bdi); 122int bdi_has_dirty_io(struct backing_dev_info *bdi);
126void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); 123void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
127void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);
128 124
129extern spinlock_t bdi_lock; 125extern spinlock_t bdi_lock;
130extern struct list_head bdi_list; 126extern struct list_head bdi_list;
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 089743ade734..9b0a15d06a4f 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -27,10 +27,13 @@
27 * counter raised only while it is under our special handling; 27 * counter raised only while it is under our special handling;
28 * 28 *
29 * iii. after the lockless scan step have selected a potential balloon page for 29 * iii. after the lockless scan step have selected a potential balloon page for
30 * isolation, re-test the page->mapping flags and the page ref counter 30 * isolation, re-test the PageBalloon mark and the PagePrivate flag
31 * under the proper page lock, to ensure isolating a valid balloon page 31 * under the proper page lock, to ensure isolating a valid balloon page
32 * (not yet isolated, nor under release procedure) 32 * (not yet isolated, nor under release procedure)
33 * 33 *
34 * iv. isolation or dequeueing procedure must clear PagePrivate flag under
35 * page lock together with removing page from balloon device page list.
36 *
34 * The functions provided by this interface are placed to help on coping with 37 * The functions provided by this interface are placed to help on coping with
35 * the aforementioned balloon page corner case, as well as to ensure the simple 38 * the aforementioned balloon page corner case, as well as to ensure the simple
36 * set of exposed rules are satisfied while we are dealing with balloon pages 39 * set of exposed rules are satisfied while we are dealing with balloon pages
@@ -54,43 +57,22 @@
54 * balloon driver as a page book-keeper for its registered balloon devices. 57 * balloon driver as a page book-keeper for its registered balloon devices.
55 */ 58 */
56struct balloon_dev_info { 59struct balloon_dev_info {
57 void *balloon_device; /* balloon device descriptor */
58 struct address_space *mapping; /* balloon special page->mapping */
59 unsigned long isolated_pages; /* # of isolated pages for migration */ 60 unsigned long isolated_pages; /* # of isolated pages for migration */
60 spinlock_t pages_lock; /* Protection to pages list */ 61 spinlock_t pages_lock; /* Protection to pages list */
61 struct list_head pages; /* Pages enqueued & handled to Host */ 62 struct list_head pages; /* Pages enqueued & handled to Host */
63 int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
64 struct page *page, enum migrate_mode mode);
62}; 65};
63 66
64extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); 67extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
65extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); 68extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
66extern struct balloon_dev_info *balloon_devinfo_alloc(
67 void *balloon_dev_descriptor);
68 69
69static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info) 70static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
70{
71 kfree(b_dev_info);
72}
73
74/*
75 * balloon_page_free - release a balloon page back to the page free lists
76 * @page: ballooned page to be set free
77 *
78 * This function must be used to properly set free an isolated/dequeued balloon
79 * page at the end of a sucessful page migration, or at the balloon driver's
80 * page release procedure.
81 */
82static inline void balloon_page_free(struct page *page)
83{ 71{
84 /* 72 balloon->isolated_pages = 0;
85 * Balloon pages always get an extra refcount before being isolated 73 spin_lock_init(&balloon->pages_lock);
86 * and before being dequeued to help on sorting out fortuite colisions 74 INIT_LIST_HEAD(&balloon->pages);
87 * between a thread attempting to isolate and another thread attempting 75 balloon->migratepage = NULL;
88 * to release the very same balloon page.
89 *
90 * Before we handle the page back to Buddy, lets drop its extra refcnt.
91 */
92 put_page(page);
93 __free_page(page);
94} 76}
95 77
96#ifdef CONFIG_BALLOON_COMPACTION 78#ifdef CONFIG_BALLOON_COMPACTION
@@ -98,107 +80,58 @@ extern bool balloon_page_isolate(struct page *page);
98extern void balloon_page_putback(struct page *page); 80extern void balloon_page_putback(struct page *page);
99extern int balloon_page_migrate(struct page *newpage, 81extern int balloon_page_migrate(struct page *newpage,
100 struct page *page, enum migrate_mode mode); 82 struct page *page, enum migrate_mode mode);
101extern struct address_space
102*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
103 const struct address_space_operations *a_ops);
104
105static inline void balloon_mapping_free(struct address_space *balloon_mapping)
106{
107 kfree(balloon_mapping);
108}
109 83
110/* 84/*
111 * page_flags_cleared - helper to perform balloon @page ->flags tests. 85 * __is_movable_balloon_page - helper to perform @page PageBalloon tests
112 *
113 * As balloon pages are obtained from buddy and we do not play with page->flags
114 * at driver level (exception made when we get the page lock for compaction),
115 * we can safely identify a ballooned page by checking if the
116 * PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared. This approach also
117 * helps us skip ballooned pages that are locked for compaction or release, thus
118 * mitigating their racy check at balloon_page_movable()
119 */
120static inline bool page_flags_cleared(struct page *page)
121{
122 return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP);
123}
124
125/*
126 * __is_movable_balloon_page - helper to perform @page mapping->flags tests
127 */ 86 */
128static inline bool __is_movable_balloon_page(struct page *page) 87static inline bool __is_movable_balloon_page(struct page *page)
129{ 88{
130 struct address_space *mapping = page->mapping; 89 return PageBalloon(page);
131 return mapping_balloon(mapping);
132} 90}
133 91
134/* 92/*
135 * balloon_page_movable - test page->mapping->flags to identify balloon pages 93 * balloon_page_movable - test PageBalloon to identify balloon pages
136 * that can be moved by compaction/migration. 94 * and PagePrivate to check that the page is not
137 * 95 * isolated and can be moved by compaction/migration.
138 * This function is used at core compaction's page isolation scheme, therefore
139 * most pages exposed to it are not enlisted as balloon pages and so, to avoid
140 * undesired side effects like racing against __free_pages(), we cannot afford
141 * holding the page locked while testing page->mapping->flags here.
142 * 96 *
143 * As we might return false positives in the case of a balloon page being just 97 * As we might return false positives in the case of a balloon page being just
144 * released under us, the page->mapping->flags need to be re-tested later, 98 * released under us, this need to be re-tested later, under the page lock.
145 * under the proper page lock, at the functions that will be coping with the
146 * balloon page case.
147 */ 99 */
148static inline bool balloon_page_movable(struct page *page) 100static inline bool balloon_page_movable(struct page *page)
149{ 101{
150 /* 102 return PageBalloon(page) && PagePrivate(page);
151 * Before dereferencing and testing mapping->flags, let's make sure
152 * this is not a page that uses ->mapping in a different way
153 */
154 if (page_flags_cleared(page) && !page_mapped(page) &&
155 page_count(page) == 1)
156 return __is_movable_balloon_page(page);
157
158 return false;
159} 103}
160 104
161/* 105/*
162 * isolated_balloon_page - identify an isolated balloon page on private 106 * isolated_balloon_page - identify an isolated balloon page on private
163 * compaction/migration page lists. 107 * compaction/migration page lists.
164 *
165 * After a compaction thread isolates a balloon page for migration, it raises
166 * the page refcount to prevent concurrent compaction threads from re-isolating
167 * the same page. For that reason putback_movable_pages(), or other routines
168 * that need to identify isolated balloon pages on private pagelists, cannot
169 * rely on balloon_page_movable() to accomplish the task.
170 */ 108 */
171static inline bool isolated_balloon_page(struct page *page) 109static inline bool isolated_balloon_page(struct page *page)
172{ 110{
173 /* Already isolated balloon pages, by default, have a raised refcount */ 111 return PageBalloon(page);
174 if (page_flags_cleared(page) && !page_mapped(page) &&
175 page_count(page) >= 2)
176 return __is_movable_balloon_page(page);
177
178 return false;
179} 112}
180 113
181/* 114/*
182 * balloon_page_insert - insert a page into the balloon's page list and make 115 * balloon_page_insert - insert a page into the balloon's page list and make
183 * the page->mapping assignment accordingly. 116 * the page->private assignment accordingly.
117 * @balloon : pointer to balloon device
184 * @page : page to be assigned as a 'balloon page' 118 * @page : page to be assigned as a 'balloon page'
185 * @mapping : allocated special 'balloon_mapping'
186 * @head : balloon's device page list head
187 * 119 *
188 * Caller must ensure the page is locked and the spin_lock protecting balloon 120 * Caller must ensure the page is locked and the spin_lock protecting balloon
189 * pages list is held before inserting a page into the balloon device. 121 * pages list is held before inserting a page into the balloon device.
190 */ 122 */
191static inline void balloon_page_insert(struct page *page, 123static inline void balloon_page_insert(struct balloon_dev_info *balloon,
192 struct address_space *mapping, 124 struct page *page)
193 struct list_head *head)
194{ 125{
195 page->mapping = mapping; 126 __SetPageBalloon(page);
196 list_add(&page->lru, head); 127 SetPagePrivate(page);
128 set_page_private(page, (unsigned long)balloon);
129 list_add(&page->lru, &balloon->pages);
197} 130}
198 131
199/* 132/*
200 * balloon_page_delete - delete a page from balloon's page list and clear 133 * balloon_page_delete - delete a page from balloon's page list and clear
201 * the page->mapping assignement accordingly. 134 * the page->private assignement accordingly.
202 * @page : page to be released from balloon's page list 135 * @page : page to be released from balloon's page list
203 * 136 *
204 * Caller must ensure the page is locked and the spin_lock protecting balloon 137 * Caller must ensure the page is locked and the spin_lock protecting balloon
@@ -206,8 +139,12 @@ static inline void balloon_page_insert(struct page *page,
206 */ 139 */
207static inline void balloon_page_delete(struct page *page) 140static inline void balloon_page_delete(struct page *page)
208{ 141{
209 page->mapping = NULL; 142 __ClearPageBalloon(page);
210 list_del(&page->lru); 143 set_page_private(page, 0);
144 if (PagePrivate(page)) {
145 ClearPagePrivate(page);
146 list_del(&page->lru);
147 }
211} 148}
212 149
213/* 150/*
@@ -216,11 +153,7 @@ static inline void balloon_page_delete(struct page *page)
216 */ 153 */
217static inline struct balloon_dev_info *balloon_page_device(struct page *page) 154static inline struct balloon_dev_info *balloon_page_device(struct page *page)
218{ 155{
219 struct address_space *mapping = page->mapping; 156 return (struct balloon_dev_info *)page_private(page);
220 if (likely(mapping))
221 return mapping->private_data;
222
223 return NULL;
224} 157}
225 158
226static inline gfp_t balloon_mapping_gfp_mask(void) 159static inline gfp_t balloon_mapping_gfp_mask(void)
@@ -228,34 +161,24 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
228 return GFP_HIGHUSER_MOVABLE; 161 return GFP_HIGHUSER_MOVABLE;
229} 162}
230 163
231static inline bool balloon_compaction_check(void)
232{
233 return true;
234}
235
236#else /* !CONFIG_BALLOON_COMPACTION */ 164#else /* !CONFIG_BALLOON_COMPACTION */
237 165
238static inline void *balloon_mapping_alloc(void *balloon_device, 166static inline void balloon_page_insert(struct balloon_dev_info *balloon,
239 const struct address_space_operations *a_ops) 167 struct page *page)
240{
241 return ERR_PTR(-EOPNOTSUPP);
242}
243
244static inline void balloon_mapping_free(struct address_space *balloon_mapping)
245{ 168{
246 return; 169 __SetPageBalloon(page);
170 list_add(&page->lru, &balloon->pages);
247} 171}
248 172
249static inline void balloon_page_insert(struct page *page, 173static inline void balloon_page_delete(struct page *page)
250 struct address_space *mapping,
251 struct list_head *head)
252{ 174{
253 list_add(&page->lru, head); 175 __ClearPageBalloon(page);
176 list_del(&page->lru);
254} 177}
255 178
256static inline void balloon_page_delete(struct page *page) 179static inline bool __is_movable_balloon_page(struct page *page)
257{ 180{
258 list_del(&page->lru); 181 return false;
259} 182}
260 183
261static inline bool balloon_page_movable(struct page *page) 184static inline bool balloon_page_movable(struct page *page)
@@ -289,9 +212,5 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
289 return GFP_HIGHUSER; 212 return GFP_HIGHUSER;
290} 213}
291 214
292static inline bool balloon_compaction_check(void)
293{
294 return false;
295}
296#endif /* CONFIG_BALLOON_COMPACTION */ 215#endif /* CONFIG_BALLOON_COMPACTION */
297#endif /* _LINUX_BALLOON_COMPACTION_H */ 216#endif /* _LINUX_BALLOON_COMPACTION_H */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 0272e49135d0..eb1c6a47b67f 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -267,7 +267,7 @@ struct bcma_device {
267 u8 core_unit; 267 u8 core_unit;
268 268
269 u32 addr; 269 u32 addr;
270 u32 addr1; 270 u32 addr_s[8];
271 u32 wrap; 271 u32 wrap;
272 272
273 void __iomem *io_addr; 273 void __iomem *io_addr;
@@ -323,6 +323,8 @@ struct bcma_bus {
323 struct pci_dev *host_pci; 323 struct pci_dev *host_pci;
324 /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */ 324 /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */
325 struct sdio_func *host_sdio; 325 struct sdio_func *host_sdio;
326 /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */
327 struct platform_device *host_pdev;
326 }; 328 };
327 329
328 struct bcma_chipinfo chipinfo; 330 struct bcma_chipinfo chipinfo;
@@ -332,10 +334,10 @@ struct bcma_bus {
332 struct bcma_device *mapped_core; 334 struct bcma_device *mapped_core;
333 struct list_head cores; 335 struct list_head cores;
334 u8 nr_cores; 336 u8 nr_cores;
335 u8 init_done:1;
336 u8 num; 337 u8 num;
337 338
338 struct bcma_drv_cc drv_cc; 339 struct bcma_drv_cc drv_cc;
340 struct bcma_drv_cc_b drv_cc_b;
339 struct bcma_drv_pci drv_pci[2]; 341 struct bcma_drv_pci drv_pci[2];
340 struct bcma_drv_pcie2 drv_pcie2; 342 struct bcma_drv_pcie2 drv_pcie2;
341 struct bcma_drv_mips drv_mips; 343 struct bcma_drv_mips drv_mips;
@@ -445,4 +447,6 @@ extern u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset);
445#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */ 447#define BCMA_DMA_TRANSLATION_DMA64_CMT 0x80000000 /* Client Mode Translation for 64-bit DMA */
446extern u32 bcma_core_dma_translation(struct bcma_device *core); 448extern u32 bcma_core_dma_translation(struct bcma_device *core);
447 449
450extern unsigned int bcma_core_irq(struct bcma_device *core, int num);
451
448#endif /* LINUX_BCMA_H_ */ 452#endif /* LINUX_BCMA_H_ */
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 63d105cd14a3..db6fa217f98b 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -644,6 +644,12 @@ struct bcma_drv_cc {
644#endif 644#endif
645}; 645};
646 646
647struct bcma_drv_cc_b {
648 struct bcma_device *core;
649 u8 setup_done:1;
650 void __iomem *mii;
651};
652
647/* Register access */ 653/* Register access */
648#define bcma_cc_read32(cc, offset) \ 654#define bcma_cc_read32(cc, offset) \
649 bcma_read32((cc)->core, offset) 655 bcma_read32((cc)->core, offset)
@@ -699,4 +705,6 @@ extern void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid);
699 705
700extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc); 706extern u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc);
701 707
708void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value);
709
702#endif /* LINUX_BCMA_DRIVER_CC_H_ */ 710#endif /* LINUX_BCMA_DRIVER_CC_H_ */
diff --git a/include/linux/bcma/bcma_driver_mips.h b/include/linux/bcma/bcma_driver_mips.h
index fb61f3fb4ddb..0b3b32aeeb8a 100644
--- a/include/linux/bcma/bcma_driver_mips.h
+++ b/include/linux/bcma/bcma_driver_mips.h
@@ -43,12 +43,12 @@ struct bcma_drv_mips {
43extern void bcma_core_mips_init(struct bcma_drv_mips *mcore); 43extern void bcma_core_mips_init(struct bcma_drv_mips *mcore);
44extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore); 44extern void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
45 45
46extern unsigned int bcma_core_irq(struct bcma_device *core); 46extern unsigned int bcma_core_mips_irq(struct bcma_device *dev);
47#else 47#else
48static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { } 48static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore) { }
49static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { } 49static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { }
50 50
51static inline unsigned int bcma_core_irq(struct bcma_device *core) 51static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
52{ 52{
53 return 0; 53 return 0;
54} 54}
diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h
index 917dcd7965e7..e64ae7bf80a1 100644
--- a/include/linux/bcma/bcma_regs.h
+++ b/include/linux/bcma/bcma_regs.h
@@ -39,6 +39,11 @@
39#define BCMA_RESET_CTL_RESET 0x0001 39#define BCMA_RESET_CTL_RESET 0x0001
40#define BCMA_RESET_ST 0x0804 40#define BCMA_RESET_ST 0x0804
41 41
42#define BCMA_NS_ROM_IOST_BOOT_DEV_MASK 0x0003
43#define BCMA_NS_ROM_IOST_BOOT_DEV_NOR 0x0000
44#define BCMA_NS_ROM_IOST_BOOT_DEV_NAND 0x0001
45#define BCMA_NS_ROM_IOST_BOOT_DEV_ROM 0x0002
46
42/* BCMA PCI config space registers. */ 47/* BCMA PCI config space registers. */
43#define BCMA_PCI_PMCSR 0x44 48#define BCMA_PCI_PMCSR 0x44
44#define BCMA_PCI_PE 0x100 49#define BCMA_PCI_PE 0x100
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h
index 4203c5593b9f..f24d245f8394 100644
--- a/include/linux/bcma/bcma_soc.h
+++ b/include/linux/bcma/bcma_soc.h
@@ -10,6 +10,7 @@ struct bcma_soc {
10}; 10};
11 11
12int __init bcma_host_soc_register(struct bcma_soc *soc); 12int __init bcma_host_soc_register(struct bcma_soc *soc);
13int __init bcma_host_soc_init(struct bcma_soc *soc);
13 14
14int bcma_bus_register(struct bcma_bus *bus); 15int bcma_bus_register(struct bcma_bus *bus);
15 16
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 61f29e5ea840..576e4639ca60 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -53,6 +53,10 @@ struct linux_binprm {
53#define BINPRM_FLAGS_EXECFD_BIT 1 53#define BINPRM_FLAGS_EXECFD_BIT 1
54#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT) 54#define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
55 55
56/* filename of the binary will be inaccessible after exec */
57#define BINPRM_FLAGS_PATH_INACCESSIBLE_BIT 2
58#define BINPRM_FLAGS_PATH_INACCESSIBLE (1 << BINPRM_FLAGS_PATH_INACCESSIBLE_BIT)
59
56/* Function parameter for binfmt->coredump */ 60/* Function parameter for binfmt->coredump */
57struct coredump_params { 61struct coredump_params {
58 const siginfo_t *siginfo; 62 const siginfo_t *siginfo;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b39e5000ff58..efead0b532c4 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -292,7 +292,24 @@ static inline unsigned bio_segments(struct bio *bio)
292 */ 292 */
293#define bio_get(bio) atomic_inc(&(bio)->bi_cnt) 293#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
294 294
295enum bip_flags {
296 BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
297 BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
298 BIP_CTRL_NOCHECK = 1 << 2, /* disable HBA integrity checking */
299 BIP_DISK_NOCHECK = 1 << 3, /* disable disk integrity checking */
300 BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
301};
302
295#if defined(CONFIG_BLK_DEV_INTEGRITY) 303#if defined(CONFIG_BLK_DEV_INTEGRITY)
304
305static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
306{
307 if (bio->bi_rw & REQ_INTEGRITY)
308 return bio->bi_integrity;
309
310 return NULL;
311}
312
296/* 313/*
297 * bio integrity payload 314 * bio integrity payload
298 */ 315 */
@@ -301,21 +318,40 @@ struct bio_integrity_payload {
301 318
302 struct bvec_iter bip_iter; 319 struct bvec_iter bip_iter;
303 320
304 /* kill - should just use bip_vec */
305 void *bip_buf; /* generated integrity data */
306
307 bio_end_io_t *bip_end_io; /* saved I/O completion fn */ 321 bio_end_io_t *bip_end_io; /* saved I/O completion fn */
308 322
309 unsigned short bip_slab; /* slab the bip came from */ 323 unsigned short bip_slab; /* slab the bip came from */
310 unsigned short bip_vcnt; /* # of integrity bio_vecs */ 324 unsigned short bip_vcnt; /* # of integrity bio_vecs */
311 unsigned short bip_max_vcnt; /* integrity bio_vec slots */ 325 unsigned short bip_max_vcnt; /* integrity bio_vec slots */
312 unsigned bip_owns_buf:1; /* should free bip_buf */ 326 unsigned short bip_flags; /* control flags */
313 327
314 struct work_struct bip_work; /* I/O completion */ 328 struct work_struct bip_work; /* I/O completion */
315 329
316 struct bio_vec *bip_vec; 330 struct bio_vec *bip_vec;
317 struct bio_vec bip_inline_vecs[0];/* embedded bvec array */ 331 struct bio_vec bip_inline_vecs[0];/* embedded bvec array */
318}; 332};
333
334static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
335{
336 struct bio_integrity_payload *bip = bio_integrity(bio);
337
338 if (bip)
339 return bip->bip_flags & flag;
340
341 return false;
342}
343
344static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
345{
346 return bip->bip_iter.bi_sector;
347}
348
349static inline void bip_set_seed(struct bio_integrity_payload *bip,
350 sector_t seed)
351{
352 bip->bip_iter.bi_sector = seed;
353}
354
319#endif /* CONFIG_BLK_DEV_INTEGRITY */ 355#endif /* CONFIG_BLK_DEV_INTEGRITY */
320 356
321extern void bio_trim(struct bio *bio, int offset, int size); 357extern void bio_trim(struct bio *bio, int offset, int size);
@@ -342,6 +378,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
342} 378}
343 379
344extern struct bio_set *bioset_create(unsigned int, unsigned int); 380extern struct bio_set *bioset_create(unsigned int, unsigned int);
381extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
345extern void bioset_free(struct bio_set *); 382extern void bioset_free(struct bio_set *);
346extern mempool_t *biovec_create_pool(int pool_entries); 383extern mempool_t *biovec_create_pool(int pool_entries);
347 384
@@ -353,7 +390,6 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
353extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); 390extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
354 391
355extern struct bio_set *fs_bio_set; 392extern struct bio_set *fs_bio_set;
356unsigned int bio_integrity_tag_size(struct bio *bio);
357 393
358static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) 394static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
359{ 395{
@@ -407,6 +443,11 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
407extern void bio_set_pages_dirty(struct bio *bio); 443extern void bio_set_pages_dirty(struct bio *bio);
408extern void bio_check_pages_dirty(struct bio *bio); 444extern void bio_check_pages_dirty(struct bio *bio);
409 445
446void generic_start_io_acct(int rw, unsigned long sectors,
447 struct hd_struct *part);
448void generic_end_io_acct(int rw, struct hd_struct *part,
449 unsigned long start_time);
450
410#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 451#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
411# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform" 452# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
412#endif 453#endif
@@ -661,14 +702,10 @@ struct biovec_slab {
661 for_each_bio(_bio) \ 702 for_each_bio(_bio) \
662 bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) 703 bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)
663 704
664#define bio_integrity(bio) (bio->bi_integrity != NULL)
665
666extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int); 705extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
667extern void bio_integrity_free(struct bio *); 706extern void bio_integrity_free(struct bio *);
668extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int); 707extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
669extern int bio_integrity_enabled(struct bio *bio); 708extern bool bio_integrity_enabled(struct bio *bio);
670extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
671extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
672extern int bio_integrity_prep(struct bio *); 709extern int bio_integrity_prep(struct bio *);
673extern void bio_integrity_endio(struct bio *, int); 710extern void bio_integrity_endio(struct bio *, int);
674extern void bio_integrity_advance(struct bio *, unsigned int); 711extern void bio_integrity_advance(struct bio *, unsigned int);
@@ -680,14 +717,14 @@ extern void bio_integrity_init(void);
680 717
681#else /* CONFIG_BLK_DEV_INTEGRITY */ 718#else /* CONFIG_BLK_DEV_INTEGRITY */
682 719
683static inline int bio_integrity(struct bio *bio) 720static inline void *bio_integrity(struct bio *bio)
684{ 721{
685 return 0; 722 return NULL;
686} 723}
687 724
688static inline int bio_integrity_enabled(struct bio *bio) 725static inline bool bio_integrity_enabled(struct bio *bio)
689{ 726{
690 return 0; 727 return false;
691} 728}
692 729
693static inline int bioset_integrity_create(struct bio_set *bs, int pool_size) 730static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
@@ -733,6 +770,11 @@ static inline void bio_integrity_init(void)
733 return; 770 return;
734} 771}
735 772
773static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
774{
775 return false;
776}
777
736#endif /* CONFIG_BLK_DEV_INTEGRITY */ 778#endif /* CONFIG_BLK_DEV_INTEGRITY */
737 779
738#endif /* CONFIG_BLOCK */ 780#endif /* CONFIG_BLOCK */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index e1c8d080c427..202e4034fe26 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -45,6 +45,7 @@
45 * bitmap_set(dst, pos, nbits) Set specified bit area 45 * bitmap_set(dst, pos, nbits) Set specified bit area
46 * bitmap_clear(dst, pos, nbits) Clear specified bit area 46 * bitmap_clear(dst, pos, nbits) Clear specified bit area
47 * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area 47 * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
48 * bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
48 * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n 49 * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
49 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n 50 * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
50 * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) 51 * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
@@ -60,6 +61,7 @@
60 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region 61 * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region
61 * bitmap_release_region(bitmap, pos, order) Free specified bit region 62 * bitmap_release_region(bitmap, pos, order) Free specified bit region
62 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region 63 * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
64 * bitmap_print_to_pagebuf(list, buf, mask, nbits) Print bitmap src as list/hex
63 */ 65 */
64 66
65/* 67/*
@@ -114,11 +116,36 @@ extern int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
114 116
115extern void bitmap_set(unsigned long *map, unsigned int start, int len); 117extern void bitmap_set(unsigned long *map, unsigned int start, int len);
116extern void bitmap_clear(unsigned long *map, unsigned int start, int len); 118extern void bitmap_clear(unsigned long *map, unsigned int start, int len);
117extern unsigned long bitmap_find_next_zero_area(unsigned long *map, 119
118 unsigned long size, 120extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
119 unsigned long start, 121 unsigned long size,
120 unsigned int nr, 122 unsigned long start,
121 unsigned long align_mask); 123 unsigned int nr,
124 unsigned long align_mask,
125 unsigned long align_offset);
126
127/**
128 * bitmap_find_next_zero_area - find a contiguous aligned zero area
129 * @map: The address to base the search on
130 * @size: The bitmap size in bits
131 * @start: The bitnumber to start searching at
132 * @nr: The number of zeroed bits we're looking for
133 * @align_mask: Alignment mask for zero area
134 *
135 * The @align_mask should be one less than a power of 2; the effect is that
136 * the bit offset of all zero areas this function finds is multiples of that
137 * power of 2. A @align_mask of 0 means no alignment is required.
138 */
139static inline unsigned long
140bitmap_find_next_zero_area(unsigned long *map,
141 unsigned long size,
142 unsigned long start,
143 unsigned int nr,
144 unsigned long align_mask)
145{
146 return bitmap_find_next_zero_area_off(map, size, start, nr,
147 align_mask, 0);
148}
122 149
123extern int bitmap_scnprintf(char *buf, unsigned int len, 150extern int bitmap_scnprintf(char *buf, unsigned int len,
124 const unsigned long *src, int nbits); 151 const unsigned long *src, int nbits);
@@ -145,6 +172,8 @@ extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int o
145extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); 172extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
146extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); 173extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
147extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); 174extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits);
175extern int bitmap_print_to_pagebuf(bool list, char *buf,
176 const unsigned long *maskp, int nmaskbits);
148 177
149#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG)) 178#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
150#define BITMAP_LAST_WORD_MASK(nbits) \ 179#define BITMAP_LAST_WORD_MASK(nbits) \
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index cbc5833fb221..5d858e02997f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -18,8 +18,11 @@
18 * position @h. For example 18 * position @h. For example
19 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 19 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
20 */ 20 */
21#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) 21#define GENMASK(h, l) \
22#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) 22 (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
23
24#define GENMASK_ULL(h, l) \
25 (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
23 26
24extern unsigned int __sw_hweight8(unsigned int w); 27extern unsigned int __sw_hweight8(unsigned int w);
25extern unsigned int __sw_hweight16(unsigned int w); 28extern unsigned int __sw_hweight16(unsigned int w);
@@ -32,26 +35,6 @@ extern unsigned long __sw_hweight64(__u64 w);
32 */ 35 */
33#include <asm/bitops.h> 36#include <asm/bitops.h>
34 37
35/*
36 * Provide __deprecated wrappers for the new interface, avoid flag day changes.
37 * We need the ugly external functions to break header recursion hell.
38 */
39#ifndef smp_mb__before_clear_bit
40static inline void __deprecated smp_mb__before_clear_bit(void)
41{
42 extern void __smp_mb__before_atomic(void);
43 __smp_mb__before_atomic();
44}
45#endif
46
47#ifndef smp_mb__after_clear_bit
48static inline void __deprecated smp_mb__after_clear_bit(void)
49{
50 extern void __smp_mb__after_atomic(void);
51 __smp_mb__after_atomic();
52}
53#endif
54
55#define for_each_set_bit(bit, addr, size) \ 38#define for_each_set_bit(bit, addr, size) \
56 for ((bit) = find_first_bit((addr), (size)); \ 39 for ((bit) = find_first_bit((addr), (size)); \
57 (bit) < (size); \ 40 (bit) < (size); \
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a1e31f274fcd..8aded9ab2e4e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -4,6 +4,7 @@
4#include <linux/blkdev.h> 4#include <linux/blkdev.h>
5 5
6struct blk_mq_tags; 6struct blk_mq_tags;
7struct blk_flush_queue;
7 8
8struct blk_mq_cpu_notifier { 9struct blk_mq_cpu_notifier {
9 struct list_head list; 10 struct list_head list;
@@ -34,6 +35,7 @@ struct blk_mq_hw_ctx {
34 35
35 struct request_queue *queue; 36 struct request_queue *queue;
36 unsigned int queue_num; 37 unsigned int queue_num;
38 struct blk_flush_queue *fq;
37 39
38 void *driver_data; 40 void *driver_data;
39 41
@@ -77,8 +79,15 @@ struct blk_mq_tag_set {
77 struct list_head tag_list; 79 struct list_head tag_list;
78}; 80};
79 81
80typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *); 82struct blk_mq_queue_data {
83 struct request *rq;
84 struct list_head *list;
85 bool last;
86};
87
88typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
81typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int); 89typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
90typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
82typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int); 91typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
83typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int); 92typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
84typedef int (init_request_fn)(void *, struct request *, unsigned int, 93typedef int (init_request_fn)(void *, struct request *, unsigned int,
@@ -86,6 +95,9 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int,
86typedef void (exit_request_fn)(void *, struct request *, unsigned int, 95typedef void (exit_request_fn)(void *, struct request *, unsigned int,
87 unsigned int); 96 unsigned int);
88 97
98typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
99 bool);
100
89struct blk_mq_ops { 101struct blk_mq_ops {
90 /* 102 /*
91 * Queue request 103 * Queue request
@@ -100,7 +112,7 @@ struct blk_mq_ops {
100 /* 112 /*
101 * Called on request timeout 113 * Called on request timeout
102 */ 114 */
103 rq_timed_out_fn *timeout; 115 timeout_fn *timeout;
104 116
105 softirq_done_fn *complete; 117 softirq_done_fn *complete;
106 118
@@ -115,6 +127,10 @@ struct blk_mq_ops {
115 /* 127 /*
116 * Called for every command allocated by the block layer to allow 128 * Called for every command allocated by the block layer to allow
117 * the driver to set up driver specific data. 129 * the driver to set up driver specific data.
130 *
131 * Tag greater than or equal to queue_depth is for setting up
132 * flush request.
133 *
118 * Ditto for exit/teardown. 134 * Ditto for exit/teardown.
119 */ 135 */
120 init_request_fn *init_request; 136 init_request_fn *init_request;
@@ -130,6 +146,7 @@ enum {
130 BLK_MQ_F_TAG_SHARED = 1 << 1, 146 BLK_MQ_F_TAG_SHARED = 1 << 1,
131 BLK_MQ_F_SG_MERGE = 1 << 2, 147 BLK_MQ_F_SG_MERGE = 1 << 2,
132 BLK_MQ_F_SYSFS_UP = 1 << 3, 148 BLK_MQ_F_SYSFS_UP = 1 << 3,
149 BLK_MQ_F_DEFER_ISSUE = 1 << 4,
133 150
134 BLK_MQ_S_STOPPED = 0, 151 BLK_MQ_S_STOPPED = 0,
135 BLK_MQ_S_TAG_ACTIVE = 1, 152 BLK_MQ_S_TAG_ACTIVE = 1,
@@ -140,6 +157,7 @@ enum {
140}; 157};
141 158
142struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); 159struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
160void blk_mq_finish_init(struct request_queue *q);
143int blk_mq_register_disk(struct gendisk *); 161int blk_mq_register_disk(struct gendisk *);
144void blk_mq_unregister_disk(struct gendisk *); 162void blk_mq_unregister_disk(struct gendisk *);
145 163
@@ -151,16 +169,35 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
151void blk_mq_insert_request(struct request *, bool, bool, bool); 169void blk_mq_insert_request(struct request *, bool, bool, bool);
152void blk_mq_run_queues(struct request_queue *q, bool async); 170void blk_mq_run_queues(struct request_queue *q, bool async);
153void blk_mq_free_request(struct request *rq); 171void blk_mq_free_request(struct request *rq);
172void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
154bool blk_mq_can_queue(struct blk_mq_hw_ctx *); 173bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
155struct request *blk_mq_alloc_request(struct request_queue *q, int rw, 174struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
156 gfp_t gfp, bool reserved); 175 gfp_t gfp, bool reserved);
157struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag); 176struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
158 177
178enum {
179 BLK_MQ_UNIQUE_TAG_BITS = 16,
180 BLK_MQ_UNIQUE_TAG_MASK = (1 << BLK_MQ_UNIQUE_TAG_BITS) - 1,
181};
182
183u32 blk_mq_unique_tag(struct request *rq);
184
185static inline u16 blk_mq_unique_tag_to_hwq(u32 unique_tag)
186{
187 return unique_tag >> BLK_MQ_UNIQUE_TAG_BITS;
188}
189
190static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
191{
192 return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
193}
194
159struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); 195struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
160struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); 196struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
161 197
162void blk_mq_end_io(struct request *rq, int error); 198void blk_mq_start_request(struct request *rq);
163void __blk_mq_end_io(struct request *rq, int error); 199void blk_mq_end_request(struct request *rq, int error);
200void __blk_mq_end_request(struct request *rq, int error);
164 201
165void blk_mq_requeue_request(struct request *rq); 202void blk_mq_requeue_request(struct request *rq);
166void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); 203void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
@@ -173,7 +210,8 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
173void blk_mq_start_hw_queues(struct request_queue *q); 210void blk_mq_start_hw_queues(struct request_queue *q);
174void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); 211void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
175void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 212void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
176void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data); 213void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
214 void *priv);
177 215
178/* 216/*
179 * Driver command data is immediately after the request. So subtract request 217 * Driver command data is immediately after the request. So subtract request
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 66c2167f04a9..445d59231bc4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -78,9 +78,11 @@ struct bio {
78 struct io_context *bi_ioc; 78 struct io_context *bi_ioc;
79 struct cgroup_subsys_state *bi_css; 79 struct cgroup_subsys_state *bi_css;
80#endif 80#endif
81 union {
81#if defined(CONFIG_BLK_DEV_INTEGRITY) 82#if defined(CONFIG_BLK_DEV_INTEGRITY)
82 struct bio_integrity_payload *bi_integrity; /* data integrity */ 83 struct bio_integrity_payload *bi_integrity; /* data integrity */
83#endif 84#endif
85 };
84 86
85 unsigned short bi_vcnt; /* how many bio_vec's */ 87 unsigned short bi_vcnt; /* how many bio_vec's */
86 88
@@ -118,10 +120,8 @@ struct bio {
118#define BIO_USER_MAPPED 6 /* contains user pages */ 120#define BIO_USER_MAPPED 6 /* contains user pages */
119#define BIO_EOPNOTSUPP 7 /* not supported */ 121#define BIO_EOPNOTSUPP 7 /* not supported */
120#define BIO_NULL_MAPPED 8 /* contains invalid user pages */ 122#define BIO_NULL_MAPPED 8 /* contains invalid user pages */
121#define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */ 123#define BIO_QUIET 9 /* Make BIO Quiet */
122#define BIO_QUIET 10 /* Make BIO Quiet */ 124#define BIO_SNAP_STABLE 10 /* bio data must be snapshotted during write */
123#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */
124#define BIO_SNAP_STABLE 12 /* bio data must be snapshotted during write */
125 125
126/* 126/*
127 * Flags starting here get preserved by bio_reset() - this includes 127 * Flags starting here get preserved by bio_reset() - this includes
@@ -162,6 +162,7 @@ enum rq_flag_bits {
162 __REQ_WRITE_SAME, /* write same block many times */ 162 __REQ_WRITE_SAME, /* write same block many times */
163 163
164 __REQ_NOIDLE, /* don't anticipate more IO after this one */ 164 __REQ_NOIDLE, /* don't anticipate more IO after this one */
165 __REQ_INTEGRITY, /* I/O includes block integrity payload */
165 __REQ_FUA, /* forced unit access */ 166 __REQ_FUA, /* forced unit access */
166 __REQ_FLUSH, /* request for cache flush */ 167 __REQ_FLUSH, /* request for cache flush */
167 168
@@ -186,9 +187,7 @@ enum rq_flag_bits {
186 __REQ_FLUSH_SEQ, /* request for flush sequence */ 187 __REQ_FLUSH_SEQ, /* request for flush sequence */
187 __REQ_IO_STAT, /* account I/O stat */ 188 __REQ_IO_STAT, /* account I/O stat */
188 __REQ_MIXED_MERGE, /* merge of different types, fail separately */ 189 __REQ_MIXED_MERGE, /* merge of different types, fail separately */
189 __REQ_KERNEL, /* direct IO to kernel pages */
190 __REQ_PM, /* runtime pm request */ 190 __REQ_PM, /* runtime pm request */
191 __REQ_END, /* last of chain of requests */
192 __REQ_HASHED, /* on IO scheduler merge hash */ 191 __REQ_HASHED, /* on IO scheduler merge hash */
193 __REQ_MQ_INFLIGHT, /* track inflight for MQ */ 192 __REQ_MQ_INFLIGHT, /* track inflight for MQ */
194 __REQ_NR_BITS, /* stops here */ 193 __REQ_NR_BITS, /* stops here */
@@ -204,13 +203,14 @@ enum rq_flag_bits {
204#define REQ_DISCARD (1ULL << __REQ_DISCARD) 203#define REQ_DISCARD (1ULL << __REQ_DISCARD)
205#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME) 204#define REQ_WRITE_SAME (1ULL << __REQ_WRITE_SAME)
206#define REQ_NOIDLE (1ULL << __REQ_NOIDLE) 205#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
206#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
207 207
208#define REQ_FAILFAST_MASK \ 208#define REQ_FAILFAST_MASK \
209 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 209 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
210#define REQ_COMMON_MASK \ 210#define REQ_COMMON_MASK \
211 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \ 211 (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
212 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \ 212 REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
213 REQ_SECURE) 213 REQ_SECURE | REQ_INTEGRITY)
214#define REQ_CLONE_MASK REQ_COMMON_MASK 214#define REQ_CLONE_MASK REQ_COMMON_MASK
215 215
216#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME) 216#define BIO_NO_ADVANCE_ITER_MASK (REQ_DISCARD|REQ_WRITE_SAME)
@@ -240,9 +240,7 @@ enum rq_flag_bits {
240#define REQ_IO_STAT (1ULL << __REQ_IO_STAT) 240#define REQ_IO_STAT (1ULL << __REQ_IO_STAT)
241#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE) 241#define REQ_MIXED_MERGE (1ULL << __REQ_MIXED_MERGE)
242#define REQ_SECURE (1ULL << __REQ_SECURE) 242#define REQ_SECURE (1ULL << __REQ_SECURE)
243#define REQ_KERNEL (1ULL << __REQ_KERNEL)
244#define REQ_PM (1ULL << __REQ_PM) 243#define REQ_PM (1ULL << __REQ_PM)
245#define REQ_END (1ULL << __REQ_END)
246#define REQ_HASHED (1ULL << __REQ_HASHED) 244#define REQ_HASHED (1ULL << __REQ_HASHED)
247#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) 245#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
248 246
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 518b46555b80..92f4b4b288dd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -36,6 +36,7 @@ struct request;
36struct sg_io_hdr; 36struct sg_io_hdr;
37struct bsg_job; 37struct bsg_job;
38struct blkcg_gq; 38struct blkcg_gq;
39struct blk_flush_queue;
39 40
40#define BLKDEV_MIN_RQ 4 41#define BLKDEV_MIN_RQ 4
41#define BLKDEV_MAX_RQ 128 /* Default maximum */ 42#define BLKDEV_MAX_RQ 128 /* Default maximum */
@@ -397,7 +398,7 @@ struct request_queue {
397 */ 398 */
398 struct kobject mq_kobj; 399 struct kobject mq_kobj;
399 400
400#ifdef CONFIG_PM_RUNTIME 401#ifdef CONFIG_PM
401 struct device *dev; 402 struct device *dev;
402 int rpm_status; 403 int rpm_status;
403 unsigned int nr_pending; 404 unsigned int nr_pending;
@@ -455,14 +456,7 @@ struct request_queue {
455 */ 456 */
456 unsigned int flush_flags; 457 unsigned int flush_flags;
457 unsigned int flush_not_queueable:1; 458 unsigned int flush_not_queueable:1;
458 unsigned int flush_queue_delayed:1; 459 struct blk_flush_queue *fq;
459 unsigned int flush_pending_idx:1;
460 unsigned int flush_running_idx:1;
461 unsigned long flush_pending_since;
462 struct list_head flush_queue[2];
463 struct list_head flush_data_in_flight;
464 struct request *flush_rq;
465 spinlock_t mq_flush_lock;
466 460
467 struct list_head requeue_list; 461 struct list_head requeue_list;
468 spinlock_t requeue_lock; 462 spinlock_t requeue_lock;
@@ -865,7 +859,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
865 859
866static inline struct request_queue *bdev_get_queue(struct block_device *bdev) 860static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
867{ 861{
868 return bdev->bd_disk->queue; 862 return bdev->bd_disk->queue; /* this is never NULL */
869} 863}
870 864
871/* 865/*
@@ -1063,7 +1057,7 @@ extern void blk_put_queue(struct request_queue *);
1063/* 1057/*
1064 * block layer runtime pm functions 1058 * block layer runtime pm functions
1065 */ 1059 */
1066#ifdef CONFIG_PM_RUNTIME 1060#ifdef CONFIG_PM
1067extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev); 1061extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1068extern int blk_pre_runtime_suspend(struct request_queue *q); 1062extern int blk_pre_runtime_suspend(struct request_queue *q);
1069extern void blk_post_runtime_suspend(struct request_queue *q, int err); 1063extern void blk_post_runtime_suspend(struct request_queue *q, int err);
@@ -1142,8 +1136,6 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1142/* 1136/*
1143 * tag stuff 1137 * tag stuff
1144 */ 1138 */
1145#define blk_rq_tagged(rq) \
1146 ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
1147extern int blk_queue_start_tag(struct request_queue *, struct request *); 1139extern int blk_queue_start_tag(struct request_queue *, struct request *);
1148extern struct request *blk_queue_find_tag(struct request_queue *, int); 1140extern struct request *blk_queue_find_tag(struct request_queue *, int);
1149extern void blk_queue_end_tag(struct request_queue *, struct request *); 1141extern void blk_queue_end_tag(struct request_queue *, struct request *);
@@ -1192,7 +1184,6 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
1192enum blk_default_limits { 1184enum blk_default_limits {
1193 BLK_MAX_SEGMENTS = 128, 1185 BLK_MAX_SEGMENTS = 128,
1194 BLK_SAFE_MAX_SECTORS = 255, 1186 BLK_SAFE_MAX_SECTORS = 255,
1195 BLK_DEF_MAX_SECTORS = 1024,
1196 BLK_MAX_SEGMENT_SIZE = 65536, 1187 BLK_MAX_SEGMENT_SIZE = 65536,
1197 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, 1188 BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
1198}; 1189};
@@ -1285,10 +1276,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
1285static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) 1276static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
1286{ 1277{
1287 unsigned int granularity = max(lim->physical_block_size, lim->io_min); 1278 unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1288 unsigned int alignment = (sector << 9) & (granularity - 1); 1279 unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
1289 1280
1290 return (granularity + lim->alignment_offset - alignment) 1281 return (granularity + lim->alignment_offset - alignment) % granularity;
1291 & (granularity - 1);
1292} 1282}
1293 1283
1294static inline int bdev_alignment_offset(struct block_device *bdev) 1284static inline int bdev_alignment_offset(struct block_device *bdev)
@@ -1464,32 +1454,31 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
1464 1454
1465#if defined(CONFIG_BLK_DEV_INTEGRITY) 1455#if defined(CONFIG_BLK_DEV_INTEGRITY)
1466 1456
1467#define INTEGRITY_FLAG_READ 2 /* verify data integrity on read */ 1457enum blk_integrity_flags {
1468#define INTEGRITY_FLAG_WRITE 4 /* generate data integrity on write */ 1458 BLK_INTEGRITY_VERIFY = 1 << 0,
1459 BLK_INTEGRITY_GENERATE = 1 << 1,
1460 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
1461 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
1462};
1469 1463
1470struct blk_integrity_exchg { 1464struct blk_integrity_iter {
1471 void *prot_buf; 1465 void *prot_buf;
1472 void *data_buf; 1466 void *data_buf;
1473 sector_t sector; 1467 sector_t seed;
1474 unsigned int data_size; 1468 unsigned int data_size;
1475 unsigned short sector_size; 1469 unsigned short interval;
1476 const char *disk_name; 1470 const char *disk_name;
1477}; 1471};
1478 1472
1479typedef void (integrity_gen_fn) (struct blk_integrity_exchg *); 1473typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
1480typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
1481typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
1482typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
1483 1474
1484struct blk_integrity { 1475struct blk_integrity {
1485 integrity_gen_fn *generate_fn; 1476 integrity_processing_fn *generate_fn;
1486 integrity_vrfy_fn *verify_fn; 1477 integrity_processing_fn *verify_fn;
1487 integrity_set_tag_fn *set_tag_fn;
1488 integrity_get_tag_fn *get_tag_fn;
1489 1478
1490 unsigned short flags; 1479 unsigned short flags;
1491 unsigned short tuple_size; 1480 unsigned short tuple_size;
1492 unsigned short sector_size; 1481 unsigned short interval;
1493 unsigned short tag_size; 1482 unsigned short tag_size;
1494 1483
1495 const char *name; 1484 const char *name;
@@ -1504,10 +1493,10 @@ extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
1504extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, 1493extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1505 struct scatterlist *); 1494 struct scatterlist *);
1506extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); 1495extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1507extern int blk_integrity_merge_rq(struct request_queue *, struct request *, 1496extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1508 struct request *); 1497 struct request *);
1509extern int blk_integrity_merge_bio(struct request_queue *, struct request *, 1498extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1510 struct bio *); 1499 struct bio *);
1511 1500
1512static inline 1501static inline
1513struct blk_integrity *bdev_get_integrity(struct block_device *bdev) 1502struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1520,12 +1509,9 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1520 return disk->integrity; 1509 return disk->integrity;
1521} 1510}
1522 1511
1523static inline int blk_integrity_rq(struct request *rq) 1512static inline bool blk_integrity_rq(struct request *rq)
1524{ 1513{
1525 if (rq->bio == NULL) 1514 return rq->cmd_flags & REQ_INTEGRITY;
1526 return 0;
1527
1528 return bio_integrity(rq->bio);
1529} 1515}
1530 1516
1531static inline void blk_queue_max_integrity_segments(struct request_queue *q, 1517static inline void blk_queue_max_integrity_segments(struct request_queue *q,
@@ -1564,7 +1550,7 @@ static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1564} 1550}
1565static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) 1551static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1566{ 1552{
1567 return 0; 1553 return NULL;
1568} 1554}
1569static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) 1555static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1570{ 1556{
@@ -1590,17 +1576,17 @@ static inline unsigned short queue_max_integrity_segments(struct request_queue *
1590{ 1576{
1591 return 0; 1577 return 0;
1592} 1578}
1593static inline int blk_integrity_merge_rq(struct request_queue *rq, 1579static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1594 struct request *r1, 1580 struct request *r1,
1595 struct request *r2) 1581 struct request *r2)
1596{ 1582{
1597 return 0; 1583 return true;
1598} 1584}
1599static inline int blk_integrity_merge_bio(struct request_queue *rq, 1585static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1600 struct request *r, 1586 struct request *r,
1601 struct bio *b) 1587 struct bio *b)
1602{ 1588{
1603 return 0; 1589 return true;
1604} 1590}
1605static inline bool blk_integrity_is_initialized(struct gendisk *g) 1591static inline bool blk_integrity_is_initialized(struct gendisk *g)
1606{ 1592{
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 4e2bd4c95b66..0995c2de8162 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
46extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); 46extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
47 47
48extern unsigned long free_all_bootmem(void); 48extern unsigned long free_all_bootmem(void);
49extern void reset_node_managed_pages(pg_data_t *pgdat);
49extern void reset_all_zones_managed_pages(void); 50extern void reset_all_zones_managed_pages(void);
50 51
51extern void free_bootmem_node(pg_data_t *pgdat, 52extern void free_bootmem_node(pg_data_t *pgdat,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
new file mode 100644
index 000000000000..bbfceb756452
--- /dev/null
+++ b/include/linux/bpf.h
@@ -0,0 +1,145 @@
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of version 2 of the GNU General Public
5 * License as published by the Free Software Foundation.
6 */
7#ifndef _LINUX_BPF_H
8#define _LINUX_BPF_H 1
9
10#include <uapi/linux/bpf.h>
11#include <linux/workqueue.h>
12#include <linux/file.h>
13
14struct bpf_map;
15
16/* map is generic key/value storage optionally accesible by eBPF programs */
17struct bpf_map_ops {
18 /* funcs callable from userspace (via syscall) */
19 struct bpf_map *(*map_alloc)(union bpf_attr *attr);
20 void (*map_free)(struct bpf_map *);
21 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
22
23 /* funcs callable from userspace and from eBPF programs */
24 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
25 int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
26 int (*map_delete_elem)(struct bpf_map *map, void *key);
27};
28
29struct bpf_map {
30 atomic_t refcnt;
31 enum bpf_map_type map_type;
32 u32 key_size;
33 u32 value_size;
34 u32 max_entries;
35 struct bpf_map_ops *ops;
36 struct work_struct work;
37};
38
39struct bpf_map_type_list {
40 struct list_head list_node;
41 struct bpf_map_ops *ops;
42 enum bpf_map_type type;
43};
44
45void bpf_register_map_type(struct bpf_map_type_list *tl);
46void bpf_map_put(struct bpf_map *map);
47struct bpf_map *bpf_map_get(struct fd f);
48
49/* function argument constraints */
50enum bpf_arg_type {
51 ARG_ANYTHING = 0, /* any argument is ok */
52
53 /* the following constraints used to prototype
54 * bpf_map_lookup/update/delete_elem() functions
55 */
56 ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */
57 ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */
58 ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */
59
60 /* the following constraints used to prototype bpf_memcmp() and other
61 * functions that access data on eBPF program stack
62 */
63 ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
64 ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
65};
66
67/* type of values returned from helper functions */
68enum bpf_return_type {
69 RET_INTEGER, /* function returns integer */
70 RET_VOID, /* function doesn't return anything */
71 RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
72};
73
74/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
75 * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL
76 * instructions after verifying
77 */
78struct bpf_func_proto {
79 u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
80 bool gpl_only;
81 enum bpf_return_type ret_type;
82 enum bpf_arg_type arg1_type;
83 enum bpf_arg_type arg2_type;
84 enum bpf_arg_type arg3_type;
85 enum bpf_arg_type arg4_type;
86 enum bpf_arg_type arg5_type;
87};
88
89/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
90 * the first argument to eBPF programs.
91 * For socket filters: 'struct bpf_context *' == 'struct sk_buff *'
92 */
93struct bpf_context;
94
95enum bpf_access_type {
96 BPF_READ = 1,
97 BPF_WRITE = 2
98};
99
100struct bpf_verifier_ops {
101 /* return eBPF function prototype for verification */
102 const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id);
103
104 /* return true if 'size' wide access at offset 'off' within bpf_context
105 * with 'type' (read or write) is allowed
106 */
107 bool (*is_valid_access)(int off, int size, enum bpf_access_type type);
108};
109
110struct bpf_prog_type_list {
111 struct list_head list_node;
112 struct bpf_verifier_ops *ops;
113 enum bpf_prog_type type;
114};
115
116void bpf_register_prog_type(struct bpf_prog_type_list *tl);
117
118struct bpf_prog;
119
120struct bpf_prog_aux {
121 atomic_t refcnt;
122 bool is_gpl_compatible;
123 enum bpf_prog_type prog_type;
124 struct bpf_verifier_ops *ops;
125 struct bpf_map **used_maps;
126 u32 used_map_cnt;
127 struct bpf_prog *prog;
128 struct work_struct work;
129};
130
131#ifdef CONFIG_BPF_SYSCALL
132void bpf_prog_put(struct bpf_prog *prog);
133#else
134static inline void bpf_prog_put(struct bpf_prog *prog) {}
135#endif
136struct bpf_prog *bpf_prog_get(u32 ufd);
137/* verify correctness of eBPF program */
138int bpf_check(struct bpf_prog *fp, union bpf_attr *attr);
139
140/* verifier prototypes for helper functions called from eBPF programs */
141extern struct bpf_func_proto bpf_map_lookup_elem_proto;
142extern struct bpf_func_proto bpf_map_update_elem_proto;
143extern struct bpf_func_proto bpf_map_delete_elem_proto;
144
145#endif /* _LINUX_BPF_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 61219b9b3445..7ccd928cc1f2 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -13,7 +13,11 @@
13#define PHY_ID_BCM5461 0x002060c0 13#define PHY_ID_BCM5461 0x002060c0
14#define PHY_ID_BCM57780 0x03625d90 14#define PHY_ID_BCM57780 0x03625d90
15 15
16#define PHY_ID_BCM7250 0xae025280
17#define PHY_ID_BCM7364 0xae025260
16#define PHY_ID_BCM7366 0x600d8490 18#define PHY_ID_BCM7366 0x600d8490
19#define PHY_ID_BCM7425 0x03625e60
20#define PHY_ID_BCM7429 0x600d8730
17#define PHY_ID_BCM7439 0x600d8480 21#define PHY_ID_BCM7439 0x600d8480
18#define PHY_ID_BCM7445 0x600d8510 22#define PHY_ID_BCM7445 0x600d8510
19 23
@@ -21,9 +25,9 @@
21#define PHY_BCM_OUI_1 0x00206000 25#define PHY_BCM_OUI_1 0x00206000
22#define PHY_BCM_OUI_2 0x0143bc00 26#define PHY_BCM_OUI_2 0x0143bc00
23#define PHY_BCM_OUI_3 0x03625c00 27#define PHY_BCM_OUI_3 0x03625c00
24#define PHY_BCM_OUI_4 0x600d0000 28#define PHY_BCM_OUI_4 0x600d8400
25#define PHY_BCM_OUI_5 0x03625e00 29#define PHY_BCM_OUI_5 0x03625e00
26 30#define PHY_BCM_OUI_6 0xae025000
27 31
28#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001 32#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
29#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002 33#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
@@ -38,7 +42,8 @@
38#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000 42#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
39#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000 43#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
40/* Broadcom BCM7xxx specific workarounds */ 44/* Broadcom BCM7xxx specific workarounds */
41#define PHY_BRCM_100MBPS_WAR 0x00010000 45#define PHY_BRCM_7XXX_REV(x) (((x) >> 8) & 0xff)
46#define PHY_BRCM_7XXX_PATCH(x) ((x) & 0xff)
42#define PHY_BCM_FLAGS_VALID 0x80000000 47#define PHY_BCM_FLAGS_VALID 0x80000000
43 48
44/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */ 49/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */
@@ -92,4 +97,130 @@
92 97
93#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000 98#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
94 99
100/*
101 * Broadcom LED source encodings. These are used in BCM5461, BCM5481,
102 * BCM5482, and possibly some others.
103 */
104#define BCM_LED_SRC_LINKSPD1 0x0
105#define BCM_LED_SRC_LINKSPD2 0x1
106#define BCM_LED_SRC_XMITLED 0x2
107#define BCM_LED_SRC_ACTIVITYLED 0x3
108#define BCM_LED_SRC_FDXLED 0x4
109#define BCM_LED_SRC_SLAVE 0x5
110#define BCM_LED_SRC_INTR 0x6
111#define BCM_LED_SRC_QUALITY 0x7
112#define BCM_LED_SRC_RCVLED 0x8
113#define BCM_LED_SRC_MULTICOLOR1 0xa
114#define BCM_LED_SRC_OPENSHORT 0xb
115#define BCM_LED_SRC_OFF 0xe /* Tied high */
116#define BCM_LED_SRC_ON 0xf /* Tied low */
117
118
119/*
120 * BCM5482: Shadow registers
121 * Shadow values go into bits [14:10] of register 0x1c to select a shadow
122 * register to access.
123 */
124/* 00101: Spare Control Register 3 */
125#define BCM54XX_SHD_SCR3 0x05
126#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
127#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
128#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
129
130/* 01010: Auto Power-Down */
131#define BCM54XX_SHD_APD 0x0a
132#define BCM54XX_SHD_APD_EN 0x0020
133
134#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
135 /* LED3 / ~LINKSPD[2] selector */
136#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
137 /* LED1 / ~LINKSPD[1] selector */
138#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
139#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
140#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
141#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
142#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
143#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
144#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
145
146
147/*
148 * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
149 */
150#define MII_BCM54XX_EXP_AADJ1CH0 0x001f
151#define MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN 0x0200
152#define MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF 0x0100
153#define MII_BCM54XX_EXP_AADJ1CH3 0x601f
154#define MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ 0x0002
155#define MII_BCM54XX_EXP_EXP08 0x0F08
156#define MII_BCM54XX_EXP_EXP08_RJCT_2MHZ 0x0001
157#define MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE 0x0200
158#define MII_BCM54XX_EXP_EXP75 0x0f75
159#define MII_BCM54XX_EXP_EXP75_VDACCTRL 0x003c
160#define MII_BCM54XX_EXP_EXP75_CM_OSC 0x0001
161#define MII_BCM54XX_EXP_EXP96 0x0f96
162#define MII_BCM54XX_EXP_EXP96_MYST 0x0010
163#define MII_BCM54XX_EXP_EXP97 0x0f97
164#define MII_BCM54XX_EXP_EXP97_MYST 0x0c0c
165
166/*
167 * BCM5482: Secondary SerDes registers
168 */
169#define BCM5482_SSD_1000BX_CTL 0x00 /* 1000BASE-X Control */
170#define BCM5482_SSD_1000BX_CTL_PWRDOWN 0x0800 /* Power-down SSD */
171#define BCM5482_SSD_SGMII_SLAVE 0x15 /* SGMII Slave Register */
172#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
173#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
174
175
176/*****************************************************************************/
177/* Fast Ethernet Transceiver definitions. */
178/*****************************************************************************/
179
180#define MII_BRCM_FET_INTREG 0x1a /* Interrupt register */
181#define MII_BRCM_FET_IR_MASK 0x0100 /* Mask all interrupts */
182#define MII_BRCM_FET_IR_LINK_EN 0x0200 /* Link status change enable */
183#define MII_BRCM_FET_IR_SPEED_EN 0x0400 /* Link speed change enable */
184#define MII_BRCM_FET_IR_DUPLEX_EN 0x0800 /* Duplex mode change enable */
185#define MII_BRCM_FET_IR_ENABLE 0x4000 /* Interrupt enable */
186
187#define MII_BRCM_FET_BRCMTEST 0x1f /* Brcm test register */
188#define MII_BRCM_FET_BT_SRE 0x0080 /* Shadow register enable */
189
190
191/*** Shadow register definitions ***/
192
193#define MII_BRCM_FET_SHDW_MISCCTRL 0x10 /* Shadow misc ctrl */
194#define MII_BRCM_FET_SHDW_MC_FAME 0x4000 /* Force Auto MDIX enable */
195
196#define MII_BRCM_FET_SHDW_AUXMODE4 0x1a /* Auxiliary mode 4 */
197#define MII_BRCM_FET_SHDW_AM4_LED_MASK 0x0003
198#define MII_BRCM_FET_SHDW_AM4_LED_MODE1 0x0001
199
200#define MII_BRCM_FET_SHDW_AUXSTAT2 0x1b /* Auxiliary status 2 */
201#define MII_BRCM_FET_SHDW_AS2_APDE 0x0020 /* Auto power down enable */
202
203/*
204 * Indirect register access functions for the 1000BASE-T/100BASE-TX/10BASE-T
205 * 0x1c shadow registers.
206 */
207static inline int bcm54xx_shadow_read(struct phy_device *phydev, u16 shadow)
208{
209 phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
210 return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
211}
212
213static inline int bcm54xx_shadow_write(struct phy_device *phydev, u16 shadow,
214 u16 val)
215{
216 return phy_write(phydev, MII_BCM54XX_SHD,
217 MII_BCM54XX_SHD_WRITE |
218 MII_BCM54XX_SHD_VAL(shadow) |
219 MII_BCM54XX_SHD_DATA(val));
220}
221
222#define BRCM_CL45VEN_EEE_CONTROL 0x803d
223#define LPI_FEATURE_EN 0x8000
224#define LPI_FEATURE_EN_DIG1000X 0x4000
225
95#endif /* _LINUX_BRCMPHY_H */ 226#endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 324329ceea1e..73b45225a7ca 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -175,12 +175,13 @@ void __wait_on_buffer(struct buffer_head *);
175wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); 175wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
176struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, 176struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
177 unsigned size); 177 unsigned size);
178struct buffer_head *__getblk(struct block_device *bdev, sector_t block, 178struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
179 unsigned size); 179 unsigned size, gfp_t gfp);
180void __brelse(struct buffer_head *); 180void __brelse(struct buffer_head *);
181void __bforget(struct buffer_head *); 181void __bforget(struct buffer_head *);
182void __breadahead(struct block_device *, sector_t block, unsigned int size); 182void __breadahead(struct block_device *, sector_t block, unsigned int size);
183struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size); 183struct buffer_head *__bread_gfp(struct block_device *,
184 sector_t block, unsigned size, gfp_t gfp);
184void invalidate_bh_lrus(void); 185void invalidate_bh_lrus(void);
185struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); 186struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
186void free_buffer_head(struct buffer_head * bh); 187void free_buffer_head(struct buffer_head * bh);
@@ -295,7 +296,13 @@ static inline void bforget(struct buffer_head *bh)
295static inline struct buffer_head * 296static inline struct buffer_head *
296sb_bread(struct super_block *sb, sector_t block) 297sb_bread(struct super_block *sb, sector_t block)
297{ 298{
298 return __bread(sb->s_bdev, block, sb->s_blocksize); 299 return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
300}
301
302static inline struct buffer_head *
303sb_bread_unmovable(struct super_block *sb, sector_t block)
304{
305 return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
299} 306}
300 307
301static inline void 308static inline void
@@ -307,7 +314,7 @@ sb_breadahead(struct super_block *sb, sector_t block)
307static inline struct buffer_head * 314static inline struct buffer_head *
308sb_getblk(struct super_block *sb, sector_t block) 315sb_getblk(struct super_block *sb, sector_t block)
309{ 316{
310 return __getblk(sb->s_bdev, block, sb->s_blocksize); 317 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
311} 318}
312 319
313static inline struct buffer_head * 320static inline struct buffer_head *
@@ -344,6 +351,36 @@ static inline void lock_buffer(struct buffer_head *bh)
344 __lock_buffer(bh); 351 __lock_buffer(bh);
345} 352}
346 353
354static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
355 sector_t block,
356 unsigned size)
357{
358 return __getblk_gfp(bdev, block, size, 0);
359}
360
361static inline struct buffer_head *__getblk(struct block_device *bdev,
362 sector_t block,
363 unsigned size)
364{
365 return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
366}
367
368/**
369 * __bread() - reads a specified block and returns the bh
370 * @bdev: the block_device to read from
371 * @block: number of block
372 * @size: size (in bytes) to read
373 *
374 * Reads a specified block, and returns buffer head that contains it.
375 * The page cache is allocated from movable area so that it can be migrated.
376 * It returns NULL if the block was unreadable.
377 */
378static inline struct buffer_head *
379__bread(struct block_device *bdev, sector_t block, unsigned size)
380{
381 return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
382}
383
347extern int __set_page_dirty_buffers(struct page *page); 384extern int __set_page_dirty_buffers(struct page *page);
348 385
349#else /* CONFIG_BLOCK */ 386#else /* CONFIG_BLOCK */
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
new file mode 100644
index 000000000000..3daf5ed392c9
--- /dev/null
+++ b/include/linux/cacheinfo.h
@@ -0,0 +1,100 @@
1#ifndef _LINUX_CACHEINFO_H
2#define _LINUX_CACHEINFO_H
3
4#include <linux/bitops.h>
5#include <linux/cpumask.h>
6#include <linux/smp.h>
7
8struct device_node;
9struct attribute;
10
11enum cache_type {
12 CACHE_TYPE_NOCACHE = 0,
13 CACHE_TYPE_INST = BIT(0),
14 CACHE_TYPE_DATA = BIT(1),
15 CACHE_TYPE_SEPARATE = CACHE_TYPE_INST | CACHE_TYPE_DATA,
16 CACHE_TYPE_UNIFIED = BIT(2),
17};
18
19/**
20 * struct cacheinfo - represent a cache leaf node
21 * @type: type of the cache - data, inst or unified
22 * @level: represents the hierarcy in the multi-level cache
23 * @coherency_line_size: size of each cache line usually representing
24 * the minimum amount of data that gets transferred from memory
25 * @number_of_sets: total number of sets, a set is a collection of cache
26 * lines sharing the same index
27 * @ways_of_associativity: number of ways in which a particular memory
28 * block can be placed in the cache
29 * @physical_line_partition: number of physical cache lines sharing the
30 * same cachetag
31 * @size: Total size of the cache
32 * @shared_cpu_map: logical cpumask representing all the cpus sharing
33 * this cache node
34 * @attributes: bitfield representing various cache attributes
35 * @of_node: if devicetree is used, this represents either the cpu node in
36 * case there's no explicit cache node or the cache node itself in the
37 * device tree
38 * @disable_sysfs: indicates whether this node is visible to the user via
39 * sysfs or not
40 * @priv: pointer to any private data structure specific to particular
41 * cache design
42 *
43 * While @of_node, @disable_sysfs and @priv are used for internal book
44 * keeping, the remaining members form the core properties of the cache
45 */
46struct cacheinfo {
47 enum cache_type type;
48 unsigned int level;
49 unsigned int coherency_line_size;
50 unsigned int number_of_sets;
51 unsigned int ways_of_associativity;
52 unsigned int physical_line_partition;
53 unsigned int size;
54 cpumask_t shared_cpu_map;
55 unsigned int attributes;
56#define CACHE_WRITE_THROUGH BIT(0)
57#define CACHE_WRITE_BACK BIT(1)
58#define CACHE_WRITE_POLICY_MASK \
59 (CACHE_WRITE_THROUGH | CACHE_WRITE_BACK)
60#define CACHE_READ_ALLOCATE BIT(2)
61#define CACHE_WRITE_ALLOCATE BIT(3)
62#define CACHE_ALLOCATE_POLICY_MASK \
63 (CACHE_READ_ALLOCATE | CACHE_WRITE_ALLOCATE)
64
65 struct device_node *of_node;
66 bool disable_sysfs;
67 void *priv;
68};
69
70struct cpu_cacheinfo {
71 struct cacheinfo *info_list;
72 unsigned int num_levels;
73 unsigned int num_leaves;
74};
75
76/*
77 * Helpers to make sure "func" is executed on the cpu whose cache
78 * attributes are being detected
79 */
80#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \
81static inline void _##func(void *ret) \
82{ \
83 int cpu = smp_processor_id(); \
84 *(int *)ret = __##func(cpu); \
85} \
86 \
87int func(unsigned int cpu) \
88{ \
89 int ret; \
90 smp_call_function_single(cpu, _##func, &ret, true); \
91 return ret; \
92}
93
94struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu);
95int init_cache_level(unsigned int cpu);
96int populate_cache_leaves(unsigned int cpu);
97
98const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf);
99
100#endif /* _LINUX_CACHEINFO_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 6992afc6ba7f..c05ff0f9f9a5 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -99,6 +99,12 @@ inval_skb:
99 return 1; 99 return 1;
100} 100}
101 101
102static inline bool can_is_canfd_skb(const struct sk_buff *skb)
103{
104 /* the CAN specific type of skb is identified by its data length */
105 return skb->len == CANFD_MTU;
106}
107
102/* get data length from can_dlc with sanitized can_dlc */ 108/* get data length from can_dlc with sanitized can_dlc */
103u8 can_dlc2len(u8 can_dlc); 109u8 can_dlc2len(u8 can_dlc);
104 110
@@ -121,6 +127,9 @@ void unregister_candev(struct net_device *dev);
121int can_restart_now(struct net_device *dev); 127int can_restart_now(struct net_device *dev);
122void can_bus_off(struct net_device *dev); 128void can_bus_off(struct net_device *dev);
123 129
130void can_change_state(struct net_device *dev, struct can_frame *cf,
131 enum can_state tx_state, enum can_state rx_state);
132
124void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, 133void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
125 unsigned int idx); 134 unsigned int idx);
126unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); 135unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 5f3386844134..260d78b587c4 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -13,6 +13,7 @@
13 13
14struct ceph_auth_client; 14struct ceph_auth_client;
15struct ceph_authorizer; 15struct ceph_authorizer;
16struct ceph_msg;
16 17
17struct ceph_auth_handshake { 18struct ceph_auth_handshake {
18 struct ceph_authorizer *authorizer; 19 struct ceph_authorizer *authorizer;
@@ -20,6 +21,10 @@ struct ceph_auth_handshake {
20 size_t authorizer_buf_len; 21 size_t authorizer_buf_len;
21 void *authorizer_reply_buf; 22 void *authorizer_reply_buf;
22 size_t authorizer_reply_buf_len; 23 size_t authorizer_reply_buf_len;
24 int (*sign_message)(struct ceph_auth_handshake *auth,
25 struct ceph_msg *msg);
26 int (*check_message_signature)(struct ceph_auth_handshake *auth,
27 struct ceph_msg *msg);
23}; 28};
24 29
25struct ceph_auth_client_ops { 30struct ceph_auth_client_ops {
@@ -66,6 +71,11 @@ struct ceph_auth_client_ops {
66 void (*reset)(struct ceph_auth_client *ac); 71 void (*reset)(struct ceph_auth_client *ac);
67 72
68 void (*destroy)(struct ceph_auth_client *ac); 73 void (*destroy)(struct ceph_auth_client *ac);
74
75 int (*sign_message)(struct ceph_auth_handshake *auth,
76 struct ceph_msg *msg);
77 int (*check_message_signature)(struct ceph_auth_handshake *auth,
78 struct ceph_msg *msg);
69}; 79};
70 80
71struct ceph_auth_client { 81struct ceph_auth_client {
@@ -113,4 +123,20 @@ extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
113extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, 123extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
114 int peer_type); 124 int peer_type);
115 125
126static inline int ceph_auth_sign_message(struct ceph_auth_handshake *auth,
127 struct ceph_msg *msg)
128{
129 if (auth->sign_message)
130 return auth->sign_message(auth, msg);
131 return 0;
132}
133
134static inline
135int ceph_auth_check_message_signature(struct ceph_auth_handshake *auth,
136 struct ceph_msg *msg)
137{
138 if (auth->check_message_signature)
139 return auth->check_message_signature(auth, msg);
140 return 0;
141}
116#endif 142#endif
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h
index 07ad423cc37f..07ca15e76100 100644
--- a/include/linux/ceph/buffer.h
+++ b/include/linux/ceph/buffer.h
@@ -10,8 +10,7 @@
10/* 10/*
11 * a simple reference counted buffer. 11 * a simple reference counted buffer.
12 * 12 *
13 * use kmalloc for small sizes (<= one page), vmalloc for larger 13 * use kmalloc for smaller sizes, vmalloc for larger sizes.
14 * sizes.
15 */ 14 */
16struct ceph_buffer { 15struct ceph_buffer {
17 struct kref kref; 16 struct kref kref;
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index d12659ce550d..71e05bbf8ceb 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -84,6 +84,7 @@ static inline u64 ceph_sanitize_features(u64 features)
84 CEPH_FEATURE_PGPOOL3 | \ 84 CEPH_FEATURE_PGPOOL3 | \
85 CEPH_FEATURE_OSDENC | \ 85 CEPH_FEATURE_OSDENC | \
86 CEPH_FEATURE_CRUSH_TUNABLES | \ 86 CEPH_FEATURE_CRUSH_TUNABLES | \
87 CEPH_FEATURE_MSG_AUTH | \
87 CEPH_FEATURE_CRUSH_TUNABLES2 | \ 88 CEPH_FEATURE_CRUSH_TUNABLES2 | \
88 CEPH_FEATURE_REPLY_CREATE_INODE | \ 89 CEPH_FEATURE_REPLY_CREATE_INODE | \
89 CEPH_FEATURE_OSDHASHPSPOOL | \ 90 CEPH_FEATURE_OSDHASHPSPOOL | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 3c97d5e9b951..c0dadaac26e3 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -522,8 +522,11 @@ struct ceph_mds_reply_dirfrag {
522 __le32 dist[]; 522 __le32 dist[];
523} __attribute__ ((packed)); 523} __attribute__ ((packed));
524 524
525#define CEPH_LOCK_FCNTL 1 525#define CEPH_LOCK_FCNTL 1
526#define CEPH_LOCK_FLOCK 2 526#define CEPH_LOCK_FLOCK 2
527#define CEPH_LOCK_FCNTL_INTR 3
528#define CEPH_LOCK_FLOCK_INTR 4
529
527 530
528#define CEPH_LOCK_SHARED 1 531#define CEPH_LOCK_SHARED 1
529#define CEPH_LOCK_EXCL 2 532#define CEPH_LOCK_EXCL 2
@@ -549,6 +552,7 @@ struct ceph_filelock {
549 552
550int ceph_flags_to_mode(int flags); 553int ceph_flags_to_mode(int flags);
551 554
555#define CEPH_INLINE_NONE ((__u64)-1)
552 556
553/* capability bits */ 557/* capability bits */
554#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */ 558#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */
@@ -613,6 +617,8 @@ int ceph_flags_to_mode(int flags);
613 CEPH_CAP_LINK_SHARED | \ 617 CEPH_CAP_LINK_SHARED | \
614 CEPH_CAP_FILE_SHARED | \ 618 CEPH_CAP_FILE_SHARED | \
615 CEPH_CAP_XATTR_SHARED) 619 CEPH_CAP_XATTR_SHARED)
620#define CEPH_STAT_CAP_INLINE_DATA (CEPH_CAP_FILE_SHARED | \
621 CEPH_CAP_FILE_RD)
616 622
617#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \ 623#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
618 CEPH_CAP_LINK_SHARED | \ 624 CEPH_CAP_LINK_SHARED | \
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 279b0afac1c1..8b11a79ca1cb 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -29,6 +29,7 @@
29#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ 29#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
30#define CEPH_OPT_MYIP (1<<2) /* specified my ip */ 30#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
31#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ 31#define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */
32#define CEPH_OPT_NOMSGAUTH (1<<4) /* not require cephx message signature */
32 33
33#define CEPH_OPT_DEFAULT (0) 34#define CEPH_OPT_DEFAULT (0)
34 35
@@ -184,7 +185,6 @@ extern bool libceph_compatible(void *data);
184extern const char *ceph_msg_type_name(int type); 185extern const char *ceph_msg_type_name(int type);
185extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); 186extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
186extern void *ceph_kvmalloc(size_t size, gfp_t flags); 187extern void *ceph_kvmalloc(size_t size, gfp_t flags);
187extern void ceph_kvfree(const void *ptr);
188 188
189extern struct ceph_options *ceph_parse_options(char *options, 189extern struct ceph_options *ceph_parse_options(char *options,
190 const char *dev_name, const char *dev_name_end, 190 const char *dev_name, const char *dev_name_end,
@@ -211,7 +211,6 @@ extern struct page **ceph_get_direct_page_vector(const void __user *data,
211 bool write_page); 211 bool write_page);
212extern void ceph_put_page_vector(struct page **pages, int num_pages, 212extern void ceph_put_page_vector(struct page **pages, int num_pages,
213 bool dirty); 213 bool dirty);
214extern void ceph_release_page_vector(struct page **pages, int num_pages);
215extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); 214extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
216extern int ceph_copy_user_to_page_vector(struct page **pages, 215extern int ceph_copy_user_to_page_vector(struct page **pages,
217 const void __user *data, 216 const void __user *data,
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 40ae58e3e9db..d9d396c16503 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -42,6 +42,10 @@ struct ceph_connection_operations {
42 struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, 42 struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
43 struct ceph_msg_header *hdr, 43 struct ceph_msg_header *hdr,
44 int *skip); 44 int *skip);
45 int (*sign_message) (struct ceph_connection *con, struct ceph_msg *msg);
46
47 int (*check_message_signature) (struct ceph_connection *con,
48 struct ceph_msg *msg);
45}; 49};
46 50
47/* use format string %s%d */ 51/* use format string %s%d */
@@ -142,7 +146,10 @@ struct ceph_msg_data_cursor {
142 */ 146 */
143struct ceph_msg { 147struct ceph_msg {
144 struct ceph_msg_header hdr; /* header */ 148 struct ceph_msg_header hdr; /* header */
145 struct ceph_msg_footer footer; /* footer */ 149 union {
150 struct ceph_msg_footer footer; /* footer */
151 struct ceph_msg_footer_old old_footer; /* old format footer */
152 };
146 struct kvec front; /* unaligned blobs of message */ 153 struct kvec front; /* unaligned blobs of message */
147 struct ceph_buffer *middle; 154 struct ceph_buffer *middle;
148 155
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 3d94a73b5f30..1c1887206ffa 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -152,7 +152,8 @@ struct ceph_msg_header {
152 receiver: mask against ~PAGE_MASK */ 152 receiver: mask against ~PAGE_MASK */
153 153
154 struct ceph_entity_name src; 154 struct ceph_entity_name src;
155 __le32 reserved; 155 __le16 compat_version;
156 __le16 reserved;
156 __le32 crc; /* header crc32c */ 157 __le32 crc; /* header crc32c */
157} __attribute__ ((packed)); 158} __attribute__ ((packed));
158 159
@@ -164,13 +165,21 @@ struct ceph_msg_header {
164/* 165/*
165 * follows data payload 166 * follows data payload
166 */ 167 */
168struct ceph_msg_footer_old {
169 __le32 front_crc, middle_crc, data_crc;
170 __u8 flags;
171} __attribute__ ((packed));
172
167struct ceph_msg_footer { 173struct ceph_msg_footer {
168 __le32 front_crc, middle_crc, data_crc; 174 __le32 front_crc, middle_crc, data_crc;
175 // sig holds the 64 bits of the digital signature for the message PLR
176 __le64 sig;
169 __u8 flags; 177 __u8 flags;
170} __attribute__ ((packed)); 178} __attribute__ ((packed));
171 179
172#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */ 180#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */
173#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */ 181#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */
182#define CEPH_MSG_FOOTER_SIGNED (1<<2) /* msg was signed */
174 183
175 184
176#endif 185#endif
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 03aeb27fcc69..61b19c46bdb3 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -87,6 +87,13 @@ struct ceph_osd_req_op {
87 struct ceph_osd_data osd_data; 87 struct ceph_osd_data osd_data;
88 } extent; 88 } extent;
89 struct { 89 struct {
90 u32 name_len;
91 u32 value_len;
92 __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
93 __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
94 struct ceph_osd_data osd_data;
95 } xattr;
96 struct {
90 const char *class_name; 97 const char *class_name;
91 const char *method_name; 98 const char *method_name;
92 struct ceph_osd_data request_info; 99 struct ceph_osd_data request_info;
@@ -295,6 +302,9 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
295extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, 302extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req,
296 unsigned int which, u16 opcode, 303 unsigned int which, u16 opcode,
297 const char *class, const char *method); 304 const char *class, const char *method);
305extern int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
306 u16 opcode, const char *name, const void *value,
307 size_t size, u8 cmp_op, u8 cmp_mode);
298extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req, 308extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
299 unsigned int which, u16 opcode, 309 unsigned int which, u16 opcode,
300 u64 cookie, u64 version, int flag); 310 u64 cookie, u64 version, int flag);
@@ -318,7 +328,8 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
318 struct ceph_file_layout *layout, 328 struct ceph_file_layout *layout,
319 struct ceph_vino vino, 329 struct ceph_vino vino,
320 u64 offset, u64 *len, 330 u64 offset, u64 *len,
321 int num_ops, int opcode, int flags, 331 unsigned int which, int num_ops,
332 int opcode, int flags,
322 struct ceph_snap_context *snapc, 333 struct ceph_snap_context *snapc,
323 u32 truncate_seq, u64 truncate_size, 334 u32 truncate_seq, u64 truncate_size,
324 bool use_mempool); 335 bool use_mempool);
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 9660d6b0a35d..13d71fe18b0c 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -1,7 +1,10 @@
1#ifndef __FS_CEPH_PAGELIST_H 1#ifndef __FS_CEPH_PAGELIST_H
2#define __FS_CEPH_PAGELIST_H 2#define __FS_CEPH_PAGELIST_H
3 3
4#include <asm/byteorder.h>
5#include <linux/atomic.h>
4#include <linux/list.h> 6#include <linux/list.h>
7#include <linux/types.h>
5 8
6struct ceph_pagelist { 9struct ceph_pagelist {
7 struct list_head head; 10 struct list_head head;
@@ -10,6 +13,7 @@ struct ceph_pagelist {
10 size_t room; 13 size_t room;
11 struct list_head free_list; 14 struct list_head free_list;
12 size_t num_pages_free; 15 size_t num_pages_free;
16 atomic_t refcnt;
13}; 17};
14 18
15struct ceph_pagelist_cursor { 19struct ceph_pagelist_cursor {
@@ -26,9 +30,10 @@ static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
26 pl->room = 0; 30 pl->room = 0;
27 INIT_LIST_HEAD(&pl->free_list); 31 INIT_LIST_HEAD(&pl->free_list);
28 pl->num_pages_free = 0; 32 pl->num_pages_free = 0;
33 atomic_set(&pl->refcnt, 1);
29} 34}
30 35
31extern int ceph_pagelist_release(struct ceph_pagelist *pl); 36extern void ceph_pagelist_release(struct ceph_pagelist *pl);
32 37
33extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l); 38extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l);
34 39
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index f20e0d8a2155..2f822dca1046 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -172,6 +172,7 @@ extern const char *ceph_osd_state_name(int s);
172#define CEPH_OSD_OP_MODE_WR 0x2000 172#define CEPH_OSD_OP_MODE_WR 0x2000
173#define CEPH_OSD_OP_MODE_RMW 0x3000 173#define CEPH_OSD_OP_MODE_RMW 0x3000
174#define CEPH_OSD_OP_MODE_SUB 0x4000 174#define CEPH_OSD_OP_MODE_SUB 0x4000
175#define CEPH_OSD_OP_MODE_CACHE 0x8000
175 176
176#define CEPH_OSD_OP_TYPE 0x0f00 177#define CEPH_OSD_OP_TYPE 0x0f00
177#define CEPH_OSD_OP_TYPE_LOCK 0x0100 178#define CEPH_OSD_OP_TYPE_LOCK 0x0100
@@ -181,103 +182,135 @@ extern const char *ceph_osd_state_name(int s);
181#define CEPH_OSD_OP_TYPE_PG 0x0500 182#define CEPH_OSD_OP_TYPE_PG 0x0500
182#define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */ 183#define CEPH_OSD_OP_TYPE_MULTI 0x0600 /* multiobject */
183 184
185#define __CEPH_OSD_OP1(mode, nr) \
186 (CEPH_OSD_OP_MODE_##mode | (nr))
187
188#define __CEPH_OSD_OP(mode, type, nr) \
189 (CEPH_OSD_OP_MODE_##mode | CEPH_OSD_OP_TYPE_##type | (nr))
190
191#define __CEPH_FORALL_OSD_OPS(f) \
192 /** data **/ \
193 /* read */ \
194 f(READ, __CEPH_OSD_OP(RD, DATA, 1), "read") \
195 f(STAT, __CEPH_OSD_OP(RD, DATA, 2), "stat") \
196 f(MAPEXT, __CEPH_OSD_OP(RD, DATA, 3), "mapext") \
197 \
198 /* fancy read */ \
199 f(MASKTRUNC, __CEPH_OSD_OP(RD, DATA, 4), "masktrunc") \
200 f(SPARSE_READ, __CEPH_OSD_OP(RD, DATA, 5), "sparse-read") \
201 \
202 f(NOTIFY, __CEPH_OSD_OP(RD, DATA, 6), "notify") \
203 f(NOTIFY_ACK, __CEPH_OSD_OP(RD, DATA, 7), "notify-ack") \
204 \
205 /* versioning */ \
206 f(ASSERT_VER, __CEPH_OSD_OP(RD, DATA, 8), "assert-version") \
207 \
208 f(LIST_WATCHERS, __CEPH_OSD_OP(RD, DATA, 9), "list-watchers") \
209 \
210 f(LIST_SNAPS, __CEPH_OSD_OP(RD, DATA, 10), "list-snaps") \
211 \
212 /* sync */ \
213 f(SYNC_READ, __CEPH_OSD_OP(RD, DATA, 11), "sync_read") \
214 \
215 /* write */ \
216 f(WRITE, __CEPH_OSD_OP(WR, DATA, 1), "write") \
217 f(WRITEFULL, __CEPH_OSD_OP(WR, DATA, 2), "writefull") \
218 f(TRUNCATE, __CEPH_OSD_OP(WR, DATA, 3), "truncate") \
219 f(ZERO, __CEPH_OSD_OP(WR, DATA, 4), "zero") \
220 f(DELETE, __CEPH_OSD_OP(WR, DATA, 5), "delete") \
221 \
222 /* fancy write */ \
223 f(APPEND, __CEPH_OSD_OP(WR, DATA, 6), "append") \
224 f(STARTSYNC, __CEPH_OSD_OP(WR, DATA, 7), "startsync") \
225 f(SETTRUNC, __CEPH_OSD_OP(WR, DATA, 8), "settrunc") \
226 f(TRIMTRUNC, __CEPH_OSD_OP(WR, DATA, 9), "trimtrunc") \
227 \
228 f(TMAPUP, __CEPH_OSD_OP(RMW, DATA, 10), "tmapup") \
229 f(TMAPPUT, __CEPH_OSD_OP(WR, DATA, 11), "tmapput") \
230 f(TMAPGET, __CEPH_OSD_OP(RD, DATA, 12), "tmapget") \
231 \
232 f(CREATE, __CEPH_OSD_OP(WR, DATA, 13), "create") \
233 f(ROLLBACK, __CEPH_OSD_OP(WR, DATA, 14), "rollback") \
234 \
235 f(WATCH, __CEPH_OSD_OP(WR, DATA, 15), "watch") \
236 \
237 /* omap */ \
238 f(OMAPGETKEYS, __CEPH_OSD_OP(RD, DATA, 17), "omap-get-keys") \
239 f(OMAPGETVALS, __CEPH_OSD_OP(RD, DATA, 18), "omap-get-vals") \
240 f(OMAPGETHEADER, __CEPH_OSD_OP(RD, DATA, 19), "omap-get-header") \
241 f(OMAPGETVALSBYKEYS, __CEPH_OSD_OP(RD, DATA, 20), "omap-get-vals-by-keys") \
242 f(OMAPSETVALS, __CEPH_OSD_OP(WR, DATA, 21), "omap-set-vals") \
243 f(OMAPSETHEADER, __CEPH_OSD_OP(WR, DATA, 22), "omap-set-header") \
244 f(OMAPCLEAR, __CEPH_OSD_OP(WR, DATA, 23), "omap-clear") \
245 f(OMAPRMKEYS, __CEPH_OSD_OP(WR, DATA, 24), "omap-rm-keys") \
246 f(OMAP_CMP, __CEPH_OSD_OP(RD, DATA, 25), "omap-cmp") \
247 \
248 /* tiering */ \
249 f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \
250 f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \
251 f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \
252 f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \
253 f(COPY_GET, __CEPH_OSD_OP(RD, DATA, 30), "copy-get") \
254 f(CACHE_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 31), "cache-flush") \
255 f(CACHE_EVICT, __CEPH_OSD_OP(CACHE, DATA, 32), "cache-evict") \
256 f(CACHE_TRY_FLUSH, __CEPH_OSD_OP(CACHE, DATA, 33), "cache-try-flush") \
257 \
258 /* convert tmap to omap */ \
259 f(TMAP2OMAP, __CEPH_OSD_OP(RMW, DATA, 34), "tmap2omap") \
260 \
261 /* hints */ \
262 f(SETALLOCHINT, __CEPH_OSD_OP(WR, DATA, 35), "set-alloc-hint") \
263 \
264 /** multi **/ \
265 f(CLONERANGE, __CEPH_OSD_OP(WR, MULTI, 1), "clonerange") \
266 f(ASSERT_SRC_VERSION, __CEPH_OSD_OP(RD, MULTI, 2), "assert-src-version") \
267 f(SRC_CMPXATTR, __CEPH_OSD_OP(RD, MULTI, 3), "src-cmpxattr") \
268 \
269 /** attrs **/ \
270 /* read */ \
271 f(GETXATTR, __CEPH_OSD_OP(RD, ATTR, 1), "getxattr") \
272 f(GETXATTRS, __CEPH_OSD_OP(RD, ATTR, 2), "getxattrs") \
273 f(CMPXATTR, __CEPH_OSD_OP(RD, ATTR, 3), "cmpxattr") \
274 \
275 /* write */ \
276 f(SETXATTR, __CEPH_OSD_OP(WR, ATTR, 1), "setxattr") \
277 f(SETXATTRS, __CEPH_OSD_OP(WR, ATTR, 2), "setxattrs") \
278 f(RESETXATTRS, __CEPH_OSD_OP(WR, ATTR, 3), "resetxattrs") \
279 f(RMXATTR, __CEPH_OSD_OP(WR, ATTR, 4), "rmxattr") \
280 \
281 /** subop **/ \
282 f(PULL, __CEPH_OSD_OP1(SUB, 1), "pull") \
283 f(PUSH, __CEPH_OSD_OP1(SUB, 2), "push") \
284 f(BALANCEREADS, __CEPH_OSD_OP1(SUB, 3), "balance-reads") \
285 f(UNBALANCEREADS, __CEPH_OSD_OP1(SUB, 4), "unbalance-reads") \
286 f(SCRUB, __CEPH_OSD_OP1(SUB, 5), "scrub") \
287 f(SCRUB_RESERVE, __CEPH_OSD_OP1(SUB, 6), "scrub-reserve") \
288 f(SCRUB_UNRESERVE, __CEPH_OSD_OP1(SUB, 7), "scrub-unreserve") \
289 f(SCRUB_STOP, __CEPH_OSD_OP1(SUB, 8), "scrub-stop") \
290 f(SCRUB_MAP, __CEPH_OSD_OP1(SUB, 9), "scrub-map") \
291 \
292 /** lock **/ \
293 f(WRLOCK, __CEPH_OSD_OP(WR, LOCK, 1), "wrlock") \
294 f(WRUNLOCK, __CEPH_OSD_OP(WR, LOCK, 2), "wrunlock") \
295 f(RDLOCK, __CEPH_OSD_OP(WR, LOCK, 3), "rdlock") \
296 f(RDUNLOCK, __CEPH_OSD_OP(WR, LOCK, 4), "rdunlock") \
297 f(UPLOCK, __CEPH_OSD_OP(WR, LOCK, 5), "uplock") \
298 f(DNLOCK, __CEPH_OSD_OP(WR, LOCK, 6), "dnlock") \
299 \
300 /** exec **/ \
301 /* note: the RD bit here is wrong; see special-case below in helper */ \
302 f(CALL, __CEPH_OSD_OP(RD, EXEC, 1), "call") \
303 \
304 /** pg **/ \
305 f(PGLS, __CEPH_OSD_OP(RD, PG, 1), "pgls") \
306 f(PGLS_FILTER, __CEPH_OSD_OP(RD, PG, 2), "pgls-filter") \
307 f(PG_HITSET_LS, __CEPH_OSD_OP(RD, PG, 3), "pg-hitset-ls") \
308 f(PG_HITSET_GET, __CEPH_OSD_OP(RD, PG, 4), "pg-hitset-get")
309
184enum { 310enum {
185 /** data **/ 311#define GENERATE_ENUM_ENTRY(op, opcode, str) CEPH_OSD_OP_##op = (opcode),
186 /* read */ 312__CEPH_FORALL_OSD_OPS(GENERATE_ENUM_ENTRY)
187 CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1, 313#undef GENERATE_ENUM_ENTRY
188 CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2,
189 CEPH_OSD_OP_MAPEXT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 3,
190
191 /* fancy read */
192 CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4,
193 CEPH_OSD_OP_SPARSE_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 5,
194
195 CEPH_OSD_OP_NOTIFY = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 6,
196 CEPH_OSD_OP_NOTIFY_ACK = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 7,
197
198 /* versioning */
199 CEPH_OSD_OP_ASSERT_VER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 8,
200
201 /* write */
202 CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1,
203 CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2,
204 CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3,
205 CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4,
206 CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5,
207
208 /* fancy write */
209 CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6,
210 CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7,
211 CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8,
212 CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9,
213
214 CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10,
215 CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11,
216 CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12,
217
218 CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13,
219 CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14,
220
221 CEPH_OSD_OP_WATCH = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 15,
222
223 /* omap */
224 CEPH_OSD_OP_OMAPGETKEYS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 17,
225 CEPH_OSD_OP_OMAPGETVALS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 18,
226 CEPH_OSD_OP_OMAPGETHEADER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 19,
227 CEPH_OSD_OP_OMAPGETVALSBYKEYS =
228 CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 20,
229 CEPH_OSD_OP_OMAPSETVALS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 21,
230 CEPH_OSD_OP_OMAPSETHEADER = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 22,
231 CEPH_OSD_OP_OMAPCLEAR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 23,
232 CEPH_OSD_OP_OMAPRMKEYS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 24,
233 CEPH_OSD_OP_OMAP_CMP = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 25,
234
235 /* hints */
236 CEPH_OSD_OP_SETALLOCHINT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 35,
237
238 /** multi **/
239 CEPH_OSD_OP_CLONERANGE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_MULTI | 1,
240 CEPH_OSD_OP_ASSERT_SRC_VERSION = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 2,
241 CEPH_OSD_OP_SRC_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_MULTI | 3,
242
243 /** attrs **/
244 /* read */
245 CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
246 CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2,
247 CEPH_OSD_OP_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 3,
248
249 /* write */
250 CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1,
251 CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2,
252 CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3,
253 CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4,
254
255 /** subop **/
256 CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1,
257 CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2,
258 CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3,
259 CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4,
260 CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5,
261 CEPH_OSD_OP_SCRUB_RESERVE = CEPH_OSD_OP_MODE_SUB | 6,
262 CEPH_OSD_OP_SCRUB_UNRESERVE = CEPH_OSD_OP_MODE_SUB | 7,
263 CEPH_OSD_OP_SCRUB_STOP = CEPH_OSD_OP_MODE_SUB | 8,
264 CEPH_OSD_OP_SCRUB_MAP = CEPH_OSD_OP_MODE_SUB | 9,
265
266 /** lock **/
267 CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
268 CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2,
269 CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3,
270 CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4,
271 CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5,
272 CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6,
273
274 /** exec **/
275 /* note: the RD bit here is wrong; see special-case below in helper */
276 CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1,
277
278 /** pg **/
279 CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1,
280 CEPH_OSD_OP_PGLS_FILTER = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 2,
281}; 314};
282 315
283static inline int ceph_osd_op_type_lock(int op) 316static inline int ceph_osd_op_type_lock(int op)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b5223c570eba..da0dae0600e6 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -27,7 +27,6 @@
27 27
28struct cgroup_root; 28struct cgroup_root;
29struct cgroup_subsys; 29struct cgroup_subsys;
30struct inode;
31struct cgroup; 30struct cgroup;
32 31
33extern int cgroup_init_early(void); 32extern int cgroup_init_early(void);
@@ -38,7 +37,8 @@ extern void cgroup_exit(struct task_struct *p);
38extern int cgroupstats_build(struct cgroupstats *stats, 37extern int cgroupstats_build(struct cgroupstats *stats,
39 struct dentry *dentry); 38 struct dentry *dentry);
40 39
41extern int proc_cgroup_show(struct seq_file *, void *); 40extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
41 struct pid *pid, struct task_struct *tsk);
42 42
43/* define the enumeration of all cgroup subsystems */ 43/* define the enumeration of all cgroup subsystems */
44#define SUBSYS(_x) _x ## _cgrp_id, 44#define SUBSYS(_x) _x ## _cgrp_id,
@@ -113,6 +113,19 @@ static inline void css_get(struct cgroup_subsys_state *css)
113} 113}
114 114
115/** 115/**
116 * css_get_many - obtain references on the specified css
117 * @css: target css
118 * @n: number of references to get
119 *
120 * The caller must already have a reference.
121 */
122static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
123{
124 if (!(css->flags & CSS_NO_REF))
125 percpu_ref_get_many(&css->refcnt, n);
126}
127
128/**
116 * css_tryget - try to obtain a reference on the specified css 129 * css_tryget - try to obtain a reference on the specified css
117 * @css: target css 130 * @css: target css
118 * 131 *
@@ -159,13 +172,21 @@ static inline void css_put(struct cgroup_subsys_state *css)
159 percpu_ref_put(&css->refcnt); 172 percpu_ref_put(&css->refcnt);
160} 173}
161 174
175/**
176 * css_put_many - put css references
177 * @css: target css
178 * @n: number of references to put
179 *
180 * Put references obtained via css_get() and css_tryget_online().
181 */
182static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
183{
184 if (!(css->flags & CSS_NO_REF))
185 percpu_ref_put_many(&css->refcnt, n);
186}
187
162/* bits in struct cgroup flags field */ 188/* bits in struct cgroup flags field */
163enum { 189enum {
164 /*
165 * Control Group has previously had a child cgroup or a task,
166 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
167 */
168 CGRP_RELEASABLE,
169 /* Control Group requires release notifications to userspace */ 190 /* Control Group requires release notifications to userspace */
170 CGRP_NOTIFY_ON_RELEASE, 191 CGRP_NOTIFY_ON_RELEASE,
171 /* 192 /*
@@ -235,13 +256,6 @@ struct cgroup {
235 struct list_head e_csets[CGROUP_SUBSYS_COUNT]; 256 struct list_head e_csets[CGROUP_SUBSYS_COUNT];
236 257
237 /* 258 /*
238 * Linked list running through all cgroups that can
239 * potentially be reaped by the release agent. Protected by
240 * release_list_lock
241 */
242 struct list_head release_list;
243
244 /*
245 * list of pidlists, up to two for each namespace (one for procs, one 259 * list of pidlists, up to two for each namespace (one for procs, one
246 * for tasks); created on demand. 260 * for tasks); created on demand.
247 */ 261 */
@@ -250,6 +264,9 @@ struct cgroup {
250 264
251 /* used to wait for offlining of csses */ 265 /* used to wait for offlining of csses */
252 wait_queue_head_t offline_waitq; 266 wait_queue_head_t offline_waitq;
267
268 /* used to schedule release agent */
269 struct work_struct release_agent_work;
253}; 270};
254 271
255#define MAX_CGROUP_ROOT_NAMELEN 64 272#define MAX_CGROUP_ROOT_NAMELEN 64
@@ -376,8 +393,8 @@ struct css_set {
376 * struct cftype: handler definitions for cgroup control files 393 * struct cftype: handler definitions for cgroup control files
377 * 394 *
378 * When reading/writing to a file: 395 * When reading/writing to a file:
379 * - the cgroup to use is file->f_dentry->d_parent->d_fsdata 396 * - the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
380 * - the 'cftype' of the file is file->f_dentry->d_fsdata 397 * - the 'cftype' of the file is file->f_path.dentry->d_fsdata
381 */ 398 */
382 399
383/* cftype->flags */ 400/* cftype->flags */
@@ -536,13 +553,10 @@ static inline bool cgroup_has_tasks(struct cgroup *cgrp)
536 return !list_empty(&cgrp->cset_links); 553 return !list_empty(&cgrp->cset_links);
537} 554}
538 555
539/* returns ino associated with a cgroup, 0 indicates unmounted root */ 556/* returns ino associated with a cgroup */
540static inline ino_t cgroup_ino(struct cgroup *cgrp) 557static inline ino_t cgroup_ino(struct cgroup *cgrp)
541{ 558{
542 if (cgrp->kn) 559 return cgrp->kn->ino;
543 return cgrp->kn->ino;
544 else
545 return 0;
546} 560}
547 561
548/* cft/css accessors for cftype->write() operation */ 562/* cft/css accessors for cftype->write() operation */
@@ -624,8 +638,10 @@ struct cgroup_subsys {
624 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); 638 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
625 int (*css_online)(struct cgroup_subsys_state *css); 639 int (*css_online)(struct cgroup_subsys_state *css);
626 void (*css_offline)(struct cgroup_subsys_state *css); 640 void (*css_offline)(struct cgroup_subsys_state *css);
641 void (*css_released)(struct cgroup_subsys_state *css);
627 void (*css_free)(struct cgroup_subsys_state *css); 642 void (*css_free)(struct cgroup_subsys_state *css);
628 void (*css_reset)(struct cgroup_subsys_state *css); 643 void (*css_reset)(struct cgroup_subsys_state *css);
644 void (*css_e_css_changed)(struct cgroup_subsys_state *css);
629 645
630 int (*can_attach)(struct cgroup_subsys_state *css, 646 int (*can_attach)(struct cgroup_subsys_state *css,
631 struct cgroup_taskset *tset); 647 struct cgroup_taskset *tset);
@@ -920,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it);
920int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); 936int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
921int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); 937int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
922 938
939struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
940 struct cgroup_subsys *ss);
923struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, 941struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
924 struct cgroup_subsys *ss); 942 struct cgroup_subsys *ss);
925 943
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index efbf70b9fd84..0ca5f6046920 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -46,8 +46,10 @@ struct clk {
46 unsigned int enable_count; 46 unsigned int enable_count;
47 unsigned int prepare_count; 47 unsigned int prepare_count;
48 unsigned long accuracy; 48 unsigned long accuracy;
49 int phase;
49 struct hlist_head children; 50 struct hlist_head children;
50 struct hlist_node child_node; 51 struct hlist_node child_node;
52 struct hlist_node debug_node;
51 unsigned int notifier_count; 53 unsigned int notifier_count;
52#ifdef CONFIG_DEBUG_FS 54#ifdef CONFIG_DEBUG_FS
53 struct dentry *dentry; 55 struct dentry *dentry;
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 411dd7eb2653..d936409520f8 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -13,6 +13,7 @@
13 13
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/of.h>
16 17
17#ifdef CONFIG_COMMON_CLK 18#ifdef CONFIG_COMMON_CLK
18 19
@@ -129,6 +130,14 @@ struct dentry;
129 * set then clock accuracy will be initialized to parent accuracy 130 * set then clock accuracy will be initialized to parent accuracy
130 * or 0 (perfect clock) if clock has no parent. 131 * or 0 (perfect clock) if clock has no parent.
131 * 132 *
133 * @get_phase: Queries the hardware to get the current phase of a clock.
134 * Returned values are 0-359 degrees on success, negative
135 * error codes on failure.
136 *
137 * @set_phase: Shift the phase this clock signal in degrees specified
138 * by the second argument. Valid values for degrees are
139 * 0-359. Return 0 on success, otherwise -EERROR.
140 *
132 * @init: Perform platform-specific initialization magic. 141 * @init: Perform platform-specific initialization magic.
133 * This is not not used by any of the basic clock types. 142 * This is not not used by any of the basic clock types.
134 * Please consider other ways of solving initialization problems 143 * Please consider other ways of solving initialization problems
@@ -167,7 +176,7 @@ struct clk_ops {
167 unsigned long *parent_rate); 176 unsigned long *parent_rate);
168 long (*determine_rate)(struct clk_hw *hw, unsigned long rate, 177 long (*determine_rate)(struct clk_hw *hw, unsigned long rate,
169 unsigned long *best_parent_rate, 178 unsigned long *best_parent_rate,
170 struct clk **best_parent_clk); 179 struct clk_hw **best_parent_hw);
171 int (*set_parent)(struct clk_hw *hw, u8 index); 180 int (*set_parent)(struct clk_hw *hw, u8 index);
172 u8 (*get_parent)(struct clk_hw *hw); 181 u8 (*get_parent)(struct clk_hw *hw);
173 int (*set_rate)(struct clk_hw *hw, unsigned long rate, 182 int (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -177,6 +186,8 @@ struct clk_ops {
177 unsigned long parent_rate, u8 index); 186 unsigned long parent_rate, u8 index);
178 unsigned long (*recalc_accuracy)(struct clk_hw *hw, 187 unsigned long (*recalc_accuracy)(struct clk_hw *hw,
179 unsigned long parent_accuracy); 188 unsigned long parent_accuracy);
189 int (*get_phase)(struct clk_hw *hw);
190 int (*set_phase)(struct clk_hw *hw, int degrees);
180 void (*init)(struct clk_hw *hw); 191 void (*init)(struct clk_hw *hw);
181 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry); 192 int (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
182}; 193};
@@ -341,7 +352,6 @@ struct clk_divider {
341#define CLK_DIVIDER_READ_ONLY BIT(5) 352#define CLK_DIVIDER_READ_ONLY BIT(5)
342 353
343extern const struct clk_ops clk_divider_ops; 354extern const struct clk_ops clk_divider_ops;
344extern const struct clk_ops clk_divider_ro_ops;
345struct clk *clk_register_divider(struct device *dev, const char *name, 355struct clk *clk_register_divider(struct device *dev, const char *name,
346 const char *parent_name, unsigned long flags, 356 const char *parent_name, unsigned long flags,
347 void __iomem *reg, u8 shift, u8 width, 357 void __iomem *reg, u8 shift, u8 width,
@@ -488,6 +498,28 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
488 struct clk_hw *gate_hw, const struct clk_ops *gate_ops, 498 struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
489 unsigned long flags); 499 unsigned long flags);
490 500
501/***
502 * struct clk_gpio_gate - gpio gated clock
503 *
504 * @hw: handle between common and hardware-specific interfaces
505 * @gpiod: gpio descriptor
506 *
507 * Clock with a gpio control for enabling and disabling the parent clock.
508 * Implements .enable, .disable and .is_enabled
509 */
510
511struct clk_gpio {
512 struct clk_hw hw;
513 struct gpio_desc *gpiod;
514};
515
516extern const struct clk_ops clk_gpio_gate_ops;
517struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
518 const char *parent_name, struct gpio_desc *gpio,
519 unsigned long flags);
520
521void of_gpio_clk_gate_setup(struct device_node *node);
522
491/** 523/**
492 * clk_register - allocate a new clock, register it and return an opaque cookie 524 * clk_register - allocate a new clock, register it and return an opaque cookie
493 * @dev: device that is registering this clock 525 * @dev: device that is registering this clock
@@ -512,16 +544,14 @@ u8 __clk_get_num_parents(struct clk *clk);
512struct clk *__clk_get_parent(struct clk *clk); 544struct clk *__clk_get_parent(struct clk *clk);
513struct clk *clk_get_parent_by_index(struct clk *clk, u8 index); 545struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
514unsigned int __clk_get_enable_count(struct clk *clk); 546unsigned int __clk_get_enable_count(struct clk *clk);
515unsigned int __clk_get_prepare_count(struct clk *clk);
516unsigned long __clk_get_rate(struct clk *clk); 547unsigned long __clk_get_rate(struct clk *clk);
517unsigned long __clk_get_accuracy(struct clk *clk);
518unsigned long __clk_get_flags(struct clk *clk); 548unsigned long __clk_get_flags(struct clk *clk);
519bool __clk_is_prepared(struct clk *clk); 549bool __clk_is_prepared(struct clk *clk);
520bool __clk_is_enabled(struct clk *clk); 550bool __clk_is_enabled(struct clk *clk);
521struct clk *__clk_lookup(const char *name); 551struct clk *__clk_lookup(const char *name);
522long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, 552long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
523 unsigned long *best_parent_rate, 553 unsigned long *best_parent_rate,
524 struct clk **best_parent_p); 554 struct clk_hw **best_parent_p);
525 555
526/* 556/*
527 * FIXME clock api without lock protection 557 * FIXME clock api without lock protection
@@ -620,7 +650,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
620#endif /* platform dependent I/O accessors */ 650#endif /* platform dependent I/O accessors */
621 651
622#ifdef CONFIG_DEBUG_FS 652#ifdef CONFIG_DEBUG_FS
623struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode, 653struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
624 void *data, const struct file_operations *fops); 654 void *data, const struct file_operations *fops);
625#endif 655#endif
626 656
diff --git a/include/linux/clk.h b/include/linux/clk.h
index fb5e097d8f72..c7f258a81761 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -106,6 +106,25 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
106 */ 106 */
107long clk_get_accuracy(struct clk *clk); 107long clk_get_accuracy(struct clk *clk);
108 108
109/**
110 * clk_set_phase - adjust the phase shift of a clock signal
111 * @clk: clock signal source
112 * @degrees: number of degrees the signal is shifted
113 *
114 * Shifts the phase of a clock signal by the specified degrees. Returns 0 on
115 * success, -EERROR otherwise.
116 */
117int clk_set_phase(struct clk *clk, int degrees);
118
119/**
120 * clk_get_phase - return the phase shift of a clock signal
121 * @clk: clock signal source
122 *
123 * Returns the phase shift of a clock node in degrees, otherwise returns
124 * -EERROR.
125 */
126int clk_get_phase(struct clk *clk);
127
109#else 128#else
110 129
111static inline long clk_get_accuracy(struct clk *clk) 130static inline long clk_get_accuracy(struct clk *clk)
@@ -113,6 +132,16 @@ static inline long clk_get_accuracy(struct clk *clk)
113 return -ENOTSUPP; 132 return -ENOTSUPP;
114} 133}
115 134
135static inline long clk_set_phase(struct clk *clk, int phase)
136{
137 return -ENOTSUPP;
138}
139
140static inline long clk_get_phase(struct clk *clk)
141{
142 return -ENOTSUPP;
143}
144
116#endif 145#endif
117 146
118/** 147/**
@@ -238,7 +267,7 @@ void clk_put(struct clk *clk);
238 267
239/** 268/**
240 * devm_clk_put - "free" a managed clock source 269 * devm_clk_put - "free" a managed clock source
241 * @dev: device used to acuqire the clock 270 * @dev: device used to acquire the clock
242 * @clk: clock source acquired with devm_clk_get() 271 * @clk: clock source acquired with devm_clk_get()
243 * 272 *
244 * Note: drivers must ensure that all clk_enable calls made on this 273 * Note: drivers must ensure that all clk_enable calls made on this
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index de4268d4987a..c8e3b3d1eded 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -125,6 +125,7 @@ extern void __iomem *at91_pmc_base;
125#define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */ 125#define AT91_PMC_PLLADIV2 (1 << 12) /* PLLA divisor by 2 [some SAM9 only] */
126#define AT91_PMC_PLLADIV2_OFF (0 << 12) 126#define AT91_PMC_PLLADIV2_OFF (0 << 12)
127#define AT91_PMC_PLLADIV2_ON (1 << 12) 127#define AT91_PMC_PLLADIV2_ON (1 << 12)
128#define AT91_PMC_H32MXDIV BIT(24)
128 129
129#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ 130#define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */
130#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */ 131#define AT91_PMC_USBS (0x1 << 0) /* USB OHCI Input clock selection */
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index e8d8a35034a5..55ef529a0dbf 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -254,13 +254,26 @@ extern const struct clk_ops ti_clk_mux_ops;
254void omap2_init_clk_hw_omap_clocks(struct clk *clk); 254void omap2_init_clk_hw_omap_clocks(struct clk *clk);
255int omap3_noncore_dpll_enable(struct clk_hw *hw); 255int omap3_noncore_dpll_enable(struct clk_hw *hw);
256void omap3_noncore_dpll_disable(struct clk_hw *hw); 256void omap3_noncore_dpll_disable(struct clk_hw *hw);
257int omap3_noncore_dpll_set_parent(struct clk_hw *hw, u8 index);
257int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, 258int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
258 unsigned long parent_rate); 259 unsigned long parent_rate);
260int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
261 unsigned long rate,
262 unsigned long parent_rate,
263 u8 index);
264long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
265 unsigned long rate,
266 unsigned long *best_parent_rate,
267 struct clk_hw **best_parent_clk);
259unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, 268unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
260 unsigned long parent_rate); 269 unsigned long parent_rate);
261long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, 270long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
262 unsigned long target_rate, 271 unsigned long target_rate,
263 unsigned long *parent_rate); 272 unsigned long *parent_rate);
273long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
274 unsigned long rate,
275 unsigned long *best_parent_rate,
276 struct clk_hw **best_parent_clk);
264u8 omap2_init_dpll_parent(struct clk_hw *hw); 277u8 omap2_init_dpll_parent(struct clk_hw *hw);
265unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate); 278unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
266long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, 279long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
@@ -278,6 +291,8 @@ int omap2_clk_disable_autoidle_all(void);
278void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks); 291void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks);
279int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate, 292int omap3_dpll4_set_rate(struct clk_hw *clk, unsigned long rate,
280 unsigned long parent_rate); 293 unsigned long parent_rate);
294int omap3_dpll4_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
295 unsigned long parent_rate, u8 index);
281int omap2_dflt_clk_enable(struct clk_hw *hw); 296int omap2_dflt_clk_enable(struct clk_hw *hw);
282void omap2_dflt_clk_disable(struct clk_hw *hw); 297void omap2_dflt_clk_disable(struct clk_hw *hw);
283int omap2_dflt_clk_is_enabled(struct clk_hw *hw); 298int omap2_dflt_clk_is_enabled(struct clk_hw *hw);
@@ -292,6 +307,7 @@ void omap2xxx_clkt_vps_init(void);
292void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index); 307void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
293void ti_dt_clocks_register(struct ti_dt_clk *oclks); 308void ti_dt_clocks_register(struct ti_dt_clk *oclks);
294void ti_dt_clk_init_provider(struct device_node *np, int index); 309void ti_dt_clk_init_provider(struct device_node *np, int index);
310void ti_dt_clk_init_retry_clks(void);
295void ti_dt_clockdomains_setup(void); 311void ti_dt_clockdomains_setup(void);
296int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw, 312int ti_clk_retry_init(struct device_node *node, struct clk_hw *hw,
297 ti_of_clk_init_cb_t func); 313 ti_of_clk_init_cb_t func);
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h
new file mode 100644
index 000000000000..4d1019d56f7f
--- /dev/null
+++ b/include/linux/clock_cooling.h
@@ -0,0 +1,65 @@
1/*
2 * linux/include/linux/clock_cooling.h
3 *
4 * Copyright (C) 2014 Eduardo Valentin <edubezval@gmail.com>
5 *
6 * Copyright (C) 2013 Texas Instruments Inc.
7 * Contact: Eduardo Valentin <eduardo.valentin@ti.com>
8 *
9 * Highly based on cpu_cooling.c.
10 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
11 * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; version 2 of the License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 */
22
23#ifndef __CPU_COOLING_H__
24#define __CPU_COOLING_H__
25
26#include <linux/of.h>
27#include <linux/thermal.h>
28#include <linux/cpumask.h>
29
30#ifdef CONFIG_CLOCK_THERMAL
31/**
32 * clock_cooling_register - function to create clock cooling device.
33 * @dev: struct device pointer to the device used as clock cooling device.
34 * @clock_name: string containing the clock used as cooling mechanism.
35 */
36struct thermal_cooling_device *
37clock_cooling_register(struct device *dev, const char *clock_name);
38
39/**
40 * clock_cooling_unregister - function to remove clock cooling device.
41 * @cdev: thermal cooling device pointer.
42 */
43void clock_cooling_unregister(struct thermal_cooling_device *cdev);
44
45unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
46 unsigned long freq);
47#else /* !CONFIG_CLOCK_THERMAL */
48static inline struct thermal_cooling_device *
49clock_cooling_register(struct device *dev, const char *clock_name)
50{
51 return NULL;
52}
53static inline
54void clock_cooling_unregister(struct thermal_cooling_device *cdev)
55{
56}
57static inline
58unsigned long clock_cooling_get_level(struct thermal_cooling_device *cdev,
59 unsigned long freq)
60{
61 return THERMAL_CSTATE_INVALID;
62}
63#endif /* CONFIG_CLOCK_THERMAL */
64
65#endif /* __CPU_COOLING_H__ */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 653f0e2b6ca9..abcafaa20b86 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -287,7 +287,7 @@ extern struct clocksource* clocksource_get_next(void);
287extern void clocksource_change_rating(struct clocksource *cs, int rating); 287extern void clocksource_change_rating(struct clocksource *cs, int rating);
288extern void clocksource_suspend(void); 288extern void clocksource_suspend(void);
289extern void clocksource_resume(void); 289extern void clocksource_resume(void);
290extern struct clocksource * __init __weak clocksource_default_clock(void); 290extern struct clocksource * __init clocksource_default_clock(void);
291extern void clocksource_mark_unstable(struct clocksource *cs); 291extern void clocksource_mark_unstable(struct clocksource *cs);
292 292
293extern u64 293extern u64
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 371b93042520..9384ba66e975 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -15,13 +15,17 @@
15 15
16struct cma; 16struct cma;
17 17
18extern unsigned long totalcma_pages;
18extern phys_addr_t cma_get_base(struct cma *cma); 19extern phys_addr_t cma_get_base(struct cma *cma);
19extern unsigned long cma_get_size(struct cma *cma); 20extern unsigned long cma_get_size(struct cma *cma);
20 21
21extern int __init cma_declare_contiguous(phys_addr_t size, 22extern int __init cma_declare_contiguous(phys_addr_t base,
22 phys_addr_t base, phys_addr_t limit, 23 phys_addr_t size, phys_addr_t limit,
23 phys_addr_t alignment, unsigned int order_per_bit, 24 phys_addr_t alignment, unsigned int order_per_bit,
24 bool fixed, struct cma **res_cma); 25 bool fixed, struct cma **res_cma);
26extern int cma_init_reserved_mem(phys_addr_t base,
27 phys_addr_t size, int order_per_bit,
28 struct cma **res_cma);
25extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align); 29extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
26extern bool cma_release(struct cma *cma, struct page *pages, int count); 30extern bool cma_release(struct cma *cma, struct page *pages, int count);
27#endif 31#endif
diff --git a/include/linux/com20020.h b/include/linux/com20020.h
index 5dcfb944b6ce..85898995b234 100644
--- a/include/linux/com20020.h
+++ b/include/linux/com20020.h
@@ -41,6 +41,35 @@ extern const struct net_device_ops com20020_netdev_ops;
41#define BUS_ALIGN 1 41#define BUS_ALIGN 1
42#endif 42#endif
43 43
44#define PLX_PCI_MAX_CARDS 2
45
46struct com20020_pci_channel_map {
47 u32 bar;
48 u32 offset;
49 u32 size; /* 0x00 - auto, e.g. length of entire bar */
50};
51
52struct com20020_pci_card_info {
53 const char *name;
54 int devcount;
55
56 struct com20020_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CARDS];
57
58 unsigned int flags;
59};
60
61struct com20020_priv {
62 struct com20020_pci_card_info *ci;
63 struct list_head list_dev;
64};
65
66struct com20020_dev {
67 struct list_head list;
68 struct net_device *dev;
69
70 struct com20020_priv *pci_priv;
71 int index;
72};
44 73
45#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */ 74#define _INTMASK (ioaddr+BUS_ALIGN*0) /* writable */
46#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */ 75#define _STATUS (ioaddr+BUS_ALIGN*0) /* readable */
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 01e3132820da..3238ffa33f68 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -2,14 +2,24 @@
2#define _LINUX_COMPACTION_H 2#define _LINUX_COMPACTION_H
3 3
4/* Return values for compact_zone() and try_to_compact_pages() */ 4/* Return values for compact_zone() and try_to_compact_pages() */
5/* compaction didn't start as it was deferred due to past failures */
6#define COMPACT_DEFERRED 0
5/* compaction didn't start as it was not possible or direct reclaim was more suitable */ 7/* compaction didn't start as it was not possible or direct reclaim was more suitable */
6#define COMPACT_SKIPPED 0 8#define COMPACT_SKIPPED 1
7/* compaction should continue to another pageblock */ 9/* compaction should continue to another pageblock */
8#define COMPACT_CONTINUE 1 10#define COMPACT_CONTINUE 2
9/* direct compaction partially compacted a zone and there are suitable pages */ 11/* direct compaction partially compacted a zone and there are suitable pages */
10#define COMPACT_PARTIAL 2 12#define COMPACT_PARTIAL 3
11/* The full zone was compacted */ 13/* The full zone was compacted */
12#define COMPACT_COMPLETE 3 14#define COMPACT_COMPLETE 4
15
16/* Used to signal whether compaction detected need_sched() or lock contention */
17/* No contention detected */
18#define COMPACT_CONTENDED_NONE 0
19/* Either need_sched() was true or fatal signal pending */
20#define COMPACT_CONTENDED_SCHED 1
21/* Zone lock or lru_lock was contended in async compaction */
22#define COMPACT_CONTENDED_LOCK 2
13 23
14#ifdef CONFIG_COMPACTION 24#ifdef CONFIG_COMPACTION
15extern int sysctl_compact_memory; 25extern int sysctl_compact_memory;
@@ -22,10 +32,12 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
22extern int fragmentation_index(struct zone *zone, unsigned int order); 32extern int fragmentation_index(struct zone *zone, unsigned int order);
23extern unsigned long try_to_compact_pages(struct zonelist *zonelist, 33extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
24 int order, gfp_t gfp_mask, nodemask_t *mask, 34 int order, gfp_t gfp_mask, nodemask_t *mask,
25 enum migrate_mode mode, bool *contended); 35 enum migrate_mode mode, int *contended,
36 int alloc_flags, int classzone_idx);
26extern void compact_pgdat(pg_data_t *pgdat, int order); 37extern void compact_pgdat(pg_data_t *pgdat, int order);
27extern void reset_isolation_suitable(pg_data_t *pgdat); 38extern void reset_isolation_suitable(pg_data_t *pgdat);
28extern unsigned long compaction_suitable(struct zone *zone, int order); 39extern unsigned long compaction_suitable(struct zone *zone, int order,
40 int alloc_flags, int classzone_idx);
29 41
30/* Do not skip compaction more than 64 times */ 42/* Do not skip compaction more than 64 times */
31#define COMPACT_MAX_DEFER_SHIFT 6 43#define COMPACT_MAX_DEFER_SHIFT 6
@@ -91,7 +103,8 @@ static inline bool compaction_restarting(struct zone *zone, int order)
91#else 103#else
92static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, 104static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
93 int order, gfp_t gfp_mask, nodemask_t *nodemask, 105 int order, gfp_t gfp_mask, nodemask_t *nodemask,
94 enum migrate_mode mode, bool *contended) 106 enum migrate_mode mode, int *contended,
107 int alloc_flags, int classzone_idx)
95{ 108{
96 return COMPACT_CONTINUE; 109 return COMPACT_CONTINUE;
97} 110}
@@ -104,7 +117,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat)
104{ 117{
105} 118}
106 119
107static inline unsigned long compaction_suitable(struct zone *zone, int order) 120static inline unsigned long compaction_suitable(struct zone *zone, int order,
121 int alloc_flags, int classzone_idx)
108{ 122{
109 return COMPACT_SKIPPED; 123 return COMPACT_SKIPPED;
110} 124}
diff --git a/include/linux/compat.h b/include/linux/compat.h
index e6494261eaff..7450ca2ac1fc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -357,6 +357,9 @@ asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
357 357
358asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv, 358asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
359 const compat_uptr_t __user *envp); 359 const compat_uptr_t __user *envp);
360asmlinkage long compat_sys_execveat(int dfd, const char __user *filename,
361 const compat_uptr_t __user *argv,
362 const compat_uptr_t __user *envp, int flags);
360 363
361asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, 364asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
362 compat_ulong_t __user *outp, compat_ulong_t __user *exp, 365 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 2507fd2a1eb4..d1a558239b1a 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -71,7 +71,6 @@
71 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 71 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
72 * 72 *
73 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. 73 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
74 * Fixed in GCC 4.8.2 and later versions.
75 * 74 *
76 * (asm goto is automatically volatile - the naming reflects this.) 75 * (asm goto is automatically volatile - the naming reflects this.)
77 */ 76 */
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
new file mode 100644
index 000000000000..c8c565952548
--- /dev/null
+++ b/include/linux/compiler-gcc5.h
@@ -0,0 +1,65 @@
1#ifndef __LINUX_COMPILER_H
2#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
3#endif
4
5#define __used __attribute__((__used__))
6#define __must_check __attribute__((warn_unused_result))
7#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
8
9/* Mark functions as cold. gcc will assume any path leading to a call
10 to them will be unlikely. This means a lot of manual unlikely()s
11 are unnecessary now for any paths leading to the usual suspects
12 like BUG(), printk(), panic() etc. [but let's keep them for now for
13 older compilers]
14
15 Early snapshots of gcc 4.3 don't support this and we can't detect this
16 in the preprocessor, but we can live with this because they're unreleased.
17 Maketime probing would be overkill here.
18
19 gcc also has a __attribute__((__hot__)) to move hot functions into
20 a special section, but I don't see any sense in this right now in
21 the kernel context */
22#define __cold __attribute__((__cold__))
23
24#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
25
26#ifndef __CHECKER__
27# define __compiletime_warning(message) __attribute__((warning(message)))
28# define __compiletime_error(message) __attribute__((error(message)))
29#endif /* __CHECKER__ */
30
31/*
32 * Mark a position in code as unreachable. This can be used to
33 * suppress control flow warnings after asm blocks that transfer
34 * control elsewhere.
35 *
36 * Early snapshots of gcc 4.5 don't support this and we can't detect
37 * this in the preprocessor, but we can live with this because they're
38 * unreleased. Really, we need to have autoconf for the kernel.
39 */
40#define unreachable() __builtin_unreachable()
41
42/* Mark a function definition as prohibited from being cloned. */
43#define __noclone __attribute__((__noclone__))
44
45/*
46 * Tell the optimizer that something else uses this function or variable.
47 */
48#define __visible __attribute__((externally_visible))
49
50/*
51 * GCC 'asm goto' miscompiles certain code sequences:
52 *
53 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
54 *
55 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
56 *
57 * (asm goto is automatically volatile - the naming reflects this.)
58 */
59#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
60
61#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
62#define __HAVE_BUILTIN_BSWAP32__
63#define __HAVE_BUILTIN_BSWAP64__
64#define __HAVE_BUILTIN_BSWAP16__
65#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d5ad7b1118fc..a1c81f80978e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -186,6 +186,80 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
186# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) 186# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
187#endif 187#endif
188 188
189#include <uapi/linux/types.h>
190
191static __always_inline void data_access_exceeds_word_size(void)
192#ifdef __compiletime_warning
193__compiletime_warning("data access exceeds word size and won't be atomic")
194#endif
195;
196
197static __always_inline void data_access_exceeds_word_size(void)
198{
199}
200
201static __always_inline void __read_once_size(volatile void *p, void *res, int size)
202{
203 switch (size) {
204 case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
205 case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
206 case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
207#ifdef CONFIG_64BIT
208 case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
209#endif
210 default:
211 barrier();
212 __builtin_memcpy((void *)res, (const void *)p, size);
213 data_access_exceeds_word_size();
214 barrier();
215 }
216}
217
218static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
219{
220 switch (size) {
221 case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
222 case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
223 case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
224#ifdef CONFIG_64BIT
225 case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
226#endif
227 default:
228 barrier();
229 __builtin_memcpy((void *)p, (const void *)res, size);
230 data_access_exceeds_word_size();
231 barrier();
232 }
233}
234
235/*
236 * Prevent the compiler from merging or refetching reads or writes. The
237 * compiler is also forbidden from reordering successive instances of
238 * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
239 * compiler is aware of some particular ordering. One way to make the
240 * compiler aware of ordering is to put the two invocations of READ_ONCE,
241 * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
242 *
243 * In contrast to ACCESS_ONCE these two macros will also work on aggregate
244 * data types like structs or unions. If the size of the accessed data
245 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
246 * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
247 * compile-time warning.
248 *
249 * Their two major use cases are: (1) Mediating communication between
250 * process-level code and irq/NMI handlers, all running on the same CPU,
251 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
252 * mutilate accesses that either do not require ordering or that interact
253 * with an explicit memory barrier or atomic instruction that provides the
254 * required ordering.
255 */
256
257#define READ_ONCE(x) \
258 ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
259
260#define ASSIGN_ONCE(val, x) \
261 ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
262
189#endif /* __KERNEL__ */ 263#endif /* __KERNEL__ */
190 264
191#endif /* __ASSEMBLY__ */ 265#endif /* __ASSEMBLY__ */
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
new file mode 100644
index 000000000000..5d3c54311f7a
--- /dev/null
+++ b/include/linux/coresight.h
@@ -0,0 +1,263 @@
1/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#ifndef _LINUX_CORESIGHT_H
14#define _LINUX_CORESIGHT_H
15
16#include <linux/device.h>
17
18/* Peripheral id registers (0xFD0-0xFEC) */
19#define CORESIGHT_PERIPHIDR4 0xfd0
20#define CORESIGHT_PERIPHIDR5 0xfd4
21#define CORESIGHT_PERIPHIDR6 0xfd8
22#define CORESIGHT_PERIPHIDR7 0xfdC
23#define CORESIGHT_PERIPHIDR0 0xfe0
24#define CORESIGHT_PERIPHIDR1 0xfe4
25#define CORESIGHT_PERIPHIDR2 0xfe8
26#define CORESIGHT_PERIPHIDR3 0xfeC
27/* Component id registers (0xFF0-0xFFC) */
28#define CORESIGHT_COMPIDR0 0xff0
29#define CORESIGHT_COMPIDR1 0xff4
30#define CORESIGHT_COMPIDR2 0xff8
31#define CORESIGHT_COMPIDR3 0xffC
32
33#define ETM_ARCH_V3_3 0x23
34#define ETM_ARCH_V3_5 0x25
35#define PFT_ARCH_V1_0 0x30
36#define PFT_ARCH_V1_1 0x31
37
38#define CORESIGHT_UNLOCK 0xc5acce55
39
40extern struct bus_type coresight_bustype;
41
42enum coresight_dev_type {
43 CORESIGHT_DEV_TYPE_NONE,
44 CORESIGHT_DEV_TYPE_SINK,
45 CORESIGHT_DEV_TYPE_LINK,
46 CORESIGHT_DEV_TYPE_LINKSINK,
47 CORESIGHT_DEV_TYPE_SOURCE,
48};
49
50enum coresight_dev_subtype_sink {
51 CORESIGHT_DEV_SUBTYPE_SINK_NONE,
52 CORESIGHT_DEV_SUBTYPE_SINK_PORT,
53 CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
54};
55
56enum coresight_dev_subtype_link {
57 CORESIGHT_DEV_SUBTYPE_LINK_NONE,
58 CORESIGHT_DEV_SUBTYPE_LINK_MERG,
59 CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
60 CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
61};
62
63enum coresight_dev_subtype_source {
64 CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
65 CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
66 CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
67 CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
68};
69
70/**
71 * struct coresight_dev_subtype - further characterisation of a type
72 * @sink_subtype: type of sink this component is, as defined
73 by @coresight_dev_subtype_sink.
74 * @link_subtype: type of link this component is, as defined
75 by @coresight_dev_subtype_link.
76 * @source_subtype: type of source this component is, as defined
77 by @coresight_dev_subtype_source.
78 */
79struct coresight_dev_subtype {
80 enum coresight_dev_subtype_sink sink_subtype;
81 enum coresight_dev_subtype_link link_subtype;
82 enum coresight_dev_subtype_source source_subtype;
83};
84
85/**
86 * struct coresight_platform_data - data harvested from the DT specification
87 * @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
88 * @name: name of the component as shown under sysfs.
89 * @nr_inport: number of input ports for this component.
90 * @outports: list of remote endpoint port number.
91 * @child_names:name of all child components connected to this device.
92 * @child_ports:child component port number the current component is
93 connected to.
94 * @nr_outport: number of output ports for this component.
95 * @clk: The clock this component is associated to.
96 */
97struct coresight_platform_data {
98 int cpu;
99 const char *name;
100 int nr_inport;
101 int *outports;
102 const char **child_names;
103 int *child_ports;
104 int nr_outport;
105 struct clk *clk;
106};
107
108/**
109 * struct coresight_desc - description of a component required from drivers
110 * @type: as defined by @coresight_dev_type.
111 * @subtype: as defined by @coresight_dev_subtype.
112 * @ops: generic operations for this component, as defined
113 by @coresight_ops.
114 * @pdata: platform data collected from DT.
115 * @dev: The device entity associated to this component.
116 * @groups: operations specific to this component. These will end up
117 in the component's sysfs sub-directory.
118 */
119struct coresight_desc {
120 enum coresight_dev_type type;
121 struct coresight_dev_subtype subtype;
122 const struct coresight_ops *ops;
123 struct coresight_platform_data *pdata;
124 struct device *dev;
125 const struct attribute_group **groups;
126};
127
128/**
129 * struct coresight_connection - representation of a single connection
130 * @outport: a connection's output port number.
131 * @chid_name: remote component's name.
132 * @child_port: remote component's port number @output is connected to.
133 * @child_dev: a @coresight_device representation of the component
134 connected to @outport.
135 */
136struct coresight_connection {
137 int outport;
138 const char *child_name;
139 int child_port;
140 struct coresight_device *child_dev;
141};
142
143/**
144 * struct coresight_device - representation of a device as used by the framework
145 * @conns: array of coresight_connections associated to this component.
146 * @nr_inport: number of input port associated to this component.
147 * @nr_outport: number of output port associated to this component.
148 * @type: as defined by @coresight_dev_type.
149 * @subtype: as defined by @coresight_dev_subtype.
150 * @ops: generic operations for this component, as defined
151 by @coresight_ops.
152 * @dev: The device entity associated to this component.
153 * @refcnt: keep track of what is in use.
154 * @path_link: link of current component into the path being enabled.
155 * @orphan: true if the component has connections that haven't been linked.
156 * @enable: 'true' if component is currently part of an active path.
157 * @activated: 'true' only if a _sink_ has been activated. A sink can be
158 activated but not yet enabled. Enabling for a _sink_
159 happens when a source has been selected for that it.
160 */
161struct coresight_device {
162 struct coresight_connection *conns;
163 int nr_inport;
164 int nr_outport;
165 enum coresight_dev_type type;
166 struct coresight_dev_subtype subtype;
167 const struct coresight_ops *ops;
168 struct device dev;
169 atomic_t *refcnt;
170 struct list_head path_link;
171 bool orphan;
172 bool enable; /* true only if configured as part of a path */
173 bool activated; /* true only if a sink is part of a path */
174};
175
176#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
177
178#define source_ops(csdev) csdev->ops->source_ops
179#define sink_ops(csdev) csdev->ops->sink_ops
180#define link_ops(csdev) csdev->ops->link_ops
181
182#define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name, \
183 __mode, __get, __set, __fmt) \
184DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt); \
185static const struct coresight_ops_entry __name ## _entry = { \
186 .name = __entry_name, \
187 .mode = __mode, \
188 .ops = &__name ## _ops \
189}
190
191/**
192 * struct coresight_ops_sink - basic operations for a sink
193 * Operations available for sinks
194 * @enable: enables the sink.
195 * @disable: disables the sink.
196 */
197struct coresight_ops_sink {
198 int (*enable)(struct coresight_device *csdev);
199 void (*disable)(struct coresight_device *csdev);
200};
201
202/**
203 * struct coresight_ops_link - basic operations for a link
204 * Operations available for links.
205 * @enable: enables flow between iport and oport.
206 * @disable: disables flow between iport and oport.
207 */
208struct coresight_ops_link {
209 int (*enable)(struct coresight_device *csdev, int iport, int oport);
210 void (*disable)(struct coresight_device *csdev, int iport, int oport);
211};
212
213/**
214 * struct coresight_ops_source - basic operations for a source
215 * Operations available for sources.
216 * @trace_id: returns the value of the component's trace ID as known
217 to the HW.
218 * @enable: enables tracing from a source.
219 * @disable: disables tracing for a source.
220 */
221struct coresight_ops_source {
222 int (*trace_id)(struct coresight_device *csdev);
223 int (*enable)(struct coresight_device *csdev);
224 void (*disable)(struct coresight_device *csdev);
225};
226
227struct coresight_ops {
228 const struct coresight_ops_sink *sink_ops;
229 const struct coresight_ops_link *link_ops;
230 const struct coresight_ops_source *source_ops;
231};
232
233#ifdef CONFIG_CORESIGHT
234extern struct coresight_device *
235coresight_register(struct coresight_desc *desc);
236extern void coresight_unregister(struct coresight_device *csdev);
237extern int coresight_enable(struct coresight_device *csdev);
238extern void coresight_disable(struct coresight_device *csdev);
239extern int coresight_is_bit_set(u32 val, int position, int value);
240extern int coresight_timeout(void __iomem *addr, u32 offset,
241 int position, int value);
242#ifdef CONFIG_OF
243extern struct coresight_platform_data *of_get_coresight_platform_data(
244 struct device *dev, struct device_node *node);
245#endif
246#else
247static inline struct coresight_device *
248coresight_register(struct coresight_desc *desc) { return NULL; }
249static inline void coresight_unregister(struct coresight_device *csdev) {}
250static inline int
251coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
252static inline void coresight_disable(struct coresight_device *csdev) {}
253static inline int coresight_is_bit_set(u32 val, int position, int value)
254 { return 0; }
255static inline int coresight_timeout(void __iomem *addr, u32 offset,
256 int position, int value) { return 1; }
257#ifdef CONFIG_OF
258static inline struct coresight_platform_data *of_get_coresight_platform_data(
259 struct device *dev, struct device_node *node) { return NULL; }
260#endif
261#endif
262
263#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 95978ad7fcdd..4260e8594bd7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -19,6 +19,7 @@
19 19
20struct device; 20struct device;
21struct device_node; 21struct device_node;
22struct attribute_group;
22 23
23struct cpu { 24struct cpu {
24 int node_id; /* The node which contains the CPU */ 25 int node_id; /* The node which contains the CPU */
@@ -39,6 +40,9 @@ extern void cpu_remove_dev_attr(struct device_attribute *attr);
39extern int cpu_add_dev_attr_group(struct attribute_group *attrs); 40extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
40extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); 41extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
41 42
43extern struct device *cpu_device_create(struct device *parent, void *drvdata,
44 const struct attribute_group **groups,
45 const char *fmt, ...);
42#ifdef CONFIG_HOTPLUG_CPU 46#ifdef CONFIG_HOTPLUG_CPU
43extern void unregister_cpu(struct cpu *cpu); 47extern void unregister_cpu(struct cpu *cpu);
44extern ssize_t arch_cpu_probe(const char *, size_t); 48extern ssize_t arch_cpu_probe(const char *, size_t);
@@ -213,6 +217,7 @@ extern struct bus_type cpu_subsys;
213extern void cpu_hotplug_begin(void); 217extern void cpu_hotplug_begin(void);
214extern void cpu_hotplug_done(void); 218extern void cpu_hotplug_done(void);
215extern void get_online_cpus(void); 219extern void get_online_cpus(void);
220extern bool try_get_online_cpus(void);
216extern void put_online_cpus(void); 221extern void put_online_cpus(void);
217extern void cpu_hotplug_disable(void); 222extern void cpu_hotplug_disable(void);
218extern void cpu_hotplug_enable(void); 223extern void cpu_hotplug_enable(void);
@@ -230,6 +235,7 @@ int cpu_down(unsigned int cpu);
230static inline void cpu_hotplug_begin(void) {} 235static inline void cpu_hotplug_begin(void) {}
231static inline void cpu_hotplug_done(void) {} 236static inline void cpu_hotplug_done(void) {}
232#define get_online_cpus() do { } while (0) 237#define get_online_cpus() do { } while (0)
238#define try_get_online_cpus() true
233#define put_online_cpus() do { } while (0) 239#define put_online_cpus() do { } while (0)
234#define cpu_hotplug_disable() do { } while (0) 240#define cpu_hotplug_disable() do { } while (0)
235#define cpu_hotplug_enable() do { } while (0) 241#define cpu_hotplug_enable() do { } while (0)
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c303d383def1..bd955270d5aa 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -50,7 +50,7 @@ static inline struct thermal_cooling_device *
50of_cpufreq_cooling_register(struct device_node *np, 50of_cpufreq_cooling_register(struct device_node *np,
51 const struct cpumask *clip_cpus) 51 const struct cpumask *clip_cpus)
52{ 52{
53 return NULL; 53 return ERR_PTR(-ENOSYS);
54} 54}
55#endif 55#endif
56 56
@@ -65,13 +65,13 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
65static inline struct thermal_cooling_device * 65static inline struct thermal_cooling_device *
66cpufreq_cooling_register(const struct cpumask *clip_cpus) 66cpufreq_cooling_register(const struct cpumask *clip_cpus)
67{ 67{
68 return NULL; 68 return ERR_PTR(-ENOSYS);
69} 69}
70static inline struct thermal_cooling_device * 70static inline struct thermal_cooling_device *
71of_cpufreq_cooling_register(struct device_node *np, 71of_cpufreq_cooling_register(struct device_node *np,
72 const struct cpumask *clip_cpus) 72 const struct cpumask *clip_cpus)
73{ 73{
74 return NULL; 74 return ERR_PTR(-ENOSYS);
75} 75}
76static inline 76static inline
77void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) 77void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h
new file mode 100644
index 000000000000..0414009e2c30
--- /dev/null
+++ b/include/linux/cpufreq-dt.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2014 Marvell
3 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __CPUFREQ_DT_H__
11#define __CPUFREQ_DT_H__
12
13struct cpufreq_dt_platform_data {
14 /*
15 * True when each CPU has its own clock to control its
16 * frequency, false when all CPUs are controlled by a single
17 * clock.
18 */
19 bool independent_clocks;
20};
21
22#endif /* __CPUFREQ_DT_H__ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 7d1955afa62c..4d078cebafd2 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -112,6 +112,9 @@ struct cpufreq_policy {
112 spinlock_t transition_lock; 112 spinlock_t transition_lock;
113 wait_queue_head_t transition_wait; 113 wait_queue_head_t transition_wait;
114 struct task_struct *transition_task; /* Task which is doing the transition */ 114 struct task_struct *transition_task; /* Task which is doing the transition */
115
116 /* For cpufreq driver's internal use */
117 void *driver_data;
115}; 118};
116 119
117/* Only for ACPI */ 120/* Only for ACPI */
@@ -214,25 +217,26 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
214 217
215 218
216struct cpufreq_driver { 219struct cpufreq_driver {
217 char name[CPUFREQ_NAME_LEN]; 220 char name[CPUFREQ_NAME_LEN];
218 u8 flags; 221 u8 flags;
222 void *driver_data;
219 223
220 /* needed by all drivers */ 224 /* needed by all drivers */
221 int (*init) (struct cpufreq_policy *policy); 225 int (*init)(struct cpufreq_policy *policy);
222 int (*verify) (struct cpufreq_policy *policy); 226 int (*verify)(struct cpufreq_policy *policy);
223 227
224 /* define one out of two */ 228 /* define one out of two */
225 int (*setpolicy) (struct cpufreq_policy *policy); 229 int (*setpolicy)(struct cpufreq_policy *policy);
226 230
227 /* 231 /*
228 * On failure, should always restore frequency to policy->restore_freq 232 * On failure, should always restore frequency to policy->restore_freq
229 * (i.e. old freq). 233 * (i.e. old freq).
230 */ 234 */
231 int (*target) (struct cpufreq_policy *policy, /* Deprecated */ 235 int (*target)(struct cpufreq_policy *policy,
232 unsigned int target_freq, 236 unsigned int target_freq,
233 unsigned int relation); 237 unsigned int relation); /* Deprecated */
234 int (*target_index) (struct cpufreq_policy *policy, 238 int (*target_index)(struct cpufreq_policy *policy,
235 unsigned int index); 239 unsigned int index);
236 /* 240 /*
237 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION 241 * Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION
238 * unset. 242 * unset.
@@ -248,27 +252,31 @@ struct cpufreq_driver {
248 * wish to switch to intermediate frequency for some target frequency. 252 * wish to switch to intermediate frequency for some target frequency.
249 * In that case core will directly call ->target_index(). 253 * In that case core will directly call ->target_index().
250 */ 254 */
251 unsigned int (*get_intermediate)(struct cpufreq_policy *policy, 255 unsigned int (*get_intermediate)(struct cpufreq_policy *policy,
252 unsigned int index); 256 unsigned int index);
253 int (*target_intermediate)(struct cpufreq_policy *policy, 257 int (*target_intermediate)(struct cpufreq_policy *policy,
254 unsigned int index); 258 unsigned int index);
255 259
256 /* should be defined, if possible */ 260 /* should be defined, if possible */
257 unsigned int (*get) (unsigned int cpu); 261 unsigned int (*get)(unsigned int cpu);
258 262
259 /* optional */ 263 /* optional */
260 int (*bios_limit) (int cpu, unsigned int *limit); 264 int (*bios_limit)(int cpu, unsigned int *limit);
265
266 int (*exit)(struct cpufreq_policy *policy);
267 void (*stop_cpu)(struct cpufreq_policy *policy);
268 int (*suspend)(struct cpufreq_policy *policy);
269 int (*resume)(struct cpufreq_policy *policy);
270
271 /* Will be called after the driver is fully initialized */
272 void (*ready)(struct cpufreq_policy *policy);
261 273
262 int (*exit) (struct cpufreq_policy *policy); 274 struct freq_attr **attr;
263 void (*stop_cpu) (struct cpufreq_policy *policy);
264 int (*suspend) (struct cpufreq_policy *policy);
265 int (*resume) (struct cpufreq_policy *policy);
266 struct freq_attr **attr;
267 275
268 /* platform specific boost support code */ 276 /* platform specific boost support code */
269 bool boost_supported; 277 bool boost_supported;
270 bool boost_enabled; 278 bool boost_enabled;
271 int (*set_boost) (int state); 279 int (*set_boost)(int state);
272}; 280};
273 281
274/* flags */ 282/* flags */
@@ -309,6 +317,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data);
309int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); 317int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
310 318
311const char *cpufreq_get_current_driver(void); 319const char *cpufreq_get_current_driver(void);
320void *cpufreq_get_driver_data(void);
312 321
313static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, 322static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
314 unsigned int min, unsigned int max) 323 unsigned int min, unsigned int max)
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 25e0df6155a4..ab70f3bc44ad 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -53,7 +53,6 @@ struct cpuidle_state {
53}; 53};
54 54
55/* Idle State Flags */ 55/* Idle State Flags */
56#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
57#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ 56#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
58#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ 57#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
59 58
@@ -89,8 +88,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
89/** 88/**
90 * cpuidle_get_last_residency - retrieves the last state's residency time 89 * cpuidle_get_last_residency - retrieves the last state's residency time
91 * @dev: the target CPU 90 * @dev: the target CPU
92 *
93 * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set
94 */ 91 */
95static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) 92static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
96{ 93{
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 2997af6d2ccd..b950e9d6008b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -666,10 +666,19 @@ static inline size_t cpumask_size(void)
666 * 666 *
667 * This code makes NR_CPUS length memcopy and brings to a memory corruption. 667 * This code makes NR_CPUS length memcopy and brings to a memory corruption.
668 * cpumask_copy() provide safe copy functionality. 668 * cpumask_copy() provide safe copy functionality.
669 *
670 * Note that there is another evil here: If you define a cpumask_var_t
671 * as a percpu variable then the way to obtain the address of the cpumask
672 * structure differently influences what this_cpu_* operation needs to be
673 * used. Please use this_cpu_cpumask_var_t in those cases. The direct use
674 * of this_cpu_ptr() or this_cpu_read() will lead to failures when the
675 * other type of cpumask_var_t implementation is configured.
669 */ 676 */
670#ifdef CONFIG_CPUMASK_OFFSTACK 677#ifdef CONFIG_CPUMASK_OFFSTACK
671typedef struct cpumask *cpumask_var_t; 678typedef struct cpumask *cpumask_var_t;
672 679
680#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
681
673bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); 682bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
674bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); 683bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
675bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); 684bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
@@ -681,6 +690,8 @@ void free_bootmem_cpumask_var(cpumask_var_t mask);
681#else 690#else
682typedef struct cpumask cpumask_var_t[1]; 691typedef struct cpumask cpumask_var_t[1];
683 692
693#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
694
684static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) 695static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
685{ 696{
686 return true; 697 return true;
@@ -792,6 +803,23 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
792} 803}
793#endif /* NR_CPUS > BITS_PER_LONG */ 804#endif /* NR_CPUS > BITS_PER_LONG */
794 805
806/**
807 * cpumap_print_to_pagebuf - copies the cpumask into the buffer either
808 * as comma-separated list of cpus or hex values of cpumask
809 * @list: indicates whether the cpumap must be list
810 * @mask: the cpumask to copy
811 * @buf: the buffer to copy into
812 *
813 * Returns the length of the (null-terminated) @buf string, zero if
814 * nothing is copied.
815 */
816static inline ssize_t
817cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
818{
819 return bitmap_print_to_pagebuf(list, buf, cpumask_bits(mask),
820 nr_cpumask_bits);
821}
822
795/* 823/*
796 * 824 *
797 * From here down, all obsolete. Use cpumask_ variants! 825 * From here down, all obsolete. Use cpumask_ variants!
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 6e39c9bb0dae..1b357997cac5 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
48void cpuset_init_current_mems_allowed(void); 48void cpuset_init_current_mems_allowed(void);
49int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); 49int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
50 50
51extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); 51extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);
52extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
53 52
54static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) 53static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
55{ 54{
56 return nr_cpusets() <= 1 || 55 return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
57 __cpuset_node_allowed_softwall(node, gfp_mask);
58} 56}
59 57
60static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) 58static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
61{ 59{
62 return nr_cpusets() <= 1 || 60 return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
63 __cpuset_node_allowed_hardwall(node, gfp_mask);
64}
65
66static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
67{
68 return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
69}
70
71static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
72{
73 return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
74} 61}
75 62
76extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, 63extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -86,7 +73,8 @@ extern void __cpuset_memory_pressure_bump(void);
86 73
87extern void cpuset_task_status_allowed(struct seq_file *m, 74extern void cpuset_task_status_allowed(struct seq_file *m,
88 struct task_struct *task); 75 struct task_struct *task);
89extern int proc_cpuset_show(struct seq_file *, void *); 76extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
77 struct pid *pid, struct task_struct *tsk);
90 78
91extern int cpuset_mem_spread_node(void); 79extern int cpuset_mem_spread_node(void);
92extern int cpuset_slab_spread_node(void); 80extern int cpuset_slab_spread_node(void);
@@ -178,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
178 return 1; 166 return 1;
179} 167}
180 168
181static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) 169static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
182{
183 return 1;
184}
185
186static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
187{
188 return 1;
189}
190
191static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
192{ 170{
193 return 1; 171 return 1;
194} 172}
195 173
196static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) 174static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
197{ 175{
198 return 1; 176 return 1;
199} 177}
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 72ab536ad3de..3849fce7ecfe 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -14,14 +14,13 @@
14extern unsigned long long elfcorehdr_addr; 14extern unsigned long long elfcorehdr_addr;
15extern unsigned long long elfcorehdr_size; 15extern unsigned long long elfcorehdr_size;
16 16
17extern int __weak elfcorehdr_alloc(unsigned long long *addr, 17extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
18 unsigned long long *size); 18extern void elfcorehdr_free(unsigned long long addr);
19extern void __weak elfcorehdr_free(unsigned long long addr); 19extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
20extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos); 20extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
21extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); 21extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
22extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, 22 unsigned long from, unsigned long pfn,
23 unsigned long from, unsigned long pfn, 23 unsigned long size, pgprot_t prot);
24 unsigned long size, pgprot_t prot);
25 24
26extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, 25extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
27 unsigned long, int); 26 unsigned long, int);
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index b3cb71f0d3b0..cf53d0773ce3 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -6,7 +6,8 @@
6#define CRC_T10DIF_DIGEST_SIZE 2 6#define CRC_T10DIF_DIGEST_SIZE 2
7#define CRC_T10DIF_BLOCK_SIZE 1 7#define CRC_T10DIF_BLOCK_SIZE 1
8 8
9__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len); 9extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
10__u16 crc_t10dif(unsigned char const *, size_t); 10 size_t len);
11extern __u16 crc_t10dif(unsigned char const *, size_t);
11 12
12#endif 13#endif
diff --git a/include/linux/cred.h b/include/linux/cred.h
index b2d0820837c4..2fb2ca2127ed 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -68,6 +68,7 @@ extern void groups_free(struct group_info *);
68extern int set_current_groups(struct group_info *); 68extern int set_current_groups(struct group_info *);
69extern void set_groups(struct cred *, struct group_info *); 69extern void set_groups(struct cred *, struct group_info *);
70extern int groups_search(const struct group_info *, kgid_t); 70extern int groups_search(const struct group_info *, kgid_t);
71extern bool may_setgroups(void);
71 72
72/* access the groups "array" with this macro */ 73/* access the groups "array" with this macro */
73#define GROUP_AT(gi, i) \ 74#define GROUP_AT(gi, i) \
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index d45e949699ea..9c8776d0ada8 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -26,6 +26,19 @@
26#include <linux/uaccess.h> 26#include <linux/uaccess.h>
27 27
28/* 28/*
29 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
30 * arbitrary modules to be loaded. Loading from userspace may still need the
31 * unprefixed names, so retains those aliases as well.
32 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
33 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
34 * expands twice on the same line. Instead, use a separate base name for the
35 * alias.
36 */
37#define MODULE_ALIAS_CRYPTO(name) \
38 __MODULE_INFO(alias, alias_userspace, name); \
39 __MODULE_INFO(alias, alias_crypto, "crypto-" name)
40
41/*
29 * Algorithm masks and types. 42 * Algorithm masks and types.
30 */ 43 */
31#define CRYPTO_ALG_TYPE_MASK 0x0000000f 44#define CRYPTO_ALG_TYPE_MASK 0x0000000f
@@ -127,6 +140,13 @@ struct skcipher_givcrypt_request;
127 140
128typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err); 141typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
129 142
143/**
144 * DOC: Block Cipher Context Data Structures
145 *
146 * These data structures define the operating context for each block cipher
147 * type.
148 */
149
130struct crypto_async_request { 150struct crypto_async_request {
131 struct list_head list; 151 struct list_head list;
132 crypto_completion_t complete; 152 crypto_completion_t complete;
@@ -194,9 +214,63 @@ struct hash_desc {
194 u32 flags; 214 u32 flags;
195}; 215};
196 216
197/* 217/**
198 * Algorithms: modular crypto algorithm implementations, managed 218 * DOC: Block Cipher Algorithm Definitions
199 * via crypto_register_alg() and crypto_unregister_alg(). 219 *
220 * These data structures define modular crypto algorithm implementations,
221 * managed via crypto_register_alg() and crypto_unregister_alg().
222 */
223
224/**
225 * struct ablkcipher_alg - asynchronous block cipher definition
226 * @min_keysize: Minimum key size supported by the transformation. This is the
227 * smallest key length supported by this transformation algorithm.
228 * This must be set to one of the pre-defined values as this is
229 * not hardware specific. Possible values for this field can be
230 * found via git grep "_MIN_KEY_SIZE" include/crypto/
231 * @max_keysize: Maximum key size supported by the transformation. This is the
232 * largest key length supported by this transformation algorithm.
233 * This must be set to one of the pre-defined values as this is
234 * not hardware specific. Possible values for this field can be
235 * found via git grep "_MAX_KEY_SIZE" include/crypto/
236 * @setkey: Set key for the transformation. This function is used to either
237 * program a supplied key into the hardware or store the key in the
238 * transformation context for programming it later. Note that this
239 * function does modify the transformation context. This function can
240 * be called multiple times during the existence of the transformation
241 * object, so one must make sure the key is properly reprogrammed into
242 * the hardware. This function is also responsible for checking the key
243 * length for validity. In case a software fallback was put in place in
244 * the @cra_init call, this function might need to use the fallback if
245 * the algorithm doesn't support all of the key sizes.
246 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
247 * the supplied scatterlist containing the blocks of data. The crypto
248 * API consumer is responsible for aligning the entries of the
249 * scatterlist properly and making sure the chunks are correctly
250 * sized. In case a software fallback was put in place in the
251 * @cra_init call, this function might need to use the fallback if
252 * the algorithm doesn't support all of the key sizes. In case the
253 * key was stored in transformation context, the key might need to be
254 * re-programmed into the hardware in this function. This function
255 * shall not modify the transformation context, as this function may
256 * be called in parallel with the same transformation object.
257 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
258 * and the conditions are exactly the same.
259 * @givencrypt: Update the IV for encryption. With this function, a cipher
260 * implementation may provide the function on how to update the IV
261 * for encryption.
262 * @givdecrypt: Update the IV for decryption. This is the reverse of
263 * @givencrypt .
264 * @geniv: The transformation implementation may use an "IV generator" provided
265 * by the kernel crypto API. Several use cases have a predefined
266 * approach how IVs are to be updated. For such use cases, the kernel
267 * crypto API provides ready-to-use implementations that can be
268 * referenced with this variable.
269 * @ivsize: IV size applicable for transformation. The consumer must provide an
270 * IV of exactly that size to perform the encrypt or decrypt operation.
271 *
272 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
273 * mandatory and must be filled.
200 */ 274 */
201struct ablkcipher_alg { 275struct ablkcipher_alg {
202 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key, 276 int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
@@ -213,6 +287,32 @@ struct ablkcipher_alg {
213 unsigned int ivsize; 287 unsigned int ivsize;
214}; 288};
215 289
290/**
291 * struct aead_alg - AEAD cipher definition
292 * @maxauthsize: Set the maximum authentication tag size supported by the
293 * transformation. A transformation may support smaller tag sizes.
294 * As the authentication tag is a message digest to ensure the
295 * integrity of the encrypted data, a consumer typically wants the
296 * largest authentication tag possible as defined by this
297 * variable.
298 * @setauthsize: Set authentication size for the AEAD transformation. This
299 * function is used to specify the consumer requested size of the
300 * authentication tag to be either generated by the transformation
301 * during encryption or the size of the authentication tag to be
302 * supplied during the decryption operation. This function is also
303 * responsible for checking the authentication tag size for
304 * validity.
305 * @setkey: see struct ablkcipher_alg
306 * @encrypt: see struct ablkcipher_alg
307 * @decrypt: see struct ablkcipher_alg
308 * @givencrypt: see struct ablkcipher_alg
309 * @givdecrypt: see struct ablkcipher_alg
310 * @geniv: see struct ablkcipher_alg
311 * @ivsize: see struct ablkcipher_alg
312 *
313 * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
314 * mandatory and must be filled.
315 */
216struct aead_alg { 316struct aead_alg {
217 int (*setkey)(struct crypto_aead *tfm, const u8 *key, 317 int (*setkey)(struct crypto_aead *tfm, const u8 *key,
218 unsigned int keylen); 318 unsigned int keylen);
@@ -228,6 +328,18 @@ struct aead_alg {
228 unsigned int maxauthsize; 328 unsigned int maxauthsize;
229}; 329};
230 330
331/**
332 * struct blkcipher_alg - synchronous block cipher definition
333 * @min_keysize: see struct ablkcipher_alg
334 * @max_keysize: see struct ablkcipher_alg
335 * @setkey: see struct ablkcipher_alg
336 * @encrypt: see struct ablkcipher_alg
337 * @decrypt: see struct ablkcipher_alg
338 * @geniv: see struct ablkcipher_alg
339 * @ivsize: see struct ablkcipher_alg
340 *
341 * All fields except @geniv and @ivsize are mandatory and must be filled.
342 */
231struct blkcipher_alg { 343struct blkcipher_alg {
232 int (*setkey)(struct crypto_tfm *tfm, const u8 *key, 344 int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
233 unsigned int keylen); 345 unsigned int keylen);
@@ -245,6 +357,53 @@ struct blkcipher_alg {
245 unsigned int ivsize; 357 unsigned int ivsize;
246}; 358};
247 359
360/**
361 * struct cipher_alg - single-block symmetric ciphers definition
362 * @cia_min_keysize: Minimum key size supported by the transformation. This is
363 * the smallest key length supported by this transformation
364 * algorithm. This must be set to one of the pre-defined
365 * values as this is not hardware specific. Possible values
366 * for this field can be found via git grep "_MIN_KEY_SIZE"
367 * include/crypto/
368 * @cia_max_keysize: Maximum key size supported by the transformation. This is
369 * the largest key length supported by this transformation
370 * algorithm. This must be set to one of the pre-defined values
371 * as this is not hardware specific. Possible values for this
372 * field can be found via git grep "_MAX_KEY_SIZE"
373 * include/crypto/
374 * @cia_setkey: Set key for the transformation. This function is used to either
375 * program a supplied key into the hardware or store the key in the
376 * transformation context for programming it later. Note that this
377 * function does modify the transformation context. This function
378 * can be called multiple times during the existence of the
379 * transformation object, so one must make sure the key is properly
380 * reprogrammed into the hardware. This function is also
381 * responsible for checking the key length for validity.
382 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
383 * single block of data, which must be @cra_blocksize big. This
384 * always operates on a full @cra_blocksize and it is not possible
385 * to encrypt a block of smaller size. The supplied buffers must
386 * therefore also be at least of @cra_blocksize size. Both the
387 * input and output buffers are always aligned to @cra_alignmask.
388 * In case either of the input or output buffer supplied by user
389 * of the crypto API is not aligned to @cra_alignmask, the crypto
390 * API will re-align the buffers. The re-alignment means that a
391 * new buffer will be allocated, the data will be copied into the
392 * new buffer, then the processing will happen on the new buffer,
393 * then the data will be copied back into the original buffer and
394 * finally the new buffer will be freed. In case a software
395 * fallback was put in place in the @cra_init call, this function
396 * might need to use the fallback if the algorithm doesn't support
397 * all of the key sizes. In case the key was stored in
398 * transformation context, the key might need to be re-programmed
399 * into the hardware in this function. This function shall not
400 * modify the transformation context, as this function may be
401 * called in parallel with the same transformation object.
402 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
403 * @cia_encrypt, and the conditions are exactly the same.
404 *
405 * All fields are mandatory and must be filled.
406 */
248struct cipher_alg { 407struct cipher_alg {
249 unsigned int cia_min_keysize; 408 unsigned int cia_min_keysize;
250 unsigned int cia_max_keysize; 409 unsigned int cia_max_keysize;
@@ -261,6 +420,25 @@ struct compress_alg {
261 unsigned int slen, u8 *dst, unsigned int *dlen); 420 unsigned int slen, u8 *dst, unsigned int *dlen);
262}; 421};
263 422
423/**
424 * struct rng_alg - random number generator definition
425 * @rng_make_random: The function defined by this variable obtains a random
426 * number. The random number generator transform must generate
427 * the random number out of the context provided with this
428 * call.
429 * @rng_reset: Reset of the random number generator by clearing the entire state.
430 * With the invocation of this function call, the random number
431 * generator shall completely reinitialize its state. If the random
432 * number generator requires a seed for setting up a new state,
433 * the seed must be provided by the consumer while invoking this
434 * function. The required size of the seed is defined with
435 * @seedsize .
436 * @seedsize: The seed size required for a random number generator
437 * initialization defined with this variable. Some random number
438 * generators like the SP800-90A DRBG does not require a seed as the
439 * seeding is implemented internally without the need of support by
440 * the consumer. In this case, the seed size is set to zero.
441 */
264struct rng_alg { 442struct rng_alg {
265 int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata, 443 int (*rng_make_random)(struct crypto_rng *tfm, u8 *rdata,
266 unsigned int dlen); 444 unsigned int dlen);
@@ -277,6 +455,81 @@ struct rng_alg {
277#define cra_compress cra_u.compress 455#define cra_compress cra_u.compress
278#define cra_rng cra_u.rng 456#define cra_rng cra_u.rng
279 457
458/**
459 * struct crypto_alg - definition of a cryptograpic cipher algorithm
460 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
461 * CRYPTO_ALG_* flags for the flags which go in here. Those are
462 * used for fine-tuning the description of the transformation
463 * algorithm.
464 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
465 * of the smallest possible unit which can be transformed with
466 * this algorithm. The users must respect this value.
467 * In case of HASH transformation, it is possible for a smaller
468 * block than @cra_blocksize to be passed to the crypto API for
469 * transformation, in case of any other transformation type, an
470 * error will be returned upon any attempt to transform smaller
471 * than @cra_blocksize chunks.
472 * @cra_ctxsize: Size of the operational context of the transformation. This
473 * value informs the kernel crypto API about the memory size
474 * needed to be allocated for the transformation context.
475 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
476 * buffer containing the input data for the algorithm must be
477 * aligned to this alignment mask. The data buffer for the
478 * output data must be aligned to this alignment mask. Note that
479 * the Crypto API will do the re-alignment in software, but
480 * only under special conditions and there is a performance hit.
481 * The re-alignment happens at these occasions for different
482 * @cra_u types: cipher -- For both input data and output data
483 * buffer; ahash -- For output hash destination buf; shash --
484 * For output hash destination buf.
485 * This is needed on hardware which is flawed by design and
486 * cannot pick data from arbitrary addresses.
487 * @cra_priority: Priority of this transformation implementation. In case
488 * multiple transformations with same @cra_name are available to
489 * the Crypto API, the kernel will use the one with highest
490 * @cra_priority.
491 * @cra_name: Generic name (usable by multiple implementations) of the
492 * transformation algorithm. This is the name of the transformation
493 * itself. This field is used by the kernel when looking up the
494 * providers of particular transformation.
495 * @cra_driver_name: Unique name of the transformation provider. This is the
496 * name of the provider of the transformation. This can be any
497 * arbitrary value, but in the usual case, this contains the
498 * name of the chip or provider and the name of the
499 * transformation algorithm.
500 * @cra_type: Type of the cryptographic transformation. This is a pointer to
501 * struct crypto_type, which implements callbacks common for all
502 * trasnformation types. There are multiple options:
503 * &crypto_blkcipher_type, &crypto_ablkcipher_type,
504 * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
505 * This field might be empty. In that case, there are no common
506 * callbacks. This is the case for: cipher, compress, shash.
507 * @cra_u: Callbacks implementing the transformation. This is a union of
508 * multiple structures. Depending on the type of transformation selected
509 * by @cra_type and @cra_flags above, the associated structure must be
510 * filled with callbacks. This field might be empty. This is the case
511 * for ahash, shash.
512 * @cra_init: Initialize the cryptographic transformation object. This function
513 * is used to initialize the cryptographic transformation object.
514 * This function is called only once at the instantiation time, right
515 * after the transformation context was allocated. In case the
516 * cryptographic hardware has some special requirements which need to
517 * be handled by software, this function shall check for the precise
518 * requirement of the transformation and put any software fallbacks
519 * in place.
520 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
521 * counterpart to @cra_init, used to remove various changes set in
522 * @cra_init.
523 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
524 * @cra_list: internally used
525 * @cra_users: internally used
526 * @cra_refcnt: internally used
527 * @cra_destroy: internally used
528 *
529 * The struct crypto_alg describes a generic Crypto API algorithm and is common
530 * for all of the transformations. Any variable not documented here shall not
531 * be used by a cipher implementation as it is internal to the Crypto API.
532 */
280struct crypto_alg { 533struct crypto_alg {
281 struct list_head cra_list; 534 struct list_head cra_list;
282 struct list_head cra_users; 535 struct list_head cra_users;
@@ -581,6 +834,50 @@ static inline u32 crypto_skcipher_mask(u32 mask)
581 return mask; 834 return mask;
582} 835}
583 836
837/**
838 * DOC: Asynchronous Block Cipher API
839 *
840 * Asynchronous block cipher API is used with the ciphers of type
841 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
842 *
843 * Asynchronous cipher operations imply that the function invocation for a
844 * cipher request returns immediately before the completion of the operation.
845 * The cipher request is scheduled as a separate kernel thread and therefore
846 * load-balanced on the different CPUs via the process scheduler. To allow
847 * the kernel crypto API to inform the caller about the completion of a cipher
848 * request, the caller must provide a callback function. That function is
849 * invoked with the cipher handle when the request completes.
850 *
851 * To support the asynchronous operation, additional information than just the
852 * cipher handle must be supplied to the kernel crypto API. That additional
853 * information is given by filling in the ablkcipher_request data structure.
854 *
855 * For the asynchronous block cipher API, the state is maintained with the tfm
856 * cipher handle. A single tfm can be used across multiple calls and in
857 * parallel. For asynchronous block cipher calls, context data supplied and
858 * only used by the caller can be referenced the request data structure in
859 * addition to the IV used for the cipher request. The maintenance of such
860 * state information would be important for a crypto driver implementer to
861 * have, because when calling the callback function upon completion of the
862 * cipher operation, that callback function may need some information about
863 * which operation just finished if it invoked multiple in parallel. This
864 * state information is unused by the kernel crypto API.
865 */
866
867/**
868 * crypto_alloc_ablkcipher() - allocate asynchronous block cipher handle
869 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
870 * ablkcipher cipher
871 * @type: specifies the type of the cipher
872 * @mask: specifies the mask for the cipher
873 *
874 * Allocate a cipher handle for an ablkcipher. The returned struct
875 * crypto_ablkcipher is the cipher handle that is required for any subsequent
876 * API invocation for that ablkcipher.
877 *
878 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
879 * of an error, PTR_ERR() returns the error code.
880 */
584struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name, 881struct crypto_ablkcipher *crypto_alloc_ablkcipher(const char *alg_name,
585 u32 type, u32 mask); 882 u32 type, u32 mask);
586 883
@@ -590,11 +887,25 @@ static inline struct crypto_tfm *crypto_ablkcipher_tfm(
590 return &tfm->base; 887 return &tfm->base;
591} 888}
592 889
890/**
891 * crypto_free_ablkcipher() - zeroize and free cipher handle
892 * @tfm: cipher handle to be freed
893 */
593static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm) 894static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
594{ 895{
595 crypto_free_tfm(crypto_ablkcipher_tfm(tfm)); 896 crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
596} 897}
597 898
899/**
900 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
901 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
902 * ablkcipher
903 * @type: specifies the type of the cipher
904 * @mask: specifies the mask for the cipher
905 *
906 * Return: true when the ablkcipher is known to the kernel crypto API; false
907 * otherwise
908 */
598static inline int crypto_has_ablkcipher(const char *alg_name, u32 type, 909static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
599 u32 mask) 910 u32 mask)
600{ 911{
@@ -608,12 +919,31 @@ static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
608 return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher; 919 return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
609} 920}
610 921
922/**
923 * crypto_ablkcipher_ivsize() - obtain IV size
924 * @tfm: cipher handle
925 *
926 * The size of the IV for the ablkcipher referenced by the cipher handle is
927 * returned. This IV size may be zero if the cipher does not need an IV.
928 *
929 * Return: IV size in bytes
930 */
611static inline unsigned int crypto_ablkcipher_ivsize( 931static inline unsigned int crypto_ablkcipher_ivsize(
612 struct crypto_ablkcipher *tfm) 932 struct crypto_ablkcipher *tfm)
613{ 933{
614 return crypto_ablkcipher_crt(tfm)->ivsize; 934 return crypto_ablkcipher_crt(tfm)->ivsize;
615} 935}
616 936
937/**
938 * crypto_ablkcipher_blocksize() - obtain block size of cipher
939 * @tfm: cipher handle
940 *
941 * The block size for the ablkcipher referenced with the cipher handle is
942 * returned. The caller may use that information to allocate appropriate
943 * memory for the data returned by the encryption or decryption operation
944 *
945 * Return: block size of cipher
946 */
617static inline unsigned int crypto_ablkcipher_blocksize( 947static inline unsigned int crypto_ablkcipher_blocksize(
618 struct crypto_ablkcipher *tfm) 948 struct crypto_ablkcipher *tfm)
619{ 949{
@@ -643,6 +973,22 @@ static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
643 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags); 973 crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
644} 974}
645 975
976/**
977 * crypto_ablkcipher_setkey() - set key for cipher
978 * @tfm: cipher handle
979 * @key: buffer holding the key
980 * @keylen: length of the key in bytes
981 *
982 * The caller provided key is set for the ablkcipher referenced by the cipher
983 * handle.
984 *
985 * Note, the key length determines the cipher type. Many block ciphers implement
986 * different cipher modes depending on the key size, such as AES-128 vs AES-192
987 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
988 * is performed.
989 *
990 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
991 */
646static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm, 992static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
647 const u8 *key, unsigned int keylen) 993 const u8 *key, unsigned int keylen)
648{ 994{
@@ -651,12 +997,32 @@ static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
651 return crt->setkey(crt->base, key, keylen); 997 return crt->setkey(crt->base, key, keylen);
652} 998}
653 999
1000/**
1001 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
1002 * @req: ablkcipher_request out of which the cipher handle is to be obtained
1003 *
1004 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
1005 * data structure.
1006 *
1007 * Return: crypto_ablkcipher handle
1008 */
654static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm( 1009static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
655 struct ablkcipher_request *req) 1010 struct ablkcipher_request *req)
656{ 1011{
657 return __crypto_ablkcipher_cast(req->base.tfm); 1012 return __crypto_ablkcipher_cast(req->base.tfm);
658} 1013}
659 1014
1015/**
1016 * crypto_ablkcipher_encrypt() - encrypt plaintext
1017 * @req: reference to the ablkcipher_request handle that holds all information
1018 * needed to perform the cipher operation
1019 *
1020 * Encrypt plaintext data using the ablkcipher_request handle. That data
1021 * structure and how it is filled with data is discussed with the
1022 * ablkcipher_request_* functions.
1023 *
1024 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1025 */
660static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req) 1026static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
661{ 1027{
662 struct ablkcipher_tfm *crt = 1028 struct ablkcipher_tfm *crt =
@@ -664,6 +1030,17 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
664 return crt->encrypt(req); 1030 return crt->encrypt(req);
665} 1031}
666 1032
1033/**
1034 * crypto_ablkcipher_decrypt() - decrypt ciphertext
1035 * @req: reference to the ablkcipher_request handle that holds all information
1036 * needed to perform the cipher operation
1037 *
1038 * Decrypt ciphertext data using the ablkcipher_request handle. That data
1039 * structure and how it is filled with data is discussed with the
1040 * ablkcipher_request_* functions.
1041 *
1042 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1043 */
667static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req) 1044static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
668{ 1045{
669 struct ablkcipher_tfm *crt = 1046 struct ablkcipher_tfm *crt =
@@ -671,12 +1048,37 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
671 return crt->decrypt(req); 1048 return crt->decrypt(req);
672} 1049}
673 1050
1051/**
1052 * DOC: Asynchronous Cipher Request Handle
1053 *
1054 * The ablkcipher_request data structure contains all pointers to data
1055 * required for the asynchronous cipher operation. This includes the cipher
1056 * handle (which can be used by multiple ablkcipher_request instances), pointer
1057 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
1058 * as a handle to the ablkcipher_request_* API calls in a similar way as
1059 * ablkcipher handle to the crypto_ablkcipher_* API calls.
1060 */
1061
1062/**
1063 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
1064 * @tfm: cipher handle
1065 *
1066 * Return: number of bytes
1067 */
674static inline unsigned int crypto_ablkcipher_reqsize( 1068static inline unsigned int crypto_ablkcipher_reqsize(
675 struct crypto_ablkcipher *tfm) 1069 struct crypto_ablkcipher *tfm)
676{ 1070{
677 return crypto_ablkcipher_crt(tfm)->reqsize; 1071 return crypto_ablkcipher_crt(tfm)->reqsize;
678} 1072}
679 1073
1074/**
1075 * ablkcipher_request_set_tfm() - update cipher handle reference in request
1076 * @req: request handle to be modified
1077 * @tfm: cipher handle that shall be added to the request handle
1078 *
1079 * Allow the caller to replace the existing ablkcipher handle in the request
1080 * data structure with a different one.
1081 */
680static inline void ablkcipher_request_set_tfm( 1082static inline void ablkcipher_request_set_tfm(
681 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm) 1083 struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
682{ 1084{
@@ -689,6 +1091,18 @@ static inline struct ablkcipher_request *ablkcipher_request_cast(
689 return container_of(req, struct ablkcipher_request, base); 1091 return container_of(req, struct ablkcipher_request, base);
690} 1092}
691 1093
1094/**
1095 * ablkcipher_request_alloc() - allocate request data structure
1096 * @tfm: cipher handle to be registered with the request
1097 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
1098 *
1099 * Allocate the request data structure that must be used with the ablkcipher
1100 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
1101 * handle is registered in the request data structure.
1102 *
1103 * Return: allocated request handle in case of success; IS_ERR() is true in case
1104 * of an error, PTR_ERR() returns the error code.
1105 */
692static inline struct ablkcipher_request *ablkcipher_request_alloc( 1106static inline struct ablkcipher_request *ablkcipher_request_alloc(
693 struct crypto_ablkcipher *tfm, gfp_t gfp) 1107 struct crypto_ablkcipher *tfm, gfp_t gfp)
694{ 1108{
@@ -703,11 +1117,40 @@ static inline struct ablkcipher_request *ablkcipher_request_alloc(
703 return req; 1117 return req;
704} 1118}
705 1119
1120/**
1121 * ablkcipher_request_free() - zeroize and free request data structure
1122 * @req: request data structure cipher handle to be freed
1123 */
706static inline void ablkcipher_request_free(struct ablkcipher_request *req) 1124static inline void ablkcipher_request_free(struct ablkcipher_request *req)
707{ 1125{
708 kzfree(req); 1126 kzfree(req);
709} 1127}
710 1128
1129/**
1130 * ablkcipher_request_set_callback() - set asynchronous callback function
1131 * @req: request handle
1132 * @flags: specify zero or an ORing of the flags
1133 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
1134 * increase the wait queue beyond the initial maximum size;
1135 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1136 * @compl: callback function pointer to be registered with the request handle
1137 * @data: The data pointer refers to memory that is not used by the kernel
1138 * crypto API, but provided to the callback function for it to use. Here,
1139 * the caller can provide a reference to memory the callback function can
1140 * operate on. As the callback function is invoked asynchronously to the
1141 * related functionality, it may need to access data structures of the
1142 * related functionality which can be referenced using this pointer. The
1143 * callback function can access the memory via the "data" field in the
1144 * crypto_async_request data structure provided to the callback function.
1145 *
1146 * This function allows setting the callback function that is triggered once the
1147 * cipher operation completes.
1148 *
1149 * The callback function is registered with the ablkcipher_request handle and
1150 * must comply with the following template:
1151 *
1152 * void callback_function(struct crypto_async_request *req, int error)
1153 */
711static inline void ablkcipher_request_set_callback( 1154static inline void ablkcipher_request_set_callback(
712 struct ablkcipher_request *req, 1155 struct ablkcipher_request *req,
713 u32 flags, crypto_completion_t compl, void *data) 1156 u32 flags, crypto_completion_t compl, void *data)
@@ -717,6 +1160,22 @@ static inline void ablkcipher_request_set_callback(
717 req->base.flags = flags; 1160 req->base.flags = flags;
718} 1161}
719 1162
1163/**
1164 * ablkcipher_request_set_crypt() - set data buffers
1165 * @req: request handle
1166 * @src: source scatter / gather list
1167 * @dst: destination scatter / gather list
1168 * @nbytes: number of bytes to process from @src
1169 * @iv: IV for the cipher operation which must comply with the IV size defined
1170 * by crypto_ablkcipher_ivsize
1171 *
1172 * This function allows setting of the source data and destination data
1173 * scatter / gather lists.
1174 *
1175 * For encryption, the source is treated as the plaintext and the
1176 * destination is the ciphertext. For a decryption operation, the use is
1177 * reversed: the source is the ciphertext and the destination is the plaintext.
1178 */
720static inline void ablkcipher_request_set_crypt( 1179static inline void ablkcipher_request_set_crypt(
721 struct ablkcipher_request *req, 1180 struct ablkcipher_request *req,
722 struct scatterlist *src, struct scatterlist *dst, 1181 struct scatterlist *src, struct scatterlist *dst,
@@ -728,11 +1187,55 @@ static inline void ablkcipher_request_set_crypt(
728 req->info = iv; 1187 req->info = iv;
729} 1188}
730 1189
1190/**
1191 * DOC: Authenticated Encryption With Associated Data (AEAD) Cipher API
1192 *
1193 * The AEAD cipher API is used with the ciphers of type CRYPTO_ALG_TYPE_AEAD
1194 * (listed as type "aead" in /proc/crypto)
1195 *
1196 * The most prominent examples for this type of encryption is GCM and CCM.
1197 * However, the kernel supports other types of AEAD ciphers which are defined
1198 * with the following cipher string:
1199 *
1200 * authenc(keyed message digest, block cipher)
1201 *
1202 * For example: authenc(hmac(sha256), cbc(aes))
1203 *
1204 * The example code provided for the asynchronous block cipher operation
1205 * applies here as well. Naturally all *ablkcipher* symbols must be exchanged
1206 * the *aead* pendants discussed in the following. In addtion, for the AEAD
1207 * operation, the aead_request_set_assoc function must be used to set the
1208 * pointer to the associated data memory location before performing the
1209 * encryption or decryption operation. In case of an encryption, the associated
1210 * data memory is filled during the encryption operation. For decryption, the
1211 * associated data memory must contain data that is used to verify the integrity
1212 * of the decrypted data. Another deviation from the asynchronous block cipher
1213 * operation is that the caller should explicitly check for -EBADMSG of the
1214 * crypto_aead_decrypt. That error indicates an authentication error, i.e.
1215 * a breach in the integrity of the message. In essence, that -EBADMSG error
1216 * code is the key bonus an AEAD cipher has over "standard" block chaining
1217 * modes.
1218 */
1219
731static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm) 1220static inline struct crypto_aead *__crypto_aead_cast(struct crypto_tfm *tfm)
732{ 1221{
733 return (struct crypto_aead *)tfm; 1222 return (struct crypto_aead *)tfm;
734} 1223}
735 1224
1225/**
1226 * crypto_alloc_aead() - allocate AEAD cipher handle
1227 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1228 * AEAD cipher
1229 * @type: specifies the type of the cipher
1230 * @mask: specifies the mask for the cipher
1231 *
1232 * Allocate a cipher handle for an AEAD. The returned struct
1233 * crypto_aead is the cipher handle that is required for any subsequent
1234 * API invocation for that AEAD.
1235 *
1236 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1237 * of an error, PTR_ERR() returns the error code.
1238 */
736struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask); 1239struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask);
737 1240
738static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm) 1241static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
@@ -740,6 +1243,10 @@ static inline struct crypto_tfm *crypto_aead_tfm(struct crypto_aead *tfm)
740 return &tfm->base; 1243 return &tfm->base;
741} 1244}
742 1245
1246/**
1247 * crypto_free_aead() - zeroize and free aead handle
1248 * @tfm: cipher handle to be freed
1249 */
743static inline void crypto_free_aead(struct crypto_aead *tfm) 1250static inline void crypto_free_aead(struct crypto_aead *tfm)
744{ 1251{
745 crypto_free_tfm(crypto_aead_tfm(tfm)); 1252 crypto_free_tfm(crypto_aead_tfm(tfm));
@@ -750,16 +1257,47 @@ static inline struct aead_tfm *crypto_aead_crt(struct crypto_aead *tfm)
750 return &crypto_aead_tfm(tfm)->crt_aead; 1257 return &crypto_aead_tfm(tfm)->crt_aead;
751} 1258}
752 1259
1260/**
1261 * crypto_aead_ivsize() - obtain IV size
1262 * @tfm: cipher handle
1263 *
1264 * The size of the IV for the aead referenced by the cipher handle is
1265 * returned. This IV size may be zero if the cipher does not need an IV.
1266 *
1267 * Return: IV size in bytes
1268 */
753static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm) 1269static inline unsigned int crypto_aead_ivsize(struct crypto_aead *tfm)
754{ 1270{
755 return crypto_aead_crt(tfm)->ivsize; 1271 return crypto_aead_crt(tfm)->ivsize;
756} 1272}
757 1273
1274/**
1275 * crypto_aead_authsize() - obtain maximum authentication data size
1276 * @tfm: cipher handle
1277 *
1278 * The maximum size of the authentication data for the AEAD cipher referenced
1279 * by the AEAD cipher handle is returned. The authentication data size may be
1280 * zero if the cipher implements a hard-coded maximum.
1281 *
1282 * The authentication data may also be known as "tag value".
1283 *
1284 * Return: authentication data size / tag size in bytes
1285 */
758static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm) 1286static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
759{ 1287{
760 return crypto_aead_crt(tfm)->authsize; 1288 return crypto_aead_crt(tfm)->authsize;
761} 1289}
762 1290
1291/**
1292 * crypto_aead_blocksize() - obtain block size of cipher
1293 * @tfm: cipher handle
1294 *
1295 * The block size for the AEAD referenced with the cipher handle is returned.
1296 * The caller may use that information to allocate appropriate memory for the
1297 * data returned by the encryption or decryption operation
1298 *
1299 * Return: block size of cipher
1300 */
763static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm) 1301static inline unsigned int crypto_aead_blocksize(struct crypto_aead *tfm)
764{ 1302{
765 return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm)); 1303 return crypto_tfm_alg_blocksize(crypto_aead_tfm(tfm));
@@ -785,6 +1323,22 @@ static inline void crypto_aead_clear_flags(struct crypto_aead *tfm, u32 flags)
785 crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags); 1323 crypto_tfm_clear_flags(crypto_aead_tfm(tfm), flags);
786} 1324}
787 1325
1326/**
1327 * crypto_aead_setkey() - set key for cipher
1328 * @tfm: cipher handle
1329 * @key: buffer holding the key
1330 * @keylen: length of the key in bytes
1331 *
1332 * The caller provided key is set for the AEAD referenced by the cipher
1333 * handle.
1334 *
1335 * Note, the key length determines the cipher type. Many block ciphers implement
1336 * different cipher modes depending on the key size, such as AES-128 vs AES-192
1337 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
1338 * is performed.
1339 *
1340 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1341 */
788static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, 1342static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
789 unsigned int keylen) 1343 unsigned int keylen)
790{ 1344{
@@ -793,6 +1347,16 @@ static inline int crypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
793 return crt->setkey(crt->base, key, keylen); 1347 return crt->setkey(crt->base, key, keylen);
794} 1348}
795 1349
1350/**
1351 * crypto_aead_setauthsize() - set authentication data size
1352 * @tfm: cipher handle
1353 * @authsize: size of the authentication data / tag in bytes
1354 *
1355 * Set the authentication data size / tag size. AEAD requires an authentication
1356 * tag (or MAC) in addition to the associated data.
1357 *
1358 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1359 */
796int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize); 1360int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize);
797 1361
798static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) 1362static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
@@ -800,27 +1364,105 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
800 return __crypto_aead_cast(req->base.tfm); 1364 return __crypto_aead_cast(req->base.tfm);
801} 1365}
802 1366
1367/**
1368 * crypto_aead_encrypt() - encrypt plaintext
1369 * @req: reference to the aead_request handle that holds all information
1370 * needed to perform the cipher operation
1371 *
1372 * Encrypt plaintext data using the aead_request handle. That data structure
1373 * and how it is filled with data is discussed with the aead_request_*
1374 * functions.
1375 *
1376 * IMPORTANT NOTE The encryption operation creates the authentication data /
1377 * tag. That data is concatenated with the created ciphertext.
1378 * The ciphertext memory size is therefore the given number of
1379 * block cipher blocks + the size defined by the
1380 * crypto_aead_setauthsize invocation. The caller must ensure
1381 * that sufficient memory is available for the ciphertext and
1382 * the authentication tag.
1383 *
1384 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1385 */
803static inline int crypto_aead_encrypt(struct aead_request *req) 1386static inline int crypto_aead_encrypt(struct aead_request *req)
804{ 1387{
805 return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req); 1388 return crypto_aead_crt(crypto_aead_reqtfm(req))->encrypt(req);
806} 1389}
807 1390
1391/**
1392 * crypto_aead_decrypt() - decrypt ciphertext
1393 * @req: reference to the ablkcipher_request handle that holds all information
1394 * needed to perform the cipher operation
1395 *
1396 * Decrypt ciphertext data using the aead_request handle. That data structure
1397 * and how it is filled with data is discussed with the aead_request_*
1398 * functions.
1399 *
1400 * IMPORTANT NOTE The caller must concatenate the ciphertext followed by the
1401 * authentication data / tag. That authentication data / tag
1402 * must have the size defined by the crypto_aead_setauthsize
1403 * invocation.
1404 *
1405 *
1406 * Return: 0 if the cipher operation was successful; -EBADMSG: The AEAD
1407 * cipher operation performs the authentication of the data during the
1408 * decryption operation. Therefore, the function returns this error if
1409 * the authentication of the ciphertext was unsuccessful (i.e. the
1410 * integrity of the ciphertext or the associated data was violated);
1411 * < 0 if an error occurred.
1412 */
808static inline int crypto_aead_decrypt(struct aead_request *req) 1413static inline int crypto_aead_decrypt(struct aead_request *req)
809{ 1414{
810 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req); 1415 return crypto_aead_crt(crypto_aead_reqtfm(req))->decrypt(req);
811} 1416}
812 1417
1418/**
1419 * DOC: Asynchronous AEAD Request Handle
1420 *
1421 * The aead_request data structure contains all pointers to data required for
1422 * the AEAD cipher operation. This includes the cipher handle (which can be
1423 * used by multiple aead_request instances), pointer to plaintext and
1424 * ciphertext, asynchronous callback function, etc. It acts as a handle to the
1425 * aead_request_* API calls in a similar way as AEAD handle to the
1426 * crypto_aead_* API calls.
1427 */
1428
1429/**
1430 * crypto_aead_reqsize() - obtain size of the request data structure
1431 * @tfm: cipher handle
1432 *
1433 * Return: number of bytes
1434 */
813static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm) 1435static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
814{ 1436{
815 return crypto_aead_crt(tfm)->reqsize; 1437 return crypto_aead_crt(tfm)->reqsize;
816} 1438}
817 1439
1440/**
1441 * aead_request_set_tfm() - update cipher handle reference in request
1442 * @req: request handle to be modified
1443 * @tfm: cipher handle that shall be added to the request handle
1444 *
1445 * Allow the caller to replace the existing aead handle in the request
1446 * data structure with a different one.
1447 */
818static inline void aead_request_set_tfm(struct aead_request *req, 1448static inline void aead_request_set_tfm(struct aead_request *req,
819 struct crypto_aead *tfm) 1449 struct crypto_aead *tfm)
820{ 1450{
821 req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base); 1451 req->base.tfm = crypto_aead_tfm(crypto_aead_crt(tfm)->base);
822} 1452}
823 1453
1454/**
1455 * aead_request_alloc() - allocate request data structure
1456 * @tfm: cipher handle to be registered with the request
1457 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
1458 *
1459 * Allocate the request data structure that must be used with the AEAD
1460 * encrypt and decrypt API calls. During the allocation, the provided aead
1461 * handle is registered in the request data structure.
1462 *
1463 * Return: allocated request handle in case of success; IS_ERR() is true in case
1464 * of an error, PTR_ERR() returns the error code.
1465 */
824static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm, 1466static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
825 gfp_t gfp) 1467 gfp_t gfp)
826{ 1468{
@@ -834,11 +1476,40 @@ static inline struct aead_request *aead_request_alloc(struct crypto_aead *tfm,
834 return req; 1476 return req;
835} 1477}
836 1478
1479/**
1480 * aead_request_free() - zeroize and free request data structure
1481 * @req: request data structure cipher handle to be freed
1482 */
837static inline void aead_request_free(struct aead_request *req) 1483static inline void aead_request_free(struct aead_request *req)
838{ 1484{
839 kzfree(req); 1485 kzfree(req);
840} 1486}
841 1487
1488/**
1489 * aead_request_set_callback() - set asynchronous callback function
1490 * @req: request handle
1491 * @flags: specify zero or an ORing of the flags
1492 * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
1493 * increase the wait queue beyond the initial maximum size;
1494 * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
1495 * @compl: callback function pointer to be registered with the request handle
1496 * @data: The data pointer refers to memory that is not used by the kernel
1497 * crypto API, but provided to the callback function for it to use. Here,
1498 * the caller can provide a reference to memory the callback function can
1499 * operate on. As the callback function is invoked asynchronously to the
1500 * related functionality, it may need to access data structures of the
1501 * related functionality which can be referenced using this pointer. The
1502 * callback function can access the memory via the "data" field in the
1503 * crypto_async_request data structure provided to the callback function.
1504 *
1505 * Setting the callback function that is triggered once the cipher operation
1506 * completes
1507 *
1508 * The callback function is registered with the aead_request handle and
1509 * must comply with the following template:
1510 *
1511 * void callback_function(struct crypto_async_request *req, int error)
1512 */
842static inline void aead_request_set_callback(struct aead_request *req, 1513static inline void aead_request_set_callback(struct aead_request *req,
843 u32 flags, 1514 u32 flags,
844 crypto_completion_t compl, 1515 crypto_completion_t compl,
@@ -849,6 +1520,36 @@ static inline void aead_request_set_callback(struct aead_request *req,
849 req->base.flags = flags; 1520 req->base.flags = flags;
850} 1521}
851 1522
1523/**
1524 * aead_request_set_crypt - set data buffers
1525 * @req: request handle
1526 * @src: source scatter / gather list
1527 * @dst: destination scatter / gather list
1528 * @cryptlen: number of bytes to process from @src
1529 * @iv: IV for the cipher operation which must comply with the IV size defined
1530 * by crypto_aead_ivsize()
1531 *
1532 * Setting the source data and destination data scatter / gather lists.
1533 *
1534 * For encryption, the source is treated as the plaintext and the
1535 * destination is the ciphertext. For a decryption operation, the use is
1536 * reversed: the source is the ciphertext and the destination is the plaintext.
1537 *
1538 * IMPORTANT NOTE AEAD requires an authentication tag (MAC). For decryption,
1539 * the caller must concatenate the ciphertext followed by the
1540 * authentication tag and provide the entire data stream to the
1541 * decryption operation (i.e. the data length used for the
1542 * initialization of the scatterlist and the data length for the
1543 * decryption operation is identical). For encryption, however,
1544 * the authentication tag is created while encrypting the data.
1545 * The destination buffer must hold sufficient space for the
1546 * ciphertext and the authentication tag while the encryption
1547 * invocation must only point to the plaintext data size. The
1548 * following code snippet illustrates the memory usage
1549 * buffer = kmalloc(ptbuflen + (enc ? authsize : 0));
1550 * sg_init_one(&sg, buffer, ptbuflen + (enc ? authsize : 0));
1551 * aead_request_set_crypt(req, &sg, &sg, ptbuflen, iv);
1552 */
852static inline void aead_request_set_crypt(struct aead_request *req, 1553static inline void aead_request_set_crypt(struct aead_request *req,
853 struct scatterlist *src, 1554 struct scatterlist *src,
854 struct scatterlist *dst, 1555 struct scatterlist *dst,
@@ -860,6 +1561,15 @@ static inline void aead_request_set_crypt(struct aead_request *req,
860 req->iv = iv; 1561 req->iv = iv;
861} 1562}
862 1563
1564/**
1565 * aead_request_set_assoc() - set the associated data scatter / gather list
1566 * @req: request handle
1567 * @assoc: associated data scatter / gather list
1568 * @assoclen: number of bytes to process from @assoc
1569 *
1570 * For encryption, the memory is filled with the associated data. For
1571 * decryption, the memory must point to the associated data.
1572 */
863static inline void aead_request_set_assoc(struct aead_request *req, 1573static inline void aead_request_set_assoc(struct aead_request *req,
864 struct scatterlist *assoc, 1574 struct scatterlist *assoc,
865 unsigned int assoclen) 1575 unsigned int assoclen)
@@ -868,6 +1578,36 @@ static inline void aead_request_set_assoc(struct aead_request *req,
868 req->assoclen = assoclen; 1578 req->assoclen = assoclen;
869} 1579}
870 1580
1581/**
1582 * DOC: Synchronous Block Cipher API
1583 *
1584 * The synchronous block cipher API is used with the ciphers of type
1585 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
1586 *
1587 * Synchronous calls, have a context in the tfm. But since a single tfm can be
1588 * used in multiple calls and in parallel, this info should not be changeable
1589 * (unless a lock is used). This applies, for example, to the symmetric key.
1590 * However, the IV is changeable, so there is an iv field in blkcipher_tfm
1591 * structure for synchronous blkcipher api. So, its the only state info that can
1592 * be kept for synchronous calls without using a big lock across a tfm.
1593 *
1594 * The block cipher API allows the use of a complete cipher, i.e. a cipher
1595 * consisting of a template (a block chaining mode) and a single block cipher
1596 * primitive (e.g. AES).
1597 *
1598 * The plaintext data buffer and the ciphertext data buffer are pointed to
1599 * by using scatter/gather lists. The cipher operation is performed
1600 * on all segments of the provided scatter/gather lists.
1601 *
1602 * The kernel crypto API supports a cipher operation "in-place" which means that
1603 * the caller may provide the same scatter/gather list for the plaintext and
1604 * cipher text. After the completion of the cipher operation, the plaintext
1605 * data is replaced with the ciphertext data in case of an encryption and vice
1606 * versa for a decryption. The caller must ensure that the scatter/gather lists
1607 * for the output data point to sufficiently large buffers, i.e. multiples of
1608 * the block size of the cipher.
1609 */
1610
871static inline struct crypto_blkcipher *__crypto_blkcipher_cast( 1611static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
872 struct crypto_tfm *tfm) 1612 struct crypto_tfm *tfm)
873{ 1613{
@@ -881,6 +1621,20 @@ static inline struct crypto_blkcipher *crypto_blkcipher_cast(
881 return __crypto_blkcipher_cast(tfm); 1621 return __crypto_blkcipher_cast(tfm);
882} 1622}
883 1623
1624/**
1625 * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
1626 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1627 * blkcipher cipher
1628 * @type: specifies the type of the cipher
1629 * @mask: specifies the mask for the cipher
1630 *
1631 * Allocate a cipher handle for a block cipher. The returned struct
1632 * crypto_blkcipher is the cipher handle that is required for any subsequent
1633 * API invocation for that block cipher.
1634 *
1635 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1636 * of an error, PTR_ERR() returns the error code.
1637 */
884static inline struct crypto_blkcipher *crypto_alloc_blkcipher( 1638static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
885 const char *alg_name, u32 type, u32 mask) 1639 const char *alg_name, u32 type, u32 mask)
886{ 1640{
@@ -897,11 +1651,25 @@ static inline struct crypto_tfm *crypto_blkcipher_tfm(
897 return &tfm->base; 1651 return &tfm->base;
898} 1652}
899 1653
1654/**
1655 * crypto_free_blkcipher() - zeroize and free the block cipher handle
1656 * @tfm: cipher handle to be freed
1657 */
900static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) 1658static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
901{ 1659{
902 crypto_free_tfm(crypto_blkcipher_tfm(tfm)); 1660 crypto_free_tfm(crypto_blkcipher_tfm(tfm));
903} 1661}
904 1662
1663/**
1664 * crypto_has_blkcipher() - Search for the availability of a block cipher
1665 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1666 * block cipher
1667 * @type: specifies the type of the cipher
1668 * @mask: specifies the mask for the cipher
1669 *
1670 * Return: true when the block cipher is known to the kernel crypto API; false
1671 * otherwise
1672 */
905static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) 1673static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
906{ 1674{
907 type &= ~CRYPTO_ALG_TYPE_MASK; 1675 type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -911,6 +1679,12 @@ static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
911 return crypto_has_alg(alg_name, type, mask); 1679 return crypto_has_alg(alg_name, type, mask);
912} 1680}
913 1681
1682/**
1683 * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
1684 * @tfm: cipher handle
1685 *
1686 * Return: The character string holding the name of the cipher
1687 */
914static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) 1688static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
915{ 1689{
916 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); 1690 return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
@@ -928,11 +1702,30 @@ static inline struct blkcipher_alg *crypto_blkcipher_alg(
928 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; 1702 return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
929} 1703}
930 1704
1705/**
1706 * crypto_blkcipher_ivsize() - obtain IV size
1707 * @tfm: cipher handle
1708 *
1709 * The size of the IV for the block cipher referenced by the cipher handle is
1710 * returned. This IV size may be zero if the cipher does not need an IV.
1711 *
1712 * Return: IV size in bytes
1713 */
931static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) 1714static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
932{ 1715{
933 return crypto_blkcipher_alg(tfm)->ivsize; 1716 return crypto_blkcipher_alg(tfm)->ivsize;
934} 1717}
935 1718
1719/**
1720 * crypto_blkcipher_blocksize() - obtain block size of cipher
1721 * @tfm: cipher handle
1722 *
1723 * The block size for the block cipher referenced with the cipher handle is
1724 * returned. The caller may use that information to allocate appropriate
1725 * memory for the data returned by the encryption or decryption operation.
1726 *
1727 * Return: block size of cipher
1728 */
936static inline unsigned int crypto_blkcipher_blocksize( 1729static inline unsigned int crypto_blkcipher_blocksize(
937 struct crypto_blkcipher *tfm) 1730 struct crypto_blkcipher *tfm)
938{ 1731{
@@ -962,6 +1755,22 @@ static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
962 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); 1755 crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
963} 1756}
964 1757
1758/**
1759 * crypto_blkcipher_setkey() - set key for cipher
1760 * @tfm: cipher handle
1761 * @key: buffer holding the key
1762 * @keylen: length of the key in bytes
1763 *
1764 * The caller provided key is set for the block cipher referenced by the cipher
1765 * handle.
1766 *
1767 * Note, the key length determines the cipher type. Many block ciphers implement
1768 * different cipher modes depending on the key size, such as AES-128 vs AES-192
1769 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
1770 * is performed.
1771 *
1772 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
1773 */
965static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, 1774static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
966 const u8 *key, unsigned int keylen) 1775 const u8 *key, unsigned int keylen)
967{ 1776{
@@ -969,6 +1778,24 @@ static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
969 key, keylen); 1778 key, keylen);
970} 1779}
971 1780
1781/**
1782 * crypto_blkcipher_encrypt() - encrypt plaintext
1783 * @desc: reference to the block cipher handle with meta data
1784 * @dst: scatter/gather list that is filled by the cipher operation with the
1785 * ciphertext
1786 * @src: scatter/gather list that holds the plaintext
1787 * @nbytes: number of bytes of the plaintext to encrypt.
1788 *
1789 * Encrypt plaintext data using the IV set by the caller with a preceding
1790 * call of crypto_blkcipher_set_iv.
1791 *
1792 * The blkcipher_desc data structure must be filled by the caller and can
1793 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1794 * with the block cipher handle; desc.flags is filled with either
1795 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1796 *
1797 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1798 */
972static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc, 1799static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
973 struct scatterlist *dst, 1800 struct scatterlist *dst,
974 struct scatterlist *src, 1801 struct scatterlist *src,
@@ -978,6 +1805,25 @@ static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
978 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); 1805 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
979} 1806}
980 1807
1808/**
1809 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
1810 * @desc: reference to the block cipher handle with meta data
1811 * @dst: scatter/gather list that is filled by the cipher operation with the
1812 * ciphertext
1813 * @src: scatter/gather list that holds the plaintext
1814 * @nbytes: number of bytes of the plaintext to encrypt.
1815 *
1816 * Encrypt plaintext data with the use of an IV that is solely used for this
1817 * cipher operation. Any previously set IV is not used.
1818 *
1819 * The blkcipher_desc data structure must be filled by the caller and can
1820 * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
1821 * with the block cipher handle; desc.info is filled with the IV to be used for
1822 * the current operation; desc.flags is filled with either
1823 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
1824 *
1825 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1826 */
981static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc, 1827static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
982 struct scatterlist *dst, 1828 struct scatterlist *dst,
983 struct scatterlist *src, 1829 struct scatterlist *src,
@@ -986,6 +1832,23 @@ static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
986 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes); 1832 return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
987} 1833}
988 1834
1835/**
1836 * crypto_blkcipher_decrypt() - decrypt ciphertext
1837 * @desc: reference to the block cipher handle with meta data
1838 * @dst: scatter/gather list that is filled by the cipher operation with the
1839 * plaintext
1840 * @src: scatter/gather list that holds the ciphertext
1841 * @nbytes: number of bytes of the ciphertext to decrypt.
1842 *
1843 * Decrypt ciphertext data using the IV set by the caller with a preceding
1844 * call of crypto_blkcipher_set_iv.
1845 *
1846 * The blkcipher_desc data structure must be filled by the caller as documented
1847 * for the crypto_blkcipher_encrypt call above.
1848 *
1849 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1850 *
1851 */
989static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc, 1852static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
990 struct scatterlist *dst, 1853 struct scatterlist *dst,
991 struct scatterlist *src, 1854 struct scatterlist *src,
@@ -995,6 +1858,22 @@ static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
995 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); 1858 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
996} 1859}
997 1860
1861/**
1862 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
1863 * @desc: reference to the block cipher handle with meta data
1864 * @dst: scatter/gather list that is filled by the cipher operation with the
1865 * plaintext
1866 * @src: scatter/gather list that holds the ciphertext
1867 * @nbytes: number of bytes of the ciphertext to decrypt.
1868 *
1869 * Decrypt ciphertext data with the use of an IV that is solely used for this
1870 * cipher operation. Any previously set IV is not used.
1871 *
1872 * The blkcipher_desc data structure must be filled by the caller as documented
1873 * for the crypto_blkcipher_encrypt_iv call above.
1874 *
1875 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
1876 */
998static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc, 1877static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
999 struct scatterlist *dst, 1878 struct scatterlist *dst,
1000 struct scatterlist *src, 1879 struct scatterlist *src,
@@ -1003,18 +1882,54 @@ static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
1003 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes); 1882 return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
1004} 1883}
1005 1884
1885/**
1886 * crypto_blkcipher_set_iv() - set IV for cipher
1887 * @tfm: cipher handle
1888 * @src: buffer holding the IV
1889 * @len: length of the IV in bytes
1890 *
1891 * The caller provided IV is set for the block cipher referenced by the cipher
1892 * handle.
1893 */
1006static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm, 1894static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
1007 const u8 *src, unsigned int len) 1895 const u8 *src, unsigned int len)
1008{ 1896{
1009 memcpy(crypto_blkcipher_crt(tfm)->iv, src, len); 1897 memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
1010} 1898}
1011 1899
1900/**
1901 * crypto_blkcipher_get_iv() - obtain IV from cipher
1902 * @tfm: cipher handle
1903 * @dst: buffer filled with the IV
1904 * @len: length of the buffer dst
1905 *
1906 * The caller can obtain the IV set for the block cipher referenced by the
1907 * cipher handle and store it into the user-provided buffer. If the buffer
1908 * has an insufficient space, the IV is truncated to fit the buffer.
1909 */
1012static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm, 1910static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
1013 u8 *dst, unsigned int len) 1911 u8 *dst, unsigned int len)
1014{ 1912{
1015 memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len); 1913 memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
1016} 1914}
1017 1915
1916/**
1917 * DOC: Single Block Cipher API
1918 *
1919 * The single block cipher API is used with the ciphers of type
1920 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
1921 *
1922 * Using the single block cipher API calls, operations with the basic cipher
1923 * primitive can be implemented. These cipher primitives exclude any block
1924 * chaining operations including IV handling.
1925 *
1926 * The purpose of this single block cipher API is to support the implementation
1927 * of templates or other concepts that only need to perform the cipher operation
1928 * on one block at a time. Templates invoke the underlying cipher primitive
1929 * block-wise and process either the input or the output data of these cipher
1930 * operations.
1931 */
1932
1018static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm) 1933static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
1019{ 1934{
1020 return (struct crypto_cipher *)tfm; 1935 return (struct crypto_cipher *)tfm;
@@ -1026,6 +1941,20 @@ static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
1026 return __crypto_cipher_cast(tfm); 1941 return __crypto_cipher_cast(tfm);
1027} 1942}
1028 1943
1944/**
1945 * crypto_alloc_cipher() - allocate single block cipher handle
1946 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1947 * single block cipher
1948 * @type: specifies the type of the cipher
1949 * @mask: specifies the mask for the cipher
1950 *
1951 * Allocate a cipher handle for a single block cipher. The returned struct
1952 * crypto_cipher is the cipher handle that is required for any subsequent API
1953 * invocation for that single block cipher.
1954 *
1955 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
1956 * of an error, PTR_ERR() returns the error code.
1957 */
1029static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name, 1958static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
1030 u32 type, u32 mask) 1959 u32 type, u32 mask)
1031{ 1960{
@@ -1041,11 +1970,25 @@ static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
1041 return &tfm->base; 1970 return &tfm->base;
1042} 1971}
1043 1972
1973/**
1974 * crypto_free_cipher() - zeroize and free the single block cipher handle
1975 * @tfm: cipher handle to be freed
1976 */
1044static inline void crypto_free_cipher(struct crypto_cipher *tfm) 1977static inline void crypto_free_cipher(struct crypto_cipher *tfm)
1045{ 1978{
1046 crypto_free_tfm(crypto_cipher_tfm(tfm)); 1979 crypto_free_tfm(crypto_cipher_tfm(tfm));
1047} 1980}
1048 1981
1982/**
1983 * crypto_has_cipher() - Search for the availability of a single block cipher
1984 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
1985 * single block cipher
1986 * @type: specifies the type of the cipher
1987 * @mask: specifies the mask for the cipher
1988 *
1989 * Return: true when the single block cipher is known to the kernel crypto API;
1990 * false otherwise
1991 */
1049static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask) 1992static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
1050{ 1993{
1051 type &= ~CRYPTO_ALG_TYPE_MASK; 1994 type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1060,6 +2003,16 @@ static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
1060 return &crypto_cipher_tfm(tfm)->crt_cipher; 2003 return &crypto_cipher_tfm(tfm)->crt_cipher;
1061} 2004}
1062 2005
2006/**
2007 * crypto_cipher_blocksize() - obtain block size for cipher
2008 * @tfm: cipher handle
2009 *
2010 * The block size for the single block cipher referenced with the cipher handle
2011 * tfm is returned. The caller may use that information to allocate appropriate
2012 * memory for the data returned by the encryption or decryption operation
2013 *
2014 * Return: block size of cipher
2015 */
1063static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm) 2016static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
1064{ 2017{
1065 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm)); 2018 return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
@@ -1087,6 +2040,22 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
1087 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags); 2040 crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
1088} 2041}
1089 2042
2043/**
2044 * crypto_cipher_setkey() - set key for cipher
2045 * @tfm: cipher handle
2046 * @key: buffer holding the key
2047 * @keylen: length of the key in bytes
2048 *
2049 * The caller provided key is set for the single block cipher referenced by the
2050 * cipher handle.
2051 *
2052 * Note, the key length determines the cipher type. Many block ciphers implement
2053 * different cipher modes depending on the key size, such as AES-128 vs AES-192
2054 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
2055 * is performed.
2056 *
2057 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
2058 */
1090static inline int crypto_cipher_setkey(struct crypto_cipher *tfm, 2059static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
1091 const u8 *key, unsigned int keylen) 2060 const u8 *key, unsigned int keylen)
1092{ 2061{
@@ -1094,6 +2063,15 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
1094 key, keylen); 2063 key, keylen);
1095} 2064}
1096 2065
2066/**
2067 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
2068 * @tfm: cipher handle
2069 * @dst: points to the buffer that will be filled with the ciphertext
2070 * @src: buffer holding the plaintext to be encrypted
2071 *
2072 * Invoke the encryption operation of one block. The caller must ensure that
2073 * the plaintext and ciphertext buffers are at least one block in size.
2074 */
1097static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm, 2075static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
1098 u8 *dst, const u8 *src) 2076 u8 *dst, const u8 *src)
1099{ 2077{
@@ -1101,6 +2079,15 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
1101 dst, src); 2079 dst, src);
1102} 2080}
1103 2081
2082/**
2083 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
2084 * @tfm: cipher handle
2085 * @dst: points to the buffer that will be filled with the plaintext
2086 * @src: buffer holding the ciphertext to be decrypted
2087 *
2088 * Invoke the decryption operation of one block. The caller must ensure that
2089 * the plaintext and ciphertext buffers are at least one block in size.
2090 */
1104static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm, 2091static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1105 u8 *dst, const u8 *src) 2092 u8 *dst, const u8 *src)
1106{ 2093{
@@ -1108,6 +2095,13 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
1108 dst, src); 2095 dst, src);
1109} 2096}
1110 2097
2098/**
2099 * DOC: Synchronous Message Digest API
2100 *
2101 * The synchronous message digest API is used with the ciphers of type
2102 * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
2103 */
2104
1111static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm) 2105static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
1112{ 2106{
1113 return (struct crypto_hash *)tfm; 2107 return (struct crypto_hash *)tfm;
@@ -1120,6 +2114,20 @@ static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
1120 return __crypto_hash_cast(tfm); 2114 return __crypto_hash_cast(tfm);
1121} 2115}
1122 2116
2117/**
2118 * crypto_alloc_hash() - allocate synchronous message digest handle
2119 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
2120 * message digest cipher
2121 * @type: specifies the type of the cipher
2122 * @mask: specifies the mask for the cipher
2123 *
2124 * Allocate a cipher handle for a message digest. The returned struct
2125 * crypto_hash is the cipher handle that is required for any subsequent
2126 * API invocation for that message digest.
2127 *
2128 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
2129 * of an error, PTR_ERR() returns the error code.
2130 */
1123static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name, 2131static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
1124 u32 type, u32 mask) 2132 u32 type, u32 mask)
1125{ 2133{
@@ -1136,11 +2144,25 @@ static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
1136 return &tfm->base; 2144 return &tfm->base;
1137} 2145}
1138 2146
2147/**
2148 * crypto_free_hash() - zeroize and free message digest handle
2149 * @tfm: cipher handle to be freed
2150 */
1139static inline void crypto_free_hash(struct crypto_hash *tfm) 2151static inline void crypto_free_hash(struct crypto_hash *tfm)
1140{ 2152{
1141 crypto_free_tfm(crypto_hash_tfm(tfm)); 2153 crypto_free_tfm(crypto_hash_tfm(tfm));
1142} 2154}
1143 2155
2156/**
2157 * crypto_has_hash() - Search for the availability of a message digest
2158 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
2159 * message digest cipher
2160 * @type: specifies the type of the cipher
2161 * @mask: specifies the mask for the cipher
2162 *
2163 * Return: true when the message digest cipher is known to the kernel crypto
2164 * API; false otherwise
2165 */
1144static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask) 2166static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
1145{ 2167{
1146 type &= ~CRYPTO_ALG_TYPE_MASK; 2168 type &= ~CRYPTO_ALG_TYPE_MASK;
@@ -1156,6 +2178,15 @@ static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
1156 return &crypto_hash_tfm(tfm)->crt_hash; 2178 return &crypto_hash_tfm(tfm)->crt_hash;
1157} 2179}
1158 2180
2181/**
2182 * crypto_hash_blocksize() - obtain block size for message digest
2183 * @tfm: cipher handle
2184 *
2185 * The block size for the message digest cipher referenced with the cipher
2186 * handle is returned.
2187 *
2188 * Return: block size of cipher
2189 */
1159static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm) 2190static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
1160{ 2191{
1161 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm)); 2192 return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
@@ -1166,6 +2197,15 @@ static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
1166 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm)); 2197 return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
1167} 2198}
1168 2199
2200/**
2201 * crypto_hash_digestsize() - obtain message digest size
2202 * @tfm: cipher handle
2203 *
2204 * The size for the message digest created by the message digest cipher
2205 * referenced with the cipher handle is returned.
2206 *
2207 * Return: message digest size
2208 */
1169static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm) 2209static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
1170{ 2210{
1171 return crypto_hash_crt(tfm)->digestsize; 2211 return crypto_hash_crt(tfm)->digestsize;
@@ -1186,11 +2226,38 @@ static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
1186 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags); 2226 crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
1187} 2227}
1188 2228
2229/**
2230 * crypto_hash_init() - (re)initialize message digest handle
2231 * @desc: cipher request handle that to be filled by caller --
2232 * desc.tfm is filled with the hash cipher handle;
2233 * desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
2234 *
2235 * The call (re-)initializes the message digest referenced by the hash cipher
2236 * request handle. Any potentially existing state created by previous
2237 * operations is discarded.
2238 *
2239 * Return: 0 if the message digest initialization was successful; < 0 if an
2240 * error occurred
2241 */
1189static inline int crypto_hash_init(struct hash_desc *desc) 2242static inline int crypto_hash_init(struct hash_desc *desc)
1190{ 2243{
1191 return crypto_hash_crt(desc->tfm)->init(desc); 2244 return crypto_hash_crt(desc->tfm)->init(desc);
1192} 2245}
1193 2246
2247/**
2248 * crypto_hash_update() - add data to message digest for processing
2249 * @desc: cipher request handle
2250 * @sg: scatter / gather list pointing to the data to be added to the message
2251 * digest
2252 * @nbytes: number of bytes to be processed from @sg
2253 *
2254 * Updates the message digest state of the cipher handle pointed to by the
2255 * hash cipher request handle with the input data pointed to by the
2256 * scatter/gather list.
2257 *
2258 * Return: 0 if the message digest update was successful; < 0 if an error
2259 * occurred
2260 */
1194static inline int crypto_hash_update(struct hash_desc *desc, 2261static inline int crypto_hash_update(struct hash_desc *desc,
1195 struct scatterlist *sg, 2262 struct scatterlist *sg,
1196 unsigned int nbytes) 2263 unsigned int nbytes)
@@ -1198,11 +2265,39 @@ static inline int crypto_hash_update(struct hash_desc *desc,
1198 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes); 2265 return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
1199} 2266}
1200 2267
2268/**
2269 * crypto_hash_final() - calculate message digest
2270 * @desc: cipher request handle
2271 * @out: message digest output buffer -- The caller must ensure that the out
2272 * buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
2273 * function).
2274 *
2275 * Finalize the message digest operation and create the message digest
2276 * based on all data added to the cipher handle. The message digest is placed
2277 * into the output buffer.
2278 *
2279 * Return: 0 if the message digest creation was successful; < 0 if an error
2280 * occurred
2281 */
1201static inline int crypto_hash_final(struct hash_desc *desc, u8 *out) 2282static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
1202{ 2283{
1203 return crypto_hash_crt(desc->tfm)->final(desc, out); 2284 return crypto_hash_crt(desc->tfm)->final(desc, out);
1204} 2285}
1205 2286
2287/**
2288 * crypto_hash_digest() - calculate message digest for a buffer
2289 * @desc: see crypto_hash_final()
2290 * @sg: see crypto_hash_update()
2291 * @nbytes: see crypto_hash_update()
2292 * @out: see crypto_hash_final()
2293 *
2294 * This function is a "short-hand" for the function calls of crypto_hash_init,
2295 * crypto_hash_update and crypto_hash_final. The parameters have the same
2296 * meaning as discussed for those separate three functions.
2297 *
2298 * Return: 0 if the message digest creation was successful; < 0 if an error
2299 * occurred
2300 */
1206static inline int crypto_hash_digest(struct hash_desc *desc, 2301static inline int crypto_hash_digest(struct hash_desc *desc,
1207 struct scatterlist *sg, 2302 struct scatterlist *sg,
1208 unsigned int nbytes, u8 *out) 2303 unsigned int nbytes, u8 *out)
@@ -1210,6 +2305,17 @@ static inline int crypto_hash_digest(struct hash_desc *desc,
1210 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out); 2305 return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
1211} 2306}
1212 2307
2308/**
2309 * crypto_hash_setkey() - set key for message digest
2310 * @hash: cipher handle
2311 * @key: buffer holding the key
2312 * @keylen: length of the key in bytes
2313 *
2314 * The caller provided key is set for the message digest cipher. The cipher
2315 * handle must point to a keyed hash in order for this function to succeed.
2316 *
2317 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
2318 */
1213static inline int crypto_hash_setkey(struct crypto_hash *hash, 2319static inline int crypto_hash_setkey(struct crypto_hash *hash,
1214 const u8 *key, unsigned int keylen) 2320 const u8 *key, unsigned int keylen)
1215{ 2321{
diff --git a/include/linux/cycx_x25.h b/include/linux/cycx_x25.h
deleted file mode 100644
index 362bf19d6cf1..000000000000
--- a/include/linux/cycx_x25.h
+++ /dev/null
@@ -1,125 +0,0 @@
1#ifndef _CYCX_X25_H
2#define _CYCX_X25_H
3/*
4* cycx_x25.h Cyclom X.25 firmware API definitions.
5*
6* Author: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
7*
8* Copyright: (c) 1998-2003 Arnaldo Carvalho de Melo
9*
10* Based on sdla_x25.h by Gene Kozin <74604.152@compuserve.com>
11*
12* This program is free software; you can redistribute it and/or
13* modify it under the terms of the GNU General Public License
14* as published by the Free Software Foundation; either version
15* 2 of the License, or (at your option) any later version.
16* ============================================================================
17* 2000/04/02 acme dprintk and cycx_debug
18* 1999/01/03 acme judicious use of data types
19* 1999/01/02 acme #define X25_ACK_N3 0x4411
20* 1998/12/28 acme cleanup: lot'o'things removed
21* commands listed,
22* TX25Cmd & TX25Config structs
23* typedef'ed
24*/
25#ifndef PACKED
26#define PACKED __attribute__((packed))
27#endif
28
29/* X.25 shared memory layout. */
30#define X25_MBOX_OFFS 0x300 /* general mailbox block */
31#define X25_RXMBOX_OFFS 0x340 /* receive mailbox */
32
33/* Debug */
34#define dprintk(level, format, a...) if (cycx_debug >= level) printk(format, ##a)
35
36extern unsigned int cycx_debug;
37
38/* Data Structures */
39/* X.25 Command Block. */
40struct cycx_x25_cmd {
41 u16 command;
42 u16 link; /* values: 0 or 1 */
43 u16 len; /* values: 0 thru 0x205 (517) */
44 u32 buf;
45} PACKED;
46
47/* Defines for the 'command' field. */
48#define X25_CONNECT_REQUEST 0x4401
49#define X25_CONNECT_RESPONSE 0x4402
50#define X25_DISCONNECT_REQUEST 0x4403
51#define X25_DISCONNECT_RESPONSE 0x4404
52#define X25_DATA_REQUEST 0x4405
53#define X25_ACK_TO_VC 0x4406
54#define X25_INTERRUPT_RESPONSE 0x4407
55#define X25_CONFIG 0x4408
56#define X25_CONNECT_INDICATION 0x4409
57#define X25_CONNECT_CONFIRM 0x440A
58#define X25_DISCONNECT_INDICATION 0x440B
59#define X25_DISCONNECT_CONFIRM 0x440C
60#define X25_DATA_INDICATION 0x440E
61#define X25_INTERRUPT_INDICATION 0x440F
62#define X25_ACK_FROM_VC 0x4410
63#define X25_ACK_N3 0x4411
64#define X25_CONNECT_COLLISION 0x4413
65#define X25_N3WIN 0x4414
66#define X25_LINE_ON 0x4415
67#define X25_LINE_OFF 0x4416
68#define X25_RESET_REQUEST 0x4417
69#define X25_LOG 0x4500
70#define X25_STATISTIC 0x4600
71#define X25_TRACE 0x4700
72#define X25_N2TRACEXC 0x4702
73#define X25_N3TRACEXC 0x4703
74
75/**
76 * struct cycx_x25_config - cyclom2x x25 firmware configuration
77 * @link - link number
78 * @speed - line speed
79 * @clock - internal/external
80 * @n2 - # of level 2 retransm.(values: 1 thru FF)
81 * @n2win - level 2 window (values: 1 thru 7)
82 * @n3win - level 3 window (values: 1 thru 7)
83 * @nvc - # of logical channels (values: 1 thru 64)
84 * @pktlen - level 3 packet length - log base 2 of size
85 * @locaddr - my address
86 * @remaddr - remote address
87 * @t1 - time, in seconds
88 * @t2 - time, in seconds
89 * @t21 - time, in seconds
90 * @npvc - # of permanent virt. circuits (1 thru nvc)
91 * @t23 - time, in seconds
92 * @flags - see dosx25.doc, in portuguese, for details
93 */
94struct cycx_x25_config {
95 u8 link;
96 u8 speed;
97 u8 clock;
98 u8 n2;
99 u8 n2win;
100 u8 n3win;
101 u8 nvc;
102 u8 pktlen;
103 u8 locaddr;
104 u8 remaddr;
105 u16 t1;
106 u16 t2;
107 u8 t21;
108 u8 npvc;
109 u8 t23;
110 u8 flags;
111} PACKED;
112
113struct cycx_x25_stats {
114 u16 rx_crc_errors;
115 u16 rx_over_errors;
116 u16 n2_tx_frames;
117 u16 n2_rx_frames;
118 u16 tx_timeouts;
119 u16 rx_timeouts;
120 u16 n3_tx_packets;
121 u16 n3_rx_packets;
122 u16 tx_aborts;
123 u16 rx_aborts;
124} PACKED;
125#endif /* _CYCX_X25_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 75a227cc7ce2..5a813988e6d4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -11,7 +11,6 @@
11#include <linux/rcupdate.h> 11#include <linux/rcupdate.h>
12#include <linux/lockref.h> 12#include <linux/lockref.h>
13 13
14struct nameidata;
15struct path; 14struct path;
16struct vfsmount; 15struct vfsmount;
17 16
@@ -125,15 +124,15 @@ struct dentry {
125 void *d_fsdata; /* fs-specific data */ 124 void *d_fsdata; /* fs-specific data */
126 125
127 struct list_head d_lru; /* LRU list */ 126 struct list_head d_lru; /* LRU list */
127 struct list_head d_child; /* child of parent list */
128 struct list_head d_subdirs; /* our children */
128 /* 129 /*
129 * d_child and d_rcu can share memory 130 * d_alias and d_rcu can share memory
130 */ 131 */
131 union { 132 union {
132 struct list_head d_child; /* child of parent list */ 133 struct hlist_node d_alias; /* inode alias list */
133 struct rcu_head d_rcu; 134 struct rcu_head d_rcu;
134 } d_u; 135 } d_u;
135 struct list_head d_subdirs; /* our children */
136 struct hlist_node d_alias; /* inode alias list */
137}; 136};
138 137
139/* 138/*
@@ -226,17 +225,11 @@ struct dentry_operations {
226 225
227extern seqlock_t rename_lock; 226extern seqlock_t rename_lock;
228 227
229static inline int dname_external(const struct dentry *dentry)
230{
231 return dentry->d_name.name != dentry->d_iname;
232}
233
234/* 228/*
235 * These are the low-level FS interfaces to the dcache.. 229 * These are the low-level FS interfaces to the dcache..
236 */ 230 */
237extern void d_instantiate(struct dentry *, struct inode *); 231extern void d_instantiate(struct dentry *, struct inode *);
238extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); 232extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
239extern struct dentry * d_materialise_unique(struct dentry *, struct inode *);
240extern int d_instantiate_no_diralias(struct dentry *, struct inode *); 233extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
241extern void __d_drop(struct dentry *dentry); 234extern void __d_drop(struct dentry *dentry);
242extern void d_drop(struct dentry *dentry); 235extern void d_drop(struct dentry *dentry);
@@ -254,7 +247,7 @@ extern struct dentry * d_obtain_root(struct inode *);
254extern void shrink_dcache_sb(struct super_block *); 247extern void shrink_dcache_sb(struct super_block *);
255extern void shrink_dcache_parent(struct dentry *); 248extern void shrink_dcache_parent(struct dentry *);
256extern void shrink_dcache_for_umount(struct super_block *); 249extern void shrink_dcache_for_umount(struct super_block *);
257extern int d_invalidate(struct dentry *); 250extern void d_invalidate(struct dentry *);
258 251
259/* only used at mount-time */ 252/* only used at mount-time */
260extern struct dentry * d_make_root(struct inode *); 253extern struct dentry * d_make_root(struct inode *);
@@ -269,7 +262,6 @@ extern void d_prune_aliases(struct inode *);
269 262
270/* test whether we have any submounts in a subdir tree */ 263/* test whether we have any submounts in a subdir tree */
271extern int have_submounts(struct dentry *); 264extern int have_submounts(struct dentry *);
272extern int check_submounts_and_drop(struct dentry *);
273 265
274/* 266/*
275 * This adds the entry to the hash queues. 267 * This adds the entry to the hash queues.
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 4d0b4d1aa132..da4c4983adbe 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -20,6 +20,7 @@
20 20
21#include <linux/types.h> 21#include <linux/types.h>
22 22
23struct device;
23struct file_operations; 24struct file_operations;
24 25
25struct debugfs_blob_wrapper { 26struct debugfs_blob_wrapper {
@@ -92,20 +93,25 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
92 struct dentry *parent, 93 struct dentry *parent,
93 struct debugfs_regset32 *regset); 94 struct debugfs_regset32 *regset);
94 95
95int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, 96void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
96 int nregs, void __iomem *base, char *prefix); 97 int nregs, void __iomem *base, char *prefix);
97 98
98struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, 99struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
99 struct dentry *parent, 100 struct dentry *parent,
100 u32 *array, u32 elements); 101 u32 *array, u32 elements);
101 102
103struct dentry *debugfs_create_devm_seqfile(struct device *dev, const char *name,
104 struct dentry *parent,
105 int (*read_fn)(struct seq_file *s,
106 void *data));
107
102bool debugfs_initialized(void); 108bool debugfs_initialized(void);
103 109
104#else 110#else
105 111
106#include <linux/err.h> 112#include <linux/err.h>
107 113
108/* 114/*
109 * We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled 115 * We do not return NULL from these functions if CONFIG_DEBUG_FS is not enabled
110 * so users have a chance to detect if there was a real error or not. We don't 116 * so users have a chance to detect if there was a real error or not. We don't
111 * want to duplicate the design decision mistakes of procfs and devfs again. 117 * want to duplicate the design decision mistakes of procfs and devfs again.
@@ -233,10 +239,9 @@ static inline struct dentry *debugfs_create_regset32(const char *name,
233 return ERR_PTR(-ENODEV); 239 return ERR_PTR(-ENODEV);
234} 240}
235 241
236static inline int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, 242static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
237 int nregs, void __iomem *base, char *prefix) 243 int nregs, void __iomem *base, char *prefix)
238{ 244{
239 return 0;
240} 245}
241 246
242static inline bool debugfs_initialized(void) 247static inline bool debugfs_initialized(void)
@@ -251,6 +256,15 @@ static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t
251 return ERR_PTR(-ENODEV); 256 return ERR_PTR(-ENODEV);
252} 257}
253 258
259static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
260 const char *name,
261 struct dentry *parent,
262 int (*read_fn)(struct seq_file *s,
263 void *data))
264{
265 return ERR_PTR(-ENODEV);
266}
267
254#endif 268#endif
255 269
256#endif 270#endif
diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h
new file mode 100644
index 000000000000..c0a360e99f64
--- /dev/null
+++ b/include/linux/devcoredump.h
@@ -0,0 +1,35 @@
1#ifndef __DEVCOREDUMP_H
2#define __DEVCOREDUMP_H
3
4#include <linux/device.h>
5#include <linux/module.h>
6#include <linux/vmalloc.h>
7
8#ifdef CONFIG_DEV_COREDUMP
9void dev_coredumpv(struct device *dev, const void *data, size_t datalen,
10 gfp_t gfp);
11
12void dev_coredumpm(struct device *dev, struct module *owner,
13 const void *data, size_t datalen, gfp_t gfp,
14 ssize_t (*read)(char *buffer, loff_t offset, size_t count,
15 const void *data, size_t datalen),
16 void (*free)(const void *data));
17#else
18static inline void dev_coredumpv(struct device *dev, const void *data,
19 size_t datalen, gfp_t gfp)
20{
21 vfree(data);
22}
23
24static inline void
25dev_coredumpm(struct device *dev, struct module *owner,
26 const void *data, size_t datalen, gfp_t gfp,
27 ssize_t (*read)(char *buffer, loff_t offset, size_t count,
28 const void *data, size_t datalen),
29 void (*free)(const void *data))
30{
31 free(data);
32}
33#endif /* CONFIG_DEV_COREDUMP */
34
35#endif /* __DEVCOREDUMP_H */
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index f1863dcd83ea..ce447f0f1bad 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -188,7 +188,7 @@ extern struct devfreq *devm_devfreq_add_device(struct device *dev,
188extern void devm_devfreq_remove_device(struct device *dev, 188extern void devm_devfreq_remove_device(struct device *dev,
189 struct devfreq *devfreq); 189 struct devfreq *devfreq);
190 190
191/* Supposed to be called by PM_SLEEP/PM_RUNTIME callbacks */ 191/* Supposed to be called by PM callbacks */
192extern int devfreq_suspend_device(struct devfreq *devfreq); 192extern int devfreq_suspend_device(struct devfreq *devfreq);
193extern int devfreq_resume_device(struct devfreq *devfreq); 193extern int devfreq_resume_device(struct devfreq *devfreq);
194 194
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index e1707de043ae..ca6d2acc5eb7 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -64,6 +64,7 @@ typedef int (*dm_request_endio_fn) (struct dm_target *ti,
64 union map_info *map_context); 64 union map_info *map_context);
65 65
66typedef void (*dm_presuspend_fn) (struct dm_target *ti); 66typedef void (*dm_presuspend_fn) (struct dm_target *ti);
67typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
67typedef void (*dm_postsuspend_fn) (struct dm_target *ti); 68typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
68typedef int (*dm_preresume_fn) (struct dm_target *ti); 69typedef int (*dm_preresume_fn) (struct dm_target *ti);
69typedef void (*dm_resume_fn) (struct dm_target *ti); 70typedef void (*dm_resume_fn) (struct dm_target *ti);
@@ -145,6 +146,7 @@ struct target_type {
145 dm_endio_fn end_io; 146 dm_endio_fn end_io;
146 dm_request_endio_fn rq_end_io; 147 dm_request_endio_fn rq_end_io;
147 dm_presuspend_fn presuspend; 148 dm_presuspend_fn presuspend;
149 dm_presuspend_undo_fn presuspend_undo;
148 dm_postsuspend_fn postsuspend; 150 dm_postsuspend_fn postsuspend;
149 dm_preresume_fn preresume; 151 dm_preresume_fn preresume;
150 dm_resume_fn resume; 152 dm_resume_fn resume;
diff --git a/include/linux/device.h b/include/linux/device.h
index 43d183aeb25b..fb506738f7b7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -181,13 +181,14 @@ extern int bus_unregister_notifier(struct bus_type *bus,
181 * with the device lock held in the core, so be careful. 181 * with the device lock held in the core, so be careful.
182 */ 182 */
183#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ 183#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
184#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ 184#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
185#define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be 185#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
186#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
186 bound */ 187 bound */
187#define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */ 188#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
188#define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be 189#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
189 unbound */ 190 unbound */
190#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound 191#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
191 from the device */ 192 from the device */
192 193
193extern struct kset *bus_get_kset(struct bus_type *bus); 194extern struct kset *bus_get_kset(struct bus_type *bus);
@@ -607,8 +608,8 @@ extern int devres_release_group(struct device *dev, void *id);
607extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); 608extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp);
608extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, 609extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
609 va_list ap); 610 va_list ap);
610extern char *devm_kasprintf(struct device *dev, gfp_t gfp, 611extern __printf(3, 4)
611 const char *fmt, ...); 612char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...);
612static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) 613static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
613{ 614{
614 return devm_kmalloc(dev, size, gfp | __GFP_ZERO); 615 return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
@@ -910,6 +911,11 @@ static inline void device_unlock(struct device *dev)
910 mutex_unlock(&dev->mutex); 911 mutex_unlock(&dev->mutex);
911} 912}
912 913
914static inline void device_lock_assert(struct device *dev)
915{
916 lockdep_assert_held(&dev->mutex);
917}
918
913void driver_init(void); 919void driver_init(void);
914 920
915/* 921/*
@@ -1117,6 +1123,41 @@ do { \
1117}) 1123})
1118#endif 1124#endif
1119 1125
1126#ifdef CONFIG_PRINTK
1127#define dev_level_once(dev_level, dev, fmt, ...) \
1128do { \
1129 static bool __print_once __read_mostly; \
1130 \
1131 if (!__print_once) { \
1132 __print_once = true; \
1133 dev_level(dev, fmt, ##__VA_ARGS__); \
1134 } \
1135} while (0)
1136#else
1137#define dev_level_once(dev_level, dev, fmt, ...) \
1138do { \
1139 if (0) \
1140 dev_level(dev, fmt, ##__VA_ARGS__); \
1141} while (0)
1142#endif
1143
1144#define dev_emerg_once(dev, fmt, ...) \
1145 dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
1146#define dev_alert_once(dev, fmt, ...) \
1147 dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
1148#define dev_crit_once(dev, fmt, ...) \
1149 dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
1150#define dev_err_once(dev, fmt, ...) \
1151 dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
1152#define dev_warn_once(dev, fmt, ...) \
1153 dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
1154#define dev_notice_once(dev, fmt, ...) \
1155 dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
1156#define dev_info_once(dev, fmt, ...) \
1157 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
1158#define dev_dbg_once(dev, fmt, ...) \
1159 dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
1160
1120#define dev_level_ratelimited(dev_level, dev, fmt, ...) \ 1161#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
1121do { \ 1162do { \
1122 static DEFINE_RATELIMIT_STATE(_rs, \ 1163 static DEFINE_RATELIMIT_STATE(_rs, \
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 931b70986272..c3007cb4bfa6 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
129 129
130extern u64 dma_get_required_mask(struct device *dev); 130extern u64 dma_get_required_mask(struct device *dev);
131 131
132#ifndef set_arch_dma_coherent_ops 132#ifndef arch_setup_dma_ops
133static inline int set_arch_dma_coherent_ops(struct device *dev) 133static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
134{ 134 u64 size, struct iommu_ops *iommu,
135 return 0; 135 bool coherent) { }
136} 136#endif
137
138#ifndef arch_teardown_dma_ops
139static inline void arch_teardown_dma_ops(struct device *dev) { }
137#endif 140#endif
138 141
139static inline unsigned int dma_get_max_seg_size(struct device *dev) 142static inline unsigned int dma_get_max_seg_size(struct device *dev)
@@ -263,6 +266,32 @@ struct dma_attrs;
263#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \ 266#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
264 dma_unmap_sg(dev, sgl, nents, dir) 267 dma_unmap_sg(dev, sgl, nents, dir)
265 268
269#else
270static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
271 dma_addr_t *dma_addr, gfp_t gfp)
272{
273 DEFINE_DMA_ATTRS(attrs);
274 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
275 return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
276}
277
278static inline void dma_free_writecombine(struct device *dev, size_t size,
279 void *cpu_addr, dma_addr_t dma_addr)
280{
281 DEFINE_DMA_ATTRS(attrs);
282 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
283 return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
284}
285
286static inline int dma_mmap_writecombine(struct device *dev,
287 struct vm_area_struct *vma,
288 void *cpu_addr, dma_addr_t dma_addr,
289 size_t size)
290{
291 DEFINE_DMA_ATTRS(attrs);
292 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
293 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
294}
266#endif /* CONFIG_HAVE_DMA_ATTRS */ 295#endif /* CONFIG_HAVE_DMA_ATTRS */
267 296
268#ifdef CONFIG_NEED_DMA_MAP_STATE 297#ifdef CONFIG_NEED_DMA_MAP_STATE
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h
new file mode 100644
index 000000000000..71456442ebe3
--- /dev/null
+++ b/include/linux/dma/dw.h
@@ -0,0 +1,64 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller
3 *
4 * Copyright (C) 2007 Atmel Corporation
5 * Copyright (C) 2010-2011 ST Microelectronics
6 * Copyright (C) 2014 Intel Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12#ifndef _DMA_DW_H
13#define _DMA_DW_H
14
15#include <linux/clk.h>
16#include <linux/device.h>
17#include <linux/dmaengine.h>
18
19#include <linux/platform_data/dma-dw.h>
20
21struct dw_dma;
22
23/**
24 * struct dw_dma_chip - representation of DesignWare DMA controller hardware
25 * @dev: struct device of the DMA controller
26 * @irq: irq line
27 * @regs: memory mapped I/O space
28 * @clk: hclk clock
29 * @dw: struct dw_dma that is filed by dw_dma_probe()
30 */
31struct dw_dma_chip {
32 struct device *dev;
33 int irq;
34 void __iomem *regs;
35 struct clk *clk;
36 struct dw_dma *dw;
37};
38
39/* Export to the platform drivers */
40int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata);
41int dw_dma_remove(struct dw_dma_chip *chip);
42
43/* DMA API extensions */
44struct dw_desc;
45
46struct dw_cyclic_desc {
47 struct dw_desc **desc;
48 unsigned long periods;
49 void (*period_callback)(void *param);
50 void *period_callback_param;
51};
52
53struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
54 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
55 enum dma_transfer_direction direction);
56void dw_dma_cyclic_free(struct dma_chan *chan);
57int dw_dma_cyclic_start(struct dma_chan *chan);
58void dw_dma_cyclic_stop(struct dma_chan *chan);
59
60dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
61
62dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
63
64#endif /* _DMA_DW_H */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1f9e642c66ad..40cd75e21ea2 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -199,15 +199,12 @@ enum dma_ctrl_flags {
199 * configuration data in statically from the platform). An additional 199 * configuration data in statically from the platform). An additional
200 * argument of struct dma_slave_config must be passed in with this 200 * argument of struct dma_slave_config must be passed in with this
201 * command. 201 * command.
202 * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
203 * into external start mode.
204 */ 202 */
205enum dma_ctrl_cmd { 203enum dma_ctrl_cmd {
206 DMA_TERMINATE_ALL, 204 DMA_TERMINATE_ALL,
207 DMA_PAUSE, 205 DMA_PAUSE,
208 DMA_RESUME, 206 DMA_RESUME,
209 DMA_SLAVE_CONFIG, 207 DMA_SLAVE_CONFIG,
210 FSLDMA_EXTERNAL_START,
211}; 208};
212 209
213/** 210/**
@@ -307,7 +304,9 @@ enum dma_slave_buswidth {
307 * struct dma_slave_config - dma slave channel runtime config 304 * struct dma_slave_config - dma slave channel runtime config
308 * @direction: whether the data shall go in or out on this slave 305 * @direction: whether the data shall go in or out on this slave
309 * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are 306 * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are
310 * legal values. 307 * legal values. DEPRECATED, drivers should use the direction argument
308 * to the device_prep_slave_sg and device_prep_dma_cyclic functions or
309 * the dir field in the dma_interleaved_template structure.
311 * @src_addr: this is the physical address where DMA slave data 310 * @src_addr: this is the physical address where DMA slave data
312 * should be read (RX), if the source is memory this argument is 311 * should be read (RX), if the source is memory this argument is
313 * ignored. 312 * ignored.
@@ -448,7 +447,8 @@ struct dmaengine_unmap_data {
448 * communicate status 447 * communicate status
449 * @phys: physical address of the descriptor 448 * @phys: physical address of the descriptor
450 * @chan: target channel for this operation 449 * @chan: target channel for this operation
451 * @tx_submit: set the prepared descriptor(s) to be executed by the engine 450 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
451 * descriptor pending. To be pushed on .issue_pending() call
452 * @callback: routine to call after this operation is complete 452 * @callback: routine to call after this operation is complete
453 * @callback_param: general parameter to pass to the callback routine 453 * @callback_param: general parameter to pass to the callback routine
454 * ---async_tx api specific fields--- 454 * ---async_tx api specific fields---
@@ -755,6 +755,16 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
755 return chan->device->device_prep_interleaved_dma(chan, xt, flags); 755 return chan->device->device_prep_interleaved_dma(chan, xt, flags);
756} 756}
757 757
758static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
759 struct dma_chan *chan,
760 struct scatterlist *dst_sg, unsigned int dst_nents,
761 struct scatterlist *src_sg, unsigned int src_nents,
762 unsigned long flags)
763{
764 return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
765 src_sg, src_nents, flags);
766}
767
758static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) 768static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
759{ 769{
760 if (!chan || !caps) 770 if (!chan || !caps)
@@ -900,18 +910,6 @@ static inline void dmaengine_put(void)
900} 910}
901#endif 911#endif
902 912
903#ifdef CONFIG_NET_DMA
904#define net_dmaengine_get() dmaengine_get()
905#define net_dmaengine_put() dmaengine_put()
906#else
907static inline void net_dmaengine_get(void)
908{
909}
910static inline void net_dmaengine_put(void)
911{
912}
913#endif
914
915#ifdef CONFIG_ASYNC_TX_DMA 913#ifdef CONFIG_ASYNC_TX_DMA
916#define async_dmaengine_get() dmaengine_get() 914#define async_dmaengine_get() dmaengine_get()
917#define async_dmaengine_put() dmaengine_put() 915#define async_dmaengine_put() dmaengine_put()
@@ -933,16 +931,8 @@ async_dma_find_channel(enum dma_transaction_type type)
933 return NULL; 931 return NULL;
934} 932}
935#endif /* CONFIG_ASYNC_TX_DMA */ 933#endif /* CONFIG_ASYNC_TX_DMA */
936
937dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
938 void *dest, void *src, size_t len);
939dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
940 struct page *page, unsigned int offset, void *kdata, size_t len);
941dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
942 struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
943 unsigned int src_off, size_t len);
944void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 934void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
945 struct dma_chan *chan); 935 struct dma_chan *chan);
946 936
947static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) 937static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
948{ 938{
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 1deece46a0ca..30624954dec5 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -30,6 +30,12 @@
30 30
31struct acpi_dmar_header; 31struct acpi_dmar_header;
32 32
33#ifdef CONFIG_X86
34# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
35#else
36# define DMAR_UNITS_SUPPORTED 64
37#endif
38
33/* DMAR Flags */ 39/* DMAR Flags */
34#define DMAR_INTR_REMAP 0x1 40#define DMAR_INTR_REMAP 0x1
35#define DMAR_X2APIC_OPT_OUT 0x2 41#define DMAR_X2APIC_OPT_OUT 0x2
@@ -56,13 +62,19 @@ struct dmar_drhd_unit {
56 struct intel_iommu *iommu; 62 struct intel_iommu *iommu;
57}; 63};
58 64
65struct dmar_pci_path {
66 u8 bus;
67 u8 device;
68 u8 function;
69};
70
59struct dmar_pci_notify_info { 71struct dmar_pci_notify_info {
60 struct pci_dev *dev; 72 struct pci_dev *dev;
61 unsigned long event; 73 unsigned long event;
62 int bus; 74 int bus;
63 u16 seg; 75 u16 seg;
64 u16 level; 76 u16 level;
65 struct acpi_dmar_pci_path path[]; 77 struct dmar_pci_path path[];
66} __attribute__((packed)); 78} __attribute__((packed));
67 79
68extern struct rw_semaphore dmar_global_lock; 80extern struct rw_semaphore dmar_global_lock;
@@ -114,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
114/* Intel IOMMU detection */ 126/* Intel IOMMU detection */
115extern int detect_intel_iommu(void); 127extern int detect_intel_iommu(void);
116extern int enable_drhd_fault_handling(void); 128extern int enable_drhd_fault_handling(void);
129extern int dmar_device_add(acpi_handle handle);
130extern int dmar_device_remove(acpi_handle handle);
131
132static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
133{
134 return 0;
135}
117 136
118#ifdef CONFIG_INTEL_IOMMU 137#ifdef CONFIG_INTEL_IOMMU
119extern int iommu_detected, no_iommu; 138extern int iommu_detected, no_iommu;
120extern int intel_iommu_init(void); 139extern int intel_iommu_init(void);
121extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); 140extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
122extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); 141extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
142extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
143extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
144extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
123extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); 145extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
124#else /* !CONFIG_INTEL_IOMMU: */ 146#else /* !CONFIG_INTEL_IOMMU: */
125static inline int intel_iommu_init(void) { return -ENODEV; } 147static inline int intel_iommu_init(void) { return -ENODEV; }
126static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) 148
149#define dmar_parse_one_rmrr dmar_res_noop
150#define dmar_parse_one_atsr dmar_res_noop
151#define dmar_check_one_atsr dmar_res_noop
152#define dmar_release_one_atsr dmar_res_noop
153
154static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
127{ 155{
128 return 0; 156 return 0;
129} 157}
130static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) 158
159static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
131{ 160{
132 return 0; 161 return 0;
133} 162}
134static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) 163#endif /* CONFIG_INTEL_IOMMU */
164
165#ifdef CONFIG_IRQ_REMAP
166extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
167#else /* CONFIG_IRQ_REMAP */
168static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
169{ return 0; }
170#endif /* CONFIG_IRQ_REMAP */
171
172#else /* CONFIG_DMAR_TABLE */
173
174static inline int dmar_device_add(void *handle)
175{
176 return 0;
177}
178
179static inline int dmar_device_remove(void *handle)
135{ 180{
136 return 0; 181 return 0;
137} 182}
138#endif /* CONFIG_INTEL_IOMMU */
139 183
140#endif /* CONFIG_DMAR_TABLE */ 184#endif /* CONFIG_DMAR_TABLE */
141 185
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index debb70d40547..8723f2a99e15 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -172,7 +172,7 @@ enum drbd_ret_code {
172 ERR_RES_NOT_KNOWN = 158, 172 ERR_RES_NOT_KNOWN = 158,
173 ERR_RES_IN_USE = 159, 173 ERR_RES_IN_USE = 159,
174 ERR_MINOR_CONFIGURED = 160, 174 ERR_MINOR_CONFIGURED = 160,
175 ERR_MINOR_EXISTS = 161, 175 ERR_MINOR_OR_VOLUME_EXISTS = 161,
176 ERR_INVALID_REQUEST = 162, 176 ERR_INVALID_REQUEST = 162,
177 ERR_NEED_APV_100 = 163, 177 ERR_NEED_APV_100 = 163,
178 ERR_NEED_ALLOW_TWO_PRI = 164, 178 ERR_NEED_ALLOW_TWO_PRI = 164,
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
deleted file mode 100644
index 68b4024184de..000000000000
--- a/include/linux/dw_dmac.h
+++ /dev/null
@@ -1,111 +0,0 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller
3 *
4 * Copyright (C) 2007 Atmel Corporation
5 * Copyright (C) 2010-2011 ST Microelectronics
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef DW_DMAC_H
12#define DW_DMAC_H
13
14#include <linux/dmaengine.h>
15
16/**
17 * struct dw_dma_slave - Controller-specific information about a slave
18 *
19 * @dma_dev: required DMA master device. Depricated.
20 * @bus_id: name of this device channel, not just a device name since
21 * devices may have more than one channel e.g. "foo_tx"
22 * @cfg_hi: Platform-specific initializer for the CFG_HI register
23 * @cfg_lo: Platform-specific initializer for the CFG_LO register
24 * @src_master: src master for transfers on allocated channel.
25 * @dst_master: dest master for transfers on allocated channel.
26 */
27struct dw_dma_slave {
28 struct device *dma_dev;
29 u32 cfg_hi;
30 u32 cfg_lo;
31 u8 src_master;
32 u8 dst_master;
33};
34
35/**
36 * struct dw_dma_platform_data - Controller configuration parameters
37 * @nr_channels: Number of channels supported by hardware (max 8)
38 * @is_private: The device channels should be marked as private and not for
39 * by the general purpose DMA channel allocator.
40 * @chan_allocation_order: Allocate channels starting from 0 or 7
41 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
42 * @block_size: Maximum block size supported by the controller
43 * @nr_masters: Number of AHB masters supported by the controller
44 * @data_width: Maximum data width supported by hardware per AHB master
45 * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
46 */
47struct dw_dma_platform_data {
48 unsigned int nr_channels;
49 bool is_private;
50#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
51#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
52 unsigned char chan_allocation_order;
53#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
54#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
55 unsigned char chan_priority;
56 unsigned short block_size;
57 unsigned char nr_masters;
58 unsigned char data_width[4];
59};
60
61/* bursts size */
62enum dw_dma_msize {
63 DW_DMA_MSIZE_1,
64 DW_DMA_MSIZE_4,
65 DW_DMA_MSIZE_8,
66 DW_DMA_MSIZE_16,
67 DW_DMA_MSIZE_32,
68 DW_DMA_MSIZE_64,
69 DW_DMA_MSIZE_128,
70 DW_DMA_MSIZE_256,
71};
72
73/* Platform-configurable bits in CFG_HI */
74#define DWC_CFGH_FCMODE (1 << 0)
75#define DWC_CFGH_FIFO_MODE (1 << 1)
76#define DWC_CFGH_PROTCTL(x) ((x) << 2)
77#define DWC_CFGH_SRC_PER(x) ((x) << 7)
78#define DWC_CFGH_DST_PER(x) ((x) << 11)
79
80/* Platform-configurable bits in CFG_LO */
81#define DWC_CFGL_LOCK_CH_XFER (0 << 12) /* scope of LOCK_CH */
82#define DWC_CFGL_LOCK_CH_BLOCK (1 << 12)
83#define DWC_CFGL_LOCK_CH_XACT (2 << 12)
84#define DWC_CFGL_LOCK_BUS_XFER (0 << 14) /* scope of LOCK_BUS */
85#define DWC_CFGL_LOCK_BUS_BLOCK (1 << 14)
86#define DWC_CFGL_LOCK_BUS_XACT (2 << 14)
87#define DWC_CFGL_LOCK_CH (1 << 15) /* channel lockout */
88#define DWC_CFGL_LOCK_BUS (1 << 16) /* busmaster lockout */
89#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
90#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
91
92/* DMA API extensions */
93struct dw_cyclic_desc {
94 struct dw_desc **desc;
95 unsigned long periods;
96 void (*period_callback)(void *param);
97 void *period_callback_param;
98};
99
100struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
101 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
102 enum dma_transfer_direction direction);
103void dw_dma_cyclic_free(struct dma_chan *chan);
104int dw_dma_cyclic_start(struct dma_chan *chan);
105void dw_dma_cyclic_stop(struct dma_chan *chan);
106
107dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
108
109dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
110
111#endif /* DW_DMAC_H */
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 2fe93b26b42f..4f1bbc68cd1b 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -42,7 +42,7 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n,
42#if defined(CONFIG_DYNAMIC_DEBUG) 42#if defined(CONFIG_DYNAMIC_DEBUG)
43extern int ddebug_remove_module(const char *mod_name); 43extern int ddebug_remove_module(const char *mod_name);
44extern __printf(2, 3) 44extern __printf(2, 3)
45int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); 45void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...);
46 46
47extern int ddebug_dyndbg_module_param_cb(char *param, char *val, 47extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
48 const char *modname); 48 const char *modname);
@@ -50,15 +50,15 @@ extern int ddebug_dyndbg_module_param_cb(char *param, char *val,
50struct device; 50struct device;
51 51
52extern __printf(3, 4) 52extern __printf(3, 4)
53int __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, 53void __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev,
54 const char *fmt, ...); 54 const char *fmt, ...);
55 55
56struct net_device; 56struct net_device;
57 57
58extern __printf(3, 4) 58extern __printf(3, 4)
59int __dynamic_netdev_dbg(struct _ddebug *descriptor, 59void __dynamic_netdev_dbg(struct _ddebug *descriptor,
60 const struct net_device *dev, 60 const struct net_device *dev,
61 const char *fmt, ...); 61 const char *fmt, ...);
62 62
63#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ 63#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
64 static struct _ddebug __aligned(8) \ 64 static struct _ddebug __aligned(8) \
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 5621547d631b..a4be70398ce1 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -73,14 +73,22 @@ static inline void dql_queued(struct dql *dql, unsigned int count)
73{ 73{
74 BUG_ON(count > DQL_MAX_OBJECT); 74 BUG_ON(count > DQL_MAX_OBJECT);
75 75
76 dql->num_queued += count;
77 dql->last_obj_cnt = count; 76 dql->last_obj_cnt = count;
77
78 /* We want to force a write first, so that cpu do not attempt
79 * to get cache line containing last_obj_cnt, num_queued, adj_limit
80 * in Shared state, but directly does a Request For Ownership
81 * It is only a hint, we use barrier() only.
82 */
83 barrier();
84
85 dql->num_queued += count;
78} 86}
79 87
80/* Returns how many objects can be queued, < 0 indicates over limit. */ 88/* Returns how many objects can be queued, < 0 indicates over limit. */
81static inline int dql_avail(const struct dql *dql) 89static inline int dql_avail(const struct dql *dql)
82{ 90{
83 return dql->adj_limit - dql->num_queued; 91 return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
84} 92}
85 93
86/* Record number of completed objects and recalculate the limit. */ 94/* Record number of completed objects and recalculate the limit. */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index e1e68da6f35c..da3b72e95db3 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -194,7 +194,8 @@ static inline char *mc_event_error_type(const unsigned int err_type)
194 * @MEM_DDR3: DDR3 RAM 194 * @MEM_DDR3: DDR3 RAM
195 * @MEM_RDDR3: Registered DDR3 RAM 195 * @MEM_RDDR3: Registered DDR3 RAM
196 * This is a variant of the DDR3 memories. 196 * This is a variant of the DDR3 memories.
197 * @MEM_DDR4: DDR4 RAM 197 * @MEM_LRDDR3 Load-Reduced DDR3 memory.
198 * @MEM_DDR4: Unbuffered DDR4 RAM
198 * @MEM_RDDR4: Registered DDR4 RAM 199 * @MEM_RDDR4: Registered DDR4 RAM
199 * This is a variant of the DDR4 memories. 200 * This is a variant of the DDR4 memories.
200 */ 201 */
@@ -216,6 +217,7 @@ enum mem_type {
216 MEM_XDR, 217 MEM_XDR,
217 MEM_DDR3, 218 MEM_DDR3,
218 MEM_RDDR3, 219 MEM_RDDR3,
220 MEM_LRDDR3,
219 MEM_DDR4, 221 MEM_DDR4,
220 MEM_RDDR4, 222 MEM_RDDR4,
221}; 223};
diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h
index e50f98b0297a..eb0b1988050a 100644
--- a/include/linux/eeprom_93cx6.h
+++ b/include/linux/eeprom_93cx6.h
@@ -75,6 +75,10 @@ extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
75 const u8 word, u16 *data); 75 const u8 word, u16 *data);
76extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, 76extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
77 const u8 word, __le16 *data, const u16 words); 77 const u8 word, __le16 *data, const u16 words);
78extern void eeprom_93cx6_readb(struct eeprom_93cx6 *eeprom,
79 const u8 byte, u8 *data);
80extern void eeprom_93cx6_multireadb(struct eeprom_93cx6 *eeprom,
81 const u8 byte, u8 *data, const u16 bytes);
78 82
79extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); 83extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable);
80 84
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 45cb4ffdea62..0238d612750e 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -92,6 +92,7 @@ typedef struct {
92#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */ 92#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */
93#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */ 93#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */
94#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */ 94#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */
95#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */
95#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ 96#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
96#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ 97#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
97#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ 98#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
@@ -502,6 +503,10 @@ typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char
502typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, 503typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
503 u32 attr, unsigned long data_size, 504 u32 attr, unsigned long data_size,
504 void *data); 505 void *data);
506typedef efi_status_t
507efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor,
508 u32 attr, unsigned long data_size, void *data);
509
505typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); 510typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
506typedef void efi_reset_system_t (int reset_type, efi_status_t status, 511typedef void efi_reset_system_t (int reset_type, efi_status_t status,
507 unsigned long data_size, efi_char16_t *data); 512 unsigned long data_size, efi_char16_t *data);
@@ -542,6 +547,9 @@ void efi_native_runtime_setup(void);
542#define SMBIOS_TABLE_GUID \ 547#define SMBIOS_TABLE_GUID \
543 EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) 548 EFI_GUID( 0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
544 549
550#define SMBIOS3_TABLE_GUID \
551 EFI_GUID( 0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94 )
552
545#define SAL_SYSTEM_TABLE_GUID \ 553#define SAL_SYSTEM_TABLE_GUID \
546 EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d ) 554 EFI_GUID( 0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x0, 0x90, 0x27, 0x3f, 0xc1, 0x4d )
547 555
@@ -805,7 +813,8 @@ extern struct efi {
805 unsigned long mps; /* MPS table */ 813 unsigned long mps; /* MPS table */
806 unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ 814 unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
807 unsigned long acpi20; /* ACPI table (ACPI 2.0) */ 815 unsigned long acpi20; /* ACPI table (ACPI 2.0) */
808 unsigned long smbios; /* SM BIOS table */ 816 unsigned long smbios; /* SMBIOS table (32 bit entry point) */
817 unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
809 unsigned long sal_systab; /* SAL system table */ 818 unsigned long sal_systab; /* SAL system table */
810 unsigned long boot_info; /* boot info table */ 819 unsigned long boot_info; /* boot info table */
811 unsigned long hcdp; /* HCDP table */ 820 unsigned long hcdp; /* HCDP table */
@@ -821,6 +830,7 @@ extern struct efi {
821 efi_get_variable_t *get_variable; 830 efi_get_variable_t *get_variable;
822 efi_get_next_variable_t *get_next_variable; 831 efi_get_next_variable_t *get_next_variable;
823 efi_set_variable_t *set_variable; 832 efi_set_variable_t *set_variable;
833 efi_set_variable_nonblocking_t *set_variable_nonblocking;
824 efi_query_variable_info_t *query_variable_info; 834 efi_query_variable_info_t *query_variable_info;
825 efi_update_capsule_t *update_capsule; 835 efi_update_capsule_t *update_capsule;
826 efi_query_capsule_caps_t *query_capsule_caps; 836 efi_query_capsule_caps_t *query_capsule_caps;
@@ -886,6 +896,13 @@ extern bool efi_poweroff_required(void);
886 (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ 896 (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
887 (md) = (void *)(md) + (m)->desc_size) 897 (md) = (void *)(md) + (m)->desc_size)
888 898
899/*
900 * Format an EFI memory descriptor's type and attributes to a user-provided
901 * character buffer, as per snprintf(), and return the buffer.
902 */
903char * __init efi_md_typeattr_format(char *buf, size_t size,
904 const efi_memory_desc_t *md);
905
889/** 906/**
890 * efi_range_is_wc - check the WC bit on an address range 907 * efi_range_is_wc - check the WC bit on an address range
891 * @start: starting kvirt address 908 * @start: starting kvirt address
@@ -1034,6 +1051,7 @@ struct efivar_operations {
1034 efi_get_variable_t *get_variable; 1051 efi_get_variable_t *get_variable;
1035 efi_get_next_variable_t *get_next_variable; 1052 efi_get_next_variable_t *get_next_variable;
1036 efi_set_variable_t *set_variable; 1053 efi_set_variable_t *set_variable;
1054 efi_set_variable_nonblocking_t *set_variable_nonblocking;
1037 efi_query_variable_store_t *query_variable_store; 1055 efi_query_variable_store_t *query_variable_store;
1038}; 1056};
1039 1057
@@ -1227,4 +1245,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
1227 unsigned long *load_addr, 1245 unsigned long *load_addr,
1228 unsigned long *load_size); 1246 unsigned long *load_size);
1229 1247
1248efi_status_t efi_parse_options(char *cmdline);
1249
1250bool efi_runtime_disabled(void);
1230#endif /* _LINUX_EFI_H */ 1251#endif /* _LINUX_EFI_H */
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 67a5fa7830c4..20fa8d8ae313 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -15,6 +15,11 @@
15 set_personality(PER_LINUX | (current->personality & (~PER_MASK))) 15 set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
16#endif 16#endif
17 17
18#ifndef SET_PERSONALITY2
19#define SET_PERSONALITY2(ex, state) \
20 SET_PERSONALITY(ex)
21#endif
22
18#if ELF_CLASS == ELFCLASS32 23#if ELF_CLASS == ELFCLASS32
19 24
20extern Elf32_Dyn _DYNAMIC []; 25extern Elf32_Dyn _DYNAMIC [];
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 9c5529dc6d07..41c891d05f04 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -29,6 +29,7 @@
29#include <asm/bitsperlong.h> 29#include <asm/bitsperlong.h>
30 30
31#ifdef __KERNEL__ 31#ifdef __KERNEL__
32u32 eth_get_headlen(void *data, unsigned int max_len);
32__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); 33__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
33extern const struct header_ops eth_header_ops; 34extern const struct header_ops eth_header_ops;
34 35
@@ -391,4 +392,16 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
391#endif 392#endif
392} 393}
393 394
395/**
396 * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
397 * @skb: Buffer to pad
398 *
399 * An Ethernet frame should have a minimum size of 60 bytes. This function
400 * takes short frames and pads them with zeros up to the 60 byte limit.
401 */
402static inline int eth_skb_pad(struct sk_buff *skb)
403{
404 return skb_put_padto(skb, ETH_ZLEN);
405}
406
394#endif /* _LINUX_ETHERDEVICE_H */ 407#endif /* _LINUX_ETHERDEVICE_H */
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index e658229fee39..653dc9c4ebac 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -59,6 +59,26 @@ enum ethtool_phys_id_state {
59 ETHTOOL_ID_OFF 59 ETHTOOL_ID_OFF
60}; 60};
61 61
62enum {
63 ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */
64 ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */
65
66 /*
67 * Add your fresh new hash function bits above and remember to update
68 * rss_hash_func_strings[] in ethtool.c
69 */
70 ETH_RSS_HASH_FUNCS_COUNT
71};
72
73#define __ETH_RSS_HASH_BIT(bit) ((u32)1 << (bit))
74#define __ETH_RSS_HASH(name) __ETH_RSS_HASH_BIT(ETH_RSS_HASH_##name##_BIT)
75
76#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP)
77#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR)
78
79#define ETH_RSS_HASH_UNKNOWN 0
80#define ETH_RSS_HASH_NO_CHANGE 0
81
62struct net_device; 82struct net_device;
63 83
64/* Some generic methods drivers may use in their ethtool_ops */ 84/* Some generic methods drivers may use in their ethtool_ops */
@@ -158,17 +178,14 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
158 * Returns zero if not supported for this specific device. 178 * Returns zero if not supported for this specific device.
159 * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table. 179 * @get_rxfh_indir_size: Get the size of the RX flow hash indirection table.
160 * Returns zero if not supported for this specific device. 180 * Returns zero if not supported for this specific device.
161 * @get_rxfh: Get the contents of the RX flow hash indirection table and hash 181 * @get_rxfh: Get the contents of the RX flow hash indirection table, hash key
162 * key. 182 * and/or hash function.
163 * Will only be called if one or both of @get_rxfh_indir_size and
164 * @get_rxfh_key_size are implemented and return non-zero.
165 * Returns a negative error code or zero.
166 * @set_rxfh: Set the contents of the RX flow hash indirection table and/or
167 * hash key. In case only the indirection table or hash key is to be
168 * changed, the other argument will be %NULL.
169 * Will only be called if one or both of @get_rxfh_indir_size and
170 * @get_rxfh_key_size are implemented and return non-zero.
171 * Returns a negative error code or zero. 183 * Returns a negative error code or zero.
184 * @set_rxfh: Set the contents of the RX flow hash indirection table, hash
185 * key, and/or hash function. Arguments which are set to %NULL or zero
186 * will remain unchanged.
187 * Returns a negative error code or zero. An error code must be returned
188 * if at least one unsupported change was requested.
172 * @get_channels: Get number of channels. 189 * @get_channels: Get number of channels.
173 * @set_channels: Set number of channels. Returns a negative error code or 190 * @set_channels: Set number of channels. Returns a negative error code or
174 * zero. 191 * zero.
@@ -241,9 +258,10 @@ struct ethtool_ops {
241 int (*reset)(struct net_device *, u32 *); 258 int (*reset)(struct net_device *, u32 *);
242 u32 (*get_rxfh_key_size)(struct net_device *); 259 u32 (*get_rxfh_key_size)(struct net_device *);
243 u32 (*get_rxfh_indir_size)(struct net_device *); 260 u32 (*get_rxfh_indir_size)(struct net_device *);
244 int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key); 261 int (*get_rxfh)(struct net_device *, u32 *indir, u8 *key,
262 u8 *hfunc);
245 int (*set_rxfh)(struct net_device *, const u32 *indir, 263 int (*set_rxfh)(struct net_device *, const u32 *indir,
246 const u8 *key); 264 const u8 *key, const u8 hfunc);
247 void (*get_channels)(struct net_device *, struct ethtool_channels *); 265 void (*get_channels)(struct net_device *, struct ethtool_channels *);
248 int (*set_channels)(struct net_device *, struct ethtool_channels *); 266 int (*set_channels)(struct net_device *, struct ethtool_channels *);
249 int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); 267 int (*get_dump_flag)(struct net_device *, struct ethtool_dump *);
@@ -257,6 +275,10 @@ struct ethtool_ops {
257 struct ethtool_eeprom *, u8 *); 275 struct ethtool_eeprom *, u8 *);
258 int (*get_eee)(struct net_device *, struct ethtool_eee *); 276 int (*get_eee)(struct net_device *, struct ethtool_eee *);
259 int (*set_eee)(struct net_device *, struct ethtool_eee *); 277 int (*set_eee)(struct net_device *, struct ethtool_eee *);
278 int (*get_tunable)(struct net_device *,
279 const struct ethtool_tunable *, void *);
280 int (*set_tunable)(struct net_device *,
281 const struct ethtool_tunable *, const void *);
260 282
261 283
262}; 284};
diff --git a/include/linux/extcon/extcon-gpio.h b/include/linux/extcon/extcon-gpio.h
index 8900fdf511c6..0b17ad43fbfc 100644
--- a/include/linux/extcon/extcon-gpio.h
+++ b/include/linux/extcon/extcon-gpio.h
@@ -34,8 +34,10 @@
34 * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW). 34 * @irq_flags: IRQ Flags (e.g., IRQF_TRIGGER_LOW).
35 * @state_on: print_state is overriden with state_on if attached. 35 * @state_on: print_state is overriden with state_on if attached.
36 * If NULL, default method of extcon class is used. 36 * If NULL, default method of extcon class is used.
37 * @state_off: print_state is overriden with state_on if detached. 37 * @state_off: print_state is overriden with state_off if detached.
38 * If NUll, default method of extcon class is used. 38 * If NUll, default method of extcon class is used.
39 * @check_on_resume: Boolean describing whether to check the state of gpio
40 * while resuming from sleep.
39 * 41 *
40 * Note that in order for state_on or state_off to be valid, both state_on 42 * Note that in order for state_on or state_off to be valid, both state_on
41 * and state_off should be not NULL. If at least one of them is NULL, 43 * and state_off should be not NULL. If at least one of them is NULL,
diff --git a/include/linux/extcon/sm5502.h b/include/linux/extcon/sm5502.h
deleted file mode 100644
index 030526bf8d79..000000000000
--- a/include/linux/extcon/sm5502.h
+++ /dev/null
@@ -1,287 +0,0 @@
1/*
2 * sm5502.h
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef __LINUX_EXTCON_SM5502_H
18#define __LINUX_EXTCON_SM5502_H
19
20enum sm5502_types {
21 TYPE_SM5502,
22};
23
24/* SM5502 registers */
25enum sm5502_reg {
26 SM5502_REG_DEVICE_ID = 0x01,
27 SM5502_REG_CONTROL,
28 SM5502_REG_INT1,
29 SM5502_REG_INT2,
30 SM5502_REG_INTMASK1,
31 SM5502_REG_INTMASK2,
32 SM5502_REG_ADC,
33 SM5502_REG_TIMING_SET1,
34 SM5502_REG_TIMING_SET2,
35 SM5502_REG_DEV_TYPE1,
36 SM5502_REG_DEV_TYPE2,
37 SM5502_REG_BUTTON1,
38 SM5502_REG_BUTTON2,
39 SM5502_REG_CAR_KIT_STATUS,
40 SM5502_REG_RSVD1,
41 SM5502_REG_RSVD2,
42 SM5502_REG_RSVD3,
43 SM5502_REG_RSVD4,
44 SM5502_REG_MANUAL_SW1,
45 SM5502_REG_MANUAL_SW2,
46 SM5502_REG_DEV_TYPE3,
47 SM5502_REG_RSVD5,
48 SM5502_REG_RSVD6,
49 SM5502_REG_RSVD7,
50 SM5502_REG_RSVD8,
51 SM5502_REG_RSVD9,
52 SM5502_REG_RESET,
53 SM5502_REG_RSVD10,
54 SM5502_REG_RESERVED_ID1,
55 SM5502_REG_RSVD11,
56 SM5502_REG_RSVD12,
57 SM5502_REG_RESERVED_ID2,
58 SM5502_REG_RSVD13,
59 SM5502_REG_OCP,
60 SM5502_REG_RSVD14,
61 SM5502_REG_RSVD15,
62 SM5502_REG_RSVD16,
63 SM5502_REG_RSVD17,
64 SM5502_REG_RSVD18,
65 SM5502_REG_RSVD19,
66 SM5502_REG_RSVD20,
67 SM5502_REG_RSVD21,
68 SM5502_REG_RSVD22,
69 SM5502_REG_RSVD23,
70 SM5502_REG_RSVD24,
71 SM5502_REG_RSVD25,
72 SM5502_REG_RSVD26,
73 SM5502_REG_RSVD27,
74 SM5502_REG_RSVD28,
75 SM5502_REG_RSVD29,
76 SM5502_REG_RSVD30,
77 SM5502_REG_RSVD31,
78 SM5502_REG_RSVD32,
79 SM5502_REG_RSVD33,
80 SM5502_REG_RSVD34,
81 SM5502_REG_RSVD35,
82 SM5502_REG_RSVD36,
83 SM5502_REG_RESERVED_ID3,
84
85 SM5502_REG_END,
86};
87
88/* Define SM5502 MASK/SHIFT constant */
89#define SM5502_REG_DEVICE_ID_VENDOR_SHIFT 0
90#define SM5502_REG_DEVICE_ID_VERSION_SHIFT 3
91#define SM5502_REG_DEVICE_ID_VENDOR_MASK (0x3 << SM5502_REG_DEVICE_ID_VENDOR_SHIFT)
92#define SM5502_REG_DEVICE_ID_VERSION_MASK (0x1f << SM5502_REG_DEVICE_ID_VERSION_SHIFT)
93
94#define SM5502_REG_CONTROL_MASK_INT_SHIFT 0
95#define SM5502_REG_CONTROL_WAIT_SHIFT 1
96#define SM5502_REG_CONTROL_MANUAL_SW_SHIFT 2
97#define SM5502_REG_CONTROL_RAW_DATA_SHIFT 3
98#define SM5502_REG_CONTROL_SW_OPEN_SHIFT 4
99#define SM5502_REG_CONTROL_MASK_INT_MASK (0x1 << SM5502_REG_CONTROL_MASK_INT_SHIFT)
100#define SM5502_REG_CONTROL_WAIT_MASK (0x1 << SM5502_REG_CONTROL_WAIT_SHIFT)
101#define SM5502_REG_CONTROL_MANUAL_SW_MASK (0x1 << SM5502_REG_CONTROL_MANUAL_SW_SHIFT)
102#define SM5502_REG_CONTROL_RAW_DATA_MASK (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT)
103#define SM5502_REG_CONTROL_SW_OPEN_MASK (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT)
104
105#define SM5502_REG_INTM1_ATTACH_SHIFT 0
106#define SM5502_REG_INTM1_DETACH_SHIFT 1
107#define SM5502_REG_INTM1_KP_SHIFT 2
108#define SM5502_REG_INTM1_LKP_SHIFT 3
109#define SM5502_REG_INTM1_LKR_SHIFT 4
110#define SM5502_REG_INTM1_OVP_EVENT_SHIFT 5
111#define SM5502_REG_INTM1_OCP_EVENT_SHIFT 6
112#define SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT 7
113#define SM5502_REG_INTM1_ATTACH_MASK (0x1 << SM5502_REG_INTM1_ATTACH_SHIFT)
114#define SM5502_REG_INTM1_DETACH_MASK (0x1 << SM5502_REG_INTM1_DETACH_SHIFT)
115#define SM5502_REG_INTM1_KP_MASK (0x1 << SM5502_REG_INTM1_KP_SHIFT)
116#define SM5502_REG_INTM1_LKP_MASK (0x1 << SM5502_REG_INTM1_LKP_SHIFT)
117#define SM5502_REG_INTM1_LKR_MASK (0x1 << SM5502_REG_INTM1_LKR_SHIFT)
118#define SM5502_REG_INTM1_OVP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OVP_EVENT_SHIFT)
119#define SM5502_REG_INTM1_OCP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OCP_EVENT_SHIFT)
120#define SM5502_REG_INTM1_OVP_OCP_DIS_MASK (0x1 << SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT)
121
122#define SM5502_REG_INTM2_VBUS_DET_SHIFT 0
123#define SM5502_REG_INTM2_REV_ACCE_SHIFT 1
124#define SM5502_REG_INTM2_ADC_CHG_SHIFT 2
125#define SM5502_REG_INTM2_STUCK_KEY_SHIFT 3
126#define SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT 4
127#define SM5502_REG_INTM2_MHL_SHIFT 5
128#define SM5502_REG_INTM2_VBUS_DET_MASK (0x1 << SM5502_REG_INTM2_VBUS_DET_SHIFT)
129#define SM5502_REG_INTM2_REV_ACCE_MASK (0x1 << SM5502_REG_INTM2_REV_ACCE_SHIFT)
130#define SM5502_REG_INTM2_ADC_CHG_MASK (0x1 << SM5502_REG_INTM2_ADC_CHG_SHIFT)
131#define SM5502_REG_INTM2_STUCK_KEY_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_SHIFT)
132#define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT)
133#define SM5502_REG_INTM2_MHL_MASK (0x1 << SM5502_REG_INTM2_MHL_SHIFT)
134
135#define SM5502_REG_ADC_SHIFT 0
136#define SM5502_REG_ADC_MASK (0x1f << SM5502_REG_ADC_SHIFT)
137
138#define SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT 4
139#define SM5502_REG_TIMING_SET1_KEY_PRESS_MASK (0xf << SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT)
140#define TIMING_KEY_PRESS_100MS 0x0
141#define TIMING_KEY_PRESS_200MS 0x1
142#define TIMING_KEY_PRESS_300MS 0x2
143#define TIMING_KEY_PRESS_400MS 0x3
144#define TIMING_KEY_PRESS_500MS 0x4
145#define TIMING_KEY_PRESS_600MS 0x5
146#define TIMING_KEY_PRESS_700MS 0x6
147#define TIMING_KEY_PRESS_800MS 0x7
148#define TIMING_KEY_PRESS_900MS 0x8
149#define TIMING_KEY_PRESS_1000MS 0x9
150#define SM5502_REG_TIMING_SET1_ADC_DET_SHIFT 0
151#define SM5502_REG_TIMING_SET1_ADC_DET_MASK (0xf << SM5502_REG_TIMING_SET1_ADC_DET_SHIFT)
152#define TIMING_ADC_DET_50MS 0x0
153#define TIMING_ADC_DET_100MS 0x1
154#define TIMING_ADC_DET_150MS 0x2
155#define TIMING_ADC_DET_200MS 0x3
156#define TIMING_ADC_DET_300MS 0x4
157#define TIMING_ADC_DET_400MS 0x5
158#define TIMING_ADC_DET_500MS 0x6
159#define TIMING_ADC_DET_600MS 0x7
160#define TIMING_ADC_DET_700MS 0x8
161#define TIMING_ADC_DET_800MS 0x9
162#define TIMING_ADC_DET_900MS 0xA
163#define TIMING_ADC_DET_1000MS 0xB
164
165#define SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT 4
166#define SM5502_REG_TIMING_SET2_SW_WAIT_MASK (0xf << SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT)
167#define TIMING_SW_WAIT_10MS 0x0
168#define TIMING_SW_WAIT_30MS 0x1
169#define TIMING_SW_WAIT_50MS 0x2
170#define TIMING_SW_WAIT_70MS 0x3
171#define TIMING_SW_WAIT_90MS 0x4
172#define TIMING_SW_WAIT_110MS 0x5
173#define TIMING_SW_WAIT_130MS 0x6
174#define TIMING_SW_WAIT_150MS 0x7
175#define TIMING_SW_WAIT_170MS 0x8
176#define TIMING_SW_WAIT_190MS 0x9
177#define TIMING_SW_WAIT_210MS 0xA
178#define SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT 0
179#define SM5502_REG_TIMING_SET2_LONG_KEY_MASK (0xf << SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT)
180#define TIMING_LONG_KEY_300MS 0x0
181#define TIMING_LONG_KEY_400MS 0x1
182#define TIMING_LONG_KEY_500MS 0x2
183#define TIMING_LONG_KEY_600MS 0x3
184#define TIMING_LONG_KEY_700MS 0x4
185#define TIMING_LONG_KEY_800MS 0x5
186#define TIMING_LONG_KEY_900MS 0x6
187#define TIMING_LONG_KEY_1000MS 0x7
188#define TIMING_LONG_KEY_1100MS 0x8
189#define TIMING_LONG_KEY_1200MS 0x9
190#define TIMING_LONG_KEY_1300MS 0xA
191#define TIMING_LONG_KEY_1400MS 0xB
192#define TIMING_LONG_KEY_1500MS 0xC
193
194#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT 0
195#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT 1
196#define SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT 2
197#define SM5502_REG_DEV_TYPE1_UART_SHIFT 3
198#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT 4
199#define SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT 5
200#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT 6
201#define SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT 7
202#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT)
203#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1__MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT)
204#define SM5502_REG_DEV_TYPE1_USB_SDP_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT)
205#define SM5502_REG_DEV_TYPE1_UART_MASK (0x1 << SM5502_REG_DEV_TYPE1_UART_SHIFT)
206#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_MASK (0x1 << SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT)
207#define SM5502_REG_DEV_TYPE1_USB_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT)
208#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT)
209#define SM5502_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT)
210
211#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT 0
212#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT 1
213#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT 2
214#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT 3
215#define SM5502_REG_DEV_TYPE2_PPD_SHIFT 4
216#define SM5502_REG_DEV_TYPE2_TTY_SHIFT 5
217#define SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT 6
218#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT)
219#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT)
220#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT)
221#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT)
222#define SM5502_REG_DEV_TYPE2_PPD_MASK (0x1 << SM5502_REG_DEV_TYPE2_PPD_SHIFT)
223#define SM5502_REG_DEV_TYPE2_TTY_MASK (0x1 << SM5502_REG_DEV_TYPE2_TTY_SHIFT)
224#define SM5502_REG_DEV_TYPE2_AV_CABLE_MASK (0x1 << SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT)
225
226#define SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT 0
227#define SM5502_REG_MANUAL_SW1_DP_SHIFT 2
228#define SM5502_REG_MANUAL_SW1_DM_SHIFT 5
229#define SM5502_REG_MANUAL_SW1_VBUSIN_MASK (0x3 << SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT)
230#define SM5502_REG_MANUAL_SW1_DP_MASK (0x7 << SM5502_REG_MANUAL_SW1_DP_SHIFT)
231#define SM5502_REG_MANUAL_SW1_DM_MASK (0x7 << SM5502_REG_MANUAL_SW1_DM_SHIFT)
232#define VBUSIN_SWITCH_OPEN 0x0
233#define VBUSIN_SWITCH_VBUSOUT 0x1
234#define VBUSIN_SWITCH_MIC 0x2
235#define VBUSIN_SWITCH_VBUSOUT_WITH_USB 0x3
236#define DM_DP_CON_SWITCH_OPEN 0x0
237#define DM_DP_CON_SWITCH_USB 0x1
238#define DM_DP_CON_SWITCH_AUDIO 0x2
239#define DM_DP_CON_SWITCH_UART 0x3
240#define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
241 | (DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
242#define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
243 | (DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
244#define DM_DP_SWITCH_AUDIO ((DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
245 | (DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
246#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \
247 | (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT))
248
249/* SM5502 Interrupts */
250enum sm5502_irq {
251 /* INT1 */
252 SM5502_IRQ_INT1_ATTACH,
253 SM5502_IRQ_INT1_DETACH,
254 SM5502_IRQ_INT1_KP,
255 SM5502_IRQ_INT1_LKP,
256 SM5502_IRQ_INT1_LKR,
257 SM5502_IRQ_INT1_OVP_EVENT,
258 SM5502_IRQ_INT1_OCP_EVENT,
259 SM5502_IRQ_INT1_OVP_OCP_DIS,
260
261 /* INT2 */
262 SM5502_IRQ_INT2_VBUS_DET,
263 SM5502_IRQ_INT2_REV_ACCE,
264 SM5502_IRQ_INT2_ADC_CHG,
265 SM5502_IRQ_INT2_STUCK_KEY,
266 SM5502_IRQ_INT2_STUCK_KEY_RCV,
267 SM5502_IRQ_INT2_MHL,
268
269 SM5502_IRQ_NUM,
270};
271
272#define SM5502_IRQ_INT1_ATTACH_MASK BIT(0)
273#define SM5502_IRQ_INT1_DETACH_MASK BIT(1)
274#define SM5502_IRQ_INT1_KP_MASK BIT(2)
275#define SM5502_IRQ_INT1_LKP_MASK BIT(3)
276#define SM5502_IRQ_INT1_LKR_MASK BIT(4)
277#define SM5502_IRQ_INT1_OVP_EVENT_MASK BIT(5)
278#define SM5502_IRQ_INT1_OCP_EVENT_MASK BIT(6)
279#define SM5502_IRQ_INT1_OVP_OCP_DIS_MASK BIT(7)
280#define SM5502_IRQ_INT2_VBUS_DET_MASK BIT(0)
281#define SM5502_IRQ_INT2_REV_ACCE_MASK BIT(1)
282#define SM5502_IRQ_INT2_ADC_CHG_MASK BIT(2)
283#define SM5502_IRQ_INT2_STUCK_KEY_MASK BIT(3)
284#define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK BIT(4)
285#define SM5502_IRQ_INT2_MHL_MASK BIT(5)
286
287#endif /* __LINUX_EXTCON_SM5502_H */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 08ed2b0a96e6..87f14e90e984 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -15,8 +15,9 @@
15#include <linux/types.h> 15#include <linux/types.h>
16 16
17#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */ 17#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
18#define F2FS_LOG_SECTOR_SIZE 9 /* 9 bits for 512 byte */ 18#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
19#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* 4KB: F2FS_BLKSIZE */ 19#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
20#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
20#define F2FS_BLKSIZE 4096 /* support only 4KB block */ 21#define F2FS_BLKSIZE 4096 /* support only 4KB block */
21#define F2FS_MAX_EXTENSION 64 /* # of extension entries */ 22#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
22#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE) 23#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
@@ -32,7 +33,8 @@
32#define F2FS_META_INO(sbi) (sbi->meta_ino_num) 33#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
33 34
34/* This flag is used by node and meta inodes, and by recovery */ 35/* This flag is used by node and meta inodes, and by recovery */
35#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO) 36#define GFP_F2FS_ZERO (GFP_NOFS | __GFP_ZERO)
37#define GFP_F2FS_HIGH_ZERO (GFP_NOFS | __GFP_ZERO | __GFP_HIGHMEM)
36 38
37/* 39/*
38 * For further optimization on multi-head logs, on-disk layout supports maximum 40 * For further optimization on multi-head logs, on-disk layout supports maximum
@@ -85,6 +87,7 @@ struct f2fs_super_block {
85/* 87/*
86 * For checkpoint 88 * For checkpoint
87 */ 89 */
90#define CP_FSCK_FLAG 0x00000010
88#define CP_ERROR_FLAG 0x00000008 91#define CP_ERROR_FLAG 0x00000008
89#define CP_COMPACT_SUM_FLAG 0x00000004 92#define CP_COMPACT_SUM_FLAG 0x00000004
90#define CP_ORPHAN_PRESENT_FLAG 0x00000002 93#define CP_ORPHAN_PRESENT_FLAG 0x00000002
@@ -168,14 +171,12 @@ struct f2fs_extent {
168 171
169#define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */ 172#define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */
170#define F2FS_INLINE_DATA 0x02 /* file inline data flag */ 173#define F2FS_INLINE_DATA 0x02 /* file inline data flag */
174#define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */
175#define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */
171 176
172#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ 177#define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \
173 F2FS_INLINE_XATTR_ADDRS - 1)) 178 F2FS_INLINE_XATTR_ADDRS - 1))
174 179
175#define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\
176 sizeof(__le32) * (DEF_ADDRS_PER_INODE + \
177 DEF_NIDS_PER_INODE - 1))
178
179struct f2fs_inode { 180struct f2fs_inode {
180 __le16 i_mode; /* file mode */ 181 __le16 i_mode; /* file mode */
181 __u8 i_advise; /* file hints */ 182 __u8 i_advise; /* file hints */
@@ -433,6 +434,24 @@ struct f2fs_dentry_block {
433 __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN]; 434 __u8 filename[NR_DENTRY_IN_BLOCK][F2FS_SLOT_LEN];
434} __packed; 435} __packed;
435 436
437/* for inline dir */
438#define NR_INLINE_DENTRY (MAX_INLINE_DATA * BITS_PER_BYTE / \
439 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
440 BITS_PER_BYTE + 1))
441#define INLINE_DENTRY_BITMAP_SIZE ((NR_INLINE_DENTRY + \
442 BITS_PER_BYTE - 1) / BITS_PER_BYTE)
443#define INLINE_RESERVED_SIZE (MAX_INLINE_DATA - \
444 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
445 NR_INLINE_DENTRY + INLINE_DENTRY_BITMAP_SIZE))
446
447/* inline directory entry structure */
448struct f2fs_inline_dentry {
449 __u8 dentry_bitmap[INLINE_DENTRY_BITMAP_SIZE];
450 __u8 reserved[INLINE_RESERVED_SIZE];
451 struct f2fs_dir_entry dentry[NR_INLINE_DENTRY];
452 __u8 filename[NR_INLINE_DENTRY][F2FS_SLOT_LEN];
453} __packed;
454
436/* file types used in inode_info->flags */ 455/* file types used in inode_info->flags */
437enum { 456enum {
438 F2FS_FT_UNKNOWN, 457 F2FS_FT_UNKNOWN,
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index c6f996f2abb6..798fad9e420d 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -5,6 +5,7 @@
5 5
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/debugfs.h> 7#include <linux/debugfs.h>
8#include <linux/ratelimit.h>
8#include <linux/atomic.h> 9#include <linux/atomic.h>
9 10
10/* 11/*
@@ -25,14 +26,18 @@ struct fault_attr {
25 unsigned long reject_end; 26 unsigned long reject_end;
26 27
27 unsigned long count; 28 unsigned long count;
29 struct ratelimit_state ratelimit_state;
30 struct dentry *dname;
28}; 31};
29 32
30#define FAULT_ATTR_INITIALIZER { \ 33#define FAULT_ATTR_INITIALIZER { \
31 .interval = 1, \ 34 .interval = 1, \
32 .times = ATOMIC_INIT(1), \ 35 .times = ATOMIC_INIT(1), \
33 .require_end = ULONG_MAX, \ 36 .require_end = ULONG_MAX, \
34 .stacktrace_depth = 32, \ 37 .stacktrace_depth = 32, \
35 .verbose = 2, \ 38 .ratelimit_state = RATELIMIT_STATE_INIT_DISABLED, \
39 .verbose = 2, \
40 .dname = NULL, \
36 } 41 }
37 42
38#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER 43#define DECLARE_FAULT_ATTR(name) struct fault_attr name = FAULT_ATTR_INITIALIZER
diff --git a/include/linux/fence.h b/include/linux/fence.h
index d174585b874b..39efee130d2b 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -128,8 +128,8 @@ struct fence_cb {
128 * from irq context, so normal spinlocks can be used. 128 * from irq context, so normal spinlocks can be used.
129 * 129 *
130 * A return value of false indicates the fence already passed, 130 * A return value of false indicates the fence already passed,
131 * or some failure occured that made it impossible to enable 131 * or some failure occurred that made it impossible to enable
132 * signaling. True indicates succesful enabling. 132 * signaling. True indicates successful enabling.
133 * 133 *
134 * fence->status may be set in enable_signaling, but only when false is 134 * fence->status may be set in enable_signaling, but only when false is
135 * returned. 135 * returned.
diff --git a/include/linux/file.h b/include/linux/file.h
index 4d69123377a2..f87d30882a24 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -66,7 +66,6 @@ extern void set_close_on_exec(unsigned int fd, int flag);
66extern bool get_close_on_exec(unsigned int fd); 66extern bool get_close_on_exec(unsigned int fd);
67extern void put_filp(struct file *); 67extern void put_filp(struct file *);
68extern int get_unused_fd_flags(unsigned flags); 68extern int get_unused_fd_flags(unsigned flags);
69#define get_unused_fd() get_unused_fd_flags(0)
70extern void put_unused_fd(unsigned int fd); 69extern void put_unused_fd(unsigned int fd);
71 70
72extern void fd_install(unsigned int fd, struct file *file); 71extern void fd_install(unsigned int fd, struct file *file);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a5227ab8ccb1..caac2087a4d5 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -4,58 +4,24 @@
4#ifndef __LINUX_FILTER_H__ 4#ifndef __LINUX_FILTER_H__
5#define __LINUX_FILTER_H__ 5#define __LINUX_FILTER_H__
6 6
7#include <stdarg.h>
8
7#include <linux/atomic.h> 9#include <linux/atomic.h>
8#include <linux/compat.h> 10#include <linux/compat.h>
9#include <linux/skbuff.h> 11#include <linux/skbuff.h>
12#include <linux/linkage.h>
13#include <linux/printk.h>
10#include <linux/workqueue.h> 14#include <linux/workqueue.h>
11#include <uapi/linux/filter.h>
12 15
13/* Internally used and optimized filter representation with extended 16#include <asm/cacheflush.h>
14 * instruction set based on top of classic BPF.
15 */
16 17
17/* instruction classes */ 18#include <uapi/linux/filter.h>
18#define BPF_ALU64 0x07 /* alu mode in double word width */ 19#include <uapi/linux/bpf.h>
19
20/* ld/ldx fields */
21#define BPF_DW 0x18 /* double word */
22#define BPF_XADD 0xc0 /* exclusive add */
23
24/* alu/jmp fields */
25#define BPF_MOV 0xb0 /* mov reg to reg */
26#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */
27
28/* change endianness of a register */
29#define BPF_END 0xd0 /* flags for endianness conversion: */
30#define BPF_TO_LE 0x00 /* convert to little-endian */
31#define BPF_TO_BE 0x08 /* convert to big-endian */
32#define BPF_FROM_LE BPF_TO_LE
33#define BPF_FROM_BE BPF_TO_BE
34
35#define BPF_JNE 0x50 /* jump != */
36#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
37#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
38#define BPF_CALL 0x80 /* function call */
39#define BPF_EXIT 0x90 /* function return */
40
41/* Register numbers */
42enum {
43 BPF_REG_0 = 0,
44 BPF_REG_1,
45 BPF_REG_2,
46 BPF_REG_3,
47 BPF_REG_4,
48 BPF_REG_5,
49 BPF_REG_6,
50 BPF_REG_7,
51 BPF_REG_8,
52 BPF_REG_9,
53 BPF_REG_10,
54 __MAX_BPF_REG,
55};
56 20
57/* BPF has 10 general purpose 64-bit registers and stack frame. */ 21struct sk_buff;
58#define MAX_BPF_REG __MAX_BPF_REG 22struct sock;
23struct seccomp_data;
24struct bpf_prog_aux;
59 25
60/* ArgX, context and stack frame pointer register positions. Note, 26/* ArgX, context and stack frame pointer register positions. Note,
61 * Arg1, Arg2, Arg3, etc are used as argument mappings of function 27 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -161,6 +127,30 @@ enum {
161 .off = 0, \ 127 .off = 0, \
162 .imm = IMM }) 128 .imm = IMM })
163 129
130/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
131#define BPF_LD_IMM64(DST, IMM) \
132 BPF_LD_IMM64_RAW(DST, 0, IMM)
133
134#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
135 ((struct bpf_insn) { \
136 .code = BPF_LD | BPF_DW | BPF_IMM, \
137 .dst_reg = DST, \
138 .src_reg = SRC, \
139 .off = 0, \
140 .imm = (__u32) (IMM) }), \
141 ((struct bpf_insn) { \
142 .code = 0, /* zero is reserved opcode */ \
143 .dst_reg = 0, \
144 .src_reg = 0, \
145 .off = 0, \
146 .imm = ((__u64) (IMM)) >> 32 })
147
148#define BPF_PSEUDO_MAP_FD 1
149
150/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
151#define BPF_LD_MAP_FD(DST, MAP_FD) \
152 BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
153
164/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ 154/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
165 155
166#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ 156#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
@@ -299,14 +289,6 @@ enum {
299#define SK_RUN_FILTER(filter, ctx) \ 289#define SK_RUN_FILTER(filter, ctx) \
300 (*filter->prog->bpf_func)(ctx, filter->prog->insnsi) 290 (*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
301 291
302struct bpf_insn {
303 __u8 code; /* opcode */
304 __u8 dst_reg:4; /* dest register */
305 __u8 src_reg:4; /* source register */
306 __s16 off; /* signed offset */
307 __s32 imm; /* signed immediate constant */
308};
309
310#ifdef CONFIG_COMPAT 292#ifdef CONFIG_COMPAT
311/* A struct sock_filter is architecture independent. */ 293/* A struct sock_filter is architecture independent. */
312struct compat_sock_fprog { 294struct compat_sock_fprog {
@@ -320,20 +302,23 @@ struct sock_fprog_kern {
320 struct sock_filter *filter; 302 struct sock_filter *filter;
321}; 303};
322 304
323struct sk_buff; 305struct bpf_binary_header {
324struct sock; 306 unsigned int pages;
325struct seccomp_data; 307 u8 image[];
308};
326 309
327struct bpf_prog { 310struct bpf_prog {
328 u32 jited:1, /* Is our filter JIT'ed? */ 311 u16 pages; /* Number of allocated pages */
329 len:31; /* Number of filter blocks */ 312 bool jited; /* Is our filter JIT'ed? */
313 u32 len; /* Number of filter blocks */
330 struct sock_fprog_kern *orig_prog; /* Original BPF program */ 314 struct sock_fprog_kern *orig_prog; /* Original BPF program */
315 struct bpf_prog_aux *aux; /* Auxiliary fields */
331 unsigned int (*bpf_func)(const struct sk_buff *skb, 316 unsigned int (*bpf_func)(const struct sk_buff *skb,
332 const struct bpf_insn *filter); 317 const struct bpf_insn *filter);
318 /* Instructions for interpreter */
333 union { 319 union {
334 struct sock_filter insns[0]; 320 struct sock_filter insns[0];
335 struct bpf_insn insnsi[0]; 321 struct bpf_insn insnsi[0];
336 struct work_struct work;
337 }; 322 };
338}; 323};
339 324
@@ -353,6 +338,26 @@ static inline unsigned int bpf_prog_size(unsigned int proglen)
353 338
354#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) 339#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
355 340
341#ifdef CONFIG_DEBUG_SET_MODULE_RONX
342static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
343{
344 set_memory_ro((unsigned long)fp, fp->pages);
345}
346
347static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
348{
349 set_memory_rw((unsigned long)fp, fp->pages);
350}
351#else
352static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
353{
354}
355
356static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
357{
358}
359#endif /* CONFIG_DEBUG_SET_MODULE_RONX */
360
356int sk_filter(struct sock *sk, struct sk_buff *skb); 361int sk_filter(struct sock *sk, struct sk_buff *skb);
357 362
358void bpf_prog_select_runtime(struct bpf_prog *fp); 363void bpf_prog_select_runtime(struct bpf_prog *fp);
@@ -361,10 +366,22 @@ void bpf_prog_free(struct bpf_prog *fp);
361int bpf_convert_filter(struct sock_filter *prog, int len, 366int bpf_convert_filter(struct sock_filter *prog, int len,
362 struct bpf_insn *new_prog, int *new_len); 367 struct bpf_insn *new_prog, int *new_len);
363 368
369struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
370struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
371 gfp_t gfp_extra_flags);
372void __bpf_prog_free(struct bpf_prog *fp);
373
374static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
375{
376 bpf_prog_unlock_ro(fp);
377 __bpf_prog_free(fp);
378}
379
364int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); 380int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
365void bpf_prog_destroy(struct bpf_prog *fp); 381void bpf_prog_destroy(struct bpf_prog *fp);
366 382
367int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); 383int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
384int sk_attach_bpf(u32 ufd, struct sock *sk);
368int sk_detach_filter(struct sock *sk); 385int sk_detach_filter(struct sock *sk);
369 386
370int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); 387int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
@@ -377,6 +394,38 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
377u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); 394u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
378void bpf_int_jit_compile(struct bpf_prog *fp); 395void bpf_int_jit_compile(struct bpf_prog *fp);
379 396
397#ifdef CONFIG_BPF_JIT
398typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
399
400struct bpf_binary_header *
401bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
402 unsigned int alignment,
403 bpf_jit_fill_hole_t bpf_fill_ill_insns);
404void bpf_jit_binary_free(struct bpf_binary_header *hdr);
405
406void bpf_jit_compile(struct bpf_prog *fp);
407void bpf_jit_free(struct bpf_prog *fp);
408
409static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
410 u32 pass, void *image)
411{
412 pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
413 flen, proglen, pass, image);
414 if (image)
415 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
416 16, 1, image, proglen, false);
417}
418#else
419static inline void bpf_jit_compile(struct bpf_prog *fp)
420{
421}
422
423static inline void bpf_jit_free(struct bpf_prog *fp)
424{
425 bpf_prog_unlock_free(fp);
426}
427#endif /* CONFIG_BPF_JIT */
428
380#define BPF_ANC BIT(15) 429#define BPF_ANC BIT(15)
381 430
382static inline u16 bpf_anc_helper(const struct sock_filter *ftest) 431static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
@@ -424,36 +473,6 @@ static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
424 return bpf_internal_load_pointer_neg_helper(skb, k, size); 473 return bpf_internal_load_pointer_neg_helper(skb, k, size);
425} 474}
426 475
427#ifdef CONFIG_BPF_JIT
428#include <stdarg.h>
429#include <linux/linkage.h>
430#include <linux/printk.h>
431
432void bpf_jit_compile(struct bpf_prog *fp);
433void bpf_jit_free(struct bpf_prog *fp);
434
435static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
436 u32 pass, void *image)
437{
438 pr_err("flen=%u proglen=%u pass=%u image=%pK\n",
439 flen, proglen, pass, image);
440 if (image)
441 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
442 16, 1, image, proglen, false);
443}
444#else
445#include <linux/slab.h>
446
447static inline void bpf_jit_compile(struct bpf_prog *fp)
448{
449}
450
451static inline void bpf_jit_free(struct bpf_prog *fp)
452{
453 kfree(fp);
454}
455#endif /* CONFIG_BPF_JIT */
456
457static inline int bpf_tell_extensions(void) 476static inline int bpf_tell_extensions(void)
458{ 477{
459 return SKF_AD_MAX; 478 return SKF_AD_MAX;
diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index 4ebc49fae391..0d348e011a6e 100644
--- a/include/linux/flex_proportions.h
+++ b/include/linux/flex_proportions.h
@@ -10,6 +10,7 @@
10#include <linux/percpu_counter.h> 10#include <linux/percpu_counter.h>
11#include <linux/spinlock.h> 11#include <linux/spinlock.h>
12#include <linux/seqlock.h> 12#include <linux/seqlock.h>
13#include <linux/gfp.h>
13 14
14/* 15/*
15 * When maximum proportion of some event type is specified, this is the 16 * When maximum proportion of some event type is specified, this is the
@@ -32,7 +33,7 @@ struct fprop_global {
32 seqcount_t sequence; 33 seqcount_t sequence;
33}; 34};
34 35
35int fprop_global_init(struct fprop_global *p); 36int fprop_global_init(struct fprop_global *p, gfp_t gfp);
36void fprop_global_destroy(struct fprop_global *p); 37void fprop_global_destroy(struct fprop_global *p);
37bool fprop_new_period(struct fprop_global *p, int periods); 38bool fprop_new_period(struct fprop_global *p, int periods);
38 39
@@ -79,7 +80,7 @@ struct fprop_local_percpu {
79 raw_spinlock_t lock; /* Protect period and numerator */ 80 raw_spinlock_t lock; /* Protect period and numerator */
80}; 81};
81 82
82int fprop_local_init_percpu(struct fprop_local_percpu *pl); 83int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
83void fprop_local_destroy_percpu(struct fprop_local_percpu *pl); 84void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
84void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl); 85void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
85void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl, 86void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
diff --git a/include/linux/font.h b/include/linux/font.h
index 40a24ab41b36..d6821769dd1e 100644
--- a/include/linux/font.h
+++ b/include/linux/font.h
@@ -31,6 +31,7 @@ struct font_desc {
31#define SUN12x22_IDX 7 31#define SUN12x22_IDX 7
32#define ACORN8x8_IDX 8 32#define ACORN8x8_IDX 8
33#define MINI4x6_IDX 9 33#define MINI4x6_IDX 9
34#define FONT6x10_IDX 10
34 35
35extern const struct font_desc font_vga_8x8, 36extern const struct font_desc font_vga_8x8,
36 font_vga_8x16, 37 font_vga_8x16,
@@ -41,7 +42,8 @@ extern const struct font_desc font_vga_8x8,
41 font_sun_8x16, 42 font_sun_8x16,
42 font_sun_12x22, 43 font_sun_12x22,
43 font_acorn_8x8, 44 font_acorn_8x8,
44 font_mini_4x6; 45 font_mini_4x6,
46 font_6x10;
45 47
46/* Find a font with a specific name */ 48/* Find a font with a specific name */
47 49
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 7fd81b8c4897..6b7fd9cf5ea2 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -246,15 +246,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
246 * defined in <linux/wait.h> 246 * defined in <linux/wait.h>
247 */ 247 */
248 248
249#define wait_event_freezekillable(wq, condition) \
250({ \
251 int __retval; \
252 freezer_do_not_count(); \
253 __retval = wait_event_killable(wq, (condition)); \
254 freezer_count(); \
255 __retval; \
256})
257
258/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */ 249/* DO NOT ADD ANY NEW CALLERS OF THIS FUNCTION */
259#define wait_event_freezekillable_unsafe(wq, condition) \ 250#define wait_event_freezekillable_unsafe(wq, condition) \
260({ \ 251({ \
@@ -265,35 +256,6 @@ static inline int freezable_schedule_hrtimeout_range(ktime_t *expires,
265 __retval; \ 256 __retval; \
266}) 257})
267 258
268#define wait_event_freezable(wq, condition) \
269({ \
270 int __retval; \
271 freezer_do_not_count(); \
272 __retval = wait_event_interruptible(wq, (condition)); \
273 freezer_count(); \
274 __retval; \
275})
276
277#define wait_event_freezable_timeout(wq, condition, timeout) \
278({ \
279 long __retval = timeout; \
280 freezer_do_not_count(); \
281 __retval = wait_event_interruptible_timeout(wq, (condition), \
282 __retval); \
283 freezer_count(); \
284 __retval; \
285})
286
287#define wait_event_freezable_exclusive(wq, condition) \
288({ \
289 int __retval; \
290 freezer_do_not_count(); \
291 __retval = wait_event_interruptible_exclusive(wq, condition); \
292 freezer_count(); \
293 __retval; \
294})
295
296
297#else /* !CONFIG_FREEZER */ 259#else /* !CONFIG_FREEZER */
298static inline bool frozen(struct task_struct *p) { return false; } 260static inline bool frozen(struct task_struct *p) { return false; }
299static inline bool freezing(struct task_struct *p) { return false; } 261static inline bool freezing(struct task_struct *p) { return false; }
@@ -331,18 +293,6 @@ static inline void set_freezable(void) {}
331#define freezable_schedule_hrtimeout_range(expires, delta, mode) \ 293#define freezable_schedule_hrtimeout_range(expires, delta, mode) \
332 schedule_hrtimeout_range(expires, delta, mode) 294 schedule_hrtimeout_range(expires, delta, mode)
333 295
334#define wait_event_freezable(wq, condition) \
335 wait_event_interruptible(wq, condition)
336
337#define wait_event_freezable_timeout(wq, condition, timeout) \
338 wait_event_interruptible_timeout(wq, condition, timeout)
339
340#define wait_event_freezable_exclusive(wq, condition) \
341 wait_event_interruptible_exclusive(wq, condition)
342
343#define wait_event_freezekillable(wq, condition) \
344 wait_event_killable(wq, condition)
345
346#define wait_event_freezekillable_unsafe(wq, condition) \ 296#define wait_event_freezekillable_unsafe(wq, condition) \
347 wait_event_killable(wq, condition) 297 wait_event_killable(wq, condition)
348 298
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 94187721ad41..42efe13077b6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -18,6 +18,7 @@
18#include <linux/pid.h> 18#include <linux/pid.h>
19#include <linux/bug.h> 19#include <linux/bug.h>
20#include <linux/mutex.h> 20#include <linux/mutex.h>
21#include <linux/rwsem.h>
21#include <linux/capability.h> 22#include <linux/capability.h>
22#include <linux/semaphore.h> 23#include <linux/semaphore.h>
23#include <linux/fiemap.h> 24#include <linux/fiemap.h>
@@ -134,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
134#define FMODE_CAN_WRITE ((__force fmode_t)0x40000) 135#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
135 136
136/* File was opened by fanotify and shouldn't generate fanotify events */ 137/* File was opened by fanotify and shouldn't generate fanotify events */
137#define FMODE_NONOTIFY ((__force fmode_t)0x1000000) 138#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
138 139
139/* 140/*
140 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector 141 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
@@ -192,8 +193,6 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
192#define READ 0 193#define READ 0
193#define WRITE RW_MASK 194#define WRITE RW_MASK
194#define READA RWA_MASK 195#define READA RWA_MASK
195#define KERNEL_READ (READ|REQ_KERNEL)
196#define KERNEL_WRITE (WRITE|REQ_KERNEL)
197 196
198#define READ_SYNC (READ | REQ_SYNC) 197#define READ_SYNC (READ | REQ_SYNC)
199#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE) 198#define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE)
@@ -225,6 +224,13 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
225#define ATTR_TIMES_SET (1 << 16) 224#define ATTR_TIMES_SET (1 << 16)
226 225
227/* 226/*
227 * Whiteout is represented by a char device. The following constants define the
228 * mode and device number to use.
229 */
230#define WHITEOUT_MODE 0
231#define WHITEOUT_DEV 0
232
233/*
228 * This is the Inode Attributes structure, used for notify_change(). It 234 * This is the Inode Attributes structure, used for notify_change(). It
229 * uses the above definitions as flags, to know which values have changed. 235 * uses the above definitions as flags, to know which values have changed.
230 * Also, in this manner, a Filesystem can look at only the values it cares 236 * Also, in this manner, a Filesystem can look at only the values it cares
@@ -256,6 +262,12 @@ struct iattr {
256 */ 262 */
257#include <linux/quota.h> 263#include <linux/quota.h>
258 264
265/*
266 * Maximum number of layers of fs stack. Needs to be limited to
267 * prevent kernel stack overflow
268 */
269#define FILESYSTEM_MAX_STACK_DEPTH 2
270
259/** 271/**
260 * enum positive_aop_returns - aop return codes with specific semantics 272 * enum positive_aop_returns - aop return codes with specific semantics
261 * 273 *
@@ -390,7 +402,7 @@ struct address_space {
390 atomic_t i_mmap_writable;/* count VM_SHARED mappings */ 402 atomic_t i_mmap_writable;/* count VM_SHARED mappings */
391 struct rb_root i_mmap; /* tree of private and shared mappings */ 403 struct rb_root i_mmap; /* tree of private and shared mappings */
392 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */ 404 struct list_head i_mmap_nonlinear;/*list VM_NONLINEAR mappings */
393 struct mutex i_mmap_mutex; /* protect tree, count, list */ 405 struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
394 /* Protected by tree_lock together with the radix tree */ 406 /* Protected by tree_lock together with the radix tree */
395 unsigned long nrpages; /* number of total pages */ 407 unsigned long nrpages; /* number of total pages */
396 unsigned long nrshadows; /* number of shadow entries */ 408 unsigned long nrshadows; /* number of shadow entries */
@@ -456,6 +468,26 @@ struct block_device {
456 468
457int mapping_tagged(struct address_space *mapping, int tag); 469int mapping_tagged(struct address_space *mapping, int tag);
458 470
471static inline void i_mmap_lock_write(struct address_space *mapping)
472{
473 down_write(&mapping->i_mmap_rwsem);
474}
475
476static inline void i_mmap_unlock_write(struct address_space *mapping)
477{
478 up_write(&mapping->i_mmap_rwsem);
479}
480
481static inline void i_mmap_lock_read(struct address_space *mapping)
482{
483 down_read(&mapping->i_mmap_rwsem);
484}
485
486static inline void i_mmap_unlock_read(struct address_space *mapping)
487{
488 up_read(&mapping->i_mmap_rwsem);
489}
490
459/* 491/*
460 * Might pages of this file be mapped into userspace? 492 * Might pages of this file be mapped into userspace?
461 */ 493 */
@@ -595,9 +627,6 @@ struct inode {
595 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ 627 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
596 struct file_lock *i_flock; 628 struct file_lock *i_flock;
597 struct address_space i_data; 629 struct address_space i_data;
598#ifdef CONFIG_QUOTA
599 struct dquot *i_dquot[MAXQUOTAS];
600#endif
601 struct list_head i_devices; 630 struct list_head i_devices;
602 union { 631 union {
603 struct pipe_inode_info *i_pipe; 632 struct pipe_inode_info *i_pipe;
@@ -628,11 +657,13 @@ static inline int inode_unhashed(struct inode *inode)
628 * 2: child/target 657 * 2: child/target
629 * 3: xattr 658 * 3: xattr
630 * 4: second non-directory 659 * 4: second non-directory
631 * The last is for certain operations (such as rename) which lock two 660 * 5: second parent (when locking independent directories in rename)
661 *
662 * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
632 * non-directories at once. 663 * non-directories at once.
633 * 664 *
634 * The locking order between these classes is 665 * The locking order between these classes is
635 * parent -> child -> normal -> xattr -> second non-directory 666 * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
636 */ 667 */
637enum inode_i_mutex_lock_class 668enum inode_i_mutex_lock_class
638{ 669{
@@ -640,7 +671,8 @@ enum inode_i_mutex_lock_class
640 I_MUTEX_PARENT, 671 I_MUTEX_PARENT,
641 I_MUTEX_CHILD, 672 I_MUTEX_CHILD,
642 I_MUTEX_XATTR, 673 I_MUTEX_XATTR,
643 I_MUTEX_NONDIR2 674 I_MUTEX_NONDIR2,
675 I_MUTEX_PARENT2,
644}; 676};
645 677
646void lock_two_nondirectories(struct inode *, struct inode*); 678void lock_two_nondirectories(struct inode *, struct inode*);
@@ -775,7 +807,6 @@ struct file {
775 struct rcu_head fu_rcuhead; 807 struct rcu_head fu_rcuhead;
776 } f_u; 808 } f_u;
777 struct path f_path; 809 struct path f_path;
778#define f_dentry f_path.dentry
779 struct inode *f_inode; /* cached value */ 810 struct inode *f_inode; /* cached value */
780 const struct file_operations *f_op; 811 const struct file_operations *f_op;
781 812
@@ -851,13 +882,7 @@ static inline struct file *get_file(struct file *f)
851 */ 882 */
852#define FILE_LOCK_DEFERRED 1 883#define FILE_LOCK_DEFERRED 1
853 884
854/* 885/* legacy typedef, should eventually be removed */
855 * The POSIX file lock owner is determined by
856 * the "struct files_struct" in the thread group
857 * (or NULL for no owner - BSD locks).
858 *
859 * Lockd stuffs a "host" pointer into this.
860 */
861typedef void *fl_owner_t; 886typedef void *fl_owner_t;
862 887
863struct file_lock_operations { 888struct file_lock_operations {
@@ -868,10 +893,13 @@ struct file_lock_operations {
868struct lock_manager_operations { 893struct lock_manager_operations {
869 int (*lm_compare_owner)(struct file_lock *, struct file_lock *); 894 int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
870 unsigned long (*lm_owner_key)(struct file_lock *); 895 unsigned long (*lm_owner_key)(struct file_lock *);
896 void (*lm_get_owner)(struct file_lock *, struct file_lock *);
897 void (*lm_put_owner)(struct file_lock *);
871 void (*lm_notify)(struct file_lock *); /* unblock callback */ 898 void (*lm_notify)(struct file_lock *); /* unblock callback */
872 int (*lm_grant)(struct file_lock *, struct file_lock *, int); 899 int (*lm_grant)(struct file_lock *, int);
873 void (*lm_break)(struct file_lock *); 900 bool (*lm_break)(struct file_lock *);
874 int (*lm_change)(struct file_lock **, int); 901 int (*lm_change)(struct file_lock **, int, struct list_head *);
902 void (*lm_setup)(struct file_lock *, void **);
875}; 903};
876 904
877struct lock_manager { 905struct lock_manager {
@@ -966,7 +994,7 @@ void locks_free_lock(struct file_lock *fl);
966extern void locks_init_lock(struct file_lock *); 994extern void locks_init_lock(struct file_lock *);
967extern struct file_lock * locks_alloc_lock(void); 995extern struct file_lock * locks_alloc_lock(void);
968extern void locks_copy_lock(struct file_lock *, struct file_lock *); 996extern void locks_copy_lock(struct file_lock *, struct file_lock *);
969extern void __locks_copy_lock(struct file_lock *, const struct file_lock *); 997extern void locks_copy_conflock(struct file_lock *, struct file_lock *);
970extern void locks_remove_posix(struct file *, fl_owner_t); 998extern void locks_remove_posix(struct file *, fl_owner_t);
971extern void locks_remove_file(struct file *); 999extern void locks_remove_file(struct file *);
972extern void locks_release_private(struct file_lock *); 1000extern void locks_release_private(struct file_lock *);
@@ -980,11 +1008,9 @@ extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl);
980extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); 1008extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
981extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type); 1009extern int __break_lease(struct inode *inode, unsigned int flags, unsigned int type);
982extern void lease_get_mtime(struct inode *, struct timespec *time); 1010extern void lease_get_mtime(struct inode *, struct timespec *time);
983extern int generic_setlease(struct file *, long, struct file_lock **); 1011extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
984extern int vfs_setlease(struct file *, long, struct file_lock **); 1012extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
985extern int lease_modify(struct file_lock **, int); 1013extern int lease_modify(struct file_lock **, int, struct list_head *);
986extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
987extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
988#else /* !CONFIG_FILE_LOCKING */ 1014#else /* !CONFIG_FILE_LOCKING */
989static inline int fcntl_getlk(struct file *file, unsigned int cmd, 1015static inline int fcntl_getlk(struct file *file, unsigned int cmd,
990 struct flock __user *user) 1016 struct flock __user *user)
@@ -1013,12 +1039,12 @@ static inline int fcntl_setlk64(unsigned int fd, struct file *file,
1013#endif 1039#endif
1014static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg) 1040static inline int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
1015{ 1041{
1016 return 0; 1042 return -EINVAL;
1017} 1043}
1018 1044
1019static inline int fcntl_getlease(struct file *filp) 1045static inline int fcntl_getlease(struct file *filp)
1020{ 1046{
1021 return 0; 1047 return F_UNLCK;
1022} 1048}
1023 1049
1024static inline void locks_init_lock(struct file_lock *fl) 1050static inline void locks_init_lock(struct file_lock *fl)
@@ -1026,7 +1052,7 @@ static inline void locks_init_lock(struct file_lock *fl)
1026 return; 1052 return;
1027} 1053}
1028 1054
1029static inline void __locks_copy_lock(struct file_lock *new, struct file_lock *fl) 1055static inline void locks_copy_conflock(struct file_lock *new, struct file_lock *fl)
1030{ 1056{
1031 return; 1057 return;
1032} 1058}
@@ -1100,33 +1126,22 @@ static inline void lease_get_mtime(struct inode *inode, struct timespec *time)
1100} 1126}
1101 1127
1102static inline int generic_setlease(struct file *filp, long arg, 1128static inline int generic_setlease(struct file *filp, long arg,
1103 struct file_lock **flp) 1129 struct file_lock **flp, void **priv)
1104{ 1130{
1105 return -EINVAL; 1131 return -EINVAL;
1106} 1132}
1107 1133
1108static inline int vfs_setlease(struct file *filp, long arg, 1134static inline int vfs_setlease(struct file *filp, long arg,
1109 struct file_lock **lease) 1135 struct file_lock **lease, void **priv)
1110{ 1136{
1111 return -EINVAL; 1137 return -EINVAL;
1112} 1138}
1113 1139
1114static inline int lease_modify(struct file_lock **before, int arg) 1140static inline int lease_modify(struct file_lock **before, int arg,
1141 struct list_head *dispose)
1115{ 1142{
1116 return -EINVAL; 1143 return -EINVAL;
1117} 1144}
1118
1119static inline int lock_may_read(struct inode *inode, loff_t start,
1120 unsigned long len)
1121{
1122 return 1;
1123}
1124
1125static inline int lock_may_write(struct inode *inode, loff_t start,
1126 unsigned long len)
1127{
1128 return 1;
1129}
1130#endif /* !CONFIG_FILE_LOCKING */ 1145#endif /* !CONFIG_FILE_LOCKING */
1131 1146
1132 1147
@@ -1151,8 +1166,8 @@ extern void fasync_free(struct fasync_struct *);
1151/* can be called from interrupts */ 1166/* can be called from interrupts */
1152extern void kill_fasync(struct fasync_struct **, int, int); 1167extern void kill_fasync(struct fasync_struct **, int, int);
1153 1168
1154extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force); 1169extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force);
1155extern int f_setown(struct file *filp, unsigned long arg, int force); 1170extern void f_setown(struct file *filp, unsigned long arg, int force);
1156extern void f_delown(struct file *filp); 1171extern void f_delown(struct file *filp);
1157extern pid_t f_getown(struct file *filp); 1172extern pid_t f_getown(struct file *filp);
1158extern int send_sigurg(struct fown_struct *fown); 1173extern int send_sigurg(struct fown_struct *fown);
@@ -1226,6 +1241,7 @@ struct super_block {
1226 struct backing_dev_info *s_bdi; 1241 struct backing_dev_info *s_bdi;
1227 struct mtd_info *s_mtd; 1242 struct mtd_info *s_mtd;
1228 struct hlist_node s_instances; 1243 struct hlist_node s_instances;
1244 unsigned int s_quota_types; /* Bitmask of supported quota types */
1229 struct quota_info s_dquot; /* Diskquota specific options */ 1245 struct quota_info s_dquot; /* Diskquota specific options */
1230 1246
1231 struct sb_writers s_writers; 1247 struct sb_writers s_writers;
@@ -1284,6 +1300,11 @@ struct super_block {
1284 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; 1300 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
1285 struct list_lru s_inode_lru ____cacheline_aligned_in_smp; 1301 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
1286 struct rcu_head rcu; 1302 struct rcu_head rcu;
1303
1304 /*
1305 * Indicates how deep in a filesystem stack this SB is
1306 */
1307 int s_stack_depth;
1287}; 1308};
1288 1309
1289extern struct timespec current_fs_time(struct super_block *sb); 1310extern struct timespec current_fs_time(struct super_block *sb);
@@ -1416,6 +1437,7 @@ extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct ino
1416extern int vfs_rmdir(struct inode *, struct dentry *); 1437extern int vfs_rmdir(struct inode *, struct dentry *);
1417extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); 1438extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
1418extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); 1439extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
1440extern int vfs_whiteout(struct inode *, struct dentry *);
1419 1441
1420/* 1442/*
1421 * VFS dentry helper functions. 1443 * VFS dentry helper functions.
@@ -1463,7 +1485,10 @@ int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags);
1463 * This allows the kernel to read directories into kernel space or 1485 * This allows the kernel to read directories into kernel space or
1464 * to have different dirent layouts depending on the binary type. 1486 * to have different dirent layouts depending on the binary type.
1465 */ 1487 */
1466typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); 1488struct dir_context;
1489typedef int (*filldir_t)(struct dir_context *, const char *, int, loff_t, u64,
1490 unsigned);
1491
1467struct dir_context { 1492struct dir_context {
1468 const filldir_t actor; 1493 const filldir_t actor;
1469 loff_t pos; 1494 loff_t pos;
@@ -1493,6 +1518,7 @@ struct file_operations {
1493 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); 1518 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
1494 long (*compat_ioctl) (struct file *, unsigned int, unsigned long); 1519 long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
1495 int (*mmap) (struct file *, struct vm_area_struct *); 1520 int (*mmap) (struct file *, struct vm_area_struct *);
1521 void (*mremap)(struct file *, struct vm_area_struct *);
1496 int (*open) (struct inode *, struct file *); 1522 int (*open) (struct inode *, struct file *);
1497 int (*flush) (struct file *, fl_owner_t id); 1523 int (*flush) (struct file *, fl_owner_t id);
1498 int (*release) (struct inode *, struct file *); 1524 int (*release) (struct inode *, struct file *);
@@ -1506,10 +1532,10 @@ struct file_operations {
1506 int (*flock) (struct file *, int, struct file_lock *); 1532 int (*flock) (struct file *, int, struct file_lock *);
1507 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); 1533 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
1508 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); 1534 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
1509 int (*setlease)(struct file *, long, struct file_lock **); 1535 int (*setlease)(struct file *, long, struct file_lock **, void **);
1510 long (*fallocate)(struct file *file, int mode, loff_t offset, 1536 long (*fallocate)(struct file *file, int mode, loff_t offset,
1511 loff_t len); 1537 loff_t len);
1512 int (*show_fdinfo)(struct seq_file *m, struct file *f); 1538 void (*show_fdinfo)(struct seq_file *m, struct file *f);
1513}; 1539};
1514 1540
1515struct inode_operations { 1541struct inode_operations {
@@ -1546,6 +1572,9 @@ struct inode_operations {
1546 umode_t create_mode, int *opened); 1572 umode_t create_mode, int *opened);
1547 int (*tmpfile) (struct inode *, struct dentry *, umode_t); 1573 int (*tmpfile) (struct inode *, struct dentry *, umode_t);
1548 int (*set_acl)(struct inode *, struct posix_acl *, int); 1574 int (*set_acl)(struct inode *, struct posix_acl *, int);
1575
1576 /* WARNING: probably going away soon, do not use! */
1577 int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
1549} ____cacheline_aligned; 1578} ____cacheline_aligned;
1550 1579
1551ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, 1580ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -1553,6 +1582,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
1553 struct iovec *fast_pointer, 1582 struct iovec *fast_pointer,
1554 struct iovec **ret_pointer); 1583 struct iovec **ret_pointer);
1555 1584
1585extern ssize_t __vfs_read(struct file *, char __user *, size_t, loff_t *);
1556extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); 1586extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
1557extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); 1587extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
1558extern ssize_t vfs_readv(struct file *, const struct iovec __user *, 1588extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
@@ -1570,7 +1600,9 @@ struct super_operations {
1570 void (*evict_inode) (struct inode *); 1600 void (*evict_inode) (struct inode *);
1571 void (*put_super) (struct super_block *); 1601 void (*put_super) (struct super_block *);
1572 int (*sync_fs)(struct super_block *sb, int wait); 1602 int (*sync_fs)(struct super_block *sb, int wait);
1603 int (*freeze_super) (struct super_block *);
1573 int (*freeze_fs) (struct super_block *); 1604 int (*freeze_fs) (struct super_block *);
1605 int (*thaw_super) (struct super_block *);
1574 int (*unfreeze_fs) (struct super_block *); 1606 int (*unfreeze_fs) (struct super_block *);
1575 int (*statfs) (struct dentry *, struct kstatfs *); 1607 int (*statfs) (struct dentry *, struct kstatfs *);
1576 int (*remount_fs) (struct super_block *, int *, char *); 1608 int (*remount_fs) (struct super_block *, int *, char *);
@@ -1583,6 +1615,7 @@ struct super_operations {
1583#ifdef CONFIG_QUOTA 1615#ifdef CONFIG_QUOTA
1584 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); 1616 ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
1585 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); 1617 ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
1618 struct dquot **(*get_dquots)(struct inode *);
1586#endif 1619#endif
1587 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); 1620 int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
1588 long (*nr_cached_objects)(struct super_block *, int); 1621 long (*nr_cached_objects)(struct super_block *, int);
@@ -1643,6 +1676,9 @@ struct super_operations {
1643#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) 1676#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
1644#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) 1677#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
1645 1678
1679#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
1680 (inode)->i_rdev == WHITEOUT_DEV)
1681
1646/* 1682/*
1647 * Inode state bits. Protected by inode->i_lock 1683 * Inode state bits. Protected by inode->i_lock
1648 * 1684 *
@@ -1855,7 +1891,8 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
1855extern void kern_unmount(struct vfsmount *mnt); 1891extern void kern_unmount(struct vfsmount *mnt);
1856extern int may_umount_tree(struct vfsmount *); 1892extern int may_umount_tree(struct vfsmount *);
1857extern int may_umount(struct vfsmount *); 1893extern int may_umount(struct vfsmount *);
1858extern long do_mount(const char *, const char *, const char *, unsigned long, void *); 1894extern long do_mount(const char *, const char __user *,
1895 const char *, unsigned long, void *);
1859extern struct vfsmount *collect_mounts(struct path *); 1896extern struct vfsmount *collect_mounts(struct path *);
1860extern void drop_collected_mounts(struct vfsmount *); 1897extern void drop_collected_mounts(struct vfsmount *);
1861extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, 1898extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *,
@@ -1874,7 +1911,7 @@ extern int current_umask(void);
1874extern void ihold(struct inode * inode); 1911extern void ihold(struct inode * inode);
1875extern void iput(struct inode *); 1912extern void iput(struct inode *);
1876 1913
1877static inline struct inode *file_inode(struct file *f) 1914static inline struct inode *file_inode(const struct file *f)
1878{ 1915{
1879 return f->f_inode; 1916 return f->f_inode;
1880} 1917}
@@ -2049,7 +2086,7 @@ struct filename {
2049extern long vfs_truncate(struct path *, loff_t); 2086extern long vfs_truncate(struct path *, loff_t);
2050extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, 2087extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
2051 struct file *filp); 2088 struct file *filp);
2052extern int do_fallocate(struct file *file, int mode, loff_t offset, 2089extern int vfs_fallocate(struct file *file, int mode, loff_t offset,
2053 loff_t len); 2090 loff_t len);
2054extern long do_sys_open(int dfd, const char __user *filename, int flags, 2091extern long do_sys_open(int dfd, const char __user *filename, int flags,
2055 umode_t mode); 2092 umode_t mode);
@@ -2057,9 +2094,11 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
2057extern struct file *filp_open(const char *, int, umode_t); 2094extern struct file *filp_open(const char *, int, umode_t);
2058extern struct file *file_open_root(struct dentry *, struct vfsmount *, 2095extern struct file *file_open_root(struct dentry *, struct vfsmount *,
2059 const char *, int); 2096 const char *, int);
2097extern int vfs_open(const struct path *, struct file *, const struct cred *);
2060extern struct file * dentry_open(const struct path *, int, const struct cred *); 2098extern struct file * dentry_open(const struct path *, int, const struct cred *);
2061extern int filp_close(struct file *, fl_owner_t id); 2099extern int filp_close(struct file *, fl_owner_t id);
2062 2100
2101extern struct filename *getname_flags(const char __user *, int, int *);
2063extern struct filename *getname(const char __user *); 2102extern struct filename *getname(const char __user *);
2064extern struct filename *getname_kernel(const char *); 2103extern struct filename *getname_kernel(const char *);
2065 2104
@@ -2137,7 +2176,6 @@ static inline int sb_is_blkdev_sb(struct super_block *sb)
2137extern int sync_filesystem(struct super_block *); 2176extern int sync_filesystem(struct super_block *);
2138extern const struct file_operations def_blk_fops; 2177extern const struct file_operations def_blk_fops;
2139extern const struct file_operations def_chr_fops; 2178extern const struct file_operations def_chr_fops;
2140extern const struct file_operations bad_sock_fops;
2141#ifdef CONFIG_BLOCK 2179#ifdef CONFIG_BLOCK
2142extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); 2180extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
2143extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); 2181extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
@@ -2270,7 +2308,9 @@ extern sector_t bmap(struct inode *, sector_t);
2270#endif 2308#endif
2271extern int notify_change(struct dentry *, struct iattr *, struct inode **); 2309extern int notify_change(struct dentry *, struct iattr *, struct inode **);
2272extern int inode_permission(struct inode *, int); 2310extern int inode_permission(struct inode *, int);
2311extern int __inode_permission(struct inode *, int);
2273extern int generic_permission(struct inode *, int); 2312extern int generic_permission(struct inode *, int);
2313extern int __check_sticky(struct inode *dir, struct inode *inode);
2274 2314
2275static inline bool execute_ok(struct inode *inode) 2315static inline bool execute_ok(struct inode *inode)
2276{ 2316{
@@ -2455,6 +2495,7 @@ extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
2455extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); 2495extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
2456 2496
2457/* fs/block_dev.c */ 2497/* fs/block_dev.c */
2498extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
2458extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); 2499extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
2459extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, 2500extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
2460 int datasync); 2501 int datasync);
@@ -2469,6 +2510,9 @@ extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
2469 struct file *, loff_t *, size_t, unsigned int); 2510 struct file *, loff_t *, size_t, unsigned int);
2470extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 2511extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
2471 struct file *out, loff_t *, size_t len, unsigned int flags); 2512 struct file *out, loff_t *, size_t len, unsigned int flags);
2513extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
2514 loff_t *opos, size_t len, unsigned int flags);
2515
2472 2516
2473extern void 2517extern void
2474file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); 2518file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
@@ -2611,6 +2655,7 @@ extern int simple_write_end(struct file *file, struct address_space *mapping,
2611 struct page *page, void *fsdata); 2655 struct page *page, void *fsdata);
2612extern int always_delete_dentry(const struct dentry *); 2656extern int always_delete_dentry(const struct dentry *);
2613extern struct inode *alloc_anon_inode(struct super_block *); 2657extern struct inode *alloc_anon_inode(struct super_block *);
2658extern int simple_nosetlease(struct file *, long, struct file_lock **, void **);
2614extern const struct dentry_operations simple_dentry_operations; 2659extern const struct dentry_operations simple_dentry_operations;
2615 2660
2616extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags); 2661extern struct dentry *simple_lookup(struct inode *, struct dentry *, unsigned int flags);
@@ -2753,12 +2798,25 @@ static inline int is_sxid(umode_t mode)
2753 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); 2798 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
2754} 2799}
2755 2800
2801static inline int check_sticky(struct inode *dir, struct inode *inode)
2802{
2803 if (!(dir->i_mode & S_ISVTX))
2804 return 0;
2805
2806 return __check_sticky(dir, inode);
2807}
2808
2756static inline void inode_has_no_xattr(struct inode *inode) 2809static inline void inode_has_no_xattr(struct inode *inode)
2757{ 2810{
2758 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC)) 2811 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
2759 inode->i_flags |= S_NOSEC; 2812 inode->i_flags |= S_NOSEC;
2760} 2813}
2761 2814
2815static inline bool is_root_inode(struct inode *inode)
2816{
2817 return inode == inode->i_sb->s_root->d_inode;
2818}
2819
2762static inline bool dir_emit(struct dir_context *ctx, 2820static inline bool dir_emit(struct dir_context *ctx,
2763 const char *name, int namelen, 2821 const char *name, int namelen,
2764 u64 ino, unsigned type) 2822 u64 ino, unsigned type)
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
index efb05961bdd8..77d783f71527 100644
--- a/include/linux/fs_enet_pd.h
+++ b/include/linux/fs_enet_pd.h
@@ -139,7 +139,6 @@ struct fs_platform_info {
139 int rx_ring, tx_ring; /* number of buffers on rx */ 139 int rx_ring, tx_ring; /* number of buffers on rx */
140 __u8 macaddr[ETH_ALEN]; /* mac address */ 140 __u8 macaddr[ETH_ALEN]; /* mac address */
141 int rx_copybreak; /* limit we copy small frames */ 141 int rx_copybreak; /* limit we copy small frames */
142 int use_napi; /* use NAPI */
143 int napi_weight; /* NAPI weight */ 142 int napi_weight; /* NAPI weight */
144 143
145 int use_rmii; /* use RMII mode */ 144 int use_rmii; /* use RMII mode */
diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h
index f49ddb1b2273..bf0321eabbda 100644
--- a/include/linux/fsl_ifc.h
+++ b/include/linux/fsl_ifc.h
@@ -29,7 +29,16 @@
29#include <linux/of_platform.h> 29#include <linux/of_platform.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31 31
32#define FSL_IFC_BANK_COUNT 4 32/*
33 * The actual number of banks implemented depends on the IFC version
34 * - IFC version 1.0 implements 4 banks.
35 * - IFC version 1.1 onward implements 8 banks.
36 */
37#define FSL_IFC_BANK_COUNT 8
38
39#define FSL_IFC_VERSION_MASK 0x0F0F0000
40#define FSL_IFC_VERSION_1_0_0 0x01000000
41#define FSL_IFC_VERSION_1_1_0 0x01010000
33 42
34/* 43/*
35 * CSPR - Chip Select Property Register 44 * CSPR - Chip Select Property Register
@@ -776,23 +785,23 @@ struct fsl_ifc_regs {
776 __be32 cspr; 785 __be32 cspr;
777 u32 res2; 786 u32 res2;
778 } cspr_cs[FSL_IFC_BANK_COUNT]; 787 } cspr_cs[FSL_IFC_BANK_COUNT];
779 u32 res3[0x19]; 788 u32 res3[0xd];
780 struct { 789 struct {
781 __be32 amask; 790 __be32 amask;
782 u32 res4[0x2]; 791 u32 res4[0x2];
783 } amask_cs[FSL_IFC_BANK_COUNT]; 792 } amask_cs[FSL_IFC_BANK_COUNT];
784 u32 res5[0x17]; 793 u32 res5[0xc];
785 struct { 794 struct {
786 __be32 csor_ext;
787 __be32 csor; 795 __be32 csor;
796 __be32 csor_ext;
788 u32 res6; 797 u32 res6;
789 } csor_cs[FSL_IFC_BANK_COUNT]; 798 } csor_cs[FSL_IFC_BANK_COUNT];
790 u32 res7[0x19]; 799 u32 res7[0xc];
791 struct { 800 struct {
792 __be32 ftim[4]; 801 __be32 ftim[4];
793 u32 res8[0x8]; 802 u32 res8[0x8];
794 } ftim_cs[FSL_IFC_BANK_COUNT]; 803 } ftim_cs[FSL_IFC_BANK_COUNT];
795 u32 res9[0x60]; 804 u32 res9[0x30];
796 __be32 rb_stat; 805 __be32 rb_stat;
797 u32 res10[0x2]; 806 u32 res10[0x2];
798 __be32 ifc_gcr; 807 __be32 ifc_gcr;
@@ -827,6 +836,8 @@ struct fsl_ifc_ctrl {
827 int nand_irq; 836 int nand_irq;
828 spinlock_t lock; 837 spinlock_t lock;
829 void *nand; 838 void *nand;
839 int version;
840 int banks;
830 841
831 u32 nand_stat; 842 u32 nand_stat;
832 wait_queue_head_t nand_wait; 843 wait_queue_head_t nand_wait;
diff --git a/include/linux/fsldma.h b/include/linux/fsldma.h
new file mode 100644
index 000000000000..b213c02963c9
--- /dev/null
+++ b/include/linux/fsldma.h
@@ -0,0 +1,13 @@
1/*
2 * This is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 */
7
8#ifndef FSL_DMA_H
9#define FSL_DMA_H
10/* fsl dma API for enxternal start */
11int fsl_dma_external_start(struct dma_chan *dchan, int enable);
12
13#endif
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index ca060d7c4fa6..0f313f93c586 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -197,24 +197,6 @@ struct fsnotify_group {
197#define FSNOTIFY_EVENT_INODE 2 197#define FSNOTIFY_EVENT_INODE 2
198 198
199/* 199/*
200 * Inode specific fields in an fsnotify_mark
201 */
202struct fsnotify_inode_mark {
203 struct inode *inode; /* inode this mark is associated with */
204 struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */
205 struct list_head free_i_list; /* tmp list used when freeing this mark */
206};
207
208/*
209 * Mount point specific fields in an fsnotify_mark
210 */
211struct fsnotify_vfsmount_mark {
212 struct vfsmount *mnt; /* vfsmount this mark is associated with */
213 struct hlist_node m_list; /* list of marks by inode->i_fsnotify_marks */
214 struct list_head free_m_list; /* tmp list used when freeing this mark */
215};
216
217/*
218 * a mark is simply an object attached to an in core inode which allows an 200 * a mark is simply an object attached to an in core inode which allows an
219 * fsnotify listener to indicate they are either no longer interested in events 201 * fsnotify listener to indicate they are either no longer interested in events
220 * of a type matching mask or only interested in those events. 202 * of a type matching mask or only interested in those events.
@@ -230,11 +212,17 @@ struct fsnotify_mark {
230 * in kernel that found and may be using this mark. */ 212 * in kernel that found and may be using this mark. */
231 atomic_t refcnt; /* active things looking at this mark */ 213 atomic_t refcnt; /* active things looking at this mark */
232 struct fsnotify_group *group; /* group this mark is for */ 214 struct fsnotify_group *group; /* group this mark is for */
233 struct list_head g_list; /* list of marks by group->i_fsnotify_marks */ 215 struct list_head g_list; /* list of marks by group->i_fsnotify_marks
216 * Also reused for queueing mark into
217 * destroy_list when it's waiting for
218 * the end of SRCU period before it can
219 * be freed */
234 spinlock_t lock; /* protect group and inode */ 220 spinlock_t lock; /* protect group and inode */
221 struct hlist_node obj_list; /* list of marks for inode / vfsmount */
222 struct list_head free_list; /* tmp list used when freeing this mark */
235 union { 223 union {
236 struct fsnotify_inode_mark i; 224 struct inode *inode; /* inode this mark is associated with */
237 struct fsnotify_vfsmount_mark m; 225 struct vfsmount *mnt; /* vfsmount this mark is associated with */
238 }; 226 };
239 __u32 ignored_mask; /* events types to ignore */ 227 __u32 ignored_mask; /* events types to ignore */
240#define FSNOTIFY_MARK_FLAG_INODE 0x01 228#define FSNOTIFY_MARK_FLAG_INODE 0x01
@@ -243,7 +231,6 @@ struct fsnotify_mark {
243#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 231#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
244#define FSNOTIFY_MARK_FLAG_ALIVE 0x10 232#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
245 unsigned int flags; /* vfsmount or inode mark? */ 233 unsigned int flags; /* vfsmount or inode mark? */
246 struct list_head destroy_list;
247 void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ 234 void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
248}; 235};
249 236
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index f0b0edbf55a9..1da602982cf9 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -39,6 +39,12 @@
39# define FTRACE_FORCE_LIST_FUNC 0 39# define FTRACE_FORCE_LIST_FUNC 0
40#endif 40#endif
41 41
42/* Main tracing buffer and events set up */
43#ifdef CONFIG_TRACING
44void trace_init(void);
45#else
46static inline void trace_init(void) { }
47#endif
42 48
43struct module; 49struct module;
44struct ftrace_hash; 50struct ftrace_hash;
@@ -56,9 +62,16 @@ struct ftrace_ops;
56typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, 62typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
57 struct ftrace_ops *op, struct pt_regs *regs); 63 struct ftrace_ops *op, struct pt_regs *regs);
58 64
65ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
66
59/* 67/*
60 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are 68 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
61 * set in the flags member. 69 * set in the flags member.
70 * CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
71 * IPMODIFY are a kind of attribute flags which can be set only before
72 * registering the ftrace_ops, and can not be modified while registered.
 73 * Changing those attribute flags after registering ftrace_ops will
74 * cause unexpected results.
62 * 75 *
63 * ENABLED - set/unset when ftrace_ops is registered/unregistered 76 * ENABLED - set/unset when ftrace_ops is registered/unregistered
64 * DYNAMIC - set when ftrace_ops is registered to denote dynamically 77 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
@@ -89,6 +102,20 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
89 * INITIALIZED - The ftrace_ops has already been initialized (first use time 102 * INITIALIZED - The ftrace_ops has already been initialized (first use time
90 * register_ftrace_function() is called, it will initialized the ops) 103 * register_ftrace_function() is called, it will initialized the ops)
91 * DELETED - The ops are being deleted, do not let them be registered again. 104 * DELETED - The ops are being deleted, do not let them be registered again.
105 * ADDING - The ops is in the process of being added.
106 * REMOVING - The ops is in the process of being removed.
107 * MODIFYING - The ops is in the process of changing its filter functions.
108 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
109 * The arch specific code sets this flag when it allocated a
110 * trampoline. This lets the arch know that it can update the
111 * trampoline in case the callback function changes.
112 * The ftrace_ops trampoline can be set by the ftrace users, and
113 * in such cases the arch must not modify it. Only the arch ftrace
114 * core code should set this flag.
115 * IPMODIFY - The ops can modify the IP register. This can only be set with
116 * SAVE_REGS. If another ops with this flag set is already registered
117 * for any of the functions that this ops will be registered for, then
118 * this ops will fail to register or set_filter_ip.
92 */ 119 */
93enum { 120enum {
94 FTRACE_OPS_FL_ENABLED = 1 << 0, 121 FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -100,6 +127,11 @@ enum {
100 FTRACE_OPS_FL_STUB = 1 << 6, 127 FTRACE_OPS_FL_STUB = 1 << 6,
101 FTRACE_OPS_FL_INITIALIZED = 1 << 7, 128 FTRACE_OPS_FL_INITIALIZED = 1 << 7,
102 FTRACE_OPS_FL_DELETED = 1 << 8, 129 FTRACE_OPS_FL_DELETED = 1 << 8,
130 FTRACE_OPS_FL_ADDING = 1 << 9,
131 FTRACE_OPS_FL_REMOVING = 1 << 10,
132 FTRACE_OPS_FL_MODIFYING = 1 << 11,
133 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
134 FTRACE_OPS_FL_IPMODIFY = 1 << 13,
103}; 135};
104 136
105#ifdef CONFIG_DYNAMIC_FTRACE 137#ifdef CONFIG_DYNAMIC_FTRACE
@@ -132,8 +164,9 @@ struct ftrace_ops {
132 int nr_trampolines; 164 int nr_trampolines;
133 struct ftrace_ops_hash local_hash; 165 struct ftrace_ops_hash local_hash;
134 struct ftrace_ops_hash *func_hash; 166 struct ftrace_ops_hash *func_hash;
135 struct ftrace_hash *tramp_hash; 167 struct ftrace_ops_hash old_hash;
136 unsigned long trampoline; 168 unsigned long trampoline;
169 unsigned long trampoline_size;
137#endif 170#endif
138}; 171};
139 172
@@ -247,7 +280,9 @@ struct ftrace_func_command {
247int ftrace_arch_code_modify_prepare(void); 280int ftrace_arch_code_modify_prepare(void);
248int ftrace_arch_code_modify_post_process(void); 281int ftrace_arch_code_modify_post_process(void);
249 282
250void ftrace_bug(int err, unsigned long ip); 283struct dyn_ftrace;
284
285void ftrace_bug(int err, struct dyn_ftrace *rec);
251 286
252struct seq_file; 287struct seq_file;
253 288
@@ -279,6 +314,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
279 314
280extern int ftrace_nr_registered_ops(void); 315extern int ftrace_nr_registered_ops(void);
281 316
317bool is_ftrace_trampoline(unsigned long addr);
318
282/* 319/*
283 * The dyn_ftrace record's flags field is split into two parts. 320 * The dyn_ftrace record's flags field is split into two parts.
284 * the first part which is '0-FTRACE_REF_MAX' is a counter of 321 * the first part which is '0-FTRACE_REF_MAX' is a counter of
@@ -289,6 +326,7 @@ extern int ftrace_nr_registered_ops(void);
289 * ENABLED - the function is being traced 326 * ENABLED - the function is being traced
290 * REGS - the record wants the function to save regs 327 * REGS - the record wants the function to save regs
291 * REGS_EN - the function is set up to save regs. 328 * REGS_EN - the function is set up to save regs.
329 * IPMODIFY - the record allows for the IP address to be changed.
292 * 330 *
293 * When a new ftrace_ops is registered and wants a function to save 331 * When a new ftrace_ops is registered and wants a function to save
294 * pt_regs, the rec->flag REGS is set. When the function has been 332 * pt_regs, the rec->flag REGS is set. When the function has been
@@ -302,10 +340,11 @@ enum {
302 FTRACE_FL_REGS_EN = (1UL << 29), 340 FTRACE_FL_REGS_EN = (1UL << 29),
303 FTRACE_FL_TRAMP = (1UL << 28), 341 FTRACE_FL_TRAMP = (1UL << 28),
304 FTRACE_FL_TRAMP_EN = (1UL << 27), 342 FTRACE_FL_TRAMP_EN = (1UL << 27),
343 FTRACE_FL_IPMODIFY = (1UL << 26),
305}; 344};
306 345
307#define FTRACE_REF_MAX_SHIFT 27 346#define FTRACE_REF_MAX_SHIFT 26
308#define FTRACE_FL_BITS 5 347#define FTRACE_FL_BITS 6
309#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) 348#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
310#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) 349#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
311#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) 350#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
@@ -578,6 +617,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user
578 size_t cnt, loff_t *ppos) { return -ENODEV; } 617 size_t cnt, loff_t *ppos) { return -ENODEV; }
579static inline int 618static inline int
580ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } 619ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
620
621static inline bool is_ftrace_trampoline(unsigned long addr)
622{
623 return false;
624}
581#endif /* CONFIG_DYNAMIC_FTRACE */ 625#endif /* CONFIG_DYNAMIC_FTRACE */
582 626
583/* totally disable ftrace - can not re-enable after this */ 627/* totally disable ftrace - can not re-enable after this */
@@ -835,6 +879,7 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
835enum ftrace_dump_mode; 879enum ftrace_dump_mode;
836 880
837extern enum ftrace_dump_mode ftrace_dump_on_oops; 881extern enum ftrace_dump_mode ftrace_dump_on_oops;
882extern int tracepoint_printk;
838 883
839extern void disable_trace_on_warning(void); 884extern void disable_trace_on_warning(void);
840extern int __disable_trace_on_warning; 885extern int __disable_trace_on_warning;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 28672e87e910..0bebb5c348b8 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -138,6 +138,17 @@ enum print_line_t {
138 TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ 138 TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
139}; 139};
140 140
141/*
142 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
143 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
144 * simplifies those functions and keeps them in sync.
145 */
146static inline enum print_line_t trace_handle_return(struct trace_seq *s)
147{
148 return trace_seq_has_overflowed(s) ?
149 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
150}
151
141void tracing_generic_entry_update(struct trace_entry *entry, 152void tracing_generic_entry_update(struct trace_entry *entry,
142 unsigned long flags, 153 unsigned long flags,
143 int pc); 154 int pc);
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 1c2fdaa2ffc3..1ccaab44abcc 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -110,6 +110,10 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
110extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, 110extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
111 unsigned long start, unsigned int nr, void *data); 111 unsigned long start, unsigned int nr, void *data);
112 112
113extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
114 unsigned long size, unsigned long start, unsigned int nr,
115 void *data);
116
113extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, 117extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
114 unsigned long start, unsigned int nr, void *data); 118 unsigned long start, unsigned int nr, void *data);
115 119
@@ -117,6 +121,9 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev,
117 int min_alloc_order, int nid); 121 int min_alloc_order, int nid);
118extern struct gen_pool *dev_get_gen_pool(struct device *dev); 122extern struct gen_pool *dev_get_gen_pool(struct device *dev);
119 123
124bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
125 size_t size);
126
120#ifdef CONFIG_OF 127#ifdef CONFIG_OF
121extern struct gen_pool *of_get_named_gen_pool(struct device_node *np, 128extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
122 const char *propname, int index); 129 const char *propname, int index);
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index c0894dd8827b..667c31101b8b 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -178,12 +178,12 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \
178#define __assign(attr_nr, attr_flag, name, nla_type, type, assignment...) \ 178#define __assign(attr_nr, attr_flag, name, nla_type, type, assignment...) \
179 nla = ntb[attr_nr]; \ 179 nla = ntb[attr_nr]; \
180 if (nla) { \ 180 if (nla) { \
181 if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \ 181 if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \
182 pr_info("<< must not change invariant attr: %s\n", #name); \ 182 pr_info("<< must not change invariant attr: %s\n", #name); \
183 return -EEXIST; \ 183 return -EEXIST; \
184 } \ 184 } \
185 assignment; \ 185 assignment; \
186 } else if (exclude_invariants && ((attr_flag) & DRBD_F_INVARIANT)) { \ 186 } else if (exclude_invariants && !!((attr_flag) & DRBD_F_INVARIANT)) { \
187 /* attribute missing from payload, */ \ 187 /* attribute missing from payload, */ \
188 /* which was expected */ \ 188 /* which was expected */ \
189 } else if ((attr_flag) & DRBD_F_REQUIRED) { \ 189 } else if ((attr_flag) & DRBD_F_REQUIRED) { \
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 5e7219dc0fae..b840e3b2770d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -110,11 +110,8 @@ struct vm_area_struct;
110#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \ 110#define GFP_TEMPORARY (__GFP_WAIT | __GFP_IO | __GFP_FS | \
111 __GFP_RECLAIMABLE) 111 __GFP_RECLAIMABLE)
112#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL) 112#define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
113#define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \ 113#define GFP_HIGHUSER (GFP_USER | __GFP_HIGHMEM)
114 __GFP_HIGHMEM) 114#define GFP_HIGHUSER_MOVABLE (GFP_HIGHUSER | __GFP_MOVABLE)
115#define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \
116 __GFP_HARDWALL | __GFP_HIGHMEM | \
117 __GFP_MOVABLE)
118#define GFP_IOFS (__GFP_IO | __GFP_FS) 115#define GFP_IOFS (__GFP_IO | __GFP_FS)
119#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \ 116#define GFP_TRANSHUGE (GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
120 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ 117 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
@@ -156,7 +153,7 @@ struct vm_area_struct;
156#define GFP_DMA32 __GFP_DMA32 153#define GFP_DMA32 __GFP_DMA32
157 154
158/* Convert GFP flags to their corresponding migrate type */ 155/* Convert GFP flags to their corresponding migrate type */
159static inline int allocflags_to_migratetype(gfp_t gfp_flags) 156static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
160{ 157{
161 WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); 158 WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
162 159
@@ -381,8 +378,8 @@ extern void free_kmem_pages(unsigned long addr, unsigned int order);
381 378
382void page_alloc_init(void); 379void page_alloc_init(void);
383void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); 380void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
384void drain_all_pages(void); 381void drain_all_pages(struct zone *zone);
385void drain_local_pages(void *dummy); 382void drain_local_pages(struct zone *zone);
386 383
387/* 384/*
388 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what 385 * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 85aa5d0b9357..ab81339a8590 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -216,14 +216,15 @@ static inline int gpio_to_irq(unsigned gpio)
216 return -EINVAL; 216 return -EINVAL;
217} 217}
218 218
219static inline int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset) 219static inline int gpiochip_lock_as_irq(struct gpio_chip *chip,
220 unsigned int offset)
220{ 221{
221 WARN_ON(1); 222 WARN_ON(1);
222 return -EINVAL; 223 return -EINVAL;
223} 224}
224 225
225static inline void gpio_unlock_as_irq(struct gpio_chip *chip, 226static inline void gpiochip_unlock_as_irq(struct gpio_chip *chip,
226 unsigned int offset) 227 unsigned int offset)
227{ 228{
228 WARN_ON(1); 229 WARN_ON(1);
229} 230}
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 12f146fa6604..fd85cb120ee0 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -66,7 +66,7 @@ __devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
66 unsigned int index, enum gpiod_flags flags); 66 unsigned int index, enum gpiod_flags flags);
67void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); 67void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
68 68
69int gpiod_get_direction(const struct gpio_desc *desc); 69int gpiod_get_direction(struct gpio_desc *desc);
70int gpiod_direction_input(struct gpio_desc *desc); 70int gpiod_direction_input(struct gpio_desc *desc);
71int gpiod_direction_output(struct gpio_desc *desc, int value); 71int gpiod_direction_output(struct gpio_desc *desc, int value);
72int gpiod_direction_output_raw(struct gpio_desc *desc, int value); 72int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
@@ -74,14 +74,24 @@ int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
74/* Value get/set from non-sleeping context */ 74/* Value get/set from non-sleeping context */
75int gpiod_get_value(const struct gpio_desc *desc); 75int gpiod_get_value(const struct gpio_desc *desc);
76void gpiod_set_value(struct gpio_desc *desc, int value); 76void gpiod_set_value(struct gpio_desc *desc, int value);
77void gpiod_set_array(unsigned int array_size,
78 struct gpio_desc **desc_array, int *value_array);
77int gpiod_get_raw_value(const struct gpio_desc *desc); 79int gpiod_get_raw_value(const struct gpio_desc *desc);
78void gpiod_set_raw_value(struct gpio_desc *desc, int value); 80void gpiod_set_raw_value(struct gpio_desc *desc, int value);
81void gpiod_set_raw_array(unsigned int array_size,
82 struct gpio_desc **desc_array, int *value_array);
79 83
80/* Value get/set from sleeping context */ 84/* Value get/set from sleeping context */
81int gpiod_get_value_cansleep(const struct gpio_desc *desc); 85int gpiod_get_value_cansleep(const struct gpio_desc *desc);
82void gpiod_set_value_cansleep(struct gpio_desc *desc, int value); 86void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
87void gpiod_set_array_cansleep(unsigned int array_size,
88 struct gpio_desc **desc_array,
89 int *value_array);
83int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc); 90int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
84void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value); 91void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
92void gpiod_set_raw_array_cansleep(unsigned int array_size,
93 struct gpio_desc **desc_array,
94 int *value_array);
85 95
86int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); 96int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
87 97
@@ -94,6 +104,13 @@ int gpiod_to_irq(const struct gpio_desc *desc);
94struct gpio_desc *gpio_to_desc(unsigned gpio); 104struct gpio_desc *gpio_to_desc(unsigned gpio);
95int desc_to_gpio(const struct gpio_desc *desc); 105int desc_to_gpio(const struct gpio_desc *desc);
96 106
107/* Child properties interface */
108struct fwnode_handle;
109
110struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
111 const char *propname);
112struct gpio_desc *devm_get_gpiod_from_child(struct device *dev,
113 struct fwnode_handle *child);
97#else /* CONFIG_GPIOLIB */ 114#else /* CONFIG_GPIOLIB */
98 115
99static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, 116static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev,
@@ -210,6 +227,13 @@ static inline void gpiod_set_value(struct gpio_desc *desc, int value)
210 /* GPIO can never have been requested */ 227 /* GPIO can never have been requested */
211 WARN_ON(1); 228 WARN_ON(1);
212} 229}
230static inline void gpiod_set_array(unsigned int array_size,
231 struct gpio_desc **desc_array,
232 int *value_array)
233{
234 /* GPIO can never have been requested */
235 WARN_ON(1);
236}
213static inline int gpiod_get_raw_value(const struct gpio_desc *desc) 237static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
214{ 238{
215 /* GPIO can never have been requested */ 239 /* GPIO can never have been requested */
@@ -221,6 +245,13 @@ static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
221 /* GPIO can never have been requested */ 245 /* GPIO can never have been requested */
222 WARN_ON(1); 246 WARN_ON(1);
223} 247}
248static inline void gpiod_set_raw_array(unsigned int array_size,
249 struct gpio_desc **desc_array,
250 int *value_array)
251{
252 /* GPIO can never have been requested */
253 WARN_ON(1);
254}
224 255
225static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) 256static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
226{ 257{
@@ -233,6 +264,13 @@ static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
233 /* GPIO can never have been requested */ 264 /* GPIO can never have been requested */
234 WARN_ON(1); 265 WARN_ON(1);
235} 266}
267static inline void gpiod_set_array_cansleep(unsigned int array_size,
268 struct gpio_desc **desc_array,
269 int *value_array)
270{
271 /* GPIO can never have been requested */
272 WARN_ON(1);
273}
236static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) 274static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
237{ 275{
238 /* GPIO can never have been requested */ 276 /* GPIO can never have been requested */
@@ -245,6 +283,13 @@ static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
245 /* GPIO can never have been requested */ 283 /* GPIO can never have been requested */
246 WARN_ON(1); 284 WARN_ON(1);
247} 285}
286static inline void gpiod_set_raw_array_cansleep(unsigned int array_size,
287 struct gpio_desc **desc_array,
288 int *value_array)
289{
290 /* GPIO can never have been requested */
291 WARN_ON(1);
292}
248 293
249static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) 294static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
250{ 295{
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index e78a2373e374..c497c62889d1 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -32,6 +32,7 @@ struct seq_file;
32 * @get: returns value for signal "offset"; for output signals this 32 * @get: returns value for signal "offset"; for output signals this
33 * returns either the value actually sensed, or zero 33 * returns either the value actually sensed, or zero
34 * @set: assigns output value for signal "offset" 34 * @set: assigns output value for signal "offset"
35 * @set_multiple: assigns output values for multiple signals defined by "mask"
35 * @set_debounce: optional hook for setting debounce time for specified gpio in 36 * @set_debounce: optional hook for setting debounce time for specified gpio in
36 * interrupt triggered gpio chips 37 * interrupt triggered gpio chips
37 * @to_irq: optional hook supporting non-static gpio_to_irq() mappings; 38 * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
@@ -56,6 +57,8 @@ struct seq_file;
56 * as the chip access may sleep when e.g. reading out the IRQ status 57 * as the chip access may sleep when e.g. reading out the IRQ status
57 * registers. 58 * registers.
58 * @exported: flags if the gpiochip is exported for use from sysfs. Private. 59 * @exported: flags if the gpiochip is exported for use from sysfs. Private.
60 * @irq_not_threaded: flag must be set if @can_sleep is set but the
61 * IRQs don't need to be threaded
59 * 62 *
60 * A gpio_chip can help platforms abstract various sources of GPIOs so 63 * A gpio_chip can help platforms abstract various sources of GPIOs so
61 * they can all be accessed through a common programing interface. 64 * they can all be accessed through a common programing interface.
@@ -87,6 +90,9 @@ struct gpio_chip {
87 unsigned offset); 90 unsigned offset);
88 void (*set)(struct gpio_chip *chip, 91 void (*set)(struct gpio_chip *chip,
89 unsigned offset, int value); 92 unsigned offset, int value);
93 void (*set_multiple)(struct gpio_chip *chip,
94 unsigned long *mask,
95 unsigned long *bits);
90 int (*set_debounce)(struct gpio_chip *chip, 96 int (*set_debounce)(struct gpio_chip *chip,
91 unsigned offset, 97 unsigned offset,
92 unsigned debounce); 98 unsigned debounce);
@@ -101,11 +107,12 @@ struct gpio_chip {
101 struct gpio_desc *desc; 107 struct gpio_desc *desc;
102 const char *const *names; 108 const char *const *names;
103 bool can_sleep; 109 bool can_sleep;
110 bool irq_not_threaded;
104 bool exported; 111 bool exported;
105 112
106#ifdef CONFIG_GPIOLIB_IRQCHIP 113#ifdef CONFIG_GPIOLIB_IRQCHIP
107 /* 114 /*
108 * With CONFIG_GPIO_IRQCHIP we get an irqchip inside the gpiolib 115 * With CONFIG_GPIOLIB_IRQCHIP we get an irqchip inside the gpiolib
109 * to handle IRQs for most practical cases. 116 * to handle IRQs for most practical cases.
110 */ 117 */
111 struct irq_chip *irqchip; 118 struct irq_chip *irqchip;
@@ -141,13 +148,13 @@ extern const char *gpiochip_is_requested(struct gpio_chip *chip,
141 148
142/* add/remove chips */ 149/* add/remove chips */
143extern int gpiochip_add(struct gpio_chip *chip); 150extern int gpiochip_add(struct gpio_chip *chip);
144extern int gpiochip_remove(struct gpio_chip *chip); 151extern void gpiochip_remove(struct gpio_chip *chip);
145extern struct gpio_chip *gpiochip_find(void *data, 152extern struct gpio_chip *gpiochip_find(void *data,
146 int (*match)(struct gpio_chip *chip, void *data)); 153 int (*match)(struct gpio_chip *chip, void *data));
147 154
148/* lock/unlock as IRQ */ 155/* lock/unlock as IRQ */
149int gpio_lock_as_irq(struct gpio_chip *chip, unsigned int offset); 156int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset);
150void gpio_unlock_as_irq(struct gpio_chip *chip, unsigned int offset); 157void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset);
151 158
152struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc); 159struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
153 160
@@ -164,9 +171,10 @@ int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
164 irq_flow_handler_t handler, 171 irq_flow_handler_t handler,
165 unsigned int type); 172 unsigned int type);
166 173
167#endif /* CONFIG_GPIO_IRQCHIP */ 174#endif /* CONFIG_GPIOLIB_IRQCHIP */
168 175
169int gpiochip_request_own_desc(struct gpio_desc *desc, const char *label); 176struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
177 const char *label);
170void gpiochip_free_own_desc(struct gpio_desc *desc); 178void gpiochip_free_own_desc(struct gpio_desc *desc);
171 179
172#else /* CONFIG_GPIOLIB */ 180#else /* CONFIG_GPIOLIB */
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 8b622468952c..ee2d8c6f9130 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -2,6 +2,7 @@
2#define _GPIO_KEYS_H 2#define _GPIO_KEYS_H
3 3
4struct device; 4struct device;
5struct gpio_desc;
5 6
6/** 7/**
7 * struct gpio_keys_button - configuration parameters 8 * struct gpio_keys_button - configuration parameters
@@ -17,6 +18,7 @@ struct device;
17 * disable button via sysfs 18 * disable button via sysfs
18 * @value: axis value for %EV_ABS 19 * @value: axis value for %EV_ABS
19 * @irq: Irq number in case of interrupt keys 20 * @irq: Irq number in case of interrupt keys
21 * @gpiod: GPIO descriptor
20 */ 22 */
21struct gpio_keys_button { 23struct gpio_keys_button {
22 unsigned int code; 24 unsigned int code;
@@ -29,6 +31,7 @@ struct gpio_keys_button {
29 bool can_disable; 31 bool can_disable;
30 int value; 32 int value;
31 unsigned int irq; 33 unsigned int irq;
34 struct gpio_desc *gpiod;
32}; 35};
33 36
34/** 37/**
diff --git a/include/linux/hash.h b/include/linux/hash.h
index d0494c399392..1afde47e1528 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -15,7 +15,6 @@
15 */ 15 */
16 16
17#include <asm/types.h> 17#include <asm/types.h>
18#include <asm/hash.h>
19#include <linux/compiler.h> 18#include <linux/compiler.h>
20 19
21/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ 20/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
@@ -84,38 +83,4 @@ static inline u32 hash32_ptr(const void *ptr)
84 return (u32)val; 83 return (u32)val;
85} 84}
86 85
87struct fast_hash_ops {
88 u32 (*hash)(const void *data, u32 len, u32 seed);
89 u32 (*hash2)(const u32 *data, u32 len, u32 seed);
90};
91
92/**
93 * arch_fast_hash - Caclulates a hash over a given buffer that can have
94 * arbitrary size. This function will eventually use an
95 * architecture-optimized hashing implementation if
96 * available, and trades off distribution for speed.
97 *
98 * @data: buffer to hash
99 * @len: length of buffer in bytes
100 * @seed: start seed
101 *
102 * Returns 32bit hash.
103 */
104extern u32 arch_fast_hash(const void *data, u32 len, u32 seed);
105
106/**
107 * arch_fast_hash2 - Caclulates a hash over a given buffer that has a
108 * size that is of a multiple of 32bit words. This
109 * function will eventually use an architecture-
110 * optimized hashing implementation if available,
111 * and trades off distribution for speed.
112 *
113 * @data: buffer to hash (must be 32bit padded)
114 * @len: number of 32bit words
115 * @seed: start seed
116 *
117 * Returns 32bit hash.
118 */
119extern u32 arch_fast_hash2(const u32 *data, u32 len, u32 seed);
120
121#endif /* _LINUX_HASH_H */ 86#endif /* _LINUX_HASH_H */
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 11c0182a153b..cbb5790a35cd 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -1,9 +1,24 @@
1/* 1/*
2 * Copyright (C) 2012 Avionic Design GmbH 2 * Copyright (C) 2012 Avionic Design GmbH
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * it under the terms of the GNU General Public License version 2 as 5 * copy of this software and associated documentation files (the "Software"),
6 * published by the Free Software Foundation. 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
7 */ 22 */
8 23
9#ifndef __LINUX_HDMI_H_ 24#ifndef __LINUX_HDMI_H_
diff --git a/include/linux/hid.h b/include/linux/hid.h
index f53c4a9cca1d..06c4607744f6 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -234,6 +234,33 @@ struct hid_item {
234#define HID_DG_BARRELSWITCH 0x000d0044 234#define HID_DG_BARRELSWITCH 0x000d0044
235#define HID_DG_ERASER 0x000d0045 235#define HID_DG_ERASER 0x000d0045
236#define HID_DG_TABLETPICK 0x000d0046 236#define HID_DG_TABLETPICK 0x000d0046
237
238#define HID_CP_CONSUMERCONTROL 0x000c0001
239#define HID_CP_NUMERICKEYPAD 0x000c0002
240#define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003
241#define HID_CP_MICROPHONE 0x000c0004
242#define HID_CP_HEADPHONE 0x000c0005
243#define HID_CP_GRAPHICEQUALIZER 0x000c0006
244#define HID_CP_FUNCTIONBUTTONS 0x000c0036
245#define HID_CP_SELECTION 0x000c0080
246#define HID_CP_MEDIASELECTION 0x000c0087
247#define HID_CP_SELECTDISC 0x000c00ba
248#define HID_CP_PLAYBACKSPEED 0x000c00f1
249#define HID_CP_PROXIMITY 0x000c0109
250#define HID_CP_SPEAKERSYSTEM 0x000c0160
251#define HID_CP_CHANNELLEFT 0x000c0161
252#define HID_CP_CHANNELRIGHT 0x000c0162
253#define HID_CP_CHANNELCENTER 0x000c0163
254#define HID_CP_CHANNELFRONT 0x000c0164
255#define HID_CP_CHANNELCENTERFRONT 0x000c0165
256#define HID_CP_CHANNELSIDE 0x000c0166
257#define HID_CP_CHANNELSURROUND 0x000c0167
258#define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168
259#define HID_CP_CHANNELTOP 0x000c0169
260#define HID_CP_CHANNELUNKNOWN 0x000c016a
261#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180
262#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200
263
237#define HID_DG_CONFIDENCE 0x000d0047 264#define HID_DG_CONFIDENCE 0x000d0047
238#define HID_DG_WIDTH 0x000d0048 265#define HID_DG_WIDTH 0x000d0048
239#define HID_DG_HEIGHT 0x000d0049 266#define HID_DG_HEIGHT 0x000d0049
@@ -265,6 +292,7 @@ struct hid_item {
265#define HID_CONNECT_HIDDEV 0x08 292#define HID_CONNECT_HIDDEV 0x08
266#define HID_CONNECT_HIDDEV_FORCE 0x10 293#define HID_CONNECT_HIDDEV_FORCE 0x10
267#define HID_CONNECT_FF 0x20 294#define HID_CONNECT_FF 0x20
295#define HID_CONNECT_DRIVER 0x40
268#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \ 296#define HID_CONNECT_DEFAULT (HID_CONNECT_HIDINPUT|HID_CONNECT_HIDRAW| \
269 HID_CONNECT_HIDDEV|HID_CONNECT_FF) 297 HID_CONNECT_HIDDEV|HID_CONNECT_FF)
270 298
@@ -287,6 +315,7 @@ struct hid_item {
287#define HID_QUIRK_HIDINPUT_FORCE 0x00000080 315#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
288#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100 316#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
289#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200 317#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
318#define HID_QUIRK_ALWAYS_POLL 0x00000400
290#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 319#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
291#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 320#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
292#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 321#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000
@@ -310,11 +339,8 @@ struct hid_item {
310 * Vendor specific HID device groups 339 * Vendor specific HID device groups
311 */ 340 */
312#define HID_GROUP_RMI 0x0100 341#define HID_GROUP_RMI 0x0100
313
314/*
315 * Vendor specific HID device groups
316 */
317#define HID_GROUP_WACOM 0x0101 342#define HID_GROUP_WACOM 0x0101
343#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
318 344
319/* 345/*
320 * This is the global environment of the parser. This information is 346 * This is the global environment of the parser. This information is
@@ -440,6 +466,7 @@ struct hid_output_fifo {
440#define HID_CLAIMED_INPUT 1 466#define HID_CLAIMED_INPUT 1
441#define HID_CLAIMED_HIDDEV 2 467#define HID_CLAIMED_HIDDEV 2
442#define HID_CLAIMED_HIDRAW 4 468#define HID_CLAIMED_HIDRAW 4
469#define HID_CLAIMED_DRIVER 8
443 470
444#define HID_STAT_ADDED 1 471#define HID_STAT_ADDED 1
445#define HID_STAT_PARSED 2 472#define HID_STAT_PARSED 2
@@ -1060,6 +1087,17 @@ static inline void hid_hw_wait(struct hid_device *hdev)
1060 hdev->ll_driver->wait(hdev); 1087 hdev->ll_driver->wait(hdev);
1061} 1088}
1062 1089
1090/**
1091 * hid_report_len - calculate the report length
1092 *
1093 * @report: the report we want to know the length
1094 */
1095static inline int hid_report_len(struct hid_report *report)
1096{
1097 /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
1098 return ((report->size - 1) >> 3) + 1 + (report->id > 0);
1099}
1100
1063int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, 1101int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
1064 int interrupt); 1102 int interrupt);
1065 1103
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 63579cb8d3dc..ad9051bab267 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -132,7 +132,7 @@ extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
132static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, 132static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
133 spinlock_t **ptl) 133 spinlock_t **ptl)
134{ 134{
135 VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); 135 VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
136 if (pmd_trans_huge(*pmd)) 136 if (pmd_trans_huge(*pmd))
137 return __pmd_trans_huge_lock(pmd, vma, ptl); 137 return __pmd_trans_huge_lock(pmd, vma, ptl);
138 else 138 else
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6e6d338641fe..431b7fc605c9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -175,6 +175,52 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
175} 175}
176 176
177#endif /* !CONFIG_HUGETLB_PAGE */ 177#endif /* !CONFIG_HUGETLB_PAGE */
178/*
179 * hugepages at page global directory. If arch support
180 * hugepages at pgd level, they need to define this.
181 */
182#ifndef pgd_huge
183#define pgd_huge(x) 0
184#endif
185
186#ifndef pgd_write
187static inline int pgd_write(pgd_t pgd)
188{
189 BUG();
190 return 0;
191}
192#endif
193
194#ifndef pud_write
195static inline int pud_write(pud_t pud)
196{
197 BUG();
198 return 0;
199}
200#endif
201
202#ifndef is_hugepd
203/*
204 * Some architectures requires a hugepage directory format that is
205 * required to support multiple hugepage sizes. For example
206 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
207 * introduced the same on powerpc. This allows for a more flexible hugepage
208 * pagetable layout.
209 */
210typedef struct { unsigned long pd; } hugepd_t;
211#define is_hugepd(hugepd) (0)
212#define __hugepd(x) ((hugepd_t) { (x) })
213static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
214 unsigned pdshift, unsigned long end,
215 int write, struct page **pages, int *nr)
216{
217 return 0;
218}
219#else
220extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
221 unsigned pdshift, unsigned long end,
222 int write, struct page **pages, int *nr);
223#endif
178 224
179#define HUGETLB_ANON_FILE "anon_hugepage" 225#define HUGETLB_ANON_FILE "anon_hugepage"
180 226
@@ -311,7 +357,8 @@ static inline struct hstate *hstate_sizelog(int page_size_log)
311{ 357{
312 if (!page_size_log) 358 if (!page_size_log)
313 return &default_hstate; 359 return &default_hstate;
314 return size_to_hstate(1 << page_size_log); 360
361 return size_to_hstate(1UL << page_size_log);
315} 362}
316 363
317static inline struct hstate *hstate_vma(struct vm_area_struct *vma) 364static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 0129f89cf98d..bcc853eccc85 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -16,7 +16,6 @@
16#define _LINUX_HUGETLB_CGROUP_H 16#define _LINUX_HUGETLB_CGROUP_H
17 17
18#include <linux/mmdebug.h> 18#include <linux/mmdebug.h>
19#include <linux/res_counter.h>
20 19
21struct hugetlb_cgroup; 20struct hugetlb_cgroup;
22/* 21/*
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 08cfaff8a072..476c685ca6f9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -650,6 +650,8 @@ struct vmbus_channel {
650 u8 monitor_grp; 650 u8 monitor_grp;
651 u8 monitor_bit; 651 u8 monitor_bit;
652 652
653 bool rescind; /* got rescind msg */
654
653 u32 ringbuffer_gpadlhandle; 655 u32 ringbuffer_gpadlhandle;
654 656
655 /* Allocated memory for ring buffer */ 657 /* Allocated memory for ring buffer */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b556e0ab946f..e3a1721c8354 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -46,6 +46,8 @@ struct i2c_client;
46struct i2c_driver; 46struct i2c_driver;
47union i2c_smbus_data; 47union i2c_smbus_data;
48struct i2c_board_info; 48struct i2c_board_info;
49enum i2c_slave_event;
50typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *);
49 51
50struct module; 52struct module;
51 53
@@ -209,6 +211,8 @@ struct i2c_driver {
209 * @irq: indicates the IRQ generated by this device (if any) 211 * @irq: indicates the IRQ generated by this device (if any)
210 * @detected: member of an i2c_driver.clients list or i2c-core's 212 * @detected: member of an i2c_driver.clients list or i2c-core's
211 * userspace_devices list 213 * userspace_devices list
214 * @slave_cb: Callback when I2C slave mode of an adapter is used. The adapter
215 * calls it to pass on slave events to the slave driver.
212 * 216 *
213 * An i2c_client identifies a single device (i.e. chip) connected to an 217 * An i2c_client identifies a single device (i.e. chip) connected to an
214 * i2c bus. The behaviour exposed to Linux is defined by the driver 218 * i2c bus. The behaviour exposed to Linux is defined by the driver
@@ -224,6 +228,7 @@ struct i2c_client {
224 struct device dev; /* the device structure */ 228 struct device dev; /* the device structure */
225 int irq; /* irq issued by device */ 229 int irq; /* irq issued by device */
226 struct list_head detected; 230 struct list_head detected;
231 i2c_slave_cb_t slave_cb; /* callback for slave mode */
227}; 232};
228#define to_i2c_client(d) container_of(d, struct i2c_client, dev) 233#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
229 234
@@ -246,6 +251,25 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
246 dev_set_drvdata(&dev->dev, data); 251 dev_set_drvdata(&dev->dev, data);
247} 252}
248 253
254/* I2C slave support */
255
256enum i2c_slave_event {
257 I2C_SLAVE_REQ_READ_START,
258 I2C_SLAVE_REQ_READ_END,
259 I2C_SLAVE_REQ_WRITE_START,
260 I2C_SLAVE_REQ_WRITE_END,
261 I2C_SLAVE_STOP,
262};
263
264extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
265extern int i2c_slave_unregister(struct i2c_client *client);
266
267static inline int i2c_slave_event(struct i2c_client *client,
268 enum i2c_slave_event event, u8 *val)
269{
270 return client->slave_cb(client, event, val);
271}
272
249/** 273/**
250 * struct i2c_board_info - template for device creation 274 * struct i2c_board_info - template for device creation
251 * @type: chip type, to initialize i2c_client.name 275 * @type: chip type, to initialize i2c_client.name
@@ -352,6 +376,8 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
352 * into I2C transfers instead. 376 * into I2C transfers instead.
353 * @functionality: Return the flags that this algorithm/adapter pair supports 377 * @functionality: Return the flags that this algorithm/adapter pair supports
354 * from the I2C_FUNC_* flags. 378 * from the I2C_FUNC_* flags.
379 * @reg_slave: Register given client to I2C slave mode of this adapter
380 * @unreg_slave: Unregister given client from I2C slave mode of this adapter
355 * 381 *
356 * The following structs are for those who like to implement new bus drivers: 382 * The following structs are for those who like to implement new bus drivers:
357 * i2c_algorithm is the interface to a class of hardware solutions which can 383 * i2c_algorithm is the interface to a class of hardware solutions which can
@@ -359,7 +385,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
359 * to name two of the most common. 385 * to name two of the most common.
360 * 386 *
361 * The return codes from the @master_xfer field should indicate the type of 387 * The return codes from the @master_xfer field should indicate the type of
362 * error code that occured during the transfer, as documented in the kernel 388 * error code that occurred during the transfer, as documented in the kernel
363 * Documentation file Documentation/i2c/fault-codes. 389 * Documentation file Documentation/i2c/fault-codes.
364 */ 390 */
365struct i2c_algorithm { 391struct i2c_algorithm {
@@ -377,6 +403,9 @@ struct i2c_algorithm {
377 403
378 /* To determine what the adapter supports */ 404 /* To determine what the adapter supports */
379 u32 (*functionality) (struct i2c_adapter *); 405 u32 (*functionality) (struct i2c_adapter *);
406
407 int (*reg_slave)(struct i2c_client *client);
408 int (*unreg_slave)(struct i2c_client *client);
380}; 409};
381 410
382/** 411/**
diff --git a/include/linux/i2c/pmbus.h b/include/linux/i2c/pmbus.h
index 69280db02c41..ee3c2aba2a8e 100644
--- a/include/linux/i2c/pmbus.h
+++ b/include/linux/i2c/pmbus.h
@@ -40,6 +40,10 @@
40 40
41struct pmbus_platform_data { 41struct pmbus_platform_data {
42 u32 flags; /* Device specific flags */ 42 u32 flags; /* Device specific flags */
43
44 /* regulator support */
45 int num_regulators;
46 struct regulator_init_data *reg_init_data;
43}; 47};
44 48
45#endif /* _PMBUS_H_ */ 49#endif /* _PMBUS_H_ */
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h
index 8cfb50f38529..0bc03f100d04 100644
--- a/include/linux/i2c/twl.h
+++ b/include/linux/i2c/twl.h
@@ -26,7 +26,6 @@
26#define __TWL_H_ 26#define __TWL_H_
27 27
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/phy/phy.h>
30#include <linux/input/matrix_keypad.h> 29#include <linux/input/matrix_keypad.h>
31 30
32/* 31/*
@@ -634,7 +633,6 @@ enum twl4030_usb_mode {
634struct twl4030_usb_data { 633struct twl4030_usb_data {
635 enum twl4030_usb_mode usb_mode; 634 enum twl4030_usb_mode usb_mode;
636 unsigned long features; 635 unsigned long features;
637 struct phy_init_data *init_data;
638 636
639 int (*phy_init)(struct device *dev); 637 int (*phy_init)(struct device *dev);
640 int (*phy_exit)(struct device *dev); 638 int (*phy_exit)(struct device *dev);
diff --git a/include/linux/i82593.h b/include/linux/i82593.h
deleted file mode 100644
index afac5c7a323d..000000000000
--- a/include/linux/i82593.h
+++ /dev/null
@@ -1,229 +0,0 @@
1/*
2 * Definitions for Intel 82593 CSMA/CD Core LAN Controller
3 * The definitions are taken from the 1992 users manual with Intel
4 * order number 297125-001.
5 *
6 * /usr/src/pc/RCS/i82593.h,v 1.1 1996/07/17 15:23:12 root Exp
7 *
8 * Copyright 1994, Anders Klemets <klemets@it.kth.se>
9 *
10 * HISTORY
11 * i82593.h,v
12 * Revision 1.4 2005/11/4 09:15:00 baroniunas
13 * Modified copyright with permission of author as follows:
14 *
15 * "If I82539.H is the only file with my copyright statement
16 * that is included in the Source Forge project, then you have
17 * my approval to change the copyright statement to be a GPL
18 * license, in the way you proposed on October 10."
19 *
20 * Revision 1.1 1996/07/17 15:23:12 root
21 * Initial revision
22 *
23 * Revision 1.3 1995/04/05 15:13:58 adj
24 * Initial alpha release
25 *
26 * Revision 1.2 1994/06/16 23:57:31 klemets
27 * Mirrored all the fields in the configuration block.
28 *
29 * Revision 1.1 1994/06/02 20:25:34 klemets
30 * Initial revision
31 *
32 *
33 */
34#ifndef _I82593_H
35#define _I82593_H
36
37/* Intel 82593 CSMA/CD Core LAN Controller */
38
39/* Port 0 Command Register definitions */
40
41/* Execution operations */
42#define OP0_NOP 0 /* CHNL = 0 */
43#define OP0_SWIT_TO_PORT_1 0 /* CHNL = 1 */
44#define OP0_IA_SETUP 1
45#define OP0_CONFIGURE 2
46#define OP0_MC_SETUP 3
47#define OP0_TRANSMIT 4
48#define OP0_TDR 5
49#define OP0_DUMP 6
50#define OP0_DIAGNOSE 7
51#define OP0_TRANSMIT_NO_CRC 9
52#define OP0_RETRANSMIT 12
53#define OP0_ABORT 13
54/* Reception operations */
55#define OP0_RCV_ENABLE 8
56#define OP0_RCV_DISABLE 10
57#define OP0_STOP_RCV 11
58/* Status pointer control operations */
59#define OP0_FIX_PTR 15 /* CHNL = 1 */
60#define OP0_RLS_PTR 15 /* CHNL = 0 */
61#define OP0_RESET 14
62
63#define CR0_CHNL (1 << 4) /* 0=Channel 0, 1=Channel 1 */
64#define CR0_STATUS_0 0x00
65#define CR0_STATUS_1 0x20
66#define CR0_STATUS_2 0x40
67#define CR0_STATUS_3 0x60
68#define CR0_INT_ACK (1 << 7) /* 0=No ack, 1=acknowledge */
69
70/* Port 0 Status Register definitions */
71
72#define SR0_NO_RESULT 0 /* dummy */
73#define SR0_EVENT_MASK 0x0f
74#define SR0_IA_SETUP_DONE 1
75#define SR0_CONFIGURE_DONE 2
76#define SR0_MC_SETUP_DONE 3
77#define SR0_TRANSMIT_DONE 4
78#define SR0_TDR_DONE 5
79#define SR0_DUMP_DONE 6
80#define SR0_DIAGNOSE_PASSED 7
81#define SR0_TRANSMIT_NO_CRC_DONE 9
82#define SR0_RETRANSMIT_DONE 12
83#define SR0_EXECUTION_ABORTED 13
84#define SR0_END_OF_FRAME 8
85#define SR0_RECEPTION_ABORTED 10
86#define SR0_DIAGNOSE_FAILED 15
87#define SR0_STOP_REG_HIT 11
88
89#define SR0_CHNL (1 << 4)
90#define SR0_EXECUTION (1 << 5)
91#define SR0_RECEPTION (1 << 6)
92#define SR0_INTERRUPT (1 << 7)
93#define SR0_BOTH_RX_TX (SR0_EXECUTION | SR0_RECEPTION)
94
95#define SR3_EXEC_STATE_MASK 0x03
96#define SR3_EXEC_IDLE 0
97#define SR3_TX_ABORT_IN_PROGRESS 1
98#define SR3_EXEC_ACTIVE 2
99#define SR3_ABORT_IN_PROGRESS 3
100#define SR3_EXEC_CHNL (1 << 2)
101#define SR3_STP_ON_NO_RSRC (1 << 3)
102#define SR3_RCVING_NO_RSRC (1 << 4)
103#define SR3_RCV_STATE_MASK 0x60
104#define SR3_RCV_IDLE 0x00
105#define SR3_RCV_READY 0x20
106#define SR3_RCV_ACTIVE 0x40
107#define SR3_RCV_STOP_IN_PROG 0x60
108#define SR3_RCV_CHNL (1 << 7)
109
110/* Port 1 Command Register definitions */
111
112#define OP1_NOP 0
113#define OP1_SWIT_TO_PORT_0 1
114#define OP1_INT_DISABLE 2
115#define OP1_INT_ENABLE 3
116#define OP1_SET_TS 5
117#define OP1_RST_TS 7
118#define OP1_POWER_DOWN 8
119#define OP1_RESET_RING_MNGMT 11
120#define OP1_RESET 14
121#define OP1_SEL_RST 15
122
123#define CR1_STATUS_4 0x00
124#define CR1_STATUS_5 0x20
125#define CR1_STATUS_6 0x40
126#define CR1_STOP_REG_UPDATE (1 << 7)
127
128/* Receive frame status bits */
129
130#define RX_RCLD (1 << 0)
131#define RX_IA_MATCH (1 << 1)
132#define RX_NO_AD_MATCH (1 << 2)
133#define RX_NO_SFD (1 << 3)
134#define RX_SRT_FRM (1 << 7)
135#define RX_OVRRUN (1 << 8)
136#define RX_ALG_ERR (1 << 10)
137#define RX_CRC_ERR (1 << 11)
138#define RX_LEN_ERR (1 << 12)
139#define RX_RCV_OK (1 << 13)
140#define RX_TYP_LEN (1 << 15)
141
142/* Transmit status bits */
143
144#define TX_NCOL_MASK 0x0f
145#define TX_FRTL (1 << 4)
146#define TX_MAX_COL (1 << 5)
147#define TX_HRT_BEAT (1 << 6)
148#define TX_DEFER (1 << 7)
149#define TX_UND_RUN (1 << 8)
150#define TX_LOST_CTS (1 << 9)
151#define TX_LOST_CRS (1 << 10)
152#define TX_LTCOL (1 << 11)
153#define TX_OK (1 << 13)
154#define TX_COLL (1 << 15)
155
156struct i82593_conf_block {
157 u_char fifo_limit : 4,
158 forgnesi : 1,
159 fifo_32 : 1,
160 d6mod : 1,
161 throttle_enb : 1;
162 u_char throttle : 6,
163 cntrxint : 1,
164 contin : 1;
165 u_char addr_len : 3,
166 acloc : 1,
167 preamb_len : 2,
168 loopback : 2;
169 u_char lin_prio : 3,
170 tbofstop : 1,
171 exp_prio : 3,
172 bof_met : 1;
173 u_char : 4,
174 ifrm_spc : 4;
175 u_char : 5,
176 slottim_low : 3;
177 u_char slottim_hi : 3,
178 : 1,
179 max_retr : 4;
180 u_char prmisc : 1,
181 bc_dis : 1,
182 : 1,
183 crs_1 : 1,
184 nocrc_ins : 1,
185 crc_1632 : 1,
186 : 1,
187 crs_cdt : 1;
188 u_char cs_filter : 3,
189 crs_src : 1,
190 cd_filter : 3,
191 : 1;
192 u_char : 2,
193 min_fr_len : 6;
194 u_char lng_typ : 1,
195 lng_fld : 1,
196 rxcrc_xf : 1,
197 artx : 1,
198 sarec : 1,
199 tx_jabber : 1, /* why is this called max_len in the manual? */
200 hash_1 : 1,
201 lbpkpol : 1;
202 u_char : 6,
203 fdx : 1,
204 : 1;
205 u_char dummy_6 : 6, /* supposed to be ones */
206 mult_ia : 1,
207 dis_bof : 1;
208 u_char dummy_1 : 1, /* supposed to be one */
209 tx_ifs_retrig : 2,
210 mc_all : 1,
211 rcv_mon : 2,
212 frag_acpt : 1,
213 tstrttrs : 1;
214 u_char fretx : 1,
215 runt_eop : 1,
216 hw_sw_pin : 1,
217 big_endn : 1,
218 syncrqs : 1,
219 sttlen : 1,
220 tx_eop : 1,
221 rx_eop : 1;
222 u_char rbuf_size : 5,
223 rcvstop : 1,
224 : 2;
225};
226
227#define I82593_MAX_MULTICAST_ADDRESSES 128 /* Hardware hashed filter */
228
229#endif /* _I82593_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 63ab3873c5ed..4f4eea8a6288 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -6,6 +6,7 @@
6 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> 6 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
7 * Copyright (c) 2005, Devicescape Software, Inc. 7 * Copyright (c) 2005, Devicescape Software, Inc.
8 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> 8 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
9 * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
@@ -18,6 +19,7 @@
18#include <linux/types.h> 19#include <linux/types.h>
19#include <linux/if_ether.h> 20#include <linux/if_ether.h>
20#include <asm/byteorder.h> 21#include <asm/byteorder.h>
22#include <asm/unaligned.h>
21 23
22/* 24/*
23 * DS bit usage 25 * DS bit usage
@@ -165,8 +167,12 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
165 167
166#define IEEE80211_MAX_MESH_ID_LEN 32 168#define IEEE80211_MAX_MESH_ID_LEN 32
167 169
170#define IEEE80211_FIRST_TSPEC_TSID 8
168#define IEEE80211_NUM_TIDS 16 171#define IEEE80211_NUM_TIDS 16
169 172
173/* number of user priorities 802.11 uses */
174#define IEEE80211_NUM_UPS 8
175
170#define IEEE80211_QOS_CTL_LEN 2 176#define IEEE80211_QOS_CTL_LEN 2
171/* 1d tag mask */ 177/* 1d tag mask */
172#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 178#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
@@ -838,6 +844,16 @@ enum ieee80211_vht_opmode_bits {
838 844
839#define WLAN_SA_QUERY_TR_ID_LEN 2 845#define WLAN_SA_QUERY_TR_ID_LEN 2
840 846
847/**
848 * struct ieee80211_tpc_report_ie
849 *
850 * This structure refers to "TPC Report element"
851 */
852struct ieee80211_tpc_report_ie {
853 u8 tx_power;
854 u8 link_margin;
855} __packed;
856
841struct ieee80211_mgmt { 857struct ieee80211_mgmt {
842 __le16 frame_control; 858 __le16 frame_control;
843 __le16 duration; 859 __le16 duration;
@@ -973,6 +989,13 @@ struct ieee80211_mgmt {
973 u8 action_code; 989 u8 action_code;
974 u8 operating_mode; 990 u8 operating_mode;
975 } __packed vht_opmode_notif; 991 } __packed vht_opmode_notif;
992 struct {
993 u8 action_code;
994 u8 dialog_token;
995 u8 tpc_elem_id;
996 u8 tpc_elem_length;
997 struct ieee80211_tpc_report_ie tpc;
998 } __packed tpc_report;
976 } u; 999 } u;
977 } __packed action; 1000 } __packed action;
978 } u; 1001 } u;
@@ -1044,6 +1067,12 @@ struct ieee80211_pspoll {
1044 1067
1045/* TDLS */ 1068/* TDLS */
1046 1069
1070/* Channel switch timing */
1071struct ieee80211_ch_switch_timing {
1072 __le16 switch_time;
1073 __le16 switch_timeout;
1074} __packed;
1075
1047/* Link-id information element */ 1076/* Link-id information element */
1048struct ieee80211_tdls_lnkie { 1077struct ieee80211_tdls_lnkie {
1049 u8 ie_type; /* Link Identifier IE */ 1078 u8 ie_type; /* Link Identifier IE */
@@ -1085,6 +1114,15 @@ struct ieee80211_tdls_data {
1085 u8 dialog_token; 1114 u8 dialog_token;
1086 u8 variable[0]; 1115 u8 variable[0];
1087 } __packed discover_req; 1116 } __packed discover_req;
1117 struct {
1118 u8 target_channel;
1119 u8 oper_class;
1120 u8 variable[0];
1121 } __packed chan_switch_req;
1122 struct {
1123 __le16 status_code;
1124 u8 variable[0];
1125 } __packed chan_switch_resp;
1088 } u; 1126 } u;
1089} __packed; 1127} __packed;
1090 1128
@@ -1252,7 +1290,7 @@ struct ieee80211_ht_cap {
1252#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2 1290#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
1253 1291
1254/* 1292/*
1255 * Maximum length of AMPDU that the STA can receive. 1293 * Maximum length of AMPDU that the STA can receive in high-throughput (HT).
1256 * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets) 1294 * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
1257 */ 1295 */
1258enum ieee80211_max_ampdu_length_exp { 1296enum ieee80211_max_ampdu_length_exp {
@@ -1262,6 +1300,21 @@ enum ieee80211_max_ampdu_length_exp {
1262 IEEE80211_HT_MAX_AMPDU_64K = 3 1300 IEEE80211_HT_MAX_AMPDU_64K = 3
1263}; 1301};
1264 1302
1303/*
1304 * Maximum length of AMPDU that the STA can receive in VHT.
1305 * Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
1306 */
1307enum ieee80211_vht_max_ampdu_length_exp {
1308 IEEE80211_VHT_MAX_AMPDU_8K = 0,
1309 IEEE80211_VHT_MAX_AMPDU_16K = 1,
1310 IEEE80211_VHT_MAX_AMPDU_32K = 2,
1311 IEEE80211_VHT_MAX_AMPDU_64K = 3,
1312 IEEE80211_VHT_MAX_AMPDU_128K = 4,
1313 IEEE80211_VHT_MAX_AMPDU_256K = 5,
1314 IEEE80211_VHT_MAX_AMPDU_512K = 6,
1315 IEEE80211_VHT_MAX_AMPDU_1024K = 7
1316};
1317
1265#define IEEE80211_HT_MAX_AMPDU_FACTOR 13 1318#define IEEE80211_HT_MAX_AMPDU_FACTOR 13
1266 1319
1267/* Minimum MPDU start spacing */ 1320/* Minimum MPDU start spacing */
@@ -1806,7 +1859,8 @@ enum ieee80211_eid {
1806 WLAN_EID_DMG_TSPEC = 146, 1859 WLAN_EID_DMG_TSPEC = 146,
1807 WLAN_EID_DMG_AT = 147, 1860 WLAN_EID_DMG_AT = 147,
1808 WLAN_EID_DMG_CAP = 148, 1861 WLAN_EID_DMG_CAP = 148,
1809 /* 149-150 reserved for Cisco */ 1862 /* 149 reserved for Cisco */
1863 WLAN_EID_CISCO_VENDOR_SPECIFIC = 150,
1810 WLAN_EID_DMG_OPERATION = 151, 1864 WLAN_EID_DMG_OPERATION = 151,
1811 WLAN_EID_DMG_BSS_PARAM_CHANGE = 152, 1865 WLAN_EID_DMG_BSS_PARAM_CHANGE = 152,
1812 WLAN_EID_DMG_BEAM_REFINEMENT = 153, 1866 WLAN_EID_DMG_BEAM_REFINEMENT = 153,
@@ -1865,6 +1919,7 @@ enum ieee80211_category {
1865 WLAN_CATEGORY_DLS = 2, 1919 WLAN_CATEGORY_DLS = 2,
1866 WLAN_CATEGORY_BACK = 3, 1920 WLAN_CATEGORY_BACK = 3,
1867 WLAN_CATEGORY_PUBLIC = 4, 1921 WLAN_CATEGORY_PUBLIC = 4,
1922 WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
1868 WLAN_CATEGORY_HT = 7, 1923 WLAN_CATEGORY_HT = 7,
1869 WLAN_CATEGORY_SA_QUERY = 8, 1924 WLAN_CATEGORY_SA_QUERY = 8,
1870 WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, 1925 WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
@@ -1974,6 +2029,16 @@ enum ieee80211_tdls_actioncode {
1974 WLAN_TDLS_DISCOVERY_REQUEST = 10, 2029 WLAN_TDLS_DISCOVERY_REQUEST = 10,
1975}; 2030};
1976 2031
2032/* Extended Channel Switching capability to be set in the 1st byte of
2033 * the @WLAN_EID_EXT_CAPABILITY information element
2034 */
2035#define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2)
2036
2037/* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */
2038#define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4)
2039#define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5)
2040#define WLAN_EXT_CAPA4_TDLS_CHAN_SWITCH BIT(6)
2041
1977/* Interworking capabilities are set in 7th bit of 4th byte of the 2042/* Interworking capabilities are set in 7th bit of 4th byte of the
1978 * @WLAN_EID_EXT_CAPABILITY information element 2043 * @WLAN_EID_EXT_CAPABILITY information element
1979 */ 2044 */
@@ -1985,6 +2050,7 @@ enum ieee80211_tdls_actioncode {
1985 */ 2050 */
1986#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5) 2051#define WLAN_EXT_CAPA5_TDLS_ENABLED BIT(5)
1987#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6) 2052#define WLAN_EXT_CAPA5_TDLS_PROHIBITED BIT(6)
2053#define WLAN_EXT_CAPA5_TDLS_CH_SW_PROHIBITED BIT(7)
1988 2054
1989#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6) 2055#define WLAN_EXT_CAPA8_OPMODE_NOTIF BIT(6)
1990#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7) 2056#define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED BIT(7)
@@ -1992,6 +2058,9 @@ enum ieee80211_tdls_actioncode {
1992/* TDLS specific payload type in the LLC/SNAP header */ 2058/* TDLS specific payload type in the LLC/SNAP header */
1993#define WLAN_TDLS_SNAP_RFTYPE 0x2 2059#define WLAN_TDLS_SNAP_RFTYPE 0x2
1994 2060
2061/* BSS Coex IE information field bits */
2062#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
2063
1995/** 2064/**
1996 * enum - mesh synchronization method identifier 2065 * enum - mesh synchronization method identifier
1997 * 2066 *
@@ -2374,8 +2443,79 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
2374 return !!(tim->virtual_map[index] & mask); 2443 return !!(tim->virtual_map[index] & mask);
2375} 2444}
2376 2445
2446/**
2447 * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
2448 * @skb: the skb containing the frame, length will not be checked
2449 * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
2450 *
2451 * This function assumes the frame is a data frame, and that the network header
2452 * is in the correct place.
2453 */
2454static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
2455{
2456 if (!skb_is_nonlinear(skb) &&
2457 skb->len > (skb_network_offset(skb) + 2)) {
2458 /* Point to where the indication of TDLS should start */
2459 const u8 *tdls_data = skb_network_header(skb) - 2;
2460
2461 if (get_unaligned_be16(tdls_data) == ETH_P_TDLS &&
2462 tdls_data[2] == WLAN_TDLS_SNAP_RFTYPE &&
2463 tdls_data[3] == WLAN_CATEGORY_TDLS)
2464 return tdls_data[4];
2465 }
2466
2467 return -1;
2468}
2469
2377/* convert time units */ 2470/* convert time units */
2378#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024)) 2471#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
2379#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x)) 2472#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
2380 2473
2474/**
2475 * ieee80211_action_contains_tpc - checks if the frame contains TPC element
2476 * @skb: the skb containing the frame, length will be checked
2477 *
2478 * This function checks if it's either TPC report action frame or Link
2479 * Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5
2480 * and 8.5.7.5 accordingly.
2481 */
2482static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
2483{
2484 struct ieee80211_mgmt *mgmt = (void *)skb->data;
2485
2486 if (!ieee80211_is_action(mgmt->frame_control))
2487 return false;
2488
2489 if (skb->len < IEEE80211_MIN_ACTION_SIZE +
2490 sizeof(mgmt->u.action.u.tpc_report))
2491 return false;
2492
2493 /*
2494 * TPC report - check that:
2495 * category = 0 (Spectrum Management) or 5 (Radio Measurement)
2496 * spectrum management action = 3 (TPC/Link Measurement report)
2497 * TPC report EID = 35
2498 * TPC report element length = 2
2499 *
2500 * The spectrum management's tpc_report struct is used here both for
2501 * parsing tpc_report and radio measurement's link measurement report
2502 * frame, since the relevant part is identical in both frames.
2503 */
2504 if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT &&
2505 mgmt->u.action.category != WLAN_CATEGORY_RADIO_MEASUREMENT)
2506 return false;
2507
2508 /* both spectrum mgmt and link measurement have same action code */
2509 if (mgmt->u.action.u.tpc_report.action_code !=
2510 WLAN_ACTION_SPCT_TPC_RPRT)
2511 return false;
2512
2513 if (mgmt->u.action.u.tpc_report.tpc_elem_id != WLAN_EID_TPC_REPORT ||
2514 mgmt->u.action.u.tpc_report.tpc_elem_length !=
2515 sizeof(struct ieee80211_tpc_report_ie))
2516 return false;
2517
2518 return true;
2519}
2520
2381#endif /* LINUX_IEEE80211_H */ 2521#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/ieee802154.h b/include/linux/ieee802154.h
new file mode 100644
index 000000000000..6e82d888287c
--- /dev/null
+++ b/include/linux/ieee802154.h
@@ -0,0 +1,242 @@
1/*
2 * IEEE802.15.4-2003 specification
3 *
4 * Copyright (C) 2007, 2008 Siemens AG
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * Written by:
16 * Pavel Smolenskiy <pavel.smolenskiy@gmail.com>
17 * Maxim Gorbachyov <maxim.gorbachev@siemens.com>
18 * Maxim Osipov <maxim.osipov@siemens.com>
19 * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
20 * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
21 */
22
23#ifndef LINUX_IEEE802154_H
24#define LINUX_IEEE802154_H
25
26#include <linux/types.h>
27#include <linux/random.h>
28#include <asm/byteorder.h>
29
30#define IEEE802154_MTU 127
31#define IEEE802154_MIN_PSDU_LEN 5
32
33#define IEEE802154_PAN_ID_BROADCAST 0xffff
34#define IEEE802154_ADDR_SHORT_BROADCAST 0xffff
35#define IEEE802154_ADDR_SHORT_UNSPEC 0xfffe
36
37#define IEEE802154_EXTENDED_ADDR_LEN 8
38
39#define IEEE802154_LIFS_PERIOD 40
40#define IEEE802154_SIFS_PERIOD 12
41
42#define IEEE802154_MAX_CHANNEL 26
43#define IEEE802154_MAX_PAGE 31
44
45#define IEEE802154_FC_TYPE_BEACON 0x0 /* Frame is beacon */
46#define IEEE802154_FC_TYPE_DATA 0x1 /* Frame is data */
47#define IEEE802154_FC_TYPE_ACK 0x2 /* Frame is acknowledgment */
48#define IEEE802154_FC_TYPE_MAC_CMD 0x3 /* Frame is MAC command */
49
50#define IEEE802154_FC_TYPE_SHIFT 0
51#define IEEE802154_FC_TYPE_MASK ((1 << 3) - 1)
52#define IEEE802154_FC_TYPE(x) ((x & IEEE802154_FC_TYPE_MASK) >> IEEE802154_FC_TYPE_SHIFT)
53#define IEEE802154_FC_SET_TYPE(v, x) do { \
54 v = (((v) & ~IEEE802154_FC_TYPE_MASK) | \
55 (((x) << IEEE802154_FC_TYPE_SHIFT) & IEEE802154_FC_TYPE_MASK)); \
56 } while (0)
57
58#define IEEE802154_FC_SECEN_SHIFT 3
59#define IEEE802154_FC_SECEN (1 << IEEE802154_FC_SECEN_SHIFT)
60#define IEEE802154_FC_FRPEND_SHIFT 4
61#define IEEE802154_FC_FRPEND (1 << IEEE802154_FC_FRPEND_SHIFT)
62#define IEEE802154_FC_ACK_REQ_SHIFT 5
63#define IEEE802154_FC_ACK_REQ (1 << IEEE802154_FC_ACK_REQ_SHIFT)
64#define IEEE802154_FC_INTRA_PAN_SHIFT 6
65#define IEEE802154_FC_INTRA_PAN (1 << IEEE802154_FC_INTRA_PAN_SHIFT)
66
67#define IEEE802154_FC_SAMODE_SHIFT 14
68#define IEEE802154_FC_SAMODE_MASK (3 << IEEE802154_FC_SAMODE_SHIFT)
69#define IEEE802154_FC_DAMODE_SHIFT 10
70#define IEEE802154_FC_DAMODE_MASK (3 << IEEE802154_FC_DAMODE_SHIFT)
71
72#define IEEE802154_FC_VERSION_SHIFT 12
73#define IEEE802154_FC_VERSION_MASK (3 << IEEE802154_FC_VERSION_SHIFT)
74#define IEEE802154_FC_VERSION(x) ((x & IEEE802154_FC_VERSION_MASK) >> IEEE802154_FC_VERSION_SHIFT)
75
76#define IEEE802154_FC_SAMODE(x) \
77 (((x) & IEEE802154_FC_SAMODE_MASK) >> IEEE802154_FC_SAMODE_SHIFT)
78
79#define IEEE802154_FC_DAMODE(x) \
80 (((x) & IEEE802154_FC_DAMODE_MASK) >> IEEE802154_FC_DAMODE_SHIFT)
81
82#define IEEE802154_SCF_SECLEVEL_MASK 7
83#define IEEE802154_SCF_SECLEVEL_SHIFT 0
84#define IEEE802154_SCF_SECLEVEL(x) (x & IEEE802154_SCF_SECLEVEL_MASK)
85#define IEEE802154_SCF_KEY_ID_MODE_SHIFT 3
86#define IEEE802154_SCF_KEY_ID_MODE_MASK (3 << IEEE802154_SCF_KEY_ID_MODE_SHIFT)
87#define IEEE802154_SCF_KEY_ID_MODE(x) \
88 ((x & IEEE802154_SCF_KEY_ID_MODE_MASK) >> IEEE802154_SCF_KEY_ID_MODE_SHIFT)
89
90#define IEEE802154_SCF_KEY_IMPLICIT 0
91#define IEEE802154_SCF_KEY_INDEX 1
92#define IEEE802154_SCF_KEY_SHORT_INDEX 2
93#define IEEE802154_SCF_KEY_HW_INDEX 3
94
95#define IEEE802154_SCF_SECLEVEL_NONE 0
96#define IEEE802154_SCF_SECLEVEL_MIC32 1
97#define IEEE802154_SCF_SECLEVEL_MIC64 2
98#define IEEE802154_SCF_SECLEVEL_MIC128 3
99#define IEEE802154_SCF_SECLEVEL_ENC 4
100#define IEEE802154_SCF_SECLEVEL_ENC_MIC32 5
101#define IEEE802154_SCF_SECLEVEL_ENC_MIC64 6
102#define IEEE802154_SCF_SECLEVEL_ENC_MIC128 7
103
104/* MAC footer size */
105#define IEEE802154_MFR_SIZE 2 /* 2 octets */
106
107/* MAC's Command Frames Identifiers */
108#define IEEE802154_CMD_ASSOCIATION_REQ 0x01
109#define IEEE802154_CMD_ASSOCIATION_RESP 0x02
110#define IEEE802154_CMD_DISASSOCIATION_NOTIFY 0x03
111#define IEEE802154_CMD_DATA_REQ 0x04
112#define IEEE802154_CMD_PANID_CONFLICT_NOTIFY 0x05
113#define IEEE802154_CMD_ORPHAN_NOTIFY 0x06
114#define IEEE802154_CMD_BEACON_REQ 0x07
115#define IEEE802154_CMD_COORD_REALIGN_NOTIFY 0x08
116#define IEEE802154_CMD_GTS_REQ 0x09
117
118/*
119 * The return values of MAC operations
120 */
121enum {
122 /*
123 * The requested operation was completed successfully.
124 * For a transmission request, this value indicates
125 * a successful transmission.
126 */
127 IEEE802154_SUCCESS = 0x0,
128
129 /* The beacon was lost following a synchronization request. */
130 IEEE802154_BEACON_LOSS = 0xe0,
131 /*
132 * A transmission could not take place due to activity on the
133 * channel, i.e., the CSMA-CA mechanism has failed.
134 */
135 IEEE802154_CHNL_ACCESS_FAIL = 0xe1,
136 /* The GTS request has been denied by the PAN coordinator. */
137 IEEE802154_DENINED = 0xe2,
138 /* The attempt to disable the transceiver has failed. */
139 IEEE802154_DISABLE_TRX_FAIL = 0xe3,
140 /*
141 * The received frame induces a failed security check according to
142 * the security suite.
143 */
144 IEEE802154_FAILED_SECURITY_CHECK = 0xe4,
145 /*
146 * The frame resulting from secure processing has a length that is
147 * greater than aMACMaxFrameSize.
148 */
149 IEEE802154_FRAME_TOO_LONG = 0xe5,
150 /*
151 * The requested GTS transmission failed because the specified GTS
152 * either did not have a transmit GTS direction or was not defined.
153 */
154 IEEE802154_INVALID_GTS = 0xe6,
155 /*
156 * A request to purge an MSDU from the transaction queue was made using
157 * an MSDU handle that was not found in the transaction table.
158 */
159 IEEE802154_INVALID_HANDLE = 0xe7,
160 /* A parameter in the primitive is out of the valid range.*/
161 IEEE802154_INVALID_PARAMETER = 0xe8,
162 /* No acknowledgment was received after aMaxFrameRetries. */
163 IEEE802154_NO_ACK = 0xe9,
164 /* A scan operation failed to find any network beacons.*/
165 IEEE802154_NO_BEACON = 0xea,
166 /* No response data were available following a request. */
167 IEEE802154_NO_DATA = 0xeb,
168 /* The operation failed because a short address was not allocated. */
169 IEEE802154_NO_SHORT_ADDRESS = 0xec,
170 /*
171 * A receiver enable request was unsuccessful because it could not be
172 * completed within the CAP.
173 */
174 IEEE802154_OUT_OF_CAP = 0xed,
175 /*
176 * A PAN identifier conflict has been detected and communicated to the
177 * PAN coordinator.
178 */
179 IEEE802154_PANID_CONFLICT = 0xee,
180 /* A coordinator realignment command has been received. */
181 IEEE802154_REALIGMENT = 0xef,
182 /* The transaction has expired and its information discarded. */
183 IEEE802154_TRANSACTION_EXPIRED = 0xf0,
184 /* There is no capacity to store the transaction. */
185 IEEE802154_TRANSACTION_OVERFLOW = 0xf1,
186 /*
187 * The transceiver was in the transmitter enabled state when the
188 * receiver was requested to be enabled.
189 */
190 IEEE802154_TX_ACTIVE = 0xf2,
191 /* The appropriate key is not available in the ACL. */
192 IEEE802154_UNAVAILABLE_KEY = 0xf3,
193 /*
194 * A SET/GET request was issued with the identifier of a PIB attribute
195 * that is not supported.
196 */
197 IEEE802154_UNSUPPORTED_ATTR = 0xf4,
198 /*
199 * A request to perform a scan operation failed because the MLME was
200 * in the process of performing a previously initiated scan operation.
201 */
202 IEEE802154_SCAN_IN_PROGRESS = 0xfc,
203};
204
205/**
206 * ieee802154_is_valid_psdu_len - check if psdu len is valid
207 * @len: psdu len with (MHR + payload + MFR)
208 */
209static inline bool ieee802154_is_valid_psdu_len(const u8 len)
210{
211 return (len >= IEEE802154_MIN_PSDU_LEN && len <= IEEE802154_MTU);
212}
213
214/**
215 * ieee802154_is_valid_psdu_len - check if extended addr is valid
216 * @addr: extended addr to check
217 */
218static inline bool ieee802154_is_valid_extended_addr(const __le64 addr)
219{
220 /* These EUI-64 addresses are reserved by IEEE. 0xffffffffffffffff
221 * is used internally as extended to short address broadcast mapping.
222 * This is currently a workaround because neighbor discovery can't
223 * deal with short addresses types right now.
224 */
225 return ((addr != cpu_to_le64(0x0000000000000000ULL)) &&
226 (addr != cpu_to_le64(0xffffffffffffffffULL)));
227}
228
229/**
230 * ieee802154_random_extended_addr - generates a random extended address
231 * @addr: extended addr pointer to place the random address
232 */
233static inline void ieee802154_random_extended_addr(__le64 *addr)
234{
235 get_random_bytes(addr, IEEE802154_EXTENDED_ADDR_LEN);
236
237 /* toggle some bit if we hit an invalid extended addr */
238 if (!ieee802154_is_valid_extended_addr(*addr))
239 ((u8 *)addr)[IEEE802154_EXTENDED_ADDR_LEN - 1] ^= 0x01;
240}
241
242#endif /* LINUX_IEEE802154_H */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 808dcb8cc04f..0a8ce762a47f 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -15,6 +15,7 @@
15 15
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17#include <uapi/linux/if_bridge.h> 17#include <uapi/linux/if_bridge.h>
18#include <linux/bitops.h>
18 19
19struct br_ip { 20struct br_ip {
20 union { 21 union {
@@ -32,11 +33,41 @@ struct br_ip_list {
32 struct br_ip addr; 33 struct br_ip addr;
33}; 34};
34 35
36#define BR_HAIRPIN_MODE BIT(0)
37#define BR_BPDU_GUARD BIT(1)
38#define BR_ROOT_BLOCK BIT(2)
39#define BR_MULTICAST_FAST_LEAVE BIT(3)
40#define BR_ADMIN_COST BIT(4)
41#define BR_LEARNING BIT(5)
42#define BR_FLOOD BIT(6)
43#define BR_AUTO_MASK (BR_FLOOD | BR_LEARNING)
44#define BR_PROMISC BIT(7)
45#define BR_PROXYARP BIT(8)
46#define BR_LEARNING_SYNC BIT(9)
47
35extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); 48extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
36 49
37typedef int br_should_route_hook_t(struct sk_buff *skb); 50typedef int br_should_route_hook_t(struct sk_buff *skb);
38extern br_should_route_hook_t __rcu *br_should_route_hook; 51extern br_should_route_hook_t __rcu *br_should_route_hook;
39 52
53#if IS_ENABLED(CONFIG_BRIDGE)
54int br_fdb_external_learn_add(struct net_device *dev,
55 const unsigned char *addr, u16 vid);
56int br_fdb_external_learn_del(struct net_device *dev,
57 const unsigned char *addr, u16 vid);
58#else
59static inline int br_fdb_external_learn_add(struct net_device *dev,
60 const unsigned char *addr, u16 vid)
61{
62 return 0;
63}
64static inline int br_fdb_external_learn_del(struct net_device *dev,
65 const unsigned char *addr, u16 vid)
66{
67 return 0;
68}
69#endif
70
40#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) 71#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
41int br_multicast_list_adjacent(struct net_device *dev, 72int br_multicast_list_adjacent(struct net_device *dev,
42 struct list_head *br_ip_list); 73 struct list_head *br_ip_list);
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 6b2c7cf352a5..6f6929ea8a0c 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -60,6 +60,7 @@ struct macvlan_dev {
60#ifdef CONFIG_NET_POLL_CONTROLLER 60#ifdef CONFIG_NET_POLL_CONTROLLER
61 struct netpoll *netpoll; 61 struct netpoll *netpoll;
62#endif 62#endif
63 unsigned int macaddr_count;
63}; 64};
64 65
65static inline void macvlan_count_rx(const struct macvlan_dev *vlan, 66static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index d69f0577a319..515a35e2a48a 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -282,28 +282,24 @@ static inline bool vlan_hw_offload_capable(netdev_features_t features,
282} 282}
283 283
284/** 284/**
285 * vlan_insert_tag - regular VLAN tag inserting 285 * __vlan_insert_tag - regular VLAN tag inserting
286 * @skb: skbuff to tag 286 * @skb: skbuff to tag
287 * @vlan_proto: VLAN encapsulation protocol 287 * @vlan_proto: VLAN encapsulation protocol
288 * @vlan_tci: VLAN TCI to insert 288 * @vlan_tci: VLAN TCI to insert
289 * 289 *
290 * Inserts the VLAN tag into @skb as part of the payload 290 * Inserts the VLAN tag into @skb as part of the payload
291 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed. 291 * Returns error if skb_cow_head failes.
292 *
293 * Following the skb_unshare() example, in case of error, the calling function
294 * doesn't have to worry about freeing the original skb.
295 * 292 *
296 * Does not change skb->protocol so this function can be used during receive. 293 * Does not change skb->protocol so this function can be used during receive.
297 */ 294 */
298static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, 295static inline int __vlan_insert_tag(struct sk_buff *skb,
299 __be16 vlan_proto, u16 vlan_tci) 296 __be16 vlan_proto, u16 vlan_tci)
300{ 297{
301 struct vlan_ethhdr *veth; 298 struct vlan_ethhdr *veth;
302 299
303 if (skb_cow_head(skb, VLAN_HLEN) < 0) { 300 if (skb_cow_head(skb, VLAN_HLEN) < 0)
304 dev_kfree_skb_any(skb); 301 return -ENOMEM;
305 return NULL; 302
306 }
307 veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN); 303 veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
308 304
309 /* Move the mac addresses to the beginning of the new header. */ 305 /* Move the mac addresses to the beginning of the new header. */
@@ -316,12 +312,40 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
316 /* now, the TCI */ 312 /* now, the TCI */
317 veth->h_vlan_TCI = htons(vlan_tci); 313 veth->h_vlan_TCI = htons(vlan_tci);
318 314
315 return 0;
316}
317
318/**
319 * vlan_insert_tag - regular VLAN tag inserting
320 * @skb: skbuff to tag
321 * @vlan_proto: VLAN encapsulation protocol
322 * @vlan_tci: VLAN TCI to insert
323 *
324 * Inserts the VLAN tag into @skb as part of the payload
325 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
326 *
327 * Following the skb_unshare() example, in case of error, the calling function
328 * doesn't have to worry about freeing the original skb.
329 *
330 * Does not change skb->protocol so this function can be used during receive.
331 */
332static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
333 __be16 vlan_proto, u16 vlan_tci)
334{
335 int err;
336
337 err = __vlan_insert_tag(skb, vlan_proto, vlan_tci);
338 if (err) {
339 dev_kfree_skb_any(skb);
340 return NULL;
341 }
319 return skb; 342 return skb;
320} 343}
321 344
322/** 345/**
323 * __vlan_put_tag - regular VLAN tag inserting 346 * vlan_insert_tag_set_proto - regular VLAN tag inserting
324 * @skb: skbuff to tag 347 * @skb: skbuff to tag
348 * @vlan_proto: VLAN encapsulation protocol
325 * @vlan_tci: VLAN TCI to insert 349 * @vlan_tci: VLAN TCI to insert
326 * 350 *
327 * Inserts the VLAN tag into @skb as part of the payload 351 * Inserts the VLAN tag into @skb as part of the payload
@@ -330,8 +354,9 @@ static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb,
330 * Following the skb_unshare() example, in case of error, the calling function 354 * Following the skb_unshare() example, in case of error, the calling function
331 * doesn't have to worry about freeing the original skb. 355 * doesn't have to worry about freeing the original skb.
332 */ 356 */
333static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, 357static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
334 __be16 vlan_proto, u16 vlan_tci) 358 __be16 vlan_proto,
359 u16 vlan_tci)
335{ 360{
336 skb = vlan_insert_tag(skb, vlan_proto, vlan_tci); 361 skb = vlan_insert_tag(skb, vlan_proto, vlan_tci);
337 if (skb) 362 if (skb)
@@ -339,39 +364,53 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb,
339 return skb; 364 return skb;
340} 365}
341 366
342/** 367/*
343 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting 368 * __vlan_hwaccel_push_inside - pushes vlan tag to the payload
344 * @skb: skbuff to tag 369 * @skb: skbuff to tag
345 * @vlan_proto: VLAN encapsulation protocol
346 * @vlan_tci: VLAN TCI to insert
347 * 370 *
348 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest 371 * Pushes the VLAN tag from @skb->vlan_tci inside to the payload.
372 *
373 * Following the skb_unshare() example, in case of error, the calling function
374 * doesn't have to worry about freeing the original skb.
349 */ 375 */
350static inline struct sk_buff *__vlan_hwaccel_put_tag(struct sk_buff *skb, 376static inline struct sk_buff *__vlan_hwaccel_push_inside(struct sk_buff *skb)
351 __be16 vlan_proto,
352 u16 vlan_tci)
353{ 377{
354 skb->vlan_proto = vlan_proto; 378 skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
355 skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; 379 vlan_tx_tag_get(skb));
380 if (likely(skb))
381 skb->vlan_tci = 0;
382 return skb;
383}
384/*
385 * vlan_hwaccel_push_inside - pushes vlan tag to the payload
386 * @skb: skbuff to tag
387 *
388 * Checks is tag is present in @skb->vlan_tci and if it is, it pushes the
389 * VLAN tag from @skb->vlan_tci inside to the payload.
390 *
391 * Following the skb_unshare() example, in case of error, the calling function
392 * doesn't have to worry about freeing the original skb.
393 */
394static inline struct sk_buff *vlan_hwaccel_push_inside(struct sk_buff *skb)
395{
396 if (vlan_tx_tag_present(skb))
397 skb = __vlan_hwaccel_push_inside(skb);
356 return skb; 398 return skb;
357} 399}
358 400
359/** 401/**
360 * vlan_put_tag - inserts VLAN tag according to device features 402 * __vlan_hwaccel_put_tag - hardware accelerated VLAN inserting
361 * @skb: skbuff to tag 403 * @skb: skbuff to tag
404 * @vlan_proto: VLAN encapsulation protocol
362 * @vlan_tci: VLAN TCI to insert 405 * @vlan_tci: VLAN TCI to insert
363 * 406 *
364 * Assumes skb->dev is the target that will xmit this frame. 407 * Puts the VLAN TCI in @skb->vlan_tci and lets the device do the rest
365 * Returns a VLAN tagged skb.
366 */ 408 */
367static inline struct sk_buff *vlan_put_tag(struct sk_buff *skb, 409static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
368 __be16 vlan_proto, u16 vlan_tci) 410 __be16 vlan_proto, u16 vlan_tci)
369{ 411{
370 if (vlan_hw_offload_capable(skb->dev->features, vlan_proto)) { 412 skb->vlan_proto = vlan_proto;
371 return __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); 413 skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
372 } else {
373 return __vlan_put_tag(skb, vlan_proto, vlan_tci);
374 }
375} 414}
376 415
377/** 416/**
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index f47550d75f85..2c677afeea47 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -39,6 +39,7 @@ static inline struct igmpv3_query *
39 39
40extern int sysctl_igmp_max_memberships; 40extern int sysctl_igmp_max_memberships;
41extern int sysctl_igmp_max_msf; 41extern int sysctl_igmp_max_msf;
42extern int sysctl_igmp_qrv;
42 43
43struct ip_sf_socklist { 44struct ip_sf_socklist {
44 unsigned int sl_max; 45 unsigned int sl_max;
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index d8257ab60bac..2c476acb87d9 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -164,7 +164,7 @@ struct st_sensor_transfer_function {
164}; 164};
165 165
166/** 166/**
167 * struct st_sensors - ST sensors list 167 * struct st_sensor_settings - ST specific sensor settings
168 * @wai: Contents of WhoAmI register. 168 * @wai: Contents of WhoAmI register.
169 * @sensors_supported: List of supported sensors by struct itself. 169 * @sensors_supported: List of supported sensors by struct itself.
170 * @ch: IIO channels for the sensor. 170 * @ch: IIO channels for the sensor.
@@ -177,7 +177,7 @@ struct st_sensor_transfer_function {
177 * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read. 177 * @multi_read_bit: Use or not particular bit for [I2C/SPI] multi-read.
178 * @bootime: samples to discard when sensor passing from power-down to power-up. 178 * @bootime: samples to discard when sensor passing from power-down to power-up.
179 */ 179 */
180struct st_sensors { 180struct st_sensor_settings {
181 u8 wai; 181 u8 wai;
182 char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME]; 182 char sensors_supported[ST_SENSORS_MAX_4WAI][ST_SENSORS_MAX_NAME];
183 struct iio_chan_spec *ch; 183 struct iio_chan_spec *ch;
@@ -196,7 +196,7 @@ struct st_sensors {
196 * struct st_sensor_data - ST sensor device status 196 * struct st_sensor_data - ST sensor device status
197 * @dev: Pointer to instance of struct device (I2C or SPI). 197 * @dev: Pointer to instance of struct device (I2C or SPI).
198 * @trig: The trigger in use by the core driver. 198 * @trig: The trigger in use by the core driver.
199 * @sensor: Pointer to the current sensor struct in use. 199 * @sensor_settings: Pointer to the specific sensor settings in use.
200 * @current_fullscale: Maximum range of measure by the sensor. 200 * @current_fullscale: Maximum range of measure by the sensor.
201 * @vdd: Pointer to sensor's Vdd power supply 201 * @vdd: Pointer to sensor's Vdd power supply
202 * @vdd_io: Pointer to sensor's Vdd-IO power supply 202 * @vdd_io: Pointer to sensor's Vdd-IO power supply
@@ -213,7 +213,7 @@ struct st_sensors {
213struct st_sensor_data { 213struct st_sensor_data {
214 struct device *dev; 214 struct device *dev;
215 struct iio_trigger *trig; 215 struct iio_trigger *trig;
216 struct st_sensors *sensor; 216 struct st_sensor_settings *sensor_settings;
217 struct st_sensor_fullscale_avl *current_fullscale; 217 struct st_sensor_fullscale_avl *current_fullscale;
218 struct regulator *vdd; 218 struct regulator *vdd;
219 struct regulator *vdd_io; 219 struct regulator *vdd_io;
@@ -279,7 +279,7 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
279 struct iio_chan_spec const *ch, int *val); 279 struct iio_chan_spec const *ch, int *val);
280 280
281int st_sensors_check_device_support(struct iio_dev *indio_dev, 281int st_sensors_check_device_support(struct iio_dev *indio_dev,
282 int num_sensors_list, const struct st_sensors *sensors); 282 int num_sensors_list, const struct st_sensor_settings *sensor_settings);
283 283
284ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, 284ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
285 struct device_attribute *attr, char *buf); 285 struct device_attribute *attr, char *buf);
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 8bbd7bc1043d..03fa332ad2a8 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -72,7 +72,7 @@ struct iio_event_data {
72 72
73#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) 73#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
74 74
75#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) 75#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
76 76
77#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) 77#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
78 78
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 15dc6bc2bdd2..3642ce7ef512 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -13,6 +13,7 @@
13#include <linux/device.h> 13#include <linux/device.h>
14#include <linux/cdev.h> 14#include <linux/cdev.h>
15#include <linux/iio/types.h> 15#include <linux/iio/types.h>
16#include <linux/of.h>
16/* IIO TODO LIST */ 17/* IIO TODO LIST */
17/* 18/*
18 * Provide means of adjusting timer accuracy. 19 * Provide means of adjusting timer accuracy.
@@ -326,6 +327,11 @@ struct iio_dev;
326 * @update_scan_mode: function to configure device and scan buffer when 327 * @update_scan_mode: function to configure device and scan buffer when
327 * channels have changed 328 * channels have changed
328 * @debugfs_reg_access: function to read or write register value of device 329 * @debugfs_reg_access: function to read or write register value of device
330 * @of_xlate: function pointer to obtain channel specifier index.
331 * When #iio-cells is greater than '0', the driver could
332 * provide a custom of_xlate function that reads the
333 * *args* and returns the appropriate index in registered
334 * IIO channels array.
329 **/ 335 **/
330struct iio_info { 336struct iio_info {
331 struct module *driver_module; 337 struct module *driver_module;
@@ -385,6 +391,8 @@ struct iio_info {
385 int (*debugfs_reg_access)(struct iio_dev *indio_dev, 391 int (*debugfs_reg_access)(struct iio_dev *indio_dev,
386 unsigned reg, unsigned writeval, 392 unsigned reg, unsigned writeval,
387 unsigned *readval); 393 unsigned *readval);
394 int (*of_xlate)(struct iio_dev *indio_dev,
395 const struct of_phandle_args *iiospec);
388}; 396};
389 397
390/** 398/**
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 7cf5e9b32550..120ccc53fcb7 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -15,7 +15,7 @@ struct linux_binprm;
15 15
16#ifdef CONFIG_IMA 16#ifdef CONFIG_IMA
17extern int ima_bprm_check(struct linux_binprm *bprm); 17extern int ima_bprm_check(struct linux_binprm *bprm);
18extern int ima_file_check(struct file *file, int mask); 18extern int ima_file_check(struct file *file, int mask, int opened);
19extern void ima_file_free(struct file *file); 19extern void ima_file_free(struct file *file);
20extern int ima_file_mmap(struct file *file, unsigned long prot); 20extern int ima_file_mmap(struct file *file, unsigned long prot);
21extern int ima_module_check(struct file *file); 21extern int ima_module_check(struct file *file);
@@ -27,7 +27,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
27 return 0; 27 return 0;
28} 28}
29 29
30static inline int ima_file_check(struct file *file, int mask) 30static inline int ima_file_check(struct file *file, int mask, int opened)
31{ 31{
32 return 0; 32 return 0;
33} 33}
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 0068708161ff..0a21fbefdfbe 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev)
242static __inline__ __be32 inet_make_mask(int logmask) 242static __inline__ __be32 inet_make_mask(int logmask)
243{ 243{
244 if (logmask) 244 if (logmask)
245 return htonl(~((1<<(32-logmask))-1)); 245 return htonl(~((1U<<(32-logmask))-1));
246 return 0; 246 return 0;
247} 247}
248 248
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 2bb4c4f3531a..3037fc085e8e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -102,7 +102,7 @@ extern struct group_info init_groups;
102#define INIT_IDS 102#define INIT_IDS
103#endif 103#endif
104 104
105#ifdef CONFIG_TREE_PREEMPT_RCU 105#ifdef CONFIG_PREEMPT_RCU
106#define INIT_TASK_RCU_TREE_PREEMPT() \ 106#define INIT_TASK_RCU_TREE_PREEMPT() \
107 .rcu_blocked_node = NULL, 107 .rcu_blocked_node = NULL,
108#else 108#else
@@ -111,12 +111,21 @@ extern struct group_info init_groups;
111#ifdef CONFIG_PREEMPT_RCU 111#ifdef CONFIG_PREEMPT_RCU
112#define INIT_TASK_RCU_PREEMPT(tsk) \ 112#define INIT_TASK_RCU_PREEMPT(tsk) \
113 .rcu_read_lock_nesting = 0, \ 113 .rcu_read_lock_nesting = 0, \
114 .rcu_read_unlock_special = 0, \ 114 .rcu_read_unlock_special.s = 0, \
115 .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ 115 .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \
116 INIT_TASK_RCU_TREE_PREEMPT() 116 INIT_TASK_RCU_TREE_PREEMPT()
117#else 117#else
118#define INIT_TASK_RCU_PREEMPT(tsk) 118#define INIT_TASK_RCU_PREEMPT(tsk)
119#endif 119#endif
120#ifdef CONFIG_TASKS_RCU
121#define INIT_TASK_RCU_TASKS(tsk) \
122 .rcu_tasks_holdout = false, \
123 .rcu_tasks_holdout_list = \
124 LIST_HEAD_INIT(tsk.rcu_tasks_holdout_list), \
125 .rcu_tasks_idle_cpu = -1,
126#else
127#define INIT_TASK_RCU_TASKS(tsk)
128#endif
120 129
121extern struct cred init_cred; 130extern struct cred init_cred;
122 131
@@ -157,6 +166,15 @@ extern struct task_group root_task_group;
157# define INIT_RT_MUTEXES(tsk) 166# define INIT_RT_MUTEXES(tsk)
158#endif 167#endif
159 168
169#ifdef CONFIG_NUMA_BALANCING
170# define INIT_NUMA_BALANCING(tsk) \
171 .numa_preferred_nid = -1, \
172 .numa_group = NULL, \
173 .numa_faults = NULL,
174#else
175# define INIT_NUMA_BALANCING(tsk)
176#endif
177
160/* 178/*
161 * INIT_TASK is used to set up the first task table, touch at 179 * INIT_TASK is used to set up the first task table, touch at
162 * your own risk!. Base=0, limit=0x1fffff (=2MB) 180 * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -224,9 +242,11 @@ extern struct task_group root_task_group;
224 INIT_FTRACE_GRAPH \ 242 INIT_FTRACE_GRAPH \
225 INIT_TRACE_RECURSION \ 243 INIT_TRACE_RECURSION \
226 INIT_TASK_RCU_PREEMPT(tsk) \ 244 INIT_TASK_RCU_PREEMPT(tsk) \
245 INIT_TASK_RCU_TASKS(tsk) \
227 INIT_CPUSET_SEQ(tsk) \ 246 INIT_CPUSET_SEQ(tsk) \
228 INIT_RT_MUTEXES(tsk) \ 247 INIT_RT_MUTEXES(tsk) \
229 INIT_VTIME(tsk) \ 248 INIT_VTIME(tsk) \
249 INIT_NUMA_BALANCING(tsk) \
230} 250}
231 251
232 252
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 83222cebd47b..c2d6082a1a4c 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -24,6 +24,7 @@ enum integrity_status {
24#ifdef CONFIG_INTEGRITY 24#ifdef CONFIG_INTEGRITY
25extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode); 25extern struct integrity_iint_cache *integrity_inode_get(struct inode *inode);
26extern void integrity_inode_free(struct inode *inode); 26extern void integrity_inode_free(struct inode *inode);
27extern void __init integrity_load_keys(void);
27 28
28#else 29#else
29static inline struct integrity_iint_cache * 30static inline struct integrity_iint_cache *
@@ -36,5 +37,10 @@ static inline void integrity_inode_free(struct inode *inode)
36{ 37{
37 return; 38 return;
38} 39}
40
41static inline void integrity_load_keys(void)
42{
43}
39#endif /* CONFIG_INTEGRITY */ 44#endif /* CONFIG_INTEGRITY */
45
40#endif /* _LINUX_INTEGRITY_H */ 46#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 698ad053d064..d9b05b5bf8c7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -193,11 +193,6 @@ extern void irq_wake_thread(unsigned int irq, void *dev_id);
193/* The following three functions are for the core kernel use only. */ 193/* The following three functions are for the core kernel use only. */
194extern void suspend_device_irqs(void); 194extern void suspend_device_irqs(void);
195extern void resume_device_irqs(void); 195extern void resume_device_irqs(void);
196#ifdef CONFIG_PM_SLEEP
197extern int check_wakeup_irqs(void);
198#else
199static inline int check_wakeup_irqs(void) { return 0; }
200#endif
201 196
202/** 197/**
203 * struct irq_affinity_notify - context for notification of IRQ affinity changes 198 * struct irq_affinity_notify - context for notification of IRQ affinity changes
@@ -561,12 +556,6 @@ static inline void tasklet_enable(struct tasklet_struct *t)
561 atomic_dec(&t->count); 556 atomic_dec(&t->count);
562} 557}
563 558
564static inline void tasklet_hi_enable(struct tasklet_struct *t)
565{
566 smp_mb__before_atomic();
567 atomic_dec(&t->count);
568}
569
570extern void tasklet_kill(struct tasklet_struct *t); 559extern void tasklet_kill(struct tasklet_struct *t);
571extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); 560extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
572extern void tasklet_init(struct tasklet_struct *t, 561extern void tasklet_init(struct tasklet_struct *t,
diff --git a/include/linux/io.h b/include/linux/io.h
index d5fc9b8d8b03..fa02e55e5a2e 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -61,9 +61,9 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
61#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) 61#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
62 62
63void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, 63void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
64 unsigned long size); 64 resource_size_t size);
65void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, 65void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
66 unsigned long size); 66 resource_size_t size);
67void devm_iounmap(struct device *dev, void __iomem *addr); 67void devm_iounmap(struct device *dev, void __iomem *addr);
68int check_signature(const volatile void __iomem *io_addr, 68int check_signature(const volatile void __iomem *io_addr,
69 const unsigned char *signature, int length); 69 const unsigned char *signature, int length);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 20f9a527922a..38daa453f2e5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,13 +21,15 @@
21 21
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/of.h>
24#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/scatterlist.h>
25#include <trace/events/iommu.h> 27#include <trace/events/iommu.h>
26 28
27#define IOMMU_READ (1 << 0) 29#define IOMMU_READ (1 << 0)
28#define IOMMU_WRITE (1 << 1) 30#define IOMMU_WRITE (1 << 1)
29#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ 31#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
30#define IOMMU_EXEC (1 << 3) 32#define IOMMU_NOEXEC (1 << 3)
31 33
32struct iommu_ops; 34struct iommu_ops;
33struct iommu_group; 35struct iommu_group;
@@ -57,8 +59,12 @@ struct iommu_domain {
57 struct iommu_domain_geometry geometry; 59 struct iommu_domain_geometry geometry;
58}; 60};
59 61
60#define IOMMU_CAP_CACHE_COHERENCY 0x1 62enum iommu_cap {
61#define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ 63 IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
64 transactions */
65 IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
66 IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
67};
62 68
63/* 69/*
64 * Following constraints are specifc to FSL_PAMUV1: 70 * Following constraints are specifc to FSL_PAMUV1:
@@ -80,6 +86,7 @@ enum iommu_attr {
80 DOMAIN_ATTR_FSL_PAMU_STASH, 86 DOMAIN_ATTR_FSL_PAMU_STASH,
81 DOMAIN_ATTR_FSL_PAMU_ENABLE, 87 DOMAIN_ATTR_FSL_PAMU_ENABLE,
82 DOMAIN_ATTR_FSL_PAMUV1, 88 DOMAIN_ATTR_FSL_PAMUV1,
89 DOMAIN_ATTR_NESTING, /* two stages of translation */
83 DOMAIN_ATTR_MAX, 90 DOMAIN_ATTR_MAX,
84}; 91};
85 92
@@ -93,15 +100,19 @@ enum iommu_attr {
93 * @detach_dev: detach device from an iommu domain 100 * @detach_dev: detach device from an iommu domain
94 * @map: map a physically contiguous memory region to an iommu domain 101 * @map: map a physically contiguous memory region to an iommu domain
95 * @unmap: unmap a physically contiguous memory region from an iommu domain 102 * @unmap: unmap a physically contiguous memory region from an iommu domain
103 * @map_sg: map a scatter-gather list of physically contiguous memory chunks
104 * to an iommu domain
96 * @iova_to_phys: translate iova to physical address 105 * @iova_to_phys: translate iova to physical address
97 * @domain_has_cap: domain capabilities query
98 * @add_device: add device to iommu grouping 106 * @add_device: add device to iommu grouping
99 * @remove_device: remove device from iommu grouping 107 * @remove_device: remove device from iommu grouping
100 * @domain_get_attr: Query domain attributes 108 * @domain_get_attr: Query domain attributes
101 * @domain_set_attr: Change domain attributes 109 * @domain_set_attr: Change domain attributes
110 * @of_xlate: add OF master IDs to iommu grouping
102 * @pgsize_bitmap: bitmap of supported page sizes 111 * @pgsize_bitmap: bitmap of supported page sizes
112 * @priv: per-instance data private to the iommu driver
103 */ 113 */
104struct iommu_ops { 114struct iommu_ops {
115 bool (*capable)(enum iommu_cap);
105 int (*domain_init)(struct iommu_domain *domain); 116 int (*domain_init)(struct iommu_domain *domain);
106 void (*domain_destroy)(struct iommu_domain *domain); 117 void (*domain_destroy)(struct iommu_domain *domain);
107 int (*attach_dev)(struct iommu_domain *domain, struct device *dev); 118 int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
@@ -110,9 +121,9 @@ struct iommu_ops {
110 phys_addr_t paddr, size_t size, int prot); 121 phys_addr_t paddr, size_t size, int prot);
111 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, 122 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
112 size_t size); 123 size_t size);
124 size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
125 struct scatterlist *sg, unsigned int nents, int prot);
113 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); 126 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
114 int (*domain_has_cap)(struct iommu_domain *domain,
115 unsigned long cap);
116 int (*add_device)(struct device *dev); 127 int (*add_device)(struct device *dev);
117 void (*remove_device)(struct device *dev); 128 void (*remove_device)(struct device *dev);
118 int (*device_group)(struct device *dev, unsigned int *groupid); 129 int (*device_group)(struct device *dev, unsigned int *groupid);
@@ -130,7 +141,12 @@ struct iommu_ops {
130 /* Get the numer of window per domain */ 141 /* Get the numer of window per domain */
131 u32 (*domain_get_windows)(struct iommu_domain *domain); 142 u32 (*domain_get_windows)(struct iommu_domain *domain);
132 143
144#ifdef CONFIG_OF_IOMMU
145 int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
146#endif
147
133 unsigned long pgsize_bitmap; 148 unsigned long pgsize_bitmap;
149 void *priv;
134}; 150};
135 151
136#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ 152#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
@@ -142,6 +158,7 @@ struct iommu_ops {
142 158
143extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); 159extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
144extern bool iommu_present(struct bus_type *bus); 160extern bool iommu_present(struct bus_type *bus);
161extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
145extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); 162extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
146extern struct iommu_group *iommu_group_get_by_id(int id); 163extern struct iommu_group *iommu_group_get_by_id(int id);
147extern void iommu_domain_free(struct iommu_domain *domain); 164extern void iommu_domain_free(struct iommu_domain *domain);
@@ -153,9 +170,10 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
153 phys_addr_t paddr, size_t size, int prot); 170 phys_addr_t paddr, size_t size, int prot);
154extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, 171extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
155 size_t size); 172 size_t size);
173extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
174 struct scatterlist *sg,unsigned int nents,
175 int prot);
156extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); 176extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
157extern int iommu_domain_has_cap(struct iommu_domain *domain,
158 unsigned long cap);
159extern void iommu_set_fault_handler(struct iommu_domain *domain, 177extern void iommu_set_fault_handler(struct iommu_domain *domain,
160 iommu_fault_handler_t handler, void *token); 178 iommu_fault_handler_t handler, void *token);
161 179
@@ -240,6 +258,13 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
240 return ret; 258 return ret;
241} 259}
242 260
261static inline size_t iommu_map_sg(struct iommu_domain *domain,
262 unsigned long iova, struct scatterlist *sg,
263 unsigned int nents, int prot)
264{
265 return domain->ops->map_sg(domain, iova, sg, nents, prot);
266}
267
243#else /* CONFIG_IOMMU_API */ 268#else /* CONFIG_IOMMU_API */
244 269
245struct iommu_ops {}; 270struct iommu_ops {};
@@ -250,6 +275,11 @@ static inline bool iommu_present(struct bus_type *bus)
250 return false; 275 return false;
251} 276}
252 277
278static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
279{
280 return false;
281}
282
253static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) 283static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
254{ 284{
255 return NULL; 285 return NULL;
@@ -287,6 +317,13 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
287 return -ENODEV; 317 return -ENODEV;
288} 318}
289 319
320static inline size_t iommu_map_sg(struct iommu_domain *domain,
321 unsigned long iova, struct scatterlist *sg,
322 unsigned int nents, int prot)
323{
324 return -ENODEV;
325}
326
290static inline int iommu_domain_window_enable(struct iommu_domain *domain, 327static inline int iommu_domain_window_enable(struct iommu_domain *domain,
291 u32 wnd_nr, phys_addr_t paddr, 328 u32 wnd_nr, phys_addr_t paddr,
292 u64 size, int prot) 329 u64 size, int prot)
@@ -304,12 +341,6 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_ad
304 return 0; 341 return 0;
305} 342}
306 343
307static inline int iommu_domain_has_cap(struct iommu_domain *domain,
308 unsigned long cap)
309{
310 return 0;
311}
312
313static inline void iommu_set_fault_handler(struct iommu_domain *domain, 344static inline void iommu_set_fault_handler(struct iommu_domain *domain,
314 iommu_fault_handler_t handler, void *token) 345 iommu_fault_handler_t handler, void *token)
315{ 346{
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 142ec544167c..2c5250222278 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -215,6 +215,11 @@ static inline int __deprecated check_region(resource_size_t s,
215 215
216/* Wrappers for managed devices */ 216/* Wrappers for managed devices */
217struct device; 217struct device;
218
219extern int devm_request_resource(struct device *dev, struct resource *root,
220 struct resource *new);
221extern void devm_release_resource(struct device *dev, struct resource *new);
222
218#define devm_request_region(dev,start,n,name) \ 223#define devm_request_region(dev,start,n,name) \
219 __devm_request_region(dev, &ioport_resource, (start), (n), (name)) 224 __devm_request_region(dev, &ioport_resource, (start), (n), (name))
220#define devm_request_mem_region(dev,start,n,name) \ 225#define devm_request_mem_region(dev,start,n,name) \
diff --git a/include/linux/ipack.h b/include/linux/ipack.h
index 1888e06ddf64..8bddc3fbdddf 100644
--- a/include/linux/ipack.h
+++ b/include/linux/ipack.h
@@ -172,6 +172,7 @@ struct ipack_bus_ops {
172 * @ops: bus operations for the mezzanine drivers 172 * @ops: bus operations for the mezzanine drivers
173 */ 173 */
174struct ipack_bus_device { 174struct ipack_bus_device {
175 struct module *owner;
175 struct device *parent; 176 struct device *parent;
176 int slots; 177 int slots;
177 int bus_nr; 178 int bus_nr;
@@ -189,7 +190,8 @@ struct ipack_bus_device {
189 * available bus device in ipack. 190 * available bus device in ipack.
190 */ 191 */
191struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, 192struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots,
192 const struct ipack_bus_ops *ops); 193 const struct ipack_bus_ops *ops,
194 struct module *owner);
193 195
194/** 196/**
195 * ipack_bus_unregister -- unregister an ipack bus 197 * ipack_bus_unregister -- unregister an ipack bus
@@ -265,3 +267,23 @@ void ipack_put_device(struct ipack_device *dev);
265 .format = (_format), \ 267 .format = (_format), \
266 .vendor = (vend), \ 268 .vendor = (vend), \
267 .device = (dev) 269 .device = (dev)
270
271/**
272 * ipack_get_carrier - it increase the carrier ref. counter of
273 * the carrier module
274 * @dev: mezzanine device which wants to get the carrier
275 */
276static inline int ipack_get_carrier(struct ipack_device *dev)
277{
278 return try_module_get(dev->bus->owner);
279}
280
281/**
282 * ipack_get_carrier - it decrease the carrier ref. counter of
283 * the carrier module
284 * @dev: mezzanine device which wants to get the carrier
285 */
286static inline void ipack_put_carrier(struct ipack_device *dev)
287{
288 module_put(dev->bus->owner);
289}
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 35e7eca4e33b..1eee6bcfcf76 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -6,15 +6,7 @@
6#include <linux/rwsem.h> 6#include <linux/rwsem.h>
7#include <linux/notifier.h> 7#include <linux/notifier.h>
8#include <linux/nsproxy.h> 8#include <linux/nsproxy.h>
9 9#include <linux/ns_common.h>
10/*
11 * ipc namespace events
12 */
13#define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */
14#define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */
15#define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */
16
17#define IPCNS_CALLBACK_PRI 0
18 10
19struct user_namespace; 11struct user_namespace;
20 12
@@ -38,7 +30,6 @@ struct ipc_namespace {
38 unsigned int msg_ctlmni; 30 unsigned int msg_ctlmni;
39 atomic_t msg_bytes; 31 atomic_t msg_bytes;
40 atomic_t msg_hdrs; 32 atomic_t msg_hdrs;
41 int auto_msgmni;
42 33
43 size_t shm_ctlmax; 34 size_t shm_ctlmax;
44 size_t shm_ctlall; 35 size_t shm_ctlall;
@@ -68,7 +59,7 @@ struct ipc_namespace {
68 /* user_ns which owns the ipc ns */ 59 /* user_ns which owns the ipc ns */
69 struct user_namespace *user_ns; 60 struct user_namespace *user_ns;
70 61
71 unsigned int proc_inum; 62 struct ns_common ns;
72}; 63};
73 64
74extern struct ipc_namespace init_ipc_ns; 65extern struct ipc_namespace init_ipc_ns;
@@ -77,18 +68,8 @@ extern atomic_t nr_ipc_ns;
77extern spinlock_t mq_lock; 68extern spinlock_t mq_lock;
78 69
79#ifdef CONFIG_SYSVIPC 70#ifdef CONFIG_SYSVIPC
80extern int register_ipcns_notifier(struct ipc_namespace *);
81extern int cond_register_ipcns_notifier(struct ipc_namespace *);
82extern void unregister_ipcns_notifier(struct ipc_namespace *);
83extern int ipcns_notify(unsigned long);
84extern void shm_destroy_orphaned(struct ipc_namespace *ns); 71extern void shm_destroy_orphaned(struct ipc_namespace *ns);
85#else /* CONFIG_SYSVIPC */ 72#else /* CONFIG_SYSVIPC */
86static inline int register_ipcns_notifier(struct ipc_namespace *ns)
87{ return 0; }
88static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns)
89{ return 0; }
90static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { }
91static inline int ipcns_notify(unsigned long l) { return 0; }
92static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {} 73static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
93#endif /* CONFIG_SYSVIPC */ 74#endif /* CONFIG_SYSVIPC */
94 75
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 76d2acbfa7c6..838dbfa3c331 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -37,6 +37,7 @@
37 37
38#include <linux/list.h> 38#include <linux/list.h>
39#include <linux/proc_fs.h> 39#include <linux/proc_fs.h>
40#include <linux/acpi.h> /* For acpi_handle */
40 41
41struct module; 42struct module;
42struct device; 43struct device;
@@ -278,15 +279,18 @@ enum ipmi_addr_src {
278 SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, 279 SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
279 SI_PCI, SI_DEVICETREE, SI_DEFAULT 280 SI_PCI, SI_DEVICETREE, SI_DEFAULT
280}; 281};
282const char *ipmi_addr_src_to_str(enum ipmi_addr_src src);
281 283
282union ipmi_smi_info_union { 284union ipmi_smi_info_union {
285#ifdef CONFIG_ACPI
283 /* 286 /*
284 * the acpi_info element is defined for the SI_ACPI 287 * the acpi_info element is defined for the SI_ACPI
285 * address type 288 * address type
286 */ 289 */
287 struct { 290 struct {
288 void *acpi_handle; 291 acpi_handle acpi_handle;
289 } acpi_info; 292 } acpi_info;
293#endif
290}; 294};
291 295
292struct ipmi_smi_info { 296struct ipmi_smi_info {
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index bd349240d50e..0b1e569f5ff5 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -98,12 +98,11 @@ struct ipmi_smi_handlers {
98 operation is not allowed to fail. If an error occurs, it 98 operation is not allowed to fail. If an error occurs, it
99 should report back the error in a received message. It may 99 should report back the error in a received message. It may
100 do this in the current call context, since no write locks 100 do this in the current call context, since no write locks
101 are held when this is run. If the priority is > 0, the 101 are held when this is run. Message are delivered one at
102 message will go into a high-priority queue and be sent 102 a time by the message handler, a new message will not be
103 first. Otherwise, it goes into a normal-priority queue. */ 103 delivered until the previous message is returned. */
104 void (*sender)(void *send_info, 104 void (*sender)(void *send_info,
105 struct ipmi_smi_msg *msg, 105 struct ipmi_smi_msg *msg);
106 int priority);
107 106
108 /* Called by the upper layer to request that we try to get 107 /* Called by the upper layer to request that we try to get
109 events from the BMC we are attached to. */ 108 events from the BMC we are attached to. */
@@ -212,7 +211,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
212 void *send_info, 211 void *send_info,
213 struct ipmi_device_id *device_id, 212 struct ipmi_device_id *device_id,
214 struct device *dev, 213 struct device *dev,
215 const char *sysfs_name,
216 unsigned char slave_addr); 214 unsigned char slave_addr);
217 215
218/* 216/*
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ff560537dd61..c694e7baa621 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -42,6 +42,7 @@ struct ipv6_devconf {
42 __s32 accept_ra_from_local; 42 __s32 accept_ra_from_local;
43#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 43#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
44 __s32 optimistic_dad; 44 __s32 optimistic_dad;
45 __s32 use_optimistic;
45#endif 46#endif
46#ifdef CONFIG_IPV6_MROUTE 47#ifdef CONFIG_IPV6_MROUTE
47 __s32 mc_forwarding; 48 __s32 mc_forwarding;
@@ -316,14 +317,4 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
316#define tcp_twsk_ipv6only(__sk) 0 317#define tcp_twsk_ipv6only(__sk) 0
317#define inet_v6_ipv6only(__sk) 0 318#define inet_v6_ipv6only(__sk) 0
318#endif /* IS_ENABLED(CONFIG_IPV6) */ 319#endif /* IS_ENABLED(CONFIG_IPV6) */
319
320#define INET6_MATCH(__sk, __net, __saddr, __daddr, __ports, __dif) \
321 (((__sk)->sk_portpair == (__ports)) && \
322 ((__sk)->sk_family == AF_INET6) && \
323 ipv6_addr_equal(&(__sk)->sk_v6_daddr, (__saddr)) && \
324 ipv6_addr_equal(&(__sk)->sk_v6_rcv_saddr, (__daddr)) && \
325 (!(__sk)->sk_bound_dev_if || \
326 ((__sk)->sk_bound_dev_if == (__dif))) && \
327 net_eq(sock_net(__sk), (__net)))
328
329#endif /* _IPV6_H */ 320#endif /* _IPV6_H */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 62af59242ddc..d09ec7a1243e 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -15,11 +15,13 @@
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/cpumask.h> 16#include <linux/cpumask.h>
17#include <linux/gfp.h> 17#include <linux/gfp.h>
18#include <linux/irqhandler.h>
18#include <linux/irqreturn.h> 19#include <linux/irqreturn.h>
19#include <linux/irqnr.h> 20#include <linux/irqnr.h>
20#include <linux/errno.h> 21#include <linux/errno.h>
21#include <linux/topology.h> 22#include <linux/topology.h>
22#include <linux/wait.h> 23#include <linux/wait.h>
24#include <linux/io.h>
23 25
24#include <asm/irq.h> 26#include <asm/irq.h>
25#include <asm/ptrace.h> 27#include <asm/ptrace.h>
@@ -27,11 +29,7 @@
27 29
28struct seq_file; 30struct seq_file;
29struct module; 31struct module;
30struct irq_desc; 32struct msi_msg;
31struct irq_data;
32typedef void (*irq_flow_handler_t)(unsigned int irq,
33 struct irq_desc *desc);
34typedef void (*irq_preflow_handler_t)(struct irq_data *data);
35 33
36/* 34/*
37 * IRQ line status. 35 * IRQ line status.
@@ -113,10 +111,14 @@ enum {
113 * 111 *
114 * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity 112 * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity
115 * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity 113 * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity
114 * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to
115 * support stacked irqchips, which indicates skipping
116 * all descendent irqchips.
116 */ 117 */
117enum { 118enum {
118 IRQ_SET_MASK_OK = 0, 119 IRQ_SET_MASK_OK = 0,
119 IRQ_SET_MASK_OK_NOCOPY, 120 IRQ_SET_MASK_OK_NOCOPY,
121 IRQ_SET_MASK_OK_DONE,
120}; 122};
121 123
122struct msi_desc; 124struct msi_desc;
@@ -133,6 +135,8 @@ struct irq_domain;
133 * @chip: low level interrupt hardware access 135 * @chip: low level interrupt hardware access
134 * @domain: Interrupt translation domain; responsible for mapping 136 * @domain: Interrupt translation domain; responsible for mapping
135 * between hwirq number and linux irq number. 137 * between hwirq number and linux irq number.
138 * @parent_data: pointer to parent struct irq_data to support hierarchy
139 * irq_domain
136 * @handler_data: per-IRQ data for the irq_chip methods 140 * @handler_data: per-IRQ data for the irq_chip methods
137 * @chip_data: platform-specific per-chip private data for the chip 141 * @chip_data: platform-specific per-chip private data for the chip
138 * methods, to allow shared chip implementations 142 * methods, to allow shared chip implementations
@@ -151,6 +155,9 @@ struct irq_data {
151 unsigned int state_use_accessors; 155 unsigned int state_use_accessors;
152 struct irq_chip *chip; 156 struct irq_chip *chip;
153 struct irq_domain *domain; 157 struct irq_domain *domain;
158#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
159 struct irq_data *parent_data;
160#endif
154 void *handler_data; 161 void *handler_data;
155 void *chip_data; 162 void *chip_data;
156 struct msi_desc *msi_desc; 163 struct msi_desc *msi_desc;
@@ -173,6 +180,7 @@ struct irq_data {
173 * IRQD_IRQ_DISABLED - Disabled state of the interrupt 180 * IRQD_IRQ_DISABLED - Disabled state of the interrupt
174 * IRQD_IRQ_MASKED - Masked state of the interrupt 181 * IRQD_IRQ_MASKED - Masked state of the interrupt
175 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt 182 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt
183 * IRQD_WAKEUP_ARMED - Wakeup mode armed
176 */ 184 */
177enum { 185enum {
178 IRQD_TRIGGER_MASK = 0xf, 186 IRQD_TRIGGER_MASK = 0xf,
@@ -186,6 +194,7 @@ enum {
186 IRQD_IRQ_DISABLED = (1 << 16), 194 IRQD_IRQ_DISABLED = (1 << 16),
187 IRQD_IRQ_MASKED = (1 << 17), 195 IRQD_IRQ_MASKED = (1 << 17),
188 IRQD_IRQ_INPROGRESS = (1 << 18), 196 IRQD_IRQ_INPROGRESS = (1 << 18),
197 IRQD_WAKEUP_ARMED = (1 << 19),
189}; 198};
190 199
191static inline bool irqd_is_setaffinity_pending(struct irq_data *d) 200static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -257,6 +266,12 @@ static inline bool irqd_irq_inprogress(struct irq_data *d)
257 return d->state_use_accessors & IRQD_IRQ_INPROGRESS; 266 return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
258} 267}
259 268
269static inline bool irqd_is_wakeup_armed(struct irq_data *d)
270{
271 return d->state_use_accessors & IRQD_WAKEUP_ARMED;
272}
273
274
260/* 275/*
261 * Functions for chained handlers which can be enabled/disabled by the 276 * Functions for chained handlers which can be enabled/disabled by the
262 * standard disable_irq/enable_irq calls. Must be called with 277 * standard disable_irq/enable_irq calls. Must be called with
@@ -307,6 +322,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
307 * any other callback related to this irq 322 * any other callback related to this irq
308 * @irq_release_resources: optional to release resources acquired with 323 * @irq_release_resources: optional to release resources acquired with
309 * irq_request_resources 324 * irq_request_resources
325 * @irq_compose_msi_msg: optional to compose message content for MSI
326 * @irq_write_msi_msg: optional to write message content for MSI
310 * @flags: chip specific flags 327 * @flags: chip specific flags
311 */ 328 */
312struct irq_chip { 329struct irq_chip {
@@ -343,6 +360,9 @@ struct irq_chip {
343 int (*irq_request_resources)(struct irq_data *data); 360 int (*irq_request_resources)(struct irq_data *data);
344 void (*irq_release_resources)(struct irq_data *data); 361 void (*irq_release_resources)(struct irq_data *data);
345 362
363 void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg);
364 void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg);
365
346 unsigned long flags; 366 unsigned long flags;
347}; 367};
348 368
@@ -430,6 +450,18 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc);
430extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); 450extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
431extern void handle_nested_irq(unsigned int irq); 451extern void handle_nested_irq(unsigned int irq);
432 452
453extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
454#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
455extern void irq_chip_ack_parent(struct irq_data *data);
456extern int irq_chip_retrigger_hierarchy(struct irq_data *data);
457extern void irq_chip_mask_parent(struct irq_data *data);
458extern void irq_chip_unmask_parent(struct irq_data *data);
459extern void irq_chip_eoi_parent(struct irq_data *data);
460extern int irq_chip_set_affinity_parent(struct irq_data *data,
461 const struct cpumask *dest,
462 bool force);
463#endif
464
433/* Handling of unhandled and spurious interrupts: */ 465/* Handling of unhandled and spurious interrupts: */
434extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 466extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
435 irqreturn_t action_ret); 467 irqreturn_t action_ret);
@@ -631,13 +663,6 @@ void arch_teardown_hwirq(unsigned int irq);
631void irq_init_desc(unsigned int irq); 663void irq_init_desc(unsigned int irq);
632#endif 664#endif
633 665
634#ifndef irq_reg_writel
635# define irq_reg_writel(val, addr) writel(val, addr)
636#endif
637#ifndef irq_reg_readl
638# define irq_reg_readl(addr) readl(addr)
639#endif
640
641/** 666/**
642 * struct irq_chip_regs - register offsets for struct irq_gci 667 * struct irq_chip_regs - register offsets for struct irq_gci
643 * @enable: Enable register offset to reg_base 668 * @enable: Enable register offset to reg_base
@@ -684,6 +709,8 @@ struct irq_chip_type {
684 * struct irq_chip_generic - Generic irq chip data structure 709 * struct irq_chip_generic - Generic irq chip data structure
685 * @lock: Lock to protect register and cache data access 710 * @lock: Lock to protect register and cache data access
686 * @reg_base: Register base address (virtual) 711 * @reg_base: Register base address (virtual)
712 * @reg_readl: Alternate I/O accessor (defaults to readl if NULL)
713 * @reg_writel: Alternate I/O accessor (defaults to writel if NULL)
687 * @irq_base: Interrupt base nr for this chip 714 * @irq_base: Interrupt base nr for this chip
688 * @irq_cnt: Number of interrupts handled by this chip 715 * @irq_cnt: Number of interrupts handled by this chip
689 * @mask_cache: Cached mask register shared between all chip types 716 * @mask_cache: Cached mask register shared between all chip types
@@ -708,6 +735,8 @@ struct irq_chip_type {
708struct irq_chip_generic { 735struct irq_chip_generic {
709 raw_spinlock_t lock; 736 raw_spinlock_t lock;
710 void __iomem *reg_base; 737 void __iomem *reg_base;
738 u32 (*reg_readl)(void __iomem *addr);
739 void (*reg_writel)(u32 val, void __iomem *addr);
711 unsigned int irq_base; 740 unsigned int irq_base;
712 unsigned int irq_cnt; 741 unsigned int irq_cnt;
713 u32 mask_cache; 742 u32 mask_cache;
@@ -732,12 +761,14 @@ struct irq_chip_generic {
732 * the parent irq. Usually GPIO implementations 761 * the parent irq. Usually GPIO implementations
733 * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private 762 * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private
734 * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask 763 * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask
764 * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE)
735 */ 765 */
736enum irq_gc_flags { 766enum irq_gc_flags {
737 IRQ_GC_INIT_MASK_CACHE = 1 << 0, 767 IRQ_GC_INIT_MASK_CACHE = 1 << 0,
738 IRQ_GC_INIT_NESTED_LOCK = 1 << 1, 768 IRQ_GC_INIT_NESTED_LOCK = 1 << 1,
739 IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, 769 IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2,
740 IRQ_GC_NO_MASK = 1 << 3, 770 IRQ_GC_NO_MASK = 1 << 3,
771 IRQ_GC_BE_IO = 1 << 4,
741}; 772};
742 773
743/* 774/*
@@ -813,4 +844,22 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
813static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } 844static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
814#endif 845#endif
815 846
847static inline void irq_reg_writel(struct irq_chip_generic *gc,
848 u32 val, int reg_offset)
849{
850 if (gc->reg_writel)
851 gc->reg_writel(val, gc->reg_base + reg_offset);
852 else
853 writel(val, gc->reg_base + reg_offset);
854}
855
856static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
857 int reg_offset)
858{
859 if (gc->reg_readl)
860 return gc->reg_readl(gc->reg_base + reg_offset);
861 else
862 return readl(gc->reg_base + reg_offset);
863}
864
816#endif /* _LINUX_IRQ_H */ 865#endif /* _LINUX_IRQ_H */
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index bf9422c3aefe..bf3fe719c7ce 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -39,9 +39,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu);
39#endif 39#endif
40 40
41void irq_work_run(void); 41void irq_work_run(void);
42void irq_work_tick(void);
42void irq_work_sync(struct irq_work *work); 43void irq_work_sync(struct irq_work *work);
43 44
44#ifdef CONFIG_IRQ_WORK 45#ifdef CONFIG_IRQ_WORK
46#include <asm/irq_work.h>
47
45bool irq_work_needs_cpu(void); 48bool irq_work_needs_cpu(void);
46#else 49#else
47static inline bool irq_work_needs_cpu(void) { return false; } 50static inline bool irq_work_needs_cpu(void) { return false; }
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 03a4ea37ba86..1e8b0cf30792 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -49,6 +49,10 @@
49#define GICD_CTLR_ENABLE_G1A (1U << 1) 49#define GICD_CTLR_ENABLE_G1A (1U << 1)
50#define GICD_CTLR_ENABLE_G1 (1U << 0) 50#define GICD_CTLR_ENABLE_G1 (1U << 0)
51 51
52#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
53#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
54#define GICD_TYPER_LPIS (1U << 17)
55
52#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) 56#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
53#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) 57#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
54 58
@@ -76,9 +80,27 @@
76#define GICR_MOVALLR 0x0110 80#define GICR_MOVALLR 0x0110
77#define GICR_PIDR2 GICD_PIDR2 81#define GICR_PIDR2 GICD_PIDR2
78 82
83#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
84
85#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
86
79#define GICR_WAKER_ProcessorSleep (1U << 1) 87#define GICR_WAKER_ProcessorSleep (1U << 1)
80#define GICR_WAKER_ChildrenAsleep (1U << 2) 88#define GICR_WAKER_ChildrenAsleep (1U << 2)
81 89
90#define GICR_PROPBASER_NonShareable (0U << 10)
91#define GICR_PROPBASER_InnerShareable (1U << 10)
92#define GICR_PROPBASER_OuterShareable (2U << 10)
93#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10)
94#define GICR_PROPBASER_nCnB (0U << 7)
95#define GICR_PROPBASER_nC (1U << 7)
96#define GICR_PROPBASER_RaWt (2U << 7)
97#define GICR_PROPBASER_RaWb (3U << 7)
98#define GICR_PROPBASER_WaWt (4U << 7)
99#define GICR_PROPBASER_WaWb (5U << 7)
100#define GICR_PROPBASER_RaWaWt (6U << 7)
101#define GICR_PROPBASER_RaWaWb (7U << 7)
102#define GICR_PROPBASER_IDBITS_MASK (0x1f)
103
82/* 104/*
83 * Re-Distributor registers, offsets from SGI_base 105 * Re-Distributor registers, offsets from SGI_base
84 */ 106 */
@@ -91,9 +113,93 @@
91#define GICR_IPRIORITYR0 GICD_IPRIORITYR 113#define GICR_IPRIORITYR0 GICD_IPRIORITYR
92#define GICR_ICFGR0 GICD_ICFGR 114#define GICR_ICFGR0 GICD_ICFGR
93 115
116#define GICR_TYPER_PLPIS (1U << 0)
94#define GICR_TYPER_VLPIS (1U << 1) 117#define GICR_TYPER_VLPIS (1U << 1)
95#define GICR_TYPER_LAST (1U << 4) 118#define GICR_TYPER_LAST (1U << 4)
96 119
120#define LPI_PROP_GROUP1 (1 << 1)
121#define LPI_PROP_ENABLED (1 << 0)
122
123/*
124 * ITS registers, offsets from ITS_base
125 */
126#define GITS_CTLR 0x0000
127#define GITS_IIDR 0x0004
128#define GITS_TYPER 0x0008
129#define GITS_CBASER 0x0080
130#define GITS_CWRITER 0x0088
131#define GITS_CREADR 0x0090
132#define GITS_BASER 0x0100
133#define GITS_PIDR2 GICR_PIDR2
134
135#define GITS_TRANSLATER 0x10040
136
137#define GITS_TYPER_PTA (1UL << 19)
138
139#define GITS_CBASER_VALID (1UL << 63)
140#define GITS_CBASER_nCnB (0UL << 59)
141#define GITS_CBASER_nC (1UL << 59)
142#define GITS_CBASER_RaWt (2UL << 59)
143#define GITS_CBASER_RaWb (3UL << 59)
144#define GITS_CBASER_WaWt (4UL << 59)
145#define GITS_CBASER_WaWb (5UL << 59)
146#define GITS_CBASER_RaWaWt (6UL << 59)
147#define GITS_CBASER_RaWaWb (7UL << 59)
148#define GITS_CBASER_NonShareable (0UL << 10)
149#define GITS_CBASER_InnerShareable (1UL << 10)
150#define GITS_CBASER_OuterShareable (2UL << 10)
151#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10)
152
153#define GITS_BASER_NR_REGS 8
154
155#define GITS_BASER_VALID (1UL << 63)
156#define GITS_BASER_nCnB (0UL << 59)
157#define GITS_BASER_nC (1UL << 59)
158#define GITS_BASER_RaWt (2UL << 59)
159#define GITS_BASER_RaWb (3UL << 59)
160#define GITS_BASER_WaWt (4UL << 59)
161#define GITS_BASER_WaWb (5UL << 59)
162#define GITS_BASER_RaWaWt (6UL << 59)
163#define GITS_BASER_RaWaWb (7UL << 59)
164#define GITS_BASER_TYPE_SHIFT (56)
165#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
166#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
167#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
168#define GITS_BASER_NonShareable (0UL << 10)
169#define GITS_BASER_InnerShareable (1UL << 10)
170#define GITS_BASER_OuterShareable (2UL << 10)
171#define GITS_BASER_SHAREABILITY_SHIFT (10)
172#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT)
173#define GITS_BASER_PAGE_SIZE_SHIFT (8)
174#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
175#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
176#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
177#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
178
179#define GITS_BASER_TYPE_NONE 0
180#define GITS_BASER_TYPE_DEVICE 1
181#define GITS_BASER_TYPE_VCPU 2
182#define GITS_BASER_TYPE_CPU 3
183#define GITS_BASER_TYPE_COLLECTION 4
184#define GITS_BASER_TYPE_RESERVED5 5
185#define GITS_BASER_TYPE_RESERVED6 6
186#define GITS_BASER_TYPE_RESERVED7 7
187
188/*
189 * ITS commands
190 */
191#define GITS_CMD_MAPD 0x08
192#define GITS_CMD_MAPC 0x09
193#define GITS_CMD_MAPVI 0x0a
194#define GITS_CMD_MOVI 0x01
195#define GITS_CMD_DISCARD 0x0f
196#define GITS_CMD_INV 0x0c
197#define GITS_CMD_MOVALL 0x0e
198#define GITS_CMD_INVALL 0x0d
199#define GITS_CMD_INT 0x03
200#define GITS_CMD_CLEAR 0x04
201#define GITS_CMD_SYNC 0x05
202
97/* 203/*
98 * CPU interface registers 204 * CPU interface registers
99 */ 205 */
@@ -189,12 +295,34 @@
189 295
190#include <linux/stringify.h> 296#include <linux/stringify.h>
191 297
298/*
299 * We need a value to serve as a irq-type for LPIs. Choose one that will
300 * hopefully pique the interest of the reviewer.
301 */
302#define GIC_IRQ_TYPE_LPI 0xa110c8ed
303
304struct rdists {
305 struct {
306 void __iomem *rd_base;
307 struct page *pend_page;
308 phys_addr_t phys_base;
309 } __percpu *rdist;
310 struct page *prop_page;
311 int id_bits;
312 u64 flags;
313};
314
192static inline void gic_write_eoir(u64 irq) 315static inline void gic_write_eoir(u64 irq)
193{ 316{
194 asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); 317 asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
195 isb(); 318 isb();
196} 319}
197 320
321struct irq_domain;
322int its_cpu_init(void);
323int its_init(struct device_node *node, struct rdists *rdists,
324 struct irq_domain *domain);
325
198#endif 326#endif
199 327
200#endif 328#endif
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 45e2d8c15bd2..71d706d5f169 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -21,7 +21,11 @@
21#define GIC_CPU_ACTIVEPRIO 0xd0 21#define GIC_CPU_ACTIVEPRIO 0xd0
22#define GIC_CPU_IDENT 0xfc 22#define GIC_CPU_IDENT 0xfc
23 23
24#define GICC_ENABLE 0x1
25#define GICC_INT_PRI_THRESHOLD 0xf0
24#define GICC_IAR_INT_ID_MASK 0x3ff 26#define GICC_IAR_INT_ID_MASK 0x3ff
27#define GICC_INT_SPURIOUS 1023
28#define GICC_DIS_BYPASS_MASK 0x1e0
25 29
26#define GIC_DIST_CTRL 0x000 30#define GIC_DIST_CTRL 0x000
27#define GIC_DIST_CTR 0x004 31#define GIC_DIST_CTR 0x004
@@ -39,6 +43,18 @@
39#define GIC_DIST_SGI_PENDING_CLEAR 0xf10 43#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
40#define GIC_DIST_SGI_PENDING_SET 0xf20 44#define GIC_DIST_SGI_PENDING_SET 0xf20
41 45
46#define GICD_ENABLE 0x1
47#define GICD_DISABLE 0x0
48#define GICD_INT_ACTLOW_LVLTRIG 0x0
49#define GICD_INT_EN_CLR_X32 0xffffffff
50#define GICD_INT_EN_SET_SGI 0x0000ffff
51#define GICD_INT_EN_CLR_PPI 0xffff0000
52#define GICD_INT_DEF_PRI 0xa0
53#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
54 (GICD_INT_DEF_PRI << 16) |\
55 (GICD_INT_DEF_PRI << 8) |\
56 GICD_INT_DEF_PRI)
57
42#define GICH_HCR 0x0 58#define GICH_HCR 0x0
43#define GICH_VTR 0x4 59#define GICH_VTR 0x4
44#define GICH_VMCR 0x8 60#define GICH_VMCR 0x8
@@ -75,6 +91,8 @@
75 91
76#ifndef __ASSEMBLY__ 92#ifndef __ASSEMBLY__
77 93
94#include <linux/irqdomain.h>
95
78struct device_node; 96struct device_node;
79 97
80extern struct irq_chip gic_arch_extn; 98extern struct irq_chip gic_arch_extn;
@@ -90,6 +108,8 @@ static inline void gic_init(unsigned int nr, int start,
90 gic_init_bases(nr, start, dist, cpu, 0, NULL); 108 gic_init_bases(nr, start, dist, cpu, 0, NULL);
91} 109}
92 110
111int gicv2m_of_init(struct device_node *node, struct irq_domain *parent);
112
93void gic_send_sgi(unsigned int cpu_id, unsigned int irq); 113void gic_send_sgi(unsigned int cpu_id, unsigned int irq);
94int gic_get_cpu_id(unsigned int cpu); 114int gic_get_cpu_id(unsigned int cpu);
95void gic_migrate_target(unsigned int new_cpu_id); 115void gic_migrate_target(unsigned int new_cpu_id);
diff --git a/include/linux/irqchip/irq-omap-intc.h b/include/linux/irqchip/irq-omap-intc.h
new file mode 100644
index 000000000000..e06b370cfc0d
--- /dev/null
+++ b/include/linux/irqchip/irq-omap-intc.h
@@ -0,0 +1,32 @@
1/**
2 * irq-omap-intc.h - INTC Idle Functions
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
5 *
6 * Author: Felipe Balbi <balbi@ti.com>
7 *
8 * This program is free software: you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 of
10 * the License as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#ifndef __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
19#define __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H
20
21void omap2_init_irq(void);
22void omap3_init_irq(void);
23void ti81xx_init_irq(void);
24
25int omap_irq_pending(void);
26void omap_intc_save_context(void);
27void omap_intc_restore_context(void);
28void omap3_intc_suspend(void);
29void omap3_intc_prepare_idle(void);
30void omap3_intc_resume_idle(void);
31
32#endif /* __INCLUDE_LINUX_IRQCHIP_IRQ_OMAP_INTC_H */
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
new file mode 100644
index 000000000000..420f77b34d02
--- /dev/null
+++ b/include/linux/irqchip/mips-gic.h
@@ -0,0 +1,249 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2000, 07 MIPS Technologies, Inc.
7 */
8#ifndef __LINUX_IRQCHIP_MIPS_GIC_H
9#define __LINUX_IRQCHIP_MIPS_GIC_H
10
11#include <linux/clocksource.h>
12
13#define GIC_MAX_INTRS 256
14
15/* Constants */
16#define GIC_POL_POS 1
17#define GIC_POL_NEG 0
18#define GIC_TRIG_EDGE 1
19#define GIC_TRIG_LEVEL 0
20#define GIC_TRIG_DUAL_ENABLE 1
21#define GIC_TRIG_DUAL_DISABLE 0
22
23#define MSK(n) ((1 << (n)) - 1)
24
25/* Accessors */
26#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS)
27
28/* GIC Address Space */
29#define SHARED_SECTION_OFS 0x0000
30#define SHARED_SECTION_SIZE 0x8000
31#define VPE_LOCAL_SECTION_OFS 0x8000
32#define VPE_LOCAL_SECTION_SIZE 0x4000
33#define VPE_OTHER_SECTION_OFS 0xc000
34#define VPE_OTHER_SECTION_SIZE 0x4000
35#define USM_VISIBLE_SECTION_OFS 0x10000
36#define USM_VISIBLE_SECTION_SIZE 0x10000
37
38/* Register Map for Shared Section */
39
40#define GIC_SH_CONFIG_OFS 0x0000
41
42/* Shared Global Counter */
43#define GIC_SH_COUNTER_31_00_OFS 0x0010
44#define GIC_SH_COUNTER_63_32_OFS 0x0014
45#define GIC_SH_REVISIONID_OFS 0x0020
46
47/* Convert an interrupt number to a byte offset/bit for multi-word registers */
48#define GIC_INTR_OFS(intr) (((intr) / 32) * 4)
49#define GIC_INTR_BIT(intr) ((intr) % 32)
50
51/* Polarity : Reset Value is always 0 */
52#define GIC_SH_SET_POLARITY_OFS 0x0100
53
54/* Triggering : Reset Value is always 0 */
55#define GIC_SH_SET_TRIGGER_OFS 0x0180
56
57/* Dual edge triggering : Reset Value is always 0 */
58#define GIC_SH_SET_DUAL_OFS 0x0200
59
60/* Set/Clear corresponding bit in Edge Detect Register */
61#define GIC_SH_WEDGE_OFS 0x0280
62
63/* Mask manipulation */
64#define GIC_SH_RMASK_OFS 0x0300
65#define GIC_SH_SMASK_OFS 0x0380
66
67/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
68#define GIC_SH_MASK_OFS 0x0400
69
70/* Pending Global Interrupts (RO) */
71#define GIC_SH_PEND_OFS 0x0480
72
73/* Maps Interrupt X to a Pin */
74#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
75#define GIC_SH_MAP_TO_PIN(intr) (4 * (intr))
76
77/* Maps Interrupt X to a VPE */
78#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
79#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
80 ((32 * (intr)) + (((vpe) / 32) * 4))
81#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
82
83/* Register Map for Local Section */
84#define GIC_VPE_CTL_OFS 0x0000
85#define GIC_VPE_PEND_OFS 0x0004
86#define GIC_VPE_MASK_OFS 0x0008
87#define GIC_VPE_RMASK_OFS 0x000c
88#define GIC_VPE_SMASK_OFS 0x0010
89#define GIC_VPE_WD_MAP_OFS 0x0040
90#define GIC_VPE_COMPARE_MAP_OFS 0x0044
91#define GIC_VPE_TIMER_MAP_OFS 0x0048
92#define GIC_VPE_FDC_MAP_OFS 0x004c
93#define GIC_VPE_PERFCTR_MAP_OFS 0x0050
94#define GIC_VPE_SWINT0_MAP_OFS 0x0054
95#define GIC_VPE_SWINT1_MAP_OFS 0x0058
96#define GIC_VPE_OTHER_ADDR_OFS 0x0080
97#define GIC_VPE_WD_CONFIG0_OFS 0x0090
98#define GIC_VPE_WD_COUNT0_OFS 0x0094
99#define GIC_VPE_WD_INITIAL0_OFS 0x0098
100#define GIC_VPE_COMPARE_LO_OFS 0x00a0
101#define GIC_VPE_COMPARE_HI_OFS 0x00a4
102
103#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS 0x0100
104#define GIC_VPE_EIC_SS(intr) (4 * (intr))
105
106#define GIC_VPE_EIC_VEC_BASE_OFS 0x0800
107#define GIC_VPE_EIC_VEC(intr) (4 * (intr))
108
109#define GIC_VPE_TENABLE_NMI_OFS 0x1000
110#define GIC_VPE_TENABLE_YQ_OFS 0x1004
111#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080
112#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084
113
114/* User Mode Visible Section Register Map */
115#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000
116#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004
117
118/* Masks */
119#define GIC_SH_CONFIG_COUNTSTOP_SHF 28
120#define GIC_SH_CONFIG_COUNTSTOP_MSK (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF)
121
122#define GIC_SH_CONFIG_COUNTBITS_SHF 24
123#define GIC_SH_CONFIG_COUNTBITS_MSK (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF)
124
125#define GIC_SH_CONFIG_NUMINTRS_SHF 16
126#define GIC_SH_CONFIG_NUMINTRS_MSK (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF)
127
128#define GIC_SH_CONFIG_NUMVPES_SHF 0
129#define GIC_SH_CONFIG_NUMVPES_MSK (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
130
131#define GIC_SH_WEDGE_SET(intr) ((intr) | (0x1 << 31))
132#define GIC_SH_WEDGE_CLR(intr) ((intr) & ~(0x1 << 31))
133
134#define GIC_MAP_TO_PIN_SHF 31
135#define GIC_MAP_TO_PIN_MSK (MSK(1) << GIC_MAP_TO_PIN_SHF)
136#define GIC_MAP_TO_NMI_SHF 30
137#define GIC_MAP_TO_NMI_MSK (MSK(1) << GIC_MAP_TO_NMI_SHF)
138#define GIC_MAP_TO_YQ_SHF 29
139#define GIC_MAP_TO_YQ_MSK (MSK(1) << GIC_MAP_TO_YQ_SHF)
140#define GIC_MAP_SHF 0
141#define GIC_MAP_MSK (MSK(6) << GIC_MAP_SHF)
142
143/* GIC_VPE_CTL Masks */
144#define GIC_VPE_CTL_FDC_RTBL_SHF 4
145#define GIC_VPE_CTL_FDC_RTBL_MSK (MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF)
146#define GIC_VPE_CTL_SWINT_RTBL_SHF 3
147#define GIC_VPE_CTL_SWINT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF)
148#define GIC_VPE_CTL_PERFCNT_RTBL_SHF 2
149#define GIC_VPE_CTL_PERFCNT_RTBL_MSK (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
150#define GIC_VPE_CTL_TIMER_RTBL_SHF 1
151#define GIC_VPE_CTL_TIMER_RTBL_MSK (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF)
152#define GIC_VPE_CTL_EIC_MODE_SHF 0
153#define GIC_VPE_CTL_EIC_MODE_MSK (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF)
154
155/* GIC_VPE_PEND Masks */
156#define GIC_VPE_PEND_WD_SHF 0
157#define GIC_VPE_PEND_WD_MSK (MSK(1) << GIC_VPE_PEND_WD_SHF)
158#define GIC_VPE_PEND_CMP_SHF 1
159#define GIC_VPE_PEND_CMP_MSK (MSK(1) << GIC_VPE_PEND_CMP_SHF)
160#define GIC_VPE_PEND_TIMER_SHF 2
161#define GIC_VPE_PEND_TIMER_MSK (MSK(1) << GIC_VPE_PEND_TIMER_SHF)
162#define GIC_VPE_PEND_PERFCOUNT_SHF 3
163#define GIC_VPE_PEND_PERFCOUNT_MSK (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF)
164#define GIC_VPE_PEND_SWINT0_SHF 4
165#define GIC_VPE_PEND_SWINT0_MSK (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
166#define GIC_VPE_PEND_SWINT1_SHF 5
167#define GIC_VPE_PEND_SWINT1_MSK (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
168
169/* GIC_VPE_RMASK Masks */
170#define GIC_VPE_RMASK_WD_SHF 0
171#define GIC_VPE_RMASK_WD_MSK (MSK(1) << GIC_VPE_RMASK_WD_SHF)
172#define GIC_VPE_RMASK_CMP_SHF 1
173#define GIC_VPE_RMASK_CMP_MSK (MSK(1) << GIC_VPE_RMASK_CMP_SHF)
174#define GIC_VPE_RMASK_TIMER_SHF 2
175#define GIC_VPE_RMASK_TIMER_MSK (MSK(1) << GIC_VPE_RMASK_TIMER_SHF)
176#define GIC_VPE_RMASK_PERFCNT_SHF 3
177#define GIC_VPE_RMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF)
178#define GIC_VPE_RMASK_SWINT0_SHF 4
179#define GIC_VPE_RMASK_SWINT0_MSK (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
180#define GIC_VPE_RMASK_SWINT1_SHF 5
181#define GIC_VPE_RMASK_SWINT1_MSK (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
182
183/* GIC_VPE_SMASK Masks */
184#define GIC_VPE_SMASK_WD_SHF 0
185#define GIC_VPE_SMASK_WD_MSK (MSK(1) << GIC_VPE_SMASK_WD_SHF)
186#define GIC_VPE_SMASK_CMP_SHF 1
187#define GIC_VPE_SMASK_CMP_MSK (MSK(1) << GIC_VPE_SMASK_CMP_SHF)
188#define GIC_VPE_SMASK_TIMER_SHF 2
189#define GIC_VPE_SMASK_TIMER_MSK (MSK(1) << GIC_VPE_SMASK_TIMER_SHF)
190#define GIC_VPE_SMASK_PERFCNT_SHF 3
191#define GIC_VPE_SMASK_PERFCNT_MSK (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF)
192#define GIC_VPE_SMASK_SWINT0_SHF 4
193#define GIC_VPE_SMASK_SWINT0_MSK (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
194#define GIC_VPE_SMASK_SWINT1_SHF 5
195#define GIC_VPE_SMASK_SWINT1_MSK (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
196
197/* GIC nomenclature for Core Interrupt Pins. */
198#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
199#define GIC_CPU_INT1 1 /* . */
200#define GIC_CPU_INT2 2 /* . */
201#define GIC_CPU_INT3 3 /* . */
202#define GIC_CPU_INT4 4 /* . */
203#define GIC_CPU_INT5 5 /* Core Interrupt 7 */
204
205/* Add 2 to convert GIC CPU pin to core interrupt */
206#define GIC_CPU_PIN_OFFSET 2
207
208/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
209#define GIC_CPU_TO_VEC_OFFSET 2
210
211/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
212#define GIC_PIN_TO_VEC_OFFSET 1
213
214/* Local GIC interrupts. */
215#define GIC_LOCAL_INT_WD 0 /* GIC watchdog */
216#define GIC_LOCAL_INT_COMPARE 1 /* GIC count and compare timer */
217#define GIC_LOCAL_INT_TIMER 2 /* CPU timer interrupt */
218#define GIC_LOCAL_INT_PERFCTR 3 /* CPU performance counter */
219#define GIC_LOCAL_INT_SWINT0 4 /* CPU software interrupt 0 */
220#define GIC_LOCAL_INT_SWINT1 5 /* CPU software interrupt 1 */
221#define GIC_LOCAL_INT_FDC 6 /* CPU fast debug channel */
222#define GIC_NUM_LOCAL_INTRS 7
223
224/* Convert between local/shared IRQ number and GIC HW IRQ number. */
225#define GIC_LOCAL_HWIRQ_BASE 0
226#define GIC_LOCAL_TO_HWIRQ(x) (GIC_LOCAL_HWIRQ_BASE + (x))
227#define GIC_HWIRQ_TO_LOCAL(x) ((x) - GIC_LOCAL_HWIRQ_BASE)
228#define GIC_SHARED_HWIRQ_BASE GIC_NUM_LOCAL_INTRS
229#define GIC_SHARED_TO_HWIRQ(x) (GIC_SHARED_HWIRQ_BASE + (x))
230#define GIC_HWIRQ_TO_SHARED(x) ((x) - GIC_SHARED_HWIRQ_BASE)
231
232extern unsigned int gic_present;
233
234extern void gic_init(unsigned long gic_base_addr,
235 unsigned long gic_addrspace_size, unsigned int cpu_vec,
236 unsigned int irqbase);
237extern void gic_clocksource_init(unsigned int);
238extern cycle_t gic_read_count(void);
239extern unsigned int gic_get_count_width(void);
240extern cycle_t gic_read_compare(void);
241extern void gic_write_compare(cycle_t cnt);
242extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
243extern void gic_send_ipi(unsigned int intr);
244extern unsigned int plat_ipi_call_int_xlate(unsigned int);
245extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
246extern unsigned int gic_get_timer_pending(void);
247extern int gic_get_c0_compare_int(void);
248extern int gic_get_c0_perfcount_int(void);
249#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 472c021a2d4f..faf433af425e 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -12,6 +12,8 @@ struct irq_affinity_notify;
12struct proc_dir_entry; 12struct proc_dir_entry;
13struct module; 13struct module;
14struct irq_desc; 14struct irq_desc;
15struct irq_domain;
16struct pt_regs;
15 17
16/** 18/**
17 * struct irq_desc - interrupt descriptor 19 * struct irq_desc - interrupt descriptor
@@ -36,6 +38,11 @@ struct irq_desc;
36 * @threads_oneshot: bitfield to handle shared oneshot threads 38 * @threads_oneshot: bitfield to handle shared oneshot threads
37 * @threads_active: number of irqaction threads currently running 39 * @threads_active: number of irqaction threads currently running
38 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers 40 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
41 * @nr_actions: number of installed actions on this descriptor
42 * @no_suspend_depth: number of irqactions on a irq descriptor with
43 * IRQF_NO_SUSPEND set
44 * @force_resume_depth: number of irqactions on a irq descriptor with
45 * IRQF_FORCE_RESUME set
39 * @dir: /proc/irq/ procfs entry 46 * @dir: /proc/irq/ procfs entry
40 * @name: flow handler name for /proc/interrupts output 47 * @name: flow handler name for /proc/interrupts output
41 */ 48 */
@@ -68,6 +75,11 @@ struct irq_desc {
68 unsigned long threads_oneshot; 75 unsigned long threads_oneshot;
69 atomic_t threads_active; 76 atomic_t threads_active;
70 wait_queue_head_t wait_for_threads; 77 wait_queue_head_t wait_for_threads;
78#ifdef CONFIG_PM_SLEEP
79 unsigned int nr_actions;
80 unsigned int no_suspend_depth;
81 unsigned int force_resume_depth;
82#endif
71#ifdef CONFIG_PROC_FS 83#ifdef CONFIG_PROC_FS
72 struct proc_dir_entry *dir; 84 struct proc_dir_entry *dir;
73#endif 85#endif
@@ -118,6 +130,23 @@ static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *de
118 130
119int generic_handle_irq(unsigned int irq); 131int generic_handle_irq(unsigned int irq);
120 132
133#ifdef CONFIG_HANDLE_DOMAIN_IRQ
134/*
135 * Convert a HW interrupt number to a logical one using a IRQ domain,
136 * and handle the result interrupt number. Return -EINVAL if
137 * conversion failed. Providing a NULL domain indicates that the
138 * conversion has already been done.
139 */
140int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
141 bool lookup, struct pt_regs *regs);
142
143static inline int handle_domain_irq(struct irq_domain *domain,
144 unsigned int hwirq, struct pt_regs *regs)
145{
146 return __handle_domain_irq(domain, hwirq, true, regs);
147}
148#endif
149
121/* Test to see if a driver has successfully requested an irq */ 150/* Test to see if a driver has successfully requested an irq */
122static inline int irq_has_action(unsigned int irq) 151static inline int irq_has_action(unsigned int irq)
123{ 152{
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index b0f9d16e48f6..676d7306a360 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -33,11 +33,14 @@
33#define _LINUX_IRQDOMAIN_H 33#define _LINUX_IRQDOMAIN_H
34 34
35#include <linux/types.h> 35#include <linux/types.h>
36#include <linux/irqhandler.h>
36#include <linux/radix-tree.h> 37#include <linux/radix-tree.h>
37 38
38struct device_node; 39struct device_node;
39struct irq_domain; 40struct irq_domain;
40struct of_device_id; 41struct of_device_id;
42struct irq_chip;
43struct irq_data;
41 44
42/* Number of irqs reserved for a legacy isa controller */ 45/* Number of irqs reserved for a legacy isa controller */
43#define NUM_ISA_INTERRUPTS 16 46#define NUM_ISA_INTERRUPTS 16
@@ -64,6 +67,16 @@ struct irq_domain_ops {
64 int (*xlate)(struct irq_domain *d, struct device_node *node, 67 int (*xlate)(struct irq_domain *d, struct device_node *node,
65 const u32 *intspec, unsigned int intsize, 68 const u32 *intspec, unsigned int intsize,
66 unsigned long *out_hwirq, unsigned int *out_type); 69 unsigned long *out_hwirq, unsigned int *out_type);
70
71#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
72 /* extended V2 interfaces to support hierarchy irq_domains */
73 int (*alloc)(struct irq_domain *d, unsigned int virq,
74 unsigned int nr_irqs, void *arg);
75 void (*free)(struct irq_domain *d, unsigned int virq,
76 unsigned int nr_irqs);
77 void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
78 void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
79#endif
67}; 80};
68 81
69extern struct irq_domain_ops irq_generic_chip_ops; 82extern struct irq_domain_ops irq_generic_chip_ops;
@@ -77,6 +90,7 @@ struct irq_domain_chip_generic;
77 * @ops: pointer to irq_domain methods 90 * @ops: pointer to irq_domain methods
78 * @host_data: private data pointer for use by owner. Not touched by irq_domain 91 * @host_data: private data pointer for use by owner. Not touched by irq_domain
79 * core code. 92 * core code.
93 * @flags: host per irq_domain flags
80 * 94 *
81 * Optional elements 95 * Optional elements
82 * @of_node: Pointer to device tree nodes associated with the irq_domain. Used 96 * @of_node: Pointer to device tree nodes associated with the irq_domain. Used
@@ -84,6 +98,7 @@ struct irq_domain_chip_generic;
84 * @gc: Pointer to a list of generic chips. There is a helper function for 98 * @gc: Pointer to a list of generic chips. There is a helper function for
85 * setting up one or more generic chips for interrupt controllers 99 * setting up one or more generic chips for interrupt controllers
86 * drivers using the generic chip library which uses this pointer. 100 * drivers using the generic chip library which uses this pointer.
101 * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
87 * 102 *
88 * Revmap data, used internally by irq_domain 103 * Revmap data, used internally by irq_domain
89 * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that 104 * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that
@@ -97,10 +112,14 @@ struct irq_domain {
97 const char *name; 112 const char *name;
98 const struct irq_domain_ops *ops; 113 const struct irq_domain_ops *ops;
99 void *host_data; 114 void *host_data;
115 unsigned int flags;
100 116
101 /* Optional data */ 117 /* Optional data */
102 struct device_node *of_node; 118 struct device_node *of_node;
103 struct irq_domain_chip_generic *gc; 119 struct irq_domain_chip_generic *gc;
120#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
121 struct irq_domain *parent;
122#endif
104 123
105 /* reverse map data. The linear map gets appended to the irq_domain */ 124 /* reverse map data. The linear map gets appended to the irq_domain */
106 irq_hw_number_t hwirq_max; 125 irq_hw_number_t hwirq_max;
@@ -110,6 +129,22 @@ struct irq_domain {
110 unsigned int linear_revmap[]; 129 unsigned int linear_revmap[];
111}; 130};
112 131
132/* Irq domain flags */
133enum {
134 /* Irq domain is hierarchical */
135 IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
136
137 /* Core calls alloc/free recursive through the domain hierarchy. */
138 IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1),
139
140 /*
141 * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
142 * for implementation specific purposes and ignored by the
143 * core code.
144 */
145 IRQ_DOMAIN_FLAG_NONCORE = (1 << 16),
146};
147
113#ifdef CONFIG_IRQ_DOMAIN 148#ifdef CONFIG_IRQ_DOMAIN
114struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, 149struct irq_domain *__irq_domain_add(struct device_node *of_node, int size,
115 irq_hw_number_t hwirq_max, int direct_max, 150 irq_hw_number_t hwirq_max, int direct_max,
@@ -220,8 +255,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
220 const u32 *intspec, unsigned int intsize, 255 const u32 *intspec, unsigned int intsize,
221 irq_hw_number_t *out_hwirq, unsigned int *out_type); 256 irq_hw_number_t *out_hwirq, unsigned int *out_type);
222 257
258/* V2 interfaces to support hierarchy IRQ domains. */
259extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
260 unsigned int virq);
261#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
262extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent,
263 unsigned int flags, unsigned int size,
264 struct device_node *node,
265 const struct irq_domain_ops *ops, void *host_data);
266extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
267 unsigned int nr_irqs, int node, void *arg,
268 bool realloc);
269extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
270extern void irq_domain_activate_irq(struct irq_data *irq_data);
271extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
272
273static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
274 unsigned int nr_irqs, int node, void *arg)
275{
276 return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false);
277}
278
279extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
280 unsigned int virq,
281 irq_hw_number_t hwirq,
282 struct irq_chip *chip,
283 void *chip_data);
284extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
285 irq_hw_number_t hwirq, struct irq_chip *chip,
286 void *chip_data, irq_flow_handler_t handler,
287 void *handler_data, const char *handler_name);
288extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
289extern void irq_domain_free_irqs_common(struct irq_domain *domain,
290 unsigned int virq,
291 unsigned int nr_irqs);
292extern void irq_domain_free_irqs_top(struct irq_domain *domain,
293 unsigned int virq, unsigned int nr_irqs);
294
295extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
296 unsigned int irq_base,
297 unsigned int nr_irqs, void *arg);
298
299extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
300 unsigned int irq_base,
301 unsigned int nr_irqs);
302
303static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
304{
305 return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY;
306}
307#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
308static inline void irq_domain_activate_irq(struct irq_data *data) { }
309static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
310static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
311 unsigned int nr_irqs, int node, void *arg)
312{
313 return -1;
314}
315
316static inline bool irq_domain_is_hierarchy(struct irq_domain *domain)
317{
318 return false;
319}
320#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
321
223#else /* CONFIG_IRQ_DOMAIN */ 322#else /* CONFIG_IRQ_DOMAIN */
224static inline void irq_dispose_mapping(unsigned int virq) { } 323static inline void irq_dispose_mapping(unsigned int virq) { }
324static inline void irq_domain_activate_irq(struct irq_data *data) { }
325static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
225#endif /* !CONFIG_IRQ_DOMAIN */ 326#endif /* !CONFIG_IRQ_DOMAIN */
226 327
227#endif /* _LINUX_IRQDOMAIN_H */ 328#endif /* _LINUX_IRQDOMAIN_H */
diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h
new file mode 100644
index 000000000000..62d543004197
--- /dev/null
+++ b/include/linux/irqhandler.h
@@ -0,0 +1,14 @@
1#ifndef _LINUX_IRQHANDLER_H
2#define _LINUX_IRQHANDLER_H
3
4/*
5 * Interrupt flow handler typedefs are defined here to avoid circular
6 * include dependencies.
7 */
8
9struct irq_desc;
10struct irq_data;
11typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
12typedef void (*irq_preflow_handler_t)(struct irq_data *data);
13
14#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0dae71e9971c..704b9a599b26 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1042,7 +1042,7 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
1042extern void jbd2_journal_commit_transaction(journal_t *); 1042extern void jbd2_journal_commit_transaction(journal_t *);
1043 1043
1044/* Checkpoint list management */ 1044/* Checkpoint list management */
1045int __jbd2_journal_clean_checkpoint_list(journal_t *journal); 1045void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
1046int __jbd2_journal_remove_checkpoint(struct journal_head *); 1046int __jbd2_journal_remove_checkpoint(struct journal_head *);
1047void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); 1047void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
1048 1048
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 784304b222b3..98f923b6a0ea 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -8,28 +8,28 @@
8 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
9 * 9 *
10 * Jump labels provide an interface to generate dynamic branches using 10 * Jump labels provide an interface to generate dynamic branches using
11 * self-modifying code. Assuming toolchain and architecture support the result 11 * self-modifying code. Assuming toolchain and architecture support, the result
12 * of a "if (static_key_false(&key))" statement is a unconditional branch (which 12 * of a "if (static_key_false(&key))" statement is an unconditional branch (which
13 * defaults to false - and the true block is placed out of line). 13 * defaults to false - and the true block is placed out of line).
14 * 14 *
15 * However at runtime we can change the branch target using 15 * However at runtime we can change the branch target using
16 * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key 16 * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
17 * object and for as long as there are references all branches referring to 17 * object, and for as long as there are references all branches referring to
18 * that particular key will point to the (out of line) true block. 18 * that particular key will point to the (out of line) true block.
19 * 19 *
20 * Since this relies on modifying code the static_key_slow_{inc,dec}() functions 20 * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
21 * must be considered absolute slow paths (machine wide synchronization etc.). 21 * must be considered absolute slow paths (machine wide synchronization etc.).
22 * OTOH, since the affected branches are unconditional their runtime overhead 22 * OTOH, since the affected branches are unconditional, their runtime overhead
23 * will be absolutely minimal, esp. in the default (off) case where the total 23 * will be absolutely minimal, esp. in the default (off) case where the total
24 * effect is a single NOP of appropriate size. The on case will patch in a jump 24 * effect is a single NOP of appropriate size. The on case will patch in a jump
25 * to the out-of-line block. 25 * to the out-of-line block.
26 * 26 *
27 * When the control is directly exposed to userspace it is prudent to delay the 27 * When the control is directly exposed to userspace, it is prudent to delay the
28 * decrement to avoid high frequency code modifications which can (and do) 28 * decrement to avoid high frequency code modifications which can (and do)
29 * cause significant performance degradation. Struct static_key_deferred and 29 * cause significant performance degradation. Struct static_key_deferred and
30 * static_key_slow_dec_deferred() provide for this. 30 * static_key_slow_dec_deferred() provide for this.
31 * 31 *
32 * Lacking toolchain and or architecture support, it falls back to a simple 32 * Lacking toolchain and or architecture support, jump labels fall back to a simple
33 * conditional branch. 33 * conditional branch.
34 * 34 *
35 * struct static_key my_key = STATIC_KEY_INIT_TRUE; 35 * struct static_key my_key = STATIC_KEY_INIT_TRUE;
@@ -43,8 +43,7 @@
43 * 43 *
44 * Not initializing the key (static data is initialized to 0s anyway) is the 44 * Not initializing the key (static data is initialized to 0s anyway) is the
45 * same as using STATIC_KEY_INIT_FALSE. 45 * same as using STATIC_KEY_INIT_FALSE.
46 * 46 */
47*/
48 47
49#include <linux/types.h> 48#include <linux/types.h>
50#include <linux/compiler.h> 49#include <linux/compiler.h>
diff --git a/include/linux/kcmp.h b/include/linux/kcmp.h
deleted file mode 100644
index 2dcd1b3aafc8..000000000000
--- a/include/linux/kcmp.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef _LINUX_KCMP_H
2#define _LINUX_KCMP_H
3
4/* Comparison type */
5enum kcmp_type {
6 KCMP_FILE,
7 KCMP_VM,
8 KCMP_FILES,
9 KCMP_FS,
10 KCMP_SIGHAND,
11 KCMP_IO,
12 KCMP_SYSVSEM,
13
14 KCMP_TYPES,
15};
16
17#endif /* _LINUX_KCMP_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 290db1269c4c..75ae2e2631fc 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,11 +13,54 @@
13 * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> 13 * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
14 */ 14 */
15 15
16/* Shifted versions of the command enable bits are be used if the command
17 * has no arguments (see kdb_check_flags). This allows commands, such as
18 * go, to have different permissions depending upon whether it is called
19 * with an argument.
20 */
21#define KDB_ENABLE_NO_ARGS_SHIFT 10
22
16typedef enum { 23typedef enum {
17 KDB_REPEAT_NONE = 0, /* Do not repeat this command */ 24 KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
18 KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ 25 KDB_ENABLE_MEM_READ = (1 << 1),
19 KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ 26 KDB_ENABLE_MEM_WRITE = (1 << 2),
20} kdb_repeat_t; 27 KDB_ENABLE_REG_READ = (1 << 3),
28 KDB_ENABLE_REG_WRITE = (1 << 4),
29 KDB_ENABLE_INSPECT = (1 << 5),
30 KDB_ENABLE_FLOW_CTRL = (1 << 6),
31 KDB_ENABLE_SIGNAL = (1 << 7),
32 KDB_ENABLE_REBOOT = (1 << 8),
33 /* User exposed values stop here, all remaining flags are
34 * exclusively used to describe a commands behaviour.
35 */
36
37 KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
38 KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
39
40 KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
41 KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
42 << KDB_ENABLE_NO_ARGS_SHIFT,
43 KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
44 << KDB_ENABLE_NO_ARGS_SHIFT,
45 KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
46 << KDB_ENABLE_NO_ARGS_SHIFT,
47 KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
48 << KDB_ENABLE_NO_ARGS_SHIFT,
49 KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
50 << KDB_ENABLE_NO_ARGS_SHIFT,
51 KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
52 << KDB_ENABLE_NO_ARGS_SHIFT,
53 KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
54 << KDB_ENABLE_NO_ARGS_SHIFT,
55 KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
56 << KDB_ENABLE_NO_ARGS_SHIFT,
57 KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
58 << KDB_ENABLE_NO_ARGS_SHIFT,
59 KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
60
61 KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
62 KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
63} kdb_cmdflags_t;
21 64
22typedef int (*kdb_func_t)(int, const char **); 65typedef int (*kdb_func_t)(int, const char **);
23 66
@@ -62,6 +105,7 @@ extern atomic_t kdb_event;
62#define KDB_BADLENGTH (-19) 105#define KDB_BADLENGTH (-19)
63#define KDB_NOBP (-20) 106#define KDB_NOBP (-20)
64#define KDB_BADADDR (-21) 107#define KDB_BADADDR (-21)
108#define KDB_NOPERM (-22)
65 109
66/* 110/*
67 * kdb_diemsg 111 * kdb_diemsg
@@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
146 190
147/* Dynamic kdb shell command registration */ 191/* Dynamic kdb shell command registration */
148extern int kdb_register(char *, kdb_func_t, char *, char *, short); 192extern int kdb_register(char *, kdb_func_t, char *, char *, short);
149extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, 193extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
150 short, kdb_repeat_t); 194 short, kdb_cmdflags_t);
151extern int kdb_unregister(char *); 195extern int kdb_unregister(char *);
152#else /* ! CONFIG_KGDB_KDB */ 196#else /* ! CONFIG_KGDB_KDB */
153static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } 197static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
154static inline void kdb_init(int level) {} 198static inline void kdb_init(int level) {}
155static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, 199static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
156 char *help, short minlen) { return 0; } 200 char *help, short minlen) { return 0; }
157static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage, 201static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
158 char *help, short minlen, 202 char *help, short minlen,
159 kdb_repeat_t repeat) { return 0; } 203 kdb_cmdflags_t flags) { return 0; }
160static inline int kdb_unregister(char *cmd) { return 0; } 204static inline int kdb_unregister(char *cmd) { return 0; }
161#endif /* CONFIG_KGDB_KDB */ 205#endif /* CONFIG_KGDB_KDB */
162enum { 206enum {
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h
index 866caaa9e2bb..c2ce155d83cc 100644
--- a/include/linux/kern_levels.h
+++ b/include/linux/kern_levels.h
@@ -22,4 +22,17 @@
22 */ 22 */
23#define KERN_CONT "" 23#define KERN_CONT ""
24 24
25/* integer equivalents of KERN_<LEVEL> */
26#define LOGLEVEL_SCHED -2 /* Deferred messages from sched code
27 * are set to this special level */
28#define LOGLEVEL_DEFAULT -1 /* default (or last) loglevel */
29#define LOGLEVEL_EMERG 0 /* system is unusable */
30#define LOGLEVEL_ALERT 1 /* action must be taken immediately */
31#define LOGLEVEL_CRIT 2 /* critical conditions */
32#define LOGLEVEL_ERR 3 /* error conditions */
33#define LOGLEVEL_WARNING 4 /* warning conditions */
34#define LOGLEVEL_NOTICE 5 /* normal but significant condition */
35#define LOGLEVEL_INFO 6 /* informational */
36#define LOGLEVEL_DEBUG 7 /* debug-level messages */
37
25#endif 38#endif
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 95624bed87ef..5449d2f4a1ef 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -162,6 +162,7 @@ extern int _cond_resched(void);
162#endif 162#endif
163 163
164#ifdef CONFIG_DEBUG_ATOMIC_SLEEP 164#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
165 void ___might_sleep(const char *file, int line, int preempt_offset);
165 void __might_sleep(const char *file, int line, int preempt_offset); 166 void __might_sleep(const char *file, int line, int preempt_offset);
166/** 167/**
167 * might_sleep - annotation for functions that can sleep 168 * might_sleep - annotation for functions that can sleep
@@ -175,10 +176,14 @@ extern int _cond_resched(void);
175 */ 176 */
176# define might_sleep() \ 177# define might_sleep() \
177 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) 178 do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
179# define sched_annotate_sleep() __set_current_state(TASK_RUNNING)
178#else 180#else
181 static inline void ___might_sleep(const char *file, int line,
182 int preempt_offset) { }
179 static inline void __might_sleep(const char *file, int line, 183 static inline void __might_sleep(const char *file, int line,
180 int preempt_offset) { } 184 int preempt_offset) { }
181# define might_sleep() do { might_resched(); } while (0) 185# define might_sleep() do { might_resched(); } while (0)
186# define sched_annotate_sleep() do { } while (0)
182#endif 187#endif
183 188
184#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0) 189#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
@@ -376,10 +381,6 @@ extern unsigned long simple_strtoul(const char *,char **,unsigned int);
376extern long simple_strtol(const char *,char **,unsigned int); 381extern long simple_strtol(const char *,char **,unsigned int);
377extern unsigned long long simple_strtoull(const char *,char **,unsigned int); 382extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
378extern long long simple_strtoll(const char *,char **,unsigned int); 383extern long long simple_strtoll(const char *,char **,unsigned int);
379#define strict_strtoul kstrtoul
380#define strict_strtol kstrtol
381#define strict_strtoull kstrtoull
382#define strict_strtoll kstrtoll
383 384
384extern int num_to_str(char *buf, int size, unsigned long long num); 385extern int num_to_str(char *buf, int size, unsigned long long num);
385 386
@@ -407,6 +408,7 @@ int vsscanf(const char *, const char *, va_list);
407extern int get_option(char **str, int *pint); 408extern int get_option(char **str, int *pint);
408extern char *get_options(const char *str, int nints, int *ints); 409extern char *get_options(const char *str, int nints, int *ints);
409extern unsigned long long memparse(const char *ptr, char **retptr); 410extern unsigned long long memparse(const char *ptr, char **retptr);
411extern bool parse_option_str(const char *str, const char *option);
410 412
411extern int core_kernel_text(unsigned long addr); 413extern int core_kernel_text(unsigned long addr);
412extern int core_kernel_data(unsigned long addr); 414extern int core_kernel_data(unsigned long addr);
@@ -414,9 +416,6 @@ extern int __kernel_text_address(unsigned long addr);
414extern int kernel_text_address(unsigned long addr); 416extern int kernel_text_address(unsigned long addr);
415extern int func_ptr_is_kernel_text(void *ptr); 417extern int func_ptr_is_kernel_text(void *ptr);
416 418
417struct pid;
418extern struct pid *session_of_pgrp(struct pid *pgrp);
419
420unsigned long int_sqrt(unsigned long); 419unsigned long int_sqrt(unsigned long);
421 420
422extern void bust_spinlocks(int yes); 421extern void bust_spinlocks(int yes);
@@ -425,6 +424,7 @@ extern int panic_timeout;
425extern int panic_on_oops; 424extern int panic_on_oops;
426extern int panic_on_unrecovered_nmi; 425extern int panic_on_unrecovered_nmi;
427extern int panic_on_io_nmi; 426extern int panic_on_io_nmi;
427extern int panic_on_warn;
428extern int sysctl_panic_on_stackoverflow; 428extern int sysctl_panic_on_stackoverflow;
429/* 429/*
430 * Only to be used by arch init code. If the user over-wrote the default 430 * Only to be used by arch init code. If the user over-wrote the default
@@ -496,6 +496,7 @@ static inline char *hex_byte_pack_upper(char *buf, u8 byte)
496 496
497extern int hex_to_bin(char ch); 497extern int hex_to_bin(char ch);
498extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); 498extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
499extern char *bin2hex(char *dst, const void *src, size_t count);
499 500
500bool mac_pton(const char *s, u8 *mac); 501bool mac_pton(const char *s, u8 *mac);
501 502
@@ -715,23 +716,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
715 (void) (&_max1 == &_max2); \ 716 (void) (&_max1 == &_max2); \
716 _max1 > _max2 ? _max1 : _max2; }) 717 _max1 > _max2 ? _max1 : _max2; })
717 718
718#define min3(x, y, z) ({ \ 719#define min3(x, y, z) min((typeof(x))min(x, y), z)
719 typeof(x) _min1 = (x); \ 720#define max3(x, y, z) max((typeof(x))max(x, y), z)
720 typeof(y) _min2 = (y); \
721 typeof(z) _min3 = (z); \
722 (void) (&_min1 == &_min2); \
723 (void) (&_min1 == &_min3); \
724 _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \
725 (_min2 < _min3 ? _min2 : _min3); })
726
727#define max3(x, y, z) ({ \
728 typeof(x) _max1 = (x); \
729 typeof(y) _max2 = (y); \
730 typeof(z) _max3 = (z); \
731 (void) (&_max1 == &_max2); \
732 (void) (&_max1 == &_max3); \
733 _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
734 (_max2 > _max3 ? _max2 : _max3); })
735 721
736/** 722/**
737 * min_not_zero - return the minimum that is _not_ zero, unless both are zero 723 * min_not_zero - return the minimum that is _not_ zero, unless both are zero
@@ -746,20 +732,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
746/** 732/**
747 * clamp - return a value clamped to a given range with strict typechecking 733 * clamp - return a value clamped to a given range with strict typechecking
748 * @val: current value 734 * @val: current value
749 * @min: minimum allowable value 735 * @lo: lowest allowable value
750 * @max: maximum allowable value 736 * @hi: highest allowable value
751 * 737 *
752 * This macro does strict typechecking of min/max to make sure they are of the 738 * This macro does strict typechecking of lo/hi to make sure they are of the
753 * same type as val. See the unnecessary pointer comparisons. 739 * same type as val. See the unnecessary pointer comparisons.
754 */ 740 */
755#define clamp(val, min, max) ({ \ 741#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
756 typeof(val) __val = (val); \
757 typeof(min) __min = (min); \
758 typeof(max) __max = (max); \
759 (void) (&__val == &__min); \
760 (void) (&__val == &__max); \
761 __val = __val < __min ? __min: __val; \
762 __val > __max ? __max: __val; })
763 742
764/* 743/*
765 * ..and if you can't take the strict 744 * ..and if you can't take the strict
@@ -781,36 +760,26 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
781 * clamp_t - return a value clamped to a given range using a given type 760 * clamp_t - return a value clamped to a given range using a given type
782 * @type: the type of variable to use 761 * @type: the type of variable to use
783 * @val: current value 762 * @val: current value
784 * @min: minimum allowable value 763 * @lo: minimum allowable value
785 * @max: maximum allowable value 764 * @hi: maximum allowable value
786 * 765 *
787 * This macro does no typechecking and uses temporary variables of type 766 * This macro does no typechecking and uses temporary variables of type
788 * 'type' to make all the comparisons. 767 * 'type' to make all the comparisons.
789 */ 768 */
790#define clamp_t(type, val, min, max) ({ \ 769#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
791 type __val = (val); \
792 type __min = (min); \
793 type __max = (max); \
794 __val = __val < __min ? __min: __val; \
795 __val > __max ? __max: __val; })
796 770
797/** 771/**
798 * clamp_val - return a value clamped to a given range using val's type 772 * clamp_val - return a value clamped to a given range using val's type
799 * @val: current value 773 * @val: current value
800 * @min: minimum allowable value 774 * @lo: minimum allowable value
801 * @max: maximum allowable value 775 * @hi: maximum allowable value
802 * 776 *
803 * This macro does no typechecking and uses temporary variables of whatever 777 * This macro does no typechecking and uses temporary variables of whatever
804 * type the input argument 'val' is. This is useful when val is an unsigned 778 * type the input argument 'val' is. This is useful when val is an unsigned
805 * type and min and max are literals that will otherwise be assigned a signed 779 * type and min and max are literals that will otherwise be assigned a signed
806 * integer type. 780 * integer type.
807 */ 781 */
808#define clamp_val(val, min, max) ({ \ 782#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
809 typeof(val) __val = (val); \
810 typeof(val) __min = (min); \
811 typeof(val) __max = (max); \
812 __val = __val < __min ? __min: __val; \
813 __val > __max ? __max: __val; })
814 783
815 784
816/* 785/*
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ecbc52f9ff77..25a822f6f000 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -44,8 +44,8 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
44DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); 44DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
45 45
46/* Must have preemption disabled for this to be meaningful. */ 46/* Must have preemption disabled for this to be meaningful. */
47#define kstat_this_cpu (&__get_cpu_var(kstat)) 47#define kstat_this_cpu this_cpu_ptr(&kstat)
48#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat)) 48#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
49#define kstat_cpu(cpu) per_cpu(kstat, cpu) 49#define kstat_cpu(cpu) per_cpu(kstat, cpu)
50#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) 50#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
51 51
@@ -68,6 +68,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
68 * Number of interrupts per specific IRQ source, since bootup 68 * Number of interrupts per specific IRQ source, since bootup
69 */ 69 */
70extern unsigned int kstat_irqs(unsigned int irq); 70extern unsigned int kstat_irqs(unsigned int irq);
71extern unsigned int kstat_irqs_usr(unsigned int irq);
71 72
72/* 73/*
73 * Number of interrupts per cpu, since bootup 74 * Number of interrupts per cpu, since bootup
@@ -77,11 +78,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
77 return kstat_cpu(cpu).irqs_sum; 78 return kstat_cpu(cpu).irqs_sum;
78} 79}
79 80
80/*
81 * Lock/unlock the current runqueue - to extract task statistics:
82 */
83extern unsigned long long task_delta_exec(struct task_struct *);
84
85extern void account_user_time(struct task_struct *, cputime_t, cputime_t); 81extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
86extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); 82extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
87extern void account_steal_time(cputime_t); 83extern void account_steal_time(cputime_t);
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index 9be37da93680..e985ba679c4a 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -41,7 +41,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
41u16 capi20_get_version(u32 contr, struct capi_version *verp); 41u16 capi20_get_version(u32 contr, struct capi_version *verp);
42u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]); 42u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
43u16 capi20_get_profile(u32 contr, struct capi_profile *profp); 43u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
44int capi20_manufacturer(unsigned int cmd, void __user *data); 44int capi20_manufacturer(unsigned long cmd, void __user *data);
45 45
46#define CAPICTR_UP 0 46#define CAPICTR_UP 0
47#define CAPICTR_DOWN 1 47#define CAPICTR_DOWN 1
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 30faf797c2c3..d4e01b358341 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -179,6 +179,7 @@ struct kernfs_open_file {
179 struct mutex mutex; 179 struct mutex mutex;
180 int event; 180 int event;
181 struct list_head list; 181 struct list_head list;
182 char *prealloc_buf;
182 183
183 size_t atomic_write_len; 184 size_t atomic_write_len;
184 bool mmapped; 185 bool mmapped;
@@ -214,6 +215,13 @@ struct kernfs_ops {
214 * larger ones are rejected with -E2BIG. 215 * larger ones are rejected with -E2BIG.
215 */ 216 */
216 size_t atomic_write_len; 217 size_t atomic_write_len;
218 /*
219 * "prealloc" causes a buffer to be allocated at open for
220 * all read/write requests. As ->seq_show uses seq_read()
221 * which does its own allocation, it is incompatible with
222 * ->prealloc. Provide ->read and ->write with ->prealloc.
223 */
224 bool prealloc;
217 ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, 225 ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
218 loff_t off); 226 loff_t off);
219 227
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 4b2a0e11cc5b..9d957b7ae095 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -178,6 +178,7 @@ struct kexec_buf {
178 struct kimage *image; 178 struct kimage *image;
179 char *buffer; 179 char *buffer;
180 unsigned long bufsz; 180 unsigned long bufsz;
181 unsigned long mem;
181 unsigned long memsz; 182 unsigned long memsz;
182 unsigned long buf_align; 183 unsigned long buf_align;
183 unsigned long buf_min; 184 unsigned long buf_min;
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index 44792ee649de..ff9f1d394235 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -53,6 +53,24 @@ typedef int (*request_key_actor_t)(struct key_construction *key,
53 const char *op, void *aux); 53 const char *op, void *aux);
54 54
55/* 55/*
56 * Preparsed matching criterion.
57 */
58struct key_match_data {
59 /* Comparison function, defaults to exact description match, but can be
60 * overridden by type->match_preparse(). Should return true if a match
61 * is found and false if not.
62 */
63 bool (*cmp)(const struct key *key,
64 const struct key_match_data *match_data);
65
66 const void *raw_data; /* Raw match data */
67 void *preparsed; /* For ->match_preparse() to stash stuff */
68 unsigned lookup_type; /* Type of lookup for this search. */
69#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
70#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
71};
72
73/*
56 * kernel managed key type definition 74 * kernel managed key type definition
57 */ 75 */
58struct key_type { 76struct key_type {
@@ -65,11 +83,6 @@ struct key_type {
65 */ 83 */
66 size_t def_datalen; 84 size_t def_datalen;
67 85
68 /* Default key search algorithm. */
69 unsigned def_lookup_type;
70#define KEYRING_SEARCH_LOOKUP_DIRECT 0x0000 /* Direct lookup by description. */
71#define KEYRING_SEARCH_LOOKUP_ITERATE 0x0001 /* Iterative search. */
72
73 /* vet a description */ 86 /* vet a description */
74 int (*vet_description)(const char *description); 87 int (*vet_description)(const char *description);
75 88
@@ -96,8 +109,15 @@ struct key_type {
96 */ 109 */
97 int (*update)(struct key *key, struct key_preparsed_payload *prep); 110 int (*update)(struct key *key, struct key_preparsed_payload *prep);
98 111
99 /* match a key against a description */ 112 /* Preparse the data supplied to ->match() (optional). The
100 int (*match)(const struct key *key, const void *desc); 113 * data to be preparsed can be found in match_data->raw_data.
114 * The lookup type can also be set by this function.
115 */
116 int (*match_preparse)(struct key_match_data *match_data);
117
118 /* Free preparsed match data (optional). This should be supplied it
119 * ->match_preparse() is supplied. */
120 void (*match_free)(struct key_match_data *match_data);
101 121
102 /* clear some of the data from a key on revokation (optional) 122 /* clear some of the data from a key on revokation (optional)
103 * - the key's semaphore will be write-locked by the caller 123 * - the key's semaphore will be write-locked by the caller
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index 554fde3a3927..473b43678ad1 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -722,7 +722,7 @@ __kfifo_uint_must_check_helper( \
722/** 722/**
723 * kfifo_dma_out_finish - finish a DMA OUT operation 723 * kfifo_dma_out_finish - finish a DMA OUT operation
724 * @fifo: address of the fifo to be used 724 * @fifo: address of the fifo to be used
725 * @len: number of bytes transferrd 725 * @len: number of bytes transferred
726 * 726 *
727 * This macro finish a DMA OUT operation. The out counter will be updated by 727 * This macro finish a DMA OUT operation. The out counter will be updated by
728 * the len parameter. No error checking will be done. 728 * the len parameter. No error checking will be done.
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 6b06d378f3df..e465bb15912d 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -283,7 +283,7 @@ struct kgdb_io {
283 283
284extern struct kgdb_arch arch_kgdb_ops; 284extern struct kgdb_arch arch_kgdb_ops;
285 285
286extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); 286extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
287 287
288#ifdef CONFIG_SERIAL_KGDB_NMI 288#ifdef CONFIG_SERIAL_KGDB_NMI
289extern int kgdb_register_nmi_console(void); 289extern int kgdb_register_nmi_console(void);
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 6b394f0b5148..eeb307985715 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -6,7 +6,8 @@
6#ifdef CONFIG_TRANSPARENT_HUGEPAGE 6#ifdef CONFIG_TRANSPARENT_HUGEPAGE
7extern int __khugepaged_enter(struct mm_struct *mm); 7extern int __khugepaged_enter(struct mm_struct *mm);
8extern void __khugepaged_exit(struct mm_struct *mm); 8extern void __khugepaged_exit(struct mm_struct *mm);
9extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma); 9extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
10 unsigned long vm_flags);
10 11
11#define khugepaged_enabled() \ 12#define khugepaged_enabled() \
12 (transparent_hugepage_flags & \ 13 (transparent_hugepage_flags & \
@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
35 __khugepaged_exit(mm); 36 __khugepaged_exit(mm);
36} 37}
37 38
38static inline int khugepaged_enter(struct vm_area_struct *vma) 39static inline int khugepaged_enter(struct vm_area_struct *vma,
40 unsigned long vm_flags)
39{ 41{
40 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) 42 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
41 if ((khugepaged_always() || 43 if ((khugepaged_always() ||
42 (khugepaged_req_madv() && 44 (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
43 vma->vm_flags & VM_HUGEPAGE)) && 45 !(vm_flags & VM_NOHUGEPAGE))
44 !(vma->vm_flags & VM_NOHUGEPAGE))
45 if (__khugepaged_enter(vma->vm_mm)) 46 if (__khugepaged_enter(vma->vm_mm))
46 return -ENOMEM; 47 return -ENOMEM;
47 return 0; 48 return 0;
@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
54static inline void khugepaged_exit(struct mm_struct *mm) 55static inline void khugepaged_exit(struct mm_struct *mm)
55{ 56{
56} 57}
57static inline int khugepaged_enter(struct vm_area_struct *vma) 58static inline int khugepaged_enter(struct vm_area_struct *vma,
59 unsigned long vm_flags)
58{ 60{
59 return 0; 61 return 0;
60} 62}
61static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma) 63static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
64 unsigned long vm_flags)
62{ 65{
63 return 0; 66 return 0;
64} 67}
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 057e95971014..e705467ddb47 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -21,6 +21,8 @@
21#ifndef __KMEMLEAK_H 21#ifndef __KMEMLEAK_H
22#define __KMEMLEAK_H 22#define __KMEMLEAK_H
23 23
24#include <linux/slab.h>
25
24#ifdef CONFIG_DEBUG_KMEMLEAK 26#ifdef CONFIG_DEBUG_KMEMLEAK
25 27
26extern void kmemleak_init(void) __ref; 28extern void kmemleak_init(void) __ref;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index f7296e57d614..5297f9fa0ef2 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -335,6 +335,7 @@ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
335extern int arch_prepare_kprobe_ftrace(struct kprobe *p); 335extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
336#endif 336#endif
337 337
338int arch_check_ftrace_location(struct kprobe *p);
338 339
339/* Get the kprobe at this addr (if any) - called with preemption disabled */ 340/* Get the kprobe at this addr (if any) - called with preemption disabled */
340struct kprobe *get_kprobe(void *addr); 341struct kprobe *get_kprobe(void *addr);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a4c33b34fe3f..26f106022c88 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -43,6 +43,7 @@
43 * include/linux/kvm_h. 43 * include/linux/kvm_h.
44 */ 44 */
45#define KVM_MEMSLOT_INVALID (1UL << 16) 45#define KVM_MEMSLOT_INVALID (1UL << 16)
46#define KVM_MEMSLOT_INCOHERENT (1UL << 17)
46 47
47/* Two fragments for cross MMIO pages. */ 48/* Two fragments for cross MMIO pages. */
48#define KVM_MAX_MMIO_FRAGMENTS 2 49#define KVM_MAX_MMIO_FRAGMENTS 2
@@ -136,12 +137,11 @@ static inline bool is_error_page(struct page *page)
136#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22 137#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
137#define KVM_REQ_ENABLE_IBS 23 138#define KVM_REQ_ENABLE_IBS 23
138#define KVM_REQ_DISABLE_IBS 24 139#define KVM_REQ_DISABLE_IBS 24
140#define KVM_REQ_APIC_PAGE_RELOAD 25
139 141
140#define KVM_USERSPACE_IRQ_SOURCE_ID 0 142#define KVM_USERSPACE_IRQ_SOURCE_ID 0
141#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 143#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
142 144
143struct kvm;
144struct kvm_vcpu;
145extern struct kmem_cache *kvm_vcpu_cache; 145extern struct kmem_cache *kvm_vcpu_cache;
146 146
147extern spinlock_t kvm_lock; 147extern spinlock_t kvm_lock;
@@ -200,6 +200,17 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
200int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); 200int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
201#endif 201#endif
202 202
203/*
204 * Carry out a gup that requires IO. Allow the mm to relinquish the mmap
205 * semaphore if the filemap/swap has to wait on a page lock. pagep == NULL
206 * controls whether we retry the gup one more time to completion in that case.
207 * Typically this is called after a FAULT_FLAG_RETRY_NOWAIT in the main tdp
208 * handler.
209 */
210int kvm_get_user_page_io(struct task_struct *tsk, struct mm_struct *mm,
211 unsigned long addr, bool write_fault,
212 struct page **pagep);
213
203enum { 214enum {
204 OUTSIDE_GUEST_MODE, 215 OUTSIDE_GUEST_MODE,
205 IN_GUEST_MODE, 216 IN_GUEST_MODE,
@@ -325,8 +336,6 @@ struct kvm_kernel_irq_routing_entry {
325 struct hlist_node link; 336 struct hlist_node link;
326}; 337};
327 338
328struct kvm_irq_routing_table;
329
330#ifndef KVM_PRIVATE_MEM_SLOTS 339#ifndef KVM_PRIVATE_MEM_SLOTS
331#define KVM_PRIVATE_MEM_SLOTS 0 340#define KVM_PRIVATE_MEM_SLOTS 0
332#endif 341#endif
@@ -345,6 +354,8 @@ struct kvm_memslots {
345 struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM]; 354 struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
346 /* The mapping table from slot id to the index in memslots[]. */ 355 /* The mapping table from slot id to the index in memslots[]. */
347 short id_to_index[KVM_MEM_SLOTS_NUM]; 356 short id_to_index[KVM_MEM_SLOTS_NUM];
357 atomic_t lru_slot;
358 int used_slots;
348}; 359};
349 360
350struct kvm { 361struct kvm {
@@ -387,7 +398,6 @@ struct kvm {
387 * Update side is protected by irq_lock. 398 * Update side is protected by irq_lock.
388 */ 399 */
389 struct kvm_irq_routing_table __rcu *irq_routing; 400 struct kvm_irq_routing_table __rcu *irq_routing;
390 struct hlist_head mask_notifier_list;
391#endif 401#endif
392#ifdef CONFIG_HAVE_KVM_IRQFD 402#ifdef CONFIG_HAVE_KVM_IRQFD
393 struct hlist_head irq_ack_notifier_list; 403 struct hlist_head irq_ack_notifier_list;
@@ -439,6 +449,14 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
439int __must_check vcpu_load(struct kvm_vcpu *vcpu); 449int __must_check vcpu_load(struct kvm_vcpu *vcpu);
440void vcpu_put(struct kvm_vcpu *vcpu); 450void vcpu_put(struct kvm_vcpu *vcpu);
441 451
452#ifdef __KVM_HAVE_IOAPIC
453void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
454#else
455static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
456{
457}
458#endif
459
442#ifdef CONFIG_HAVE_KVM_IRQFD 460#ifdef CONFIG_HAVE_KVM_IRQFD
443int kvm_irqfd_init(void); 461int kvm_irqfd_init(void);
444void kvm_irqfd_exit(void); 462void kvm_irqfd_exit(void);
@@ -528,6 +546,8 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
528unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); 546unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
529unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); 547unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
530unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); 548unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
549unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
550 bool *writable);
531void kvm_release_page_clean(struct page *page); 551void kvm_release_page_clean(struct page *page);
532void kvm_release_page_dirty(struct page *page); 552void kvm_release_page_dirty(struct page *page);
533void kvm_set_page_accessed(struct page *page); 553void kvm_set_page_accessed(struct page *page);
@@ -579,6 +599,7 @@ void kvm_flush_remote_tlbs(struct kvm *kvm);
579void kvm_reload_remote_mmus(struct kvm *kvm); 599void kvm_reload_remote_mmus(struct kvm *kvm);
580void kvm_make_mclock_inprogress_request(struct kvm *kvm); 600void kvm_make_mclock_inprogress_request(struct kvm *kvm);
581void kvm_make_scan_ioapic_request(struct kvm *kvm); 601void kvm_make_scan_ioapic_request(struct kvm *kvm);
602bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
582 603
583long kvm_arch_dev_ioctl(struct file *filp, 604long kvm_arch_dev_ioctl(struct file *filp,
584 unsigned int ioctl, unsigned long arg); 605 unsigned int ioctl, unsigned long arg);
@@ -624,6 +645,8 @@ void kvm_arch_exit(void);
624int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); 645int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
625void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); 646void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
626 647
648void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
649
627void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); 650void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
628void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); 651void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
629void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); 652void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
@@ -632,8 +655,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
632int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu); 655int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
633void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); 656void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
634 657
635int kvm_arch_hardware_enable(void *garbage); 658int kvm_arch_hardware_enable(void);
636void kvm_arch_hardware_disable(void *garbage); 659void kvm_arch_hardware_disable(void);
637int kvm_arch_hardware_setup(void); 660int kvm_arch_hardware_setup(void);
638void kvm_arch_hardware_unsetup(void); 661void kvm_arch_hardware_unsetup(void);
639void kvm_arch_check_processor_compat(void *rtn); 662void kvm_arch_check_processor_compat(void *rtn);
@@ -690,7 +713,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
690int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); 713int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
691void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 714void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
692 715
693bool kvm_is_mmio_pfn(pfn_t pfn); 716bool kvm_is_reserved_pfn(pfn_t pfn);
694 717
695struct kvm_irq_ack_notifier { 718struct kvm_irq_ack_notifier {
696 struct hlist_node link; 719 struct hlist_node link;
@@ -698,44 +721,6 @@ struct kvm_irq_ack_notifier {
698 void (*irq_acked)(struct kvm_irq_ack_notifier *kian); 721 void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
699}; 722};
700 723
701struct kvm_assigned_dev_kernel {
702 struct kvm_irq_ack_notifier ack_notifier;
703 struct list_head list;
704 int assigned_dev_id;
705 int host_segnr;
706 int host_busnr;
707 int host_devfn;
708 unsigned int entries_nr;
709 int host_irq;
710 bool host_irq_disabled;
711 bool pci_2_3;
712 struct msix_entry *host_msix_entries;
713 int guest_irq;
714 struct msix_entry *guest_msix_entries;
715 unsigned long irq_requested_type;
716 int irq_source_id;
717 int flags;
718 struct pci_dev *dev;
719 struct kvm *kvm;
720 spinlock_t intx_lock;
721 spinlock_t intx_mask_lock;
722 char irq_name[32];
723 struct pci_saved_state *pci_saved_state;
724};
725
726struct kvm_irq_mask_notifier {
727 void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
728 int irq;
729 struct hlist_node link;
730};
731
732void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
733 struct kvm_irq_mask_notifier *kimn);
734void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
735 struct kvm_irq_mask_notifier *kimn);
736void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
737 bool mask);
738
739int kvm_irq_map_gsi(struct kvm *kvm, 724int kvm_irq_map_gsi(struct kvm *kvm,
740 struct kvm_kernel_irq_routing_entry *entries, int gsi); 725 struct kvm_kernel_irq_routing_entry *entries, int gsi);
741int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin); 726int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
@@ -757,12 +742,6 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
757#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT 742#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
758int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot); 743int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
759void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot); 744void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
760int kvm_iommu_map_guest(struct kvm *kvm);
761int kvm_iommu_unmap_guest(struct kvm *kvm);
762int kvm_assign_device(struct kvm *kvm,
763 struct kvm_assigned_dev_kernel *assigned_dev);
764int kvm_deassign_device(struct kvm *kvm,
765 struct kvm_assigned_dev_kernel *assigned_dev);
766#else 745#else
767static inline int kvm_iommu_map_pages(struct kvm *kvm, 746static inline int kvm_iommu_map_pages(struct kvm *kvm,
768 struct kvm_memory_slot *slot) 747 struct kvm_memory_slot *slot)
@@ -774,11 +753,6 @@ static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
774 struct kvm_memory_slot *slot) 753 struct kvm_memory_slot *slot)
775{ 754{
776} 755}
777
778static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
779{
780 return 0;
781}
782#endif 756#endif
783 757
784static inline void kvm_guest_enter(void) 758static inline void kvm_guest_enter(void)
@@ -819,12 +793,28 @@ static inline void kvm_guest_exit(void)
819static inline struct kvm_memory_slot * 793static inline struct kvm_memory_slot *
820search_memslots(struct kvm_memslots *slots, gfn_t gfn) 794search_memslots(struct kvm_memslots *slots, gfn_t gfn)
821{ 795{
822 struct kvm_memory_slot *memslot; 796 int start = 0, end = slots->used_slots;
797 int slot = atomic_read(&slots->lru_slot);
798 struct kvm_memory_slot *memslots = slots->memslots;
799
800 if (gfn >= memslots[slot].base_gfn &&
801 gfn < memslots[slot].base_gfn + memslots[slot].npages)
802 return &memslots[slot];
803
804 while (start < end) {
805 slot = start + (end - start) / 2;
806
807 if (gfn >= memslots[slot].base_gfn)
808 end = slot;
809 else
810 start = slot + 1;
811 }
823 812
824 kvm_for_each_memslot(memslot, slots) 813 if (gfn >= memslots[start].base_gfn &&
825 if (gfn >= memslot->base_gfn && 814 gfn < memslots[start].base_gfn + memslots[start].npages) {
826 gfn < memslot->base_gfn + memslot->npages) 815 atomic_set(&slots->lru_slot, start);
827 return memslot; 816 return &memslots[start];
817 }
828 818
829 return NULL; 819 return NULL;
830} 820}
@@ -998,25 +988,6 @@ static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
998 988
999#endif 989#endif
1000 990
1001#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
1002
1003long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
1004 unsigned long arg);
1005
1006void kvm_free_all_assigned_devices(struct kvm *kvm);
1007
1008#else
1009
1010static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
1011 unsigned long arg)
1012{
1013 return -ENOTTY;
1014}
1015
1016static inline void kvm_free_all_assigned_devices(struct kvm *kvm) {}
1017
1018#endif
1019
1020static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu) 991static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
1021{ 992{
1022 set_bit(req, &vcpu->requests); 993 set_bit(req, &vcpu->requests);
@@ -1034,8 +1005,6 @@ static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
1034 1005
1035extern bool kvm_rebooting; 1006extern bool kvm_rebooting;
1036 1007
1037struct kvm_device_ops;
1038
1039struct kvm_device { 1008struct kvm_device {
1040 struct kvm_device_ops *ops; 1009 struct kvm_device_ops *ops;
1041 struct kvm *kvm; 1010 struct kvm *kvm;
@@ -1068,12 +1037,11 @@ struct kvm_device_ops {
1068void kvm_device_get(struct kvm_device *dev); 1037void kvm_device_get(struct kvm_device *dev);
1069void kvm_device_put(struct kvm_device *dev); 1038void kvm_device_put(struct kvm_device *dev);
1070struct kvm_device *kvm_device_from_filp(struct file *filp); 1039struct kvm_device *kvm_device_from_filp(struct file *filp);
1040int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
1041void kvm_unregister_device_ops(u32 type);
1071 1042
1072extern struct kvm_device_ops kvm_mpic_ops; 1043extern struct kvm_device_ops kvm_mpic_ops;
1073extern struct kvm_device_ops kvm_xics_ops; 1044extern struct kvm_device_ops kvm_xics_ops;
1074extern struct kvm_device_ops kvm_vfio_ops;
1075extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
1076extern struct kvm_device_ops kvm_flic_ops;
1077 1045
1078#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT 1046#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
1079 1047
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index b0bcce0ddc95..931da7e917cf 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -17,6 +17,20 @@
17#ifndef __KVM_TYPES_H__ 17#ifndef __KVM_TYPES_H__
18#define __KVM_TYPES_H__ 18#define __KVM_TYPES_H__
19 19
20struct kvm;
21struct kvm_async_pf;
22struct kvm_device_ops;
23struct kvm_interrupt;
24struct kvm_irq_routing_table;
25struct kvm_memory_slot;
26struct kvm_one_reg;
27struct kvm_run;
28struct kvm_userspace_memory_region;
29struct kvm_vcpu;
30struct kvm_vcpu_init;
31
32enum kvm_mr_change;
33
20#include <asm/types.h> 34#include <asm/types.h>
21 35
22/* 36/*
@@ -40,33 +54,6 @@ typedef u64 hfn_t;
40 54
41typedef hfn_t pfn_t; 55typedef hfn_t pfn_t;
42 56
43union kvm_ioapic_redirect_entry {
44 u64 bits;
45 struct {
46 u8 vector;
47 u8 delivery_mode:3;
48 u8 dest_mode:1;
49 u8 delivery_status:1;
50 u8 polarity:1;
51 u8 remote_irr:1;
52 u8 trig_mode:1;
53 u8 mask:1;
54 u8 reserve:7;
55 u8 reserved[4];
56 u8 dest_id;
57 } fields;
58};
59
60struct kvm_lapic_irq {
61 u32 vector;
62 u32 delivery_mode;
63 u32 dest_mode;
64 u32 level;
65 u32 trig_mode;
66 u32 shorthand;
67 u32 dest_id;
68};
69
70struct gfn_to_hva_cache { 57struct gfn_to_hva_cache {
71 u64 generation; 58 u64 generation;
72 gpa_t gpa; 59 gpa_t gpa;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index e43686472197..cfceef32c9b3 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -13,8 +13,9 @@
13#define __LINUX_LEDS_H_INCLUDED 13#define __LINUX_LEDS_H_INCLUDED
14 14
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/spinlock.h> 16#include <linux/mutex.h>
17#include <linux/rwsem.h> 17#include <linux/rwsem.h>
18#include <linux/spinlock.h>
18#include <linux/timer.h> 19#include <linux/timer.h>
19#include <linux/workqueue.h> 20#include <linux/workqueue.h>
20 21
@@ -31,8 +32,8 @@ enum led_brightness {
31 32
32struct led_classdev { 33struct led_classdev {
33 const char *name; 34 const char *name;
34 int brightness; 35 enum led_brightness brightness;
35 int max_brightness; 36 enum led_brightness max_brightness;
36 int flags; 37 int flags;
37 38
38 /* Lower 16 bits reflect status */ 39 /* Lower 16 bits reflect status */
@@ -42,11 +43,20 @@ struct led_classdev {
42#define LED_BLINK_ONESHOT (1 << 17) 43#define LED_BLINK_ONESHOT (1 << 17)
43#define LED_BLINK_ONESHOT_STOP (1 << 18) 44#define LED_BLINK_ONESHOT_STOP (1 << 18)
44#define LED_BLINK_INVERT (1 << 19) 45#define LED_BLINK_INVERT (1 << 19)
46#define LED_SYSFS_DISABLE (1 << 20)
47#define SET_BRIGHTNESS_ASYNC (1 << 21)
48#define SET_BRIGHTNESS_SYNC (1 << 22)
45 49
46 /* Set LED brightness level */ 50 /* Set LED brightness level */
47 /* Must not sleep, use a workqueue if needed */ 51 /* Must not sleep, use a workqueue if needed */
48 void (*brightness_set)(struct led_classdev *led_cdev, 52 void (*brightness_set)(struct led_classdev *led_cdev,
49 enum led_brightness brightness); 53 enum led_brightness brightness);
54 /*
55 * Set LED brightness level immediately - it can block the caller for
56 * the time required for accessing a LED device register.
57 */
58 int (*brightness_set_sync)(struct led_classdev *led_cdev,
59 enum led_brightness brightness);
50 /* Get LED brightness level */ 60 /* Get LED brightness level */
51 enum led_brightness (*brightness_get)(struct led_classdev *led_cdev); 61 enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
52 62
@@ -85,6 +95,9 @@ struct led_classdev {
85 /* true if activated - deactivate routine uses it to do cleanup */ 95 /* true if activated - deactivate routine uses it to do cleanup */
86 bool activated; 96 bool activated;
87#endif 97#endif
98
99 /* Ensures consistent access to the LED Flash Class device */
100 struct mutex led_access;
88}; 101};
89 102
90extern int led_classdev_register(struct device *parent, 103extern int led_classdev_register(struct device *parent,
@@ -140,6 +153,43 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
140 */ 153 */
141extern void led_set_brightness(struct led_classdev *led_cdev, 154extern void led_set_brightness(struct led_classdev *led_cdev,
142 enum led_brightness brightness); 155 enum led_brightness brightness);
156/**
157 * led_update_brightness - update LED brightness
158 * @led_cdev: the LED to query
159 *
160 * Get an LED's current brightness and update led_cdev->brightness
161 * member with the obtained value.
162 *
163 * Returns: 0 on success or negative error value on failure
164 */
165extern int led_update_brightness(struct led_classdev *led_cdev);
166
167/**
168 * led_sysfs_disable - disable LED sysfs interface
169 * @led_cdev: the LED to set
170 *
171 * Disable the led_cdev's sysfs interface.
172 */
173extern void led_sysfs_disable(struct led_classdev *led_cdev);
174
175/**
176 * led_sysfs_enable - enable LED sysfs interface
177 * @led_cdev: the LED to set
178 *
179 * Enable the led_cdev's sysfs interface.
180 */
181extern void led_sysfs_enable(struct led_classdev *led_cdev);
182
183/**
184 * led_sysfs_is_disabled - check if LED sysfs interface is disabled
185 * @led_cdev: the LED to query
186 *
187 * Returns: true if the led_cdev's sysfs interface is disabled.
188 */
189static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
190{
191 return led_cdev->flags & LED_SYSFS_DISABLE;
192}
143 193
144/* 194/*
145 * LED Triggers 195 * LED Triggers
@@ -251,6 +301,7 @@ struct gpio_led {
251 unsigned retain_state_suspended : 1; 301 unsigned retain_state_suspended : 1;
252 unsigned default_state : 2; 302 unsigned default_state : 2;
253 /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ 303 /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */
304 struct gpio_desc *gpiod;
254}; 305};
255#define LEDS_GPIO_DEFSTATE_OFF 0 306#define LEDS_GPIO_DEFSTATE_OFF 0
256#define LEDS_GPIO_DEFSTATE_ON 1 307#define LEDS_GPIO_DEFSTATE_ON 1
@@ -263,7 +314,7 @@ struct gpio_led_platform_data {
263#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */ 314#define GPIO_LED_NO_BLINK_LOW 0 /* No blink GPIO state low */
264#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */ 315#define GPIO_LED_NO_BLINK_HIGH 1 /* No blink GPIO state high */
265#define GPIO_LED_BLINK 2 /* Please, blink */ 316#define GPIO_LED_BLINK 2 /* Please, blink */
266 int (*gpio_blink_set)(unsigned gpio, int state, 317 int (*gpio_blink_set)(struct gpio_desc *desc, int state,
267 unsigned long *delay_on, 318 unsigned long *delay_on,
268 unsigned long *delay_off); 319 unsigned long *delay_off);
269}; 320};
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 92abb497ab14..2d182413b1db 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -191,7 +191,8 @@ enum {
191 ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */ 191 ATA_DEV_PMP_UNSUP = 6, /* SATA port multiplier (unsupported) */
192 ATA_DEV_SEMB = 7, /* SEMB */ 192 ATA_DEV_SEMB = 7, /* SEMB */
193 ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */ 193 ATA_DEV_SEMB_UNSUP = 8, /* SEMB (unsupported) */
194 ATA_DEV_NONE = 9, /* no device */ 194 ATA_DEV_ZAC = 9, /* ZAC device */
195 ATA_DEV_NONE = 10, /* no device */
195 196
196 /* struct ata_link flags */ 197 /* struct ata_link flags */
197 ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */ 198 ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
@@ -1191,9 +1192,9 @@ extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
1191extern int ata_scsi_slave_config(struct scsi_device *sdev); 1192extern int ata_scsi_slave_config(struct scsi_device *sdev);
1192extern void ata_scsi_slave_destroy(struct scsi_device *sdev); 1193extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
1193extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, 1194extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
1194 int queue_depth, int reason); 1195 int queue_depth);
1195extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, 1196extern int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
1196 int queue_depth, int reason); 1197 int queue_depth);
1197extern struct ata_device *ata_dev_pair(struct ata_device *adev); 1198extern struct ata_device *ata_dev_pair(struct ata_device *adev);
1198extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); 1199extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
1199extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); 1200extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
@@ -1404,14 +1405,14 @@ static inline int sata_srst_pmp(struct ata_link *link)
1404 * printk helpers 1405 * printk helpers
1405 */ 1406 */
1406__printf(3, 4) 1407__printf(3, 4)
1407int ata_port_printk(const struct ata_port *ap, const char *level, 1408void ata_port_printk(const struct ata_port *ap, const char *level,
1408 const char *fmt, ...); 1409 const char *fmt, ...);
1409__printf(3, 4) 1410__printf(3, 4)
1410int ata_link_printk(const struct ata_link *link, const char *level, 1411void ata_link_printk(const struct ata_link *link, const char *level,
1411 const char *fmt, ...); 1412 const char *fmt, ...);
1412__printf(3, 4) 1413__printf(3, 4)
1413int ata_dev_printk(const struct ata_device *dev, const char *level, 1414void ata_dev_printk(const struct ata_device *dev, const char *level,
1414 const char *fmt, ...); 1415 const char *fmt, ...);
1415 1416
1416#define ata_port_err(ap, fmt, ...) \ 1417#define ata_port_err(ap, fmt, ...) \
1417 ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__) 1418 ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
@@ -1491,7 +1492,8 @@ static inline unsigned int ata_tag_internal(unsigned int tag)
1491static inline unsigned int ata_class_enabled(unsigned int class) 1492static inline unsigned int ata_class_enabled(unsigned int class)
1492{ 1493{
1493 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI || 1494 return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI ||
1494 class == ATA_DEV_PMP || class == ATA_DEV_SEMB; 1495 class == ATA_DEV_PMP || class == ATA_DEV_SEMB ||
1496 class == ATA_DEV_ZAC;
1495} 1497}
1496 1498
1497static inline unsigned int ata_class_disabled(unsigned int class) 1499static inline unsigned int ata_class_disabled(unsigned int class)
diff --git a/include/linux/list.h b/include/linux/list.h
index cbbb96fcead9..feb773c76ee0 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -5,6 +5,7 @@
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6#include <linux/poison.h> 6#include <linux/poison.h>
7#include <linux/const.h> 7#include <linux/const.h>
8#include <linux/kernel.h>
8 9
9/* 10/*
10 * Simple doubly linked list implementation. 11 * Simple doubly linked list implementation.
@@ -345,7 +346,7 @@ static inline void list_splice_tail_init(struct list_head *list,
345 * list_entry - get the struct for this entry 346 * list_entry - get the struct for this entry
346 * @ptr: the &struct list_head pointer. 347 * @ptr: the &struct list_head pointer.
347 * @type: the type of the struct this is embedded in. 348 * @type: the type of the struct this is embedded in.
348 * @member: the name of the list_struct within the struct. 349 * @member: the name of the list_head within the struct.
349 */ 350 */
350#define list_entry(ptr, type, member) \ 351#define list_entry(ptr, type, member) \
351 container_of(ptr, type, member) 352 container_of(ptr, type, member)
@@ -354,7 +355,7 @@ static inline void list_splice_tail_init(struct list_head *list,
354 * list_first_entry - get the first element from a list 355 * list_first_entry - get the first element from a list
355 * @ptr: the list head to take the element from. 356 * @ptr: the list head to take the element from.
356 * @type: the type of the struct this is embedded in. 357 * @type: the type of the struct this is embedded in.
357 * @member: the name of the list_struct within the struct. 358 * @member: the name of the list_head within the struct.
358 * 359 *
359 * Note, that list is expected to be not empty. 360 * Note, that list is expected to be not empty.
360 */ 361 */
@@ -365,7 +366,7 @@ static inline void list_splice_tail_init(struct list_head *list,
365 * list_last_entry - get the last element from a list 366 * list_last_entry - get the last element from a list
366 * @ptr: the list head to take the element from. 367 * @ptr: the list head to take the element from.
367 * @type: the type of the struct this is embedded in. 368 * @type: the type of the struct this is embedded in.
368 * @member: the name of the list_struct within the struct. 369 * @member: the name of the list_head within the struct.
369 * 370 *
370 * Note, that list is expected to be not empty. 371 * Note, that list is expected to be not empty.
371 */ 372 */
@@ -376,7 +377,7 @@ static inline void list_splice_tail_init(struct list_head *list,
376 * list_first_entry_or_null - get the first element from a list 377 * list_first_entry_or_null - get the first element from a list
377 * @ptr: the list head to take the element from. 378 * @ptr: the list head to take the element from.
378 * @type: the type of the struct this is embedded in. 379 * @type: the type of the struct this is embedded in.
379 * @member: the name of the list_struct within the struct. 380 * @member: the name of the list_head within the struct.
380 * 381 *
381 * Note that if the list is empty, it returns NULL. 382 * Note that if the list is empty, it returns NULL.
382 */ 383 */
@@ -386,7 +387,7 @@ static inline void list_splice_tail_init(struct list_head *list,
386/** 387/**
387 * list_next_entry - get the next element in list 388 * list_next_entry - get the next element in list
388 * @pos: the type * to cursor 389 * @pos: the type * to cursor
389 * @member: the name of the list_struct within the struct. 390 * @member: the name of the list_head within the struct.
390 */ 391 */
391#define list_next_entry(pos, member) \ 392#define list_next_entry(pos, member) \
392 list_entry((pos)->member.next, typeof(*(pos)), member) 393 list_entry((pos)->member.next, typeof(*(pos)), member)
@@ -394,7 +395,7 @@ static inline void list_splice_tail_init(struct list_head *list,
394/** 395/**
395 * list_prev_entry - get the prev element in list 396 * list_prev_entry - get the prev element in list
396 * @pos: the type * to cursor 397 * @pos: the type * to cursor
397 * @member: the name of the list_struct within the struct. 398 * @member: the name of the list_head within the struct.
398 */ 399 */
399#define list_prev_entry(pos, member) \ 400#define list_prev_entry(pos, member) \
400 list_entry((pos)->member.prev, typeof(*(pos)), member) 401 list_entry((pos)->member.prev, typeof(*(pos)), member)
@@ -440,7 +441,7 @@ static inline void list_splice_tail_init(struct list_head *list,
440 * list_for_each_entry - iterate over list of given type 441 * list_for_each_entry - iterate over list of given type
441 * @pos: the type * to use as a loop cursor. 442 * @pos: the type * to use as a loop cursor.
442 * @head: the head for your list. 443 * @head: the head for your list.
443 * @member: the name of the list_struct within the struct. 444 * @member: the name of the list_head within the struct.
444 */ 445 */
445#define list_for_each_entry(pos, head, member) \ 446#define list_for_each_entry(pos, head, member) \
446 for (pos = list_first_entry(head, typeof(*pos), member); \ 447 for (pos = list_first_entry(head, typeof(*pos), member); \
@@ -451,7 +452,7 @@ static inline void list_splice_tail_init(struct list_head *list,
451 * list_for_each_entry_reverse - iterate backwards over list of given type. 452 * list_for_each_entry_reverse - iterate backwards over list of given type.
452 * @pos: the type * to use as a loop cursor. 453 * @pos: the type * to use as a loop cursor.
453 * @head: the head for your list. 454 * @head: the head for your list.
454 * @member: the name of the list_struct within the struct. 455 * @member: the name of the list_head within the struct.
455 */ 456 */
456#define list_for_each_entry_reverse(pos, head, member) \ 457#define list_for_each_entry_reverse(pos, head, member) \
457 for (pos = list_last_entry(head, typeof(*pos), member); \ 458 for (pos = list_last_entry(head, typeof(*pos), member); \
@@ -462,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list,
462 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() 463 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
463 * @pos: the type * to use as a start point 464 * @pos: the type * to use as a start point
464 * @head: the head of the list 465 * @head: the head of the list
465 * @member: the name of the list_struct within the struct. 466 * @member: the name of the list_head within the struct.
466 * 467 *
467 * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). 468 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
468 */ 469 */
@@ -473,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list,
473 * list_for_each_entry_continue - continue iteration over list of given type 474 * list_for_each_entry_continue - continue iteration over list of given type
474 * @pos: the type * to use as a loop cursor. 475 * @pos: the type * to use as a loop cursor.
475 * @head: the head for your list. 476 * @head: the head for your list.
476 * @member: the name of the list_struct within the struct. 477 * @member: the name of the list_head within the struct.
477 * 478 *
478 * Continue to iterate over list of given type, continuing after 479 * Continue to iterate over list of given type, continuing after
479 * the current position. 480 * the current position.
@@ -487,7 +488,7 @@ static inline void list_splice_tail_init(struct list_head *list,
487 * list_for_each_entry_continue_reverse - iterate backwards from the given point 488 * list_for_each_entry_continue_reverse - iterate backwards from the given point
488 * @pos: the type * to use as a loop cursor. 489 * @pos: the type * to use as a loop cursor.
489 * @head: the head for your list. 490 * @head: the head for your list.
490 * @member: the name of the list_struct within the struct. 491 * @member: the name of the list_head within the struct.
491 * 492 *
492 * Start to iterate over list of given type backwards, continuing after 493 * Start to iterate over list of given type backwards, continuing after
493 * the current position. 494 * the current position.
@@ -501,7 +502,7 @@ static inline void list_splice_tail_init(struct list_head *list,
501 * list_for_each_entry_from - iterate over list of given type from the current point 502 * list_for_each_entry_from - iterate over list of given type from the current point
502 * @pos: the type * to use as a loop cursor. 503 * @pos: the type * to use as a loop cursor.
503 * @head: the head for your list. 504 * @head: the head for your list.
504 * @member: the name of the list_struct within the struct. 505 * @member: the name of the list_head within the struct.
505 * 506 *
506 * Iterate over list of given type, continuing from current position. 507 * Iterate over list of given type, continuing from current position.
507 */ 508 */
@@ -514,7 +515,7 @@ static inline void list_splice_tail_init(struct list_head *list,
514 * @pos: the type * to use as a loop cursor. 515 * @pos: the type * to use as a loop cursor.
515 * @n: another type * to use as temporary storage 516 * @n: another type * to use as temporary storage
516 * @head: the head for your list. 517 * @head: the head for your list.
517 * @member: the name of the list_struct within the struct. 518 * @member: the name of the list_head within the struct.
518 */ 519 */
519#define list_for_each_entry_safe(pos, n, head, member) \ 520#define list_for_each_entry_safe(pos, n, head, member) \
520 for (pos = list_first_entry(head, typeof(*pos), member), \ 521 for (pos = list_first_entry(head, typeof(*pos), member), \
@@ -527,7 +528,7 @@ static inline void list_splice_tail_init(struct list_head *list,
527 * @pos: the type * to use as a loop cursor. 528 * @pos: the type * to use as a loop cursor.
528 * @n: another type * to use as temporary storage 529 * @n: another type * to use as temporary storage
529 * @head: the head for your list. 530 * @head: the head for your list.
530 * @member: the name of the list_struct within the struct. 531 * @member: the name of the list_head within the struct.
531 * 532 *
532 * Iterate over list of given type, continuing after current point, 533 * Iterate over list of given type, continuing after current point,
533 * safe against removal of list entry. 534 * safe against removal of list entry.
@@ -543,7 +544,7 @@ static inline void list_splice_tail_init(struct list_head *list,
543 * @pos: the type * to use as a loop cursor. 544 * @pos: the type * to use as a loop cursor.
544 * @n: another type * to use as temporary storage 545 * @n: another type * to use as temporary storage
545 * @head: the head for your list. 546 * @head: the head for your list.
546 * @member: the name of the list_struct within the struct. 547 * @member: the name of the list_head within the struct.
547 * 548 *
548 * Iterate over list of given type from current point, safe against 549 * Iterate over list of given type from current point, safe against
549 * removal of list entry. 550 * removal of list entry.
@@ -558,7 +559,7 @@ static inline void list_splice_tail_init(struct list_head *list,
558 * @pos: the type * to use as a loop cursor. 559 * @pos: the type * to use as a loop cursor.
559 * @n: another type * to use as temporary storage 560 * @n: another type * to use as temporary storage
560 * @head: the head for your list. 561 * @head: the head for your list.
561 * @member: the name of the list_struct within the struct. 562 * @member: the name of the list_head within the struct.
562 * 563 *
563 * Iterate backwards over list of given type, safe against removal 564 * Iterate backwards over list of given type, safe against removal
564 * of list entry. 565 * of list entry.
@@ -573,7 +574,7 @@ static inline void list_splice_tail_init(struct list_head *list,
573 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop 574 * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
574 * @pos: the loop cursor used in the list_for_each_entry_safe loop 575 * @pos: the loop cursor used in the list_for_each_entry_safe loop
575 * @n: temporary storage used in list_for_each_entry_safe 576 * @n: temporary storage used in list_for_each_entry_safe
576 * @member: the name of the list_struct within the struct. 577 * @member: the name of the list_head within the struct.
577 * 578 *
578 * list_safe_reset_next is not safe to use in general if the list may be 579 * list_safe_reset_next is not safe to use in general if the list may be
579 * modified concurrently (eg. the lock is dropped in the loop body). An 580 * modified concurrently (eg. the lock is dropped in the loop body). An
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index 257d3779f2ab..0ca8109934e4 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -17,12 +17,8 @@
17 * Enable lockd debugging. 17 * Enable lockd debugging.
18 * Requires RPC_DEBUG. 18 * Requires RPC_DEBUG.
19 */ 19 */
20#ifdef RPC_DEBUG
21# define LOCKD_DEBUG 1
22#endif
23
24#undef ifdebug 20#undef ifdebug
25#if defined(RPC_DEBUG) && defined(LOCKD_DEBUG) 21#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
26# define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag)) 22# define ifdebug(flag) if (unlikely(nlm_debug & NLMDBG_##flag))
27#else 23#else
28# define ifdebug(flag) if (0) 24# define ifdebug(flag) if (0)
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 219d79627c05..ff82a32871b5 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -178,7 +178,6 @@ struct nlm_block {
178 unsigned char b_granted; /* VFS granted lock */ 178 unsigned char b_granted; /* VFS granted lock */
179 struct nlm_file * b_file; /* file in question */ 179 struct nlm_file * b_file; /* file in question */
180 struct cache_req * b_cache_req; /* deferred request handling */ 180 struct cache_req * b_cache_req; /* deferred request handling */
181 struct file_lock * b_fl; /* set for GETLK */
182 struct cache_deferred_req * b_deferred_req; 181 struct cache_deferred_req * b_deferred_req;
183 unsigned int b_flags; /* block flags */ 182 unsigned int b_flags; /* block flags */
184#define B_QUEUED 1 /* lock queued */ 183#define B_QUEUED 1 /* lock queued */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 008388f920d7..74ab23176e9b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 4 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
6 * 6 *
7 * see Documentation/lockdep-design.txt for more details. 7 * see Documentation/locking/lockdep-design.txt for more details.
8 */ 8 */
9#ifndef __LINUX_LOCKDEP_H 9#ifndef __LINUX_LOCKDEP_H
10#define __LINUX_LOCKDEP_H 10#define __LINUX_LOCKDEP_H
@@ -362,6 +362,10 @@ extern void lockdep_trace_alloc(gfp_t mask);
362 WARN_ON(debug_locks && !lockdep_is_held(l)); \ 362 WARN_ON(debug_locks && !lockdep_is_held(l)); \
363 } while (0) 363 } while (0)
364 364
365#define lockdep_assert_held_once(l) do { \
366 WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
367 } while (0)
368
365#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion) 369#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
366 370
367#else /* !CONFIG_LOCKDEP */ 371#else /* !CONFIG_LOCKDEP */
@@ -412,6 +416,7 @@ struct lock_class_key { };
412#define lockdep_depth(tsk) (0) 416#define lockdep_depth(tsk) (0)
413 417
414#define lockdep_assert_held(l) do { (void)(l); } while (0) 418#define lockdep_assert_held(l) do { (void)(l); } while (0)
419#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
415 420
416#define lockdep_recursing(tsk) (0) 421#define lockdep_recursing(tsk) (0)
417 422
@@ -505,6 +510,7 @@ static inline void print_irqtrace_events(struct task_struct *curr)
505 510
506#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_) 511#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
507#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_) 512#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
513#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
508#define lock_map_release(l) lock_release(l, 1, _THIS_IP_) 514#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
509 515
510#ifdef CONFIG_PROVE_LOCKING 516#ifdef CONFIG_PROVE_LOCKING
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
new file mode 100644
index 000000000000..1726ccbd8009
--- /dev/null
+++ b/include/linux/mailbox_client.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright (C) 2013-2014 Linaro Ltd.
3 * Author: Jassi Brar <jassisinghbrar@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __MAILBOX_CLIENT_H
11#define __MAILBOX_CLIENT_H
12
13#include <linux/of.h>
14#include <linux/device.h>
15
16struct mbox_chan;
17
18/**
19 * struct mbox_client - User of a mailbox
20 * @dev: The client device
21 * @tx_block: If the mbox_send_message should block until data is
22 * transmitted.
23 * @tx_tout: Max block period in ms before TX is assumed failure
24 * @knows_txdone: If the client could run the TX state machine. Usually
25 * if the client receives some ACK packet for transmission.
26 * Unused if the controller already has TX_Done/RTR IRQ.
27 * @rx_callback: Atomic callback to provide client the data received
28 * @tx_prepare: Atomic callback to ask client to prepare the payload
29 * before initiating the transmission if required.
30 * @tx_done: Atomic callback to tell client of data transmission
31 */
32struct mbox_client {
33 struct device *dev;
34 bool tx_block;
35 unsigned long tx_tout;
36 bool knows_txdone;
37
38 void (*rx_callback)(struct mbox_client *cl, void *mssg);
39 void (*tx_prepare)(struct mbox_client *cl, void *mssg);
40 void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
41};
42
43struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
44int mbox_send_message(struct mbox_chan *chan, void *mssg);
45void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
46bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
47void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
48
49#endif /* __MAILBOX_CLIENT_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
new file mode 100644
index 000000000000..d4cf96f07cfc
--- /dev/null
+++ b/include/linux/mailbox_controller.h
@@ -0,0 +1,133 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 */
6
7#ifndef __MAILBOX_CONTROLLER_H
8#define __MAILBOX_CONTROLLER_H
9
10#include <linux/of.h>
11#include <linux/types.h>
12#include <linux/timer.h>
13#include <linux/device.h>
14#include <linux/completion.h>
15
16struct mbox_chan;
17
18/**
19 * struct mbox_chan_ops - methods to control mailbox channels
20 * @send_data: The API asks the MBOX controller driver, in atomic
21 * context try to transmit a message on the bus. Returns 0 if
22 * data is accepted for transmission, -EBUSY while rejecting
23 * if the remote hasn't yet read the last data sent. Actual
24 * transmission of data is reported by the controller via
25 * mbox_chan_txdone (if it has some TX ACK irq). It must not
26 * sleep.
27 * @startup: Called when a client requests the chan. The controller
28 * could ask clients for additional parameters of communication
29 * to be provided via client's chan_data. This call may
30 * block. After this call the Controller must forward any
31 * data received on the chan by calling mbox_chan_received_data.
32 * The controller may do stuff that need to sleep.
33 * @shutdown: Called when a client relinquishes control of a chan.
34 * This call may block too. The controller must not forward
35 * any received data anymore.
36 * The controller may do stuff that need to sleep.
37 * @last_tx_done: If the controller sets 'txdone_poll', the API calls
38 * this to poll status of last TX. The controller must
39 * give priority to IRQ method over polling and never
40 * set both txdone_poll and txdone_irq. Only in polling
41 * mode 'send_data' is expected to return -EBUSY.
42 * The controller may do stuff that need to sleep/block.
43 * Used only if txdone_poll:=true && txdone_irq:=false
44 * @peek_data: Atomic check for any received data. Return true if controller
45 * has some data to push to the client. False otherwise.
46 */
47struct mbox_chan_ops {
48 int (*send_data)(struct mbox_chan *chan, void *data);
49 int (*startup)(struct mbox_chan *chan);
50 void (*shutdown)(struct mbox_chan *chan);
51 bool (*last_tx_done)(struct mbox_chan *chan);
52 bool (*peek_data)(struct mbox_chan *chan);
53};
54
55/**
56 * struct mbox_controller - Controller of a class of communication channels
57 * @dev: Device backing this controller
58 * @ops: Operators that work on each communication chan
59 * @chans: Array of channels
60 * @num_chans: Number of channels in the 'chans' array.
61 * @txdone_irq: Indicates if the controller can report to API when
62 * the last transmitted data was read by the remote.
63 * Eg, if it has some TX ACK irq.
64 * @txdone_poll: If the controller can read but not report the TX
65 * done. Ex, some register shows the TX status but
66 * no interrupt rises. Ignored if 'txdone_irq' is set.
67 * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
68 * last TX's status after these many millisecs
69 * @of_xlate: Controller driver specific mapping of channel via DT
70 * @poll: API private. Used to poll for TXDONE on all channels.
71 * @node: API private. To hook into list of controllers.
72 */
73struct mbox_controller {
74 struct device *dev;
75 struct mbox_chan_ops *ops;
76 struct mbox_chan *chans;
77 int num_chans;
78 bool txdone_irq;
79 bool txdone_poll;
80 unsigned txpoll_period;
81 struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
82 const struct of_phandle_args *sp);
83 /* Internal to API */
84 struct timer_list poll;
85 struct list_head node;
86};
87
88/*
89 * The length of circular buffer for queuing messages from a client.
90 * 'msg_count' tracks the number of buffered messages while 'msg_free'
91 * is the index where the next message would be buffered.
92 * We shouldn't need it too big because every transfer is interrupt
93 * triggered and if we have lots of data to transfer, the interrupt
94 * latencies are going to be the bottleneck, not the buffer length.
95 * Besides, mbox_send_message could be called from atomic context and
96 * the client could also queue another message from the notifier 'tx_done'
97 * of the last transfer done.
98 * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
99 * print, it needs to be taken from config option or somesuch.
100 */
101#define MBOX_TX_QUEUE_LEN 20
102
103/**
104 * struct mbox_chan - s/w representation of a communication chan
105 * @mbox: Pointer to the parent/provider of this channel
106 * @txdone_method: Way to detect TXDone chosen by the API
107 * @cl: Pointer to the current owner of this channel
108 * @tx_complete: Transmission completion
109 * @active_req: Currently active request hook
110 * @msg_count: No. of mssg currently queued
111 * @msg_free: Index of next available mssg slot
112 * @msg_data: Hook for data packet
113 * @lock: Serialise access to the channel
114 * @con_priv: Hook for controller driver to attach private data
115 */
116struct mbox_chan {
117 struct mbox_controller *mbox;
118 unsigned txdone_method;
119 struct mbox_client *cl;
120 struct completion tx_complete;
121 void *active_req;
122 unsigned msg_count, msg_free;
123 void *msg_data[MBOX_TX_QUEUE_LEN];
124 spinlock_t lock; /* Serialise access to the channel */
125 void *con_priv;
126};
127
128int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */
129void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */
130void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
131void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
132
133#endif /* __MAILBOX_CONTROLLER_H */
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 8e9a029e093d..e6982ac3200d 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
16#define MARVELL_PHY_ID_88E1318S 0x01410e90 16#define MARVELL_PHY_ID_88E1318S 0x01410e90
17#define MARVELL_PHY_ID_88E1116R 0x01410e40 17#define MARVELL_PHY_ID_88E1116R 0x01410e40
18#define MARVELL_PHY_ID_88E1510 0x01410dd0 18#define MARVELL_PHY_ID_88E1510 0x01410dd0
19#define MARVELL_PHY_ID_88E3016 0x01410e60
19 20
20/* struct phy_device dev_flags definitions */ 21/* struct phy_device dev_flags definitions */
21#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001 22#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
diff --git a/include/linux/mbus.h b/include/linux/mbus.h
index 550c88fb0267..611b69fa8594 100644
--- a/include/linux/mbus.h
+++ b/include/linux/mbus.h
@@ -61,6 +61,7 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info(void)
61} 61}
62#endif 62#endif
63 63
64int mvebu_mbus_save_cpu_target(u32 *store_addr);
64void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); 65void mvebu_mbus_get_pcie_mem_aperture(struct resource *res);
65void mvebu_mbus_get_pcie_io_aperture(struct resource *res); 66void mvebu_mbus_get_pcie_io_aperture(struct resource *res);
66int mvebu_mbus_add_window_remap_by_id(unsigned int target, 67int mvebu_mbus_add_window_remap_by_id(unsigned int target,
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index d14af7b722ef..164aad1f9f12 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/uuid.h> 5#include <linux/uuid.h>
6#include <linux/mod_devicetable.h>
6 7
7struct mei_cl_device; 8struct mei_cl_device;
8 9
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e0752d204d9e..7c95af8d552c 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,7 +25,6 @@
25#include <linux/jump_label.h> 25#include <linux/jump_label.h>
26 26
27struct mem_cgroup; 27struct mem_cgroup;
28struct page_cgroup;
29struct page; 28struct page;
30struct mm_struct; 29struct mm_struct;
31struct kmem_cache; 30struct kmem_cache;
@@ -68,10 +67,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
68struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); 67struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
69struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); 68struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
70 69
71bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, 70bool mem_cgroup_is_descendant(struct mem_cgroup *memcg,
72 struct mem_cgroup *memcg); 71 struct mem_cgroup *root);
73bool task_in_mem_cgroup(struct task_struct *task, 72bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
74 const struct mem_cgroup *memcg);
75 73
76extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); 74extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
77extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); 75extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
@@ -79,15 +77,16 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
79extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); 77extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
80extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); 78extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
81 79
82static inline 80static inline bool mm_match_cgroup(struct mm_struct *mm,
83bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) 81 struct mem_cgroup *memcg)
84{ 82{
85 struct mem_cgroup *task_memcg; 83 struct mem_cgroup *task_memcg;
86 bool match; 84 bool match = false;
87 85
88 rcu_read_lock(); 86 rcu_read_lock();
89 task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 87 task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
90 match = __mem_cgroup_same_or_subtree(memcg, task_memcg); 88 if (task_memcg)
89 match = mem_cgroup_is_descendant(task_memcg, memcg);
91 rcu_read_unlock(); 90 rcu_read_unlock();
92 return match; 91 return match;
93} 92}
@@ -139,48 +138,23 @@ static inline bool mem_cgroup_disabled(void)
139 return false; 138 return false;
140} 139}
141 140
142void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked, 141struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
143 unsigned long *flags); 142 unsigned long *flags);
144 143void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked,
145extern atomic_t memcg_moving; 144 unsigned long *flags);
146 145void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
147static inline void mem_cgroup_begin_update_page_stat(struct page *page, 146 enum mem_cgroup_stat_index idx, int val);
148 bool *locked, unsigned long *flags)
149{
150 if (mem_cgroup_disabled())
151 return;
152 rcu_read_lock();
153 *locked = false;
154 if (atomic_read(&memcg_moving))
155 __mem_cgroup_begin_update_page_stat(page, locked, flags);
156}
157 147
158void __mem_cgroup_end_update_page_stat(struct page *page, 148static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
159 unsigned long *flags);
160static inline void mem_cgroup_end_update_page_stat(struct page *page,
161 bool *locked, unsigned long *flags)
162{
163 if (mem_cgroup_disabled())
164 return;
165 if (*locked)
166 __mem_cgroup_end_update_page_stat(page, flags);
167 rcu_read_unlock();
168}
169
170void mem_cgroup_update_page_stat(struct page *page,
171 enum mem_cgroup_stat_index idx,
172 int val);
173
174static inline void mem_cgroup_inc_page_stat(struct page *page,
175 enum mem_cgroup_stat_index idx) 149 enum mem_cgroup_stat_index idx)
176{ 150{
177 mem_cgroup_update_page_stat(page, idx, 1); 151 mem_cgroup_update_page_stat(memcg, idx, 1);
178} 152}
179 153
180static inline void mem_cgroup_dec_page_stat(struct page *page, 154static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
181 enum mem_cgroup_stat_index idx) 155 enum mem_cgroup_stat_index idx)
182{ 156{
183 mem_cgroup_update_page_stat(page, idx, -1); 157 mem_cgroup_update_page_stat(memcg, idx, -1);
184} 158}
185 159
186unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 160unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -199,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
199void mem_cgroup_split_huge_fixup(struct page *head); 173void mem_cgroup_split_huge_fixup(struct page *head);
200#endif 174#endif
201 175
202#ifdef CONFIG_DEBUG_VM
203bool mem_cgroup_bad_page_check(struct page *page);
204void mem_cgroup_print_bad_page(struct page *page);
205#endif
206#else /* CONFIG_MEMCG */ 176#else /* CONFIG_MEMCG */
207struct mem_cgroup; 177struct mem_cgroup;
208 178
@@ -315,12 +285,13 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
315{ 285{
316} 286}
317 287
318static inline void mem_cgroup_begin_update_page_stat(struct page *page, 288static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
319 bool *locked, unsigned long *flags) 289 bool *locked, unsigned long *flags)
320{ 290{
291 return NULL;
321} 292}
322 293
323static inline void mem_cgroup_end_update_page_stat(struct page *page, 294static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
324 bool *locked, unsigned long *flags) 295 bool *locked, unsigned long *flags)
325{ 296{
326} 297}
@@ -343,12 +314,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
343 return false; 314 return false;
344} 315}
345 316
346static inline void mem_cgroup_inc_page_stat(struct page *page, 317static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
347 enum mem_cgroup_stat_index idx) 318 enum mem_cgroup_stat_index idx)
348{ 319{
349} 320}
350 321
351static inline void mem_cgroup_dec_page_stat(struct page *page, 322static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
352 enum mem_cgroup_stat_index idx) 323 enum mem_cgroup_stat_index idx)
353{ 324{
354} 325}
@@ -371,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
371} 342}
372#endif /* CONFIG_MEMCG */ 343#endif /* CONFIG_MEMCG */
373 344
374#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
375static inline bool
376mem_cgroup_bad_page_check(struct page *page)
377{
378 return false;
379}
380
381static inline void
382mem_cgroup_print_bad_page(struct page *page)
383{
384}
385#endif
386
387enum { 345enum {
388 UNDER_LIMIT, 346 UNDER_LIMIT,
389 SOFT_LIMIT, 347 SOFT_LIMIT,
@@ -440,15 +398,10 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order);
440 398
441int memcg_cache_id(struct mem_cgroup *memcg); 399int memcg_cache_id(struct mem_cgroup *memcg);
442 400
443int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
444 struct kmem_cache *root_cache);
445void memcg_free_cache_params(struct kmem_cache *s);
446
447int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
448void memcg_update_array_size(int num_groups); 401void memcg_update_array_size(int num_groups);
449 402
450struct kmem_cache * 403struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep);
451__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); 404void __memcg_kmem_put_cache(struct kmem_cache *cachep);
452 405
453int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order); 406int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
454void __memcg_uncharge_slab(struct kmem_cache *cachep, int order); 407void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);
@@ -476,9 +429,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
476 /* 429 /*
477 * __GFP_NOFAIL allocations will move on even if charging is not 430 * __GFP_NOFAIL allocations will move on even if charging is not
478 * possible. Therefore we don't even try, and have this allocation 431 * possible. Therefore we don't even try, and have this allocation
479 * unaccounted. We could in theory charge it with 432 * unaccounted. We could in theory charge it forcibly, but we hope
480 * res_counter_charge_nofail, but we hope those allocations are rare, 433 * those allocations are rare, and won't be worth the trouble.
481 * and won't be worth the trouble.
482 */ 434 */
483 if (gfp & __GFP_NOFAIL) 435 if (gfp & __GFP_NOFAIL)
484 return true; 436 return true;
@@ -496,8 +448,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
496 * memcg_kmem_uncharge_pages: uncharge pages from memcg 448 * memcg_kmem_uncharge_pages: uncharge pages from memcg
497 * @page: pointer to struct page being freed 449 * @page: pointer to struct page being freed
498 * @order: allocation order. 450 * @order: allocation order.
499 *
500 * there is no need to specify memcg here, since it is embedded in page_cgroup
501 */ 451 */
502static inline void 452static inline void
503memcg_kmem_uncharge_pages(struct page *page, int order) 453memcg_kmem_uncharge_pages(struct page *page, int order)
@@ -514,8 +464,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order)
514 * 464 *
515 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or 465 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
516 * failure of the allocation. if @page is NULL, this function will revert the 466 * failure of the allocation. if @page is NULL, this function will revert the
517 * charges. Otherwise, it will commit the memcg given by @memcg to the 467 * charges. Otherwise, it will commit @page to @memcg.
518 * corresponding page_cgroup.
519 */ 468 */
520static inline void 469static inline void
521memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) 470memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
@@ -543,7 +492,13 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
543 if (unlikely(fatal_signal_pending(current))) 492 if (unlikely(fatal_signal_pending(current)))
544 return cachep; 493 return cachep;
545 494
546 return __memcg_kmem_get_cache(cachep, gfp); 495 return __memcg_kmem_get_cache(cachep);
496}
497
498static __always_inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
499{
500 if (memcg_kmem_enabled())
501 __memcg_kmem_put_cache(cachep);
547} 502}
548#else 503#else
549#define for_each_memcg_cache_index(_idx) \ 504#define for_each_memcg_cache_index(_idx) \
@@ -574,21 +529,15 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
574 return -1; 529 return -1;
575} 530}
576 531
577static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
578 struct kmem_cache *s, struct kmem_cache *root_cache)
579{
580 return 0;
581}
582
583static inline void memcg_free_cache_params(struct kmem_cache *s)
584{
585}
586
587static inline struct kmem_cache * 532static inline struct kmem_cache *
588memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) 533memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
589{ 534{
590 return cachep; 535 return cachep;
591} 536}
537
538static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
539{
540}
592#endif /* CONFIG_MEMCG_KMEM */ 541#endif /* CONFIG_MEMCG_KMEM */
593#endif /* _LINUX_MEMCONTROL_H */ 542#endif /* _LINUX_MEMCONTROL_H */
594 543
diff --git a/include/linux/memory.h b/include/linux/memory.h
index bb7384e3c3d8..8b8d8d12348e 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -35,7 +35,7 @@ struct memory_block {
35}; 35};
36 36
37int arch_get_memory_phys_device(unsigned long start_pfn); 37int arch_get_memory_phys_device(unsigned long start_pfn);
38unsigned long __weak memory_block_size_bytes(void); 38unsigned long memory_block_size_bytes(void);
39 39
40/* These states are exposed to userspace as text strings in sysfs */ 40/* These states are exposed to userspace as text strings in sysfs */
41#define MEM_ONLINE (1<<0) /* exposed to userspace */ 41#define MEM_ONLINE (1<<0) /* exposed to userspace */
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index d9524c49d767..8f1a41951df9 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -84,6 +84,7 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
84extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); 84extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
85/* VM interface that may be used by firmware interface */ 85/* VM interface that may be used by firmware interface */
86extern int online_pages(unsigned long, unsigned long, int); 86extern int online_pages(unsigned long, unsigned long, int);
87extern int test_pages_in_a_zone(unsigned long, unsigned long);
87extern void __offline_isolated_pages(unsigned long, unsigned long); 88extern void __offline_isolated_pages(unsigned long, unsigned long);
88 89
89typedef void (*online_page_callback_t)(struct page *page); 90typedef void (*online_page_callback_t)(struct page *page);
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index f230a978e6ba..3d385c81c153 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -134,9 +134,10 @@ void mpol_free_shared_policy(struct shared_policy *p);
134struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, 134struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
135 unsigned long idx); 135 unsigned long idx);
136 136
137struct mempolicy *get_vma_policy(struct task_struct *tsk, 137struct mempolicy *get_task_policy(struct task_struct *p);
138 struct vm_area_struct *vma, unsigned long addr); 138struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
139bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma); 139 unsigned long addr);
140bool vma_policy_mof(struct vm_area_struct *vma);
140 141
141extern void numa_default_policy(void); 142extern void numa_default_policy(void);
142extern void numa_policy_init(void); 143extern void numa_policy_init(void);
diff --git a/include/linux/mfd/abx500/ab8500-sysctrl.h b/include/linux/mfd/abx500/ab8500-sysctrl.h
index adba89d9c660..689312745b2f 100644
--- a/include/linux/mfd/abx500/ab8500-sysctrl.h
+++ b/include/linux/mfd/abx500/ab8500-sysctrl.h
@@ -12,7 +12,6 @@
12 12
13int ab8500_sysctrl_read(u16 reg, u8 *value); 13int ab8500_sysctrl_read(u16 reg, u8 *value);
14int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value); 14int ab8500_sysctrl_write(u16 reg, u8 mask, u8 value);
15void ab8500_restart(char mode, const char *cmd);
16 15
17#else 16#else
18 17
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index f34723f7663c..910e3aa1e965 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -141,6 +141,7 @@ struct arizona {
141 141
142 uint16_t dac_comp_coeff; 142 uint16_t dac_comp_coeff;
143 uint8_t dac_comp_enabled; 143 uint8_t dac_comp_enabled;
144 struct mutex dac_comp_lock;
144}; 145};
145 146
146int arizona_clk32k_enable(struct arizona *arizona); 147int arizona_clk32k_enable(struct arizona *arizona);
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index dbd23c36de21..aacc10d7789c 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -27,6 +27,7 @@
27#define ARIZONA_WRITE_SEQUENCER_CTRL_0 0x16 27#define ARIZONA_WRITE_SEQUENCER_CTRL_0 0x16
28#define ARIZONA_WRITE_SEQUENCER_CTRL_1 0x17 28#define ARIZONA_WRITE_SEQUENCER_CTRL_1 0x17
29#define ARIZONA_WRITE_SEQUENCER_CTRL_2 0x18 29#define ARIZONA_WRITE_SEQUENCER_CTRL_2 0x18
30#define ARIZONA_WRITE_SEQUENCER_CTRL_3 0x19
30#define ARIZONA_WRITE_SEQUENCER_PROM 0x1A 31#define ARIZONA_WRITE_SEQUENCER_PROM 0x1A
31#define ARIZONA_TONE_GENERATOR_1 0x20 32#define ARIZONA_TONE_GENERATOR_1 0x20
32#define ARIZONA_TONE_GENERATOR_2 0x21 33#define ARIZONA_TONE_GENERATOR_2 0x21
@@ -70,7 +71,9 @@
70#define ARIZONA_SAMPLE_RATE_3_STATUS 0x10C 71#define ARIZONA_SAMPLE_RATE_3_STATUS 0x10C
71#define ARIZONA_ASYNC_CLOCK_1 0x112 72#define ARIZONA_ASYNC_CLOCK_1 0x112
72#define ARIZONA_ASYNC_SAMPLE_RATE_1 0x113 73#define ARIZONA_ASYNC_SAMPLE_RATE_1 0x113
74#define ARIZONA_ASYNC_SAMPLE_RATE_2 0x114
73#define ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B 75#define ARIZONA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B
76#define ARIZONA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C
74#define ARIZONA_OUTPUT_SYSTEM_CLOCK 0x149 77#define ARIZONA_OUTPUT_SYSTEM_CLOCK 0x149
75#define ARIZONA_OUTPUT_ASYNC_CLOCK 0x14A 78#define ARIZONA_OUTPUT_ASYNC_CLOCK 0x14A
76#define ARIZONA_RATE_ESTIMATOR_1 0x152 79#define ARIZONA_RATE_ESTIMATOR_1 0x152
@@ -122,6 +125,8 @@
122#define ARIZONA_MIC_BIAS_CTRL_1 0x218 125#define ARIZONA_MIC_BIAS_CTRL_1 0x218
123#define ARIZONA_MIC_BIAS_CTRL_2 0x219 126#define ARIZONA_MIC_BIAS_CTRL_2 0x219
124#define ARIZONA_MIC_BIAS_CTRL_3 0x21A 127#define ARIZONA_MIC_BIAS_CTRL_3 0x21A
128#define ARIZONA_HP_CTRL_1L 0x225
129#define ARIZONA_HP_CTRL_1R 0x226
125#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293 130#define ARIZONA_ACCESSORY_DETECT_MODE_1 0x293
126#define ARIZONA_HEADPHONE_DETECT_1 0x29B 131#define ARIZONA_HEADPHONE_DETECT_1 0x29B
127#define ARIZONA_HEADPHONE_DETECT_2 0x29C 132#define ARIZONA_HEADPHONE_DETECT_2 0x29C
@@ -276,8 +281,16 @@
276#define ARIZONA_AIF2_FRAME_CTRL_2 0x548 281#define ARIZONA_AIF2_FRAME_CTRL_2 0x548
277#define ARIZONA_AIF2_FRAME_CTRL_3 0x549 282#define ARIZONA_AIF2_FRAME_CTRL_3 0x549
278#define ARIZONA_AIF2_FRAME_CTRL_4 0x54A 283#define ARIZONA_AIF2_FRAME_CTRL_4 0x54A
284#define ARIZONA_AIF2_FRAME_CTRL_5 0x54B
285#define ARIZONA_AIF2_FRAME_CTRL_6 0x54C
286#define ARIZONA_AIF2_FRAME_CTRL_7 0x54D
287#define ARIZONA_AIF2_FRAME_CTRL_8 0x54E
279#define ARIZONA_AIF2_FRAME_CTRL_11 0x551 288#define ARIZONA_AIF2_FRAME_CTRL_11 0x551
280#define ARIZONA_AIF2_FRAME_CTRL_12 0x552 289#define ARIZONA_AIF2_FRAME_CTRL_12 0x552
290#define ARIZONA_AIF2_FRAME_CTRL_13 0x553
291#define ARIZONA_AIF2_FRAME_CTRL_14 0x554
292#define ARIZONA_AIF2_FRAME_CTRL_15 0x555
293#define ARIZONA_AIF2_FRAME_CTRL_16 0x556
281#define ARIZONA_AIF2_TX_ENABLES 0x559 294#define ARIZONA_AIF2_TX_ENABLES 0x559
282#define ARIZONA_AIF2_RX_ENABLES 0x55A 295#define ARIZONA_AIF2_RX_ENABLES 0x55A
283#define ARIZONA_AIF2_FORCE_WRITE 0x55B 296#define ARIZONA_AIF2_FORCE_WRITE 0x55B
@@ -1664,16 +1677,30 @@
1664/* 1677/*
1665 * R275 (0x113) - Async sample rate 1 1678 * R275 (0x113) - Async sample rate 1
1666 */ 1679 */
1667#define ARIZONA_ASYNC_SAMPLE_RATE_MASK 0x001F /* ASYNC_SAMPLE_RATE - [4:0] */ 1680#define ARIZONA_ASYNC_SAMPLE_RATE_1_MASK 0x001F /* ASYNC_SAMPLE_RATE_1 - [4:0] */
1668#define ARIZONA_ASYNC_SAMPLE_RATE_SHIFT 0 /* ASYNC_SAMPLE_RATE - [4:0] */ 1681#define ARIZONA_ASYNC_SAMPLE_RATE_1_SHIFT 0 /* ASYNC_SAMPLE_RATE_1 - [4:0] */
1669#define ARIZONA_ASYNC_SAMPLE_RATE_WIDTH 5 /* ASYNC_SAMPLE_RATE - [4:0] */ 1682#define ARIZONA_ASYNC_SAMPLE_RATE_1_WIDTH 5 /* ASYNC_SAMPLE_RATE_1 - [4:0] */
1683
1684/*
1685 * R276 (0x114) - Async sample rate 2
1686 */
1687#define ARIZONA_ASYNC_SAMPLE_RATE_2_MASK 0x001F /* ASYNC_SAMPLE_RATE_2 - [4:0] */
1688#define ARIZONA_ASYNC_SAMPLE_RATE_2_SHIFT 0 /* ASYNC_SAMPLE_RATE_2 - [4:0] */
1689#define ARIZONA_ASYNC_SAMPLE_RATE_2_WIDTH 5 /* ASYNC_SAMPLE_RATE_2 - [4:0] */
1670 1690
1671/* 1691/*
1672 * R283 (0x11B) - Async sample rate 1 status 1692 * R283 (0x11B) - Async sample rate 1 status
1673 */ 1693 */
1674#define ARIZONA_ASYNC_SAMPLE_RATE_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_STS - [4:0] */ 1694#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */
1675#define ARIZONA_ASYNC_SAMPLE_RATE_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_STS - [4:0] */ 1695#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */
1676#define ARIZONA_ASYNC_SAMPLE_RATE_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_STS - [4:0] */ 1696#define ARIZONA_ASYNC_SAMPLE_RATE_1_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_1_STS - [4:0] */
1697
1698/*
1699 * R284 (0x11C) - Async sample rate 2 status
1700 */
1701#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_MASK 0x001F /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */
1702#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_SHIFT 0 /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */
1703#define ARIZONA_ASYNC_SAMPLE_RATE_2_STS_WIDTH 5 /* ASYNC_SAMPLE_RATE_2_STS - [4:0] */
1677 1704
1678/* 1705/*
1679 * R329 (0x149) - Output system clock 1706 * R329 (0x149) - Output system clock
@@ -2228,6 +2255,46 @@
2228#define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */ 2255#define ARIZONA_MICB3_ENA_WIDTH 1 /* MICB3_ENA */
2229 2256
2230/* 2257/*
2258 * R549 (0x225) - HP Ctrl 1L
2259 */
2260#define ARIZONA_RMV_SHRT_HP1L 0x4000 /* RMV_SHRT_HP1L */
2261#define ARIZONA_RMV_SHRT_HP1L_MASK 0x4000 /* RMV_SHRT_HP1L */
2262#define ARIZONA_RMV_SHRT_HP1L_SHIFT 14 /* RMV_SHRT_HP1L */
2263#define ARIZONA_RMV_SHRT_HP1L_WIDTH 1 /* RMV_SHRT_HP1L */
2264#define ARIZONA_HP1L_FLWR 0x0004 /* HP1L_FLWR */
2265#define ARIZONA_HP1L_FLWR_MASK 0x0004 /* HP1L_FLWR */
2266#define ARIZONA_HP1L_FLWR_SHIFT 2 /* HP1L_FLWR */
2267#define ARIZONA_HP1L_FLWR_WIDTH 1 /* HP1L_FLWR */
2268#define ARIZONA_HP1L_SHRTI 0x0002 /* HP1L_SHRTI */
2269#define ARIZONA_HP1L_SHRTI_MASK 0x0002 /* HP1L_SHRTI */
2270#define ARIZONA_HP1L_SHRTI_SHIFT 1 /* HP1L_SHRTI */
2271#define ARIZONA_HP1L_SHRTI_WIDTH 1 /* HP1L_SHRTI */
2272#define ARIZONA_HP1L_SHRTO 0x0001 /* HP1L_SHRTO */
2273#define ARIZONA_HP1L_SHRTO_MASK 0x0001 /* HP1L_SHRTO */
2274#define ARIZONA_HP1L_SHRTO_SHIFT 0 /* HP1L_SHRTO */
2275#define ARIZONA_HP1L_SHRTO_WIDTH 1 /* HP1L_SHRTO */
2276
2277/*
2278 * R550 (0x226) - HP Ctrl 1R
2279 */
2280#define ARIZONA_RMV_SHRT_HP1R 0x4000 /* RMV_SHRT_HP1R */
2281#define ARIZONA_RMV_SHRT_HP1R_MASK 0x4000 /* RMV_SHRT_HP1R */
2282#define ARIZONA_RMV_SHRT_HP1R_SHIFT 14 /* RMV_SHRT_HP1R */
2283#define ARIZONA_RMV_SHRT_HP1R_WIDTH 1 /* RMV_SHRT_HP1R */
2284#define ARIZONA_HP1R_FLWR 0x0004 /* HP1R_FLWR */
2285#define ARIZONA_HP1R_FLWR_MASK 0x0004 /* HP1R_FLWR */
2286#define ARIZONA_HP1R_FLWR_SHIFT 2 /* HP1R_FLWR */
2287#define ARIZONA_HP1R_FLWR_WIDTH 1 /* HP1R_FLWR */
2288#define ARIZONA_HP1R_SHRTI 0x0002 /* HP1R_SHRTI */
2289#define ARIZONA_HP1R_SHRTI_MASK 0x0002 /* HP1R_SHRTI */
2290#define ARIZONA_HP1R_SHRTI_SHIFT 1 /* HP1R_SHRTI */
2291#define ARIZONA_HP1R_SHRTI_WIDTH 1 /* HP1R_SHRTI */
2292#define ARIZONA_HP1R_SHRTO 0x0001 /* HP1R_SHRTO */
2293#define ARIZONA_HP1R_SHRTO_MASK 0x0001 /* HP1R_SHRTO */
2294#define ARIZONA_HP1R_SHRTO_SHIFT 0 /* HP1R_SHRTO */
2295#define ARIZONA_HP1R_SHRTO_WIDTH 1 /* HP1R_SHRTO */
2296
2297/*
2231 * R659 (0x293) - Accessory Detect Mode 1 2298 * R659 (0x293) - Accessory Detect Mode 1
2232 */ 2299 */
2233#define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */ 2300#define ARIZONA_ACCDET_SRC 0x2000 /* ACCDET_SRC */
diff --git a/include/linux/mfd/atmel-hlcdc.h b/include/linux/mfd/atmel-hlcdc.h
new file mode 100644
index 000000000000..1279ab1644b5
--- /dev/null
+++ b/include/linux/mfd/atmel-hlcdc.h
@@ -0,0 +1,85 @@
1/*
2 * Copyright (C) 2014 Free Electrons
3 * Copyright (C) 2014 Atmel
4 *
5 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#ifndef __LINUX_MFD_HLCDC_H
21#define __LINUX_MFD_HLCDC_H
22
23#include <linux/clk.h>
24#include <linux/regmap.h>
25
26#define ATMEL_HLCDC_CFG(i) ((i) * 0x4)
27#define ATMEL_HLCDC_SIG_CFG LCDCFG(5)
28#define ATMEL_HLCDC_HSPOL BIT(0)
29#define ATMEL_HLCDC_VSPOL BIT(1)
30#define ATMEL_HLCDC_VSPDLYS BIT(2)
31#define ATMEL_HLCDC_VSPDLYE BIT(3)
32#define ATMEL_HLCDC_DISPPOL BIT(4)
33#define ATMEL_HLCDC_DITHER BIT(6)
34#define ATMEL_HLCDC_DISPDLY BIT(7)
35#define ATMEL_HLCDC_MODE_MASK GENMASK(9, 8)
36#define ATMEL_HLCDC_PP BIT(10)
37#define ATMEL_HLCDC_VSPSU BIT(12)
38#define ATMEL_HLCDC_VSPHO BIT(13)
39#define ATMEL_HLCDC_GUARDTIME_MASK GENMASK(20, 16)
40
41#define ATMEL_HLCDC_EN 0x20
42#define ATMEL_HLCDC_DIS 0x24
43#define ATMEL_HLCDC_SR 0x28
44#define ATMEL_HLCDC_IER 0x2c
45#define ATMEL_HLCDC_IDR 0x30
46#define ATMEL_HLCDC_IMR 0x34
47#define ATMEL_HLCDC_ISR 0x38
48
49#define ATMEL_HLCDC_CLKPOL BIT(0)
50#define ATMEL_HLCDC_CLKSEL BIT(2)
51#define ATMEL_HLCDC_CLKPWMSEL BIT(3)
52#define ATMEL_HLCDC_CGDIS(i) BIT(8 + (i))
53#define ATMEL_HLCDC_CLKDIV_SHFT 16
54#define ATMEL_HLCDC_CLKDIV_MASK GENMASK(23, 16)
55#define ATMEL_HLCDC_CLKDIV(div) ((div - 2) << ATMEL_HLCDC_CLKDIV_SHFT)
56
57#define ATMEL_HLCDC_PIXEL_CLK BIT(0)
58#define ATMEL_HLCDC_SYNC BIT(1)
59#define ATMEL_HLCDC_DISP BIT(2)
60#define ATMEL_HLCDC_PWM BIT(3)
61#define ATMEL_HLCDC_SIP BIT(4)
62
63#define ATMEL_HLCDC_SOF BIT(0)
64#define ATMEL_HLCDC_SYNCDIS BIT(1)
65#define ATMEL_HLCDC_FIFOERR BIT(4)
66#define ATMEL_HLCDC_LAYER_STATUS(x) BIT((x) + 8)
67
68/**
69 * Structure shared by the MFD device and its subdevices.
70 *
71 * @regmap: register map used to access HLCDC IP registers
72 * @periph_clk: the hlcdc peripheral clock
73 * @sys_clk: the hlcdc system clock
74 * @slow_clk: the system slow clk
75 * @irq: the hlcdc irq
76 */
77struct atmel_hlcdc {
78 struct regmap *regmap;
79 struct clk *periph_clk;
80 struct clk *sys_clk;
81 struct clk *slow_clk;
82 int irq;
83};
84
85#endif /* __LINUX_MFD_HLCDC_H */
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index d0e31a2287ac..81589d176ae8 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -14,6 +14,8 @@
14enum { 14enum {
15 AXP202_ID = 0, 15 AXP202_ID = 0,
16 AXP209_ID, 16 AXP209_ID,
17 AXP288_ID,
18 NR_AXP20X_VARIANTS,
17}; 19};
18 20
19#define AXP20X_DATACACHE(m) (0x04 + (m)) 21#define AXP20X_DATACACHE(m) (0x04 + (m))
@@ -49,11 +51,13 @@ enum {
49#define AXP20X_IRQ3_EN 0x42 51#define AXP20X_IRQ3_EN 0x42
50#define AXP20X_IRQ4_EN 0x43 52#define AXP20X_IRQ4_EN 0x43
51#define AXP20X_IRQ5_EN 0x44 53#define AXP20X_IRQ5_EN 0x44
54#define AXP20X_IRQ6_EN 0x45
52#define AXP20X_IRQ1_STATE 0x48 55#define AXP20X_IRQ1_STATE 0x48
53#define AXP20X_IRQ2_STATE 0x49 56#define AXP20X_IRQ2_STATE 0x49
54#define AXP20X_IRQ3_STATE 0x4a 57#define AXP20X_IRQ3_STATE 0x4a
55#define AXP20X_IRQ4_STATE 0x4b 58#define AXP20X_IRQ4_STATE 0x4b
56#define AXP20X_IRQ5_STATE 0x4c 59#define AXP20X_IRQ5_STATE 0x4c
60#define AXP20X_IRQ6_STATE 0x4d
57 61
58/* ADC */ 62/* ADC */
59#define AXP20X_ACIN_V_ADC_H 0x56 63#define AXP20X_ACIN_V_ADC_H 0x56
@@ -116,6 +120,15 @@ enum {
116#define AXP20X_CC_CTRL 0xb8 120#define AXP20X_CC_CTRL 0xb8
117#define AXP20X_FG_RES 0xb9 121#define AXP20X_FG_RES 0xb9
118 122
123/* AXP288 specific registers */
124#define AXP288_PMIC_ADC_H 0x56
125#define AXP288_PMIC_ADC_L 0x57
126#define AXP288_ADC_TS_PIN_CTRL 0x84
127
128#define AXP288_PMIC_ADC_EN 0x84
129#define AXP288_FG_TUNE5 0xed
130
131
119/* Regulators IDs */ 132/* Regulators IDs */
120enum { 133enum {
121 AXP20X_LDO1 = 0, 134 AXP20X_LDO1 = 0,
@@ -169,12 +182,58 @@ enum {
169 AXP20X_IRQ_GPIO0_INPUT, 182 AXP20X_IRQ_GPIO0_INPUT,
170}; 183};
171 184
185enum axp288_irqs {
186 AXP288_IRQ_VBUS_FALL = 2,
187 AXP288_IRQ_VBUS_RISE,
188 AXP288_IRQ_OV,
189 AXP288_IRQ_FALLING_ALT,
190 AXP288_IRQ_RISING_ALT,
191 AXP288_IRQ_OV_ALT,
192 AXP288_IRQ_DONE = 10,
193 AXP288_IRQ_CHARGING,
194 AXP288_IRQ_SAFE_QUIT,
195 AXP288_IRQ_SAFE_ENTER,
196 AXP288_IRQ_ABSENT,
197 AXP288_IRQ_APPEND,
198 AXP288_IRQ_QWBTU,
199 AXP288_IRQ_WBTU,
200 AXP288_IRQ_QWBTO,
201 AXP288_IRQ_WBTO,
202 AXP288_IRQ_QCBTU,
203 AXP288_IRQ_CBTU,
204 AXP288_IRQ_QCBTO,
205 AXP288_IRQ_CBTO,
206 AXP288_IRQ_WL2,
207 AXP288_IRQ_WL1,
208 AXP288_IRQ_GPADC,
209 AXP288_IRQ_OT = 31,
210 AXP288_IRQ_GPIO0,
211 AXP288_IRQ_GPIO1,
212 AXP288_IRQ_POKO,
213 AXP288_IRQ_POKL,
214 AXP288_IRQ_POKS,
215 AXP288_IRQ_POKN,
216 AXP288_IRQ_POKP,
217 AXP288_IRQ_TIMER,
218 AXP288_IRQ_MV_CHNG,
219 AXP288_IRQ_BC_USB_CHNG,
220};
221
222#define AXP288_TS_ADC_H 0x58
223#define AXP288_TS_ADC_L 0x59
224#define AXP288_GP_ADC_H 0x5a
225#define AXP288_GP_ADC_L 0x5b
226
172struct axp20x_dev { 227struct axp20x_dev {
173 struct device *dev; 228 struct device *dev;
174 struct i2c_client *i2c_client; 229 struct i2c_client *i2c_client;
175 struct regmap *regmap; 230 struct regmap *regmap;
176 struct regmap_irq_chip_data *regmap_irqc; 231 struct regmap_irq_chip_data *regmap_irqc;
177 long variant; 232 long variant;
233 int nr_cells;
234 struct mfd_cell *cells;
235 const struct regmap_config *regmap_cfg;
236 const struct regmap_irq_chip *regmap_irq_chip;
178}; 237};
179 238
180#endif /* __LINUX_MFD_AXP20X_H */ 239#endif /* __LINUX_MFD_AXP20X_H */
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index f543de91ce19..a76bc100bf97 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -44,6 +44,9 @@ struct mfd_cell {
44 */ 44 */
45 const char *of_compatible; 45 const char *of_compatible;
46 46
47 /* Matches ACPI PNP id, either _HID or _CID */
48 const char *acpi_pnpid;
49
47 /* 50 /*
48 * These resources can be specified relative to the parent device. 51 * These resources can be specified relative to the parent device.
49 * For accessing hardware you should use resources from the platform dev 52 * For accessing hardware you should use resources from the platform dev
@@ -108,6 +111,13 @@ extern int mfd_add_devices(struct device *parent, int id,
108 struct resource *mem_base, 111 struct resource *mem_base,
109 int irq_base, struct irq_domain *irq_domain); 112 int irq_base, struct irq_domain *irq_domain);
110 113
114static inline int mfd_add_hotplug_devices(struct device *parent,
115 const struct mfd_cell *cells, int n_devs)
116{
117 return mfd_add_devices(parent, PLATFORM_DEVID_AUTO, cells, n_devs,
118 NULL, 0, NULL);
119}
120
111extern void mfd_remove_devices(struct device *parent); 121extern void mfd_remove_devices(struct device *parent);
112 122
113#endif 123#endif
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index fcbe9d129a9d..0e166b92f5b4 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -62,10 +62,6 @@ struct cros_ec_command {
62 * @dev: Device pointer 62 * @dev: Device pointer
63 * @was_wake_device: true if this device was set to wake the system from 63 * @was_wake_device: true if this device was set to wake the system from
64 * sleep at the last suspend 64 * sleep at the last suspend
65 * @cmd_xfer: send command to EC and get response
66 * Returns the number of bytes received if the communication succeeded, but
67 * that doesn't mean the EC was happy with the command. The caller
68 * should check msg.result for the EC's result code.
69 * 65 *
70 * @priv: Private data 66 * @priv: Private data
71 * @irq: Interrupt to use 67 * @irq: Interrupt to use
@@ -82,6 +78,10 @@ struct cros_ec_command {
82 * @dout_size: size of dout buffer to allocate (zero to use static dout) 78 * @dout_size: size of dout buffer to allocate (zero to use static dout)
83 * @parent: pointer to parent device (e.g. i2c or spi device) 79 * @parent: pointer to parent device (e.g. i2c or spi device)
84 * @wake_enabled: true if this device can wake the system from sleep 80 * @wake_enabled: true if this device can wake the system from sleep
81 * @cmd_xfer: send command to EC and get response
82 * Returns the number of bytes received if the communication succeeded, but
83 * that doesn't mean the EC was happy with the command. The caller
84 * should check msg.result for the EC's result code.
85 * @lock: one transaction at a time 85 * @lock: one transaction at a time
86 */ 86 */
87struct cros_ec_device { 87struct cros_ec_device {
@@ -92,8 +92,6 @@ struct cros_ec_device {
92 struct device *dev; 92 struct device *dev;
93 bool was_wake_device; 93 bool was_wake_device;
94 struct class *cros_class; 94 struct class *cros_class;
95 int (*cmd_xfer)(struct cros_ec_device *ec,
96 struct cros_ec_command *msg);
97 95
98 /* These are used to implement the platform-specific interface */ 96 /* These are used to implement the platform-specific interface */
99 void *priv; 97 void *priv;
@@ -104,6 +102,8 @@ struct cros_ec_device {
104 int dout_size; 102 int dout_size;
105 struct device *parent; 103 struct device *parent;
106 bool wake_enabled; 104 bool wake_enabled;
105 int (*cmd_xfer)(struct cros_ec_device *ec,
106 struct cros_ec_command *msg);
107 struct mutex lock; 107 struct mutex lock;
108}; 108};
109 109
@@ -153,6 +153,18 @@ int cros_ec_check_result(struct cros_ec_device *ec_dev,
153 struct cros_ec_command *msg); 153 struct cros_ec_command *msg);
154 154
155/** 155/**
156 * cros_ec_cmd_xfer - Send a command to the ChromeOS EC
157 *
158 * Call this to send a command to the ChromeOS EC. This should be used
159 * instead of calling the EC's cmd_xfer() callback directly.
160 *
161 * @ec_dev: EC device
162 * @msg: Message to write
163 */
164int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
165 struct cros_ec_command *msg);
166
167/**
156 * cros_ec_remove - Remove a ChromeOS EC 168 * cros_ec_remove - Remove a ChromeOS EC
157 * 169 *
158 * Call this to deregister a ChromeOS EC, then clean up any private data. 170 * Call this to deregister a ChromeOS EC, then clean up any private data.
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 7853a6410d14..a49cd41feea7 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -1928,9 +1928,6 @@ struct ec_response_power_info {
1928 1928
1929#define EC_CMD_I2C_PASSTHRU 0x9e 1929#define EC_CMD_I2C_PASSTHRU 0x9e
1930 1930
1931/* Slave address is 10 (not 7) bit */
1932#define EC_I2C_FLAG_10BIT (1 << 16)
1933
1934/* Read data; if not present, message is a write */ 1931/* Read data; if not present, message is a write */
1935#define EC_I2C_FLAG_READ (1 << 15) 1932#define EC_I2C_FLAG_READ (1 << 15)
1936 1933
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index bba65f51a0b5..c18a4c19d6fc 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -211,7 +211,7 @@ static inline int da9052_reg_update(struct da9052 *da9052, unsigned char reg,
211int da9052_device_init(struct da9052 *da9052, u8 chip_id); 211int da9052_device_init(struct da9052 *da9052, u8 chip_id);
212void da9052_device_exit(struct da9052 *da9052); 212void da9052_device_exit(struct da9052 *da9052);
213 213
214extern struct regmap_config da9052_regmap_config; 214extern const struct regmap_config da9052_regmap_config;
215 215
216int da9052_irq_init(struct da9052 *da9052); 216int da9052_irq_init(struct da9052 *da9052);
217int da9052_irq_exit(struct da9052 *da9052); 217int da9052_irq_exit(struct da9052 *da9052);
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index 5166935ce66d..8e1cdbef3dad 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -21,7 +21,7 @@
21 */ 21 */
22 22
23#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_ 23#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_
24#define __LINUX_MFD_DAVINIC_VOICECODEC_H_ 24#define __LINUX_MFD_DAVINCI_VOICECODEC_H_
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
@@ -99,12 +99,6 @@ struct davinci_vcif {
99 dma_addr_t dma_rx_addr; 99 dma_addr_t dma_rx_addr;
100}; 100};
101 101
102struct cq93vc {
103 struct platform_device *pdev;
104 struct snd_soc_codec *codec;
105 u32 sysclk;
106};
107
108struct davinci_vc; 102struct davinci_vc;
109 103
110struct davinci_vc { 104struct davinci_vc {
@@ -122,7 +116,6 @@ struct davinci_vc {
122 116
123 /* Client devices */ 117 /* Client devices */
124 struct davinci_vcif davinci_vcif; 118 struct davinci_vcif davinci_vcif;
125 struct cq93vc cq93vc;
126}; 119};
127 120
128#endif 121#endif
diff --git a/include/linux/mfd/dln2.h b/include/linux/mfd/dln2.h
new file mode 100644
index 000000000000..004b24576da8
--- /dev/null
+++ b/include/linux/mfd/dln2.h
@@ -0,0 +1,103 @@
1#ifndef __LINUX_USB_DLN2_H
2#define __LINUX_USB_DLN2_H
3
4#define DLN2_CMD(cmd, id) ((cmd) | ((id) << 8))
5
6struct dln2_platform_data {
7 u16 handle; /* sub-driver handle (internally used only) */
8 u8 port; /* I2C/SPI port */
9};
10
11/**
12 * dln2_event_cb_t - event callback function signature
13 *
14 * @pdev - the sub-device that registered this callback
15 * @echo - the echo header field received in the message
16 * @data - the data payload
17 * @len - the data payload length
18 *
19 * The callback function is called in interrupt context and the data payload is
20 * only valid during the call. If the user needs later access of the data, it
21 * must copy it.
22 */
23
24typedef void (*dln2_event_cb_t)(struct platform_device *pdev, u16 echo,
25 const void *data, int len);
26
27/**
28 * dl2n_register_event_cb - register a callback function for an event
29 *
30 * @pdev - the sub-device that registers the callback
31 * @event - the event for which to register a callback
32 * @event_cb - the callback function
33 *
34 * @return 0 in case of success, negative value in case of error
35 */
36int dln2_register_event_cb(struct platform_device *pdev, u16 event,
37 dln2_event_cb_t event_cb);
38
39/**
40 * dln2_unregister_event_cb - unregister the callback function for an event
41 *
42 * @pdev - the sub-device that registered the callback
43 * @event - the event for which to register a callback
44 */
45void dln2_unregister_event_cb(struct platform_device *pdev, u16 event);
46
47/**
48 * dln2_transfer - issue a DLN2 command and wait for a response and the
49 * associated data
50 *
51 * @pdev - the sub-device which is issuing this transfer
52 * @cmd - the command to be sent to the device
53 * @obuf - the buffer to be sent to the device; it can be NULL if the user
54 * doesn't need to transmit data with this command
55 * @obuf_len - the size of the buffer to be sent to the device
56 * @ibuf - any data associated with the response will be copied here; it can be
57 * NULL if the user doesn't need the response data
58 * @ibuf_len - must be initialized to the input buffer size; it will be modified
59 * to indicate the actual data transferred;
60 *
61 * @return 0 for success, negative value for errors
62 */
63int dln2_transfer(struct platform_device *pdev, u16 cmd,
64 const void *obuf, unsigned obuf_len,
65 void *ibuf, unsigned *ibuf_len);
66
67/**
68 * dln2_transfer_rx - variant of @dln2_transfer() where TX buffer is not needed
69 *
70 * @pdev - the sub-device which is issuing this transfer
71 * @cmd - the command to be sent to the device
72 * @ibuf - any data associated with the response will be copied here; it can be
73 * NULL if the user doesn't need the response data
74 * @ibuf_len - must be initialized to the input buffer size; it will be modified
75 * to indicate the actual data transferred;
76 *
77 * @return 0 for success, negative value for errors
78 */
79
80static inline int dln2_transfer_rx(struct platform_device *pdev, u16 cmd,
81 void *ibuf, unsigned *ibuf_len)
82{
83 return dln2_transfer(pdev, cmd, NULL, 0, ibuf, ibuf_len);
84}
85
86/**
87 * dln2_transfer_tx - variant of @dln2_transfer() where RX buffer is not needed
88 *
89 * @pdev - the sub-device which is issuing this transfer
90 * @cmd - the command to be sent to the device
91 * @obuf - the buffer to be sent to the device; it can be NULL if the
92 * user doesn't need to transmit data with this command
93 * @obuf_len - the size of the buffer to be sent to the device
94 *
95 * @return 0 for success, negative value for errors
96 */
97static inline int dln2_transfer_tx(struct platform_device *pdev, u16 cmd,
98 const void *obuf, unsigned obuf_len)
99{
100 return dln2_transfer(pdev, cmd, obuf, obuf_len, NULL, NULL);
101}
102
103#endif
diff --git a/include/linux/mfd/hi6421-pmic.h b/include/linux/mfd/hi6421-pmic.h
new file mode 100644
index 000000000000..587273e35acf
--- /dev/null
+++ b/include/linux/mfd/hi6421-pmic.h
@@ -0,0 +1,41 @@
1/*
2 * Header file for device driver Hi6421 PMIC
3 *
4 * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd.
5 * http://www.hisilicon.com
6 * Copyright (c) <2013-2014> Linaro Ltd.
7 * http://www.linaro.org
8 *
9 * Author: Guodong Xu <guodong.xu@linaro.org>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16#ifndef __HI6421_PMIC_H
17#define __HI6421_PMIC_H
18
19/* Hi6421 registers are mapped to memory bus in 4 bytes stride */
20#define HI6421_REG_TO_BUS_ADDR(x) (x << 2)
21
22/* Hi6421 maximum register number */
23#define HI6421_REG_MAX 0xFF
24
25/* Hi6421 OCP (over current protection) and DEB (debounce) control register */
26#define HI6421_OCP_DEB_CTRL_REG HI6421_REG_TO_BUS_ADDR(0x51)
27#define HI6421_OCP_DEB_SEL_MASK 0x0C
28#define HI6421_OCP_DEB_SEL_8MS 0x00
29#define HI6421_OCP_DEB_SEL_16MS 0x04
30#define HI6421_OCP_DEB_SEL_32MS 0x08
31#define HI6421_OCP_DEB_SEL_64MS 0x0C
32#define HI6421_OCP_EN_DEBOUNCE_MASK 0x02
33#define HI6421_OCP_EN_DEBOUNCE_ENABLE 0x02
34#define HI6421_OCP_AUTO_STOP_MASK 0x01
35#define HI6421_OCP_AUTO_STOP_ENABLE 0x01
36
37struct hi6421_pmic {
38 struct regmap *regmap;
39};
40
41#endif /* __HI6421_PMIC_H */
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index 499253604026..f01c1fae4d84 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -72,15 +72,33 @@ enum max14577_muic_reg {
72 MAX14577_MUIC_REG_END, 72 MAX14577_MUIC_REG_END,
73}; 73};
74 74
75/*
76 * Combined charger types for max14577 and max77836.
77 *
78 * On max14577 three lower bits map to STATUS2/CHGTYP field.
79 * However the max77836 has different two last values of STATUS2/CHGTYP.
80 * To indicate the difference enum has two additional values for max77836.
81 * These values are just a register value bitwise OR with 0x8.
82 */
75enum max14577_muic_charger_type { 83enum max14577_muic_charger_type {
76 MAX14577_CHARGER_TYPE_NONE = 0, 84 MAX14577_CHARGER_TYPE_NONE = 0x0,
77 MAX14577_CHARGER_TYPE_USB, 85 MAX14577_CHARGER_TYPE_USB = 0x1,
78 MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT, 86 MAX14577_CHARGER_TYPE_DOWNSTREAM_PORT = 0x2,
79 MAX14577_CHARGER_TYPE_DEDICATED_CHG, 87 MAX14577_CHARGER_TYPE_DEDICATED_CHG = 0x3,
80 MAX14577_CHARGER_TYPE_SPECIAL_500MA, 88 MAX14577_CHARGER_TYPE_SPECIAL_500MA = 0x4,
81 MAX14577_CHARGER_TYPE_SPECIAL_1A, 89 /* Special 1A or 2A charger */
82 MAX14577_CHARGER_TYPE_RESERVED, 90 MAX14577_CHARGER_TYPE_SPECIAL_1A = 0x5,
83 MAX14577_CHARGER_TYPE_DEAD_BATTERY = 7, 91 /* max14577: reserved, used on max77836 */
92 MAX14577_CHARGER_TYPE_RESERVED = 0x6,
93 /* max14577: dead-battery charing with maximum current 100mA */
94 MAX14577_CHARGER_TYPE_DEAD_BATTERY = 0x7,
95 /*
96 * max77836: special charger (bias on D+/D-),
97 * matches register value of 0x6
98 */
99 MAX77836_CHARGER_TYPE_SPECIAL_BIAS = 0xe,
100 /* max77836: reserved, register value 0x7 */
101 MAX77836_CHARGER_TYPE_RESERVED = 0xf,
84}; 102};
85 103
86/* MAX14577 interrupts */ 104/* MAX14577 interrupts */
@@ -121,13 +139,15 @@ enum max14577_muic_charger_type {
121#define STATUS2_CHGTYP_SHIFT 0 139#define STATUS2_CHGTYP_SHIFT 0
122#define STATUS2_CHGDETRUN_SHIFT 3 140#define STATUS2_CHGDETRUN_SHIFT 3
123#define STATUS2_DCDTMR_SHIFT 4 141#define STATUS2_DCDTMR_SHIFT 4
124#define STATUS2_DBCHG_SHIFT 5 142#define MAX14577_STATUS2_DBCHG_SHIFT 5
143#define MAX77836_STATUS2_DXOVP_SHIFT 5
125#define STATUS2_VBVOLT_SHIFT 6 144#define STATUS2_VBVOLT_SHIFT 6
126#define MAX77836_STATUS2_VIDRM_SHIFT 7 145#define MAX77836_STATUS2_VIDRM_SHIFT 7
127#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT) 146#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
128#define STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT) 147#define STATUS2_CHGDETRUN_MASK BIT(STATUS2_CHGDETRUN_SHIFT)
129#define STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT) 148#define STATUS2_DCDTMR_MASK BIT(STATUS2_DCDTMR_SHIFT)
130#define STATUS2_DBCHG_MASK BIT(STATUS2_DBCHG_SHIFT) 149#define MAX14577_STATUS2_DBCHG_MASK BIT(MAX14577_STATUS2_DBCHG_SHIFT)
150#define MAX77836_STATUS2_DXOVP_MASK BIT(MAX77836_STATUS2_DXOVP_SHIFT)
131#define STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT) 151#define STATUS2_VBVOLT_MASK BIT(STATUS2_VBVOLT_SHIFT)
132#define MAX77836_STATUS2_VIDRM_MASK BIT(MAX77836_STATUS2_VIDRM_SHIFT) 152#define MAX77836_STATUS2_VIDRM_MASK BIT(MAX77836_STATUS2_VIDRM_SHIFT)
133 153
@@ -177,9 +197,11 @@ enum max14577_muic_charger_type {
177#define CTRL3_JIGSET_SHIFT 0 197#define CTRL3_JIGSET_SHIFT 0
178#define CTRL3_BOOTSET_SHIFT 2 198#define CTRL3_BOOTSET_SHIFT 2
179#define CTRL3_ADCDBSET_SHIFT 4 199#define CTRL3_ADCDBSET_SHIFT 4
200#define CTRL3_WBTH_SHIFT 6
180#define CTRL3_JIGSET_MASK (0x3 << CTRL3_JIGSET_SHIFT) 201#define CTRL3_JIGSET_MASK (0x3 << CTRL3_JIGSET_SHIFT)
181#define CTRL3_BOOTSET_MASK (0x3 << CTRL3_BOOTSET_SHIFT) 202#define CTRL3_BOOTSET_MASK (0x3 << CTRL3_BOOTSET_SHIFT)
182#define CTRL3_ADCDBSET_MASK (0x3 << CTRL3_ADCDBSET_SHIFT) 203#define CTRL3_ADCDBSET_MASK (0x3 << CTRL3_ADCDBSET_SHIFT)
204#define CTRL3_WBTH_MASK (0x3 << CTRL3_WBTH_SHIFT)
183 205
184/* Slave addr = 0x4A: Charger */ 206/* Slave addr = 0x4A: Charger */
185enum max14577_charger_reg { 207enum max14577_charger_reg {
@@ -210,16 +232,20 @@ enum max14577_charger_reg {
210#define CDETCTRL1_CHGTYPMAN_SHIFT 1 232#define CDETCTRL1_CHGTYPMAN_SHIFT 1
211#define CDETCTRL1_DCDEN_SHIFT 2 233#define CDETCTRL1_DCDEN_SHIFT 2
212#define CDETCTRL1_DCD2SCT_SHIFT 3 234#define CDETCTRL1_DCD2SCT_SHIFT 3
213#define CDETCTRL1_DCHKTM_SHIFT 4 235#define MAX14577_CDETCTRL1_DCHKTM_SHIFT 4
214#define CDETCTRL1_DBEXIT_SHIFT 5 236#define MAX77836_CDETCTRL1_CDLY_SHIFT 4
237#define MAX14577_CDETCTRL1_DBEXIT_SHIFT 5
238#define MAX77836_CDETCTRL1_DCDCPL_SHIFT 5
215#define CDETCTRL1_DBIDLE_SHIFT 6 239#define CDETCTRL1_DBIDLE_SHIFT 6
216#define CDETCTRL1_CDPDET_SHIFT 7 240#define CDETCTRL1_CDPDET_SHIFT 7
217#define CDETCTRL1_CHGDETEN_MASK BIT(CDETCTRL1_CHGDETEN_SHIFT) 241#define CDETCTRL1_CHGDETEN_MASK BIT(CDETCTRL1_CHGDETEN_SHIFT)
218#define CDETCTRL1_CHGTYPMAN_MASK BIT(CDETCTRL1_CHGTYPMAN_SHIFT) 242#define CDETCTRL1_CHGTYPMAN_MASK BIT(CDETCTRL1_CHGTYPMAN_SHIFT)
219#define CDETCTRL1_DCDEN_MASK BIT(CDETCTRL1_DCDEN_SHIFT) 243#define CDETCTRL1_DCDEN_MASK BIT(CDETCTRL1_DCDEN_SHIFT)
220#define CDETCTRL1_DCD2SCT_MASK BIT(CDETCTRL1_DCD2SCT_SHIFT) 244#define CDETCTRL1_DCD2SCT_MASK BIT(CDETCTRL1_DCD2SCT_SHIFT)
221#define CDETCTRL1_DCHKTM_MASK BIT(CDETCTRL1_DCHKTM_SHIFT) 245#define MAX14577_CDETCTRL1_DCHKTM_MASK BIT(MAX14577_CDETCTRL1_DCHKTM_SHIFT)
222#define CDETCTRL1_DBEXIT_MASK BIT(CDETCTRL1_DBEXIT_SHIFT) 246#define MAX77836_CDETCTRL1_CDDLY_MASK BIT(MAX77836_CDETCTRL1_CDDLY_SHIFT)
247#define MAX14577_CDETCTRL1_DBEXIT_MASK BIT(MAX14577_CDETCTRL1_DBEXIT_SHIFT)
248#define MAX77836_CDETCTRL1_DCDCPL_MASK BIT(MAX77836_CDETCTRL1_DCDCPL_SHIFT)
223#define CDETCTRL1_DBIDLE_MASK BIT(CDETCTRL1_DBIDLE_SHIFT) 249#define CDETCTRL1_DBIDLE_MASK BIT(CDETCTRL1_DBIDLE_SHIFT)
224#define CDETCTRL1_CDPDET_MASK BIT(CDETCTRL1_CDPDET_SHIFT) 250#define CDETCTRL1_CDPDET_MASK BIT(CDETCTRL1_CDPDET_SHIFT)
225 251
@@ -255,17 +281,36 @@ enum max14577_charger_reg {
255#define CHGCTRL7_OTPCGHCVS_SHIFT 0 281#define CHGCTRL7_OTPCGHCVS_SHIFT 0
256#define CHGCTRL7_OTPCGHCVS_MASK (0x3 << CHGCTRL7_OTPCGHCVS_SHIFT) 282#define CHGCTRL7_OTPCGHCVS_MASK (0x3 << CHGCTRL7_OTPCGHCVS_SHIFT)
257 283
258/* MAX14577 regulator current limits (as in CHGCTRL4 register), uA */ 284/* MAX14577 charger current limits (as in CHGCTRL4 register), uA */
259#define MAX14577_REGULATOR_CURRENT_LIMIT_MIN 90000 285#define MAX14577_CHARGER_CURRENT_LIMIT_MIN 90000U
260#define MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_START 200000 286#define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_START 200000U
261#define MAX14577_REGULATOR_CURRENT_LIMIT_HIGH_STEP 50000 287#define MAX14577_CHARGER_CURRENT_LIMIT_HIGH_STEP 50000U
262#define MAX14577_REGULATOR_CURRENT_LIMIT_MAX 950000 288#define MAX14577_CHARGER_CURRENT_LIMIT_MAX 950000U
263 289
264/* MAX77836 regulator current limits (as in CHGCTRL4 register), uA */ 290/* MAX77836 charger current limits (as in CHGCTRL4 register), uA */
265#define MAX77836_REGULATOR_CURRENT_LIMIT_MIN 45000 291#define MAX77836_CHARGER_CURRENT_LIMIT_MIN 45000U
266#define MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_START 100000 292#define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_START 100000U
267#define MAX77836_REGULATOR_CURRENT_LIMIT_HIGH_STEP 25000 293#define MAX77836_CHARGER_CURRENT_LIMIT_HIGH_STEP 25000U
268#define MAX77836_REGULATOR_CURRENT_LIMIT_MAX 475000 294#define MAX77836_CHARGER_CURRENT_LIMIT_MAX 475000U
295
296/*
297 * MAX14577 charger End-Of-Charge current limits
298 * (as in CHGCTRL5 register), uA
299 */
300#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MIN 50000U
301#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_STEP 10000U
302#define MAX14577_CHARGER_EOC_CURRENT_LIMIT_MAX 200000U
303
304/*
305 * MAX14577/MAX77836 Battery Constant Voltage
306 * (as in CHGCTRL3 register), uV
307 */
308#define MAXIM_CHARGER_CONSTANT_VOLTAGE_MIN 4000000U
309#define MAXIM_CHARGER_CONSTANT_VOLTAGE_STEP 20000U
310#define MAXIM_CHARGER_CONSTANT_VOLTAGE_MAX 4350000U
311
312/* Default value for fast charge timer, in hours */
313#define MAXIM_CHARGER_FAST_CHARGE_TIMER_DEFAULT 5
269 314
270/* MAX14577 regulator SFOUT LDO voltage, fixed, uV */ 315/* MAX14577 regulator SFOUT LDO voltage, fixed, uV */
271#define MAX14577_REGULATOR_SAFEOUT_VOLTAGE 4900000 316#define MAX14577_REGULATOR_SAFEOUT_VOLTAGE 4900000
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index c83fbed1c7b6..ccfaf952c31b 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -54,6 +54,13 @@ struct max14577_regulator_platform_data {
54 struct device_node *of_node; 54 struct device_node *of_node;
55}; 55};
56 56
57struct max14577_charger_platform_data {
58 u32 constant_uvolt;
59 u32 fast_charge_uamp;
60 u32 eoc_uamp;
61 u32 ovp_uvolt;
62};
63
57/* 64/*
58 * MAX14577 MFD platform data 65 * MAX14577 MFD platform data
59 */ 66 */
@@ -74,4 +81,27 @@ struct max14577_platform_data {
74 struct max14577_regulator_platform_data *regulators; 81 struct max14577_regulator_platform_data *regulators;
75}; 82};
76 83
84/*
85 * Valid limits of current for max14577 and max77836 chargers.
86 * They must correspond to MBCICHWRCL and MBCICHWRCH fields in CHGCTRL4
87 * register for given chipset.
88 */
89struct maxim_charger_current {
90 /* Minimal current, set in CHGCTRL4/MBCICHWRCL, uA */
91 unsigned int min;
92 /*
93 * Minimal current when high setting is active,
94 * set in CHGCTRL4/MBCICHWRCH, uA
95 */
96 unsigned int high_start;
97 /* Value of one step in high setting, uA */
98 unsigned int high_step;
99 /* Maximum current of high setting, uA */
100 unsigned int max;
101};
102
103extern const struct maxim_charger_current maxim_charger_currents[];
104extern int maxim_charger_calc_reg_current(const struct maxim_charger_current *limits,
105 unsigned int min_ua, unsigned int max_ua, u8 *dst);
106
77#endif /* __MAX14577_H__ */ 107#endif /* __MAX14577_H__ */
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index 7e6dc4b2b795..553f7d09258a 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -131,13 +131,6 @@ enum max77686_opmode {
131 MAX77686_OPMODE_STANDBY, 131 MAX77686_OPMODE_STANDBY,
132}; 132};
133 133
134enum max77802_opmode {
135 MAX77802_OPMODE_OFF,
136 MAX77802_OPMODE_STANDBY,
137 MAX77802_OPMODE_LP,
138 MAX77802_OPMODE_NORMAL,
139};
140
141struct max77686_opmode_data { 134struct max77686_opmode_data {
142 int id; 135 int id;
143 int mode; 136 int mode;
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index d0e578fd7053..08dae01258b9 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -26,7 +26,6 @@
26 26
27#include <linux/i2c.h> 27#include <linux/i2c.h>
28 28
29#define MAX77693_NUM_IRQ_MUIC_REGS 3
30#define MAX77693_REG_INVALID (0xff) 29#define MAX77693_REG_INVALID (0xff)
31 30
32/* Slave addr = 0xCC: PMIC, Charger, Flash LED */ 31/* Slave addr = 0xCC: PMIC, Charger, Flash LED */
@@ -46,7 +45,7 @@ enum max77693_pmic_reg {
46 MAX77693_LED_REG_VOUT_FLASH2 = 0x0C, 45 MAX77693_LED_REG_VOUT_FLASH2 = 0x0C,
47 MAX77693_LED_REG_FLASH_INT = 0x0E, 46 MAX77693_LED_REG_FLASH_INT = 0x0E,
48 MAX77693_LED_REG_FLASH_INT_MASK = 0x0F, 47 MAX77693_LED_REG_FLASH_INT_MASK = 0x0F,
49 MAX77693_LED_REG_FLASH_INT_STATUS = 0x10, 48 MAX77693_LED_REG_FLASH_STATUS = 0x10,
50 49
51 MAX77693_PMIC_REG_PMIC_ID1 = 0x20, 50 MAX77693_PMIC_REG_PMIC_ID1 = 0x20,
52 MAX77693_PMIC_REG_PMIC_ID2 = 0x21, 51 MAX77693_PMIC_REG_PMIC_ID2 = 0x21,
@@ -85,6 +84,65 @@ enum max77693_pmic_reg {
85 MAX77693_PMIC_REG_END, 84 MAX77693_PMIC_REG_END,
86}; 85};
87 86
87/* MAX77693 ITORCH register */
88#define TORCH_IOUT1_SHIFT 0
89#define TORCH_IOUT2_SHIFT 4
90#define TORCH_IOUT_MIN 15625
91#define TORCH_IOUT_MAX 250000
92#define TORCH_IOUT_STEP 15625
93
94/* MAX77693 IFLASH1 and IFLASH2 registers */
95#define FLASH_IOUT_MIN 15625
96#define FLASH_IOUT_MAX_1LED 1000000
97#define FLASH_IOUT_MAX_2LEDS 625000
98#define FLASH_IOUT_STEP 15625
99
100/* MAX77693 TORCH_TIMER register */
101#define TORCH_TMR_NO_TIMER 0x40
102#define TORCH_TIMEOUT_MIN 262000
103#define TORCH_TIMEOUT_MAX 15728000
104
105/* MAX77693 FLASH_TIMER register */
106#define FLASH_TMR_LEVEL 0x80
107#define FLASH_TIMEOUT_MIN 62500
108#define FLASH_TIMEOUT_MAX 1000000
109#define FLASH_TIMEOUT_STEP 62500
110
111/* MAX77693 FLASH_EN register */
112#define FLASH_EN_OFF 0x0
113#define FLASH_EN_FLASH 0x1
114#define FLASH_EN_TORCH 0x2
115#define FLASH_EN_ON 0x3
116#define FLASH_EN_SHIFT(x) (6 - ((x) - 1) * 2)
117#define TORCH_EN_SHIFT(x) (2 - ((x) - 1) * 2)
118
119/* MAX77693 MAX_FLASH1 register */
120#define MAX_FLASH1_MAX_FL_EN 0x80
121#define MAX_FLASH1_VSYS_MIN 2400
122#define MAX_FLASH1_VSYS_MAX 3400
123#define MAX_FLASH1_VSYS_STEP 33
124
125/* MAX77693 VOUT_CNTL register */
126#define FLASH_BOOST_FIXED 0x04
127#define FLASH_BOOST_LEDNUM_2 0x80
128
129/* MAX77693 VOUT_FLASH1 register */
130#define FLASH_VOUT_MIN 3300
131#define FLASH_VOUT_MAX 5500
132#define FLASH_VOUT_STEP 25
133#define FLASH_VOUT_RMIN 0x0c
134
135/* MAX77693 FLASH_STATUS register */
136#define FLASH_STATUS_FLASH_ON BIT(3)
137#define FLASH_STATUS_TORCH_ON BIT(2)
138
139/* MAX77693 FLASH_INT register */
140#define FLASH_INT_FLED2_OPEN BIT(0)
141#define FLASH_INT_FLED2_SHORT BIT(1)
142#define FLASH_INT_FLED1_OPEN BIT(2)
143#define FLASH_INT_FLED1_SHORT BIT(3)
144#define FLASH_INT_OVER_CURRENT BIT(4)
145
88/* MAX77693 CHG_CNFG_00 register */ 146/* MAX77693 CHG_CNFG_00 register */
89#define CHG_CNFG_00_CHG_MASK 0x1 147#define CHG_CNFG_00_CHG_MASK 0x1
90#define CHG_CNFG_00_BUCK_MASK 0x4 148#define CHG_CNFG_00_BUCK_MASK 0x4
@@ -271,6 +329,13 @@ enum max77693_irq_source {
271 MAX77693_IRQ_GROUP_NR, 329 MAX77693_IRQ_GROUP_NR,
272}; 330};
273 331
332#define SRC_IRQ_CHARGER BIT(0)
333#define SRC_IRQ_TOP BIT(1)
334#define SRC_IRQ_FLASH BIT(2)
335#define SRC_IRQ_MUIC BIT(3)
336#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
337 | SRC_IRQ_FLASH | SRC_IRQ_MUIC)
338
274#define LED_IRQ_FLED2_OPEN BIT(0) 339#define LED_IRQ_FLED2_OPEN BIT(0)
275#define LED_IRQ_FLED2_SHORT BIT(1) 340#define LED_IRQ_FLED2_SHORT BIT(1)
276#define LED_IRQ_FLED1_OPEN BIT(2) 341#define LED_IRQ_FLED1_OPEN BIT(2)
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index 3f3dc45f93ee..f0b6585cd874 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -63,6 +63,45 @@ struct max77693_muic_platform_data {
63 int path_uart; 63 int path_uart;
64}; 64};
65 65
66/* MAX77693 led flash */
67
68/* triggers */
69enum max77693_led_trigger {
70 MAX77693_LED_TRIG_OFF,
71 MAX77693_LED_TRIG_FLASH,
72 MAX77693_LED_TRIG_TORCH,
73 MAX77693_LED_TRIG_EXT,
74 MAX77693_LED_TRIG_SOFT,
75};
76
77/* trigger types */
78enum max77693_led_trigger_type {
79 MAX77693_LED_TRIG_TYPE_EDGE,
80 MAX77693_LED_TRIG_TYPE_LEVEL,
81};
82
83/* boost modes */
84enum max77693_led_boost_mode {
85 MAX77693_LED_BOOST_NONE,
86 MAX77693_LED_BOOST_ADAPTIVE,
87 MAX77693_LED_BOOST_FIXED,
88};
89
90struct max77693_led_platform_data {
91 u32 fleds[2];
92 u32 iout_torch[2];
93 u32 iout_flash[2];
94 u32 trigger[2];
95 u32 trigger_type[2];
96 u32 num_leds;
97 u32 boost_mode;
98 u32 flash_timeout;
99 u32 boost_vout;
100 u32 low_vsys;
101};
102
103/* MAX77693 */
104
66struct max77693_platform_data { 105struct max77693_platform_data {
67 /* regulator data */ 106 /* regulator data */
68 struct max77693_regulator_data *regulators; 107 struct max77693_regulator_data *regulators;
@@ -70,5 +109,6 @@ struct max77693_platform_data {
70 109
71 /* muic data */ 110 /* muic data */
72 struct max77693_muic_platform_data *muic_data; 111 struct max77693_muic_platform_data *muic_data;
112 struct max77693_led_platform_data *led_data;
73}; 113};
74#endif /* __LINUX_MFD_MAX77693_H */ 114#endif /* __LINUX_MFD_MAX77693_H */
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
new file mode 100644
index 000000000000..fb09312d854b
--- /dev/null
+++ b/include/linux/mfd/rk808.h
@@ -0,0 +1,196 @@
1/*
2 * rk808.h for Rockchip RK808
3 *
4 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
5 *
6 * Author: Chris Zhong <zyw@rock-chips.com>
7 * Author: Zhang Qing <zhangqing@rock-chips.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 */
18
19#ifndef __LINUX_REGULATOR_rk808_H
20#define __LINUX_REGULATOR_rk808_H
21
22#include <linux/regulator/machine.h>
23#include <linux/regmap.h>
24
25/*
26 * rk808 Global Register Map.
27 */
28
29#define RK808_DCDC1 0 /* (0+RK808_START) */
30#define RK808_LDO1 4 /* (4+RK808_START) */
31#define RK808_NUM_REGULATORS 14
32
33enum rk808_reg {
34 RK808_ID_DCDC1,
35 RK808_ID_DCDC2,
36 RK808_ID_DCDC3,
37 RK808_ID_DCDC4,
38 RK808_ID_LDO1,
39 RK808_ID_LDO2,
40 RK808_ID_LDO3,
41 RK808_ID_LDO4,
42 RK808_ID_LDO5,
43 RK808_ID_LDO6,
44 RK808_ID_LDO7,
45 RK808_ID_LDO8,
46 RK808_ID_SWITCH1,
47 RK808_ID_SWITCH2,
48};
49
50#define RK808_SECONDS_REG 0x00
51#define RK808_MINUTES_REG 0x01
52#define RK808_HOURS_REG 0x02
53#define RK808_DAYS_REG 0x03
54#define RK808_MONTHS_REG 0x04
55#define RK808_YEARS_REG 0x05
56#define RK808_WEEKS_REG 0x06
57#define RK808_ALARM_SECONDS_REG 0x08
58#define RK808_ALARM_MINUTES_REG 0x09
59#define RK808_ALARM_HOURS_REG 0x0a
60#define RK808_ALARM_DAYS_REG 0x0b
61#define RK808_ALARM_MONTHS_REG 0x0c
62#define RK808_ALARM_YEARS_REG 0x0d
63#define RK808_RTC_CTRL_REG 0x10
64#define RK808_RTC_STATUS_REG 0x11
65#define RK808_RTC_INT_REG 0x12
66#define RK808_RTC_COMP_LSB_REG 0x13
67#define RK808_RTC_COMP_MSB_REG 0x14
68#define RK808_CLK32OUT_REG 0x20
69#define RK808_VB_MON_REG 0x21
70#define RK808_THERMAL_REG 0x22
71#define RK808_DCDC_EN_REG 0x23
72#define RK808_LDO_EN_REG 0x24
73#define RK808_SLEEP_SET_OFF_REG1 0x25
74#define RK808_SLEEP_SET_OFF_REG2 0x26
75#define RK808_DCDC_UV_STS_REG 0x27
76#define RK808_DCDC_UV_ACT_REG 0x28
77#define RK808_LDO_UV_STS_REG 0x29
78#define RK808_LDO_UV_ACT_REG 0x2a
79#define RK808_DCDC_PG_REG 0x2b
80#define RK808_LDO_PG_REG 0x2c
81#define RK808_VOUT_MON_TDB_REG 0x2d
82#define RK808_BUCK1_CONFIG_REG 0x2e
83#define RK808_BUCK1_ON_VSEL_REG 0x2f
84#define RK808_BUCK1_SLP_VSEL_REG 0x30
85#define RK808_BUCK1_DVS_VSEL_REG 0x31
86#define RK808_BUCK2_CONFIG_REG 0x32
87#define RK808_BUCK2_ON_VSEL_REG 0x33
88#define RK808_BUCK2_SLP_VSEL_REG 0x34
89#define RK808_BUCK2_DVS_VSEL_REG 0x35
90#define RK808_BUCK3_CONFIG_REG 0x36
91#define RK808_BUCK4_CONFIG_REG 0x37
92#define RK808_BUCK4_ON_VSEL_REG 0x38
93#define RK808_BUCK4_SLP_VSEL_REG 0x39
94#define RK808_BOOST_CONFIG_REG 0x3a
95#define RK808_LDO1_ON_VSEL_REG 0x3b
96#define RK808_LDO1_SLP_VSEL_REG 0x3c
97#define RK808_LDO2_ON_VSEL_REG 0x3d
98#define RK808_LDO2_SLP_VSEL_REG 0x3e
99#define RK808_LDO3_ON_VSEL_REG 0x3f
100#define RK808_LDO3_SLP_VSEL_REG 0x40
101#define RK808_LDO4_ON_VSEL_REG 0x41
102#define RK808_LDO4_SLP_VSEL_REG 0x42
103#define RK808_LDO5_ON_VSEL_REG 0x43
104#define RK808_LDO5_SLP_VSEL_REG 0x44
105#define RK808_LDO6_ON_VSEL_REG 0x45
106#define RK808_LDO6_SLP_VSEL_REG 0x46
107#define RK808_LDO7_ON_VSEL_REG 0x47
108#define RK808_LDO7_SLP_VSEL_REG 0x48
109#define RK808_LDO8_ON_VSEL_REG 0x49
110#define RK808_LDO8_SLP_VSEL_REG 0x4a
111#define RK808_DEVCTRL_REG 0x4b
112#define RK808_INT_STS_REG1 0x4c
113#define RK808_INT_STS_MSK_REG1 0x4d
114#define RK808_INT_STS_REG2 0x4e
115#define RK808_INT_STS_MSK_REG2 0x4f
116#define RK808_IO_POL_REG 0x50
117
118/* IRQ Definitions */
119#define RK808_IRQ_VOUT_LO 0
120#define RK808_IRQ_VB_LO 1
121#define RK808_IRQ_PWRON 2
122#define RK808_IRQ_PWRON_LP 3
123#define RK808_IRQ_HOTDIE 4
124#define RK808_IRQ_RTC_ALARM 5
125#define RK808_IRQ_RTC_PERIOD 6
126#define RK808_IRQ_PLUG_IN_INT 7
127#define RK808_IRQ_PLUG_OUT_INT 8
128#define RK808_NUM_IRQ 9
129
130#define RK808_IRQ_VOUT_LO_MSK BIT(0)
131#define RK808_IRQ_VB_LO_MSK BIT(1)
132#define RK808_IRQ_PWRON_MSK BIT(2)
133#define RK808_IRQ_PWRON_LP_MSK BIT(3)
134#define RK808_IRQ_HOTDIE_MSK BIT(4)
135#define RK808_IRQ_RTC_ALARM_MSK BIT(5)
136#define RK808_IRQ_RTC_PERIOD_MSK BIT(6)
137#define RK808_IRQ_PLUG_IN_INT_MSK BIT(0)
138#define RK808_IRQ_PLUG_OUT_INT_MSK BIT(1)
139
140#define RK808_VBAT_LOW_2V8 0x00
141#define RK808_VBAT_LOW_2V9 0x01
142#define RK808_VBAT_LOW_3V0 0x02
143#define RK808_VBAT_LOW_3V1 0x03
144#define RK808_VBAT_LOW_3V2 0x04
145#define RK808_VBAT_LOW_3V3 0x05
146#define RK808_VBAT_LOW_3V4 0x06
147#define RK808_VBAT_LOW_3V5 0x07
148#define VBAT_LOW_VOL_MASK (0x07 << 0)
149#define EN_VABT_LOW_SHUT_DOWN (0x00 << 4)
150#define EN_VBAT_LOW_IRQ (0x1 << 4)
151#define VBAT_LOW_ACT_MASK (0x1 << 4)
152
153#define BUCK_ILMIN_MASK (7 << 0)
154#define BOOST_ILMIN_MASK (7 << 0)
155#define BUCK1_RATE_MASK (3 << 3)
156#define BUCK2_RATE_MASK (3 << 3)
157#define MASK_ALL 0xff
158
159#define SWITCH2_EN BIT(6)
160#define SWITCH1_EN BIT(5)
161#define DEV_OFF_RST BIT(3)
162
163#define VB_LO_ACT BIT(4)
164#define VB_LO_SEL_3500MV (7 << 0)
165
166#define VOUT_LO_INT BIT(0)
167#define CLK32KOUT2_EN BIT(0)
168
169enum {
170 BUCK_ILMIN_50MA,
171 BUCK_ILMIN_100MA,
172 BUCK_ILMIN_150MA,
173 BUCK_ILMIN_200MA,
174 BUCK_ILMIN_250MA,
175 BUCK_ILMIN_300MA,
176 BUCK_ILMIN_350MA,
177 BUCK_ILMIN_400MA,
178};
179
180enum {
181 BOOST_ILMIN_75MA,
182 BOOST_ILMIN_100MA,
183 BOOST_ILMIN_125MA,
184 BOOST_ILMIN_150MA,
185 BOOST_ILMIN_175MA,
186 BOOST_ILMIN_200MA,
187 BOOST_ILMIN_225MA,
188 BOOST_ILMIN_250MA,
189};
190
191struct rk808 {
192 struct i2c_client *i2c;
193 struct regmap_irq_chip_data *irq_data;
194 struct regmap *regmap;
195};
196#endif /* __LINUX_REGULATOR_rk808_H */
diff --git a/include/linux/mfd/rn5t618.h b/include/linux/mfd/rn5t618.h
new file mode 100644
index 000000000000..c72d5344f3b3
--- /dev/null
+++ b/include/linux/mfd/rn5t618.h
@@ -0,0 +1,228 @@
1/*
2 * MFD core driver for Ricoh RN5T618 PMIC
3 *
4 * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * You should have received a copy of the GNU General Public License
11 * along with this program. If not, see <http://www.gnu.org/licenses/>.
12 */
13
14#ifndef __LINUX_MFD_RN5T618_H
15#define __LINUX_MFD_RN5T618_H
16
17#include <linux/regmap.h>
18
19#define RN5T618_LSIVER 0x00
20#define RN5T618_OTPVER 0x01
21#define RN5T618_IODAC 0x02
22#define RN5T618_VINDAC 0x03
23#define RN5T618_CPUCNT 0x06
24#define RN5T618_PSWR 0x07
25#define RN5T618_PONHIS 0x09
26#define RN5T618_POFFHIS 0x0a
27#define RN5T618_WATCHDOG 0x0b
28#define RN5T618_WATCHDOGCNT 0x0c
29#define RN5T618_PWRFUNC 0x0d
30#define RN5T618_SLPCNT 0x0e
31#define RN5T618_REPCNT 0x0f
32#define RN5T618_PWRONTIMSET 0x10
33#define RN5T618_NOETIMSETCNT 0x11
34#define RN5T618_PWRIREN 0x12
35#define RN5T618_PWRIRQ 0x13
36#define RN5T618_PWRMON 0x14
37#define RN5T618_PWRIRSEL 0x15
38#define RN5T618_DC1_SLOT 0x16
39#define RN5T618_DC2_SLOT 0x17
40#define RN5T618_DC3_SLOT 0x18
41#define RN5T618_LDO1_SLOT 0x1b
42#define RN5T618_LDO2_SLOT 0x1c
43#define RN5T618_LDO3_SLOT 0x1d
44#define RN5T618_LDO4_SLOT 0x1e
45#define RN5T618_LDO5_SLOT 0x1f
46#define RN5T618_PSO0_SLOT 0x25
47#define RN5T618_PSO1_SLOT 0x26
48#define RN5T618_PSO2_SLOT 0x27
49#define RN5T618_PSO3_SLOT 0x28
50#define RN5T618_LDORTC1_SLOT 0x2a
51#define RN5T618_DC1CTL 0x2c
52#define RN5T618_DC1CTL2 0x2d
53#define RN5T618_DC2CTL 0x2e
54#define RN5T618_DC2CTL2 0x2f
55#define RN5T618_DC3CTL 0x30
56#define RN5T618_DC3CTL2 0x31
57#define RN5T618_DC1DAC 0x36
58#define RN5T618_DC2DAC 0x37
59#define RN5T618_DC3DAC 0x38
60#define RN5T618_DC1DAC_SLP 0x3b
61#define RN5T618_DC2DAC_SLP 0x3c
62#define RN5T618_DC3DAC_SLP 0x3d
63#define RN5T618_DCIREN 0x40
64#define RN5T618_DCIRQ 0x41
65#define RN5T618_DCIRMON 0x42
66#define RN5T618_LDOEN1 0x44
67#define RN5T618_LDOEN2 0x45
68#define RN5T618_LDODIS 0x46
69#define RN5T618_LDO1DAC 0x4c
70#define RN5T618_LDO2DAC 0x4d
71#define RN5T618_LDO3DAC 0x4e
72#define RN5T618_LDO4DAC 0x4f
73#define RN5T618_LDO5DAC 0x50
74#define RN5T618_LDORTCDAC 0x56
75#define RN5T618_LDORTC2DAC 0x57
76#define RN5T618_LDO1DAC_SLP 0x58
77#define RN5T618_LDO2DAC_SLP 0x59
78#define RN5T618_LDO3DAC_SLP 0x5a
79#define RN5T618_LDO4DAC_SLP 0x5b
80#define RN5T618_LDO5DAC_SLP 0x5c
81#define RN5T618_ADCCNT1 0x64
82#define RN5T618_ADCCNT2 0x65
83#define RN5T618_ADCCNT3 0x66
84#define RN5T618_ILIMDATAH 0x68
85#define RN5T618_ILIMDATAL 0x69
86#define RN5T618_VBATDATAH 0x6a
87#define RN5T618_VBATDATAL 0x6b
88#define RN5T618_VADPDATAH 0x6c
89#define RN5T618_VADPDATAL 0x6d
90#define RN5T618_VUSBDATAH 0x6e
91#define RN5T618_VUSBDATAL 0x6f
92#define RN5T618_VSYSDATAH 0x70
93#define RN5T618_VSYSDATAL 0x71
94#define RN5T618_VTHMDATAH 0x72
95#define RN5T618_VTHMDATAL 0x73
96#define RN5T618_AIN1DATAH 0x74
97#define RN5T618_AIN1DATAL 0x75
98#define RN5T618_AIN0DATAH 0x76
99#define RN5T618_AIN0DATAL 0x77
100#define RN5T618_ILIMTHL 0x78
101#define RN5T618_ILIMTHH 0x79
102#define RN5T618_VBATTHL 0x7a
103#define RN5T618_VBATTHH 0x7b
104#define RN5T618_VADPTHL 0x7c
105#define RN5T618_VADPTHH 0x7d
106#define RN5T618_VUSBTHL 0x7e
107#define RN5T618_VUSBTHH 0x7f
108#define RN5T618_VSYSTHL 0x80
109#define RN5T618_VSYSTHH 0x81
110#define RN5T618_VTHMTHL 0x82
111#define RN5T618_VTHMTHH 0x83
112#define RN5T618_AIN1THL 0x84
113#define RN5T618_AIN1THH 0x85
114#define RN5T618_AIN0THL 0x86
115#define RN5T618_AIN0THH 0x87
116#define RN5T618_EN_ADCIR1 0x88
117#define RN5T618_EN_ADCIR2 0x89
118#define RN5T618_EN_ADCIR3 0x8a
119#define RN5T618_IR_ADC1 0x8c
120#define RN5T618_IR_ADC2 0x8d
121#define RN5T618_IR_ADC3 0x8e
122#define RN5T618_IOSEL 0x90
123#define RN5T618_IOOUT 0x91
124#define RN5T618_GPEDGE1 0x92
125#define RN5T618_GPEDGE2 0x93
126#define RN5T618_EN_GPIR 0x94
127#define RN5T618_IR_GPR 0x95
128#define RN5T618_IR_GPF 0x96
129#define RN5T618_MON_IOIN 0x97
130#define RN5T618_GPLED_FUNC 0x98
131#define RN5T618_INTPOL 0x9c
132#define RN5T618_INTEN 0x9d
133#define RN5T618_INTMON 0x9e
134#define RN5T618_PREVINDAC 0xb0
135#define RN5T618_BATDAC 0xb1
136#define RN5T618_CHGCTL1 0xb3
137#define RN5T618_CHGCTL2 0xb4
138#define RN5T618_VSYSSET 0xb5
139#define RN5T618_REGISET1 0xb6
140#define RN5T618_REGISET2 0xb7
141#define RN5T618_CHGISET 0xb8
142#define RN5T618_TIMSET 0xb9
143#define RN5T618_BATSET1 0xba
144#define RN5T618_BATSET2 0xbb
145#define RN5T618_DIESET 0xbc
146#define RN5T618_CHGSTATE 0xbd
147#define RN5T618_CHGCTRL_IRFMASK 0xbe
148#define RN5T618_CHGSTAT_IRFMASK1 0xbf
149#define RN5T618_CHGSTAT_IRFMASK2 0xc0
150#define RN5T618_CHGERR_IRFMASK 0xc1
151#define RN5T618_CHGCTRL_IRR 0xc2
152#define RN5T618_CHGSTAT_IRR1 0xc3
153#define RN5T618_CHGSTAT_IRR2 0xc4
154#define RN5T618_CHGERR_IRR 0xc5
155#define RN5T618_CHGCTRL_MONI 0xc6
156#define RN5T618_CHGSTAT_MONI1 0xc7
157#define RN5T618_CHGSTAT_MONI2 0xc8
158#define RN5T618_CHGERR_MONI 0xc9
159#define RN5T618_CHGCTRL_DETMOD1 0xca
160#define RN5T618_CHGCTRL_DETMOD2 0xcb
161#define RN5T618_CHGSTAT_DETMOD1 0xcc
162#define RN5T618_CHGSTAT_DETMOD2 0xcd
163#define RN5T618_CHGSTAT_DETMOD3 0xce
164#define RN5T618_CHGERR_DETMOD1 0xcf
165#define RN5T618_CHGERR_DETMOD2 0xd0
166#define RN5T618_CHGOSCCTL 0xd4
167#define RN5T618_CHGOSCSCORESET1 0xd5
168#define RN5T618_CHGOSCSCORESET2 0xd6
169#define RN5T618_CHGOSCSCORESET3 0xd7
170#define RN5T618_CHGOSCFREQSET1 0xd8
171#define RN5T618_CHGOSCFREQSET2 0xd9
172#define RN5T618_CONTROL 0xe0
173#define RN5T618_SOC 0xe1
174#define RN5T618_RE_CAP_H 0xe2
175#define RN5T618_RE_CAP_L 0xe3
176#define RN5T618_FA_CAP_H 0xe4
177#define RN5T618_FA_CAP_L 0xe5
178#define RN5T618_AGE 0xe6
179#define RN5T618_TT_EMPTY_H 0xe7
180#define RN5T618_TT_EMPTY_L 0xe8
181#define RN5T618_TT_FULL_H 0xe9
182#define RN5T618_TT_FULL_L 0xea
183#define RN5T618_VOLTAGE_1 0xeb
184#define RN5T618_VOLTAGE_0 0xec
185#define RN5T618_TEMP_1 0xed
186#define RN5T618_TEMP_0 0xee
187#define RN5T618_CC_CTRL 0xef
188#define RN5T618_CC_COUNT2 0xf0
189#define RN5T618_CC_COUNT1 0xf1
190#define RN5T618_CC_COUNT0 0xf2
191#define RN5T618_CC_SUMREG3 0xf3
192#define RN5T618_CC_SUMREG2 0xf4
193#define RN5T618_CC_SUMREG1 0xf5
194#define RN5T618_CC_SUMREG0 0xf6
195#define RN5T618_CC_OFFREG1 0xf7
196#define RN5T618_CC_OFFREG0 0xf8
197#define RN5T618_CC_GAINREG1 0xf9
198#define RN5T618_CC_GAINREG0 0xfa
199#define RN5T618_CC_AVEREG1 0xfb
200#define RN5T618_CC_AVEREG0 0xfc
201#define RN5T618_MAX_REG 0xfc
202
203#define RN5T618_REPCNT_REPWRON BIT(0)
204#define RN5T618_SLPCNT_SWPWROFF BIT(0)
205#define RN5T618_WATCHDOG_WDOGEN BIT(2)
206#define RN5T618_WATCHDOG_WDOGTIM_M (BIT(0) | BIT(1))
207#define RN5T618_WATCHDOG_WDOGTIM_S 0
208#define RN5T618_PWRIRQ_IR_WDOG BIT(6)
209
210enum {
211 RN5T618_DCDC1,
212 RN5T618_DCDC2,
213 RN5T618_DCDC3,
214 RN5T618_LDO1,
215 RN5T618_LDO2,
216 RN5T618_LDO3,
217 RN5T618_LDO4,
218 RN5T618_LDO5,
219 RN5T618_LDORTC1,
220 RN5T618_LDORTC2,
221 RN5T618_REG_NUM,
222};
223
224struct rn5t618 {
225 struct regmap *regmap;
226};
227
228#endif /* __LINUX_MFD_RN5T618_H */
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/mfd/rtsx_pci.h
index 74346d5e7899..0c12628e91c6 100644
--- a/include/linux/mfd/rtsx_pci.h
+++ b/include/linux/mfd/rtsx_pci.h
@@ -558,6 +558,7 @@
558#define SD_SAMPLE_POINT_CTL 0xFDA7 558#define SD_SAMPLE_POINT_CTL 0xFDA7
559#define SD_PUSH_POINT_CTL 0xFDA8 559#define SD_PUSH_POINT_CTL 0xFDA8
560#define SD_CMD0 0xFDA9 560#define SD_CMD0 0xFDA9
561#define SD_CMD_START 0x40
561#define SD_CMD1 0xFDAA 562#define SD_CMD1 0xFDAA
562#define SD_CMD2 0xFDAB 563#define SD_CMD2 0xFDAB
563#define SD_CMD3 0xFDAC 564#define SD_CMD3 0xFDAC
@@ -707,6 +708,14 @@
707#define PM_CTRL1 0xFF44 708#define PM_CTRL1 0xFF44
708#define PM_CTRL2 0xFF45 709#define PM_CTRL2 0xFF45
709#define PM_CTRL3 0xFF46 710#define PM_CTRL3 0xFF46
711#define SDIO_SEND_PME_EN 0x80
712#define FORCE_RC_MODE_ON 0x40
713#define FORCE_RX50_LINK_ON 0x20
714#define D3_DELINK_MODE_EN 0x10
715#define USE_PESRTB_CTL_DELINK 0x08
716#define DELAY_PIN_WAKE 0x04
717#define RESET_PIN_WAKE 0x02
718#define PM_WAKE_EN 0x01
710#define PM_CTRL4 0xFF47 719#define PM_CTRL4 0xFF47
711 720
712/* Memory mapping */ 721/* Memory mapping */
@@ -752,6 +761,14 @@
752#define PHY_DUM_REG 0x1F 761#define PHY_DUM_REG 0x1F
753 762
754#define LCTLR 0x80 763#define LCTLR 0x80
764#define LCTLR_EXT_SYNC 0x80
765#define LCTLR_COMMON_CLOCK_CFG 0x40
766#define LCTLR_RETRAIN_LINK 0x20
767#define LCTLR_LINK_DISABLE 0x10
768#define LCTLR_RCB 0x08
769#define LCTLR_RESERVED 0x04
770#define LCTLR_ASPM_CTL_MASK 0x03
771
755#define PCR_SETTING_REG1 0x724 772#define PCR_SETTING_REG1 0x724
756#define PCR_SETTING_REG2 0x814 773#define PCR_SETTING_REG2 0x814
757#define PCR_SETTING_REG3 0x747 774#define PCR_SETTING_REG3 0x747
@@ -967,4 +984,24 @@ static inline u8 *rtsx_pci_get_cmd_data(struct rtsx_pcr *pcr)
967 return (u8 *)(pcr->host_cmds_ptr); 984 return (u8 *)(pcr->host_cmds_ptr);
968} 985}
969 986
987static inline int rtsx_pci_update_cfg_byte(struct rtsx_pcr *pcr, int addr,
988 u8 mask, u8 append)
989{
990 int err;
991 u8 val;
992
993 err = pci_read_config_byte(pcr->pci, addr, &val);
994 if (err < 0)
995 return err;
996 return pci_write_config_byte(pcr->pci, addr, (val & mask) | append);
997}
998
999static inline void rtsx_pci_write_be32(struct rtsx_pcr *pcr, u16 reg, u32 val)
1000{
1001 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg, 0xFF, val >> 24);
1002 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 1, 0xFF, val >> 16);
1003 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 2, 0xFF, val >> 8);
1004 rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, reg + 3, 0xFF, val);
1005}
1006
970#endif 1007#endif
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index b5f73de81aad..3fdb7cfbffb3 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -14,12 +14,35 @@
14#ifndef __LINUX_MFD_SEC_CORE_H 14#ifndef __LINUX_MFD_SEC_CORE_H
15#define __LINUX_MFD_SEC_CORE_H 15#define __LINUX_MFD_SEC_CORE_H
16 16
17/* Macros to represent minimum voltages for LDO/BUCK */
18#define MIN_3000_MV 3000000
19#define MIN_2500_MV 2500000
20#define MIN_2000_MV 2000000
21#define MIN_1800_MV 1800000
22#define MIN_1500_MV 1500000
23#define MIN_1400_MV 1400000
24#define MIN_1000_MV 1000000
25
26#define MIN_900_MV 900000
27#define MIN_850_MV 850000
28#define MIN_800_MV 800000
29#define MIN_750_MV 750000
30#define MIN_600_MV 600000
31#define MIN_500_MV 500000
32
33/* Macros to represent steps for LDO/BUCK */
34#define STEP_50_MV 50000
35#define STEP_25_MV 25000
36#define STEP_12_5_MV 12500
37#define STEP_6_25_MV 6250
38
17enum sec_device_type { 39enum sec_device_type {
18 S5M8751X, 40 S5M8751X,
19 S5M8763X, 41 S5M8763X,
20 S5M8767X, 42 S5M8767X,
21 S2MPA01, 43 S2MPA01,
22 S2MPS11X, 44 S2MPS11X,
45 S2MPS13X,
23 S2MPS14X, 46 S2MPS14X,
24 S2MPU02, 47 S2MPU02,
25}; 48};
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
index fbc63bc0d6a2..2766108bca2f 100644
--- a/include/linux/mfd/samsung/s2mpa01.h
+++ b/include/linux/mfd/samsung/s2mpa01.h
@@ -155,18 +155,6 @@ enum s2mpa01_regulators {
155 S2MPA01_REGULATOR_MAX, 155 S2MPA01_REGULATOR_MAX,
156}; 156};
157 157
158#define S2MPA01_BUCK_MIN1 600000
159#define S2MPA01_BUCK_MIN2 800000
160#define S2MPA01_BUCK_MIN3 1000000
161#define S2MPA01_BUCK_MIN4 1500000
162#define S2MPA01_LDO_MIN 800000
163
164#define S2MPA01_BUCK_STEP1 6250
165#define S2MPA01_BUCK_STEP2 12500
166
167#define S2MPA01_LDO_STEP1 50000
168#define S2MPA01_LDO_STEP2 25000
169
170#define S2MPA01_LDO_VSEL_MASK 0x3F 158#define S2MPA01_LDO_VSEL_MASK 0x3F
171#define S2MPA01_BUCK_VSEL_MASK 0xFF 159#define S2MPA01_BUCK_VSEL_MASK 0xFF
172#define S2MPA01_ENABLE_MASK (0x03 << S2MPA01_ENABLE_SHIFT) 160#define S2MPA01_ENABLE_MASK (0x03 << S2MPA01_ENABLE_SHIFT)
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index b3ddf98dec37..7981a9d77d3f 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -171,15 +171,6 @@ enum s2mps11_regulators {
171 S2MPS11_REGULATOR_MAX, 171 S2MPS11_REGULATOR_MAX,
172}; 172};
173 173
174#define S2MPS11_BUCK_MIN1 600000
175#define S2MPS11_BUCK_MIN2 750000
176#define S2MPS11_BUCK_MIN3 3000000
177#define S2MPS11_LDO_MIN 800000
178#define S2MPS11_BUCK_STEP1 6250
179#define S2MPS11_BUCK_STEP2 12500
180#define S2MPS11_BUCK_STEP3 25000
181#define S2MPS11_LDO_STEP1 50000
182#define S2MPS11_LDO_STEP2 25000
183#define S2MPS11_LDO_VSEL_MASK 0x3F 174#define S2MPS11_LDO_VSEL_MASK 0x3F
184#define S2MPS11_BUCK_VSEL_MASK 0xFF 175#define S2MPS11_BUCK_VSEL_MASK 0xFF
185#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT) 176#define S2MPS11_ENABLE_MASK (0x03 << S2MPS11_ENABLE_SHIFT)
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
new file mode 100644
index 000000000000..ce5dda8958fe
--- /dev/null
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -0,0 +1,186 @@
1/*
2 * s2mps13.h
3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd
5 * http://www.samsung.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#ifndef __LINUX_MFD_S2MPS13_H
20#define __LINUX_MFD_S2MPS13_H
21
22/* S2MPS13 registers */
23enum s2mps13_reg {
24 S2MPS13_REG_ID,
25 S2MPS13_REG_INT1,
26 S2MPS13_REG_INT2,
27 S2MPS13_REG_INT3,
28 S2MPS13_REG_INT1M,
29 S2MPS13_REG_INT2M,
30 S2MPS13_REG_INT3M,
31 S2MPS13_REG_ST1,
32 S2MPS13_REG_ST2,
33 S2MPS13_REG_PWRONSRC,
34 S2MPS13_REG_OFFSRC,
35 S2MPS13_REG_BU_CHG,
36 S2MPS13_REG_RTCCTRL,
37 S2MPS13_REG_CTRL1,
38 S2MPS13_REG_CTRL2,
39 S2MPS13_REG_RSVD1,
40 S2MPS13_REG_RSVD2,
41 S2MPS13_REG_RSVD3,
42 S2MPS13_REG_RSVD4,
43 S2MPS13_REG_RSVD5,
44 S2MPS13_REG_RSVD6,
45 S2MPS13_REG_CTRL3,
46 S2MPS13_REG_RSVD7,
47 S2MPS13_REG_RSVD8,
48 S2MPS13_REG_WRSTBI,
49 S2MPS13_REG_B1CTRL,
50 S2MPS13_REG_B1OUT,
51 S2MPS13_REG_B2CTRL,
52 S2MPS13_REG_B2OUT,
53 S2MPS13_REG_B3CTRL,
54 S2MPS13_REG_B3OUT,
55 S2MPS13_REG_B4CTRL,
56 S2MPS13_REG_B4OUT,
57 S2MPS13_REG_B5CTRL,
58 S2MPS13_REG_B5OUT,
59 S2MPS13_REG_B6CTRL,
60 S2MPS13_REG_B6OUT,
61 S2MPS13_REG_B7CTRL,
62 S2MPS13_REG_B7OUT,
63 S2MPS13_REG_B8CTRL,
64 S2MPS13_REG_B8OUT,
65 S2MPS13_REG_B9CTRL,
66 S2MPS13_REG_B9OUT,
67 S2MPS13_REG_B10CTRL,
68 S2MPS13_REG_B10OUT,
69 S2MPS13_REG_BB1CTRL,
70 S2MPS13_REG_BB1OUT,
71 S2MPS13_REG_BUCK_RAMP1,
72 S2MPS13_REG_BUCK_RAMP2,
73 S2MPS13_REG_LDO_DVS1,
74 S2MPS13_REG_LDO_DVS2,
75 S2MPS13_REG_LDO_DVS3,
76 S2MPS13_REG_B6OUT2,
77 S2MPS13_REG_L1CTRL,
78 S2MPS13_REG_L2CTRL,
79 S2MPS13_REG_L3CTRL,
80 S2MPS13_REG_L4CTRL,
81 S2MPS13_REG_L5CTRL,
82 S2MPS13_REG_L6CTRL,
83 S2MPS13_REG_L7CTRL,
84 S2MPS13_REG_L8CTRL,
85 S2MPS13_REG_L9CTRL,
86 S2MPS13_REG_L10CTRL,
87 S2MPS13_REG_L11CTRL,
88 S2MPS13_REG_L12CTRL,
89 S2MPS13_REG_L13CTRL,
90 S2MPS13_REG_L14CTRL,
91 S2MPS13_REG_L15CTRL,
92 S2MPS13_REG_L16CTRL,
93 S2MPS13_REG_L17CTRL,
94 S2MPS13_REG_L18CTRL,
95 S2MPS13_REG_L19CTRL,
96 S2MPS13_REG_L20CTRL,
97 S2MPS13_REG_L21CTRL,
98 S2MPS13_REG_L22CTRL,
99 S2MPS13_REG_L23CTRL,
100 S2MPS13_REG_L24CTRL,
101 S2MPS13_REG_L25CTRL,
102 S2MPS13_REG_L26CTRL,
103 S2MPS13_REG_L27CTRL,
104 S2MPS13_REG_L28CTRL,
105 S2MPS13_REG_L30CTRL,
106 S2MPS13_REG_L31CTRL,
107 S2MPS13_REG_L32CTRL,
108 S2MPS13_REG_L33CTRL,
109 S2MPS13_REG_L34CTRL,
110 S2MPS13_REG_L35CTRL,
111 S2MPS13_REG_L36CTRL,
112 S2MPS13_REG_L37CTRL,
113 S2MPS13_REG_L38CTRL,
114 S2MPS13_REG_L39CTRL,
115 S2MPS13_REG_L40CTRL,
116 S2MPS13_REG_LDODSCH1,
117 S2MPS13_REG_LDODSCH2,
118 S2MPS13_REG_LDODSCH3,
119 S2MPS13_REG_LDODSCH4,
120 S2MPS13_REG_LDODSCH5,
121};
122
123/* regulator ids */
124enum s2mps13_regulators {
125 S2MPS13_LDO1,
126 S2MPS13_LDO2,
127 S2MPS13_LDO3,
128 S2MPS13_LDO4,
129 S2MPS13_LDO5,
130 S2MPS13_LDO6,
131 S2MPS13_LDO7,
132 S2MPS13_LDO8,
133 S2MPS13_LDO9,
134 S2MPS13_LDO10,
135 S2MPS13_LDO11,
136 S2MPS13_LDO12,
137 S2MPS13_LDO13,
138 S2MPS13_LDO14,
139 S2MPS13_LDO15,
140 S2MPS13_LDO16,
141 S2MPS13_LDO17,
142 S2MPS13_LDO18,
143 S2MPS13_LDO19,
144 S2MPS13_LDO20,
145 S2MPS13_LDO21,
146 S2MPS13_LDO22,
147 S2MPS13_LDO23,
148 S2MPS13_LDO24,
149 S2MPS13_LDO25,
150 S2MPS13_LDO26,
151 S2MPS13_LDO27,
152 S2MPS13_LDO28,
153 S2MPS13_LDO29,
154 S2MPS13_LDO30,
155 S2MPS13_LDO31,
156 S2MPS13_LDO32,
157 S2MPS13_LDO33,
158 S2MPS13_LDO34,
159 S2MPS13_LDO35,
160 S2MPS13_LDO36,
161 S2MPS13_LDO37,
162 S2MPS13_LDO38,
163 S2MPS13_LDO39,
164 S2MPS13_LDO40,
165 S2MPS13_BUCK1,
166 S2MPS13_BUCK2,
167 S2MPS13_BUCK3,
168 S2MPS13_BUCK4,
169 S2MPS13_BUCK5,
170 S2MPS13_BUCK6,
171 S2MPS13_BUCK7,
172 S2MPS13_BUCK8,
173 S2MPS13_BUCK9,
174 S2MPS13_BUCK10,
175
176 S2MPS13_REGULATOR_MAX,
177};
178
179/*
180 * Default ramp delay in uv/us. Datasheet says that ramp delay can be
181 * controlled however it does not specify which register is used for that.
182 * Let's assume that default value will be set.
183 */
184#define S2MPS13_BUCK_RAMP_DELAY 12500
185
186#endif /* __LINUX_MFD_S2MPS13_H */
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
index 900cd7a04314..c92f4782afb5 100644
--- a/include/linux/mfd/samsung/s2mps14.h
+++ b/include/linux/mfd/samsung/s2mps14.h
@@ -123,10 +123,6 @@ enum s2mps14_regulators {
123}; 123};
124 124
125/* Regulator constraints for BUCKx */ 125/* Regulator constraints for BUCKx */
126#define S2MPS14_BUCK1235_MIN_600MV 600000
127#define S2MPS14_BUCK4_MIN_1400MV 1400000
128#define S2MPS14_BUCK1235_STEP_6_25MV 6250
129#define S2MPS14_BUCK4_STEP_12_5MV 12500
130#define S2MPS14_BUCK1235_START_SEL 0x20 126#define S2MPS14_BUCK1235_START_SEL 0x20
131#define S2MPS14_BUCK4_START_SEL 0x40 127#define S2MPS14_BUCK4_START_SEL 0x40
132/* 128/*
@@ -136,12 +132,6 @@ enum s2mps14_regulators {
136 */ 132 */
137#define S2MPS14_BUCK_RAMP_DELAY 12500 133#define S2MPS14_BUCK_RAMP_DELAY 12500
138 134
139/* Regulator constraints for different types of LDOx */
140#define S2MPS14_LDO_MIN_800MV 800000
141#define S2MPS14_LDO_MIN_1800MV 1800000
142#define S2MPS14_LDO_STEP_12_5MV 12500
143#define S2MPS14_LDO_STEP_25MV 25000
144
145#define S2MPS14_LDO_VSEL_MASK 0x3F 135#define S2MPS14_LDO_VSEL_MASK 0x3F
146#define S2MPS14_BUCK_VSEL_MASK 0xFF 136#define S2MPS14_BUCK_VSEL_MASK 0xFF
147#define S2MPS14_ENABLE_MASK (0x03 << S2MPS14_ENABLE_SHIFT) 137#define S2MPS14_ENABLE_MASK (0x03 << S2MPS14_ENABLE_SHIFT)
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index ff44374a1a4e..c877cad61a13 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -395,4 +395,43 @@
395#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17) 395#define IMX6SL_GPR1_FEC_CLOCK_MUX1_SEL_MASK (0x3 << 17)
396#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14) 396#define IMX6SL_GPR1_FEC_CLOCK_MUX2_SEL_MASK (0x1 << 14)
397 397
398/* For imx6sx iomux gpr register field define */
399#define IMX6SX_GPR1_VDEC_SW_RST_MASK (0x1 << 20)
400#define IMX6SX_GPR1_VDEC_SW_RST_RESET (0x1 << 20)
401#define IMX6SX_GPR1_VDEC_SW_RST_RELEASE (0x0 << 20)
402#define IMX6SX_GPR1_VADC_SW_RST_MASK (0x1 << 19)
403#define IMX6SX_GPR1_VADC_SW_RST_RESET (0x1 << 19)
404#define IMX6SX_GPR1_VADC_SW_RST_RELEASE (0x0 << 19)
405#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_MASK (0x3 << 13)
406#define IMX6SX_GPR1_FEC_CLOCK_PAD_DIR_MASK (0x3 << 17)
407#define IMX6SX_GPR1_FEC_CLOCK_MUX_SEL_EXT (0x3 << 13)
408
409#define IMX6SX_GPR4_FEC_ENET1_STOP_REQ (0x1 << 3)
410#define IMX6SX_GPR4_FEC_ENET2_STOP_REQ (0x1 << 4)
411
412#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_MASK (0x1 << 3)
413#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF1 (0x0 << 3)
414#define IMX6SX_GPR5_DISP_MUX_LDB_CTRL_LCDIF2 (0x1 << 3)
415
416#define IMX6SX_GPR5_CSI2_MUX_CTRL_MASK (0x3 << 27)
417#define IMX6SX_GPR5_CSI2_MUX_CTRL_EXT_PIN (0x0 << 27)
418#define IMX6SX_GPR5_CSI2_MUX_CTRL_CVD (0x1 << 27)
419#define IMX6SX_GPR5_CSI2_MUX_CTRL_VDAC_TO_CSI (0x2 << 27)
420#define IMX6SX_GPR5_CSI2_MUX_CTRL_GND (0x3 << 27)
421#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_MASK (0x1 << 26)
422#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_ENABLE (0x1 << 26)
423#define IMX6SX_GPR5_VADC_TO_CSI_CAPTURE_EN_DISABLE (0x0 << 26)
424#define IMX6SX_GPR5_CSI1_MUX_CTRL_MASK (0x3 << 4)
425#define IMX6SX_GPR5_CSI1_MUX_CTRL_EXT_PIN (0x0 << 4)
426#define IMX6SX_GPR5_CSI1_MUX_CTRL_CVD (0x1 << 4)
427#define IMX6SX_GPR5_CSI1_MUX_CTRL_VDAC_TO_CSI (0x2 << 4)
428#define IMX6SX_GPR5_CSI1_MUX_CTRL_GND (0x3 << 4)
429
430#define IMX6SX_GPR5_DISP_MUX_DCIC2_LCDIF2 (0x0 << 2)
431#define IMX6SX_GPR5_DISP_MUX_DCIC2_LVDS (0x1 << 2)
432#define IMX6SX_GPR5_DISP_MUX_DCIC2_MASK (0x1 << 2)
433#define IMX6SX_GPR5_DISP_MUX_DCIC1_LCDIF1 (0x0 << 1)
434#define IMX6SX_GPR5_DISP_MUX_DCIC1_LVDS (0x1 << 1)
435#define IMX6SX_GPR5_DISP_MUX_DCIC1_MASK (0x1 << 1)
436
398#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */ 437#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/tc3589x.h b/include/linux/mfd/tc3589x.h
index e6088c2e2092..e1c12d84c26a 100644
--- a/include/linux/mfd/tc3589x.h
+++ b/include/linux/mfd/tc3589x.h
@@ -164,13 +164,10 @@ struct tc3589x_keypad_platform_data {
164 164
165/** 165/**
166 * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data 166 * struct tc3589x_gpio_platform_data - TC3589x GPIO platform data
167 * @gpio_base: first gpio number assigned to TC3589x. A maximum of
168 * %TC3589x_NR_GPIOS GPIOs will be allocated.
169 * @setup: callback for board-specific initialization 167 * @setup: callback for board-specific initialization
170 * @remove: callback for board-specific teardown 168 * @remove: callback for board-specific teardown
171 */ 169 */
172struct tc3589x_gpio_platform_data { 170struct tc3589x_gpio_platform_data {
173 int gpio_base;
174 void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base); 171 void (*setup)(struct tc3589x *tc3589x, unsigned gpio_base);
175 void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base); 172 void (*remove)(struct tc3589x *tc3589x, unsigned gpio_base);
176}; 173};
@@ -178,18 +175,13 @@ struct tc3589x_gpio_platform_data {
178/** 175/**
179 * struct tc3589x_platform_data - TC3589x platform data 176 * struct tc3589x_platform_data - TC3589x platform data
180 * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*) 177 * @block: bitmask of blocks to enable (use TC3589x_BLOCK_*)
181 * @irq_base: base IRQ number. %TC3589x_NR_IRQS irqs will be used.
182 * @gpio: GPIO-specific platform data 178 * @gpio: GPIO-specific platform data
183 * @keypad: keypad-specific platform data 179 * @keypad: keypad-specific platform data
184 */ 180 */
185struct tc3589x_platform_data { 181struct tc3589x_platform_data {
186 unsigned int block; 182 unsigned int block;
187 int irq_base;
188 struct tc3589x_gpio_platform_data *gpio; 183 struct tc3589x_gpio_platform_data *gpio;
189 const struct tc3589x_keypad_platform_data *keypad; 184 const struct tc3589x_keypad_platform_data *keypad;
190}; 185};
191 186
192#define TC3589x_NR_GPIOS 24
193#define TC3589x_NR_IRQS TC3589x_INT_GPIO(TC3589x_NR_GPIOS)
194
195#endif 187#endif
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index fb96c84dada5..e2e70053470e 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -155,6 +155,7 @@ struct ti_tscadc_dev {
155 void __iomem *tscadc_base; 155 void __iomem *tscadc_base;
156 int irq; 156 int irq;
157 int used_cells; /* 1-2 */ 157 int used_cells; /* 1-2 */
158 int tsc_wires;
158 int tsc_cell; /* -1 if not used */ 159 int tsc_cell; /* -1 if not used */
159 int adc_cell; /* -1 if not used */ 160 int adc_cell; /* -1 if not used */
160 struct mfd_cell cells[TSCADC_CELLS]; 161 struct mfd_cell cells[TSCADC_CELLS];
diff --git a/include/linux/mfd/ti_ssp.h b/include/linux/mfd/ti_ssp.h
deleted file mode 100644
index dbb4b43bd20e..000000000000
--- a/include/linux/mfd/ti_ssp.h
+++ /dev/null
@@ -1,93 +0,0 @@
1/*
2 * Sequencer Serial Port (SSP) driver for Texas Instruments' SoCs
3 *
4 * Copyright (C) 2010 Texas Instruments Inc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef __TI_SSP_H__
22#define __TI_SSP_H__
23
24struct ti_ssp_dev_data {
25 const char *dev_name;
26 void *pdata;
27 size_t pdata_size;
28};
29
30struct ti_ssp_data {
31 unsigned long out_clock;
32 struct ti_ssp_dev_data dev_data[2];
33};
34
35struct ti_ssp_spi_data {
36 unsigned long iosel;
37 int num_cs;
38 void (*select)(int cs);
39};
40
41/*
42 * Sequencer port IO pin configuration bits. These do not correlate 1-1 with
43 * the hardware. The iosel field in the port data combines iosel1 and iosel2,
44 * and is therefore not a direct map to register space. It is best to use the
45 * macros below to construct iosel values.
46 *
47 * least significant 16 bits --> iosel1
48 * most significant 16 bits --> iosel2
49 */
50
51#define SSP_IN 0x0000
52#define SSP_DATA 0x0001
53#define SSP_CLOCK 0x0002
54#define SSP_CHIPSEL 0x0003
55#define SSP_OUT 0x0004
56#define SSP_PIN_SEL(pin, v) ((v) << ((pin) * 3))
57#define SSP_PIN_MASK(pin) SSP_PIN_SEL(pin, 0x7)
58#define SSP_INPUT_SEL(pin) ((pin) << 16)
59
60/* Sequencer port config bits */
61#define SSP_EARLY_DIN BIT(8)
62#define SSP_DELAY_DOUT BIT(9)
63
64/* Sequence map definitions */
65#define SSP_CLK_HIGH BIT(0)
66#define SSP_CLK_LOW 0
67#define SSP_DATA_HIGH BIT(1)
68#define SSP_DATA_LOW 0
69#define SSP_CS_HIGH BIT(2)
70#define SSP_CS_LOW 0
71#define SSP_OUT_MODE BIT(3)
72#define SSP_IN_MODE 0
73#define SSP_DATA_REG BIT(4)
74#define SSP_ADDR_REG 0
75
76#define SSP_OPCODE_DIRECT ((0x0) << 5)
77#define SSP_OPCODE_TOGGLE ((0x1) << 5)
78#define SSP_OPCODE_SHIFT ((0x2) << 5)
79#define SSP_OPCODE_BRANCH0 ((0x4) << 5)
80#define SSP_OPCODE_BRANCH1 ((0x5) << 5)
81#define SSP_OPCODE_BRANCH ((0x6) << 5)
82#define SSP_OPCODE_STOP ((0x7) << 5)
83#define SSP_BRANCH(addr) ((addr) << 8)
84#define SSP_COUNT(cycles) ((cycles) << 8)
85
86int ti_ssp_raw_read(struct device *dev);
87int ti_ssp_raw_write(struct device *dev, u32 val);
88int ti_ssp_load(struct device *dev, int offs, u32* prog, int len);
89int ti_ssp_run(struct device *dev, u32 pc, u32 input, u32 *output);
90int ti_ssp_set_mode(struct device *dev, int mode);
91int ti_ssp_set_iosel(struct device *dev, u32 iosel);
92
93#endif /* __TI_SSP_H__ */
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 8f6f2e91e7ae..57388171610d 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -5,6 +5,7 @@
5#include <linux/fb.h> 5#include <linux/fb.h>
6#include <linux/io.h> 6#include <linux/io.h>
7#include <linux/jiffies.h> 7#include <linux/jiffies.h>
8#include <linux/mmc/card.h>
8#include <linux/platform_device.h> 9#include <linux/platform_device.h>
9#include <linux/pm_runtime.h> 10#include <linux/pm_runtime.h>
10 11
@@ -83,6 +84,27 @@
83 */ 84 */
84#define TMIO_MMC_HAVE_HIGH_REG (1 << 6) 85#define TMIO_MMC_HAVE_HIGH_REG (1 << 6)
85 86
87/*
88 * Some controllers have CMD12 automatically
89 * issue/non-issue register
90 */
91#define TMIO_MMC_HAVE_CMD12_CTRL (1 << 7)
92
93/*
94 * Some controllers needs to set 1 on SDIO status reserved bits
95 */
96#define TMIO_MMC_SDIO_STATUS_QUIRK (1 << 8)
97
98/*
99 * Some controllers have DMA enable/disable register
100 */
101#define TMIO_MMC_HAVE_CTL_DMA_REG (1 << 9)
102
103/*
104 * Some controllers allows to set SDx actual clock
105 */
106#define TMIO_MMC_CLK_ACTUAL (1 << 10)
107
86int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); 108int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
87int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); 109int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
88void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state); 110void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
@@ -96,6 +118,7 @@ struct tmio_mmc_dma {
96 int slave_id_tx; 118 int slave_id_tx;
97 int slave_id_rx; 119 int slave_id_rx;
98 int alignment_shift; 120 int alignment_shift;
121 dma_addr_t dma_rx_offset;
99 bool (*filter)(struct dma_chan *chan, void *arg); 122 bool (*filter)(struct dma_chan *chan, void *arg);
100}; 123};
101 124
@@ -120,6 +143,8 @@ struct tmio_mmc_data {
120 /* clock management callbacks */ 143 /* clock management callbacks */
121 int (*clk_enable)(struct platform_device *pdev, unsigned int *f); 144 int (*clk_enable)(struct platform_device *pdev, unsigned int *f);
122 void (*clk_disable)(struct platform_device *pdev); 145 void (*clk_disable)(struct platform_device *pdev);
146 int (*multi_io_quirk)(struct mmc_card *card,
147 unsigned int direction, int blk_size);
123}; 148};
124 149
125/* 150/*
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index 95d6938737fd..ac7fba44d7e4 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -60,6 +60,8 @@
60#define TPS65217_REG_SEQ5 0X1D 60#define TPS65217_REG_SEQ5 0X1D
61#define TPS65217_REG_SEQ6 0X1E 61#define TPS65217_REG_SEQ6 0X1E
62 62
63#define TPS65217_REG_MAX TPS65217_REG_SEQ6
64
63/* Register field definitions */ 65/* Register field definitions */
64#define TPS65217_CHIPID_CHIP_MASK 0xF0 66#define TPS65217_CHIPID_CHIP_MASK 0xF0
65#define TPS65217_CHIPID_REV_MASK 0x0F 67#define TPS65217_CHIPID_REV_MASK 0x0F
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index a2901c414664..fab9b32ace8e 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -13,18 +13,9 @@ typedef void free_page_t(struct page *page, unsigned long private);
13 * Return values from addresss_space_operations.migratepage(): 13 * Return values from addresss_space_operations.migratepage():
14 * - negative errno on page migration failure; 14 * - negative errno on page migration failure;
15 * - zero on page migration success; 15 * - zero on page migration success;
16 *
17 * The balloon page migration introduces this special case where a 'distinct'
18 * return code is used to flag a successful page migration to unmap_and_move().
19 * This approach is necessary because page migration can race against balloon
20 * deflation procedure, and for such case we could introduce a nasty page leak
21 * if a successfully migrated balloon page gets released concurrently with
22 * migration's unmap_and_move() wrap-up steps.
23 */ 16 */
24#define MIGRATEPAGE_SUCCESS 0 17#define MIGRATEPAGE_SUCCESS 0
25#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page 18
26 * sucessful migration case.
27 */
28enum migrate_reason { 19enum migrate_reason {
29 MR_COMPACTION, 20 MR_COMPACTION,
30 MR_MEMORY_FAILURE, 21 MR_MEMORY_FAILURE,
@@ -45,9 +36,6 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
45 36
46extern int migrate_prep(void); 37extern int migrate_prep(void);
47extern int migrate_prep_local(void); 38extern int migrate_prep_local(void);
48extern int migrate_vmas(struct mm_struct *mm,
49 const nodemask_t *from, const nodemask_t *to,
50 unsigned long flags);
51extern void migrate_page_copy(struct page *newpage, struct page *page); 39extern void migrate_page_copy(struct page *newpage, struct page *page);
52extern int migrate_huge_page_move_mapping(struct address_space *mapping, 40extern int migrate_huge_page_move_mapping(struct address_space *mapping,
53 struct page *newpage, struct page *page); 41 struct page *newpage, struct page *page);
@@ -66,13 +54,6 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
66static inline int migrate_prep(void) { return -ENOSYS; } 54static inline int migrate_prep(void) { return -ENOSYS; }
67static inline int migrate_prep_local(void) { return -ENOSYS; } 55static inline int migrate_prep_local(void) { return -ENOSYS; }
68 56
69static inline int migrate_vmas(struct mm_struct *mm,
70 const nodemask_t *from, const nodemask_t *to,
71 unsigned long flags)
72{
73 return -ENOSYS;
74}
75
76static inline void migrate_page_copy(struct page *newpage, 57static inline void migrate_page_copy(struct page *newpage,
77 struct page *page) {} 58 struct page *page) {}
78 59
@@ -82,9 +63,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
82 return -ENOSYS; 63 return -ENOSYS;
83} 64}
84 65
85/* Possible settings for the migrate_page() method in address_operations */
86#define migrate_page NULL
87
88#endif /* CONFIG_MIGRATION */ 66#endif /* CONFIG_MIGRATION */
89 67
90#ifdef CONFIG_NUMA_BALANCING 68#ifdef CONFIG_NUMA_BALANCING
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 379c02648ab3..64d25941b329 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -67,6 +67,8 @@ enum {
67 MLX4_CMD_MAP_ICM_AUX = 0xffc, 67 MLX4_CMD_MAP_ICM_AUX = 0xffc,
68 MLX4_CMD_UNMAP_ICM_AUX = 0xffb, 68 MLX4_CMD_UNMAP_ICM_AUX = 0xffb,
69 MLX4_CMD_SET_ICM_SIZE = 0xffd, 69 MLX4_CMD_SET_ICM_SIZE = 0xffd,
70 MLX4_CMD_ACCESS_REG = 0x3b,
71
70 /*master notify fw on finish for slave's flr*/ 72 /*master notify fw on finish for slave's flr*/
71 MLX4_CMD_INFORM_FLR_DONE = 0x5b, 73 MLX4_CMD_INFORM_FLR_DONE = 0x5b,
72 MLX4_CMD_GET_OP_REQ = 0x59, 74 MLX4_CMD_GET_OP_REQ = 0x59,
@@ -197,6 +199,33 @@ enum {
197 MLX4_CMD_NATIVE 199 MLX4_CMD_NATIVE
198}; 200};
199 201
202/*
203 * MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP -
204 * Receive checksum value is reported in CQE also for non TCP/UDP packets.
205 *
206 * MLX4_RX_CSUM_MODE_L4 -
207 * L4_CSUM bit in CQE, which indicates whether or not L4 checksum
208 * was validated correctly, is supported.
209 *
210 * MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP -
211 * IP_OK CQE's field is supported also for non TCP/UDP IP packets.
212 *
213 * MLX4_RX_CSUM_MODE_MULTI_VLAN -
214 * Receive Checksum offload is supported for packets with more than 2 vlan headers.
215 */
216enum mlx4_rx_csum_mode {
217 MLX4_RX_CSUM_MODE_VAL_NON_TCP_UDP = 1UL << 0,
218 MLX4_RX_CSUM_MODE_L4 = 1UL << 1,
219 MLX4_RX_CSUM_MODE_IP_OK_IP_NON_TCP_UDP = 1UL << 2,
220 MLX4_RX_CSUM_MODE_MULTI_VLAN = 1UL << 3
221};
222
223struct mlx4_config_dev_params {
224 u16 vxlan_udp_dport;
225 u8 rx_csum_flags_port_1;
226 u8 rx_csum_flags_port_2;
227};
228
200struct mlx4_dev; 229struct mlx4_dev;
201 230
202struct mlx4_cmd_mailbox { 231struct mlx4_cmd_mailbox {
@@ -248,6 +277,8 @@ int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
248int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting); 277int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
249int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf); 278int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf);
250int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state); 279int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state);
280int mlx4_config_dev_retrieval(struct mlx4_dev *dev,
281 struct mlx4_config_dev_params *params);
251/* 282/*
252 * mlx4_get_slave_default_vlan - 283 * mlx4_get_slave_default_vlan -
253 * return true if VST ( default vlan) 284 * return true if VST ( default vlan)
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index a5b7d7cfcedf..25c791e295fd 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -38,6 +38,7 @@
38#include <linux/completion.h> 38#include <linux/completion.h>
39#include <linux/radix-tree.h> 39#include <linux/radix-tree.h>
40#include <linux/cpu_rmap.h> 40#include <linux/cpu_rmap.h>
41#include <linux/crash_dump.h>
41 42
42#include <linux/atomic.h> 43#include <linux/atomic.h>
43 44
@@ -94,7 +95,7 @@ enum {
94 95
95enum { 96enum {
96 MLX4_MAX_NUM_PF = 16, 97 MLX4_MAX_NUM_PF = 16,
97 MLX4_MAX_NUM_VF = 64, 98 MLX4_MAX_NUM_VF = 126,
98 MLX4_MAX_NUM_VF_P_PORT = 64, 99 MLX4_MAX_NUM_VF_P_PORT = 64,
99 MLX4_MFUNC_MAX = 80, 100 MLX4_MFUNC_MAX = 80,
100 MLX4_MAX_EQ_NUM = 1024, 101 MLX4_MAX_EQ_NUM = 1024,
@@ -116,6 +117,14 @@ enum {
116 MLX4_STEERING_MODE_DEVICE_MANAGED 117 MLX4_STEERING_MODE_DEVICE_MANAGED
117}; 118};
118 119
120enum {
121 MLX4_STEERING_DMFS_A0_DEFAULT,
122 MLX4_STEERING_DMFS_A0_DYNAMIC,
123 MLX4_STEERING_DMFS_A0_STATIC,
124 MLX4_STEERING_DMFS_A0_DISABLE,
125 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED
126};
127
119static inline const char *mlx4_steering_mode_str(int steering_mode) 128static inline const char *mlx4_steering_mode_str(int steering_mode)
120{ 129{
121 switch (steering_mode) { 130 switch (steering_mode) {
@@ -184,19 +193,49 @@ enum {
184 MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9, 193 MLX4_DEV_CAP_FLAG2_DMFS_IPOIB = 1LL << 9,
185 MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10, 194 MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS = 1LL << 10,
186 MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11, 195 MLX4_DEV_CAP_FLAG2_MAD_DEMUX = 1LL << 11,
196 MLX4_DEV_CAP_FLAG2_CQE_STRIDE = 1LL << 12,
197 MLX4_DEV_CAP_FLAG2_EQE_STRIDE = 1LL << 13,
198 MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL = 1LL << 14,
199 MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP = 1LL << 15,
200 MLX4_DEV_CAP_FLAG2_CONFIG_DEV = 1LL << 16,
201 MLX4_DEV_CAP_FLAG2_SYS_EQS = 1LL << 17,
202 MLX4_DEV_CAP_FLAG2_80_VFS = 1LL << 18,
203 MLX4_DEV_CAP_FLAG2_FS_A0 = 1LL << 19
204};
205
206enum {
207 MLX4_QUERY_FUNC_FLAGS_BF_RES_QP = 1LL << 0,
208 MLX4_QUERY_FUNC_FLAGS_A0_RES_QP = 1LL << 1
209};
210
211/* bit enums for an 8-bit flags field indicating special use
212 * QPs which require special handling in qp_reserve_range.
213 * Currently, this only includes QPs used by the ETH interface,
214 * where we expect to use blueflame. These QPs must not have
215 * bits 6 and 7 set in their qp number.
216 *
217 * This enum may use only bits 0..7.
218 */
219enum {
220 MLX4_RESERVE_A0_QP = 1 << 6,
221 MLX4_RESERVE_ETH_BF_QP = 1 << 7,
187}; 222};
188 223
189enum { 224enum {
190 MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0, 225 MLX4_DEV_CAP_64B_EQE_ENABLED = 1LL << 0,
191 MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1 226 MLX4_DEV_CAP_64B_CQE_ENABLED = 1LL << 1,
227 MLX4_DEV_CAP_CQE_STRIDE_ENABLED = 1LL << 2,
228 MLX4_DEV_CAP_EQE_STRIDE_ENABLED = 1LL << 3
192}; 229};
193 230
194enum { 231enum {
195 MLX4_USER_DEV_CAP_64B_CQE = 1L << 0 232 MLX4_USER_DEV_CAP_LARGE_CQE = 1L << 0
196}; 233};
197 234
198enum { 235enum {
199 MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0 236 MLX4_FUNC_CAP_64B_EQE_CQE = 1L << 0,
237 MLX4_FUNC_CAP_EQE_CQE_STRIDE = 1L << 1,
238 MLX4_FUNC_CAP_DMFS_A0_STATIC = 1L << 2
200}; 239};
201 240
202 241
@@ -322,6 +361,8 @@ enum {
322 361
323enum mlx4_qp_region { 362enum mlx4_qp_region {
324 MLX4_QP_REGION_FW = 0, 363 MLX4_QP_REGION_FW = 0,
364 MLX4_QP_REGION_RSS_RAW_ETH,
365 MLX4_QP_REGION_BOTTOM = MLX4_QP_REGION_RSS_RAW_ETH,
325 MLX4_QP_REGION_ETH_ADDR, 366 MLX4_QP_REGION_ETH_ADDR,
326 MLX4_QP_REGION_FC_ADDR, 367 MLX4_QP_REGION_FC_ADDR,
327 MLX4_QP_REGION_FC_EXCH, 368 MLX4_QP_REGION_FC_EXCH,
@@ -373,6 +414,13 @@ enum {
373#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ 414#define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \
374 MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK) 415 MLX4_EQ_PORT_INFO_MSTR_SM_LID_CHANGE_MASK)
375 416
417enum mlx4_module_id {
418 MLX4_MODULE_ID_SFP = 0x3,
419 MLX4_MODULE_ID_QSFP = 0xC,
420 MLX4_MODULE_ID_QSFP_PLUS = 0xD,
421 MLX4_MODULE_ID_QSFP28 = 0x11,
422};
423
376static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) 424static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
377{ 425{
378 return (major << 32) | (minor << 16) | subminor; 426 return (major << 32) | (minor << 16) | subminor;
@@ -427,6 +475,7 @@ struct mlx4_caps {
427 int num_cqs; 475 int num_cqs;
428 int max_cqes; 476 int max_cqes;
429 int reserved_cqs; 477 int reserved_cqs;
478 int num_sys_eqs;
430 int num_eqs; 479 int num_eqs;
431 int reserved_eqs; 480 int reserved_eqs;
432 int num_comp_vectors; 481 int num_comp_vectors;
@@ -443,6 +492,7 @@ struct mlx4_caps {
443 int reserved_mcgs; 492 int reserved_mcgs;
444 int num_qp_per_mgm; 493 int num_qp_per_mgm;
445 int steering_mode; 494 int steering_mode;
495 int dmfs_high_steer_mode;
446 int fs_log_max_ucast_qp_range_size; 496 int fs_log_max_ucast_qp_range_size;
447 int num_pds; 497 int num_pds;
448 int reserved_pds; 498 int reserved_pds;
@@ -481,6 +531,10 @@ struct mlx4_caps {
481 u16 hca_core_clock; 531 u16 hca_core_clock;
482 u64 phys_port_id[MLX4_MAX_PORTS + 1]; 532 u64 phys_port_id[MLX4_MAX_PORTS + 1];
483 int tunnel_offload_mode; 533 int tunnel_offload_mode;
534 u8 rx_checksum_flags_port[MLX4_MAX_PORTS + 1];
535 u8 alloc_res_qp_mask;
536 u32 dmfs_high_rate_qpn_base;
537 u32 dmfs_high_rate_qpn_range;
484}; 538};
485 539
486struct mlx4_buf_list { 540struct mlx4_buf_list {
@@ -577,7 +631,7 @@ struct mlx4_uar {
577}; 631};
578 632
579struct mlx4_bf { 633struct mlx4_bf {
580 unsigned long offset; 634 unsigned int offset;
581 int buf_size; 635 int buf_size;
582 struct mlx4_uar *uar; 636 struct mlx4_uar *uar;
583 void __iomem *reg; 637 void __iomem *reg;
@@ -601,6 +655,11 @@ struct mlx4_cq {
601 655
602 atomic_t refcount; 656 atomic_t refcount;
603 struct completion free; 657 struct completion free;
658 struct {
659 struct list_head list;
660 void (*comp)(struct mlx4_cq *);
661 void *priv;
662 } tasklet_ctx;
604}; 663};
605 664
606struct mlx4_qp { 665struct mlx4_qp {
@@ -701,6 +760,7 @@ struct mlx4_dev {
701 u64 regid_promisc_array[MLX4_MAX_PORTS + 1]; 760 u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
702 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1]; 761 u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
703 struct mlx4_vf_dev *dev_vfs; 762 struct mlx4_vf_dev *dev_vfs;
763 int nvfs[MLX4_MAX_PORTS + 1];
704}; 764};
705 765
706struct mlx4_eqe { 766struct mlx4_eqe {
@@ -792,6 +852,26 @@ struct mlx4_init_port_param {
792 u64 si_guid; 852 u64 si_guid;
793}; 853};
794 854
855#define MAD_IFC_DATA_SZ 192
856/* MAD IFC Mailbox */
857struct mlx4_mad_ifc {
858 u8 base_version;
859 u8 mgmt_class;
860 u8 class_version;
861 u8 method;
862 __be16 status;
863 __be16 class_specific;
864 __be64 tid;
865 __be16 attr_id;
866 __be16 resv;
867 __be32 attr_mod;
868 __be64 mkey;
869 __be16 dr_slid;
870 __be16 dr_dlid;
871 u8 reserved[28];
872 u8 data[MAD_IFC_DATA_SZ];
873} __packed;
874
795#define mlx4_foreach_port(port, dev, type) \ 875#define mlx4_foreach_port(port, dev, type) \
796 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \ 876 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
797 if ((type) == (dev)->caps.port_mask[(port)]) 877 if ((type) == (dev)->caps.port_mask[(port)])
@@ -828,7 +908,9 @@ static inline int mlx4_num_reserved_sqps(struct mlx4_dev *dev)
828static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) 908static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn)
829{ 909{
830 return (qpn < dev->phys_caps.base_sqpn + 8 + 910 return (qpn < dev->phys_caps.base_sqpn + 8 +
831 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev)); 911 16 * MLX4_MFUNC_MAX * !!mlx4_is_master(dev) &&
912 qpn >= dev->phys_caps.base_sqpn) ||
913 (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]);
832} 914}
833 915
834static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) 916static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn)
@@ -904,8 +986,8 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
904 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, 986 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
905 unsigned vector, int collapsed, int timestamp_en); 987 unsigned vector, int collapsed, int timestamp_en);
906void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); 988void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
907 989int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
908int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base); 990 int *base, u8 flags);
909void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt); 991void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
910 992
911int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, 993int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
@@ -1276,10 +1358,50 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
1276 u64 iova, u64 size, int npages, 1358 u64 iova, u64 size, int npages,
1277 int page_shift, struct mlx4_mpt_entry *mpt_entry); 1359 int page_shift, struct mlx4_mpt_entry *mpt_entry);
1278 1360
1361int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
1362 u16 offset, u16 size, u8 *data);
1363
1279/* Returns true if running in low memory profile (kdump kernel) */ 1364/* Returns true if running in low memory profile (kdump kernel) */
1280static inline bool mlx4_low_memory_profile(void) 1365static inline bool mlx4_low_memory_profile(void)
1281{ 1366{
1282 return reset_devices; 1367 return is_kdump_kernel();
1283} 1368}
1284 1369
1370/* ACCESS REG commands */
1371enum mlx4_access_reg_method {
1372 MLX4_ACCESS_REG_QUERY = 0x1,
1373 MLX4_ACCESS_REG_WRITE = 0x2,
1374};
1375
1376/* ACCESS PTYS Reg command */
1377enum mlx4_ptys_proto {
1378 MLX4_PTYS_IB = 1<<0,
1379 MLX4_PTYS_EN = 1<<2,
1380};
1381
1382struct mlx4_ptys_reg {
1383 u8 resrvd1;
1384 u8 local_port;
1385 u8 resrvd2;
1386 u8 proto_mask;
1387 __be32 resrvd3[2];
1388 __be32 eth_proto_cap;
1389 __be16 ib_width_cap;
1390 __be16 ib_speed_cap;
1391 __be32 resrvd4;
1392 __be32 eth_proto_admin;
1393 __be16 ib_width_admin;
1394 __be16 ib_speed_admin;
1395 __be32 resrvd5;
1396 __be32 eth_proto_oper;
1397 __be16 ib_width_oper;
1398 __be16 ib_speed_oper;
1399 __be32 resrvd6;
1400 __be32 eth_proto_lp_adv;
1401} __packed;
1402
1403int mlx4_ACCESS_PTYS_REG(struct mlx4_dev *dev,
1404 enum mlx4_access_reg_method method,
1405 struct mlx4_ptys_reg *ptys_reg);
1406
1285#endif /* MLX4_DEVICE_H */ 1407#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 5f4e36cf0091..467ccdf94c98 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -120,13 +120,15 @@ enum {
120 MLX4_RSS_QPC_FLAG_OFFSET = 13, 120 MLX4_RSS_QPC_FLAG_OFFSET = 13,
121}; 121};
122 122
123#define MLX4_EN_RSS_KEY_SIZE 40
124
123struct mlx4_rss_context { 125struct mlx4_rss_context {
124 __be32 base_qpn; 126 __be32 base_qpn;
125 __be32 default_qpn; 127 __be32 default_qpn;
126 u16 reserved; 128 u16 reserved;
127 u8 hash_fn; 129 u8 hash_fn;
128 u8 flags; 130 u8 flags;
129 __be32 rss_key[10]; 131 __be32 rss_key[MLX4_EN_RSS_KEY_SIZE / sizeof(__be32)];
130 __be32 base_qpn_udp; 132 __be32 base_qpn_udp;
131}; 133};
132 134
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 334947151dfc..4e5bd813bb9a 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -44,6 +44,50 @@
44#error Host endianness not defined 44#error Host endianness not defined
45#endif 45#endif
46 46
47/* helper macros */
48#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
49#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
50#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
51#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
52#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
53#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
54#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
55#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
56#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
57
58#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
59#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
60#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
61#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
62#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld))
63
64/* insert a value to a struct */
65#define MLX5_SET(typ, p, fld, v) do { \
66 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
67 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
68 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
69 (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \
70 << __mlx5_dw_bit_off(typ, fld))); \
71} while (0)
72
73#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\
74__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
75__mlx5_mask(typ, fld))
76
77#define MLX5_GET_PR(typ, p, fld) ({ \
78 u32 ___t = MLX5_GET(typ, p, fld); \
79 pr_debug(#fld " = 0x%x\n", ___t); \
80 ___t; \
81})
82
83#define MLX5_SET64(typ, p, fld, v) do { \
84 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
85 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
86 *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
87} while (0)
88
89#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
90
47enum { 91enum {
48 MLX5_MAX_COMMANDS = 32, 92 MLX5_MAX_COMMANDS = 32,
49 MLX5_CMD_DATA_BLOCK_SIZE = 512, 93 MLX5_CMD_DATA_BLOCK_SIZE = 512,
@@ -71,6 +115,20 @@ enum {
71}; 115};
72 116
73enum { 117enum {
118 MLX5_MIN_PKEY_TABLE_SIZE = 128,
119 MLX5_MAX_LOG_PKEY_TABLE = 5,
120};
121
122enum {
123 MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31
124};
125
126enum {
127 MLX5_PFAULT_SUBTYPE_WQE = 0,
128 MLX5_PFAULT_SUBTYPE_RDMA = 1,
129};
130
131enum {
74 MLX5_PERM_LOCAL_READ = 1 << 2, 132 MLX5_PERM_LOCAL_READ = 1 << 2,
75 MLX5_PERM_LOCAL_WRITE = 1 << 3, 133 MLX5_PERM_LOCAL_WRITE = 1 << 3,
76 MLX5_PERM_REMOTE_READ = 1 << 4, 134 MLX5_PERM_REMOTE_READ = 1 << 4,
@@ -131,6 +189,19 @@ enum {
131 MLX5_MKEY_MASK_FREE = 1ull << 29, 189 MLX5_MKEY_MASK_FREE = 1ull << 29,
132}; 190};
133 191
192enum {
193 MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),
194
195 MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
196 MLX5_UMR_CHECK_FREE = (2 << 5),
197
198 MLX5_UMR_INLINE = (1 << 7),
199};
200
201#define MLX5_UMR_MTT_ALIGNMENT 0x40
202#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
203#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
204
134enum mlx5_event { 205enum mlx5_event {
135 MLX5_EVENT_TYPE_COMP = 0x0, 206 MLX5_EVENT_TYPE_COMP = 0x0,
136 207
@@ -157,6 +228,8 @@ enum mlx5_event {
157 228
158 MLX5_EVENT_TYPE_CMD = 0x0a, 229 MLX5_EVENT_TYPE_CMD = 0x0a,
159 MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, 230 MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,
231
232 MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
160}; 233};
161 234
162enum { 235enum {
@@ -170,11 +243,7 @@ enum {
170}; 243};
171 244
172enum { 245enum {
173 MLX5_DEV_CAP_FLAG_RC = 1LL << 0,
174 MLX5_DEV_CAP_FLAG_UC = 1LL << 1,
175 MLX5_DEV_CAP_FLAG_UD = 1LL << 2,
176 MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, 246 MLX5_DEV_CAP_FLAG_XRC = 1LL << 3,
177 MLX5_DEV_CAP_FLAG_SRQ = 1LL << 6,
178 MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, 247 MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
179 MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, 248 MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
180 MLX5_DEV_CAP_FLAG_APM = 1LL << 17, 249 MLX5_DEV_CAP_FLAG_APM = 1LL << 17,
@@ -183,11 +252,8 @@ enum {
183 MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, 252 MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24,
184 MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, 253 MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29,
185 MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, 254 MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30,
186 MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, 255 MLX5_DEV_CAP_FLAG_DCT = 1LL << 37,
187 MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38,
188 MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39,
189 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, 256 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40,
190 MLX5_DEV_CAP_FLAG_DCT = 1LL << 41,
191 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, 257 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46,
192}; 258};
193 259
@@ -243,10 +309,16 @@ enum {
243}; 309};
244 310
245enum { 311enum {
246 MLX5_CAP_OFF_DCT = 41,
247 MLX5_CAP_OFF_CMDIF_CSUM = 46, 312 MLX5_CAP_OFF_CMDIF_CSUM = 46,
248}; 313};
249 314
315enum {
316 HCA_CAP_OPMOD_GET_MAX = 0,
317 HCA_CAP_OPMOD_GET_CUR = 1,
318 HCA_CAP_OPMOD_GET_ODP_MAX = 4,
319 HCA_CAP_OPMOD_GET_ODP_CUR = 5
320};
321
250struct mlx5_inbox_hdr { 322struct mlx5_inbox_hdr {
251 __be16 opcode; 323 __be16 opcode;
252 u8 rsvd[4]; 324 u8 rsvd[4];
@@ -274,101 +346,23 @@ struct mlx5_cmd_query_adapter_mbox_out {
274 u8 vsd_psid[16]; 346 u8 vsd_psid[16];
275}; 347};
276 348
277struct mlx5_hca_cap { 349enum mlx5_odp_transport_cap_bits {
278 u8 rsvd1[16]; 350 MLX5_ODP_SUPPORT_SEND = 1 << 31,
279 u8 log_max_srq_sz; 351 MLX5_ODP_SUPPORT_RECV = 1 << 30,
280 u8 log_max_qp_sz; 352 MLX5_ODP_SUPPORT_WRITE = 1 << 29,
281 u8 rsvd2; 353 MLX5_ODP_SUPPORT_READ = 1 << 28,
282 u8 log_max_qp;
283 u8 log_max_strq_sz;
284 u8 log_max_srqs;
285 u8 rsvd4[2];
286 u8 rsvd5;
287 u8 log_max_cq_sz;
288 u8 rsvd6;
289 u8 log_max_cq;
290 u8 log_max_eq_sz;
291 u8 log_max_mkey;
292 u8 rsvd7;
293 u8 log_max_eq;
294 u8 max_indirection;
295 u8 log_max_mrw_sz;
296 u8 log_max_bsf_list_sz;
297 u8 log_max_klm_list_sz;
298 u8 rsvd_8_0;
299 u8 log_max_ra_req_dc;
300 u8 rsvd_8_1;
301 u8 log_max_ra_res_dc;
302 u8 rsvd9;
303 u8 log_max_ra_req_qp;
304 u8 rsvd10;
305 u8 log_max_ra_res_qp;
306 u8 rsvd11[4];
307 __be16 max_qp_count;
308 __be16 rsvd12;
309 u8 rsvd13;
310 u8 local_ca_ack_delay;
311 u8 rsvd14;
312 u8 num_ports;
313 u8 log_max_msg;
314 u8 rsvd15[3];
315 __be16 stat_rate_support;
316 u8 rsvd16[2];
317 __be64 flags;
318 u8 rsvd17;
319 u8 uar_sz;
320 u8 rsvd18;
321 u8 log_pg_sz;
322 __be16 bf_log_bf_reg_size;
323 u8 rsvd19[4];
324 __be16 max_desc_sz_sq;
325 u8 rsvd20[2];
326 __be16 max_desc_sz_rq;
327 u8 rsvd21[2];
328 __be16 max_desc_sz_sq_dc;
329 __be32 max_qp_mcg;
330 u8 rsvd22[3];
331 u8 log_max_mcg;
332 u8 rsvd23;
333 u8 log_max_pd;
334 u8 rsvd24;
335 u8 log_max_xrcd;
336 u8 rsvd25[42];
337 __be16 log_uar_page_sz;
338 u8 rsvd26[28];
339 u8 log_max_atomic_size_qp;
340 u8 rsvd27[2];
341 u8 log_max_atomic_size_dc;
342 u8 rsvd28[76];
343};
344
345
346struct mlx5_cmd_query_hca_cap_mbox_in {
347 struct mlx5_inbox_hdr hdr;
348 u8 rsvd[8];
349};
350
351
352struct mlx5_cmd_query_hca_cap_mbox_out {
353 struct mlx5_outbox_hdr hdr;
354 u8 rsvd0[8];
355 struct mlx5_hca_cap hca_cap;
356}; 354};
357 355
358 356struct mlx5_odp_caps {
359struct mlx5_cmd_set_hca_cap_mbox_in { 357 char reserved[0x10];
360 struct mlx5_inbox_hdr hdr; 358 struct {
361 u8 rsvd[8]; 359 __be32 rc_odp_caps;
362 struct mlx5_hca_cap hca_cap; 360 __be32 uc_odp_caps;
361 __be32 ud_odp_caps;
362 } per_transport_caps;
363 char reserved2[0xe4];
363}; 364};
364 365
365
366struct mlx5_cmd_set_hca_cap_mbox_out {
367 struct mlx5_outbox_hdr hdr;
368 u8 rsvd0[8];
369};
370
371
372struct mlx5_cmd_init_hca_mbox_in { 366struct mlx5_cmd_init_hca_mbox_in {
373 struct mlx5_inbox_hdr hdr; 367 struct mlx5_inbox_hdr hdr;
374 u8 rsvd0[2]; 368 u8 rsvd0[2];
@@ -489,6 +483,27 @@ struct mlx5_eqe_page_req {
489 __be32 rsvd1[5]; 483 __be32 rsvd1[5];
490}; 484};
491 485
486struct mlx5_eqe_page_fault {
487 __be32 bytes_committed;
488 union {
489 struct {
490 u16 reserved1;
491 __be16 wqe_index;
492 u16 reserved2;
493 __be16 packet_length;
494 u8 reserved3[12];
495 } __packed wqe;
496 struct {
497 __be32 r_key;
498 u16 reserved1;
499 __be16 packet_length;
500 __be32 rdma_op_len;
501 __be64 rdma_va;
502 } __packed rdma;
503 } __packed;
504 __be32 flags_qpn;
505} __packed;
506
492union ev_data { 507union ev_data {
493 __be32 raw[7]; 508 __be32 raw[7];
494 struct mlx5_eqe_cmd cmd; 509 struct mlx5_eqe_cmd cmd;
@@ -500,6 +515,7 @@ union ev_data {
500 struct mlx5_eqe_congestion cong; 515 struct mlx5_eqe_congestion cong;
501 struct mlx5_eqe_stall_vl stall_vl; 516 struct mlx5_eqe_stall_vl stall_vl;
502 struct mlx5_eqe_page_req req_pages; 517 struct mlx5_eqe_page_req req_pages;
518 struct mlx5_eqe_page_fault page_fault;
503} __packed; 519} __packed;
504 520
505struct mlx5_eqe { 521struct mlx5_eqe {
@@ -826,6 +842,10 @@ struct mlx5_query_eq_mbox_out {
826 struct mlx5_eq_context ctx; 842 struct mlx5_eq_context ctx;
827}; 843};
828 844
845enum {
846 MLX5_MKEY_STATUS_FREE = 1 << 6,
847};
848
829struct mlx5_mkey_seg { 849struct mlx5_mkey_seg {
830 /* This is a two bit field occupying bits 31-30. 850 /* This is a two bit field occupying bits 31-30.
831 * bit 31 is always 0, 851 * bit 31 is always 0,
@@ -862,7 +882,7 @@ struct mlx5_query_special_ctxs_mbox_out {
862struct mlx5_create_mkey_mbox_in { 882struct mlx5_create_mkey_mbox_in {
863 struct mlx5_inbox_hdr hdr; 883 struct mlx5_inbox_hdr hdr;
864 __be32 input_mkey_index; 884 __be32 input_mkey_index;
865 u8 rsvd0[4]; 885 __be32 flags;
866 struct mlx5_mkey_seg seg; 886 struct mlx5_mkey_seg seg;
867 u8 rsvd1[16]; 887 u8 rsvd1[16];
868 __be32 xlat_oct_act_size; 888 __be32 xlat_oct_act_size;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index b88e9b46d957..166d9315fe4b 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -44,6 +44,7 @@
44 44
45#include <linux/mlx5/device.h> 45#include <linux/mlx5/device.h>
46#include <linux/mlx5/doorbell.h> 46#include <linux/mlx5/doorbell.h>
47#include <linux/mlx5/mlx5_ifc.h>
47 48
48enum { 49enum {
49 MLX5_BOARD_ID_LEN = 64, 50 MLX5_BOARD_ID_LEN = 64,
@@ -99,81 +100,6 @@ enum {
99}; 100};
100 101
101enum { 102enum {
102 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
103 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
104 MLX5_CMD_OP_INIT_HCA = 0x102,
105 MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
106 MLX5_CMD_OP_ENABLE_HCA = 0x104,
107 MLX5_CMD_OP_DISABLE_HCA = 0x105,
108 MLX5_CMD_OP_QUERY_PAGES = 0x107,
109 MLX5_CMD_OP_MANAGE_PAGES = 0x108,
110 MLX5_CMD_OP_SET_HCA_CAP = 0x109,
111
112 MLX5_CMD_OP_CREATE_MKEY = 0x200,
113 MLX5_CMD_OP_QUERY_MKEY = 0x201,
114 MLX5_CMD_OP_DESTROY_MKEY = 0x202,
115 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
116
117 MLX5_CMD_OP_CREATE_EQ = 0x301,
118 MLX5_CMD_OP_DESTROY_EQ = 0x302,
119 MLX5_CMD_OP_QUERY_EQ = 0x303,
120
121 MLX5_CMD_OP_CREATE_CQ = 0x400,
122 MLX5_CMD_OP_DESTROY_CQ = 0x401,
123 MLX5_CMD_OP_QUERY_CQ = 0x402,
124 MLX5_CMD_OP_MODIFY_CQ = 0x403,
125
126 MLX5_CMD_OP_CREATE_QP = 0x500,
127 MLX5_CMD_OP_DESTROY_QP = 0x501,
128 MLX5_CMD_OP_RST2INIT_QP = 0x502,
129 MLX5_CMD_OP_INIT2RTR_QP = 0x503,
130 MLX5_CMD_OP_RTR2RTS_QP = 0x504,
131 MLX5_CMD_OP_RTS2RTS_QP = 0x505,
132 MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
133 MLX5_CMD_OP_2ERR_QP = 0x507,
134 MLX5_CMD_OP_RTS2SQD_QP = 0x508,
135 MLX5_CMD_OP_SQD2RTS_QP = 0x509,
136 MLX5_CMD_OP_2RST_QP = 0x50a,
137 MLX5_CMD_OP_QUERY_QP = 0x50b,
138 MLX5_CMD_OP_CONF_SQP = 0x50c,
139 MLX5_CMD_OP_MAD_IFC = 0x50d,
140 MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
141 MLX5_CMD_OP_SUSPEND_QP = 0x50f,
142 MLX5_CMD_OP_UNSUSPEND_QP = 0x510,
143 MLX5_CMD_OP_SQD2SQD_QP = 0x511,
144 MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512,
145 MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513,
146 MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514,
147
148 MLX5_CMD_OP_CREATE_PSV = 0x600,
149 MLX5_CMD_OP_DESTROY_PSV = 0x601,
150 MLX5_CMD_OP_QUERY_PSV = 0x602,
151 MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603,
152 MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604,
153
154 MLX5_CMD_OP_CREATE_SRQ = 0x700,
155 MLX5_CMD_OP_DESTROY_SRQ = 0x701,
156 MLX5_CMD_OP_QUERY_SRQ = 0x702,
157 MLX5_CMD_OP_ARM_RQ = 0x703,
158 MLX5_CMD_OP_RESIZE_SRQ = 0x704,
159
160 MLX5_CMD_OP_ALLOC_PD = 0x800,
161 MLX5_CMD_OP_DEALLOC_PD = 0x801,
162 MLX5_CMD_OP_ALLOC_UAR = 0x802,
163 MLX5_CMD_OP_DEALLOC_UAR = 0x803,
164
165 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
166 MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
167
168
169 MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
170 MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
171
172 MLX5_CMD_OP_ACCESS_REG = 0x805,
173 MLX5_CMD_OP_MAX = 0x810,
174};
175
176enum {
177 MLX5_REG_PCAP = 0x5001, 103 MLX5_REG_PCAP = 0x5001,
178 MLX5_REG_PMTU = 0x5003, 104 MLX5_REG_PMTU = 0x5003,
179 MLX5_REG_PTYS = 0x5004, 105 MLX5_REG_PTYS = 0x5004,
@@ -187,6 +113,13 @@ enum {
187 MLX5_REG_HOST_ENDIANNESS = 0x7004, 113 MLX5_REG_HOST_ENDIANNESS = 0x7004,
188}; 114};
189 115
116enum mlx5_page_fault_resume_flags {
117 MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
118 MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
119 MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
120 MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,
121};
122
190enum dbg_rsc_type { 123enum dbg_rsc_type {
191 MLX5_DBG_RSC_QP, 124 MLX5_DBG_RSC_QP,
192 MLX5_DBG_RSC_EQ, 125 MLX5_DBG_RSC_EQ,
@@ -335,23 +268,30 @@ struct mlx5_port_caps {
335 int pkey_table_len; 268 int pkey_table_len;
336}; 269};
337 270
338struct mlx5_caps { 271struct mlx5_general_caps {
339 u8 log_max_eq; 272 u8 log_max_eq;
340 u8 log_max_cq; 273 u8 log_max_cq;
341 u8 log_max_qp; 274 u8 log_max_qp;
342 u8 log_max_mkey; 275 u8 log_max_mkey;
343 u8 log_max_pd; 276 u8 log_max_pd;
344 u8 log_max_srq; 277 u8 log_max_srq;
278 u8 log_max_strq;
279 u8 log_max_mrw_sz;
280 u8 log_max_bsf_list_size;
281 u8 log_max_klm_list_size;
345 u32 max_cqes; 282 u32 max_cqes;
346 int max_wqes; 283 int max_wqes;
284 u32 max_eqes;
285 u32 max_indirection;
347 int max_sq_desc_sz; 286 int max_sq_desc_sz;
348 int max_rq_desc_sz; 287 int max_rq_desc_sz;
288 int max_dc_sq_desc_sz;
349 u64 flags; 289 u64 flags;
350 u16 stat_rate_support; 290 u16 stat_rate_support;
351 int log_max_msg; 291 int log_max_msg;
352 int num_ports; 292 int num_ports;
353 int max_ra_res_qp; 293 u8 log_max_ra_res_qp;
354 int max_ra_req_qp; 294 u8 log_max_ra_req_qp;
355 int max_srq_wqes; 295 int max_srq_wqes;
356 int bf_reg_size; 296 int bf_reg_size;
357 int bf_regs_per_page; 297 int bf_regs_per_page;
@@ -363,6 +303,19 @@ struct mlx5_caps {
363 u8 log_max_mcg; 303 u8 log_max_mcg;
364 u32 max_qp_mcg; 304 u32 max_qp_mcg;
365 int min_page_sz; 305 int min_page_sz;
306 int pd_cap;
307 u32 max_qp_counters;
308 u32 pkey_table_size;
309 u8 log_max_ra_req_dc;
310 u8 log_max_ra_res_dc;
311 u32 uar_sz;
312 u8 min_log_pg_sz;
313 u8 log_max_xrcd;
314 u16 log_uar_page_sz;
315};
316
317struct mlx5_caps {
318 struct mlx5_general_caps gen;
366}; 319};
367 320
368struct mlx5_cmd_mailbox { 321struct mlx5_cmd_mailbox {
@@ -429,6 +382,16 @@ struct mlx5_core_mr {
429 u32 pd; 382 u32 pd;
430}; 383};
431 384
385enum mlx5_res_type {
386 MLX5_RES_QP,
387};
388
389struct mlx5_core_rsc_common {
390 enum mlx5_res_type res;
391 atomic_t refcount;
392 struct completion free;
393};
394
432struct mlx5_core_srq { 395struct mlx5_core_srq {
433 u32 srqn; 396 u32 srqn;
434 int max; 397 int max;
@@ -511,7 +474,7 @@ struct mlx5_priv {
511 struct workqueue_struct *pg_wq; 474 struct workqueue_struct *pg_wq;
512 struct rb_root page_root; 475 struct rb_root page_root;
513 int fw_pages; 476 int fw_pages;
514 int reg_pages; 477 atomic_t reg_pages;
515 struct list_head free_list; 478 struct list_head free_list;
516 479
517 struct mlx5_core_health health; 480 struct mlx5_core_health health;
@@ -677,14 +640,6 @@ static inline void *mlx5_vzalloc(unsigned long size)
677 return rtn; 640 return rtn;
678} 641}
679 642
680static inline void mlx5_vfree(const void *addr)
681{
682 if (addr && is_vmalloc_addr(addr))
683 vfree(addr);
684 else
685 kfree(addr);
686}
687
688static inline u32 mlx5_base_mkey(const u32 key) 643static inline u32 mlx5_base_mkey(const u32 key)
689{ 644{
690 return key & 0xffffff00u; 645 return key & 0xffffff00u;
@@ -695,6 +650,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
695void mlx5_cmd_use_events(struct mlx5_core_dev *dev); 650void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
696void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); 651void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
697int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); 652int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
653int mlx5_cmd_status_to_err_v2(void *ptr);
654int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
655 u16 opmod);
698int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, 656int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
699 int out_size); 657 int out_size);
700int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, 658int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
@@ -751,7 +709,10 @@ int mlx5_eq_init(struct mlx5_core_dev *dev);
751void mlx5_eq_cleanup(struct mlx5_core_dev *dev); 709void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
752void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); 710void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
753void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); 711void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn);
754void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type); 712void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
713#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
714void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
715#endif
755void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); 716void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);
756struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); 717struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn);
757void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); 718void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector);
@@ -788,6 +749,9 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
788int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, 749int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
789 int npsvs, u32 *sig_index); 750 int npsvs, u32 *sig_index);
790int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); 751int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
752void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
753int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
754 struct mlx5_odp_caps *odp_caps);
791 755
792static inline u32 mlx5_mkey_to_idx(u32 mkey) 756static inline u32 mlx5_mkey_to_idx(u32 mkey)
793{ 757{
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
new file mode 100644
index 000000000000..5f48b8f592c5
--- /dev/null
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -0,0 +1,349 @@
1/*
2 * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef MLX5_IFC_H
34#define MLX5_IFC_H
35
36enum {
37 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
38 MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
39 MLX5_CMD_OP_INIT_HCA = 0x102,
40 MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
41 MLX5_CMD_OP_ENABLE_HCA = 0x104,
42 MLX5_CMD_OP_DISABLE_HCA = 0x105,
43 MLX5_CMD_OP_QUERY_PAGES = 0x107,
44 MLX5_CMD_OP_MANAGE_PAGES = 0x108,
45 MLX5_CMD_OP_SET_HCA_CAP = 0x109,
46 MLX5_CMD_OP_CREATE_MKEY = 0x200,
47 MLX5_CMD_OP_QUERY_MKEY = 0x201,
48 MLX5_CMD_OP_DESTROY_MKEY = 0x202,
49 MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203,
50 MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204,
51 MLX5_CMD_OP_CREATE_EQ = 0x301,
52 MLX5_CMD_OP_DESTROY_EQ = 0x302,
53 MLX5_CMD_OP_QUERY_EQ = 0x303,
54 MLX5_CMD_OP_GEN_EQE = 0x304,
55 MLX5_CMD_OP_CREATE_CQ = 0x400,
56 MLX5_CMD_OP_DESTROY_CQ = 0x401,
57 MLX5_CMD_OP_QUERY_CQ = 0x402,
58 MLX5_CMD_OP_MODIFY_CQ = 0x403,
59 MLX5_CMD_OP_CREATE_QP = 0x500,
60 MLX5_CMD_OP_DESTROY_QP = 0x501,
61 MLX5_CMD_OP_RST2INIT_QP = 0x502,
62 MLX5_CMD_OP_INIT2RTR_QP = 0x503,
63 MLX5_CMD_OP_RTR2RTS_QP = 0x504,
64 MLX5_CMD_OP_RTS2RTS_QP = 0x505,
65 MLX5_CMD_OP_SQERR2RTS_QP = 0x506,
66 MLX5_CMD_OP_2ERR_QP = 0x507,
67 MLX5_CMD_OP_2RST_QP = 0x50a,
68 MLX5_CMD_OP_QUERY_QP = 0x50b,
69 MLX5_CMD_OP_INIT2INIT_QP = 0x50e,
70 MLX5_CMD_OP_CREATE_PSV = 0x600,
71 MLX5_CMD_OP_DESTROY_PSV = 0x601,
72 MLX5_CMD_OP_CREATE_SRQ = 0x700,
73 MLX5_CMD_OP_DESTROY_SRQ = 0x701,
74 MLX5_CMD_OP_QUERY_SRQ = 0x702,
75 MLX5_CMD_OP_ARM_RQ = 0x703,
76 MLX5_CMD_OP_RESIZE_SRQ = 0x704,
77 MLX5_CMD_OP_CREATE_DCT = 0x710,
78 MLX5_CMD_OP_DESTROY_DCT = 0x711,
79 MLX5_CMD_OP_DRAIN_DCT = 0x712,
80 MLX5_CMD_OP_QUERY_DCT = 0x713,
81 MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714,
82 MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
83 MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
84 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
85 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753,
86 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
87 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755,
88 MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760,
89 MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761,
90 MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770,
91 MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
92 MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
93 MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
94 MLX5_CMD_OP_ALLOC_PD = 0x800,
95 MLX5_CMD_OP_DEALLOC_PD = 0x801,
96 MLX5_CMD_OP_ALLOC_UAR = 0x802,
97 MLX5_CMD_OP_DEALLOC_UAR = 0x803,
98 MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804,
99 MLX5_CMD_OP_ACCESS_REG = 0x805,
100 MLX5_CMD_OP_ATTACH_TO_MCG = 0x806,
101 MLX5_CMD_OP_DETACH_FROM_MCG = 0x807,
102 MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a,
103 MLX5_CMD_OP_MAD_IFC = 0x50d,
104 MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b,
105 MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c,
106 MLX5_CMD_OP_NOP = 0x80d,
107 MLX5_CMD_OP_ALLOC_XRCD = 0x80e,
108 MLX5_CMD_OP_DEALLOC_XRCD = 0x80f,
109 MLX5_CMD_OP_SET_BURST_SIZE = 0x812,
110 MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813,
111 MLX5_CMD_OP_ACTIVATE_TRACER = 0x814,
112 MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815,
113 MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820,
114 MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821,
115 MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822,
116 MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823,
117 MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824,
118 MLX5_CMD_OP_CREATE_TIR = 0x900,
119 MLX5_CMD_OP_MODIFY_TIR = 0x901,
120 MLX5_CMD_OP_DESTROY_TIR = 0x902,
121 MLX5_CMD_OP_QUERY_TIR = 0x903,
122 MLX5_CMD_OP_CREATE_TIS = 0x912,
123 MLX5_CMD_OP_MODIFY_TIS = 0x913,
124 MLX5_CMD_OP_DESTROY_TIS = 0x914,
125 MLX5_CMD_OP_QUERY_TIS = 0x915,
126 MLX5_CMD_OP_CREATE_SQ = 0x904,
127 MLX5_CMD_OP_MODIFY_SQ = 0x905,
128 MLX5_CMD_OP_DESTROY_SQ = 0x906,
129 MLX5_CMD_OP_QUERY_SQ = 0x907,
130 MLX5_CMD_OP_CREATE_RQ = 0x908,
131 MLX5_CMD_OP_MODIFY_RQ = 0x909,
132 MLX5_CMD_OP_DESTROY_RQ = 0x90a,
133 MLX5_CMD_OP_QUERY_RQ = 0x90b,
134 MLX5_CMD_OP_CREATE_RMP = 0x90c,
135 MLX5_CMD_OP_MODIFY_RMP = 0x90d,
136 MLX5_CMD_OP_DESTROY_RMP = 0x90e,
137 MLX5_CMD_OP_QUERY_RMP = 0x90f,
138 MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910,
139 MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911,
140 MLX5_CMD_OP_MAX = 0x911
141};
142
143struct mlx5_ifc_cmd_hca_cap_bits {
144 u8 reserved_0[0x80];
145
146 u8 log_max_srq_sz[0x8];
147 u8 log_max_qp_sz[0x8];
148 u8 reserved_1[0xb];
149 u8 log_max_qp[0x5];
150
151 u8 log_max_strq_sz[0x8];
152 u8 reserved_2[0x3];
153 u8 log_max_srqs[0x5];
154 u8 reserved_3[0x10];
155
156 u8 reserved_4[0x8];
157 u8 log_max_cq_sz[0x8];
158 u8 reserved_5[0xb];
159 u8 log_max_cq[0x5];
160
161 u8 log_max_eq_sz[0x8];
162 u8 reserved_6[0x2];
163 u8 log_max_mkey[0x6];
164 u8 reserved_7[0xc];
165 u8 log_max_eq[0x4];
166
167 u8 max_indirection[0x8];
168 u8 reserved_8[0x1];
169 u8 log_max_mrw_sz[0x7];
170 u8 reserved_9[0x2];
171 u8 log_max_bsf_list_size[0x6];
172 u8 reserved_10[0x2];
173 u8 log_max_klm_list_size[0x6];
174
175 u8 reserved_11[0xa];
176 u8 log_max_ra_req_dc[0x6];
177 u8 reserved_12[0xa];
178 u8 log_max_ra_res_dc[0x6];
179
180 u8 reserved_13[0xa];
181 u8 log_max_ra_req_qp[0x6];
182 u8 reserved_14[0xa];
183 u8 log_max_ra_res_qp[0x6];
184
185 u8 pad_cap[0x1];
186 u8 cc_query_allowed[0x1];
187 u8 cc_modify_allowed[0x1];
188 u8 reserved_15[0x1d];
189
190 u8 reserved_16[0x6];
191 u8 max_qp_cnt[0xa];
192 u8 pkey_table_size[0x10];
193
194 u8 eswitch_owner[0x1];
195 u8 reserved_17[0xa];
196 u8 local_ca_ack_delay[0x5];
197 u8 reserved_18[0x8];
198 u8 num_ports[0x8];
199
200 u8 reserved_19[0x3];
201 u8 log_max_msg[0x5];
202 u8 reserved_20[0x18];
203
204 u8 stat_rate_support[0x10];
205 u8 reserved_21[0x10];
206
207 u8 reserved_22[0x10];
208 u8 cmdif_checksum[0x2];
209 u8 sigerr_cqe[0x1];
210 u8 reserved_23[0x1];
211 u8 wq_signature[0x1];
212 u8 sctr_data_cqe[0x1];
213 u8 reserved_24[0x1];
214 u8 sho[0x1];
215 u8 tph[0x1];
216 u8 rf[0x1];
217 u8 dc[0x1];
218 u8 reserved_25[0x2];
219 u8 roce[0x1];
220 u8 atomic[0x1];
221 u8 rsz_srq[0x1];
222
223 u8 cq_oi[0x1];
224 u8 cq_resize[0x1];
225 u8 cq_moderation[0x1];
226 u8 sniffer_rule_flow[0x1];
227 u8 sniffer_rule_vport[0x1];
228 u8 sniffer_rule_phy[0x1];
229 u8 reserved_26[0x1];
230 u8 pg[0x1];
231 u8 block_lb_mc[0x1];
232 u8 reserved_27[0x3];
233 u8 cd[0x1];
234 u8 reserved_28[0x1];
235 u8 apm[0x1];
236 u8 reserved_29[0x7];
237 u8 qkv[0x1];
238 u8 pkv[0x1];
239 u8 reserved_30[0x4];
240 u8 xrc[0x1];
241 u8 ud[0x1];
242 u8 uc[0x1];
243 u8 rc[0x1];
244
245 u8 reserved_31[0xa];
246 u8 uar_sz[0x6];
247 u8 reserved_32[0x8];
248 u8 log_pg_sz[0x8];
249
250 u8 bf[0x1];
251 u8 reserved_33[0xa];
252 u8 log_bf_reg_size[0x5];
253 u8 reserved_34[0x10];
254
255 u8 reserved_35[0x10];
256 u8 max_wqe_sz_sq[0x10];
257
258 u8 reserved_36[0x10];
259 u8 max_wqe_sz_rq[0x10];
260
261 u8 reserved_37[0x10];
262 u8 max_wqe_sz_sq_dc[0x10];
263
264 u8 reserved_38[0x7];
265 u8 max_qp_mcg[0x19];
266
267 u8 reserved_39[0x18];
268 u8 log_max_mcg[0x8];
269
270 u8 reserved_40[0xb];
271 u8 log_max_pd[0x5];
272 u8 reserved_41[0xb];
273 u8 log_max_xrcd[0x5];
274
275 u8 reserved_42[0x20];
276
277 u8 reserved_43[0x3];
278 u8 log_max_rq[0x5];
279 u8 reserved_44[0x3];
280 u8 log_max_sq[0x5];
281 u8 reserved_45[0x3];
282 u8 log_max_tir[0x5];
283 u8 reserved_46[0x3];
284 u8 log_max_tis[0x5];
285
286 u8 reserved_47[0x13];
287 u8 log_max_rq_per_tir[0x5];
288 u8 reserved_48[0x3];
289 u8 log_max_tis_per_sq[0x5];
290
291 u8 reserved_49[0xe0];
292
293 u8 reserved_50[0x10];
294 u8 log_uar_page_sz[0x10];
295
296 u8 reserved_51[0x100];
297
298 u8 reserved_52[0x1f];
299 u8 cqe_zip[0x1];
300
301 u8 cqe_zip_timeout[0x10];
302 u8 cqe_zip_max_num[0x10];
303
304 u8 reserved_53[0x220];
305};
306
307struct mlx5_ifc_set_hca_cap_in_bits {
308 u8 opcode[0x10];
309 u8 reserved_0[0x10];
310
311 u8 reserved_1[0x10];
312 u8 op_mod[0x10];
313
314 u8 reserved_2[0x40];
315
316 struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct;
317};
318
319struct mlx5_ifc_query_hca_cap_in_bits {
320 u8 opcode[0x10];
321 u8 reserved_0[0x10];
322
323 u8 reserved_1[0x10];
324 u8 op_mod[0x10];
325
326 u8 reserved_2[0x40];
327};
328
329struct mlx5_ifc_query_hca_cap_out_bits {
330 u8 status[0x8];
331 u8 reserved_0[0x18];
332
333 u8 syndrome[0x20];
334
335 u8 reserved_1[0x40];
336
337 u8 capability_struct[256][0x8];
338};
339
340struct mlx5_ifc_set_hca_cap_out_bits {
341 u8 status[0x8];
342 u8 reserved_0[0x18];
343
344 u8 syndrome[0x20];
345
346 u8 reserved_1[0x40];
347};
348
349#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 9709b30e2d69..61f7a342d1bf 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -40,6 +40,18 @@
40#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5) 40#define MLX5_SIG_WQE_SIZE (MLX5_SEND_WQE_BB * 5)
41#define MLX5_DIF_SIZE 8 41#define MLX5_DIF_SIZE 8
42#define MLX5_STRIDE_BLOCK_OP 0x400 42#define MLX5_STRIDE_BLOCK_OP 0x400
43#define MLX5_CPY_GRD_MASK 0xc0
44#define MLX5_CPY_APP_MASK 0x30
45#define MLX5_CPY_REF_MASK 0x0f
46#define MLX5_BSF_INC_REFTAG (1 << 6)
47#define MLX5_BSF_INL_VALID (1 << 15)
48#define MLX5_BSF_REFRESH_DIF (1 << 14)
49#define MLX5_BSF_REPEAT_BLOCK (1 << 7)
50#define MLX5_BSF_APPTAG_ESCAPE 0x1
51#define MLX5_BSF_APPREF_ESCAPE 0x2
52
53#define MLX5_QPN_BITS 24
54#define MLX5_QPN_MASK ((1 << MLX5_QPN_BITS) - 1)
43 55
44enum mlx5_qp_optpar { 56enum mlx5_qp_optpar {
45 MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, 57 MLX5_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0,
@@ -180,6 +192,14 @@ struct mlx5_wqe_ctrl_seg {
180 __be32 imm; 192 __be32 imm;
181}; 193};
182 194
195#define MLX5_WQE_CTRL_DS_MASK 0x3f
196#define MLX5_WQE_CTRL_QPN_MASK 0xffffff00
197#define MLX5_WQE_CTRL_QPN_SHIFT 8
198#define MLX5_WQE_DS_UNITS 16
199#define MLX5_WQE_CTRL_OPCODE_MASK 0xff
200#define MLX5_WQE_CTRL_WQE_INDEX_MASK 0x00ffff00
201#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
202
183struct mlx5_wqe_xrc_seg { 203struct mlx5_wqe_xrc_seg {
184 __be32 xrc_srqn; 204 __be32 xrc_srqn;
185 u8 rsvd[12]; 205 u8 rsvd[12];
@@ -283,10 +303,28 @@ struct mlx5_wqe_signature_seg {
283 u8 rsvd1[11]; 303 u8 rsvd1[11];
284}; 304};
285 305
306#define MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK 0x3ff
307
286struct mlx5_wqe_inline_seg { 308struct mlx5_wqe_inline_seg {
287 __be32 byte_count; 309 __be32 byte_count;
288}; 310};
289 311
312enum mlx5_sig_type {
313 MLX5_DIF_CRC = 0x1,
314 MLX5_DIF_IPCS = 0x2,
315};
316
317struct mlx5_bsf_inl {
318 __be16 vld_refresh;
319 __be16 dif_apptag;
320 __be32 dif_reftag;
321 u8 sig_type;
322 u8 rp_inv_seed;
323 u8 rsvd[3];
324 u8 dif_inc_ref_guard_check;
325 __be16 dif_app_bitmask_check;
326};
327
290struct mlx5_bsf { 328struct mlx5_bsf {
291 struct mlx5_bsf_basic { 329 struct mlx5_bsf_basic {
292 u8 bsf_size_sbs; 330 u8 bsf_size_sbs;
@@ -310,14 +348,8 @@ struct mlx5_bsf {
310 __be32 w_tfs_psv; 348 __be32 w_tfs_psv;
311 __be32 m_tfs_psv; 349 __be32 m_tfs_psv;
312 } ext; 350 } ext;
313 struct mlx5_bsf_inl { 351 struct mlx5_bsf_inl w_inl;
314 __be32 w_inl_vld; 352 struct mlx5_bsf_inl m_inl;
315 __be32 w_rsvd;
316 __be64 w_block_format;
317 __be32 m_inl_vld;
318 __be32 m_rsvd;
319 __be64 m_block_format;
320 } inl;
321}; 353};
322 354
323struct mlx5_klm { 355struct mlx5_klm {
@@ -341,11 +373,47 @@ struct mlx5_stride_block_ctrl_seg {
341 __be16 num_entries; 373 __be16 num_entries;
342}; 374};
343 375
376enum mlx5_pagefault_flags {
377 MLX5_PFAULT_REQUESTOR = 1 << 0,
378 MLX5_PFAULT_WRITE = 1 << 1,
379 MLX5_PFAULT_RDMA = 1 << 2,
380};
381
382/* Contains the details of a pagefault. */
383struct mlx5_pagefault {
384 u32 bytes_committed;
385 u8 event_subtype;
386 enum mlx5_pagefault_flags flags;
387 union {
388 /* Initiator or send message responder pagefault details. */
389 struct {
390 /* Received packet size, only valid for responders. */
391 u32 packet_size;
392 /*
393 * WQE index. Refers to either the send queue or
394 * receive queue, according to event_subtype.
395 */
396 u16 wqe_index;
397 } wqe;
398 /* RDMA responder pagefault details */
399 struct {
400 u32 r_key;
401 /*
402 * Received packet size, minimal size page fault
403 * resolution required for forward progress.
404 */
405 u32 packet_size;
406 u32 rdma_op_len;
407 u64 rdma_va;
408 } rdma;
409 };
410};
411
344struct mlx5_core_qp { 412struct mlx5_core_qp {
413 struct mlx5_core_rsc_common common; /* must be first */
345 void (*event) (struct mlx5_core_qp *, int); 414 void (*event) (struct mlx5_core_qp *, int);
415 void (*pfault_handler)(struct mlx5_core_qp *, struct mlx5_pagefault *);
346 int qpn; 416 int qpn;
347 atomic_t refcount;
348 struct completion free;
349 struct mlx5_rsc_debug *dbg; 417 struct mlx5_rsc_debug *dbg;
350 int pid; 418 int pid;
351}; 419};
@@ -512,6 +580,17 @@ static inline struct mlx5_core_mr *__mlx5_mr_lookup(struct mlx5_core_dev *dev, u
512 return radix_tree_lookup(&dev->priv.mr_table.tree, key); 580 return radix_tree_lookup(&dev->priv.mr_table.tree, key);
513} 581}
514 582
583struct mlx5_page_fault_resume_mbox_in {
584 struct mlx5_inbox_hdr hdr;
585 __be32 flags_qpn;
586 u8 reserved[4];
587};
588
589struct mlx5_page_fault_resume_mbox_out {
590 struct mlx5_outbox_hdr hdr;
591 u8 rsvd[8];
592};
593
515int mlx5_core_create_qp(struct mlx5_core_dev *dev, 594int mlx5_core_create_qp(struct mlx5_core_dev *dev,
516 struct mlx5_core_qp *qp, 595 struct mlx5_core_qp *qp,
517 struct mlx5_create_qp_mbox_in *in, 596 struct mlx5_create_qp_mbox_in *in,
@@ -531,6 +610,10 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev);
531void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev); 610void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
532int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); 611int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
533void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp); 612void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
613#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
614int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
615 u8 context, int error);
616#endif
534 617
535static inline const char *mlx5_qp_type_str(int type) 618static inline const char *mlx5_qp_type_str(int type)
536{ 619{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8981cc882ed2..80fc92a49649 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -18,6 +18,8 @@
18#include <linux/pfn.h> 18#include <linux/pfn.h>
19#include <linux/bit_spinlock.h> 19#include <linux/bit_spinlock.h>
20#include <linux/shrinker.h> 20#include <linux/shrinker.h>
21#include <linux/resource.h>
22#include <linux/page_ext.h>
21 23
22struct mempolicy; 24struct mempolicy;
23struct anon_vma; 25struct anon_vma;
@@ -55,6 +57,17 @@ extern int sysctl_legacy_va_layout;
55#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 57#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
56#endif 58#endif
57 59
60/*
61 * To prevent common memory management code establishing
62 * a zero page mapping on a read fault.
63 * This macro should be defined within <asm/pgtable.h>.
64 * s390 does this to prevent multiplexing of hardware bits
65 * related to the physical page in case of virtualization.
66 */
67#ifndef mm_forbids_zeropage
68#define mm_forbids_zeropage(X) (0)
69#endif
70
58extern unsigned long sysctl_user_reserve_kbytes; 71extern unsigned long sysctl_user_reserve_kbytes;
59extern unsigned long sysctl_admin_reserve_kbytes; 72extern unsigned long sysctl_admin_reserve_kbytes;
60 73
@@ -127,6 +140,7 @@ extern unsigned int kobjsize(const void *objp);
127#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */ 140#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
128#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */ 141#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
129#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ 142#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
143#define VM_ARCH_2 0x02000000
130#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */ 144#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
131 145
132#ifdef CONFIG_MEM_SOFT_DIRTY 146#ifdef CONFIG_MEM_SOFT_DIRTY
@@ -154,6 +168,11 @@ extern unsigned int kobjsize(const void *objp);
154# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */ 168# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
155#endif 169#endif
156 170
171#if defined(CONFIG_X86)
172/* MPX specific bounds table or bounds directory */
173# define VM_MPX VM_ARCH_2
174#endif
175
157#ifndef VM_GROWSUP 176#ifndef VM_GROWSUP
158# define VM_GROWSUP VM_NONE 177# define VM_GROWSUP VM_NONE
159#endif 178#endif
@@ -267,8 +286,6 @@ struct vm_operations_struct {
267 */ 286 */
268 struct mempolicy *(*get_policy)(struct vm_area_struct *vma, 287 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
269 unsigned long addr); 288 unsigned long addr);
270 int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
271 const nodemask_t *to, unsigned long flags);
272#endif 289#endif
273 /* called by sys_remap_file_pages() to populate non-linear mapping */ 290 /* called by sys_remap_file_pages() to populate non-linear mapping */
274 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr, 291 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
@@ -346,6 +363,7 @@ static inline int put_page_unless_one(struct page *page)
346} 363}
347 364
348extern int page_is_ram(unsigned long pfn); 365extern int page_is_ram(unsigned long pfn);
366extern int region_is_ram(resource_size_t phys_addr, unsigned long size);
349 367
350/* Support for virtually mapped pages */ 368/* Support for virtually mapped pages */
351struct page *vmalloc_to_page(const void *addr); 369struct page *vmalloc_to_page(const void *addr);
@@ -553,6 +571,25 @@ static inline void __ClearPageBuddy(struct page *page)
553 atomic_set(&page->_mapcount, -1); 571 atomic_set(&page->_mapcount, -1);
554} 572}
555 573
574#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)
575
576static inline int PageBalloon(struct page *page)
577{
578 return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
579}
580
581static inline void __SetPageBalloon(struct page *page)
582{
583 VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
584 atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
585}
586
587static inline void __ClearPageBalloon(struct page *page)
588{
589 VM_BUG_ON_PAGE(!PageBalloon(page), page);
590 atomic_set(&page->_mapcount, -1);
591}
592
556void put_page(struct page *page); 593void put_page(struct page *page);
557void put_pages_list(struct list_head *pages); 594void put_pages_list(struct list_head *pages);
558 595
@@ -1155,6 +1192,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
1155 1192
1156extern void truncate_pagecache(struct inode *inode, loff_t new); 1193extern void truncate_pagecache(struct inode *inode, loff_t new);
1157extern void truncate_setsize(struct inode *inode, loff_t newsize); 1194extern void truncate_setsize(struct inode *inode, loff_t newsize);
1195void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1158void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); 1196void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1159int truncate_inode_page(struct address_space *mapping, struct page *page); 1197int truncate_inode_page(struct address_space *mapping, struct page *page);
1160int generic_error_remove_page(struct address_space *mapping, struct page *page); 1198int generic_error_remove_page(struct address_space *mapping, struct page *page);
@@ -1213,7 +1251,6 @@ int __set_page_dirty_no_writeback(struct page *page);
1213int redirty_page_for_writepage(struct writeback_control *wbc, 1251int redirty_page_for_writepage(struct writeback_control *wbc,
1214 struct page *page); 1252 struct page *page);
1215void account_page_dirtied(struct page *page, struct address_space *mapping); 1253void account_page_dirtied(struct page *page, struct address_space *mapping);
1216void account_page_writeback(struct page *page);
1217int set_page_dirty(struct page *page); 1254int set_page_dirty(struct page *page);
1218int set_page_dirty_lock(struct page *page); 1255int set_page_dirty_lock(struct page *page);
1219int clear_page_dirty_for_io(struct page *page); 1256int clear_page_dirty_for_io(struct page *page);
@@ -1247,8 +1284,8 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
1247 !vma_growsup(vma->vm_next, addr); 1284 !vma_growsup(vma->vm_next, addr);
1248} 1285}
1249 1286
1250extern pid_t 1287extern struct task_struct *task_of_stack(struct task_struct *task,
1251vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); 1288 struct vm_area_struct *vma, bool in_group);
1252 1289
1253extern unsigned long move_page_tables(struct vm_area_struct *vma, 1290extern unsigned long move_page_tables(struct vm_area_struct *vma,
1254 unsigned long old_addr, struct vm_area_struct *new_vma, 1291 unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -1780,6 +1817,20 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1780 bool *need_rmap_locks); 1817 bool *need_rmap_locks);
1781extern void exit_mmap(struct mm_struct *); 1818extern void exit_mmap(struct mm_struct *);
1782 1819
1820static inline int check_data_rlimit(unsigned long rlim,
1821 unsigned long new,
1822 unsigned long start,
1823 unsigned long end_data,
1824 unsigned long start_data)
1825{
1826 if (rlim < RLIM_INFINITY) {
1827 if (((new - start) + (end_data - start_data)) > rlim)
1828 return -ENOSPC;
1829 }
1830
1831 return 0;
1832}
1833
1783extern int mm_take_all_locks(struct mm_struct *mm); 1834extern int mm_take_all_locks(struct mm_struct *mm);
1784extern void mm_drop_all_locks(struct mm_struct *mm); 1835extern void mm_drop_all_locks(struct mm_struct *mm);
1785 1836
@@ -1901,7 +1952,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
1901#if VM_GROWSUP 1952#if VM_GROWSUP
1902extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); 1953extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1903#else 1954#else
1904 #define expand_upwards(vma, address) do { } while (0) 1955 #define expand_upwards(vma, address) (0)
1905#endif 1956#endif
1906 1957
1907/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ 1958/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
@@ -1939,11 +1990,16 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
1939 1990
1940#ifdef CONFIG_MMU 1991#ifdef CONFIG_MMU
1941pgprot_t vm_get_page_prot(unsigned long vm_flags); 1992pgprot_t vm_get_page_prot(unsigned long vm_flags);
1993void vma_set_page_prot(struct vm_area_struct *vma);
1942#else 1994#else
1943static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) 1995static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
1944{ 1996{
1945 return __pgprot(0); 1997 return __pgprot(0);
1946} 1998}
1999static inline void vma_set_page_prot(struct vm_area_struct *vma)
2000{
2001 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2002}
1947#endif 2003#endif
1948 2004
1949#ifdef CONFIG_NUMA_BALANCING 2005#ifdef CONFIG_NUMA_BALANCING
@@ -1985,6 +2041,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
1985#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */ 2041#define FOLL_HWPOISON 0x100 /* check page is hwpoisoned */
1986#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */ 2042#define FOLL_NUMA 0x200 /* force NUMA hinting page fault */
1987#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */ 2043#define FOLL_MIGRATION 0x400 /* wait for page to replace migration entry */
2044#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
1988 2045
1989typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, 2046typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
1990 void *data); 2047 void *data);
@@ -2002,7 +2059,22 @@ static inline void vm_stat_account(struct mm_struct *mm,
2002#endif /* CONFIG_PROC_FS */ 2059#endif /* CONFIG_PROC_FS */
2003 2060
2004#ifdef CONFIG_DEBUG_PAGEALLOC 2061#ifdef CONFIG_DEBUG_PAGEALLOC
2005extern void kernel_map_pages(struct page *page, int numpages, int enable); 2062extern bool _debug_pagealloc_enabled;
2063extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2064
2065static inline bool debug_pagealloc_enabled(void)
2066{
2067 return _debug_pagealloc_enabled;
2068}
2069
2070static inline void
2071kernel_map_pages(struct page *page, int numpages, int enable)
2072{
2073 if (!debug_pagealloc_enabled())
2074 return;
2075
2076 __kernel_map_pages(page, numpages, enable);
2077}
2006#ifdef CONFIG_HIBERNATION 2078#ifdef CONFIG_HIBERNATION
2007extern bool kernel_page_present(struct page *page); 2079extern bool kernel_page_present(struct page *page);
2008#endif /* CONFIG_HIBERNATION */ 2080#endif /* CONFIG_HIBERNATION */
@@ -2036,9 +2108,9 @@ int drop_caches_sysctl_handler(struct ctl_table *, int,
2036 void __user *, size_t *, loff_t *); 2108 void __user *, size_t *, loff_t *);
2037#endif 2109#endif
2038 2110
2039unsigned long shrink_slab(struct shrink_control *shrink, 2111unsigned long shrink_node_slabs(gfp_t gfp_mask, int nid,
2040 unsigned long nr_pages_scanned, 2112 unsigned long nr_scanned,
2041 unsigned long lru_pages); 2113 unsigned long nr_eligible);
2042 2114
2043#ifndef CONFIG_MMU 2115#ifndef CONFIG_MMU
2044#define randomize_va_space 0 2116#define randomize_va_space 0
@@ -2097,20 +2169,36 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
2097 unsigned int pages_per_huge_page); 2169 unsigned int pages_per_huge_page);
2098#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ 2170#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
2099 2171
2172extern struct page_ext_operations debug_guardpage_ops;
2173extern struct page_ext_operations page_poisoning_ops;
2174
2100#ifdef CONFIG_DEBUG_PAGEALLOC 2175#ifdef CONFIG_DEBUG_PAGEALLOC
2101extern unsigned int _debug_guardpage_minorder; 2176extern unsigned int _debug_guardpage_minorder;
2177extern bool _debug_guardpage_enabled;
2102 2178
2103static inline unsigned int debug_guardpage_minorder(void) 2179static inline unsigned int debug_guardpage_minorder(void)
2104{ 2180{
2105 return _debug_guardpage_minorder; 2181 return _debug_guardpage_minorder;
2106} 2182}
2107 2183
2184static inline bool debug_guardpage_enabled(void)
2185{
2186 return _debug_guardpage_enabled;
2187}
2188
2108static inline bool page_is_guard(struct page *page) 2189static inline bool page_is_guard(struct page *page)
2109{ 2190{
2110 return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags); 2191 struct page_ext *page_ext;
2192
2193 if (!debug_guardpage_enabled())
2194 return false;
2195
2196 page_ext = lookup_page_ext(page);
2197 return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2111} 2198}
2112#else 2199#else
2113static inline unsigned int debug_guardpage_minorder(void) { return 0; } 2200static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2201static inline bool debug_guardpage_enabled(void) { return false; }
2114static inline bool page_is_guard(struct page *page) { return false; } 2202static inline bool page_is_guard(struct page *page) { return false; }
2115#endif /* CONFIG_DEBUG_PAGEALLOC */ 2203#endif /* CONFIG_DEBUG_PAGEALLOC */
2116 2204
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 6e0b286649f1..6d34aa266a8c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -10,7 +10,6 @@
10#include <linux/rwsem.h> 10#include <linux/rwsem.h>
11#include <linux/completion.h> 11#include <linux/completion.h>
12#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/page-debug-flags.h>
14#include <linux/uprobes.h> 13#include <linux/uprobes.h>
15#include <linux/page-flags-layout.h> 14#include <linux/page-flags-layout.h>
16#include <asm/page.h> 15#include <asm/page.h>
@@ -22,6 +21,7 @@
22#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) 21#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
23 22
24struct address_space; 23struct address_space;
24struct mem_cgroup;
25 25
26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) 26#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ 27#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
@@ -167,6 +167,10 @@ struct page {
167 struct page *first_page; /* Compound tail pages */ 167 struct page *first_page; /* Compound tail pages */
168 }; 168 };
169 169
170#ifdef CONFIG_MEMCG
171 struct mem_cgroup *mem_cgroup;
172#endif
173
170 /* 174 /*
171 * On machines where all RAM is mapped into kernel address space, 175 * On machines where all RAM is mapped into kernel address space,
172 * we can simply calculate the virtual address. On machines with 176 * we can simply calculate the virtual address. On machines with
@@ -181,9 +185,6 @@ struct page {
181 void *virtual; /* Kernel virtual address (NULL if 185 void *virtual; /* Kernel virtual address (NULL if
182 not kmapped, ie. highmem) */ 186 not kmapped, ie. highmem) */
183#endif /* WANT_PAGE_VIRTUAL */ 187#endif /* WANT_PAGE_VIRTUAL */
184#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
185 unsigned long debug_flags; /* Use atomic bitops on this */
186#endif
187 188
188#ifdef CONFIG_KMEMCHECK 189#ifdef CONFIG_KMEMCHECK
189 /* 190 /*
@@ -454,6 +455,10 @@ struct mm_struct {
454 bool tlb_flush_pending; 455 bool tlb_flush_pending;
455#endif 456#endif
456 struct uprobes_state uprobes_state; 457 struct uprobes_state uprobes_state;
458#ifdef CONFIG_X86_INTEL_MPX
459 /* address of the bounds directory */
460 void __user *bd_addr;
461#endif
457}; 462};
458 463
459static inline void mm_init_cpumask(struct mm_struct *mm) 464static inline void mm_init_cpumask(struct mm_struct *mm)
@@ -525,4 +530,12 @@ enum tlb_flush_reason {
525 NR_TLB_FLUSH_REASONS, 530 NR_TLB_FLUSH_REASONS,
526}; 531};
527 532
533 /*
534 * A swap entry has to fit into a "unsigned long", as the entry is hidden
535 * in the "index" field of the swapper address space.
536 */
537typedef struct {
538 unsigned long val;
539} swp_entry_t;
540
528#endif /* _LINUX_MM_TYPES_H */ 541#endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index d424b9de3aff..4d69c00497bd 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -42,7 +42,8 @@ struct mmc_csd {
42 unsigned int read_partial:1, 42 unsigned int read_partial:1,
43 read_misalign:1, 43 read_misalign:1,
44 write_partial:1, 44 write_partial:1,
45 write_misalign:1; 45 write_misalign:1,
46 dsr_imp:1;
46}; 47};
47 48
48struct mmc_ext_csd { 49struct mmc_ext_csd {
@@ -74,7 +75,7 @@ struct mmc_ext_csd {
74 unsigned int sec_trim_mult; /* Secure trim multiplier */ 75 unsigned int sec_trim_mult; /* Secure trim multiplier */
75 unsigned int sec_erase_mult; /* Secure erase multiplier */ 76 unsigned int sec_erase_mult; /* Secure erase multiplier */
76 unsigned int trim_timeout; /* In milliseconds */ 77 unsigned int trim_timeout; /* In milliseconds */
77 bool enhanced_area_en; /* enable bit */ 78 bool partition_setting_completed; /* enable bit */
78 unsigned long long enhanced_area_offset; /* Units: Byte */ 79 unsigned long long enhanced_area_offset; /* Units: Byte */
79 unsigned int enhanced_area_size; /* Units: KB */ 80 unsigned int enhanced_area_size; /* Units: KB */
80 unsigned int cache_size; /* Units: KB */ 81 unsigned int cache_size; /* Units: KB */
@@ -87,6 +88,9 @@ struct mmc_ext_csd {
87 unsigned int data_tag_unit_size; /* DATA TAG UNIT size */ 88 unsigned int data_tag_unit_size; /* DATA TAG UNIT size */
88 unsigned int boot_ro_lock; /* ro lock support */ 89 unsigned int boot_ro_lock; /* ro lock support */
89 bool boot_ro_lockable; 90 bool boot_ro_lockable;
91 bool ffu_capable; /* Firmware upgrade support */
92#define MMC_FIRMWARE_LEN 8
93 u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */
90 u8 raw_exception_status; /* 54 */ 94 u8 raw_exception_status; /* 54 */
91 u8 raw_partition_support; /* 160 */ 95 u8 raw_partition_support; /* 160 */
92 u8 raw_rpmb_size_mult; /* 168 */ 96 u8 raw_rpmb_size_mult; /* 168 */
@@ -214,11 +218,12 @@ enum mmc_blk_status {
214}; 218};
215 219
216/* The number of MMC physical partitions. These consist of: 220/* The number of MMC physical partitions. These consist of:
217 * boot partitions (2), general purpose partitions (4) in MMC v4.4. 221 * boot partitions (2), general purpose partitions (4) and
222 * RPMB partition (1) in MMC v4.4.
218 */ 223 */
219#define MMC_NUM_BOOT_PARTITION 2 224#define MMC_NUM_BOOT_PARTITION 2
220#define MMC_NUM_GP_PARTITION 4 225#define MMC_NUM_GP_PARTITION 4
221#define MMC_NUM_PHY_PARTITION 6 226#define MMC_NUM_PHY_PARTITION 7
222#define MAX_MMC_PART_NAME_LEN 20 227#define MAX_MMC_PART_NAME_LEN 20
223 228
224/* 229/*
@@ -507,24 +512,8 @@ static inline int mmc_card_broken_irq_polling(const struct mmc_card *c)
507 512
508#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev) 513#define mmc_dev_to_card(d) container_of(d, struct mmc_card, dev)
509 514
510#define mmc_list_to_card(l) container_of(l, struct mmc_card, node) 515extern int mmc_register_driver(struct device_driver *);
511#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev) 516extern void mmc_unregister_driver(struct device_driver *);
512#define mmc_set_drvdata(c,d) dev_set_drvdata(&(c)->dev, d)
513
514/*
515 * MMC device driver (e.g., Flash card, I/O card...)
516 */
517struct mmc_driver {
518 struct device_driver drv;
519 int (*probe)(struct mmc_card *);
520 void (*remove)(struct mmc_card *);
521 int (*suspend)(struct mmc_card *);
522 int (*resume)(struct mmc_card *);
523 void (*shutdown)(struct mmc_card *);
524};
525
526extern int mmc_register_driver(struct mmc_driver *);
527extern void mmc_unregister_driver(struct mmc_driver *);
528 517
529extern void mmc_fixup_device(struct mmc_card *card, 518extern void mmc_fixup_device(struct mmc_card *card,
530 const struct mmc_fixup *table); 519 const struct mmc_fixup *table);
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index f206e29f94d7..cb2b0400d284 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -154,7 +154,8 @@ extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
154extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool, 154extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
155 bool, bool); 155 bool, bool);
156extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int); 156extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
157extern int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); 157extern int mmc_send_tuning(struct mmc_host *host);
158extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
158 159
159#define MMC_ERASE_ARG 0x00000000 160#define MMC_ERASE_ARG 0x00000000
160#define MMC_SECURE_ERASE_ARG 0x80000000 161#define MMC_SECURE_ERASE_ARG 0x80000000
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 29ce014ab421..42b724e8d503 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -26,6 +26,8 @@ enum dw_mci_state {
26 STATE_DATA_BUSY, 26 STATE_DATA_BUSY,
27 STATE_SENDING_STOP, 27 STATE_SENDING_STOP,
28 STATE_DATA_ERROR, 28 STATE_DATA_ERROR,
29 STATE_SENDING_CMD11,
30 STATE_WAITING_CMD11_DONE,
29}; 31};
30 32
31enum { 33enum {
@@ -52,6 +54,7 @@ struct mmc_data;
52 * transfer is in progress. 54 * transfer is in progress.
53 * @use_dma: Whether DMA channel is initialized or not. 55 * @use_dma: Whether DMA channel is initialized or not.
54 * @using_dma: Whether DMA is in use for the current transfer. 56 * @using_dma: Whether DMA is in use for the current transfer.
57 * @dma_64bit_address: Whether DMA supports 64-bit address mode or not.
55 * @sg_dma: Bus address of DMA buffer. 58 * @sg_dma: Bus address of DMA buffer.
56 * @sg_cpu: Virtual address of DMA buffer. 59 * @sg_cpu: Virtual address of DMA buffer.
57 * @dma_ops: Pointer to platform-specific DMA callbacks. 60 * @dma_ops: Pointer to platform-specific DMA callbacks.
@@ -94,6 +97,7 @@ struct mmc_data;
94 * @quirks: Set of quirks that apply to specific versions of the IP. 97 * @quirks: Set of quirks that apply to specific versions of the IP.
95 * @irq_flags: The flags to be passed to request_irq. 98 * @irq_flags: The flags to be passed to request_irq.
96 * @irq: The irq value to be passed to request_irq. 99 * @irq: The irq value to be passed to request_irq.
100 * @sdio_id0: Number of slot0 in the SDIO interrupt registers.
97 * 101 *
98 * Locking 102 * Locking
99 * ======= 103 * =======
@@ -133,11 +137,11 @@ struct dw_mci {
133 struct mmc_command stop_abort; 137 struct mmc_command stop_abort;
134 unsigned int prev_blksz; 138 unsigned int prev_blksz;
135 unsigned char timing; 139 unsigned char timing;
136 struct workqueue_struct *card_workqueue;
137 140
138 /* DMA interface members*/ 141 /* DMA interface members*/
139 int use_dma; 142 int use_dma;
140 int using_dma; 143 int using_dma;
144 int dma_64bit_address;
141 145
142 dma_addr_t sg_dma; 146 dma_addr_t sg_dma;
143 void *sg_cpu; 147 void *sg_cpu;
@@ -152,7 +156,6 @@ struct dw_mci {
152 u32 stop_cmdr; 156 u32 stop_cmdr;
153 u32 dir_status; 157 u32 dir_status;
154 struct tasklet_struct tasklet; 158 struct tasklet_struct tasklet;
155 struct work_struct card_work;
156 unsigned long pending_events; 159 unsigned long pending_events;
157 unsigned long completed_events; 160 unsigned long completed_events;
158 enum dw_mci_state state; 161 enum dw_mci_state state;
@@ -188,9 +191,11 @@ struct dw_mci {
188 /* Workaround flags */ 191 /* Workaround flags */
189 u32 quirks; 192 u32 quirks;
190 193
191 struct regulator *vmmc; /* Power regulator */ 194 bool vqmmc_enabled;
192 unsigned long irq_flags; /* IRQ flags */ 195 unsigned long irq_flags; /* IRQ flags */
193 int irq; 196 int irq;
197
198 int sdio_id0;
194}; 199};
195 200
196/* DMA ops for Internal/External DMAC interface */ 201/* DMA ops for Internal/External DMAC interface */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 7960424d0bc0..9f322706f7cb 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -42,6 +42,7 @@ struct mmc_ios {
42#define MMC_POWER_OFF 0 42#define MMC_POWER_OFF 0
43#define MMC_POWER_UP 1 43#define MMC_POWER_UP 1
44#define MMC_POWER_ON 2 44#define MMC_POWER_ON 2
45#define MMC_POWER_UNDEFINED 3
45 46
46 unsigned char bus_width; /* data bus width */ 47 unsigned char bus_width; /* data bus width */
47 48
@@ -139,6 +140,13 @@ struct mmc_host_ops {
139 int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv); 140 int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
140 void (*hw_reset)(struct mmc_host *host); 141 void (*hw_reset)(struct mmc_host *host);
141 void (*card_event)(struct mmc_host *host); 142 void (*card_event)(struct mmc_host *host);
143
144 /*
145 * Optional callback to support controllers with HW issues for multiple
146 * I/O. Returns the number of supported blocks for the request.
147 */
148 int (*multi_io_quirk)(struct mmc_card *card,
149 unsigned int direction, int blk_size);
142}; 150};
143 151
144struct mmc_card; 152struct mmc_card;
@@ -265,7 +273,6 @@ struct mmc_host {
265 273
266#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */ 274#define MMC_CAP2_BOOTPART_NOACC (1 << 0) /* Boot partition no access */
267#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */ 275#define MMC_CAP2_FULL_PWR_CYCLE (1 << 2) /* Can do full power cycle */
268#define MMC_CAP2_NO_MULTI_READ (1 << 3) /* Multiblock reads don't work */
269#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */ 276#define MMC_CAP2_HS200_1_8V_SDR (1 << 5) /* can support */
270#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */ 277#define MMC_CAP2_HS200_1_2V_SDR (1 << 6) /* can support */
271#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \ 278#define MMC_CAP2_HS200 (MMC_CAP2_HS200_1_8V_SDR | \
@@ -282,6 +289,7 @@ struct mmc_host {
282#define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */ 289#define MMC_CAP2_HS400_1_2V (1 << 16) /* Can support HS400 1.2V */
283#define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \ 290#define MMC_CAP2_HS400 (MMC_CAP2_HS400_1_8V | \
284 MMC_CAP2_HS400_1_2V) 291 MMC_CAP2_HS400_1_2V)
292#define MMC_CAP2_HSX00_1_2V (MMC_CAP2_HS200_1_2V_SDR | MMC_CAP2_HS400_1_2V)
285#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17) 293#define MMC_CAP2_SDIO_IRQ_NOTHREAD (1 << 17)
286 294
287 mmc_pm_flag_t pm_caps; /* supported pm features */ 295 mmc_pm_flag_t pm_caps; /* supported pm features */
@@ -365,6 +373,9 @@ struct mmc_host {
365 373
366 unsigned int slotno; /* used for sdio acpi binding */ 374 unsigned int slotno; /* used for sdio acpi binding */
367 375
376 int dsr_req; /* DSR value is valid */
377 u32 dsr; /* optional driver stage (DSR) value */
378
368 unsigned long private[0] ____cacheline_aligned; 379 unsigned long private[0] ____cacheline_aligned;
369}; 380};
370 381
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 64ec963ed347..49ad7a943638 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -53,6 +53,11 @@
53#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */ 53#define MMC_SEND_TUNING_BLOCK 19 /* adtc R1 */
54#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */ 54#define MMC_SEND_TUNING_BLOCK_HS200 21 /* adtc R1 */
55 55
56#define MMC_TUNING_BLK_PATTERN_4BIT_SIZE 64
57#define MMC_TUNING_BLK_PATTERN_8BIT_SIZE 128
58extern const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE];
59extern const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE];
60
56 /* class 3 */ 61 /* class 3 */
57#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */ 62#define MMC_WRITE_DAT_UNTIL_STOP 20 /* adtc [31:0] data addr R1 */
58 63
@@ -281,6 +286,7 @@ struct _mmc_csd {
281#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */ 286#define EXT_CSD_EXP_EVENTS_CTRL 56 /* R/W, 2 bytes */
282#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */ 287#define EXT_CSD_DATA_SECTOR_SIZE 61 /* R */
283#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */ 288#define EXT_CSD_GP_SIZE_MULT 143 /* R/W */
289#define EXT_CSD_PARTITION_SETTING_COMPLETED 155 /* R/W */
284#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */ 290#define EXT_CSD_PARTITION_ATTRIBUTE 156 /* R/W */
285#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */ 291#define EXT_CSD_PARTITION_SUPPORT 160 /* RO */
286#define EXT_CSD_HPI_MGMT 161 /* R/W */ 292#define EXT_CSD_HPI_MGMT 161 /* R/W */
@@ -290,6 +296,7 @@ struct _mmc_csd {
290#define EXT_CSD_SANITIZE_START 165 /* W */ 296#define EXT_CSD_SANITIZE_START 165 /* W */
291#define EXT_CSD_WR_REL_PARAM 166 /* RO */ 297#define EXT_CSD_WR_REL_PARAM 166 /* RO */
292#define EXT_CSD_RPMB_MULT 168 /* RO */ 298#define EXT_CSD_RPMB_MULT 168 /* RO */
299#define EXT_CSD_FW_CONFIG 169 /* R/W */
293#define EXT_CSD_BOOT_WP 173 /* R/W */ 300#define EXT_CSD_BOOT_WP 173 /* R/W */
294#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ 301#define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */
295#define EXT_CSD_PART_CONFIG 179 /* R/W */ 302#define EXT_CSD_PART_CONFIG 179 /* R/W */
@@ -326,6 +333,8 @@ struct _mmc_csd {
326#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */ 333#define EXT_CSD_GENERIC_CMD6_TIME 248 /* RO */
327#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */ 334#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
328#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */ 335#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */
336#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */
337#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
329#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */ 338#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
330#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */ 339#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
331#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */ 340#define EXT_CSD_MAX_PACKED_WRITES 500 /* RO */
@@ -349,6 +358,7 @@ struct _mmc_csd {
349#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3) 358#define EXT_CSD_PART_CONFIG_ACC_RPMB (0x3)
350#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4) 359#define EXT_CSD_PART_CONFIG_ACC_GP0 (0x4)
351 360
361#define EXT_CSD_PART_SETTING_COMPLETED (0x1)
352#define EXT_CSD_PART_SUPPORT_PART_EN (0x1) 362#define EXT_CSD_PART_SUPPORT_PART_EN (0x1)
353 363
354#define EXT_CSD_CMD_SET_NORMAL (1<<0) 364#define EXT_CSD_CMD_SET_NORMAL (1<<0)
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 09ebe57d5ce9..375af80bde7d 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -98,6 +98,14 @@ struct sdhci_host {
98#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6) 98#define SDHCI_QUIRK2_BROKEN_HS200 (1<<6)
99/* Controller does not support DDR50 */ 99/* Controller does not support DDR50 */
100#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) 100#define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7)
101/* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */
102#define SDHCI_QUIRK2_STOP_WITH_TC (1<<8)
103/* Controller does not support 64-bit DMA */
104#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9)
105/* need clear transfer mode register before send cmd */
106#define SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD (1<<10)
107/* Capability register bit-63 indicates HS400 support */
108#define SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 (1<<11)
101 109
102 int irq; /* Device IRQ */ 110 int irq; /* Device IRQ */
103 void __iomem *ioaddr; /* Mapped address */ 111 void __iomem *ioaddr; /* Mapped address */
@@ -128,6 +136,7 @@ struct sdhci_host {
128#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ 136#define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */
129#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ 137#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
130#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ 138#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
139#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
131 140
132 unsigned int version; /* SDHCI spec. version */ 141 unsigned int version; /* SDHCI spec. version */
133 142
@@ -146,18 +155,26 @@ struct sdhci_host {
146 struct mmc_command *cmd; /* Current command */ 155 struct mmc_command *cmd; /* Current command */
147 struct mmc_data *data; /* Current data request */ 156 struct mmc_data *data; /* Current data request */
148 unsigned int data_early:1; /* Data finished before cmd */ 157 unsigned int data_early:1; /* Data finished before cmd */
158 unsigned int busy_handle:1; /* Handling the order of Busy-end */
149 159
150 struct sg_mapping_iter sg_miter; /* SG state for PIO */ 160 struct sg_mapping_iter sg_miter; /* SG state for PIO */
151 unsigned int blocks; /* remaining PIO blocks */ 161 unsigned int blocks; /* remaining PIO blocks */
152 162
153 int sg_count; /* Mapped sg entries */ 163 int sg_count; /* Mapped sg entries */
154 164
155 u8 *adma_desc; /* ADMA descriptor table */ 165 void *adma_table; /* ADMA descriptor table */
156 u8 *align_buffer; /* Bounce buffer */ 166 void *align_buffer; /* Bounce buffer */
167
168 size_t adma_table_sz; /* ADMA descriptor table size */
169 size_t align_buffer_sz; /* Bounce buffer size */
157 170
158 dma_addr_t adma_addr; /* Mapped ADMA descr. table */ 171 dma_addr_t adma_addr; /* Mapped ADMA descr. table */
159 dma_addr_t align_addr; /* Mapped bounce buffer */ 172 dma_addr_t align_addr; /* Mapped bounce buffer */
160 173
174 unsigned int desc_sz; /* ADMA descriptor size */
175 unsigned int align_sz; /* ADMA alignment */
176 unsigned int align_mask; /* ADMA alignment mask */
177
161 struct tasklet_struct finish_tasklet; /* Tasklet structures */ 178 struct tasklet_struct finish_tasklet; /* Tasklet structures */
162 179
163 struct timer_list timer; /* Timer for timeouts */ 180 struct timer_list timer; /* Timer for timeouts */
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 50f0bc952328..aab032a6ae61 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -84,8 +84,6 @@ struct sdio_driver {
84 struct device_driver drv; 84 struct device_driver drv;
85}; 85};
86 86
87#define to_sdio_driver(d) container_of(d, struct sdio_driver, drv)
88
89/** 87/**
90 * SDIO_DEVICE - macro used to describe a specific SDIO device 88 * SDIO_DEVICE - macro used to describe a specific SDIO device
91 * @vend: the 16 bit manufacturer code 89 * @vend: the 16 bit manufacturer code
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index d2433381e828..e56fa24c9322 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -24,7 +24,10 @@ void mmc_gpio_free_cd(struct mmc_host *host);
24 24
25int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, 25int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
26 unsigned int idx, bool override_active_level, 26 unsigned int idx, bool override_active_level,
27 unsigned int debounce); 27 unsigned int debounce, bool *gpio_invert);
28int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
29 unsigned int idx, bool override_active_level,
30 unsigned int debounce, bool *gpio_invert);
28void mmc_gpiod_free_cd(struct mmc_host *host); 31void mmc_gpiod_free_cd(struct mmc_host *host);
29void mmc_gpiod_request_cd_irq(struct mmc_host *host); 32void mmc_gpiod_request_cd_irq(struct mmc_host *host);
30 33
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 2f348d02f640..877ef226f90f 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -4,10 +4,14 @@
4#include <linux/stringify.h> 4#include <linux/stringify.h>
5 5
6struct page; 6struct page;
7struct vm_area_struct;
8struct mm_struct;
7 9
8extern void dump_page(struct page *page, const char *reason); 10extern void dump_page(struct page *page, const char *reason);
9extern void dump_page_badflags(struct page *page, const char *reason, 11extern void dump_page_badflags(struct page *page, const char *reason,
10 unsigned long badflags); 12 unsigned long badflags);
13void dump_vma(const struct vm_area_struct *vma);
14void dump_mm(const struct mm_struct *mm);
11 15
12#ifdef CONFIG_DEBUG_VM 16#ifdef CONFIG_DEBUG_VM
13#define VM_BUG_ON(cond) BUG_ON(cond) 17#define VM_BUG_ON(cond) BUG_ON(cond)
@@ -18,12 +22,28 @@ extern void dump_page_badflags(struct page *page, const char *reason,
18 BUG(); \ 22 BUG(); \
19 } \ 23 } \
20 } while (0) 24 } while (0)
25#define VM_BUG_ON_VMA(cond, vma) \
26 do { \
27 if (unlikely(cond)) { \
28 dump_vma(vma); \
29 BUG(); \
30 } \
31 } while (0)
32#define VM_BUG_ON_MM(cond, mm) \
33 do { \
34 if (unlikely(cond)) { \
35 dump_mm(mm); \
36 BUG(); \
37 } \
38 } while (0)
21#define VM_WARN_ON(cond) WARN_ON(cond) 39#define VM_WARN_ON(cond) WARN_ON(cond)
22#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) 40#define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond)
23#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) 41#define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format)
24#else 42#else
25#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) 43#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
26#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) 44#define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond)
45#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond)
46#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
27#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) 47#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
28#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) 48#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
29#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) 49#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 27288692241e..95243d28a0ee 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -57,10 +57,13 @@ struct mmu_notifier_ops {
57 * pte. This way the VM will provide proper aging to the 57 * pte. This way the VM will provide proper aging to the
58 * accesses to the page through the secondary MMUs and not 58 * accesses to the page through the secondary MMUs and not
59 * only to the ones through the Linux pte. 59 * only to the ones through the Linux pte.
60 * Start-end is necessary in case the secondary MMU is mapping the page
61 * at a smaller granularity than the primary MMU.
60 */ 62 */
61 int (*clear_flush_young)(struct mmu_notifier *mn, 63 int (*clear_flush_young)(struct mmu_notifier *mn,
62 struct mm_struct *mm, 64 struct mm_struct *mm,
63 unsigned long address); 65 unsigned long start,
66 unsigned long end);
64 67
65 /* 68 /*
66 * test_young is called to check the young/accessed bitflag in 69 * test_young is called to check the young/accessed bitflag in
@@ -95,11 +98,11 @@ struct mmu_notifier_ops {
95 /* 98 /*
96 * invalidate_range_start() and invalidate_range_end() must be 99 * invalidate_range_start() and invalidate_range_end() must be
97 * paired and are called only when the mmap_sem and/or the 100 * paired and are called only when the mmap_sem and/or the
98 * locks protecting the reverse maps are held. The subsystem 101 * locks protecting the reverse maps are held. If the subsystem
99 * must guarantee that no additional references are taken to 102 * can't guarantee that no additional references are taken to
100 * the pages in the range established between the call to 103 * the pages in the range, it has to implement the
101 * invalidate_range_start() and the matching call to 104 * invalidate_range() notifier to remove any references taken
102 * invalidate_range_end(). 105 * after invalidate_range_start().
103 * 106 *
104 * Invalidation of multiple concurrent ranges may be 107 * Invalidation of multiple concurrent ranges may be
105 * optionally permitted by the driver. Either way the 108 * optionally permitted by the driver. Either way the
@@ -141,6 +144,29 @@ struct mmu_notifier_ops {
141 void (*invalidate_range_end)(struct mmu_notifier *mn, 144 void (*invalidate_range_end)(struct mmu_notifier *mn,
142 struct mm_struct *mm, 145 struct mm_struct *mm,
143 unsigned long start, unsigned long end); 146 unsigned long start, unsigned long end);
147
148 /*
149 * invalidate_range() is either called between
150 * invalidate_range_start() and invalidate_range_end() when the
151 * VM has to free pages that where unmapped, but before the
152 * pages are actually freed, or outside of _start()/_end() when
153 * a (remote) TLB is necessary.
154 *
155 * If invalidate_range() is used to manage a non-CPU TLB with
156 * shared page-tables, it not necessary to implement the
157 * invalidate_range_start()/end() notifiers, as
158 * invalidate_range() alread catches the points in time when an
159 * external TLB range needs to be flushed.
160 *
161 * The invalidate_range() function is called under the ptl
162 * spin-lock and not allowed to sleep.
163 *
164 * Note that this function might be called with just a sub-range
165 * of what was passed to invalidate_range_start()/end(), if
166 * called between those functions.
167 */
168 void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
169 unsigned long start, unsigned long end);
144}; 170};
145 171
146/* 172/*
@@ -151,7 +177,7 @@ struct mmu_notifier_ops {
151 * Therefore notifier chains can only be traversed when either 177 * Therefore notifier chains can only be traversed when either
152 * 178 *
153 * 1. mmap_sem is held. 179 * 1. mmap_sem is held.
154 * 2. One of the reverse map locks is held (i_mmap_mutex or anon_vma->rwsem). 180 * 2. One of the reverse map locks is held (i_mmap_rwsem or anon_vma->rwsem).
155 * 3. No other concurrent thread can access the list (release) 181 * 3. No other concurrent thread can access the list (release)
156 */ 182 */
157struct mmu_notifier { 183struct mmu_notifier {
@@ -175,7 +201,8 @@ extern void mmu_notifier_unregister_no_release(struct mmu_notifier *mn,
175extern void __mmu_notifier_mm_destroy(struct mm_struct *mm); 201extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
176extern void __mmu_notifier_release(struct mm_struct *mm); 202extern void __mmu_notifier_release(struct mm_struct *mm);
177extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm, 203extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
178 unsigned long address); 204 unsigned long start,
205 unsigned long end);
179extern int __mmu_notifier_test_young(struct mm_struct *mm, 206extern int __mmu_notifier_test_young(struct mm_struct *mm,
180 unsigned long address); 207 unsigned long address);
181extern void __mmu_notifier_change_pte(struct mm_struct *mm, 208extern void __mmu_notifier_change_pte(struct mm_struct *mm,
@@ -186,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
186 unsigned long start, unsigned long end); 213 unsigned long start, unsigned long end);
187extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, 214extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
188 unsigned long start, unsigned long end); 215 unsigned long start, unsigned long end);
216extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
217 unsigned long start, unsigned long end);
189 218
190static inline void mmu_notifier_release(struct mm_struct *mm) 219static inline void mmu_notifier_release(struct mm_struct *mm)
191{ 220{
@@ -194,10 +223,11 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
194} 223}
195 224
196static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, 225static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
197 unsigned long address) 226 unsigned long start,
227 unsigned long end)
198{ 228{
199 if (mm_has_notifiers(mm)) 229 if (mm_has_notifiers(mm))
200 return __mmu_notifier_clear_flush_young(mm, address); 230 return __mmu_notifier_clear_flush_young(mm, start, end);
201 return 0; 231 return 0;
202} 232}
203 233
@@ -237,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
237 __mmu_notifier_invalidate_range_end(mm, start, end); 267 __mmu_notifier_invalidate_range_end(mm, start, end);
238} 268}
239 269
270static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
271 unsigned long start, unsigned long end)
272{
273 if (mm_has_notifiers(mm))
274 __mmu_notifier_invalidate_range(mm, start, end);
275}
276
240static inline void mmu_notifier_mm_init(struct mm_struct *mm) 277static inline void mmu_notifier_mm_init(struct mm_struct *mm)
241{ 278{
242 mm->mmu_notifier_mm = NULL; 279 mm->mmu_notifier_mm = NULL;
@@ -255,7 +292,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
255 unsigned long ___address = __address; \ 292 unsigned long ___address = __address; \
256 __young = ptep_clear_flush_young(___vma, ___address, __ptep); \ 293 __young = ptep_clear_flush_young(___vma, ___address, __ptep); \
257 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ 294 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
258 ___address); \ 295 ___address, \
296 ___address + \
297 PAGE_SIZE); \
259 __young; \ 298 __young; \
260}) 299})
261 300
@@ -266,10 +305,50 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
266 unsigned long ___address = __address; \ 305 unsigned long ___address = __address; \
267 __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \ 306 __young = pmdp_clear_flush_young(___vma, ___address, __pmdp); \
268 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \ 307 __young |= mmu_notifier_clear_flush_young(___vma->vm_mm, \
269 ___address); \ 308 ___address, \
309 ___address + \
310 PMD_SIZE); \
270 __young; \ 311 __young; \
271}) 312})
272 313
314#define ptep_clear_flush_notify(__vma, __address, __ptep) \
315({ \
316 unsigned long ___addr = __address & PAGE_MASK; \
317 struct mm_struct *___mm = (__vma)->vm_mm; \
318 pte_t ___pte; \
319 \
320 ___pte = ptep_clear_flush(__vma, __address, __ptep); \
321 mmu_notifier_invalidate_range(___mm, ___addr, \
322 ___addr + PAGE_SIZE); \
323 \
324 ___pte; \
325})
326
327#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \
328({ \
329 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
330 struct mm_struct *___mm = (__vma)->vm_mm; \
331 pmd_t ___pmd; \
332 \
333 ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \
334 mmu_notifier_invalidate_range(___mm, ___haddr, \
335 ___haddr + HPAGE_PMD_SIZE); \
336 \
337 ___pmd; \
338})
339
340#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \
341({ \
342 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
343 pmd_t ___pmd; \
344 \
345 ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \
346 mmu_notifier_invalidate_range(__mm, ___haddr, \
347 ___haddr + HPAGE_PMD_SIZE); \
348 \
349 ___pmd; \
350})
351
273/* 352/*
274 * set_pte_at_notify() sets the pte _after_ running the notifier. 353 * set_pte_at_notify() sets the pte _after_ running the notifier.
275 * This is safe to start by updating the secondary MMUs, because the primary MMU 354 * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -301,7 +380,8 @@ static inline void mmu_notifier_release(struct mm_struct *mm)
301} 380}
302 381
303static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, 382static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm,
304 unsigned long address) 383 unsigned long start,
384 unsigned long end)
305{ 385{
306 return 0; 386 return 0;
307} 387}
@@ -332,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
332{ 412{
333} 413}
334 414
415static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
416 unsigned long start, unsigned long end)
417{
418}
419
335static inline void mmu_notifier_mm_init(struct mm_struct *mm) 420static inline void mmu_notifier_mm_init(struct mm_struct *mm)
336{ 421{
337} 422}
@@ -342,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
342 427
343#define ptep_clear_flush_young_notify ptep_clear_flush_young 428#define ptep_clear_flush_young_notify ptep_clear_flush_young
344#define pmdp_clear_flush_young_notify pmdp_clear_flush_young 429#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
430#define ptep_clear_flush_notify ptep_clear_flush
431#define pmdp_clear_flush_notify pmdp_clear_flush
432#define pmdp_get_and_clear_notify pmdp_get_and_clear
345#define set_pte_at_notify set_pte_at 433#define set_pte_at_notify set_pte_at
346 434
347#endif /* CONFIG_MMU_NOTIFIER */ 435#endif /* CONFIG_MMU_NOTIFIER */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 318df7051850..2f0856d14b21 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -431,6 +431,15 @@ struct zone {
431 */ 431 */
432 int nr_migrate_reserve_block; 432 int nr_migrate_reserve_block;
433 433
434#ifdef CONFIG_MEMORY_ISOLATION
435 /*
436 * Number of isolated pageblock. It is used to solve incorrect
437 * freepage counting problem due to racy retrieving migratetype
438 * of pageblock. Protected by zone->lock.
439 */
440 unsigned long nr_isolate_pageblock;
441#endif
442
434#ifdef CONFIG_MEMORY_HOTPLUG 443#ifdef CONFIG_MEMORY_HOTPLUG
435 /* see spanned/present_pages for more description */ 444 /* see spanned/present_pages for more description */
436 seqlock_t span_seqlock; 445 seqlock_t span_seqlock;
@@ -521,13 +530,13 @@ struct zone {
521 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; 530 atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
522} ____cacheline_internodealigned_in_smp; 531} ____cacheline_internodealigned_in_smp;
523 532
524typedef enum { 533enum zone_flags {
525 ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ 534 ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
526 ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ 535 ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
527 ZONE_CONGESTED, /* zone has many dirty pages backed by 536 ZONE_CONGESTED, /* zone has many dirty pages backed by
528 * a congested BDI 537 * a congested BDI
529 */ 538 */
530 ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found 539 ZONE_DIRTY, /* reclaim scanning has recently found
531 * many dirty file pages at the tail 540 * many dirty file pages at the tail
532 * of the LRU. 541 * of the LRU.
533 */ 542 */
@@ -535,52 +544,7 @@ typedef enum {
535 * many pages under writeback 544 * many pages under writeback
536 */ 545 */
537 ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */ 546 ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */
538} zone_flags_t; 547};
539
540static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
541{
542 set_bit(flag, &zone->flags);
543}
544
545static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
546{
547 return test_and_set_bit(flag, &zone->flags);
548}
549
550static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
551{
552 clear_bit(flag, &zone->flags);
553}
554
555static inline int zone_is_reclaim_congested(const struct zone *zone)
556{
557 return test_bit(ZONE_CONGESTED, &zone->flags);
558}
559
560static inline int zone_is_reclaim_dirty(const struct zone *zone)
561{
562 return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags);
563}
564
565static inline int zone_is_reclaim_writeback(const struct zone *zone)
566{
567 return test_bit(ZONE_WRITEBACK, &zone->flags);
568}
569
570static inline int zone_is_reclaim_locked(const struct zone *zone)
571{
572 return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
573}
574
575static inline int zone_is_fair_depleted(const struct zone *zone)
576{
577 return test_bit(ZONE_FAIR_DEPLETED, &zone->flags);
578}
579
580static inline int zone_is_oom_locked(const struct zone *zone)
581{
582 return test_bit(ZONE_OOM_LOCKED, &zone->flags);
583}
584 548
585static inline unsigned long zone_end_pfn(const struct zone *zone) 549static inline unsigned long zone_end_pfn(const struct zone *zone)
586{ 550{
@@ -758,8 +722,8 @@ typedef struct pglist_data {
758 int nr_zones; 722 int nr_zones;
759#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ 723#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
760 struct page *node_mem_map; 724 struct page *node_mem_map;
761#ifdef CONFIG_MEMCG 725#ifdef CONFIG_PAGE_EXTENSION
762 struct page_cgroup *node_page_cgroup; 726 struct page_ext *node_page_ext;
763#endif 727#endif
764#endif 728#endif
765#ifndef CONFIG_NO_BOOTMEM 729#ifndef CONFIG_NO_BOOTMEM
@@ -1114,7 +1078,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
1114#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) 1078#define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK)
1115 1079
1116struct page; 1080struct page;
1117struct page_cgroup; 1081struct page_ext;
1118struct mem_section { 1082struct mem_section {
1119 /* 1083 /*
1120 * This is, logically, a pointer to an array of struct 1084 * This is, logically, a pointer to an array of struct
@@ -1132,12 +1096,12 @@ struct mem_section {
1132 1096
1133 /* See declaration of similar field in struct zone */ 1097 /* See declaration of similar field in struct zone */
1134 unsigned long *pageblock_flags; 1098 unsigned long *pageblock_flags;
1135#ifdef CONFIG_MEMCG 1099#ifdef CONFIG_PAGE_EXTENSION
1136 /* 1100 /*
1137 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use 1101 * If !SPARSEMEM, pgdat doesn't have page_ext pointer. We use
1138 * section. (see memcontrol.h/page_cgroup.h about this.) 1102 * section. (see page_ext.h about this.)
1139 */ 1103 */
1140 struct page_cgroup *page_cgroup; 1104 struct page_ext *page_ext;
1141 unsigned long pad; 1105 unsigned long pad;
1142#endif 1106#endif
1143 /* 1107 /*
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 44eeef0da186..745def862580 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -69,7 +69,7 @@ struct ieee1394_device_id {
69 * @bDeviceClass: Class of device; numbers are assigned 69 * @bDeviceClass: Class of device; numbers are assigned
70 * by the USB forum. Products may choose to implement classes, 70 * by the USB forum. Products may choose to implement classes,
71 * or be vendor-specific. Device classes specify behavior of all 71 * or be vendor-specific. Device classes specify behavior of all
72 * the interfaces on a devices. 72 * the interfaces on a device.
73 * @bDeviceSubClass: Subclass of device; associated with bDeviceClass. 73 * @bDeviceSubClass: Subclass of device; associated with bDeviceClass.
74 * @bDeviceProtocol: Protocol of device; associated with bDeviceClass. 74 * @bDeviceProtocol: Protocol of device; associated with bDeviceClass.
75 * @bInterfaceClass: Class of interface; numbers are assigned 75 * @bInterfaceClass: Class of interface; numbers are assigned
diff --git a/include/linux/module.h b/include/linux/module.h
index 71f282a4e307..ebfb0e153c6a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -210,20 +210,6 @@ enum module_state {
210 MODULE_STATE_UNFORMED, /* Still setting it up. */ 210 MODULE_STATE_UNFORMED, /* Still setting it up. */
211}; 211};
212 212
213/**
214 * struct module_ref - per cpu module reference counts
215 * @incs: number of module get on this cpu
216 * @decs: number of module put on this cpu
217 *
218 * We force an alignment on 8 or 16 bytes, so that alloc_percpu()
219 * put @incs/@decs in same cache line, with no extra memory cost,
220 * since alloc_percpu() is fine grained.
221 */
222struct module_ref {
223 unsigned long incs;
224 unsigned long decs;
225} __attribute((aligned(2 * sizeof(unsigned long))));
226
227struct module { 213struct module {
228 enum module_state state; 214 enum module_state state;
229 215
@@ -367,7 +353,7 @@ struct module {
367 /* Destruction function. */ 353 /* Destruction function. */
368 void (*exit)(void); 354 void (*exit)(void);
369 355
370 struct module_ref __percpu *refptr; 356 atomic_t refcnt;
371#endif 357#endif
372 358
373#ifdef CONFIG_CONSTRUCTORS 359#ifdef CONFIG_CONSTRUCTORS
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 494f99e852da..1c9effa25e26 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -42,7 +42,7 @@ struct kernel_param;
42 * NOARG - the parameter allows for no argument (foo instead of foo=1) 42 * NOARG - the parameter allows for no argument (foo instead of foo=1)
43 */ 43 */
44enum { 44enum {
45 KERNEL_PARAM_FL_NOARG = (1 << 0) 45 KERNEL_PARAM_OPS_FL_NOARG = (1 << 0)
46}; 46};
47 47
48struct kernel_param_ops { 48struct kernel_param_ops {
@@ -56,11 +56,21 @@ struct kernel_param_ops {
56 void (*free)(void *arg); 56 void (*free)(void *arg);
57}; 57};
58 58
59/*
60 * Flags available for kernel_param
61 *
62 * UNSAFE - the parameter is dangerous and setting it will taint the kernel
63 */
64enum {
65 KERNEL_PARAM_FL_UNSAFE = (1 << 0)
66};
67
59struct kernel_param { 68struct kernel_param {
60 const char *name; 69 const char *name;
61 const struct kernel_param_ops *ops; 70 const struct kernel_param_ops *ops;
62 u16 perm; 71 u16 perm;
63 s16 level; 72 s8 level;
73 u8 flags;
64 union { 74 union {
65 void *arg; 75 void *arg;
66 const struct kparam_string *str; 76 const struct kparam_string *str;
@@ -68,6 +78,8 @@ struct kernel_param {
68 }; 78 };
69}; 79};
70 80
81extern const struct kernel_param __start___param[], __stop___param[];
82
71/* Special one for strings we want to copy into */ 83/* Special one for strings we want to copy into */
72struct kparam_string { 84struct kparam_string {
73 unsigned int maxlen; 85 unsigned int maxlen;
@@ -113,6 +125,12 @@ struct kparam_array
113 module_param_named(name, name, type, perm) 125 module_param_named(name, name, type, perm)
114 126
115/** 127/**
128 * module_param_unsafe - same as module_param but taints kernel
129 */
130#define module_param_unsafe(name, type, perm) \
131 module_param_named_unsafe(name, name, type, perm)
132
133/**
116 * module_param_named - typesafe helper for a renamed module/cmdline parameter 134 * module_param_named - typesafe helper for a renamed module/cmdline parameter
117 * @name: a valid C identifier which is the parameter name. 135 * @name: a valid C identifier which is the parameter name.
118 * @value: the actual lvalue to alter. 136 * @value: the actual lvalue to alter.
@@ -129,6 +147,14 @@ struct kparam_array
129 __MODULE_PARM_TYPE(name, #type) 147 __MODULE_PARM_TYPE(name, #type)
130 148
131/** 149/**
150 * module_param_named_unsafe - same as module_param_named but taints kernel
151 */
152#define module_param_named_unsafe(name, value, type, perm) \
153 param_check_##type(name, &(value)); \
154 module_param_cb_unsafe(name, &param_ops_##type, &value, perm); \
155 __MODULE_PARM_TYPE(name, #type)
156
157/**
132 * module_param_cb - general callback for a module/cmdline parameter 158 * module_param_cb - general callback for a module/cmdline parameter
133 * @name: a valid C identifier which is the parameter name. 159 * @name: a valid C identifier which is the parameter name.
134 * @ops: the set & get operations for this parameter. 160 * @ops: the set & get operations for this parameter.
@@ -137,7 +163,11 @@ struct kparam_array
137 * The ops can have NULL set or get functions. 163 * The ops can have NULL set or get functions.
138 */ 164 */
139#define module_param_cb(name, ops, arg, perm) \ 165#define module_param_cb(name, ops, arg, perm) \
140 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1) 166 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, 0)
167
168#define module_param_cb_unsafe(name, ops, arg, perm) \
169 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, \
170 KERNEL_PARAM_FL_UNSAFE)
141 171
142/** 172/**
143 * <level>_param_cb - general callback for a module/cmdline parameter 173 * <level>_param_cb - general callback for a module/cmdline parameter
@@ -149,7 +179,7 @@ struct kparam_array
149 * The ops can have NULL set or get functions. 179 * The ops can have NULL set or get functions.
150 */ 180 */
151#define __level_param_cb(name, ops, arg, perm, level) \ 181#define __level_param_cb(name, ops, arg, perm, level) \
152 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level) 182 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
153 183
154#define core_param_cb(name, ops, arg, perm) \ 184#define core_param_cb(name, ops, arg, perm) \
155 __level_param_cb(name, ops, arg, perm, 1) 185 __level_param_cb(name, ops, arg, perm, 1)
@@ -184,22 +214,22 @@ struct kparam_array
184 214
185/* This is the fundamental function for registering boot/module 215/* This is the fundamental function for registering boot/module
186 parameters. */ 216 parameters. */
187#define __module_param_call(prefix, name, ops, arg, perm, level) \ 217#define __module_param_call(prefix, name, ops, arg, perm, level, flags) \
188 /* Default value instead of permissions? */ \ 218 /* Default value instead of permissions? */ \
189 static const char __param_str_##name[] = prefix #name; \ 219 static const char __param_str_##name[] = prefix #name; \
190 static struct kernel_param __moduleparam_const __param_##name \ 220 static struct kernel_param __moduleparam_const __param_##name \
191 __used \ 221 __used \
192 __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ 222 __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
193 = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \ 223 = { __param_str_##name, ops, VERIFY_OCTAL_PERMISSIONS(perm), \
194 level, { arg } } 224 level, flags, { arg } }
195 225
196/* Obsolete - use module_param_cb() */ 226/* Obsolete - use module_param_cb() */
197#define module_param_call(name, set, get, arg, perm) \ 227#define module_param_call(name, set, get, arg, perm) \
198 static struct kernel_param_ops __param_ops_##name = \ 228 static struct kernel_param_ops __param_ops_##name = \
199 { 0, (void *)set, (void *)get }; \ 229 { .flags = 0, (void *)set, (void *)get }; \
200 __module_param_call(MODULE_PARAM_PREFIX, \ 230 __module_param_call(MODULE_PARAM_PREFIX, \
201 name, &__param_ops_##name, arg, \ 231 name, &__param_ops_##name, arg, \
202 (perm) + sizeof(__check_old_set_param(set))*0, -1) 232 (perm) + sizeof(__check_old_set_param(set))*0, -1, 0)
203 233
204/* We don't get oldget: it's often a new-style param_get_uint, etc. */ 234/* We don't get oldget: it's often a new-style param_get_uint, etc. */
205static inline int 235static inline int
@@ -279,7 +309,7 @@ static inline void __kernel_param_unlock(void)
279 */ 309 */
280#define core_param(name, var, type, perm) \ 310#define core_param(name, var, type, perm) \
281 param_check_##type(name, &(var)); \ 311 param_check_##type(name, &(var)); \
282 __module_param_call("", name, &param_ops_##type, &var, perm, -1) 312 __module_param_call("", name, &param_ops_##type, &var, perm, -1, 0)
283#endif /* !MODULE */ 313#endif /* !MODULE */
284 314
285/** 315/**
@@ -297,7 +327,7 @@ static inline void __kernel_param_unlock(void)
297 = { len, string }; \ 327 = { len, string }; \
298 __module_param_call(MODULE_PARAM_PREFIX, name, \ 328 __module_param_call(MODULE_PARAM_PREFIX, name, \
299 &param_ops_string, \ 329 &param_ops_string, \
300 .str = &__param_string_##name, perm, -1); \ 330 .str = &__param_string_##name, perm, -1, 0);\
301 __MODULE_PARM_TYPE(name, "string") 331 __MODULE_PARM_TYPE(name, "string")
302 332
303/** 333/**
@@ -444,7 +474,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
444 __module_param_call(MODULE_PARAM_PREFIX, name, \ 474 __module_param_call(MODULE_PARAM_PREFIX, name, \
445 &param_array_ops, \ 475 &param_array_ops, \
446 .arr = &__param_arr_##name, \ 476 .arr = &__param_arr_##name, \
447 perm, -1); \ 477 perm, -1, 0); \
448 __MODULE_PARM_TYPE(name, "array of " #type) 478 __MODULE_PARM_TYPE(name, "array of " #type)
449 479
450extern struct kernel_param_ops param_array_ops; 480extern struct kernel_param_ops param_array_ops;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 9262e4bf0cc3..c2c561dc0114 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -81,6 +81,9 @@ extern struct vfsmount *mntget(struct vfsmount *mnt);
81extern struct vfsmount *mnt_clone_internal(struct path *path); 81extern struct vfsmount *mnt_clone_internal(struct path *path);
82extern int __mnt_is_readonly(struct vfsmount *mnt); 82extern int __mnt_is_readonly(struct vfsmount *mnt);
83 83
84struct path;
85extern struct vfsmount *clone_private_mount(struct path *path);
86
84struct file_system_type; 87struct file_system_type;
85extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, 88extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
86 int flags, const char *name, 89 int flags, const char *name,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8103f32f6d87..8ac4a68ffae2 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,17 +10,12 @@ struct msi_msg {
10 u32 data; /* 16 bits of msi message data */ 10 u32 data; /* 16 bits of msi message data */
11}; 11};
12 12
13extern int pci_msi_ignore_mask;
13/* Helper functions */ 14/* Helper functions */
14struct irq_data; 15struct irq_data;
15struct msi_desc; 16struct msi_desc;
16void mask_msi_irq(struct irq_data *data);
17void unmask_msi_irq(struct irq_data *data);
18void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
19void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 17void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
20void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
21void read_msi_msg(unsigned int irq, struct msi_msg *msg);
22void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); 18void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
23void write_msi_msg(unsigned int irq, struct msi_msg *msg);
24 19
25struct msi_desc { 20struct msi_desc {
26 struct { 21 struct {
@@ -29,7 +24,6 @@ struct msi_desc {
29 __u8 multi_cap : 3; /* log2 num of messages supported */ 24 __u8 multi_cap : 3; /* log2 num of messages supported */
30 __u8 maskbit : 1; /* mask-pending bit supported ? */ 25 __u8 maskbit : 1; /* mask-pending bit supported ? */
31 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 26 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
32 __u8 pos; /* Location of the msi capability */
33 __u16 entry_nr; /* specific enabled entry */ 27 __u16 entry_nr; /* specific enabled entry */
34 unsigned default_irq; /* default pre-assigned irq */ 28 unsigned default_irq; /* default pre-assigned irq */
35 } msi_attrib; 29 } msi_attrib;
@@ -47,10 +41,54 @@ struct msi_desc {
47 41
48 /* Last set MSI message */ 42 /* Last set MSI message */
49 struct msi_msg msg; 43 struct msi_msg msg;
50
51 struct kobject kobj;
52}; 44};
53 45
46/* Helpers to hide struct msi_desc implementation details */
47#define msi_desc_to_dev(desc) (&(desc)->dev.dev)
48#define dev_to_msi_list(dev) (&to_pci_dev((dev))->msi_list)
49#define first_msi_entry(dev) \
50 list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
51#define for_each_msi_entry(desc, dev) \
52 list_for_each_entry((desc), dev_to_msi_list((dev)), list)
53
54#ifdef CONFIG_PCI_MSI
55#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
56#define for_each_pci_msi_entry(desc, pdev) \
57 for_each_msi_entry((desc), &(pdev)->dev)
58
59static inline struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
60{
61 return desc->dev;
62}
63#endif /* CONFIG_PCI_MSI */
64
65void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
66void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
67void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
68
69u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
70u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
71void pci_msi_mask_irq(struct irq_data *data);
72void pci_msi_unmask_irq(struct irq_data *data);
73
74/* Conversion helpers. Should be removed after merging */
75static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
76{
77 __pci_write_msi_msg(entry, msg);
78}
79static inline void write_msi_msg(int irq, struct msi_msg *msg)
80{
81 pci_write_msi_msg(irq, msg);
82}
83static inline void mask_msi_irq(struct irq_data *data)
84{
85 pci_msi_mask_irq(data);
86}
87static inline void unmask_msi_irq(struct irq_data *data)
88{
89 pci_msi_unmask_irq(data);
90}
91
54/* 92/*
55 * The arch hooks to setup up msi irqs. Those functions are 93 * The arch hooks to setup up msi irqs. Those functions are
56 * implemented as weak symbols so that they /can/ be overriden by 94 * implemented as weak symbols so that they /can/ be overriden by
@@ -60,25 +98,146 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
60void arch_teardown_msi_irq(unsigned int irq); 98void arch_teardown_msi_irq(unsigned int irq);
61int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type); 99int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
62void arch_teardown_msi_irqs(struct pci_dev *dev); 100void arch_teardown_msi_irqs(struct pci_dev *dev);
63int arch_msi_check_device(struct pci_dev* dev, int nvec, int type);
64void arch_restore_msi_irqs(struct pci_dev *dev); 101void arch_restore_msi_irqs(struct pci_dev *dev);
65 102
66void default_teardown_msi_irqs(struct pci_dev *dev); 103void default_teardown_msi_irqs(struct pci_dev *dev);
67void default_restore_msi_irqs(struct pci_dev *dev); 104void default_restore_msi_irqs(struct pci_dev *dev);
68u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
69u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag);
70 105
71struct msi_chip { 106struct msi_controller {
72 struct module *owner; 107 struct module *owner;
73 struct device *dev; 108 struct device *dev;
74 struct device_node *of_node; 109 struct device_node *of_node;
75 struct list_head list; 110 struct list_head list;
111#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
112 struct irq_domain *domain;
113#endif
76 114
77 int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, 115 int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
78 struct msi_desc *desc); 116 struct msi_desc *desc);
79 void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); 117 void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
80 int (*check_device)(struct msi_chip *chip, struct pci_dev *dev,
81 int nvec, int type);
82}; 118};
83 119
120#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
121
122#include <linux/irqhandler.h>
123#include <asm/msi.h>
124
125struct irq_domain;
126struct irq_chip;
127struct device_node;
128struct msi_domain_info;
129
130/**
131 * struct msi_domain_ops - MSI interrupt domain callbacks
132 * @get_hwirq: Retrieve the resulting hw irq number
133 * @msi_init: Domain specific init function for MSI interrupts
134 * @msi_free: Domain specific function to free a MSI interrupts
135 * @msi_check: Callback for verification of the domain/info/dev data
136 * @msi_prepare: Prepare the allocation of the interrupts in the domain
137 * @msi_finish: Optional callbacl to finalize the allocation
138 * @set_desc: Set the msi descriptor for an interrupt
139 * @handle_error: Optional error handler if the allocation fails
140 *
141 * @get_hwirq, @msi_init and @msi_free are callbacks used by
142 * msi_create_irq_domain() and related interfaces
143 *
144 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
145 * are callbacks used by msi_irq_domain_alloc_irqs() and related
146 * interfaces which are based on msi_desc.
147 */
148struct msi_domain_ops {
149 irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info,
150 msi_alloc_info_t *arg);
151 int (*msi_init)(struct irq_domain *domain,
152 struct msi_domain_info *info,
153 unsigned int virq, irq_hw_number_t hwirq,
154 msi_alloc_info_t *arg);
155 void (*msi_free)(struct irq_domain *domain,
156 struct msi_domain_info *info,
157 unsigned int virq);
158 int (*msi_check)(struct irq_domain *domain,
159 struct msi_domain_info *info,
160 struct device *dev);
161 int (*msi_prepare)(struct irq_domain *domain,
162 struct device *dev, int nvec,
163 msi_alloc_info_t *arg);
164 void (*msi_finish)(msi_alloc_info_t *arg, int retval);
165 void (*set_desc)(msi_alloc_info_t *arg,
166 struct msi_desc *desc);
167 int (*handle_error)(struct irq_domain *domain,
168 struct msi_desc *desc, int error);
169};
170
171/**
172 * struct msi_domain_info - MSI interrupt domain data
173 * @flags: Flags to decribe features and capabilities
174 * @ops: The callback data structure
175 * @chip: Optional: associated interrupt chip
176 * @chip_data: Optional: associated interrupt chip data
177 * @handler: Optional: associated interrupt flow handler
178 * @handler_data: Optional: associated interrupt flow handler data
179 * @handler_name: Optional: associated interrupt flow handler name
180 * @data: Optional: domain specific data
181 */
182struct msi_domain_info {
183 u32 flags;
184 struct msi_domain_ops *ops;
185 struct irq_chip *chip;
186 void *chip_data;
187 irq_flow_handler_t handler;
188 void *handler_data;
189 const char *handler_name;
190 void *data;
191};
192
193/* Flags for msi_domain_info */
194enum {
195 /*
196 * Init non implemented ops callbacks with default MSI domain
197 * callbacks.
198 */
199 MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0),
200 /*
201 * Init non implemented chip callbacks with default MSI chip
202 * callbacks.
203 */
204 MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1),
205 /* Build identity map between hwirq and irq */
206 MSI_FLAG_IDENTITY_MAP = (1 << 2),
207 /* Support multiple PCI MSI interrupts */
208 MSI_FLAG_MULTI_PCI_MSI = (1 << 3),
209 /* Support PCI MSIX interrupts */
210 MSI_FLAG_PCI_MSIX = (1 << 4),
211};
212
213int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
214 bool force);
215
216struct irq_domain *msi_create_irq_domain(struct device_node *of_node,
217 struct msi_domain_info *info,
218 struct irq_domain *parent);
219int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
220 int nvec);
221void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
222struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
223
224#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
225
226#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
227void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
228struct irq_domain *pci_msi_create_irq_domain(struct device_node *node,
229 struct msi_domain_info *info,
230 struct irq_domain *parent);
231int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
232 int nvec, int type);
233void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
234struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node,
235 struct msi_domain_info *info, struct irq_domain *parent);
236
237irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
238 struct msi_desc *desc);
239int pci_msi_domain_check_cap(struct irq_domain *domain,
240 struct msi_domain_info *info, struct device *dev);
241#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
242
84#endif /* LINUX_MSI_H */ 243#endif /* LINUX_MSI_H */
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 37ef6b194089..299d7d31fe53 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -153,7 +153,7 @@ struct cfi_ident {
153 uint16_t MaxBufWriteSize; 153 uint16_t MaxBufWriteSize;
154 uint8_t NumEraseRegions; 154 uint8_t NumEraseRegions;
155 uint32_t EraseRegionInfo[0]; /* Not host ordered */ 155 uint32_t EraseRegionInfo[0]; /* Not host ordered */
156} __attribute__((packed)); 156} __packed;
157 157
158/* Extended Query Structure for both PRI and ALT */ 158/* Extended Query Structure for both PRI and ALT */
159 159
@@ -161,7 +161,7 @@ struct cfi_extquery {
161 uint8_t pri[3]; 161 uint8_t pri[3];
162 uint8_t MajorVersion; 162 uint8_t MajorVersion;
163 uint8_t MinorVersion; 163 uint8_t MinorVersion;
164} __attribute__((packed)); 164} __packed;
165 165
166/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */ 166/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
167 167
@@ -180,7 +180,7 @@ struct cfi_pri_intelext {
180 uint8_t FactProtRegSize; 180 uint8_t FactProtRegSize;
181 uint8_t UserProtRegSize; 181 uint8_t UserProtRegSize;
182 uint8_t extra[0]; 182 uint8_t extra[0];
183} __attribute__((packed)); 183} __packed;
184 184
185struct cfi_intelext_otpinfo { 185struct cfi_intelext_otpinfo {
186 uint32_t ProtRegAddr; 186 uint32_t ProtRegAddr;
@@ -188,7 +188,7 @@ struct cfi_intelext_otpinfo {
188 uint8_t FactProtRegSize; 188 uint8_t FactProtRegSize;
189 uint16_t UserGroups; 189 uint16_t UserGroups;
190 uint8_t UserProtRegSize; 190 uint8_t UserProtRegSize;
191} __attribute__((packed)); 191} __packed;
192 192
193struct cfi_intelext_blockinfo { 193struct cfi_intelext_blockinfo {
194 uint16_t NumIdentBlocks; 194 uint16_t NumIdentBlocks;
@@ -196,7 +196,7 @@ struct cfi_intelext_blockinfo {
196 uint16_t MinBlockEraseCycles; 196 uint16_t MinBlockEraseCycles;
197 uint8_t BitsPerCell; 197 uint8_t BitsPerCell;
198 uint8_t BlockCap; 198 uint8_t BlockCap;
199} __attribute__((packed)); 199} __packed;
200 200
201struct cfi_intelext_regioninfo { 201struct cfi_intelext_regioninfo {
202 uint16_t NumIdentPartitions; 202 uint16_t NumIdentPartitions;
@@ -205,7 +205,7 @@ struct cfi_intelext_regioninfo {
205 uint8_t NumOpAllowedSimEraMode; 205 uint8_t NumOpAllowedSimEraMode;
206 uint8_t NumBlockTypes; 206 uint8_t NumBlockTypes;
207 struct cfi_intelext_blockinfo BlockTypes[1]; 207 struct cfi_intelext_blockinfo BlockTypes[1];
208} __attribute__((packed)); 208} __packed;
209 209
210struct cfi_intelext_programming_regioninfo { 210struct cfi_intelext_programming_regioninfo {
211 uint8_t ProgRegShift; 211 uint8_t ProgRegShift;
@@ -214,7 +214,7 @@ struct cfi_intelext_programming_regioninfo {
214 uint8_t Reserved2; 214 uint8_t Reserved2;
215 uint8_t ControlInvalid; 215 uint8_t ControlInvalid;
216 uint8_t Reserved3; 216 uint8_t Reserved3;
217} __attribute__((packed)); 217} __packed;
218 218
219/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */ 219/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
220 220
@@ -233,7 +233,7 @@ struct cfi_pri_amdstd {
233 uint8_t VppMin; 233 uint8_t VppMin;
234 uint8_t VppMax; 234 uint8_t VppMax;
235 uint8_t TopBottom; 235 uint8_t TopBottom;
236} __attribute__((packed)); 236} __packed;
237 237
238/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */ 238/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
239 239
@@ -245,18 +245,18 @@ struct cfi_pri_atmel {
245 uint8_t BottomBoot; 245 uint8_t BottomBoot;
246 uint8_t BurstMode; 246 uint8_t BurstMode;
247 uint8_t PageMode; 247 uint8_t PageMode;
248} __attribute__((packed)); 248} __packed;
249 249
250struct cfi_pri_query { 250struct cfi_pri_query {
251 uint8_t NumFields; 251 uint8_t NumFields;
252 uint32_t ProtField[1]; /* Not host ordered */ 252 uint32_t ProtField[1]; /* Not host ordered */
253} __attribute__((packed)); 253} __packed;
254 254
255struct cfi_bri_query { 255struct cfi_bri_query {
256 uint8_t PageModeReadCap; 256 uint8_t PageModeReadCap;
257 uint8_t NumFields; 257 uint8_t NumFields;
258 uint32_t ConfField[1]; /* Not host ordered */ 258 uint32_t ConfField[1]; /* Not host ordered */
259} __attribute__((packed)); 259} __packed;
260 260
261#define P_ID_NONE 0x0000 261#define P_ID_NONE 0x0000
262#define P_ID_INTEL_EXT 0x0001 262#define P_ID_INTEL_EXT 0x0001
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index c300db3ae285..3d4ea7eb2b68 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -455,8 +455,21 @@ struct nand_hw_control {
455 * be provided if an hardware ECC is available 455 * be provided if an hardware ECC is available
456 * @calculate: function for ECC calculation or readback from ECC hardware 456 * @calculate: function for ECC calculation or readback from ECC hardware
457 * @correct: function for ECC correction, matching to ECC generator (sw/hw) 457 * @correct: function for ECC correction, matching to ECC generator (sw/hw)
458 * @read_page_raw: function to read a raw page without ECC 458 * @read_page_raw: function to read a raw page without ECC. This function
459 * @write_page_raw: function to write a raw page without ECC 459 * should hide the specific layout used by the ECC
460 * controller and always return contiguous in-band and
461 * out-of-band data even if they're not stored
462 * contiguously on the NAND chip (e.g.
463 * NAND_ECC_HW_SYNDROME interleaves in-band and
464 * out-of-band data).
465 * @write_page_raw: function to write a raw page without ECC. This function
466 * should hide the specific layout used by the ECC
467 * controller and consider the passed data as contiguous
468 * in-band and out-of-band data. ECC controller is
469 * responsible for doing the appropriate transformations
470 * to adapt to its specific layout (e.g.
471 * NAND_ECC_HW_SYNDROME interleaves in-band and
472 * out-of-band data).
460 * @read_page: function to read a page according to the ECC generator 473 * @read_page: function to read a page according to the ECC generator
461 * requirements; returns maximum number of bitflips corrected in 474 * requirements; returns maximum number of bitflips corrected in
462 * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error 475 * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
@@ -587,6 +600,11 @@ struct nand_buffers {
587 * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds, 600 * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds,
588 * also from the datasheet. It is the recommended ECC step 601 * also from the datasheet. It is the recommended ECC step
589 * size, if known; if unknown, set to zero. 602 * size, if known; if unknown, set to zero.
603 * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is
604 * either deduced from the datasheet if the NAND
605 * chip is not ONFI compliant or set to 0 if it is
606 * (an ONFI chip is always configured in mode 0
607 * after a NAND reset)
590 * @numchips: [INTERN] number of physical chips 608 * @numchips: [INTERN] number of physical chips
591 * @chipsize: [INTERN] the size of one chip for multichip arrays 609 * @chipsize: [INTERN] the size of one chip for multichip arrays
592 * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 610 * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
@@ -671,6 +689,7 @@ struct nand_chip {
671 uint8_t bits_per_cell; 689 uint8_t bits_per_cell;
672 uint16_t ecc_strength_ds; 690 uint16_t ecc_strength_ds;
673 uint16_t ecc_step_ds; 691 uint16_t ecc_step_ds;
692 int onfi_timing_mode_default;
674 int badblockpos; 693 int badblockpos;
675 int badblockbits; 694 int badblockbits;
676 695
@@ -717,6 +736,7 @@ struct nand_chip {
717#define NAND_MFR_EON 0x92 736#define NAND_MFR_EON 0x92
718#define NAND_MFR_SANDISK 0x45 737#define NAND_MFR_SANDISK 0x45
719#define NAND_MFR_INTEL 0x89 738#define NAND_MFR_INTEL 0x89
739#define NAND_MFR_ATO 0x9b
720 740
721/* The maximum expected count of bytes in the NAND ID sequence */ 741/* The maximum expected count of bytes in the NAND ID sequence */
722#define NAND_MAX_ID_LEN 8 742#define NAND_MAX_ID_LEN 8
@@ -766,12 +786,17 @@ struct nand_chip {
766 * @options: stores various chip bit options 786 * @options: stores various chip bit options
767 * @id_len: The valid length of the @id. 787 * @id_len: The valid length of the @id.
768 * @oobsize: OOB size 788 * @oobsize: OOB size
789 * @ecc: ECC correctability and step information from the datasheet.
769 * @ecc.strength_ds: The ECC correctability from the datasheet, same as the 790 * @ecc.strength_ds: The ECC correctability from the datasheet, same as the
770 * @ecc_strength_ds in nand_chip{}. 791 * @ecc_strength_ds in nand_chip{}.
771 * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the 792 * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
772 * @ecc_step_ds in nand_chip{}, also from the datasheet. 793 * @ecc_step_ds in nand_chip{}, also from the datasheet.
773 * For example, the "4bit ECC for each 512Byte" can be set with 794 * For example, the "4bit ECC for each 512Byte" can be set with
774 * NAND_ECC_INFO(4, 512). 795 * NAND_ECC_INFO(4, 512).
796 * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND
797 * reset. Should be deduced from timings described
798 * in the datasheet.
799 *
775 */ 800 */
776struct nand_flash_dev { 801struct nand_flash_dev {
777 char *name; 802 char *name;
@@ -792,6 +817,7 @@ struct nand_flash_dev {
792 uint16_t strength_ds; 817 uint16_t strength_ds;
793 uint16_t step_ds; 818 uint16_t step_ds;
794 } ecc; 819 } ecc;
820 int onfi_timing_mode_default;
795}; 821};
796 822
797/** 823/**
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 9e6294f32ba8..63aeccf9ddc8 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -116,6 +116,10 @@ enum spi_nor_ops {
116 SPI_NOR_OPS_UNLOCK, 116 SPI_NOR_OPS_UNLOCK,
117}; 117};
118 118
119enum spi_nor_option_flags {
120 SNOR_F_USE_FSR = BIT(0),
121};
122
119/** 123/**
120 * struct spi_nor - Structure for defining a the SPI NOR layer 124 * struct spi_nor - Structure for defining a the SPI NOR layer
121 * @mtd: point to a mtd_info structure 125 * @mtd: point to a mtd_info structure
@@ -129,6 +133,7 @@ enum spi_nor_ops {
129 * @program_opcode: the program opcode 133 * @program_opcode: the program opcode
130 * @flash_read: the mode of the read 134 * @flash_read: the mode of the read
131 * @sst_write_second: used by the SST write operation 135 * @sst_write_second: used by the SST write operation
136 * @flags: flag options for the current SPI-NOR (SNOR_F_*)
132 * @cfg: used by the read_xfer/write_xfer 137 * @cfg: used by the read_xfer/write_xfer
133 * @cmd_buf: used by the write_reg 138 * @cmd_buf: used by the write_reg
134 * @prepare: [OPTIONAL] do some preparations for the 139 * @prepare: [OPTIONAL] do some preparations for the
@@ -139,9 +144,6 @@ enum spi_nor_ops {
139 * @write_xfer: [OPTIONAL] the writefundamental primitive 144 * @write_xfer: [OPTIONAL] the writefundamental primitive
140 * @read_reg: [DRIVER-SPECIFIC] read out the register 145 * @read_reg: [DRIVER-SPECIFIC] read out the register
141 * @write_reg: [DRIVER-SPECIFIC] write data to the register 146 * @write_reg: [DRIVER-SPECIFIC] write data to the register
142 * @read_id: [REPLACEABLE] read out the ID data, and find
143 * the proper spi_device_id
144 * @wait_till_ready: [REPLACEABLE] wait till the NOR becomes ready
145 * @read: [DRIVER-SPECIFIC] read data from the SPI NOR 147 * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
146 * @write: [DRIVER-SPECIFIC] write data to the SPI NOR 148 * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
147 * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR 149 * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
@@ -160,6 +162,7 @@ struct spi_nor {
160 u8 program_opcode; 162 u8 program_opcode;
161 enum read_mode flash_read; 163 enum read_mode flash_read;
162 bool sst_write_second; 164 bool sst_write_second;
165 u32 flags;
163 struct spi_nor_xfer_cfg cfg; 166 struct spi_nor_xfer_cfg cfg;
164 u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; 167 u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE];
165 168
@@ -172,8 +175,6 @@ struct spi_nor {
172 int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); 175 int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
173 int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len, 176 int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len,
174 int write_enable); 177 int write_enable);
175 const struct spi_device_id *(*read_id)(struct spi_nor *nor);
176 int (*wait_till_ready)(struct spi_nor *nor);
177 178
178 int (*read)(struct spi_nor *nor, loff_t from, 179 int (*read)(struct spi_nor *nor, loff_t from,
179 size_t len, size_t *retlen, u_char *read_buf); 180 size_t len, size_t *retlen, u_char *read_buf);
@@ -187,32 +188,17 @@ struct spi_nor {
187/** 188/**
188 * spi_nor_scan() - scan the SPI NOR 189 * spi_nor_scan() - scan the SPI NOR
189 * @nor: the spi_nor structure 190 * @nor: the spi_nor structure
190 * @id: the spi_device_id provided by the driver 191 * @name: the chip type name
191 * @mode: the read mode supported by the driver 192 * @mode: the read mode supported by the driver
192 * 193 *
193 * The drivers can use this fuction to scan the SPI NOR. 194 * The drivers can use this fuction to scan the SPI NOR.
194 * In the scanning, it will try to get all the necessary information to 195 * In the scanning, it will try to get all the necessary information to
195 * fill the mtd_info{} and the spi_nor{}. 196 * fill the mtd_info{} and the spi_nor{}.
196 * 197 *
197 * The board may assigns a spi_device_id with @id which be used to compared with 198 * The chip type name can be provided through the @name parameter.
198 * the spi_device_id detected by the scanning.
199 * 199 *
200 * Return: 0 for success, others for failure. 200 * Return: 0 for success, others for failure.
201 */ 201 */
202int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, 202int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode);
203 enum read_mode mode);
204extern const struct spi_device_id spi_nor_ids[];
205
206/**
207 * spi_nor_match_id() - find the spi_device_id by the name
208 * @name: the name of the spi_device_id
209 *
210 * The drivers use this function to find the spi_device_id
211 * specified by the @name.
212 *
213 * Return: returns the right spi_device_id pointer on success,
214 * and returns NULL on failure.
215 */
216const struct spi_device_id *spi_nor_match_id(char *name);
217 203
218#endif 204#endif
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 8d5535c58cc2..cc31498fc526 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -52,7 +52,7 @@ struct mutex {
52 atomic_t count; 52 atomic_t count;
53 spinlock_t wait_lock; 53 spinlock_t wait_lock;
54 struct list_head wait_list; 54 struct list_head wait_list;
55#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) 55#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
56 struct task_struct *owner; 56 struct task_struct *owner;
57#endif 57#endif
58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
@@ -133,7 +133,7 @@ static inline int mutex_is_locked(struct mutex *lock)
133 133
134/* 134/*
135 * See kernel/locking/mutex.c for detailed documentation of these APIs. 135 * See kernel/locking/mutex.c for detailed documentation of these APIs.
136 * Also see Documentation/mutex-design.txt. 136 * Also see Documentation/locking/mutex-design.txt.
137 */ 137 */
138#ifdef CONFIG_DEBUG_LOCK_ALLOC 138#ifdef CONFIG_DEBUG_LOCK_ALLOC
139extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass); 139extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 492de72560fa..c8990779f0c3 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -7,21 +7,10 @@
7#include <linux/path.h> 7#include <linux/path.h>
8 8
9struct vfsmount; 9struct vfsmount;
10struct nameidata;
10 11
11enum { MAX_NESTED_LINKS = 8 }; 12enum { MAX_NESTED_LINKS = 8 };
12 13
13struct nameidata {
14 struct path path;
15 struct qstr last;
16 struct path root;
17 struct inode *inode; /* path.dentry.d_inode */
18 unsigned int flags;
19 unsigned seq, m_seq;
20 int last_type;
21 unsigned depth;
22 char *saved_names[MAX_NESTED_LINKS + 1];
23};
24
25/* 14/*
26 * Type of the last component on LOOKUP_PARENT 15 * Type of the last component on LOOKUP_PARENT
27 */ 16 */
@@ -82,16 +71,8 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
82extern void unlock_rename(struct dentry *, struct dentry *); 71extern void unlock_rename(struct dentry *, struct dentry *);
83 72
84extern void nd_jump_link(struct nameidata *nd, struct path *path); 73extern void nd_jump_link(struct nameidata *nd, struct path *path);
85 74extern void nd_set_link(struct nameidata *nd, char *path);
86static inline void nd_set_link(struct nameidata *nd, char *path) 75extern char *nd_get_link(struct nameidata *nd);
87{
88 nd->saved_names[nd->depth] = path;
89}
90
91static inline char *nd_get_link(struct nameidata *nd)
92{
93 return nd->saved_names[nd->depth];
94}
95 76
96static inline void nd_terminate_link(void *name, size_t len, size_t maxlen) 77static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
97{ 78{
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index dcfdecbfa0b7..8e30685affeb 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -47,9 +47,9 @@ enum {
47 NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */ 47 NETIF_F_GSO_SIT_BIT, /* ... SIT tunnel with TSO */
48 NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */ 48 NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
49 NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */ 49 NETIF_F_GSO_UDP_TUNNEL_CSUM_BIT,/* ... UDP TUNNEL with TSO & CSUM */
50 NETIF_F_GSO_MPLS_BIT, /* ... MPLS segmentation */ 50 NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
51 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ 51 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
52 NETIF_F_GSO_MPLS_BIT, 52 NETIF_F_GSO_TUNNEL_REMCSUM_BIT,
53 53
54 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ 54 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
55 NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ 55 NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */
@@ -118,7 +118,7 @@ enum {
118#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT) 118#define NETIF_F_GSO_SIT __NETIF_F(GSO_SIT)
119#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL) 119#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
120#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM) 120#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
121#define NETIF_F_GSO_MPLS __NETIF_F(GSO_MPLS) 121#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
122#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) 122#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
123#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) 123#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
124#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) 124#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
@@ -181,7 +181,6 @@ enum {
181 NETIF_F_GSO_IPIP | \ 181 NETIF_F_GSO_IPIP | \
182 NETIF_F_GSO_SIT | \ 182 NETIF_F_GSO_SIT | \
183 NETIF_F_GSO_UDP_TUNNEL | \ 183 NETIF_F_GSO_UDP_TUNNEL | \
184 NETIF_F_GSO_UDP_TUNNEL_CSUM | \ 184 NETIF_F_GSO_UDP_TUNNEL_CSUM)
185 NETIF_F_GSO_MPLS)
186 185
187#endif /* _LINUX_NETDEV_FEATURES_H */ 186#endif /* _LINUX_NETDEV_FEATURES_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c8e388e5fccc..679e6e90aa4c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -30,6 +30,7 @@
30#include <linux/bug.h> 30#include <linux/bug.h>
31#include <linux/delay.h> 31#include <linux/delay.h>
32#include <linux/atomic.h> 32#include <linux/atomic.h>
33#include <linux/prefetch.h>
33#include <asm/cache.h> 34#include <asm/cache.h>
34#include <asm/byteorder.h> 35#include <asm/byteorder.h>
35 36
@@ -56,6 +57,8 @@ struct device;
56struct phy_device; 57struct phy_device;
57/* 802.11 specific */ 58/* 802.11 specific */
58struct wireless_dev; 59struct wireless_dev;
60/* 802.15.4 specific */
61struct wpan_dev;
59 62
60void netdev_set_default_ethtool_ops(struct net_device *dev, 63void netdev_set_default_ethtool_ops(struct net_device *dev,
61 const struct ethtool_ops *ops); 64 const struct ethtool_ops *ops);
@@ -313,6 +316,7 @@ struct napi_struct {
313 struct net_device *dev; 316 struct net_device *dev;
314 struct sk_buff *gro_list; 317 struct sk_buff *gro_list;
315 struct sk_buff *skb; 318 struct sk_buff *skb;
319 struct hrtimer timer;
316 struct list_head dev_list; 320 struct list_head dev_list;
317 struct hlist_node napi_hash_node; 321 struct hlist_node napi_hash_node;
318 unsigned int napi_id; 322 unsigned int napi_id;
@@ -385,6 +389,7 @@ typedef enum rx_handler_result rx_handler_result_t;
385typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); 389typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
386 390
387void __napi_schedule(struct napi_struct *n); 391void __napi_schedule(struct napi_struct *n);
392void __napi_schedule_irqoff(struct napi_struct *n);
388 393
389static inline bool napi_disable_pending(struct napi_struct *n) 394static inline bool napi_disable_pending(struct napi_struct *n)
390{ 395{
@@ -419,6 +424,18 @@ static inline void napi_schedule(struct napi_struct *n)
419 __napi_schedule(n); 424 __napi_schedule(n);
420} 425}
421 426
427/**
428 * napi_schedule_irqoff - schedule NAPI poll
429 * @n: napi context
430 *
431 * Variant of napi_schedule(), assuming hard irqs are masked.
432 */
433static inline void napi_schedule_irqoff(struct napi_struct *n)
434{
435 if (napi_schedule_prep(n))
436 __napi_schedule_irqoff(n);
437}
438
422/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ 439/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
423static inline bool napi_reschedule(struct napi_struct *napi) 440static inline bool napi_reschedule(struct napi_struct *napi)
424{ 441{
@@ -429,14 +446,19 @@ static inline bool napi_reschedule(struct napi_struct *napi)
429 return false; 446 return false;
430} 447}
431 448
449void __napi_complete(struct napi_struct *n);
450void napi_complete_done(struct napi_struct *n, int work_done);
432/** 451/**
433 * napi_complete - NAPI processing complete 452 * napi_complete - NAPI processing complete
434 * @n: napi context 453 * @n: napi context
435 * 454 *
436 * Mark NAPI processing as complete. 455 * Mark NAPI processing as complete.
456 * Consider using napi_complete_done() instead.
437 */ 457 */
438void __napi_complete(struct napi_struct *n); 458static inline void napi_complete(struct napi_struct *n)
439void napi_complete(struct napi_struct *n); 459{
460 return napi_complete_done(n, 0);
461}
440 462
441/** 463/**
442 * napi_by_id - lookup a NAPI by napi_id 464 * napi_by_id - lookup a NAPI by napi_id
@@ -471,14 +493,7 @@ void napi_hash_del(struct napi_struct *napi);
471 * Stop NAPI from being scheduled on this context. 493 * Stop NAPI from being scheduled on this context.
472 * Waits till any outstanding processing completes. 494 * Waits till any outstanding processing completes.
473 */ 495 */
474static inline void napi_disable(struct napi_struct *n) 496void napi_disable(struct napi_struct *n);
475{
476 might_sleep();
477 set_bit(NAPI_STATE_DISABLE, &n->state);
478 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
479 msleep(1);
480 clear_bit(NAPI_STATE_DISABLE, &n->state);
481}
482 497
483/** 498/**
484 * napi_enable - enable NAPI scheduling 499 * napi_enable - enable NAPI scheduling
@@ -543,7 +558,7 @@ struct netdev_queue {
543 * read mostly part 558 * read mostly part
544 */ 559 */
545 struct net_device *dev; 560 struct net_device *dev;
546 struct Qdisc *qdisc; 561 struct Qdisc __rcu *qdisc;
547 struct Qdisc *qdisc_sleeping; 562 struct Qdisc *qdisc_sleeping;
548#ifdef CONFIG_SYSFS 563#ifdef CONFIG_SYSFS
549 struct kobject kobj; 564 struct kobject kobj;
@@ -739,13 +754,13 @@ struct netdev_fcoe_hbainfo {
739}; 754};
740#endif 755#endif
741 756
742#define MAX_PHYS_PORT_ID_LEN 32 757#define MAX_PHYS_ITEM_ID_LEN 32
743 758
744/* This structure holds a unique identifier to identify the 759/* This structure holds a unique identifier to identify some
745 * physical port used by a netdevice. 760 * physical item (port for example) used by a netdevice.
746 */ 761 */
747struct netdev_phys_port_id { 762struct netdev_phys_item_id {
748 unsigned char id[MAX_PHYS_PORT_ID_LEN]; 763 unsigned char id[MAX_PHYS_ITEM_ID_LEN];
749 unsigned char id_len; 764 unsigned char id_len;
750}; 765};
751 766
@@ -936,11 +951,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
936 * 951 *
937 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], 952 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
938 * struct net_device *dev, 953 * struct net_device *dev,
939 * const unsigned char *addr, u16 flags) 954 * const unsigned char *addr, u16 vid, u16 flags)
940 * Adds an FDB entry to dev for addr. 955 * Adds an FDB entry to dev for addr.
941 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], 956 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
942 * struct net_device *dev, 957 * struct net_device *dev,
943 * const unsigned char *addr) 958 * const unsigned char *addr, u16 vid)
944 * Deletes the FDB entry from dev coresponding to addr. 959 * Deletes the FDB entry from dev coresponding to addr.
945 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, 960 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
946 * struct net_device *dev, struct net_device *filter_dev, 961 * struct net_device *dev, struct net_device *filter_dev,
@@ -961,7 +976,7 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
961 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. 976 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
962 * 977 *
963 * int (*ndo_get_phys_port_id)(struct net_device *dev, 978 * int (*ndo_get_phys_port_id)(struct net_device *dev,
964 * struct netdev_phys_port_id *ppid); 979 * struct netdev_phys_item_id *ppid);
965 * Called to get ID of physical port of this device. If driver does 980 * Called to get ID of physical port of this device. If driver does
966 * not implement this, it is assumed that the hw is not able to have 981 * not implement this, it is assumed that the hw is not able to have
967 * multiple net devices on single physical port. 982 * multiple net devices on single physical port.
@@ -997,6 +1012,24 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
997 * Callback to use for xmit over the accelerated station. This 1012 * Callback to use for xmit over the accelerated station. This
998 * is used in place of ndo_start_xmit on accelerated net 1013 * is used in place of ndo_start_xmit on accelerated net
999 * devices. 1014 * devices.
1015 * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
1016 * struct net_device *dev
1017 * netdev_features_t features);
1018 * Called by core transmit path to determine if device is capable of
1019 * performing offload operations on a given packet. This is to give
1020 * the device an opportunity to implement any restrictions that cannot
1021 * be otherwise expressed by feature flags. The check is called with
1022 * the set of features that the stack has calculated and it returns
1023 * those the driver believes to be appropriate.
1024 *
1025 * int (*ndo_switch_parent_id_get)(struct net_device *dev,
1026 * struct netdev_phys_item_id *psid);
1027 * Called to get an ID of the switch chip this port is part of.
1028 * If driver implements this, it indicates that it represents a port
1029 * of a switch chip.
1030 * int (*ndo_switch_port_stp_update)(struct net_device *dev, u8 state);
1031 * Called to notify switch device port of bridge port STP
1032 * state change.
1000 */ 1033 */
1001struct net_device_ops { 1034struct net_device_ops {
1002 int (*ndo_init)(struct net_device *dev); 1035 int (*ndo_init)(struct net_device *dev);
@@ -1107,11 +1140,13 @@ struct net_device_ops {
1107 struct nlattr *tb[], 1140 struct nlattr *tb[],
1108 struct net_device *dev, 1141 struct net_device *dev,
1109 const unsigned char *addr, 1142 const unsigned char *addr,
1143 u16 vid,
1110 u16 flags); 1144 u16 flags);
1111 int (*ndo_fdb_del)(struct ndmsg *ndm, 1145 int (*ndo_fdb_del)(struct ndmsg *ndm,
1112 struct nlattr *tb[], 1146 struct nlattr *tb[],
1113 struct net_device *dev, 1147 struct net_device *dev,
1114 const unsigned char *addr); 1148 const unsigned char *addr,
1149 u16 vid);
1115 int (*ndo_fdb_dump)(struct sk_buff *skb, 1150 int (*ndo_fdb_dump)(struct sk_buff *skb,
1116 struct netlink_callback *cb, 1151 struct netlink_callback *cb,
1117 struct net_device *dev, 1152 struct net_device *dev,
@@ -1129,7 +1164,7 @@ struct net_device_ops {
1129 int (*ndo_change_carrier)(struct net_device *dev, 1164 int (*ndo_change_carrier)(struct net_device *dev,
1130 bool new_carrier); 1165 bool new_carrier);
1131 int (*ndo_get_phys_port_id)(struct net_device *dev, 1166 int (*ndo_get_phys_port_id)(struct net_device *dev,
1132 struct netdev_phys_port_id *ppid); 1167 struct netdev_phys_item_id *ppid);
1133 void (*ndo_add_vxlan_port)(struct net_device *dev, 1168 void (*ndo_add_vxlan_port)(struct net_device *dev,
1134 sa_family_t sa_family, 1169 sa_family_t sa_family,
1135 __be16 port); 1170 __be16 port);
@@ -1146,6 +1181,15 @@ struct net_device_ops {
1146 struct net_device *dev, 1181 struct net_device *dev,
1147 void *priv); 1182 void *priv);
1148 int (*ndo_get_lock_subclass)(struct net_device *dev); 1183 int (*ndo_get_lock_subclass)(struct net_device *dev);
1184 netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
1185 struct net_device *dev,
1186 netdev_features_t features);
1187#ifdef CONFIG_NET_SWITCHDEV
1188 int (*ndo_switch_parent_id_get)(struct net_device *dev,
1189 struct netdev_phys_item_id *psid);
1190 int (*ndo_switch_port_stp_update)(struct net_device *dev,
1191 u8 state);
1192#endif
1149}; 1193};
1150 1194
1151/** 1195/**
@@ -1206,6 +1250,9 @@ enum netdev_priv_flags {
1206 IFF_SUPP_NOFCS = 1<<19, 1250 IFF_SUPP_NOFCS = 1<<19,
1207 IFF_LIVE_ADDR_CHANGE = 1<<20, 1251 IFF_LIVE_ADDR_CHANGE = 1<<20,
1208 IFF_MACVLAN = 1<<21, 1252 IFF_MACVLAN = 1<<21,
1253 IFF_XMIT_DST_RELEASE_PERM = 1<<22,
1254 IFF_IPVLAN_MASTER = 1<<23,
1255 IFF_IPVLAN_SLAVE = 1<<24,
1209}; 1256};
1210 1257
1211#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN 1258#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
@@ -1230,6 +1277,9 @@ enum netdev_priv_flags {
1230#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS 1277#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
1231#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE 1278#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
1232#define IFF_MACVLAN IFF_MACVLAN 1279#define IFF_MACVLAN IFF_MACVLAN
1280#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM
1281#define IFF_IPVLAN_MASTER IFF_IPVLAN_MASTER
1282#define IFF_IPVLAN_SLAVE IFF_IPVLAN_SLAVE
1233 1283
1234/** 1284/**
1235 * struct net_device - The DEVICE structure. 1285 * struct net_device - The DEVICE structure.
@@ -1416,6 +1466,8 @@ enum netdev_priv_flags {
1416 * @gso_max_size: Maximum size of generic segmentation offload 1466 * @gso_max_size: Maximum size of generic segmentation offload
1417 * @gso_max_segs: Maximum number of segments that can be passed to the 1467 * @gso_max_segs: Maximum number of segments that can be passed to the
1418 * NIC for GSO 1468 * NIC for GSO
1469 * @gso_min_segs: Minimum number of segments that can be passed to the
1470 * NIC for GSO
1419 * 1471 *
1420 * @dcbnl_ops: Data Center Bridging netlink ops 1472 * @dcbnl_ops: Data Center Bridging netlink ops
1421 * @num_tc: Number of traffic classes in the net device 1473 * @num_tc: Number of traffic classes in the net device
@@ -1559,6 +1611,7 @@ struct net_device {
1559 struct inet6_dev __rcu *ip6_ptr; 1611 struct inet6_dev __rcu *ip6_ptr;
1560 void *ax25_ptr; 1612 void *ax25_ptr;
1561 struct wireless_dev *ieee80211_ptr; 1613 struct wireless_dev *ieee80211_ptr;
1614 struct wpan_dev *ieee802154_ptr;
1562 1615
1563/* 1616/*
1564 * Cache lines mostly used on receive path (including eth_type_trans()) 1617 * Cache lines mostly used on receive path (including eth_type_trans())
@@ -1577,6 +1630,7 @@ struct net_device {
1577 1630
1578#endif 1631#endif
1579 1632
1633 unsigned long gro_flush_timeout;
1580 rx_handler_func_t __rcu *rx_handler; 1634 rx_handler_func_t __rcu *rx_handler;
1581 void __rcu *rx_handler_data; 1635 void __rcu *rx_handler_data;
1582 1636
@@ -1666,7 +1720,7 @@ struct net_device {
1666 unsigned int gso_max_size; 1720 unsigned int gso_max_size;
1667#define GSO_MAX_SEGS 65535 1721#define GSO_MAX_SEGS 65535
1668 u16 gso_max_segs; 1722 u16 gso_max_segs;
1669 1723 u16 gso_min_segs;
1670#ifdef CONFIG_DCB 1724#ifdef CONFIG_DCB
1671 const struct dcbnl_rtnl_ops *dcbnl_ops; 1725 const struct dcbnl_rtnl_ops *dcbnl_ops;
1672#endif 1726#endif
@@ -1747,6 +1801,12 @@ struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1747 return &dev->_tx[index]; 1801 return &dev->_tx[index];
1748} 1802}
1749 1803
1804static inline struct netdev_queue *skb_get_tx_queue(const struct net_device *dev,
1805 const struct sk_buff *skb)
1806{
1807 return netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
1808}
1809
1750static inline void netdev_for_each_tx_queue(struct net_device *dev, 1810static inline void netdev_for_each_tx_queue(struct net_device *dev,
1751 void (*f)(struct net_device *, 1811 void (*f)(struct net_device *,
1752 struct netdev_queue *, 1812 struct netdev_queue *,
@@ -1781,24 +1841,13 @@ void dev_net_set(struct net_device *dev, struct net *net)
1781#endif 1841#endif
1782} 1842}
1783 1843
1784static inline bool netdev_uses_dsa_tags(struct net_device *dev) 1844static inline bool netdev_uses_dsa(struct net_device *dev)
1785{
1786#ifdef CONFIG_NET_DSA_TAG_DSA
1787 if (dev->dsa_ptr != NULL)
1788 return dsa_uses_dsa_tags(dev->dsa_ptr);
1789#endif
1790
1791 return 0;
1792}
1793
1794static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1795{ 1845{
1796#ifdef CONFIG_NET_DSA_TAG_TRAILER 1846#if IS_ENABLED(CONFIG_NET_DSA)
1797 if (dev->dsa_ptr != NULL) 1847 if (dev->dsa_ptr != NULL)
1798 return dsa_uses_trailer_tags(dev->dsa_ptr); 1848 return dsa_uses_tagged_protocol(dev->dsa_ptr);
1799#endif 1849#endif
1800 1850 return false;
1801 return 0;
1802} 1851}
1803 1852
1804/** 1853/**
@@ -1879,11 +1928,20 @@ struct napi_gro_cb {
1879 /* jiffies when first packet was created/queued */ 1928 /* jiffies when first packet was created/queued */
1880 unsigned long age; 1929 unsigned long age;
1881 1930
1882 /* Used in ipv6_gro_receive() */ 1931 /* Used in ipv6_gro_receive() and foo-over-udp */
1883 u16 proto; 1932 u16 proto;
1884 1933
1885 /* Used in udp_gro_receive */ 1934 /* Used in udp_gro_receive */
1886 u16 udp_mark; 1935 u8 udp_mark:1;
1936
1937 /* GRO checksum is valid */
1938 u8 csum_valid:1;
1939
1940 /* Number of checksums via CHECKSUM_UNNECESSARY */
1941 u8 csum_cnt:3;
1942
1943 /* Used in foo-over-udp, set in udp[46]_gro_receive */
1944 u8 is_ipv6:1;
1887 1945
1888 /* used to support CHECKSUM_COMPLETE for tunneling protocols */ 1946 /* used to support CHECKSUM_COMPLETE for tunneling protocols */
1889 __wsum csum; 1947 __wsum csum;
@@ -1910,7 +1968,6 @@ struct packet_type {
1910struct offload_callbacks { 1968struct offload_callbacks {
1911 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 1969 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1912 netdev_features_t features); 1970 netdev_features_t features);
1913 int (*gso_send_check)(struct sk_buff *skb);
1914 struct sk_buff **(*gro_receive)(struct sk_buff **head, 1971 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1915 struct sk_buff *skb); 1972 struct sk_buff *skb);
1916 int (*gro_complete)(struct sk_buff *skb, int nhoff); 1973 int (*gro_complete)(struct sk_buff *skb, int nhoff);
@@ -1924,6 +1981,7 @@ struct packet_offload {
1924 1981
1925struct udp_offload { 1982struct udp_offload {
1926 __be16 port; 1983 __be16 port;
1984 u8 ipproto;
1927 struct offload_callbacks callbacks; 1985 struct offload_callbacks callbacks;
1928}; 1986};
1929 1987
@@ -1982,6 +2040,7 @@ struct pcpu_sw_netstats {
1982#define NETDEV_CHANGEUPPER 0x0015 2040#define NETDEV_CHANGEUPPER 0x0015
1983#define NETDEV_RESEND_IGMP 0x0016 2041#define NETDEV_RESEND_IGMP 0x0016
1984#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */ 2042#define NETDEV_PRECHANGEMTU 0x0017 /* notify before mtu change happened */
2043#define NETDEV_CHANGEINFODATA 0x0018
1985 2044
1986int register_netdevice_notifier(struct notifier_block *nb); 2045int register_netdevice_notifier(struct notifier_block *nb);
1987int unregister_netdevice_notifier(struct notifier_block *nb); 2046int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -2074,8 +2133,8 @@ void __dev_remove_pack(struct packet_type *pt);
2074void dev_add_offload(struct packet_offload *po); 2133void dev_add_offload(struct packet_offload *po);
2075void dev_remove_offload(struct packet_offload *po); 2134void dev_remove_offload(struct packet_offload *po);
2076 2135
2077struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, 2136struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2078 unsigned short mask); 2137 unsigned short mask);
2079struct net_device *dev_get_by_name(struct net *net, const char *name); 2138struct net_device *dev_get_by_name(struct net *net, const char *name);
2080struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 2139struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
2081struct net_device *__dev_get_by_name(struct net *net, const char *name); 2140struct net_device *__dev_get_by_name(struct net *net, const char *name);
@@ -2153,11 +2212,97 @@ static inline void *skb_gro_network_header(struct sk_buff *skb)
2153static inline void skb_gro_postpull_rcsum(struct sk_buff *skb, 2212static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
2154 const void *start, unsigned int len) 2213 const void *start, unsigned int len)
2155{ 2214{
2156 if (skb->ip_summed == CHECKSUM_COMPLETE) 2215 if (NAPI_GRO_CB(skb)->csum_valid)
2157 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum, 2216 NAPI_GRO_CB(skb)->csum = csum_sub(NAPI_GRO_CB(skb)->csum,
2158 csum_partial(start, len, 0)); 2217 csum_partial(start, len, 0));
2159} 2218}
2160 2219
2220/* GRO checksum functions. These are logical equivalents of the normal
2221 * checksum functions (in skbuff.h) except that they operate on the GRO
2222 * offsets and fields in sk_buff.
2223 */
2224
2225__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);
2226
2227static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
2228 bool zero_okay,
2229 __sum16 check)
2230{
2231 return (skb->ip_summed != CHECKSUM_PARTIAL &&
2232 NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2233 (!zero_okay || check));
2234}
2235
2236static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
2237 __wsum psum)
2238{
2239 if (NAPI_GRO_CB(skb)->csum_valid &&
2240 !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
2241 return 0;
2242
2243 NAPI_GRO_CB(skb)->csum = psum;
2244
2245 return __skb_gro_checksum_complete(skb);
2246}
2247
2248static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
2249{
2250 if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
2251 /* Consume a checksum from CHECKSUM_UNNECESSARY */
2252 NAPI_GRO_CB(skb)->csum_cnt--;
2253 } else {
2254 /* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
2255 * verified a new top level checksum or an encapsulated one
2256 * during GRO. This saves work if we fallback to normal path.
2257 */
2258 __skb_incr_checksum_unnecessary(skb);
2259 }
2260}
2261
2262#define __skb_gro_checksum_validate(skb, proto, zero_okay, check, \
2263 compute_pseudo) \
2264({ \
2265 __sum16 __ret = 0; \
2266 if (__skb_gro_checksum_validate_needed(skb, zero_okay, check)) \
2267 __ret = __skb_gro_checksum_validate_complete(skb, \
2268 compute_pseudo(skb, proto)); \
2269 if (__ret) \
2270 __skb_mark_checksum_bad(skb); \
2271 else \
2272 skb_gro_incr_csum_unnecessary(skb); \
2273 __ret; \
2274})
2275
2276#define skb_gro_checksum_validate(skb, proto, compute_pseudo) \
2277 __skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)
2278
2279#define skb_gro_checksum_validate_zero_check(skb, proto, check, \
2280 compute_pseudo) \
2281 __skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)
2282
2283#define skb_gro_checksum_simple_validate(skb) \
2284 __skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
2285
2286static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
2287{
2288 return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
2289 !NAPI_GRO_CB(skb)->csum_valid);
2290}
2291
2292static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
2293 __sum16 check, __wsum pseudo)
2294{
2295 NAPI_GRO_CB(skb)->csum = ~pseudo;
2296 NAPI_GRO_CB(skb)->csum_valid = 1;
2297}
2298
2299#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
2300do { \
2301 if (__skb_gro_checksum_convert_check(skb)) \
2302 __skb_gro_checksum_convert(skb, check, \
2303 compute_pseudo(skb, proto)); \
2304} while (0)
2305
2161static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 2306static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
2162 unsigned short type, 2307 unsigned short type,
2163 const void *daddr, const void *saddr, 2308 const void *daddr, const void *saddr,
@@ -2212,10 +2357,7 @@ extern int netdev_flow_limit_table_len;
2212 * Incoming packets are placed on per-cpu queues 2357 * Incoming packets are placed on per-cpu queues
2213 */ 2358 */
2214struct softnet_data { 2359struct softnet_data {
2215 struct Qdisc *output_queue;
2216 struct Qdisc **output_queue_tailp;
2217 struct list_head poll_list; 2360 struct list_head poll_list;
2218 struct sk_buff *completion_queue;
2219 struct sk_buff_head process_queue; 2361 struct sk_buff_head process_queue;
2220 2362
2221 /* stats */ 2363 /* stats */
@@ -2223,10 +2365,17 @@ struct softnet_data {
2223 unsigned int time_squeeze; 2365 unsigned int time_squeeze;
2224 unsigned int cpu_collision; 2366 unsigned int cpu_collision;
2225 unsigned int received_rps; 2367 unsigned int received_rps;
2226
2227#ifdef CONFIG_RPS 2368#ifdef CONFIG_RPS
2228 struct softnet_data *rps_ipi_list; 2369 struct softnet_data *rps_ipi_list;
2370#endif
2371#ifdef CONFIG_NET_FLOW_LIMIT
2372 struct sd_flow_limit __rcu *flow_limit;
2373#endif
2374 struct Qdisc *output_queue;
2375 struct Qdisc **output_queue_tailp;
2376 struct sk_buff *completion_queue;
2229 2377
2378#ifdef CONFIG_RPS
2230 /* Elements below can be accessed between CPUs for RPS */ 2379 /* Elements below can be accessed between CPUs for RPS */
2231 struct call_single_data csd ____cacheline_aligned_in_smp; 2380 struct call_single_data csd ____cacheline_aligned_in_smp;
2232 struct softnet_data *rps_ipi_next; 2381 struct softnet_data *rps_ipi_next;
@@ -2238,9 +2387,6 @@ struct softnet_data {
2238 struct sk_buff_head input_pkt_queue; 2387 struct sk_buff_head input_pkt_queue;
2239 struct napi_struct backlog; 2388 struct napi_struct backlog;
2240 2389
2241#ifdef CONFIG_NET_FLOW_LIMIT
2242 struct sd_flow_limit __rcu *flow_limit;
2243#endif
2244}; 2390};
2245 2391
2246static inline void input_queue_head_incr(struct softnet_data *sd) 2392static inline void input_queue_head_incr(struct softnet_data *sd)
@@ -2261,12 +2407,7 @@ static inline void input_queue_tail_incr_save(struct softnet_data *sd,
2261DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 2407DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
2262 2408
2263void __netif_schedule(struct Qdisc *q); 2409void __netif_schedule(struct Qdisc *q);
2264 2410void netif_schedule_queue(struct netdev_queue *txq);
2265static inline void netif_schedule_queue(struct netdev_queue *txq)
2266{
2267 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
2268 __netif_schedule(txq->qdisc);
2269}
2270 2411
2271static inline void netif_tx_schedule_all(struct net_device *dev) 2412static inline void netif_tx_schedule_all(struct net_device *dev)
2272{ 2413{
@@ -2302,11 +2443,7 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
2302 } 2443 }
2303} 2444}
2304 2445
2305static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) 2446void netif_tx_wake_queue(struct netdev_queue *dev_queue);
2306{
2307 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
2308 __netif_schedule(dev_queue->qdisc);
2309}
2310 2447
2311/** 2448/**
2312 * netif_wake_queue - restart transmit 2449 * netif_wake_queue - restart transmit
@@ -2394,6 +2531,34 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
2394 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN; 2531 return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
2395} 2532}
2396 2533
2534/**
2535 * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
2536 * @dev_queue: pointer to transmit queue
2537 *
2538 * BQL enabled drivers might use this helper in their ndo_start_xmit(),
2539 * to give appropriate hint to the cpu.
2540 */
2541static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
2542{
2543#ifdef CONFIG_BQL
2544 prefetchw(&dev_queue->dql.num_queued);
2545#endif
2546}
2547
2548/**
2549 * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
2550 * @dev_queue: pointer to transmit queue
2551 *
2552 * BQL enabled drivers might use this helper in their TX completion path,
2553 * to give appropriate hint to the cpu.
2554 */
2555static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
2556{
2557#ifdef CONFIG_BQL
2558 prefetchw(&dev_queue->dql.limit);
2559#endif
2560}
2561
2397static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 2562static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2398 unsigned int bytes) 2563 unsigned int bytes)
2399{ 2564{
@@ -2578,19 +2743,7 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
2578 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 2743 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2579} 2744}
2580 2745
2581/** 2746void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
2582 * netif_wake_subqueue - allow sending packets on subqueue
2583 * @dev: network device
2584 * @queue_index: sub queue index
2585 *
2586 * Resume individual transmit queue of a device with multiple transmit queues.
2587 */
2588static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2589{
2590 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2591 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
2592 __netif_schedule(txq->qdisc);
2593}
2594 2747
2595#ifdef CONFIG_XPS 2748#ifdef CONFIG_XPS
2596int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, 2749int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
@@ -2637,23 +2790,6 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2637} 2790}
2638#endif 2791#endif
2639 2792
2640static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2641 const struct net_device *from_dev)
2642{
2643 int err;
2644
2645 err = netif_set_real_num_tx_queues(to_dev,
2646 from_dev->real_num_tx_queues);
2647 if (err)
2648 return err;
2649#ifdef CONFIG_SYSFS
2650 return netif_set_real_num_rx_queues(to_dev,
2651 from_dev->real_num_rx_queues);
2652#else
2653 return 0;
2654#endif
2655}
2656
2657#ifdef CONFIG_SYSFS 2793#ifdef CONFIG_SYSFS
2658static inline unsigned int get_netdev_rx_queue_index( 2794static inline unsigned int get_netdev_rx_queue_index(
2659 struct netdev_rx_queue *queue) 2795 struct netdev_rx_queue *queue)
@@ -2753,9 +2889,10 @@ void dev_set_group(struct net_device *, int);
2753int dev_set_mac_address(struct net_device *, struct sockaddr *); 2889int dev_set_mac_address(struct net_device *, struct sockaddr *);
2754int dev_change_carrier(struct net_device *, bool new_carrier); 2890int dev_change_carrier(struct net_device *, bool new_carrier);
2755int dev_get_phys_port_id(struct net_device *dev, 2891int dev_get_phys_port_id(struct net_device *dev,
2756 struct netdev_phys_port_id *ppid); 2892 struct netdev_phys_item_id *ppid);
2757int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, 2893struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
2758 struct netdev_queue *txq); 2894struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2895 struct netdev_queue *txq, int *ret);
2759int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 2896int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2760int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); 2897int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2761bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); 2898bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb);
@@ -3313,6 +3450,12 @@ void netdev_upper_dev_unlink(struct net_device *dev,
3313void netdev_adjacent_rename_links(struct net_device *dev, char *oldname); 3450void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
3314void *netdev_lower_dev_get_private(struct net_device *dev, 3451void *netdev_lower_dev_get_private(struct net_device *dev,
3315 struct net_device *lower_dev); 3452 struct net_device *lower_dev);
3453
3454/* RSS keys are 40 or 52 bytes long */
3455#define NETDEV_RSS_KEY_LEN 52
3456extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
3457void netdev_rss_key_fill(void *buffer, size_t len);
3458
3316int dev_get_nest_level(struct net_device *dev, 3459int dev_get_nest_level(struct net_device *dev,
3317 bool (*type_check)(struct net_device *dev)); 3460 bool (*type_check)(struct net_device *dev));
3318int skb_checksum_help(struct sk_buff *skb); 3461int skb_checksum_help(struct sk_buff *skb);
@@ -3357,6 +3500,27 @@ int __init dev_proc_init(void);
3357#define dev_proc_init() 0 3500#define dev_proc_init() 0
3358#endif 3501#endif
3359 3502
3503static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops,
3504 struct sk_buff *skb, struct net_device *dev,
3505 bool more)
3506{
3507 skb->xmit_more = more ? 1 : 0;
3508 return ops->ndo_start_xmit(skb, dev);
3509}
3510
3511static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
3512 struct netdev_queue *txq, bool more)
3513{
3514 const struct net_device_ops *ops = dev->netdev_ops;
3515 int rc;
3516
3517 rc = __netdev_start_xmit(ops, skb, dev, more);
3518 if (rc == NETDEV_TX_OK)
3519 txq_trans_update(txq);
3520
3521 return rc;
3522}
3523
3360int netdev_class_create_file_ns(struct class_attribute *class_attr, 3524int netdev_class_create_file_ns(struct class_attribute *class_attr,
3361 const void *ns); 3525 const void *ns);
3362void netdev_class_remove_file_ns(struct class_attribute *class_attr, 3526void netdev_class_remove_file_ns(struct class_attribute *class_attr,
@@ -3436,7 +3600,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
3436 BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT)); 3600 BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
3437 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT)); 3601 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
3438 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT)); 3602 BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
3439 BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT)); 3603 BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
3440 3604
3441 return (features & feature) == feature; 3605 return (features & feature) == feature;
3442} 3606}
@@ -3447,7 +3611,7 @@ static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
3447 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 3611 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
3448} 3612}
3449 3613
3450static inline bool netif_needs_gso(struct sk_buff *skb, 3614static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
3451 netdev_features_t features) 3615 netdev_features_t features)
3452{ 3616{
3453 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 3617 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
@@ -3479,6 +3643,21 @@ static inline bool netif_is_macvlan(struct net_device *dev)
3479 return dev->priv_flags & IFF_MACVLAN; 3643 return dev->priv_flags & IFF_MACVLAN;
3480} 3644}
3481 3645
3646static inline bool netif_is_macvlan_port(struct net_device *dev)
3647{
3648 return dev->priv_flags & IFF_MACVLAN_PORT;
3649}
3650
3651static inline bool netif_is_ipvlan(struct net_device *dev)
3652{
3653 return dev->priv_flags & IFF_IPVLAN_SLAVE;
3654}
3655
3656static inline bool netif_is_ipvlan_port(struct net_device *dev)
3657{
3658 return dev->priv_flags & IFF_IPVLAN_MASTER;
3659}
3660
3482static inline bool netif_is_bond_master(struct net_device *dev) 3661static inline bool netif_is_bond_master(struct net_device *dev)
3483{ 3662{
3484 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 3663 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
@@ -3494,6 +3673,12 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
3494 return dev->priv_flags & IFF_SUPP_NOFCS; 3673 return dev->priv_flags & IFF_SUPP_NOFCS;
3495} 3674}
3496 3675
3676/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
3677static inline void netif_keep_dst(struct net_device *dev)
3678{
3679 dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM);
3680}
3681
3497extern struct pernet_operations __net_initdata loopback_net_ops; 3682extern struct pernet_operations __net_initdata loopback_net_ops;
3498 3683
3499/* Logging, debugging and troubleshooting/diagnostic helpers. */ 3684/* Logging, debugging and troubleshooting/diagnostic helpers. */
@@ -3523,22 +3708,22 @@ static inline const char *netdev_reg_state(const struct net_device *dev)
3523} 3708}
3524 3709
3525__printf(3, 4) 3710__printf(3, 4)
3526int netdev_printk(const char *level, const struct net_device *dev, 3711void netdev_printk(const char *level, const struct net_device *dev,
3527 const char *format, ...); 3712 const char *format, ...);
3528__printf(2, 3) 3713__printf(2, 3)
3529int netdev_emerg(const struct net_device *dev, const char *format, ...); 3714void netdev_emerg(const struct net_device *dev, const char *format, ...);
3530__printf(2, 3) 3715__printf(2, 3)
3531int netdev_alert(const struct net_device *dev, const char *format, ...); 3716void netdev_alert(const struct net_device *dev, const char *format, ...);
3532__printf(2, 3) 3717__printf(2, 3)
3533int netdev_crit(const struct net_device *dev, const char *format, ...); 3718void netdev_crit(const struct net_device *dev, const char *format, ...);
3534__printf(2, 3) 3719__printf(2, 3)
3535int netdev_err(const struct net_device *dev, const char *format, ...); 3720void netdev_err(const struct net_device *dev, const char *format, ...);
3536__printf(2, 3) 3721__printf(2, 3)
3537int netdev_warn(const struct net_device *dev, const char *format, ...); 3722void netdev_warn(const struct net_device *dev, const char *format, ...);
3538__printf(2, 3) 3723__printf(2, 3)
3539int netdev_notice(const struct net_device *dev, const char *format, ...); 3724void netdev_notice(const struct net_device *dev, const char *format, ...);
3540__printf(2, 3) 3725__printf(2, 3)
3541int netdev_info(const struct net_device *dev, const char *format, ...); 3726void netdev_info(const struct net_device *dev, const char *format, ...);
3542 3727
3543#define MODULE_ALIAS_NETDEV(device) \ 3728#define MODULE_ALIAS_NETDEV(device) \
3544 MODULE_ALIAS("netdev-" device) 3729 MODULE_ALIAS("netdev-" device)
@@ -3556,7 +3741,6 @@ do { \
3556({ \ 3741({ \
3557 if (0) \ 3742 if (0) \
3558 netdev_printk(KERN_DEBUG, __dev, format, ##args); \ 3743 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
3559 0; \
3560}) 3744})
3561#endif 3745#endif
3562 3746
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 96afc29184be..f1606fa6132d 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -57,6 +57,8 @@ enum ip_set_extension {
57 IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER), 57 IPSET_EXT_COUNTER = (1 << IPSET_EXT_BIT_COUNTER),
58 IPSET_EXT_BIT_COMMENT = 2, 58 IPSET_EXT_BIT_COMMENT = 2,
59 IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT), 59 IPSET_EXT_COMMENT = (1 << IPSET_EXT_BIT_COMMENT),
60 IPSET_EXT_BIT_SKBINFO = 3,
61 IPSET_EXT_SKBINFO = (1 << IPSET_EXT_BIT_SKBINFO),
60 /* Mark set with an extension which needs to call destroy */ 62 /* Mark set with an extension which needs to call destroy */
61 IPSET_EXT_BIT_DESTROY = 7, 63 IPSET_EXT_BIT_DESTROY = 7,
62 IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY), 64 IPSET_EXT_DESTROY = (1 << IPSET_EXT_BIT_DESTROY),
@@ -65,12 +67,14 @@ enum ip_set_extension {
65#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT) 67#define SET_WITH_TIMEOUT(s) ((s)->extensions & IPSET_EXT_TIMEOUT)
66#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER) 68#define SET_WITH_COUNTER(s) ((s)->extensions & IPSET_EXT_COUNTER)
67#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT) 69#define SET_WITH_COMMENT(s) ((s)->extensions & IPSET_EXT_COMMENT)
70#define SET_WITH_SKBINFO(s) ((s)->extensions & IPSET_EXT_SKBINFO)
68#define SET_WITH_FORCEADD(s) ((s)->flags & IPSET_CREATE_FLAG_FORCEADD) 71#define SET_WITH_FORCEADD(s) ((s)->flags & IPSET_CREATE_FLAG_FORCEADD)
69 72
70/* Extension id, in size order */ 73/* Extension id, in size order */
71enum ip_set_ext_id { 74enum ip_set_ext_id {
72 IPSET_EXT_ID_COUNTER = 0, 75 IPSET_EXT_ID_COUNTER = 0,
73 IPSET_EXT_ID_TIMEOUT, 76 IPSET_EXT_ID_TIMEOUT,
77 IPSET_EXT_ID_SKBINFO,
74 IPSET_EXT_ID_COMMENT, 78 IPSET_EXT_ID_COMMENT,
75 IPSET_EXT_ID_MAX, 79 IPSET_EXT_ID_MAX,
76}; 80};
@@ -92,6 +96,10 @@ struct ip_set_ext {
92 u64 packets; 96 u64 packets;
93 u64 bytes; 97 u64 bytes;
94 u32 timeout; 98 u32 timeout;
99 u32 skbmark;
100 u32 skbmarkmask;
101 u32 skbprio;
102 u16 skbqueue;
95 char *comment; 103 char *comment;
96}; 104};
97 105
@@ -104,6 +112,13 @@ struct ip_set_comment {
104 char *str; 112 char *str;
105}; 113};
106 114
115struct ip_set_skbinfo {
116 u32 skbmark;
117 u32 skbmarkmask;
118 u32 skbprio;
119 u16 skbqueue;
120};
121
107struct ip_set; 122struct ip_set;
108 123
109#define ext_timeout(e, s) \ 124#define ext_timeout(e, s) \
@@ -112,7 +127,8 @@ struct ip_set;
112(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER]) 127(struct ip_set_counter *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COUNTER])
113#define ext_comment(e, s) \ 128#define ext_comment(e, s) \
114(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT]) 129(struct ip_set_comment *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_COMMENT])
115 130#define ext_skbinfo(e, s) \
131(struct ip_set_skbinfo *)(((void *)(e)) + (s)->offset[IPSET_EXT_ID_SKBINFO])
116 132
117typedef int (*ipset_adtfn)(struct ip_set *set, void *value, 133typedef int (*ipset_adtfn)(struct ip_set *set, void *value,
118 const struct ip_set_ext *ext, 134 const struct ip_set_ext *ext,
@@ -256,6 +272,8 @@ ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
256 cadt_flags |= IPSET_FLAG_WITH_COUNTERS; 272 cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
257 if (SET_WITH_COMMENT(set)) 273 if (SET_WITH_COMMENT(set))
258 cadt_flags |= IPSET_FLAG_WITH_COMMENT; 274 cadt_flags |= IPSET_FLAG_WITH_COMMENT;
275 if (SET_WITH_SKBINFO(set))
276 cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
259 if (SET_WITH_FORCEADD(set)) 277 if (SET_WITH_FORCEADD(set))
260 cadt_flags |= IPSET_FLAG_WITH_FORCEADD; 278 cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
261 279
@@ -304,6 +322,43 @@ ip_set_update_counter(struct ip_set_counter *counter,
304 } 322 }
305} 323}
306 324
325static inline void
326ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
327 const struct ip_set_ext *ext,
328 struct ip_set_ext *mext, u32 flags)
329{
330 mext->skbmark = skbinfo->skbmark;
331 mext->skbmarkmask = skbinfo->skbmarkmask;
332 mext->skbprio = skbinfo->skbprio;
333 mext->skbqueue = skbinfo->skbqueue;
334}
335static inline bool
336ip_set_put_skbinfo(struct sk_buff *skb, struct ip_set_skbinfo *skbinfo)
337{
338 /* Send nonzero parameters only */
339 return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
340 nla_put_net64(skb, IPSET_ATTR_SKBMARK,
341 cpu_to_be64((u64)skbinfo->skbmark << 32 |
342 skbinfo->skbmarkmask))) ||
343 (skbinfo->skbprio &&
344 nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
345 cpu_to_be32(skbinfo->skbprio))) ||
346 (skbinfo->skbqueue &&
347 nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
348 cpu_to_be16(skbinfo->skbqueue)));
349
350}
351
352static inline void
353ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
354 const struct ip_set_ext *ext)
355{
356 skbinfo->skbmark = ext->skbmark;
357 skbinfo->skbmarkmask = ext->skbmarkmask;
358 skbinfo->skbprio = ext->skbprio;
359 skbinfo->skbqueue = ext->skbqueue;
360}
361
307static inline bool 362static inline bool
308ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter) 363ip_set_put_counter(struct sk_buff *skb, struct ip_set_counter *counter)
309{ 364{
@@ -497,6 +552,9 @@ ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set,
497 if (SET_WITH_COMMENT(set) && 552 if (SET_WITH_COMMENT(set) &&
498 ip_set_put_comment(skb, ext_comment(e, set))) 553 ip_set_put_comment(skb, ext_comment(e, set)))
499 return -EMSGSIZE; 554 return -EMSGSIZE;
555 if (SET_WITH_SKBINFO(set) &&
556 ip_set_put_skbinfo(skb, ext_skbinfo(e, set)))
557 return -EMSGSIZE;
500 return 0; 558 return 0;
501} 559}
502 560
diff --git a/include/linux/netfilter/ipset/ip_set_list.h b/include/linux/netfilter/ipset/ip_set_list.h
index 68c2aea897f5..fe2622a00151 100644
--- a/include/linux/netfilter/ipset/ip_set_list.h
+++ b/include/linux/netfilter/ipset/ip_set_list.h
@@ -6,5 +6,6 @@
6 6
7#define IP_SET_LIST_DEFAULT_SIZE 8 7#define IP_SET_LIST_DEFAULT_SIZE 8
8#define IP_SET_LIST_MIN_SIZE 4 8#define IP_SET_LIST_MIN_SIZE 4
9#define IP_SET_LIST_MAX_SIZE 65536
9 10
10#endif /* __IP_SET_LIST_H */ 11#endif /* __IP_SET_LIST_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 8ab1c278b66d..c755e4971fa3 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -15,7 +15,7 @@ enum nf_br_hook_priorities {
15 NF_BR_PRI_LAST = INT_MAX, 15 NF_BR_PRI_LAST = INT_MAX,
16}; 16};
17 17
18#ifdef CONFIG_BRIDGE_NETFILTER 18#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
19 19
20#define BRNF_PKT_TYPE 0x01 20#define BRNF_PKT_TYPE 0x01
21#define BRNF_BRIDGED_DNAT 0x02 21#define BRNF_BRIDGED_DNAT 0x02
@@ -24,16 +24,6 @@ enum nf_br_hook_priorities {
24#define BRNF_8021Q 0x10 24#define BRNF_8021Q 0x10
25#define BRNF_PPPoE 0x20 25#define BRNF_PPPoE 0x20
26 26
27/* Only used in br_forward.c */
28int nf_bridge_copy_header(struct sk_buff *skb);
29static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
30{
31 if (skb->nf_bridge &&
32 skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
33 return nf_bridge_copy_header(skb);
34 return 0;
35}
36
37static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb) 27static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
38{ 28{
39 switch (skb->protocol) { 29 switch (skb->protocol) {
@@ -46,6 +36,44 @@ static inline unsigned int nf_bridge_encap_header_len(const struct sk_buff *skb)
46 } 36 }
47} 37}
48 38
39static inline void nf_bridge_update_protocol(struct sk_buff *skb)
40{
41 if (skb->nf_bridge->mask & BRNF_8021Q)
42 skb->protocol = htons(ETH_P_8021Q);
43 else if (skb->nf_bridge->mask & BRNF_PPPoE)
44 skb->protocol = htons(ETH_P_PPP_SES);
45}
46
47/* Fill in the header for fragmented IP packets handled by
48 * the IPv4 connection tracking code.
49 *
50 * Only used in br_forward.c
51 */
52static inline int nf_bridge_copy_header(struct sk_buff *skb)
53{
54 int err;
55 unsigned int header_size;
56
57 nf_bridge_update_protocol(skb);
58 header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
59 err = skb_cow_head(skb, header_size);
60 if (err)
61 return err;
62
63 skb_copy_to_linear_data_offset(skb, -header_size,
64 skb->nf_bridge->data, header_size);
65 __skb_push(skb, nf_bridge_encap_header_len(skb));
66 return 0;
67}
68
69static inline int nf_bridge_maybe_copy_header(struct sk_buff *skb)
70{
71 if (skb->nf_bridge &&
72 skb->nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT))
73 return nf_bridge_copy_header(skb);
74 return 0;
75}
76
49static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) 77static inline unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb)
50{ 78{
51 if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE)) 79 if (unlikely(skb->nf_bridge->mask & BRNF_PPPoE))
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9e572daa15d5..02fc86d2348e 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -46,8 +46,8 @@ struct netlink_kernel_cfg {
46 unsigned int flags; 46 unsigned int flags;
47 void (*input)(struct sk_buff *skb); 47 void (*input)(struct sk_buff *skb);
48 struct mutex *cb_mutex; 48 struct mutex *cb_mutex;
49 int (*bind)(int group); 49 int (*bind)(struct net *net, int group);
50 void (*unbind)(int group); 50 void (*unbind)(struct net *net, int group);
51 bool (*compare)(struct net *net, struct sock *sk); 51 bool (*compare)(struct net *net, struct sock *sk);
52}; 52};
53 53
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index a1e3064a8d99..022b761dbf0a 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -110,6 +110,20 @@ enum nfs_opnum4 {
110 OP_DESTROY_CLIENTID = 57, 110 OP_DESTROY_CLIENTID = 57,
111 OP_RECLAIM_COMPLETE = 58, 111 OP_RECLAIM_COMPLETE = 58,
112 112
113 /* nfs42 */
114 OP_ALLOCATE = 59,
115 OP_COPY = 60,
116 OP_COPY_NOTIFY = 61,
117 OP_DEALLOCATE = 62,
118 OP_IO_ADVISE = 63,
119 OP_LAYOUTERROR = 64,
120 OP_LAYOUTSTATS = 65,
121 OP_OFFLOAD_CANCEL = 66,
122 OP_OFFLOAD_STATUS = 67,
123 OP_READ_PLUS = 68,
124 OP_SEEK = 69,
125 OP_WRITE_SAME = 70,
126
113 OP_ILLEGAL = 10044, 127 OP_ILLEGAL = 10044,
114}; 128};
115 129
@@ -117,10 +131,10 @@ enum nfs_opnum4 {
117Needs to be updated if more operations are defined in future.*/ 131Needs to be updated if more operations are defined in future.*/
118 132
119#define FIRST_NFS4_OP OP_ACCESS 133#define FIRST_NFS4_OP OP_ACCESS
120#define LAST_NFS4_OP OP_RECLAIM_COMPLETE 134#define LAST_NFS4_OP OP_WRITE_SAME
121#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER 135#define LAST_NFS40_OP OP_RELEASE_LOCKOWNER
122#define LAST_NFS41_OP OP_RECLAIM_COMPLETE 136#define LAST_NFS41_OP OP_RECLAIM_COMPLETE
123#define LAST_NFS42_OP OP_RECLAIM_COMPLETE 137#define LAST_NFS42_OP OP_WRITE_SAME
124 138
125enum nfsstat4 { 139enum nfsstat4 {
126 NFS4_OK = 0, 140 NFS4_OK = 0,
@@ -235,10 +249,11 @@ enum nfsstat4 {
235 /* nfs42 */ 249 /* nfs42 */
236 NFS4ERR_PARTNER_NOTSUPP = 10088, 250 NFS4ERR_PARTNER_NOTSUPP = 10088,
237 NFS4ERR_PARTNER_NO_AUTH = 10089, 251 NFS4ERR_PARTNER_NO_AUTH = 10089,
238 NFS4ERR_METADATA_NOTSUPP = 10090, 252 NFS4ERR_UNION_NOTSUPP = 10090,
239 NFS4ERR_OFFLOAD_DENIED = 10091, 253 NFS4ERR_OFFLOAD_DENIED = 10091,
240 NFS4ERR_WRONG_LFS = 10092, 254 NFS4ERR_WRONG_LFS = 10092,
241 NFS4ERR_BADLABEL = 10093, 255 NFS4ERR_BADLABEL = 10093,
256 NFS4ERR_OFFLOAD_NO_REQS = 10094,
242}; 257};
243 258
244static inline bool seqid_mutating_err(u32 err) 259static inline bool seqid_mutating_err(u32 err)
@@ -472,6 +487,11 @@ enum {
472 NFSPROC4_CLNT_GETDEVICELIST, 487 NFSPROC4_CLNT_GETDEVICELIST,
473 NFSPROC4_CLNT_BIND_CONN_TO_SESSION, 488 NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
474 NFSPROC4_CLNT_DESTROY_CLIENTID, 489 NFSPROC4_CLNT_DESTROY_CLIENTID,
490
491 /* nfs42 */
492 NFSPROC4_CLNT_SEEK,
493 NFSPROC4_CLNT_ALLOCATE,
494 NFSPROC4_CLNT_DEALLOCATE,
475}; 495};
476 496
477/* nfs41 types */ 497/* nfs41 types */
@@ -535,4 +555,9 @@ struct nfs4_deviceid {
535 char data[NFS4_DEVICEID4_SIZE]; 555 char data[NFS4_DEVICEID4_SIZE];
536}; 556};
537 557
558enum data_content4 {
559 NFS4_CONTENT_DATA = 0,
560 NFS4_CONTENT_HOLE = 1,
561};
562
538#endif 563#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 5180a7ededec..6d627b92df53 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -163,7 +163,7 @@ struct nfs_inode {
163 */ 163 */
164 __be32 cookieverf[2]; 164 __be32 cookieverf[2];
165 165
166 unsigned long npages; 166 unsigned long nrequests;
167 struct nfs_mds_commit_info commit_info; 167 struct nfs_mds_commit_info commit_info;
168 168
169 /* Open contexts for shared mmap writes */ 169 /* Open contexts for shared mmap writes */
@@ -443,31 +443,15 @@ static inline struct rpc_cred *nfs_file_cred(struct file *file)
443} 443}
444 444
445/* 445/*
446 * linux/fs/nfs/xattr.c
447 */
448#ifdef CONFIG_NFS_V3_ACL
449extern ssize_t nfs3_listxattr(struct dentry *, char *, size_t);
450extern ssize_t nfs3_getxattr(struct dentry *, const char *, void *, size_t);
451extern int nfs3_setxattr(struct dentry *, const char *,
452 const void *, size_t, int);
453extern int nfs3_removexattr (struct dentry *, const char *name);
454#else
455# define nfs3_listxattr NULL
456# define nfs3_getxattr NULL
457# define nfs3_setxattr NULL
458# define nfs3_removexattr NULL
459#endif
460
461/*
462 * linux/fs/nfs/direct.c 446 * linux/fs/nfs/direct.c
463 */ 447 */
464extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t); 448extern ssize_t nfs_direct_IO(int, struct kiocb *, struct iov_iter *, loff_t);
465extern ssize_t nfs_file_direct_read(struct kiocb *iocb, 449extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
466 struct iov_iter *iter, 450 struct iov_iter *iter,
467 loff_t pos, bool uio); 451 loff_t pos);
468extern ssize_t nfs_file_direct_write(struct kiocb *iocb, 452extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
469 struct iov_iter *iter, 453 struct iov_iter *iter,
470 loff_t pos, bool uio); 454 loff_t pos);
471 455
472/* 456/*
473 * linux/fs/nfs/dir.c 457 * linux/fs/nfs/dir.c
@@ -529,22 +513,14 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
529extern int nfs_wb_all(struct inode *inode); 513extern int nfs_wb_all(struct inode *inode);
530extern int nfs_wb_page(struct inode *inode, struct page* page); 514extern int nfs_wb_page(struct inode *inode, struct page* page);
531extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); 515extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
532#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
533extern int nfs_commit_inode(struct inode *, int); 516extern int nfs_commit_inode(struct inode *, int);
534extern struct nfs_commit_data *nfs_commitdata_alloc(void); 517extern struct nfs_commit_data *nfs_commitdata_alloc(void);
535extern void nfs_commit_free(struct nfs_commit_data *data); 518extern void nfs_commit_free(struct nfs_commit_data *data);
536#else
537static inline int
538nfs_commit_inode(struct inode *inode, int how)
539{
540 return 0;
541}
542#endif
543 519
544static inline int 520static inline int
545nfs_have_writebacks(struct inode *inode) 521nfs_have_writebacks(struct inode *inode)
546{ 522{
547 return NFS_I(inode)->npages != 0; 523 return NFS_I(inode)->nrequests != 0;
548} 524}
549 525
550/* 526/*
@@ -557,23 +533,6 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
557 struct page *); 533 struct page *);
558 534
559/* 535/*
560 * linux/fs/nfs3proc.c
561 */
562#ifdef CONFIG_NFS_V3_ACL
563extern struct posix_acl *nfs3_get_acl(struct inode *inode, int type);
564extern int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type);
565extern int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
566 struct posix_acl *dfacl);
567extern const struct xattr_handler *nfs3_xattr_handlers[];
568#else
569static inline int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
570 struct posix_acl *dfacl)
571{
572 return 0;
573}
574#endif /* CONFIG_NFS_V3_ACL */
575
576/*
577 * inline functions 536 * inline functions
578 */ 537 */
579 538
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 922be2e050f5..1e37fbb78f7a 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -230,5 +230,8 @@ struct nfs_server {
230#define NFS_CAP_STATEID_NFSV41 (1U << 16) 230#define NFS_CAP_STATEID_NFSV41 (1U << 16)
231#define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17) 231#define NFS_CAP_ATOMIC_OPEN_V1 (1U << 17)
232#define NFS_CAP_SECURITY_LABEL (1U << 18) 232#define NFS_CAP_SECURITY_LABEL (1U << 18)
233#define NFS_CAP_SEEK (1U << 19)
234#define NFS_CAP_ALLOCATE (1U << 20)
235#define NFS_CAP_DEALLOCATE (1U << 21)
233 236
234#endif 237#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 0040629894df..467c84efb596 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -252,17 +252,6 @@ struct nfs4_layoutget {
252 gfp_t gfp_flags; 252 gfp_t gfp_flags;
253}; 253};
254 254
255struct nfs4_getdevicelist_args {
256 struct nfs4_sequence_args seq_args;
257 const struct nfs_fh *fh;
258 u32 layoutclass;
259};
260
261struct nfs4_getdevicelist_res {
262 struct nfs4_sequence_res seq_res;
263 struct pnfs_devicelist *devlist;
264};
265
266struct nfs4_getdeviceinfo_args { 255struct nfs4_getdeviceinfo_args {
267 struct nfs4_sequence_args seq_args; 256 struct nfs4_sequence_args seq_args;
268 struct pnfs_device *pdev; 257 struct pnfs_device *pdev;
@@ -279,6 +268,9 @@ struct nfs4_layoutcommit_args {
279 __u64 lastbytewritten; 268 __u64 lastbytewritten;
280 struct inode *inode; 269 struct inode *inode;
281 const u32 *bitmask; 270 const u32 *bitmask;
271 size_t layoutupdate_len;
272 struct page *layoutupdate_page;
273 struct page **layoutupdate_pages;
282}; 274};
283 275
284struct nfs4_layoutcommit_res { 276struct nfs4_layoutcommit_res {
@@ -1232,13 +1224,57 @@ struct nfs41_free_stateid_res {
1232 unsigned int status; 1224 unsigned int status;
1233}; 1225};
1234 1226
1227static inline void
1228nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
1229{
1230 kfree(cinfo->buckets);
1231}
1232
1235#else 1233#else
1236 1234
1237struct pnfs_ds_commit_info { 1235struct pnfs_ds_commit_info {
1238}; 1236};
1239 1237
1238static inline void
1239nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
1240{
1241}
1242
1240#endif /* CONFIG_NFS_V4_1 */ 1243#endif /* CONFIG_NFS_V4_1 */
1241 1244
1245#ifdef CONFIG_NFS_V4_2
1246struct nfs42_falloc_args {
1247 struct nfs4_sequence_args seq_args;
1248
1249 struct nfs_fh *falloc_fh;
1250 nfs4_stateid falloc_stateid;
1251 u64 falloc_offset;
1252 u64 falloc_length;
1253};
1254
1255struct nfs42_falloc_res {
1256 struct nfs4_sequence_res seq_res;
1257 unsigned int status;
1258};
1259
1260struct nfs42_seek_args {
1261 struct nfs4_sequence_args seq_args;
1262
1263 struct nfs_fh *sa_fh;
1264 nfs4_stateid sa_stateid;
1265 u64 sa_offset;
1266 u32 sa_what;
1267};
1268
1269struct nfs42_seek_res {
1270 struct nfs4_sequence_res seq_res;
1271 unsigned int status;
1272
1273 u32 sr_eof;
1274 u64 sr_offset;
1275};
1276#endif
1277
1242struct nfs_page; 1278struct nfs_page;
1243 1279
1244#define NFS_PAGEVEC_SIZE (8U) 1280#define NFS_PAGEVEC_SIZE (8U)
@@ -1328,6 +1364,7 @@ struct nfs_commit_data {
1328 struct pnfs_layout_segment *lseg; 1364 struct pnfs_layout_segment *lseg;
1329 struct nfs_client *ds_clp; /* pNFS data server */ 1365 struct nfs_client *ds_clp; /* pNFS data server */
1330 int ds_commit_index; 1366 int ds_commit_index;
1367 loff_t lwb;
1331 const struct rpc_call_ops *mds_ops; 1368 const struct rpc_call_ops *mds_ops;
1332 const struct nfs_commit_completion_ops *completion_ops; 1369 const struct nfs_commit_completion_ops *completion_ops;
1333 int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data); 1370 int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
@@ -1346,6 +1383,7 @@ struct nfs_unlinkdata {
1346 struct inode *dir; 1383 struct inode *dir;
1347 struct rpc_cred *cred; 1384 struct rpc_cred *cred;
1348 struct nfs_fattr dir_attr; 1385 struct nfs_fattr dir_attr;
1386 long timeout;
1349}; 1387};
1350 1388
1351struct nfs_renamedata { 1389struct nfs_renamedata {
@@ -1359,6 +1397,7 @@ struct nfs_renamedata {
1359 struct dentry *new_dentry; 1397 struct dentry *new_dentry;
1360 struct nfs_fattr new_fattr; 1398 struct nfs_fattr new_fattr;
1361 void (*complete)(struct rpc_task *, struct nfs_renamedata *); 1399 void (*complete)(struct rpc_task *, struct nfs_renamedata *);
1400 long timeout;
1362}; 1401};
1363 1402
1364struct nfs_access_entry; 1403struct nfs_access_entry;
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index 20163b9a0eae..167342c2ce6b 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */ 15 */
20 16
21#ifndef NL802154_H 17#ifndef NL802154_H
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 1d2a6ab6b8bb..9b2022ab4d85 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -24,6 +24,19 @@ static inline void touch_nmi_watchdog(void)
24} 24}
25#endif 25#endif
26 26
27#if defined(CONFIG_HARDLOCKUP_DETECTOR)
28extern void watchdog_enable_hardlockup_detector(bool val);
29extern bool watchdog_hardlockup_detector_is_enabled(void);
30#else
31static inline void watchdog_enable_hardlockup_detector(bool val)
32{
33}
34static inline bool watchdog_hardlockup_detector_is_enabled(void)
35{
36 return true;
37}
38#endif
39
27/* 40/*
28 * Create trigger_all_cpu_backtrace() out of the arch-provided 41 * Create trigger_all_cpu_backtrace() out of the arch-provided
29 * base function. Return whether such support was available, 42 * base function. Return whether such support was available,
diff --git a/include/linux/ns_common.h b/include/linux/ns_common.h
new file mode 100644
index 000000000000..85a5c8c16be9
--- /dev/null
+++ b/include/linux/ns_common.h
@@ -0,0 +1,12 @@
1#ifndef _LINUX_NS_COMMON_H
2#define _LINUX_NS_COMMON_H
3
4struct proc_ns_operations;
5
6struct ns_common {
7 atomic_long_t stashed;
8 const struct proc_ns_operations *ops;
9 unsigned int inum;
10};
11
12#endif
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 2bf403195c09..258945fcabf1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -19,6 +19,7 @@
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/miscdevice.h> 20#include <linux/miscdevice.h>
21#include <linux/kref.h> 21#include <linux/kref.h>
22#include <linux/blk-mq.h>
22 23
23struct nvme_bar { 24struct nvme_bar {
24 __u64 cap; /* Controller Capabilities */ 25 __u64 cap; /* Controller Capabilities */
@@ -38,6 +39,7 @@ struct nvme_bar {
38#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) 39#define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff)
39#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) 40#define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf)
40#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) 41#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf)
42#define NVME_CAP_MPSMAX(cap) (((cap) >> 52) & 0xf)
41 43
42enum { 44enum {
43 NVME_CC_ENABLE = 1 << 0, 45 NVME_CC_ENABLE = 1 << 0,
@@ -70,8 +72,10 @@ extern unsigned char nvme_io_timeout;
70 */ 72 */
71struct nvme_dev { 73struct nvme_dev {
72 struct list_head node; 74 struct list_head node;
73 struct nvme_queue __rcu **queues; 75 struct nvme_queue **queues;
74 unsigned short __percpu *io_queue; 76 struct request_queue *admin_q;
77 struct blk_mq_tag_set tagset;
78 struct blk_mq_tag_set admin_tagset;
75 u32 __iomem *dbs; 79 u32 __iomem *dbs;
76 struct pci_dev *pci_dev; 80 struct pci_dev *pci_dev;
77 struct dma_pool *prp_page_pool; 81 struct dma_pool *prp_page_pool;
@@ -90,15 +94,16 @@ struct nvme_dev {
90 struct miscdevice miscdev; 94 struct miscdevice miscdev;
91 work_func_t reset_workfn; 95 work_func_t reset_workfn;
92 struct work_struct reset_work; 96 struct work_struct reset_work;
93 struct work_struct cpu_work;
94 char name[12]; 97 char name[12];
95 char serial[20]; 98 char serial[20];
96 char model[40]; 99 char model[40];
97 char firmware_rev[8]; 100 char firmware_rev[8];
98 u32 max_hw_sectors; 101 u32 max_hw_sectors;
99 u32 stripe_size; 102 u32 stripe_size;
103 u32 page_size;
100 u16 oncs; 104 u16 oncs;
101 u16 abort_limit; 105 u16 abort_limit;
106 u8 event_limit;
102 u8 vwc; 107 u8 vwc;
103 u8 initialized; 108 u8 initialized;
104}; 109};
@@ -132,7 +137,6 @@ struct nvme_iod {
132 int offset; /* Of PRP list */ 137 int offset; /* Of PRP list */
133 int nents; /* Used in scatterlist */ 138 int nents; /* Used in scatterlist */
134 int length; /* Of data, in bytes */ 139 int length; /* Of data, in bytes */
135 unsigned long start_time;
136 dma_addr_t first_dma; 140 dma_addr_t first_dma;
137 struct list_head node; 141 struct list_head node;
138 struct scatterlist sg[0]; 142 struct scatterlist sg[0];
@@ -150,12 +154,14 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
150 */ 154 */
151void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod); 155void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
152 156
153int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t); 157int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int, gfp_t);
154struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, 158struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
155 unsigned long addr, unsigned length); 159 unsigned long addr, unsigned length);
156void nvme_unmap_user_pages(struct nvme_dev *dev, int write, 160void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
157 struct nvme_iod *iod); 161 struct nvme_iod *iod);
158int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *); 162int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_ns *,
163 struct nvme_command *, u32 *);
164int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
159int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *, 165int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
160 u32 *result); 166 u32 *result);
161int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns, 167int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
diff --git a/include/linux/of.h b/include/linux/of.h
index 6c4363b8ddc3..dfde07e77a63 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -23,6 +23,8 @@
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/topology.h> 24#include <linux/topology.h>
25#include <linux/notifier.h> 25#include <linux/notifier.h>
26#include <linux/property.h>
27#include <linux/list.h>
26 28
27#include <asm/byteorder.h> 29#include <asm/byteorder.h>
28#include <asm/errno.h> 30#include <asm/errno.h>
@@ -49,14 +51,13 @@ struct device_node {
49 const char *type; 51 const char *type;
50 phandle phandle; 52 phandle phandle;
51 const char *full_name; 53 const char *full_name;
54 struct fwnode_handle fwnode;
52 55
53 struct property *properties; 56 struct property *properties;
54 struct property *deadprops; /* removed properties */ 57 struct property *deadprops; /* removed properties */
55 struct device_node *parent; 58 struct device_node *parent;
56 struct device_node *child; 59 struct device_node *child;
57 struct device_node *sibling; 60 struct device_node *sibling;
58 struct device_node *next; /* next device of same type */
59 struct device_node *allnext; /* next in list of all nodes */
60 struct kobject kobj; 61 struct kobject kobj;
61 unsigned long _flags; 62 unsigned long _flags;
62 void *data; 63 void *data;
@@ -74,11 +75,18 @@ struct of_phandle_args {
74 uint32_t args[MAX_PHANDLE_ARGS]; 75 uint32_t args[MAX_PHANDLE_ARGS];
75}; 76};
76 77
78struct of_reconfig_data {
79 struct device_node *dn;
80 struct property *prop;
81 struct property *old_prop;
82};
83
77/* initialize a node */ 84/* initialize a node */
78extern struct kobj_type of_node_ktype; 85extern struct kobj_type of_node_ktype;
79static inline void of_node_init(struct device_node *node) 86static inline void of_node_init(struct device_node *node)
80{ 87{
81 kobject_init(&node->kobj, &of_node_ktype); 88 kobject_init(&node->kobj, &of_node_ktype);
89 node->fwnode.type = FWNODE_OF;
82} 90}
83 91
84/* true when node is initialized */ 92/* true when node is initialized */
@@ -105,18 +113,27 @@ static inline struct device_node *of_node_get(struct device_node *node)
105static inline void of_node_put(struct device_node *node) { } 113static inline void of_node_put(struct device_node *node) { }
106#endif /* !CONFIG_OF_DYNAMIC */ 114#endif /* !CONFIG_OF_DYNAMIC */
107 115
108#ifdef CONFIG_OF
109
110/* Pointer for first entry in chain of all nodes. */ 116/* Pointer for first entry in chain of all nodes. */
111extern struct device_node *of_allnodes; 117extern struct device_node *of_root;
112extern struct device_node *of_chosen; 118extern struct device_node *of_chosen;
113extern struct device_node *of_aliases; 119extern struct device_node *of_aliases;
114extern struct device_node *of_stdout; 120extern struct device_node *of_stdout;
115extern raw_spinlock_t devtree_lock; 121extern raw_spinlock_t devtree_lock;
116 122
123#ifdef CONFIG_OF
124static inline bool is_of_node(struct fwnode_handle *fwnode)
125{
126 return fwnode && fwnode->type == FWNODE_OF;
127}
128
129static inline struct device_node *of_node(struct fwnode_handle *fwnode)
130{
131 return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL;
132}
133
117static inline bool of_have_populated_dt(void) 134static inline bool of_have_populated_dt(void)
118{ 135{
119 return of_allnodes != NULL; 136 return of_root != NULL;
120} 137}
121 138
122static inline bool of_node_is_root(const struct device_node *node) 139static inline bool of_node_is_root(const struct device_node *node)
@@ -160,6 +177,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag
160 clear_bit(flag, &p->_flags); 177 clear_bit(flag, &p->_flags);
161} 178}
162 179
180extern struct device_node *__of_find_all_nodes(struct device_node *prev);
163extern struct device_node *of_find_all_nodes(struct device_node *prev); 181extern struct device_node *of_find_all_nodes(struct device_node *prev);
164 182
165/* 183/*
@@ -215,8 +233,9 @@ static inline const char *of_node_full_name(const struct device_node *np)
215 return np ? np->full_name : "<no-node>"; 233 return np ? np->full_name : "<no-node>";
216} 234}
217 235
218#define for_each_of_allnodes(dn) \ 236#define for_each_of_allnodes_from(from, dn) \
219 for (dn = of_allnodes; dn; dn = dn->allnext) 237 for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn))
238#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn)
220extern struct device_node *of_find_node_by_name(struct device_node *from, 239extern struct device_node *of_find_node_by_name(struct device_node *from,
221 const char *name); 240 const char *name);
222extern struct device_node *of_find_node_by_type(struct device_node *from, 241extern struct device_node *of_find_node_by_type(struct device_node *from,
@@ -228,7 +247,13 @@ extern struct device_node *of_find_matching_node_and_match(
228 const struct of_device_id *matches, 247 const struct of_device_id *matches,
229 const struct of_device_id **match); 248 const struct of_device_id **match);
230 249
231extern struct device_node *of_find_node_by_path(const char *path); 250extern struct device_node *of_find_node_opts_by_path(const char *path,
251 const char **opts);
252static inline struct device_node *of_find_node_by_path(const char *path)
253{
254 return of_find_node_opts_by_path(path, NULL);
255}
256
232extern struct device_node *of_find_node_by_phandle(phandle handle); 257extern struct device_node *of_find_node_by_phandle(phandle handle);
233extern struct device_node *of_get_parent(const struct device_node *node); 258extern struct device_node *of_get_parent(const struct device_node *node);
234extern struct device_node *of_get_next_parent(struct device_node *node); 259extern struct device_node *of_get_next_parent(struct device_node *node);
@@ -263,21 +288,23 @@ extern int of_property_read_u32_array(const struct device_node *np,
263 size_t sz); 288 size_t sz);
264extern int of_property_read_u64(const struct device_node *np, 289extern int of_property_read_u64(const struct device_node *np,
265 const char *propname, u64 *out_value); 290 const char *propname, u64 *out_value);
291extern int of_property_read_u64_array(const struct device_node *np,
292 const char *propname,
293 u64 *out_values,
294 size_t sz);
266 295
267extern int of_property_read_string(struct device_node *np, 296extern int of_property_read_string(struct device_node *np,
268 const char *propname, 297 const char *propname,
269 const char **out_string); 298 const char **out_string);
270extern int of_property_read_string_index(struct device_node *np,
271 const char *propname,
272 int index, const char **output);
273extern int of_property_match_string(struct device_node *np, 299extern int of_property_match_string(struct device_node *np,
274 const char *propname, 300 const char *propname,
275 const char *string); 301 const char *string);
276extern int of_property_count_strings(struct device_node *np, 302extern int of_property_read_string_helper(struct device_node *np,
277 const char *propname); 303 const char *propname,
304 const char **out_strs, size_t sz, int index);
278extern int of_device_is_compatible(const struct device_node *device, 305extern int of_device_is_compatible(const struct device_node *device,
279 const char *); 306 const char *);
280extern int of_device_is_available(const struct device_node *device); 307extern bool of_device_is_available(const struct device_node *device);
281extern const void *of_get_property(const struct device_node *node, 308extern const void *of_get_property(const struct device_node *node,
282 const char *name, 309 const char *name,
283 int *lenp); 310 int *lenp);
@@ -319,16 +346,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop);
319#define OF_RECONFIG_REMOVE_PROPERTY 0x0004 346#define OF_RECONFIG_REMOVE_PROPERTY 0x0004
320#define OF_RECONFIG_UPDATE_PROPERTY 0x0005 347#define OF_RECONFIG_UPDATE_PROPERTY 0x0005
321 348
322struct of_prop_reconfig {
323 struct device_node *dn;
324 struct property *prop;
325 struct property *old_prop;
326};
327
328extern int of_reconfig_notifier_register(struct notifier_block *);
329extern int of_reconfig_notifier_unregister(struct notifier_block *);
330extern int of_reconfig_notify(unsigned long, void *);
331
332extern int of_attach_node(struct device_node *); 349extern int of_attach_node(struct device_node *);
333extern int of_detach_node(struct device_node *); 350extern int of_detach_node(struct device_node *);
334 351
@@ -357,6 +374,16 @@ bool of_console_check(struct device_node *dn, char *name, int index);
357 374
358#else /* CONFIG_OF */ 375#else /* CONFIG_OF */
359 376
377static inline bool is_of_node(struct fwnode_handle *fwnode)
378{
379 return false;
380}
381
382static inline struct device_node *of_node(struct fwnode_handle *fwnode)
383{
384 return NULL;
385}
386
360static inline const char* of_node_full_name(const struct device_node *np) 387static inline const char* of_node_full_name(const struct device_node *np)
361{ 388{
362 return "<no-node>"; 389 return "<no-node>";
@@ -387,6 +414,12 @@ static inline struct device_node *of_find_node_by_path(const char *path)
387 return NULL; 414 return NULL;
388} 415}
389 416
417static inline struct device_node *of_find_node_opts_by_path(const char *path,
418 const char **opts)
419{
420 return NULL;
421}
422
390static inline struct device_node *of_get_parent(const struct device_node *node) 423static inline struct device_node *of_get_parent(const struct device_node *node)
391{ 424{
392 return NULL; 425 return NULL;
@@ -428,9 +461,9 @@ static inline int of_device_is_compatible(const struct device_node *device,
428 return 0; 461 return 0;
429} 462}
430 463
431static inline int of_device_is_available(const struct device_node *device) 464static inline bool of_device_is_available(const struct device_node *device)
432{ 465{
433 return 0; 466 return false;
434} 467}
435 468
436static inline struct property *of_find_property(const struct device_node *np, 469static inline struct property *of_find_property(const struct device_node *np,
@@ -479,22 +512,23 @@ static inline int of_property_read_u32_array(const struct device_node *np,
479 return -ENOSYS; 512 return -ENOSYS;
480} 513}
481 514
482static inline int of_property_read_string(struct device_node *np, 515static inline int of_property_read_u64_array(const struct device_node *np,
483 const char *propname, 516 const char *propname,
484 const char **out_string) 517 u64 *out_values, size_t sz)
485{ 518{
486 return -ENOSYS; 519 return -ENOSYS;
487} 520}
488 521
489static inline int of_property_read_string_index(struct device_node *np, 522static inline int of_property_read_string(struct device_node *np,
490 const char *propname, int index, 523 const char *propname,
491 const char **out_string) 524 const char **out_string)
492{ 525{
493 return -ENOSYS; 526 return -ENOSYS;
494} 527}
495 528
496static inline int of_property_count_strings(struct device_node *np, 529static inline int of_property_read_string_helper(struct device_node *np,
497 const char *propname) 530 const char *propname,
531 const char **out_strs, size_t sz, int index)
498{ 532{
499 return -ENOSYS; 533 return -ENOSYS;
500} 534}
@@ -668,6 +702,70 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
668} 702}
669 703
670/** 704/**
705 * of_property_read_string_array() - Read an array of strings from a multiple
706 * strings property.
707 * @np: device node from which the property value is to be read.
708 * @propname: name of the property to be searched.
709 * @out_strs: output array of string pointers.
710 * @sz: number of array elements to read.
711 *
712 * Search for a property in a device tree node and retrieve a list of
713 * terminated string values (pointer to data, not a copy) in that property.
714 *
715 * If @out_strs is NULL, the number of strings in the property is returned.
716 */
717static inline int of_property_read_string_array(struct device_node *np,
718 const char *propname, const char **out_strs,
719 size_t sz)
720{
721 return of_property_read_string_helper(np, propname, out_strs, sz, 0);
722}
723
724/**
725 * of_property_count_strings() - Find and return the number of strings from a
726 * multiple strings property.
727 * @np: device node from which the property value is to be read.
728 * @propname: name of the property to be searched.
729 *
730 * Search for a property in a device tree node and retrieve the number of null
731 * terminated string contain in it. Returns the number of strings on
732 * success, -EINVAL if the property does not exist, -ENODATA if property
733 * does not have a value, and -EILSEQ if the string is not null-terminated
734 * within the length of the property data.
735 */
736static inline int of_property_count_strings(struct device_node *np,
737 const char *propname)
738{
739 return of_property_read_string_helper(np, propname, NULL, 0, 0);
740}
741
742/**
743 * of_property_read_string_index() - Find and read a string from a multiple
744 * strings property.
745 * @np: device node from which the property value is to be read.
746 * @propname: name of the property to be searched.
747 * @index: index of the string in the list of strings
748 * @out_string: pointer to null terminated return string, modified only if
749 * return value is 0.
750 *
751 * Search for a property in a device tree node and retrieve a null
752 * terminated string value (pointer to data, not a copy) in the list of strings
753 * contained in that property.
754 * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
755 * property does not have a value, and -EILSEQ if the string is not
756 * null-terminated within the length of the property data.
757 *
758 * The out_string pointer is modified only if a valid string can be decoded.
759 */
760static inline int of_property_read_string_index(struct device_node *np,
761 const char *propname,
762 int index, const char **output)
763{
764 int rc = of_property_read_string_helper(np, propname, output, 1, index);
765 return rc < 0 ? rc : 0;
766}
767
768/**
671 * of_property_read_bool - Findfrom a property 769 * of_property_read_bool - Findfrom a property
672 * @np: device node from which the property value is to be read. 770 * @np: device node from which the property value is to be read.
673 * @propname: name of the property to be searched. 771 * @propname: name of the property to be searched.
@@ -704,6 +802,13 @@ static inline int of_property_read_u32(const struct device_node *np,
704 return of_property_read_u32_array(np, propname, out_value, 1); 802 return of_property_read_u32_array(np, propname, out_value, 1);
705} 803}
706 804
805static inline int of_property_read_s32(const struct device_node *np,
806 const char *propname,
807 s32 *out_value)
808{
809 return of_property_read_u32(np, propname, (u32*) out_value);
810}
811
707#define of_property_for_each_u32(np, propname, prop, p, u) \ 812#define of_property_for_each_u32(np, propname, prop, p, u) \
708 for (prop = of_find_property(np, propname, NULL), \ 813 for (prop = of_find_property(np, propname, NULL), \
709 p = of_prop_next_u32(prop, NULL, &u); \ 814 p = of_prop_next_u32(prop, NULL, &u); \
@@ -772,7 +877,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
772 = { .compatible = compat, \ 877 = { .compatible = compat, \
773 .data = (fn == (fn_type)NULL) ? fn : fn } 878 .data = (fn == (fn_type)NULL) ? fn : fn }
774#else 879#else
775#define _OF_DECLARE(table, name, compat, fn, fn_type) \ 880#define _OF_DECLARE(table, name, compat, fn, fn_type) \
776 static const struct of_device_id __of_table_##name \ 881 static const struct of_device_id __of_table_##name \
777 __attribute__((unused)) \ 882 __attribute__((unused)) \
778 = { .compatible = compat, \ 883 = { .compatible = compat, \
@@ -823,7 +928,19 @@ struct of_changeset {
823 struct list_head entries; 928 struct list_head entries;
824}; 929};
825 930
931enum of_reconfig_change {
932 OF_RECONFIG_NO_CHANGE = 0,
933 OF_RECONFIG_CHANGE_ADD,
934 OF_RECONFIG_CHANGE_REMOVE,
935};
936
826#ifdef CONFIG_OF_DYNAMIC 937#ifdef CONFIG_OF_DYNAMIC
938extern int of_reconfig_notifier_register(struct notifier_block *);
939extern int of_reconfig_notifier_unregister(struct notifier_block *);
940extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd);
941extern int of_reconfig_get_state_change(unsigned long action,
942 struct of_reconfig_data *arg);
943
827extern void of_changeset_init(struct of_changeset *ocs); 944extern void of_changeset_init(struct of_changeset *ocs);
828extern void of_changeset_destroy(struct of_changeset *ocs); 945extern void of_changeset_destroy(struct of_changeset *ocs);
829extern int of_changeset_apply(struct of_changeset *ocs); 946extern int of_changeset_apply(struct of_changeset *ocs);
@@ -861,6 +978,69 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
861{ 978{
862 return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop); 979 return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
863} 980}
981#else /* CONFIG_OF_DYNAMIC */
982static inline int of_reconfig_notifier_register(struct notifier_block *nb)
983{
984 return -EINVAL;
985}
986static inline int of_reconfig_notifier_unregister(struct notifier_block *nb)
987{
988 return -EINVAL;
989}
990static inline int of_reconfig_notify(unsigned long action,
991 struct of_reconfig_data *arg)
992{
993 return -EINVAL;
994}
995static inline int of_reconfig_get_state_change(unsigned long action,
996 struct of_reconfig_data *arg)
997{
998 return -EINVAL;
999}
1000#endif /* CONFIG_OF_DYNAMIC */
1001
1002/* CONFIG_OF_RESOLVE api */
1003extern int of_resolve_phandles(struct device_node *tree);
1004
1005/**
1006 * of_device_is_system_power_controller - Tells if system-power-controller is found for device_node
1007 * @np: Pointer to the given device_node
1008 *
1009 * return true if present false otherwise
1010 */
1011static inline bool of_device_is_system_power_controller(const struct device_node *np)
1012{
1013 return of_property_read_bool(np, "system-power-controller");
1014}
1015
1016/**
1017 * Overlay support
1018 */
1019
1020#ifdef CONFIG_OF_OVERLAY
1021
1022/* ID based overlays; the API for external users */
1023int of_overlay_create(struct device_node *tree);
1024int of_overlay_destroy(int id);
1025int of_overlay_destroy_all(void);
1026
1027#else
1028
1029static inline int of_overlay_create(struct device_node *tree)
1030{
1031 return -ENOTSUPP;
1032}
1033
1034static inline int of_overlay_destroy(int id)
1035{
1036 return -ENOTSUPP;
1037}
1038
1039static inline int of_overlay_destroy_all(void)
1040{
1041 return -ENOTSUPP;
1042}
1043
864#endif 1044#endif
865 1045
866#endif /* _LINUX_OF_H */ 1046#endif /* _LINUX_OF_H */
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index fb7b7221e063..d88e81be6368 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -23,17 +23,6 @@ struct of_pci_range {
23#define for_each_of_pci_range(parser, range) \ 23#define for_each_of_pci_range(parser, range) \
24 for (; of_pci_range_parser_one(parser, range);) 24 for (; of_pci_range_parser_one(parser, range);)
25 25
26static inline void of_pci_range_to_resource(struct of_pci_range *range,
27 struct device_node *np,
28 struct resource *res)
29{
30 res->flags = range->flags;
31 res->start = range->cpu_addr;
32 res->end = range->cpu_addr + range->size - 1;
33 res->parent = res->child = res->sibling = NULL;
34 res->name = np->full_name;
35}
36
37/* Translate a DMA address from device space to CPU space */ 26/* Translate a DMA address from device space to CPU space */
38extern u64 of_translate_dma_address(struct device_node *dev, 27extern u64 of_translate_dma_address(struct device_node *dev,
39 const __be32 *in_addr); 28 const __be32 *in_addr);
@@ -55,7 +44,9 @@ extern void __iomem *of_iomap(struct device_node *device, int index);
55extern const __be32 *of_get_address(struct device_node *dev, int index, 44extern const __be32 *of_get_address(struct device_node *dev, int index,
56 u64 *size, unsigned int *flags); 45 u64 *size, unsigned int *flags);
57 46
47extern int pci_register_io_range(phys_addr_t addr, resource_size_t size);
58extern unsigned long pci_address_to_pio(phys_addr_t addr); 48extern unsigned long pci_address_to_pio(phys_addr_t addr);
49extern phys_addr_t pci_pio_to_address(unsigned long pio);
59 50
60extern int of_pci_range_parser_init(struct of_pci_range_parser *parser, 51extern int of_pci_range_parser_init(struct of_pci_range_parser *parser,
61 struct device_node *node); 52 struct device_node *node);
@@ -80,6 +71,11 @@ static inline const __be32 *of_get_address(struct device_node *dev, int index,
80 return NULL; 71 return NULL;
81} 72}
82 73
74static inline phys_addr_t pci_pio_to_address(unsigned long pio)
75{
76 return 0;
77}
78
83static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser, 79static inline int of_pci_range_parser_init(struct of_pci_range_parser *parser,
84 struct device_node *node) 80 struct device_node *node)
85{ 81{
@@ -110,7 +106,7 @@ extern int of_address_to_resource(struct device_node *dev, int index,
110 struct resource *r); 106 struct resource *r);
111void __iomem *of_iomap(struct device_node *node, int index); 107void __iomem *of_iomap(struct device_node *node, int index);
112void __iomem *of_io_request_and_map(struct device_node *device, 108void __iomem *of_io_request_and_map(struct device_node *device,
113 int index, char *name); 109 int index, const char *name);
114#else 110#else
115 111
116#include <linux/io.h> 112#include <linux/io.h>
@@ -127,7 +123,7 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
127} 123}
128 124
129static inline void __iomem *of_io_request_and_map(struct device_node *device, 125static inline void __iomem *of_io_request_and_map(struct device_node *device,
130 int index, char *name) 126 int index, const char *name)
131{ 127{
132 return IOMEM_ERR_PTR(-EINVAL); 128 return IOMEM_ERR_PTR(-EINVAL);
133} 129}
@@ -138,6 +134,9 @@ extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
138 u64 *size, unsigned int *flags); 134 u64 *size, unsigned int *flags);
139extern int of_pci_address_to_resource(struct device_node *dev, int bar, 135extern int of_pci_address_to_resource(struct device_node *dev, int bar,
140 struct resource *r); 136 struct resource *r);
137extern int of_pci_range_to_resource(struct of_pci_range *range,
138 struct device_node *np,
139 struct resource *res);
141#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */ 140#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
142static inline int of_pci_address_to_resource(struct device_node *dev, int bar, 141static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
143 struct resource *r) 142 struct resource *r)
@@ -150,6 +149,12 @@ static inline const __be32 *of_get_pci_address(struct device_node *dev,
150{ 149{
151 return NULL; 150 return NULL;
152} 151}
152static inline int of_pci_range_to_resource(struct of_pci_range *range,
153 struct device_node *np,
154 struct resource *res)
155{
156 return -ENOSYS;
157}
153#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */ 158#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
154 159
155#endif /* __OF_ADDRESS_H */ 160#endif /* __OF_ADDRESS_H */
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 51a560f34bca..16c75547d725 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -1,12 +1,19 @@
1#ifndef __OF_IOMMU_H 1#ifndef __OF_IOMMU_H
2#define __OF_IOMMU_H 2#define __OF_IOMMU_H
3 3
4#include <linux/device.h>
5#include <linux/iommu.h>
6#include <linux/of.h>
7
4#ifdef CONFIG_OF_IOMMU 8#ifdef CONFIG_OF_IOMMU
5 9
6extern int of_get_dma_window(struct device_node *dn, const char *prefix, 10extern int of_get_dma_window(struct device_node *dn, const char *prefix,
7 int index, unsigned long *busno, dma_addr_t *addr, 11 int index, unsigned long *busno, dma_addr_t *addr,
8 size_t *size); 12 size_t *size);
9 13
14extern void of_iommu_init(void);
15extern struct iommu_ops *of_iommu_configure(struct device *dev);
16
10#else 17#else
11 18
12static inline int of_get_dma_window(struct device_node *dn, const char *prefix, 19static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
@@ -16,6 +23,22 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix,
16 return -EINVAL; 23 return -EINVAL;
17} 24}
18 25
26static inline void of_iommu_init(void) { }
27static inline struct iommu_ops *of_iommu_configure(struct device *dev)
28{
29 return NULL;
30}
31
19#endif /* CONFIG_OF_IOMMU */ 32#endif /* CONFIG_OF_IOMMU */
20 33
34void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops);
35struct iommu_ops *of_iommu_get_ops(struct device_node *np);
36
37extern struct of_device_id __iommu_of_table;
38
39typedef int (*of_iommu_init_fn)(struct device_node *);
40
41#define IOMMU_OF_DECLARE(name, compat, fn) \
42 _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn)
43
21#endif /* __OF_IOMMU_H */ 44#endif /* __OF_IOMMU_H */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index dde3a4a0fa5d..ce0e5abeb454 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -15,6 +15,7 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
15int of_pci_get_devfn(struct device_node *np); 15int of_pci_get_devfn(struct device_node *np);
16int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); 16int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
17int of_pci_parse_bus_range(struct device_node *node, struct resource *res); 17int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
18int of_get_pci_domain_nr(struct device_node *node);
18#else 19#else
19static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) 20static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
20{ 21{
@@ -43,16 +44,28 @@ of_pci_parse_bus_range(struct device_node *node, struct resource *res)
43{ 44{
44 return -EINVAL; 45 return -EINVAL;
45} 46}
47
48static inline int
49of_get_pci_domain_nr(struct device_node *node)
50{
51 return -1;
52}
53#endif
54
55#if defined(CONFIG_OF_ADDRESS)
56int of_pci_get_host_bridge_resources(struct device_node *dev,
57 unsigned char busno, unsigned char bus_max,
58 struct list_head *resources, resource_size_t *io_base);
46#endif 59#endif
47 60
48#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) 61#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
49int of_pci_msi_chip_add(struct msi_chip *chip); 62int of_pci_msi_chip_add(struct msi_controller *chip);
50void of_pci_msi_chip_remove(struct msi_chip *chip); 63void of_pci_msi_chip_remove(struct msi_controller *chip);
51struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node); 64struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
52#else 65#else
53static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; } 66static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
54static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { } 67static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
55static inline struct msi_chip * 68static inline struct msi_controller *
56of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } 69of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
57#endif 70#endif
58 71
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h
index c65a18a0cfdf..7e09244bb679 100644
--- a/include/linux/of_pdt.h
+++ b/include/linux/of_pdt.h
@@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size);
39/* for building the device tree */ 39/* for building the device tree */
40extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); 40extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops);
41 41
42extern void (*of_pdt_build_more)(struct device_node *dp, 42extern void (*of_pdt_build_more)(struct device_node *dp);
43 struct device_node ***nextp);
44 43
45#endif /* _LINUX_OF_PDT_H */ 44#endif /* _LINUX_OF_PDT_H */
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index c2b0627a2317..8a860f096c35 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root,
84static inline void of_platform_depopulate(struct device *parent) { } 84static inline void of_platform_depopulate(struct device *parent) { }
85#endif 85#endif
86 86
87#ifdef CONFIG_OF_DYNAMIC
88extern void of_platform_register_reconfig_notifier(void);
89#else
90static inline void of_platform_register_reconfig_notifier(void) { }
91#endif
92
87#endif /* _LINUX_OF_PLATFORM_H */ 93#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 5b5efae09135..ad2f67054372 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -16,7 +16,7 @@ struct reserved_mem {
16}; 16};
17 17
18struct reserved_mem_ops { 18struct reserved_mem_ops {
19 void (*device_init)(struct reserved_mem *rmem, 19 int (*device_init)(struct reserved_mem *rmem,
20 struct device *dev); 20 struct device *dev);
21 void (*device_release)(struct reserved_mem *rmem, 21 void (*device_release)(struct reserved_mem *rmem,
22 struct device *dev); 22 struct device *dev);
@@ -28,14 +28,17 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
28 _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) 28 _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
29 29
30#ifdef CONFIG_OF_RESERVED_MEM 30#ifdef CONFIG_OF_RESERVED_MEM
31void of_reserved_mem_device_init(struct device *dev); 31int of_reserved_mem_device_init(struct device *dev);
32void of_reserved_mem_device_release(struct device *dev); 32void of_reserved_mem_device_release(struct device *dev);
33 33
34void fdt_init_reserved_mem(void); 34void fdt_init_reserved_mem(void);
35void fdt_reserved_mem_save_node(unsigned long node, const char *uname, 35void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
36 phys_addr_t base, phys_addr_t size); 36 phys_addr_t base, phys_addr_t size);
37#else 37#else
38static inline void of_reserved_mem_device_init(struct device *dev) { } 38static inline int of_reserved_mem_device_init(struct device *dev)
39{
40 return -ENOSYS;
41}
39static inline void of_reserved_mem_device_release(struct device *pdev) { } 42static inline void of_reserved_mem_device_release(struct device *pdev) { }
40 43
41static inline void fdt_init_reserved_mem(void) { } 44static inline void fdt_init_reserved_mem(void) { }
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index 6f06f8bc612c..e5a70132a240 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -306,15 +306,12 @@ extern void omap_set_dma_transfer_params(int lch, int data_type,
306 int elem_count, int frame_count, 306 int elem_count, int frame_count,
307 int sync_mode, 307 int sync_mode,
308 int dma_trigger, int src_or_dst_synch); 308 int dma_trigger, int src_or_dst_synch);
309extern void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode,
310 u32 color);
311extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode); 309extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode);
312extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode); 310extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode);
313 311
314extern void omap_set_dma_src_params(int lch, int src_port, int src_amode, 312extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
315 unsigned long src_start, 313 unsigned long src_start,
316 int src_ei, int src_fi); 314 int src_ei, int src_fi);
317extern void omap_set_dma_src_index(int lch, int eidx, int fidx);
318extern void omap_set_dma_src_data_pack(int lch, int enable); 315extern void omap_set_dma_src_data_pack(int lch, int enable);
319extern void omap_set_dma_src_burst_mode(int lch, 316extern void omap_set_dma_src_burst_mode(int lch,
320 enum omap_dma_burst_mode burst_mode); 317 enum omap_dma_burst_mode burst_mode);
@@ -322,7 +319,6 @@ extern void omap_set_dma_src_burst_mode(int lch,
322extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode, 319extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
323 unsigned long dest_start, 320 unsigned long dest_start,
324 int dst_ei, int dst_fi); 321 int dst_ei, int dst_fi);
325extern void omap_set_dma_dest_index(int lch, int eidx, int fidx);
326extern void omap_set_dma_dest_data_pack(int lch, int enable); 322extern void omap_set_dma_dest_data_pack(int lch, int enable);
327extern void omap_set_dma_dest_burst_mode(int lch, 323extern void omap_set_dma_dest_burst_mode(int lch,
328 enum omap_dma_burst_mode burst_mode); 324 enum omap_dma_burst_mode burst_mode);
@@ -331,52 +327,19 @@ extern void omap_set_dma_params(int lch,
331 struct omap_dma_channel_params *params); 327 struct omap_dma_channel_params *params);
332 328
333extern void omap_dma_link_lch(int lch_head, int lch_queue); 329extern void omap_dma_link_lch(int lch_head, int lch_queue);
334extern void omap_dma_unlink_lch(int lch_head, int lch_queue);
335 330
336extern int omap_set_dma_callback(int lch, 331extern int omap_set_dma_callback(int lch,
337 void (*callback)(int lch, u16 ch_status, void *data), 332 void (*callback)(int lch, u16 ch_status, void *data),
338 void *data); 333 void *data);
339extern dma_addr_t omap_get_dma_src_pos(int lch); 334extern dma_addr_t omap_get_dma_src_pos(int lch);
340extern dma_addr_t omap_get_dma_dst_pos(int lch); 335extern dma_addr_t omap_get_dma_dst_pos(int lch);
341extern void omap_clear_dma(int lch);
342extern int omap_get_dma_active_status(int lch); 336extern int omap_get_dma_active_status(int lch);
343extern int omap_dma_running(void); 337extern int omap_dma_running(void);
344extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth, 338extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth,
345 int tparams); 339 int tparams);
346extern int omap_dma_set_prio_lch(int lch, unsigned char read_prio,
347 unsigned char write_prio);
348extern void omap_set_dma_dst_endian_type(int lch, enum end_type etype);
349extern void omap_set_dma_src_endian_type(int lch, enum end_type etype);
350extern int omap_get_dma_index(int lch, int *ei, int *fi);
351
352void omap_dma_global_context_save(void); 340void omap_dma_global_context_save(void);
353void omap_dma_global_context_restore(void); 341void omap_dma_global_context_restore(void);
354 342
355extern void omap_dma_disable_irq(int lch);
356
357/* Chaining APIs */
358#ifndef CONFIG_ARCH_OMAP1
359extern int omap_request_dma_chain(int dev_id, const char *dev_name,
360 void (*callback) (int lch, u16 ch_status,
361 void *data),
362 int *chain_id, int no_of_chans,
363 int chain_mode,
364 struct omap_dma_channel_params params);
365extern int omap_free_dma_chain(int chain_id);
366extern int omap_dma_chain_a_transfer(int chain_id, int src_start,
367 int dest_start, int elem_count,
368 int frame_count, void *callbk_data);
369extern int omap_start_dma_chain_transfers(int chain_id);
370extern int omap_stop_dma_chain_transfers(int chain_id);
371extern int omap_get_dma_chain_index(int chain_id, int *ei, int *fi);
372extern int omap_get_dma_chain_dst_pos(int chain_id);
373extern int omap_get_dma_chain_src_pos(int chain_id);
374
375extern int omap_modify_dma_chain_params(int chain_id,
376 struct omap_dma_channel_params params);
377extern int omap_dma_chain_status(int chain_id);
378#endif
379
380#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP) 343#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP)
381#include <mach/lcd_dma.h> 344#include <mach/lcd_dma.h>
382#else 345#else
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
new file mode 100644
index 000000000000..c2080eebbb47
--- /dev/null
+++ b/include/linux/omap-gpmc.h
@@ -0,0 +1,199 @@
1/*
2 * OMAP GPMC (General Purpose Memory Controller) defines
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 */
9
10/* Maximum Number of Chip Selects */
11#define GPMC_CS_NUM 8
12
13#define GPMC_CONFIG_WP 0x00000005
14
15#define GPMC_IRQ_FIFOEVENTENABLE 0x01
16#define GPMC_IRQ_COUNT_EVENT 0x02
17
18#define GPMC_BURST_4 4 /* 4 word burst */
19#define GPMC_BURST_8 8 /* 8 word burst */
20#define GPMC_BURST_16 16 /* 16 word burst */
21#define GPMC_DEVWIDTH_8BIT 1 /* 8-bit device width */
22#define GPMC_DEVWIDTH_16BIT 2 /* 16-bit device width */
23#define GPMC_MUX_AAD 1 /* Addr-Addr-Data multiplex */
24#define GPMC_MUX_AD 2 /* Addr-Data multiplex */
25
26/* bool type time settings */
27struct gpmc_bool_timings {
28 bool cycle2cyclediffcsen;
29 bool cycle2cyclesamecsen;
30 bool we_extra_delay;
31 bool oe_extra_delay;
32 bool adv_extra_delay;
33 bool cs_extra_delay;
34 bool time_para_granularity;
35};
36
37/*
38 * Note that all values in this struct are in nanoseconds except sync_clk
39 * (which is in picoseconds), while the register values are in gpmc_fck cycles.
40 */
41struct gpmc_timings {
42 /* Minimum clock period for synchronous mode (in picoseconds) */
43 u32 sync_clk;
44
45 /* Chip-select signal timings corresponding to GPMC_CS_CONFIG2 */
46 u32 cs_on; /* Assertion time */
47 u32 cs_rd_off; /* Read deassertion time */
48 u32 cs_wr_off; /* Write deassertion time */
49
50 /* ADV signal timings corresponding to GPMC_CONFIG3 */
51 u32 adv_on; /* Assertion time */
52 u32 adv_rd_off; /* Read deassertion time */
53 u32 adv_wr_off; /* Write deassertion time */
54
55 /* WE signals timings corresponding to GPMC_CONFIG4 */
56 u32 we_on; /* WE assertion time */
57 u32 we_off; /* WE deassertion time */
58
59 /* OE signals timings corresponding to GPMC_CONFIG4 */
60 u32 oe_on; /* OE assertion time */
61 u32 oe_off; /* OE deassertion time */
62
63 /* Access time and cycle time timings corresponding to GPMC_CONFIG5 */
64 u32 page_burst_access; /* Multiple access word delay */
65 u32 access; /* Start-cycle to first data valid delay */
66 u32 rd_cycle; /* Total read cycle time */
67 u32 wr_cycle; /* Total write cycle time */
68
69 u32 bus_turnaround;
70 u32 cycle2cycle_delay;
71
72 u32 wait_monitoring;
73 u32 clk_activation;
74
75 /* The following are only on OMAP3430 */
76 u32 wr_access; /* WRACCESSTIME */
77 u32 wr_data_mux_bus; /* WRDATAONADMUXBUS */
78
79 struct gpmc_bool_timings bool_timings;
80};
81
82/* Device timings in picoseconds */
83struct gpmc_device_timings {
84 u32 t_ceasu; /* address setup to CS valid */
85 u32 t_avdasu; /* address setup to ADV valid */
86 /* XXX: try to combine t_avdp_r & t_avdp_w. Issue is
87 * of tusb using these timings even for sync whilst
88 * ideally for adv_rd/(wr)_off it should have considered
89 * t_avdh instead. This indirectly necessitates r/w
90 * variations of t_avdp as it is possible to have one
91 * sync & other async
92 */
93 u32 t_avdp_r; /* ADV low time (what about t_cer ?) */
94 u32 t_avdp_w;
95 u32 t_aavdh; /* address hold time */
96 u32 t_oeasu; /* address setup to OE valid */
97 u32 t_aa; /* access time from ADV assertion */
98 u32 t_iaa; /* initial access time */
99 u32 t_oe; /* access time from OE assertion */
100 u32 t_ce; /* access time from CS asertion */
101 u32 t_rd_cycle; /* read cycle time */
102 u32 t_cez_r; /* read CS deassertion to high Z */
103 u32 t_cez_w; /* write CS deassertion to high Z */
104 u32 t_oez; /* OE deassertion to high Z */
105 u32 t_weasu; /* address setup to WE valid */
106 u32 t_wpl; /* write assertion time */
107 u32 t_wph; /* write deassertion time */
108 u32 t_wr_cycle; /* write cycle time */
109
110 u32 clk;
111 u32 t_bacc; /* burst access valid clock to output delay */
112 u32 t_ces; /* CS setup time to clk */
113 u32 t_avds; /* ADV setup time to clk */
114 u32 t_avdh; /* ADV hold time from clk */
115 u32 t_ach; /* address hold time from clk */
116 u32 t_rdyo; /* clk to ready valid */
117
118 u32 t_ce_rdyz; /* XXX: description ?, or use t_cez instead */
119 u32 t_ce_avd; /* CS on to ADV on delay */
120
121 /* XXX: check the possibility of combining
122 * cyc_aavhd_oe & cyc_aavdh_we
123 */
124 u8 cyc_aavdh_oe;/* read address hold time in cycles */
125 u8 cyc_aavdh_we;/* write address hold time in cycles */
126 u8 cyc_oe; /* access time from OE assertion in cycles */
127 u8 cyc_wpl; /* write deassertion time in cycles */
128 u32 cyc_iaa; /* initial access time in cycles */
129
130 /* extra delays */
131 bool ce_xdelay;
132 bool avd_xdelay;
133 bool oe_xdelay;
134 bool we_xdelay;
135};
136
137struct gpmc_settings {
138 bool burst_wrap; /* enables wrap bursting */
139 bool burst_read; /* enables read page/burst mode */
140 bool burst_write; /* enables write page/burst mode */
141 bool device_nand; /* device is NAND */
142 bool sync_read; /* enables synchronous reads */
143 bool sync_write; /* enables synchronous writes */
144 bool wait_on_read; /* monitor wait on reads */
145 bool wait_on_write; /* monitor wait on writes */
146 u32 burst_len; /* page/burst length */
147 u32 device_width; /* device bus width (8 or 16 bit) */
148 u32 mux_add_data; /* multiplex address & data */
149 u32 wait_pin; /* wait-pin to be used */
150};
151
152extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
153 struct gpmc_settings *gpmc_s,
154 struct gpmc_device_timings *dev_t);
155
156struct gpmc_nand_regs;
157struct device_node;
158
159extern void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs);
160extern int gpmc_get_client_irq(unsigned irq_config);
161
162extern unsigned int gpmc_ticks_to_ns(unsigned int ticks);
163
164extern void gpmc_cs_write_reg(int cs, int idx, u32 val);
165extern int gpmc_calc_divider(unsigned int sync_clk);
166extern int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t);
167extern int gpmc_cs_program_settings(int cs, struct gpmc_settings *p);
168extern int gpmc_cs_request(int cs, unsigned long size, unsigned long *base);
169extern void gpmc_cs_free(int cs);
170extern int gpmc_configure(int cmd, int wval);
171extern void gpmc_read_settings_dt(struct device_node *np,
172 struct gpmc_settings *p);
173
174extern void omap3_gpmc_save_context(void);
175extern void omap3_gpmc_restore_context(void);
176
177struct gpmc_timings;
178struct omap_nand_platform_data;
179struct omap_onenand_platform_data;
180
181#if IS_ENABLED(CONFIG_MTD_NAND_OMAP2)
182extern int gpmc_nand_init(struct omap_nand_platform_data *d,
183 struct gpmc_timings *gpmc_t);
184#else
185static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
186 struct gpmc_timings *gpmc_t)
187{
188 return 0;
189}
190#endif
191
192#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
193extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
194#else
195#define board_onenand_data NULL
196static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
197{
198}
199#endif
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
index f8322d9cd235..587bbdd31f5a 100644
--- a/include/linux/omap-mailbox.h
+++ b/include/linux/omap-mailbox.h
@@ -10,20 +10,20 @@
10#define OMAP_MAILBOX_H 10#define OMAP_MAILBOX_H
11 11
12typedef u32 mbox_msg_t; 12typedef u32 mbox_msg_t;
13struct omap_mbox;
14 13
15typedef int __bitwise omap_mbox_irq_t; 14typedef int __bitwise omap_mbox_irq_t;
16#define IRQ_TX ((__force omap_mbox_irq_t) 1) 15#define IRQ_TX ((__force omap_mbox_irq_t) 1)
17#define IRQ_RX ((__force omap_mbox_irq_t) 2) 16#define IRQ_RX ((__force omap_mbox_irq_t) 2)
18 17
19int omap_mbox_msg_send(struct omap_mbox *, mbox_msg_t msg); 18struct mbox_chan;
19struct mbox_client;
20 20
21struct omap_mbox *omap_mbox_get(const char *, struct notifier_block *nb); 21struct mbox_chan *omap_mbox_request_channel(struct mbox_client *cl,
22void omap_mbox_put(struct omap_mbox *mbox, struct notifier_block *nb); 22 const char *chan_name);
23 23
24void omap_mbox_save_ctx(struct omap_mbox *mbox); 24void omap_mbox_save_ctx(struct mbox_chan *chan);
25void omap_mbox_restore_ctx(struct omap_mbox *mbox); 25void omap_mbox_restore_ctx(struct mbox_chan *chan);
26void omap_mbox_enable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); 26void omap_mbox_enable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
27void omap_mbox_disable_irq(struct omap_mbox *mbox, omap_mbox_irq_t irq); 27void omap_mbox_disable_irq(struct mbox_chan *chan, omap_mbox_irq_t irq);
28 28
29#endif /* OMAP_MAILBOX_H */ 29#endif /* OMAP_MAILBOX_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 647395a1a550..853698c721f7 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
50extern unsigned long oom_badness(struct task_struct *p, 50extern unsigned long oom_badness(struct task_struct *p,
51 struct mem_cgroup *memcg, const nodemask_t *nodemask, 51 struct mem_cgroup *memcg, const nodemask_t *nodemask,
52 unsigned long totalpages); 52 unsigned long totalpages);
53
54extern int oom_kills_count(void);
55extern void note_oom_kill(void);
53extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, 56extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
54 unsigned int points, unsigned long totalpages, 57 unsigned int points, unsigned long totalpages,
55 struct mem_cgroup *memcg, nodemask_t *nodemask, 58 struct mem_cgroup *memcg, nodemask_t *nodemask,
@@ -89,6 +92,17 @@ static inline bool oom_gfp_allowed(gfp_t gfp_mask)
89 92
90extern struct task_struct *find_lock_task_mm(struct task_struct *p); 93extern struct task_struct *find_lock_task_mm(struct task_struct *p);
91 94
95static inline bool task_will_free_mem(struct task_struct *task)
96{
97 /*
98 * A coredumping process may sleep for an extended period in exit_mm(),
99 * so the oom killer cannot assume that the process will promptly exit
100 * and release memory.
101 */
102 return (task->flags & PF_EXITING) &&
103 !(task->signal->flags & SIGNAL_GROUP_COREDUMP);
104}
105
92/* sysctls */ 106/* sysctls */
93extern int sysctl_oom_dump_tasks; 107extern int sysctl_oom_dump_tasks;
94extern int sysctl_oom_kill_allocating_task; 108extern int sysctl_oom_kill_allocating_task;
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
deleted file mode 100644
index 22691f614043..000000000000
--- a/include/linux/page-debug-flags.h
+++ /dev/null
@@ -1,32 +0,0 @@
1#ifndef LINUX_PAGE_DEBUG_FLAGS_H
2#define LINUX_PAGE_DEBUG_FLAGS_H
3
4/*
5 * page->debug_flags bits:
6 *
7 * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to
8 * implement generic debug pagealloc feature. The pages are filled with
9 * poison patterns and set this flag after free_pages(). The poisoned
10 * pages are verified whether the patterns are not corrupted and clear
11 * the flag before alloc_pages().
12 */
13
14enum page_debug_flags {
15 PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */
16 PAGE_DEBUG_FLAG_GUARD,
17};
18
19/*
20 * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably
21 * gets turned off when no debug features are enabling it!
22 */
23
24#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
25#if !defined(CONFIG_PAGE_POISONING) && \
26 !defined(CONFIG_PAGE_GUARD) \
27/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
28#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
29#endif
30#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */
31
32#endif /* LINUX_PAGE_DEBUG_FLAGS_H */
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3fff8e774067..2dc1e1697b45 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -2,6 +2,10 @@
2#define __LINUX_PAGEISOLATION_H 2#define __LINUX_PAGEISOLATION_H
3 3
4#ifdef CONFIG_MEMORY_ISOLATION 4#ifdef CONFIG_MEMORY_ISOLATION
5static inline bool has_isolate_pageblock(struct zone *zone)
6{
7 return zone->nr_isolate_pageblock;
8}
5static inline bool is_migrate_isolate_page(struct page *page) 9static inline bool is_migrate_isolate_page(struct page *page)
6{ 10{
7 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; 11 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype)
11 return migratetype == MIGRATE_ISOLATE; 15 return migratetype == MIGRATE_ISOLATE;
12} 16}
13#else 17#else
18static inline bool has_isolate_pageblock(struct zone *zone)
19{
20 return false;
21}
14static inline bool is_migrate_isolate_page(struct page *page) 22static inline bool is_migrate_isolate_page(struct page *page)
15{ 23{
16 return false; 24 return false;
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
deleted file mode 100644
index 5c831f1eca79..000000000000
--- a/include/linux/page_cgroup.h
+++ /dev/null
@@ -1,105 +0,0 @@
1#ifndef __LINUX_PAGE_CGROUP_H
2#define __LINUX_PAGE_CGROUP_H
3
4enum {
5 /* flags for mem_cgroup */
6 PCG_USED = 0x01, /* This page is charged to a memcg */
7 PCG_MEM = 0x02, /* This page holds a memory charge */
8 PCG_MEMSW = 0x04, /* This page holds a memory+swap charge */
9};
10
11struct pglist_data;
12
13#ifdef CONFIG_MEMCG
14struct mem_cgroup;
15
16/*
17 * Page Cgroup can be considered as an extended mem_map.
18 * A page_cgroup page is associated with every page descriptor. The
19 * page_cgroup helps us identify information about the cgroup
20 * All page cgroups are allocated at boot or memory hotplug event,
21 * then the page cgroup for pfn always exists.
22 */
23struct page_cgroup {
24 unsigned long flags;
25 struct mem_cgroup *mem_cgroup;
26};
27
28extern void pgdat_page_cgroup_init(struct pglist_data *pgdat);
29
30#ifdef CONFIG_SPARSEMEM
31static inline void page_cgroup_init_flatmem(void)
32{
33}
34extern void page_cgroup_init(void);
35#else
36extern void page_cgroup_init_flatmem(void);
37static inline void page_cgroup_init(void)
38{
39}
40#endif
41
42struct page_cgroup *lookup_page_cgroup(struct page *page);
43
44static inline int PageCgroupUsed(struct page_cgroup *pc)
45{
46 return !!(pc->flags & PCG_USED);
47}
48#else /* !CONFIG_MEMCG */
49struct page_cgroup;
50
51static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat)
52{
53}
54
55static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
56{
57 return NULL;
58}
59
60static inline void page_cgroup_init(void)
61{
62}
63
64static inline void page_cgroup_init_flatmem(void)
65{
66}
67#endif /* CONFIG_MEMCG */
68
69#include <linux/swap.h>
70
71#ifdef CONFIG_MEMCG_SWAP
72extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
73 unsigned short old, unsigned short new);
74extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
75extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
76extern int swap_cgroup_swapon(int type, unsigned long max_pages);
77extern void swap_cgroup_swapoff(int type);
78#else
79
80static inline
81unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
82{
83 return 0;
84}
85
86static inline
87unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
88{
89 return 0;
90}
91
92static inline int
93swap_cgroup_swapon(int type, unsigned long max_pages)
94{
95 return 0;
96}
97
98static inline void swap_cgroup_swapoff(int type)
99{
100 return;
101}
102
103#endif /* CONFIG_MEMCG_SWAP */
104
105#endif /* __LINUX_PAGE_CGROUP_H */
diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h
new file mode 100644
index 000000000000..955421575d16
--- /dev/null
+++ b/include/linux/page_counter.h
@@ -0,0 +1,51 @@
1#ifndef _LINUX_PAGE_COUNTER_H
2#define _LINUX_PAGE_COUNTER_H
3
4#include <linux/atomic.h>
5#include <linux/kernel.h>
6#include <asm/page.h>
7
8struct page_counter {
9 atomic_long_t count;
10 unsigned long limit;
11 struct page_counter *parent;
12
13 /* legacy */
14 unsigned long watermark;
15 unsigned long failcnt;
16};
17
18#if BITS_PER_LONG == 32
19#define PAGE_COUNTER_MAX LONG_MAX
20#else
21#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
22#endif
23
24static inline void page_counter_init(struct page_counter *counter,
25 struct page_counter *parent)
26{
27 atomic_long_set(&counter->count, 0);
28 counter->limit = PAGE_COUNTER_MAX;
29 counter->parent = parent;
30}
31
32static inline unsigned long page_counter_read(struct page_counter *counter)
33{
34 return atomic_long_read(&counter->count);
35}
36
37void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
38void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
39int page_counter_try_charge(struct page_counter *counter,
40 unsigned long nr_pages,
41 struct page_counter **fail);
42void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
43int page_counter_limit(struct page_counter *counter, unsigned long limit);
44int page_counter_memparse(const char *buf, unsigned long *nr_pages);
45
46static inline void page_counter_reset_watermark(struct page_counter *counter)
47{
48 counter->watermark = page_counter_read(counter);
49}
50
51#endif /* _LINUX_PAGE_COUNTER_H */
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
new file mode 100644
index 000000000000..d2a2c84c72d0
--- /dev/null
+++ b/include/linux/page_ext.h
@@ -0,0 +1,84 @@
1#ifndef __LINUX_PAGE_EXT_H
2#define __LINUX_PAGE_EXT_H
3
4#include <linux/types.h>
5#include <linux/stacktrace.h>
6
7struct pglist_data;
8struct page_ext_operations {
9 bool (*need)(void);
10 void (*init)(void);
11};
12
13#ifdef CONFIG_PAGE_EXTENSION
14
15/*
16 * page_ext->flags bits:
17 *
18 * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
19 * implement generic debug pagealloc feature. The pages are filled with
20 * poison patterns and set this flag after free_pages(). The poisoned
21 * pages are verified whether the patterns are not corrupted and clear
22 * the flag before alloc_pages().
23 */
24
25enum page_ext_flags {
26 PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
27 PAGE_EXT_DEBUG_GUARD,
28 PAGE_EXT_OWNER,
29};
30
31/*
32 * Page Extension can be considered as an extended mem_map.
33 * A page_ext page is associated with every page descriptor. The
34 * page_ext helps us add more information about the page.
35 * All page_ext are allocated at boot or memory hotplug event,
36 * then the page_ext for pfn always exists.
37 */
38struct page_ext {
39 unsigned long flags;
40#ifdef CONFIG_PAGE_OWNER
41 unsigned int order;
42 gfp_t gfp_mask;
43 struct stack_trace trace;
44 unsigned long trace_entries[8];
45#endif
46};
47
48extern void pgdat_page_ext_init(struct pglist_data *pgdat);
49
50#ifdef CONFIG_SPARSEMEM
51static inline void page_ext_init_flatmem(void)
52{
53}
54extern void page_ext_init(void);
55#else
56extern void page_ext_init_flatmem(void);
57static inline void page_ext_init(void)
58{
59}
60#endif
61
62struct page_ext *lookup_page_ext(struct page *page);
63
64#else /* !CONFIG_PAGE_EXTENSION */
65struct page_ext;
66
67static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
68{
69}
70
71static inline struct page_ext *lookup_page_ext(struct page *page)
72{
73 return NULL;
74}
75
76static inline void page_ext_init(void)
77{
78}
79
80static inline void page_ext_init_flatmem(void)
81{
82}
83#endif /* CONFIG_PAGE_EXTENSION */
84#endif /* __LINUX_PAGE_EXT_H */
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
new file mode 100644
index 000000000000..b48c3471c254
--- /dev/null
+++ b/include/linux/page_owner.h
@@ -0,0 +1,38 @@
1#ifndef __LINUX_PAGE_OWNER_H
2#define __LINUX_PAGE_OWNER_H
3
4#ifdef CONFIG_PAGE_OWNER
5extern bool page_owner_inited;
6extern struct page_ext_operations page_owner_ops;
7
8extern void __reset_page_owner(struct page *page, unsigned int order);
9extern void __set_page_owner(struct page *page,
10 unsigned int order, gfp_t gfp_mask);
11
12static inline void reset_page_owner(struct page *page, unsigned int order)
13{
14 if (likely(!page_owner_inited))
15 return;
16
17 __reset_page_owner(page, order);
18}
19
20static inline void set_page_owner(struct page *page,
21 unsigned int order, gfp_t gfp_mask)
22{
23 if (likely(!page_owner_inited))
24 return;
25
26 __set_page_owner(page, order, gfp_mask);
27}
28#else
29static inline void reset_page_owner(struct page *page, unsigned int order)
30{
31}
32static inline void set_page_owner(struct page *page,
33 unsigned int order, gfp_t gfp_mask)
34{
35}
36
37#endif /* CONFIG_PAGE_OWNER */
38#endif /* __LINUX_PAGE_OWNER_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 3df8c7db7a4e..4b3736f7065c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -24,8 +24,7 @@ enum mapping_flags {
24 AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ 24 AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
25 AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ 25 AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
26 AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ 26 AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
27 AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */ 27 AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
28 AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */
29}; 28};
30 29
31static inline void mapping_set_error(struct address_space *mapping, int error) 30static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -55,21 +54,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
55 return !!mapping; 54 return !!mapping;
56} 55}
57 56
58static inline void mapping_set_balloon(struct address_space *mapping)
59{
60 set_bit(AS_BALLOON_MAP, &mapping->flags);
61}
62
63static inline void mapping_clear_balloon(struct address_space *mapping)
64{
65 clear_bit(AS_BALLOON_MAP, &mapping->flags);
66}
67
68static inline int mapping_balloon(struct address_space *mapping)
69{
70 return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
71}
72
73static inline void mapping_set_exiting(struct address_space *mapping) 57static inline void mapping_set_exiting(struct address_space *mapping)
74{ 58{
75 set_bit(AS_EXITING, &mapping->flags); 59 set_bit(AS_EXITING, &mapping->flags);
@@ -96,7 +80,7 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
96} 80}
97 81
98/* 82/*
99 * The page cache can done in larger chunks than 83 * The page cache can be done in larger chunks than
100 * one page, because it allows for more efficient 84 * one page, because it allows for more efficient
101 * throughput (it can then be mapped into user 85 * throughput (it can then be mapped into user
102 * space in smaller chunks for same flexibility). 86 * space in smaller chunks for same flexibility).
@@ -267,7 +251,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
267#define FGP_NOWAIT 0x00000020 251#define FGP_NOWAIT 0x00000020
268 252
269struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, 253struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
270 int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask); 254 int fgp_flags, gfp_t cache_gfp_mask);
271 255
272/** 256/**
273 * find_get_page - find and get a page reference 257 * find_get_page - find and get a page reference
@@ -282,13 +266,13 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
282static inline struct page *find_get_page(struct address_space *mapping, 266static inline struct page *find_get_page(struct address_space *mapping,
283 pgoff_t offset) 267 pgoff_t offset)
284{ 268{
285 return pagecache_get_page(mapping, offset, 0, 0, 0); 269 return pagecache_get_page(mapping, offset, 0, 0);
286} 270}
287 271
288static inline struct page *find_get_page_flags(struct address_space *mapping, 272static inline struct page *find_get_page_flags(struct address_space *mapping,
289 pgoff_t offset, int fgp_flags) 273 pgoff_t offset, int fgp_flags)
290{ 274{
291 return pagecache_get_page(mapping, offset, fgp_flags, 0, 0); 275 return pagecache_get_page(mapping, offset, fgp_flags, 0);
292} 276}
293 277
294/** 278/**
@@ -308,7 +292,7 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
308static inline struct page *find_lock_page(struct address_space *mapping, 292static inline struct page *find_lock_page(struct address_space *mapping,
309 pgoff_t offset) 293 pgoff_t offset)
310{ 294{
311 return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0); 295 return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
312} 296}
313 297
314/** 298/**
@@ -335,7 +319,7 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
335{ 319{
336 return pagecache_get_page(mapping, offset, 320 return pagecache_get_page(mapping, offset,
337 FGP_LOCK|FGP_ACCESSED|FGP_CREAT, 321 FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
338 gfp_mask, gfp_mask & GFP_RECLAIM_MASK); 322 gfp_mask);
339} 323}
340 324
341/** 325/**
@@ -356,8 +340,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
356{ 340{
357 return pagecache_get_page(mapping, index, 341 return pagecache_get_page(mapping, index,
358 FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, 342 FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
359 mapping_gfp_mask(mapping), 343 mapping_gfp_mask(mapping));
360 GFP_NOFS);
361} 344}
362 345
363struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); 346struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
@@ -496,12 +479,14 @@ static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
496} 479}
497 480
498/* 481/*
499 * This is exported only for wait_on_page_locked/wait_on_page_writeback. 482 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
500 * Never use this directly! 483 * and for filesystems which need to wait on PG_private.
501 */ 484 */
502extern void wait_on_page_bit(struct page *page, int bit_nr); 485extern void wait_on_page_bit(struct page *page, int bit_nr);
503 486
504extern int wait_on_page_bit_killable(struct page *page, int bit_nr); 487extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
488extern int wait_on_page_bit_killable_timeout(struct page *page,
489 int bit_nr, unsigned long timeout);
505 490
506static inline int wait_on_page_locked_killable(struct page *page) 491static inline int wait_on_page_locked_killable(struct page *page)
507{ 492{
@@ -510,6 +495,12 @@ static inline int wait_on_page_locked_killable(struct page *page)
510 return 0; 495 return 0;
511} 496}
512 497
498extern wait_queue_head_t *page_waitqueue(struct page *page);
499static inline void wake_up_page(struct page *page, int bit)
500{
501 __wake_up_bit(page_waitqueue(page), &page->flags, bit);
502}
503
513/* 504/*
514 * Wait for a page to be unlocked. 505 * Wait for a page to be unlocked.
515 * 506 *
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 64dacb7288a6..24c7728ca681 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -41,8 +41,13 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
41 41
42 if (pci_is_root_bus(pbus)) 42 if (pci_is_root_bus(pbus))
43 dev = pbus->bridge; 43 dev = pbus->bridge;
44 else 44 else {
45 /* If pbus is a virtual bus, there is no bridge to it */
46 if (!pbus->self)
47 return NULL;
48
45 dev = &pbus->self->dev; 49 dev = &pbus->self->dev;
50 }
46 51
47 return ACPI_HANDLE(dev); 52 return ACPI_HANDLE(dev);
48} 53}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 96453f9bc8ba..360a966a97a5 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -45,7 +45,7 @@
45 * In the interest of not exposing interfaces to user-space unnecessarily, 45 * In the interest of not exposing interfaces to user-space unnecessarily,
46 * the following kernel-only defines are being added here. 46 * the following kernel-only defines are being added here.
47 */ 47 */
48#define PCI_DEVID(bus, devfn) ((((u16)bus) << 8) | devfn) 48#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn))
49/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ 49/* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
50#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) 50#define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
51 51
@@ -331,6 +331,7 @@ struct pci_dev {
331 unsigned int is_added:1; 331 unsigned int is_added:1;
332 unsigned int is_busmaster:1; /* device is busmaster */ 332 unsigned int is_busmaster:1; /* device is busmaster */
333 unsigned int no_msi:1; /* device may not use msi */ 333 unsigned int no_msi:1; /* device may not use msi */
334 unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
334 unsigned int block_cfg_access:1; /* config space access is blocked */ 335 unsigned int block_cfg_access:1; /* config space access is blocked */
335 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 336 unsigned int broken_parity_status:1; /* Device generates false positive parity */
336 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ 337 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
@@ -348,6 +349,7 @@ struct pci_dev {
348 unsigned int __aer_firmware_first:1; 349 unsigned int __aer_firmware_first:1;
349 unsigned int broken_intx_masking:1; 350 unsigned int broken_intx_masking:1;
350 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ 351 unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
352 unsigned int irq_managed:1;
351 pci_dev_flags_t dev_flags; 353 pci_dev_flags_t dev_flags;
352 atomic_t enable_cnt; /* pci_enable_device has been called */ 354 atomic_t enable_cnt; /* pci_enable_device has been called */
353 355
@@ -449,7 +451,7 @@ struct pci_bus {
449 struct resource busn_res; /* bus numbers routed to this bus */ 451 struct resource busn_res; /* bus numbers routed to this bus */
450 452
451 struct pci_ops *ops; /* configuration access functions */ 453 struct pci_ops *ops; /* configuration access functions */
452 struct msi_chip *msi; /* MSI controller */ 454 struct msi_controller *msi; /* MSI controller */
453 void *sysdata; /* hook for sys-specific extension */ 455 void *sysdata; /* hook for sys-specific extension */
454 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ 456 struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */
455 457
@@ -457,6 +459,9 @@ struct pci_bus {
457 unsigned char primary; /* number of primary bridge */ 459 unsigned char primary; /* number of primary bridge */
458 unsigned char max_bus_speed; /* enum pci_bus_speed */ 460 unsigned char max_bus_speed; /* enum pci_bus_speed */
459 unsigned char cur_bus_speed; /* enum pci_bus_speed */ 461 unsigned char cur_bus_speed; /* enum pci_bus_speed */
462#ifdef CONFIG_PCI_DOMAINS_GENERIC
463 int domain_nr;
464#endif
460 465
461 char name[48]; 466 char name[48];
462 467
@@ -1000,6 +1005,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
1000int pci_save_state(struct pci_dev *dev); 1005int pci_save_state(struct pci_dev *dev);
1001void pci_restore_state(struct pci_dev *dev); 1006void pci_restore_state(struct pci_dev *dev);
1002struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev); 1007struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
1008int pci_load_saved_state(struct pci_dev *dev,
1009 struct pci_saved_state *state);
1003int pci_load_and_free_saved_state(struct pci_dev *dev, 1010int pci_load_and_free_saved_state(struct pci_dev *dev,
1004 struct pci_saved_state **state); 1011 struct pci_saved_state **state);
1005struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); 1012struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
@@ -1103,6 +1110,9 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
1103 resource_size_t), 1110 resource_size_t),
1104 void *alignf_data); 1111 void *alignf_data);
1105 1112
1113
1114int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
1115
1106static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar) 1116static inline dma_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
1107{ 1117{
1108 struct pci_bus_region region; 1118 struct pci_bus_region region;
@@ -1288,12 +1298,32 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
1288 */ 1298 */
1289#ifdef CONFIG_PCI_DOMAINS 1299#ifdef CONFIG_PCI_DOMAINS
1290extern int pci_domains_supported; 1300extern int pci_domains_supported;
1301int pci_get_new_domain_nr(void);
1291#else 1302#else
1292enum { pci_domains_supported = 0 }; 1303enum { pci_domains_supported = 0 };
1293static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1304static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1294static inline int pci_proc_domain(struct pci_bus *bus) { return 0; } 1305static inline int pci_proc_domain(struct pci_bus *bus) { return 0; }
1306static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1295#endif /* CONFIG_PCI_DOMAINS */ 1307#endif /* CONFIG_PCI_DOMAINS */
1296 1308
1309/*
1310 * Generic implementation for PCI domain support. If your
1311 * architecture does not need custom management of PCI
1312 * domains then this implementation will be used
1313 */
1314#ifdef CONFIG_PCI_DOMAINS_GENERIC
1315static inline int pci_domain_nr(struct pci_bus *bus)
1316{
1317 return bus->domain_nr;
1318}
1319void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent);
1320#else
1321static inline void pci_bus_assign_domain_nr(struct pci_bus *bus,
1322 struct device *parent)
1323{
1324}
1325#endif
1326
1297/* some architectures require additional setup to direct VGA traffic */ 1327/* some architectures require additional setup to direct VGA traffic */
1298typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, 1328typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode,
1299 unsigned int command_bits, u32 flags); 1329 unsigned int command_bits, u32 flags);
@@ -1402,6 +1432,7 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
1402 1432
1403static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } 1433static inline int pci_domain_nr(struct pci_bus *bus) { return 0; }
1404static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } 1434static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; }
1435static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
1405 1436
1406#define dev_is_pci(d) (false) 1437#define dev_is_pci(d) (false)
1407#define dev_is_pf(d) (false) 1438#define dev_is_pf(d) (false)
@@ -1563,16 +1594,11 @@ enum pci_fixup_pass {
1563 1594
1564#ifdef CONFIG_PCI_QUIRKS 1595#ifdef CONFIG_PCI_QUIRKS
1565void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); 1596void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
1566struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
1567int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); 1597int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
1568void pci_dev_specific_enable_acs(struct pci_dev *dev); 1598void pci_dev_specific_enable_acs(struct pci_dev *dev);
1569#else 1599#else
1570static inline void pci_fixup_device(enum pci_fixup_pass pass, 1600static inline void pci_fixup_device(enum pci_fixup_pass pass,
1571 struct pci_dev *dev) { } 1601 struct pci_dev *dev) { }
1572static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
1573{
1574 return pci_dev_get(dev);
1575}
1576static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, 1602static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
1577 u16 acs_flags) 1603 u16 acs_flags)
1578{ 1604{
@@ -1707,7 +1733,7 @@ bool pci_acs_path_enabled(struct pci_dev *start,
1707 struct pci_dev *end, u16 acs_flags); 1733 struct pci_dev *end, u16 acs_flags);
1708 1734
1709#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ 1735#define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */
1710#define PCI_VPD_LRDT_ID(x) (x | PCI_VPD_LRDT) 1736#define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT)
1711 1737
1712/* Large Resource Data Type Tag Item Names */ 1738/* Large Resource Data Type Tag Item Names */
1713#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */ 1739#define PCI_VPD_LTIN_ID_STRING 0x02 /* Identifier String */
@@ -1834,15 +1860,17 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
1834 int (*fn)(struct pci_dev *pdev, 1860 int (*fn)(struct pci_dev *pdev,
1835 u16 alias, void *data), void *data); 1861 u16 alias, void *data), void *data);
1836 1862
1837/** 1863/* helper functions for operation of device flag */
1838 * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device 1864static inline void pci_set_dev_assigned(struct pci_dev *pdev)
1839 * @pdev: the PCI device 1865{
1840 * 1866 pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
1841 * if the device is PCIE, return NULL 1867}
1842 * if the device isn't connected to a PCIe bridge (that is its parent is a 1868static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
1843 * legacy PCI bridge and the bridge is directly connected to bus 0), return its 1869{
1844 * parent 1870 pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
1845 */ 1871}
1846struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev); 1872static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
1847 1873{
1874 return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
1875}
1848#endif /* LINUX_PCI_H */ 1876#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 5f2e559af6b0..8c7895061121 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -109,7 +109,6 @@ struct hotplug_slot {
109 struct list_head slot_list; 109 struct list_head slot_list;
110 struct pci_slot *pci_slot; 110 struct pci_slot *pci_slot;
111}; 111};
112#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
113 112
114static inline const char *hotplug_slot_name(const struct hotplug_slot *slot) 113static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
115{ 114{
@@ -187,6 +186,4 @@ static inline int pci_get_hp_params(struct pci_dev *dev,
187 return -ENODEV; 186 return -ENODEV;
188} 187}
189#endif 188#endif
190
191void pci_configure_slot(struct pci_dev *dev);
192#endif 189#endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6ed0bb73a864..e63c02a93f6b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -522,6 +522,8 @@
522#define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403 522#define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403
523#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d 523#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d
524#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e 524#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e
525#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F3 0x1573
526#define PCI_DEVICE_ID_AMD_15H_M60H_NB_F4 0x1574
525#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600 527#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
526#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601 528#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
527#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602 529#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
@@ -562,6 +564,7 @@
562#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450 564#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
563#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451 565#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
564#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458 566#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
567#define PCI_DEVICE_ID_AMD_NL_USB 0x7912
565#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F 568#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F
566#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090 569#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
567#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091 570#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
@@ -2245,6 +2248,8 @@
2245#define PCI_VENDOR_ID_MORETON 0x15aa 2248#define PCI_VENDOR_ID_MORETON 0x15aa
2246#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 2249#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
2247 2250
2251#define PCI_VENDOR_ID_VMWARE 0x15ad
2252
2248#define PCI_VENDOR_ID_ZOLTRIX 0x15b0 2253#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
2249#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 2254#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
2250 2255
@@ -2536,6 +2541,7 @@
2536#define PCI_DEVICE_ID_INTEL_EESSC 0x0008 2541#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
2537#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100 2542#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
2538#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154 2543#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
2544#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
2539#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 2545#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
2540#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320 2546#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
2541#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321 2547#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
@@ -2557,6 +2563,7 @@
2557#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823 2563#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
2558#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824 2564#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
2559#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F 2565#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
2566#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
2560#define PCI_DEVICE_ID_INTEL_I960 0x0960 2567#define PCI_DEVICE_ID_INTEL_I960 0x0960
2561#define PCI_DEVICE_ID_INTEL_I960RM 0x0962 2568#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
2562#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60 2569#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
@@ -2818,7 +2825,22 @@
2818#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43 2825#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
2819#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44 2826#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
2820#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45 2827#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
2828#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
2829#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
2830#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
2831#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR2 0x3c76 /* 16.6 */
2832#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR3 0x3c77 /* 16.7 */
2833#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0 0x3ca0 /* 14.0 */
2834#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA 0x3ca8 /* 15.0 */
2835#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0 0x3caa /* 15.2 */
2836#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
2837#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
2838#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
2839#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
2821#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0 2840#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
2841#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
2842#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
2843#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
2822#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f 2844#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
2823#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 2845#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
2824#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3 2846#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
@@ -2860,6 +2882,7 @@
2860#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601 2882#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
2861#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119 2883#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119
2862#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a 2884#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a
2885#define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183
2863#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186 2886#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186
2864#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4 2887#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4
2865#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5 2888#define PCI_DEVICE_ID_INTEL_82450GX 0x84c5
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index cfd56046ecec..57f3a1c550dc 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -254,11 +254,6 @@ do { \
254#endif /* CONFIG_SMP */ 254#endif /* CONFIG_SMP */
255 255
256#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) 256#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
257#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
258#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
259
260/* keep until we have removed all uses of __this_cpu_ptr */
261#define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
262 257
263/* 258/*
264 * Must be an lvalue. Since @var must be a simple identifier, 259 * Must be an lvalue. Since @var must be a simple identifier,
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index ef5894ca8e50..b4337646388b 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -13,7 +13,7 @@
13 * 13 *
14 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less 14 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
15 * than an atomic_t - this is because of the way shutdown works, see 15 * than an atomic_t - this is because of the way shutdown works, see
16 * percpu_ref_kill()/PCPU_COUNT_BIAS. 16 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
17 * 17 *
18 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the 18 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
19 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill() 19 * refcount hitting 0 - it can't, if it was in percpu mode. percpu_ref_kill()
@@ -29,7 +29,7 @@
29 * calls io_destroy() or the process exits. 29 * calls io_destroy() or the process exits.
30 * 30 *
31 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it 31 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
32 * calls percpu_ref_kill(), then hlist_del_rcu() and sychronize_rcu() to remove 32 * calls percpu_ref_kill(), then hlist_del_rcu() and synchronize_rcu() to remove
33 * the kioctx from the proccess's list of kioctxs - after that, there can't be 33 * the kioctx from the proccess's list of kioctxs - after that, there can't be
34 * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop 34 * any new users of the kioctx (from lookup_ioctx()) and it's then safe to drop
35 * the initial ref with percpu_ref_put(). 35 * the initial ref with percpu_ref_put().
@@ -49,29 +49,60 @@
49#include <linux/kernel.h> 49#include <linux/kernel.h>
50#include <linux/percpu.h> 50#include <linux/percpu.h>
51#include <linux/rcupdate.h> 51#include <linux/rcupdate.h>
52#include <linux/gfp.h>
52 53
53struct percpu_ref; 54struct percpu_ref;
54typedef void (percpu_ref_func_t)(struct percpu_ref *); 55typedef void (percpu_ref_func_t)(struct percpu_ref *);
55 56
57/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
58enum {
59 __PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */
60 __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */
61 __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
62
63 __PERCPU_REF_FLAG_BITS = 2,
64};
65
66/* @flags for percpu_ref_init() */
67enum {
68 /*
69 * Start w/ ref == 1 in atomic mode. Can be switched to percpu
70 * operation using percpu_ref_switch_to_percpu(). If initialized
71 * with this flag, the ref will stay in atomic mode until
72 * percpu_ref_switch_to_percpu() is invoked on it.
73 */
74 PERCPU_REF_INIT_ATOMIC = 1 << 0,
75
76 /*
77 * Start dead w/ ref == 0 in atomic mode. Must be revived with
78 * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
79 */
80 PERCPU_REF_INIT_DEAD = 1 << 1,
81};
82
56struct percpu_ref { 83struct percpu_ref {
57 atomic_t count; 84 atomic_long_t count;
58 /* 85 /*
59 * The low bit of the pointer indicates whether the ref is in percpu 86 * The low bit of the pointer indicates whether the ref is in percpu
60 * mode; if set, then get/put will manipulate the atomic_t. 87 * mode; if set, then get/put will manipulate the atomic_t.
61 */ 88 */
62 unsigned long pcpu_count_ptr; 89 unsigned long percpu_count_ptr;
63 percpu_ref_func_t *release; 90 percpu_ref_func_t *release;
64 percpu_ref_func_t *confirm_kill; 91 percpu_ref_func_t *confirm_switch;
92 bool force_atomic:1;
65 struct rcu_head rcu; 93 struct rcu_head rcu;
66}; 94};
67 95
68int __must_check percpu_ref_init(struct percpu_ref *ref, 96int __must_check percpu_ref_init(struct percpu_ref *ref,
69 percpu_ref_func_t *release); 97 percpu_ref_func_t *release, unsigned int flags,
70void percpu_ref_reinit(struct percpu_ref *ref); 98 gfp_t gfp);
71void percpu_ref_exit(struct percpu_ref *ref); 99void percpu_ref_exit(struct percpu_ref *ref);
100void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
101 percpu_ref_func_t *confirm_switch);
102void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
72void percpu_ref_kill_and_confirm(struct percpu_ref *ref, 103void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
73 percpu_ref_func_t *confirm_kill); 104 percpu_ref_func_t *confirm_kill);
74void __percpu_ref_kill_expedited(struct percpu_ref *ref); 105void percpu_ref_reinit(struct percpu_ref *ref);
75 106
76/** 107/**
77 * percpu_ref_kill - drop the initial ref 108 * percpu_ref_kill - drop the initial ref
@@ -88,70 +119,88 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
88 return percpu_ref_kill_and_confirm(ref, NULL); 119 return percpu_ref_kill_and_confirm(ref, NULL);
89} 120}
90 121
91#define PCPU_REF_DEAD 1
92
93/* 122/*
94 * Internal helper. Don't use outside percpu-refcount proper. The 123 * Internal helper. Don't use outside percpu-refcount proper. The
95 * function doesn't return the pointer and let the caller test it for NULL 124 * function doesn't return the pointer and let the caller test it for NULL
96 * because doing so forces the compiler to generate two conditional 125 * because doing so forces the compiler to generate two conditional
97 * branches as it can't assume that @ref->pcpu_count is not NULL. 126 * branches as it can't assume that @ref->percpu_count is not NULL.
98 */ 127 */
99static inline bool __pcpu_ref_alive(struct percpu_ref *ref, 128static inline bool __ref_is_percpu(struct percpu_ref *ref,
100 unsigned __percpu **pcpu_countp) 129 unsigned long __percpu **percpu_countp)
101{ 130{
102 unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
103
104 /* paired with smp_store_release() in percpu_ref_reinit() */ 131 /* paired with smp_store_release() in percpu_ref_reinit() */
105 smp_read_barrier_depends(); 132 unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
106 133
107 if (unlikely(pcpu_ptr & PCPU_REF_DEAD)) 134 /*
135 * Theoretically, the following could test just ATOMIC; however,
136 * then we'd have to mask off DEAD separately as DEAD may be
137 * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
138 * implies ATOMIC anyway. Test them together.
139 */
140 if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
108 return false; 141 return false;
109 142
110 *pcpu_countp = (unsigned __percpu *)pcpu_ptr; 143 *percpu_countp = (unsigned long __percpu *)percpu_ptr;
111 return true; 144 return true;
112} 145}
113 146
114/** 147/**
115 * percpu_ref_get - increment a percpu refcount 148 * percpu_ref_get_many - increment a percpu refcount
116 * @ref: percpu_ref to get 149 * @ref: percpu_ref to get
150 * @nr: number of references to get
117 * 151 *
118 * Analagous to atomic_inc(). 152 * Analogous to atomic_long_add().
119 */ 153 *
120static inline void percpu_ref_get(struct percpu_ref *ref) 154 * This function is safe to call as long as @ref is between init and exit.
155 */
156static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
121{ 157{
122 unsigned __percpu *pcpu_count; 158 unsigned long __percpu *percpu_count;
123 159
124 rcu_read_lock_sched(); 160 rcu_read_lock_sched();
125 161
126 if (__pcpu_ref_alive(ref, &pcpu_count)) 162 if (__ref_is_percpu(ref, &percpu_count))
127 this_cpu_inc(*pcpu_count); 163 this_cpu_add(*percpu_count, nr);
128 else 164 else
129 atomic_inc(&ref->count); 165 atomic_long_add(nr, &ref->count);
130 166
131 rcu_read_unlock_sched(); 167 rcu_read_unlock_sched();
132} 168}
133 169
134/** 170/**
171 * percpu_ref_get - increment a percpu refcount
172 * @ref: percpu_ref to get
173 *
174 * Analagous to atomic_long_inc().
175 *
176 * This function is safe to call as long as @ref is between init and exit.
177 */
178static inline void percpu_ref_get(struct percpu_ref *ref)
179{
180 percpu_ref_get_many(ref, 1);
181}
182
183/**
135 * percpu_ref_tryget - try to increment a percpu refcount 184 * percpu_ref_tryget - try to increment a percpu refcount
136 * @ref: percpu_ref to try-get 185 * @ref: percpu_ref to try-get
137 * 186 *
138 * Increment a percpu refcount unless its count already reached zero. 187 * Increment a percpu refcount unless its count already reached zero.
139 * Returns %true on success; %false on failure. 188 * Returns %true on success; %false on failure.
140 * 189 *
141 * The caller is responsible for ensuring that @ref stays accessible. 190 * This function is safe to call as long as @ref is between init and exit.
142 */ 191 */
143static inline bool percpu_ref_tryget(struct percpu_ref *ref) 192static inline bool percpu_ref_tryget(struct percpu_ref *ref)
144{ 193{
145 unsigned __percpu *pcpu_count; 194 unsigned long __percpu *percpu_count;
146 int ret = false; 195 int ret;
147 196
148 rcu_read_lock_sched(); 197 rcu_read_lock_sched();
149 198
150 if (__pcpu_ref_alive(ref, &pcpu_count)) { 199 if (__ref_is_percpu(ref, &percpu_count)) {
151 this_cpu_inc(*pcpu_count); 200 this_cpu_inc(*percpu_count);
152 ret = true; 201 ret = true;
153 } else { 202 } else {
154 ret = atomic_inc_not_zero(&ref->count); 203 ret = atomic_long_inc_not_zero(&ref->count);
155 } 204 }
156 205
157 rcu_read_unlock_sched(); 206 rcu_read_unlock_sched();
@@ -166,23 +215,26 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
166 * Increment a percpu refcount unless it has already been killed. Returns 215 * Increment a percpu refcount unless it has already been killed. Returns
167 * %true on success; %false on failure. 216 * %true on success; %false on failure.
168 * 217 *
169 * Completion of percpu_ref_kill() in itself doesn't guarantee that tryget 218 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
170 * will fail. For such guarantee, percpu_ref_kill_and_confirm() should be 219 * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
171 * used. After the confirm_kill callback is invoked, it's guaranteed that 220 * should be used. After the confirm_kill callback is invoked, it's
172 * no new reference will be given out by percpu_ref_tryget(). 221 * guaranteed that no new reference will be given out by
222 * percpu_ref_tryget_live().
173 * 223 *
174 * The caller is responsible for ensuring that @ref stays accessible. 224 * This function is safe to call as long as @ref is between init and exit.
175 */ 225 */
176static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) 226static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
177{ 227{
178 unsigned __percpu *pcpu_count; 228 unsigned long __percpu *percpu_count;
179 int ret = false; 229 int ret = false;
180 230
181 rcu_read_lock_sched(); 231 rcu_read_lock_sched();
182 232
183 if (__pcpu_ref_alive(ref, &pcpu_count)) { 233 if (__ref_is_percpu(ref, &percpu_count)) {
184 this_cpu_inc(*pcpu_count); 234 this_cpu_inc(*percpu_count);
185 ret = true; 235 ret = true;
236 } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
237 ret = atomic_long_inc_not_zero(&ref->count);
186 } 238 }
187 239
188 rcu_read_unlock_sched(); 240 rcu_read_unlock_sched();
@@ -191,39 +243,58 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
191} 243}
192 244
193/** 245/**
194 * percpu_ref_put - decrement a percpu refcount 246 * percpu_ref_put_many - decrement a percpu refcount
195 * @ref: percpu_ref to put 247 * @ref: percpu_ref to put
248 * @nr: number of references to put
196 * 249 *
197 * Decrement the refcount, and if 0, call the release function (which was passed 250 * Decrement the refcount, and if 0, call the release function (which was passed
198 * to percpu_ref_init()) 251 * to percpu_ref_init())
252 *
253 * This function is safe to call as long as @ref is between init and exit.
199 */ 254 */
200static inline void percpu_ref_put(struct percpu_ref *ref) 255static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
201{ 256{
202 unsigned __percpu *pcpu_count; 257 unsigned long __percpu *percpu_count;
203 258
204 rcu_read_lock_sched(); 259 rcu_read_lock_sched();
205 260
206 if (__pcpu_ref_alive(ref, &pcpu_count)) 261 if (__ref_is_percpu(ref, &percpu_count))
207 this_cpu_dec(*pcpu_count); 262 this_cpu_sub(*percpu_count, nr);
208 else if (unlikely(atomic_dec_and_test(&ref->count))) 263 else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
209 ref->release(ref); 264 ref->release(ref);
210 265
211 rcu_read_unlock_sched(); 266 rcu_read_unlock_sched();
212} 267}
213 268
214/** 269/**
270 * percpu_ref_put - decrement a percpu refcount
271 * @ref: percpu_ref to put
272 *
273 * Decrement the refcount, and if 0, call the release function (which was passed
274 * to percpu_ref_init())
275 *
276 * This function is safe to call as long as @ref is between init and exit.
277 */
278static inline void percpu_ref_put(struct percpu_ref *ref)
279{
280 percpu_ref_put_many(ref, 1);
281}
282
283/**
215 * percpu_ref_is_zero - test whether a percpu refcount reached zero 284 * percpu_ref_is_zero - test whether a percpu refcount reached zero
216 * @ref: percpu_ref to test 285 * @ref: percpu_ref to test
217 * 286 *
218 * Returns %true if @ref reached zero. 287 * Returns %true if @ref reached zero.
288 *
289 * This function is safe to call as long as @ref is between init and exit.
219 */ 290 */
220static inline bool percpu_ref_is_zero(struct percpu_ref *ref) 291static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
221{ 292{
222 unsigned __percpu *pcpu_count; 293 unsigned long __percpu *percpu_count;
223 294
224 if (__pcpu_ref_alive(ref, &pcpu_count)) 295 if (__ref_is_percpu(ref, &percpu_count))
225 return false; 296 return false;
226 return !atomic_read(&ref->count); 297 return !atomic_long_read(&ref->count);
227} 298}
228 299
229#endif 300#endif
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 6f61b61b7996..caebf2a758dc 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,6 +5,7 @@
5#include <linux/preempt.h> 5#include <linux/preempt.h>
6#include <linux/smp.h> 6#include <linux/smp.h>
7#include <linux/cpumask.h> 7#include <linux/cpumask.h>
8#include <linux/printk.h>
8#include <linux/pfn.h> 9#include <linux/pfn.h>
9#include <linux/init.h> 10#include <linux/init.h>
10 11
@@ -48,9 +49,9 @@
48 * intelligent way to determine this would be nice. 49 * intelligent way to determine this would be nice.
49 */ 50 */
50#if BITS_PER_LONG > 32 51#if BITS_PER_LONG > 32
51#define PERCPU_DYNAMIC_RESERVE (20 << 10) 52#define PERCPU_DYNAMIC_RESERVE (28 << 10)
52#else 53#else
53#define PERCPU_DYNAMIC_RESERVE (12 << 10) 54#define PERCPU_DYNAMIC_RESERVE (20 << 10)
54#endif 55#endif
55 56
56extern void *pcpu_base_addr; 57extern void *pcpu_base_addr;
@@ -122,11 +123,19 @@ extern void __init setup_per_cpu_areas(void);
122#endif 123#endif
123extern void __init percpu_init_late(void); 124extern void __init percpu_init_late(void);
124 125
126extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
125extern void __percpu *__alloc_percpu(size_t size, size_t align); 127extern void __percpu *__alloc_percpu(size_t size, size_t align);
126extern void free_percpu(void __percpu *__pdata); 128extern void free_percpu(void __percpu *__pdata);
127extern phys_addr_t per_cpu_ptr_to_phys(void *addr); 129extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
128 130
129#define alloc_percpu(type) \ 131#define alloc_percpu_gfp(type, gfp) \
130 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) 132 (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \
133 __alignof__(type), gfp)
134#define alloc_percpu(type) \
135 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
136 __alignof__(type))
137
138/* To avoid include hell, as printk can not declare this, we declare it here */
139DECLARE_PER_CPU(printk_func_t, printk_func);
131 140
132#endif /* __LINUX_PERCPU_H */ 141#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index d5dd4657c8d6..50e50095c8d1 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -12,6 +12,7 @@
12#include <linux/threads.h> 12#include <linux/threads.h>
13#include <linux/percpu.h> 13#include <linux/percpu.h>
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/gfp.h>
15 16
16#ifdef CONFIG_SMP 17#ifdef CONFIG_SMP
17 18
@@ -26,14 +27,14 @@ struct percpu_counter {
26 27
27extern int percpu_counter_batch; 28extern int percpu_counter_batch;
28 29
29int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, 30int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
30 struct lock_class_key *key); 31 struct lock_class_key *key);
31 32
32#define percpu_counter_init(fbc, value) \ 33#define percpu_counter_init(fbc, value, gfp) \
33 ({ \ 34 ({ \
34 static struct lock_class_key __key; \ 35 static struct lock_class_key __key; \
35 \ 36 \
36 __percpu_counter_init(fbc, value, &__key); \ 37 __percpu_counter_init(fbc, value, gfp, &__key); \
37 }) 38 })
38 39
39void percpu_counter_destroy(struct percpu_counter *fbc); 40void percpu_counter_destroy(struct percpu_counter *fbc);
@@ -89,7 +90,8 @@ struct percpu_counter {
89 s64 count; 90 s64 count;
90}; 91};
91 92
92static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount) 93static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
94 gfp_t gfp)
93{ 95{
94 fbc->count = amount; 96 fbc->count = amount;
95 return 0; 97 return 0;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 707617a8c0f6..4f7a61ca4b39 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -52,6 +52,7 @@ struct perf_guest_info_callbacks {
52#include <linux/atomic.h> 52#include <linux/atomic.h>
53#include <linux/sysfs.h> 53#include <linux/sysfs.h>
54#include <linux/perf_regs.h> 54#include <linux/perf_regs.h>
55#include <linux/workqueue.h>
55#include <asm/local.h> 56#include <asm/local.h>
56 57
57struct perf_callchain_entry { 58struct perf_callchain_entry {
@@ -78,11 +79,6 @@ struct perf_branch_stack {
78 struct perf_branch_entry entries[0]; 79 struct perf_branch_entry entries[0];
79}; 80};
80 81
81struct perf_regs_user {
82 __u64 abi;
83 struct pt_regs *regs;
84};
85
86struct task_struct; 82struct task_struct;
87 83
88/* 84/*
@@ -268,6 +264,7 @@ struct pmu {
268 * enum perf_event_active_state - the states of a event 264 * enum perf_event_active_state - the states of a event
269 */ 265 */
270enum perf_event_active_state { 266enum perf_event_active_state {
267 PERF_EVENT_STATE_EXIT = -3,
271 PERF_EVENT_STATE_ERROR = -2, 268 PERF_EVENT_STATE_ERROR = -2,
272 PERF_EVENT_STATE_OFF = -1, 269 PERF_EVENT_STATE_OFF = -1,
273 PERF_EVENT_STATE_INACTIVE = 0, 270 PERF_EVENT_STATE_INACTIVE = 0,
@@ -507,6 +504,9 @@ struct perf_event_context {
507 int nr_cgroups; /* cgroup evts */ 504 int nr_cgroups; /* cgroup evts */
508 int nr_branch_stack; /* branch_stack evt */ 505 int nr_branch_stack; /* branch_stack evt */
509 struct rcu_head rcu_head; 506 struct rcu_head rcu_head;
507
508 struct delayed_work orphans_remove;
509 bool orphans_remove_sched;
510}; 510};
511 511
512/* 512/*
@@ -575,34 +575,54 @@ extern u64 perf_event_read_value(struct perf_event *event,
575 575
576 576
577struct perf_sample_data { 577struct perf_sample_data {
578 u64 type; 578 /*
579 * Fields set by perf_sample_data_init(), group so as to
580 * minimize the cachelines touched.
581 */
582 u64 addr;
583 struct perf_raw_record *raw;
584 struct perf_branch_stack *br_stack;
585 u64 period;
586 u64 weight;
587 u64 txn;
588 union perf_mem_data_src data_src;
579 589
590 /*
591 * The other fields, optionally {set,used} by
592 * perf_{prepare,output}_sample().
593 */
594 u64 type;
580 u64 ip; 595 u64 ip;
581 struct { 596 struct {
582 u32 pid; 597 u32 pid;
583 u32 tid; 598 u32 tid;
584 } tid_entry; 599 } tid_entry;
585 u64 time; 600 u64 time;
586 u64 addr;
587 u64 id; 601 u64 id;
588 u64 stream_id; 602 u64 stream_id;
589 struct { 603 struct {
590 u32 cpu; 604 u32 cpu;
591 u32 reserved; 605 u32 reserved;
592 } cpu_entry; 606 } cpu_entry;
593 u64 period;
594 union perf_mem_data_src data_src;
595 struct perf_callchain_entry *callchain; 607 struct perf_callchain_entry *callchain;
596 struct perf_raw_record *raw; 608
597 struct perf_branch_stack *br_stack;
598 struct perf_regs_user regs_user;
599 u64 stack_user_size;
600 u64 weight;
601 /* 609 /*
602 * Transaction flags for abort events: 610 * regs_user may point to task_pt_regs or to regs_user_copy, depending
611 * on arch details.
603 */ 612 */
604 u64 txn; 613 struct perf_regs regs_user;
605}; 614 struct pt_regs regs_user_copy;
615
616 struct perf_regs regs_intr;
617 u64 stack_user_size;
618} ____cacheline_aligned;
619
620/* default value for data source */
621#define PERF_MEM_NA (PERF_MEM_S(OP, NA) |\
622 PERF_MEM_S(LVL, NA) |\
623 PERF_MEM_S(SNOOP, NA) |\
624 PERF_MEM_S(LOCK, NA) |\
625 PERF_MEM_S(TLB, NA))
606 626
607static inline void perf_sample_data_init(struct perf_sample_data *data, 627static inline void perf_sample_data_init(struct perf_sample_data *data,
608 u64 addr, u64 period) 628 u64 addr, u64 period)
@@ -612,11 +632,8 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
612 data->raw = NULL; 632 data->raw = NULL;
613 data->br_stack = NULL; 633 data->br_stack = NULL;
614 data->period = period; 634 data->period = period;
615 data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE;
616 data->regs_user.regs = NULL;
617 data->stack_user_size = 0;
618 data->weight = 0; 635 data->weight = 0;
619 data->data_src.val = 0; 636 data->data_src.val = PERF_MEM_NA;
620 data->txn = 0; 637 data->txn = 0;
621} 638}
622 639
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 3c73d5fe18be..a5f98d53d732 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -1,11 +1,19 @@
1#ifndef _LINUX_PERF_REGS_H 1#ifndef _LINUX_PERF_REGS_H
2#define _LINUX_PERF_REGS_H 2#define _LINUX_PERF_REGS_H
3 3
4struct perf_regs {
5 __u64 abi;
6 struct pt_regs *regs;
7};
8
4#ifdef CONFIG_HAVE_PERF_REGS 9#ifdef CONFIG_HAVE_PERF_REGS
5#include <asm/perf_regs.h> 10#include <asm/perf_regs.h>
6u64 perf_reg_value(struct pt_regs *regs, int idx); 11u64 perf_reg_value(struct pt_regs *regs, int idx);
7int perf_reg_validate(u64 mask); 12int perf_reg_validate(u64 mask);
8u64 perf_reg_abi(struct task_struct *task); 13u64 perf_reg_abi(struct task_struct *task);
14void perf_get_regs_user(struct perf_regs *regs_user,
15 struct pt_regs *regs,
16 struct pt_regs *regs_user_copy);
9#else 17#else
10static inline u64 perf_reg_value(struct pt_regs *regs, int idx) 18static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
11{ 19{
@@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task)
21{ 29{
22 return PERF_SAMPLE_REGS_ABI_NONE; 30 return PERF_SAMPLE_REGS_ABI_NONE;
23} 31}
32
33static inline void perf_get_regs_user(struct perf_regs *regs_user,
34 struct pt_regs *regs,
35 struct pt_regs *regs_user_copy)
36{
37 regs_user->regs = task_pt_regs(current);
38 regs_user->abi = perf_reg_abi(current);
39}
24#endif /* CONFIG_HAVE_PERF_REGS */ 40#endif /* CONFIG_HAVE_PERF_REGS */
25#endif /* _LINUX_PERF_REGS_H */ 41#endif /* _LINUX_PERF_REGS_H */
diff --git a/include/linux/phonedev.h b/include/linux/phonedev.h
deleted file mode 100644
index 4269de99e320..000000000000
--- a/include/linux/phonedev.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef __LINUX_PHONEDEV_H
2#define __LINUX_PHONEDEV_H
3
4#include <linux/types.h>
5
6#ifdef __KERNEL__
7
8#include <linux/poll.h>
9
10struct phone_device {
11 struct phone_device *next;
12 const struct file_operations *f_op;
13 int (*open) (struct phone_device *, struct file *);
14 int board; /* Device private index */
15 int minor;
16};
17
18extern int phonedev_init(void);
19#define PHONE_MAJOR 100
20extern int phone_register_device(struct phone_device *, int unit);
21#define PHONE_UNIT_ANY -1
22extern void phone_unregister_device(struct phone_device *);
23
24#endif
25#endif
diff --git a/include/linux/phy.h b/include/linux/phy.h
index ed39956b5613..22af8f8f5802 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -433,6 +433,7 @@ struct phy_device {
433 * by this PHY 433 * by this PHY
434 * flags: A bitfield defining certain other features this PHY 434 * flags: A bitfield defining certain other features this PHY
435 * supports (like interrupts) 435 * supports (like interrupts)
436 * driver_data: static driver data
436 * 437 *
437 * The drivers must implement config_aneg and read_status. All 438 * The drivers must implement config_aneg and read_status. All
438 * other functions are optional. Note that none of these 439 * other functions are optional. Note that none of these
@@ -448,6 +449,7 @@ struct phy_driver {
448 unsigned int phy_id_mask; 449 unsigned int phy_id_mask;
449 u32 features; 450 u32 features;
450 u32 flags; 451 u32 flags;
452 const void *driver_data;
451 453
452 /* 454 /*
453 * Called to issue a PHY software reset 455 * Called to issue a PHY software reset
@@ -598,6 +600,19 @@ static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
598} 600}
599 601
600/** 602/**
603 * phy_read_mmd_indirect - reads data from the MMD registers
604 * @phydev: The PHY device bus
605 * @prtad: MMD Address
606 * @devad: MMD DEVAD
607 * @addr: PHY address on the MII bus
608 *
609 * Description: it reads data from the MMD registers (clause 22 to access to
610 * clause 45) of the specified phy address.
611 */
612int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
613 int devad, int addr);
614
615/**
601 * phy_read - Convenience function for reading a given PHY register 616 * phy_read - Convenience function for reading a given PHY register
602 * @phydev: the phy_device struct 617 * @phydev: the phy_device struct
603 * @regnum: register number to read 618 * @regnum: register number to read
@@ -668,6 +683,20 @@ static inline int phy_write_mmd(struct phy_device *phydev, int devad,
668 return mdiobus_write(phydev->bus, phydev->addr, regnum, val); 683 return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
669} 684}
670 685
686/**
687 * phy_write_mmd_indirect - writes data to the MMD registers
688 * @phydev: The PHY device
689 * @prtad: MMD Address
690 * @devad: MMD DEVAD
691 * @addr: PHY address on the MII bus
692 * @data: data to write in the MMD register
693 *
694 * Description: Write data from the MMD registers of the specified
695 * phy address.
696 */
697void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
698 int devad, int addr, u32 data);
699
671struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, 700struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
672 bool is_c45, 701 bool is_c45,
673 struct phy_c45_device_ids *c45_ids); 702 struct phy_c45_device_ids *c45_ids);
@@ -745,4 +774,28 @@ int __init mdio_bus_init(void);
745void mdio_bus_exit(void); 774void mdio_bus_exit(void);
746 775
747extern struct bus_type mdio_bus_type; 776extern struct bus_type mdio_bus_type;
777
778/**
779 * module_phy_driver() - Helper macro for registering PHY drivers
780 * @__phy_drivers: array of PHY drivers to register
781 *
782 * Helper macro for PHY drivers which do not do anything special in module
783 * init/exit. Each module may only use this macro once, and calling it
784 * replaces module_init() and module_exit().
785 */
786#define phy_module_driver(__phy_drivers, __count) \
787static int __init phy_module_init(void) \
788{ \
789 return phy_drivers_register(__phy_drivers, __count); \
790} \
791module_init(phy_module_init); \
792static void __exit phy_module_exit(void) \
793{ \
794 phy_drivers_unregister(__phy_drivers, __count); \
795} \
796module_exit(phy_module_exit)
797
798#define module_phy_driver(__phy_drivers) \
799 phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers))
800
748#endif /* __PHY_H */ 801#endif /* __PHY_H */
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 8cb6f815475b..a0197fa1b116 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -61,7 +61,6 @@ struct phy {
61 struct device dev; 61 struct device dev;
62 int id; 62 int id;
63 const struct phy_ops *ops; 63 const struct phy_ops *ops;
64 struct phy_init_data *init_data;
65 struct mutex mutex; 64 struct mutex mutex;
66 int init_count; 65 int init_count;
67 int power_count; 66 int power_count;
@@ -84,33 +83,14 @@ struct phy_provider {
84 struct of_phandle_args *args); 83 struct of_phandle_args *args);
85}; 84};
86 85
87/** 86struct phy_lookup {
88 * struct phy_consumer - represents the phy consumer 87 struct list_head node;
89 * @dev_name: the device name of the controller that will use this PHY device 88 const char *dev_id;
90 * @port: name given to the consumer port 89 const char *con_id;
91 */ 90 struct phy *phy;
92struct phy_consumer {
93 const char *dev_name;
94 const char *port;
95};
96
97/**
98 * struct phy_init_data - contains the list of PHY consumers
99 * @num_consumers: number of consumers for this PHY device
100 * @consumers: list of PHY consumers
101 */
102struct phy_init_data {
103 unsigned int num_consumers;
104 struct phy_consumer *consumers;
105}; 91};
106 92
107#define PHY_CONSUMER(_dev_name, _port) \ 93#define to_phy(a) (container_of((a), struct phy, dev))
108{ \
109 .dev_name = _dev_name, \
110 .port = _port, \
111}
112
113#define to_phy(dev) (container_of((dev), struct phy, dev))
114 94
115#define of_phy_provider_register(dev, xlate) \ 95#define of_phy_provider_register(dev, xlate) \
116 __of_phy_provider_register((dev), THIS_MODULE, (xlate)) 96 __of_phy_provider_register((dev), THIS_MODULE, (xlate))
@@ -159,10 +139,9 @@ struct phy *of_phy_get(struct device_node *np, const char *con_id);
159struct phy *of_phy_simple_xlate(struct device *dev, 139struct phy *of_phy_simple_xlate(struct device *dev,
160 struct of_phandle_args *args); 140 struct of_phandle_args *args);
161struct phy *phy_create(struct device *dev, struct device_node *node, 141struct phy *phy_create(struct device *dev, struct device_node *node,
162 const struct phy_ops *ops, 142 const struct phy_ops *ops);
163 struct phy_init_data *init_data);
164struct phy *devm_phy_create(struct device *dev, struct device_node *node, 143struct phy *devm_phy_create(struct device *dev, struct device_node *node,
165 const struct phy_ops *ops, struct phy_init_data *init_data); 144 const struct phy_ops *ops);
166void phy_destroy(struct phy *phy); 145void phy_destroy(struct phy *phy);
167void devm_phy_destroy(struct device *dev, struct phy *phy); 146void devm_phy_destroy(struct device *dev, struct phy *phy);
168struct phy_provider *__of_phy_provider_register(struct device *dev, 147struct phy_provider *__of_phy_provider_register(struct device *dev,
@@ -174,6 +153,8 @@ struct phy_provider *__devm_of_phy_provider_register(struct device *dev,
174void of_phy_provider_unregister(struct phy_provider *phy_provider); 153void of_phy_provider_unregister(struct phy_provider *phy_provider);
175void devm_of_phy_provider_unregister(struct device *dev, 154void devm_of_phy_provider_unregister(struct device *dev,
176 struct phy_provider *phy_provider); 155 struct phy_provider *phy_provider);
156int phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id);
157void phy_remove_lookup(struct phy *phy, const char *con_id, const char *dev_id);
177#else 158#else
178static inline int phy_pm_runtime_get(struct phy *phy) 159static inline int phy_pm_runtime_get(struct phy *phy)
179{ 160{
@@ -301,16 +282,14 @@ static inline struct phy *of_phy_simple_xlate(struct device *dev,
301 282
302static inline struct phy *phy_create(struct device *dev, 283static inline struct phy *phy_create(struct device *dev,
303 struct device_node *node, 284 struct device_node *node,
304 const struct phy_ops *ops, 285 const struct phy_ops *ops)
305 struct phy_init_data *init_data)
306{ 286{
307 return ERR_PTR(-ENOSYS); 287 return ERR_PTR(-ENOSYS);
308} 288}
309 289
310static inline struct phy *devm_phy_create(struct device *dev, 290static inline struct phy *devm_phy_create(struct device *dev,
311 struct device_node *node, 291 struct device_node *node,
312 const struct phy_ops *ops, 292 const struct phy_ops *ops)
313 struct phy_init_data *init_data)
314{ 293{
315 return ERR_PTR(-ENOSYS); 294 return ERR_PTR(-ENOSYS);
316} 295}
@@ -345,6 +324,13 @@ static inline void devm_of_phy_provider_unregister(struct device *dev,
345 struct phy_provider *phy_provider) 324 struct phy_provider *phy_provider)
346{ 325{
347} 326}
327static inline int
328phy_create_lookup(struct phy *phy, const char *con_id, const char *dev_id)
329{
330 return 0;
331}
332static inline void phy_remove_lookup(struct phy *phy, const char *con_id,
333 const char *dev_id) { }
348#endif 334#endif
349 335
350#endif /* __DRIVERS_PHY_H */ 336#endif /* __DRIVERS_PHY_H */
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index ae612acebb53..7e75bfe37cc7 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -11,37 +11,38 @@ struct fixed_phy_status {
11 11
12struct device_node; 12struct device_node;
13 13
14#ifdef CONFIG_FIXED_PHY 14#if IS_ENABLED(CONFIG_FIXED_PHY)
15extern int fixed_phy_add(unsigned int irq, int phy_id, 15extern int fixed_phy_add(unsigned int irq, int phy_id,
16 struct fixed_phy_status *status); 16 struct fixed_phy_status *status);
17extern int fixed_phy_register(unsigned int irq, 17extern struct phy_device *fixed_phy_register(unsigned int irq,
18 struct fixed_phy_status *status, 18 struct fixed_phy_status *status,
19 struct device_node *np); 19 struct device_node *np);
20extern void fixed_phy_del(int phy_addr); 20extern void fixed_phy_del(int phy_addr);
21extern int fixed_phy_set_link_update(struct phy_device *phydev,
22 int (*link_update)(struct net_device *,
23 struct fixed_phy_status *));
21#else 24#else
22static inline int fixed_phy_add(unsigned int irq, int phy_id, 25static inline int fixed_phy_add(unsigned int irq, int phy_id,
23 struct fixed_phy_status *status) 26 struct fixed_phy_status *status)
24{ 27{
25 return -ENODEV; 28 return -ENODEV;
26} 29}
27static inline int fixed_phy_register(unsigned int irq, 30static inline struct phy_device *fixed_phy_register(unsigned int irq,
28 struct fixed_phy_status *status, 31 struct fixed_phy_status *status,
29 struct device_node *np) 32 struct device_node *np)
30{ 33{
31 return -ENODEV; 34 return ERR_PTR(-ENODEV);
32} 35}
33static inline int fixed_phy_del(int phy_addr) 36static inline int fixed_phy_del(int phy_addr)
34{ 37{
35 return -ENODEV; 38 return -ENODEV;
36} 39}
37#endif /* CONFIG_FIXED_PHY */ 40static inline int fixed_phy_set_link_update(struct phy_device *phydev,
38
39/*
40 * This function issued only by fixed_phy-aware drivers, no need
41 * protect it with #ifdef
42 */
43extern int fixed_phy_set_link_update(struct phy_device *phydev,
44 int (*link_update)(struct net_device *, 41 int (*link_update)(struct net_device *,
45 struct fixed_phy_status *)); 42 struct fixed_phy_status *))
43{
44 return -ENODEV;
45}
46#endif /* CONFIG_FIXED_PHY */
46 47
47#endif /* __PHY_FIXED_H */ 48#endif /* __PHY_FIXED_H */
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 1997ffc295a7..b9cf6c51b181 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -8,6 +8,7 @@
8#include <linux/threads.h> 8#include <linux/threads.h>
9#include <linux/nsproxy.h> 9#include <linux/nsproxy.h>
10#include <linux/kref.h> 10#include <linux/kref.h>
11#include <linux/ns_common.h>
11 12
12struct pidmap { 13struct pidmap {
13 atomic_t nr_free; 14 atomic_t nr_free;
@@ -43,7 +44,7 @@ struct pid_namespace {
43 kgid_t pid_gid; 44 kgid_t pid_gid;
44 int hide_pid; 45 int hide_pid;
45 int reboot; /* group exit code if this pidns was rebooted */ 46 int reboot; /* group exit code if this pidns was rebooted */
46 unsigned int proc_inum; 47 struct ns_common ns;
47}; 48};
48 49
49extern struct pid_namespace init_pid_ns; 50extern struct pid_namespace init_pid_ns;
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index a15f10727eb8..d578a60eff23 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -57,7 +57,7 @@
57 * which are then pulled up with an external resistor. Setting this 57 * which are then pulled up with an external resistor. Setting this
58 * config will enable open drain mode, the argument is ignored. 58 * config will enable open drain mode, the argument is ignored.
59 * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source 59 * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
60 * (open emitter). Setting this config will enable open drain mode, the 60 * (open emitter). Setting this config will enable open source mode, the
61 * argument is ignored. 61 * argument is ignored.
62 * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current 62 * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
63 * passed as argument. The argument is in mA. 63 * passed as argument. The argument is in mA.
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 3097aafbeb24..511bda9ed4bf 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -39,13 +39,12 @@ struct pinctrl_dev;
39 * name can be used with the generic @pinctrl_ops to retrieve the 39 * name can be used with the generic @pinctrl_ops to retrieve the
40 * actual pins affected. The applicable groups will be returned in 40 * actual pins affected. The applicable groups will be returned in
41 * @groups and the number of groups in @num_groups 41 * @groups and the number of groups in @num_groups
42 * @enable: enable a certain muxing function with a certain pin group. The 42 * @set_mux: enable a certain muxing function with a certain pin group. The
43 * driver does not need to figure out whether enabling this function 43 * driver does not need to figure out whether enabling this function
44 * conflicts some other use of the pins in that group, such collisions 44 * conflicts some other use of the pins in that group, such collisions
45 * are handled by the pinmux subsystem. The @func_selector selects a 45 * are handled by the pinmux subsystem. The @func_selector selects a
46 * certain function whereas @group_selector selects a certain set of pins 46 * certain function whereas @group_selector selects a certain set of pins
47 * to be used. On simple controllers the latter argument may be ignored 47 * to be used. On simple controllers the latter argument may be ignored
48 * @disable: disable a certain muxing selector with a certain pin group
49 * @gpio_request_enable: requests and enables GPIO on a certain pin. 48 * @gpio_request_enable: requests and enables GPIO on a certain pin.
50 * Implement this only if you can mux every pin individually as GPIO. The 49 * Implement this only if you can mux every pin individually as GPIO. The
51 * affected GPIO range is passed along with an offset(pin number) into that 50 * affected GPIO range is passed along with an offset(pin number) into that
@@ -68,8 +67,8 @@ struct pinmux_ops {
68 unsigned selector, 67 unsigned selector,
69 const char * const **groups, 68 const char * const **groups,
70 unsigned * const num_groups); 69 unsigned * const num_groups);
71 int (*enable) (struct pinctrl_dev *pctldev, unsigned func_selector, 70 int (*set_mux) (struct pinctrl_dev *pctldev, unsigned func_selector,
72 unsigned group_selector); 71 unsigned group_selector);
73 int (*gpio_request_enable) (struct pinctrl_dev *pctldev, 72 int (*gpio_request_enable) (struct pinctrl_dev *pctldev,
74 struct pinctrl_gpio_range *range, 73 struct pinctrl_gpio_range *range,
75 unsigned offset); 74 unsigned offset);
diff --git a/include/linux/mailbox.h b/include/linux/pl320-ipc.h
index 5161f63ec1c8..5161f63ec1c8 100644
--- a/include/linux/mailbox.h
+++ b/include/linux/pl320-ipc.h
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index a6591c693ebb..5e0bc779e6c5 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -27,6 +27,7 @@ struct samsung_i2s {
27#define QUIRK_NO_MUXPSR (1 << 2) 27#define QUIRK_NO_MUXPSR (1 << 2)
28#define QUIRK_NEED_RSTCLR (1 << 3) 28#define QUIRK_NEED_RSTCLR (1 << 3)
29#define QUIRK_SUPPORTS_TDM (1 << 4) 29#define QUIRK_SUPPORTS_TDM (1 << 4)
30#define QUIRK_SUPPORTS_IDMA (1 << 5)
30 /* Quirks of the I2S controller */ 31 /* Quirks of the I2S controller */
31 u32 quirks; 32 u32 quirks;
32 dma_addr_t idma_addr; 33 dma_addr_t idma_addr;
diff --git a/include/linux/platform_data/bcmgenet.h b/include/linux/platform_data/bcmgenet.h
new file mode 100644
index 000000000000..26af54321958
--- /dev/null
+++ b/include/linux/platform_data/bcmgenet.h
@@ -0,0 +1,18 @@
1#ifndef __LINUX_PLATFORM_DATA_BCMGENET_H__
2#define __LINUX_PLATFORM_DATA_BCMGENET_H__
3
4#include <linux/types.h>
5#include <linux/if_ether.h>
6#include <linux/phy.h>
7
8struct bcmgenet_platform_data {
9 bool mdio_enabled;
10 phy_interface_t phy_interface;
11 int phy_address;
12 int phy_speed;
13 int phy_duplex;
14 u8 mac_address[ETH_ALEN];
15 int genet_version;
16};
17
18#endif
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h
new file mode 100644
index 000000000000..d8155c005242
--- /dev/null
+++ b/include/linux/platform_data/dma-dw.h
@@ -0,0 +1,59 @@
1/*
2 * Driver for the Synopsys DesignWare DMA Controller
3 *
4 * Copyright (C) 2007 Atmel Corporation
5 * Copyright (C) 2010-2011 ST Microelectronics
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11#ifndef _PLATFORM_DATA_DMA_DW_H
12#define _PLATFORM_DATA_DMA_DW_H
13
14#include <linux/device.h>
15
16/**
17 * struct dw_dma_slave - Controller-specific information about a slave
18 *
19 * @dma_dev: required DMA master device. Depricated.
20 * @src_id: src request line
21 * @dst_id: dst request line
22 * @src_master: src master for transfers on allocated channel.
23 * @dst_master: dest master for transfers on allocated channel.
24 */
25struct dw_dma_slave {
26 struct device *dma_dev;
27 u8 src_id;
28 u8 dst_id;
29 u8 src_master;
30 u8 dst_master;
31};
32
33/**
34 * struct dw_dma_platform_data - Controller configuration parameters
35 * @nr_channels: Number of channels supported by hardware (max 8)
36 * @is_private: The device channels should be marked as private and not for
37 * by the general purpose DMA channel allocator.
38 * @chan_allocation_order: Allocate channels starting from 0 or 7
39 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0.
40 * @block_size: Maximum block size supported by the controller
41 * @nr_masters: Number of AHB masters supported by the controller
42 * @data_width: Maximum data width supported by hardware per AHB master
43 * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
44 */
45struct dw_dma_platform_data {
46 unsigned int nr_channels;
47 bool is_private;
48#define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */
49#define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */
50 unsigned char chan_allocation_order;
51#define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */
52#define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */
53 unsigned char chan_priority;
54 unsigned short block_size;
55 unsigned char nr_masters;
56 unsigned char data_width[4];
57};
58
59#endif /* _PLATFORM_DATA_DMA_DW_H */
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index 6a1357d31871..7d964e787299 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -41,6 +41,7 @@ enum sdma_peripheral_type {
41 IMX_DMATYPE_ESAI, /* ESAI */ 41 IMX_DMATYPE_ESAI, /* ESAI */
42 IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ 42 IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */
43 IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ 43 IMX_DMATYPE_ASRC_SP, /* Shared ASRC */
44 IMX_DMATYPE_SAI, /* SAI */
44}; 45};
45 46
46enum imx_dma_prio { 47enum imx_dma_prio {
diff --git a/include/linux/platform_data/dwc3-exynos.h b/include/linux/platform_data/dwc3-exynos.h
deleted file mode 100644
index 5eb7da9b3772..000000000000
--- a/include/linux/platform_data/dwc3-exynos.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/**
2 * dwc3-exynos.h - Samsung EXYNOS DWC3 Specific Glue layer, header.
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * Author: Anton Tikhomirov <av.tikhomirov@samsung.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 */
14
15#ifndef _DWC3_EXYNOS_H_
16#define _DWC3_EXYNOS_H_
17
18struct dwc3_exynos_data {
19 int phy_type;
20 int (*phy_init)(struct platform_device *pdev, int type);
21 int (*phy_exit)(struct platform_device *pdev, int type);
22};
23
24#endif /* _DWC3_EXYNOS_H_ */
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
index 780d1e97f620..b8686c00f15f 100644
--- a/include/linux/platform_data/elm.h
+++ b/include/linux/platform_data/elm.h
@@ -42,8 +42,24 @@ struct elm_errorvec {
42 int error_loc[16]; 42 int error_loc[16];
43}; 43};
44 44
45#if IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)
45void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc, 46void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
46 struct elm_errorvec *err_vec); 47 struct elm_errorvec *err_vec);
47int elm_config(struct device *dev, enum bch_ecc bch_type, 48int elm_config(struct device *dev, enum bch_ecc bch_type,
48 int ecc_steps, int ecc_step_size, int ecc_syndrome_size); 49 int ecc_steps, int ecc_step_size, int ecc_syndrome_size);
50#else
51static inline void
52elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
53 struct elm_errorvec *err_vec)
54{
55}
56
57static inline int elm_config(struct device *dev, enum bch_ecc bch_type,
58 int ecc_steps, int ecc_step_size,
59 int ecc_syndrome_size)
60{
61 return -ENOSYS;
62}
63#endif /* CONFIG_MTD_NAND_ECC_BCH */
64
49#endif /* __ELM_H */ 65#endif /* __ELM_H */
diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h
new file mode 100644
index 000000000000..28702c849af1
--- /dev/null
+++ b/include/linux/platform_data/gpio-dwapb.h
@@ -0,0 +1,32 @@
1/*
2 * Copyright(c) 2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#ifndef GPIO_DW_APB_H
15#define GPIO_DW_APB_H
16
17struct dwapb_port_property {
18 struct device_node *node;
19 const char *name;
20 unsigned int idx;
21 unsigned int ngpio;
22 unsigned int gpio_base;
23 unsigned int irq;
24 bool irq_shared;
25};
26
27struct dwapb_platform_data {
28 struct dwapb_port_property *properties;
29 unsigned int nports;
30};
31
32#endif
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
new file mode 100644
index 000000000000..67bbcf0785f6
--- /dev/null
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -0,0 +1,90 @@
1/*
2 * MMC definitions for OMAP2
3 *
4 * Copyright (C) 2006 Nokia Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * struct omap_hsmmc_dev_attr.flags possibilities
13 *
14 * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can
15 * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag
16 * should be set if this is the case. See for example Section 22.5.3
17 * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia
18 * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R).
19 *
20 * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers
21 * don't work correctly on some MMC controller instances on some
22 * OMAP3 SoCs; this flag should be set if this is the case. See
23 * for example Advisory 2.1.1.128 "MMC: Multiple Block Read
24 * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_
25 * Revision F (October 2010) (SPRZ278F).
26 */
27#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0)
28#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1)
29#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2)
30
31struct omap_hsmmc_dev_attr {
32 u8 flags;
33};
34
35struct mmc_card;
36
37struct omap_hsmmc_platform_data {
38 /* back-link to device */
39 struct device *dev;
40
41 /* set if your board has components or wiring that limits the
42 * maximum frequency on the MMC bus */
43 unsigned int max_freq;
44
45 /* Integrating attributes from the omap_hwmod layer */
46 u8 controller_flags;
47
48 /* Register offset deviation */
49 u16 reg_offset;
50
51 /*
52 * 4/8 wires and any additional host capabilities
53 * need to OR'd all capabilities (ref. linux/mmc/host.h)
54 */
55 u32 caps; /* Used for the MMC driver on 2430 and later */
56 u32 pm_caps; /* PM capabilities of the mmc */
57
58 /* switch pin can be for card detect (default) or card cover */
59 unsigned cover:1;
60
61 /* use the internal clock */
62 unsigned internal_clock:1;
63
64 /* nonremovable e.g. eMMC */
65 unsigned nonremovable:1;
66
67 /* eMMC does not handle power off when not in sleep state */
68 unsigned no_regulator_off_init:1;
69
70 /* we can put the features above into this variable */
71#define HSMMC_HAS_PBIAS (1 << 0)
72#define HSMMC_HAS_UPDATED_RESET (1 << 1)
73#define HSMMC_HAS_HSPE_SUPPORT (1 << 2)
74 unsigned features;
75
76 int switch_pin; /* gpio (card detect) */
77 int gpio_wp; /* gpio (write protect) */
78
79 int (*set_power)(struct device *dev, int power_on, int vdd);
80 void (*remux)(struct device *dev, int power_on);
81 /* Call back before enabling / disabling regulators */
82 void (*before_set_reg)(struct device *dev, int power_on, int vdd);
83 /* Call back after enabling / disabling regulators */
84 void (*after_set_reg)(struct device *dev, int power_on, int vdd);
85 /* if we have special card, init it using this callback */
86 void (*init_card)(struct mmc_card *card);
87
88 const char *name;
89 u32 ocr_mask;
90};
diff --git a/include/linux/platform_data/i2c-designware.h b/include/linux/platform_data/i2c-designware.h
new file mode 100644
index 000000000000..7a61fb27c25b
--- /dev/null
+++ b/include/linux/platform_data/i2c-designware.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright(c) 2014 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#ifndef I2C_DESIGNWARE_H
15#define I2C_DESIGNWARE_H
16
17struct dw_i2c_platform_data {
18 unsigned int i2c_scl_freq;
19};
20
21#endif
diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h
new file mode 100644
index 000000000000..1419133fa69e
--- /dev/null
+++ b/include/linux/platform_data/isl9305.h
@@ -0,0 +1,30 @@
1/*
2 * isl9305 - Intersil ISL9305 DCDC regulator
3 *
4 * Copyright 2014 Linaro Ltd
5 *
6 * Author: Mark Brown <broonie@kernel.org>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef __ISL9305_H
15#define __ISL9305_H
16
17#define ISL9305_DCD1 0
18#define ISL9305_DCD2 1
19#define ISL9305_LDO1 2
20#define ISL9305_LDO2 3
21
22#define ISL9305_MAX_REGULATOR ISL9305_LDO2
23
24struct regulator_init_data;
25
26struct isl9305_pdata {
27 struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR];
28};
29
30#endif
diff --git a/include/linux/platform_data/lp855x.h b/include/linux/platform_data/lp855x.h
index 1b2ba24e4e03..9c7fd1efe495 100644
--- a/include/linux/platform_data/lp855x.h
+++ b/include/linux/platform_data/lp855x.h
@@ -136,6 +136,7 @@ struct lp855x_rom_data {
136 Only valid when mode is PWM_BASED. 136 Only valid when mode is PWM_BASED.
137 * @size_program : total size of lp855x_rom_data 137 * @size_program : total size of lp855x_rom_data
138 * @rom_data : list of new eeprom/eprom registers 138 * @rom_data : list of new eeprom/eprom registers
139 * @supply : regulator that supplies 3V input
139 */ 140 */
140struct lp855x_platform_data { 141struct lp855x_platform_data {
141 const char *name; 142 const char *name;
@@ -144,6 +145,7 @@ struct lp855x_platform_data {
144 unsigned int period_ns; 145 unsigned int period_ns;
145 int size_program; 146 int size_program;
146 struct lp855x_rom_data *rom_data; 147 struct lp855x_rom_data *rom_data;
148 struct regulator *supply;
147}; 149};
148 150
149#endif 151#endif
diff --git a/include/linux/platform_data/mmc-atmel-mci.h b/include/linux/platform_data/mmc-atmel-mci.h
new file mode 100644
index 000000000000..399a2d5a14bd
--- /dev/null
+++ b/include/linux/platform_data/mmc-atmel-mci.h
@@ -0,0 +1,22 @@
1#ifndef __MMC_ATMEL_MCI_H
2#define __MMC_ATMEL_MCI_H
3
4#include <linux/platform_data/dma-atmel.h>
5#include <linux/platform_data/dma-dw.h>
6
7/**
8 * struct mci_dma_data - DMA data for MCI interface
9 */
10struct mci_dma_data {
11#ifdef CONFIG_ARM
12 struct at_dma_slave sdata;
13#else
14 struct dw_dma_slave sdata;
15#endif
16};
17
18/* accessor macros */
19#define slave_data_ptr(s) (&(s)->sdata)
20#define find_slave_dev(s) ((s)->sdata.dma_dev)
21
22#endif /* __MMC_ATMEL_MCI_H */
diff --git a/include/linux/platform_data/mmc-omap.h b/include/linux/platform_data/mmc-omap.h
index 51e70cf25cbc..5c188f4e9bec 100644
--- a/include/linux/platform_data/mmc-omap.h
+++ b/include/linux/platform_data/mmc-omap.h
@@ -10,32 +10,8 @@
10 10
11#define OMAP_MMC_MAX_SLOTS 2 11#define OMAP_MMC_MAX_SLOTS 2
12 12
13/*
14 * struct omap_mmc_dev_attr.flags possibilities
15 *
16 * OMAP_HSMMC_SUPPORTS_DUAL_VOLT: Some HSMMC controller instances can
17 * operate with either 1.8Vdc or 3.0Vdc card voltages; this flag
18 * should be set if this is the case. See for example Section 22.5.3
19 * "MMC/SD/SDIO1 Bus Voltage Selection" of the OMAP34xx Multimedia
20 * Device Silicon Revision 3.1.x Revision ZR (July 2011) (SWPU223R).
21 *
22 * OMAP_HSMMC_BROKEN_MULTIBLOCK_READ: Multiple-block read transfers
23 * don't work correctly on some MMC controller instances on some
24 * OMAP3 SoCs; this flag should be set if this is the case. See
25 * for example Advisory 2.1.1.128 "MMC: Multiple Block Read
26 * Operation Issue" in _OMAP3530/3525/3515/3503 Silicon Errata_
27 * Revision F (October 2010) (SPRZ278F).
28 */
29#define OMAP_HSMMC_SUPPORTS_DUAL_VOLT BIT(0)
30#define OMAP_HSMMC_BROKEN_MULTIBLOCK_READ BIT(1)
31#define OMAP_HSMMC_SWAKEUP_MISSING BIT(2)
32
33struct mmc_card; 13struct mmc_card;
34 14
35struct omap_mmc_dev_attr {
36 u8 flags;
37};
38
39struct omap_mmc_platform_data { 15struct omap_mmc_platform_data {
40 /* back-link to device */ 16 /* back-link to device */
41 struct device *dev; 17 struct device *dev;
@@ -106,9 +82,6 @@ struct omap_mmc_platform_data {
106 unsigned vcc_aux_disable_is_sleep:1; 82 unsigned vcc_aux_disable_is_sleep:1;
107 83
108 /* we can put the features above into this variable */ 84 /* we can put the features above into this variable */
109#define HSMMC_HAS_PBIAS (1 << 0)
110#define HSMMC_HAS_UPDATED_RESET (1 << 1)
111#define HSMMC_HAS_HSPE_SUPPORT (1 << 2)
112#define MMC_OMAP7XX (1 << 3) 85#define MMC_OMAP7XX (1 << 3)
113#define MMC_OMAP15XX (1 << 4) 86#define MMC_OMAP15XX (1 << 4)
114#define MMC_OMAP16XX (1 << 5) 87#define MMC_OMAP16XX (1 << 5)
diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h
index 16ec262dfcc8..090bbab0130a 100644
--- a/include/linux/platform_data/mtd-nand-omap2.h
+++ b/include/linux/platform_data/mtd-nand-omap2.h
@@ -71,6 +71,7 @@ struct omap_nand_platform_data {
71 struct mtd_partition *parts; 71 struct mtd_partition *parts;
72 int nr_parts; 72 int nr_parts;
73 bool dev_ready; 73 bool dev_ready;
74 bool flash_bbt;
74 enum nand_io xfer_type; 75 enum nand_io xfer_type;
75 int devsize; 76 int devsize;
76 enum omap_ecc ecc_opt; 77 enum omap_ecc ecc_opt;
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h
index 27d3156d093a..9e20c2fb4ffd 100644
--- a/include/linux/platform_data/pxa_sdhci.h
+++ b/include/linux/platform_data/pxa_sdhci.h
@@ -55,9 +55,4 @@ struct sdhci_pxa_platdata {
55 unsigned int quirks2; 55 unsigned int quirks2;
56 unsigned int pm_caps; 56 unsigned int pm_caps;
57}; 57};
58
59struct sdhci_pxa {
60 u8 clk_enable;
61 u8 power_mode;
62};
63#endif /* _PXA_SDHCI_H_ */ 58#endif /* _PXA_SDHCI_H_ */
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
deleted file mode 100644
index 1a2e9901a22e..000000000000
--- a/include/linux/platform_data/rcar-du.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * rcar_du.h -- R-Car Display Unit DRM driver
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_H__
15#define __RCAR_DU_H__
16
17#include <drm/drm_mode.h>
18
19enum rcar_du_output {
20 RCAR_DU_OUTPUT_DPAD0,
21 RCAR_DU_OUTPUT_DPAD1,
22 RCAR_DU_OUTPUT_LVDS0,
23 RCAR_DU_OUTPUT_LVDS1,
24 RCAR_DU_OUTPUT_TCON,
25 RCAR_DU_OUTPUT_MAX,
26};
27
28enum rcar_du_encoder_type {
29 RCAR_DU_ENCODER_UNUSED = 0,
30 RCAR_DU_ENCODER_NONE,
31 RCAR_DU_ENCODER_VGA,
32 RCAR_DU_ENCODER_LVDS,
33};
34
35struct rcar_du_panel_data {
36 unsigned int width_mm; /* Panel width in mm */
37 unsigned int height_mm; /* Panel height in mm */
38 struct drm_mode_modeinfo mode;
39};
40
41struct rcar_du_connector_lvds_data {
42 struct rcar_du_panel_data panel;
43};
44
45struct rcar_du_connector_vga_data {
46 /* TODO: Add DDC information for EDID retrieval */
47};
48
49/*
50 * struct rcar_du_encoder_data - Encoder platform data
51 * @type: the encoder type (RCAR_DU_ENCODER_*)
52 * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*)
53 * @connector.lvds: platform data for LVDS connectors
54 * @connector.vga: platform data for VGA connectors
55 *
56 * Encoder platform data describes an on-board encoder, its associated DU SoC
57 * output, and the connector.
58 */
59struct rcar_du_encoder_data {
60 enum rcar_du_encoder_type type;
61 enum rcar_du_output output;
62
63 union {
64 struct rcar_du_connector_lvds_data lvds;
65 struct rcar_du_connector_vga_data vga;
66 } connector;
67};
68
69struct rcar_du_platform_data {
70 struct rcar_du_encoder_data *encoders;
71 unsigned int num_encoders;
72};
73
74#endif /* __RCAR_DU_H__ */
diff --git a/include/linux/platform_data/samsung-usbphy.h b/include/linux/platform_data/samsung-usbphy.h
deleted file mode 100644
index 1bd24cba982b..000000000000
--- a/include/linux/platform_data/samsung-usbphy.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Copyright (C) 2012 Samsung Electronics Co.Ltd
3 * http://www.samsung.com/
4 * Author: Praveen Paneri <p.paneri@samsung.com>
5 *
6 * Defines platform data for samsung usb phy driver.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef __SAMSUNG_USBPHY_PLATFORM_H
15#define __SAMSUNG_USBPHY_PLATFORM_H
16
17/**
18 * samsung_usbphy_data - Platform data for USB PHY driver.
19 * @pmu_isolation: Function to control usb phy isolation in PMU.
20 */
21struct samsung_usbphy_data {
22 void (*pmu_isolation)(int on);
23};
24
25extern void samsung_usbphy_set_pdata(struct samsung_usbphy_data *pd);
26
27#endif /* __SAMSUNG_USBPHY_PLATFORM_H */
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h
index c860c1b314c0..d09275f3cde3 100644
--- a/include/linux/platform_data/serial-omap.h
+++ b/include/linux/platform_data/serial-omap.h
@@ -38,9 +38,6 @@ struct omap_uart_port_info {
38 unsigned int dma_rx_timeout; 38 unsigned int dma_rx_timeout;
39 unsigned int autosuspend_timeout; 39 unsigned int autosuspend_timeout;
40 unsigned int dma_rx_poll_rate; 40 unsigned int dma_rx_poll_rate;
41 int DTR_gpio;
42 int DTR_inverted;
43 int DTR_present;
44 41
45 int (*get_context_loss_count)(struct device *); 42 int (*get_context_loss_count)(struct device *);
46 void (*enable_wakeup)(struct device *, bool); 43 void (*enable_wakeup)(struct device *, bool);
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
index 1730312398ff..5087fff96d86 100644
--- a/include/linux/platform_data/st21nfca.h
+++ b/include/linux/platform_data/st21nfca.h
@@ -24,7 +24,6 @@
24#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci" 24#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci"
25 25
26struct st21nfca_nfc_platform_data { 26struct st21nfca_nfc_platform_data {
27 unsigned int gpio_irq;
28 unsigned int gpio_ena; 27 unsigned int gpio_ena;
29 unsigned int irq_polarity; 28 unsigned int irq_polarity;
30}; 29};
diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h
index 2d11f1f5efab..c3b432f5b63e 100644
--- a/include/linux/platform_data/st21nfcb.h
+++ b/include/linux/platform_data/st21nfcb.h
@@ -24,7 +24,6 @@
24#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" 24#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci"
25 25
26struct st21nfcb_nfc_platform_data { 26struct st21nfcb_nfc_platform_data {
27 unsigned int gpio_irq;
28 unsigned int gpio_reset; 27 unsigned int gpio_reset;
29 unsigned int irq_polarity; 28 unsigned int irq_polarity;
30}; 29};
diff --git a/include/linux/platform_data/tegra_emc.h b/include/linux/platform_data/tegra_emc.h
deleted file mode 100644
index df67505e98f8..000000000000
--- a/include/linux/platform_data/tegra_emc.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (C) 2011 Google, Inc.
3 *
4 * Author:
5 * Colin Cross <ccross@android.com>
6 * Olof Johansson <olof@lixom.net>
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 */
18
19#ifndef __TEGRA_EMC_H_
20#define __TEGRA_EMC_H_
21
22#define TEGRA_EMC_NUM_REGS 46
23
24struct tegra_emc_table {
25 unsigned long rate;
26 u32 regs[TEGRA_EMC_NUM_REGS];
27};
28
29struct tegra_emc_pdata {
30 int num_tables;
31 struct tegra_emc_table *tables;
32};
33
34#endif
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 153d303af7eb..ae4882ca4a64 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -197,8 +197,10 @@ extern void platform_driver_unregister(struct platform_driver *);
197/* non-hotpluggable platform devices may use this so that probe() and 197/* non-hotpluggable platform devices may use this so that probe() and
198 * its support may live in __init sections, conserving runtime memory. 198 * its support may live in __init sections, conserving runtime memory.
199 */ 199 */
200extern int platform_driver_probe(struct platform_driver *driver, 200#define platform_driver_probe(drv, probe) \
201 int (*probe)(struct platform_device *)); 201 __platform_driver_probe(drv, probe, THIS_MODULE)
202extern int __platform_driver_probe(struct platform_driver *driver,
203 int (*probe)(struct platform_device *), struct module *module);
202 204
203static inline void *platform_get_drvdata(const struct platform_device *pdev) 205static inline void *platform_get_drvdata(const struct platform_device *pdev)
204{ 206{
@@ -238,10 +240,12 @@ static void __exit __platform_driver##_exit(void) \
238} \ 240} \
239module_exit(__platform_driver##_exit); 241module_exit(__platform_driver##_exit);
240 242
241extern struct platform_device *platform_create_bundle( 243#define platform_create_bundle(driver, probe, res, n_res, data, size) \
244 __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE)
245extern struct platform_device *__platform_create_bundle(
242 struct platform_driver *driver, int (*probe)(struct platform_device *), 246 struct platform_driver *driver, int (*probe)(struct platform_device *),
243 struct resource *res, unsigned int n_res, 247 struct resource *res, unsigned int n_res,
244 const void *data, size_t size); 248 const void *data, size_t size, struct module *module);
245 249
246/* early platform driver interface */ 250/* early platform driver interface */
247struct early_platform_driver { 251struct early_platform_driver {
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 8b6c970cff6c..97883604a3c5 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -176,7 +176,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
176 * plist_for_each_entry - iterate over list of given type 176 * plist_for_each_entry - iterate over list of given type
177 * @pos: the type * to use as a loop counter 177 * @pos: the type * to use as a loop counter
178 * @head: the head for your list 178 * @head: the head for your list
179 * @mem: the name of the list_struct within the struct 179 * @mem: the name of the list_head within the struct
180 */ 180 */
181#define plist_for_each_entry(pos, head, mem) \ 181#define plist_for_each_entry(pos, head, mem) \
182 list_for_each_entry(pos, &(head)->node_list, mem.node_list) 182 list_for_each_entry(pos, &(head)->node_list, mem.node_list)
@@ -185,7 +185,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
185 * plist_for_each_entry_continue - continue iteration over list of given type 185 * plist_for_each_entry_continue - continue iteration over list of given type
186 * @pos: the type * to use as a loop cursor 186 * @pos: the type * to use as a loop cursor
187 * @head: the head for your list 187 * @head: the head for your list
188 * @m: the name of the list_struct within the struct 188 * @m: the name of the list_head within the struct
189 * 189 *
190 * Continue to iterate over list of given type, continuing after 190 * Continue to iterate over list of given type, continuing after
191 * the current position. 191 * the current position.
@@ -198,7 +198,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
198 * @pos: the type * to use as a loop counter 198 * @pos: the type * to use as a loop counter
199 * @n: another type * to use as temporary storage 199 * @n: another type * to use as temporary storage
200 * @head: the head for your list 200 * @head: the head for your list
201 * @m: the name of the list_struct within the struct 201 * @m: the name of the list_head within the struct
202 * 202 *
203 * Iterate over list of given type, safe against removal of list entry. 203 * Iterate over list of given type, safe against removal of list entry.
204 */ 204 */
@@ -229,7 +229,7 @@ static inline int plist_node_empty(const struct plist_node *node)
229 * plist_first_entry - get the struct for the first entry 229 * plist_first_entry - get the struct for the first entry
230 * @head: the &struct plist_head pointer 230 * @head: the &struct plist_head pointer
231 * @type: the type of the struct this is embedded in 231 * @type: the type of the struct this is embedded in
232 * @member: the name of the list_struct within the struct 232 * @member: the name of the list_head within the struct
233 */ 233 */
234#ifdef CONFIG_DEBUG_PI_LIST 234#ifdef CONFIG_DEBUG_PI_LIST
235# define plist_first_entry(head, type, member) \ 235# define plist_first_entry(head, type, member) \
@@ -246,7 +246,7 @@ static inline int plist_node_empty(const struct plist_node *node)
246 * plist_last_entry - get the struct for the last entry 246 * plist_last_entry - get the struct for the last entry
247 * @head: the &struct plist_head pointer 247 * @head: the &struct plist_head pointer
248 * @type: the type of the struct this is embedded in 248 * @type: the type of the struct this is embedded in
249 * @member: the name of the list_struct within the struct 249 * @member: the name of the list_head within the struct
250 */ 250 */
251#ifdef CONFIG_DEBUG_PI_LIST 251#ifdef CONFIG_DEBUG_PI_LIST
252# define plist_last_entry(head, type, member) \ 252# define plist_last_entry(head, type, member) \
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 72c0fe098a27..8b5976364619 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -342,7 +342,7 @@ struct dev_pm_ops {
342#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) 342#define SET_LATE_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
343#endif 343#endif
344 344
345#ifdef CONFIG_PM_RUNTIME 345#ifdef CONFIG_PM
346#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \ 346#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
347 .runtime_suspend = suspend_fn, \ 347 .runtime_suspend = suspend_fn, \
348 .runtime_resume = resume_fn, \ 348 .runtime_resume = resume_fn, \
@@ -351,15 +351,6 @@ struct dev_pm_ops {
351#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) 351#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
352#endif 352#endif
353 353
354#ifdef CONFIG_PM
355#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
356 .runtime_suspend = suspend_fn, \
357 .runtime_resume = resume_fn, \
358 .runtime_idle = idle_fn,
359#else
360#define SET_PM_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
361#endif
362
363/* 354/*
364 * Use this if you want to use the same suspend and resume callbacks for suspend 355 * Use this if you want to use the same suspend and resume callbacks for suspend
365 * to RAM and hibernation. 356 * to RAM and hibernation.
@@ -538,11 +529,7 @@ enum rpm_request {
538}; 529};
539 530
540struct wakeup_source; 531struct wakeup_source;
541 532struct pm_domain_data;
542struct pm_domain_data {
543 struct list_head list_node;
544 struct device *dev;
545};
546 533
547struct pm_subsys_data { 534struct pm_subsys_data {
548 spinlock_t lock; 535 spinlock_t lock;
@@ -576,7 +563,7 @@ struct dev_pm_info {
576#else 563#else
577 unsigned int should_wakeup:1; 564 unsigned int should_wakeup:1;
578#endif 565#endif
579#ifdef CONFIG_PM_RUNTIME 566#ifdef CONFIG_PM
580 struct timer_list suspend_timer; 567 struct timer_list suspend_timer;
581 unsigned long timer_expires; 568 unsigned long timer_expires;
582 struct work_struct work; 569 struct work_struct work;
@@ -619,6 +606,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
619 */ 606 */
620struct dev_pm_domain { 607struct dev_pm_domain {
621 struct dev_pm_ops ops; 608 struct dev_pm_ops ops;
609 void (*detach)(struct device *dev, bool power_off);
622}; 610};
623 611
624/* 612/*
@@ -679,12 +667,16 @@ struct dev_pm_domain {
679extern void device_pm_lock(void); 667extern void device_pm_lock(void);
680extern void dpm_resume_start(pm_message_t state); 668extern void dpm_resume_start(pm_message_t state);
681extern void dpm_resume_end(pm_message_t state); 669extern void dpm_resume_end(pm_message_t state);
670extern void dpm_resume_noirq(pm_message_t state);
671extern void dpm_resume_early(pm_message_t state);
682extern void dpm_resume(pm_message_t state); 672extern void dpm_resume(pm_message_t state);
683extern void dpm_complete(pm_message_t state); 673extern void dpm_complete(pm_message_t state);
684 674
685extern void device_pm_unlock(void); 675extern void device_pm_unlock(void);
686extern int dpm_suspend_end(pm_message_t state); 676extern int dpm_suspend_end(pm_message_t state);
687extern int dpm_suspend_start(pm_message_t state); 677extern int dpm_suspend_start(pm_message_t state);
678extern int dpm_suspend_noirq(pm_message_t state);
679extern int dpm_suspend_late(pm_message_t state);
688extern int dpm_suspend(pm_message_t state); 680extern int dpm_suspend(pm_message_t state);
689extern int dpm_prepare(pm_message_t state); 681extern int dpm_prepare(pm_message_t state);
690 682
diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h
index 8348866e7b05..0b0039634410 100644
--- a/include/linux/pm_clock.h
+++ b/include/linux/pm_clock.h
@@ -18,6 +18,8 @@ struct pm_clk_notifier_block {
18 char *con_ids[]; 18 char *con_ids[];
19}; 19};
20 20
21struct clk;
22
21#ifdef CONFIG_PM_CLK 23#ifdef CONFIG_PM_CLK
22static inline bool pm_clk_no_clocks(struct device *dev) 24static inline bool pm_clk_no_clocks(struct device *dev)
23{ 25{
@@ -29,6 +31,7 @@ extern void pm_clk_init(struct device *dev);
29extern int pm_clk_create(struct device *dev); 31extern int pm_clk_create(struct device *dev);
30extern void pm_clk_destroy(struct device *dev); 32extern void pm_clk_destroy(struct device *dev);
31extern int pm_clk_add(struct device *dev, const char *con_id); 33extern int pm_clk_add(struct device *dev, const char *con_id);
34extern int pm_clk_add_clk(struct device *dev, struct clk *clk);
32extern void pm_clk_remove(struct device *dev, const char *con_id); 35extern void pm_clk_remove(struct device *dev, const char *con_id);
33extern int pm_clk_suspend(struct device *dev); 36extern int pm_clk_suspend(struct device *dev);
34extern int pm_clk_resume(struct device *dev); 37extern int pm_clk_resume(struct device *dev);
@@ -51,6 +54,11 @@ static inline int pm_clk_add(struct device *dev, const char *con_id)
51{ 54{
52 return -EINVAL; 55 return -EINVAL;
53} 56}
57
58static inline int pm_clk_add_clk(struct device *dev, struct clk *clk)
59{
60 return -EINVAL;
61}
54static inline void pm_clk_remove(struct device *dev, const char *con_id) 62static inline void pm_clk_remove(struct device *dev, const char *con_id)
55{ 63{
56} 64}
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index ebc4c76ffb73..a9edab2c787a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -17,6 +17,9 @@
17#include <linux/notifier.h> 17#include <linux/notifier.h>
18#include <linux/cpuidle.h> 18#include <linux/cpuidle.h>
19 19
20/* Defines used for the flags field in the struct generic_pm_domain */
21#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
22
20enum gpd_status { 23enum gpd_status {
21 GPD_STATE_ACTIVE = 0, /* PM domain is active */ 24 GPD_STATE_ACTIVE = 0, /* PM domain is active */
22 GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ 25 GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */
@@ -35,18 +38,10 @@ struct gpd_dev_ops {
35 int (*stop)(struct device *dev); 38 int (*stop)(struct device *dev);
36 int (*save_state)(struct device *dev); 39 int (*save_state)(struct device *dev);
37 int (*restore_state)(struct device *dev); 40 int (*restore_state)(struct device *dev);
38 int (*suspend)(struct device *dev);
39 int (*suspend_late)(struct device *dev);
40 int (*resume_early)(struct device *dev);
41 int (*resume)(struct device *dev);
42 int (*freeze)(struct device *dev);
43 int (*freeze_late)(struct device *dev);
44 int (*thaw_early)(struct device *dev);
45 int (*thaw)(struct device *dev);
46 bool (*active_wakeup)(struct device *dev); 41 bool (*active_wakeup)(struct device *dev);
47}; 42};
48 43
49struct gpd_cpu_data { 44struct gpd_cpuidle_data {
50 unsigned int saved_exit_latency; 45 unsigned int saved_exit_latency;
51 struct cpuidle_state *idle_state; 46 struct cpuidle_state *idle_state;
52}; 47};
@@ -71,7 +66,6 @@ struct generic_pm_domain {
71 unsigned int suspended_count; /* System suspend device counter */ 66 unsigned int suspended_count; /* System suspend device counter */
72 unsigned int prepared_count; /* Suspend counter of prepared devices */ 67 unsigned int prepared_count; /* Suspend counter of prepared devices */
73 bool suspend_power_off; /* Power status before system suspend */ 68 bool suspend_power_off; /* Power status before system suspend */
74 bool dev_irq_safe; /* Device callbacks are IRQ-safe */
75 int (*power_off)(struct generic_pm_domain *domain); 69 int (*power_off)(struct generic_pm_domain *domain);
76 s64 power_off_latency_ns; 70 s64 power_off_latency_ns;
77 int (*power_on)(struct generic_pm_domain *domain); 71 int (*power_on)(struct generic_pm_domain *domain);
@@ -80,8 +74,12 @@ struct generic_pm_domain {
80 s64 max_off_time_ns; /* Maximum allowed "suspended" time. */ 74 s64 max_off_time_ns; /* Maximum allowed "suspended" time. */
81 bool max_off_time_changed; 75 bool max_off_time_changed;
82 bool cached_power_down_ok; 76 bool cached_power_down_ok;
83 struct device_node *of_node; /* Node in device tree */ 77 struct gpd_cpuidle_data *cpuidle_data;
84 struct gpd_cpu_data *cpu_data; 78 int (*attach_dev)(struct generic_pm_domain *domain,
79 struct device *dev);
80 void (*detach_dev)(struct generic_pm_domain *domain,
81 struct device *dev);
82 unsigned int flags; /* Bit field of configs for genpd */
85}; 83};
86 84
87static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) 85static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -106,14 +104,18 @@ struct gpd_timing_data {
106 bool cached_stop_ok; 104 bool cached_stop_ok;
107}; 105};
108 106
107struct pm_domain_data {
108 struct list_head list_node;
109 struct device *dev;
110};
111
109struct generic_pm_domain_data { 112struct generic_pm_domain_data {
110 struct pm_domain_data base; 113 struct pm_domain_data base;
111 struct gpd_dev_ops ops;
112 struct gpd_timing_data td; 114 struct gpd_timing_data td;
113 struct notifier_block nb; 115 struct notifier_block nb;
114 struct mutex lock; 116 struct mutex lock;
115 unsigned int refcount; 117 unsigned int refcount;
116 bool need_restore; 118 int need_restore;
117}; 119};
118 120
119#ifdef CONFIG_PM_GENERIC_DOMAINS 121#ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -127,17 +129,11 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
127 return to_gpd_data(dev->power.subsys_data->domain_data); 129 return to_gpd_data(dev->power.subsys_data->domain_data);
128} 130}
129 131
130extern struct dev_power_governor simple_qos_governor;
131
132extern struct generic_pm_domain *dev_to_genpd(struct device *dev); 132extern struct generic_pm_domain *dev_to_genpd(struct device *dev);
133extern int __pm_genpd_add_device(struct generic_pm_domain *genpd, 133extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
134 struct device *dev, 134 struct device *dev,
135 struct gpd_timing_data *td); 135 struct gpd_timing_data *td);
136 136
137extern int __pm_genpd_of_add_device(struct device_node *genpd_node,
138 struct device *dev,
139 struct gpd_timing_data *td);
140
141extern int __pm_genpd_name_add_device(const char *domain_name, 137extern int __pm_genpd_name_add_device(const char *domain_name,
142 struct device *dev, 138 struct device *dev,
143 struct gpd_timing_data *td); 139 struct gpd_timing_data *td);
@@ -151,10 +147,6 @@ extern int pm_genpd_add_subdomain_names(const char *master_name,
151 const char *subdomain_name); 147 const char *subdomain_name);
152extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, 148extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
153 struct generic_pm_domain *target); 149 struct generic_pm_domain *target);
154extern int pm_genpd_add_callbacks(struct device *dev,
155 struct gpd_dev_ops *ops,
156 struct gpd_timing_data *td);
157extern int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td);
158extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state); 150extern int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state);
159extern int pm_genpd_name_attach_cpuidle(const char *name, int state); 151extern int pm_genpd_name_attach_cpuidle(const char *name, int state);
160extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd); 152extern int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd);
@@ -164,9 +156,9 @@ extern void pm_genpd_init(struct generic_pm_domain *genpd,
164 156
165extern int pm_genpd_poweron(struct generic_pm_domain *genpd); 157extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
166extern int pm_genpd_name_poweron(const char *domain_name); 158extern int pm_genpd_name_poweron(const char *domain_name);
159extern void pm_genpd_poweroff_unused(void);
167 160
168extern bool default_stop_ok(struct device *dev); 161extern struct dev_power_governor simple_qos_governor;
169
170extern struct dev_power_governor pm_domain_always_on_gov; 162extern struct dev_power_governor pm_domain_always_on_gov;
171#else 163#else
172 164
@@ -184,12 +176,6 @@ static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
184{ 176{
185 return -ENOSYS; 177 return -ENOSYS;
186} 178}
187static inline int __pm_genpd_of_add_device(struct device_node *genpd_node,
188 struct device *dev,
189 struct gpd_timing_data *td)
190{
191 return -ENOSYS;
192}
193static inline int __pm_genpd_name_add_device(const char *domain_name, 179static inline int __pm_genpd_name_add_device(const char *domain_name,
194 struct device *dev, 180 struct device *dev,
195 struct gpd_timing_data *td) 181 struct gpd_timing_data *td)
@@ -217,16 +203,6 @@ static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
217{ 203{
218 return -ENOSYS; 204 return -ENOSYS;
219} 205}
220static inline int pm_genpd_add_callbacks(struct device *dev,
221 struct gpd_dev_ops *ops,
222 struct gpd_timing_data *td)
223{
224 return -ENOSYS;
225}
226static inline int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
227{
228 return -ENOSYS;
229}
230static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st) 206static inline int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int st)
231{ 207{
232 return -ENOSYS; 208 return -ENOSYS;
@@ -255,10 +231,7 @@ static inline int pm_genpd_name_poweron(const char *domain_name)
255{ 231{
256 return -ENOSYS; 232 return -ENOSYS;
257} 233}
258static inline bool default_stop_ok(struct device *dev) 234static inline void pm_genpd_poweroff_unused(void) {}
259{
260 return false;
261}
262#define simple_qos_governor NULL 235#define simple_qos_governor NULL
263#define pm_domain_always_on_gov NULL 236#define pm_domain_always_on_gov NULL
264#endif 237#endif
@@ -269,45 +242,89 @@ static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
269 return __pm_genpd_add_device(genpd, dev, NULL); 242 return __pm_genpd_add_device(genpd, dev, NULL);
270} 243}
271 244
272static inline int pm_genpd_of_add_device(struct device_node *genpd_node,
273 struct device *dev)
274{
275 return __pm_genpd_of_add_device(genpd_node, dev, NULL);
276}
277
278static inline int pm_genpd_name_add_device(const char *domain_name, 245static inline int pm_genpd_name_add_device(const char *domain_name,
279 struct device *dev) 246 struct device *dev)
280{ 247{
281 return __pm_genpd_name_add_device(domain_name, dev, NULL); 248 return __pm_genpd_name_add_device(domain_name, dev, NULL);
282} 249}
283 250
284static inline int pm_genpd_remove_callbacks(struct device *dev) 251#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP
252extern void pm_genpd_syscore_poweroff(struct device *dev);
253extern void pm_genpd_syscore_poweron(struct device *dev);
254#else
255static inline void pm_genpd_syscore_poweroff(struct device *dev) {}
256static inline void pm_genpd_syscore_poweron(struct device *dev) {}
257#endif
258
259/* OF PM domain providers */
260struct of_device_id;
261
262struct genpd_onecell_data {
263 struct generic_pm_domain **domains;
264 unsigned int num_domains;
265};
266
267typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
268 void *data);
269
270#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
271int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
272 void *data);
273void of_genpd_del_provider(struct device_node *np);
274struct generic_pm_domain *of_genpd_get_from_provider(
275 struct of_phandle_args *genpdspec);
276
277struct generic_pm_domain *__of_genpd_xlate_simple(
278 struct of_phandle_args *genpdspec,
279 void *data);
280struct generic_pm_domain *__of_genpd_xlate_onecell(
281 struct of_phandle_args *genpdspec,
282 void *data);
283
284int genpd_dev_pm_attach(struct device *dev);
285#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
286static inline int __of_genpd_add_provider(struct device_node *np,
287 genpd_xlate_t xlate, void *data)
285{ 288{
286 return __pm_genpd_remove_callbacks(dev, true); 289 return 0;
287} 290}
291static inline void of_genpd_del_provider(struct device_node *np) {}
288 292
289#ifdef CONFIG_PM_GENERIC_DOMAINS_RUNTIME 293static inline struct generic_pm_domain *of_genpd_get_from_provider(
290extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd); 294 struct of_phandle_args *genpdspec)
291extern void pm_genpd_poweroff_unused(void); 295{
292#else 296 return NULL;
293static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {} 297}
294static inline void pm_genpd_poweroff_unused(void) {}
295#endif
296 298
297#ifdef CONFIG_PM_GENERIC_DOMAINS_SLEEP 299#define __of_genpd_xlate_simple NULL
298extern void pm_genpd_syscore_switch(struct device *dev, bool suspend); 300#define __of_genpd_xlate_onecell NULL
299#else 301
300static inline void pm_genpd_syscore_switch(struct device *dev, bool suspend) {} 302static inline int genpd_dev_pm_attach(struct device *dev)
301#endif 303{
304 return -ENODEV;
305}
306#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
302 307
303static inline void pm_genpd_syscore_poweroff(struct device *dev) 308static inline int of_genpd_add_provider_simple(struct device_node *np,
309 struct generic_pm_domain *genpd)
310{
311 return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd);
312}
313static inline int of_genpd_add_provider_onecell(struct device_node *np,
314 struct genpd_onecell_data *data)
304{ 315{
305 pm_genpd_syscore_switch(dev, true); 316 return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data);
306} 317}
307 318
308static inline void pm_genpd_syscore_poweron(struct device *dev) 319#ifdef CONFIG_PM
320extern int dev_pm_domain_attach(struct device *dev, bool power_on);
321extern void dev_pm_domain_detach(struct device *dev, bool power_off);
322#else
323static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
309{ 324{
310 pm_genpd_syscore_switch(dev, false); 325 return -ENODEV;
311} 326}
327static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
328#endif
312 329
313#endif /* _LINUX_PM_DOMAIN_H */ 330#endif /* _LINUX_PM_DOMAIN_H */
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 0330217abfad..cec2d4540914 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -21,7 +21,7 @@ struct dev_pm_opp;
21struct device; 21struct device;
22 22
23enum dev_pm_opp_event { 23enum dev_pm_opp_event {
24 OPP_EVENT_ADD, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE, 24 OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
25}; 25};
26 26
27#if defined(CONFIG_PM_OPP) 27#if defined(CONFIG_PM_OPP)
@@ -44,6 +44,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
44 44
45int dev_pm_opp_add(struct device *dev, unsigned long freq, 45int dev_pm_opp_add(struct device *dev, unsigned long freq,
46 unsigned long u_volt); 46 unsigned long u_volt);
47void dev_pm_opp_remove(struct device *dev, unsigned long freq);
47 48
48int dev_pm_opp_enable(struct device *dev, unsigned long freq); 49int dev_pm_opp_enable(struct device *dev, unsigned long freq);
49 50
@@ -90,6 +91,10 @@ static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
90 return -EINVAL; 91 return -EINVAL;
91} 92}
92 93
94static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
95{
96}
97
93static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) 98static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
94{ 99{
95 return 0; 100 return 0;
@@ -109,11 +114,16 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
109 114
110#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) 115#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
111int of_init_opp_table(struct device *dev); 116int of_init_opp_table(struct device *dev);
117void of_free_opp_table(struct device *dev);
112#else 118#else
113static inline int of_init_opp_table(struct device *dev) 119static inline int of_init_opp_table(struct device *dev)
114{ 120{
115 return -EINVAL; 121 return -EINVAL;
116} 122}
123
124static inline void of_free_opp_table(struct device *dev)
125{
126}
117#endif 127#endif
118 128
119#endif /* __LINUX_OPP_H__ */ 129#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 9ab4bf7c4646..7b3ae0cffc05 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -15,6 +15,7 @@ enum {
15 PM_QOS_CPU_DMA_LATENCY, 15 PM_QOS_CPU_DMA_LATENCY,
16 PM_QOS_NETWORK_LATENCY, 16 PM_QOS_NETWORK_LATENCY,
17 PM_QOS_NETWORK_THROUGHPUT, 17 PM_QOS_NETWORK_THROUGHPUT,
18 PM_QOS_MEMORY_BANDWIDTH,
18 19
19 /* insert new class ID */ 20 /* insert new class ID */
20 PM_QOS_NUM_CLASSES, 21 PM_QOS_NUM_CLASSES,
@@ -32,6 +33,7 @@ enum pm_qos_flags_status {
32#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) 33#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
33#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) 34#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
34#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 35#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
36#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
35#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0 37#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
36#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 38#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
37#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) 39#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
@@ -69,7 +71,8 @@ struct dev_pm_qos_request {
69enum pm_qos_type { 71enum pm_qos_type {
70 PM_QOS_UNITIALIZED, 72 PM_QOS_UNITIALIZED,
71 PM_QOS_MAX, /* return the largest value */ 73 PM_QOS_MAX, /* return the largest value */
72 PM_QOS_MIN /* return the smallest value */ 74 PM_QOS_MIN, /* return the smallest value */
75 PM_QOS_SUM /* return the sum */
73}; 76};
74 77
75/* 78/*
@@ -151,6 +154,23 @@ void dev_pm_qos_constraints_destroy(struct device *dev);
151int dev_pm_qos_add_ancestor_request(struct device *dev, 154int dev_pm_qos_add_ancestor_request(struct device *dev,
152 struct dev_pm_qos_request *req, 155 struct dev_pm_qos_request *req,
153 enum dev_pm_qos_req_type type, s32 value); 156 enum dev_pm_qos_req_type type, s32 value);
157int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
158void dev_pm_qos_hide_latency_limit(struct device *dev);
159int dev_pm_qos_expose_flags(struct device *dev, s32 value);
160void dev_pm_qos_hide_flags(struct device *dev);
161int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
162s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
163int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
164
165static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
166{
167 return dev->power.qos->resume_latency_req->data.pnode.prio;
168}
169
170static inline s32 dev_pm_qos_requested_flags(struct device *dev)
171{
172 return dev->power.qos->flags_req->data.flr.flags;
173}
154#else 174#else
155static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, 175static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
156 s32 mask) 176 s32 mask)
@@ -197,27 +217,6 @@ static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
197 enum dev_pm_qos_req_type type, 217 enum dev_pm_qos_req_type type,
198 s32 value) 218 s32 value)
199 { return 0; } 219 { return 0; }
200#endif
201
202#ifdef CONFIG_PM_RUNTIME
203int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
204void dev_pm_qos_hide_latency_limit(struct device *dev);
205int dev_pm_qos_expose_flags(struct device *dev, s32 value);
206void dev_pm_qos_hide_flags(struct device *dev);
207int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
208s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
209int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
210
211static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
212{
213 return dev->power.qos->resume_latency_req->data.pnode.prio;
214}
215
216static inline s32 dev_pm_qos_requested_flags(struct device *dev)
217{
218 return dev->power.qos->flags_req->data.flr.flags;
219}
220#else
221static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value) 220static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
222 { return 0; } 221 { return 0; }
223static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {} 222static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 367f49b9a1c9..30e84d48bfea 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -35,16 +35,6 @@ extern int pm_generic_runtime_suspend(struct device *dev);
35extern int pm_generic_runtime_resume(struct device *dev); 35extern int pm_generic_runtime_resume(struct device *dev);
36extern int pm_runtime_force_suspend(struct device *dev); 36extern int pm_runtime_force_suspend(struct device *dev);
37extern int pm_runtime_force_resume(struct device *dev); 37extern int pm_runtime_force_resume(struct device *dev);
38#else
39static inline bool queue_pm_work(struct work_struct *work) { return false; }
40
41static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
42static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
43static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
44static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
45#endif
46
47#ifdef CONFIG_PM_RUNTIME
48 38
49extern int __pm_runtime_idle(struct device *dev, int rpmflags); 39extern int __pm_runtime_idle(struct device *dev, int rpmflags);
50extern int __pm_runtime_suspend(struct device *dev, int rpmflags); 40extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
@@ -128,7 +118,19 @@ static inline void pm_runtime_mark_last_busy(struct device *dev)
128 ACCESS_ONCE(dev->power.last_busy) = jiffies; 118 ACCESS_ONCE(dev->power.last_busy) = jiffies;
129} 119}
130 120
131#else /* !CONFIG_PM_RUNTIME */ 121static inline bool pm_runtime_is_irq_safe(struct device *dev)
122{
123 return dev->power.irq_safe;
124}
125
126#else /* !CONFIG_PM */
127
128static inline bool queue_pm_work(struct work_struct *work) { return false; }
129
130static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
131static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
132static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
133static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
132 134
133static inline int __pm_runtime_idle(struct device *dev, int rpmflags) 135static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
134{ 136{
@@ -167,6 +169,7 @@ static inline bool pm_runtime_enabled(struct device *dev) { return false; }
167 169
168static inline void pm_runtime_no_callbacks(struct device *dev) {} 170static inline void pm_runtime_no_callbacks(struct device *dev) {}
169static inline void pm_runtime_irq_safe(struct device *dev) {} 171static inline void pm_runtime_irq_safe(struct device *dev) {}
172static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
170 173
171static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; } 174static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
172static inline void pm_runtime_mark_last_busy(struct device *dev) {} 175static inline void pm_runtime_mark_last_busy(struct device *dev) {}
@@ -179,7 +182,7 @@ static inline unsigned long pm_runtime_autosuspend_expiration(
179static inline void pm_runtime_set_memalloc_noio(struct device *dev, 182static inline void pm_runtime_set_memalloc_noio(struct device *dev,
180 bool enable){} 183 bool enable){}
181 184
182#endif /* !CONFIG_PM_RUNTIME */ 185#endif /* !CONFIG_PM */
183 186
184static inline int pm_runtime_idle(struct device *dev) 187static inline int pm_runtime_idle(struct device *dev)
185{ 188{
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
index fe25876c1a5d..17d7d0d20eca 100644
--- a/include/linux/pnfs_osd_xdr.h
+++ b/include/linux/pnfs_osd_xdr.h
@@ -5,7 +5,7 @@
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Benny Halevy <bhalevy@panasas.com> 7 * Benny Halevy <bhalevy@panasas.com>
8 * Boaz Harrosh <bharrosh@panasas.com> 8 * Boaz Harrosh <ooo@electrozaur.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 11 * it under the terms of the GNU General Public License version 2
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index 07e7945a1ff2..e97fc656a058 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -253,9 +253,6 @@ struct charger_manager {
253 struct device *dev; 253 struct device *dev;
254 struct charger_desc *desc; 254 struct charger_desc *desc;
255 255
256 struct power_supply *fuel_gauge;
257 struct power_supply **charger_stat;
258
259#ifdef CONFIG_THERMAL 256#ifdef CONFIG_THERMAL
260 struct thermal_zone_device *tzd_batt; 257 struct thermal_zone_device *tzd_batt;
261#endif 258#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index f3dea41dbcd2..096dbced02ac 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -18,8 +18,6 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/notifier.h> 19#include <linux/notifier.h>
20 20
21struct device;
22
23/* 21/*
24 * All voltages, currents, charges, energies, time and temperatures in uV, 22 * All voltages, currents, charges, energies, time and temperatures in uV,
25 * µA, µAh, µWh, seconds and tenths of degree Celsius unless otherwise 23 * µA, µAh, µWh, seconds and tenths of degree Celsius unless otherwise
@@ -102,9 +100,11 @@ enum power_supply_property {
102 POWER_SUPPLY_PROP_VOLTAGE_NOW, 100 POWER_SUPPLY_PROP_VOLTAGE_NOW,
103 POWER_SUPPLY_PROP_VOLTAGE_AVG, 101 POWER_SUPPLY_PROP_VOLTAGE_AVG,
104 POWER_SUPPLY_PROP_VOLTAGE_OCV, 102 POWER_SUPPLY_PROP_VOLTAGE_OCV,
103 POWER_SUPPLY_PROP_VOLTAGE_BOOT,
105 POWER_SUPPLY_PROP_CURRENT_MAX, 104 POWER_SUPPLY_PROP_CURRENT_MAX,
106 POWER_SUPPLY_PROP_CURRENT_NOW, 105 POWER_SUPPLY_PROP_CURRENT_NOW,
107 POWER_SUPPLY_PROP_CURRENT_AVG, 106 POWER_SUPPLY_PROP_CURRENT_AVG,
107 POWER_SUPPLY_PROP_CURRENT_BOOT,
108 POWER_SUPPLY_PROP_POWER_NOW, 108 POWER_SUPPLY_PROP_POWER_NOW,
109 POWER_SUPPLY_PROP_POWER_AVG, 109 POWER_SUPPLY_PROP_POWER_AVG,
110 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 110 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
@@ -146,6 +146,7 @@ enum power_supply_property {
146 POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */ 146 POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */
147 POWER_SUPPLY_PROP_SCOPE, 147 POWER_SUPPLY_PROP_SCOPE,
148 POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, 148 POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
149 POWER_SUPPLY_PROP_CALIBRATE,
149 /* Properties of type `const char *' */ 150 /* Properties of type `const char *' */
150 POWER_SUPPLY_PROP_MODEL_NAME, 151 POWER_SUPPLY_PROP_MODEL_NAME,
151 POWER_SUPPLY_PROP_MANUFACTURER, 152 POWER_SUPPLY_PROP_MANUFACTURER,
@@ -172,6 +173,7 @@ union power_supply_propval {
172 const char *strval; 173 const char *strval;
173}; 174};
174 175
176struct device;
175struct device_node; 177struct device_node;
176 178
177struct power_supply { 179struct power_supply {
@@ -198,6 +200,12 @@ struct power_supply {
198 void (*external_power_changed)(struct power_supply *psy); 200 void (*external_power_changed)(struct power_supply *psy);
199 void (*set_charged)(struct power_supply *psy); 201 void (*set_charged)(struct power_supply *psy);
200 202
203 /*
204 * Set if thermal zone should not be created for this power supply.
205 * For example for virtual supplies forwarding calls to actual
206 * sensors or other supplies.
207 */
208 bool no_thermal;
201 /* For APM emulation, think legacy userspace. */ 209 /* For APM emulation, think legacy userspace. */
202 int use_for_apm; 210 int use_for_apm;
203 211
@@ -291,6 +299,7 @@ static inline bool power_supply_is_amp_property(enum power_supply_property psp)
291 case POWER_SUPPLY_PROP_CURRENT_MAX: 299 case POWER_SUPPLY_PROP_CURRENT_MAX:
292 case POWER_SUPPLY_PROP_CURRENT_NOW: 300 case POWER_SUPPLY_PROP_CURRENT_NOW:
293 case POWER_SUPPLY_PROP_CURRENT_AVG: 301 case POWER_SUPPLY_PROP_CURRENT_AVG:
302 case POWER_SUPPLY_PROP_CURRENT_BOOT:
294 return 1; 303 return 1;
295 default: 304 default:
296 break; 305 break;
@@ -315,6 +324,7 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
315 case POWER_SUPPLY_PROP_VOLTAGE_NOW: 324 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
316 case POWER_SUPPLY_PROP_VOLTAGE_AVG: 325 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
317 case POWER_SUPPLY_PROP_VOLTAGE_OCV: 326 case POWER_SUPPLY_PROP_VOLTAGE_OCV:
327 case POWER_SUPPLY_PROP_VOLTAGE_BOOT:
318 case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: 328 case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
319 case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: 329 case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
320 case POWER_SUPPLY_PROP_POWER_NOW: 330 case POWER_SUPPLY_PROP_POWER_NOW:
diff --git a/include/linux/printk.h b/include/linux/printk.h
index d78125f73ac4..c8f170324e64 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -118,12 +118,13 @@ int no_printk(const char *fmt, ...)
118#ifdef CONFIG_EARLY_PRINTK 118#ifdef CONFIG_EARLY_PRINTK
119extern asmlinkage __printf(1, 2) 119extern asmlinkage __printf(1, 2)
120void early_printk(const char *fmt, ...); 120void early_printk(const char *fmt, ...);
121void early_vprintk(const char *fmt, va_list ap);
122#else 121#else
123static inline __printf(1, 2) __cold 122static inline __printf(1, 2) __cold
124void early_printk(const char *s, ...) { } 123void early_printk(const char *s, ...) { }
125#endif 124#endif
126 125
126typedef int(*printk_func_t)(const char *fmt, va_list args);
127
127#ifdef CONFIG_PRINTK 128#ifdef CONFIG_PRINTK
128asmlinkage __printf(5, 0) 129asmlinkage __printf(5, 0)
129int vprintk_emit(int facility, int level, 130int vprintk_emit(int facility, int level,
diff --git a/include/linux/prio_heap.h b/include/linux/prio_heap.h
deleted file mode 100644
index 08094350f26a..000000000000
--- a/include/linux/prio_heap.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _LINUX_PRIO_HEAP_H
2#define _LINUX_PRIO_HEAP_H
3
4/*
5 * Simple insertion-only static-sized priority heap containing
6 * pointers, based on CLR, chapter 7
7 */
8
9#include <linux/gfp.h>
10
11/**
12 * struct ptr_heap - simple static-sized priority heap
13 * @ptrs - pointer to data area
14 * @max - max number of elements that can be stored in @ptrs
15 * @size - current number of valid elements in @ptrs (in the range 0..@size-1
16 * @gt: comparison operator, which should implement "greater than"
17 */
18struct ptr_heap {
19 void **ptrs;
20 int max;
21 int size;
22 int (*gt)(void *, void *);
23};
24
25/**
26 * heap_init - initialize an empty heap with a given memory size
27 * @heap: the heap structure to be initialized
28 * @size: amount of memory to use in bytes
29 * @gfp_mask: mask to pass to kmalloc()
30 * @gt: comparison operator, which should implement "greater than"
31 */
32extern int heap_init(struct ptr_heap *heap, size_t size, gfp_t gfp_mask,
33 int (*gt)(void *, void *));
34
35/**
36 * heap_free - release a heap's storage
37 * @heap: the heap structure whose data should be released
38 */
39void heap_free(struct ptr_heap *heap);
40
41/**
42 * heap_insert - insert a value into the heap and return any overflowed value
43 * @heap: the heap to be operated on
44 * @p: the pointer to be inserted
45 *
46 * Attempts to insert the given value into the priority heap. If the
47 * heap is full prior to the insertion, then the resulting heap will
48 * consist of the smallest @max elements of the original heap and the
49 * new element; the greatest element will be removed from the heap and
50 * returned. Note that the returned element will be the new element
51 * (i.e. no change to the heap) if the new element is greater than all
52 * elements currently in the heap.
53 */
54extern void *heap_insert(struct ptr_heap *heap, void *p);
55
56
57
58#endif /* _LINUX_PRIO_HEAP_H */
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 9d117f61d976..b97bf2ef996e 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -74,6 +74,8 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p
74 74
75#endif /* CONFIG_PROC_FS */ 75#endif /* CONFIG_PROC_FS */
76 76
77struct net;
78
77static inline struct proc_dir_entry *proc_net_mkdir( 79static inline struct proc_dir_entry *proc_net_mkdir(
78 struct net *net, const char *name, struct proc_dir_entry *parent) 80 struct net *net, const char *name, struct proc_dir_entry *parent)
79{ 81{
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 34a1e105bef4..42dfc615dbf8 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -4,21 +4,18 @@
4#ifndef _LINUX_PROC_NS_H 4#ifndef _LINUX_PROC_NS_H
5#define _LINUX_PROC_NS_H 5#define _LINUX_PROC_NS_H
6 6
7#include <linux/ns_common.h>
8
7struct pid_namespace; 9struct pid_namespace;
8struct nsproxy; 10struct nsproxy;
11struct path;
9 12
10struct proc_ns_operations { 13struct proc_ns_operations {
11 const char *name; 14 const char *name;
12 int type; 15 int type;
13 void *(*get)(struct task_struct *task); 16 struct ns_common *(*get)(struct task_struct *task);
14 void (*put)(void *ns); 17 void (*put)(struct ns_common *ns);
15 int (*install)(struct nsproxy *nsproxy, void *ns); 18 int (*install)(struct nsproxy *nsproxy, struct ns_common *ns);
16 unsigned int (*inum)(void *ns);
17};
18
19struct proc_ns {
20 void *ns;
21 const struct proc_ns_operations *ns_ops;
22}; 19};
23 20
24extern const struct proc_ns_operations netns_operations; 21extern const struct proc_ns_operations netns_operations;
@@ -43,32 +40,38 @@ enum {
43 40
44extern int pid_ns_prepare_proc(struct pid_namespace *ns); 41extern int pid_ns_prepare_proc(struct pid_namespace *ns);
45extern void pid_ns_release_proc(struct pid_namespace *ns); 42extern void pid_ns_release_proc(struct pid_namespace *ns);
46extern struct file *proc_ns_fget(int fd);
47extern struct proc_ns *get_proc_ns(struct inode *);
48extern int proc_alloc_inum(unsigned int *pino); 43extern int proc_alloc_inum(unsigned int *pino);
49extern void proc_free_inum(unsigned int inum); 44extern void proc_free_inum(unsigned int inum);
50extern bool proc_ns_inode(struct inode *inode);
51 45
52#else /* CONFIG_PROC_FS */ 46#else /* CONFIG_PROC_FS */
53 47
54static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; } 48static inline int pid_ns_prepare_proc(struct pid_namespace *ns) { return 0; }
55static inline void pid_ns_release_proc(struct pid_namespace *ns) {} 49static inline void pid_ns_release_proc(struct pid_namespace *ns) {}
56 50
57static inline struct file *proc_ns_fget(int fd)
58{
59 return ERR_PTR(-EINVAL);
60}
61
62static inline struct proc_ns *get_proc_ns(struct inode *inode) { return NULL; }
63
64static inline int proc_alloc_inum(unsigned int *inum) 51static inline int proc_alloc_inum(unsigned int *inum)
65{ 52{
66 *inum = 1; 53 *inum = 1;
67 return 0; 54 return 0;
68} 55}
69static inline void proc_free_inum(unsigned int inum) {} 56static inline void proc_free_inum(unsigned int inum) {}
70static inline bool proc_ns_inode(struct inode *inode) { return false; }
71 57
72#endif /* CONFIG_PROC_FS */ 58#endif /* CONFIG_PROC_FS */
73 59
60static inline int ns_alloc_inum(struct ns_common *ns)
61{
62 atomic_long_set(&ns->stashed, 0);
63 return proc_alloc_inum(&ns->inum);
64}
65
66#define ns_free_inum(ns) proc_free_inum((ns)->inum)
67
68extern struct file *proc_ns_fget(int fd);
69#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
70extern void *ns_get_path(struct path *path, struct task_struct *task,
71 const struct proc_ns_operations *ns_ops);
72
73extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
74 const struct proc_ns_operations *ns_ops);
75extern void nsfs_init(void);
76
74#endif /* _LINUX_PROC_NS_H */ 77#endif /* _LINUX_PROC_NS_H */
diff --git a/include/linux/property.h b/include/linux/property.h
new file mode 100644
index 000000000000..a6a3d98bd7e9
--- /dev/null
+++ b/include/linux/property.h
@@ -0,0 +1,143 @@
1/*
2 * property.h - Unified device property interface.
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * Authors: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
6 * Mika Westerberg <mika.westerberg@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef _LINUX_PROPERTY_H_
14#define _LINUX_PROPERTY_H_
15
16#include <linux/types.h>
17
18struct device;
19
20enum dev_prop_type {
21 DEV_PROP_U8,
22 DEV_PROP_U16,
23 DEV_PROP_U32,
24 DEV_PROP_U64,
25 DEV_PROP_STRING,
26 DEV_PROP_MAX,
27};
28
29bool device_property_present(struct device *dev, const char *propname);
30int device_property_read_u8_array(struct device *dev, const char *propname,
31 u8 *val, size_t nval);
32int device_property_read_u16_array(struct device *dev, const char *propname,
33 u16 *val, size_t nval);
34int device_property_read_u32_array(struct device *dev, const char *propname,
35 u32 *val, size_t nval);
36int device_property_read_u64_array(struct device *dev, const char *propname,
37 u64 *val, size_t nval);
38int device_property_read_string_array(struct device *dev, const char *propname,
39 const char **val, size_t nval);
40int device_property_read_string(struct device *dev, const char *propname,
41 const char **val);
42
43enum fwnode_type {
44 FWNODE_INVALID = 0,
45 FWNODE_OF,
46 FWNODE_ACPI,
47};
48
49struct fwnode_handle {
50 enum fwnode_type type;
51};
52
53bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname);
54int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
55 const char *propname, u8 *val,
56 size_t nval);
57int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
58 const char *propname, u16 *val,
59 size_t nval);
60int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
61 const char *propname, u32 *val,
62 size_t nval);
63int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
64 const char *propname, u64 *val,
65 size_t nval);
66int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
67 const char *propname, const char **val,
68 size_t nval);
69int fwnode_property_read_string(struct fwnode_handle *fwnode,
70 const char *propname, const char **val);
71
72struct fwnode_handle *device_get_next_child_node(struct device *dev,
73 struct fwnode_handle *child);
74
75#define device_for_each_child_node(dev, child) \
76 for (child = device_get_next_child_node(dev, NULL); child; \
77 child = device_get_next_child_node(dev, child))
78
79void fwnode_handle_put(struct fwnode_handle *fwnode);
80
81unsigned int device_get_child_node_count(struct device *dev);
82
83static inline bool device_property_read_bool(struct device *dev,
84 const char *propname)
85{
86 return device_property_present(dev, propname);
87}
88
89static inline int device_property_read_u8(struct device *dev,
90 const char *propname, u8 *val)
91{
92 return device_property_read_u8_array(dev, propname, val, 1);
93}
94
95static inline int device_property_read_u16(struct device *dev,
96 const char *propname, u16 *val)
97{
98 return device_property_read_u16_array(dev, propname, val, 1);
99}
100
101static inline int device_property_read_u32(struct device *dev,
102 const char *propname, u32 *val)
103{
104 return device_property_read_u32_array(dev, propname, val, 1);
105}
106
107static inline int device_property_read_u64(struct device *dev,
108 const char *propname, u64 *val)
109{
110 return device_property_read_u64_array(dev, propname, val, 1);
111}
112
113static inline bool fwnode_property_read_bool(struct fwnode_handle *fwnode,
114 const char *propname)
115{
116 return fwnode_property_present(fwnode, propname);
117}
118
119static inline int fwnode_property_read_u8(struct fwnode_handle *fwnode,
120 const char *propname, u8 *val)
121{
122 return fwnode_property_read_u8_array(fwnode, propname, val, 1);
123}
124
125static inline int fwnode_property_read_u16(struct fwnode_handle *fwnode,
126 const char *propname, u16 *val)
127{
128 return fwnode_property_read_u16_array(fwnode, propname, val, 1);
129}
130
131static inline int fwnode_property_read_u32(struct fwnode_handle *fwnode,
132 const char *propname, u32 *val)
133{
134 return fwnode_property_read_u32_array(fwnode, propname, val, 1);
135}
136
137static inline int fwnode_property_read_u64(struct fwnode_handle *fwnode,
138 const char *propname, u64 *val)
139{
140 return fwnode_property_read_u64_array(fwnode, propname, val, 1);
141}
142
143#endif /* _LINUX_PROPERTY_H_ */
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 26a8a4ed9b07..00e8e8fa7358 100644
--- a/include/linux/proportions.h
+++ b/include/linux/proportions.h
@@ -12,6 +12,7 @@
12#include <linux/percpu_counter.h> 12#include <linux/percpu_counter.h>
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/gfp.h>
15 16
16struct prop_global { 17struct prop_global {
17 /* 18 /*
@@ -40,7 +41,7 @@ struct prop_descriptor {
40 struct mutex mutex; /* serialize the prop_global switch */ 41 struct mutex mutex; /* serialize the prop_global switch */
41}; 42};
42 43
43int prop_descriptor_init(struct prop_descriptor *pd, int shift); 44int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
44void prop_change_shift(struct prop_descriptor *pd, int new_shift); 45void prop_change_shift(struct prop_descriptor *pd, int new_shift);
45 46
46/* 47/*
@@ -61,7 +62,7 @@ struct prop_local_percpu {
61 raw_spinlock_t lock; /* protect the snapshot state */ 62 raw_spinlock_t lock; /* protect the snapshot state */
62}; 63};
63 64
64int prop_local_init_percpu(struct prop_local_percpu *pl); 65int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
65void prop_local_destroy_percpu(struct prop_local_percpu *pl); 66void prop_local_destroy_percpu(struct prop_local_percpu *pl);
66void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl); 67void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
67void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl, 68void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9974975d40db..4af3fdc85b01 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -53,7 +53,8 @@ struct persistent_ram_zone {
53}; 53};
54 54
55struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, 55struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
56 u32 sig, struct persistent_ram_ecc_info *ecc_info); 56 u32 sig, struct persistent_ram_ecc_info *ecc_info,
57 unsigned int memtype);
57void persistent_ram_free(struct persistent_ram_zone *prz); 58void persistent_ram_free(struct persistent_ram_zone *prz);
58void persistent_ram_zap(struct persistent_ram_zone *prz); 59void persistent_ram_zap(struct persistent_ram_zone *prz);
59 60
@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
76struct ramoops_platform_data { 77struct ramoops_platform_data {
77 unsigned long mem_size; 78 unsigned long mem_size;
78 unsigned long mem_address; 79 unsigned long mem_address;
80 unsigned int mem_type;
79 unsigned long record_size; 81 unsigned long record_size;
80 unsigned long console_size; 82 unsigned long console_size;
81 unsigned long ftrace_size; 83 unsigned long ftrace_size;
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index cc79eff4a1ad..987a73a40ef8 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -52,7 +52,7 @@ extern void ptrace_notify(int exit_code);
52extern void __ptrace_link(struct task_struct *child, 52extern void __ptrace_link(struct task_struct *child,
53 struct task_struct *new_parent); 53 struct task_struct *new_parent);
54extern void __ptrace_unlink(struct task_struct *child); 54extern void __ptrace_unlink(struct task_struct *child);
55extern void exit_ptrace(struct task_struct *tracer); 55extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
56#define PTRACE_MODE_READ 0x01 56#define PTRACE_MODE_READ 0x01
57#define PTRACE_MODE_ATTACH 0x02 57#define PTRACE_MODE_ATTACH 0x02
58#define PTRACE_MODE_NOAUDIT 0x04 58#define PTRACE_MODE_NOAUDIT 0x04
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h
index 18d75e795606..e1ab6e86cdb3 100644
--- a/include/linux/pxa168_eth.h
+++ b/include/linux/pxa168_eth.h
@@ -4,6 +4,8 @@
4#ifndef __LINUX_PXA168_ETH_H 4#ifndef __LINUX_PXA168_ETH_H
5#define __LINUX_PXA168_ETH_H 5#define __LINUX_PXA168_ETH_H
6 6
7#include <linux/phy.h>
8
7struct pxa168_eth_platform_data { 9struct pxa168_eth_platform_data {
8 int port_number; 10 int port_number;
9 int phy_addr; 11 int phy_addr;
@@ -13,6 +15,7 @@ struct pxa168_eth_platform_data {
13 */ 15 */
14 int speed; /* 0, SPEED_10, SPEED_100 */ 16 int speed; /* 0, SPEED_10, SPEED_100 */
15 int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ 17 int duplex; /* DUPLEX_HALF or DUPLEX_FULL */
18 phy_interface_t intf;
16 19
17 /* 20 /*
18 * Override default RX/TX queue sizes if nonzero. 21 * Override default RX/TX queue sizes if nonzero.
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index f2b405116166..77aed9ea1d26 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -108,6 +108,25 @@
108#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */ 108#define SSCR1_RxTresh(x) (((x) - 1) << 10) /* level [1..4] */
109#endif 109#endif
110 110
111/* QUARK_X1000 SSCR0 bit definition */
112#define QUARK_X1000_SSCR0_DSS (0x1F) /* Data Size Select (mask) */
113#define QUARK_X1000_SSCR0_DataSize(x) ((x) - 1) /* Data Size Select [4..32] */
114#define QUARK_X1000_SSCR0_FRF (0x3 << 5) /* FRame Format (mask) */
115#define QUARK_X1000_SSCR0_Motorola (0x0 << 5) /* Motorola's Serial Peripheral Interface (SPI) */
116
117#define RX_THRESH_QUARK_X1000_DFLT 1
118#define TX_THRESH_QUARK_X1000_DFLT 16
119
120#define QUARK_X1000_SSSR_TFL_MASK (0x1F << 8) /* Transmit FIFO Level mask */
121#define QUARK_X1000_SSSR_RFL_MASK (0x1F << 13) /* Receive FIFO Level mask */
122
123#define QUARK_X1000_SSCR1_TFT (0x1F << 6) /* Transmit FIFO Threshold (mask) */
124#define QUARK_X1000_SSCR1_TxTresh(x) (((x) - 1) << 6) /* level [1..32] */
125#define QUARK_X1000_SSCR1_RFT (0x1F << 11) /* Receive FIFO Threshold (mask) */
126#define QUARK_X1000_SSCR1_RxTresh(x) (((x) - 1) << 11) /* level [1..32] */
127#define QUARK_X1000_SSCR1_STRF (1 << 17) /* Select FIFO or EFWR */
128#define QUARK_X1000_SSCR1_EFWR (1 << 16) /* Enable FIFO Write/Read */
129
111/* extra bits in PXA255, PXA26x and PXA27x SSP ports */ 130/* extra bits in PXA255, PXA26x and PXA27x SSP ports */
112#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */ 131#define SSCR0_TISSP (1 << 4) /* TI Sync Serial Protocol */
113#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */ 132#define SSCR0_PSP (3 << 4) /* PSP - Programmable Serial Protocol */
@@ -175,6 +194,7 @@ enum pxa_ssp_type {
175 PXA910_SSP, 194 PXA910_SSP,
176 CE4100_SSP, 195 CE4100_SSP,
177 LPSS_SSP, 196 LPSS_SSP,
197 QUARK_X1000_SSP,
178}; 198};
179 199
180struct ssp_device { 200struct ssp_device {
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 80d345a3524c..50978b781a19 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -56,6 +56,11 @@ enum quota_type {
56 PRJQUOTA = 2, /* element used for project quotas */ 56 PRJQUOTA = 2, /* element used for project quotas */
57}; 57};
58 58
59/* Masks for quota types when used as a bitmask */
60#define QTYPE_MASK_USR (1 << USRQUOTA)
61#define QTYPE_MASK_GRP (1 << GRPQUOTA)
62#define QTYPE_MASK_PRJ (1 << PRJQUOTA)
63
59typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */ 64typedef __kernel_uid32_t qid_t; /* Type in which we store ids in memory */
60typedef long long qsize_t; /* Type in which we store sizes */ 65typedef long long qsize_t; /* Type in which we store sizes */
61 66
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 1d3eee594cd6..f23538a6e411 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -64,10 +64,10 @@ void dquot_destroy(struct dquot *dquot);
64int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags); 64int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags);
65void __dquot_free_space(struct inode *inode, qsize_t number, int flags); 65void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
66 66
67int dquot_alloc_inode(const struct inode *inode); 67int dquot_alloc_inode(struct inode *inode);
68 68
69int dquot_claim_space_nodirty(struct inode *inode, qsize_t number); 69int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
70void dquot_free_inode(const struct inode *inode); 70void dquot_free_inode(struct inode *inode);
71void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number); 71void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
72 72
73int dquot_disable(struct super_block *sb, int type, unsigned int flags); 73int dquot_disable(struct super_block *sb, int type, unsigned int flags);
@@ -213,12 +213,12 @@ static inline void dquot_drop(struct inode *inode)
213{ 213{
214} 214}
215 215
216static inline int dquot_alloc_inode(const struct inode *inode) 216static inline int dquot_alloc_inode(struct inode *inode)
217{ 217{
218 return 0; 218 return 0;
219} 219}
220 220
221static inline void dquot_free_inode(const struct inode *inode) 221static inline void dquot_free_inode(struct inode *inode)
222{ 222{
223} 223}
224 224
diff --git a/include/linux/random.h b/include/linux/random.h
index 57fbbffd77a0..b05856e16b75 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -26,7 +26,7 @@ unsigned int get_random_int(void);
26unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len); 26unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
27 27
28u32 prandom_u32(void); 28u32 prandom_u32(void);
29void prandom_bytes(void *buf, int nbytes); 29void prandom_bytes(void *buf, size_t nbytes);
30void prandom_seed(u32 seed); 30void prandom_seed(u32 seed);
31void prandom_reseed_late(void); 31void prandom_reseed_late(void);
32 32
@@ -35,7 +35,7 @@ struct rnd_state {
35}; 35};
36 36
37u32 prandom_u32_state(struct rnd_state *state); 37u32 prandom_u32_state(struct rnd_state *state);
38void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes); 38void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
39 39
40/** 40/**
41 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro) 41 * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 0a260d8a18bf..18102529254e 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -17,14 +17,20 @@ struct ratelimit_state {
17 unsigned long begin; 17 unsigned long begin;
18}; 18};
19 19
20#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \ 20#define RATELIMIT_STATE_INIT(name, interval_init, burst_init) { \
21 \
22 struct ratelimit_state name = { \
23 .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ 21 .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
24 .interval = interval_init, \ 22 .interval = interval_init, \
25 .burst = burst_init, \ 23 .burst = burst_init, \
26 } 24 }
27 25
26#define RATELIMIT_STATE_INIT_DISABLED \
27 RATELIMIT_STATE_INIT(ratelimit_state, 0, DEFAULT_RATELIMIT_BURST)
28
29#define DEFINE_RATELIMIT_STATE(name, interval_init, burst_init) \
30 \
31 struct ratelimit_state name = \
32 RATELIMIT_STATE_INIT(name, interval_init, burst_init) \
33
28static inline void ratelimit_state_init(struct ratelimit_state *rs, 34static inline void ratelimit_state_init(struct ratelimit_state *rs,
29 int interval, int burst) 35 int interval, int burst)
30{ 36{
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index fea49b5da12a..378c5ee75f78 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -43,6 +43,16 @@ struct rb_augment_callbacks {
43 43
44extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, 44extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
45 void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); 45 void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
46/*
47 * Fixup the rbtree and update the augmented information when rebalancing.
48 *
49 * On insertion, the user must update the augmented information on the path
50 * leading to the inserted node, then call rb_link_node() as usual and
51 * rb_augment_inserted() instead of the usual rb_insert_color() call.
52 * If rb_augment_inserted() rebalances the rbtree, it will callback into
53 * a user provided function to update the augmented information on the
54 * affected subtrees.
55 */
46static inline void 56static inline void
47rb_insert_augmented(struct rb_node *node, struct rb_root *root, 57rb_insert_augmented(struct rb_node *node, struct rb_root *root,
48 const struct rb_augment_callbacks *augment) 58 const struct rb_augment_callbacks *augment)
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 372ad5e0dcb8..529bc946f450 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -241,7 +241,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
241 * list_entry_rcu - get the struct for this entry 241 * list_entry_rcu - get the struct for this entry
242 * @ptr: the &struct list_head pointer. 242 * @ptr: the &struct list_head pointer.
243 * @type: the type of the struct this is embedded in. 243 * @type: the type of the struct this is embedded in.
244 * @member: the name of the list_struct within the struct. 244 * @member: the name of the list_head within the struct.
245 * 245 *
246 * This primitive may safely run concurrently with the _rcu list-mutation 246 * This primitive may safely run concurrently with the _rcu list-mutation
247 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). 247 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
@@ -278,7 +278,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
278 * list_first_or_null_rcu - get the first element from a list 278 * list_first_or_null_rcu - get the first element from a list
279 * @ptr: the list head to take the element from. 279 * @ptr: the list head to take the element from.
280 * @type: the type of the struct this is embedded in. 280 * @type: the type of the struct this is embedded in.
281 * @member: the name of the list_struct within the struct. 281 * @member: the name of the list_head within the struct.
282 * 282 *
283 * Note that if the list is empty, it returns NULL. 283 * Note that if the list is empty, it returns NULL.
284 * 284 *
@@ -296,7 +296,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
296 * list_for_each_entry_rcu - iterate over rcu list of given type 296 * list_for_each_entry_rcu - iterate over rcu list of given type
297 * @pos: the type * to use as a loop cursor. 297 * @pos: the type * to use as a loop cursor.
298 * @head: the head for your list. 298 * @head: the head for your list.
299 * @member: the name of the list_struct within the struct. 299 * @member: the name of the list_head within the struct.
300 * 300 *
301 * This list-traversal primitive may safely run concurrently with 301 * This list-traversal primitive may safely run concurrently with
302 * the _rcu list-mutation primitives such as list_add_rcu() 302 * the _rcu list-mutation primitives such as list_add_rcu()
@@ -311,7 +311,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
311 * list_for_each_entry_continue_rcu - continue iteration over list of given type 311 * list_for_each_entry_continue_rcu - continue iteration over list of given type
312 * @pos: the type * to use as a loop cursor. 312 * @pos: the type * to use as a loop cursor.
313 * @head: the head for your list. 313 * @head: the head for your list.
314 * @member: the name of the list_struct within the struct. 314 * @member: the name of the list_head within the struct.
315 * 315 *
316 * Continue to iterate over list of given type, continuing after 316 * Continue to iterate over list of given type, continuing after
317 * the current position. 317 * the current position.
@@ -542,6 +542,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
542 pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\ 542 pos = hlist_entry_safe(rcu_dereference_bh((pos)->member.next),\
543 typeof(*(pos)), member)) 543 typeof(*(pos)), member))
544 544
545/**
546 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
547 * @pos: the type * to use as a loop cursor.
548 * @member: the name of the hlist_node within the struct.
549 */
550#define hlist_for_each_entry_from_rcu(pos, member) \
551 for (; pos; \
552 pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
553 typeof(*(pos)), member))
545 554
546#endif /* __KERNEL__ */ 555#endif /* __KERNEL__ */
547#endif 556#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index d231aa17b1d7..ed4f5939a452 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -47,19 +47,17 @@
47#include <asm/barrier.h> 47#include <asm/barrier.h>
48 48
49extern int rcu_expedited; /* for sysctl */ 49extern int rcu_expedited; /* for sysctl */
50#ifdef CONFIG_RCU_TORTURE_TEST
51extern int rcutorture_runnable; /* for sysctl */
52#endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
53 50
54enum rcutorture_type { 51enum rcutorture_type {
55 RCU_FLAVOR, 52 RCU_FLAVOR,
56 RCU_BH_FLAVOR, 53 RCU_BH_FLAVOR,
57 RCU_SCHED_FLAVOR, 54 RCU_SCHED_FLAVOR,
55 RCU_TASKS_FLAVOR,
58 SRCU_FLAVOR, 56 SRCU_FLAVOR,
59 INVALID_RCU_FLAVOR 57 INVALID_RCU_FLAVOR
60}; 58};
61 59
62#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 60#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
63void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, 61void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
64 unsigned long *gpnum, unsigned long *completed); 62 unsigned long *gpnum, unsigned long *completed);
65void rcutorture_record_test_transition(void); 63void rcutorture_record_test_transition(void);
@@ -197,6 +195,28 @@ void call_rcu_sched(struct rcu_head *head,
197 195
198void synchronize_sched(void); 196void synchronize_sched(void);
199 197
198/**
199 * call_rcu_tasks() - Queue an RCU for invocation task-based grace period
200 * @head: structure to be used for queueing the RCU updates.
201 * @func: actual callback function to be invoked after the grace period
202 *
203 * The callback function will be invoked some time after a full grace
204 * period elapses, in other words after all currently executing RCU
205 * read-side critical sections have completed. call_rcu_tasks() assumes
206 * that the read-side critical sections end at a voluntary context
207 * switch (not a preemption!), entry into idle, or transition to usermode
208 * execution. As such, there are no read-side primitives analogous to
209 * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
210 * to determine that all tasks have passed through a safe state, not so
211 * much for data-strcuture synchronization.
212 *
213 * See the description of call_rcu() for more detailed information on
214 * memory ordering guarantees.
215 */
216void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
217void synchronize_rcu_tasks(void);
218void rcu_barrier_tasks(void);
219
200#ifdef CONFIG_PREEMPT_RCU 220#ifdef CONFIG_PREEMPT_RCU
201 221
202void __rcu_read_lock(void); 222void __rcu_read_lock(void);
@@ -238,9 +258,9 @@ static inline int rcu_preempt_depth(void)
238 258
239/* Internal to kernel */ 259/* Internal to kernel */
240void rcu_init(void); 260void rcu_init(void);
241void rcu_sched_qs(int cpu); 261void rcu_sched_qs(void);
242void rcu_bh_qs(int cpu); 262void rcu_bh_qs(void);
243void rcu_check_callbacks(int cpu, int user); 263void rcu_check_callbacks(int user);
244struct notifier_block; 264struct notifier_block;
245void rcu_idle_enter(void); 265void rcu_idle_enter(void);
246void rcu_idle_exit(void); 266void rcu_idle_exit(void);
@@ -269,6 +289,14 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
269 struct task_struct *next) { } 289 struct task_struct *next) { }
270#endif /* CONFIG_RCU_USER_QS */ 290#endif /* CONFIG_RCU_USER_QS */
271 291
292#ifdef CONFIG_RCU_NOCB_CPU
293void rcu_init_nohz(void);
294#else /* #ifdef CONFIG_RCU_NOCB_CPU */
295static inline void rcu_init_nohz(void)
296{
297}
298#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
299
272/** 300/**
273 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers 301 * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
274 * @a: Code that RCU needs to pay attention to. 302 * @a: Code that RCU needs to pay attention to.
@@ -294,6 +322,36 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
294 rcu_irq_exit(); \ 322 rcu_irq_exit(); \
295 } while (0) 323 } while (0)
296 324
325/*
326 * Note a voluntary context switch for RCU-tasks benefit. This is a
327 * macro rather than an inline function to avoid #include hell.
328 */
329#ifdef CONFIG_TASKS_RCU
330#define TASKS_RCU(x) x
331extern struct srcu_struct tasks_rcu_exit_srcu;
332#define rcu_note_voluntary_context_switch(t) \
333 do { \
334 if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
335 ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
336 } while (0)
337#else /* #ifdef CONFIG_TASKS_RCU */
338#define TASKS_RCU(x) do { } while (0)
339#define rcu_note_voluntary_context_switch(t) do { } while (0)
340#endif /* #else #ifdef CONFIG_TASKS_RCU */
341
342/**
343 * cond_resched_rcu_qs - Report potential quiescent states to RCU
344 *
345 * This macro resembles cond_resched(), except that it is defined to
346 * report potential quiescent states to RCU-tasks even if the cond_resched()
347 * machinery were to be shut off, as some advocate for PREEMPT kernels.
348 */
349#define cond_resched_rcu_qs() \
350do { \
351 if (!cond_resched()) \
352 rcu_note_voluntary_context_switch(current); \
353} while (0)
354
297#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) 355#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
298bool __rcu_is_watching(void); 356bool __rcu_is_watching(void);
299#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 357#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
@@ -307,7 +365,7 @@ typedef void call_rcu_func_t(struct rcu_head *head,
307 void (*func)(struct rcu_head *head)); 365 void (*func)(struct rcu_head *head));
308void wait_rcu_gp(call_rcu_func_t crf); 366void wait_rcu_gp(call_rcu_func_t crf);
309 367
310#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) 368#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
311#include <linux/rcutree.h> 369#include <linux/rcutree.h>
312#elif defined(CONFIG_TINY_RCU) 370#elif defined(CONFIG_TINY_RCU)
313#include <linux/rcutiny.h> 371#include <linux/rcutiny.h>
@@ -349,7 +407,7 @@ bool rcu_lockdep_current_cpu_online(void);
349#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ 407#else /* #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
350static inline bool rcu_lockdep_current_cpu_online(void) 408static inline bool rcu_lockdep_current_cpu_online(void)
351{ 409{
352 return 1; 410 return true;
353} 411}
354#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */ 412#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PROVE_RCU) */
355 413
@@ -371,41 +429,7 @@ extern struct lockdep_map rcu_sched_lock_map;
371extern struct lockdep_map rcu_callback_map; 429extern struct lockdep_map rcu_callback_map;
372int debug_lockdep_rcu_enabled(void); 430int debug_lockdep_rcu_enabled(void);
373 431
374/** 432int rcu_read_lock_held(void);
375 * rcu_read_lock_held() - might we be in RCU read-side critical section?
376 *
377 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
378 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
379 * this assumes we are in an RCU read-side critical section unless it can
380 * prove otherwise. This is useful for debug checks in functions that
381 * require that they be called within an RCU read-side critical section.
382 *
383 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
384 * and while lockdep is disabled.
385 *
386 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
387 * occur in the same context, for example, it is illegal to invoke
388 * rcu_read_unlock() in process context if the matching rcu_read_lock()
389 * was invoked from within an irq handler.
390 *
391 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
392 * offline from an RCU perspective, so check for those as well.
393 */
394static inline int rcu_read_lock_held(void)
395{
396 if (!debug_lockdep_rcu_enabled())
397 return 1;
398 if (!rcu_is_watching())
399 return 0;
400 if (!rcu_lockdep_current_cpu_online())
401 return 0;
402 return lock_is_held(&rcu_lock_map);
403}
404
405/*
406 * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
407 * hell.
408 */
409int rcu_read_lock_bh_held(void); 433int rcu_read_lock_bh_held(void);
410 434
411/** 435/**
@@ -593,6 +617,21 @@ static inline void rcu_preempt_sleep_check(void)
593#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) 617#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
594 618
595/** 619/**
620 * lockless_dereference() - safely load a pointer for later dereference
621 * @p: The pointer to load
622 *
623 * Similar to rcu_dereference(), but for situations where the pointed-to
624 * object's lifetime is managed by something other than RCU. That
625 * "something other" might be reference counting or simple immortality.
626 */
627#define lockless_dereference(p) \
628({ \
629 typeof(p) _________p1 = ACCESS_ONCE(p); \
630 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
631 (_________p1); \
632})
633
634/**
596 * rcu_assign_pointer() - assign to RCU-protected pointer 635 * rcu_assign_pointer() - assign to RCU-protected pointer
597 * @p: pointer to assign to 636 * @p: pointer to assign to
598 * @v: value to assign (publish) 637 * @v: value to assign (publish)
@@ -828,7 +867,7 @@ static inline void rcu_preempt_sleep_check(void)
828 * 867 *
829 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), 868 * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
830 * it is illegal to block while in an RCU read-side critical section. 869 * it is illegal to block while in an RCU read-side critical section.
831 * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT 870 * In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPT
832 * kernel builds, RCU read-side critical sections may be preempted, 871 * kernel builds, RCU read-side critical sections may be preempted,
833 * but explicit blocking is illegal. Finally, in preemptible RCU 872 * but explicit blocking is illegal. Finally, in preemptible RCU
834 * implementations in real-time (with -rt patchset) kernel builds, RCU 873 * implementations in real-time (with -rt patchset) kernel builds, RCU
@@ -863,7 +902,9 @@ static inline void rcu_read_lock(void)
863 * Unfortunately, this function acquires the scheduler's runqueue and 902 * Unfortunately, this function acquires the scheduler's runqueue and
864 * priority-inheritance spinlocks. This means that deadlock could result 903 * priority-inheritance spinlocks. This means that deadlock could result
865 * if the caller of rcu_read_unlock() already holds one of these locks or 904 * if the caller of rcu_read_unlock() already holds one of these locks or
866 * any lock that is ever acquired while holding them. 905 * any lock that is ever acquired while holding them; or any lock which
906 * can be taken from interrupt context because rcu_boost()->rt_mutex_lock()
907 * does not disable irqs while taking ->wait_lock.
867 * 908 *
868 * That said, RCU readers are never priority boosted unless they were 909 * That said, RCU readers are never priority boosted unless they were
869 * preempted. Therefore, one way to avoid deadlock is to make sure 910 * preempted. Therefore, one way to avoid deadlock is to make sure
@@ -1023,6 +1064,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
1023 */ 1064 */
1024#define RCU_INIT_POINTER(p, v) \ 1065#define RCU_INIT_POINTER(p, v) \
1025 do { \ 1066 do { \
1067 rcu_dereference_sparse(p, __rcu); \
1026 p = RCU_INITIALIZER(v); \ 1068 p = RCU_INITIALIZER(v); \
1027 } while (0) 1069 } while (0)
1028 1070
@@ -1079,7 +1121,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
1079 __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) 1121 __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
1080 1122
1081#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) 1123#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
1082static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies) 1124static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
1083{ 1125{
1084 *delta_jiffies = ULONG_MAX; 1126 *delta_jiffies = ULONG_MAX;
1085 return 0; 1127 return 0;
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index d40a6a451330..0e5366200154 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -78,9 +78,9 @@ static inline void kfree_call_rcu(struct rcu_head *head,
78 call_rcu(head, func); 78 call_rcu(head, func);
79} 79}
80 80
81static inline void rcu_note_context_switch(int cpu) 81static inline void rcu_note_context_switch(void)
82{ 82{
83 rcu_sched_qs(cpu); 83 rcu_sched_qs();
84} 84}
85 85
86/* 86/*
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 3e2f5d432743..52953790dcca 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,9 +30,9 @@
30#ifndef __LINUX_RCUTREE_H 30#ifndef __LINUX_RCUTREE_H
31#define __LINUX_RCUTREE_H 31#define __LINUX_RCUTREE_H
32 32
33void rcu_note_context_switch(int cpu); 33void rcu_note_context_switch(void);
34#ifndef CONFIG_RCU_NOCB_CPU_ALL 34#ifndef CONFIG_RCU_NOCB_CPU_ALL
35int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); 35int rcu_needs_cpu(unsigned long *delta_jiffies);
36#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ 36#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
37void rcu_cpu_stall_reset(void); 37void rcu_cpu_stall_reset(void);
38 38
@@ -43,7 +43,7 @@ void rcu_cpu_stall_reset(void);
43 */ 43 */
44static inline void rcu_virt_note_context_switch(int cpu) 44static inline void rcu_virt_note_context_switch(int cpu)
45{ 45{
46 rcu_note_context_switch(cpu); 46 rcu_note_context_switch();
47} 47}
48 48
49void synchronize_rcu_bh(void); 49void synchronize_rcu_bh(void);
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 48bf152761c7..67fc8fcdc4b0 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -38,6 +38,9 @@ extern int reboot_force;
38extern int register_reboot_notifier(struct notifier_block *); 38extern int register_reboot_notifier(struct notifier_block *);
39extern int unregister_reboot_notifier(struct notifier_block *); 39extern int unregister_reboot_notifier(struct notifier_block *);
40 40
41extern int register_restart_handler(struct notifier_block *);
42extern int unregister_restart_handler(struct notifier_block *);
43extern void do_kernel_restart(char *cmd);
41 44
42/* 45/*
43 * Architecture-specific implementations of sys_reboot commands. 46 * Architecture-specific implementations of sys_reboot commands.
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index c5ed83f49c4e..4419b99d8d6e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -27,6 +27,7 @@ struct spmi_device;
27struct regmap; 27struct regmap;
28struct regmap_range_cfg; 28struct regmap_range_cfg;
29struct regmap_field; 29struct regmap_field;
30struct snd_ac97;
30 31
31/* An enum of all the supported cache types */ 32/* An enum of all the supported cache types */
32enum regcache_type { 33enum regcache_type {
@@ -340,6 +341,8 @@ struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
340struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, 341struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
341 void __iomem *regs, 342 void __iomem *regs,
342 const struct regmap_config *config); 343 const struct regmap_config *config);
344struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
345 const struct regmap_config *config);
343 346
344struct regmap *devm_regmap_init(struct device *dev, 347struct regmap *devm_regmap_init(struct device *dev,
345 const struct regmap_bus *bus, 348 const struct regmap_bus *bus,
@@ -356,6 +359,10 @@ struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
356struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, 359struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
357 void __iomem *regs, 360 void __iomem *regs,
358 const struct regmap_config *config); 361 const struct regmap_config *config);
362struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
363 const struct regmap_config *config);
364
365bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
359 366
360/** 367/**
361 * regmap_init_mmio(): Initialise register map 368 * regmap_init_mmio(): Initialise register map
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index f8a8733068a7..d17e1ff7ad01 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -35,6 +35,8 @@
35#ifndef __LINUX_REGULATOR_CONSUMER_H_ 35#ifndef __LINUX_REGULATOR_CONSUMER_H_
36#define __LINUX_REGULATOR_CONSUMER_H_ 36#define __LINUX_REGULATOR_CONSUMER_H_
37 37
38#include <linux/err.h>
39
38struct device; 40struct device;
39struct notifier_block; 41struct notifier_block;
40struct regmap; 42struct regmap;
@@ -93,7 +95,14 @@ struct regmap;
93 * OVER_TEMP Regulator over temp. 95 * OVER_TEMP Regulator over temp.
94 * FORCE_DISABLE Regulator forcibly shut down by software. 96 * FORCE_DISABLE Regulator forcibly shut down by software.
95 * VOLTAGE_CHANGE Regulator voltage changed. 97 * VOLTAGE_CHANGE Regulator voltage changed.
98 * Data passed is old voltage cast to (void *).
96 * DISABLE Regulator was disabled. 99 * DISABLE Regulator was disabled.
100 * PRE_VOLTAGE_CHANGE Regulator is about to have voltage changed.
101 * Data passed is "struct pre_voltage_change_data"
102 * ABORT_VOLTAGE_CHANGE Regulator voltage change failed for some reason.
103 * Data passed is old voltage cast to (void *).
104 * PRE_DISABLE Regulator is about to be disabled
105 * ABORT_DISABLE Regulator disable failed for some reason
97 * 106 *
98 * NOTE: These events can be OR'ed together when passed into handler. 107 * NOTE: These events can be OR'ed together when passed into handler.
99 */ 108 */
@@ -106,6 +115,23 @@ struct regmap;
106#define REGULATOR_EVENT_FORCE_DISABLE 0x20 115#define REGULATOR_EVENT_FORCE_DISABLE 0x20
107#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 116#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
108#define REGULATOR_EVENT_DISABLE 0x80 117#define REGULATOR_EVENT_DISABLE 0x80
118#define REGULATOR_EVENT_PRE_VOLTAGE_CHANGE 0x100
119#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
120#define REGULATOR_EVENT_PRE_DISABLE 0x400
121#define REGULATOR_EVENT_ABORT_DISABLE 0x800
122
123/**
124 * struct pre_voltage_change_data - Data sent with PRE_VOLTAGE_CHANGE event
125 *
126 * @old_uV: Current voltage before change.
127 * @min_uV: Min voltage we'll change to.
128 * @max_uV: Max voltage we'll change to.
129 */
130struct pre_voltage_change_data {
131 unsigned long old_uV;
132 unsigned long min_uV;
133 unsigned long max_uV;
134};
109 135
110struct regulator; 136struct regulator;
111 137
@@ -262,7 +288,7 @@ devm_regulator_get(struct device *dev, const char *id)
262static inline struct regulator *__must_check 288static inline struct regulator *__must_check
263regulator_get_exclusive(struct device *dev, const char *id) 289regulator_get_exclusive(struct device *dev, const char *id)
264{ 290{
265 return NULL; 291 return ERR_PTR(-ENODEV);
266} 292}
267 293
268static inline struct regulator *__must_check 294static inline struct regulator *__must_check
diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h
index 0981ce0e72cc..5479394fefce 100644
--- a/include/linux/regulator/da9211.h
+++ b/include/linux/regulator/da9211.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * da9211.h - Regulator device driver for DA9211 2 * da9211.h - Regulator device driver for DA9211/DA9213
3 * Copyright (C) 2014 Dialog Semiconductor Ltd. 3 * Copyright (C) 2014 Dialog Semiconductor Ltd.
4 * 4 *
5 * This library is free software; you can redistribute it and/or 5 * This library is free software; you can redistribute it and/or
@@ -20,6 +20,11 @@
20 20
21#define DA9211_MAX_REGULATORS 2 21#define DA9211_MAX_REGULATORS 2
22 22
23enum da9211_chip_id {
24 DA9211,
25 DA9213,
26};
27
23struct da9211_pdata { 28struct da9211_pdata {
24 /* 29 /*
25 * Number of buck 30 * Number of buck
@@ -27,6 +32,6 @@ struct da9211_pdata {
27 * 2 : 2 phase 2 buck 32 * 2 : 2 phase 2 buck
28 */ 33 */
29 int num_buck; 34 int num_buck;
30 struct regulator_init_data *init_data; 35 struct regulator_init_data *init_data[DA9211_MAX_REGULATORS];
31}; 36};
32#endif 37#endif
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 4efa1ed8a2b0..5f1e9ca47417 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -203,6 +203,8 @@ enum regulator_type {
203 * 203 *
204 * @name: Identifying name for the regulator. 204 * @name: Identifying name for the regulator.
205 * @supply_name: Identifying the regulator supply 205 * @supply_name: Identifying the regulator supply
206 * @of_match: Name used to identify regulator in DT.
207 * @regulators_node: Name of node containing regulator definitions in DT.
206 * @id: Numerical identifier for the regulator. 208 * @id: Numerical identifier for the regulator.
207 * @ops: Regulator operations table. 209 * @ops: Regulator operations table.
208 * @irq: Interrupt number for the regulator. 210 * @irq: Interrupt number for the regulator.
@@ -240,14 +242,19 @@ enum regulator_type {
240 * @bypass_val_off: Disabling value for control when using regmap set_bypass 242 * @bypass_val_off: Disabling value for control when using regmap set_bypass
241 * 243 *
242 * @enable_time: Time taken for initial enable of regulator (in uS). 244 * @enable_time: Time taken for initial enable of regulator (in uS).
245 * @off_on_delay: guard time (in uS), before re-enabling a regulator
246 *
247 * @of_map_mode: Maps a hardware mode defined in a DeviceTree to a standard mode
243 */ 248 */
244struct regulator_desc { 249struct regulator_desc {
245 const char *name; 250 const char *name;
246 const char *supply_name; 251 const char *supply_name;
252 const char *of_match;
253 const char *regulators_node;
247 int id; 254 int id;
248 bool continuous_voltage_range; 255 bool continuous_voltage_range;
249 unsigned n_voltages; 256 unsigned n_voltages;
250 struct regulator_ops *ops; 257 const struct regulator_ops *ops;
251 int irq; 258 int irq;
252 enum regulator_type type; 259 enum regulator_type type;
253 struct module *owner; 260 struct module *owner;
@@ -278,6 +285,10 @@ struct regulator_desc {
278 unsigned int bypass_val_off; 285 unsigned int bypass_val_off;
279 286
280 unsigned int enable_time; 287 unsigned int enable_time;
288
289 unsigned int off_on_delay;
290
291 unsigned int (*of_map_mode)(unsigned int mode);
281}; 292};
282 293
283/** 294/**
@@ -294,6 +305,9 @@ struct regulator_desc {
294 * NULL). 305 * NULL).
295 * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is 306 * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is
296 * insufficient. 307 * insufficient.
308 * @ena_gpio_initialized: GPIO controlling regulator enable was properly
309 * initialized, meaning that >= 0 is a valid gpio
310 * identifier and < 0 is a non existent gpio.
297 * @ena_gpio: GPIO controlling regulator enable. 311 * @ena_gpio: GPIO controlling regulator enable.
298 * @ena_gpio_invert: Sense for GPIO enable control. 312 * @ena_gpio_invert: Sense for GPIO enable control.
299 * @ena_gpio_flags: Flags to use when calling gpio_request_one() 313 * @ena_gpio_flags: Flags to use when calling gpio_request_one()
@@ -305,6 +319,7 @@ struct regulator_config {
305 struct device_node *of_node; 319 struct device_node *of_node;
306 struct regmap *regmap; 320 struct regmap *regmap;
307 321
322 bool ena_gpio_initialized;
308 int ena_gpio; 323 int ena_gpio;
309 unsigned int ena_gpio_invert:1; 324 unsigned int ena_gpio_invert:1;
310 unsigned int ena_gpio_flags; 325 unsigned int ena_gpio_flags;
@@ -350,6 +365,9 @@ struct regulator_dev {
350 365
351 struct regulator_enable_gpio *ena_pin; 366 struct regulator_enable_gpio *ena_pin;
352 unsigned int ena_gpio_state:1; 367 unsigned int ena_gpio_state:1;
368
369 /* time when this regulator was disabled last time */
370 unsigned long last_off_jiffy;
353}; 371};
354 372
355struct regulator_dev * 373struct regulator_dev *
diff --git a/include/linux/regulator/max1586.h b/include/linux/regulator/max1586.h
index de9a7fae20be..cedd0febe882 100644
--- a/include/linux/regulator/max1586.h
+++ b/include/linux/regulator/max1586.h
@@ -40,7 +40,7 @@
40 */ 40 */
41struct max1586_subdev_data { 41struct max1586_subdev_data {
42 int id; 42 int id;
43 char *name; 43 const char *name;
44 struct regulator_init_data *platform_data; 44 struct regulator_init_data *platform_data;
45}; 45};
46 46
diff --git a/include/linux/regulator/of_regulator.h b/include/linux/regulator/of_regulator.h
index f9217965aaa3..763953f7e3b8 100644
--- a/include/linux/regulator/of_regulator.h
+++ b/include/linux/regulator/of_regulator.h
@@ -6,24 +6,29 @@
6#ifndef __LINUX_OF_REG_H 6#ifndef __LINUX_OF_REG_H
7#define __LINUX_OF_REG_H 7#define __LINUX_OF_REG_H
8 8
9struct regulator_desc;
10
9struct of_regulator_match { 11struct of_regulator_match {
10 const char *name; 12 const char *name;
11 void *driver_data; 13 void *driver_data;
12 struct regulator_init_data *init_data; 14 struct regulator_init_data *init_data;
13 struct device_node *of_node; 15 struct device_node *of_node;
16 const struct regulator_desc *desc;
14}; 17};
15 18
16#if defined(CONFIG_OF) 19#if defined(CONFIG_OF)
17extern struct regulator_init_data 20extern struct regulator_init_data
18 *of_get_regulator_init_data(struct device *dev, 21 *of_get_regulator_init_data(struct device *dev,
19 struct device_node *node); 22 struct device_node *node,
23 const struct regulator_desc *desc);
20extern int of_regulator_match(struct device *dev, struct device_node *node, 24extern int of_regulator_match(struct device *dev, struct device_node *node,
21 struct of_regulator_match *matches, 25 struct of_regulator_match *matches,
22 unsigned int num_matches); 26 unsigned int num_matches);
23#else 27#else
24static inline struct regulator_init_data 28static inline struct regulator_init_data
25 *of_get_regulator_init_data(struct device *dev, 29 *of_get_regulator_init_data(struct device *dev,
26 struct device_node *node) 30 struct device_node *node,
31 const struct regulator_desc *desc)
27{ 32{
28 return NULL; 33 return NULL;
29} 34}
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
deleted file mode 100644
index 56b7bc32db4f..000000000000
--- a/include/linux/res_counter.h
+++ /dev/null
@@ -1,223 +0,0 @@
1#ifndef __RES_COUNTER_H__
2#define __RES_COUNTER_H__
3
4/*
5 * Resource Counters
6 * Contain common data types and routines for resource accounting
7 *
8 * Copyright 2007 OpenVZ SWsoft Inc
9 *
10 * Author: Pavel Emelianov <xemul@openvz.org>
11 *
12 * See Documentation/cgroups/resource_counter.txt for more
13 * info about what this counter is.
14 */
15
16#include <linux/spinlock.h>
17#include <linux/errno.h>
18
19/*
20 * The core object. the cgroup that wishes to account for some
21 * resource may include this counter into its structures and use
22 * the helpers described beyond
23 */
24
25struct res_counter {
26 /*
27 * the current resource consumption level
28 */
29 unsigned long long usage;
30 /*
31 * the maximal value of the usage from the counter creation
32 */
33 unsigned long long max_usage;
34 /*
35 * the limit that usage cannot exceed
36 */
37 unsigned long long limit;
38 /*
39 * the limit that usage can be exceed
40 */
41 unsigned long long soft_limit;
42 /*
43 * the number of unsuccessful attempts to consume the resource
44 */
45 unsigned long long failcnt;
46 /*
47 * the lock to protect all of the above.
48 * the routines below consider this to be IRQ-safe
49 */
50 spinlock_t lock;
51 /*
52 * Parent counter, used for hierarchial resource accounting
53 */
54 struct res_counter *parent;
55};
56
57#define RES_COUNTER_MAX ULLONG_MAX
58
59/**
60 * Helpers to interact with userspace
61 * res_counter_read_u64() - returns the value of the specified member.
62 * res_counter_read/_write - put/get the specified fields from the
63 * res_counter struct to/from the user
64 *
65 * @counter: the counter in question
66 * @member: the field to work with (see RES_xxx below)
67 * @buf: the buffer to opeate on,...
68 * @nbytes: its size...
69 * @pos: and the offset.
70 */
71
72u64 res_counter_read_u64(struct res_counter *counter, int member);
73
74ssize_t res_counter_read(struct res_counter *counter, int member,
75 const char __user *buf, size_t nbytes, loff_t *pos,
76 int (*read_strategy)(unsigned long long val, char *s));
77
78int res_counter_memparse_write_strategy(const char *buf,
79 unsigned long long *res);
80
81/*
82 * the field descriptors. one for each member of res_counter
83 */
84
85enum {
86 RES_USAGE,
87 RES_MAX_USAGE,
88 RES_LIMIT,
89 RES_FAILCNT,
90 RES_SOFT_LIMIT,
91};
92
93/*
94 * helpers for accounting
95 */
96
97void res_counter_init(struct res_counter *counter, struct res_counter *parent);
98
99/*
100 * charge - try to consume more resource.
101 *
102 * @counter: the counter
103 * @val: the amount of the resource. each controller defines its own
104 * units, e.g. numbers, bytes, Kbytes, etc
105 *
106 * returns 0 on success and <0 if the counter->usage will exceed the
107 * counter->limit
108 *
109 * charge_nofail works the same, except that it charges the resource
110 * counter unconditionally, and returns < 0 if the after the current
111 * charge we are over limit.
112 */
113
114int __must_check res_counter_charge(struct res_counter *counter,
115 unsigned long val, struct res_counter **limit_fail_at);
116int res_counter_charge_nofail(struct res_counter *counter,
117 unsigned long val, struct res_counter **limit_fail_at);
118
119/*
120 * uncharge - tell that some portion of the resource is released
121 *
122 * @counter: the counter
123 * @val: the amount of the resource
124 *
125 * these calls check for usage underflow and show a warning on the console
126 *
127 * returns the total charges still present in @counter.
128 */
129
130u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);
131
132u64 res_counter_uncharge_until(struct res_counter *counter,
133 struct res_counter *top,
134 unsigned long val);
135/**
136 * res_counter_margin - calculate chargeable space of a counter
137 * @cnt: the counter
138 *
139 * Returns the difference between the hard limit and the current usage
140 * of resource counter @cnt.
141 */
142static inline unsigned long long res_counter_margin(struct res_counter *cnt)
143{
144 unsigned long long margin;
145 unsigned long flags;
146
147 spin_lock_irqsave(&cnt->lock, flags);
148 if (cnt->limit > cnt->usage)
149 margin = cnt->limit - cnt->usage;
150 else
151 margin = 0;
152 spin_unlock_irqrestore(&cnt->lock, flags);
153 return margin;
154}
155
156/**
157 * Get the difference between the usage and the soft limit
158 * @cnt: The counter
159 *
160 * Returns 0 if usage is less than or equal to soft limit
161 * The difference between usage and soft limit, otherwise.
162 */
163static inline unsigned long long
164res_counter_soft_limit_excess(struct res_counter *cnt)
165{
166 unsigned long long excess;
167 unsigned long flags;
168
169 spin_lock_irqsave(&cnt->lock, flags);
170 if (cnt->usage <= cnt->soft_limit)
171 excess = 0;
172 else
173 excess = cnt->usage - cnt->soft_limit;
174 spin_unlock_irqrestore(&cnt->lock, flags);
175 return excess;
176}
177
178static inline void res_counter_reset_max(struct res_counter *cnt)
179{
180 unsigned long flags;
181
182 spin_lock_irqsave(&cnt->lock, flags);
183 cnt->max_usage = cnt->usage;
184 spin_unlock_irqrestore(&cnt->lock, flags);
185}
186
187static inline void res_counter_reset_failcnt(struct res_counter *cnt)
188{
189 unsigned long flags;
190
191 spin_lock_irqsave(&cnt->lock, flags);
192 cnt->failcnt = 0;
193 spin_unlock_irqrestore(&cnt->lock, flags);
194}
195
196static inline int res_counter_set_limit(struct res_counter *cnt,
197 unsigned long long limit)
198{
199 unsigned long flags;
200 int ret = -EBUSY;
201
202 spin_lock_irqsave(&cnt->lock, flags);
203 if (cnt->usage <= limit) {
204 cnt->limit = limit;
205 ret = 0;
206 }
207 spin_unlock_irqrestore(&cnt->lock, flags);
208 return ret;
209}
210
211static inline int
212res_counter_set_soft_limit(struct res_counter *cnt,
213 unsigned long long soft_limit)
214{
215 unsigned long flags;
216
217 spin_lock_irqsave(&cnt->lock, flags);
218 cnt->soft_limit = soft_limit;
219 spin_unlock_irqrestore(&cnt->lock, flags);
220 return 0;
221}
222
223#endif
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index 41a4695fde08..ce6b962ffed4 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -12,11 +12,13 @@ struct reset_controller_dev;
12 * things to reset the device 12 * things to reset the device
13 * @assert: manually assert the reset line, if supported 13 * @assert: manually assert the reset line, if supported
14 * @deassert: manually deassert the reset line, if supported 14 * @deassert: manually deassert the reset line, if supported
15 * @status: return the status of the reset line, if supported
15 */ 16 */
16struct reset_control_ops { 17struct reset_control_ops {
17 int (*reset)(struct reset_controller_dev *rcdev, unsigned long id); 18 int (*reset)(struct reset_controller_dev *rcdev, unsigned long id);
18 int (*assert)(struct reset_controller_dev *rcdev, unsigned long id); 19 int (*assert)(struct reset_controller_dev *rcdev, unsigned long id);
19 int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id); 20 int (*deassert)(struct reset_controller_dev *rcdev, unsigned long id);
21 int (*status)(struct reset_controller_dev *rcdev, unsigned long id);
20}; 22};
21 23
22struct module; 24struct module;
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 349f150ae12c..da5602bd77d7 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -10,6 +10,7 @@ struct reset_control;
10int reset_control_reset(struct reset_control *rstc); 10int reset_control_reset(struct reset_control *rstc);
11int reset_control_assert(struct reset_control *rstc); 11int reset_control_assert(struct reset_control *rstc);
12int reset_control_deassert(struct reset_control *rstc); 12int reset_control_deassert(struct reset_control *rstc);
13int reset_control_status(struct reset_control *rstc);
13 14
14struct reset_control *reset_control_get(struct device *dev, const char *id); 15struct reset_control *reset_control_get(struct device *dev, const char *id);
15void reset_control_put(struct reset_control *rstc); 16void reset_control_put(struct reset_control *rstc);
@@ -57,6 +58,12 @@ static inline int reset_control_deassert(struct reset_control *rstc)
57 return 0; 58 return 0;
58} 59}
59 60
61static inline int reset_control_status(struct reset_control *rstc)
62{
63 WARN_ON(1);
64 return 0;
65}
66
60static inline void reset_control_put(struct reset_control *rstc) 67static inline void reset_control_put(struct reset_control *rstc)
61{ 68{
62 WARN_ON(1); 69 WARN_ON(1);
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 36826c0166c5..b93fd89b2e5e 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -44,6 +44,7 @@ struct rhashtable;
44 * @head_offset: Offset of rhash_head in struct to be hashed 44 * @head_offset: Offset of rhash_head in struct to be hashed
45 * @hash_rnd: Seed to use while hashing 45 * @hash_rnd: Seed to use while hashing
46 * @max_shift: Maximum number of shifts while expanding 46 * @max_shift: Maximum number of shifts while expanding
47 * @min_shift: Minimum number of shifts while shrinking
47 * @hashfn: Function to hash key 48 * @hashfn: Function to hash key
48 * @obj_hashfn: Function to hash object 49 * @obj_hashfn: Function to hash object
49 * @grow_decision: If defined, may return true if table should expand 50 * @grow_decision: If defined, may return true if table should expand
@@ -57,13 +58,17 @@ struct rhashtable_params {
57 size_t head_offset; 58 size_t head_offset;
58 u32 hash_rnd; 59 u32 hash_rnd;
59 size_t max_shift; 60 size_t max_shift;
61 size_t min_shift;
60 rht_hashfn_t hashfn; 62 rht_hashfn_t hashfn;
61 rht_obj_hashfn_t obj_hashfn; 63 rht_obj_hashfn_t obj_hashfn;
62 bool (*grow_decision)(const struct rhashtable *ht, 64 bool (*grow_decision)(const struct rhashtable *ht,
63 size_t new_size); 65 size_t new_size);
64 bool (*shrink_decision)(const struct rhashtable *ht, 66 bool (*shrink_decision)(const struct rhashtable *ht,
65 size_t new_size); 67 size_t new_size);
66 int (*mutex_is_held)(void); 68#ifdef CONFIG_PROVE_LOCKING
69 int (*mutex_is_held)(void *parent);
70 void *parent;
71#endif
67}; 72};
68 73
69/** 74/**
@@ -94,16 +99,16 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
94u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); 99u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
95u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); 100u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
96 101
97void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); 102void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
98bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); 103bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
99void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, 104void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
100 struct rhash_head __rcu **pprev, gfp_t flags); 105 struct rhash_head __rcu **pprev);
101 106
102bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); 107bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
103bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); 108bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
104 109
105int rhashtable_expand(struct rhashtable *ht, gfp_t flags); 110int rhashtable_expand(struct rhashtable *ht);
106int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); 111int rhashtable_shrink(struct rhashtable *ht);
107 112
108void *rhashtable_lookup(const struct rhashtable *ht, const void *key); 113void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
109void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, 114void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 49a4d6f59108..e2c13cd863bd 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
97 __ring_buffer_alloc((size), (flags), &__key); \ 97 __ring_buffer_alloc((size), (flags), &__key); \
98}) 98})
99 99
100int ring_buffer_wait(struct ring_buffer *buffer, int cpu); 100int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
101int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, 101int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
102 struct file *filp, poll_table *poll_table); 102 struct file *filp, poll_table *poll_table);
103 103
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index be574506e6a9..d9d7e7e56352 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -37,6 +37,16 @@ struct anon_vma {
37 atomic_t refcount; 37 atomic_t refcount;
38 38
39 /* 39 /*
40 * Count of child anon_vmas and VMAs which points to this anon_vma.
41 *
42 * This counter is used for making decision about reusing anon_vma
43 * instead of forking new one. See comments in function anon_vma_clone.
44 */
45 unsigned degree;
46
47 struct anon_vma *parent; /* Parent of this anon_vma */
48
49 /*
40 * NOTE: the LSB of the rb_root.rb_node is set by 50 * NOTE: the LSB of the rb_root.rb_node is set by
41 * mm_take_all_locks() _after_ taking the above lock. So the 51 * mm_take_all_locks() _after_ taking the above lock. So the
42 * rb_root must only be read/written after taking the above lock 52 * rb_root must only be read/written after taking the above lock
@@ -150,7 +160,7 @@ int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
150static inline void anon_vma_merge(struct vm_area_struct *vma, 160static inline void anon_vma_merge(struct vm_area_struct *vma,
151 struct vm_area_struct *next) 161 struct vm_area_struct *next)
152{ 162{
153 VM_BUG_ON(vma->anon_vma != next->anon_vma); 163 VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma);
154 unlink_anon_vmas(next); 164 unlink_anon_vmas(next);
155} 165}
156 166
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index c2c28975293c..6d6be09a2fe5 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -19,11 +19,28 @@
19extern int rtc_month_days(unsigned int month, unsigned int year); 19extern int rtc_month_days(unsigned int month, unsigned int year);
20extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year); 20extern int rtc_year_days(unsigned int day, unsigned int month, unsigned int year);
21extern int rtc_valid_tm(struct rtc_time *tm); 21extern int rtc_valid_tm(struct rtc_time *tm);
22extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); 22extern time64_t rtc_tm_to_time64(struct rtc_time *tm);
23extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); 23extern void rtc_time64_to_tm(time64_t time, struct rtc_time *tm);
24ktime_t rtc_tm_to_ktime(struct rtc_time tm); 24ktime_t rtc_tm_to_ktime(struct rtc_time tm);
25struct rtc_time rtc_ktime_to_tm(ktime_t kt); 25struct rtc_time rtc_ktime_to_tm(ktime_t kt);
26 26
27/**
28 * Deprecated. Use rtc_time64_to_tm().
29 */
30static inline void rtc_time_to_tm(unsigned long time, struct rtc_time *tm)
31{
32 rtc_time64_to_tm(time, tm);
33}
34
35/**
36 * Deprecated. Use rtc_tm_to_time64().
37 */
38static inline int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time)
39{
40 *time = rtc_tm_to_time64(tm);
41
42 return 0;
43}
27 44
28#include <linux/device.h> 45#include <linux/device.h>
29#include <linux/seq_file.h> 46#include <linux/seq_file.h>
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 167bae7bdfa4..5db76a32fcab 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -17,6 +17,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst,
17 u32 id, long expires, u32 error); 17 u32 id, long expires, u32 error);
18 18
19void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); 19void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags);
20struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
21 unsigned change, gfp_t flags);
22void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev,
23 gfp_t flags);
24
20 25
21/* RTNL is used as a global lock for all changes to network configuration */ 26/* RTNL is used as a global lock for all changes to network configuration */
22extern void rtnl_lock(void); 27extern void rtnl_lock(void);
@@ -47,6 +52,16 @@ static inline int lockdep_rtnl_is_held(void)
47 rcu_dereference_check(p, lockdep_rtnl_is_held()) 52 rcu_dereference_check(p, lockdep_rtnl_is_held())
48 53
49/** 54/**
55 * rcu_dereference_bh_rtnl - rcu_dereference_bh with debug checking
56 * @p: The pointer to read, prior to dereference
57 *
58 * Do an rcu_dereference_bh(p), but check caller either holds rcu_read_lock_bh()
59 * or RTNL. Note : Please prefer rtnl_dereference() or rcu_dereference_bh()
60 */
61#define rcu_dereference_bh_rtnl(p) \
62 rcu_dereference_bh_check(p, lockdep_rtnl_is_held())
63
64/**
50 * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL 65 * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL
51 * @p: The pointer to read, prior to dereferencing 66 * @p: The pointer to read, prior to dereferencing
52 * 67 *
@@ -84,12 +99,15 @@ extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
84 struct nlattr *tb[], 99 struct nlattr *tb[],
85 struct net_device *dev, 100 struct net_device *dev,
86 const unsigned char *addr, 101 const unsigned char *addr,
87 u16 flags); 102 u16 vid,
103 u16 flags);
88extern int ndo_dflt_fdb_del(struct ndmsg *ndm, 104extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
89 struct nlattr *tb[], 105 struct nlattr *tb[],
90 struct net_device *dev, 106 struct net_device *dev,
91 const unsigned char *addr); 107 const unsigned char *addr,
108 u16 vid);
92 109
93extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 110extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
94 struct net_device *dev, u16 mode); 111 struct net_device *dev, u16 mode,
112 u32 flags, u32 mask);
95#endif /* __LINUX_RTNETLINK_H */ 113#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 035d3c57fc8a..8f498cdde280 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -149,7 +149,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
149 * static then another method for expressing nested locking is 149 * static then another method for expressing nested locking is
150 * the explicit definition of lock class keys and the use of 150 * the explicit definition of lock class keys and the use of
151 * lockdep_set_class() at lock initialization time. 151 * lockdep_set_class() at lock initialization time.
152 * See Documentation/lockdep-design.txt for more details.) 152 * See Documentation/locking/lockdep-design.txt for more details.)
153 */ 153 */
154extern void down_read_nested(struct rw_semaphore *sem, int subclass); 154extern void down_read_nested(struct rw_semaphore *sem, int subclass);
155extern void down_write_nested(struct rw_semaphore *sem, int subclass); 155extern void down_write_nested(struct rw_semaphore *sem, int subclass);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b867a4dab38a..8db31ef98d2f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -57,6 +57,7 @@ struct sched_param {
57#include <linux/llist.h> 57#include <linux/llist.h>
58#include <linux/uidgid.h> 58#include <linux/uidgid.h>
59#include <linux/gfp.h> 59#include <linux/gfp.h>
60#include <linux/magic.h>
60 61
61#include <asm/processor.h> 62#include <asm/processor.h>
62 63
@@ -167,6 +168,7 @@ extern int nr_threads;
167DECLARE_PER_CPU(unsigned long, process_counts); 168DECLARE_PER_CPU(unsigned long, process_counts);
168extern int nr_processes(void); 169extern int nr_processes(void);
169extern unsigned long nr_running(void); 170extern unsigned long nr_running(void);
171extern bool single_task_running(void);
170extern unsigned long nr_iowait(void); 172extern unsigned long nr_iowait(void);
171extern unsigned long nr_iowait_cpu(int cpu); 173extern unsigned long nr_iowait_cpu(int cpu);
172extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); 174extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
@@ -241,6 +243,43 @@ extern char ___assert_task_state[1 - 2*!!(
241 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ 243 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
242 (task->flags & PF_FROZEN) == 0) 244 (task->flags & PF_FROZEN) == 0)
243 245
246#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
247
248#define __set_task_state(tsk, state_value) \
249 do { \
250 (tsk)->task_state_change = _THIS_IP_; \
251 (tsk)->state = (state_value); \
252 } while (0)
253#define set_task_state(tsk, state_value) \
254 do { \
255 (tsk)->task_state_change = _THIS_IP_; \
256 set_mb((tsk)->state, (state_value)); \
257 } while (0)
258
259/*
260 * set_current_state() includes a barrier so that the write of current->state
261 * is correctly serialised wrt the caller's subsequent test of whether to
262 * actually sleep:
263 *
264 * set_current_state(TASK_UNINTERRUPTIBLE);
265 * if (do_i_need_to_sleep())
266 * schedule();
267 *
268 * If the caller does not need such serialisation then use __set_current_state()
269 */
270#define __set_current_state(state_value) \
271 do { \
272 current->task_state_change = _THIS_IP_; \
273 current->state = (state_value); \
274 } while (0)
275#define set_current_state(state_value) \
276 do { \
277 current->task_state_change = _THIS_IP_; \
278 set_mb(current->state, (state_value)); \
279 } while (0)
280
281#else
282
244#define __set_task_state(tsk, state_value) \ 283#define __set_task_state(tsk, state_value) \
245 do { (tsk)->state = (state_value); } while (0) 284 do { (tsk)->state = (state_value); } while (0)
246#define set_task_state(tsk, state_value) \ 285#define set_task_state(tsk, state_value) \
@@ -257,11 +296,13 @@ extern char ___assert_task_state[1 - 2*!!(
257 * 296 *
258 * If the caller does not need such serialisation then use __set_current_state() 297 * If the caller does not need such serialisation then use __set_current_state()
259 */ 298 */
260#define __set_current_state(state_value) \ 299#define __set_current_state(state_value) \
261 do { current->state = (state_value); } while (0) 300 do { current->state = (state_value); } while (0)
262#define set_current_state(state_value) \ 301#define set_current_state(state_value) \
263 set_mb(current->state, (state_value)) 302 set_mb(current->state, (state_value))
264 303
304#endif
305
265/* Task command name length */ 306/* Task command name length */
266#define TASK_COMM_LEN 16 307#define TASK_COMM_LEN 16
267 308
@@ -645,6 +686,7 @@ struct signal_struct {
645 * Live threads maintain their own counters and add to these 686 * Live threads maintain their own counters and add to these
646 * in __exit_signal, except for the group leader. 687 * in __exit_signal, except for the group leader.
647 */ 688 */
689 seqlock_t stats_lock;
648 cputime_t utime, stime, cutime, cstime; 690 cputime_t utime, stime, cutime, cstime;
649 cputime_t gtime; 691 cputime_t gtime;
650 cputime_t cgtime; 692 cputime_t cgtime;
@@ -1023,6 +1065,7 @@ struct sched_domain_topology_level {
1023extern struct sched_domain_topology_level *sched_domain_topology; 1065extern struct sched_domain_topology_level *sched_domain_topology;
1024 1066
1025extern void set_sched_topology(struct sched_domain_topology_level *tl); 1067extern void set_sched_topology(struct sched_domain_topology_level *tl);
1068extern void wake_up_if_idle(int cpu);
1026 1069
1027#ifdef CONFIG_SCHED_DEBUG 1070#ifdef CONFIG_SCHED_DEBUG
1028# define SD_INIT_NAME(type) .name = #type 1071# define SD_INIT_NAME(type) .name = #type
@@ -1212,6 +1255,13 @@ struct sched_dl_entity {
1212 struct hrtimer dl_timer; 1255 struct hrtimer dl_timer;
1213}; 1256};
1214 1257
1258union rcu_special {
1259 struct {
1260 bool blocked;
1261 bool need_qs;
1262 } b;
1263 short s;
1264};
1215struct rcu_node; 1265struct rcu_node;
1216 1266
1217enum perf_event_task_context { 1267enum perf_event_task_context {
@@ -1264,12 +1314,18 @@ struct task_struct {
1264 1314
1265#ifdef CONFIG_PREEMPT_RCU 1315#ifdef CONFIG_PREEMPT_RCU
1266 int rcu_read_lock_nesting; 1316 int rcu_read_lock_nesting;
1267 char rcu_read_unlock_special; 1317 union rcu_special rcu_read_unlock_special;
1268 struct list_head rcu_node_entry; 1318 struct list_head rcu_node_entry;
1269#endif /* #ifdef CONFIG_PREEMPT_RCU */ 1319#endif /* #ifdef CONFIG_PREEMPT_RCU */
1270#ifdef CONFIG_TREE_PREEMPT_RCU 1320#ifdef CONFIG_PREEMPT_RCU
1271 struct rcu_node *rcu_blocked_node; 1321 struct rcu_node *rcu_blocked_node;
1272#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 1322#endif /* #ifdef CONFIG_PREEMPT_RCU */
1323#ifdef CONFIG_TASKS_RCU
1324 unsigned long rcu_tasks_nvcsw;
1325 bool rcu_tasks_holdout;
1326 struct list_head rcu_tasks_holdout_list;
1327 int rcu_tasks_idle_cpu;
1328#endif /* #ifdef CONFIG_TASKS_RCU */
1273 1329
1274#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) 1330#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1275 struct sched_info sched_info; 1331 struct sched_info sched_info;
@@ -1308,6 +1364,10 @@ struct task_struct {
1308 unsigned sched_reset_on_fork:1; 1364 unsigned sched_reset_on_fork:1;
1309 unsigned sched_contributes_to_load:1; 1365 unsigned sched_contributes_to_load:1;
1310 1366
1367#ifdef CONFIG_MEMCG_KMEM
1368 unsigned memcg_kmem_skip_account:1;
1369#endif
1370
1311 unsigned long atomic_flags; /* Flags needing atomic access. */ 1371 unsigned long atomic_flags; /* Flags needing atomic access. */
1312 1372
1313 pid_t pid; 1373 pid_t pid;
@@ -1541,28 +1601,23 @@ struct task_struct {
1541 struct numa_group *numa_group; 1601 struct numa_group *numa_group;
1542 1602
1543 /* 1603 /*
1544 * Exponential decaying average of faults on a per-node basis. 1604 * numa_faults is an array split into four regions:
1545 * Scheduling placement decisions are made based on the these counts. 1605 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1546 * The values remain static for the duration of a PTE scan 1606 * in this precise order.
1607 *
1608 * faults_memory: Exponential decaying average of faults on a per-node
1609 * basis. Scheduling placement decisions are made based on these
1610 * counts. The values remain static for the duration of a PTE scan.
1611 * faults_cpu: Track the nodes the process was running on when a NUMA
1612 * hinting fault was incurred.
1613 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1614 * during the current scan window. When the scan completes, the counts
1615 * in faults_memory and faults_cpu decay and these values are copied.
1547 */ 1616 */
1548 unsigned long *numa_faults_memory; 1617 unsigned long *numa_faults;
1549 unsigned long total_numa_faults; 1618 unsigned long total_numa_faults;
1550 1619
1551 /* 1620 /*
1552 * numa_faults_buffer records faults per node during the current
1553 * scan window. When the scan completes, the counts in
1554 * numa_faults_memory decay and these values are copied.
1555 */
1556 unsigned long *numa_faults_buffer_memory;
1557
1558 /*
1559 * Track the nodes the process was running on when a NUMA hinting
1560 * fault was incurred.
1561 */
1562 unsigned long *numa_faults_cpu;
1563 unsigned long *numa_faults_buffer_cpu;
1564
1565 /*
1566 * numa_faults_locality tracks if faults recorded during the last 1621 * numa_faults_locality tracks if faults recorded during the last
1567 * scan window were remote/local. The task scan period is adapted 1622 * scan window were remote/local. The task scan period is adapted
1568 * based on the locality of the faults with different weights 1623 * based on the locality of the faults with different weights
@@ -1628,8 +1683,7 @@ struct task_struct {
1628 /* bitmask and counter of trace recursion */ 1683 /* bitmask and counter of trace recursion */
1629 unsigned long trace_recursion; 1684 unsigned long trace_recursion;
1630#endif /* CONFIG_TRACING */ 1685#endif /* CONFIG_TRACING */
1631#ifdef CONFIG_MEMCG /* memcg uses this to do batch job */ 1686#ifdef CONFIG_MEMCG
1632 unsigned int memcg_kmem_skip_account;
1633 struct memcg_oom_info { 1687 struct memcg_oom_info {
1634 struct mem_cgroup *memcg; 1688 struct mem_cgroup *memcg;
1635 gfp_t gfp_mask; 1689 gfp_t gfp_mask;
@@ -1644,6 +1698,9 @@ struct task_struct {
1644 unsigned int sequential_io; 1698 unsigned int sequential_io;
1645 unsigned int sequential_io_avg; 1699 unsigned int sequential_io_avg;
1646#endif 1700#endif
1701#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1702 unsigned long task_state_change;
1703#endif
1647}; 1704};
1648 1705
1649/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1706/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1934,11 +1991,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
1934#define tsk_used_math(p) ((p)->flags & PF_USED_MATH) 1991#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1935#define used_math() tsk_used_math(current) 1992#define used_math() tsk_used_math(current)
1936 1993
1937/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */ 1994/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
1995 * __GFP_FS is also cleared as it implies __GFP_IO.
1996 */
1938static inline gfp_t memalloc_noio_flags(gfp_t flags) 1997static inline gfp_t memalloc_noio_flags(gfp_t flags)
1939{ 1998{
1940 if (unlikely(current->flags & PF_MEMALLOC_NOIO)) 1999 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
1941 flags &= ~__GFP_IO; 2000 flags &= ~(__GFP_IO | __GFP_FS);
1942 return flags; 2001 return flags;
1943} 2002}
1944 2003
@@ -2011,29 +2070,21 @@ extern void task_clear_jobctl_trapping(struct task_struct *task);
2011extern void task_clear_jobctl_pending(struct task_struct *task, 2070extern void task_clear_jobctl_pending(struct task_struct *task,
2012 unsigned int mask); 2071 unsigned int mask);
2013 2072
2014#ifdef CONFIG_PREEMPT_RCU
2015
2016#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
2017#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
2018
2019static inline void rcu_copy_process(struct task_struct *p) 2073static inline void rcu_copy_process(struct task_struct *p)
2020{ 2074{
2075#ifdef CONFIG_PREEMPT_RCU
2021 p->rcu_read_lock_nesting = 0; 2076 p->rcu_read_lock_nesting = 0;
2022 p->rcu_read_unlock_special = 0; 2077 p->rcu_read_unlock_special.s = 0;
2023#ifdef CONFIG_TREE_PREEMPT_RCU
2024 p->rcu_blocked_node = NULL; 2078 p->rcu_blocked_node = NULL;
2025#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
2026 INIT_LIST_HEAD(&p->rcu_node_entry); 2079 INIT_LIST_HEAD(&p->rcu_node_entry);
2080#endif /* #ifdef CONFIG_PREEMPT_RCU */
2081#ifdef CONFIG_TASKS_RCU
2082 p->rcu_tasks_holdout = false;
2083 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2084 p->rcu_tasks_idle_cpu = -1;
2085#endif /* #ifdef CONFIG_TASKS_RCU */
2027} 2086}
2028 2087
2029#else
2030
2031static inline void rcu_copy_process(struct task_struct *p)
2032{
2033}
2034
2035#endif
2036
2037static inline void tsk_restore_flags(struct task_struct *task, 2088static inline void tsk_restore_flags(struct task_struct *task,
2038 unsigned long orig_flags, unsigned long flags) 2089 unsigned long orig_flags, unsigned long flags)
2039{ 2090{
@@ -2041,6 +2092,10 @@ static inline void tsk_restore_flags(struct task_struct *task,
2041 task->flags |= orig_flags & flags; 2092 task->flags |= orig_flags & flags;
2042} 2093}
2043 2094
2095extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2096 const struct cpumask *trial);
2097extern int task_can_attach(struct task_struct *p,
2098 const struct cpumask *cs_cpus_allowed);
2044#ifdef CONFIG_SMP 2099#ifdef CONFIG_SMP
2045extern void do_set_cpus_allowed(struct task_struct *p, 2100extern void do_set_cpus_allowed(struct task_struct *p,
2046 const struct cpumask *new_mask); 2101 const struct cpumask *new_mask);
@@ -2430,6 +2485,10 @@ extern void do_group_exit(int);
2430extern int do_execve(struct filename *, 2485extern int do_execve(struct filename *,
2431 const char __user * const __user *, 2486 const char __user * const __user *,
2432 const char __user * const __user *); 2487 const char __user * const __user *);
2488extern int do_execveat(int, struct filename *,
2489 const char __user * const __user *,
2490 const char __user * const __user *,
2491 int);
2433extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *); 2492extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2434struct task_struct *fork_idle(int); 2493struct task_struct *fork_idle(int);
2435extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); 2494extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
@@ -2639,6 +2698,8 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
2639} 2698}
2640 2699
2641#endif 2700#endif
2701#define task_stack_end_corrupted(task) \
2702 (*(end_of_stack(task)) != STACK_END_MAGIC)
2642 2703
2643static inline int object_is_on_stack(void *obj) 2704static inline int object_is_on_stack(void *obj)
2644{ 2705{
@@ -2661,6 +2722,7 @@ static inline unsigned long stack_not_used(struct task_struct *p)
2661 return (unsigned long)n - (unsigned long)end_of_stack(p); 2722 return (unsigned long)n - (unsigned long)end_of_stack(p);
2662} 2723}
2663#endif 2724#endif
2725extern void set_task_stack_end_magic(struct task_struct *tsk);
2664 2726
2665/* set thread flags in other task's structures 2727/* set thread flags in other task's structures
2666 * - see asm/thread_info.h for TIF_xxxx flags available 2728 * - see asm/thread_info.h for TIF_xxxx flags available
@@ -2746,7 +2808,7 @@ static inline int signal_pending_state(long state, struct task_struct *p)
2746extern int _cond_resched(void); 2808extern int _cond_resched(void);
2747 2809
2748#define cond_resched() ({ \ 2810#define cond_resched() ({ \
2749 __might_sleep(__FILE__, __LINE__, 0); \ 2811 ___might_sleep(__FILE__, __LINE__, 0); \
2750 _cond_resched(); \ 2812 _cond_resched(); \
2751}) 2813})
2752 2814
@@ -2759,14 +2821,14 @@ extern int __cond_resched_lock(spinlock_t *lock);
2759#endif 2821#endif
2760 2822
2761#define cond_resched_lock(lock) ({ \ 2823#define cond_resched_lock(lock) ({ \
2762 __might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET); \ 2824 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2763 __cond_resched_lock(lock); \ 2825 __cond_resched_lock(lock); \
2764}) 2826})
2765 2827
2766extern int __cond_resched_softirq(void); 2828extern int __cond_resched_softirq(void);
2767 2829
2768#define cond_resched_softirq() ({ \ 2830#define cond_resched_softirq() ({ \
2769 __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ 2831 ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
2770 __cond_resched_softirq(); \ 2832 __cond_resched_softirq(); \
2771}) 2833})
2772 2834
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
index 005bf3e38db5..f0f8bad54be9 100644
--- a/include/linux/screen_info.h
+++ b/include/linux/screen_info.h
@@ -5,12 +5,4 @@
5 5
6extern struct screen_info screen_info; 6extern struct screen_info screen_info;
7 7
8#define ORIG_X (screen_info.orig_x)
9#define ORIG_Y (screen_info.orig_y)
10#define ORIG_VIDEO_MODE (screen_info.orig_video_mode)
11#define ORIG_VIDEO_COLS (screen_info.orig_video_cols)
12#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx)
13#define ORIG_VIDEO_LINES (screen_info.orig_video_lines)
14#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA)
15#define ORIG_VIDEO_POINTS (screen_info.orig_video_points)
16#endif /* _SCREEN_INFO_H */ 8#endif /* _SCREEN_INFO_H */
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 5d586a45a319..a19ddacdac30 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -27,19 +27,23 @@ struct seccomp {
27 struct seccomp_filter *filter; 27 struct seccomp_filter *filter;
28}; 28};
29 29
30extern int __secure_computing(int); 30#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
31static inline int secure_computing(int this_syscall) 31extern int __secure_computing(void);
32static inline int secure_computing(void)
32{ 33{
33 if (unlikely(test_thread_flag(TIF_SECCOMP))) 34 if (unlikely(test_thread_flag(TIF_SECCOMP)))
34 return __secure_computing(this_syscall); 35 return __secure_computing();
35 return 0; 36 return 0;
36} 37}
37 38
38/* A wrapper for architectures supporting only SECCOMP_MODE_STRICT. */ 39#define SECCOMP_PHASE1_OK 0
39static inline void secure_computing_strict(int this_syscall) 40#define SECCOMP_PHASE1_SKIP 1
40{ 41
41 BUG_ON(secure_computing(this_syscall) != 0); 42extern u32 seccomp_phase1(struct seccomp_data *sd);
42} 43int seccomp_phase2(u32 phase1_result);
44#else
45extern void secure_computing_strict(int this_syscall);
46#endif
43 47
44extern long prctl_get_seccomp(void); 48extern long prctl_get_seccomp(void);
45extern long prctl_set_seccomp(unsigned long, char __user *); 49extern long prctl_set_seccomp(unsigned long, char __user *);
@@ -56,8 +60,11 @@ static inline int seccomp_mode(struct seccomp *s)
56struct seccomp { }; 60struct seccomp { };
57struct seccomp_filter { }; 61struct seccomp_filter { };
58 62
59static inline int secure_computing(int this_syscall) { return 0; } 63#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
64static inline int secure_computing(void) { return 0; }
65#else
60static inline void secure_computing_strict(int this_syscall) { return; } 66static inline void secure_computing_strict(int this_syscall) { return; }
67#endif
61 68
62static inline long prctl_get_seccomp(void) 69static inline long prctl_get_seccomp(void)
63{ 70{
diff --git a/include/linux/security.h b/include/linux/security.h
index 623f90e5f38d..ba96471c11ba 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1559,7 +1559,7 @@ struct security_operations {
1559 int (*file_lock) (struct file *file, unsigned int cmd); 1559 int (*file_lock) (struct file *file, unsigned int cmd);
1560 int (*file_fcntl) (struct file *file, unsigned int cmd, 1560 int (*file_fcntl) (struct file *file, unsigned int cmd,
1561 unsigned long arg); 1561 unsigned long arg);
1562 int (*file_set_fowner) (struct file *file); 1562 void (*file_set_fowner) (struct file *file);
1563 int (*file_send_sigiotask) (struct task_struct *tsk, 1563 int (*file_send_sigiotask) (struct task_struct *tsk,
1564 struct fown_struct *fown, int sig); 1564 struct fown_struct *fown, int sig);
1565 int (*file_receive) (struct file *file); 1565 int (*file_receive) (struct file *file);
@@ -1834,7 +1834,7 @@ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
1834 unsigned long prot); 1834 unsigned long prot);
1835int security_file_lock(struct file *file, unsigned int cmd); 1835int security_file_lock(struct file *file, unsigned int cmd);
1836int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg); 1836int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
1837int security_file_set_fowner(struct file *file); 1837void security_file_set_fowner(struct file *file);
1838int security_file_send_sigiotask(struct task_struct *tsk, 1838int security_file_send_sigiotask(struct task_struct *tsk,
1839 struct fown_struct *fown, int sig); 1839 struct fown_struct *fown, int sig);
1840int security_file_receive(struct file *file); 1840int security_file_receive(struct file *file);
@@ -2108,7 +2108,7 @@ static inline int security_dentry_init_security(struct dentry *dentry,
2108static inline int security_inode_init_security(struct inode *inode, 2108static inline int security_inode_init_security(struct inode *inode,
2109 struct inode *dir, 2109 struct inode *dir,
2110 const struct qstr *qstr, 2110 const struct qstr *qstr,
2111 const initxattrs initxattrs, 2111 const initxattrs xattrs,
2112 void *fs_data) 2112 void *fs_data)
2113{ 2113{
2114 return 0; 2114 return 0;
@@ -2312,9 +2312,9 @@ static inline int security_file_fcntl(struct file *file, unsigned int cmd,
2312 return 0; 2312 return 0;
2313} 2313}
2314 2314
2315static inline int security_file_set_fowner(struct file *file) 2315static inline void security_file_set_fowner(struct file *file)
2316{ 2316{
2317 return 0; 2317 return;
2318} 2318}
2319 2319
2320static inline int security_file_send_sigiotask(struct task_struct *tsk, 2320static inline int security_file_send_sigiotask(struct task_struct *tsk,
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
new file mode 100644
index 000000000000..9aafe0e24c68
--- /dev/null
+++ b/include/linux/seq_buf.h
@@ -0,0 +1,136 @@
1#ifndef _LINUX_SEQ_BUF_H
2#define _LINUX_SEQ_BUF_H
3
4#include <linux/fs.h>
5
6/*
7 * Trace sequences are used to allow a function to call several other functions
8 * to create a string of data to use.
9 */
10
11/**
12 * seq_buf - seq buffer structure
13 * @buffer: pointer to the buffer
14 * @size: size of the buffer
15 * @len: the amount of data inside the buffer
16 * @readpos: The next position to read in the buffer.
17 */
18struct seq_buf {
19 char *buffer;
20 size_t size;
21 size_t len;
22 loff_t readpos;
23};
24
25static inline void seq_buf_clear(struct seq_buf *s)
26{
27 s->len = 0;
28 s->readpos = 0;
29}
30
31static inline void
32seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
33{
34 s->buffer = buf;
35 s->size = size;
36 seq_buf_clear(s);
37}
38
39/*
40 * seq_buf have a buffer that might overflow. When this happens
41 * the len and size are set to be equal.
42 */
43static inline bool
44seq_buf_has_overflowed(struct seq_buf *s)
45{
46 return s->len > s->size;
47}
48
49static inline void
50seq_buf_set_overflow(struct seq_buf *s)
51{
52 s->len = s->size + 1;
53}
54
55/*
56 * How much buffer is left on the seq_buf?
57 */
58static inline unsigned int
59seq_buf_buffer_left(struct seq_buf *s)
60{
61 if (seq_buf_has_overflowed(s))
62 return 0;
63
64 return s->size - s->len;
65}
66
67/* How much buffer was written? */
68static inline unsigned int seq_buf_used(struct seq_buf *s)
69{
70 return min(s->len, s->size);
71}
72
73/**
74 * seq_buf_get_buf - get buffer to write arbitrary data to
75 * @s: the seq_buf handle
76 * @bufp: the beginning of the buffer is stored here
77 *
78 * Return the number of bytes available in the buffer, or zero if
79 * there's no space.
80 */
81static inline size_t seq_buf_get_buf(struct seq_buf *s, char **bufp)
82{
83 WARN_ON(s->len > s->size + 1);
84
85 if (s->len < s->size) {
86 *bufp = s->buffer + s->len;
87 return s->size - s->len;
88 }
89
90 *bufp = NULL;
91 return 0;
92}
93
94/**
95 * seq_buf_commit - commit data to the buffer
96 * @s: the seq_buf handle
97 * @num: the number of bytes to commit
98 *
99 * Commit @num bytes of data written to a buffer previously acquired
100 * by seq_buf_get. To signal an error condition, or that the data
101 * didn't fit in the available space, pass a negative @num value.
102 */
103static inline void seq_buf_commit(struct seq_buf *s, int num)
104{
105 if (num < 0) {
106 seq_buf_set_overflow(s);
107 } else {
108 /* num must be negative on overflow */
109 BUG_ON(s->len + num > s->size);
110 s->len += num;
111 }
112}
113
114extern __printf(2, 3)
115int seq_buf_printf(struct seq_buf *s, const char *fmt, ...);
116extern __printf(2, 0)
117int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
118extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
119extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf,
120 int cnt);
121extern int seq_buf_puts(struct seq_buf *s, const char *str);
122extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
123extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
124extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
125 unsigned int len);
126extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
127
128extern int seq_buf_bitmask(struct seq_buf *s, const unsigned long *maskp,
129 int nmaskbits);
130
131#ifdef CONFIG_BINARY_PRINTF
132extern int
133seq_buf_bprintf(struct seq_buf *s, const char *fmt, const u32 *binary);
134#endif
135
136#endif /* _LINUX_SEQ_BUF_H */
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 52e0097f61f0..cf6a9daaaf6d 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -43,6 +43,21 @@ struct seq_operations {
43#define SEQ_SKIP 1 43#define SEQ_SKIP 1
44 44
45/** 45/**
46 * seq_has_overflowed - check if the buffer has overflowed
47 * @m: the seq_file handle
48 *
49 * seq_files have a buffer which may overflow. When this happens a larger
50 * buffer is reallocated and all the data will be printed again.
51 * The overflow state is true when m->count == m->size.
52 *
53 * Returns true if the buffer received more than it can hold.
54 */
55static inline bool seq_has_overflowed(struct seq_file *m)
56{
57 return m->count == m->size;
58}
59
60/**
46 * seq_get_buf - get buffer to write arbitrary data to 61 * seq_get_buf - get buffer to write arbitrary data to
47 * @m: the seq_file handle 62 * @m: the seq_file handle
48 * @bufp: the beginning of the buffer is stored here 63 * @bufp: the beginning of the buffer is stored here
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index cc359636cfa3..f5df8f687b4d 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -456,4 +456,23 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
456 spin_unlock_irqrestore(&sl->lock, flags); 456 spin_unlock_irqrestore(&sl->lock, flags);
457} 457}
458 458
459static inline unsigned long
460read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
461{
462 unsigned long flags = 0;
463
464 if (!(*seq & 1)) /* Even */
465 *seq = read_seqbegin(lock);
466 else /* Odd */
467 read_seqlock_excl_irqsave(lock, flags);
468
469 return flags;
470}
471
472static inline void
473done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
474{
475 if (seq & 1)
476 read_sequnlock_excl_irqrestore(lock, flags);
477}
459#endif /* __LINUX_SEQLOCK_H */ 478#endif /* __LINUX_SEQLOCK_H */
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index f93649e22c43..e02acf0a0ec9 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -84,6 +84,7 @@ struct uart_8250_port {
84 unsigned char mcr_mask; /* mask of user bits */ 84 unsigned char mcr_mask; /* mask of user bits */
85 unsigned char mcr_force; /* mask of forced bits */ 85 unsigned char mcr_force; /* mask of forced bits */
86 unsigned char cur_iotype; /* Running I/O type */ 86 unsigned char cur_iotype; /* Running I/O type */
87 unsigned int rpm_tx_active;
87 88
88 /* 89 /*
89 * Some bits in registers are cleared on a read, so they must 90 * Some bits in registers are cleared on a read, so they must
@@ -121,6 +122,8 @@ extern void serial8250_early_out(struct uart_port *port, int offset, int value);
121extern int setup_early_serial8250_console(char *cmdline); 122extern int setup_early_serial8250_console(char *cmdline);
122extern void serial8250_do_set_termios(struct uart_port *port, 123extern void serial8250_do_set_termios(struct uart_port *port,
123 struct ktermios *termios, struct ktermios *old); 124 struct ktermios *termios, struct ktermios *old);
125extern int serial8250_do_startup(struct uart_port *port);
126extern void serial8250_do_shutdown(struct uart_port *port);
124extern void serial8250_do_pm(struct uart_port *port, unsigned int state, 127extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
125 unsigned int oldstate); 128 unsigned int oldstate);
126extern int fsl8250_handle_irq(struct uart_port *port); 129extern int fsl8250_handle_irq(struct uart_port *port);
diff --git a/include/linux/serial_bcm63xx.h b/include/linux/serial_bcm63xx.h
index a80aa1a5bee2..570e964dc899 100644
--- a/include/linux/serial_bcm63xx.h
+++ b/include/linux/serial_bcm63xx.h
@@ -116,6 +116,4 @@
116 UART_FIFO_PARERR_MASK | \ 116 UART_FIFO_PARERR_MASK | \
117 UART_FIFO_BRKDET_MASK) 117 UART_FIFO_BRKDET_MASK)
118 118
119#define UART_REG_SIZE 24
120
121#endif /* _LINUX_SERIAL_BCM63XX_H */ 119#endif /* _LINUX_SERIAL_BCM63XX_H */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index cf3a1e789bf5..057038cf2788 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -63,7 +63,7 @@ struct uart_ops {
63 void (*flush_buffer)(struct uart_port *); 63 void (*flush_buffer)(struct uart_port *);
64 void (*set_termios)(struct uart_port *, struct ktermios *new, 64 void (*set_termios)(struct uart_port *, struct ktermios *new,
65 struct ktermios *old); 65 struct ktermios *old);
66 void (*set_ldisc)(struct uart_port *, int new); 66 void (*set_ldisc)(struct uart_port *, struct ktermios *);
67 void (*pm)(struct uart_port *, unsigned int state, 67 void (*pm)(struct uart_port *, unsigned int state,
68 unsigned int oldstate); 68 unsigned int oldstate);
69 69
@@ -112,6 +112,7 @@ struct uart_icount {
112}; 112};
113 113
114typedef unsigned int __bitwise__ upf_t; 114typedef unsigned int __bitwise__ upf_t;
115typedef unsigned int __bitwise__ upstat_t;
115 116
116struct uart_port { 117struct uart_port {
117 spinlock_t lock; /* port lock */ 118 spinlock_t lock; /* port lock */
@@ -122,10 +123,16 @@ struct uart_port {
122 void (*set_termios)(struct uart_port *, 123 void (*set_termios)(struct uart_port *,
123 struct ktermios *new, 124 struct ktermios *new,
124 struct ktermios *old); 125 struct ktermios *old);
126 int (*startup)(struct uart_port *port);
127 void (*shutdown)(struct uart_port *port);
128 void (*throttle)(struct uart_port *port);
129 void (*unthrottle)(struct uart_port *port);
125 int (*handle_irq)(struct uart_port *); 130 int (*handle_irq)(struct uart_port *);
126 void (*pm)(struct uart_port *, unsigned int state, 131 void (*pm)(struct uart_port *, unsigned int state,
127 unsigned int old); 132 unsigned int old);
128 void (*handle_break)(struct uart_port *); 133 void (*handle_break)(struct uart_port *);
134 int (*rs485_config)(struct uart_port *,
135 struct serial_rs485 *rs485);
129 unsigned int irq; /* irq number */ 136 unsigned int irq; /* irq number */
130 unsigned long irqflags; /* irq flags */ 137 unsigned long irqflags; /* irq flags */
131 unsigned int uartclk; /* base uart clock */ 138 unsigned int uartclk; /* base uart clock */
@@ -135,12 +142,13 @@ struct uart_port {
135 unsigned char iotype; /* io access style */ 142 unsigned char iotype; /* io access style */
136 unsigned char unused1; 143 unsigned char unused1;
137 144
138#define UPIO_PORT (0) 145#define UPIO_PORT (0) /* 8b I/O port access */
139#define UPIO_HUB6 (1) 146#define UPIO_HUB6 (1) /* Hub6 ISA card */
140#define UPIO_MEM (2) 147#define UPIO_MEM (2) /* 8b MMIO access */
141#define UPIO_MEM32 (3) 148#define UPIO_MEM32 (3) /* 32b little endian */
142#define UPIO_AU (4) /* Au1x00 and RT288x type IO */ 149#define UPIO_MEM32BE (4) /* 32b big endian */
143#define UPIO_TSI (5) /* Tsi108/109 type IO */ 150#define UPIO_AU (5) /* Au1x00 and RT288x type IO */
151#define UPIO_TSI (6) /* Tsi108/109 type IO */
144 152
145 unsigned int read_status_mask; /* driver specific */ 153 unsigned int read_status_mask; /* driver specific */
146 unsigned int ignore_status_mask; /* driver specific */ 154 unsigned int ignore_status_mask; /* driver specific */
@@ -152,23 +160,36 @@ struct uart_port {
152 unsigned long sysrq; /* sysrq timeout */ 160 unsigned long sysrq; /* sysrq timeout */
153#endif 161#endif
154 162
163 /* flags must be updated while holding port mutex */
155 upf_t flags; 164 upf_t flags;
156 165
157#define UPF_FOURPORT ((__force upf_t) (1 << 1)) 166 /*
158#define UPF_SAK ((__force upf_t) (1 << 2)) 167 * These flags must be equivalent to the flags defined in
159#define UPF_SPD_MASK ((__force upf_t) (0x1030)) 168 * include/uapi/linux/tty_flags.h which are the userspace definitions
160#define UPF_SPD_HI ((__force upf_t) (0x0010)) 169 * assigned from the serial_struct flags in uart_set_info()
161#define UPF_SPD_VHI ((__force upf_t) (0x0020)) 170 * [for bit definitions in the UPF_CHANGE_MASK]
162#define UPF_SPD_CUST ((__force upf_t) (0x0030)) 171 *
163#define UPF_SPD_SHI ((__force upf_t) (0x1000)) 172 * Bits [0..UPF_LAST_USER] are userspace defined/visible/changeable
164#define UPF_SPD_WARP ((__force upf_t) (0x1010)) 173 * except bit 15 (UPF_NO_TXEN_TEST) which is masked off.
165#define UPF_SKIP_TEST ((__force upf_t) (1 << 6)) 174 * The remaining bits are serial-core specific and not modifiable by
166#define UPF_AUTO_IRQ ((__force upf_t) (1 << 7)) 175 * userspace.
167#define UPF_HARDPPS_CD ((__force upf_t) (1 << 11)) 176 */
168#define UPF_LOW_LATENCY ((__force upf_t) (1 << 13)) 177#define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ )
169#define UPF_BUGGY_UART ((__force upf_t) (1 << 14)) 178#define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ )
179#define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ )
180#define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ )
181#define UPF_SPD_CUST ((__force upf_t) ASYNC_SPD_CUST /* 0x0030 */ )
182#define UPF_SPD_WARP ((__force upf_t) ASYNC_SPD_WARP /* 0x1010 */ )
183#define UPF_SPD_MASK ((__force upf_t) ASYNC_SPD_MASK /* 0x1030 */ )
184#define UPF_SKIP_TEST ((__force upf_t) ASYNC_SKIP_TEST /* 6 */ )
185#define UPF_AUTO_IRQ ((__force upf_t) ASYNC_AUTO_IRQ /* 7 */ )
186#define UPF_HARDPPS_CD ((__force upf_t) ASYNC_HARDPPS_CD /* 11 */ )
187#define UPF_SPD_SHI ((__force upf_t) ASYNC_SPD_SHI /* 12 */ )
188#define UPF_LOW_LATENCY ((__force upf_t) ASYNC_LOW_LATENCY /* 13 */ )
189#define UPF_BUGGY_UART ((__force upf_t) ASYNC_BUGGY_UART /* 14 */ )
170#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15)) 190#define UPF_NO_TXEN_TEST ((__force upf_t) (1 << 15))
171#define UPF_MAGIC_MULTIPLIER ((__force upf_t) (1 << 16)) 191#define UPF_MAGIC_MULTIPLIER ((__force upf_t) ASYNC_MAGIC_MULTIPLIER /* 16 */ )
192
172/* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */ 193/* Port has hardware-assisted h/w flow control (iow, auto-RTS *not* auto-CTS) */
173#define UPF_HARD_FLOW ((__force upf_t) (1 << 21)) 194#define UPF_HARD_FLOW ((__force upf_t) (1 << 21))
174/* Port has hardware-assisted s/w flow control */ 195/* Port has hardware-assisted s/w flow control */
@@ -184,9 +205,21 @@ struct uart_port {
184#define UPF_DEAD ((__force upf_t) (1 << 30)) 205#define UPF_DEAD ((__force upf_t) (1 << 30))
185#define UPF_IOREMAP ((__force upf_t) (1 << 31)) 206#define UPF_IOREMAP ((__force upf_t) (1 << 31))
186 207
187#define UPF_CHANGE_MASK ((__force upf_t) (0x17fff)) 208#define __UPF_CHANGE_MASK 0x17fff
209#define UPF_CHANGE_MASK ((__force upf_t) __UPF_CHANGE_MASK)
188#define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY)) 210#define UPF_USR_MASK ((__force upf_t) (UPF_SPD_MASK|UPF_LOW_LATENCY))
189 211
212#if __UPF_CHANGE_MASK > ASYNC_FLAGS
213#error Change mask not equivalent to userspace-visible bit defines
214#endif
215
216 /* status must be updated while holding port lock */
217 upstat_t status;
218
219#define UPSTAT_CTS_ENABLE ((__force upstat_t) (1 << 0))
220#define UPSTAT_DCD_ENABLE ((__force upstat_t) (1 << 1))
221
222 int hw_stopped; /* sw-assisted CTS flow state */
190 unsigned int mctrl; /* current modem ctrl settings */ 223 unsigned int mctrl; /* current modem ctrl settings */
191 unsigned int timeout; /* character-based timeout */ 224 unsigned int timeout; /* character-based timeout */
192 unsigned int type; /* port type */ 225 unsigned int type; /* port type */
@@ -201,6 +234,7 @@ struct uart_port {
201 unsigned char unused[2]; 234 unsigned char unused[2];
202 struct attribute_group *attr_group; /* port specific attributes */ 235 struct attribute_group *attr_group; /* port specific attributes */
203 const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ 236 const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
237 struct serial_rs485 rs485;
204 void *private_data; /* generic platform data pointer */ 238 void *private_data; /* generic platform data pointer */
205}; 239};
206 240
@@ -347,11 +381,16 @@ int uart_resume_port(struct uart_driver *reg, struct uart_port *port);
347static inline int uart_tx_stopped(struct uart_port *port) 381static inline int uart_tx_stopped(struct uart_port *port)
348{ 382{
349 struct tty_struct *tty = port->state->port.tty; 383 struct tty_struct *tty = port->state->port.tty;
350 if(tty->stopped || tty->hw_stopped) 384 if (tty->stopped || port->hw_stopped)
351 return 1; 385 return 1;
352 return 0; 386 return 0;
353} 387}
354 388
389static inline bool uart_cts_enabled(struct uart_port *uport)
390{
391 return !!(uport->status & UPSTAT_CTS_ENABLE);
392}
393
355/* 394/*
356 * The following are helper functions for the low level drivers. 395 * The following are helper functions for the low level drivers.
357 */ 396 */
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 68c097077ef0..f4aee75f00b1 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -18,8 +18,6 @@ struct shrink_control {
18 */ 18 */
19 unsigned long nr_to_scan; 19 unsigned long nr_to_scan;
20 20
21 /* shrink from these nodes */
22 nodemask_t nodes_to_scan;
23 /* current node being shrunk (for NUMA aware shrinkers) */ 21 /* current node being shrunk (for NUMA aware shrinkers) */
24 int nid; 22 int nid;
25}; 23};
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 750196fcc0a5..ab1e0392b5ac 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -2,6 +2,7 @@
2#define _LINUX_SIGNAL_H 2#define _LINUX_SIGNAL_H
3 3
4#include <linux/list.h> 4#include <linux/list.h>
5#include <linux/bug.h>
5#include <uapi/linux/signal.h> 6#include <uapi/linux/signal.h>
6 7
7struct task_struct; 8struct task_struct;
@@ -67,7 +68,6 @@ static inline int sigismember(sigset_t *set, int _sig)
67 68
68static inline int sigisemptyset(sigset_t *set) 69static inline int sigisemptyset(sigset_t *set)
69{ 70{
70 extern void _NSIG_WORDS_is_unsupported_size(void);
71 switch (_NSIG_WORDS) { 71 switch (_NSIG_WORDS) {
72 case 4: 72 case 4:
73 return (set->sig[3] | set->sig[2] | 73 return (set->sig[3] | set->sig[2] |
@@ -77,7 +77,7 @@ static inline int sigisemptyset(sigset_t *set)
77 case 1: 77 case 1:
78 return set->sig[0] == 0; 78 return set->sig[0] == 0;
79 default: 79 default:
80 _NSIG_WORDS_is_unsupported_size(); 80 BUILD_BUG();
81 return 0; 81 return 0;
82 } 82 }
83} 83}
@@ -90,24 +90,23 @@ static inline int sigisemptyset(sigset_t *set)
90#define _SIG_SET_BINOP(name, op) \ 90#define _SIG_SET_BINOP(name, op) \
91static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \ 91static inline void name(sigset_t *r, const sigset_t *a, const sigset_t *b) \
92{ \ 92{ \
93 extern void _NSIG_WORDS_is_unsupported_size(void); \
94 unsigned long a0, a1, a2, a3, b0, b1, b2, b3; \ 93 unsigned long a0, a1, a2, a3, b0, b1, b2, b3; \
95 \ 94 \
96 switch (_NSIG_WORDS) { \ 95 switch (_NSIG_WORDS) { \
97 case 4: \ 96 case 4: \
98 a3 = a->sig[3]; a2 = a->sig[2]; \ 97 a3 = a->sig[3]; a2 = a->sig[2]; \
99 b3 = b->sig[3]; b2 = b->sig[2]; \ 98 b3 = b->sig[3]; b2 = b->sig[2]; \
100 r->sig[3] = op(a3, b3); \ 99 r->sig[3] = op(a3, b3); \
101 r->sig[2] = op(a2, b2); \ 100 r->sig[2] = op(a2, b2); \
102 case 2: \ 101 case 2: \
103 a1 = a->sig[1]; b1 = b->sig[1]; \ 102 a1 = a->sig[1]; b1 = b->sig[1]; \
104 r->sig[1] = op(a1, b1); \ 103 r->sig[1] = op(a1, b1); \
105 case 1: \ 104 case 1: \
106 a0 = a->sig[0]; b0 = b->sig[0]; \ 105 a0 = a->sig[0]; b0 = b->sig[0]; \
107 r->sig[0] = op(a0, b0); \ 106 r->sig[0] = op(a0, b0); \
108 break; \ 107 break; \
109 default: \ 108 default: \
110 _NSIG_WORDS_is_unsupported_size(); \ 109 BUILD_BUG(); \
111 } \ 110 } \
112} 111}
113 112
@@ -128,16 +127,14 @@ _SIG_SET_BINOP(sigandnsets, _sig_andn)
128#define _SIG_SET_OP(name, op) \ 127#define _SIG_SET_OP(name, op) \
129static inline void name(sigset_t *set) \ 128static inline void name(sigset_t *set) \
130{ \ 129{ \
131 extern void _NSIG_WORDS_is_unsupported_size(void); \
132 \
133 switch (_NSIG_WORDS) { \ 130 switch (_NSIG_WORDS) { \
134 case 4: set->sig[3] = op(set->sig[3]); \ 131 case 4: set->sig[3] = op(set->sig[3]); \
135 set->sig[2] = op(set->sig[2]); \ 132 set->sig[2] = op(set->sig[2]); \
136 case 2: set->sig[1] = op(set->sig[1]); \ 133 case 2: set->sig[1] = op(set->sig[1]); \
137 case 1: set->sig[0] = op(set->sig[0]); \ 134 case 1: set->sig[0] = op(set->sig[0]); \
138 break; \ 135 break; \
139 default: \ 136 default: \
140 _NSIG_WORDS_is_unsupported_size(); \ 137 BUILD_BUG(); \
141 } \ 138 } \
142} 139}
143 140
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index abde271c18ae..85ab7d72b54c 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -20,6 +20,8 @@
20#include <linux/time.h> 20#include <linux/time.h>
21#include <linux/bug.h> 21#include <linux/bug.h>
22#include <linux/cache.h> 22#include <linux/cache.h>
23#include <linux/rbtree.h>
24#include <linux/socket.h>
23 25
24#include <linux/atomic.h> 26#include <linux/atomic.h>
25#include <asm/types.h> 27#include <asm/types.h>
@@ -28,7 +30,6 @@
28#include <linux/textsearch.h> 30#include <linux/textsearch.h>
29#include <net/checksum.h> 31#include <net/checksum.h>
30#include <linux/rcupdate.h> 32#include <linux/rcupdate.h>
31#include <linux/dmaengine.h>
32#include <linux/hrtimer.h> 33#include <linux/hrtimer.h>
33#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
34#include <linux/netdev_features.h> 35#include <linux/netdev_features.h>
@@ -47,11 +48,29 @@
47 * 48 *
48 * The hardware you're dealing with doesn't calculate the full checksum 49 * The hardware you're dealing with doesn't calculate the full checksum
49 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums 50 * (as in CHECKSUM_COMPLETE), but it does parse headers and verify checksums
50 * for specific protocols e.g. TCP/UDP/SCTP, then, for such packets it will 51 * for specific protocols. For such packets it will set CHECKSUM_UNNECESSARY
51 * set CHECKSUM_UNNECESSARY if their checksums are okay. skb->csum is still 52 * if their checksums are okay. skb->csum is still undefined in this case
52 * undefined in this case though. It is a bad option, but, unfortunately, 53 * though. It is a bad option, but, unfortunately, nowadays most vendors do
53 * nowadays most vendors do this. Apparently with the secret goal to sell 54 * this. Apparently with the secret goal to sell you new devices, when you
54 * you new devices, when you will add new protocol to your host, f.e. IPv6 8) 55 * will add new protocol to your host, f.e. IPv6 8)
56 *
57 * CHECKSUM_UNNECESSARY is applicable to following protocols:
58 * TCP: IPv6 and IPv4.
59 * UDP: IPv4 and IPv6. A device may apply CHECKSUM_UNNECESSARY to a
60 * zero UDP checksum for either IPv4 or IPv6, the networking stack
61 * may perform further validation in this case.
62 * GRE: only if the checksum is present in the header.
63 * SCTP: indicates the CRC in SCTP header has been validated.
64 *
65 * skb->csum_level indicates the number of consecutive checksums found in
66 * the packet minus one that have been verified as CHECKSUM_UNNECESSARY.
67 * For instance if a device receives an IPv6->UDP->GRE->IPv4->TCP packet
68 * and a device is able to verify the checksums for UDP (possibly zero),
69 * GRE (checksum flag is set), and TCP-- skb->csum_level would be set to
70 * two. If the device were only able to verify the UDP checksum and not
71 * GRE, either because it doesn't support GRE checksum of because GRE
72 * checksum is bad, skb->csum_level would be set to zero (TCP checksum is
73 * not considered in this case).
55 * 74 *
56 * CHECKSUM_COMPLETE: 75 * CHECKSUM_COMPLETE:
57 * 76 *
@@ -112,6 +131,9 @@
112#define CHECKSUM_COMPLETE 2 131#define CHECKSUM_COMPLETE 2
113#define CHECKSUM_PARTIAL 3 132#define CHECKSUM_PARTIAL 3
114 133
134/* Maximum value in skb->csum_level */
135#define SKB_MAX_CSUM_LEVEL 3
136
115#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) 137#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES)
116#define SKB_WITH_OVERHEAD(X) \ 138#define SKB_WITH_OVERHEAD(X) \
117 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) 139 ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -128,6 +150,8 @@
128struct net_device; 150struct net_device;
129struct scatterlist; 151struct scatterlist;
130struct pipe_inode_info; 152struct pipe_inode_info;
153struct iov_iter;
154struct napi_struct;
131 155
132#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 156#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
133struct nf_conntrack { 157struct nf_conntrack {
@@ -135,7 +159,7 @@ struct nf_conntrack {
135}; 159};
136#endif 160#endif
137 161
138#ifdef CONFIG_BRIDGE_NETFILTER 162#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
139struct nf_bridge_info { 163struct nf_bridge_info {
140 atomic_t use; 164 atomic_t use;
141 unsigned int mask; 165 unsigned int mask;
@@ -318,9 +342,9 @@ struct skb_shared_info {
318 342
319 343
320enum { 344enum {
321 SKB_FCLONE_UNAVAILABLE, 345 SKB_FCLONE_UNAVAILABLE, /* skb has no fclone (from head_cache) */
322 SKB_FCLONE_ORIG, 346 SKB_FCLONE_ORIG, /* orig skb (from fclone_cache) */
323 SKB_FCLONE_CLONE, 347 SKB_FCLONE_CLONE, /* companion fclone skb (from fclone_cache) */
324}; 348};
325 349
326enum { 350enum {
@@ -349,8 +373,7 @@ enum {
349 373
350 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11, 374 SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
351 375
352 SKB_GSO_MPLS = 1 << 12, 376 SKB_GSO_TUNNEL_REMCSUM = 1 << 12,
353
354}; 377};
355 378
356#if BITS_PER_LONG > 32 379#if BITS_PER_LONG > 32
@@ -419,6 +442,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
419 * @next: Next buffer in list 442 * @next: Next buffer in list
420 * @prev: Previous buffer in list 443 * @prev: Previous buffer in list
421 * @tstamp: Time we arrived/left 444 * @tstamp: Time we arrived/left
445 * @rbnode: RB tree node, alternative to next/prev for netem/tcp
422 * @sk: Socket we are owned by 446 * @sk: Socket we are owned by
423 * @dev: Device we arrived on/are leaving by 447 * @dev: Device we arrived on/are leaving by
424 * @cb: Control buffer. Free for use by every layer. Put private vars here 448 * @cb: Control buffer. Free for use by every layer. Put private vars here
@@ -452,6 +476,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
452 * @tc_verd: traffic control verdict 476 * @tc_verd: traffic control verdict
453 * @hash: the packet hash 477 * @hash: the packet hash
454 * @queue_mapping: Queue mapping for multiqueue devices 478 * @queue_mapping: Queue mapping for multiqueue devices
479 * @xmit_more: More SKBs are pending for this queue
455 * @ndisc_nodetype: router type (from link layer) 480 * @ndisc_nodetype: router type (from link layer)
456 * @ooo_okay: allow the mapping of a socket to a queue to be changed 481 * @ooo_okay: allow the mapping of a socket to a queue to be changed
457 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport 482 * @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -460,8 +485,6 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
460 * @wifi_acked_valid: wifi_acked was set 485 * @wifi_acked_valid: wifi_acked was set
461 * @wifi_acked: whether frame was acked on wifi or not 486 * @wifi_acked: whether frame was acked on wifi or not
462 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS 487 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
463 * @dma_cookie: a cookie to one of several possible DMA operations
464 * done by skb DMA functions
465 * @napi_id: id of the NAPI struct this skb came from 488 * @napi_id: id of the NAPI struct this skb came from
466 * @secmark: security marking 489 * @secmark: security marking
467 * @mark: Generic packet mark 490 * @mark: Generic packet mark
@@ -484,15 +507,19 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
484 */ 507 */
485 508
486struct sk_buff { 509struct sk_buff {
487 /* These two members must be first. */
488 struct sk_buff *next;
489 struct sk_buff *prev;
490
491 union { 510 union {
492 ktime_t tstamp; 511 struct {
493 struct skb_mstamp skb_mstamp; 512 /* These two members must be first. */
513 struct sk_buff *next;
514 struct sk_buff *prev;
515
516 union {
517 ktime_t tstamp;
518 struct skb_mstamp skb_mstamp;
519 };
520 };
521 struct rb_node rbnode; /* used in netem & tcp stack */
494 }; 522 };
495
496 struct sock *sk; 523 struct sock *sk;
497 struct net_device *dev; 524 struct net_device *dev;
498 525
@@ -505,87 +532,102 @@ struct sk_buff {
505 char cb[48] __aligned(8); 532 char cb[48] __aligned(8);
506 533
507 unsigned long _skb_refdst; 534 unsigned long _skb_refdst;
535 void (*destructor)(struct sk_buff *skb);
508#ifdef CONFIG_XFRM 536#ifdef CONFIG_XFRM
509 struct sec_path *sp; 537 struct sec_path *sp;
510#endif 538#endif
539#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
540 struct nf_conntrack *nfct;
541#endif
542#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
543 struct nf_bridge_info *nf_bridge;
544#endif
511 unsigned int len, 545 unsigned int len,
512 data_len; 546 data_len;
513 __u16 mac_len, 547 __u16 mac_len,
514 hdr_len; 548 hdr_len;
515 union { 549
516 __wsum csum; 550 /* Following fields are _not_ copied in __copy_skb_header()
517 struct { 551 * Note that queue_mapping is here mostly to fill a hole.
518 __u16 csum_start; 552 */
519 __u16 csum_offset;
520 };
521 };
522 __u32 priority;
523 kmemcheck_bitfield_begin(flags1); 553 kmemcheck_bitfield_begin(flags1);
524 __u8 ignore_df:1, 554 __u16 queue_mapping;
525 cloned:1, 555 __u8 cloned:1,
526 ip_summed:2,
527 nohdr:1, 556 nohdr:1,
528 nfctinfo:3;
529 __u8 pkt_type:3,
530 fclone:2, 557 fclone:2,
531 ipvs_property:1,
532 peeked:1, 558 peeked:1,
533 nf_trace:1; 559 head_frag:1,
560 xmit_more:1;
561 /* one bit hole */
534 kmemcheck_bitfield_end(flags1); 562 kmemcheck_bitfield_end(flags1);
535 __be16 protocol;
536 563
537 void (*destructor)(struct sk_buff *skb); 564 /* fields enclosed in headers_start/headers_end are copied
538#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 565 * using a single memcpy() in __copy_skb_header()
539 struct nf_conntrack *nfct; 566 */
540#endif 567 /* private: */
541#ifdef CONFIG_BRIDGE_NETFILTER 568 __u32 headers_start[0];
542 struct nf_bridge_info *nf_bridge; 569 /* public: */
543#endif
544
545 int skb_iif;
546
547 __u32 hash;
548
549 __be16 vlan_proto;
550 __u16 vlan_tci;
551 570
552#ifdef CONFIG_NET_SCHED 571/* if you move pkt_type around you also must adapt those constants */
553 __u16 tc_index; /* traffic control index */ 572#ifdef __BIG_ENDIAN_BITFIELD
554#ifdef CONFIG_NET_CLS_ACT 573#define PKT_TYPE_MAX (7 << 5)
555 __u16 tc_verd; /* traffic control verdict */ 574#else
556#endif 575#define PKT_TYPE_MAX 7
557#endif 576#endif
577#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
558 578
559 __u16 queue_mapping; 579 __u8 __pkt_type_offset[0];
560 kmemcheck_bitfield_begin(flags2); 580 __u8 pkt_type:3;
561#ifdef CONFIG_IPV6_NDISC_NODETYPE
562 __u8 ndisc_nodetype:2;
563#endif
564 __u8 pfmemalloc:1; 581 __u8 pfmemalloc:1;
582 __u8 ignore_df:1;
583 __u8 nfctinfo:3;
584
585 __u8 nf_trace:1;
586 __u8 ip_summed:2;
565 __u8 ooo_okay:1; 587 __u8 ooo_okay:1;
566 __u8 l4_hash:1; 588 __u8 l4_hash:1;
567 __u8 sw_hash:1; 589 __u8 sw_hash:1;
568 __u8 wifi_acked_valid:1; 590 __u8 wifi_acked_valid:1;
569 __u8 wifi_acked:1; 591 __u8 wifi_acked:1;
592
570 __u8 no_fcs:1; 593 __u8 no_fcs:1;
571 __u8 head_frag:1; 594 /* Indicates the inner headers are valid in the skbuff. */
572 /* Encapsulation protocol and NIC drivers should use
573 * this flag to indicate to each other if the skb contains
574 * encapsulated packet or not and maybe use the inner packet
575 * headers if needed
576 */
577 __u8 encapsulation:1; 595 __u8 encapsulation:1;
578 __u8 encap_hdr_csum:1; 596 __u8 encap_hdr_csum:1;
579 __u8 csum_valid:1; 597 __u8 csum_valid:1;
580 __u8 csum_complete_sw:1; 598 __u8 csum_complete_sw:1;
581 /* 2/4 bit hole (depending on ndisc_nodetype presence) */ 599 __u8 csum_level:2;
582 kmemcheck_bitfield_end(flags2); 600 __u8 csum_bad:1;
601
602#ifdef CONFIG_IPV6_NDISC_NODETYPE
603 __u8 ndisc_nodetype:2;
604#endif
605 __u8 ipvs_property:1;
606 __u8 inner_protocol_type:1;
607 __u8 remcsum_offload:1;
608 /* 3 or 5 bit hole */
609
610#ifdef CONFIG_NET_SCHED
611 __u16 tc_index; /* traffic control index */
612#ifdef CONFIG_NET_CLS_ACT
613 __u16 tc_verd; /* traffic control verdict */
614#endif
615#endif
583 616
584#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
585 union { 617 union {
586 unsigned int napi_id; 618 __wsum csum;
587 dma_cookie_t dma_cookie; 619 struct {
620 __u16 csum_start;
621 __u16 csum_offset;
622 };
588 }; 623 };
624 __u32 priority;
625 int skb_iif;
626 __u32 hash;
627 __be16 vlan_proto;
628 __u16 vlan_tci;
629#ifdef CONFIG_NET_RX_BUSY_POLL
630 unsigned int napi_id;
589#endif 631#endif
590#ifdef CONFIG_NETWORK_SECMARK 632#ifdef CONFIG_NETWORK_SECMARK
591 __u32 secmark; 633 __u32 secmark;
@@ -596,13 +638,24 @@ struct sk_buff {
596 __u32 reserved_tailroom; 638 __u32 reserved_tailroom;
597 }; 639 };
598 640
599 __be16 inner_protocol; 641 union {
642 __be16 inner_protocol;
643 __u8 inner_ipproto;
644 };
645
600 __u16 inner_transport_header; 646 __u16 inner_transport_header;
601 __u16 inner_network_header; 647 __u16 inner_network_header;
602 __u16 inner_mac_header; 648 __u16 inner_mac_header;
649
650 __be16 protocol;
603 __u16 transport_header; 651 __u16 transport_header;
604 __u16 network_header; 652 __u16 network_header;
605 __u16 mac_header; 653 __u16 mac_header;
654
655 /* private: */
656 __u32 headers_end[0];
657 /* public: */
658
606 /* These elements must be at the end, see alloc_skb() for details. */ 659 /* These elements must be at the end, see alloc_skb() for details. */
607 sk_buff_data_t tail; 660 sk_buff_data_t tail;
608 sk_buff_data_t end; 661 sk_buff_data_t end;
@@ -621,6 +674,7 @@ struct sk_buff {
621 674
622#define SKB_ALLOC_FCLONE 0x01 675#define SKB_ALLOC_FCLONE 0x01
623#define SKB_ALLOC_RX 0x02 676#define SKB_ALLOC_RX 0x02
677#define SKB_ALLOC_NAPI 0x04
624 678
625/* Returns true if the skb was allocated from PFMEMALLOC reserves */ 679/* Returns true if the skb was allocated from PFMEMALLOC reserves */
626static inline bool skb_pfmemalloc(const struct sk_buff *skb) 680static inline bool skb_pfmemalloc(const struct sk_buff *skb)
@@ -665,9 +719,6 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
665 skb->_skb_refdst = (unsigned long)dst; 719 skb->_skb_refdst = (unsigned long)dst;
666} 720}
667 721
668void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
669 bool force);
670
671/** 722/**
672 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference 723 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
673 * @skb: buffer 724 * @skb: buffer
@@ -680,24 +731,8 @@ void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
680 */ 731 */
681static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) 732static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
682{ 733{
683 __skb_dst_set_noref(skb, dst, false); 734 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
684} 735 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
685
686/**
687 * skb_dst_set_noref_force - sets skb dst, without taking reference
688 * @skb: buffer
689 * @dst: dst entry
690 *
691 * Sets skb dst, assuming a reference was not taken on dst.
692 * No reference is taken and no dst_release will be called. While for
693 * cached dsts deferred reclaim is a basic feature, for entries that are
694 * not cached it is caller's job to guarantee that last dst_release for
695 * provided dst happens when nobody uses it, eg. after a RCU grace period.
696 */
697static inline void skb_dst_set_noref_force(struct sk_buff *skb,
698 struct dst_entry *dst)
699{
700 __skb_dst_set_noref(skb, dst, true);
701} 736}
702 737
703/** 738/**
@@ -734,6 +769,41 @@ static inline struct sk_buff *alloc_skb(unsigned int size,
734 return __alloc_skb(size, priority, 0, NUMA_NO_NODE); 769 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
735} 770}
736 771
772struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
773 unsigned long data_len,
774 int max_page_order,
775 int *errcode,
776 gfp_t gfp_mask);
777
778/* Layout of fast clones : [skb1][skb2][fclone_ref] */
779struct sk_buff_fclones {
780 struct sk_buff skb1;
781
782 struct sk_buff skb2;
783
784 atomic_t fclone_ref;
785};
786
787/**
788 * skb_fclone_busy - check if fclone is busy
789 * @skb: buffer
790 *
791 * Returns true is skb is a fast clone, and its clone is not freed.
792 * Some drivers call skb_orphan() in their ndo_start_xmit(),
793 * so we also check that this didnt happen.
794 */
795static inline bool skb_fclone_busy(const struct sock *sk,
796 const struct sk_buff *skb)
797{
798 const struct sk_buff_fclones *fclones;
799
800 fclones = container_of(skb, struct sk_buff_fclones, skb1);
801
802 return skb->fclone == SKB_FCLONE_ORIG &&
803 atomic_read(&fclones->fclone_ref) > 1 &&
804 fclones->skb2.sk == sk;
805}
806
737static inline struct sk_buff *alloc_skb_fclone(unsigned int size, 807static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
738 gfp_t priority) 808 gfp_t priority)
739{ 809{
@@ -1042,6 +1112,7 @@ static inline int skb_header_cloned(const struct sk_buff *skb)
1042 * Drop a reference to the header part of the buffer. This is done 1112 * Drop a reference to the header part of the buffer. This is done
1043 * by acquiring a payload reference. You must not read from the header 1113 * by acquiring a payload reference. You must not read from the header
1044 * part of skb->data after this. 1114 * part of skb->data after this.
1115 * Note : Check if you can use __skb_header_release() instead.
1045 */ 1116 */
1046static inline void skb_header_release(struct sk_buff *skb) 1117static inline void skb_header_release(struct sk_buff *skb)
1047{ 1118{
@@ -1051,6 +1122,20 @@ static inline void skb_header_release(struct sk_buff *skb)
1051} 1122}
1052 1123
1053/** 1124/**
1125 * __skb_header_release - release reference to header
1126 * @skb: buffer to operate on
1127 *
1128 * Variant of skb_header_release() assuming skb is private to caller.
1129 * We can avoid one atomic operation.
1130 */
1131static inline void __skb_header_release(struct sk_buff *skb)
1132{
1133 skb->nohdr = 1;
1134 atomic_set(&skb_shinfo(skb)->dataref, 1 + (1 << SKB_DATAREF_SHIFT));
1135}
1136
1137
1138/**
1054 * skb_shared - is the buffer shared 1139 * skb_shared - is the buffer shared
1055 * @skb: buffer to check 1140 * @skb: buffer to check
1056 * 1141 *
@@ -1116,7 +1201,12 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
1116 might_sleep_if(pri & __GFP_WAIT); 1201 might_sleep_if(pri & __GFP_WAIT);
1117 if (skb_cloned(skb)) { 1202 if (skb_cloned(skb)) {
1118 struct sk_buff *nskb = skb_copy(skb, pri); 1203 struct sk_buff *nskb = skb_copy(skb, pri);
1119 kfree_skb(skb); /* Free our shared copy */ 1204
1205 /* Free our shared copy */
1206 if (likely(nskb))
1207 consume_skb(skb);
1208 else
1209 kfree_skb(skb);
1120 skb = nskb; 1210 skb = nskb;
1121 } 1211 }
1122 return skb; 1212 return skb;
@@ -1675,6 +1765,23 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
1675 skb->tail += len; 1765 skb->tail += len;
1676} 1766}
1677 1767
1768#define ENCAP_TYPE_ETHER 0
1769#define ENCAP_TYPE_IPPROTO 1
1770
1771static inline void skb_set_inner_protocol(struct sk_buff *skb,
1772 __be16 protocol)
1773{
1774 skb->inner_protocol = protocol;
1775 skb->inner_protocol_type = ENCAP_TYPE_ETHER;
1776}
1777
1778static inline void skb_set_inner_ipproto(struct sk_buff *skb,
1779 __u8 ipproto)
1780{
1781 skb->inner_ipproto = ipproto;
1782 skb->inner_protocol_type = ENCAP_TYPE_IPPROTO;
1783}
1784
1678static inline void skb_reset_inner_headers(struct sk_buff *skb) 1785static inline void skb_reset_inner_headers(struct sk_buff *skb)
1679{ 1786{
1680 skb->inner_mac_header = skb->mac_header; 1787 skb->inner_mac_header = skb->mac_header;
@@ -1860,18 +1967,6 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1860 return pskb_may_pull(skb, skb_network_offset(skb) + len); 1967 return pskb_may_pull(skb, skb_network_offset(skb) + len);
1861} 1968}
1862 1969
1863static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
1864{
1865 /* Only continue with checksum unnecessary if device indicated
1866 * it is valid across encapsulation (skb->encapsulation was set).
1867 */
1868 if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
1869 skb->ip_summed = CHECKSUM_NONE;
1870
1871 skb->encapsulation = 0;
1872 skb->csum_valid = 0;
1873}
1874
1875/* 1970/*
1876 * CPUs often take a performance hit when accessing unaligned memory 1971 * CPUs often take a performance hit when accessing unaligned memory
1877 * locations. The actual performance hit varies, it can be small if the 1972 * locations. The actual performance hit varies, it can be small if the
@@ -2071,47 +2166,61 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
2071 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC); 2166 return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
2072} 2167}
2073 2168
2169void *napi_alloc_frag(unsigned int fragsz);
2170struct sk_buff *__napi_alloc_skb(struct napi_struct *napi,
2171 unsigned int length, gfp_t gfp_mask);
2172static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi,
2173 unsigned int length)
2174{
2175 return __napi_alloc_skb(napi, length, GFP_ATOMIC);
2176}
2177
2074/** 2178/**
2075 * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data 2179 * __dev_alloc_pages - allocate page for network Rx
2076 * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX 2180 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2077 * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used 2181 * @order: size of the allocation
2078 * @order: size of the allocation
2079 * 2182 *
2080 * Allocate a new page. 2183 * Allocate a new page.
2081 * 2184 *
2082 * %NULL is returned if there is no free memory. 2185 * %NULL is returned if there is no free memory.
2083*/ 2186*/
2084static inline struct page *__skb_alloc_pages(gfp_t gfp_mask, 2187static inline struct page *__dev_alloc_pages(gfp_t gfp_mask,
2085 struct sk_buff *skb, 2188 unsigned int order)
2086 unsigned int order) 2189{
2087{ 2190 /* This piece of code contains several assumptions.
2088 struct page *page; 2191 * 1. This is for device Rx, therefor a cold page is preferred.
2089 2192 * 2. The expectation is the user wants a compound page.
2090 gfp_mask |= __GFP_COLD; 2193 * 3. If requesting a order 0 page it will not be compound
2091 2194 * due to the check to see if order has a value in prep_new_page
2092 if (!(gfp_mask & __GFP_NOMEMALLOC)) 2195 * 4. __GFP_MEMALLOC is ignored if __GFP_NOMEMALLOC is set due to
2093 gfp_mask |= __GFP_MEMALLOC; 2196 * code in gfp_to_alloc_flags that should be enforcing this.
2197 */
2198 gfp_mask |= __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC;
2094 2199
2095 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); 2200 return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
2096 if (skb && page && page->pfmemalloc) 2201}
2097 skb->pfmemalloc = true;
2098 2202
2099 return page; 2203static inline struct page *dev_alloc_pages(unsigned int order)
2204{
2205 return __dev_alloc_pages(GFP_ATOMIC, order);
2100} 2206}
2101 2207
2102/** 2208/**
2103 * __skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data 2209 * __dev_alloc_page - allocate a page for network Rx
2104 * @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX 2210 * @gfp_mask: allocation priority. Set __GFP_NOMEMALLOC if not for network Rx
2105 * @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
2106 * 2211 *
2107 * Allocate a new page. 2212 * Allocate a new page.
2108 * 2213 *
2109 * %NULL is returned if there is no free memory. 2214 * %NULL is returned if there is no free memory.
2110 */ 2215 */
2111static inline struct page *__skb_alloc_page(gfp_t gfp_mask, 2216static inline struct page *__dev_alloc_page(gfp_t gfp_mask)
2112 struct sk_buff *skb) 2217{
2218 return __dev_alloc_pages(gfp_mask, 0);
2219}
2220
2221static inline struct page *dev_alloc_page(void)
2113{ 2222{
2114 return __skb_alloc_pages(gfp_mask, skb, 0); 2223 return __dev_alloc_page(GFP_ATOMIC);
2115} 2224}
2116 2225
2117/** 2226/**
@@ -2343,7 +2452,6 @@ static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
2343 * is untouched. Otherwise it is extended. Returns zero on 2452 * is untouched. Otherwise it is extended. Returns zero on
2344 * success. The skb is freed on error. 2453 * success. The skb is freed on error.
2345 */ 2454 */
2346
2347static inline int skb_padto(struct sk_buff *skb, unsigned int len) 2455static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2348{ 2456{
2349 unsigned int size = skb->len; 2457 unsigned int size = skb->len;
@@ -2352,6 +2460,29 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len)
2352 return skb_pad(skb, len - size); 2460 return skb_pad(skb, len - size);
2353} 2461}
2354 2462
2463/**
2464 * skb_put_padto - increase size and pad an skbuff up to a minimal size
2465 * @skb: buffer to pad
2466 * @len: minimal length
2467 *
2468 * Pads up a buffer to ensure the trailing bytes exist and are
2469 * blanked. If the buffer already contains sufficient data it
2470 * is untouched. Otherwise it is extended. Returns zero on
2471 * success. The skb is freed on error.
2472 */
2473static inline int skb_put_padto(struct sk_buff *skb, unsigned int len)
2474{
2475 unsigned int size = skb->len;
2476
2477 if (unlikely(size < len)) {
2478 len -= size;
2479 if (skb_pad(skb, len))
2480 return -ENOMEM;
2481 __skb_put(skb, len);
2482 }
2483 return 0;
2484}
2485
2355static inline int skb_add_data(struct sk_buff *skb, 2486static inline int skb_add_data(struct sk_buff *skb,
2356 char __user *from, int copy) 2487 char __user *from, int copy)
2357{ 2488{
@@ -2524,18 +2655,18 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
2524 int *err); 2655 int *err);
2525unsigned int datagram_poll(struct file *file, struct socket *sock, 2656unsigned int datagram_poll(struct file *file, struct socket *sock,
2526 struct poll_table_struct *wait); 2657 struct poll_table_struct *wait);
2527int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, 2658int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
2528 struct iovec *to, int size); 2659 struct iov_iter *to, int size);
2529int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, 2660static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
2530 struct iovec *iov); 2661 struct msghdr *msg, int size)
2531int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset, 2662{
2532 const struct iovec *from, int from_offset, 2663 return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
2533 int len); 2664}
2534int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *frm, 2665int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
2535 int offset, size_t count); 2666 struct msghdr *msg);
2536int skb_copy_datagram_const_iovec(const struct sk_buff *from, int offset, 2667int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
2537 const struct iovec *to, int to_offset, 2668 struct iov_iter *from, int len);
2538 int size); 2669int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
2539void skb_free_datagram(struct sock *sk, struct sk_buff *skb); 2670void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
2540void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb); 2671void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb);
2541int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); 2672int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags);
@@ -2556,6 +2687,20 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
2556unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); 2687unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
2557struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); 2688struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
2558struct sk_buff *skb_vlan_untag(struct sk_buff *skb); 2689struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
2690int skb_ensure_writable(struct sk_buff *skb, int write_len);
2691int skb_vlan_pop(struct sk_buff *skb);
2692int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
2693
2694static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
2695{
2696 /* XXX: stripping const */
2697 return memcpy_fromiovec(data, (struct iovec *)msg->msg_iter.iov, len);
2698}
2699
2700static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
2701{
2702 return copy_to_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
2703}
2559 2704
2560struct skb_checksum_ops { 2705struct skb_checksum_ops {
2561 __wsum (*update)(const void *mem, int len, __wsum wsum); 2706 __wsum (*update)(const void *mem, int len, __wsum wsum);
@@ -2567,20 +2712,26 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2567__wsum skb_checksum(const struct sk_buff *skb, int offset, int len, 2712__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
2568 __wsum csum); 2713 __wsum csum);
2569 2714
2570static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 2715static inline void *__skb_header_pointer(const struct sk_buff *skb, int offset,
2571 int len, void *buffer) 2716 int len, void *data, int hlen, void *buffer)
2572{ 2717{
2573 int hlen = skb_headlen(skb);
2574
2575 if (hlen - offset >= len) 2718 if (hlen - offset >= len)
2576 return skb->data + offset; 2719 return data + offset;
2577 2720
2578 if (skb_copy_bits(skb, offset, buffer, len) < 0) 2721 if (!skb ||
2722 skb_copy_bits(skb, offset, buffer, len) < 0)
2579 return NULL; 2723 return NULL;
2580 2724
2581 return buffer; 2725 return buffer;
2582} 2726}
2583 2727
2728static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2729 int len, void *buffer)
2730{
2731 return __skb_header_pointer(skb, offset, len, skb->data,
2732 skb_headlen(skb), buffer);
2733}
2734
2584/** 2735/**
2585 * skb_needs_linearize - check if we need to linearize a given skb 2736 * skb_needs_linearize - check if we need to linearize a given skb
2586 * depending on the given device features. 2737 * depending on the given device features.
@@ -2671,6 +2822,8 @@ static inline ktime_t net_invalid_timestamp(void)
2671 return ktime_set(0, 0); 2822 return ktime_set(0, 0);
2672} 2823}
2673 2824
2825struct sk_buff *skb_clone_sk(struct sk_buff *skb);
2826
2674#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING 2827#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
2675 2828
2676void skb_clone_tx_timestamp(struct sk_buff *skb); 2829void skb_clone_tx_timestamp(struct sk_buff *skb);
@@ -2786,6 +2939,42 @@ static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
2786 0 : __skb_checksum_complete(skb); 2939 0 : __skb_checksum_complete(skb);
2787} 2940}
2788 2941
2942static inline void __skb_decr_checksum_unnecessary(struct sk_buff *skb)
2943{
2944 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2945 if (skb->csum_level == 0)
2946 skb->ip_summed = CHECKSUM_NONE;
2947 else
2948 skb->csum_level--;
2949 }
2950}
2951
2952static inline void __skb_incr_checksum_unnecessary(struct sk_buff *skb)
2953{
2954 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2955 if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
2956 skb->csum_level++;
2957 } else if (skb->ip_summed == CHECKSUM_NONE) {
2958 skb->ip_summed = CHECKSUM_UNNECESSARY;
2959 skb->csum_level = 0;
2960 }
2961}
2962
2963static inline void __skb_mark_checksum_bad(struct sk_buff *skb)
2964{
2965 /* Mark current checksum as bad (typically called from GRO
2966 * path). In the case that ip_summed is CHECKSUM_NONE
2967 * this must be the first checksum encountered in the packet.
2968 * When ip_summed is CHECKSUM_UNNECESSARY, this is the first
2969 * checksum after the last one validated. For UDP, a zero
2970 * checksum can not be marked as bad.
2971 */
2972
2973 if (skb->ip_summed == CHECKSUM_NONE ||
2974 skb->ip_summed == CHECKSUM_UNNECESSARY)
2975 skb->csum_bad = 1;
2976}
2977
2789/* Check if we need to perform checksum complete validation. 2978/* Check if we need to perform checksum complete validation.
2790 * 2979 *
2791 * Returns true if checksum complete is needed, false otherwise 2980 * Returns true if checksum complete is needed, false otherwise
@@ -2797,6 +2986,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
2797{ 2986{
2798 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) { 2987 if (skb_csum_unnecessary(skb) || (zero_okay && !check)) {
2799 skb->csum_valid = 1; 2988 skb->csum_valid = 1;
2989 __skb_decr_checksum_unnecessary(skb);
2800 return false; 2990 return false;
2801 } 2991 }
2802 2992
@@ -2826,6 +3016,9 @@ static inline __sum16 __skb_checksum_validate_complete(struct sk_buff *skb,
2826 skb->csum_valid = 1; 3016 skb->csum_valid = 1;
2827 return 0; 3017 return 0;
2828 } 3018 }
3019 } else if (skb->csum_bad) {
3020 /* ip_summed == CHECKSUM_NONE in this case */
3021 return 1;
2829 } 3022 }
2830 3023
2831 skb->csum = psum; 3024 skb->csum = psum;
@@ -2883,6 +3076,26 @@ static inline __wsum null_compute_pseudo(struct sk_buff *skb, int proto)
2883#define skb_checksum_simple_validate(skb) \ 3076#define skb_checksum_simple_validate(skb) \
2884 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo) 3077 __skb_checksum_validate(skb, 0, true, false, 0, null_compute_pseudo)
2885 3078
3079static inline bool __skb_checksum_convert_check(struct sk_buff *skb)
3080{
3081 return (skb->ip_summed == CHECKSUM_NONE &&
3082 skb->csum_valid && !skb->csum_bad);
3083}
3084
3085static inline void __skb_checksum_convert(struct sk_buff *skb,
3086 __sum16 check, __wsum pseudo)
3087{
3088 skb->csum = ~pseudo;
3089 skb->ip_summed = CHECKSUM_COMPLETE;
3090}
3091
3092#define skb_checksum_try_convert(skb, proto, check, compute_pseudo) \
3093do { \
3094 if (__skb_checksum_convert_check(skb)) \
3095 __skb_checksum_convert(skb, check, \
3096 compute_pseudo(skb, proto)); \
3097} while (0)
3098
2886#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3099#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2887void nf_conntrack_destroy(struct nf_conntrack *nfct); 3100void nf_conntrack_destroy(struct nf_conntrack *nfct);
2888static inline void nf_conntrack_put(struct nf_conntrack *nfct) 3101static inline void nf_conntrack_put(struct nf_conntrack *nfct)
@@ -2896,7 +3109,7 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
2896 atomic_inc(&nfct->use); 3109 atomic_inc(&nfct->use);
2897} 3110}
2898#endif 3111#endif
2899#ifdef CONFIG_BRIDGE_NETFILTER 3112#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
2900static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge) 3113static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
2901{ 3114{
2902 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use)) 3115 if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
@@ -2914,7 +3127,7 @@ static inline void nf_reset(struct sk_buff *skb)
2914 nf_conntrack_put(skb->nfct); 3127 nf_conntrack_put(skb->nfct);
2915 skb->nfct = NULL; 3128 skb->nfct = NULL;
2916#endif 3129#endif
2917#ifdef CONFIG_BRIDGE_NETFILTER 3130#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
2918 nf_bridge_put(skb->nf_bridge); 3131 nf_bridge_put(skb->nf_bridge);
2919 skb->nf_bridge = NULL; 3132 skb->nf_bridge = NULL;
2920#endif 3133#endif
@@ -2928,19 +3141,22 @@ static inline void nf_reset_trace(struct sk_buff *skb)
2928} 3141}
2929 3142
2930/* Note: This doesn't put any conntrack and bridge info in dst. */ 3143/* Note: This doesn't put any conntrack and bridge info in dst. */
2931static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) 3144static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
3145 bool copy)
2932{ 3146{
2933#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3147#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2934 dst->nfct = src->nfct; 3148 dst->nfct = src->nfct;
2935 nf_conntrack_get(src->nfct); 3149 nf_conntrack_get(src->nfct);
2936 dst->nfctinfo = src->nfctinfo; 3150 if (copy)
3151 dst->nfctinfo = src->nfctinfo;
2937#endif 3152#endif
2938#ifdef CONFIG_BRIDGE_NETFILTER 3153#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
2939 dst->nf_bridge = src->nf_bridge; 3154 dst->nf_bridge = src->nf_bridge;
2940 nf_bridge_get(src->nf_bridge); 3155 nf_bridge_get(src->nf_bridge);
2941#endif 3156#endif
2942#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) 3157#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
2943 dst->nf_trace = src->nf_trace; 3158 if (copy)
3159 dst->nf_trace = src->nf_trace;
2944#endif 3160#endif
2945} 3161}
2946 3162
@@ -2949,10 +3165,10 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
2949#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 3165#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
2950 nf_conntrack_put(dst->nfct); 3166 nf_conntrack_put(dst->nfct);
2951#endif 3167#endif
2952#ifdef CONFIG_BRIDGE_NETFILTER 3168#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
2953 nf_bridge_put(dst->nf_bridge); 3169 nf_bridge_put(dst->nf_bridge);
2954#endif 3170#endif
2955 __nf_copy(dst, src); 3171 __nf_copy(dst, src, true);
2956} 3172}
2957 3173
2958#ifdef CONFIG_NETWORK_SECMARK 3174#ifdef CONFIG_NETWORK_SECMARK
@@ -3137,7 +3353,9 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
3137 3353
3138int skb_checksum_setup(struct sk_buff *skb, bool recalculate); 3354int skb_checksum_setup(struct sk_buff *skb, bool recalculate);
3139 3355
3140u32 __skb_get_poff(const struct sk_buff *skb); 3356u32 skb_get_poff(const struct sk_buff *skb);
3357u32 __skb_get_poff(const struct sk_buff *skb, void *data,
3358 const struct flow_keys *keys, int hlen);
3141 3359
3142/** 3360/**
3143 * skb_head_is_locked - Determine if the skb->head is locked down 3361 * skb_head_is_locked - Determine if the skb->head is locked down
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 1d9abb7d22a0..9a139b637069 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -158,31 +158,6 @@ size_t ksize(const void *);
158#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) 158#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
159#endif 159#endif
160 160
161#ifdef CONFIG_SLOB
162/*
163 * Common fields provided in kmem_cache by all slab allocators
164 * This struct is either used directly by the allocator (SLOB)
165 * or the allocator must include definitions for all fields
166 * provided in kmem_cache_common in their definition of kmem_cache.
167 *
168 * Once we can do anonymous structs (C11 standard) we could put a
169 * anonymous struct definition in these allocators so that the
170 * separate allocations in the kmem_cache structure of SLAB and
171 * SLUB is no longer needed.
172 */
173struct kmem_cache {
174 unsigned int object_size;/* The original size of the object */
175 unsigned int size; /* The aligned/padded/added on size */
176 unsigned int align; /* Alignment as calculated */
177 unsigned long flags; /* Active flags on the slab */
178 const char *name; /* Slab name for sysfs */
179 int refcount; /* Use counter */
180 void (*ctor)(void *); /* Called on object slot creation */
181 struct list_head list; /* List of all slab caches on the system */
182};
183
184#endif /* CONFIG_SLOB */
185
186/* 161/*
187 * Kmalloc array related definitions 162 * Kmalloc array related definitions
188 */ 163 */
@@ -363,14 +338,6 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
363} 338}
364#endif /* CONFIG_TRACING */ 339#endif /* CONFIG_TRACING */
365 340
366#ifdef CONFIG_SLAB
367#include <linux/slab_def.h>
368#endif
369
370#ifdef CONFIG_SLUB
371#include <linux/slub_def.h>
372#endif
373
374extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); 341extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order);
375 342
376#ifdef CONFIG_TRACING 343#ifdef CONFIG_TRACING
@@ -526,7 +493,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
526 * @memcg: pointer to the memcg this cache belongs to 493 * @memcg: pointer to the memcg this cache belongs to
527 * @list: list_head for the list of all caches in this memcg 494 * @list: list_head for the list of all caches in this memcg
528 * @root_cache: pointer to the global, root cache, this cache was derived from 495 * @root_cache: pointer to the global, root cache, this cache was derived from
529 * @nr_pages: number of pages that belongs to this cache.
530 */ 496 */
531struct memcg_cache_params { 497struct memcg_cache_params {
532 bool is_root_cache; 498 bool is_root_cache;
@@ -539,17 +505,12 @@ struct memcg_cache_params {
539 struct mem_cgroup *memcg; 505 struct mem_cgroup *memcg;
540 struct list_head list; 506 struct list_head list;
541 struct kmem_cache *root_cache; 507 struct kmem_cache *root_cache;
542 atomic_t nr_pages;
543 }; 508 };
544 }; 509 };
545}; 510};
546 511
547int memcg_update_all_caches(int num_memcgs); 512int memcg_update_all_caches(int num_memcgs);
548 513
549struct seq_file;
550int cache_show(struct kmem_cache *s, struct seq_file *m);
551void print_slabinfo_header(struct seq_file *m);
552
553/** 514/**
554 * kmalloc_array - allocate memory for an array. 515 * kmalloc_array - allocate memory for an array.
555 * @n: number of elements. 516 * @n: number of elements.
@@ -582,37 +543,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
582 * allocator where we care about the real place the memory allocation 543 * allocator where we care about the real place the memory allocation
583 * request comes from. 544 * request comes from.
584 */ 545 */
585#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
586 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
587 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
588extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); 546extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
589#define kmalloc_track_caller(size, flags) \ 547#define kmalloc_track_caller(size, flags) \
590 __kmalloc_track_caller(size, flags, _RET_IP_) 548 __kmalloc_track_caller(size, flags, _RET_IP_)
591#else
592#define kmalloc_track_caller(size, flags) \
593 __kmalloc(size, flags)
594#endif /* DEBUG_SLAB */
595 549
596#ifdef CONFIG_NUMA 550#ifdef CONFIG_NUMA
597/*
598 * kmalloc_node_track_caller is a special version of kmalloc_node that
599 * records the calling function of the routine calling it for slab leak
600 * tracking instead of just the calling function (confusing, eh?).
601 * It's useful when the call to kmalloc_node comes from a widely-used
602 * standard allocator where we care about the real place the memory
603 * allocation request comes from.
604 */
605#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
606 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
607 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
608extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); 551extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
609#define kmalloc_node_track_caller(size, flags, node) \ 552#define kmalloc_node_track_caller(size, flags, node) \
610 __kmalloc_node_track_caller(size, flags, node, \ 553 __kmalloc_node_track_caller(size, flags, node, \
611 _RET_IP_) 554 _RET_IP_)
612#else
613#define kmalloc_node_track_caller(size, flags, node) \
614 __kmalloc_node(size, flags, node)
615#endif
616 555
617#else /* CONFIG_NUMA */ 556#else /* CONFIG_NUMA */
618 557
@@ -650,14 +589,7 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
650 return kmalloc_node(size, flags | __GFP_ZERO, node); 589 return kmalloc_node(size, flags | __GFP_ZERO, node);
651} 590}
652 591
653/* 592unsigned int kmem_cache_size(struct kmem_cache *s);
654 * Determine the size of a slab object
655 */
656static inline unsigned int kmem_cache_size(struct kmem_cache *s)
657{
658 return s->object_size;
659}
660
661void __init kmem_cache_init_late(void); 593void __init kmem_cache_init_late(void);
662 594
663#endif /* _LINUX_SLAB_H */ 595#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8235dfbb3b05..b869d1662ba3 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -8,6 +8,8 @@
8 */ 8 */
9 9
10struct kmem_cache { 10struct kmem_cache {
11 struct array_cache __percpu *cpu_cache;
12
11/* 1) Cache tunables. Protected by slab_mutex */ 13/* 1) Cache tunables. Protected by slab_mutex */
12 unsigned int batchcount; 14 unsigned int batchcount;
13 unsigned int limit; 15 unsigned int limit;
@@ -71,23 +73,7 @@ struct kmem_cache {
71 struct memcg_cache_params *memcg_params; 73 struct memcg_cache_params *memcg_params;
72#endif 74#endif
73 75
74/* 6) per-cpu/per-node data, touched during every alloc/free */ 76 struct kmem_cache_node *node[MAX_NUMNODES];
75 /*
76 * We put array[] at the end of kmem_cache, because we want to size
77 * this array to nr_cpu_ids slots instead of NR_CPUS
78 * (see kmem_cache_init())
79 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
80 * is statically defined, so we reserve the max number of cpus.
81 *
82 * We also need to guarantee that the list is able to accomodate a
83 * pointer for each node since "nodelists" uses the remainder of
84 * available pointers.
85 */
86 struct kmem_cache_node **node;
87 struct array_cache *array[NR_CPUS + MAX_NUMNODES];
88 /*
89 * Do not add fields after array[]
90 */
91}; 77};
92 78
93#endif /* _LINUX_SLAB_DEF_H */ 79#endif /* _LINUX_SLAB_DEF_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 34347f26be9b..93dff5fff524 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -100,6 +100,7 @@ int smp_call_function_any(const struct cpumask *mask,
100 smp_call_func_t func, void *info, int wait); 100 smp_call_func_t func, void *info, int wait);
101 101
102void kick_all_cpus_sync(void); 102void kick_all_cpus_sync(void);
103void wake_up_all_idle_cpus(void);
103 104
104/* 105/*
105 * Generic and arch helpers 106 * Generic and arch helpers
@@ -148,6 +149,7 @@ smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
148} 149}
149 150
150static inline void kick_all_cpus_sync(void) { } 151static inline void kick_all_cpus_sync(void) { }
152static inline void wake_up_all_idle_cpus(void) { }
151 153
152#endif /* !SMP */ 154#endif /* !SMP */
153 155
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
new file mode 100644
index 000000000000..dad035c16d94
--- /dev/null
+++ b/include/linux/soc/ti/knav_dma.h
@@ -0,0 +1,175 @@
1/*
2 * Copyright (C) 2014 Texas Instruments Incorporated
3 * Authors: Sandeep Nair <sandeep_n@ti.com
4 * Cyril Chemparathy <cyril@ti.com
5 Santosh Shilimkar <santosh.shilimkar@ti.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation version 2.
10 *
11 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
12 * kind, whether express or implied; without even the implied warranty
13 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
18#define __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__
19
20/*
21 * PKTDMA descriptor manipulation macros for host packet descriptor
22 */
23#define MASK(x) (BIT(x) - 1)
24#define KNAV_DMA_DESC_PKT_LEN_MASK MASK(22)
25#define KNAV_DMA_DESC_PKT_LEN_SHIFT 0
26#define KNAV_DMA_DESC_PS_INFO_IN_SOP BIT(22)
27#define KNAV_DMA_DESC_PS_INFO_IN_DESC 0
28#define KNAV_DMA_DESC_TAG_MASK MASK(8)
29#define KNAV_DMA_DESC_SAG_HI_SHIFT 24
30#define KNAV_DMA_DESC_STAG_LO_SHIFT 16
31#define KNAV_DMA_DESC_DTAG_HI_SHIFT 8
32#define KNAV_DMA_DESC_DTAG_LO_SHIFT 0
33#define KNAV_DMA_DESC_HAS_EPIB BIT(31)
34#define KNAV_DMA_DESC_NO_EPIB 0
35#define KNAV_DMA_DESC_PSLEN_SHIFT 24
36#define KNAV_DMA_DESC_PSLEN_MASK MASK(6)
37#define KNAV_DMA_DESC_ERR_FLAG_SHIFT 20
38#define KNAV_DMA_DESC_ERR_FLAG_MASK MASK(4)
39#define KNAV_DMA_DESC_PSFLAG_SHIFT 16
40#define KNAV_DMA_DESC_PSFLAG_MASK MASK(4)
41#define KNAV_DMA_DESC_RETQ_SHIFT 0
42#define KNAV_DMA_DESC_RETQ_MASK MASK(14)
43#define KNAV_DMA_DESC_BUF_LEN_MASK MASK(22)
44
45#define KNAV_DMA_NUM_EPIB_WORDS 4
46#define KNAV_DMA_NUM_PS_WORDS 16
47#define KNAV_DMA_FDQ_PER_CHAN 4
48
49/* Tx channel scheduling priority */
50enum knav_dma_tx_priority {
51 DMA_PRIO_HIGH = 0,
52 DMA_PRIO_MED_H,
53 DMA_PRIO_MED_L,
54 DMA_PRIO_LOW
55};
56
57/* Rx channel error handling mode during buffer starvation */
58enum knav_dma_rx_err_mode {
59 DMA_DROP = 0,
60 DMA_RETRY
61};
62
63/* Rx flow size threshold configuration */
64enum knav_dma_rx_thresholds {
65 DMA_THRESH_NONE = 0,
66 DMA_THRESH_0 = 1,
67 DMA_THRESH_0_1 = 3,
68 DMA_THRESH_0_1_2 = 7
69};
70
71/* Descriptor type */
72enum knav_dma_desc_type {
73 DMA_DESC_HOST = 0,
74 DMA_DESC_MONOLITHIC = 2
75};
76
77/**
78 * struct knav_dma_tx_cfg: Tx channel configuration
79 * @filt_einfo: Filter extended packet info
80 * @filt_pswords: Filter PS words present
81 * @knav_dma_tx_priority: Tx channel scheduling priority
82 */
83struct knav_dma_tx_cfg {
84 bool filt_einfo;
85 bool filt_pswords;
86 enum knav_dma_tx_priority priority;
87};
88
89/**
90 * struct knav_dma_rx_cfg: Rx flow configuration
91 * @einfo_present: Extended packet info present
92 * @psinfo_present: PS words present
93 * @knav_dma_rx_err_mode: Error during buffer starvation
94 * @knav_dma_desc_type: Host or Monolithic desc
95 * @psinfo_at_sop: PS word located at start of packet
96 * @sop_offset: Start of packet offset
97 * @dst_q: Destination queue for a given flow
98 * @thresh: Rx flow size threshold
99 * @fdq[]: Free desc Queue array
100 * @sz_thresh0: RX packet size threshold 0
101 * @sz_thresh1: RX packet size threshold 1
102 * @sz_thresh2: RX packet size threshold 2
103 */
104struct knav_dma_rx_cfg {
105 bool einfo_present;
106 bool psinfo_present;
107 enum knav_dma_rx_err_mode err_mode;
108 enum knav_dma_desc_type desc_type;
109 bool psinfo_at_sop;
110 unsigned int sop_offset;
111 unsigned int dst_q;
112 enum knav_dma_rx_thresholds thresh;
113 unsigned int fdq[KNAV_DMA_FDQ_PER_CHAN];
114 unsigned int sz_thresh0;
115 unsigned int sz_thresh1;
116 unsigned int sz_thresh2;
117};
118
119/**
120 * struct knav_dma_cfg: Pktdma channel configuration
121 * @sl_cfg: Slave configuration
122 * @tx: Tx channel configuration
123 * @rx: Rx flow configuration
124 */
125struct knav_dma_cfg {
126 enum dma_transfer_direction direction;
127 union {
128 struct knav_dma_tx_cfg tx;
129 struct knav_dma_rx_cfg rx;
130 } u;
131};
132
133/**
134 * struct knav_dma_desc: Host packet descriptor layout
135 * @desc_info: Descriptor information like id, type, length
136 * @tag_info: Flow tag info written in during RX
137 * @packet_info: Queue Manager, policy, flags etc
138 * @buff_len: Buffer length in bytes
139 * @buff: Buffer pointer
140 * @next_desc: For chaining the descriptors
141 * @orig_len: length since 'buff_len' can be overwritten
142 * @orig_buff: buff pointer since 'buff' can be overwritten
143 * @epib: Extended packet info block
144 * @psdata: Protocol specific
145 */
146struct knav_dma_desc {
147 u32 desc_info;
148 u32 tag_info;
149 u32 packet_info;
150 u32 buff_len;
151 u32 buff;
152 u32 next_desc;
153 u32 orig_len;
154 u32 orig_buff;
155 u32 epib[KNAV_DMA_NUM_EPIB_WORDS];
156 u32 psdata[KNAV_DMA_NUM_PS_WORDS];
157 u32 pad[4];
158} ____cacheline_aligned;
159
160#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
161void *knav_dma_open_channel(struct device *dev, const char *name,
162 struct knav_dma_cfg *config);
163void knav_dma_close_channel(void *channel);
164#else
165static inline void *knav_dma_open_channel(struct device *dev, const char *name,
166 struct knav_dma_cfg *config)
167{
168 return (void *) NULL;
169}
170static inline void knav_dma_close_channel(void *channel)
171{}
172
173#endif
174
175#endif /* __SOC_TI_KEYSTONE_NAVIGATOR_DMA_H__ */
diff --git a/include/linux/soc/ti/knav_qmss.h b/include/linux/soc/ti/knav_qmss.h
new file mode 100644
index 000000000000..9f0ebb3bad27
--- /dev/null
+++ b/include/linux/soc/ti/knav_qmss.h
@@ -0,0 +1,90 @@
1/*
2 * Keystone Navigator Queue Management Sub-System header
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
5 * Author: Sandeep Nair <sandeep_n@ti.com>
6 * Cyril Chemparathy <cyril@ti.com>
7 * Santosh Shilimkar <santosh.shilimkar@ti.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
14 * kind, whether express or implied; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 */
18
19#ifndef __SOC_TI_KNAV_QMSS_H__
20#define __SOC_TI_KNAV_QMSS_H__
21
22#include <linux/err.h>
23#include <linux/time.h>
24#include <linux/atomic.h>
25#include <linux/device.h>
26#include <linux/fcntl.h>
27#include <linux/dma-mapping.h>
28
29/* queue types */
30#define KNAV_QUEUE_QPEND ((unsigned)-2) /* interruptible qpend queue */
31#define KNAV_QUEUE_ACC ((unsigned)-3) /* Accumulated queue */
32#define KNAV_QUEUE_GP ((unsigned)-4) /* General purpose queue */
33
34/* queue flags */
35#define KNAV_QUEUE_SHARED 0x0001 /* Queue can be shared */
36
37/**
38 * enum knav_queue_ctrl_cmd - queue operations.
39 * @KNAV_QUEUE_GET_ID: Get the ID number for an open queue
40 * @KNAV_QUEUE_FLUSH: forcibly empty a queue if possible
41 * @KNAV_QUEUE_SET_NOTIFIER: Set a notifier callback to a queue handle.
42 * @KNAV_QUEUE_ENABLE_NOTIFY: Enable notifier callback for a queue handle.
43 * @KNAV_QUEUE_DISABLE_NOTIFY: Disable notifier callback for a queue handle.
44 * @KNAV_QUEUE_GET_COUNT: Get number of queues.
45 */
46enum knav_queue_ctrl_cmd {
47 KNAV_QUEUE_GET_ID,
48 KNAV_QUEUE_FLUSH,
49 KNAV_QUEUE_SET_NOTIFIER,
50 KNAV_QUEUE_ENABLE_NOTIFY,
51 KNAV_QUEUE_DISABLE_NOTIFY,
52 KNAV_QUEUE_GET_COUNT
53};
54
55/* Queue notifier callback prototype */
56typedef void (*knav_queue_notify_fn)(void *arg);
57
58/**
59 * struct knav_queue_notify_config: Notifier configuration
60 * @fn: Notifier function
61 * @fn_arg: Notifier function arguments
62 */
63struct knav_queue_notify_config {
64 knav_queue_notify_fn fn;
65 void *fn_arg;
66};
67
68void *knav_queue_open(const char *name, unsigned id,
69 unsigned flags);
70void knav_queue_close(void *qhandle);
71int knav_queue_device_control(void *qhandle,
72 enum knav_queue_ctrl_cmd cmd,
73 unsigned long arg);
74dma_addr_t knav_queue_pop(void *qhandle, unsigned *size);
75int knav_queue_push(void *qhandle, dma_addr_t dma,
76 unsigned size, unsigned flags);
77
78void *knav_pool_create(const char *name,
79 int num_desc, int region_id);
80void knav_pool_destroy(void *ph);
81int knav_pool_count(void *ph);
82void *knav_pool_desc_get(void *ph);
83void knav_pool_desc_put(void *ph, void *desc);
84int knav_pool_desc_map(void *ph, void *desc, unsigned size,
85 dma_addr_t *dma, unsigned *dma_sz);
86void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz);
87dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt);
88void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma);
89
90#endif /* __SOC_TI_KNAV_QMSS_H__ */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index ec538fc287a6..6e49a14365dc 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -47,16 +47,25 @@ struct linger {
47struct msghdr { 47struct msghdr {
48 void *msg_name; /* ptr to socket address structure */ 48 void *msg_name; /* ptr to socket address structure */
49 int msg_namelen; /* size of socket address structure */ 49 int msg_namelen; /* size of socket address structure */
50 struct iovec *msg_iov; /* scatter/gather array */ 50 struct iov_iter msg_iter; /* data */
51 __kernel_size_t msg_iovlen; /* # elements in msg_iov */
52 void *msg_control; /* ancillary data */ 51 void *msg_control; /* ancillary data */
53 __kernel_size_t msg_controllen; /* ancillary data buffer length */ 52 __kernel_size_t msg_controllen; /* ancillary data buffer length */
54 unsigned int msg_flags; /* flags on received message */ 53 unsigned int msg_flags; /* flags on received message */
55}; 54};
55
56struct user_msghdr {
57 void __user *msg_name; /* ptr to socket address structure */
58 int msg_namelen; /* size of socket address structure */
59 struct iovec __user *msg_iov; /* scatter/gather array */
60 __kernel_size_t msg_iovlen; /* # elements in msg_iov */
61 void __user *msg_control; /* ancillary data */
62 __kernel_size_t msg_controllen; /* ancillary data buffer length */
63 unsigned int msg_flags; /* flags on received message */
64};
56 65
57/* For recvmmsg/sendmmsg */ 66/* For recvmmsg/sendmmsg */
58struct mmsghdr { 67struct mmsghdr {
59 struct msghdr msg_hdr; 68 struct user_msghdr msg_hdr;
60 unsigned int msg_len; 69 unsigned int msg_len;
61}; 70};
62 71
@@ -94,6 +103,10 @@ struct cmsghdr {
94 (cmsg)->cmsg_len <= (unsigned long) \ 103 (cmsg)->cmsg_len <= (unsigned long) \
95 ((mhdr)->msg_controllen - \ 104 ((mhdr)->msg_controllen - \
96 ((char *)(cmsg) - (char *)(mhdr)->msg_control))) 105 ((char *)(cmsg) - (char *)(mhdr)->msg_control)))
106#define for_each_cmsghdr(cmsg, msg) \
107 for (cmsg = CMSG_FIRSTHDR(msg); \
108 cmsg; \
109 cmsg = CMSG_NXTHDR(msg, cmsg))
97 110
98/* 111/*
99 * Get the next cmsg header 112 * Get the next cmsg header
@@ -256,7 +269,7 @@ struct ucred {
256#define MSG_EOF MSG_FIN 269#define MSG_EOF MSG_FIN
257 270
258#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ 271#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
259#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file 272#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
260 descriptor received through 273 descriptor received through
261 SCM_RIGHTS */ 274 SCM_RIGHTS */
262#if defined(CONFIG_COMPAT) 275#if defined(CONFIG_COMPAT)
@@ -312,15 +325,14 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
312extern unsigned long iov_pages(const struct iovec *iov, int offset, 325extern unsigned long iov_pages(const struct iovec *iov, int offset,
313 unsigned long nr_segs); 326 unsigned long nr_segs);
314 327
315extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
316extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); 328extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
317extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); 329extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
318 330
319struct timespec; 331struct timespec;
320 332
321/* The __sys_...msg variants allow MSG_CMSG_COMPAT */ 333/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
322extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); 334extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
323extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); 335extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
324extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, 336extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
325 unsigned int flags, struct timespec *timeout); 337 unsigned int flags, struct timespec *timeout);
326extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, 338extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
index 2d676d5aaa89..aa07d7b32568 100644
--- a/include/linux/spi/mcp23s08.h
+++ b/include/linux/spi/mcp23s08.h
@@ -22,4 +22,22 @@ struct mcp23s08_platform_data {
22 * base to base+15 (or base+31 for s17 variant). 22 * base to base+15 (or base+31 for s17 variant).
23 */ 23 */
24 unsigned base; 24 unsigned base;
 25 /* Marks the device as an interrupt controller.
26 * NOTE: The interrupt functionality is only supported for i2c
27 * versions of the chips. The spi chips can also do the interrupts,
 28 * but this is not supported by the Linux driver yet.
29 */
30 bool irq_controller;
31
32 /* Sets the mirror flag in the IOCON register. Devices
33 * with two interrupt outputs (these are the devices ending with 17 and
34 * those that have 16 IOs) have two IO banks: IO 0-7 form bank 1 and
35 * IO 8-15 are bank 2. These chips have two different interrupt outputs:
36 * One for bank 1 and another for bank 2. If irq-mirror is set, both
37 * interrupts are generated regardless of the bank that an input change
 38 * occurred on. If it is not set, the interrupts are only generated for
39 * the bank they belong to.
40 * On devices with only one interrupt output this property is useless.
41 */
42 bool mirror;
25}; 43};
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index 82d5111cd0c2..d5a316550177 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -23,6 +23,8 @@
23#define PXA2XX_CS_ASSERT (0x01) 23#define PXA2XX_CS_ASSERT (0x01)
24#define PXA2XX_CS_DEASSERT (0x02) 24#define PXA2XX_CS_DEASSERT (0x02)
25 25
26struct dma_chan;
27
26/* device.platform_data for SSP controller devices */ 28/* device.platform_data for SSP controller devices */
27struct pxa2xx_spi_master { 29struct pxa2xx_spi_master {
28 u32 clock_enable; 30 u32 clock_enable;
@@ -30,10 +32,9 @@ struct pxa2xx_spi_master {
30 u8 enable_dma; 32 u8 enable_dma;
31 33
32 /* DMA engine specific config */ 34 /* DMA engine specific config */
33 int rx_chan_id; 35 bool (*dma_filter)(struct dma_chan *chan, void *param);
34 int tx_chan_id; 36 void *tx_param;
35 int rx_slave_id; 37 void *rx_param;
36 int tx_slave_id;
37 38
38 /* For non-PXA arches */ 39 /* For non-PXA arches */
39 struct ssp_device ssp; 40 struct ssp_device ssp;
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 46d188a9947c..a6ef2a8e6de4 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -1049,4 +1049,10 @@ spi_unregister_device(struct spi_device *spi)
1049extern const struct spi_device_id * 1049extern const struct spi_device_id *
1050spi_get_device_id(const struct spi_device *sdev); 1050spi_get_device_id(const struct spi_device *sdev);
1051 1051
1052static inline bool
1053spi_transfer_is_last(struct spi_master *master, struct spi_transfer *xfer)
1054{
1055 return list_is_last(&xfer->transfer_list, &master->cur_msg->transfers);
1056}
1057
1052#endif /* __LINUX_SPI_H */ 1058#endif /* __LINUX_SPI_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 3f2867ff0ced..262ba4ef9a8e 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -197,7 +197,13 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
197 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ 197 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
198 } while (0) 198 } while (0)
199#else 199#else
200# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock) 200/*
201 * Always evaluate the 'subclass' argument to avoid that the compiler
202 * warns about set-but-not-used variables when building with
203 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
204 */
205# define raw_spin_lock_nested(lock, subclass) \
206 _raw_spin_lock(((void)(subclass), (lock)))
201# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) 207# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
202#endif 208#endif
203 209
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index 91f5eab9e428..f84212cd3b7d 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -134,9 +134,6 @@ void spmi_controller_remove(struct spmi_controller *ctrl);
134 * this structure. 134 * this structure.
135 * @probe: binds this driver to a SPMI device. 135 * @probe: binds this driver to a SPMI device.
136 * @remove: unbinds this driver from the SPMI device. 136 * @remove: unbinds this driver from the SPMI device.
137 * @shutdown: standard shutdown callback used during powerdown/halt.
138 * @suspend: standard suspend callback used during system suspend.
139 * @resume: standard resume callback used during system resume.
140 * 137 *
141 * If PM runtime support is desired for a slave, a device driver can call 138 * If PM runtime support is desired for a slave, a device driver can call
142 * pm_runtime_put() from their probe() routine (and a balancing 139 * pm_runtime_put() from their probe() routine (and a balancing
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 115b570e3bff..669045ab73f3 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -1,6 +1,8 @@
1#ifndef __LINUX_STACKTRACE_H 1#ifndef __LINUX_STACKTRACE_H
2#define __LINUX_STACKTRACE_H 2#define __LINUX_STACKTRACE_H
3 3
4#include <linux/types.h>
5
4struct task_struct; 6struct task_struct;
5struct pt_regs; 7struct pt_regs;
6 8
@@ -20,6 +22,8 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
20 struct stack_trace *trace); 22 struct stack_trace *trace);
21 23
22extern void print_stack_trace(struct stack_trace *trace, int spaces); 24extern void print_stack_trace(struct stack_trace *trace, int spaces);
25extern int snprint_stack_trace(char *buf, size_t size,
26 struct stack_trace *trace, int spaces);
23 27
24#ifdef CONFIG_USER_STACKTRACE_SUPPORT 28#ifdef CONFIG_USER_STACKTRACE_SUPPORT
25extern void save_stack_trace_user(struct stack_trace *trace); 29extern void save_stack_trace_user(struct stack_trace *trace);
@@ -32,6 +36,7 @@ extern void save_stack_trace_user(struct stack_trace *trace);
32# define save_stack_trace_tsk(tsk, trace) do { } while (0) 36# define save_stack_trace_tsk(tsk, trace) do { } while (0)
33# define save_stack_trace_user(trace) do { } while (0) 37# define save_stack_trace_user(trace) do { } while (0)
34# define print_stack_trace(trace, spaces) do { } while (0) 38# define print_stack_trace(trace, spaces) do { } while (0)
39# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
35#endif 40#endif
36 41
37#endif 42#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index d36977e029af..2e22a2e58f3a 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -41,7 +41,7 @@ extern int strcmp(const char *,const char *);
41extern int strncmp(const char *,const char *,__kernel_size_t); 41extern int strncmp(const char *,const char *,__kernel_size_t);
42#endif 42#endif
43#ifndef __HAVE_ARCH_STRNICMP 43#ifndef __HAVE_ARCH_STRNICMP
44extern int strnicmp(const char *, const char *, __kernel_size_t); 44#define strnicmp strncasecmp
45#endif 45#endif
46#ifndef __HAVE_ARCH_STRCASECMP 46#ifndef __HAVE_ARCH_STRCASECMP
47extern int strcasecmp(const char *s1, const char *s2); 47extern int strcasecmp(const char *s1, const char *s2);
@@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
132#endif 132#endif
133 133
134extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, 134extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
135 const void *from, size_t available); 135 const void *from, size_t available);
136 136
137/** 137/**
138 * strstarts - does @str start with @prefix? 138 * strstarts - does @str start with @prefix?
@@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
144 return strncmp(str, prefix, strlen(prefix)) == 0; 144 return strncmp(str, prefix, strlen(prefix)) == 0;
145} 145}
146 146
147extern size_t memweight(const void *ptr, size_t bytes); 147size_t memweight(const void *ptr, size_t bytes);
148void memzero_explicit(void *s, size_t count);
148 149
149/** 150/**
150 * kbasename - return the last part of a pathname. 151 * kbasename - return the last part of a pathname.
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 3eeee9672a4a..6eb567ac56bc 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -20,40 +20,6 @@ int string_get_size(u64 size, enum string_size_units units,
20#define UNESCAPE_ANY \ 20#define UNESCAPE_ANY \
21 (UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL) 21 (UNESCAPE_SPACE | UNESCAPE_OCTAL | UNESCAPE_HEX | UNESCAPE_SPECIAL)
22 22
23/**
24 * string_unescape - unquote characters in the given string
25 * @src: source buffer (escaped)
26 * @dst: destination buffer (unescaped)
27 * @size: size of the destination buffer (0 to unlimit)
28 * @flags: combination of the flags (bitwise OR):
29 * %UNESCAPE_SPACE:
30 * '\f' - form feed
31 * '\n' - new line
32 * '\r' - carriage return
33 * '\t' - horizontal tab
34 * '\v' - vertical tab
35 * %UNESCAPE_OCTAL:
36 * '\NNN' - byte with octal value NNN (1 to 3 digits)
37 * %UNESCAPE_HEX:
38 * '\xHH' - byte with hexadecimal value HH (1 to 2 digits)
39 * %UNESCAPE_SPECIAL:
40 * '\"' - double quote
41 * '\\' - backslash
42 * '\a' - alert (BEL)
43 * '\e' - escape
44 * %UNESCAPE_ANY:
45 * all previous together
46 *
47 * Returns amount of characters processed to the destination buffer excluding
48 * trailing '\0'.
49 *
50 * Because the size of the output will be the same as or less than the size of
51 * the input, the transformation may be performed in place.
52 *
53 * Caller must provide valid source and destination pointers. Be aware that
54 * destination buffer will always be NULL-terminated. Source string must be
55 * NULL-terminated as well.
56 */
57int string_unescape(char *src, char *dst, size_t size, unsigned int flags); 23int string_unescape(char *src, char *dst, size_t size, unsigned int flags);
58 24
59static inline int string_unescape_inplace(char *buf, unsigned int flags) 25static inline int string_unescape_inplace(char *buf, unsigned int flags)
@@ -71,4 +37,35 @@ static inline int string_unescape_any_inplace(char *buf)
71 return string_unescape_any(buf, buf, 0); 37 return string_unescape_any(buf, buf, 0);
72} 38}
73 39
40#define ESCAPE_SPACE 0x01
41#define ESCAPE_SPECIAL 0x02
42#define ESCAPE_NULL 0x04
43#define ESCAPE_OCTAL 0x08
44#define ESCAPE_ANY \
45 (ESCAPE_SPACE | ESCAPE_OCTAL | ESCAPE_SPECIAL | ESCAPE_NULL)
46#define ESCAPE_NP 0x10
47#define ESCAPE_ANY_NP (ESCAPE_ANY | ESCAPE_NP)
48#define ESCAPE_HEX 0x20
49
50int string_escape_mem(const char *src, size_t isz, char **dst, size_t osz,
51 unsigned int flags, const char *esc);
52
53static inline int string_escape_mem_any_np(const char *src, size_t isz,
54 char **dst, size_t osz, const char *esc)
55{
56 return string_escape_mem(src, isz, dst, osz, ESCAPE_ANY_NP, esc);
57}
58
59static inline int string_escape_str(const char *src, char **dst, size_t sz,
60 unsigned int flags, const char *esc)
61{
62 return string_escape_mem(src, strlen(src), dst, sz, flags, esc);
63}
64
65static inline int string_escape_str_any_np(const char *src, char **dst,
66 size_t sz, const char *esc)
67{
68 return string_escape_str(src, dst, sz, ESCAPE_ANY_NP, esc);
69}
70
74#endif 71#endif
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 8e030075fe79..a7cbb570cc5c 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -53,7 +53,7 @@ struct rpc_cred {
53 struct rcu_head cr_rcu; 53 struct rcu_head cr_rcu;
54 struct rpc_auth * cr_auth; 54 struct rpc_auth * cr_auth;
55 const struct rpc_credops *cr_ops; 55 const struct rpc_credops *cr_ops;
56#ifdef RPC_DEBUG 56#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
57 unsigned long cr_magic; /* 0x0f4aa4f0 */ 57 unsigned long cr_magic; /* 0x0f4aa4f0 */
58#endif 58#endif
59 unsigned long cr_expire; /* when to gc */ 59 unsigned long cr_expire; /* when to gc */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 70736b98c721..d86acc63b25f 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -63,6 +63,9 @@ struct rpc_clnt {
63 struct rpc_rtt cl_rtt_default; 63 struct rpc_rtt cl_rtt_default;
64 struct rpc_timeout cl_timeout_default; 64 struct rpc_timeout cl_timeout_default;
65 const struct rpc_program *cl_program; 65 const struct rpc_program *cl_program;
66#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
67 struct dentry *cl_debugfs; /* debugfs directory */
68#endif
66}; 69};
67 70
68/* 71/*
@@ -176,5 +179,6 @@ size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
176const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); 179const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
177int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t); 180int rpc_localaddr(struct rpc_clnt *, struct sockaddr *, size_t);
178 181
182const char *rpc_proc_name(const struct rpc_task *task);
179#endif /* __KERNEL__ */ 183#endif /* __KERNEL__ */
180#endif /* _LINUX_SUNRPC_CLNT_H */ 184#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index 9385bd74c860..c57d8ea0716c 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -10,22 +10,10 @@
10 10
11#include <uapi/linux/sunrpc/debug.h> 11#include <uapi/linux/sunrpc/debug.h>
12 12
13
14/*
15 * Enable RPC debugging/profiling.
16 */
17#ifdef CONFIG_SUNRPC_DEBUG
18#define RPC_DEBUG
19#endif
20#ifdef CONFIG_TRACEPOINTS
21#define RPC_TRACEPOINTS
22#endif
23/* #define RPC_PROFILE */
24
25/* 13/*
26 * Debugging macros etc 14 * Debugging macros etc
27 */ 15 */
28#ifdef RPC_DEBUG 16#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
29extern unsigned int rpc_debug; 17extern unsigned int rpc_debug;
30extern unsigned int nfs_debug; 18extern unsigned int nfs_debug;
31extern unsigned int nfsd_debug; 19extern unsigned int nfsd_debug;
@@ -36,7 +24,7 @@ extern unsigned int nlm_debug;
36#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args) 24#define dprintk_rcu(args...) dfprintk_rcu(FACILITY, ## args)
37 25
38#undef ifdebug 26#undef ifdebug
39#ifdef RPC_DEBUG 27#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
40# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac)) 28# define ifdebug(fac) if (unlikely(rpc_debug & RPCDBG_##fac))
41 29
42# define dfprintk(fac, args...) \ 30# define dfprintk(fac, args...) \
@@ -65,9 +53,55 @@ extern unsigned int nlm_debug;
65/* 53/*
66 * Sysctl interface for RPC debugging 54 * Sysctl interface for RPC debugging
67 */ 55 */
68#ifdef RPC_DEBUG 56
57struct rpc_clnt;
58struct rpc_xprt;
59
60#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
69void rpc_register_sysctl(void); 61void rpc_register_sysctl(void);
70void rpc_unregister_sysctl(void); 62void rpc_unregister_sysctl(void);
63int sunrpc_debugfs_init(void);
64void sunrpc_debugfs_exit(void);
65int rpc_clnt_debugfs_register(struct rpc_clnt *);
66void rpc_clnt_debugfs_unregister(struct rpc_clnt *);
67int rpc_xprt_debugfs_register(struct rpc_xprt *);
68void rpc_xprt_debugfs_unregister(struct rpc_xprt *);
69#else
70static inline int
71sunrpc_debugfs_init(void)
72{
73 return 0;
74}
75
76static inline void
77sunrpc_debugfs_exit(void)
78{
79 return;
80}
81
82static inline int
83rpc_clnt_debugfs_register(struct rpc_clnt *clnt)
84{
85 return 0;
86}
87
88static inline void
89rpc_clnt_debugfs_unregister(struct rpc_clnt *clnt)
90{
91 return;
92}
93
94static inline int
95rpc_xprt_debugfs_register(struct rpc_xprt *xprt)
96{
97 return 0;
98}
99
100static inline void
101rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt)
102{
103 return;
104}
71#endif 105#endif
72 106
73#endif /* _LINUX_SUNRPC_DEBUG_H_ */ 107#endif /* _LINUX_SUNRPC_DEBUG_H_ */
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index 1565bbe86d51..eecb5a71e6c0 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -27,10 +27,13 @@
27 27
28#include <linux/seq_file.h> 28#include <linux/seq_file.h>
29#include <linux/ktime.h> 29#include <linux/ktime.h>
30#include <linux/spinlock.h>
30 31
31#define RPC_IOSTATS_VERS "1.0" 32#define RPC_IOSTATS_VERS "1.0"
32 33
33struct rpc_iostats { 34struct rpc_iostats {
35 spinlock_t om_lock;
36
34 /* 37 /*
35 * These counters give an idea about how many request 38 * These counters give an idea about how many request
36 * transmissions are required, on average, to complete that 39 * transmissions are required, on average, to complete that
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 1a8959944c5f..5f1e6bd4c316 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -79,7 +79,7 @@ struct rpc_task {
79 unsigned short tk_flags; /* misc flags */ 79 unsigned short tk_flags; /* misc flags */
80 unsigned short tk_timeouts; /* maj timeouts */ 80 unsigned short tk_timeouts; /* maj timeouts */
81 81
82#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) 82#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
83 unsigned short tk_pid; /* debugging aid */ 83 unsigned short tk_pid; /* debugging aid */
84#endif 84#endif
85 unsigned char tk_priority : 2,/* Task priority */ 85 unsigned char tk_priority : 2,/* Task priority */
@@ -187,7 +187,7 @@ struct rpc_wait_queue {
187 unsigned char nr; /* # tasks remaining for cookie */ 187 unsigned char nr; /* # tasks remaining for cookie */
188 unsigned short qlen; /* total # tasks waiting in queue */ 188 unsigned short qlen; /* total # tasks waiting in queue */
189 struct rpc_timer timer_list; 189 struct rpc_timer timer_list;
190#if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS) 190#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
191 const char * name; 191 const char * name;
192#endif 192#endif
193}; 193};
@@ -237,7 +237,7 @@ void rpc_free(void *);
237int rpciod_up(void); 237int rpciod_up(void);
238void rpciod_down(void); 238void rpciod_down(void);
239int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); 239int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
240#ifdef RPC_DEBUG 240#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
241struct net; 241struct net;
242void rpc_show_tasks(struct net *); 242void rpc_show_tasks(struct net *);
243#endif 243#endif
@@ -251,7 +251,7 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task)
251 return __rpc_wait_for_completion_task(task, NULL); 251 return __rpc_wait_for_completion_task(task, NULL);
252} 252}
253 253
254#if defined(RPC_DEBUG) || defined (RPC_TRACEPOINTS) 254#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
255static inline const char * rpc_qname(const struct rpc_wait_queue *q) 255static inline const char * rpc_qname(const struct rpc_wait_queue *q)
256{ 256{
257 return ((q && q->name) ? q->name : "unknown"); 257 return ((q && q->name) ? q->name : "unknown");
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cf61ecd148e0..6f22cfeef5e3 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -26,10 +26,10 @@ typedef int (*svc_thread_fn)(void *);
26 26
27/* statistics for svc_pool structures */ 27/* statistics for svc_pool structures */
28struct svc_pool_stats { 28struct svc_pool_stats {
29 unsigned long packets; 29 atomic_long_t packets;
30 unsigned long sockets_queued; 30 unsigned long sockets_queued;
31 unsigned long threads_woken; 31 atomic_long_t threads_woken;
32 unsigned long threads_timedout; 32 atomic_long_t threads_timedout;
33}; 33};
34 34
35/* 35/*
@@ -45,12 +45,13 @@ struct svc_pool_stats {
45struct svc_pool { 45struct svc_pool {
46 unsigned int sp_id; /* pool id; also node id on NUMA */ 46 unsigned int sp_id; /* pool id; also node id on NUMA */
47 spinlock_t sp_lock; /* protects all fields */ 47 spinlock_t sp_lock; /* protects all fields */
48 struct list_head sp_threads; /* idle server threads */
49 struct list_head sp_sockets; /* pending sockets */ 48 struct list_head sp_sockets; /* pending sockets */
50 unsigned int sp_nrthreads; /* # of threads in pool */ 49 unsigned int sp_nrthreads; /* # of threads in pool */
51 struct list_head sp_all_threads; /* all server threads */ 50 struct list_head sp_all_threads; /* all server threads */
52 struct svc_pool_stats sp_stats; /* statistics on pool operation */ 51 struct svc_pool_stats sp_stats; /* statistics on pool operation */
53 int sp_task_pending;/* has pending task */ 52#define SP_TASK_PENDING (0) /* still work to do even if no
53 * xprt is queued. */
54 unsigned long sp_flags;
54} ____cacheline_aligned_in_smp; 55} ____cacheline_aligned_in_smp;
55 56
56/* 57/*
@@ -219,8 +220,8 @@ static inline void svc_putu32(struct kvec *iov, __be32 val)
219 * processed. 220 * processed.
220 */ 221 */
221struct svc_rqst { 222struct svc_rqst {
222 struct list_head rq_list; /* idle list */
223 struct list_head rq_all; /* all threads list */ 223 struct list_head rq_all; /* all threads list */
224 struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
224 struct svc_xprt * rq_xprt; /* transport ptr */ 225 struct svc_xprt * rq_xprt; /* transport ptr */
225 226
226 struct sockaddr_storage rq_addr; /* peer address */ 227 struct sockaddr_storage rq_addr; /* peer address */
@@ -236,7 +237,6 @@ struct svc_rqst {
236 struct svc_cred rq_cred; /* auth info */ 237 struct svc_cred rq_cred; /* auth info */
237 void * rq_xprt_ctxt; /* transport specific context ptr */ 238 void * rq_xprt_ctxt; /* transport specific context ptr */
238 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ 239 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
239 bool rq_usedeferral; /* use deferral */
240 240
241 size_t rq_xprt_hlen; /* xprt header len */ 241 size_t rq_xprt_hlen; /* xprt header len */
242 struct xdr_buf rq_arg; 242 struct xdr_buf rq_arg;
@@ -253,9 +253,17 @@ struct svc_rqst {
253 u32 rq_vers; /* program version */ 253 u32 rq_vers; /* program version */
254 u32 rq_proc; /* procedure number */ 254 u32 rq_proc; /* procedure number */
255 u32 rq_prot; /* IP protocol */ 255 u32 rq_prot; /* IP protocol */
256 unsigned short 256 int rq_cachetype; /* catering to nfsd */
257 rq_secure : 1; /* secure port */ 257#define RQ_SECURE (0) /* secure port */
258 unsigned short rq_local : 1; /* local request */ 258#define RQ_LOCAL (1) /* local request */
259#define RQ_USEDEFERRAL (2) /* use deferral */
260#define RQ_DROPME (3) /* drop current reply */
261#define RQ_SPLICE_OK (4) /* turned off in gss privacy
262 * to prevent encrypting page
263 * cache pages */
264#define RQ_VICTIM (5) /* about to be shut down */
265#define RQ_BUSY (6) /* request is busy */
266 unsigned long rq_flags; /* flags field */
259 267
260 void * rq_argp; /* decoded arguments */ 268 void * rq_argp; /* decoded arguments */
261 void * rq_resp; /* xdr'd results */ 269 void * rq_resp; /* xdr'd results */
@@ -271,17 +279,12 @@ struct svc_rqst {
271 struct cache_req rq_chandle; /* handle passed to caches for 279 struct cache_req rq_chandle; /* handle passed to caches for
272 * request delaying 280 * request delaying
273 */ 281 */
274 bool rq_dropme;
275 /* Catering to nfsd */ 282 /* Catering to nfsd */
276 struct auth_domain * rq_client; /* RPC peer info */ 283 struct auth_domain * rq_client; /* RPC peer info */
277 struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ 284 struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
278 int rq_cachetype;
279 struct svc_cacherep * rq_cacherep; /* cache info */ 285 struct svc_cacherep * rq_cacherep; /* cache info */
280 bool rq_splice_ok; /* turned off in gss privacy
281 * to prevent encrypting page
282 * cache pages */
283 wait_queue_head_t rq_wait; /* synchronization */
284 struct task_struct *rq_task; /* service thread */ 286 struct task_struct *rq_task; /* service thread */
287 spinlock_t rq_lock; /* per-request lock */
285}; 288};
286 289
287#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net) 290#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index ce6e4182a5b2..79f6f8f3dc0a 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -63,10 +63,9 @@ struct svc_xprt {
63#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */ 63#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */
64#define XPT_DEFERRED 8 /* deferred request pending */ 64#define XPT_DEFERRED 8 /* deferred request pending */
65#define XPT_OLD 9 /* used for xprt aging mark+sweep */ 65#define XPT_OLD 9 /* used for xprt aging mark+sweep */
66#define XPT_DETACHED 10 /* detached from tempsocks list */ 66#define XPT_LISTENER 10 /* listening endpoint */
67#define XPT_LISTENER 11 /* listening endpoint */ 67#define XPT_CACHE_AUTH 11 /* cache auth info */
68#define XPT_CACHE_AUTH 12 /* cache auth info */ 68#define XPT_LOCAL 12 /* connection from loopback interface */
69#define XPT_LOCAL 13 /* connection from loopback interface */
70 69
71 struct svc_serv *xpt_server; /* service for transport */ 70 struct svc_serv *xpt_server; /* service for transport */
72 atomic_t xpt_reserved; /* space on outq that is rsvd */ 71 atomic_t xpt_reserved; /* space on outq that is rsvd */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index fcbfe8783243..9d27ac45b909 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -239,6 +239,9 @@ struct rpc_xprt {
239 struct net *xprt_net; 239 struct net *xprt_net;
240 const char *servername; 240 const char *servername;
241 const char *address_strings[RPC_DISPLAY_MAX]; 241 const char *address_strings[RPC_DISPLAY_MAX];
242#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
243 struct dentry *debugfs; /* debugfs directory */
244#endif
242}; 245};
243 246
244#if defined(CONFIG_SUNRPC_BACKCHANNEL) 247#if defined(CONFIG_SUNRPC_BACKCHANNEL)
@@ -357,6 +360,7 @@ int xs_swapper(struct rpc_xprt *xprt, int enable);
357#define XPRT_CONNECTION_ABORT (7) 360#define XPRT_CONNECTION_ABORT (7)
358#define XPRT_CONNECTION_CLOSE (8) 361#define XPRT_CONNECTION_CLOSE (8)
359#define XPRT_CONGESTED (9) 362#define XPRT_CONGESTED (9)
363#define XPRT_CONNECTION_REUSE (10)
360 364
361static inline void xprt_set_connected(struct rpc_xprt *xprt) 365static inline void xprt_set_connected(struct rpc_xprt *xprt)
362{ 366{
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index 1ad36cc25b2e..7591788e9fbf 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -17,6 +17,65 @@ void cleanup_socket_xprt(void);
17#define RPC_DEF_MIN_RESVPORT (665U) 17#define RPC_DEF_MIN_RESVPORT (665U)
18#define RPC_DEF_MAX_RESVPORT (1023U) 18#define RPC_DEF_MAX_RESVPORT (1023U)
19 19
20struct sock_xprt {
21 struct rpc_xprt xprt;
22
23 /*
24 * Network layer
25 */
26 struct socket * sock;
27 struct sock * inet;
28
29 /*
30 * State of TCP reply receive
31 */
32 __be32 tcp_fraghdr,
33 tcp_xid,
34 tcp_calldir;
35
36 u32 tcp_offset,
37 tcp_reclen;
38
39 unsigned long tcp_copied,
40 tcp_flags;
41
42 /*
43 * Connection of transports
44 */
45 struct delayed_work connect_worker;
46 struct sockaddr_storage srcaddr;
47 unsigned short srcport;
48
49 /*
50 * UDP socket buffer size parameters
51 */
52 size_t rcvsize,
53 sndsize;
54
55 /*
56 * Saved socket callback addresses
57 */
58 void (*old_data_ready)(struct sock *);
59 void (*old_state_change)(struct sock *);
60 void (*old_write_space)(struct sock *);
61 void (*old_error_report)(struct sock *);
62};
63
64/*
65 * TCP receive state flags
66 */
67#define TCP_RCV_LAST_FRAG (1UL << 0)
68#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
69#define TCP_RCV_COPY_XID (1UL << 2)
70#define TCP_RCV_COPY_DATA (1UL << 3)
71#define TCP_RCV_READ_CALLDIR (1UL << 4)
72#define TCP_RCV_COPY_CALLDIR (1UL << 5)
73
74/*
75 * TCP RPC flags
76 */
77#define TCP_RPC_REPLY (1UL << 6)
78
20#endif /* __KERNEL__ */ 79#endif /* __KERNEL__ */
21 80
22#endif /* _LINUX_SUNRPC_XPRTSOCK_H */ 81#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 519064e0c943..3388c1b6f7d8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,6 +189,8 @@ struct platform_suspend_ops {
189 189
190struct platform_freeze_ops { 190struct platform_freeze_ops {
191 int (*begin)(void); 191 int (*begin)(void);
192 int (*prepare)(void);
193 void (*restore)(void);
192 void (*end)(void); 194 void (*end)(void);
193}; 195};
194 196
@@ -371,6 +373,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
371extern bool events_check_enabled; 373extern bool events_check_enabled;
372 374
373extern bool pm_wakeup_pending(void); 375extern bool pm_wakeup_pending(void);
376extern void pm_system_wakeup(void);
377extern void pm_wakeup_clear(void);
374extern bool pm_get_wakeup_count(unsigned int *count, bool block); 378extern bool pm_get_wakeup_count(unsigned int *count, bool block);
375extern bool pm_save_wakeup_count(unsigned int count); 379extern bool pm_save_wakeup_count(unsigned int count);
376extern void pm_wakep_autosleep_enabled(bool set); 380extern void pm_wakep_autosleep_enabled(bool set);
@@ -418,6 +422,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
418#define pm_notifier(fn, pri) do { (void)(fn); } while (0) 422#define pm_notifier(fn, pri) do { (void)(fn); } while (0)
419 423
420static inline bool pm_wakeup_pending(void) { return false; } 424static inline bool pm_wakeup_pending(void) { return false; }
425static inline void pm_system_wakeup(void) {}
426static inline void pm_wakeup_clear(void) {}
421 427
422static inline void lock_system_sleep(void) {} 428static inline void lock_system_sleep(void) {}
423static inline void unlock_system_sleep(void) {} 429static inline void unlock_system_sleep(void) {}
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1b72060f093a..34e8b60ab973 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -102,14 +102,6 @@ union swap_header {
102 } info; 102 } info;
103}; 103};
104 104
105 /* A swap entry has to fit into a "unsigned long", as
106 * the entry is hidden in the "index" field of the
107 * swapper address space.
108 */
109typedef struct {
110 unsigned long val;
111} swp_entry_t;
112
113/* 105/*
114 * current->reclaim_state points to one of these when a task is running 106 * current->reclaim_state points to one of these when a task is running
115 * memory reclaim 107 * memory reclaim
@@ -327,8 +319,10 @@ extern void lru_cache_add_active_or_unevictable(struct page *page,
327extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 319extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
328 gfp_t gfp_mask, nodemask_t *mask); 320 gfp_t gfp_mask, nodemask_t *mask);
329extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); 321extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
330extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 322extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
331 gfp_t gfp_mask, bool noswap); 323 unsigned long nr_pages,
324 gfp_t gfp_mask,
325 bool may_swap);
332extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 326extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
333 gfp_t gfp_mask, bool noswap, 327 gfp_t gfp_mask, bool noswap,
334 struct zone *zone, 328 struct zone *zone,
@@ -354,22 +348,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
354extern int page_evictable(struct page *page); 348extern int page_evictable(struct page *page);
355extern void check_move_unevictable_pages(struct page **, int nr_pages); 349extern void check_move_unevictable_pages(struct page **, int nr_pages);
356 350
357extern unsigned long scan_unevictable_pages;
358extern int scan_unevictable_handler(struct ctl_table *, int,
359 void __user *, size_t *, loff_t *);
360#ifdef CONFIG_NUMA
361extern int scan_unevictable_register_node(struct node *node);
362extern void scan_unevictable_unregister_node(struct node *node);
363#else
364static inline int scan_unevictable_register_node(struct node *node)
365{
366 return 0;
367}
368static inline void scan_unevictable_unregister_node(struct node *node)
369{
370}
371#endif
372
373extern int kswapd_run(int nid); 351extern int kswapd_run(int nid);
374extern void kswapd_stop(int nid); 352extern void kswapd_stop(int nid);
375#ifdef CONFIG_MEMCG 353#ifdef CONFIG_MEMCG
diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h
new file mode 100644
index 000000000000..145306bdc92f
--- /dev/null
+++ b/include/linux/swap_cgroup.h
@@ -0,0 +1,42 @@
1#ifndef __LINUX_SWAP_CGROUP_H
2#define __LINUX_SWAP_CGROUP_H
3
4#include <linux/swap.h>
5
6#ifdef CONFIG_MEMCG_SWAP
7
8extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
9 unsigned short old, unsigned short new);
10extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
11extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent);
12extern int swap_cgroup_swapon(int type, unsigned long max_pages);
13extern void swap_cgroup_swapoff(int type);
14
15#else
16
17static inline
18unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
19{
20 return 0;
21}
22
23static inline
24unsigned short lookup_swap_cgroup_id(swp_entry_t ent)
25{
26 return 0;
27}
28
29static inline int
30swap_cgroup_swapon(int type, unsigned long max_pages)
31{
32 return 0;
33}
34
35static inline void swap_cgroup_swapoff(int type)
36{
37 return;
38}
39
40#endif /* CONFIG_MEMCG_SWAP */
41
42#endif /* __LINUX_SWAP_CGROUP_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 0f86d85a9ce4..85893d744901 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -25,7 +25,7 @@ struct linux_dirent64;
25struct list_head; 25struct list_head;
26struct mmap_arg_struct; 26struct mmap_arg_struct;
27struct msgbuf; 27struct msgbuf;
28struct msghdr; 28struct user_msghdr;
29struct mmsghdr; 29struct mmsghdr;
30struct msqid_ds; 30struct msqid_ds;
31struct new_utsname; 31struct new_utsname;
@@ -65,6 +65,7 @@ struct old_linux_dirent;
65struct perf_event_attr; 65struct perf_event_attr;
66struct file_handle; 66struct file_handle;
67struct sigaltstack; 67struct sigaltstack;
68union bpf_attr;
68 69
69#include <linux/types.h> 70#include <linux/types.h>
70#include <linux/aio_abi.h> 71#include <linux/aio_abi.h>
@@ -600,13 +601,13 @@ asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
600asmlinkage long sys_send(int, void __user *, size_t, unsigned); 601asmlinkage long sys_send(int, void __user *, size_t, unsigned);
601asmlinkage long sys_sendto(int, void __user *, size_t, unsigned, 602asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
602 struct sockaddr __user *, int); 603 struct sockaddr __user *, int);
603asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); 604asmlinkage long sys_sendmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
604asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, 605asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
605 unsigned int vlen, unsigned flags); 606 unsigned int vlen, unsigned flags);
606asmlinkage long sys_recv(int, void __user *, size_t, unsigned); 607asmlinkage long sys_recv(int, void __user *, size_t, unsigned);
607asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned, 608asmlinkage long sys_recvfrom(int, void __user *, size_t, unsigned,
608 struct sockaddr __user *, int __user *); 609 struct sockaddr __user *, int __user *);
609asmlinkage long sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); 610asmlinkage long sys_recvmsg(int fd, struct user_msghdr __user *msg, unsigned flags);
610asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg, 611asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
611 unsigned int vlen, unsigned flags, 612 unsigned int vlen, unsigned flags,
612 struct timespec __user *timeout); 613 struct timespec __user *timeout);
@@ -875,5 +876,10 @@ asmlinkage long sys_seccomp(unsigned int op, unsigned int flags,
875 const char __user *uargs); 876 const char __user *uargs);
876asmlinkage long sys_getrandom(char __user *buf, size_t count, 877asmlinkage long sys_getrandom(char __user *buf, size_t count,
877 unsigned int flags); 878 unsigned int flags);
879asmlinkage long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size);
880
881asmlinkage long sys_execveat(int dfd, const char __user *filename,
882 const char __user *const __user *argv,
883 const char __user *const __user *envp, int flags);
878 884
879#endif 885#endif
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index f97d0dbb59fa..ddad16148bd6 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -70,6 +70,8 @@ struct attribute_group {
70 * for examples.. 70 * for examples..
71 */ 71 */
72 72
73#define SYSFS_PREALLOC 010000
74
73#define __ATTR(_name, _mode, _show, _store) { \ 75#define __ATTR(_name, _mode, _show, _store) { \
74 .attr = {.name = __stringify(_name), \ 76 .attr = {.name = __stringify(_name), \
75 .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \ 77 .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
@@ -77,6 +79,13 @@ struct attribute_group {
77 .store = _store, \ 79 .store = _store, \
78} 80}
79 81
82#define __ATTR_PREALLOC(_name, _mode, _show, _store) { \
83 .attr = {.name = __stringify(_name), \
84 .mode = SYSFS_PREALLOC | VERIFY_OCTAL_PERMISSIONS(_mode) },\
85 .show = _show, \
86 .store = _store, \
87}
88
80#define __ATTR_RO(_name) { \ 89#define __ATTR_RO(_name) { \
81 .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ 90 .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \
82 .show = _name##_show, \ 91 .show = _name##_show, \
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index 98a3153c0f96..4b7b875a7ce1 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -49,4 +49,13 @@
49 49
50int do_syslog(int type, char __user *buf, int count, bool from_file); 50int do_syslog(int type, char __user *buf, int count, bool from_file);
51 51
52#ifdef CONFIG_PRINTK
53int check_syslog_permissions(int type, bool from_file);
54#else
55static inline int check_syslog_permissions(int type, bool from_file)
56{
57 return 0;
58}
59#endif
60
52#endif /* _LINUX_SYSLOG_H */ 61#endif /* _LINUX_SYSLOG_H */
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
new file mode 100644
index 000000000000..6a8b9942632d
--- /dev/null
+++ b/include/linux/t10-pi.h
@@ -0,0 +1,22 @@
1#ifndef _LINUX_T10_PI_H
2#define _LINUX_T10_PI_H
3
4#include <linux/types.h>
5#include <linux/blkdev.h>
6
7/*
8 * T10 Protection Information tuple.
9 */
10struct t10_pi_tuple {
11 __be16 guard_tag; /* Checksum */
12 __be16 app_tag; /* Opaque storage */
13 __be32 ref_tag; /* Target LBA or indirect LBA */
14};
15
16
17extern struct blk_integrity t10_pi_type1_crc;
18extern struct blk_integrity t10_pi_type1_ip;
19extern struct blk_integrity t10_pi_type3_crc;
20extern struct blk_integrity t10_pi_type3_ip;
21
22#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fa5258f322e7..67309ece0772 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,7 +19,6 @@
19 19
20 20
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/dmaengine.h>
23#include <net/sock.h> 22#include <net/sock.h>
24#include <net/inet_connection_sock.h> 23#include <net/inet_connection_sock.h>
25#include <net/inet_timewait_sock.h> 24#include <net/inet_timewait_sock.h>
@@ -131,7 +130,7 @@ struct tcp_sock {
131 /* inet_connection_sock has to be the first member of tcp_sock */ 130 /* inet_connection_sock has to be the first member of tcp_sock */
132 struct inet_connection_sock inet_conn; 131 struct inet_connection_sock inet_conn;
133 u16 tcp_header_len; /* Bytes of tcp header to send */ 132 u16 tcp_header_len; /* Bytes of tcp header to send */
134 u16 xmit_size_goal_segs; /* Goal for segmenting output packets */ 133 u16 gso_segs; /* Max number of segs per GSO packet */
135 134
136/* 135/*
137 * Header prediction flags 136 * Header prediction flags
@@ -163,16 +162,9 @@ struct tcp_sock {
163 struct { 162 struct {
164 struct sk_buff_head prequeue; 163 struct sk_buff_head prequeue;
165 struct task_struct *task; 164 struct task_struct *task;
166 struct iovec *iov; 165 struct msghdr *msg;
167 int memory; 166 int memory;
168 int len; 167 int len;
169#ifdef CONFIG_NET_DMA
170 /* members for async copy */
171 struct dma_chan *dma_chan;
172 int wakeup;
173 struct dma_pinned_list *pinned_list;
174 dma_cookie_t dma_cookie;
175#endif
176 } ucopy; 168 } ucopy;
177 169
178 u32 snd_wl1; /* Sequence for window update */ 170 u32 snd_wl1; /* Sequence for window update */
@@ -212,10 +204,10 @@ struct tcp_sock {
212 204
213 u16 urg_data; /* Saved octet of OOB data and control flags */ 205 u16 urg_data; /* Saved octet of OOB data and control flags */
214 u8 ecn_flags; /* ECN status bits. */ 206 u8 ecn_flags; /* ECN status bits. */
215 u8 reordering; /* Packet reordering metric. */ 207 u8 keepalive_probes; /* num of allowed keep alive probes */
208 u32 reordering; /* Packet reordering metric. */
216 u32 snd_up; /* Urgent pointer */ 209 u32 snd_up; /* Urgent pointer */
217 210
218 u8 keepalive_probes; /* num of allowed keep alive probes */
219/* 211/*
220 * Options received (usually on last packet, some only on SYN packets). 212 * Options received (usually on last packet, some only on SYN packets).
221 */ 213 */
@@ -276,7 +268,7 @@ struct tcp_sock {
276 u32 retrans_stamp; /* Timestamp of the last retransmit, 268 u32 retrans_stamp; /* Timestamp of the last retransmit,
277 * also used in SYN-SENT to remember stamp of 269 * also used in SYN-SENT to remember stamp of
278 * the first SYN. */ 270 * the first SYN. */
279 u32 undo_marker; /* tracking retrans started here. */ 271 u32 undo_marker; /* snd_una upon a new recovery episode. */
280 int undo_retrans; /* number of undoable retransmissions. */ 272 int undo_retrans; /* number of undoable retransmissions. */
281 u32 total_retrans; /* Total retransmits for entire connection */ 273 u32 total_retrans; /* Total retransmits for entire connection */
282 274
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 0305cde21a74..fc52e307efab 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -29,26 +29,25 @@
29#include <linux/idr.h> 29#include <linux/idr.h>
30#include <linux/device.h> 30#include <linux/device.h>
31#include <linux/workqueue.h> 31#include <linux/workqueue.h>
32#include <uapi/linux/thermal.h>
32 33
33#define THERMAL_TRIPS_NONE -1 34#define THERMAL_TRIPS_NONE -1
34#define THERMAL_MAX_TRIPS 12 35#define THERMAL_MAX_TRIPS 12
35#define THERMAL_NAME_LENGTH 20
36 36
37/* invalid cooling state */ 37/* invalid cooling state */
38#define THERMAL_CSTATE_INVALID -1UL 38#define THERMAL_CSTATE_INVALID -1UL
39 39
40/* No upper/lower limit requirement */ 40/* No upper/lower limit requirement */
41#define THERMAL_NO_LIMIT THERMAL_CSTATE_INVALID 41#define THERMAL_NO_LIMIT ((u32)~0)
42 42
43/* Unit conversion macros */ 43/* Unit conversion macros */
44#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ 44#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
45 ((long)t-2732+5)/10 : ((long)t-2732-5)/10) 45 ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
46#define CELSIUS_TO_KELVIN(t) ((t)*10+2732) 46#define CELSIUS_TO_KELVIN(t) ((t)*10+2732)
47 47#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
48/* Adding event notification support elements */ 48#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
49#define THERMAL_GENL_FAMILY_NAME "thermal_event" 49#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
50#define THERMAL_GENL_VERSION 0x01 50#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
51#define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_grp"
52 51
53/* Default Thermal Governor */ 52/* Default Thermal Governor */
54#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) 53#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
@@ -82,30 +81,6 @@ enum thermal_trend {
82 THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */ 81 THERMAL_TREND_DROP_FULL, /* apply lowest cooling action */
83}; 82};
84 83
85/* Events supported by Thermal Netlink */
86enum events {
87 THERMAL_AUX0,
88 THERMAL_AUX1,
89 THERMAL_CRITICAL,
90 THERMAL_DEV_FAULT,
91};
92
93/* attributes of thermal_genl_family */
94enum {
95 THERMAL_GENL_ATTR_UNSPEC,
96 THERMAL_GENL_ATTR_EVENT,
97 __THERMAL_GENL_ATTR_MAX,
98};
99#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
100
101/* commands supported by the thermal_genl_family */
102enum {
103 THERMAL_GENL_CMD_UNSPEC,
104 THERMAL_GENL_CMD_EVENT,
105 __THERMAL_GENL_CMD_MAX,
106};
107#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
108
109struct thermal_zone_device_ops { 84struct thermal_zone_device_ops {
110 int (*bind) (struct thermal_zone_device *, 85 int (*bind) (struct thermal_zone_device *,
111 struct thermal_cooling_device *); 86 struct thermal_cooling_device *);
@@ -285,19 +260,49 @@ struct thermal_genl_event {
285 enum events event; 260 enum events event;
286}; 261};
287 262
263/**
264 * struct thermal_zone_of_device_ops - scallbacks for handling DT based zones
265 *
266 * Mandatory:
267 * @get_temp: a pointer to a function that reads the sensor temperature.
268 *
269 * Optional:
270 * @get_trend: a pointer to a function that reads the sensor temperature trend.
271 * @set_emul_temp: a pointer to a function that sets sensor emulated
272 * temperature.
273 */
274struct thermal_zone_of_device_ops {
275 int (*get_temp)(void *, long *);
276 int (*get_trend)(void *, long *);
277 int (*set_emul_temp)(void *, unsigned long);
278};
279
280/**
281 * struct thermal_trip - representation of a point in temperature domain
282 * @np: pointer to struct device_node that this trip point was created from
283 * @temperature: temperature value in miliCelsius
284 * @hysteresis: relative hysteresis in miliCelsius
285 * @type: trip point type
286 */
287
288struct thermal_trip {
289 struct device_node *np;
290 unsigned long int temperature;
291 unsigned long int hysteresis;
292 enum thermal_trip_type type;
293};
294
288/* Function declarations */ 295/* Function declarations */
289#ifdef CONFIG_THERMAL_OF 296#ifdef CONFIG_THERMAL_OF
290struct thermal_zone_device * 297struct thermal_zone_device *
291thermal_zone_of_sensor_register(struct device *dev, int id, 298thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
292 void *data, int (*get_temp)(void *, long *), 299 const struct thermal_zone_of_device_ops *ops);
293 int (*get_trend)(void *, long *));
294void thermal_zone_of_sensor_unregister(struct device *dev, 300void thermal_zone_of_sensor_unregister(struct device *dev,
295 struct thermal_zone_device *tz); 301 struct thermal_zone_device *tz);
296#else 302#else
297static inline struct thermal_zone_device * 303static inline struct thermal_zone_device *
298thermal_zone_of_sensor_register(struct device *dev, int id, 304thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
299 void *data, int (*get_temp)(void *, long *), 305 const struct thermal_zone_of_device_ops *ops)
300 int (*get_trend)(void *, long *))
301{ 306{
302 return NULL; 307 return NULL;
303} 308}
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 932b76392248..884d6263e962 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -268,7 +268,7 @@ struct kim_data_s {
268 struct st_data_s *core_data; 268 struct st_data_s *core_data;
269 struct chip_version version; 269 struct chip_version version;
270 unsigned char ldisc_install; 270 unsigned char ldisc_install;
271 unsigned char dev_name[UART_DEV_NAME_LEN]; 271 unsigned char dev_name[UART_DEV_NAME_LEN + 1];
272 unsigned char flow_cntrl; 272 unsigned char flow_cntrl;
273 unsigned long baud_rate; 273 unsigned long baud_rate;
274}; 274};
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9a82c7dc3fdd..eda850ca757a 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -108,7 +108,7 @@ extern struct tick_sched *tick_get_tick_sched(int cpu);
108extern void tick_irq_enter(void); 108extern void tick_irq_enter(void);
109extern int tick_oneshot_mode_active(void); 109extern int tick_oneshot_mode_active(void);
110# ifndef arch_needs_cpu 110# ifndef arch_needs_cpu
111# define arch_needs_cpu(cpu) (0) 111# define arch_needs_cpu() (0)
112# endif 112# endif
113# else 113# else
114static inline void tick_clock_notify(void) { } 114static inline void tick_clock_notify(void) { }
@@ -181,14 +181,12 @@ static inline bool tick_nohz_full_cpu(int cpu)
181 return cpumask_test_cpu(cpu, tick_nohz_full_mask); 181 return cpumask_test_cpu(cpu, tick_nohz_full_mask);
182} 182}
183 183
184extern void tick_nohz_init(void);
185extern void __tick_nohz_full_check(void); 184extern void __tick_nohz_full_check(void);
186extern void tick_nohz_full_kick(void); 185extern void tick_nohz_full_kick(void);
187extern void tick_nohz_full_kick_cpu(int cpu); 186extern void tick_nohz_full_kick_cpu(int cpu);
188extern void tick_nohz_full_kick_all(void); 187extern void tick_nohz_full_kick_all(void);
189extern void __tick_nohz_task_switch(struct task_struct *tsk); 188extern void __tick_nohz_task_switch(struct task_struct *tsk);
190#else 189#else
191static inline void tick_nohz_init(void) { }
192static inline bool tick_nohz_full_enabled(void) { return false; } 190static inline bool tick_nohz_full_enabled(void) { return false; }
193static inline bool tick_nohz_full_cpu(int cpu) { return false; } 191static inline bool tick_nohz_full_cpu(int cpu) { return false; }
194static inline void __tick_nohz_full_check(void) { } 192static inline void __tick_nohz_full_check(void) { }
diff --git a/include/linux/time.h b/include/linux/time.h
index 8c42cf8d2444..203c2ad40d71 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -39,9 +39,20 @@ static inline int timeval_compare(const struct timeval *lhs, const struct timeva
39 return lhs->tv_usec - rhs->tv_usec; 39 return lhs->tv_usec - rhs->tv_usec;
40} 40}
41 41
42extern unsigned long mktime(const unsigned int year, const unsigned int mon, 42extern time64_t mktime64(const unsigned int year, const unsigned int mon,
43 const unsigned int day, const unsigned int hour, 43 const unsigned int day, const unsigned int hour,
44 const unsigned int min, const unsigned int sec); 44 const unsigned int min, const unsigned int sec);
45
46/**
47 * Deprecated. Use mktime64().
48 */
49static inline unsigned long mktime(const unsigned int year,
50 const unsigned int mon, const unsigned int day,
51 const unsigned int hour, const unsigned int min,
52 const unsigned int sec)
53{
54 return mktime64(year, mon, day, hour, min, sec);
55}
45 56
46extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); 57extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec);
47 58
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 95640dcd1899..05af9a334893 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -42,6 +42,7 @@ struct tk_read_base {
42 * struct timekeeper - Structure holding internal timekeeping values. 42 * struct timekeeper - Structure holding internal timekeeping values.
43 * @tkr: The readout base structure 43 * @tkr: The readout base structure
44 * @xtime_sec: Current CLOCK_REALTIME time in seconds 44 * @xtime_sec: Current CLOCK_REALTIME time in seconds
45 * @ktime_sec: Current CLOCK_MONOTONIC time in seconds
45 * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset 46 * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
46 * @offs_real: Offset clock monotonic -> clock realtime 47 * @offs_real: Offset clock monotonic -> clock realtime
47 * @offs_boot: Offset clock monotonic -> clock boottime 48 * @offs_boot: Offset clock monotonic -> clock boottime
@@ -77,6 +78,7 @@ struct tk_read_base {
77struct timekeeper { 78struct timekeeper {
78 struct tk_read_base tkr; 79 struct tk_read_base tkr;
79 u64 xtime_sec; 80 u64 xtime_sec;
81 unsigned long ktime_sec;
80 struct timespec64 wall_to_monotonic; 82 struct timespec64 wall_to_monotonic;
81 ktime_t offs_real; 83 ktime_t offs_real;
82 ktime_t offs_boot; 84 ktime_t offs_boot;
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 1caa6b04fdc5..9b63d13ba82b 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -10,7 +10,7 @@ extern int timekeeping_suspended;
10 * Get and set timeofday 10 * Get and set timeofday
11 */ 11 */
12extern void do_gettimeofday(struct timeval *tv); 12extern void do_gettimeofday(struct timeval *tv);
13extern int do_settimeofday(const struct timespec *tv); 13extern int do_settimeofday64(const struct timespec64 *ts);
14extern int do_sys_settimeofday(const struct timespec *tv, 14extern int do_sys_settimeofday(const struct timespec *tv,
15 const struct timezone *tz); 15 const struct timezone *tz);
16 16
@@ -25,14 +25,24 @@ struct timespec __current_kernel_time(void);
25/* 25/*
26 * timespec based interfaces 26 * timespec based interfaces
27 */ 27 */
28struct timespec get_monotonic_coarse(void); 28struct timespec64 get_monotonic_coarse64(void);
29extern void getrawmonotonic(struct timespec *ts); 29extern void getrawmonotonic64(struct timespec64 *ts);
30extern void ktime_get_ts64(struct timespec64 *ts); 30extern void ktime_get_ts64(struct timespec64 *ts);
31extern time64_t ktime_get_seconds(void);
32extern time64_t ktime_get_real_seconds(void);
31 33
32extern int __getnstimeofday64(struct timespec64 *tv); 34extern int __getnstimeofday64(struct timespec64 *tv);
33extern void getnstimeofday64(struct timespec64 *tv); 35extern void getnstimeofday64(struct timespec64 *tv);
34 36
35#if BITS_PER_LONG == 64 37#if BITS_PER_LONG == 64
38/**
39 * Deprecated. Use do_settimeofday64().
40 */
41static inline int do_settimeofday(const struct timespec *ts)
42{
43 return do_settimeofday64(ts);
44}
45
36static inline int __getnstimeofday(struct timespec *ts) 46static inline int __getnstimeofday(struct timespec *ts)
37{ 47{
38 return __getnstimeofday64(ts); 48 return __getnstimeofday64(ts);
@@ -53,7 +63,27 @@ static inline void ktime_get_real_ts(struct timespec *ts)
53 getnstimeofday64(ts); 63 getnstimeofday64(ts);
54} 64}
55 65
66static inline void getrawmonotonic(struct timespec *ts)
67{
68 getrawmonotonic64(ts);
69}
70
71static inline struct timespec get_monotonic_coarse(void)
72{
73 return get_monotonic_coarse64();
74}
56#else 75#else
76/**
77 * Deprecated. Use do_settimeofday64().
78 */
79static inline int do_settimeofday(const struct timespec *ts)
80{
81 struct timespec64 ts64;
82
83 ts64 = timespec_to_timespec64(*ts);
84 return do_settimeofday64(&ts64);
85}
86
57static inline int __getnstimeofday(struct timespec *ts) 87static inline int __getnstimeofday(struct timespec *ts)
58{ 88{
59 struct timespec64 ts64; 89 struct timespec64 ts64;
@@ -86,6 +116,19 @@ static inline void ktime_get_real_ts(struct timespec *ts)
86 getnstimeofday64(&ts64); 116 getnstimeofday64(&ts64);
87 *ts = timespec64_to_timespec(ts64); 117 *ts = timespec64_to_timespec(ts64);
88} 118}
119
120static inline void getrawmonotonic(struct timespec *ts)
121{
122 struct timespec64 ts64;
123
124 getrawmonotonic64(&ts64);
125 *ts = timespec64_to_timespec(ts64);
126}
127
128static inline struct timespec get_monotonic_coarse(void)
129{
130 return timespec64_to_timespec(get_monotonic_coarse64());
131}
89#endif 132#endif
90 133
91extern void getboottime(struct timespec *ts); 134extern void getboottime(struct timespec *ts);
@@ -182,7 +225,7 @@ static inline void timekeeping_clocktai(struct timespec *ts)
182/* 225/*
183 * RTC specific 226 * RTC specific
184 */ 227 */
185extern void timekeeping_inject_sleeptime(struct timespec *delta); 228extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
186 229
187/* 230/*
188 * PPS accessor 231 * PPS accessor
diff --git a/include/linux/topology.h b/include/linux/topology.h
index dda6ee521e74..909b6e43b694 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -119,11 +119,20 @@ static inline int numa_node_id(void)
119 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). 119 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
120 */ 120 */
121DECLARE_PER_CPU(int, _numa_mem_); 121DECLARE_PER_CPU(int, _numa_mem_);
122extern int _node_numa_mem_[MAX_NUMNODES];
122 123
123#ifndef set_numa_mem 124#ifndef set_numa_mem
124static inline void set_numa_mem(int node) 125static inline void set_numa_mem(int node)
125{ 126{
126 this_cpu_write(_numa_mem_, node); 127 this_cpu_write(_numa_mem_, node);
128 _node_numa_mem_[numa_node_id()] = node;
129}
130#endif
131
132#ifndef node_to_mem_node
133static inline int node_to_mem_node(int node)
134{
135 return _node_numa_mem_[node];
127} 136}
128#endif 137#endif
129 138
@@ -146,6 +155,7 @@ static inline int cpu_to_mem(int cpu)
146static inline void set_cpu_numa_mem(int cpu, int node) 155static inline void set_cpu_numa_mem(int cpu, int node)
147{ 156{
148 per_cpu(_numa_mem_, cpu) = node; 157 per_cpu(_numa_mem_, cpu) = node;
158 _node_numa_mem_[cpu_to_node(cpu)] = node;
149} 159}
150#endif 160#endif
151 161
@@ -159,6 +169,13 @@ static inline int numa_mem_id(void)
159} 169}
160#endif 170#endif
161 171
172#ifndef node_to_mem_node
173static inline int node_to_mem_node(int node)
174{
175 return node;
176}
177#endif
178
162#ifndef cpu_to_mem 179#ifndef cpu_to_mem
163static inline int cpu_to_mem(int cpu) 180static inline int cpu_to_mem(int cpu)
164{ 181{
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 5ca58fcbaf1b..7759fc3c622d 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -51,7 +51,7 @@
51 51
52/* Definitions for online/offline exerciser. */ 52/* Definitions for online/offline exerciser. */
53int torture_onoff_init(long ooholdoff, long oointerval); 53int torture_onoff_init(long ooholdoff, long oointerval);
54char *torture_onoff_stats(char *page); 54void torture_onoff_stats(void);
55bool torture_onoff_failures(void); 55bool torture_onoff_failures(void);
56 56
57/* Low-rider random number generator. */ 57/* Low-rider random number generator. */
@@ -77,7 +77,8 @@ int torture_stutter_init(int s);
77/* Initialization and cleanup. */ 77/* Initialization and cleanup. */
78bool torture_init_begin(char *ttype, bool v, int *runnable); 78bool torture_init_begin(char *ttype, bool v, int *runnable);
79void torture_init_end(void); 79void torture_init_end(void);
80bool torture_cleanup(void); 80bool torture_cleanup_begin(void);
81void torture_cleanup_end(void);
81bool torture_must_stop(void); 82bool torture_must_stop(void);
82bool torture_must_stop_irq(void); 83bool torture_must_stop_irq(void);
83void torture_kthread_stopping(char *title); 84void torture_kthread_stopping(char *title);
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index ea6c9dea79e3..cfaf5a1d4bad 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -1,7 +1,7 @@
1#ifndef _LINUX_TRACE_SEQ_H 1#ifndef _LINUX_TRACE_SEQ_H
2#define _LINUX_TRACE_SEQ_H 2#define _LINUX_TRACE_SEQ_H
3 3
4#include <linux/fs.h> 4#include <linux/seq_buf.h>
5 5
6#include <asm/page.h> 6#include <asm/page.h>
7 7
@@ -12,20 +12,36 @@
12 12
13struct trace_seq { 13struct trace_seq {
14 unsigned char buffer[PAGE_SIZE]; 14 unsigned char buffer[PAGE_SIZE];
15 unsigned int len; 15 struct seq_buf seq;
16 unsigned int readpos;
17 int full; 16 int full;
18}; 17};
19 18
20static inline void 19static inline void
21trace_seq_init(struct trace_seq *s) 20trace_seq_init(struct trace_seq *s)
22{ 21{
23 s->len = 0; 22 seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
24 s->readpos = 0;
25 s->full = 0; 23 s->full = 0;
26} 24}
27 25
28/** 26/**
27 * trace_seq_used - amount of actual data written to buffer
28 * @s: trace sequence descriptor
29 *
30 * Returns the amount of data written to the buffer.
31 *
32 * IMPORTANT!
33 *
34 * Use this instead of @s->seq.len if you need to pass the amount
35 * of data from the buffer to another buffer (userspace, or what not).
36 * The @s->seq.len on overflow is bigger than the buffer size and
37 * using it can cause access to undefined memory.
38 */
39static inline int trace_seq_used(struct trace_seq *s)
40{
41 return seq_buf_used(&s->seq);
42}
43
44/**
29 * trace_seq_buffer_ptr - return pointer to next location in buffer 45 * trace_seq_buffer_ptr - return pointer to next location in buffer
30 * @s: trace sequence descriptor 46 * @s: trace sequence descriptor
31 * 47 *
@@ -37,7 +53,19 @@ trace_seq_init(struct trace_seq *s)
37static inline unsigned char * 53static inline unsigned char *
38trace_seq_buffer_ptr(struct trace_seq *s) 54trace_seq_buffer_ptr(struct trace_seq *s)
39{ 55{
40 return s->buffer + s->len; 56 return s->buffer + seq_buf_used(&s->seq);
57}
58
59/**
60 * trace_seq_has_overflowed - return true if the trace_seq took too much
61 * @s: trace sequence descriptor
62 *
63 * Returns true if too much data was added to the trace_seq and it is
64 * now full and will not take anymore.
65 */
66static inline bool trace_seq_has_overflowed(struct trace_seq *s)
67{
68 return s->full || seq_buf_has_overflowed(&s->seq);
41} 69}
42 70
43/* 71/*
@@ -45,40 +73,37 @@ trace_seq_buffer_ptr(struct trace_seq *s)
45 */ 73 */
46#ifdef CONFIG_TRACING 74#ifdef CONFIG_TRACING
47extern __printf(2, 3) 75extern __printf(2, 3)
48int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); 76void trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
49extern __printf(2, 0) 77extern __printf(2, 0)
50int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); 78void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
51extern int 79extern void
52trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); 80trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
53extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); 81extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
54extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 82extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
55 int cnt); 83 int cnt);
56extern int trace_seq_puts(struct trace_seq *s, const char *str); 84extern void trace_seq_puts(struct trace_seq *s, const char *str);
57extern int trace_seq_putc(struct trace_seq *s, unsigned char c); 85extern void trace_seq_putc(struct trace_seq *s, unsigned char c);
58extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); 86extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
59extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, 87extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
60 unsigned int len); 88 unsigned int len);
61extern int trace_seq_path(struct trace_seq *s, const struct path *path); 89extern int trace_seq_path(struct trace_seq *s, const struct path *path);
62 90
63extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, 91extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
64 int nmaskbits); 92 int nmaskbits);
65 93
66#else /* CONFIG_TRACING */ 94#else /* CONFIG_TRACING */
67static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) 95static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
68{ 96{
69 return 0;
70} 97}
71static inline int 98static inline void
72trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) 99trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
73{ 100{
74 return 0;
75} 101}
76 102
77static inline int 103static inline void
78trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, 104trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
79 int nmaskbits) 105 int nmaskbits)
80{ 106{
81 return 0;
82} 107}
83 108
84static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) 109static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
@@ -90,23 +115,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
90{ 115{
91 return 0; 116 return 0;
92} 117}
93static inline int trace_seq_puts(struct trace_seq *s, const char *str) 118static inline void trace_seq_puts(struct trace_seq *s, const char *str)
94{ 119{
95 return 0;
96} 120}
97static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) 121static inline void trace_seq_putc(struct trace_seq *s, unsigned char c)
98{ 122{
99 return 0;
100} 123}
101static inline int 124static inline void
102trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) 125trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
103{ 126{
104 return 0;
105} 127}
106static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, 128static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
107 unsigned int len) 129 unsigned int len)
108{ 130{
109 return 0;
110} 131}
111static inline int trace_seq_path(struct trace_seq *s, const struct path *path) 132static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
112{ 133{
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index b1293f15f592..e08e21e5f601 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -157,6 +157,12 @@ extern void syscall_unregfunc(void);
157 * Make sure the alignment of the structure in the __tracepoints section will 157 * Make sure the alignment of the structure in the __tracepoints section will
158 * not add unwanted padding between the beginning of the section and the 158 * not add unwanted padding between the beginning of the section and the
159 * structure. Force alignment to the same alignment as the section start. 159 * structure. Force alignment to the same alignment as the section start.
160 *
161 * When lockdep is enabled, we make sure to always do the RCU portions of
162 * the tracepoint code, regardless of whether tracing is on or we match the
163 * condition. This lets us find RCU issues triggered with tracepoints even
164 * when this tracepoint is off. This code has no purpose other than poking
165 * RCU a bit.
160 */ 166 */
161#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \ 167#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
162 extern struct tracepoint __tracepoint_##name; \ 168 extern struct tracepoint __tracepoint_##name; \
@@ -167,6 +173,11 @@ extern void syscall_unregfunc(void);
167 TP_PROTO(data_proto), \ 173 TP_PROTO(data_proto), \
168 TP_ARGS(data_args), \ 174 TP_ARGS(data_args), \
169 TP_CONDITION(cond),,); \ 175 TP_CONDITION(cond),,); \
176 if (IS_ENABLED(CONFIG_LOCKDEP)) { \
177 rcu_read_lock_sched_notrace(); \
178 rcu_dereference_sched(__tracepoint_##name.funcs);\
179 rcu_read_unlock_sched_notrace(); \
180 } \
170 } \ 181 } \
171 __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ 182 __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \
172 PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \ 183 PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 84132942902a..7d66ae508e5c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -252,6 +252,7 @@ struct tty_struct {
252 struct rw_semaphore termios_rwsem; 252 struct rw_semaphore termios_rwsem;
253 struct mutex winsize_mutex; 253 struct mutex winsize_mutex;
254 spinlock_t ctrl_lock; 254 spinlock_t ctrl_lock;
255 spinlock_t flow_lock;
255 /* Termios values are protected by the termios rwsem */ 256 /* Termios values are protected by the termios rwsem */
256 struct ktermios termios, termios_locked; 257 struct ktermios termios, termios_locked;
257 struct termiox *termiox; /* May be NULL for unsupported */ 258 struct termiox *termiox; /* May be NULL for unsupported */
@@ -261,8 +262,13 @@ struct tty_struct {
261 unsigned long flags; 262 unsigned long flags;
262 int count; 263 int count;
263 struct winsize winsize; /* winsize_mutex */ 264 struct winsize winsize; /* winsize_mutex */
264 unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1; 265 unsigned long stopped:1, /* flow_lock */
265 unsigned char ctrl_status; /* ctrl_lock */ 266 flow_stopped:1,
267 unused:BITS_PER_LONG - 2;
268 int hw_stopped;
269 unsigned long ctrl_status:8, /* ctrl_lock */
270 packet:1,
271 unused_ctrl:BITS_PER_LONG - 9;
266 unsigned int receive_room; /* Bytes free for queue */ 272 unsigned int receive_room; /* Bytes free for queue */
267 int flow_change; 273 int flow_change;
268 274
@@ -278,7 +284,7 @@ struct tty_struct {
278 284
279#define N_TTY_BUF_SIZE 4096 285#define N_TTY_BUF_SIZE 4096
280 286
281 unsigned char closing:1; 287 int closing;
282 unsigned char *write_buf; 288 unsigned char *write_buf;
283 int write_cnt; 289 int write_cnt;
284 /* If the tty has a pending do_SAK, queue it here - akpm */ 290 /* If the tty has a pending do_SAK, queue it here - akpm */
@@ -310,12 +316,10 @@ struct tty_file_private {
310#define TTY_EXCLUSIVE 3 /* Exclusive open mode */ 316#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
311#define TTY_DEBUG 4 /* Debugging */ 317#define TTY_DEBUG 4 /* Debugging */
312#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */ 318#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
313#define TTY_CLOSING 7 /* ->close() in progress */
314#define TTY_LDISC_OPEN 11 /* Line discipline is open */ 319#define TTY_LDISC_OPEN 11 /* Line discipline is open */
315#define TTY_PTY_LOCK 16 /* pty private */ 320#define TTY_PTY_LOCK 16 /* pty private */
316#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */ 321#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
317#define TTY_HUPPED 18 /* Post driver->hangup() */ 322#define TTY_HUPPED 18 /* Post driver->hangup() */
318#define TTY_HUPPING 21 /* ->hangup() in progress */
319#define TTY_LDISC_HALTED 22 /* Line discipline is halted */ 323#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
320 324
321#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) 325#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
@@ -397,7 +401,9 @@ extern int tty_paranoia_check(struct tty_struct *tty, struct inode *inode,
397extern char *tty_name(struct tty_struct *tty, char *buf); 401extern char *tty_name(struct tty_struct *tty, char *buf);
398extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); 402extern void tty_wait_until_sent(struct tty_struct *tty, long timeout);
399extern int tty_check_change(struct tty_struct *tty); 403extern int tty_check_change(struct tty_struct *tty);
404extern void __stop_tty(struct tty_struct *tty);
400extern void stop_tty(struct tty_struct *tty); 405extern void stop_tty(struct tty_struct *tty);
406extern void __start_tty(struct tty_struct *tty);
401extern void start_tty(struct tty_struct *tty); 407extern void start_tty(struct tty_struct *tty);
402extern int tty_register_driver(struct tty_driver *driver); 408extern int tty_register_driver(struct tty_driver *driver);
403extern int tty_unregister_driver(struct tty_driver *driver); 409extern int tty_unregister_driver(struct tty_driver *driver);
@@ -411,6 +417,7 @@ extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
411extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp, 417extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
412 int buflen); 418 int buflen);
413extern void tty_write_message(struct tty_struct *tty, char *msg); 419extern void tty_write_message(struct tty_struct *tty, char *msg);
420extern int tty_send_xchar(struct tty_struct *tty, char ch);
414extern int tty_put_char(struct tty_struct *tty, unsigned char c); 421extern int tty_put_char(struct tty_struct *tty, unsigned char c);
415extern int tty_chars_in_buffer(struct tty_struct *tty); 422extern int tty_chars_in_buffer(struct tty_struct *tty);
416extern int tty_write_room(struct tty_struct *tty); 423extern int tty_write_room(struct tty_struct *tty);
@@ -428,14 +435,13 @@ extern int is_ignored(int sig);
428extern int tty_signal(int sig, struct tty_struct *tty); 435extern int tty_signal(int sig, struct tty_struct *tty);
429extern void tty_hangup(struct tty_struct *tty); 436extern void tty_hangup(struct tty_struct *tty);
430extern void tty_vhangup(struct tty_struct *tty); 437extern void tty_vhangup(struct tty_struct *tty);
431extern void tty_unhangup(struct file *filp);
432extern int tty_hung_up_p(struct file *filp); 438extern int tty_hung_up_p(struct file *filp);
433extern void do_SAK(struct tty_struct *tty); 439extern void do_SAK(struct tty_struct *tty);
434extern void __do_SAK(struct tty_struct *tty); 440extern void __do_SAK(struct tty_struct *tty);
435extern void no_tty(void); 441extern void no_tty(void);
436extern void tty_flush_to_ldisc(struct tty_struct *tty); 442extern void tty_flush_to_ldisc(struct tty_struct *tty);
437extern void tty_buffer_free_all(struct tty_port *port); 443extern void tty_buffer_free_all(struct tty_port *port);
438extern void tty_buffer_flush(struct tty_struct *tty); 444extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
439extern void tty_buffer_init(struct tty_port *port); 445extern void tty_buffer_init(struct tty_port *port);
440extern speed_t tty_termios_baud_rate(struct ktermios *termios); 446extern speed_t tty_termios_baud_rate(struct ktermios *termios);
441extern speed_t tty_termios_input_baud_rate(struct ktermios *termios); 447extern speed_t tty_termios_input_baud_rate(struct ktermios *termios);
@@ -489,14 +495,9 @@ extern int tty_init_termios(struct tty_struct *tty);
489extern int tty_standard_install(struct tty_driver *driver, 495extern int tty_standard_install(struct tty_driver *driver,
490 struct tty_struct *tty); 496 struct tty_struct *tty);
491 497
492extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
493extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
494
495extern struct mutex tty_mutex; 498extern struct mutex tty_mutex;
496extern spinlock_t tty_files_lock; 499extern spinlock_t tty_files_lock;
497 500
498extern void tty_write_unlock(struct tty_struct *tty);
499extern int tty_write_lock(struct tty_struct *tty, int ndelay);
500#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock)) 501#define tty_is_writelocked(tty) (mutex_is_locked(&tty->atomic_write_lock))
501 502
502extern void tty_port_init(struct tty_port *port); 503extern void tty_port_init(struct tty_port *port);
@@ -555,7 +556,7 @@ extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
555extern int tty_unregister_ldisc(int disc); 556extern int tty_unregister_ldisc(int disc);
556extern int tty_set_ldisc(struct tty_struct *tty, int ldisc); 557extern int tty_set_ldisc(struct tty_struct *tty, int ldisc);
557extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty); 558extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
558extern void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty); 559extern void tty_ldisc_release(struct tty_struct *tty);
559extern void tty_ldisc_init(struct tty_struct *tty); 560extern void tty_ldisc_init(struct tty_struct *tty);
560extern void tty_ldisc_deinit(struct tty_struct *tty); 561extern void tty_ldisc_deinit(struct tty_struct *tty);
561extern void tty_ldisc_begin(void); 562extern void tty_ldisc_begin(void);
@@ -616,14 +617,6 @@ extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file,
616extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file, 617extern long n_tty_compat_ioctl_helper(struct tty_struct *tty, struct file *file,
617 unsigned int cmd, unsigned long arg); 618 unsigned int cmd, unsigned long arg);
618 619
619/* serial.c */
620
621extern void serial_console_init(void);
622
623/* pcxx.c */
624
625extern int pcxe_open(struct tty_struct *tty, struct file *filp);
626
627/* vt.c */ 620/* vt.c */
628 621
629extern int vt_ioctl(struct tty_struct *tty, 622extern int vt_ioctl(struct tty_struct *tty,
@@ -636,11 +629,9 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
636/* functions for preparation of BKL removal */ 629/* functions for preparation of BKL removal */
637extern void __lockfunc tty_lock(struct tty_struct *tty); 630extern void __lockfunc tty_lock(struct tty_struct *tty);
638extern void __lockfunc tty_unlock(struct tty_struct *tty); 631extern void __lockfunc tty_unlock(struct tty_struct *tty);
639extern void __lockfunc tty_lock_pair(struct tty_struct *tty, 632extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
640 struct tty_struct *tty2); 633extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
641extern void __lockfunc tty_unlock_pair(struct tty_struct *tty, 634extern void tty_set_lock_subclass(struct tty_struct *tty);
642 struct tty_struct *tty2);
643
644/* 635/*
645 * this shall be called only from where BTM is held (like close) 636 * this shall be called only from where BTM is held (like close)
646 * 637 *
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index e48c608a8fa8..92e337c18839 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -152,6 +152,8 @@
152 * This routine notifies the tty driver that it should stop 152 * This routine notifies the tty driver that it should stop
153 * outputting characters to the tty device. 153 * outputting characters to the tty device.
154 * 154 *
155 * Called with ->flow_lock held. Serialized with start() method.
156 *
155 * Optional: 157 * Optional:
156 * 158 *
157 * Note: Call stop_tty not this method. 159 * Note: Call stop_tty not this method.
@@ -161,6 +163,8 @@
161 * This routine notifies the tty driver that it resume sending 163 * This routine notifies the tty driver that it resume sending
162 * characters to the tty device. 164 * characters to the tty device.
163 * 165 *
166 * Called with ->flow_lock held. Serialized with stop() method.
167 *
164 * Optional: 168 * Optional:
165 * 169 *
166 * Note: Call start_tty not this method. 170 * Note: Call start_tty not this method.
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 247cfdcc4b08..ee3277593222 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -49,7 +49,11 @@ struct udp_sock {
49 unsigned int corkflag; /* Cork is required */ 49 unsigned int corkflag; /* Cork is required */
50 __u8 encap_type; /* Is this an Encapsulation socket? */ 50 __u8 encap_type; /* Is this an Encapsulation socket? */
51 unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */ 51 unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
52 no_check6_rx:1;/* Allow zero UDP6 checksums on RX? */ 52 no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
53 convert_csum:1;/* On receive, convert checksum
54 * unnecessary to checksum complete
55 * if possible.
56 */
53 /* 57 /*
54 * Following member retains the information to create a UDP header 58 * Following member retains the information to create a UDP header
55 * when the socket is uncorked. 59 * when the socket is uncorked.
@@ -98,6 +102,16 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
98 return udp_sk(sk)->no_check6_rx; 102 return udp_sk(sk)->no_check6_rx;
99} 103}
100 104
105static inline void udp_set_convert_csum(struct sock *sk, bool val)
106{
107 udp_sk(sk)->convert_csum = val;
108}
109
110static inline bool udp_get_convert_csum(struct sock *sk)
111{
112 return udp_sk(sk)->convert_csum;
113}
114
101#define udp_portaddr_for_each_entry(__sk, node, list) \ 115#define udp_portaddr_for_each_entry(__sk, node, list) \
102 hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node) 116 hlist_nulls_for_each_entry(__sk, node, list, __sk_common.skc_portaddr_node)
103 117
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 290fbf0b6b8a..1c5e453f7ea9 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -31,6 +31,7 @@ struct iov_iter {
31 size_t count; 31 size_t count;
32 union { 32 union {
33 const struct iovec *iov; 33 const struct iovec *iov;
34 const struct kvec *kvec;
34 const struct bio_vec *bvec; 35 const struct bio_vec *bvec;
35 }; 36 };
36 unsigned long nr_segs; 37 unsigned long nr_segs;
@@ -80,9 +81,15 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
80 struct iov_iter *i); 81 struct iov_iter *i);
81size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, 82size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
82 struct iov_iter *i); 83 struct iov_iter *i);
84size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i);
85size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
86size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
87size_t iov_iter_zero(size_t bytes, struct iov_iter *);
83unsigned long iov_iter_alignment(const struct iov_iter *i); 88unsigned long iov_iter_alignment(const struct iov_iter *i);
84void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov, 89void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
85 unsigned long nr_segs, size_t count); 90 unsigned long nr_segs, size_t count);
91void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *iov,
92 unsigned long nr_segs, size_t count);
86ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, 93ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
87 size_t maxsize, unsigned maxpages, size_t *start); 94 size_t maxsize, unsigned maxpages, size_t *start);
88ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, 95ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages,
@@ -94,6 +101,11 @@ static inline size_t iov_iter_count(struct iov_iter *i)
94 return i->count; 101 return i->count;
95} 102}
96 103
104static inline bool iter_is_iovec(struct iov_iter *i)
105{
106 return !(i->type & (ITER_BVEC | ITER_KVEC));
107}
108
97/* 109/*
98 * Cap the iov_iter by given limit; note that the second argument is 110 * Cap the iov_iter by given limit; note that the second argument is
99 * *not* the new size - it's upper limit for such. Passing it a value 111 * *not* the new size - it's upper limit for such. Passing it a value
@@ -120,9 +132,10 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
120{ 132{
121 i->count = count; 133 i->count = count;
122} 134}
135size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
136size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
123 137
124int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); 138int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
125int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
126int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, 139int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
127 int offset, int len); 140 int offset, int len);
128int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, 141int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 1ad4724458de..32c0e83d6239 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -35,7 +35,7 @@ struct uio_map;
35struct uio_mem { 35struct uio_mem {
36 const char *name; 36 const char *name;
37 phys_addr_t addr; 37 phys_addr_t addr;
38 unsigned long size; 38 resource_size_t size;
39 int memtype; 39 int memtype;
40 void __iomem *internal_addr; 40 void __iomem *internal_addr;
41 struct uio_map *map; 41 struct uio_map *map;
@@ -63,7 +63,17 @@ struct uio_port {
63 63
64#define MAX_UIO_PORT_REGIONS 5 64#define MAX_UIO_PORT_REGIONS 5
65 65
66struct uio_device; 66struct uio_device {
67 struct module *owner;
68 struct device *dev;
69 int minor;
70 atomic_t event;
71 struct fasync_struct *async_queue;
72 wait_queue_head_t wait;
73 struct uio_info *info;
74 struct kobject *map_dir;
75 struct kobject *portio_dir;
76};
67 77
68/** 78/**
69 * struct uio_info - UIO device capabilities 79 * struct uio_info - UIO device capabilities
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 4f844c6b03ee..60beb5dc7977 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -98,11 +98,11 @@ struct uprobes_state {
98 struct xol_area *xol_area; 98 struct xol_area *xol_area;
99}; 99};
100 100
101extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); 101extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
102extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); 102extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
103extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); 103extern bool is_swbp_insn(uprobe_opcode_t *insn);
104extern bool __weak is_trap_insn(uprobe_opcode_t *insn); 104extern bool is_trap_insn(uprobe_opcode_t *insn);
105extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); 105extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
106extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); 106extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
107extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); 107extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
108extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); 108extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
@@ -128,8 +128,8 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
128extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); 128extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
129extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); 129extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
130extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); 130extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
131extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); 131extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
132extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, 132extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
133 void *src, unsigned long len); 133 void *src, unsigned long len);
134#else /* !CONFIG_UPROBES */ 134#else /* !CONFIG_UPROBES */
135struct uprobes_state { 135struct uprobes_state {
diff --git a/include/linux/usb.h b/include/linux/usb.h
index d2465bc0e73c..f89c24a03bd9 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -637,7 +637,7 @@ static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
637#endif 637#endif
638 638
639/* USB autosuspend and autoresume */ 639/* USB autosuspend and autoresume */
640#ifdef CONFIG_PM_RUNTIME 640#ifdef CONFIG_PM
641extern void usb_enable_autosuspend(struct usb_device *udev); 641extern void usb_enable_autosuspend(struct usb_device *udev);
642extern void usb_disable_autosuspend(struct usb_device *udev); 642extern void usb_disable_autosuspend(struct usb_device *udev);
643 643
@@ -1862,6 +1862,18 @@ extern void usb_unregister_notify(struct notifier_block *nb);
1862/* debugfs stuff */ 1862/* debugfs stuff */
1863extern struct dentry *usb_debug_root; 1863extern struct dentry *usb_debug_root;
1864 1864
1865/* LED triggers */
1866enum usb_led_event {
1867 USB_LED_EVENT_HOST = 0,
1868 USB_LED_EVENT_GADGET = 1,
1869};
1870
1871#ifdef CONFIG_USB_LED_TRIG
1872extern void usb_led_activity(enum usb_led_event ev);
1873#else
1874static inline void usb_led_activity(enum usb_led_event ev) {}
1875#endif
1876
1865#endif /* __KERNEL__ */ 1877#endif /* __KERNEL__ */
1866 1878
1867#endif 1879#endif
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index bbe779f640be..535997a6681b 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -13,11 +13,12 @@ struct ci_hdrc_platform_data {
13 /* offset of the capability registers */ 13 /* offset of the capability registers */
14 uintptr_t capoffset; 14 uintptr_t capoffset;
15 unsigned power_budget; 15 unsigned power_budget;
16 struct usb_phy *phy; 16 struct phy *phy;
17 /* old usb_phy interface */
18 struct usb_phy *usb_phy;
17 enum usb_phy_interface phy_mode; 19 enum usb_phy_interface phy_mode;
18 unsigned long flags; 20 unsigned long flags;
19#define CI_HDRC_REGS_SHARED BIT(0) 21#define CI_HDRC_REGS_SHARED BIT(0)
20#define CI_HDRC_REQUIRE_TRANSCEIVER BIT(1)
21#define CI_HDRC_DISABLE_STREAMING BIT(3) 22#define CI_HDRC_DISABLE_STREAMING BIT(3)
22 /* 23 /*
23 * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1, 24 * Only set it when DCCPARAMS.DC==1 and DCCPARAMS.HC==1,
@@ -31,6 +32,7 @@ struct ci_hdrc_platform_data {
31#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 32#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
32 void (*notify_event) (struct ci_hdrc *ci, unsigned event); 33 void (*notify_event) (struct ci_hdrc *ci, unsigned event);
33 struct regulator *reg_vbus; 34 struct regulator *reg_vbus;
35 bool tpl_support;
34}; 36};
35 37
36/* Default offset of capability registers */ 38/* Default offset of capability registers */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index c330f5ef42cf..3d87defcc527 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -427,6 +427,8 @@ static inline struct usb_composite_driver *to_cdriver(
427 * @b_vendor_code: bMS_VendorCode part of the OS string 427 * @b_vendor_code: bMS_VendorCode part of the OS string
428 * @use_os_string: false by default, interested gadgets set it 428 * @use_os_string: false by default, interested gadgets set it
429 * @os_desc_config: the configuration to be used with OS descriptors 429 * @os_desc_config: the configuration to be used with OS descriptors
430 * @setup_pending: true when setup request is queued but not completed
431 * @os_desc_pending: true when os_desc request is queued but not completed
430 * 432 *
431 * One of these devices is allocated and initialized before the 433 * One of these devices is allocated and initialized before the
432 * associated device driver's bind() is called. 434 * associated device driver's bind() is called.
@@ -488,6 +490,9 @@ struct usb_composite_dev {
488 490
489 /* protects deactivations and delayed_status counts*/ 491 /* protects deactivations and delayed_status counts*/
490 spinlock_t lock; 492 spinlock_t lock;
493
494 unsigned setup_pending:1;
495 unsigned os_desc_pending:1;
491}; 496};
492 497
493extern int usb_string_id(struct usb_composite_dev *c); 498extern int usb_string_id(struct usb_composite_dev *c);
@@ -501,6 +506,8 @@ extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n);
501extern void composite_disconnect(struct usb_gadget *gadget); 506extern void composite_disconnect(struct usb_gadget *gadget);
502extern int composite_setup(struct usb_gadget *gadget, 507extern int composite_setup(struct usb_gadget *gadget,
503 const struct usb_ctrlrequest *ctrl); 508 const struct usb_ctrlrequest *ctrl);
509extern void composite_suspend(struct usb_gadget *gadget);
510extern void composite_resume(struct usb_gadget *gadget);
504 511
505/* 512/*
506 * Some systems will need runtime overrides for the product identifiers 513 * Some systems will need runtime overrides for the product identifiers
diff --git a/include/linux/usb/ehci-dbgp.h b/include/linux/usb/ehci-dbgp.h
new file mode 100644
index 000000000000..7344d9e591cc
--- /dev/null
+++ b/include/linux/usb/ehci-dbgp.h
@@ -0,0 +1,83 @@
1/*
2 * Standalone EHCI usb debug driver
3 *
4 * Originally written by:
5 * Eric W. Biederman" <ebiederm@xmission.com> and
6 * Yinghai Lu <yhlu.kernel@gmail.com>
7 *
8 * Changes for early/late printk and HW errata:
9 * Jason Wessel <jason.wessel@windriver.com>
10 * Copyright (C) 2009 Wind River Systems, Inc.
11 *
12 */
13
14#ifndef __LINUX_USB_EHCI_DBGP_H
15#define __LINUX_USB_EHCI_DBGP_H
16
17#include <linux/console.h>
18#include <linux/types.h>
19
20/* Appendix C, Debug port ... intended for use with special "debug devices"
21 * that can help if there's no serial console. (nonstandard enumeration.)
22 */
23struct ehci_dbg_port {
24 u32 control;
25#define DBGP_OWNER (1<<30)
26#define DBGP_ENABLED (1<<28)
27#define DBGP_DONE (1<<16)
28#define DBGP_INUSE (1<<10)
29#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
30# define DBGP_ERR_BAD 1
31# define DBGP_ERR_SIGNAL 2
32#define DBGP_ERROR (1<<6)
33#define DBGP_GO (1<<5)
34#define DBGP_OUT (1<<4)
35#define DBGP_LEN(x) (((x)>>0)&0x0f)
36 u32 pids;
37#define DBGP_PID_GET(x) (((x)>>16)&0xff)
38#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
39 u32 data03;
40 u32 data47;
41 u32 address;
42#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
43};
44
45#ifdef CONFIG_EARLY_PRINTK_DBGP
46extern int early_dbgp_init(char *s);
47extern struct console early_dbgp_console;
48#endif /* CONFIG_EARLY_PRINTK_DBGP */
49
50struct usb_hcd;
51
52#ifdef CONFIG_XEN_DOM0
53extern int xen_dbgp_reset_prep(struct usb_hcd *);
54extern int xen_dbgp_external_startup(struct usb_hcd *);
55#else
56static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
57{
58 return 1; /* Shouldn't this be 0? */
59}
60
61static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
62{
63 return -1;
64}
65#endif
66
67#ifdef CONFIG_EARLY_PRINTK_DBGP
68/* Call backs from ehci host driver to ehci debug driver */
69extern int dbgp_external_startup(struct usb_hcd *);
70extern int dbgp_reset_prep(struct usb_hcd *);
71#else
72static inline int dbgp_reset_prep(struct usb_hcd *hcd)
73{
74 return xen_dbgp_reset_prep(hcd);
75}
76
77static inline int dbgp_external_startup(struct usb_hcd *hcd)
78{
79 return xen_dbgp_external_startup(hcd);
80}
81#endif
82
83#endif /* __LINUX_USB_EHCI_DBGP_H */
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index daec99af5d54..966889a20ea3 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -19,6 +19,8 @@
19#ifndef __LINUX_USB_EHCI_DEF_H 19#ifndef __LINUX_USB_EHCI_DEF_H
20#define __LINUX_USB_EHCI_DEF_H 20#define __LINUX_USB_EHCI_DEF_H
21 21
22#include <linux/usb/ehci-dbgp.h>
23
22/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */ 24/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
23 25
24/* Section 2.2 Host Controller Capability Registers */ 26/* Section 2.2 Host Controller Capability Registers */
@@ -190,67 +192,4 @@ struct ehci_regs {
190#define USBMODE_EX_HC (3<<0) /* host controller mode */ 192#define USBMODE_EX_HC (3<<0) /* host controller mode */
191}; 193};
192 194
193/* Appendix C, Debug port ... intended for use with special "debug devices"
194 * that can help if there's no serial console. (nonstandard enumeration.)
195 */
196struct ehci_dbg_port {
197 u32 control;
198#define DBGP_OWNER (1<<30)
199#define DBGP_ENABLED (1<<28)
200#define DBGP_DONE (1<<16)
201#define DBGP_INUSE (1<<10)
202#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
203# define DBGP_ERR_BAD 1
204# define DBGP_ERR_SIGNAL 2
205#define DBGP_ERROR (1<<6)
206#define DBGP_GO (1<<5)
207#define DBGP_OUT (1<<4)
208#define DBGP_LEN(x) (((x)>>0)&0x0f)
209 u32 pids;
210#define DBGP_PID_GET(x) (((x)>>16)&0xff)
211#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
212 u32 data03;
213 u32 data47;
214 u32 address;
215#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
216};
217
218#ifdef CONFIG_EARLY_PRINTK_DBGP
219#include <linux/init.h>
220extern int __init early_dbgp_init(char *s);
221extern struct console early_dbgp_console;
222#endif /* CONFIG_EARLY_PRINTK_DBGP */
223
224struct usb_hcd;
225
226#ifdef CONFIG_XEN_DOM0
227extern int xen_dbgp_reset_prep(struct usb_hcd *);
228extern int xen_dbgp_external_startup(struct usb_hcd *);
229#else
230static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
231{
232 return 1; /* Shouldn't this be 0? */
233}
234
235static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
236{
237 return -1;
238}
239#endif
240
241#ifdef CONFIG_EARLY_PRINTK_DBGP
242/* Call backs from ehci host driver to ehci debug driver */
243extern int dbgp_external_startup(struct usb_hcd *);
244extern int dbgp_reset_prep(struct usb_hcd *hcd);
245#else
246static inline int dbgp_reset_prep(struct usb_hcd *hcd)
247{
248 return xen_dbgp_reset_prep(hcd);
249}
250static inline int dbgp_external_startup(struct usb_hcd *hcd)
251{
252 return xen_dbgp_external_startup(hcd);
253}
254#endif
255
256#endif /* __LINUX_USB_EHCI_DEF_H */ 195#endif /* __LINUX_USB_EHCI_DEF_H */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index c3a61853cd13..70ddb3943b62 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -345,12 +345,13 @@ static inline int usb_ep_queue(struct usb_ep *ep,
345 * @ep:the endpoint associated with the request 345 * @ep:the endpoint associated with the request
346 * @req:the request being canceled 346 * @req:the request being canceled
347 * 347 *
348 * if the request is still active on the endpoint, it is dequeued and its 348 * If the request is still active on the endpoint, it is dequeued and its
349 * completion routine is called (with status -ECONNRESET); else a negative 349 * completion routine is called (with status -ECONNRESET); else a negative
350 * error code is returned. 350 * error code is returned. This is guaranteed to happen before the call to
351 * usb_ep_dequeue() returns.
351 * 352 *
352 * note that some hardware can't clear out write fifos (to unlink the request 353 * Note that some hardware can't clear out write fifos (to unlink the request
353 * at the head of the queue) except as part of disconnecting from usb. such 354 * at the head of the queue) except as part of disconnecting from usb. Such
354 * restrictions prevent drivers from supporting configuration changes, 355 * restrictions prevent drivers from supporting configuration changes,
355 * even to configuration zero (a "chapter 9" requirement). 356 * even to configuration zero (a "chapter 9" requirement).
356 */ 357 */
@@ -489,8 +490,7 @@ struct usb_gadget_ops {
489 void (*get_config_params)(struct usb_dcd_config_params *); 490 void (*get_config_params)(struct usb_dcd_config_params *);
490 int (*udc_start)(struct usb_gadget *, 491 int (*udc_start)(struct usb_gadget *,
491 struct usb_gadget_driver *); 492 struct usb_gadget_driver *);
492 int (*udc_stop)(struct usb_gadget *, 493 int (*udc_stop)(struct usb_gadget *);
493 struct usb_gadget_driver *);
494}; 494};
495 495
496/** 496/**
@@ -816,6 +816,8 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget)
816 * Called in a context that permits sleeping. 816 * Called in a context that permits sleeping.
817 * @suspend: Invoked on USB suspend. May be called in_interrupt. 817 * @suspend: Invoked on USB suspend. May be called in_interrupt.
818 * @resume: Invoked on USB resume. May be called in_interrupt. 818 * @resume: Invoked on USB resume. May be called in_interrupt.
819 * @reset: Invoked on USB bus reset. It is mandatory for all gadget drivers
820 * and should be called in_interrupt.
819 * @driver: Driver model state for this driver. 821 * @driver: Driver model state for this driver.
820 * 822 *
821 * Devices are disabled till a gadget driver successfully bind()s, which 823 * Devices are disabled till a gadget driver successfully bind()s, which
@@ -873,6 +875,7 @@ struct usb_gadget_driver {
873 void (*disconnect)(struct usb_gadget *); 875 void (*disconnect)(struct usb_gadget *);
874 void (*suspend)(struct usb_gadget *); 876 void (*suspend)(struct usb_gadget *);
875 void (*resume)(struct usb_gadget *); 877 void (*resume)(struct usb_gadget *);
878 void (*reset)(struct usb_gadget *);
876 879
877 /* FIXME support safe rmmod */ 880 /* FIXME support safe rmmod */
878 struct device_driver driver; 881 struct device_driver driver;
@@ -921,7 +924,7 @@ extern int usb_add_gadget_udc_release(struct device *parent,
921 struct usb_gadget *gadget, void (*release)(struct device *dev)); 924 struct usb_gadget *gadget, void (*release)(struct device *dev));
922extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); 925extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget);
923extern void usb_del_gadget_udc(struct usb_gadget *gadget); 926extern void usb_del_gadget_udc(struct usb_gadget *gadget);
924extern int udc_attach_driver(const char *name, 927extern int usb_udc_attach_driver(const char *name,
925 struct usb_gadget_driver *driver); 928 struct usb_gadget_driver *driver);
926 929
927/*-------------------------------------------------------------------------*/ 930/*-------------------------------------------------------------------------*/
@@ -1013,6 +1016,20 @@ extern void usb_gadget_set_state(struct usb_gadget *gadget,
1013 1016
1014/*-------------------------------------------------------------------------*/ 1017/*-------------------------------------------------------------------------*/
1015 1018
1019/* utility to tell udc core that the bus reset occurs */
1020extern void usb_gadget_udc_reset(struct usb_gadget *gadget,
1021 struct usb_gadget_driver *driver);
1022
1023/*-------------------------------------------------------------------------*/
1024
1025/* utility to give requests back to the gadget layer */
1026
1027extern void usb_gadget_giveback_request(struct usb_ep *ep,
1028 struct usb_request *req);
1029
1030
1031/*-------------------------------------------------------------------------*/
1032
1016/* utility wrapping a simple endpoint selection policy */ 1033/* utility wrapping a simple endpoint selection policy */
1017 1034
1018extern struct usb_ep *usb_ep_autoconfig(struct usb_gadget *, 1035extern struct usb_ep *usb_ep_autoconfig(struct usb_gadget *,
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 485cd5e2100c..086bf13307e6 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -93,7 +93,7 @@ struct usb_hcd {
93 93
94 struct timer_list rh_timer; /* drives root-hub polling */ 94 struct timer_list rh_timer; /* drives root-hub polling */
95 struct urb *status_urb; /* the current status urb */ 95 struct urb *status_urb; /* the current status urb */
96#ifdef CONFIG_PM_RUNTIME 96#ifdef CONFIG_PM
97 struct work_struct wakeup_work; /* for remote wakeup */ 97 struct work_struct wakeup_work; /* for remote wakeup */
98#endif 98#endif
99 99
@@ -106,7 +106,8 @@ struct usb_hcd {
106 * OTG and some Host controllers need software interaction with phys; 106 * OTG and some Host controllers need software interaction with phys;
107 * other external phys should be software-transparent 107 * other external phys should be software-transparent
108 */ 108 */
109 struct usb_phy *phy; 109 struct usb_phy *usb_phy;
110 struct phy *phy;
110 111
111 /* Flags that need to be manipulated atomically because they can 112 /* Flags that need to be manipulated atomically because they can
112 * change while the host controller is running. Always use 113 * change while the host controller is running. Always use
@@ -144,6 +145,7 @@ struct usb_hcd {
144 unsigned has_tt:1; /* Integrated TT in root hub */ 145 unsigned has_tt:1; /* Integrated TT in root hub */
145 unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */ 146 unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
146 unsigned can_do_streams:1; /* HC supports streams */ 147 unsigned can_do_streams:1; /* HC supports streams */
148 unsigned tpl_support:1; /* OTG & EH TPL support */
147 149
148 unsigned int irq; /* irq allocated */ 150 unsigned int irq; /* irq allocated */
149 void __iomem *regs; /* device memory/io */ 151 void __iomem *regs; /* device memory/io */
@@ -377,6 +379,9 @@ struct hc_driver {
377 int (*disable_usb3_lpm_timeout)(struct usb_hcd *, 379 int (*disable_usb3_lpm_timeout)(struct usb_hcd *,
378 struct usb_device *, enum usb3_link_state state); 380 struct usb_device *, enum usb3_link_state state);
379 int (*find_raw_port_number)(struct usb_hcd *, int); 381 int (*find_raw_port_number)(struct usb_hcd *, int);
382 /* Call for power on/off the port if necessary */
383 int (*port_power)(struct usb_hcd *hcd, int portnum, bool enable);
384
380}; 385};
381 386
382static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd) 387static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
@@ -623,16 +628,13 @@ extern int usb_find_interface_driver(struct usb_device *dev,
623extern void usb_root_hub_lost_power(struct usb_device *rhdev); 628extern void usb_root_hub_lost_power(struct usb_device *rhdev);
624extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg); 629extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
625extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg); 630extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
626#endif /* CONFIG_PM */
627
628#ifdef CONFIG_PM_RUNTIME
629extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd); 631extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
630#else 632#else
631static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd) 633static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
632{ 634{
633 return; 635 return;
634} 636}
635#endif /* CONFIG_PM_RUNTIME */ 637#endif /* CONFIG_PM */
636 638
637/*-------------------------------------------------------------------------*/ 639/*-------------------------------------------------------------------------*/
638 640
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 8c38aa26b3bb..cfe0528cdbb1 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -14,6 +14,7 @@
14#if IS_ENABLED(CONFIG_OF) 14#if IS_ENABLED(CONFIG_OF)
15enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np); 15enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
16enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np); 16enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
17bool of_usb_host_tpl_support(struct device_node *np);
17#else 18#else
18static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np) 19static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
19{ 20{
@@ -25,6 +26,10 @@ of_usb_get_maximum_speed(struct device_node *np)
25{ 26{
26 return USB_SPEED_UNKNOWN; 27 return USB_SPEED_UNKNOWN;
27} 28}
29static inline bool of_usb_host_tpl_support(struct device_node *np)
30{
31 return false;
32}
28#endif 33#endif
29 34
30#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT) 35#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 154332b7c8c0..52661c5da690 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -9,15 +9,20 @@
9#ifndef __LINUX_USB_OTG_H 9#ifndef __LINUX_USB_OTG_H
10#define __LINUX_USB_OTG_H 10#define __LINUX_USB_OTG_H
11 11
12#include <linux/phy/phy.h>
12#include <linux/usb/phy.h> 13#include <linux/usb/phy.h>
13 14
14struct usb_otg { 15struct usb_otg {
15 u8 default_a; 16 u8 default_a;
16 17
17 struct usb_phy *phy; 18 struct phy *phy;
19 /* old usb_phy interface */
20 struct usb_phy *usb_phy;
18 struct usb_bus *host; 21 struct usb_bus *host;
19 struct usb_gadget *gadget; 22 struct usb_gadget *gadget;
20 23
24 enum usb_otg_state state;
25
21 /* bind/unbind the host controller */ 26 /* bind/unbind the host controller */
22 int (*set_host)(struct usb_otg *otg, struct usb_bus *host); 27 int (*set_host)(struct usb_otg *otg, struct usb_bus *host);
23 28
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 353053a33f21..f499c23e6342 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -77,7 +77,6 @@ struct usb_phy {
77 unsigned int flags; 77 unsigned int flags;
78 78
79 enum usb_phy_type type; 79 enum usb_phy_type type;
80 enum usb_otg_state state;
81 enum usb_phy_events last_event; 80 enum usb_phy_events last_event;
82 81
83 struct usb_otg *otg; 82 struct usb_otg *otg;
@@ -210,6 +209,7 @@ extern void usb_put_phy(struct usb_phy *);
210extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x); 209extern void devm_usb_put_phy(struct device *dev, struct usb_phy *x);
211extern int usb_bind_phy(const char *dev_name, u8 index, 210extern int usb_bind_phy(const char *dev_name, u8 index,
212 const char *phy_dev_name); 211 const char *phy_dev_name);
212extern void usb_phy_set_event(struct usb_phy *x, unsigned long event);
213#else 213#else
214static inline struct usb_phy *usb_get_phy(enum usb_phy_type type) 214static inline struct usb_phy *usb_get_phy(enum usb_phy_type type)
215{ 215{
@@ -251,6 +251,10 @@ static inline int usb_bind_phy(const char *dev_name, u8 index,
251{ 251{
252 return -EOPNOTSUPP; 252 return -EOPNOTSUPP;
253} 253}
254
255static inline void usb_phy_set_event(struct usb_phy *x, unsigned long event)
256{
257}
254#endif 258#endif
255 259
256static inline int 260static inline int
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 55a17b188daa..9948c874e3f1 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -8,27 +8,27 @@
8#define __LINUX_USB_QUIRKS_H 8#define __LINUX_USB_QUIRKS_H
9 9
10/* string descriptors must not be fetched using a 255-byte read */ 10/* string descriptors must not be fetched using a 255-byte read */
11#define USB_QUIRK_STRING_FETCH_255 0x00000001 11#define USB_QUIRK_STRING_FETCH_255 BIT(0)
12 12
13/* device can't resume correctly so reset it instead */ 13/* device can't resume correctly so reset it instead */
14#define USB_QUIRK_RESET_RESUME 0x00000002 14#define USB_QUIRK_RESET_RESUME BIT(1)
15 15
16/* device can't handle Set-Interface requests */ 16/* device can't handle Set-Interface requests */
17#define USB_QUIRK_NO_SET_INTF 0x00000004 17#define USB_QUIRK_NO_SET_INTF BIT(2)
18 18
19/* device can't handle its Configuration or Interface strings */ 19/* device can't handle its Configuration or Interface strings */
20#define USB_QUIRK_CONFIG_INTF_STRINGS 0x00000008 20#define USB_QUIRK_CONFIG_INTF_STRINGS BIT(3)
21 21
22/* device can't be reset(e.g morph devices), don't use reset */ 22/* device can't be reset(e.g morph devices), don't use reset */
23#define USB_QUIRK_RESET 0x00000010 23#define USB_QUIRK_RESET BIT(4)
24 24
25/* device has more interface descriptions than the bNumInterfaces count, 25/* device has more interface descriptions than the bNumInterfaces count,
26 and can't handle talking to these interfaces */ 26 and can't handle talking to these interfaces */
27#define USB_QUIRK_HONOR_BNUMINTERFACES 0x00000020 27#define USB_QUIRK_HONOR_BNUMINTERFACES BIT(5)
28 28
29/* device needs a pause during initialization, after we read the device 29/* device needs a pause during initialization, after we read the device
30 descriptor */ 30 descriptor */
31#define USB_QUIRK_DELAY_INIT 0x00000040 31#define USB_QUIRK_DELAY_INIT BIT(6)
32 32
33/* 33/*
34 * For high speed and super speed interupt endpoints, the USB 2.0 and 34 * For high speed and super speed interupt endpoints, the USB 2.0 and
@@ -39,6 +39,12 @@
39 * Devices with this quirk report their bInterval as the result of this 39 * Devices with this quirk report their bInterval as the result of this
40 * calculation instead of the exponent variable used in the calculation. 40 * calculation instead of the exponent variable used in the calculation.
41 */ 41 */
42#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL 0x00000080 42#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL BIT(7)
43
44/* device can't handle device_qualifier descriptor requests */
45#define USB_QUIRK_DEVICE_QUALIFIER BIT(8)
46
47/* device generates spurious wakeup, ignore remote wakeup capability */
48#define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9)
43 49
44#endif /* __LINUX_USB_QUIRKS_H */ 50#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index d5952bb66752..9fd9e481ea98 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -145,6 +145,10 @@ struct renesas_usbhs_driver_param {
145 int d0_rx_id; 145 int d0_rx_id;
146 int d1_tx_id; 146 int d1_tx_id;
147 int d1_rx_id; 147 int d1_rx_id;
148 int d2_tx_id;
149 int d2_rx_id;
150 int d3_tx_id;
151 int d3_rx_id;
148 152
149 /* 153 /*
150 * option: 154 * option:
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 26088feb6608..d9a4905e01d0 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -78,6 +78,7 @@ struct usbnet {
78# define EVENT_NO_RUNTIME_PM 9 78# define EVENT_NO_RUNTIME_PM 9
79# define EVENT_RX_KILL 10 79# define EVENT_RX_KILL 10
80# define EVENT_LINK_CHANGE 11 80# define EVENT_LINK_CHANGE 11
81# define EVENT_SET_RX_MODE 12
81}; 82};
82 83
83static inline struct usb_driver *driver_of(struct usb_interface *intf) 84static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -159,6 +160,9 @@ struct driver_info {
159 /* called by minidriver when receiving indication */ 160 /* called by minidriver when receiving indication */
160 void (*indication)(struct usbnet *dev, void *ind, int indlen); 161 void (*indication)(struct usbnet *dev, void *ind, int indlen);
161 162
163 /* rx mode change (device changes address list filtering) */
164 void (*set_rx_mode)(struct usbnet *dev);
165
162 /* for new devices, use the descriptor-reading code instead */ 166 /* for new devices, use the descriptor-reading code instead */
163 int in; /* rx endpoint */ 167 int in; /* rx endpoint */
164 int out; /* tx endpoint */ 168 int out; /* tx endpoint */
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 9b7de1b46437..a7f2604c5f25 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -73,6 +73,10 @@
73 /* Device advertises UAS but it is broken */ \ 73 /* Device advertises UAS but it is broken */ \
74 US_FLAG(BROKEN_FUA, 0x01000000) \ 74 US_FLAG(BROKEN_FUA, 0x01000000) \
75 /* Cannot handle FUA in WRITE or READ CDBs */ \ 75 /* Cannot handle FUA in WRITE or READ CDBs */ \
76 US_FLAG(NO_ATA_1X, 0x02000000) \
77 /* Cannot handle ATA_12 or ATA_16 CDBs */ \
78 US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
79 /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
76 80
77#define US_FLAG(name, value) US_FL_##name = value , 81#define US_FLAG(name, value) US_FL_##name = value ,
78enum { US_DO_ALL_FLAGS }; 82enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index e95372654f09..8297e5b341d8 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/kref.h> 4#include <linux/kref.h>
5#include <linux/nsproxy.h> 5#include <linux/nsproxy.h>
6#include <linux/ns_common.h>
6#include <linux/sched.h> 7#include <linux/sched.h>
7#include <linux/err.h> 8#include <linux/err.h>
8 9
@@ -17,6 +18,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
17 } extent[UID_GID_MAP_MAX_EXTENTS]; 18 } extent[UID_GID_MAP_MAX_EXTENTS];
18}; 19};
19 20
21#define USERNS_SETGROUPS_ALLOWED 1UL
22
23#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
24
20struct user_namespace { 25struct user_namespace {
21 struct uid_gid_map uid_map; 26 struct uid_gid_map uid_map;
22 struct uid_gid_map gid_map; 27 struct uid_gid_map gid_map;
@@ -26,7 +31,8 @@ struct user_namespace {
26 int level; 31 int level;
27 kuid_t owner; 32 kuid_t owner;
28 kgid_t group; 33 kgid_t group;
29 unsigned int proc_inum; 34 struct ns_common ns;
35 unsigned long flags;
30 36
31 /* Register of per-UID persistent keyrings for this namespace */ 37 /* Register of per-UID persistent keyrings for this namespace */
32#ifdef CONFIG_PERSISTENT_KEYRINGS 38#ifdef CONFIG_PERSISTENT_KEYRINGS
@@ -63,6 +69,9 @@ extern const struct seq_operations proc_projid_seq_operations;
63extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *); 69extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
64extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *); 70extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
65extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *); 71extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
72extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
73extern int proc_setgroups_show(struct seq_file *m, void *v);
74extern bool userns_may_setgroups(const struct user_namespace *ns);
66#else 75#else
67 76
68static inline struct user_namespace *get_user_ns(struct user_namespace *ns) 77static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -87,6 +96,10 @@ static inline void put_user_ns(struct user_namespace *ns)
87{ 96{
88} 97}
89 98
99static inline bool userns_may_setgroups(const struct user_namespace *ns)
100{
101 return true;
102}
90#endif 103#endif
91 104
92#endif /* _LINUX_USER_H */ 105#endif /* _LINUX_USER_H */
diff --git a/include/linux/utsname.h b/include/linux/utsname.h
index 239e27733d6c..5093f58ae192 100644
--- a/include/linux/utsname.h
+++ b/include/linux/utsname.h
@@ -5,6 +5,7 @@
5#include <linux/sched.h> 5#include <linux/sched.h>
6#include <linux/kref.h> 6#include <linux/kref.h>
7#include <linux/nsproxy.h> 7#include <linux/nsproxy.h>
8#include <linux/ns_common.h>
8#include <linux/err.h> 9#include <linux/err.h>
9#include <uapi/linux/utsname.h> 10#include <uapi/linux/utsname.h>
10 11
@@ -23,7 +24,7 @@ struct uts_namespace {
23 struct kref kref; 24 struct kref kref;
24 struct new_utsname name; 25 struct new_utsname name;
25 struct user_namespace *user_ns; 26 struct user_namespace *user_ns;
26 unsigned int proc_inum; 27 struct ns_common ns;
27}; 28};
28extern struct uts_namespace init_uts_ns; 29extern struct uts_namespace init_uts_ns;
29 30
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
index a4c9547aae64..f8e76e08ebe4 100644
--- a/include/linux/vexpress.h
+++ b/include/linux/vexpress.h
@@ -15,8 +15,6 @@
15#define _LINUX_VEXPRESS_H 15#define _LINUX_VEXPRESS_H
16 16
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/platform_device.h>
19#include <linux/reboot.h>
20#include <linux/regmap.h> 18#include <linux/regmap.h>
21 19
22#define VEXPRESS_SITE_MB 0 20#define VEXPRESS_SITE_MB 0
@@ -24,13 +22,6 @@
24#define VEXPRESS_SITE_DB2 2 22#define VEXPRESS_SITE_DB2 2
25#define VEXPRESS_SITE_MASTER 0xf 23#define VEXPRESS_SITE_MASTER 0xf
26 24
27#define VEXPRESS_RES_FUNC(_site, _func) \
28{ \
29 .start = (_site), \
30 .end = (_func), \
31 .flags = IORESOURCE_BUS, \
32}
33
34/* Config infrastructure */ 25/* Config infrastructure */
35 26
36void vexpress_config_set_master(u32 site); 27void vexpress_config_set_master(u32 site);
@@ -58,16 +49,6 @@ struct regmap *devm_regmap_init_vexpress_config(struct device *dev);
58 49
59/* Platform control */ 50/* Platform control */
60 51
61unsigned int vexpress_get_mci_cardin(struct device *dev);
62u32 vexpress_get_procid(int site);
63void *vexpress_get_24mhz_clock_base(void);
64void vexpress_flags_set(u32 data); 52void vexpress_flags_set(u32 data);
65 53
66void vexpress_sysreg_early_init(void __iomem *base);
67int vexpress_syscfg_device_register(struct platform_device *pdev);
68
69/* Clocks */
70
71void vexpress_clk_init(void __iomem *sp810_base);
72
73#endif 54#endif
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index b46671e28de2..28f0e65b9a11 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -75,9 +75,16 @@ unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
75 75
76bool virtqueue_is_broken(struct virtqueue *vq); 76bool virtqueue_is_broken(struct virtqueue *vq);
77 77
78void *virtqueue_get_avail(struct virtqueue *vq);
79void *virtqueue_get_used(struct virtqueue *vq);
80
78/** 81/**
79 * virtio_device - representation of a device using virtio 82 * virtio_device - representation of a device using virtio
80 * @index: unique position on the virtio bus 83 * @index: unique position on the virtio bus
84 * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore)
85 * @config_enabled: configuration change reporting enabled
86 * @config_change_pending: configuration change reported while disabled
87 * @config_lock: protects configuration change reporting
81 * @dev: underlying device. 88 * @dev: underlying device.
82 * @id: the device type identification (used to match it with a driver). 89 * @id: the device type identification (used to match it with a driver).
83 * @config: the configuration ops for this device. 90 * @config: the configuration ops for this device.
@@ -88,16 +95,21 @@ bool virtqueue_is_broken(struct virtqueue *vq);
88 */ 95 */
89struct virtio_device { 96struct virtio_device {
90 int index; 97 int index;
98 bool failed;
99 bool config_enabled;
100 bool config_change_pending;
101 spinlock_t config_lock;
91 struct device dev; 102 struct device dev;
92 struct virtio_device_id id; 103 struct virtio_device_id id;
93 const struct virtio_config_ops *config; 104 const struct virtio_config_ops *config;
94 const struct vringh_config_ops *vringh_config; 105 const struct vringh_config_ops *vringh_config;
95 struct list_head vqs; 106 struct list_head vqs;
96 /* Note that this is a Linux set_bit-style bitmap. */ 107 u64 features;
97 unsigned long features[1];
98 void *priv; 108 void *priv;
99}; 109};
100 110
111bool virtio_device_is_legacy_only(struct virtio_device_id id);
112
101static inline struct virtio_device *dev_to_virtio(struct device *_dev) 113static inline struct virtio_device *dev_to_virtio(struct device *_dev)
102{ 114{
103 return container_of(_dev, struct virtio_device, dev); 115 return container_of(_dev, struct virtio_device, dev);
@@ -108,12 +120,20 @@ void unregister_virtio_device(struct virtio_device *dev);
108 120
109void virtio_break_device(struct virtio_device *dev); 121void virtio_break_device(struct virtio_device *dev);
110 122
123void virtio_config_changed(struct virtio_device *dev);
124#ifdef CONFIG_PM_SLEEP
125int virtio_device_freeze(struct virtio_device *dev);
126int virtio_device_restore(struct virtio_device *dev);
127#endif
128
111/** 129/**
112 * virtio_driver - operations for a virtio I/O driver 130 * virtio_driver - operations for a virtio I/O driver
113 * @driver: underlying device driver (populate name and owner). 131 * @driver: underlying device driver (populate name and owner).
114 * @id_table: the ids serviced by this driver. 132 * @id_table: the ids serviced by this driver.
115 * @feature_table: an array of feature numbers supported by this driver. 133 * @feature_table: an array of feature numbers supported by this driver.
116 * @feature_table_size: number of entries in the feature table array. 134 * @feature_table_size: number of entries in the feature table array.
135 * @feature_table_legacy: same as feature_table but when working in legacy mode.
136 * @feature_table_size_legacy: number of entries in feature table legacy array.
117 * @probe: the function to call when a device is found. Returns 0 or -errno. 137 * @probe: the function to call when a device is found. Returns 0 or -errno.
118 * @remove: the function to call when a device is removed. 138 * @remove: the function to call when a device is removed.
119 * @config_changed: optional function to call when the device configuration 139 * @config_changed: optional function to call when the device configuration
@@ -124,6 +144,8 @@ struct virtio_driver {
124 const struct virtio_device_id *id_table; 144 const struct virtio_device_id *id_table;
125 const unsigned int *feature_table; 145 const unsigned int *feature_table;
126 unsigned int feature_table_size; 146 unsigned int feature_table_size;
147 const unsigned int *feature_table_legacy;
148 unsigned int feature_table_size_legacy;
127 int (*probe)(struct virtio_device *dev); 149 int (*probe)(struct virtio_device *dev);
128 void (*scan)(struct virtio_device *dev); 150 void (*scan)(struct virtio_device *dev);
129 void (*remove)(struct virtio_device *dev); 151 void (*remove)(struct virtio_device *dev);
diff --git a/include/linux/virtio_byteorder.h b/include/linux/virtio_byteorder.h
new file mode 100644
index 000000000000..51865d05b267
--- /dev/null
+++ b/include/linux/virtio_byteorder.h
@@ -0,0 +1,59 @@
1#ifndef _LINUX_VIRTIO_BYTEORDER_H
2#define _LINUX_VIRTIO_BYTEORDER_H
3#include <linux/types.h>
4#include <uapi/linux/virtio_types.h>
5
6/*
7 * Low-level memory accessors for handling virtio in modern little endian and in
8 * compatibility native endian format.
9 */
10
11static inline u16 __virtio16_to_cpu(bool little_endian, __virtio16 val)
12{
13 if (little_endian)
14 return le16_to_cpu((__force __le16)val);
15 else
16 return (__force u16)val;
17}
18
19static inline __virtio16 __cpu_to_virtio16(bool little_endian, u16 val)
20{
21 if (little_endian)
22 return (__force __virtio16)cpu_to_le16(val);
23 else
24 return (__force __virtio16)val;
25}
26
27static inline u32 __virtio32_to_cpu(bool little_endian, __virtio32 val)
28{
29 if (little_endian)
30 return le32_to_cpu((__force __le32)val);
31 else
32 return (__force u32)val;
33}
34
35static inline __virtio32 __cpu_to_virtio32(bool little_endian, u32 val)
36{
37 if (little_endian)
38 return (__force __virtio32)cpu_to_le32(val);
39 else
40 return (__force __virtio32)val;
41}
42
43static inline u64 __virtio64_to_cpu(bool little_endian, __virtio64 val)
44{
45 if (little_endian)
46 return le64_to_cpu((__force __le64)val);
47 else
48 return (__force u64)val;
49}
50
51static inline __virtio64 __cpu_to_virtio64(bool little_endian, u64 val)
52{
53 if (little_endian)
54 return (__force __virtio64)cpu_to_le64(val);
55 else
56 return (__force __virtio64)val;
57}
58
59#endif /* _LINUX_VIRTIO_BYTEORDER */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index e8f8f71e843c..ca3ed78e5ec7 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -4,6 +4,7 @@
4#include <linux/err.h> 4#include <linux/err.h>
5#include <linux/bug.h> 5#include <linux/bug.h>
6#include <linux/virtio.h> 6#include <linux/virtio.h>
7#include <linux/virtio_byteorder.h>
7#include <uapi/linux/virtio_config.h> 8#include <uapi/linux/virtio_config.h>
8 9
9/** 10/**
@@ -18,6 +19,9 @@
18 * offset: the offset of the configuration field 19 * offset: the offset of the configuration field
19 * buf: the buffer to read the field value from. 20 * buf: the buffer to read the field value from.
20 * len: the length of the buffer 21 * len: the length of the buffer
22 * @generation: config generation counter
23 * vdev: the virtio_device
24 * Returns the config generation counter
21 * @get_status: read the status byte 25 * @get_status: read the status byte
22 * vdev: the virtio_device 26 * vdev: the virtio_device
23 * Returns the status byte 27 * Returns the status byte
@@ -46,6 +50,7 @@
46 * vdev: the virtio_device 50 * vdev: the virtio_device
47 * This gives the final feature bits for the device: it can change 51 * This gives the final feature bits for the device: it can change
48 * the dev->feature bits if it wants. 52 * the dev->feature bits if it wants.
53 * Returns 0 on success or error status
49 * @bus_name: return the bus name associated with the device 54 * @bus_name: return the bus name associated with the device
50 * vdev: the virtio_device 55 * vdev: the virtio_device
51 * This returns a pointer to the bus name a la pci_name from which 56 * This returns a pointer to the bus name a la pci_name from which
@@ -58,6 +63,7 @@ struct virtio_config_ops {
58 void *buf, unsigned len); 63 void *buf, unsigned len);
59 void (*set)(struct virtio_device *vdev, unsigned offset, 64 void (*set)(struct virtio_device *vdev, unsigned offset,
60 const void *buf, unsigned len); 65 const void *buf, unsigned len);
66 u32 (*generation)(struct virtio_device *vdev);
61 u8 (*get_status)(struct virtio_device *vdev); 67 u8 (*get_status)(struct virtio_device *vdev);
62 void (*set_status)(struct virtio_device *vdev, u8 status); 68 void (*set_status)(struct virtio_device *vdev, u8 status);
63 void (*reset)(struct virtio_device *vdev); 69 void (*reset)(struct virtio_device *vdev);
@@ -66,8 +72,8 @@ struct virtio_config_ops {
66 vq_callback_t *callbacks[], 72 vq_callback_t *callbacks[],
67 const char *names[]); 73 const char *names[]);
68 void (*del_vqs)(struct virtio_device *); 74 void (*del_vqs)(struct virtio_device *);
69 u32 (*get_features)(struct virtio_device *vdev); 75 u64 (*get_features)(struct virtio_device *vdev);
70 void (*finalize_features)(struct virtio_device *vdev); 76 int (*finalize_features)(struct virtio_device *vdev);
71 const char *(*bus_name)(struct virtio_device *vdev); 77 const char *(*bus_name)(struct virtio_device *vdev);
72 int (*set_vq_affinity)(struct virtqueue *vq, int cpu); 78 int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
73}; 79};
@@ -77,23 +83,70 @@ void virtio_check_driver_offered_feature(const struct virtio_device *vdev,
77 unsigned int fbit); 83 unsigned int fbit);
78 84
79/** 85/**
80 * virtio_has_feature - helper to determine if this device has this feature. 86 * __virtio_test_bit - helper to test feature bits. For use by transports.
87 * Devices should normally use virtio_has_feature,
88 * which includes more checks.
81 * @vdev: the device 89 * @vdev: the device
82 * @fbit: the feature bit 90 * @fbit: the feature bit
83 */ 91 */
84static inline bool virtio_has_feature(const struct virtio_device *vdev, 92static inline bool __virtio_test_bit(const struct virtio_device *vdev,
93 unsigned int fbit)
94{
95 /* Did you forget to fix assumptions on max features? */
96 if (__builtin_constant_p(fbit))
97 BUILD_BUG_ON(fbit >= 64);
98 else
99 BUG_ON(fbit >= 64);
100
101 return vdev->features & BIT_ULL(fbit);
102}
103
104/**
105 * __virtio_set_bit - helper to set feature bits. For use by transports.
106 * @vdev: the device
107 * @fbit: the feature bit
108 */
109static inline void __virtio_set_bit(struct virtio_device *vdev,
110 unsigned int fbit)
111{
112 /* Did you forget to fix assumptions on max features? */
113 if (__builtin_constant_p(fbit))
114 BUILD_BUG_ON(fbit >= 64);
115 else
116 BUG_ON(fbit >= 64);
117
118 vdev->features |= BIT_ULL(fbit);
119}
120
121/**
122 * __virtio_clear_bit - helper to clear feature bits. For use by transports.
123 * @vdev: the device
124 * @fbit: the feature bit
125 */
126static inline void __virtio_clear_bit(struct virtio_device *vdev,
85 unsigned int fbit) 127 unsigned int fbit)
86{ 128{
87 /* Did you forget to fix assumptions on max features? */ 129 /* Did you forget to fix assumptions on max features? */
88 if (__builtin_constant_p(fbit)) 130 if (__builtin_constant_p(fbit))
89 BUILD_BUG_ON(fbit >= 32); 131 BUILD_BUG_ON(fbit >= 64);
90 else 132 else
91 BUG_ON(fbit >= 32); 133 BUG_ON(fbit >= 64);
134
135 vdev->features &= ~BIT_ULL(fbit);
136}
92 137
138/**
139 * virtio_has_feature - helper to determine if this device has this feature.
140 * @vdev: the device
141 * @fbit: the feature bit
142 */
143static inline bool virtio_has_feature(const struct virtio_device *vdev,
144 unsigned int fbit)
145{
93 if (fbit < VIRTIO_TRANSPORT_F_START) 146 if (fbit < VIRTIO_TRANSPORT_F_START)
94 virtio_check_driver_offered_feature(vdev, fbit); 147 virtio_check_driver_offered_feature(vdev, fbit);
95 148
96 return test_bit(fbit, vdev->features); 149 return __virtio_test_bit(vdev, fbit);
97} 150}
98 151
99static inline 152static inline
@@ -109,6 +162,23 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
109 return vq; 162 return vq;
110} 163}
111 164
165/**
166 * virtio_device_ready - enable vq use in probe function
167 * @vdev: the device
168 *
169 * Driver must call this to use vqs in the probe function.
170 *
171 * Note: vqs are enabled automatically after probe returns.
172 */
173static inline
174void virtio_device_ready(struct virtio_device *dev)
175{
176 unsigned status = dev->config->get_status(dev);
177
178 BUG_ON(status & VIRTIO_CONFIG_S_DRIVER_OK);
179 dev->config->set_status(dev, status | VIRTIO_CONFIG_S_DRIVER_OK);
180}
181
112static inline 182static inline
113const char *virtio_bus_name(struct virtio_device *vdev) 183const char *virtio_bus_name(struct virtio_device *vdev)
114{ 184{
@@ -135,6 +205,37 @@ int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
135 return 0; 205 return 0;
136} 206}
137 207
208/* Memory accessors */
209static inline u16 virtio16_to_cpu(struct virtio_device *vdev, __virtio16 val)
210{
211 return __virtio16_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
212}
213
214static inline __virtio16 cpu_to_virtio16(struct virtio_device *vdev, u16 val)
215{
216 return __cpu_to_virtio16(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
217}
218
219static inline u32 virtio32_to_cpu(struct virtio_device *vdev, __virtio32 val)
220{
221 return __virtio32_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
222}
223
224static inline __virtio32 cpu_to_virtio32(struct virtio_device *vdev, u32 val)
225{
226 return __cpu_to_virtio32(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
227}
228
229static inline u64 virtio64_to_cpu(struct virtio_device *vdev, __virtio64 val)
230{
231 return __virtio64_to_cpu(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
232}
233
234static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val)
235{
236 return __cpu_to_virtio64(virtio_has_feature(vdev, VIRTIO_F_VERSION_1), val);
237}
238
138/* Config space accessors. */ 239/* Config space accessors. */
139#define virtio_cread(vdev, structname, member, ptr) \ 240#define virtio_cread(vdev, structname, member, ptr) \
140 do { \ 241 do { \
@@ -204,11 +305,33 @@ static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset)
204 return ret; 305 return ret;
205} 306}
206 307
308/* Read @count fields, @bytes each. */
309static inline void __virtio_cread_many(struct virtio_device *vdev,
310 unsigned int offset,
311 void *buf, size_t count, size_t bytes)
312{
313 u32 old, gen = vdev->config->generation ?
314 vdev->config->generation(vdev) : 0;
315 int i;
316
317 do {
318 old = gen;
319
320 for (i = 0; i < count; i++)
321 vdev->config->get(vdev, offset + bytes * i,
322 buf + i * bytes, bytes);
323
324 gen = vdev->config->generation ?
325 vdev->config->generation(vdev) : 0;
326 } while (gen != old);
327}
328
329
207static inline void virtio_cread_bytes(struct virtio_device *vdev, 330static inline void virtio_cread_bytes(struct virtio_device *vdev,
208 unsigned int offset, 331 unsigned int offset,
209 void *buf, size_t len) 332 void *buf, size_t len)
210{ 333{
211 vdev->config->get(vdev, offset, buf, len); 334 __virtio_cread_many(vdev, offset, buf, len, 1);
212} 335}
213 336
214static inline void virtio_cwrite8(struct virtio_device *vdev, 337static inline void virtio_cwrite8(struct virtio_device *vdev,
@@ -222,12 +345,13 @@ static inline u16 virtio_cread16(struct virtio_device *vdev,
222{ 345{
223 u16 ret; 346 u16 ret;
224 vdev->config->get(vdev, offset, &ret, sizeof(ret)); 347 vdev->config->get(vdev, offset, &ret, sizeof(ret));
225 return ret; 348 return virtio16_to_cpu(vdev, (__force __virtio16)ret);
226} 349}
227 350
228static inline void virtio_cwrite16(struct virtio_device *vdev, 351static inline void virtio_cwrite16(struct virtio_device *vdev,
229 unsigned int offset, u16 val) 352 unsigned int offset, u16 val)
230{ 353{
354 val = (__force u16)cpu_to_virtio16(vdev, val);
231 vdev->config->set(vdev, offset, &val, sizeof(val)); 355 vdev->config->set(vdev, offset, &val, sizeof(val));
232} 356}
233 357
@@ -236,12 +360,13 @@ static inline u32 virtio_cread32(struct virtio_device *vdev,
236{ 360{
237 u32 ret; 361 u32 ret;
238 vdev->config->get(vdev, offset, &ret, sizeof(ret)); 362 vdev->config->get(vdev, offset, &ret, sizeof(ret));
239 return ret; 363 return virtio32_to_cpu(vdev, (__force __virtio32)ret);
240} 364}
241 365
242static inline void virtio_cwrite32(struct virtio_device *vdev, 366static inline void virtio_cwrite32(struct virtio_device *vdev,
243 unsigned int offset, u32 val) 367 unsigned int offset, u32 val)
244{ 368{
369 val = (__force u32)cpu_to_virtio32(vdev, val);
245 vdev->config->set(vdev, offset, &val, sizeof(val)); 370 vdev->config->set(vdev, offset, &val, sizeof(val));
246} 371}
247 372
@@ -250,12 +375,14 @@ static inline u64 virtio_cread64(struct virtio_device *vdev,
250{ 375{
251 u64 ret; 376 u64 ret;
252 vdev->config->get(vdev, offset, &ret, sizeof(ret)); 377 vdev->config->get(vdev, offset, &ret, sizeof(ret));
253 return ret; 378 __virtio_cread_many(vdev, offset, &ret, 1, sizeof(ret));
379 return virtio64_to_cpu(vdev, (__force __virtio64)ret);
254} 380}
255 381
256static inline void virtio_cwrite64(struct virtio_device *vdev, 382static inline void virtio_cwrite64(struct virtio_device *vdev,
257 unsigned int offset, u64 val) 383 unsigned int offset, u64 val)
258{ 384{
385 val = (__force u64)cpu_to_virtio64(vdev, val);
259 vdev->config->set(vdev, offset, &val, sizeof(val)); 386 vdev->config->set(vdev, offset, &val, sizeof(val));
260} 387}
261 388
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
deleted file mode 100644
index de429d1f4357..000000000000
--- a/include/linux/virtio_scsi.h
+++ /dev/null
@@ -1,162 +0,0 @@
1/*
2 * This header is BSD licensed so anyone can use the definitions to implement
3 * compatible drivers/servers.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 */
26
27#ifndef _LINUX_VIRTIO_SCSI_H
28#define _LINUX_VIRTIO_SCSI_H
29
30#define VIRTIO_SCSI_CDB_SIZE 32
31#define VIRTIO_SCSI_SENSE_SIZE 96
32
33/* SCSI command request, followed by data-out */
34struct virtio_scsi_cmd_req {
35 u8 lun[8]; /* Logical Unit Number */
36 u64 tag; /* Command identifier */
37 u8 task_attr; /* Task attribute */
38 u8 prio; /* SAM command priority field */
39 u8 crn;
40 u8 cdb[VIRTIO_SCSI_CDB_SIZE];
41} __packed;
42
43/* SCSI command request, followed by protection information */
44struct virtio_scsi_cmd_req_pi {
45 u8 lun[8]; /* Logical Unit Number */
46 u64 tag; /* Command identifier */
47 u8 task_attr; /* Task attribute */
48 u8 prio; /* SAM command priority field */
49 u8 crn;
50 u32 pi_bytesout; /* DataOUT PI Number of bytes */
51 u32 pi_bytesin; /* DataIN PI Number of bytes */
52 u8 cdb[VIRTIO_SCSI_CDB_SIZE];
53} __packed;
54
55/* Response, followed by sense data and data-in */
56struct virtio_scsi_cmd_resp {
57 u32 sense_len; /* Sense data length */
58 u32 resid; /* Residual bytes in data buffer */
59 u16 status_qualifier; /* Status qualifier */
60 u8 status; /* Command completion status */
61 u8 response; /* Response values */
62 u8 sense[VIRTIO_SCSI_SENSE_SIZE];
63} __packed;
64
65/* Task Management Request */
66struct virtio_scsi_ctrl_tmf_req {
67 u32 type;
68 u32 subtype;
69 u8 lun[8];
70 u64 tag;
71} __packed;
72
73struct virtio_scsi_ctrl_tmf_resp {
74 u8 response;
75} __packed;
76
77/* Asynchronous notification query/subscription */
78struct virtio_scsi_ctrl_an_req {
79 u32 type;
80 u8 lun[8];
81 u32 event_requested;
82} __packed;
83
84struct virtio_scsi_ctrl_an_resp {
85 u32 event_actual;
86 u8 response;
87} __packed;
88
89struct virtio_scsi_event {
90 u32 event;
91 u8 lun[8];
92 u32 reason;
93} __packed;
94
95struct virtio_scsi_config {
96 u32 num_queues;
97 u32 seg_max;
98 u32 max_sectors;
99 u32 cmd_per_lun;
100 u32 event_info_size;
101 u32 sense_size;
102 u32 cdb_size;
103 u16 max_channel;
104 u16 max_target;
105 u32 max_lun;
106} __packed;
107
108/* Feature Bits */
109#define VIRTIO_SCSI_F_INOUT 0
110#define VIRTIO_SCSI_F_HOTPLUG 1
111#define VIRTIO_SCSI_F_CHANGE 2
112#define VIRTIO_SCSI_F_T10_PI 3
113
114/* Response codes */
115#define VIRTIO_SCSI_S_OK 0
116#define VIRTIO_SCSI_S_OVERRUN 1
117#define VIRTIO_SCSI_S_ABORTED 2
118#define VIRTIO_SCSI_S_BAD_TARGET 3
119#define VIRTIO_SCSI_S_RESET 4
120#define VIRTIO_SCSI_S_BUSY 5
121#define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
122#define VIRTIO_SCSI_S_TARGET_FAILURE 7
123#define VIRTIO_SCSI_S_NEXUS_FAILURE 8
124#define VIRTIO_SCSI_S_FAILURE 9
125#define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
126#define VIRTIO_SCSI_S_FUNCTION_REJECTED 11
127#define VIRTIO_SCSI_S_INCORRECT_LUN 12
128
129/* Controlq type codes. */
130#define VIRTIO_SCSI_T_TMF 0
131#define VIRTIO_SCSI_T_AN_QUERY 1
132#define VIRTIO_SCSI_T_AN_SUBSCRIBE 2
133
134/* Valid TMF subtypes. */
135#define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
136#define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
137#define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
138#define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
139#define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
140#define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
141#define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
142#define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7
143
144/* Events. */
145#define VIRTIO_SCSI_T_EVENTS_MISSED 0x80000000
146#define VIRTIO_SCSI_T_NO_EVENT 0
147#define VIRTIO_SCSI_T_TRANSPORT_RESET 1
148#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2
149#define VIRTIO_SCSI_T_PARAM_CHANGE 3
150
151/* Reasons of transport reset event */
152#define VIRTIO_SCSI_EVT_RESET_HARD 0
153#define VIRTIO_SCSI_EVT_RESET_RESCAN 1
154#define VIRTIO_SCSI_EVT_RESET_REMOVED 2
155
156#define VIRTIO_SCSI_S_SIMPLE 0
157#define VIRTIO_SCSI_S_ORDERED 1
158#define VIRTIO_SCSI_S_HEAD 2
159#define VIRTIO_SCSI_S_ACA 3
160
161
162#endif /* _LINUX_VIRTIO_SCSI_H */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index ced92345c963..9246d32dc973 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -72,6 +72,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
72 THP_ZERO_PAGE_ALLOC, 72 THP_ZERO_PAGE_ALLOC,
73 THP_ZERO_PAGE_ALLOC_FAILED, 73 THP_ZERO_PAGE_ALLOC_FAILED,
74#endif 74#endif
75#ifdef CONFIG_MEMORY_BALLOON
76 BALLOON_INFLATE,
77 BALLOON_DEFLATE,
78#ifdef CONFIG_BALLOON_COMPACTION
79 BALLOON_MIGRATE,
80#endif
81#endif
75#ifdef CONFIG_DEBUG_TLBFLUSH 82#ifdef CONFIG_DEBUG_TLBFLUSH
76#ifdef CONFIG_SMP 83#ifdef CONFIG_SMP
77 NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */ 84 NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
@@ -83,6 +90,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
83#ifdef CONFIG_DEBUG_VM_VMACACHE 90#ifdef CONFIG_DEBUG_VM_VMACACHE
84 VMACACHE_FIND_CALLS, 91 VMACACHE_FIND_CALLS,
85 VMACACHE_FIND_HITS, 92 VMACACHE_FIND_HITS,
93 VMACACHE_FULL_FLUSHES,
86#endif 94#endif
87 NR_VM_EVENT_ITEMS 95 NR_VM_EVENT_ITEMS
88}; 96};
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index 023430e265fe..5691f752ce8f 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -24,6 +24,7 @@
24#define VMCI_KERNEL_API_VERSION_2 2 24#define VMCI_KERNEL_API_VERSION_2 2
25#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 25#define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2
26 26
27struct msghdr;
27typedef void (vmci_device_shutdown_fn) (void *device_registration, 28typedef void (vmci_device_shutdown_fn) (void *device_registration,
28 void *user_data); 29 void *user_data);
29 30
@@ -75,8 +76,8 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
75ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, 76ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
76 void *iov, size_t iov_size, int mode); 77 void *iov, size_t iov_size, int mode);
77ssize_t vmci_qpair_dequev(struct vmci_qp *qpair, 78ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
78 void *iov, size_t iov_size, int mode); 79 struct msghdr *msg, size_t iov_size, int mode);
79ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size, 80ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, struct msghdr *msg, size_t iov_size,
80 int mode); 81 int mode);
81 82
82#endif /* !__VMW_VMCI_API_H__ */ 83#endif /* !__VMW_VMCI_API_H__ */
diff --git a/include/linux/vringh.h b/include/linux/vringh.h
index 749cde28728b..a3fa537e717a 100644
--- a/include/linux/vringh.h
+++ b/include/linux/vringh.h
@@ -24,12 +24,16 @@
24#ifndef _LINUX_VRINGH_H 24#ifndef _LINUX_VRINGH_H
25#define _LINUX_VRINGH_H 25#define _LINUX_VRINGH_H
26#include <uapi/linux/virtio_ring.h> 26#include <uapi/linux/virtio_ring.h>
27#include <linux/virtio_byteorder.h>
27#include <linux/uio.h> 28#include <linux/uio.h>
28#include <linux/slab.h> 29#include <linux/slab.h>
29#include <asm/barrier.h> 30#include <asm/barrier.h>
30 31
31/* virtio_ring with information needed for host access. */ 32/* virtio_ring with information needed for host access. */
32struct vringh { 33struct vringh {
34 /* Everything is little endian */
35 bool little_endian;
36
33 /* Guest publishes used event idx (note: we always do). */ 37 /* Guest publishes used event idx (note: we always do). */
34 bool event_indices; 38 bool event_indices;
35 39
@@ -105,7 +109,7 @@ struct vringh_kiov {
105#define VRINGH_IOV_ALLOCATED 0x8000000 109#define VRINGH_IOV_ALLOCATED 0x8000000
106 110
107/* Helpers for userspace vrings. */ 111/* Helpers for userspace vrings. */
108int vringh_init_user(struct vringh *vrh, u32 features, 112int vringh_init_user(struct vringh *vrh, u64 features,
109 unsigned int num, bool weak_barriers, 113 unsigned int num, bool weak_barriers,
110 struct vring_desc __user *desc, 114 struct vring_desc __user *desc,
111 struct vring_avail __user *avail, 115 struct vring_avail __user *avail,
@@ -167,7 +171,7 @@ bool vringh_notify_enable_user(struct vringh *vrh);
167void vringh_notify_disable_user(struct vringh *vrh); 171void vringh_notify_disable_user(struct vringh *vrh);
168 172
169/* Helpers for kernelspace vrings. */ 173/* Helpers for kernelspace vrings. */
170int vringh_init_kern(struct vringh *vrh, u32 features, 174int vringh_init_kern(struct vringh *vrh, u64 features,
171 unsigned int num, bool weak_barriers, 175 unsigned int num, bool weak_barriers,
172 struct vring_desc *desc, 176 struct vring_desc *desc,
173 struct vring_avail *avail, 177 struct vring_avail *avail,
@@ -222,4 +226,33 @@ static inline void vringh_notify(struct vringh *vrh)
222 vrh->notify(vrh); 226 vrh->notify(vrh);
223} 227}
224 228
229static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
230{
231 return __virtio16_to_cpu(vrh->little_endian, val);
232}
233
234static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
235{
236 return __cpu_to_virtio16(vrh->little_endian, val);
237}
238
239static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
240{
241 return __virtio32_to_cpu(vrh->little_endian, val);
242}
243
244static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
245{
246 return __cpu_to_virtio32(vrh->little_endian, val);
247}
248
249static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
250{
251 return __virtio64_to_cpu(vrh->little_endian, val);
252}
253
254static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
255{
256 return __cpu_to_virtio64(vrh->little_endian, val);
257}
225#endif /* _LINUX_VRINGH_H */ 258#endif /* _LINUX_VRINGH_H */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 6fb1ba5f9b2f..2232ed16635a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -13,9 +13,12 @@ typedef struct __wait_queue wait_queue_t;
13typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); 13typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
14int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); 14int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
15 15
16/* __wait_queue::flags */
17#define WQ_FLAG_EXCLUSIVE 0x01
18#define WQ_FLAG_WOKEN 0x02
19
16struct __wait_queue { 20struct __wait_queue {
17 unsigned int flags; 21 unsigned int flags;
18#define WQ_FLAG_EXCLUSIVE 0x01
19 void *private; 22 void *private;
20 wait_queue_func_t func; 23 wait_queue_func_t func;
21 struct list_head task_list; 24 struct list_head task_list;
@@ -25,7 +28,7 @@ struct wait_bit_key {
25 void *flags; 28 void *flags;
26 int bit_nr; 29 int bit_nr;
27#define WAIT_ATOMIC_T_BIT_NR -1 30#define WAIT_ATOMIC_T_BIT_NR -1
28 unsigned long private; 31 unsigned long timeout;
29}; 32};
30 33
31struct wait_bit_queue { 34struct wait_bit_queue {
@@ -154,6 +157,7 @@ int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_ac
154void wake_up_bit(void *, int); 157void wake_up_bit(void *, int);
155void wake_up_atomic_t(atomic_t *); 158void wake_up_atomic_t(atomic_t *);
156int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned); 159int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
160int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
157int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned); 161int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
158int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); 162int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
159wait_queue_head_t *bit_waitqueue(void *, int); 163wait_queue_head_t *bit_waitqueue(void *, int);
@@ -257,11 +261,37 @@ __out: __ret; \
257 */ 261 */
258#define wait_event(wq, condition) \ 262#define wait_event(wq, condition) \
259do { \ 263do { \
264 might_sleep(); \
260 if (condition) \ 265 if (condition) \
261 break; \ 266 break; \
262 __wait_event(wq, condition); \ 267 __wait_event(wq, condition); \
263} while (0) 268} while (0)
264 269
270#define __wait_event_freezable(wq, condition) \
271 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
272 schedule(); try_to_freeze())
273
274/**
275 * wait_event - sleep (or freeze) until a condition gets true
276 * @wq: the waitqueue to wait on
277 * @condition: a C expression for the event to wait for
278 *
279 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
280 * to system load) until the @condition evaluates to true. The
281 * @condition is checked each time the waitqueue @wq is woken up.
282 *
283 * wake_up() has to be called after changing any variable that could
284 * change the result of the wait condition.
285 */
286#define wait_event_freezable(wq, condition) \
287({ \
288 int __ret = 0; \
289 might_sleep(); \
290 if (!(condition)) \
291 __ret = __wait_event_freezable(wq, condition); \
292 __ret; \
293})
294
265#define __wait_event_timeout(wq, condition, timeout) \ 295#define __wait_event_timeout(wq, condition, timeout) \
266 ___wait_event(wq, ___wait_cond_timeout(condition), \ 296 ___wait_event(wq, ___wait_cond_timeout(condition), \
267 TASK_UNINTERRUPTIBLE, 0, timeout, \ 297 TASK_UNINTERRUPTIBLE, 0, timeout, \
@@ -280,18 +310,39 @@ do { \
280 * wake_up() has to be called after changing any variable that could 310 * wake_up() has to be called after changing any variable that could
281 * change the result of the wait condition. 311 * change the result of the wait condition.
282 * 312 *
283 * The function returns 0 if the @timeout elapsed, or the remaining 313 * Returns:
284 * jiffies (at least 1) if the @condition evaluated to %true before 314 * 0 if the @condition evaluated to %false after the @timeout elapsed,
285 * the @timeout elapsed. 315 * 1 if the @condition evaluated to %true after the @timeout elapsed,
316 * or the remaining jiffies (at least 1) if the @condition evaluated
317 * to %true before the @timeout elapsed.
286 */ 318 */
287#define wait_event_timeout(wq, condition, timeout) \ 319#define wait_event_timeout(wq, condition, timeout) \
288({ \ 320({ \
289 long __ret = timeout; \ 321 long __ret = timeout; \
322 might_sleep(); \
290 if (!___wait_cond_timeout(condition)) \ 323 if (!___wait_cond_timeout(condition)) \
291 __ret = __wait_event_timeout(wq, condition, timeout); \ 324 __ret = __wait_event_timeout(wq, condition, timeout); \
292 __ret; \ 325 __ret; \
293}) 326})
294 327
328#define __wait_event_freezable_timeout(wq, condition, timeout) \
329 ___wait_event(wq, ___wait_cond_timeout(condition), \
330 TASK_INTERRUPTIBLE, 0, timeout, \
331 __ret = schedule_timeout(__ret); try_to_freeze())
332
333/*
334 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
335 * increasing load and is freezable.
336 */
337#define wait_event_freezable_timeout(wq, condition, timeout) \
338({ \
339 long __ret = timeout; \
340 might_sleep(); \
341 if (!___wait_cond_timeout(condition)) \
342 __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
343 __ret; \
344})
345
295#define __wait_event_cmd(wq, condition, cmd1, cmd2) \ 346#define __wait_event_cmd(wq, condition, cmd1, cmd2) \
296 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ 347 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
297 cmd1; schedule(); cmd2) 348 cmd1; schedule(); cmd2)
@@ -312,6 +363,7 @@ do { \
312 */ 363 */
313#define wait_event_cmd(wq, condition, cmd1, cmd2) \ 364#define wait_event_cmd(wq, condition, cmd1, cmd2) \
314do { \ 365do { \
366 might_sleep(); \
315 if (condition) \ 367 if (condition) \
316 break; \ 368 break; \
317 __wait_event_cmd(wq, condition, cmd1, cmd2); \ 369 __wait_event_cmd(wq, condition, cmd1, cmd2); \
@@ -339,6 +391,7 @@ do { \
339#define wait_event_interruptible(wq, condition) \ 391#define wait_event_interruptible(wq, condition) \
340({ \ 392({ \
341 int __ret = 0; \ 393 int __ret = 0; \
394 might_sleep(); \
342 if (!(condition)) \ 395 if (!(condition)) \
343 __ret = __wait_event_interruptible(wq, condition); \ 396 __ret = __wait_event_interruptible(wq, condition); \
344 __ret; \ 397 __ret; \
@@ -363,13 +416,16 @@ do { \
363 * change the result of the wait condition. 416 * change the result of the wait condition.
364 * 417 *
365 * Returns: 418 * Returns:
366 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by 419 * 0 if the @condition evaluated to %false after the @timeout elapsed,
367 * a signal, or the remaining jiffies (at least 1) if the @condition 420 * 1 if the @condition evaluated to %true after the @timeout elapsed,
368 * evaluated to %true before the @timeout elapsed. 421 * the remaining jiffies (at least 1) if the @condition evaluated
422 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
423 * interrupted by a signal.
369 */ 424 */
370#define wait_event_interruptible_timeout(wq, condition, timeout) \ 425#define wait_event_interruptible_timeout(wq, condition, timeout) \
371({ \ 426({ \
372 long __ret = timeout; \ 427 long __ret = timeout; \
428 might_sleep(); \
373 if (!___wait_cond_timeout(condition)) \ 429 if (!___wait_cond_timeout(condition)) \
374 __ret = __wait_event_interruptible_timeout(wq, \ 430 __ret = __wait_event_interruptible_timeout(wq, \
375 condition, timeout); \ 431 condition, timeout); \
@@ -420,6 +476,7 @@ do { \
420#define wait_event_hrtimeout(wq, condition, timeout) \ 476#define wait_event_hrtimeout(wq, condition, timeout) \
421({ \ 477({ \
422 int __ret = 0; \ 478 int __ret = 0; \
479 might_sleep(); \
423 if (!(condition)) \ 480 if (!(condition)) \
424 __ret = __wait_event_hrtimeout(wq, condition, timeout, \ 481 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
425 TASK_UNINTERRUPTIBLE); \ 482 TASK_UNINTERRUPTIBLE); \
@@ -445,6 +502,7 @@ do { \
445#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ 502#define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
446({ \ 503({ \
447 long __ret = 0; \ 504 long __ret = 0; \
505 might_sleep(); \
448 if (!(condition)) \ 506 if (!(condition)) \
449 __ret = __wait_event_hrtimeout(wq, condition, timeout, \ 507 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
450 TASK_INTERRUPTIBLE); \ 508 TASK_INTERRUPTIBLE); \
@@ -458,12 +516,27 @@ do { \
458#define wait_event_interruptible_exclusive(wq, condition) \ 516#define wait_event_interruptible_exclusive(wq, condition) \
459({ \ 517({ \
460 int __ret = 0; \ 518 int __ret = 0; \
519 might_sleep(); \
461 if (!(condition)) \ 520 if (!(condition)) \
462 __ret = __wait_event_interruptible_exclusive(wq, condition);\ 521 __ret = __wait_event_interruptible_exclusive(wq, condition);\
463 __ret; \ 522 __ret; \
464}) 523})
465 524
466 525
526#define __wait_event_freezable_exclusive(wq, condition) \
527 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
528 schedule(); try_to_freeze())
529
530#define wait_event_freezable_exclusive(wq, condition) \
531({ \
532 int __ret = 0; \
533 might_sleep(); \
534 if (!(condition)) \
535 __ret = __wait_event_freezable_exclusive(wq, condition);\
536 __ret; \
537})
538
539
467#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ 540#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
468({ \ 541({ \
469 int __ret = 0; \ 542 int __ret = 0; \
@@ -632,6 +705,7 @@ do { \
632#define wait_event_killable(wq, condition) \ 705#define wait_event_killable(wq, condition) \
633({ \ 706({ \
634 int __ret = 0; \ 707 int __ret = 0; \
708 might_sleep(); \
635 if (!(condition)) \ 709 if (!(condition)) \
636 __ret = __wait_event_killable(wq, condition); \ 710 __ret = __wait_event_killable(wq, condition); \
637 __ret; \ 711 __ret; \
@@ -825,6 +899,8 @@ void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int sta
825long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state); 899long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
826void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); 900void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
827void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key); 901void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
902long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
903int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
828int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 904int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
829int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); 905int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
830 906
@@ -859,6 +935,8 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
859 935
860extern int bit_wait(struct wait_bit_key *); 936extern int bit_wait(struct wait_bit_key *);
861extern int bit_wait_io(struct wait_bit_key *); 937extern int bit_wait_io(struct wait_bit_key *);
938extern int bit_wait_timeout(struct wait_bit_key *);
939extern int bit_wait_io_timeout(struct wait_bit_key *);
862 940
863/** 941/**
864 * wait_on_bit - wait for a bit to be cleared 942 * wait_on_bit - wait for a bit to be cleared
@@ -879,6 +957,7 @@ extern int bit_wait_io(struct wait_bit_key *);
879static inline int 957static inline int
880wait_on_bit(void *word, int bit, unsigned mode) 958wait_on_bit(void *word, int bit, unsigned mode)
881{ 959{
960 might_sleep();
882 if (!test_bit(bit, word)) 961 if (!test_bit(bit, word))
883 return 0; 962 return 0;
884 return out_of_line_wait_on_bit(word, bit, 963 return out_of_line_wait_on_bit(word, bit,
@@ -903,6 +982,7 @@ wait_on_bit(void *word, int bit, unsigned mode)
903static inline int 982static inline int
904wait_on_bit_io(void *word, int bit, unsigned mode) 983wait_on_bit_io(void *word, int bit, unsigned mode)
905{ 984{
985 might_sleep();
906 if (!test_bit(bit, word)) 986 if (!test_bit(bit, word))
907 return 0; 987 return 0;
908 return out_of_line_wait_on_bit(word, bit, 988 return out_of_line_wait_on_bit(word, bit,
@@ -929,6 +1009,7 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
929static inline int 1009static inline int
930wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) 1010wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
931{ 1011{
1012 might_sleep();
932 if (!test_bit(bit, word)) 1013 if (!test_bit(bit, word))
933 return 0; 1014 return 0;
934 return out_of_line_wait_on_bit(word, bit, action, mode); 1015 return out_of_line_wait_on_bit(word, bit, action, mode);
@@ -956,6 +1037,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode
956static inline int 1037static inline int
957wait_on_bit_lock(void *word, int bit, unsigned mode) 1038wait_on_bit_lock(void *word, int bit, unsigned mode)
958{ 1039{
1040 might_sleep();
959 if (!test_and_set_bit(bit, word)) 1041 if (!test_and_set_bit(bit, word))
960 return 0; 1042 return 0;
961 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode); 1043 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
@@ -979,6 +1061,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode)
979static inline int 1061static inline int
980wait_on_bit_lock_io(void *word, int bit, unsigned mode) 1062wait_on_bit_lock_io(void *word, int bit, unsigned mode)
981{ 1063{
1064 might_sleep();
982 if (!test_and_set_bit(bit, word)) 1065 if (!test_and_set_bit(bit, word))
983 return 0; 1066 return 0;
984 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode); 1067 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
@@ -1004,6 +1087,7 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode)
1004static inline int 1087static inline int
1005wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) 1088wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
1006{ 1089{
1090 might_sleep();
1007 if (!test_and_set_bit(bit, word)) 1091 if (!test_and_set_bit(bit, word))
1008 return 0; 1092 return 0;
1009 return out_of_line_wait_on_bit_lock(word, bit, action, mode); 1093 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
@@ -1022,6 +1106,7 @@ wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned
1022static inline 1106static inline
1023int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode) 1107int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1024{ 1108{
1109 might_sleep();
1025 if (atomic_read(val) == 0) 1110 if (atomic_read(val) == 0)
1026 return 0; 1111 return 0;
1027 return out_of_line_wait_on_atomic_t(val, action, mode); 1112 return out_of_line_wait_on_atomic_t(val, action, mode);
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 2a3038ee17a3..395b70e0eccf 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -97,13 +97,8 @@ struct watchdog_device {
97#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */ 97#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */
98}; 98};
99 99
100#ifdef CONFIG_WATCHDOG_NOWAYOUT 100#define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT)
101#define WATCHDOG_NOWAYOUT 1 101#define WATCHDOG_NOWAYOUT_INIT_STATUS (WATCHDOG_NOWAYOUT << WDOG_NO_WAY_OUT)
102#define WATCHDOG_NOWAYOUT_INIT_STATUS (1 << WDOG_NO_WAY_OUT)
103#else
104#define WATCHDOG_NOWAYOUT 0
105#define WATCHDOG_NOWAYOUT_INIT_STATUS 0
106#endif
107 102
108/* Use the following function to check whether or not the watchdog is active */ 103/* Use the following function to check whether or not the watchdog is active */
109static inline bool watchdog_active(struct watchdog_device *wdd) 104static inline bool watchdog_active(struct watchdog_device *wdd)
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a219be961c0a..00048339c23e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
177 struct writeback_control *wbc, writepage_t writepage, 177 struct writeback_control *wbc, writepage_t writepage,
178 void *data); 178 void *data);
179int do_writepages(struct address_space *mapping, struct writeback_control *wbc); 179int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
180void set_page_dirty_balance(struct page *page);
181void writeback_set_ratelimit(void); 180void writeback_set_ratelimit(void);
182void tag_pages_for_writeback(struct address_space *mapping, 181void tag_pages_for_writeback(struct address_space *mapping,
183 pgoff_t start, pgoff_t end); 182 pgoff_t start, pgoff_t end);
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index e44d634e7fb7..05c214760977 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -46,6 +46,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
46 enum zs_mapmode mm); 46 enum zs_mapmode mm);
47void zs_unmap_object(struct zs_pool *pool, unsigned long handle); 47void zs_unmap_object(struct zs_pool *pool, unsigned long handle);
48 48
49u64 zs_get_total_size_bytes(struct zs_pool *pool); 49unsigned long zs_get_total_pages(struct zs_pool *pool);
50 50
51#endif 51#endif