author    Ingo Molnar <mingo@elte.hu>	2008-12-31 02:31:57 -0500
committer Ingo Molnar <mingo@elte.hu>	2008-12-31 02:31:57 -0500
commit    a9de18eb761f7c1c860964b2e5addc1a35c7e861
tree      886e75fdfd09690cd262ca69cb7f5d1d42b48602 /include/linux
parent    b2aaf8f74cdc84a9182f6cabf198b7763bcb9d40
parent    6a94cb73064c952255336cc57731904174b2c58f
Merge branch 'linus' into stackprotector
Conflicts:
	arch/x86/include/asm/pda.h
	kernel/fork.c
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/Kbuild4
-rw-r--r--include/linux/acpi.h56
-rw-r--r--include/linux/aer.h5
-rw-r--r--include/linux/aio.h14
-rw-r--r--include/linux/atm.h17
-rw-r--r--include/linux/atmdev.h15
-rw-r--r--include/linux/audit.h32
-rw-r--r--include/linux/auto_dev-ioctl.h157
-rw-r--r--include/linux/auto_fs4.h7
-rw-r--r--include/linux/backing-dev.h13
-rw-r--r--include/linux/bcd.h16
-rw-r--r--include/linux/binfmts.h21
-rw-r--r--include/linux/bio.h65
-rw-r--r--include/linux/bitmap.h2
-rw-r--r--include/linux/blkdev.h105
-rw-r--r--include/linux/blktrace_api.h146
-rw-r--r--include/linux/bottom_half.h1
-rw-r--r--include/linux/buffer_head.h3
-rw-r--r--include/linux/byteorder/Kbuild1
-rw-r--r--include/linux/byteorder/big_endian.h1
-rw-r--r--include/linux/byteorder/little_endian.h1
-rw-r--r--include/linux/c2port.h65
-rw-r--r--include/linux/can/core.h2
-rw-r--r--include/linux/capability.h25
-rw-r--r--include/linux/cdrom.h10
-rw-r--r--include/linux/cgroup.h35
-rw-r--r--include/linux/cgroup_subsys.h12
-rw-r--r--include/linux/clk.h4
-rw-r--r--include/linux/clocksource.h14
-rw-r--r--include/linux/cnt32_to_63.h22
-rw-r--r--include/linux/compat.h8
-rw-r--r--include/linux/compiler.h86
-rw-r--r--include/linux/console.h4
-rw-r--r--include/linux/cpumask.h559
-rw-r--r--include/linux/cpuset.h4
-rw-r--r--include/linux/crash_dump.h40
-rw-r--r--include/linux/crc32c.h6
-rw-r--r--include/linux/cred.h342
-rw-r--r--include/linux/crypto.h10
-rw-r--r--include/linux/dcache.h3
-rw-r--r--include/linux/dcbnl.h340
-rw-r--r--include/linux/dccp.h42
-rw-r--r--include/linux/debug_locks.h2
-rw-r--r--include/linux/device-mapper.h14
-rw-r--r--include/linux/device.h20
-rw-r--r--include/linux/dm-region-hash.h104
-rw-r--r--include/linux/dma_remapping.h156
-rw-r--r--include/linux/dmar.h1
-rw-r--r--include/linux/dmi.h5
-rw-r--r--include/linux/ds1286.h2
-rw-r--r--include/linux/dvb/frontend.h2
-rw-r--r--include/linux/dynamic_printk.h93
-rw-r--r--include/linux/efi.h4
-rw-r--r--include/linux/elevator.h8
-rw-r--r--include/linux/etherdevice.h46
-rw-r--r--include/linux/ethtool.h2
-rw-r--r--include/linux/ext2_fs.h2
-rw-r--r--include/linux/ext3_fs.h6
-rw-r--r--include/linux/ext3_jbd.h14
-rw-r--r--include/linux/fault-inject.h9
-rw-r--r--include/linux/fb.h3
-rw-r--r--include/linux/fddidevice.h1
-rw-r--r--include/linux/file.h4
-rw-r--r--include/linux/filter.h3
-rw-r--r--include/linux/firewire-cdev.h9
-rw-r--r--include/linux/freezer.h40
-rw-r--r--include/linux/fs.h96
-rw-r--r--include/linux/fsl_devices.h17
-rw-r--r--include/linux/fsnotify.h2
-rw-r--r--include/linux/ftrace.h374
-rw-r--r--include/linux/ftrace_irq.h13
-rw-r--r--include/linux/fuse.h12
-rw-r--r--include/linux/futex.h5
-rw-r--r--include/linux/gameport.h7
-rw-r--r--include/linux/genhd.h8
-rw-r--r--include/linux/gpio.h3
-rw-r--r--include/linux/hardirq.h28
-rw-r--r--include/linux/hdlc.h4
-rw-r--r--include/linux/hid.h7
-rw-r--r--include/linux/highmem.h2
-rw-r--r--include/linux/hippidevice.h4
-rw-r--r--include/linux/hrtimer.h152
-rw-r--r--include/linux/hugetlb.h6
-rw-r--r--include/linux/i2c-algo-pcf.h5
-rw-r--r--include/linux/i2c-id.h2
-rw-r--r--include/linux/i2c.h163
-rw-r--r--include/linux/i2c/twl4030.h343
-rw-r--r--include/linux/i2o.h292
-rw-r--r--include/linux/i7300_idle.h83
-rw-r--r--include/linux/icmpv6.h6
-rw-r--r--include/linux/ide.h84
-rw-r--r--include/linux/idr.h3
-rw-r--r--include/linux/ieee80211.h212
-rw-r--r--include/linux/if.h1
-rw-r--r--include/linux/if_arp.h3
-rw-r--r--include/linux/if_vlan.h7
-rw-r--r--include/linux/in.h4
-rw-r--r--include/linux/init.h12
-rw-r--r--include/linux/init_task.h15
-rw-r--r--include/linux/inotify.h11
-rw-r--r--include/linux/input.h19
-rw-r--r--include/linux/intel-iommu.h363
-rw-r--r--include/linux/interrupt.h29
-rw-r--r--include/linux/io-mapping.h125
-rw-r--r--include/linux/iommu-helper.h3
-rw-r--r--include/linux/ioport.h8
-rw-r--r--include/linux/iova.h52
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/irq.h127
-rw-r--r--include/linux/irqnr.h38
-rw-r--r--include/linux/jbd.h9
-rw-r--r--include/linux/jbd2.h13
-rw-r--r--include/linux/jiffies.h10
-rw-r--r--include/linux/journal-head.h2
-rw-r--r--include/linux/kallsyms.h8
-rw-r--r--include/linux/kernel.h134
-rw-r--r--include/linux/kernel_stat.h35
-rw-r--r--include/linux/kexec.h4
-rw-r--r--include/linux/key-ui.h66
-rw-r--r--include/linux/key.h32
-rw-r--r--include/linux/keyctl.h4
-rw-r--r--include/linux/kmod.h3
-rw-r--r--include/linux/kprobes.h5
-rw-r--r--include/linux/kvm.h78
-rw-r--r--include/linux/kvm_host.h87
-rw-r--r--include/linux/leds.h4
-rw-r--r--include/linux/lguest_launcher.h6
-rw-r--r--include/linux/libata.h78
-rw-r--r--include/linux/linkage.h10
-rw-r--r--include/linux/list_nulls.h94
-rw-r--r--include/linux/lockd/bind.h1
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/lockdep.h50
-rw-r--r--include/linux/map_to_7segment.h187
-rw-r--r--include/linux/marker.h78
-rw-r--r--include/linux/mdio-gpio.h25
-rw-r--r--include/linux/memcontrol.h34
-rw-r--r--include/linux/memory.h2
-rw-r--r--include/linux/mfd/da903x.h201
-rw-r--r--include/linux/mfd/t7l66xb.h2
-rw-r--r--include/linux/mfd/tc6387xb.h3
-rw-r--r--include/linux/mfd/tc6393xb.h17
-rw-r--r--include/linux/mfd/tmio.h19
-rw-r--r--include/linux/mfd/wm8350/audio.h38
-rw-r--r--include/linux/mfd/wm8350/rtc.h2
-rw-r--r--include/linux/migrate.h3
-rw-r--r--include/linux/mii.h33
-rw-r--r--include/linux/mlx4/cmd.h9
-rw-r--r--include/linux/mlx4/device.h59
-rw-r--r--include/linux/mm.h30
-rw-r--r--include/linux/mm_inline.h98
-rw-r--r--include/linux/mm_types.h8
-rw-r--r--include/linux/mmc/card.h2
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmc/sdio_func.h2
-rw-r--r--include/linux/mmiotrace.h20
-rw-r--r--include/linux/mmzone.h105
-rw-r--r--include/linux/mod_devicetable.h3
-rw-r--r--include/linux/module.h24
-rw-r--r--include/linux/moduleparam.h25
-rw-r--r--include/linux/mount.h2
-rw-r--r--include/linux/mroute6.h26
-rw-r--r--include/linux/msdos_fs.h281
-rw-r--r--include/linux/msi.h3
-rw-r--r--include/linux/mtd/cfi.h31
-rw-r--r--include/linux/mtd/flashchip.h4
-rw-r--r--include/linux/mtd/mtd.h4
-rw-r--r--include/linux/mtd/nand-gpio.h19
-rw-r--r--include/linux/mtd/nand.h1
-rw-r--r--include/linux/mtd/onenand_regs.h2
-rw-r--r--include/linux/mtd/partitions.h1
-rw-r--r--include/linux/mtd/sh_flctl.h125
-rw-r--r--include/linux/mutex.h2
-rw-r--r--include/linux/namei.h8
-rw-r--r--include/linux/net.h6
-rw-r--r--include/linux/netdevice.h406
-rw-r--r--include/linux/netfilter/nfnetlink.h3
-rw-r--r--include/linux/netfilter/nfnetlink_conntrack.h1
-rw-r--r--include/linux/netfilter/x_tables.h2
-rw-r--r--include/linux/netfilter_bridge/ebtables.h3
-rw-r--r--include/linux/netfilter_ipv4/ipt_policy.h2
-rw-r--r--include/linux/netfilter_ipv6/ip6t_policy.h2
-rw-r--r--include/linux/netlink.h3
-rw-r--r--include/linux/netpoll.h5
-rw-r--r--include/linux/nfs_fs.h44
-rw-r--r--include/linux/nfs_fs_sb.h7
-rw-r--r--include/linux/nfs_mount.h7
-rw-r--r--include/linux/nfs_xdr.h18
-rw-r--r--include/linux/nfsd/state.h2
-rw-r--r--include/linux/nl80211.h223
-rw-r--r--include/linux/nsproxy.h1
-rw-r--r--include/linux/of.h9
-rw-r--r--include/linux/of_gpio.h44
-rw-r--r--include/linux/of_platform.h3
-rw-r--r--include/linux/oprofile.h19
-rw-r--r--include/linux/page-flags.h55
-rw-r--r--include/linux/page_cgroup.h108
-rw-r--r--include/linux/pagemap.h44
-rw-r--r--include/linux/pagevec.h34
-rw-r--r--include/linux/parport.h2
-rw-r--r--include/linux/pci.h52
-rw-r--r--include/linux/pci_hotplug.h11
-rw-r--r--include/linux/pci_ids.h21
-rw-r--r--include/linux/pci_regs.h16
-rw-r--r--include/linux/pfn.h6
-rw-r--r--include/linux/phonet.h1
-rw-r--r--include/linux/phy.h2
-rw-r--r--include/linux/pid.h4
-rw-r--r--include/linux/pid_namespace.h6
-rw-r--r--include/linux/pkt_cls.h14
-rw-r--r--include/linux/pkt_sched.h16
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pnp.h16
-rw-r--r--include/linux/poll.h8
-rw-r--r--include/linux/posix-timers.h10
-rw-r--r--include/linux/power_supply.h6
-rw-r--r--include/linux/prctl.h7
-rw-r--r--include/linux/proc_fs.h6
-rw-r--r--include/linux/profile.h13
-rw-r--r--include/linux/ptrace.h23
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/quotaops.h3
-rw-r--r--include/linux/raid/linear.h10
-rw-r--r--include/linux/raid/md.h32
-rw-r--r--include/linux/raid/md_k.h8
-rw-r--r--include/linux/random.h51
-rw-r--r--include/linux/ratelimit.h7
-rw-r--r--include/linux/rcuclassic.h2
-rw-r--r--include/linux/rculist_nulls.h110
-rw-r--r--include/linux/rcupdate.h12
-rw-r--r--include/linux/rcutree.h329
-rw-r--r--include/linux/reiserfs_fs.h2
-rw-r--r--include/linux/reiserfs_fs_sb.h1
-rw-r--r--include/linux/resource.h4
-rw-r--r--include/linux/rfkill.h8
-rw-r--r--include/linux/ring_buffer.h140
-rw-r--r--include/linux/rio_drv.h4
-rw-r--r--include/linux/rmap.h29
-rw-r--r--include/linux/rtmutex.h2
-rw-r--r--include/linux/rtnetlink.h5
-rw-r--r--include/linux/sched.h230
-rw-r--r--include/linux/securebits.h2
-rw-r--r--include/linux/security.h348
-rw-r--r--include/linux/seq_file.h14
-rw-r--r--include/linux/serial_core.h5
-rw-r--r--include/linux/sh_intc.h91
-rw-r--r--include/linux/skbuff.h54
-rw-r--r--include/linux/slab.h43
-rw-r--r--include/linux/smc911x.h1
-rw-r--r--include/linux/smp.h15
-rw-r--r--include/linux/smsc911x.h47
-rw-r--r--include/linux/snmp.h3
-rw-r--r--include/linux/spi/orion_spi.h1
-rw-r--r--include/linux/spi/spi_bitbang.h3
-rw-r--r--include/linux/ssb/ssb.h42
-rw-r--r--include/linux/stacktrace.h8
-rw-r--r--include/linux/string.h2
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h1
-rw-r--r--include/linux/sunrpc/svc_xprt.h8
-rw-r--r--include/linux/sunrpc/svcauth_gss.h1
-rw-r--r--include/linux/sunrpc/xdr.h15
-rw-r--r--include/linux/sunrpc/xprt.h3
-rw-r--r--include/linux/sunrpc/xprtrdma.h4
-rw-r--r--include/linux/swab.h10
-rw-r--r--include/linux/swap.h69
-rw-r--r--include/linux/swiotlb.h105
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/sysfs.h41
-rw-r--r--include/linux/task_io_accounting.h2
-rw-r--r--include/linux/telephony.h6
-rw-r--r--include/linux/thread_info.h8
-rw-r--r--include/linux/tick.h7
-rw-r--r--include/linux/time.h11
-rw-r--r--include/linux/timer.h5
-rw-r--r--include/linux/timex.h82
-rw-r--r--include/linux/topology.h10
-rw-r--r--include/linux/tracepoint.h156
-rw-r--r--include/linux/tty.h6
-rw-r--r--include/linux/types.h20
-rw-r--r--include/linux/uaccess.h2
-rw-r--r--include/linux/usb.h9
-rw-r--r--include/linux/usb/Kbuild3
-rw-r--r--include/linux/usb/cdc.h9
-rw-r--r--include/linux/usb/ch9.h8
-rw-r--r--include/linux/usb/composite.h11
-rw-r--r--include/linux/usb/serial.h2
-rw-r--r--include/linux/usb/tmc.h43
-rw-r--r--include/linux/usb/vstusb.h71
-rw-r--r--include/linux/usb/wusb-wa.h271
-rw-r--r--include/linux/usb/wusb.h376
-rw-r--r--include/linux/user_namespace.h13
-rw-r--r--include/linux/uwb.h765
-rw-r--r--include/linux/uwb/debug-cmd.h57
-rw-r--r--include/linux/uwb/debug.h82
-rw-r--r--include/linux/uwb/spec.h727
-rw-r--r--include/linux/uwb/umc.h194
-rw-r--r--include/linux/uwb/whci.h117
-rw-r--r--include/linux/videodev2.h21
-rw-r--r--include/linux/virtio_balloon.h3
-rw-r--r--include/linux/virtio_console.h11
-rw-r--r--include/linux/virtio_net.h9
-rw-r--r--include/linux/virtio_pci.h8
-rw-r--r--include/linux/virtio_ring.h13
-rw-r--r--include/linux/vmalloc.h17
-rw-r--r--include/linux/vmstat.h24
-rw-r--r--include/linux/wait.h9
-rw-r--r--include/linux/wlp.h735
-rw-r--r--include/linux/workqueue.h26
-rw-r--r--include/linux/writeback.h10
-rw-r--r--include/linux/xfrm.h14
313 files changed, 12661 insertions, 2434 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 282a504bd1db..95ac82340c3b 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -107,6 +107,7 @@ header-y += keyctl.h
 header-y += limits.h
 header-y += magic.h
 header-y += major.h
+header-y += map_to_7segment.h
 header-y += matroxfb.h
 header-y += meye.h
 header-y += minix_fs.h
@@ -182,6 +183,7 @@ unifdef-y += auto_fs.h
 unifdef-y += auxvec.h
 unifdef-y += binfmts.h
 unifdef-y += blktrace_api.h
+unifdef-y += byteorder.h
 unifdef-y += capability.h
 unifdef-y += capi.h
 unifdef-y += cciss_ioctl.h
@@ -311,6 +313,7 @@ unifdef-y += ptrace.h
 unifdef-y += qnx4_fs.h
 unifdef-y += quota.h
 unifdef-y += random.h
+unifdef-y += irqnr.h
 unifdef-y += reboot.h
 unifdef-y += reiserfs_fs.h
 unifdef-y += reiserfs_xattr.h
@@ -339,6 +342,7 @@ unifdef-y += soundcard.h
 unifdef-y += stat.h
 unifdef-y += stddef.h
 unifdef-y += string.h
+unifdef-y += swab.h
 unifdef-y += synclink.h
 unifdef-y += sysctl.h
 unifdef-y += tcp.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 702f79dad16a..fba8051fb297 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -94,18 +94,10 @@ int acpi_parse_mcfg (struct acpi_table_header *header);
 void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
 
 /* the following four functions are architecture-dependent */
-#ifdef CONFIG_HAVE_ARCH_PARSE_SRAT
-#define NR_NODE_MEMBLKS		MAX_NUMNODES
-#define acpi_numa_slit_init(slit) do {} while (0)
-#define acpi_numa_processor_affinity_init(pa) do {} while (0)
-#define acpi_numa_memory_affinity_init(ma) do {} while (0)
-#define acpi_numa_arch_fixup() do {} while (0)
-#else
 void acpi_numa_slit_init (struct acpi_table_slit *slit);
 void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
 void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
 void acpi_numa_arch_fixup(void);
-#endif
 
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
@@ -171,8 +163,6 @@ struct acpi_pci_driver {
 int acpi_pci_register_driver(struct acpi_pci_driver *driver);
 void acpi_pci_unregister_driver(struct acpi_pci_driver *driver);
 
-#ifdef CONFIG_ACPI_EC
-
 extern int ec_read(u8 addr, u8 *val);
 extern int ec_write(u8 addr, u8 val);
 extern int ec_transaction(u8 command,
@@ -180,8 +170,6 @@ extern int ec_transaction(u8 command,
                           u8 *rdata, unsigned rdata_len,
                           int force_poll);
 
-#endif /*CONFIG_ACPI_EC*/
-
 #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
 
 typedef void (*wmi_notify_handler) (u32 value, void *context);
@@ -202,6 +190,50 @@ extern bool wmi_has_guid(const char *guid);
 
 #endif	/* CONFIG_ACPI_WMI */
 
+#define ACPI_VIDEO_OUTPUT_SWITCHING			0x0001
+#define ACPI_VIDEO_DEVICE_POSTING			0x0002
+#define ACPI_VIDEO_ROM_AVAILABLE			0x0004
+#define ACPI_VIDEO_BACKLIGHT				0x0008
+#define ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR		0x0010
+#define ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO		0x0020
+#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR	0x0040
+#define ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO		0x0080
+#define ACPI_VIDEO_BACKLIGHT_DMI_VENDOR			0x0100
+#define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO			0x0200
+#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR		0x0400
+#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO		0x0800
+
+#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
+
+extern long acpi_video_get_capabilities(acpi_handle graphics_dev_handle);
+extern long acpi_is_video_device(struct acpi_device *device);
+extern int acpi_video_backlight_support(void);
+extern int acpi_video_display_switch_support(void);
+
+#else
+
+static inline long acpi_video_get_capabilities(acpi_handle graphics_dev_handle)
+{
+	return 0;
+}
+
+static inline long acpi_is_video_device(struct acpi_device *device)
+{
+	return 0;
+}
+
+static inline int acpi_video_backlight_support(void)
+{
+	return 0;
+}
+
+static inline int acpi_video_display_switch_support(void)
+{
+	return 0;
+}
+
+#endif /* defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) */
+
 extern int acpi_blacklisted(void);
 #ifdef	CONFIG_DMI
 extern void acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d);
diff --git a/include/linux/aer.h b/include/linux/aer.h
index f2518141de88..f7df1eefc107 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -10,7 +10,6 @@
 #if defined(CONFIG_PCIEAER)
 /* pci-e port driver needs this function to enable aer */
 extern int pci_enable_pcie_error_reporting(struct pci_dev *dev);
-extern int pci_find_aer_capability(struct pci_dev *dev);
 extern int pci_disable_pcie_error_reporting(struct pci_dev *dev);
 extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
 #else
@@ -18,10 +17,6 @@ static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
 {
 	return -EINVAL;
 }
-static inline int pci_find_aer_capability(struct pci_dev *dev)
-{
-	return 0;
-}
 static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
 {
 	return -EINVAL;
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 09b276c35227..b16a957030f8 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -5,6 +5,7 @@
 #include <linux/workqueue.h>
 #include <linux/aio_abi.h>
 #include <linux/uio.h>
+#include <linux/rcupdate.h>
 
 #include <asm/atomic.h>
 
@@ -183,7 +184,7 @@ struct kioctx {
 
 	/* This needs improving */
 	unsigned long		user_id;
-	struct kioctx		*next;
+	struct hlist_node	list;
 
 	wait_queue_head_t	wait;
 
@@ -199,17 +200,28 @@ struct kioctx {
 	struct aio_ring_info	ring_info;
 
 	struct delayed_work	wq;
+
+	struct rcu_head		rcu_head;
 };
 
 /* prototypes */
 extern unsigned aio_max_size;
 
+#ifdef CONFIG_AIO
 extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
 extern int aio_put_req(struct kiocb *iocb);
 extern void kick_iocb(struct kiocb *iocb);
 extern int aio_complete(struct kiocb *iocb, long res, long res2);
 struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
+#else
+static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; }
+static inline int aio_put_req(struct kiocb *iocb) { return 0; }
+static inline void kick_iocb(struct kiocb *iocb) { }
+static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; }
+struct mm_struct;
+static inline void exit_aio(struct mm_struct *mm) { }
+#endif /* CONFIG_AIO */
 
 #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
 
diff --git a/include/linux/atm.h b/include/linux/atm.h
index c791ddd96939..d3b292174aeb 100644
--- a/include/linux/atm.h
+++ b/include/linux/atm.h
@@ -231,10 +231,21 @@ static __inline__ int atmpvc_addr_in_use(struct sockaddr_atmpvc addr)
  */
 
 struct atmif_sioc {
 	int number;
 	int length;
 	void __user *arg;
 };
 
+#ifdef __KERNEL__
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+struct compat_atmif_sioc {
+	int number;
+	int length;
+	compat_uptr_t arg;
+};
+#endif
+#endif
+
 typedef unsigned short atm_backend_t;
 #endif
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index a3d07c29d16c..086e5c362d3a 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -100,6 +100,10 @@ struct atm_dev_stats {
 					/* use backend to make new if */
 #define ATM_ADDPARTY	_IOW('a', ATMIOC_SPECIAL+4,struct atm_iobuf)
 					/* add party to p2mp call */
+#ifdef CONFIG_COMPAT
+/* It actually takes struct sockaddr_atmsvc, not struct atm_iobuf */
+#define COMPAT_ATM_ADDPARTY	_IOW('a', ATMIOC_SPECIAL+4,struct compat_atm_iobuf)
+#endif
 #define ATM_DROPPARTY	_IOW('a', ATMIOC_SPECIAL+5,int)
 					/* drop party from p2mp call */
 
@@ -224,6 +228,13 @@ struct atm_cirange {
 extern struct proc_dir_entry *atm_proc_root;
 #endif
 
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+struct compat_atm_iobuf {
+	int length;
+	compat_uptr_t buffer;
+};
+#endif
 
 struct k_atm_aal_stats {
 #define __HANDLE_ITEM(i) atomic_t i
@@ -379,6 +390,10 @@ struct atmdev_ops { /* only send is required */
 	int (*open)(struct atm_vcc *vcc);
 	void (*close)(struct atm_vcc *vcc);
 	int (*ioctl)(struct atm_dev *dev,unsigned int cmd,void __user *arg);
+#ifdef CONFIG_COMPAT
+	int (*compat_ioctl)(struct atm_dev *dev,unsigned int cmd,
+			    void __user *arg);
+#endif
 	int (*getsockopt)(struct atm_vcc *vcc,int level,int optname,
 			  void __user *optval,int optlen);
 	int (*setsockopt)(struct atm_vcc *vcc,int level,int optname,
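
The compat_ioctl hook and struct compat_atm_iobuf added above exist so 32-bit userspace can drive 64-bit ATM drivers. A handler would typically copy in the compat structure and widen its compat_uptr_t with compat_ptr() before reusing the native code path. A rough sketch of that conversion (not part of this commit; the helper name is made up):

#ifdef CONFIG_COMPAT
static int example_get_compat_iobuf(void __user *arg, struct atm_iobuf *iobuf)
{
	struct compat_atm_iobuf iobuf32;

	if (copy_from_user(&iobuf32, arg, sizeof(iobuf32)))
		return -EFAULT;
	iobuf->length = iobuf32.length;
	/* widen the 32-bit user pointer to a native void __user * */
	iobuf->buffer = compat_ptr(iobuf32.buffer);
	return 0;
}
#endif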
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 6272a395d43c..26c4f6f65a46 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -99,6 +99,8 @@
 #define AUDIT_OBJ_PID		1318	/* ptrace target */
 #define AUDIT_TTY		1319	/* Input on an administrative TTY */
 #define AUDIT_EOE		1320	/* End of multi-record event */
+#define AUDIT_BPRM_FCAPS	1321	/* Information about fcaps increasing perms */
+#define AUDIT_CAPSET		1322	/* Record showing argument to sys_capset */
 
 #define AUDIT_AVC		1400	/* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR	1401	/* Internal SE Linux Errors */
@@ -391,6 +393,7 @@ extern int audit_classify_arch(int arch);
 #ifdef CONFIG_AUDITSYSCALL
 /* These are defined in auditsc.c */
 				/* Public API */
+extern void audit_finish_fork(struct task_struct *child);
 extern int  audit_alloc(struct task_struct *task);
 extern void audit_free(struct task_struct *task);
 extern void audit_syscall_entry(int arch,
@@ -434,7 +437,7 @@ static inline void audit_ptrace(struct task_struct *t)
 
 				/* Private API (for audit.c only) */
 extern unsigned int audit_serial(void);
-extern void auditsc_get_stamp(struct audit_context *ctx,
+extern int auditsc_get_stamp(struct audit_context *ctx,
 			      struct timespec *t, unsigned int *serial);
 extern int  audit_set_loginuid(struct task_struct *task, uid_t loginuid);
 #define audit_get_loginuid(t) ((t)->loginuid)
@@ -452,6 +455,10 @@ extern int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_pr
 extern int __audit_mq_timedreceive(mqd_t mqdes, size_t msg_len, unsigned int __user *u_msg_prio, const struct timespec __user *u_abs_timeout);
 extern int __audit_mq_notify(mqd_t mqdes, const struct sigevent __user *u_notification);
 extern int __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
+extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
+				  const struct cred *new,
+				  const struct cred *old);
+extern int __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old);
 
 static inline int audit_ipc_obj(struct kern_ipc_perm *ipcp)
 {
@@ -501,9 +508,28 @@ static inline int audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
 		return __audit_mq_getsetattr(mqdes, mqstat);
 	return 0;
 }
+
+static inline int audit_log_bprm_fcaps(struct linux_binprm *bprm,
+				       const struct cred *new,
+				       const struct cred *old)
+{
+	if (unlikely(!audit_dummy_context()))
+		return __audit_log_bprm_fcaps(bprm, new, old);
+	return 0;
+}
+
+static inline int audit_log_capset(pid_t pid, const struct cred *new,
+				   const struct cred *old)
+{
+	if (unlikely(!audit_dummy_context()))
+		return __audit_log_capset(pid, new, old);
+	return 0;
+}
+
 extern int audit_n_rules;
 extern int audit_signals;
 #else
+#define audit_finish_fork(t)
 #define audit_alloc(t) ({ 0; })
 #define audit_free(t) do { ; } while (0)
 #define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0)
@@ -516,7 +542,7 @@ extern int audit_signals;
 #define audit_inode(n,d) do { ; } while (0)
 #define audit_inode_child(d,i,p) do { ; } while (0)
 #define audit_core_dumps(i) do { ; } while (0)
-#define auditsc_get_stamp(c,t,s) do { BUG(); } while (0)
+#define auditsc_get_stamp(c,t,s) (0)
 #define audit_get_loginuid(t) (-1)
 #define audit_get_sessionid(t) (-1)
 #define audit_log_task_context(b) do { ; } while (0)
@@ -532,6 +558,8 @@ extern int audit_signals;
 #define audit_mq_timedreceive(d,l,p,t) ({ 0; })
 #define audit_mq_notify(d,n) ({ 0; })
 #define audit_mq_getsetattr(d,s) ({ 0; })
+#define audit_log_bprm_fcaps(b, ncr, ocr) ({ 0; })
+#define audit_log_capset(pid, ncr, ocr) ({ 0; })
 #define audit_ptrace(t) ((void)0)
 #define audit_n_rules 0
 #define audit_signals 0
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
new file mode 100644
index 000000000000..f4d05ccd731f
--- /dev/null
+++ b/include/linux/auto_dev-ioctl.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2008 Red Hat, Inc. All rights reserved.
+ * Copyright 2008 Ian Kent <raven@themaw.net>
+ *
+ * This file is part of the Linux kernel and is made available under
+ * the terms of the GNU General Public License, version 2, or at your
+ * option, any later version, incorporated herein by reference.
+ */
+
+#ifndef _LINUX_AUTO_DEV_IOCTL_H
+#define _LINUX_AUTO_DEV_IOCTL_H
+
+#include <linux/types.h>
+
+#define AUTOFS_DEVICE_NAME		"autofs"
+
+#define AUTOFS_DEV_IOCTL_VERSION_MAJOR	1
+#define AUTOFS_DEV_IOCTL_VERSION_MINOR	0
+
+#define AUTOFS_DEVID_LEN		16
+
+#define AUTOFS_DEV_IOCTL_SIZE		sizeof(struct autofs_dev_ioctl)
+
+/*
+ * An ioctl interface for autofs mount point control.
+ */
+
+/*
+ * All the ioctls use this structure.
+ * When sending a path size must account for the total length
+ * of the chunk of memory otherwise is is the size of the
+ * structure.
+ */
+
+struct autofs_dev_ioctl {
+	__u32 ver_major;
+	__u32 ver_minor;
+	__u32 size;		/* total size of data passed in
+				 * including this struct */
+	__s32 ioctlfd;		/* automount command fd */
+
+	__u32 arg1;		/* Command parameters */
+	__u32 arg2;
+
+	char path[0];
+};
+
+static inline void init_autofs_dev_ioctl(struct autofs_dev_ioctl *in)
+{
+	in->ver_major = AUTOFS_DEV_IOCTL_VERSION_MAJOR;
+	in->ver_minor = AUTOFS_DEV_IOCTL_VERSION_MINOR;
+	in->size = sizeof(struct autofs_dev_ioctl);
+	in->ioctlfd = -1;
+	in->arg1 = 0;
+	in->arg2 = 0;
+	return;
+}
+
+/*
+ * If you change this make sure you make the corresponding change
+ * to autofs-dev-ioctl.c:lookup_ioctl()
+ */
+enum {
+	/* Get various version info */
+	AUTOFS_DEV_IOCTL_VERSION_CMD = 0x71,
+	AUTOFS_DEV_IOCTL_PROTOVER_CMD,
+	AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD,
+
+	/* Open mount ioctl fd */
+	AUTOFS_DEV_IOCTL_OPENMOUNT_CMD,
+
+	/* Close mount ioctl fd */
+	AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD,
+
+	/* Mount/expire status returns */
+	AUTOFS_DEV_IOCTL_READY_CMD,
+	AUTOFS_DEV_IOCTL_FAIL_CMD,
+
+	/* Activate/deactivate autofs mount */
+	AUTOFS_DEV_IOCTL_SETPIPEFD_CMD,
+	AUTOFS_DEV_IOCTL_CATATONIC_CMD,
+
+	/* Expiry timeout */
+	AUTOFS_DEV_IOCTL_TIMEOUT_CMD,
+
+	/* Get mount last requesting uid and gid */
+	AUTOFS_DEV_IOCTL_REQUESTER_CMD,
+
+	/* Check for eligible expire candidates */
+	AUTOFS_DEV_IOCTL_EXPIRE_CMD,
+
+	/* Request busy status */
+	AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD,
+
+	/* Check if path is a mountpoint */
+	AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD,
+};
+
+#define AUTOFS_IOCTL 0x93
+
+#define AUTOFS_DEV_IOCTL_VERSION \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_VERSION_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOVER \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_PROTOVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_PROTOSUBVER \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_PROTOSUBVER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_OPENMOUNT \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_OPENMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CLOSEMOUNT \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_CLOSEMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_READY \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_READY_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_FAIL \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_FAIL_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_SETPIPEFD \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_SETPIPEFD_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_CATATONIC \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_CATATONIC_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_TIMEOUT \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_TIMEOUT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_REQUESTER \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_REQUESTER_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_EXPIRE \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_EXPIRE_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ASKUMOUNT \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_ASKUMOUNT_CMD, struct autofs_dev_ioctl)
+
+#define AUTOFS_DEV_IOCTL_ISMOUNTPOINT \
+	_IOWR(AUTOFS_IOCTL, \
+	      AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD, struct autofs_dev_ioctl)
+
+#endif	/* _LINUX_AUTO_DEV_IOCTL_H */
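
For orientation, the new device node interface above is meant to be driven from userspace roughly as follows; this example is not part of the commit, and the /dev/autofs path and error handling are assumptions:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/auto_dev-ioctl.h>

static int autofs_dev_ioctl_version(void)
{
	struct autofs_dev_ioctl param;
	int devfd, err;

	devfd = open("/dev/autofs", O_RDONLY);	/* AUTOFS_DEVICE_NAME node */
	if (devfd < 0)
		return -1;

	init_autofs_dev_ioctl(&param);		/* fills version fields and size */
	err = ioctl(devfd, AUTOFS_DEV_IOCTL_VERSION, &param);
	close(devfd);
	/* on success the kernel fills in its ioctl interface version */
	return err ? -1 : (int)param.ver_major;
}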
diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h
index b785c6f8644d..2253716d4b92 100644
--- a/include/linux/auto_fs4.h
+++ b/include/linux/auto_fs4.h
@@ -23,12 +23,17 @@
 #define AUTOFS_MIN_PROTO_VERSION	3
 #define AUTOFS_MAX_PROTO_VERSION	5
 
-#define AUTOFS_PROTO_SUBVERSION	0
+#define AUTOFS_PROTO_SUBVERSION	1
 
 /* Mask for expire behaviour */
 #define AUTOFS_EXP_IMMEDIATE		1
 #define AUTOFS_EXP_LEAVES		2
 
+#define AUTOFS_TYPE_ANY			0x0000
+#define AUTOFS_TYPE_INDIRECT		0x0001
+#define AUTOFS_TYPE_DIRECT		0x0002
+#define AUTOFS_TYPE_OFFSET		0x0004
+
 /* Daemon notification packet types */
 enum autofs_notify {
 	NFY_NONE,
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0a24d5550eb3..bee52abb8a4d 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -175,6 +175,8 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
  * BDI_CAP_READ_MAP:       Can be mapped for reading
  * BDI_CAP_WRITE_MAP:      Can be mapped for writing
  * BDI_CAP_EXEC_MAP:       Can be mapped for execution
+ *
+ * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
  */
 #define BDI_CAP_NO_ACCT_DIRTY	0x00000001
 #define BDI_CAP_NO_WRITEBACK	0x00000002
@@ -184,6 +186,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
 #define BDI_CAP_WRITE_MAP	0x00000020
 #define BDI_CAP_EXEC_MAP	0x00000040
 #define BDI_CAP_NO_ACCT_WB	0x00000080
+#define BDI_CAP_SWAP_BACKED	0x00000100
 
 #define BDI_CAP_VMFLAGS \
 	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
@@ -248,6 +251,11 @@ static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
 				  BDI_CAP_NO_WRITEBACK));
 }
 
+static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
+{
+	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
+}
+
 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
 {
 	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
@@ -258,4 +266,9 @@ static inline bool mapping_cap_account_dirty(struct address_space *mapping)
 	return bdi_cap_account_dirty(mapping->backing_dev_info);
 }
 
+static inline bool mapping_cap_swap_backed(struct address_space *mapping)
+{
+	return bdi_cap_swap_backed(mapping->backing_dev_info);
+}
+
 #endif	/* _LINUX_BACKING_DEV_H */
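
The new BDI_CAP_SWAP_BACKED capability is advertised per backing device, and mapping_cap_swap_backed() above lets the VM treat pages of such a mapping as swap-backed. A tmpfs-style filesystem would set the flag alongside its other capabilities, roughly like this (illustrative sketch only, not code from this commit):

static struct backing_dev_info example_backing_dev_info = {
	.ra_pages	= 0,	/* no readahead */
	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK |
			  BDI_CAP_SWAP_BACKED,
};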
diff --git a/include/linux/bcd.h b/include/linux/bcd.h
index 7ac518e3c152..22ea563ba3eb 100644
--- a/include/linux/bcd.h
+++ b/include/linux/bcd.h
@@ -1,12 +1,3 @@
-/* Permission is hereby granted to copy, modify and redistribute this code
- * in terms of the GNU Library General Public License, Version 2 or later,
- * at your option.
- */
-
-/* macros to translate to/from binary and binary-coded decimal (frequently
- * found in RTC chips).
- */
-
 #ifndef _BCD_H
 #define _BCD_H
 
@@ -15,11 +6,4 @@
 unsigned bcd2bin(unsigned char val) __attribute_const__;
 unsigned char bin2bcd(unsigned val) __attribute_const__;
 
-#define BCD2BIN(val)	bcd2bin(val)
-#define BIN2BCD(val)	bin2bcd(val)
-
-/* backwards compat */
-#define BCD_TO_BIN(val)	((val)=BCD2BIN(val))
-#define BIN_TO_BCD(val)	((val)=BIN2BCD(val))
-
 #endif /* _BCD_H */
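
With the BCD2BIN()/BIN2BCD() macros gone, callers use the bcd2bin()/bin2bcd() functions declared above directly. For reference, the conventional conversion they perform is (sketch of the usual implementation, not code taken from this commit):

unsigned bcd2bin(unsigned char val)
{
	/* low nibble holds the ones digit, high nibble the tens digit */
	return (val & 0x0f) + (val >> 4) * 10;
}

unsigned char bin2bcd(unsigned val)
{
	return ((val / 10) << 4) + val % 10;
}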
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 826f62350805..6cbfbe297180 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -35,12 +35,20 @@ struct linux_binprm{
 	struct mm_struct *mm;
 	unsigned long p; /* current top of mem */
 	unsigned int sh_bang:1,
-		misc_bang:1;
+		misc_bang:1,
+		cred_prepared:1,/* true if creds already prepared (multiple
+				 * preps happen for interpreters) */
+		cap_effective:1;/* true if has elevated effective capabilities,
+				 * false if not; except for init which inherits
+				 * its parent's caps anyway */
+#ifdef __alpha__
+	unsigned int taso:1;
+#endif
+	unsigned int recursion_depth;
 	struct file * file;
-	int e_uid, e_gid;
-	kernel_cap_t cap_post_exec_permitted;
-	bool cap_effective;
-	void *security;
+	struct cred *cred;	/* new credentials */
+	int unsafe;		/* how unsafe this exec is (mask of LSM_UNSAFE_*) */
+	unsigned int per_clear;	/* bits to clear in current->personality */
 	int argc, envc;
 	char * filename;	/* Name of binary as seen by procps */
 	char * interp;		/* Name of the binary really executed. Most
@@ -58,6 +66,7 @@ struct linux_binprm{
 #define BINPRM_FLAGS_EXECFD_BIT 1
 #define BINPRM_FLAGS_EXECFD (1 << BINPRM_FLAGS_EXECFD_BIT)
 
+#define BINPRM_MAX_RECURSION 4
 
 /*
  * This structure defines the functions that are used to load the binary formats that
@@ -96,7 +105,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm,
 				 int executable_stack);
 extern int bprm_mm_init(struct linux_binprm *bprm);
 extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
-extern void compute_creds(struct linux_binprm *binprm);
+extern void install_exec_creds(struct linux_binprm *bprm);
 extern int do_coredump(long signr, int exit_code, struct pt_regs * regs);
 extern int set_binfmt(struct linux_binfmt *new);
 extern void free_bprm(struct linux_binprm *);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ff5b4cf9e2da..18462c5b8fff 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -79,14 +79,22 @@ struct bio {
 
 	unsigned int		bi_size;	/* residual I/O count */
 
+	/*
+	 * To keep track of the max segment size, we account for the
+	 * sizes of the first and last mergeable segments in this bio.
+	 */
+	unsigned int		bi_seg_front_size;
+	unsigned int		bi_seg_back_size;
+
 	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
 
 	unsigned int		bi_comp_cpu;	/* completion CPU */
 
+	atomic_t		bi_cnt;		/* pin count */
+
 	struct bio_vec		*bi_io_vec;	/* the actual vec list */
 
 	bio_end_io_t		*bi_end_io;
-	atomic_t		bi_cnt;		/* pin count */
 
 	void			*bi_private;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -94,6 +102,13 @@ struct bio {
 #endif
 
 	bio_destructor_t	*bi_destructor;	/* destructor */
+
+	/*
+	 * We can inline a number of vecs at the end of the bio, to avoid
+	 * double allocations for a small number of bio_vecs. This member
+	 * MUST obviously be kept at the very end of the bio.
+	 */
+	struct bio_vec		bi_inline_vecs[0];
 };
 
 /*
@@ -110,6 +125,7 @@ struct bio {
 #define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
 #define BIO_NULL_MAPPED 9	/* contains invalid user pages */
 #define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
+#define BIO_QUIET	11	/* Make BIO Quiet */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
 
 /*
@@ -129,25 +145,30 @@ struct bio {
  * bit 2 -- barrier
  *	Insert a serialization point in the IO queue, forcing previously
  *	submitted IO to be completed before this oen is issued.
- * bit 3 -- fail fast, don't want low level driver retries
- * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
+ * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
  *	Note that this does NOT indicate that the IO itself is sync, just
  *	that the block layer will not postpone issue of this IO by plugging.
- * bit 5 -- metadata request
+ * bit 4 -- metadata request
  *	Used for tracing to differentiate metadata and data IO. May also
  *	get some preferential treatment in the IO scheduler
- * bit 6 -- discard sectors
+ * bit 5 -- discard sectors
  *	Informs the lower level device that this range of sectors is no longer
  *	used by the file system and may thus be freed by the device. Used
  *	for flash based storage.
+ * bit 6 -- fail fast device errors
+ * bit 7 -- fail fast transport errors
+ * bit 8 -- fail fast driver errors
+ *	Don't want driver retries for any fast fail whatever the reason.
  */
 #define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
 #define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
 #define BIO_RW_BARRIER	2
-#define BIO_RW_FAILFAST	3
-#define BIO_RW_SYNC	4
-#define BIO_RW_META	5
-#define BIO_RW_DISCARD	6
+#define BIO_RW_SYNC	3
+#define BIO_RW_META	4
+#define BIO_RW_DISCARD	5
+#define BIO_RW_FAILFAST_DEV	6
+#define BIO_RW_FAILFAST_TRANSPORT	7
+#define BIO_RW_FAILFAST_DRIVER	8
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
@@ -174,7 +195,10 @@ struct bio {
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
 #define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
 #define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
-#define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
+#define bio_failfast_dev(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DEV))
+#define bio_failfast_transport(bio)	\
+	((bio)->bi_rw & (1 << BIO_RW_FAILFAST_TRANSPORT))
+#define bio_failfast_driver(bio) ((bio)->bi_rw & (1 << BIO_RW_FAILFAST_DRIVER))
 #define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
 #define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
 #define bio_discard(bio)	((bio)->bi_rw & (1 << BIO_RW_DISCARD))
@@ -196,6 +220,11 @@ static inline void *bio_data(struct bio *bio)
 	return NULL;
 }
 
+static inline int bio_has_allocated_vec(struct bio *bio)
+{
+	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
+}
+
 /*
  * will die
  */
@@ -221,12 +250,16 @@ static inline void *bio_data(struct bio *bio)
 #define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
 #define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)
 
+/* Default implementation of BIOVEC_PHYS_MERGEABLE */
+#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
+	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+
 /*
  * allow arch override, for eg virtualized architectures (put in asm/io.h)
  */
 #ifndef BIOVEC_PHYS_MERGEABLE
 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
-	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
 #endif
 
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
@@ -313,7 +346,7 @@ struct bio_pair {
 extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
 extern void bio_pair_release(struct bio_pair *dbio);
 
-extern struct bio_set *bioset_create(int, int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 
 extern struct bio *bio_alloc(gfp_t, int);
@@ -358,6 +391,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 /*
@@ -376,13 +410,17 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
  */
 #define BIO_POOL_SIZE 2
 #define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)
 
 struct bio_set {
+	struct kmem_cache *bio_slab;
+	unsigned int front_pad;
+
 	mempool_t *bio_pool;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	mempool_t *bio_integrity_pool;
 #endif
-	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+	mempool_t *bvec_pool;
 };
 
 struct biovec_slab {
@@ -392,6 +430,7 @@ struct biovec_slab {
 };
 
 extern struct bio_set *fs_bio_set;
+extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;
 
 /*
  * a small number of entries is fine, not going to be performance critical.
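
The new __BIOVEC_PHYS_MERGEABLE() default above lets an architecture or paravirtualized port override BIOVEC_PHYS_MERGEABLE in its asm/io.h while still reusing the generic physical-contiguity test. A hypothetical override could look like this (illustrative only; example_pfns_mergeable() is an invented helper):

#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)				\
	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&				\
	 example_pfns_mergeable(page_to_pfn((vec1)->bv_page),		\
				page_to_pfn((vec2)->bv_page)))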
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 89781fd48859..a08c33a26ca9 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -110,7 +110,6 @@ extern int __bitmap_weight(const unsigned long *bitmap, int bits);
 
 extern int bitmap_scnprintf(char *buf, unsigned int len,
 			const unsigned long *src, int nbits);
-extern int bitmap_scnprintf_len(unsigned int nr_bits);
 extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
 			unsigned long *dst, int nbits);
 extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
@@ -130,6 +129,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
 extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
 extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
 extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
+extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits);
 
 #define BITMAP_LAST_WORD_MASK(nbits) \
 ( \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index a92d9e4ea96e..7035cec583b6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -26,7 +26,6 @@ struct scsi_ioctl_command;
26 26
27struct request_queue; 27struct request_queue;
28struct elevator_queue; 28struct elevator_queue;
29typedef struct elevator_queue elevator_t;
30struct request_pm_state; 29struct request_pm_state;
31struct blk_trace; 30struct blk_trace;
32struct request; 31struct request;
@@ -87,7 +86,9 @@ enum {
87 */ 86 */
88enum rq_flag_bits { 87enum rq_flag_bits {
89 __REQ_RW, /* not set, read. set, write */ 88 __REQ_RW, /* not set, read. set, write */
90 __REQ_FAILFAST, /* no low level driver retries */ 89 __REQ_FAILFAST_DEV, /* no driver retries of device errors */
90 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
91 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
91 __REQ_DISCARD, /* request to discard sectors */ 92 __REQ_DISCARD, /* request to discard sectors */
92 __REQ_SORTED, /* elevator knows about this request */ 93 __REQ_SORTED, /* elevator knows about this request */
93 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 94 __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */
@@ -111,8 +112,10 @@ enum rq_flag_bits {
111}; 112};
112 113
113#define REQ_RW (1 << __REQ_RW) 114#define REQ_RW (1 << __REQ_RW)
115#define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV)
116#define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT)
117#define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER)
114#define REQ_DISCARD (1 << __REQ_DISCARD) 118#define REQ_DISCARD (1 << __REQ_DISCARD)
115#define REQ_FAILFAST (1 << __REQ_FAILFAST)
116#define REQ_SORTED (1 << __REQ_SORTED) 119#define REQ_SORTED (1 << __REQ_SORTED)
117#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 120#define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER)
118#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) 121#define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER)
@@ -309,7 +312,7 @@ struct request_queue
309 */ 312 */
310 struct list_head queue_head; 313 struct list_head queue_head;
311 struct request *last_merge; 314 struct request *last_merge;
312 elevator_t *elevator; 315 struct elevator_queue *elevator;
313 316
314 /* 317 /*
315 * the queue request freelist, one for reads and one for writes 318 * the queue request freelist, one for reads and one for writes
@@ -445,6 +448,7 @@ struct request_queue
445#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */ 448#define QUEUE_FLAG_FAIL_IO 12 /* fake timeout */
446#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ 449#define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */
447#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ 450#define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */
451#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
448 452
449static inline int queue_is_locked(struct request_queue *q) 453static inline int queue_is_locked(struct request_queue *q)
450{ 454{
@@ -518,22 +522,32 @@ enum {
518 * TAG_FLUSH : ordering by tag w/ pre and post flushes 522 * TAG_FLUSH : ordering by tag w/ pre and post flushes
519 * TAG_FUA : ordering by tag w/ pre flush and FUA write 523 * TAG_FUA : ordering by tag w/ pre flush and FUA write
520 */ 524 */
521 QUEUE_ORDERED_NONE = 0x00, 525 QUEUE_ORDERED_BY_DRAIN = 0x01,
522 QUEUE_ORDERED_DRAIN = 0x01, 526 QUEUE_ORDERED_BY_TAG = 0x02,
523 QUEUE_ORDERED_TAG = 0x02, 527 QUEUE_ORDERED_DO_PREFLUSH = 0x10,
524 528 QUEUE_ORDERED_DO_BAR = 0x20,
525 QUEUE_ORDERED_PREFLUSH = 0x10, 529 QUEUE_ORDERED_DO_POSTFLUSH = 0x40,
526 QUEUE_ORDERED_POSTFLUSH = 0x20, 530 QUEUE_ORDERED_DO_FUA = 0x80,
527 QUEUE_ORDERED_FUA = 0x40, 531
528 532 QUEUE_ORDERED_NONE = 0x00,
529 QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | 533
530 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, 534 QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN |
531 QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | 535 QUEUE_ORDERED_DO_BAR,
532 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, 536 QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
533 QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | 537 QUEUE_ORDERED_DO_PREFLUSH |
534 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH, 538 QUEUE_ORDERED_DO_POSTFLUSH,
535 QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | 539 QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN |
536 QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA, 540 QUEUE_ORDERED_DO_PREFLUSH |
541 QUEUE_ORDERED_DO_FUA,
542
543 QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG |
544 QUEUE_ORDERED_DO_BAR,
545 QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG |
546 QUEUE_ORDERED_DO_PREFLUSH |
547 QUEUE_ORDERED_DO_POSTFLUSH,
548 QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG |
549 QUEUE_ORDERED_DO_PREFLUSH |
550 QUEUE_ORDERED_DO_FUA,
537 551
538 /* 552 /*
539 * Ordered operation sequence 553 * Ordered operation sequence
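The old composite ordered-mode names survive, but they are now built from the separate QUEUE_ORDERED_BY_* (method) and QUEUE_ORDERED_DO_* (action) bits. A minimal sketch, assuming a hypothetical my_prepare_flush() callback, of a driver selecting a composite mode exactly as before:

	#include <linux/blkdev.h>

	/* hypothetical cache-flush preparation callback */
	static void my_prepare_flush(struct request_queue *q, struct request *rq)
	{
		/* fill in a device-specific flush command here */
	}

	static int my_setup_ordering(struct request_queue *q, int has_fua)
	{
		/* composite names unchanged; only their encoding is new */
		if (has_fua)
			return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FUA,
						 my_prepare_flush);
		return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
					 my_prepare_flush);
	}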
@@ -560,7 +574,12 @@ enum {
560#define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL) 574#define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL)
561#define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE) 575#define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE)
562 576
563#define blk_noretry_request(rq) ((rq)->cmd_flags & REQ_FAILFAST) 577#define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV)
578#define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT)
579#define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER)
580#define blk_noretry_request(rq) (blk_failfast_dev(rq) || \
581 blk_failfast_transport(rq) || \
582 blk_failfast_driver(rq))
564#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) 583#define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED)
565 584
566#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) 585#define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
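Since REQ_FAILFAST is split into device, transport and driver classes, blk_noretry_request() becomes the OR of three per-class tests. A hedged sketch (the helper names here are illustrative, not part of this patch) of setting and checking the transport-only variant:

	#include <linux/blkdev.h>

	/* illustrative: ask the lower layers not to retry transport errors */
	static void mark_transport_failfast(struct request *rq)
	{
		rq->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	}

	/* illustrative completion-side check, mirroring blk_noretry_request() */
	static int should_skip_retry(struct request *rq)
	{
		return blk_failfast_dev(rq) || blk_failfast_transport(rq) ||
		       blk_failfast_driver(rq);
	}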
@@ -576,7 +595,6 @@ enum {
576#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) 595#define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA)
577#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) 596#define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD)
578#define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 597#define blk_bidi_rq(rq) ((rq)->next_rq != NULL)
579#define blk_empty_barrier(rq) (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
580/* rq->queuelist of dequeued request must be list_empty() */ 598/* rq->queuelist of dequeued request must be list_empty() */
581#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) 599#define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist))
582 600
@@ -653,6 +671,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
653 * default timeout for SG_IO if none specified 671 * default timeout for SG_IO if none specified
654 */ 672 */
655#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ) 673#define BLK_DEFAULT_SG_TIMEOUT (60 * HZ)
674#define BLK_MIN_SG_TIMEOUT (7 * HZ)
656 675
657#ifdef CONFIG_BOUNCE 676#ifdef CONFIG_BOUNCE
658extern int init_emergency_isa_pool(void); 677extern int init_emergency_isa_pool(void);
@@ -708,10 +727,10 @@ extern void blk_plug_device(struct request_queue *);
708extern void blk_plug_device_unlocked(struct request_queue *); 727extern void blk_plug_device_unlocked(struct request_queue *);
709extern int blk_remove_plug(struct request_queue *); 728extern int blk_remove_plug(struct request_queue *);
710extern void blk_recount_segments(struct request_queue *, struct bio *); 729extern void blk_recount_segments(struct request_queue *, struct bio *);
711extern int scsi_cmd_ioctl(struct file *, struct request_queue *, 730extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
712 struct gendisk *, unsigned int, void __user *); 731 unsigned int, void __user *);
713extern int sg_scsi_ioctl(struct file *, struct request_queue *, 732extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
714 struct gendisk *, struct scsi_ioctl_command __user *); 733 struct scsi_ioctl_command __user *);
715 734
716/* 735/*
717 * Temporary export, until SCSI gets fixed up. 736 * Temporary export, until SCSI gets fixed up.
@@ -777,6 +796,8 @@ static inline void blk_run_address_space(struct address_space *mapping)
777 blk_run_backing_dev(mapping->backing_dev_info, NULL); 796 blk_run_backing_dev(mapping->backing_dev_info, NULL);
778} 797}
779 798
799extern void blkdev_dequeue_request(struct request *req);
800
780/* 801/*
781 * blk_end_request() and friends. 802 * blk_end_request() and friends.
782 * __blk_end_request() and end_request() must be called with 803 * __blk_end_request() and end_request() must be called with
@@ -811,11 +832,6 @@ extern void blk_update_request(struct request *rq, int error,
811extern unsigned int blk_rq_bytes(struct request *rq); 832extern unsigned int blk_rq_bytes(struct request *rq);
812extern unsigned int blk_rq_cur_bytes(struct request *rq); 833extern unsigned int blk_rq_cur_bytes(struct request *rq);
813 834
814static inline void blkdev_dequeue_request(struct request *req)
815{
816 elv_dequeue_request(req->q, req);
817}
818
819/* 835/*
820 * Access functions for manipulating queue properties 836 * Access functions for manipulating queue properties
821 */ 837 */
@@ -848,15 +864,14 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
848extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 864extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
849extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 865extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
850extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); 866extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
851extern int blk_do_ordered(struct request_queue *, struct request **); 867extern bool blk_do_ordered(struct request_queue *, struct request **);
852extern unsigned blk_ordered_cur_seq(struct request_queue *); 868extern unsigned blk_ordered_cur_seq(struct request_queue *);
853extern unsigned blk_ordered_req_seq(struct request *); 869extern unsigned blk_ordered_req_seq(struct request *);
854extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int); 870extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
855 871
856extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); 872extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
857extern void blk_dump_rq_flags(struct request *, char *); 873extern void blk_dump_rq_flags(struct request *, char *);
858extern void generic_unplug_device(struct request_queue *); 874extern void generic_unplug_device(struct request_queue *);
859extern void __generic_unplug_device(struct request_queue *);
860extern long nr_blockdev_pages(void); 875extern long nr_blockdev_pages(void);
861 876
862int blk_get_queue(struct request_queue *); 877int blk_get_queue(struct request_queue *);
@@ -902,7 +917,8 @@ static inline int sb_issue_discard(struct super_block *sb,
902* command filter functions 917* command filter functions
903*/ 918*/
904extern int blk_verify_command(struct blk_cmd_filter *filter, 919extern int blk_verify_command(struct blk_cmd_filter *filter,
905 unsigned char *cmd, int has_write_perm); 920 unsigned char *cmd, fmode_t has_write_perm);
921extern void blk_unregister_filter(struct gendisk *disk);
906extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); 922extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
907 923
908#define MAX_PHYS_SEGMENTS 128 924#define MAX_PHYS_SEGMENTS 128
@@ -912,6 +928,8 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
912 928
913#define MAX_SEGMENT_SIZE 65536 929#define MAX_SEGMENT_SIZE 65536
914 930
931#define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL
932
915#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 933#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
916 934
917static inline int queue_hardsect_size(struct request_queue *q) 935static inline int queue_hardsect_size(struct request_queue *q)
@@ -968,7 +986,6 @@ static inline void put_dev_sector(Sector p)
968 986
969struct work_struct; 987struct work_struct;
970int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 988int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
971void kblockd_flush_work(struct work_struct *work);
972 989
973#define MODULE_ALIAS_BLOCKDEV(major,minor) \ 990#define MODULE_ALIAS_BLOCKDEV(major,minor) \
974 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) 991 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
@@ -1048,6 +1065,22 @@ static inline int blk_integrity_rq(struct request *rq)
1048 1065
1049#endif /* CONFIG_BLK_DEV_INTEGRITY */ 1066#endif /* CONFIG_BLK_DEV_INTEGRITY */
1050 1067
1068struct block_device_operations {
1069 int (*open) (struct block_device *, fmode_t);
1070 int (*release) (struct gendisk *, fmode_t);
1071 int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1072 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1073 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1074 int (*direct_access) (struct block_device *, sector_t,
1075 void **, unsigned long *);
1076 int (*media_changed) (struct gendisk *);
1077 int (*revalidate_disk) (struct gendisk *);
1078 int (*getgeo)(struct block_device *, struct hd_geometry *);
1079 struct module *owner;
1080};
1081
1082extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1083 unsigned long);
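block_device_operations now takes (struct block_device *, fmode_t) pairs instead of inode/file arguments. A minimal sketch of a driver's ops table under the new prototypes; the my_* handlers and the 4/16 geometry are placeholders:

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/genhd.h>
	#include <linux/blkdev.h>
	#include <linux/hdreg.h>

	static int my_open(struct block_device *bdev, fmode_t mode)
	{
		return 0;		/* nothing to claim in this sketch */
	}

	static int my_release(struct gendisk *disk, fmode_t mode)
	{
		return 0;
	}

	static int my_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	{
		geo->heads = 4;
		geo->sectors = 16;
		/* 4 * 16 == 64 sectors per cylinder, so shift instead of dividing */
		geo->cylinders = get_capacity(bdev->bd_disk) >> 6;
		return 0;
	}

	static struct block_device_operations my_fops = {
		.owner	 = THIS_MODULE,
		.open	 = my_open,
		.release = my_release,
		.getgeo	 = my_getgeo,
	};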
1051#else /* CONFIG_BLOCK */ 1084#else /* CONFIG_BLOCK */
1052/* 1085/*
1053 * stubs for when the block layer is configured out 1086 * stubs for when the block layer is configured out
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3a31eb506164..1dba3493d520 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -24,6 +24,7 @@ enum blktrace_cat {
24 BLK_TC_AHEAD = 1 << 11, /* readahead */ 24 BLK_TC_AHEAD = 1 << 11, /* readahead */
25 BLK_TC_META = 1 << 12, /* metadata */ 25 BLK_TC_META = 1 << 12, /* metadata */
26 BLK_TC_DISCARD = 1 << 13, /* discard requests */ 26 BLK_TC_DISCARD = 1 << 13, /* discard requests */
27 BLK_TC_DRV_DATA = 1 << 14, /* binary per-driver data */
27 28
28 BLK_TC_END = 1 << 15, /* only 16-bits, reminder */ 29 BLK_TC_END = 1 << 15, /* only 16-bits, reminder */
29}; 30};
@@ -51,6 +52,7 @@ enum blktrace_act {
51 __BLK_TA_BOUNCE, /* bio was bounced */ 52 __BLK_TA_BOUNCE, /* bio was bounced */
52 __BLK_TA_REMAP, /* bio was remapped */ 53 __BLK_TA_REMAP, /* bio was remapped */
53 __BLK_TA_ABORT, /* request aborted */ 54 __BLK_TA_ABORT, /* request aborted */
55 __BLK_TA_DRV_DATA, /* driver-specific binary data */
54}; 56};
55 57
56/* 58/*
@@ -82,6 +84,7 @@ enum blktrace_notify {
82#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE) 84#define BLK_TA_BOUNCE (__BLK_TA_BOUNCE)
83#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE)) 85#define BLK_TA_REMAP (__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
84#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE)) 86#define BLK_TA_ABORT (__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
87#define BLK_TA_DRV_DATA (__BLK_TA_DRV_DATA | BLK_TC_ACT(BLK_TC_DRV_DATA))
85 88
86#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY)) 89#define BLK_TN_PROCESS (__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
87#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY)) 90#define BLK_TN_TIMESTAMP (__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
@@ -157,7 +160,6 @@ struct blk_trace {
157 160
158extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); 161extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
159extern void blk_trace_shutdown(struct request_queue *); 162extern void blk_trace_shutdown(struct request_queue *);
160extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
161extern int do_blk_trace_setup(struct request_queue *q, 163extern int do_blk_trace_setup(struct request_queue *q,
162 char *name, dev_t dev, struct blk_user_trace_setup *buts); 164 char *name, dev_t dev, struct blk_user_trace_setup *buts);
163extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); 165extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
@@ -183,140 +185,8 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
183 } while (0) 185 } while (0)
184#define BLK_TN_MAX_MSG 128 186#define BLK_TN_MAX_MSG 128
185 187
186/** 188extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
187 * blk_add_trace_rq - Add a trace for a request oriented action 189 void *data, size_t len);
188 * @q: queue the io is for
189 * @rq: the source request
190 * @what: the action
191 *
192 * Description:
193 * Records an action against a request. Will log the bio offset + size.
194 *
195 **/
196static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
197 u32 what)
198{
199 struct blk_trace *bt = q->blk_trace;
200 int rw = rq->cmd_flags & 0x03;
201
202 if (likely(!bt))
203 return;
204
205 if (blk_discard_rq(rq))
206 rw |= (1 << BIO_RW_DISCARD);
207
208 if (blk_pc_request(rq)) {
209 what |= BLK_TC_ACT(BLK_TC_PC);
210 __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
211 } else {
212 what |= BLK_TC_ACT(BLK_TC_FS);
213 __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
214 }
215}
216
217/**
218 * blk_add_trace_bio - Add a trace for a bio oriented action
219 * @q: queue the io is for
220 * @bio: the source bio
221 * @what: the action
222 *
223 * Description:
224 * Records an action against a bio. Will log the bio offset + size.
225 *
226 **/
227static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
228 u32 what)
229{
230 struct blk_trace *bt = q->blk_trace;
231
232 if (likely(!bt))
233 return;
234
235 __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
236}
237
238/**
239 * blk_add_trace_generic - Add a trace for a generic action
240 * @q: queue the io is for
241 * @bio: the source bio
242 * @rw: the data direction
243 * @what: the action
244 *
245 * Description:
246 * Records a simple trace
247 *
248 **/
249static inline void blk_add_trace_generic(struct request_queue *q,
250 struct bio *bio, int rw, u32 what)
251{
252 struct blk_trace *bt = q->blk_trace;
253
254 if (likely(!bt))
255 return;
256
257 if (bio)
258 blk_add_trace_bio(q, bio, what);
259 else
260 __blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
261}
262
263/**
264 * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
265 * @q: queue the io is for
266 * @what: the action
267 * @bio: the source bio
268 * @pdu: the integer payload
269 *
270 * Description:
271 * Adds a trace with some integer payload. This might be an unplug
272 * option given as the action, with the depth at unplug time given
273 * as the payload
274 *
275 **/
276static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
277 struct bio *bio, unsigned int pdu)
278{
279 struct blk_trace *bt = q->blk_trace;
280 __be64 rpdu = cpu_to_be64(pdu);
281
282 if (likely(!bt))
283 return;
284
285 if (bio)
286 __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
287 else
288 __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
289}
290
291/**
292 * blk_add_trace_remap - Add a trace for a remap operation
293 * @q: queue the io is for
294 * @bio: the source bio
295 * @dev: target device
296 * @from: source sector
297 * @to: target sector
298 *
299 * Description:
300 * Device mapper or raid target sometimes need to split a bio because
301 * it spans a stripe (or similar). Add a trace for that action.
302 *
303 **/
304static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
305 dev_t dev, sector_t from, sector_t to)
306{
307 struct blk_trace *bt = q->blk_trace;
308 struct blk_io_trace_remap r;
309
310 if (likely(!bt))
311 return;
312
313 r.device = cpu_to_be32(dev);
314 r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev);
315 r.sector = cpu_to_be64(to);
316
317 __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
318}
319
320extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, 190extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
321 char __user *arg); 191 char __user *arg);
322extern int blk_trace_startstop(struct request_queue *q, int start); 192extern int blk_trace_startstop(struct request_queue *q, int start);
@@ -325,12 +195,8 @@ extern int blk_trace_remove(struct request_queue *q);
325#else /* !CONFIG_BLK_DEV_IO_TRACE */ 195#else /* !CONFIG_BLK_DEV_IO_TRACE */
326#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) 196#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
327#define blk_trace_shutdown(q) do { } while (0) 197#define blk_trace_shutdown(q) do { } while (0)
328#define blk_add_trace_rq(q, rq, what) do { } while (0)
329#define blk_add_trace_bio(q, rq, what) do { } while (0)
330#define blk_add_trace_generic(q, rq, rw, what) do { } while (0)
331#define blk_add_trace_pdu_int(q, what, bio, pdu) do { } while (0)
332#define blk_add_trace_remap(q, bio, dev, f, t) do {} while (0)
333#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY) 198#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY)
199#define blk_add_driver_data(q, rq, data, len) do {} while (0)
334#define blk_trace_setup(q, name, dev, arg) (-ENOTTY) 200#define blk_trace_setup(q, name, dev, arg) (-ENOTTY)
335#define blk_trace_startstop(q, start) (-ENOTTY) 201#define blk_trace_startstop(q, start) (-ENOTTY)
336#define blk_trace_remove(q) (-ENOTTY) 202#define blk_trace_remove(q) (-ENOTTY)
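The bulky blk_add_trace_* inline helpers leave the header; what remains for drivers is blk_add_driver_data(), paired with the new BLK_TC_DRV_DATA category and BLK_TA_DRV_DATA action. A hedged sketch of a driver attaching a small binary payload (the payload struct is invented for illustration) to a request's trace stream:

	#include <linux/blkdev.h>
	#include <linux/blktrace_api.h>

	/* hypothetical per-command state worth seeing in blktrace output */
	struct my_drv_trace {
		u32 tag;
		u32 hw_status;
	};

	static void my_trace_completion(struct request_queue *q, struct request *rq,
					u32 tag, u32 hw_status)
	{
		struct my_drv_trace d = { .tag = tag, .hw_status = hw_status };

		/* compiles away to the no-op stub when CONFIG_BLK_DEV_IO_TRACE=n */
		blk_add_driver_data(q, rq, &d, sizeof(d));
	}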
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index 777dbf695d44..27b1bcffe408 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -2,7 +2,6 @@
2#define _LINUX_BH_H 2#define _LINUX_BH_H
3 3
4extern void local_bh_disable(void); 4extern void local_bh_disable(void);
5extern void __local_bh_enable(void);
6extern void _local_bh_enable(void); 5extern void _local_bh_enable(void);
7extern void local_bh_enable(void); 6extern void local_bh_enable(void);
8extern void local_bh_enable_ip(unsigned long ip); 7extern void local_bh_enable_ip(unsigned long ip);
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index eadaab44015f..8605f8a74df9 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -35,6 +35,7 @@ enum bh_state_bits {
35 BH_Ordered, /* ordered write */ 35 BH_Ordered, /* ordered write */
36 BH_Eopnotsupp, /* operation not supported (barrier) */ 36 BH_Eopnotsupp, /* operation not supported (barrier) */
37 BH_Unwritten, /* Buffer is allocated on disk but not written */ 37 BH_Unwritten, /* Buffer is allocated on disk but not written */
 38	BH_Quiet,	/* Buffer error printks to be quiet */
38 39
39 BH_PrivateStart,/* not a state bit, but the first bit available 40 BH_PrivateStart,/* not a state bit, but the first bit available
40 * for private allocation by other entities 41 * for private allocation by other entities
@@ -322,7 +323,7 @@ static inline void wait_on_buffer(struct buffer_head *bh)
322 323
323static inline int trylock_buffer(struct buffer_head *bh) 324static inline int trylock_buffer(struct buffer_head *bh)
324{ 325{
325 return likely(!test_and_set_bit(BH_Lock, &bh->b_state)); 326 return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
326} 327}
327 328
328static inline void lock_buffer(struct buffer_head *bh) 329static inline void lock_buffer(struct buffer_head *bh)
diff --git a/include/linux/byteorder/Kbuild b/include/linux/byteorder/Kbuild
index 1133d5f9d818..fbaa7f9cee32 100644
--- a/include/linux/byteorder/Kbuild
+++ b/include/linux/byteorder/Kbuild
@@ -1,3 +1,4 @@
1unifdef-y += big_endian.h 1unifdef-y += big_endian.h
2unifdef-y += little_endian.h 2unifdef-y += little_endian.h
3unifdef-y += swab.h 3unifdef-y += swab.h
4unifdef-y += swabb.h
diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h
index 44f95b92393b..1cba3f3efe5f 100644
--- a/include/linux/byteorder/big_endian.h
+++ b/include/linux/byteorder/big_endian.h
@@ -10,6 +10,7 @@
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/byteorder/swab.h> 12#include <linux/byteorder/swab.h>
13#include <linux/byteorder/swabb.h>
13 14
14#define __constant_htonl(x) ((__force __be32)(__u32)(x)) 15#define __constant_htonl(x) ((__force __be32)(__u32)(x))
15#define __constant_ntohl(x) ((__force __u32)(__be32)(x)) 16#define __constant_ntohl(x) ((__force __u32)(__be32)(x))
diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
index 4cc170a31762..cedc1b5a289c 100644
--- a/include/linux/byteorder/little_endian.h
+++ b/include/linux/byteorder/little_endian.h
@@ -10,6 +10,7 @@
10 10
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/byteorder/swab.h> 12#include <linux/byteorder/swab.h>
13#include <linux/byteorder/swabb.h>
13 14
14#define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) 15#define __constant_htonl(x) ((__force __be32)___constant_swab32((x)))
15#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) 16#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x))
diff --git a/include/linux/c2port.h b/include/linux/c2port.h
new file mode 100644
index 000000000000..7b5a2388ba67
--- /dev/null
+++ b/include/linux/c2port.h
@@ -0,0 +1,65 @@
1/*
2 * Silicon Labs C2 port Linux support
3 *
4 * Copyright (c) 2007 Rodolfo Giometti <giometti@linux.it>
5 * Copyright (c) 2007 Eurotech S.p.A. <info@eurotech.it>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation
10 */
11
12#include <linux/device.h>
13
14#define C2PORT_NAME_LEN 32
15
16/*
17 * C2 port basic structs
18 */
19
20/* Main struct */
21struct c2port_ops;
22struct c2port_device {
23 unsigned int access:1;
24 unsigned int flash_access:1;
25
26 int id;
27 char name[C2PORT_NAME_LEN];
28 struct c2port_ops *ops;
29 struct mutex mutex; /* prevent races during read/write */
30
31 struct device *dev;
32
33 void *private_data;
34};
35
36/* Basic operations */
37struct c2port_ops {
38 /* Flash layout */
39 unsigned short block_size; /* flash block size in bytes */
40 unsigned short blocks_num; /* flash blocks number */
41
42 /* Enable or disable the access to C2 port */
43 void (*access)(struct c2port_device *dev, int status);
44
45 /* Set C2D data line as input/output */
46 void (*c2d_dir)(struct c2port_device *dev, int dir);
47
48 /* Read/write C2D data line */
49 int (*c2d_get)(struct c2port_device *dev);
50 void (*c2d_set)(struct c2port_device *dev, int status);
51
52 /* Write C2CK clock line */
53 void (*c2ck_set)(struct c2port_device *dev, int status);
54};
55
56/*
57 * Exported functions
58 */
59
60#define to_class_dev(obj) container_of((obj), struct class_device, kobj)
61#define to_c2port_device(obj) container_of((obj), struct c2port_device, class)
62
63extern struct c2port_device *c2port_device_register(char *name,
64 struct c2port_ops *ops, void *devdata);
65extern void c2port_device_unregister(struct c2port_device *dev);
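A minimal registration sketch, assuming the C2D and C2CK lines sit on two GPIOs; the gpio numbers, flash geometry and direction convention below are placeholders, not taken from this header:

	#include <linux/gpio.h>
	#include <linux/c2port.h>

	#define MY_C2D	100		/* placeholder gpio numbers */
	#define MY_C2CK	101

	static void my_access(struct c2port_device *dev, int status)
	{
		/* drive the clock line only while the core enables access */
		if (status)
			gpio_direction_output(MY_C2CK, 1);
		else
			gpio_direction_input(MY_C2CK);
	}

	static void my_c2d_dir(struct c2port_device *dev, int dir)
	{
		/* assumption: dir != 0 means "configure C2D as input" */
		if (dir)
			gpio_direction_input(MY_C2D);
		else
			gpio_direction_output(MY_C2D, gpio_get_value(MY_C2D));
	}

	static int my_c2d_get(struct c2port_device *dev)
	{
		return gpio_get_value(MY_C2D);
	}

	static void my_c2d_set(struct c2port_device *dev, int status)
	{
		gpio_set_value(MY_C2D, status);
	}

	static void my_c2ck_set(struct c2port_device *dev, int status)
	{
		gpio_set_value(MY_C2CK, status);
	}

	static struct c2port_ops my_c2port_ops = {
		.block_size	= 512,		/* placeholder flash geometry */
		.blocks_num	= 30,
		.access		= my_access,
		.c2d_dir	= my_c2d_dir,
		.c2d_get	= my_c2d_get,
		.c2d_set	= my_c2d_set,
		.c2ck_set	= my_c2ck_set,
	};

	/* in probe: c2port_device_register("my-mcu", &my_c2port_ops, NULL); */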
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index e9ca210ffa5b..f50785ad4781 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -19,7 +19,7 @@
19#include <linux/skbuff.h> 19#include <linux/skbuff.h>
20#include <linux/netdevice.h> 20#include <linux/netdevice.h>
21 21
22#define CAN_VERSION "20071116" 22#define CAN_VERSION "20081130"
23 23
24/* increment this number each time you change some user-space interface */ 24/* increment this number each time you change some user-space interface */
25#define CAN_ABI_VERSION "8" 25#define CAN_ABI_VERSION "8"
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 9d1fe30b6f6c..e22f48c2a46f 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -53,6 +53,7 @@ typedef struct __user_cap_data_struct {
53#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX 53#define XATTR_NAME_CAPS XATTR_SECURITY_PREFIX XATTR_CAPS_SUFFIX
54 54
55#define VFS_CAP_REVISION_MASK 0xFF000000 55#define VFS_CAP_REVISION_MASK 0xFF000000
56#define VFS_CAP_REVISION_SHIFT 24
56#define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK 57#define VFS_CAP_FLAGS_MASK ~VFS_CAP_REVISION_MASK
57#define VFS_CAP_FLAGS_EFFECTIVE 0x000001 58#define VFS_CAP_FLAGS_EFFECTIVE 0x000001
58 59
@@ -68,6 +69,9 @@ typedef struct __user_cap_data_struct {
68#define VFS_CAP_U32 VFS_CAP_U32_2 69#define VFS_CAP_U32 VFS_CAP_U32_2
69#define VFS_CAP_REVISION VFS_CAP_REVISION_2 70#define VFS_CAP_REVISION VFS_CAP_REVISION_2
70 71
72#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
73extern int file_caps_enabled;
74#endif
71 75
72struct vfs_cap_data { 76struct vfs_cap_data {
73 __le32 magic_etc; /* Little endian */ 77 __le32 magic_etc; /* Little endian */
@@ -96,6 +100,13 @@ typedef struct kernel_cap_struct {
96 __u32 cap[_KERNEL_CAPABILITY_U32S]; 100 __u32 cap[_KERNEL_CAPABILITY_U32S];
97} kernel_cap_t; 101} kernel_cap_t;
98 102
 103/* exactly the same as vfs_cap_data, but in cpu-native endianness and always filled completely */
104struct cpu_vfs_cap_data {
105 __u32 magic_etc;
106 kernel_cap_t permitted;
107 kernel_cap_t inheritable;
108};
109
99#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct)) 110#define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct))
100#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t)) 111#define _KERNEL_CAP_T_SIZE (sizeof(kernel_cap_t))
101 112
@@ -454,6 +465,13 @@ static inline int cap_isclear(const kernel_cap_t a)
454 return 1; 465 return 1;
455} 466}
456 467
468/*
469 * Check if "a" is a subset of "set".
470 * return 1 if ALL of the capabilities in "a" are also in "set"
471 * cap_issubset(0101, 1111) will return 1
472 * return 0 if ANY of the capabilities in "a" are not in "set"
473 * cap_issubset(1111, 0101) will return 0
474 */
457static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set) 475static inline int cap_issubset(const kernel_cap_t a, const kernel_cap_t set)
458{ 476{
459 kernel_cap_t dest; 477 kernel_cap_t dest;
@@ -501,8 +519,6 @@ extern const kernel_cap_t __cap_empty_set;
501extern const kernel_cap_t __cap_full_set; 519extern const kernel_cap_t __cap_full_set;
502extern const kernel_cap_t __cap_init_eff_set; 520extern const kernel_cap_t __cap_init_eff_set;
503 521
504kernel_cap_t cap_set_effective(const kernel_cap_t pE_new);
505
506/** 522/**
507 * has_capability - Determine if a task has a superior capability available 523 * has_capability - Determine if a task has a superior capability available
508 * @t: The task in question 524 * @t: The task in question
@@ -514,9 +530,14 @@ kernel_cap_t cap_set_effective(const kernel_cap_t pE_new);
514 * Note that this does not set PF_SUPERPRIV on the task. 530 * Note that this does not set PF_SUPERPRIV on the task.
515 */ 531 */
516#define has_capability(t, cap) (security_capable((t), (cap)) == 0) 532#define has_capability(t, cap) (security_capable((t), (cap)) == 0)
533#define has_capability_noaudit(t, cap) (security_capable_noaudit((t), (cap)) == 0)
517 534
518extern int capable(int cap); 535extern int capable(int cap);
519 536
537/* audit system wants to get cap info from files as well */
538struct dentry;
539extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
540
520#endif /* __KERNEL__ */ 541#endif /* __KERNEL__ */
521 542
522#endif /* !_LINUX_CAPABILITY_H */ 543#endif /* !_LINUX_CAPABILITY_H */
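Alongside the removal of cap_set_effective(), the file-capability path gains get_vfs_caps_from_disk() and the cpu-endian cpu_vfs_cap_data. The subset rule documented above is easy to misread, so here is a small hedged sketch of what cap_issubset() answers (helper names are illustrative):

	#include <linux/capability.h>

	/* 1 if every capability raised in 'want' is also raised in 'have' */
	static int fits_within(const kernel_cap_t want, const kernel_cap_t have)
	{
		return cap_issubset(want, have);
	}

	static int subset_demo(void)
	{
		kernel_cap_t one = CAP_EMPTY_SET;

		cap_raise(one, CAP_NET_ADMIN);
		return fits_within(one, CAP_FULL_SET);	/* evaluates to 1 */
	}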
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index 5db265ea60f6..0b49e08d3cb0 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -987,11 +987,11 @@ struct cdrom_device_ops {
987}; 987};
988 988
989/* the general block_device operations structure: */ 989/* the general block_device operations structure: */
990extern int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, 990extern int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev,
991 struct file *fp); 991 fmode_t mode);
992extern int cdrom_release(struct cdrom_device_info *cdi, struct file *fp); 992extern void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode);
993extern int cdrom_ioctl(struct file *file, struct cdrom_device_info *cdi, 993extern int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
994 struct inode *ip, unsigned int cmd, unsigned long arg); 994 fmode_t mode, unsigned int cmd, unsigned long arg);
995extern int cdrom_media_changed(struct cdrom_device_info *); 995extern int cdrom_media_changed(struct cdrom_device_info *);
996 996
997extern int register_cdrom(struct cdrom_device_info *cdi); 997extern int register_cdrom(struct cdrom_device_info *cdi);
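cdrom_open(), cdrom_release() and cdrom_ioctl() are reworked to take a block_device plus fmode_t, in line with the block_device_operations change in blkdev.h. A hedged sketch of forwarding from a driver's ops table; stashing the cdrom_device_info in gendisk->private_data is only an assumption of this sketch:

	#include <linux/fs.h>
	#include <linux/genhd.h>
	#include <linux/cdrom.h>

	static struct cdrom_device_info *my_cdi(struct block_device *bdev)
	{
		return bdev->bd_disk->private_data;	/* driver-specific lookup */
	}

	static int my_cd_open(struct block_device *bdev, fmode_t mode)
	{
		return cdrom_open(my_cdi(bdev), bdev, mode);
	}

	static int my_cd_release(struct gendisk *disk, fmode_t mode)
	{
		cdrom_release(disk->private_data, mode);	/* now returns void */
		return 0;
	}

	static int my_cd_ioctl(struct block_device *bdev, fmode_t mode,
			       unsigned cmd, unsigned long arg)
	{
		return cdrom_ioctl(my_cdi(bdev), bdev, mode, cmd, arg);
	}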
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c98dd7cb7076..1164963c3a85 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -9,12 +9,12 @@
9 */ 9 */
10 10
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <linux/kref.h>
13#include <linux/cpumask.h> 12#include <linux/cpumask.h>
14#include <linux/nodemask.h> 13#include <linux/nodemask.h>
15#include <linux/rcupdate.h> 14#include <linux/rcupdate.h>
16#include <linux/cgroupstats.h> 15#include <linux/cgroupstats.h>
17#include <linux/prio_heap.h> 16#include <linux/prio_heap.h>
17#include <linux/rwsem.h>
18 18
19#ifdef CONFIG_CGROUPS 19#ifdef CONFIG_CGROUPS
20 20
@@ -25,7 +25,6 @@ struct cgroup;
25 25
26extern int cgroup_init_early(void); 26extern int cgroup_init_early(void);
27extern int cgroup_init(void); 27extern int cgroup_init(void);
28extern void cgroup_init_smp(void);
29extern void cgroup_lock(void); 28extern void cgroup_lock(void);
30extern bool cgroup_lock_live_group(struct cgroup *cgrp); 29extern bool cgroup_lock_live_group(struct cgroup *cgrp);
31extern void cgroup_unlock(void); 30extern void cgroup_unlock(void);
@@ -137,6 +136,15 @@ struct cgroup {
137 * release_list_lock 136 * release_list_lock
138 */ 137 */
139 struct list_head release_list; 138 struct list_head release_list;
139
140 /* pids_mutex protects the fields below */
141 struct rw_semaphore pids_mutex;
142 /* Array of process ids in the cgroup */
143 pid_t *tasks_pids;
144 /* How many files are using the current tasks_pids array */
145 int pids_use_count;
146 /* Length of the current tasks_pids array */
147 int pids_length;
140}; 148};
141 149
142/* A css_set is a structure holding pointers to a set of 150/* A css_set is a structure holding pointers to a set of
@@ -149,7 +157,7 @@ struct cgroup {
149struct css_set { 157struct css_set {
150 158
151 /* Reference count */ 159 /* Reference count */
152 struct kref ref; 160 atomic_t refcount;
153 161
154 /* 162 /*
155 * List running through all cgroup groups in the same hash 163 * List running through all cgroup groups in the same hash
@@ -326,7 +334,8 @@ struct cgroup_subsys {
326 */ 334 */
327 void (*mm_owner_changed)(struct cgroup_subsys *ss, 335 void (*mm_owner_changed)(struct cgroup_subsys *ss,
328 struct cgroup *old, 336 struct cgroup *old,
329 struct cgroup *new); 337 struct cgroup *new,
338 struct task_struct *p);
330 int subsys_id; 339 int subsys_id;
331 int active; 340 int active;
332 int disabled; 341 int disabled;
@@ -338,8 +347,6 @@ struct cgroup_subsys {
338 struct cgroupfs_root *root; 347 struct cgroupfs_root *root;
339 348
340 struct list_head sibling; 349 struct list_head sibling;
341
342 void *private;
343}; 350};
344 351
345#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys; 352#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
@@ -393,11 +400,13 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
393int cgroup_scan_tasks(struct cgroup_scanner *scan); 400int cgroup_scan_tasks(struct cgroup_scanner *scan);
394int cgroup_attach_task(struct cgroup *, struct task_struct *); 401int cgroup_attach_task(struct cgroup *, struct task_struct *);
395 402
403void cgroup_mm_owner_callbacks(struct task_struct *old,
404 struct task_struct *new);
405
396#else /* !CONFIG_CGROUPS */ 406#else /* !CONFIG_CGROUPS */
397 407
398static inline int cgroup_init_early(void) { return 0; } 408static inline int cgroup_init_early(void) { return 0; }
399static inline int cgroup_init(void) { return 0; } 409static inline int cgroup_init(void) { return 0; }
400static inline void cgroup_init_smp(void) {}
401static inline void cgroup_fork(struct task_struct *p) {} 410static inline void cgroup_fork(struct task_struct *p) {}
402static inline void cgroup_fork_callbacks(struct task_struct *p) {} 411static inline void cgroup_fork_callbacks(struct task_struct *p) {}
403static inline void cgroup_post_fork(struct task_struct *p) {} 412static inline void cgroup_post_fork(struct task_struct *p) {}
@@ -411,15 +420,9 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
411 return -EINVAL; 420 return -EINVAL;
412} 421}
413 422
423static inline void cgroup_mm_owner_callbacks(struct task_struct *old,
424 struct task_struct *new) {}
425
414#endif /* !CONFIG_CGROUPS */ 426#endif /* !CONFIG_CGROUPS */
415 427
416#ifdef CONFIG_MM_OWNER
417extern void
418cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new);
419#else /* !CONFIG_MM_OWNER */
420static inline void
421cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
422{
423}
424#endif /* CONFIG_MM_OWNER */
425#endif /* _LINUX_CGROUP_H */ 428#endif /* _LINUX_CGROUP_H */
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index e2877454ec82..9c8d31bacf46 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -48,3 +48,15 @@ SUBSYS(devices)
48#endif 48#endif
49 49
50/* */ 50/* */
51
52#ifdef CONFIG_CGROUP_FREEZER
53SUBSYS(freezer)
54#endif
55
56/* */
57
58#ifdef CONFIG_NET_CLS_CGROUP
59SUBSYS(net_cls)
60#endif
61
62/* */
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 5ca8c6fddb56..778777316ea4 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -35,6 +35,8 @@ struct clk;
35 * clk_get may return different clock producers depending on @dev.) 35 * clk_get may return different clock producers depending on @dev.)
36 * 36 *
37 * Drivers must assume that the clock source is not enabled. 37 * Drivers must assume that the clock source is not enabled.
38 *
39 * clk_get should not be called from within interrupt context.
38 */ 40 */
39struct clk *clk_get(struct device *dev, const char *id); 41struct clk *clk_get(struct device *dev, const char *id);
40 42
@@ -76,6 +78,8 @@ unsigned long clk_get_rate(struct clk *clk);
76 * Note: drivers must ensure that all clk_enable calls made on this 78 * Note: drivers must ensure that all clk_enable calls made on this
77 * clock source are balanced by clk_disable calls prior to calling 79 * clock source are balanced by clk_disable calls prior to calling
78 * this function. 80 * this function.
81 *
82 * clk_put should not be called from within interrupt context.
79 */ 83 */
80void clk_put(struct clk *clk); 84void clk_put(struct clk *clk);
81 85
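The added notes only forbid clk_get()/clk_put() from interrupt context. A minimal probe/remove sketch under that rule; the device pointer and the "my_clk" id are placeholders:

	#include <linux/device.h>
	#include <linux/clk.h>
	#include <linux/err.h>

	static struct clk *my_clk;

	static int my_probe(struct device *dev)
	{
		/* process context only: clk_get() may sleep */
		my_clk = clk_get(dev, "my_clk");
		if (IS_ERR(my_clk))
			return PTR_ERR(my_clk);

		return clk_enable(my_clk);
	}

	static void my_remove(struct device *dev)
	{
		clk_disable(my_clk);
		clk_put(my_clk);	/* likewise, never from interrupt context */
	}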
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 55e434feec99..f88d32f8ff7c 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -45,7 +45,8 @@ struct clocksource;
45 * @read: returns a cycle value 45 * @read: returns a cycle value
46 * @mask: bitmask for two's complement 46 * @mask: bitmask for two's complement
47 * subtraction of non 64 bit counters 47 * subtraction of non 64 bit counters
48 * @mult: cycle to nanosecond multiplier 48 * @mult: cycle to nanosecond multiplier (adjusted by NTP)
49 * @mult_orig: cycle to nanosecond multiplier (unadjusted by NTP)
49 * @shift: cycle to nanosecond divisor (power of two) 50 * @shift: cycle to nanosecond divisor (power of two)
50 * @flags: flags describing special properties 51 * @flags: flags describing special properties
51 * @vread: vsyscall based read 52 * @vread: vsyscall based read
@@ -63,6 +64,7 @@ struct clocksource {
63 cycle_t (*read)(void); 64 cycle_t (*read)(void);
64 cycle_t mask; 65 cycle_t mask;
65 u32 mult; 66 u32 mult;
67 u32 mult_orig;
66 u32 shift; 68 u32 shift;
67 unsigned long flags; 69 unsigned long flags;
68 cycle_t (*vread)(void); 70 cycle_t (*vread)(void);
@@ -77,6 +79,7 @@ struct clocksource {
77 /* timekeeping specific data, ignore */ 79 /* timekeeping specific data, ignore */
78 cycle_t cycle_interval; 80 cycle_t cycle_interval;
79 u64 xtime_interval; 81 u64 xtime_interval;
82 u32 raw_interval;
80 /* 83 /*
81 * Second part is written at each timer interrupt 84 * Second part is written at each timer interrupt
82 * Keep it in a different cache line to dirty no 85 * Keep it in a different cache line to dirty no
@@ -85,6 +88,7 @@ struct clocksource {
85 cycle_t cycle_last ____cacheline_aligned_in_smp; 88 cycle_t cycle_last ____cacheline_aligned_in_smp;
86 u64 xtime_nsec; 89 u64 xtime_nsec;
87 s64 error; 90 s64 error;
91 struct timespec raw_time;
88 92
89#ifdef CONFIG_CLOCKSOURCE_WATCHDOG 93#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
90 /* Watchdog related data, used by the framework */ 94 /* Watchdog related data, used by the framework */
@@ -201,17 +205,19 @@ static inline void clocksource_calculate_interval(struct clocksource *c,
201{ 205{
202 u64 tmp; 206 u64 tmp;
203 207
204 /* XXX - All of this could use a whole lot of optimization */ 208 /* Do the ns -> cycle conversion first, using original mult */
205 tmp = length_nsec; 209 tmp = length_nsec;
206 tmp <<= c->shift; 210 tmp <<= c->shift;
207 tmp += c->mult/2; 211 tmp += c->mult_orig/2;
208 do_div(tmp, c->mult); 212 do_div(tmp, c->mult_orig);
209 213
210 c->cycle_interval = (cycle_t)tmp; 214 c->cycle_interval = (cycle_t)tmp;
211 if (c->cycle_interval == 0) 215 if (c->cycle_interval == 0)
212 c->cycle_interval = 1; 216 c->cycle_interval = 1;
213 217
 218	/* Go back from cycles -> shifted ns, this time use ntp adjusted mult */
214 c->xtime_interval = (u64)c->cycle_interval * c->mult; 219 c->xtime_interval = (u64)c->cycle_interval * c->mult;
220 c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
215} 221}
216 222
217 223
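With mult_orig split out, the interval setup converts ns to cycles with the unadjusted multiplier and then derives both the NTP-adjusted xtime_interval and the new raw_interval. A worked sketch with invented numbers, only to show which multiplier goes where:

	/*
	 * Hypothetical clocksource: shift = 22, mult_orig = 4194304 (1 cycle == 1 ns),
	 * mult = 4194308 after a small NTP correction, length_nsec = 1000000 (1 ms).
	 *
	 *   cycle_interval = ((1000000 << 22) + 4194304/2) / 4194304 = 1000000 cycles
	 *   xtime_interval = 1000000 * 4194308         (shifted ns, NTP adjusted)
	 *   raw_interval   = (1000000 * 4194304) >> 22 = 1000000 ns (unadjusted)
	 */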
diff --git a/include/linux/cnt32_to_63.h b/include/linux/cnt32_to_63.h
index 8c0f9505b48c..7605fdd1eb65 100644
--- a/include/linux/cnt32_to_63.h
+++ b/include/linux/cnt32_to_63.h
@@ -16,6 +16,7 @@
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <asm/byteorder.h> 18#include <asm/byteorder.h>
19#include <asm/system.h>
19 20
20/* this is used only to give gcc a clue about good code generation */ 21/* this is used only to give gcc a clue about good code generation */
21union cnt32_to_63 { 22union cnt32_to_63 {
@@ -53,11 +54,19 @@ union cnt32_to_63 {
53 * needed increment. And any race in updating the value in memory is harmless 54 * needed increment. And any race in updating the value in memory is harmless
54 * as the same value would simply be stored more than once. 55 * as the same value would simply be stored more than once.
55 * 56 *
56 * The only restriction for the algorithm to work properly is that this 57 * The restrictions for the algorithm to work properly are:
57 * code must be executed at least once per each half period of the 32-bit 58 *
58 * counter to properly update the state bit in memory. This is usually not a 59 * 1) this code must be called at least once per each half period of the
59 * problem in practice, but if it is then a kernel timer could be scheduled 60 * 32-bit counter;
60 * to manage for this code to be executed often enough. 61 *
62 * 2) this code must not be preempted for a duration longer than the
63 * 32-bit counter half period minus the longest period between two
64 * calls to this code.
65 *
66 * Those requirements ensure proper update to the state bit in memory.
67 * This is usually not a problem in practice, but if it is then a kernel
68 * timer should be scheduled to manage for this code to be executed often
69 * enough.
61 * 70 *
62 * Note that the top bit (bit 63) in the returned value should be considered 71 * Note that the top bit (bit 63) in the returned value should be considered
63 * as garbage. It is not cleared here because callers are likely to use a 72 * as garbage. It is not cleared here because callers are likely to use a
@@ -68,9 +77,10 @@ union cnt32_to_63 {
68 */ 77 */
69#define cnt32_to_63(cnt_lo) \ 78#define cnt32_to_63(cnt_lo) \
70({ \ 79({ \
71 static volatile u32 __m_cnt_hi; \ 80 static u32 __m_cnt_hi; \
72 union cnt32_to_63 __x; \ 81 union cnt32_to_63 __x; \
73 __x.hi = __m_cnt_hi; \ 82 __x.hi = __m_cnt_hi; \
83 smp_rmb(); \
74 __x.lo = (cnt_lo); \ 84 __x.lo = (cnt_lo); \
75 if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \ 85 if (unlikely((s32)(__x.hi ^ __x.lo) < 0)) \
76 __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \ 86 __m_cnt_hi = __x.hi = (__x.hi ^ 0x80000000) + (__x.hi >> 31); \
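A hedged usage sketch: extending a free-running 32-bit up-counter to 63 bits for sched_clock(), assuming a hypothetical readl()-able register at my_counter_base. The new smp_rmb() and the call-frequency rules above are what make this safe:

	#include <linux/types.h>
	#include <linux/io.h>
	#include <linux/cnt32_to_63.h>

	static void __iomem *my_counter_base;	/* hypothetical 32-bit counter */

	unsigned long long sched_clock(void)
	{
		/* must run at least once per half period of the 32-bit counter */
		u64 ticks = cnt32_to_63(readl(my_counter_base));

		/* bit 63 is documented as garbage, so mask it off */
		return ticks & ~(1ULL << 63);	/* scale ticks -> ns as needed */
	}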
diff --git a/include/linux/compat.h b/include/linux/compat.h
index cf8d11cad5ae..e88f3ecf38b4 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -78,7 +78,6 @@ typedef struct {
78 compat_sigset_word sig[_COMPAT_NSIG_WORDS]; 78 compat_sigset_word sig[_COMPAT_NSIG_WORDS];
79} compat_sigset_t; 79} compat_sigset_t;
80 80
81extern int cp_compat_stat(struct kstat *, struct compat_stat __user *);
82extern int get_compat_timespec(struct timespec *, const struct compat_timespec __user *); 81extern int get_compat_timespec(struct timespec *, const struct compat_timespec __user *);
83extern int put_compat_timespec(const struct timespec *, struct compat_timespec __user *); 82extern int put_compat_timespec(const struct timespec *, struct compat_timespec __user *);
84 83
@@ -235,6 +234,11 @@ extern int get_compat_itimerspec(struct itimerspec *dst,
235extern int put_compat_itimerspec(struct compat_itimerspec __user *dst, 234extern int put_compat_itimerspec(struct compat_itimerspec __user *dst,
236 const struct itimerspec *src); 235 const struct itimerspec *src);
237 236
237asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
238 struct timezone __user *tz);
239asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
240 struct timezone __user *tz);
241
238asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); 242asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
239 243
240extern int compat_printk(const char *fmt, ...); 244extern int compat_printk(const char *fmt, ...);
@@ -248,12 +252,10 @@ extern int compat_ptrace_request(struct task_struct *child,
248 compat_long_t request, 252 compat_long_t request,
249 compat_ulong_t addr, compat_ulong_t data); 253 compat_ulong_t addr, compat_ulong_t data);
250 254
251#ifdef __ARCH_WANT_COMPAT_SYS_PTRACE
252extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, 255extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
253 compat_ulong_t addr, compat_ulong_t data); 256 compat_ulong_t addr, compat_ulong_t data);
254asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, 257asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
255 compat_long_t addr, compat_long_t data); 258 compat_long_t addr, compat_long_t data);
256#endif /* __ARCH_WANT_COMPAT_SYS_PTRACE */
257 259
258/* 260/*
259 * epoll (fs/eventpoll.c) compat bits follow ... 261 * epoll (fs/eventpoll.c) compat bits follow ...
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8322141ee480..ea7c6be354b7 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -44,6 +44,8 @@ extern void __chk_io_ptr(const volatile void __iomem *);
44# error Sorry, your compiler is too old/not recognized. 44# error Sorry, your compiler is too old/not recognized.
45#endif 45#endif
46 46
47#define notrace __attribute__((no_instrument_function))
48
47/* Intel compiler defines __GNUC__. So we will overwrite implementations 49/* Intel compiler defines __GNUC__. So we will overwrite implementations
48 * coming from above header files here 50 * coming from above header files here
49 */ 51 */
@@ -57,8 +59,88 @@ extern void __chk_io_ptr(const volatile void __iomem *);
57 * specific implementations come from the above header files 59 * specific implementations come from the above header files
58 */ 60 */
59 61
60#define likely(x) __builtin_expect(!!(x), 1) 62struct ftrace_branch_data {
61#define unlikely(x) __builtin_expect(!!(x), 0) 63 const char *func;
64 const char *file;
65 unsigned line;
66 union {
67 struct {
68 unsigned long correct;
69 unsigned long incorrect;
70 };
71 struct {
72 unsigned long miss;
73 unsigned long hit;
74 };
75 };
76};
77
78/*
79 * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
80 * to disable branch tracing on a per file basis.
81 */
82#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
83void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
84
85#define likely_notrace(x) __builtin_expect(!!(x), 1)
86#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
87
88#define __branch_check__(x, expect) ({ \
89 int ______r; \
90 static struct ftrace_branch_data \
91 __attribute__((__aligned__(4))) \
92 __attribute__((section("_ftrace_annotated_branch"))) \
93 ______f = { \
94 .func = __func__, \
95 .file = __FILE__, \
96 .line = __LINE__, \
97 }; \
98 ______r = likely_notrace(x); \
99 ftrace_likely_update(&______f, ______r, expect); \
100 ______r; \
101 })
102
103/*
104 * Using __builtin_constant_p(x) to ignore cases where the return
105 * value is always the same. This idea is taken from a similar patch
106 * written by Daniel Walker.
107 */
108# ifndef likely
109# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
110# endif
111# ifndef unlikely
112# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
113# endif
114
115#ifdef CONFIG_PROFILE_ALL_BRANCHES
116/*
117 * "Define 'is'", Bill Clinton
118 * "Define 'if'", Steven Rostedt
119 */
120#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \
121 ({ \
122 int ______r; \
123 static struct ftrace_branch_data \
124 __attribute__((__aligned__(4))) \
125 __attribute__((section("_ftrace_branch"))) \
126 ______f = { \
127 .func = __func__, \
128 .file = __FILE__, \
129 .line = __LINE__, \
130 }; \
131 ______r = !!(cond); \
132 if (______r) \
133 ______f.hit++; \
134 else \
135 ______f.miss++; \
136 ______r; \
137 }))
138#endif /* CONFIG_PROFILE_ALL_BRANCHES */
139
140#else
141# define likely(x) __builtin_expect(!!(x), 1)
142# define unlikely(x) __builtin_expect(!!(x), 0)
143#endif
62 144
63/* Optimization barrier */ 145/* Optimization barrier */
64#ifndef barrier 146#ifndef barrier
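When CONFIG_TRACE_BRANCH_PROFILING is set, every non-constant likely()/unlikely() expands through __branch_check__ and bumps a per-call-site ftrace_branch_data record in the _ftrace_annotated_branch section. A hedged sketch of ordinary code that would be profiled, plus the documented per-file opt-out:

	/* define this before any #include to keep a hot file out of branch tracing */
	#define DISABLE_BRANCH_PROFILING

	#include <linux/compiler.h>
	#include <linux/errno.h>

	static int my_consume(const char *buf)
	{
		/*
		 * Without the define above, this unlikely() would also update the
		 * correct/incorrect counters recorded for this file:line.
		 */
		if (unlikely(buf == NULL))
			return -EINVAL;

		return 0;
	}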
diff --git a/include/linux/console.h b/include/linux/console.h
index 248e6e3b9b73..a67a90cf8268 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -153,4 +153,8 @@ void vcs_remove_sysfs(struct tty_struct *tty);
153#define VESA_HSYNC_SUSPEND 2 153#define VESA_HSYNC_SUSPEND 2
154#define VESA_POWERDOWN 3 154#define VESA_POWERDOWN 3
155 155
156#ifdef CONFIG_VGA_CONSOLE
157extern bool vgacon_text_force(void);
158#endif
159
156#endif /* _LINUX_CONSOLE_H */ 160#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index d3219d73f8e6..21e1dd43e52a 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -5,6 +5,9 @@
5 * Cpumasks provide a bitmap suitable for representing the 5 * Cpumasks provide a bitmap suitable for representing the
6 * set of CPU's in a system, one bit position per CPU number. 6 * set of CPU's in a system, one bit position per CPU number.
7 * 7 *
8 * The new cpumask_ ops take a "struct cpumask *"; the old ones
9 * use cpumask_t.
10 *
8 * See detailed comments in the file linux/bitmap.h describing the 11 * See detailed comments in the file linux/bitmap.h describing the
9 * data type on which these cpumasks are based. 12 * data type on which these cpumasks are based.
10 * 13 *
@@ -31,7 +34,7 @@
31 * will span the entire range of NR_CPUS. 34 * will span the entire range of NR_CPUS.
32 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 35 * . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
33 * 36 *
34 * The available cpumask operations are: 37 * The obsolescent cpumask operations are:
35 * 38 *
36 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask 39 * void cpu_set(cpu, mask) turn on bit 'cpu' in mask
37 * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask 40 * void cpu_clear(cpu, mask) turn off bit 'cpu' in mask
@@ -138,7 +141,7 @@
138#include <linux/threads.h> 141#include <linux/threads.h>
139#include <linux/bitmap.h> 142#include <linux/bitmap.h>
140 143
141typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t; 144typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
142extern cpumask_t _unused_cpumask_arg_; 145extern cpumask_t _unused_cpumask_arg_;
143 146
144#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst)) 147#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
@@ -527,4 +530,556 @@ extern cpumask_t cpu_active_map;
527#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map) 530#define for_each_online_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_online_map)
528#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map) 531#define for_each_present_cpu(cpu) for_each_cpu_mask_nr((cpu), cpu_present_map)
529 532
533/* These are the new versions of the cpumask operators: passed by pointer.
534 * The older versions will be implemented in terms of these, then deleted. */
535#define cpumask_bits(maskp) ((maskp)->bits)
536
537#if NR_CPUS <= BITS_PER_LONG
538#define CPU_BITS_ALL \
539{ \
540 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
541}
542
543/* This produces more efficient code. */
544#define nr_cpumask_bits NR_CPUS
545
546#else /* NR_CPUS > BITS_PER_LONG */
547
548#define CPU_BITS_ALL \
549{ \
550 [0 ... BITS_TO_LONGS(NR_CPUS)-2] = ~0UL, \
551 [BITS_TO_LONGS(NR_CPUS)-1] = CPU_MASK_LAST_WORD \
552}
553
554#define nr_cpumask_bits nr_cpu_ids
555#endif /* NR_CPUS > BITS_PER_LONG */
556
557/* verify cpu argument to cpumask_* operators */
558static inline unsigned int cpumask_check(unsigned int cpu)
559{
560#ifdef CONFIG_DEBUG_PER_CPU_MAPS
561 WARN_ON_ONCE(cpu >= nr_cpumask_bits);
562#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
563 return cpu;
564}
565
566#if NR_CPUS == 1
567/* Uniprocessor. Assume all masks are "1". */
568static inline unsigned int cpumask_first(const struct cpumask *srcp)
569{
570 return 0;
571}
572
573/* Valid inputs for n are -1 and 0. */
574static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
575{
576 return n+1;
577}
578
579static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
580{
581 return n+1;
582}
583
584static inline unsigned int cpumask_next_and(int n,
585 const struct cpumask *srcp,
586 const struct cpumask *andp)
587{
588 return n+1;
589}
590
591/* cpu must be a valid cpu, ie 0, so there's no other choice. */
592static inline unsigned int cpumask_any_but(const struct cpumask *mask,
593 unsigned int cpu)
594{
595 return 1;
596}
597
598#define for_each_cpu(cpu, mask) \
599 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
600#define for_each_cpu_and(cpu, mask, and) \
601 for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
602#else
603/**
604 * cpumask_first - get the first cpu in a cpumask
605 * @srcp: the cpumask pointer
606 *
607 * Returns >= nr_cpu_ids if no cpus set.
608 */
609static inline unsigned int cpumask_first(const struct cpumask *srcp)
610{
611 return find_first_bit(cpumask_bits(srcp), nr_cpumask_bits);
612}
613
614/**
615 * cpumask_next - get the next cpu in a cpumask
616 * @n: the cpu prior to the place to search (ie. return will be > @n)
617 * @srcp: the cpumask pointer
618 *
619 * Returns >= nr_cpu_ids if no further cpus set.
620 */
621static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
622{
623 /* -1 is a legal arg here. */
624 if (n != -1)
625 cpumask_check(n);
626 return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
627}
628
629/**
630 * cpumask_next_zero - get the next unset cpu in a cpumask
631 * @n: the cpu prior to the place to search (ie. return will be > @n)
632 * @srcp: the cpumask pointer
633 *
634 * Returns >= nr_cpu_ids if no further cpus unset.
635 */
636static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
637{
638 /* -1 is a legal arg here. */
639 if (n != -1)
640 cpumask_check(n);
641 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
642}
643
644int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
645int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
646
647/**
648 * for_each_cpu - iterate over every cpu in a mask
649 * @cpu: the (optionally unsigned) integer iterator
650 * @mask: the cpumask pointer
651 *
652 * After the loop, cpu is >= nr_cpu_ids.
653 */
654#define for_each_cpu(cpu, mask) \
655 for ((cpu) = -1; \
656 (cpu) = cpumask_next((cpu), (mask)), \
657 (cpu) < nr_cpu_ids;)
658
659/**
660 * for_each_cpu_and - iterate over every cpu in both masks
661 * @cpu: the (optionally unsigned) integer iterator
662 * @mask: the first cpumask pointer
663 * @and: the second cpumask pointer
664 *
665 * This saves a temporary CPU mask in many places. It is equivalent to:
666 * struct cpumask tmp;
667 * cpumask_and(&tmp, &mask, &and);
668 * for_each_cpu(cpu, &tmp)
669 * ...
670 *
671 * After the loop, cpu is >= nr_cpu_ids.
672 */
673#define for_each_cpu_and(cpu, mask, and) \
674 for ((cpu) = -1; \
675 (cpu) = cpumask_next_and((cpu), (mask), (and)), \
676 (cpu) < nr_cpu_ids;)
677#endif /* SMP */
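A small hedged sketch of the pointer-based iterators; both masks are supplied by the caller and the printk messages are purely illustrative:

	#include <linux/kernel.h>
	#include <linux/cpumask.h>

	/* count the cpus present in both masks without building a temporary */
	static unsigned int count_common(const struct cpumask *a,
					 const struct cpumask *b)
	{
		unsigned int cpu, n = 0;

		for_each_cpu_and(cpu, a, b)
			n++;

		return n;
	}

	static void first_demo(const struct cpumask *a)
	{
		unsigned int cpu = cpumask_first(a);

		if (cpu >= nr_cpu_ids)		/* the documented "empty" result */
			printk(KERN_DEBUG "mask is empty\n");
		else
			printk(KERN_DEBUG "first cpu is %u\n", cpu);
	}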
678
679#define CPU_BITS_NONE \
680{ \
681 [0 ... BITS_TO_LONGS(NR_CPUS)-1] = 0UL \
682}
683
684#define CPU_BITS_CPU0 \
685{ \
686 [0] = 1UL \
687}
688
689/**
690 * cpumask_set_cpu - set a cpu in a cpumask
691 * @cpu: cpu number (< nr_cpu_ids)
692 * @dstp: the cpumask pointer
693 */
694static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
695{
696 set_bit(cpumask_check(cpu), cpumask_bits(dstp));
697}
698
699/**
700 * cpumask_clear_cpu - clear a cpu in a cpumask
701 * @cpu: cpu number (< nr_cpu_ids)
702 * @dstp: the cpumask pointer
703 */
704static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
705{
706 clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
707}
708
709/**
710 * cpumask_test_cpu - test for a cpu in a cpumask
711 * @cpu: cpu number (< nr_cpu_ids)
712 * @cpumask: the cpumask pointer
713 *
714 * No static inline type checking - see Subtlety (1) above.
715 */
716#define cpumask_test_cpu(cpu, cpumask) \
717 test_bit(cpumask_check(cpu), (cpumask)->bits)
718
719/**
720 * cpumask_test_and_set_cpu - atomically test and set a cpu in a cpumask
721 * @cpu: cpu number (< nr_cpu_ids)
722 * @cpumask: the cpumask pointer
723 *
724 * test_and_set_bit wrapper for cpumasks.
725 */
726static inline int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
727{
728 return test_and_set_bit(cpumask_check(cpu), cpumask_bits(cpumask));
729}
730
731/**
732 * cpumask_setall - set all cpus (< nr_cpu_ids) in a cpumask
733 * @dstp: the cpumask pointer
734 */
735static inline void cpumask_setall(struct cpumask *dstp)
736{
737 bitmap_fill(cpumask_bits(dstp), nr_cpumask_bits);
738}
739
740/**
741 * cpumask_clear - clear all cpus (< nr_cpu_ids) in a cpumask
742 * @dstp: the cpumask pointer
743 */
744static inline void cpumask_clear(struct cpumask *dstp)
745{
746 bitmap_zero(cpumask_bits(dstp), nr_cpumask_bits);
747}
748
749/**
750 * cpumask_and - *dstp = *src1p & *src2p
751 * @dstp: the cpumask result
752 * @src1p: the first input
753 * @src2p: the second input
754 */
755static inline void cpumask_and(struct cpumask *dstp,
756 const struct cpumask *src1p,
757 const struct cpumask *src2p)
758{
759 bitmap_and(cpumask_bits(dstp), cpumask_bits(src1p),
760 cpumask_bits(src2p), nr_cpumask_bits);
761}
762
763/**
764 * cpumask_or - *dstp = *src1p | *src2p
765 * @dstp: the cpumask result
766 * @src1p: the first input
767 * @src2p: the second input
768 */
769static inline void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p,
770 const struct cpumask *src2p)
771{
772 bitmap_or(cpumask_bits(dstp), cpumask_bits(src1p),
773 cpumask_bits(src2p), nr_cpumask_bits);
774}
775
776/**
777 * cpumask_xor - *dstp = *src1p ^ *src2p
778 * @dstp: the cpumask result
779 * @src1p: the first input
780 * @src2p: the second input
781 */
782static inline void cpumask_xor(struct cpumask *dstp,
783 const struct cpumask *src1p,
784 const struct cpumask *src2p)
785{
786 bitmap_xor(cpumask_bits(dstp), cpumask_bits(src1p),
787 cpumask_bits(src2p), nr_cpumask_bits);
788}
789
790/**
791 * cpumask_andnot - *dstp = *src1p & ~*src2p
792 * @dstp: the cpumask result
793 * @src1p: the first input
794 * @src2p: the second input
795 */
796static inline void cpumask_andnot(struct cpumask *dstp,
797 const struct cpumask *src1p,
798 const struct cpumask *src2p)
799{
800 bitmap_andnot(cpumask_bits(dstp), cpumask_bits(src1p),
801 cpumask_bits(src2p), nr_cpumask_bits);
802}
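As an illustrative sketch (not part of this header), the bitwise helpers compose in the obvious way; for example, computing the CPUs of a policy mask that are currently offline:

static void example_offline_subset(struct cpumask *result,
				   const struct cpumask *allowed)
{
	/* result = allowed & ~online */
	cpumask_andnot(result, allowed, cpu_online_mask);
}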
803
804/**
805 * cpumask_complement - *dstp = ~*srcp
806 * @dstp: the cpumask result
807 * @srcp: the input to invert
808 */
809static inline void cpumask_complement(struct cpumask *dstp,
810 const struct cpumask *srcp)
811{
812 bitmap_complement(cpumask_bits(dstp), cpumask_bits(srcp),
813 nr_cpumask_bits);
814}
815
816/**
817 * cpumask_equal - *src1p == *src2p
818 * @src1p: the first input
819 * @src2p: the second input
820 */
821static inline bool cpumask_equal(const struct cpumask *src1p,
822 const struct cpumask *src2p)
823{
824 return bitmap_equal(cpumask_bits(src1p), cpumask_bits(src2p),
825 nr_cpumask_bits);
826}
827
828/**
829 * cpumask_intersects - (*src1p & *src2p) != 0
830 * @src1p: the first input
831 * @src2p: the second input
832 */
833static inline bool cpumask_intersects(const struct cpumask *src1p,
834 const struct cpumask *src2p)
835{
836 return bitmap_intersects(cpumask_bits(src1p), cpumask_bits(src2p),
837 nr_cpumask_bits);
838}
839
840/**
841 * cpumask_subset - (*src1p & ~*src2p) == 0
842 * @src1p: the first input
843 * @src2p: the second input
844 */
845static inline int cpumask_subset(const struct cpumask *src1p,
846 const struct cpumask *src2p)
847{
848 return bitmap_subset(cpumask_bits(src1p), cpumask_bits(src2p),
849 nr_cpumask_bits);
850}
851
852/**
853 * cpumask_empty - *srcp == 0
854 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are clear.
855 */
856static inline bool cpumask_empty(const struct cpumask *srcp)
857{
858 return bitmap_empty(cpumask_bits(srcp), nr_cpumask_bits);
859}
860
861/**
862 * cpumask_full - *srcp == 0xFFFFFFFF...
863 * @srcp: the cpumask to check that all cpus < nr_cpu_ids are set.
864 */
865static inline bool cpumask_full(const struct cpumask *srcp)
866{
867 return bitmap_full(cpumask_bits(srcp), nr_cpumask_bits);
868}
869
870/**
871 * cpumask_weight - Count of bits in *srcp
872 * @srcp: the cpumask to count bits (< nr_cpu_ids) in.
873 */
874static inline unsigned int cpumask_weight(const struct cpumask *srcp)
875{
876 return bitmap_weight(cpumask_bits(srcp), nr_cpumask_bits);
877}
878
879/**
880 * cpumask_shift_right - *dstp = *srcp >> n
881 * @dstp: the cpumask result
882 * @srcp: the input to shift
883 * @n: the number of bits to shift by
884 */
885static inline void cpumask_shift_right(struct cpumask *dstp,
886 const struct cpumask *srcp, int n)
887{
888 bitmap_shift_right(cpumask_bits(dstp), cpumask_bits(srcp), n,
889 nr_cpumask_bits);
890}
891
892/**
893 * cpumask_shift_left - *dstp = *srcp << n
894 * @dstp: the cpumask result
895 * @srcp: the input to shift
896 * @n: the number of bits to shift by
897 */
898static inline void cpumask_shift_left(struct cpumask *dstp,
899 const struct cpumask *srcp, int n)
900{
901 bitmap_shift_left(cpumask_bits(dstp), cpumask_bits(srcp), n,
902 nr_cpumask_bits);
903}
904
905/**
906 * cpumask_copy - *dstp = *srcp
907 * @dstp: the result
908 * @srcp: the input cpumask
909 */
910static inline void cpumask_copy(struct cpumask *dstp,
911 const struct cpumask *srcp)
912{
913 bitmap_copy(cpumask_bits(dstp), cpumask_bits(srcp), nr_cpumask_bits);
914}
915
916/**
917 * cpumask_any - pick a "random" cpu from *srcp
918 * @srcp: the input cpumask
919 *
920 * Returns >= nr_cpu_ids if no cpus set.
921 */
922#define cpumask_any(srcp) cpumask_first(srcp)
923
924/**
925 * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
926 * @src1p: the first input
927 * @src2p: the second input
928 *
929 * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
930 */
931#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
932
933/**
934 * cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
935 * @mask1: the first input cpumask
936 * @mask2: the second input cpumask
937 *
938 * Returns >= nr_cpu_ids if no cpus set.
939 */
940#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
941
942/**
943 * cpumask_of - the cpumask containing just a given cpu
944 * @cpu: the cpu (< nr_cpu_ids)
945 */
946#define cpumask_of(cpu) (get_cpu_mask(cpu))
947
948/**
949 * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
950 * @bitmap: the bitmap
951 *
952 * There are a few places where cpumask_var_t isn't appropriate and
953 * static cpumasks must be used (eg. very early boot), yet we don't
954 * expose the definition of 'struct cpumask'.
955 *
956 * This does the conversion, and can be used as a constant initializer.
957 */
958#define to_cpumask(bitmap) \
959 ((struct cpumask *)(1 ? (bitmap) \
960 : (void *)sizeof(__check_is_bitmap(bitmap))))
961
962static inline int __check_is_bitmap(const unsigned long *bitmap)
963{
964 return 1;
965}
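A minimal sketch of the early-boot case the comment describes (names made up; this mirrors how cpu_all_mask is built further down):

static DECLARE_BITMAP(example_boot_bits, NR_CPUS) = CPU_BITS_CPU0;
#define example_boot_mask to_cpumask(example_boot_bits)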
966
967/**
968 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
969 *
970 * This will eventually be a runtime variable, depending on nr_cpu_ids.
971 */
972static inline size_t cpumask_size(void)
973{
974 /* FIXME: Once all cpumask assignments are eliminated, this
975 * can be nr_cpumask_bits */
976 return BITS_TO_LONGS(NR_CPUS) * sizeof(long);
977}
978
979/*
980 * cpumask_var_t: struct cpumask for stack usage.
981 *
982 * Oh, the wicked games we play! In order to make kernel coding a
983 * little more difficult, we typedef cpumask_var_t to an array or a
984 * pointer: doing &mask on an array is a noop, so it still works.
985 *
986 * ie.
987 * cpumask_var_t tmpmask;
988 * if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
989 * return -ENOMEM;
990 *
991 * ... use 'tmpmask' like a normal struct cpumask * ...
992 *
993 * free_cpumask_var(tmpmask);
994 */
995#ifdef CONFIG_CPUMASK_OFFSTACK
996typedef struct cpumask *cpumask_var_t;
997
998bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
999void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
1000void free_cpumask_var(cpumask_var_t mask);
1001void free_bootmem_cpumask_var(cpumask_var_t mask);
1002
1003#else
1004typedef struct cpumask cpumask_var_t[1];
1005
1006static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
1007{
1008 return true;
1009}
1010
1011static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
1012{
1013}
1014
1015static inline void free_cpumask_var(cpumask_var_t mask)
1016{
1017}
1018
1019static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
1020{
1021}
1022#endif /* CONFIG_CPUMASK_OFFSTACK */
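Putting the comment's allocation pattern together with a real mask operation, a hedged sketch (the function name is made up; assumes the usual <linux/gfp.h> and <linux/errno.h> definitions):

static int example_count_online(unsigned int *nr)
{
	cpumask_var_t tmpmask;

	if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(tmpmask, cpu_online_mask);
	*nr = cpumask_weight(tmpmask);

	free_cpumask_var(tmpmask);
	return 0;
}

With CONFIG_CPUMASK_OFFSTACK=n the alloc/free pair compiles away and tmpmask lives on the stack, which is why both builds can share this code.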
1023
1024/* The pointer versions of the maps, these will become the primary versions. */
1025#define cpu_possible_mask ((const struct cpumask *)&cpu_possible_map)
1026#define cpu_online_mask ((const struct cpumask *)&cpu_online_map)
1027#define cpu_present_mask ((const struct cpumask *)&cpu_present_map)
1028#define cpu_active_mask ((const struct cpumask *)&cpu_active_map)
1029
1030/* It's common to want to use cpu_all_mask in struct member initializers,
1031 * so it has to refer to an address rather than a pointer. */
1032extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
1033#define cpu_all_mask to_cpumask(cpu_all_bits)
1034
1035/* First bits of cpu_bit_bitmap are in fact unset. */
1036#define cpu_none_mask to_cpumask(cpu_bit_bitmap[0])
1037
1038/* Wrappers for arch boot code to manipulate normally-constant masks */
1039static inline void set_cpu_possible(unsigned int cpu, bool possible)
1040{
1041 if (possible)
1042 cpumask_set_cpu(cpu, &cpu_possible_map);
1043 else
1044 cpumask_clear_cpu(cpu, &cpu_possible_map);
1045}
1046
1047static inline void set_cpu_present(unsigned int cpu, bool present)
1048{
1049 if (present)
1050 cpumask_set_cpu(cpu, &cpu_present_map);
1051 else
1052 cpumask_clear_cpu(cpu, &cpu_present_map);
1053}
1054
1055static inline void set_cpu_online(unsigned int cpu, bool online)
1056{
1057 if (online)
1058 cpumask_set_cpu(cpu, &cpu_online_map);
1059 else
1060 cpumask_clear_cpu(cpu, &cpu_online_map);
1061}
1062
1063static inline void set_cpu_active(unsigned int cpu, bool active)
1064{
1065 if (active)
1066 cpumask_set_cpu(cpu, &cpu_active_map);
1067 else
1068 cpumask_clear_cpu(cpu, &cpu_active_map);
1069}
1070
1071static inline void init_cpu_present(const struct cpumask *src)
1072{
1073 cpumask_copy(&cpu_present_map, src);
1074}
1075
1076static inline void init_cpu_possible(const struct cpumask *src)
1077{
1078 cpumask_copy(&cpu_possible_map, src);
1079}
1080
1081static inline void init_cpu_online(const struct cpumask *src)
1082{
1083 cpumask_copy(&cpu_online_map, src);
1084}
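How architecture boot code might use the wrappers above while enumerating CPUs, as an illustrative sketch (the loop bound is hypothetical):

static void example_register_cpus(unsigned int ncpus)
{
	unsigned int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		set_cpu_possible(cpu, true);
		set_cpu_present(cpu, true);
	}
}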
1085#endif /* __LINUX_CPUMASK_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2691926fb506..8e540d32c9fe 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -74,8 +74,6 @@ static inline int cpuset_do_slab_mem_spread(void)
74 return current->flags & PF_SPREAD_SLAB; 74 return current->flags & PF_SPREAD_SLAB;
75} 75}
76 76
77extern void cpuset_track_online_nodes(void);
78
79extern int current_cpuset_is_being_rebound(void); 77extern int current_cpuset_is_being_rebound(void);
80 78
81extern void rebuild_sched_domains(void); 79extern void rebuild_sched_domains(void);
@@ -151,8 +149,6 @@ static inline int cpuset_do_slab_mem_spread(void)
151 return 0; 149 return 0;
152} 150}
153 151
154static inline void cpuset_track_online_nodes(void) {}
155
156static inline int current_cpuset_is_being_rebound(void) 152static inline int current_cpuset_is_being_rebound(void)
157{ 153{
158 return 0; 154 return 0;
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 025e4f575103..2dac064d8359 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -8,17 +8,12 @@
8#include <linux/proc_fs.h> 8#include <linux/proc_fs.h>
9 9
10#define ELFCORE_ADDR_MAX (-1ULL) 10#define ELFCORE_ADDR_MAX (-1ULL)
11#define ELFCORE_ADDR_ERR (-2ULL)
11 12
12#ifdef CONFIG_PROC_VMCORE
13extern unsigned long long elfcorehdr_addr; 13extern unsigned long long elfcorehdr_addr;
14#else
15static const unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX;
16#endif
17 14
18extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, 15extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
19 unsigned long, int); 16 unsigned long, int);
20extern const struct file_operations proc_vmcore_operations;
21extern struct proc_dir_entry *proc_vmcore;
22 17
23/* Architecture code defines this if there are other possible ELF 18/* Architecture code defines this if there are other possible ELF
24 * machine types, e.g. on bi-arch capable hardware. */ 19 * machine types, e.g. on bi-arch capable hardware. */
@@ -28,10 +23,43 @@ extern struct proc_dir_entry *proc_vmcore;
28 23
29#define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x)) 24#define vmcore_elf_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
30 25
26/*
27 * is_kdump_kernel() checks whether this kernel is booting after a panic of
28 * the previous kernel.  This is determined by checking whether the previous
29 * kernel has passed the ELF core header address on the command line.
30 *
31 * This is not just a test of whether CONFIG_CRASH_DUMP is enabled.  It
32 * returns 1 only if CONFIG_CRASH_DUMP=y and the kernel is booting after a
33 * panic of the previous kernel.
34 */
35
31static inline int is_kdump_kernel(void) 36static inline int is_kdump_kernel(void)
32{ 37{
33 return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0; 38 return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
34} 39}
40
41/* is_vmcore_usable() checks if the kernel is booting after a panic and
42 * the vmcore region is usable.
43 *
44 * This makes use of the fact that due to alignment -2ULL is not
45 * a valid pointer, much in the vein of IS_ERR(), except
46 * dealing directly with an unsigned long long rather than a pointer.
47 */
48
49static inline int is_vmcore_usable(void)
50{
51 return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
52}
53
54/* vmcore_unusable() marks the vmcore as unusable,
55 * without disturbing the logic of is_kdump_kernel()
56 */
57
58static inline void vmcore_unusable(void)
59{
60 if (is_kdump_kernel())
61 elfcorehdr_addr = ELFCORE_ADDR_ERR;
62}
35#else /* !CONFIG_CRASH_DUMP */ 63#else /* !CONFIG_CRASH_DUMP */
36static inline int is_kdump_kernel(void) { return 0; } 64static inline int is_kdump_kernel(void) { return 0; }
37#endif /* CONFIG_CRASH_DUMP */ 65#endif /* CONFIG_CRASH_DUMP */
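A hedged sketch of how a vmcore reader can combine the helpers added here with copy_oldmem_page() declared above (the function name is made up and the pfn/offset handling is simplified):

static ssize_t example_read_oldmem_page(unsigned long pfn, char *buf)
{
	ssize_t n;

	if (!is_vmcore_usable())
		return -ENXIO;

	n = copy_oldmem_page(pfn, buf, PAGE_SIZE, 0, 0);
	if (n < 0)
		vmcore_unusable();	/* give up on /proc/vmcore for this boot */
	return n;
}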
diff --git a/include/linux/crc32c.h b/include/linux/crc32c.h
index 508f512e5a2f..bd8b44d96bdc 100644
--- a/include/linux/crc32c.h
+++ b/include/linux/crc32c.h
@@ -3,9 +3,9 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6extern u32 crc32c_le(u32 crc, unsigned char const *address, size_t length); 6extern u32 crc32c(u32 crc, const void *address, unsigned int length);
7extern u32 crc32c_be(u32 crc, unsigned char const *address, size_t length);
8 7
9#define crc32c(seed, data, length) crc32c_le(seed, (unsigned char const *)data, length) 8/* This macro exists for backwards-compatibility. */
9#define crc32c_le crc32c
10 10
11#endif /* _LINUX_CRC32C_H */ 11#endif /* _LINUX_CRC32C_H */
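For illustration, computing a checksum with the consolidated helper (the function name is made up; ~0 is a common seed, but the right seed depends on the caller's on-disk format):

#include <linux/crc32c.h>

static u32 example_csum(const void *data, unsigned int len)
{
	return crc32c(~0, data, len);
}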
diff --git a/include/linux/cred.h b/include/linux/cred.h
index b69222cc1fd2..3282ee4318e7 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -1,4 +1,4 @@
1/* Credentials management 1/* Credentials management - see Documentation/credentials.txt
2 * 2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
@@ -12,39 +12,335 @@
12#ifndef _LINUX_CRED_H 12#ifndef _LINUX_CRED_H
13#define _LINUX_CRED_H 13#define _LINUX_CRED_H
14 14
15#define get_current_user() (get_uid(current->user)) 15#include <linux/capability.h>
16#include <linux/key.h>
17#include <asm/atomic.h>
16 18
17#define task_uid(task) ((task)->uid) 19struct user_struct;
18#define task_gid(task) ((task)->gid) 20struct cred;
19#define task_euid(task) ((task)->euid) 21struct inode;
20#define task_egid(task) ((task)->egid)
21 22
22#define current_uid() (current->uid) 23/*
23#define current_gid() (current->gid) 24 * COW Supplementary groups list
24#define current_euid() (current->euid) 25 */
25#define current_egid() (current->egid) 26#define NGROUPS_SMALL 32
26#define current_suid() (current->suid) 27#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
27#define current_sgid() (current->sgid) 28
28#define current_fsuid() (current->fsuid) 29struct group_info {
29#define current_fsgid() (current->fsgid) 30 atomic_t usage;
30#define current_cap() (current->cap_effective) 31 int ngroups;
32 int nblocks;
33 gid_t small_block[NGROUPS_SMALL];
34 gid_t *blocks[0];
35};
36
37/**
38 * get_group_info - Get a reference to a group info structure
39 * @group_info: The group info to reference
40 *
41 * This gets a reference to a set of supplementary groups.
42 *
43 * If the caller is accessing a task's credentials, they must hold the RCU read
44 * lock when reading.
45 */
46static inline struct group_info *get_group_info(struct group_info *gi)
47{
48 atomic_inc(&gi->usage);
49 return gi;
50}
51
52/**
53 * put_group_info - Release a reference to a group info structure
54 * @group_info: The group info to release
55 */
56#define put_group_info(group_info) \
57do { \
58 if (atomic_dec_and_test(&(group_info)->usage)) \
59 groups_free(group_info); \
60} while (0)
61
62extern struct group_info *groups_alloc(int);
63extern struct group_info init_groups;
64extern void groups_free(struct group_info *);
65extern int set_current_groups(struct group_info *);
66extern int set_groups(struct cred *, struct group_info *);
67extern int groups_search(const struct group_info *, gid_t);
68
69/* access the groups "array" with this macro */
70#define GROUP_AT(gi, i) \
71 ((gi)->blocks[(i) / NGROUPS_PER_BLOCK][(i) % NGROUPS_PER_BLOCK])
72
73extern int in_group_p(gid_t);
74extern int in_egroup_p(gid_t);
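groups_search() above is the real lookup helper; purely to illustrate GROUP_AT() indexing and the reference rules, a hedged sketch (the function name is made up):

static int example_contains_gid(struct group_info *gi, gid_t gid)
{
	int i, found = 0;

	get_group_info(gi);
	for (i = 0; i < gi->ngroups; i++) {
		if (GROUP_AT(gi, i) == gid) {
			found = 1;
			break;
		}
	}
	put_group_info(gi);
	return found;
}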
75
76/*
77 * The common credentials for a thread group
78 * - shared by CLONE_THREAD
79 */
80#ifdef CONFIG_KEYS
81struct thread_group_cred {
82 atomic_t usage;
83 pid_t tgid; /* thread group process ID */
84 spinlock_t lock;
85 struct key *session_keyring; /* keyring inherited over fork */
86 struct key *process_keyring; /* keyring private to this process */
87 struct rcu_head rcu; /* RCU deletion hook */
88};
89#endif
90
91/*
92 * The security context of a task
93 *
94 * The parts of the context break down into two categories:
95 *
96 * (1) The objective context of a task. These parts are used when some other
97 * task is attempting to affect this one.
98 *
99 * (2) The subjective context. These details are used when the task is acting
100 * upon another object, be that a file, a task, a key or whatever.
101 *
102 * Note that some members of this structure belong to both categories - the
103 * LSM security pointer for instance.
104 *
105 * A task has two security pointers. task->real_cred points to the objective
106 * context that defines that task's actual details. The objective part of this
107 * context is used whenever that task is acted upon.
108 *
109 * task->cred points to the subjective context that defines the details of how
110 * that task is going to act upon another object. This may be overridden
111 * temporarily to point to another security context, but normally points to the
112 * same context as task->real_cred.
113 */
114struct cred {
115 atomic_t usage;
116 uid_t uid; /* real UID of the task */
117 gid_t gid; /* real GID of the task */
118 uid_t suid; /* saved UID of the task */
119 gid_t sgid; /* saved GID of the task */
120 uid_t euid; /* effective UID of the task */
121 gid_t egid; /* effective GID of the task */
122 uid_t fsuid; /* UID for VFS ops */
123 gid_t fsgid; /* GID for VFS ops */
124 unsigned securebits; /* SUID-less security management */
125 kernel_cap_t cap_inheritable; /* caps our children can inherit */
126 kernel_cap_t cap_permitted; /* caps we're permitted */
127 kernel_cap_t cap_effective; /* caps we can actually use */
128 kernel_cap_t cap_bset; /* capability bounding set */
129#ifdef CONFIG_KEYS
130 unsigned char jit_keyring; /* default keyring to attach requested
131 * keys to */
132 struct key *thread_keyring; /* keyring private to this thread */
133 struct key *request_key_auth; /* assumed request_key authority */
134 struct thread_group_cred *tgcred; /* thread-group shared credentials */
135#endif
136#ifdef CONFIG_SECURITY
137 void *security; /* subjective LSM security */
138#endif
139 struct user_struct *user; /* real user ID subscription */
140 struct group_info *group_info; /* supplementary groups for euid/fsgid */
141 struct rcu_head rcu; /* RCU deletion hook */
142};
143
144extern void __put_cred(struct cred *);
145extern int copy_creds(struct task_struct *, unsigned long);
146extern struct cred *prepare_creds(void);
147extern struct cred *prepare_exec_creds(void);
148extern struct cred *prepare_usermodehelper_creds(void);
149extern int commit_creds(struct cred *);
150extern void abort_creds(struct cred *);
151extern const struct cred *override_creds(const struct cred *);
152extern void revert_creds(const struct cred *);
153extern struct cred *prepare_kernel_cred(struct task_struct *);
154extern int change_create_files_as(struct cred *, struct inode *);
155extern int set_security_override(struct cred *, u32);
156extern int set_security_override_from_ctx(struct cred *, const char *);
157extern int set_create_files_as(struct cred *, struct inode *);
158extern void __init cred_init(void);
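A hedged sketch of the temporary subjective-context override described above (names made up; override_creds()/revert_creds() are the declarations a few lines up):

static long example_act_as(const struct cred *new_cred,
			   long (*op)(void *), void *arg)
{
	const struct cred *old_cred = override_creds(new_cred);
	long ret = op(arg);

	revert_creds(old_cred);
	return ret;
}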
159
160/**
161 * get_new_cred - Get a reference on a new set of credentials
162 * @cred: The new credentials to reference
163 *
164 * Get a reference on the specified set of new credentials. The caller must
165 * release the reference.
166 */
167static inline struct cred *get_new_cred(struct cred *cred)
168{
169 atomic_inc(&cred->usage);
170 return cred;
171}
172
173/**
174 * get_cred - Get a reference on a set of credentials
175 * @cred: The credentials to reference
176 *
177 * Get a reference on the specified set of credentials. The caller must
178 * release the reference.
179 *
180 * This is used to deal with a committed set of credentials. Although the
181 * pointer is const, this will temporarily discard the const and increment the
182 * usage count. The purpose of this is to attempt to catch at compile time the
183 * accidental alteration of a set of credentials that should be considered
184 * immutable.
185 */
186static inline const struct cred *get_cred(const struct cred *cred)
187{
188 return get_new_cred((struct cred *) cred);
189}
190
191/**
192 * put_cred - Release a reference to a set of credentials
193 * @cred: The credentials to release
194 *
195 * Release a reference to a set of credentials, deleting them when the last ref
196 * is released.
197 *
198 * This takes a const pointer to a set of credentials because the credentials
199 * on task_struct are attached by const pointers to prevent accidental
200 * alteration of otherwise immutable credential sets.
201 */
202static inline void put_cred(const struct cred *_cred)
203{
204 struct cred *cred = (struct cred *) _cred;
205
206 BUG_ON(atomic_read(&(cred)->usage) <= 0);
207 if (atomic_dec_and_test(&(cred)->usage))
208 __put_cred(cred);
209}
210
211/**
212 * current_cred - Access the current task's subjective credentials
213 *
214 * Access the subjective credentials of the current task.
215 */
216#define current_cred() \
217 (current->cred)
218
219/**
220 * __task_cred - Access a task's objective credentials
221 * @task: The task to query
222 *
223 * Access the objective credentials of a task. The caller must hold the RCU
224 * readlock.
225 *
226 * The caller must make sure task doesn't go away, either by holding a ref on
227 * task or by holding tasklist_lock to prevent it from being unlinked.
228 */
229#define __task_cred(task) \
230 ((const struct cred *)(rcu_dereference((task)->real_cred)))
231
232/**
233 * get_task_cred - Get another task's objective credentials
234 * @task: The task to query
235 *
236 * Get the objective credentials of a task, pinning them so that they can't go
237 * away. Accessing a task's credentials directly is not permitted.
238 *
239 * The caller must make sure task doesn't go away, either by holding a ref on
240 * task or by holding tasklist_lock to prevent it from being unlinked.
241 */
242#define get_task_cred(task) \
243({ \
244 struct cred *__cred; \
245 rcu_read_lock(); \
246 __cred = (struct cred *) __task_cred((task)); \
247 get_cred(__cred); \
248 rcu_read_unlock(); \
249 __cred; \
250})
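For a single field the task_cred_xxx() accessors below are lighter-weight; this hedged sketch (the function name is made up) just shows the pin/unpin pattern:

static uid_t example_task_owner(struct task_struct *task)
{
	const struct cred *cred = get_task_cred(task);
	uid_t uid = cred->uid;

	put_cred(cred);
	return uid;
}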
251
252/**
253 * get_current_cred - Get the current task's subjective credentials
254 *
255 * Get the subjective credentials of the current task, pinning them so that
256 * they can't go away. Accessing the current task's credentials directly is
257 * not permitted.
258 */
259#define get_current_cred() \
260 (get_cred(current_cred()))
261
262/**
263 * get_current_user - Get the current task's user_struct
264 *
265 * Get the user record of the current task, pinning it so that it can't go
266 * away.
267 */
268#define get_current_user() \
269({ \
270 struct user_struct *__u; \
271 struct cred *__cred; \
272 __cred = (struct cred *) current_cred(); \
273 __u = get_uid(__cred->user); \
274 __u; \
275})
276
277/**
278 * get_current_groups - Get the current task's supplementary group list
279 *
280 * Get the supplementary group list of the current task, pinning it so that it
281 * can't go away.
282 */
283#define get_current_groups() \
284({ \
285 struct group_info *__groups; \
286 struct cred *__cred; \
287 __cred = (struct cred *) current_cred(); \
288 __groups = get_group_info(__cred->group_info); \
289 __groups; \
290})
291
292#define task_cred_xxx(task, xxx) \
293({ \
294 __typeof__(((struct cred *)NULL)->xxx) ___val; \
295 rcu_read_lock(); \
296 ___val = __task_cred((task))->xxx; \
297 rcu_read_unlock(); \
298 ___val; \
299})
300
301#define task_uid(task) (task_cred_xxx((task), uid))
302#define task_euid(task) (task_cred_xxx((task), euid))
303
304#define current_cred_xxx(xxx) \
305({ \
306 current->cred->xxx; \
307})
308
309#define current_uid() (current_cred_xxx(uid))
310#define current_gid() (current_cred_xxx(gid))
311#define current_euid() (current_cred_xxx(euid))
312#define current_egid() (current_cred_xxx(egid))
313#define current_suid() (current_cred_xxx(suid))
314#define current_sgid() (current_cred_xxx(sgid))
315#define current_fsuid() (current_cred_xxx(fsuid))
316#define current_fsgid() (current_cred_xxx(fsgid))
317#define current_cap() (current_cred_xxx(cap_effective))
318#define current_user() (current_cred_xxx(user))
319#define current_user_ns() (current_cred_xxx(user)->user_ns)
320#define current_security() (current_cred_xxx(security))
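These accessors replace direct current->fsuid-style dereferences; an illustrative sketch of a typical ownership check (the function name is made up; assumes <linux/fs.h> for struct inode):

static int example_owns_inode(const struct inode *inode)
{
	return current_fsuid() == inode->i_uid;
}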
31 321
32#define current_uid_gid(_uid, _gid) \ 322#define current_uid_gid(_uid, _gid) \
33do { \ 323do { \
34 *(_uid) = current->uid; \ 324 const struct cred *__cred; \
35 *(_gid) = current->gid; \ 325 __cred = current_cred(); \
326 *(_uid) = __cred->uid; \
327 *(_gid) = __cred->gid; \
36} while(0) 328} while(0)
37 329
38#define current_euid_egid(_uid, _gid) \ 330#define current_euid_egid(_euid, _egid) \
39do { \ 331do { \
40 *(_uid) = current->euid; \ 332 const struct cred *__cred; \
41 *(_gid) = current->egid; \ 333 __cred = current_cred(); \
334 *(_euid) = __cred->euid; \
335 *(_egid) = __cred->egid; \
42} while(0) 336} while(0)
43 337
44#define current_fsuid_fsgid(_uid, _gid) \ 338#define current_fsuid_fsgid(_fsuid, _fsgid) \
45do { \ 339do { \
46 *(_uid) = current->fsuid; \ 340 const struct cred *__cred; \
47 *(_gid) = current->fsgid; \ 341 __cred = current_cred(); \
342 *(_fsuid) = __cred->fsuid; \
343 *(_fsgid) = __cred->fsgid; \
48} while(0) 344} while(0)
49 345
50#endif /* _LINUX_CRED_H */ 346#endif /* _LINUX_CRED_H */
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 3d2317e4af2e..3bacd71509fb 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -36,7 +36,8 @@
36#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005 36#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
37#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006 37#define CRYPTO_ALG_TYPE_GIVCIPHER 0x00000006
38#define CRYPTO_ALG_TYPE_DIGEST 0x00000008 38#define CRYPTO_ALG_TYPE_DIGEST 0x00000008
39#define CRYPTO_ALG_TYPE_HASH 0x00000009 39#define CRYPTO_ALG_TYPE_HASH 0x00000008
40#define CRYPTO_ALG_TYPE_SHASH 0x00000009
40#define CRYPTO_ALG_TYPE_AHASH 0x0000000a 41#define CRYPTO_ALG_TYPE_AHASH 0x0000000a
41#define CRYPTO_ALG_TYPE_RNG 0x0000000c 42#define CRYPTO_ALG_TYPE_RNG 0x0000000c
42 43
@@ -220,6 +221,7 @@ struct ablkcipher_alg {
220 221
221struct ahash_alg { 222struct ahash_alg {
222 int (*init)(struct ahash_request *req); 223 int (*init)(struct ahash_request *req);
224 int (*reinit)(struct ahash_request *req);
223 int (*update)(struct ahash_request *req); 225 int (*update)(struct ahash_request *req);
224 int (*final)(struct ahash_request *req); 226 int (*final)(struct ahash_request *req);
225 int (*digest)(struct ahash_request *req); 227 int (*digest)(struct ahash_request *req);
@@ -480,6 +482,8 @@ struct crypto_tfm {
480 struct compress_tfm compress; 482 struct compress_tfm compress;
481 struct rng_tfm rng; 483 struct rng_tfm rng;
482 } crt_u; 484 } crt_u;
485
486 void (*exit)(struct crypto_tfm *tfm);
483 487
484 struct crypto_alg *__crt_alg; 488 struct crypto_alg *__crt_alg;
485 489
@@ -544,7 +548,9 @@ struct crypto_attr_u32 {
544 * Transform user interface. 548 * Transform user interface.
545 */ 549 */
546 550
547struct crypto_tfm *crypto_alloc_tfm(const char *alg_name, u32 tfm_flags); 551struct crypto_tfm *crypto_alloc_tfm(const char *alg_name,
552 const struct crypto_type *frontend,
553 u32 type, u32 mask);
548struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask); 554struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
549void crypto_free_tfm(struct crypto_tfm *tfm); 555void crypto_free_tfm(struct crypto_tfm *tfm);
550 556
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index efba1de629ac..a37359d0bad1 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -228,9 +228,9 @@ extern void d_delete(struct dentry *);
228 228
229/* allocate/de-allocate */ 229/* allocate/de-allocate */
230extern struct dentry * d_alloc(struct dentry *, const struct qstr *); 230extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
231extern struct dentry * d_alloc_anon(struct inode *);
232extern struct dentry * d_splice_alias(struct inode *, struct dentry *); 231extern struct dentry * d_splice_alias(struct inode *, struct dentry *);
233extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); 232extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *);
233extern struct dentry * d_obtain_alias(struct inode *);
234extern void shrink_dcache_sb(struct super_block *); 234extern void shrink_dcache_sb(struct super_block *);
235extern void shrink_dcache_parent(struct dentry *); 235extern void shrink_dcache_parent(struct dentry *);
236extern void shrink_dcache_for_umount(struct super_block *); 236extern void shrink_dcache_for_umount(struct super_block *);
@@ -287,6 +287,7 @@ static inline struct dentry *d_add_unique(struct dentry *entry, struct inode *in
287 287
288/* used for rename() and baskets */ 288/* used for rename() and baskets */
289extern void d_move(struct dentry *, struct dentry *); 289extern void d_move(struct dentry *, struct dentry *);
290extern struct dentry *d_ancestor(struct dentry *, struct dentry *);
290 291
291/* appendix may either be NULL or be used for transname suffixes */ 292/* appendix may either be NULL or be used for transname suffixes */
292extern struct dentry * d_lookup(struct dentry *, struct qstr *); 293extern struct dentry * d_lookup(struct dentry *, struct qstr *);
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
new file mode 100644
index 000000000000..b0ef274e0031
--- /dev/null
+++ b/include/linux/dcbnl.h
@@ -0,0 +1,340 @@
1/*
2 * Copyright (c) 2008, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Author: Lucy Liu <lucy.liu@intel.com>
18 */
19
20#ifndef __LINUX_DCBNL_H__
21#define __LINUX_DCBNL_H__
22
23#define DCB_PROTO_VERSION 1
24
25struct dcbmsg {
26 unsigned char dcb_family;
27 __u8 cmd;
28 __u16 dcb_pad;
29};
30
31/**
32 * enum dcbnl_commands - supported DCB commands
33 *
34 * @DCB_CMD_UNDEFINED: unspecified command to catch errors
35 * @DCB_CMD_GSTATE: request the state of DCB in the device
36 * @DCB_CMD_SSTATE: set the state of DCB in the device
37 * @DCB_CMD_PGTX_GCFG: request the priority group configuration for Tx
38 * @DCB_CMD_PGTX_SCFG: set the priority group configuration for Tx
39 * @DCB_CMD_PGRX_GCFG: request the priority group configuration for Rx
40 * @DCB_CMD_PGRX_SCFG: set the priority group configuration for Rx
41 * @DCB_CMD_PFC_GCFG: request the priority flow control configuration
42 * @DCB_CMD_PFC_SCFG: set the priority flow control configuration
43 * @DCB_CMD_SET_ALL: apply all changes to the underlying device
44 * @DCB_CMD_GPERM_HWADDR: get the permanent MAC address of the underlying
45 * device. Only useful when using bonding.
46 * @DCB_CMD_GCAP: request the DCB capabilities of the device
47 * @DCB_CMD_GNUMTCS: get the number of traffic classes currently supported
48 * @DCB_CMD_SNUMTCS: set the number of traffic classes
49 * @DCB_CMD_BCN_GCFG: request the backward congestion notification configuration
50 * @DCB_CMD_BCN_SCFG: set the backward congestion notification configuration
51 */
52enum dcbnl_commands {
53 DCB_CMD_UNDEFINED,
54
55 DCB_CMD_GSTATE,
56 DCB_CMD_SSTATE,
57
58 DCB_CMD_PGTX_GCFG,
59 DCB_CMD_PGTX_SCFG,
60 DCB_CMD_PGRX_GCFG,
61 DCB_CMD_PGRX_SCFG,
62
63 DCB_CMD_PFC_GCFG,
64 DCB_CMD_PFC_SCFG,
65
66 DCB_CMD_SET_ALL,
67
68 DCB_CMD_GPERM_HWADDR,
69
70 DCB_CMD_GCAP,
71
72 DCB_CMD_GNUMTCS,
73 DCB_CMD_SNUMTCS,
74
75 DCB_CMD_PFC_GSTATE,
76 DCB_CMD_PFC_SSTATE,
77
78 DCB_CMD_BCN_GCFG,
79 DCB_CMD_BCN_SCFG,
80
81 __DCB_CMD_ENUM_MAX,
82 DCB_CMD_MAX = __DCB_CMD_ENUM_MAX - 1,
83};
84
85/**
86 * enum dcbnl_attrs - DCB top-level netlink attributes
87 *
88 * @DCB_ATTR_UNDEFINED: unspecified attribute to catch errors
89 * @DCB_ATTR_IFNAME: interface name of the underlying device (NLA_STRING)
90 * @DCB_ATTR_STATE: enable state of DCB in the device (NLA_U8)
91 * @DCB_ATTR_PFC_STATE: enable state of PFC in the device (NLA_U8)
92 * @DCB_ATTR_PFC_CFG: priority flow control configuration (NLA_NESTED)
93 * @DCB_ATTR_NUM_TC: number of traffic classes supported in the device (NLA_U8)
94 * @DCB_ATTR_PG_CFG: priority group configuration (NLA_NESTED)
95 * @DCB_ATTR_SET_ALL: bool to commit changes to hardware or not (NLA_U8)
96 * @DCB_ATTR_PERM_HWADDR: MAC address of the physical device (NLA_NESTED)
97 * @DCB_ATTR_CAP: DCB capabilities of the device (NLA_NESTED)
98 * @DCB_ATTR_NUMTCS: number of traffic classes supported (NLA_NESTED)
99 * @DCB_ATTR_BCN: backward congestion notification configuration (NLA_NESTED)
100 */
101enum dcbnl_attrs {
102 DCB_ATTR_UNDEFINED,
103
104 DCB_ATTR_IFNAME,
105 DCB_ATTR_STATE,
106 DCB_ATTR_PFC_STATE,
107 DCB_ATTR_PFC_CFG,
108 DCB_ATTR_NUM_TC,
109 DCB_ATTR_PG_CFG,
110 DCB_ATTR_SET_ALL,
111 DCB_ATTR_PERM_HWADDR,
112 DCB_ATTR_CAP,
113 DCB_ATTR_NUMTCS,
114 DCB_ATTR_BCN,
115
116 __DCB_ATTR_ENUM_MAX,
117 DCB_ATTR_MAX = __DCB_ATTR_ENUM_MAX - 1,
118};
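As an illustrative sketch (not part of this header), a netlink attribute policy covering a few of the attributes documented above; the array name is made up, while struct nla_policy and the NLA_* types come from <net/netlink.h> and IFNAMSIZ from <linux/if.h>:

static const struct nla_policy example_dcb_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]   = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[DCB_ATTR_STATE]    = { .type = NLA_U8 },
	[DCB_ATTR_PFC_CFG]  = { .type = NLA_NESTED },
	[DCB_ATTR_PG_CFG]   = { .type = NLA_NESTED },
	[DCB_ATTR_SET_ALL]  = { .type = NLA_U8 },
};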
119
120/**
121 * enum dcbnl_pfc_attrs - DCB Priority Flow Control user priority nested attrs
122 *
123 * @DCB_PFC_UP_ATTR_UNDEFINED: unspecified attribute to catch errors
124 * @DCB_PFC_UP_ATTR_0: Priority Flow Control value for User Priority 0 (NLA_U8)
125 * @DCB_PFC_UP_ATTR_1: Priority Flow Control value for User Priority 1 (NLA_U8)
126 * @DCB_PFC_UP_ATTR_2: Priority Flow Control value for User Priority 2 (NLA_U8)
127 * @DCB_PFC_UP_ATTR_3: Priority Flow Control value for User Priority 3 (NLA_U8)
128 * @DCB_PFC_UP_ATTR_4: Priority Flow Control value for User Priority 4 (NLA_U8)
129 * @DCB_PFC_UP_ATTR_5: Priority Flow Control value for User Priority 5 (NLA_U8)
130 * @DCB_PFC_UP_ATTR_6: Priority Flow Control value for User Priority 6 (NLA_U8)
131 * @DCB_PFC_UP_ATTR_7: Priority Flow Control value for User Priority 7 (NLA_U8)
132 * @DCB_PFC_UP_ATTR_MAX: highest attribute number currently defined
133 * @DCB_PFC_UP_ATTR_ALL: apply to all priority flow control attrs (NLA_FLAG)
134 *
135 */
136enum dcbnl_pfc_up_attrs {
137 DCB_PFC_UP_ATTR_UNDEFINED,
138
139 DCB_PFC_UP_ATTR_0,
140 DCB_PFC_UP_ATTR_1,
141 DCB_PFC_UP_ATTR_2,
142 DCB_PFC_UP_ATTR_3,
143 DCB_PFC_UP_ATTR_4,
144 DCB_PFC_UP_ATTR_5,
145 DCB_PFC_UP_ATTR_6,
146 DCB_PFC_UP_ATTR_7,
147 DCB_PFC_UP_ATTR_ALL,
148
149 __DCB_PFC_UP_ATTR_ENUM_MAX,
150 DCB_PFC_UP_ATTR_MAX = __DCB_PFC_UP_ATTR_ENUM_MAX - 1,
151};
152
153/**
154 * enum dcbnl_pg_attrs - DCB Priority Group attributes
155 *
156 * @DCB_PG_ATTR_UNDEFINED: unspecified attribute to catch errors
157 * @DCB_PG_ATTR_TC_0: Priority Group Traffic Class 0 configuration (NLA_NESTED)
158 * @DCB_PG_ATTR_TC_1: Priority Group Traffic Class 1 configuration (NLA_NESTED)
159 * @DCB_PG_ATTR_TC_2: Priority Group Traffic Class 2 configuration (NLA_NESTED)
160 * @DCB_PG_ATTR_TC_3: Priority Group Traffic Class 3 configuration (NLA_NESTED)
161 * @DCB_PG_ATTR_TC_4: Priority Group Traffic Class 4 configuration (NLA_NESTED)
162 * @DCB_PG_ATTR_TC_5: Priority Group Traffic Class 5 configuration (NLA_NESTED)
163 * @DCB_PG_ATTR_TC_6: Priority Group Traffic Class 6 configuration (NLA_NESTED)
164 * @DCB_PG_ATTR_TC_7: Priority Group Traffic Class 7 configuration (NLA_NESTED)
165 * @DCB_PG_ATTR_TC_MAX: highest attribute number currently defined
166 * @DCB_PG_ATTR_TC_ALL: apply to all traffic classes (NLA_NESTED)
167 * @DCB_PG_ATTR_BW_ID_0: Percent of link bandwidth for Priority Group 0 (NLA_U8)
168 * @DCB_PG_ATTR_BW_ID_1: Percent of link bandwidth for Priority Group 1 (NLA_U8)
169 * @DCB_PG_ATTR_BW_ID_2: Percent of link bandwidth for Priority Group 2 (NLA_U8)
170 * @DCB_PG_ATTR_BW_ID_3: Percent of link bandwidth for Priority Group 3 (NLA_U8)
171 * @DCB_PG_ATTR_BW_ID_4: Percent of link bandwidth for Priority Group 4 (NLA_U8)
172 * @DCB_PG_ATTR_BW_ID_5: Percent of link bandwidth for Priority Group 5 (NLA_U8)
173 * @DCB_PG_ATTR_BW_ID_6: Percent of link bandwidth for Priority Group 6 (NLA_U8)
174 * @DCB_PG_ATTR_BW_ID_7: Percent of link bandwidth for Priority Group 7 (NLA_U8)
175 * @DCB_PG_ATTR_BW_ID_MAX: highest attribute number currently defined
176 * @DCB_PG_ATTR_BW_ID_ALL: apply to all priority groups (NLA_FLAG)
177 *
178 */
179enum dcbnl_pg_attrs {
180 DCB_PG_ATTR_UNDEFINED,
181
182 DCB_PG_ATTR_TC_0,
183 DCB_PG_ATTR_TC_1,
184 DCB_PG_ATTR_TC_2,
185 DCB_PG_ATTR_TC_3,
186 DCB_PG_ATTR_TC_4,
187 DCB_PG_ATTR_TC_5,
188 DCB_PG_ATTR_TC_6,
189 DCB_PG_ATTR_TC_7,
190 DCB_PG_ATTR_TC_MAX,
191 DCB_PG_ATTR_TC_ALL,
192
193 DCB_PG_ATTR_BW_ID_0,
194 DCB_PG_ATTR_BW_ID_1,
195 DCB_PG_ATTR_BW_ID_2,
196 DCB_PG_ATTR_BW_ID_3,
197 DCB_PG_ATTR_BW_ID_4,
198 DCB_PG_ATTR_BW_ID_5,
199 DCB_PG_ATTR_BW_ID_6,
200 DCB_PG_ATTR_BW_ID_7,
201 DCB_PG_ATTR_BW_ID_MAX,
202 DCB_PG_ATTR_BW_ID_ALL,
203
204 __DCB_PG_ATTR_ENUM_MAX,
205 DCB_PG_ATTR_MAX = __DCB_PG_ATTR_ENUM_MAX - 1,
206};
207
208/**
209 * enum dcbnl_tc_attrs - DCB Traffic Class attributes
210 *
211 * @DCB_TC_ATTR_PARAM_UNDEFINED: unspecified attribute to catch errors
212 * @DCB_TC_ATTR_PARAM_PGID: (NLA_U8) Priority group the traffic class belongs to
213 * Valid values are: 0-7
214 * @DCB_TC_ATTR_PARAM_UP_MAPPING: (NLA_U8) Traffic class to user priority map
215 * Some devices may not support changing the
216 * user priority map of a TC.
217 * @DCB_TC_ATTR_PARAM_STRICT_PRIO: (NLA_U8) Strict priority setting
218 * 0 - none
219 * 1 - group strict
220 * 2 - link strict
221 * @DCB_TC_ATTR_PARAM_BW_PCT: optional - (NLA_U8) If supported by the device and
222 * not configured to use link strict priority,
223 * this is the percentage of bandwidth of the
224 * priority group this traffic class belongs to
225 * @DCB_TC_ATTR_PARAM_ALL: (NLA_FLAG) all traffic class parameters
226 *
227 */
228enum dcbnl_tc_attrs {
229 DCB_TC_ATTR_PARAM_UNDEFINED,
230
231 DCB_TC_ATTR_PARAM_PGID,
232 DCB_TC_ATTR_PARAM_UP_MAPPING,
233 DCB_TC_ATTR_PARAM_STRICT_PRIO,
234 DCB_TC_ATTR_PARAM_BW_PCT,
235 DCB_TC_ATTR_PARAM_ALL,
236
237 __DCB_TC_ATTR_PARAM_ENUM_MAX,
238 DCB_TC_ATTR_PARAM_MAX = __DCB_TC_ATTR_PARAM_ENUM_MAX - 1,
239};
240
241/**
242 * enum dcbnl_cap_attrs - DCB Capability attributes
243 *
244 * @DCB_CAP_ATTR_UNDEFINED: unspecified attribute to catch errors
245 * @DCB_CAP_ATTR_ALL: (NLA_FLAG) all capability parameters
246 * @DCB_CAP_ATTR_PG: (NLA_U8) device supports Priority Groups
247 * @DCB_CAP_ATTR_PFC: (NLA_U8) device supports Priority Flow Control
248 * @DCB_CAP_ATTR_UP2TC: (NLA_U8) device supports user priority to
249 * traffic class mapping
250 * @DCB_CAP_ATTR_PG_TCS: (NLA_U8) bitmap where each bit represents a
251 * number of traffic classes the device
252 * can be configured to use for Priority Groups
253 * @DCB_CAP_ATTR_PFC_TCS: (NLA_U8) bitmap where each bit represents a
254 * number of traffic classes the device can be
255 * configured to use for Priority Flow Control
256 * @DCB_CAP_ATTR_GSP: (NLA_U8) device supports group strict priority
257 * @DCB_CAP_ATTR_BCN: (NLA_U8) device supports Backwards Congestion
258 * Notification
259 */
260enum dcbnl_cap_attrs {
261 DCB_CAP_ATTR_UNDEFINED,
262 DCB_CAP_ATTR_ALL,
263 DCB_CAP_ATTR_PG,
264 DCB_CAP_ATTR_PFC,
265 DCB_CAP_ATTR_UP2TC,
266 DCB_CAP_ATTR_PG_TCS,
267 DCB_CAP_ATTR_PFC_TCS,
268 DCB_CAP_ATTR_GSP,
269 DCB_CAP_ATTR_BCN,
270
271 __DCB_CAP_ATTR_ENUM_MAX,
272 DCB_CAP_ATTR_MAX = __DCB_CAP_ATTR_ENUM_MAX - 1,
273};
274
275/**
276 * enum dcbnl_numtcs_attrs - number of traffic classes
277 *
278 * @DCB_NUMTCS_ATTR_UNDEFINED: unspecified attribute to catch errors
279 * @DCB_NUMTCS_ATTR_ALL: (NLA_FLAG) all traffic class attributes
280 * @DCB_NUMTCS_ATTR_PG: (NLA_U8) number of traffic classes used for
281 * priority groups
282 * @DCB_NUMTCS_ATTR_PFC: (NLA_U8) number of traffic classes which can
283 * support priority flow control
284 */
285enum dcbnl_numtcs_attrs {
286 DCB_NUMTCS_ATTR_UNDEFINED,
287 DCB_NUMTCS_ATTR_ALL,
288 DCB_NUMTCS_ATTR_PG,
289 DCB_NUMTCS_ATTR_PFC,
290
291 __DCB_NUMTCS_ATTR_ENUM_MAX,
292 DCB_NUMTCS_ATTR_MAX = __DCB_NUMTCS_ATTR_ENUM_MAX - 1,
293};
294
295enum dcbnl_bcn_attrs{
296 DCB_BCN_ATTR_UNDEFINED = 0,
297
298 DCB_BCN_ATTR_RP_0,
299 DCB_BCN_ATTR_RP_1,
300 DCB_BCN_ATTR_RP_2,
301 DCB_BCN_ATTR_RP_3,
302 DCB_BCN_ATTR_RP_4,
303 DCB_BCN_ATTR_RP_5,
304 DCB_BCN_ATTR_RP_6,
305 DCB_BCN_ATTR_RP_7,
306 DCB_BCN_ATTR_RP_ALL,
307
308 DCB_BCN_ATTR_BCNA_0,
309 DCB_BCN_ATTR_BCNA_1,
310 DCB_BCN_ATTR_ALPHA,
311 DCB_BCN_ATTR_BETA,
312 DCB_BCN_ATTR_GD,
313 DCB_BCN_ATTR_GI,
314 DCB_BCN_ATTR_TMAX,
315 DCB_BCN_ATTR_TD,
316 DCB_BCN_ATTR_RMIN,
317 DCB_BCN_ATTR_W,
318 DCB_BCN_ATTR_RD,
319 DCB_BCN_ATTR_RU,
320 DCB_BCN_ATTR_WRTT,
321 DCB_BCN_ATTR_RI,
322 DCB_BCN_ATTR_C,
323 DCB_BCN_ATTR_ALL,
324
325 __DCB_BCN_ATTR_ENUM_MAX,
326 DCB_BCN_ATTR_MAX = __DCB_BCN_ATTR_ENUM_MAX - 1,
327};
328
329/**
330 * enum dcb_general_attr_values - general DCB attribute values
331 *
332 * @DCB_ATTR_VALUE_UNDEFINED: value used to indicate an attribute is not supported
333 *
334 */
335enum dcb_general_attr_values {
336 DCB_ATTR_VALUE_UNDEFINED = 0xff
337};
338
339
340#endif /* __LINUX_DCBNL_H__ */
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 6080449fbec9..61734e27abb7 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -168,6 +168,8 @@ enum {
168 DCCPO_MIN_CCID_SPECIFIC = 128, 168 DCCPO_MIN_CCID_SPECIFIC = 128,
169 DCCPO_MAX_CCID_SPECIFIC = 255, 169 DCCPO_MAX_CCID_SPECIFIC = 255,
170}; 170};
171/* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */
172#define DCCP_SINGLE_OPT_MAXLEN 253
171 173
172/* DCCP CCIDS */ 174/* DCCP CCIDS */
173enum { 175enum {
@@ -176,29 +178,23 @@ enum {
176}; 178};
177 179
178/* DCCP features (RFC 4340 section 6.4) */ 180/* DCCP features (RFC 4340 section 6.4) */
179enum { 181enum dccp_feature_numbers {
180 DCCPF_RESERVED = 0, 182 DCCPF_RESERVED = 0,
181 DCCPF_CCID = 1, 183 DCCPF_CCID = 1,
182 DCCPF_SHORT_SEQNOS = 2, /* XXX: not yet implemented */ 184 DCCPF_SHORT_SEQNOS = 2,
183 DCCPF_SEQUENCE_WINDOW = 3, 185 DCCPF_SEQUENCE_WINDOW = 3,
184 DCCPF_ECN_INCAPABLE = 4, /* XXX: not yet implemented */ 186 DCCPF_ECN_INCAPABLE = 4,
185 DCCPF_ACK_RATIO = 5, 187 DCCPF_ACK_RATIO = 5,
186 DCCPF_SEND_ACK_VECTOR = 6, 188 DCCPF_SEND_ACK_VECTOR = 6,
187 DCCPF_SEND_NDP_COUNT = 7, 189 DCCPF_SEND_NDP_COUNT = 7,
188 DCCPF_MIN_CSUM_COVER = 8, 190 DCCPF_MIN_CSUM_COVER = 8,
189 DCCPF_DATA_CHECKSUM = 9, /* XXX: not yet implemented */ 191 DCCPF_DATA_CHECKSUM = 9,
190 /* 10-127 reserved */ 192 /* 10-127 reserved */
191 DCCPF_MIN_CCID_SPECIFIC = 128, 193 DCCPF_MIN_CCID_SPECIFIC = 128,
194 DCCPF_SEND_LEV_RATE = 192, /* RFC 4342, sec. 8.4 */
192 DCCPF_MAX_CCID_SPECIFIC = 255, 195 DCCPF_MAX_CCID_SPECIFIC = 255,
193}; 196};
194 197
195/* this structure is argument to DCCP_SOCKOPT_CHANGE_X */
196struct dccp_so_feat {
197 __u8 dccpsf_feat;
198 __u8 __user *dccpsf_val;
199 __u8 dccpsf_len;
200};
201
202/* DCCP socket options */ 198/* DCCP socket options */
203#define DCCP_SOCKOPT_PACKET_SIZE 1 /* XXX deprecated, without effect */ 199#define DCCP_SOCKOPT_PACKET_SIZE 1 /* XXX deprecated, without effect */
204#define DCCP_SOCKOPT_SERVICE 2 200#define DCCP_SOCKOPT_SERVICE 2
@@ -208,6 +204,10 @@ struct dccp_so_feat {
208#define DCCP_SOCKOPT_SERVER_TIMEWAIT 6 204#define DCCP_SOCKOPT_SERVER_TIMEWAIT 6
209#define DCCP_SOCKOPT_SEND_CSCOV 10 205#define DCCP_SOCKOPT_SEND_CSCOV 10
210#define DCCP_SOCKOPT_RECV_CSCOV 11 206#define DCCP_SOCKOPT_RECV_CSCOV 11
207#define DCCP_SOCKOPT_AVAILABLE_CCIDS 12
208#define DCCP_SOCKOPT_CCID 13
209#define DCCP_SOCKOPT_TX_CCID 14
210#define DCCP_SOCKOPT_RX_CCID 15
211#define DCCP_SOCKOPT_CCID_RX_INFO 128 211#define DCCP_SOCKOPT_CCID_RX_INFO 128
212#define DCCP_SOCKOPT_CCID_TX_INFO 192 212#define DCCP_SOCKOPT_CCID_TX_INFO 192
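A hedged userspace sketch of selecting a CCID with the new options (the function name is made up; SOCK_DCCP, IPPROTO_DCCP and SOL_DCCP come from the kernel/libc socket headers and may need to be defined by hand on older toolchains; error handling is trimmed):

#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/dccp.h>

static int example_dccp_socket(uint8_t ccid)
{
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

	/* DCCP_SOCKOPT_CCID requests @ccid for both half-connections */
	if (fd >= 0)
		setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_CCID, &ccid, sizeof(ccid));
	return fd;
}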
213 213
@@ -360,7 +360,6 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
360#define DCCPF_INITIAL_SEQUENCE_WINDOW 100 360#define DCCPF_INITIAL_SEQUENCE_WINDOW 100
361#define DCCPF_INITIAL_ACK_RATIO 2 361#define DCCPF_INITIAL_ACK_RATIO 2
362#define DCCPF_INITIAL_CCID DCCPC_CCID2 362#define DCCPF_INITIAL_CCID DCCPC_CCID2
363#define DCCPF_INITIAL_SEND_ACK_VECTOR 1
364/* FIXME: for now we're default to 1 but it should really be 0 */ 363/* FIXME: for now we're default to 1 but it should really be 0 */
365#define DCCPF_INITIAL_SEND_NDP_COUNT 1 364#define DCCPF_INITIAL_SEND_NDP_COUNT 1
366 365
@@ -370,20 +369,11 @@ static inline unsigned int dccp_hdr_len(const struct sk_buff *skb)
370 * Will be used to pass the state from dccp_request_sock to dccp_sock. 369 * Will be used to pass the state from dccp_request_sock to dccp_sock.
371 * 370 *
372 * @dccpms_sequence_window - Sequence Window Feature (section 7.5.2) 371 * @dccpms_sequence_window - Sequence Window Feature (section 7.5.2)
373 * @dccpms_ccid - Congestion Control Id (CCID) (section 10)
374 * @dccpms_send_ack_vector - Send Ack Vector Feature (section 11.5)
375 * @dccpms_send_ndp_count - Send NDP Count Feature (7.7.2)
376 * @dccpms_ack_ratio - Ack Ratio Feature (section 11.3)
377 * @dccpms_pending - List of features being negotiated 372 * @dccpms_pending - List of features being negotiated
378 * @dccpms_conf - 373 * @dccpms_conf -
379 */ 374 */
380struct dccp_minisock { 375struct dccp_minisock {
381 __u64 dccpms_sequence_window; 376 __u64 dccpms_sequence_window;
382 __u8 dccpms_rx_ccid;
383 __u8 dccpms_tx_ccid;
384 __u8 dccpms_send_ack_vector;
385 __u8 dccpms_send_ndp_count;
386 __u8 dccpms_ack_ratio;
387 struct list_head dccpms_pending; 377 struct list_head dccpms_pending;
388 struct list_head dccpms_conf; 378 struct list_head dccpms_conf;
389}; 379};
@@ -411,6 +401,7 @@ extern void dccp_minisock_init(struct dccp_minisock *dmsk);
411 * @dreq_iss: initial sequence number sent on the Response (RFC 4340, 7.1) 401 * @dreq_iss: initial sequence number sent on the Response (RFC 4340, 7.1)
412 * @dreq_isr: initial sequence number received on the Request 402 * @dreq_isr: initial sequence number received on the Request
413 * @dreq_service: service code present on the Request (there is just one) 403 * @dreq_service: service code present on the Request (there is just one)
404 * @dreq_featneg: feature negotiation options for this connection
414 * The following two fields are analogous to the ones in dccp_sock: 405 * The following two fields are analogous to the ones in dccp_sock:
415 * @dreq_timestamp_echo: last received timestamp to echo (13.1) 406 * @dreq_timestamp_echo: last received timestamp to echo (13.1)
416 * @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo 407 * @dreq_timestamp_echo: the time of receiving the last @dreq_timestamp_echo
@@ -420,6 +411,7 @@ struct dccp_request_sock {
420 __u64 dreq_iss; 411 __u64 dreq_iss;
421 __u64 dreq_isr; 412 __u64 dreq_isr;
422 __be32 dreq_service; 413 __be32 dreq_service;
414 struct list_head dreq_featneg;
423 __u32 dreq_timestamp_echo; 415 __u32 dreq_timestamp_echo;
424 __u32 dreq_timestamp_time; 416 __u32 dreq_timestamp_time;
425}; 417};
@@ -493,10 +485,12 @@ struct dccp_ackvec;
493 * @dccps_r_ack_ratio - feature-remote Ack Ratio 485 * @dccps_r_ack_ratio - feature-remote Ack Ratio
494 * @dccps_pcslen - sender partial checksum coverage (via sockopt) 486 * @dccps_pcslen - sender partial checksum coverage (via sockopt)
495 * @dccps_pcrlen - receiver partial checksum coverage (via sockopt) 487 * @dccps_pcrlen - receiver partial checksum coverage (via sockopt)
488 * @dccps_send_ndp_count - local Send NDP Count feature (7.7.2)
496 * @dccps_ndp_count - number of Non Data Packets since last data packet 489 * @dccps_ndp_count - number of Non Data Packets since last data packet
497 * @dccps_mss_cache - current value of MSS (path MTU minus header sizes) 490 * @dccps_mss_cache - current value of MSS (path MTU minus header sizes)
498 * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4) 491 * @dccps_rate_last - timestamp for rate-limiting DCCP-Sync (RFC 4340, 7.5.4)
499 * @dccps_minisock - associated minisock (accessed via dccp_msk) 492 * @dccps_minisock - associated minisock (accessed via dccp_msk)
493 * @dccps_featneg - tracks feature-negotiation state (mostly during handshake)
500 * @dccps_hc_rx_ackvec - rx half connection ack vector 494 * @dccps_hc_rx_ackvec - rx half connection ack vector
501 * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection) 495 * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
502 * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection) 496 * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
@@ -529,11 +523,13 @@ struct dccp_sock {
529 __u32 dccps_timestamp_time; 523 __u32 dccps_timestamp_time;
530 __u16 dccps_l_ack_ratio; 524 __u16 dccps_l_ack_ratio;
531 __u16 dccps_r_ack_ratio; 525 __u16 dccps_r_ack_ratio;
532 __u16 dccps_pcslen; 526 __u8 dccps_pcslen:4;
533 __u16 dccps_pcrlen; 527 __u8 dccps_pcrlen:4;
528 __u8 dccps_send_ndp_count:1;
534 __u64 dccps_ndp_count:48; 529 __u64 dccps_ndp_count:48;
535 unsigned long dccps_rate_last; 530 unsigned long dccps_rate_last;
536 struct dccp_minisock dccps_minisock; 531 struct dccp_minisock dccps_minisock;
532 struct list_head dccps_featneg;
537 struct dccp_ackvec *dccps_hc_rx_ackvec; 533 struct dccp_ackvec *dccps_hc_rx_ackvec;
538 struct ccid *dccps_hc_rx_ccid; 534 struct ccid *dccps_hc_rx_ccid;
539 struct ccid *dccps_hc_tx_ccid; 535 struct ccid *dccps_hc_tx_ccid;
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 4aaa4afb1cb9..096476f1fb35 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -17,7 +17,7 @@ extern int debug_locks_off(void);
17({ \ 17({ \
18 int __ret = 0; \ 18 int __ret = 0; \
19 \ 19 \
20 if (unlikely(c)) { \ 20 if (!oops_in_progress && unlikely(c)) { \
21 if (debug_locks_off() && !debug_locks_silent) \ 21 if (debug_locks_off() && !debug_locks_silent) \
22 WARN_ON(1); \ 22 WARN_ON(1); \
23 __ret = 1; \ 23 __ret = 1; \
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 08d783592b73..c17fd334e574 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -69,8 +69,7 @@ typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
69 69
70typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv); 70typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
71 71
72typedef int (*dm_ioctl_fn) (struct dm_target *ti, struct inode *inode, 72typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
73 struct file *filp, unsigned int cmd,
74 unsigned long arg); 73 unsigned long arg);
75 74
76typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm, 75typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
@@ -85,7 +84,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev);
85 84
86struct dm_dev { 85struct dm_dev {
87 struct block_device *bdev; 86 struct block_device *bdev;
88 int mode; 87 fmode_t mode;
89 char name[16]; 88 char name[16];
90}; 89};
91 90
@@ -95,7 +94,7 @@ struct dm_dev {
95 * FIXME: too many arguments. 94 * FIXME: too many arguments.
96 */ 95 */
97int dm_get_device(struct dm_target *ti, const char *path, sector_t start, 96int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
98 sector_t len, int mode, struct dm_dev **result); 97 sector_t len, fmode_t mode, struct dm_dev **result);
99void dm_put_device(struct dm_target *ti, struct dm_dev *d); 98void dm_put_device(struct dm_target *ti, struct dm_dev *d);
100 99
101/* 100/*
@@ -223,7 +222,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
223/* 222/*
224 * First create an empty table. 223 * First create an empty table.
225 */ 224 */
226int dm_table_create(struct dm_table **result, int mode, 225int dm_table_create(struct dm_table **result, fmode_t mode,
227 unsigned num_targets, struct mapped_device *md); 226 unsigned num_targets, struct mapped_device *md);
228 227
229/* 228/*
@@ -254,7 +253,7 @@ void dm_table_put(struct dm_table *t);
254 */ 253 */
255sector_t dm_table_get_size(struct dm_table *t); 254sector_t dm_table_get_size(struct dm_table *t);
256unsigned int dm_table_get_num_targets(struct dm_table *t); 255unsigned int dm_table_get_num_targets(struct dm_table *t);
257int dm_table_get_mode(struct dm_table *t); 256fmode_t dm_table_get_mode(struct dm_table *t);
258struct mapped_device *dm_table_get_md(struct dm_table *t); 257struct mapped_device *dm_table_get_md(struct dm_table *t);
259 258
260/* 259/*
@@ -354,6 +353,9 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
354 */ 353 */
355#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) 354#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
356 355
356#define dm_array_too_big(fixed, obj, num) \
357 ((num) > (UINT_MAX - (fixed)) / (obj))
358
357static inline sector_t to_sector(unsigned long n) 359static inline sector_t to_sector(unsigned long n)
358{ 360{
359 return (n >> SECTOR_SHIFT); 361 return (n >> SECTOR_SHIFT);
diff --git a/include/linux/device.h b/include/linux/device.h
index 246937c9cbc7..1a3686d15f98 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -90,6 +90,9 @@ int __must_check bus_for_each_drv(struct bus_type *bus,
90 struct device_driver *start, void *data, 90 struct device_driver *start, void *data,
91 int (*fn)(struct device_driver *, void *)); 91 int (*fn)(struct device_driver *, void *));
92 92
93void bus_sort_breadthfirst(struct bus_type *bus,
94 int (*compare)(const struct device *a,
95 const struct device *b));
93/* 96/*
94 * Bus notifiers: Get notified of addition/removal of devices 97 * Bus notifiers: Get notified of addition/removal of devices
95 * and binding/unbinding of drivers to devices. 98 * and binding/unbinding of drivers to devices.
@@ -447,7 +450,7 @@ static inline void set_dev_node(struct device *dev, int node)
447} 450}
448#endif 451#endif
449 452
450static inline void *dev_get_drvdata(struct device *dev) 453static inline void *dev_get_drvdata(const struct device *dev)
451{ 454{
452 return dev->driver_data; 455 return dev->driver_data;
453} 456}
@@ -502,7 +505,6 @@ extern struct device *device_create(struct class *cls, struct device *parent,
502 dev_t devt, void *drvdata, 505 dev_t devt, void *drvdata,
503 const char *fmt, ...) 506 const char *fmt, ...)
504 __attribute__((format(printf, 5, 6))); 507 __attribute__((format(printf, 5, 6)));
505#define device_create_drvdata device_create
506extern void device_destroy(struct class *cls, dev_t devt); 508extern void device_destroy(struct class *cls, dev_t devt);
507 509
508/* 510/*
@@ -551,7 +553,11 @@ extern const char *dev_driver_string(const struct device *dev);
551#define dev_info(dev, format, arg...) \ 553#define dev_info(dev, format, arg...) \
552 dev_printk(KERN_INFO , dev , format , ## arg) 554 dev_printk(KERN_INFO , dev , format , ## arg)
553 555
554#ifdef DEBUG 556#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
557#define dev_dbg(dev, format, ...) do { \
558 dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
559 } while (0)
560#elif defined(DEBUG)
555#define dev_dbg(dev, format, arg...) \ 561#define dev_dbg(dev, format, arg...) \
556 dev_printk(KERN_DEBUG , dev , format , ## arg) 562 dev_printk(KERN_DEBUG , dev , format , ## arg)
557#else 563#else
@@ -567,6 +573,14 @@ extern const char *dev_driver_string(const struct device *dev);
567 ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; }) 573 ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
568#endif 574#endif
569 575
576/*
577 * dev_WARN() acts like dev_printk(), but with the key difference
578 * of using a WARN/WARN_ON to get the message out, including the
579 * file/line information and a backtrace.
580 */
581#define dev_WARN(dev, format, arg...) \
582 WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg);
583
570/* Create alias, so I can be autoloaded. */ 584/* Create alias, so I can be autoloaded. */
571#define MODULE_ALIAS_CHARDEV(major,minor) \ 585#define MODULE_ALIAS_CHARDEV(major,minor) \
572 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) 586 MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
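A minimal call-site sketch (not part of the patch) for the macros changed above, assuming the usual <linux/device.h> include; example_probe() is a made-up function. dev_dbg() now routes to dynamic_dev_dbg() under CONFIG_DYNAMIC_PRINTK_DEBUG, falls back to dev_printk(KERN_DEBUG, ...) under DEBUG, and otherwise compiles away, while dev_WARN() emits the message through WARN with file/line and a backtrace.

	static int example_probe(struct device *dev)
	{
		void *priv = dev_get_drvdata(dev);	/* now takes a const pointer */

		/* Run-time switchable with dynamic printk, otherwise DEBUG-only. */
		dev_dbg(dev, "probing, drvdata=%p\n", priv);

		if (!priv) {
			/* WARN-backed printk: message plus file/line and backtrace. */
			dev_WARN(dev, "no driver data attached\n");
			return -ENODEV;
		}
		return 0;
	}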
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
new file mode 100644
index 000000000000..a9e652a41373
--- /dev/null
+++ b/include/linux/dm-region-hash.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (C) 2003 Sistina Software Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * Device-Mapper dirty region hash interface.
6 *
7 * This file is released under the GPL.
8 */
9
10#ifndef DM_REGION_HASH_H
11#define DM_REGION_HASH_H
12
13#include <linux/dm-dirty-log.h>
14
15/*-----------------------------------------------------------------
16 * Region hash
17 *----------------------------------------------------------------*/
18struct dm_region_hash;
19struct dm_region;
20
21/*
22 * States a region can have.
23 */
24enum dm_rh_region_states {
25 DM_RH_CLEAN = 0x01, /* No writes in flight. */
26 DM_RH_DIRTY = 0x02, /* Writes in flight. */
27 DM_RH_NOSYNC = 0x04, /* Out of sync. */
28 DM_RH_RECOVERING = 0x08, /* Under resynchronization. */
29};
30
31/*
32 * Region hash create/destroy.
33 */
34struct bio_list;
35struct dm_region_hash *dm_region_hash_create(
36 void *context, void (*dispatch_bios)(void *context,
37 struct bio_list *bios),
38 void (*wakeup_workers)(void *context),
39 void (*wakeup_all_recovery_waiters)(void *context),
40 sector_t target_begin, unsigned max_recovery,
41 struct dm_dirty_log *log, uint32_t region_size,
42 region_t nr_regions);
43void dm_region_hash_destroy(struct dm_region_hash *rh);
44
45struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh);
46
47/*
48 * Conversion functions.
49 */
50region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
51sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region);
52void *dm_rh_region_context(struct dm_region *reg);
53
54/*
55 * Get region size and key (ie. number of the region).
56 */
57sector_t dm_rh_get_region_size(struct dm_region_hash *rh);
58region_t dm_rh_get_region_key(struct dm_region *reg);
59
60/*
61 * Get/set/update region state (and dirty log).
62 *
63 */
64int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block);
65void dm_rh_set_state(struct dm_region_hash *rh, region_t region,
66 enum dm_rh_region_states state, int may_block);
67
68/* Non-zero errors_handled leaves the state of the region NOSYNC */
69void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled);
70
71/* Flush the region hash and dirty log. */
72int dm_rh_flush(struct dm_region_hash *rh);
73
74/* Inc/dec pending count on regions. */
75void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios);
76void dm_rh_dec(struct dm_region_hash *rh, region_t region);
77
78/* Delay bios on regions. */
79void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
80
81void dm_rh_mark_nosync(struct dm_region_hash *rh,
82 struct bio *bio, unsigned done, int error);
83
84/*
85 * Region recovery control.
86 */
87
88/* Prepare some regions for recovery by starting to quiesce them. */
89void dm_rh_recovery_prepare(struct dm_region_hash *rh);
90
91/* Try fetching a quiesced region for recovery. */
92struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh);
93
94/* Report recovery end on a region. */
95void dm_rh_recovery_end(struct dm_region *reg, int error);
96
97/* Returns number of regions with recovery work outstanding. */
98int dm_rh_recovery_in_flight(struct dm_region_hash *rh);
99
100/* Start/stop recovery. */
101void dm_rh_start_recovery(struct dm_region_hash *rh);
102void dm_rh_stop_recovery(struct dm_region_hash *rh);
103
104#endif /* DM_REGION_HASH_H */
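Illustrative sketch (not part of the patch) of how a mirror-style target might route writes through this interface; issue_write() is a placeholder, and dm_rh_inc_pending() is assumed to have been called on the batch of writes before they were issued.

	static void example_queue_write(struct dm_region_hash *rh, struct bio *bio)
	{
		region_t region = dm_rh_bio_to_region(rh, bio);

		/* Writes to a region under resynchronization must be delayed. */
		if (dm_rh_get_state(rh, region, 1) == DM_RH_RECOVERING) {
			dm_rh_delay(rh, bio);
			return;
		}
		/* issue_write(bio); */
	}

	static void example_write_done(struct dm_region_hash *rh, struct bio *bio)
	{
		/* Drop the pending count taken by dm_rh_inc_pending(). */
		dm_rh_dec(rh, dm_rh_bio_to_region(rh, bio));
	}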
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
new file mode 100644
index 000000000000..952df39c989d
--- /dev/null
+++ b/include/linux/dma_remapping.h
@@ -0,0 +1,156 @@
1#ifndef _DMA_REMAPPING_H
2#define _DMA_REMAPPING_H
3
4/*
5 * VT-d hardware uses 4KiB page size regardless of host page size.
6 */
7#define VTD_PAGE_SHIFT (12)
8#define VTD_PAGE_SIZE (1UL << VTD_PAGE_SHIFT)
9#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
10#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
11
12#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
13#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
14#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
15
16
17/*
18 * 0: Present
19 * 1-11: Reserved
20 * 12-63: Context Ptr (12 - (haw-1))
21 * 64-127: Reserved
22 */
23struct root_entry {
24 u64 val;
25 u64 rsvd1;
26};
27#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
28static inline bool root_present(struct root_entry *root)
29{
30 return (root->val & 1);
31}
32static inline void set_root_present(struct root_entry *root)
33{
34 root->val |= 1;
35}
36static inline void set_root_value(struct root_entry *root, unsigned long value)
37{
38 root->val |= value & VTD_PAGE_MASK;
39}
40
41struct context_entry;
42static inline struct context_entry *
43get_context_addr_from_root(struct root_entry *root)
44{
45 return (struct context_entry *)
46 (root_present(root)?phys_to_virt(
47 root->val & VTD_PAGE_MASK) :
48 NULL);
49}
50
51/*
52 * low 64 bits:
53 * 0: present
54 * 1: fault processing disable
55 * 2-3: translation type
56 * 12-63: address space root
57 * high 64 bits:
58 * 0-2: address width
 59 * 3-6: avail
60 * 8-23: domain id
61 */
62struct context_entry {
63 u64 lo;
64 u64 hi;
65};
66#define context_present(c) ((c).lo & 1)
67#define context_fault_disable(c) (((c).lo >> 1) & 1)
68#define context_translation_type(c) (((c).lo >> 2) & 3)
69#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
70#define context_address_width(c) ((c).hi & 7)
71#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
72
73#define context_set_present(c) do {(c).lo |= 1;} while (0)
74#define context_set_fault_enable(c) \
75 do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
76#define context_set_translation_type(c, val) \
77 do { \
78 (c).lo &= (((u64)-1) << 4) | 3; \
79 (c).lo |= ((val) & 3) << 2; \
80 } while (0)
81#define CONTEXT_TT_MULTI_LEVEL 0
82#define context_set_address_root(c, val) \
83 do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
84#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
85#define context_set_domain_id(c, val) \
86 do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
87#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
88
89/*
90 * 0: readable
91 * 1: writable
92 * 2-6: reserved
93 * 7: super page
94 * 8-11: available
 95 * 12-63: Host physical address
96 */
97struct dma_pte {
98 u64 val;
99};
100#define dma_clear_pte(p) do {(p).val = 0;} while (0)
101
102#define DMA_PTE_READ (1)
103#define DMA_PTE_WRITE (2)
104
105#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
106#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
107#define dma_set_pte_prot(p, prot) \
108 do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
109#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
110#define dma_set_pte_addr(p, addr) do {\
111 (p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
112#define dma_pte_present(p) (((p).val & 3) != 0)
113
114struct intel_iommu;
115
116struct dmar_domain {
117 int id; /* domain id */
118 struct intel_iommu *iommu; /* back pointer to owning iommu */
119
120 struct list_head devices; /* all devices' list */
121 struct iova_domain iovad; /* iova's that belong to this domain */
122
123 struct dma_pte *pgd; /* virtual address */
124 spinlock_t mapping_lock; /* page table lock */
125 int gaw; /* max guest address width */
126
127 /* adjusted guest address width, 0 is level 2 30-bit */
128 int agaw;
129
130#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
131 int flags;
132};
133
134/* PCI domain-device relationship */
135struct device_domain_info {
136 struct list_head link; /* link to domain siblings */
137 struct list_head global; /* link to global list */
 138 u8 bus; /* PCI bus number */
139 u8 devfn; /* PCI devfn number */
140 struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
141 struct dmar_domain *domain; /* pointer to domain */
142};
143
144extern int init_dmars(void);
145extern void free_dmar_iommu(struct intel_iommu *iommu);
146
147extern int dmar_disabled;
148
149#ifndef CONFIG_DMAR_GFX_WA
150static inline void iommu_prepare_gfx_mapping(void)
151{
152 return;
153}
154#endif /* !CONFIG_DMAR_GFX_WA */
155
156#endif
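Illustrative sketch (not part of the patch): programming a single context-table entry with the helper macros defined above. example_fill_context() is a made-up function and the domain, width and page-table values are placeholders.

	static void example_fill_context(struct context_entry *ce, u64 pgd_phys,
					 int address_width, int domain_id)
	{
		context_clear_entry(*ce);
		context_set_domain_id(*ce, domain_id);
		context_set_address_width(*ce, address_width);
		/* pgd_phys must be VTD_PAGE aligned. */
		context_set_address_root(*ce, pgd_phys);
		context_set_translation_type(*ce, CONTEXT_TT_MULTI_LEVEL);
		context_set_fault_enable(*ce);	/* clear the fault-disable bit */
		context_set_present(*ce);
	}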
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index c360c558e59e..f1984fc3e06d 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -45,7 +45,6 @@ extern struct list_head dmar_drhd_units;
45 list_for_each_entry(drhd, &dmar_drhd_units, list) 45 list_for_each_entry(drhd, &dmar_drhd_units, list)
46 46
47extern int dmar_table_init(void); 47extern int dmar_table_init(void);
48extern int early_dmar_detect(void);
49extern int dmar_dev_scope_init(void); 48extern int dmar_dev_scope_init(void);
50 49
51/* Intel IOMMU detection */ 50/* Intel IOMMU detection */
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index e5084eb5943a..34161907b2f8 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -44,8 +44,10 @@ extern const struct dmi_device * dmi_find_device(int type, const char *name,
44extern void dmi_scan_machine(void); 44extern void dmi_scan_machine(void);
45extern int dmi_get_year(int field); 45extern int dmi_get_year(int field);
46extern int dmi_name_in_vendors(const char *str); 46extern int dmi_name_in_vendors(const char *str);
47extern int dmi_name_in_serial(const char *str);
47extern int dmi_available; 48extern int dmi_available;
48extern int dmi_walk(void (*decode)(const struct dmi_header *)); 49extern int dmi_walk(void (*decode)(const struct dmi_header *));
50extern bool dmi_match(enum dmi_field f, const char *str);
49 51
50#else 52#else
51 53
@@ -56,9 +58,12 @@ static inline const struct dmi_device * dmi_find_device(int type, const char *na
56static inline void dmi_scan_machine(void) { return; } 58static inline void dmi_scan_machine(void) { return; }
57static inline int dmi_get_year(int year) { return 0; } 59static inline int dmi_get_year(int year) { return 0; }
58static inline int dmi_name_in_vendors(const char *s) { return 0; } 60static inline int dmi_name_in_vendors(const char *s) { return 0; }
61static inline int dmi_name_in_serial(const char *s) { return 0; }
59#define dmi_available 0 62#define dmi_available 0
60static inline int dmi_walk(void (*decode)(const struct dmi_header *)) 63static inline int dmi_walk(void (*decode)(const struct dmi_header *))
61 { return -1; } 64 { return -1; }
65static inline bool dmi_match(enum dmi_field f, const char *str)
66 { return false; }
62 67
63#endif 68#endif
64 69
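A small sketch (not part of the patch) of the new dmi_match() helper in a quirk check; the vendor and product strings are example values only.

	static bool example_needs_quirk(void)
	{
		return dmi_match(DMI_SYS_VENDOR, "Example Corp.") &&
		       dmi_match(DMI_PRODUCT_NAME, "Example Laptop 1000");
	}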
diff --git a/include/linux/ds1286.h b/include/linux/ds1286.h
index d8989860e4ce..45ea0aa0aeb9 100644
--- a/include/linux/ds1286.h
+++ b/include/linux/ds1286.h
@@ -8,8 +8,6 @@
8#ifndef __LINUX_DS1286_H 8#ifndef __LINUX_DS1286_H
9#define __LINUX_DS1286_H 9#define __LINUX_DS1286_H
10 10
11#include <asm/ds1286.h>
12
13/********************************************************************** 11/**********************************************************************
14 * register summary 12 * register summary
15 **********************************************************************/ 13 **********************************************************************/
diff --git a/include/linux/dvb/frontend.h b/include/linux/dvb/frontend.h
index 6e4ace270276..79a8ed8e6a7d 100644
--- a/include/linux/dvb/frontend.h
+++ b/include/linux/dvb/frontend.h
@@ -166,6 +166,7 @@ typedef enum fe_modulation {
166 VSB_16, 166 VSB_16,
167 PSK_8, 167 PSK_8,
168 APSK_16, 168 APSK_16,
169 APSK_32,
169 DQPSK, 170 DQPSK,
170} fe_modulation_t; 171} fe_modulation_t;
171 172
@@ -295,6 +296,7 @@ typedef enum fe_delivery_system {
295 SYS_DVBC_ANNEX_AC, 296 SYS_DVBC_ANNEX_AC,
296 SYS_DVBC_ANNEX_B, 297 SYS_DVBC_ANNEX_B,
297 SYS_DVBT, 298 SYS_DVBT,
299 SYS_DSS,
298 SYS_DVBS, 300 SYS_DVBS,
299 SYS_DVBS2, 301 SYS_DVBS2,
300 SYS_DVBH, 302 SYS_DVBH,
diff --git a/include/linux/dynamic_printk.h b/include/linux/dynamic_printk.h
new file mode 100644
index 000000000000..2d528d009074
--- /dev/null
+++ b/include/linux/dynamic_printk.h
@@ -0,0 +1,93 @@
1#ifndef _DYNAMIC_PRINTK_H
2#define _DYNAMIC_PRINTK_H
3
4#define DYNAMIC_DEBUG_HASH_BITS 6
5#define DEBUG_HASH_TABLE_SIZE (1 << DYNAMIC_DEBUG_HASH_BITS)
6
7#define TYPE_BOOLEAN 1
8
9#define DYNAMIC_ENABLED_ALL 0
10#define DYNAMIC_ENABLED_NONE 1
11#define DYNAMIC_ENABLED_SOME 2
12
13extern int dynamic_enabled;
14
 15/* dynamic_printk_enabled and dynamic_printk_enabled2 are bitmasks in which
 16 * bit n is set to 1 if any modname hashes into bucket n, 0 otherwise. They
 17 * use independent hash functions to reduce the chance of false positives.
18 */
19extern long long dynamic_printk_enabled;
20extern long long dynamic_printk_enabled2;
21
22struct mod_debug {
23 char *modname;
24 char *logical_modname;
25 char *flag_names;
26 int type;
27 int hash;
28 int hash2;
29} __attribute__((aligned(8)));
30
31int register_dynamic_debug_module(char *mod_name, int type, char *share_name,
32 char *flags, int hash, int hash2);
33
34#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
35extern int unregister_dynamic_debug_module(char *mod_name);
36extern int __dynamic_dbg_enabled_helper(char *modname, int type,
37 int value, int hash);
38
39#define __dynamic_dbg_enabled(module, type, value, level, hash) ({ \
40 int __ret = 0; \
41 if (unlikely((dynamic_printk_enabled & (1LL << DEBUG_HASH)) && \
42 (dynamic_printk_enabled2 & (1LL << DEBUG_HASH2)))) \
43 __ret = __dynamic_dbg_enabled_helper(module, type, \
44 value, hash);\
45 __ret; })
46
47#define dynamic_pr_debug(fmt, ...) do { \
48 static char mod_name[] \
49 __attribute__((section("__verbose_strings"))) \
50 = KBUILD_MODNAME; \
51 static struct mod_debug descriptor \
52 __used \
53 __attribute__((section("__verbose"), aligned(8))) = \
54 { mod_name, mod_name, NULL, TYPE_BOOLEAN, DEBUG_HASH, DEBUG_HASH2 };\
55 if (__dynamic_dbg_enabled(KBUILD_MODNAME, TYPE_BOOLEAN, \
56 0, 0, DEBUG_HASH)) \
57 printk(KERN_DEBUG KBUILD_MODNAME ":" fmt, \
58 ##__VA_ARGS__); \
59 } while (0)
60
61#define dynamic_dev_dbg(dev, format, ...) do { \
62 static char mod_name[] \
63 __attribute__((section("__verbose_strings"))) \
64 = KBUILD_MODNAME; \
65 static struct mod_debug descriptor \
66 __used \
67 __attribute__((section("__verbose"), aligned(8))) = \
68 { mod_name, mod_name, NULL, TYPE_BOOLEAN, DEBUG_HASH, DEBUG_HASH2 };\
69 if (__dynamic_dbg_enabled(KBUILD_MODNAME, TYPE_BOOLEAN, \
70 0, 0, DEBUG_HASH)) \
71 dev_printk(KERN_DEBUG, dev, \
72 KBUILD_MODNAME ": " format, \
73 ##__VA_ARGS__); \
74 } while (0)
75
76#else
77
78static inline int unregister_dynamic_debug_module(const char *mod_name)
79{
80 return 0;
81}
82static inline int __dynamic_dbg_enabled_helper(char *modname, int type,
83 int value, int hash)
84{
85 return 0;
86}
87
88#define __dynamic_dbg_enabled(module, type, value, level, hash) ({ 0; })
89#define dynamic_pr_debug(fmt, ...) do { } while (0)
90#define dynamic_dev_dbg(dev, format, ...) do { } while (0)
91#endif
92
93#endif
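Hypothetical driver snippet (not part of the patch). In-tree code normally reaches dynamic_pr_debug() through pr_debug(); calling it directly as below only works when the build supplies DEBUG_HASH/DEBUG_HASH2 for the file, as the macro above expects. example_setup() is a made-up function.

	static int example_setup(int channels)
	{
		if (channels <= 0) {
			dynamic_pr_debug("bad channel count %d\n", channels);
			return -EINVAL;
		}
		dynamic_pr_debug("initialising %d channels\n", channels);
		return 0;
	}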
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 807373d467f7..bb66feb164bd 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -208,6 +208,9 @@ typedef efi_status_t efi_set_virtual_address_map_t (unsigned long memory_map_siz
208#define EFI_GLOBAL_VARIABLE_GUID \ 208#define EFI_GLOBAL_VARIABLE_GUID \
209 EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c ) 209 EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
210 210
211#define UV_SYSTEM_TABLE_GUID \
212 EFI_GUID( 0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 )
213
211typedef struct { 214typedef struct {
212 efi_guid_t guid; 215 efi_guid_t guid;
213 unsigned long table; 216 unsigned long table;
@@ -255,6 +258,7 @@ extern struct efi {
255 unsigned long boot_info; /* boot info table */ 258 unsigned long boot_info; /* boot info table */
256 unsigned long hcdp; /* HCDP table */ 259 unsigned long hcdp; /* HCDP table */
257 unsigned long uga; /* UGA table */ 260 unsigned long uga; /* UGA table */
261 unsigned long uv_systab; /* UV system table */
258 efi_get_time_t *get_time; 262 efi_get_time_t *get_time;
259 efi_set_time_t *set_time; 263 efi_set_time_t *set_time;
260 efi_get_wakeup_time_t *get_wakeup_time; 264 efi_get_wakeup_time_t *get_wakeup_time;
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 92f6f634e3e6..7a204256b155 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request
28typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *); 28typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);
29 29
30typedef void *(elevator_init_fn) (struct request_queue *); 30typedef void *(elevator_init_fn) (struct request_queue *);
31typedef void (elevator_exit_fn) (elevator_t *); 31typedef void (elevator_exit_fn) (struct elevator_queue *);
32 32
33struct elevator_ops 33struct elevator_ops
34{ 34{
@@ -62,8 +62,8 @@ struct elevator_ops
62 62
63struct elv_fs_entry { 63struct elv_fs_entry {
64 struct attribute attr; 64 struct attribute attr;
65 ssize_t (*show)(elevator_t *, char *); 65 ssize_t (*show)(struct elevator_queue *, char *);
66 ssize_t (*store)(elevator_t *, const char *, size_t); 66 ssize_t (*store)(struct elevator_queue *, const char *, size_t);
67}; 67};
68 68
69/* 69/*
@@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
130extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); 130extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
131 131
132extern int elevator_init(struct request_queue *, char *); 132extern int elevator_init(struct request_queue *, char *);
133extern void elevator_exit(elevator_t *); 133extern void elevator_exit(struct elevator_queue *);
134extern int elv_rq_merge_ok(struct request *, struct bio *); 134extern int elv_rq_merge_ok(struct request *, struct bio *);
135 135
136/* 136/*
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 25d62e6e3290..1cb0f0b90926 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -27,6 +27,7 @@
27#include <linux/if_ether.h> 27#include <linux/if_ether.h>
28#include <linux/netdevice.h> 28#include <linux/netdevice.h>
29#include <linux/random.h> 29#include <linux/random.h>
30#include <asm/unaligned.h>
30 31
31#ifdef __KERNEL__ 32#ifdef __KERNEL__
32extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); 33extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
@@ -41,6 +42,10 @@ extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh);
41extern void eth_header_cache_update(struct hh_cache *hh, 42extern void eth_header_cache_update(struct hh_cache *hh,
42 const struct net_device *dev, 43 const struct net_device *dev,
43 const unsigned char *haddr); 44 const unsigned char *haddr);
45extern int eth_mac_addr(struct net_device *dev, void *p);
46extern int eth_change_mtu(struct net_device *dev, int new_mtu);
47extern int eth_validate_addr(struct net_device *dev);
48
44 49
45 50
46extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count); 51extern struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count);
@@ -136,6 +141,47 @@ static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
136 BUILD_BUG_ON(ETH_ALEN != 6); 141 BUILD_BUG_ON(ETH_ALEN != 6);
137 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; 142 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
138} 143}
144
145static inline unsigned long zap_last_2bytes(unsigned long value)
146{
147#ifdef __BIG_ENDIAN
148 return value >> 16;
149#else
150 return value << 16;
151#endif
152}
153
154/**
155 * compare_ether_addr_64bits - Compare two Ethernet addresses
156 * @addr1: Pointer to an array of 8 bytes
 157 * @addr2: Pointer to another array of 8 bytes
 158 *
 159 * Compare two Ethernet addresses; returns 0 if equal.
 160 * Same result as "memcmp(addr1, addr2, ETH_ALEN)" but without conditional
 161 * branches, and possibly long word memory accesses on CPUs allowing cheap
 162 * unaligned memory reads.
 163 * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
 164 *
 165 * Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits.
166 */
167
168static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
169 const u8 addr2[6+2])
170{
171#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
172 unsigned long fold = ((*(unsigned long *)addr1) ^
173 (*(unsigned long *)addr2));
174
175 if (sizeof(fold) == 8)
176 return zap_last_2bytes(fold) != 0;
177
178 fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^
179 (*(unsigned long *)(addr2 + 4)));
180 return fold != 0;
181#else
182 return compare_ether_addr(addr1, addr2);
183#endif
184}
139#endif /* __KERNEL__ */ 185#endif /* __KERNEL__ */
140 186
141#endif /* _LINUX_ETHERDEVICE_H */ 187#endif /* _LINUX_ETHERDEVICE_H */
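A usage sketch (not part of the patch): both buffers must carry two bytes of padding after the six address bytes, as the kernel-doc above requires. example_is_for_us() is a made-up helper.

	static inline bool example_is_for_us(const u8 dest[ETH_ALEN + 2],
					     const u8 our_addr[ETH_ALEN + 2])
	{
		return compare_ether_addr_64bits(dest, our_addr) == 0;
	}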
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index b4b038b89ee6..27c67a542235 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -467,6 +467,8 @@ struct ethtool_ops {
467 467
468#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ 468#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */
469#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ 469#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
470#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */
471#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */
470 472
471/* compatibility with older code */ 473/* compatibility with older code */
472#define SPARC_ETH_GSET ETHTOOL_GSET 474#define SPARC_ETH_GSET ETHTOOL_GSET
diff --git a/include/linux/ext2_fs.h b/include/linux/ext2_fs.h
index 2efe7b863cff..78c775a83f7c 100644
--- a/include/linux/ext2_fs.h
+++ b/include/linux/ext2_fs.h
@@ -47,7 +47,7 @@
47#ifdef EXT2FS_DEBUG 47#ifdef EXT2FS_DEBUG
48# define ext2_debug(f, a...) { \ 48# define ext2_debug(f, a...) { \
49 printk ("EXT2-fs DEBUG (%s, %d): %s:", \ 49 printk ("EXT2-fs DEBUG (%s, %d): %s:", \
50 __FILE__, __LINE__, __FUNCTION__); \ 50 __FILE__, __LINE__, __func__); \
51 printk (f, ## a); \ 51 printk (f, ## a); \
52 } 52 }
53#else 53#else
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 8120fa1bc235..d14f02918483 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -43,7 +43,7 @@
43#define ext3_debug(f, a...) \ 43#define ext3_debug(f, a...) \
44 do { \ 44 do { \
45 printk (KERN_DEBUG "EXT3-fs DEBUG (%s, %d): %s:", \ 45 printk (KERN_DEBUG "EXT3-fs DEBUG (%s, %d): %s:", \
46 __FILE__, __LINE__, __FUNCTION__); \ 46 __FILE__, __LINE__, __func__); \
47 printk (KERN_DEBUG f, ## a); \ 47 printk (KERN_DEBUG f, ## a); \
48 } while (0) 48 } while (0)
49#else 49#else
@@ -380,6 +380,8 @@ struct ext3_inode {
380#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */ 380#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */
381#define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ 381#define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
382#define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ 382#define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
383#define EXT3_MOUNT_DATA_ERR_ABORT 0x400000 /* Abort on file data write
384 * error in ordered mode */
383 385
384/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */ 386/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
385#ifndef _LINUX_EXT2_FS_H 387#ifndef _LINUX_EXT2_FS_H
@@ -871,7 +873,7 @@ extern void ext3_update_dynamic_rev (struct super_block *sb);
871#define ext3_std_error(sb, errno) \ 873#define ext3_std_error(sb, errno) \
872do { \ 874do { \
873 if ((errno)) \ 875 if ((errno)) \
874 __ext3_std_error((sb), __FUNCTION__, (errno)); \ 876 __ext3_std_error((sb), __func__, (errno)); \
875} while (0) 877} while (0)
876 878
877/* 879/*
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index 8c43b13a02fe..cf82d519be40 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
@@ -137,17 +137,17 @@ int __ext3_journal_dirty_metadata(const char *where,
137 handle_t *handle, struct buffer_head *bh); 137 handle_t *handle, struct buffer_head *bh);
138 138
139#define ext3_journal_get_undo_access(handle, bh) \ 139#define ext3_journal_get_undo_access(handle, bh) \
140 __ext3_journal_get_undo_access(__FUNCTION__, (handle), (bh)) 140 __ext3_journal_get_undo_access(__func__, (handle), (bh))
141#define ext3_journal_get_write_access(handle, bh) \ 141#define ext3_journal_get_write_access(handle, bh) \
142 __ext3_journal_get_write_access(__FUNCTION__, (handle), (bh)) 142 __ext3_journal_get_write_access(__func__, (handle), (bh))
143#define ext3_journal_revoke(handle, blocknr, bh) \ 143#define ext3_journal_revoke(handle, blocknr, bh) \
144 __ext3_journal_revoke(__FUNCTION__, (handle), (blocknr), (bh)) 144 __ext3_journal_revoke(__func__, (handle), (blocknr), (bh))
145#define ext3_journal_get_create_access(handle, bh) \ 145#define ext3_journal_get_create_access(handle, bh) \
146 __ext3_journal_get_create_access(__FUNCTION__, (handle), (bh)) 146 __ext3_journal_get_create_access(__func__, (handle), (bh))
147#define ext3_journal_dirty_metadata(handle, bh) \ 147#define ext3_journal_dirty_metadata(handle, bh) \
148 __ext3_journal_dirty_metadata(__FUNCTION__, (handle), (bh)) 148 __ext3_journal_dirty_metadata(__func__, (handle), (bh))
149#define ext3_journal_forget(handle, bh) \ 149#define ext3_journal_forget(handle, bh) \
150 __ext3_journal_forget(__FUNCTION__, (handle), (bh)) 150 __ext3_journal_forget(__func__, (handle), (bh))
151 151
152int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh); 152int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh);
153 153
@@ -160,7 +160,7 @@ static inline handle_t *ext3_journal_start(struct inode *inode, int nblocks)
160} 160}
161 161
162#define ext3_journal_stop(handle) \ 162#define ext3_journal_stop(handle) \
163 __ext3_journal_stop(__FUNCTION__, (handle)) 163 __ext3_journal_stop(__func__, (handle))
164 164
165static inline handle_t *ext3_journal_current_handle(void) 165static inline handle_t *ext3_journal_current_handle(void)
166{ 166{
diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h
index 32368c4f0326..06ca9b21dad2 100644
--- a/include/linux/fault-inject.h
+++ b/include/linux/fault-inject.h
@@ -81,4 +81,13 @@ static inline void cleanup_fault_attr_dentries(struct fault_attr *attr)
81 81
82#endif /* CONFIG_FAULT_INJECTION */ 82#endif /* CONFIG_FAULT_INJECTION */
83 83
84#ifdef CONFIG_FAILSLAB
85extern bool should_failslab(size_t size, gfp_t gfpflags);
86#else
87static inline bool should_failslab(size_t size, gfp_t gfpflags)
88{
89 return false;
90}
91#endif /* CONFIG_FAILSLAB */
92
84#endif /* _LINUX_FAULT_INJECT_H */ 93#endif /* _LINUX_FAULT_INJECT_H */
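Illustrative sketch (not part of the patch) of how an allocator hot path can consult the new hook; example_slab_alloc_real() is a placeholder, not a real kernel function.

	extern void *example_slab_alloc_real(size_t size, gfp_t flags);

	static void *example_slab_alloc(size_t size, gfp_t flags)
	{
		if (should_failslab(size, flags))
			return NULL;		/* injected failure */
		return example_slab_alloc_real(size, flags);
	}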
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 531ccd5f5960..1ee63df5be92 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -808,6 +808,7 @@ struct fb_tile_ops {
808struct fb_info { 808struct fb_info {
809 int node; 809 int node;
810 int flags; 810 int flags;
811 struct mutex lock; /* Lock for open/release/ioctl funcs */
811 struct fb_var_screeninfo var; /* Current var */ 812 struct fb_var_screeninfo var; /* Current var */
812 struct fb_fix_screeninfo fix; /* Current fix */ 813 struct fb_fix_screeninfo fix; /* Current fix */
813 struct fb_monspecs monspecs; /* Current Monitor specs */ 814 struct fb_monspecs monspecs; /* Current Monitor specs */
@@ -887,7 +888,7 @@ struct fb_info {
887#define fb_writeq sbus_writeq 888#define fb_writeq sbus_writeq
888#define fb_memset sbus_memset_io 889#define fb_memset sbus_memset_io
889 890
890#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || (defined(__sh__) && !defined(__SH5__)) || defined(__powerpc__) || defined(__avr32__) 891#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__)
891 892
892#define fb_readb __raw_readb 893#define fb_readb __raw_readb
893#define fb_readw __raw_readw 894#define fb_readw __raw_readw
diff --git a/include/linux/fddidevice.h b/include/linux/fddidevice.h
index e61e42dfd317..155bafd9e886 100644
--- a/include/linux/fddidevice.h
+++ b/include/linux/fddidevice.h
@@ -27,6 +27,7 @@
27#ifdef __KERNEL__ 27#ifdef __KERNEL__
28extern __be16 fddi_type_trans(struct sk_buff *skb, 28extern __be16 fddi_type_trans(struct sk_buff *skb,
29 struct net_device *dev); 29 struct net_device *dev);
30extern int fddi_change_mtu(struct net_device *dev, int new_mtu);
30extern struct net_device *alloc_fddidev(int sizeof_priv); 31extern struct net_device *alloc_fddidev(int sizeof_priv);
31#endif 32#endif
32 33
diff --git a/include/linux/file.h b/include/linux/file.h
index a20259e248a5..335a0a5c316e 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -19,10 +19,10 @@ struct file_operations;
19struct vfsmount; 19struct vfsmount;
20struct dentry; 20struct dentry;
21extern int init_file(struct file *, struct vfsmount *mnt, 21extern int init_file(struct file *, struct vfsmount *mnt,
22 struct dentry *dentry, mode_t mode, 22 struct dentry *dentry, fmode_t mode,
23 const struct file_operations *fop); 23 const struct file_operations *fop);
24extern struct file *alloc_file(struct vfsmount *, struct dentry *dentry, 24extern struct file *alloc_file(struct vfsmount *, struct dentry *dentry,
25 mode_t mode, const struct file_operations *fop); 25 fmode_t mode, const struct file_operations *fop);
26 26
27static inline void fput_light(struct file *file, int fput_needed) 27static inline void fput_light(struct file *file, int fput_needed)
28{ 28{
diff --git a/include/linux/filter.h b/include/linux/filter.h
index b6ea9aa9e853..1354aaf6abbe 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -122,7 +122,8 @@ struct sock_fprog /* Required for SO_ATTACH_FILTER. */
122#define SKF_AD_PKTTYPE 4 122#define SKF_AD_PKTTYPE 4
123#define SKF_AD_IFINDEX 8 123#define SKF_AD_IFINDEX 8
124#define SKF_AD_NLATTR 12 124#define SKF_AD_NLATTR 12
125#define SKF_AD_MAX 16 125#define SKF_AD_NLATTR_NEST 16
126#define SKF_AD_MAX 20
126#define SKF_NET_OFF (-0x100000) 127#define SKF_NET_OFF (-0x100000)
127#define SKF_LL_OFF (-0x200000) 128#define SKF_LL_OFF (-0x200000)
128 129
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 0f0e271f97fa..4d078e99c017 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -154,8 +154,13 @@ struct fw_cdev_event_iso_interrupt {
154 * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST 154 * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST
155 * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT 155 * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT
156 * 156 *
157 * Convenience union for userspace use. Events could be read(2) into a char 157 * Convenience union for userspace use. Events could be read(2) into an
158 * buffer and then cast to this union for further processing. 158 * appropriately aligned char buffer and then cast to this union for further
159 * processing. Note that for a request, response or iso_interrupt event,
160 * the data[] or header[] may make the size of the full event larger than
161 * sizeof(union fw_cdev_event). Also note that if you attempt to read(2)
162 * an event into a buffer that is not large enough for it, the data that does
163 * not fit will be discarded so that the next read(2) will return a new event.
159 */ 164 */
160union fw_cdev_event { 165union fw_cdev_event {
161 struct fw_cdev_event_common common; 166 struct fw_cdev_event_common common;
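Userspace sketch (not part of the patch) following the comment above: read events into a suitably aligned buffer that is large enough for the payload, then look at them through the union. The 16 KiB size and example_event_loop() are arbitrary choices for this example.

	#include <stdio.h>
	#include <unistd.h>
	#include <linux/firewire-cdev.h>

	static void example_event_loop(int fd)
	{
		char buf[16384] __attribute__((aligned(8)));
		union fw_cdev_event *event = (union fw_cdev_event *)buf;
		ssize_t len;

		while ((len = read(fd, buf, sizeof(buf))) > 0) {
			if (event->common.type == FW_CDEV_EVENT_BUS_RESET)
				printf("bus reset, node_id 0x%x\n",
				       (unsigned)event->bus_reset.node_id);
		}
	}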
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index deddeedf3257..5a361f85cfec 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -6,7 +6,7 @@
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/wait.h> 7#include <linux/wait.h>
8 8
9#ifdef CONFIG_PM_SLEEP 9#ifdef CONFIG_FREEZER
10/* 10/*
11 * Check if a process has been frozen 11 * Check if a process has been frozen
12 */ 12 */
@@ -39,29 +39,14 @@ static inline void clear_freeze_flag(struct task_struct *p)
39 clear_tsk_thread_flag(p, TIF_FREEZE); 39 clear_tsk_thread_flag(p, TIF_FREEZE);
40} 40}
41 41
42/* 42static inline bool should_send_signal(struct task_struct *p)
43 * Wake up a frozen process
44 *
45 * task_lock() is taken to prevent the race with refrigerator() which may
46 * occur if the freezing of tasks fails. Namely, without the lock, if the
47 * freezing of tasks failed, thaw_tasks() might have run before a task in
48 * refrigerator() could call frozen_process(), in which case the task would be
49 * frozen and no one would thaw it.
50 */
51static inline int thaw_process(struct task_struct *p)
52{ 43{
53 task_lock(p); 44 return !(p->flags & PF_FREEZER_NOSIG);
54 if (frozen(p)) {
55 p->flags &= ~PF_FROZEN;
56 task_unlock(p);
57 wake_up_process(p);
58 return 1;
59 }
60 clear_freeze_flag(p);
61 task_unlock(p);
62 return 0;
63} 45}
64 46
47/* Takes and releases task alloc lock using task_lock() */
48extern int thaw_process(struct task_struct *p);
49
65extern void refrigerator(void); 50extern void refrigerator(void);
66extern int freeze_processes(void); 51extern int freeze_processes(void);
67extern void thaw_processes(void); 52extern void thaw_processes(void);
@@ -75,6 +60,15 @@ static inline int try_to_freeze(void)
75 return 0; 60 return 0;
76} 61}
77 62
63extern bool freeze_task(struct task_struct *p, bool sig_only);
64extern void cancel_freezing(struct task_struct *p);
65
66#ifdef CONFIG_CGROUP_FREEZER
67extern int cgroup_frozen(struct task_struct *task);
68#else /* !CONFIG_CGROUP_FREEZER */
69static inline int cgroup_frozen(struct task_struct *task) { return 0; }
70#endif /* !CONFIG_CGROUP_FREEZER */
71
78/* 72/*
79 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it 73 * The PF_FREEZER_SKIP flag should be set by a vfork parent right before it
80 * calls wait_for_completion(&vfork) and reset right after it returns from this 74 * calls wait_for_completion(&vfork) and reset right after it returns from this
@@ -166,7 +160,7 @@ static inline void set_freezable_with_signal(void)
166 } while (try_to_freeze()); \ 160 } while (try_to_freeze()); \
167 __retval; \ 161 __retval; \
168}) 162})
169#else /* !CONFIG_PM_SLEEP */ 163#else /* !CONFIG_FREEZER */
170static inline int frozen(struct task_struct *p) { return 0; } 164static inline int frozen(struct task_struct *p) { return 0; }
171static inline int freezing(struct task_struct *p) { return 0; } 165static inline int freezing(struct task_struct *p) { return 0; }
172static inline void set_freeze_flag(struct task_struct *p) {} 166static inline void set_freeze_flag(struct task_struct *p) {}
@@ -191,6 +185,6 @@ static inline void set_freezable_with_signal(void) {}
191#define wait_event_freezable_timeout(wq, condition, timeout) \ 185#define wait_event_freezable_timeout(wq, condition, timeout) \
192 wait_event_interruptible_timeout(wq, condition, timeout) 186 wait_event_interruptible_timeout(wq, condition, timeout)
193 187
194#endif /* !CONFIG_PM_SLEEP */ 188#endif /* !CONFIG_FREEZER */
195 189
196#endif /* FREEZER_H_INCLUDED */ 190#endif /* FREEZER_H_INCLUDED */
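A minimal freezable kthread loop, sketched from the interfaces above and assuming <linux/kthread.h>; example_do_work() is a placeholder. With CONFIG_FREEZER the thread parks itself in refrigerator() whenever the freezer asks.

	static int example_thread(void *data)
	{
		set_freezable();		/* opt in to freezing */

		while (!kthread_should_stop()) {
			try_to_freeze();	/* enters refrigerator() if frozen */
			/* example_do_work(); */
			schedule_timeout_interruptible(HZ);
		}
		return 0;
	}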
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a6a625be13fc..001ded4845b4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -63,18 +63,32 @@ extern int dir_notify_enable;
63#define MAY_ACCESS 16 63#define MAY_ACCESS 16
64#define MAY_OPEN 32 64#define MAY_OPEN 32
65 65
66#define FMODE_READ 1 66/* file is open for reading */
67#define FMODE_WRITE 2 67#define FMODE_READ ((__force fmode_t)1)
68/* file is open for writing */
69#define FMODE_WRITE ((__force fmode_t)2)
70/* file is seekable */
71#define FMODE_LSEEK ((__force fmode_t)4)
72/* file can be accessed using pread/pwrite */
73#define FMODE_PREAD ((__force fmode_t)8)
74#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */
75/* File is opened for execution with sys_execve / sys_uselib */
76#define FMODE_EXEC ((__force fmode_t)16)
77/* File is opened with O_NDELAY (only set for block devices) */
78#define FMODE_NDELAY ((__force fmode_t)32)
79/* File is opened with O_EXCL (only set for block devices) */
80#define FMODE_EXCL ((__force fmode_t)64)
 81/* File is opened using open(.., 3, ..) and is writable only for ioctls
 82 (special hack for floppy.c) */
83#define FMODE_WRITE_IOCTL ((__force fmode_t)128)
68 84
69/* Internal kernel extensions */ 85/*
70#define FMODE_LSEEK 4 86 * Don't update ctime and mtime.
71#define FMODE_PREAD 8 87 *
72#define FMODE_PWRITE FMODE_PREAD /* These go hand in hand */ 88 * Currently a special hack for the XFS open_by_handle ioctl, but we'll
73 89 * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon.
74/* File is being opened for execution. Primary users of this flag are 90 */
75 distributed filesystems that can use it to achieve correct ETXTBUSY 91#define FMODE_NOCMTIME ((__force fmode_t)2048)
76 behavior for cross-node execution/opening_for_writing of files */
77#define FMODE_EXEC 16
78 92
79#define RW_MASK 1 93#define RW_MASK 1
80#define RWA_MASK 2 94#define RWA_MASK 2
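Illustrative sketch (not part of the patch): testing the typed fmode_t bits at a driver write entry point; example_write() is a made-up method.

	static ssize_t example_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
	{
		if (!(file->f_mode & FMODE_WRITE))
			return -EBADF;

		/* ... copy_from_user() and write the data out ... */

		/* Honour the XFS open-by-handle hack: skip the timestamp update. */
		if (!(file->f_mode & FMODE_NOCMTIME))
			file_update_time(file);
		return count;
	}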
@@ -136,7 +150,7 @@ extern int dir_notify_enable;
136/* 150/*
137 * Superblock flags that can be altered by MS_REMOUNT 151 * Superblock flags that can be altered by MS_REMOUNT
138 */ 152 */
139#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK) 153#define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION)
140 154
141/* 155/*
142 * Old magic mount flag and mask 156 * Old magic mount flag and mask
@@ -310,6 +324,7 @@ struct poll_table_struct;
310struct kstatfs; 324struct kstatfs;
311struct vm_area_struct; 325struct vm_area_struct;
312struct vfsmount; 326struct vfsmount;
327struct cred;
313 328
314extern void __init inode_init(void); 329extern void __init inode_init(void);
315extern void __init inode_init_early(void); 330extern void __init inode_init_early(void);
@@ -484,13 +499,6 @@ struct address_space_operations {
484 int (*readpages)(struct file *filp, struct address_space *mapping, 499 int (*readpages)(struct file *filp, struct address_space *mapping,
485 struct list_head *pages, unsigned nr_pages); 500 struct list_head *pages, unsigned nr_pages);
486 501
487 /*
488 * ext3 requires that a successful prepare_write() call be followed
489 * by a commit_write() call - they must be balanced
490 */
491 int (*prepare_write)(struct file *, struct page *, unsigned, unsigned);
492 int (*commit_write)(struct file *, struct page *, unsigned, unsigned);
493
494 int (*write_begin)(struct file *, struct address_space *mapping, 502 int (*write_begin)(struct file *, struct address_space *mapping,
495 loff_t pos, unsigned len, unsigned flags, 503 loff_t pos, unsigned len, unsigned flags,
496 struct page **pagep, void **fsdata); 504 struct page **pagep, void **fsdata);
@@ -825,10 +833,10 @@ struct file {
825 const struct file_operations *f_op; 833 const struct file_operations *f_op;
826 atomic_long_t f_count; 834 atomic_long_t f_count;
827 unsigned int f_flags; 835 unsigned int f_flags;
828 mode_t f_mode; 836 fmode_t f_mode;
829 loff_t f_pos; 837 loff_t f_pos;
830 struct fown_struct f_owner; 838 struct fown_struct f_owner;
831 unsigned int f_uid, f_gid; 839 const struct cred *f_cred;
832 struct file_ra_state f_ra; 840 struct file_ra_state f_ra;
833 841
834 u64 f_version; 842 u64 f_version;
@@ -1037,7 +1045,6 @@ extern int vfs_setlease(struct file *, long, struct file_lock **);
1037extern int lease_modify(struct file_lock **, int); 1045extern int lease_modify(struct file_lock **, int);
1038extern int lock_may_read(struct inode *, loff_t start, unsigned long count); 1046extern int lock_may_read(struct inode *, loff_t start, unsigned long count);
1039extern int lock_may_write(struct inode *, loff_t start, unsigned long count); 1047extern int lock_may_write(struct inode *, loff_t start, unsigned long count);
1040extern struct seq_operations locks_seq_operations;
1041#else /* !CONFIG_FILE_LOCKING */ 1048#else /* !CONFIG_FILE_LOCKING */
1042#define fcntl_getlk(a, b) ({ -EINVAL; }) 1049#define fcntl_getlk(a, b) ({ -EINVAL; })
1043#define fcntl_setlk(a, b, c, d) ({ -EACCES; }) 1050#define fcntl_setlk(a, b, c, d) ({ -EACCES; })
@@ -1152,6 +1159,7 @@ struct super_block {
1152 char s_id[32]; /* Informational name */ 1159 char s_id[32]; /* Informational name */
1153 1160
1154 void *s_fs_info; /* Filesystem private info */ 1161 void *s_fs_info; /* Filesystem private info */
1162 fmode_t s_mode;
1155 1163
1156 /* 1164 /*
1157 * The next field is for VFS *only*. No filesystems have any business 1165 * The next field is for VFS *only*. No filesystems have any business
@@ -1195,7 +1203,7 @@ enum {
1195#define has_fs_excl() atomic_read(&current->fs_excl) 1203#define has_fs_excl() atomic_read(&current->fs_excl)
1196 1204
1197#define is_owner_or_cap(inode) \ 1205#define is_owner_or_cap(inode) \
1198 ((current->fsuid == (inode)->i_uid) || capable(CAP_FOWNER)) 1206 ((current_fsuid() == (inode)->i_uid) || capable(CAP_FOWNER))
1199 1207
1200/* not quite ready to be deprecated, but... */ 1208/* not quite ready to be deprecated, but... */
1201extern void lock_super(struct super_block *); 1209extern void lock_super(struct super_block *);
@@ -1266,20 +1274,7 @@ int generic_osync_inode(struct inode *, struct address_space *, int);
1266 * to have different dirent layouts depending on the binary type. 1274 * to have different dirent layouts depending on the binary type.
1267 */ 1275 */
1268typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); 1276typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned);
1269 1277struct block_device_operations;
1270struct block_device_operations {
1271 int (*open) (struct inode *, struct file *);
1272 int (*release) (struct inode *, struct file *);
1273 int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
1274 long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
1275 long (*compat_ioctl) (struct file *, unsigned, unsigned long);
1276 int (*direct_access) (struct block_device *, sector_t,
1277 void **, unsigned long *);
1278 int (*media_changed) (struct gendisk *);
1279 int (*revalidate_disk) (struct gendisk *);
1280 int (*getgeo)(struct block_device *, struct hd_geometry *);
1281 struct module *owner;
1282};
1283 1278
1284/* These macros are for out of kernel modules to test that 1279/* These macros are for out of kernel modules to test that
1285 * the kernel supports the unlocked_ioctl and compat_ioctl 1280 * the kernel supports the unlocked_ioctl and compat_ioctl
@@ -1593,7 +1588,6 @@ extern int get_sb_pseudo(struct file_system_type *, char *,
1593 struct vfsmount *mnt); 1588 struct vfsmount *mnt);
1594extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); 1589extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
1595int __put_super_and_need_restart(struct super_block *sb); 1590int __put_super_and_need_restart(struct super_block *sb);
1596void unnamed_dev_init(void);
1597 1591
1598/* Alas, no aliases. Too much hassle with bringing module.h everywhere */ 1592/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
1599#define fops_get(fops) \ 1593#define fops_get(fops) \
@@ -1689,7 +1683,8 @@ extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
1689extern long do_sys_open(int dfd, const char __user *filename, int flags, 1683extern long do_sys_open(int dfd, const char __user *filename, int flags,
1690 int mode); 1684 int mode);
1691extern struct file *filp_open(const char *, int, int); 1685extern struct file *filp_open(const char *, int, int);
1692extern struct file * dentry_open(struct dentry *, struct vfsmount *, int); 1686extern struct file * dentry_open(struct dentry *, struct vfsmount *, int,
1687 const struct cred *);
1693extern int filp_close(struct file *, fl_owner_t id); 1688extern int filp_close(struct file *, fl_owner_t id);
1694extern char * getname(const char __user *); 1689extern char * getname(const char __user *);
1695 1690
@@ -1714,7 +1709,7 @@ extern struct block_device *bdget(dev_t);
1714extern void bd_set_size(struct block_device *, loff_t size); 1709extern void bd_set_size(struct block_device *, loff_t size);
1715extern void bd_forget(struct inode *inode); 1710extern void bd_forget(struct inode *inode);
1716extern void bdput(struct block_device *); 1711extern void bdput(struct block_device *);
1717extern struct block_device *open_by_devnum(dev_t, unsigned); 1712extern struct block_device *open_by_devnum(dev_t, fmode_t);
1718#else 1713#else
1719static inline void bd_forget(struct inode *inode) {} 1714static inline void bd_forget(struct inode *inode) {}
1720#endif 1715#endif
@@ -1724,13 +1719,10 @@ extern const struct file_operations bad_sock_fops;
1724extern const struct file_operations def_fifo_fops; 1719extern const struct file_operations def_fifo_fops;
1725#ifdef CONFIG_BLOCK 1720#ifdef CONFIG_BLOCK
1726extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long); 1721extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
1727extern int blkdev_ioctl(struct inode *, struct file *, unsigned, unsigned long); 1722extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
1728extern int blkdev_driver_ioctl(struct inode *inode, struct file *file,
1729 struct gendisk *disk, unsigned cmd,
1730 unsigned long arg);
1731extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); 1723extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
1732extern int blkdev_get(struct block_device *, mode_t, unsigned); 1724extern int blkdev_get(struct block_device *, fmode_t);
1733extern int blkdev_put(struct block_device *); 1725extern int blkdev_put(struct block_device *, fmode_t);
1734extern int bd_claim(struct block_device *, void *); 1726extern int bd_claim(struct block_device *, void *);
1735extern void bd_release(struct block_device *); 1727extern void bd_release(struct block_device *);
1736#ifdef CONFIG_SYSFS 1728#ifdef CONFIG_SYSFS
@@ -1761,9 +1753,10 @@ extern void chrdev_show(struct seq_file *,off_t);
1761extern const char *__bdevname(dev_t, char *buffer); 1753extern const char *__bdevname(dev_t, char *buffer);
1762extern const char *bdevname(struct block_device *bdev, char *buffer); 1754extern const char *bdevname(struct block_device *bdev, char *buffer);
1763extern struct block_device *lookup_bdev(const char *); 1755extern struct block_device *lookup_bdev(const char *);
1764extern struct block_device *open_bdev_excl(const char *, int, void *); 1756extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *);
1765extern void close_bdev_excl(struct block_device *); 1757extern void close_bdev_exclusive(struct block_device *, fmode_t);
1766extern void blkdev_show(struct seq_file *,off_t); 1758extern void blkdev_show(struct seq_file *,off_t);
1759
1767#else 1760#else
1768#define BLKDEV_MAJOR_HASH_SIZE 0 1761#define BLKDEV_MAJOR_HASH_SIZE 0
1769#endif 1762#endif
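A sketch (not part of the patch) of claiming a backing block device with the renamed fmode_t-based helpers; example_claim_bdev() is made up and "holder" is only a cookie identifying the claimant.

	static struct block_device *example_claim_bdev(const char *path, void *holder)
	{
		struct block_device *bdev;

		bdev = open_bdev_exclusive(path, FMODE_READ | FMODE_WRITE, holder);
		if (IS_ERR(bdev))
			return bdev;	/* e.g. -ENOENT or -EBUSY wrapped in ERR_PTR */

		/* ... use the device, then release it with the same mode: */
		/* close_bdev_exclusive(bdev, FMODE_READ | FMODE_WRITE); */
		return bdev;
	}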
@@ -1852,6 +1845,11 @@ extern int inode_permission(struct inode *, int);
1852extern int generic_permission(struct inode *, int, 1845extern int generic_permission(struct inode *, int,
1853 int (*check_acl)(struct inode *, int)); 1846 int (*check_acl)(struct inode *, int));
1854 1847
1848static inline bool execute_ok(struct inode *inode)
1849{
1850 return (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode);
1851}
1852
1855extern int get_write_access(struct inode *); 1853extern int get_write_access(struct inode *);
1856extern int deny_write_access(struct file *); 1854extern int deny_write_access(struct file *);
1857static inline void put_write_access(struct inode * inode) 1855static inline void put_write_access(struct inode * inode)
@@ -1887,7 +1885,9 @@ extern loff_t default_llseek(struct file *file, loff_t offset, int origin);
1887 1885
1888extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); 1886extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
1889 1887
1888extern struct inode * inode_init_always(struct super_block *, struct inode *);
1890extern void inode_init_once(struct inode *); 1889extern void inode_init_once(struct inode *);
1890extern void inode_add_to_lists(struct super_block *, struct inode *);
1891extern void iput(struct inode *); 1891extern void iput(struct inode *);
1892extern struct inode * igrab(struct inode *); 1892extern struct inode * igrab(struct inode *);
1893extern ino_t iunique(struct super_block *, ino_t); 1893extern ino_t iunique(struct super_block *, ino_t);
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 4e625e0094c8..d9051d717d27 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -47,11 +47,7 @@
47struct gianfar_platform_data { 47struct gianfar_platform_data {
48 /* device specific information */ 48 /* device specific information */
49 u32 device_flags; 49 u32 device_flags;
50 /* board specific information */ 50 char bus_id[BUS_ID_SIZE];
51 u32 board_flags;
52 char bus_id[MII_BUS_ID_SIZE];
53 u32 phy_id;
54 u8 mac_addr[6];
55 phy_interface_t interface; 51 phy_interface_t interface;
56}; 52};
57 53
@@ -60,17 +56,6 @@ struct gianfar_mdio_data {
60 int irq[32]; 56 int irq[32];
61}; 57};
62 58
63/* Flags related to gianfar device features */
64#define FSL_GIANFAR_DEV_HAS_GIGABIT 0x00000001
65#define FSL_GIANFAR_DEV_HAS_COALESCE 0x00000002
66#define FSL_GIANFAR_DEV_HAS_RMON 0x00000004
67#define FSL_GIANFAR_DEV_HAS_MULTI_INTR 0x00000008
68#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
69#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
70#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
71#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
72#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
73
74/* Flags in gianfar_platform_data */ 59/* Flags in gianfar_platform_data */
75#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */ 60#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */
76#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */ 61#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index a89513188ce7..00fbd5b245c9 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -188,7 +188,7 @@ static inline void fsnotify_close(struct file *file)
188 struct dentry *dentry = file->f_path.dentry; 188 struct dentry *dentry = file->f_path.dentry;
189 struct inode *inode = dentry->d_inode; 189 struct inode *inode = dentry->d_inode;
190 const char *name = dentry->d_name.name; 190 const char *name = dentry->d_name.name;
191 mode_t mode = file->f_mode; 191 fmode_t mode = file->f_mode;
192 u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE; 192 u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE;
193 193
194 if (S_ISDIR(inode->i_mode)) 194 if (S_ISDIR(inode->i_mode))
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index bb384068272e..677432b9cb7e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,10 +1,17 @@
1#ifndef _LINUX_FTRACE_H 1#ifndef _LINUX_FTRACE_H
2#define _LINUX_FTRACE_H 2#define _LINUX_FTRACE_H
3 3
4#ifdef CONFIG_FTRACE
5
6#include <linux/linkage.h> 4#include <linux/linkage.h>
7#include <linux/fs.h> 5#include <linux/fs.h>
6#include <linux/ktime.h>
7#include <linux/init.h>
8#include <linux/types.h>
9#include <linux/module.h>
10#include <linux/kallsyms.h>
11#include <linux/bitops.h>
12#include <linux/sched.h>
13
14#ifdef CONFIG_FUNCTION_TRACER
8 15
9extern int ftrace_enabled; 16extern int ftrace_enabled;
10extern int 17extern int
@@ -19,6 +26,45 @@ struct ftrace_ops {
19 struct ftrace_ops *next; 26 struct ftrace_ops *next;
20}; 27};
21 28
29extern int function_trace_stop;
30
31/*
32 * Type of the current tracing.
33 */
34enum ftrace_tracing_type_t {
35 FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
36 FTRACE_TYPE_RETURN, /* Hook the return of the function */
37};
38
39/* Current tracing type, default is FTRACE_TYPE_ENTER */
40extern enum ftrace_tracing_type_t ftrace_tracing_type;
41
42/**
43 * ftrace_stop - stop function tracer.
44 *
 45 * A quick way to stop the function tracer. Note this is an on/off switch;
 46 * it does not nest the way preempt_disable does.
47 * This does not disable the calling of mcount, it only stops the
48 * calling of functions from mcount.
49 */
50static inline void ftrace_stop(void)
51{
52 function_trace_stop = 1;
53}
54
55/**
56 * ftrace_start - start the function tracer.
57 *
 58 * This function is the inverse of ftrace_stop. This does not enable
 59 * function tracing if the function tracer is disabled. This only
60 * sets the function tracer flag to continue calling the functions
61 * from mcount.
62 */
63static inline void ftrace_start(void)
64{
65 function_trace_stop = 0;
66}
67
22/* 68/*
23 * The ftrace_ops must be a static and should also 69 * The ftrace_ops must be a static and should also
24 * be read_mostly. These functions do modify read_mostly variables 70 * be read_mostly. These functions do modify read_mostly variables
@@ -32,15 +78,26 @@ void clear_ftrace_function(void);
32 78
33extern void ftrace_stub(unsigned long a0, unsigned long a1); 79extern void ftrace_stub(unsigned long a0, unsigned long a1);
34 80
35#else /* !CONFIG_FTRACE */ 81#else /* !CONFIG_FUNCTION_TRACER */
36# define register_ftrace_function(ops) do { } while (0) 82# define register_ftrace_function(ops) do { } while (0)
37# define unregister_ftrace_function(ops) do { } while (0) 83# define unregister_ftrace_function(ops) do { } while (0)
38# define clear_ftrace_function(ops) do { } while (0) 84# define clear_ftrace_function(ops) do { } while (0)
39#endif /* CONFIG_FTRACE */ 85static inline void ftrace_kill(void) { }
86static inline void ftrace_stop(void) { }
87static inline void ftrace_start(void) { }
88#endif /* CONFIG_FUNCTION_TRACER */
89
90#ifdef CONFIG_STACK_TRACER
91extern int stack_tracer_enabled;
92int
93stack_trace_sysctl(struct ctl_table *table, int write,
94 struct file *file, void __user *buffer, size_t *lenp,
95 loff_t *ppos);
96#endif
40 97
41#ifdef CONFIG_DYNAMIC_FTRACE 98#ifdef CONFIG_DYNAMIC_FTRACE
42# define FTRACE_HASHBITS 10 99/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */
43# define FTRACE_HASHSIZE (1<<FTRACE_HASHBITS) 100#include <asm/ftrace.h>
44 101
45enum { 102enum {
46 FTRACE_FL_FREE = (1 << 0), 103 FTRACE_FL_FREE = (1 << 0),
@@ -53,9 +110,10 @@ enum {
53}; 110};
54 111
55struct dyn_ftrace { 112struct dyn_ftrace {
56 struct hlist_node node; 113 struct list_head list;
57 unsigned long ip; /* address of mcount call-site */ 114 unsigned long ip; /* address of mcount call-site */
58 unsigned long flags; 115 unsigned long flags;
116 struct dyn_arch_ftrace arch;
59}; 117};
60 118
61int ftrace_force_update(void); 119int ftrace_force_update(void);
@@ -63,47 +121,103 @@ void ftrace_set_filter(unsigned char *buf, int len, int reset);
63 121
64/* defined in arch */ 122/* defined in arch */
65extern int ftrace_ip_converted(unsigned long ip); 123extern int ftrace_ip_converted(unsigned long ip);
66extern unsigned char *ftrace_nop_replace(void);
67extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
68extern int ftrace_dyn_arch_init(void *data); 124extern int ftrace_dyn_arch_init(void *data);
69extern int ftrace_mcount_set(unsigned long *data);
70extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
71 unsigned char *new_code);
72extern int ftrace_update_ftrace_func(ftrace_func_t func); 125extern int ftrace_update_ftrace_func(ftrace_func_t func);
73extern void ftrace_caller(void); 126extern void ftrace_caller(void);
74extern void ftrace_call(void); 127extern void ftrace_call(void);
75extern void mcount_call(void); 128extern void mcount_call(void);
129#ifdef CONFIG_FUNCTION_GRAPH_TRACER
130extern void ftrace_graph_caller(void);
131extern int ftrace_enable_ftrace_graph_caller(void);
132extern int ftrace_disable_ftrace_graph_caller(void);
133#else
134static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
135static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
136#endif
137
138/**
 139 * ftrace_make_nop - convert a call site into a nop
140 * @mod: module structure if called by module load initialization
141 * @rec: the mcount call site record
142 * @addr: the address that the call site should be calling
143 *
144 * This is a very sensitive operation and great care needs
145 * to be taken by the arch. The operation should carefully
146 * read the location, check to see if what is read is indeed
147 * what we expect it to be, and then on success of the compare,
148 * it should write to the location.
149 *
150 * The code segment at @rec->ip should be a caller to @addr
151 *
152 * Return must be:
153 * 0 on success
154 * -EFAULT on error reading the location
155 * -EINVAL on a failed compare of the contents
156 * -EPERM on error writing to the location
157 * Any other value will be considered a failure.
158 */
159extern int ftrace_make_nop(struct module *mod,
160 struct dyn_ftrace *rec, unsigned long addr);
161
162/**
163 * ftrace_make_call - convert a nop call site into a call to addr
164 * @rec: the mcount call site record
165 * @addr: the address that the call site should call
166 *
167 * This is a very sensitive operation and great care needs
168 * to be taken by the arch. The operation should carefully
169 * read the location, check to see if what is read is indeed
170 * what we expect it to be, and then on success of the compare,
171 * it should write to the location.
172 *
173 * The code segment at @rec->ip should be a nop
174 *
175 * Return must be:
176 * 0 on success
177 * -EFAULT on error reading the location
178 * -EINVAL on a failed compare of the contents
179 * -EPERM on error writing to the location
180 * Any other value will be considered a failure.
181 */
182extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
183
184
185/* May be defined in arch */
186extern int ftrace_arch_read_dyn_info(char *buf, int size);
76 187
77extern int skip_trace(unsigned long ip); 188extern int skip_trace(unsigned long ip);
78 189
79void ftrace_disable_daemon(void); 190extern void ftrace_release(void *start, unsigned long size);
80void ftrace_enable_daemon(void);
81 191
192extern void ftrace_disable_daemon(void);
193extern void ftrace_enable_daemon(void);
82#else 194#else
83# define skip_trace(ip) ({ 0; }) 195# define skip_trace(ip) ({ 0; })
84# define ftrace_force_update() ({ 0; }) 196# define ftrace_force_update() ({ 0; })
85# define ftrace_set_filter(buf, len, reset) do { } while (0) 197# define ftrace_set_filter(buf, len, reset) do { } while (0)
86# define ftrace_disable_daemon() do { } while (0) 198# define ftrace_disable_daemon() do { } while (0)
87# define ftrace_enable_daemon() do { } while (0) 199# define ftrace_enable_daemon() do { } while (0)
200static inline void ftrace_release(void *start, unsigned long size) { }
88#endif /* CONFIG_DYNAMIC_FTRACE */ 201#endif /* CONFIG_DYNAMIC_FTRACE */
89 202
90/* totally disable ftrace - can not re-enable after this */ 203/* totally disable ftrace - can not re-enable after this */
91void ftrace_kill(void); 204void ftrace_kill(void);
92void ftrace_kill_atomic(void);
93 205
94static inline void tracer_disable(void) 206static inline void tracer_disable(void)
95{ 207{
96#ifdef CONFIG_FTRACE 208#ifdef CONFIG_FUNCTION_TRACER
97 ftrace_enabled = 0; 209 ftrace_enabled = 0;
98#endif 210#endif
99} 211}
100 212
101/* Ftrace disable/restore without lock. Some synchronization mechanism 213/*
214 * Ftrace disable/restore without lock. Some synchronization mechanism
102 * must be used to prevent ftrace_enabled to be changed between 215 * must be used to prevent ftrace_enabled to be changed between
103 * disable/restore. */ 216 * disable/restore.
217 */
104static inline int __ftrace_enabled_save(void) 218static inline int __ftrace_enabled_save(void)
105{ 219{
106#ifdef CONFIG_FTRACE 220#ifdef CONFIG_FUNCTION_TRACER
107 int saved_ftrace_enabled = ftrace_enabled; 221 int saved_ftrace_enabled = ftrace_enabled;
108 ftrace_enabled = 0; 222 ftrace_enabled = 0;
109 return saved_ftrace_enabled; 223 return saved_ftrace_enabled;
@@ -114,7 +228,7 @@ static inline int __ftrace_enabled_save(void)
114 228
115static inline void __ftrace_enabled_restore(int enabled) 229static inline void __ftrace_enabled_restore(int enabled)
116{ 230{
117#ifdef CONFIG_FTRACE 231#ifdef CONFIG_FUNCTION_TRACER
118 ftrace_enabled = enabled; 232 ftrace_enabled = enabled;
119#endif 233#endif
120} 234}
@@ -155,11 +269,227 @@ static inline void __ftrace_enabled_restore(int enabled)
155#endif 269#endif
156 270
157#ifdef CONFIG_TRACING 271#ifdef CONFIG_TRACING
272extern int ftrace_dump_on_oops;
273
274extern void tracing_start(void);
275extern void tracing_stop(void);
276extern void ftrace_off_permanent(void);
277
158extern void 278extern void
159ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); 279ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
280
281/**
282 * ftrace_printk - printf formatting in the ftrace buffer
283 * @fmt: the printf format for printing
284 *
285 * Note: __ftrace_printk is an internal function for ftrace_printk and
286 * the @ip is passed in via the ftrace_printk macro.
287 *
288 * This function allows a kernel developer to debug fast path sections
 289 * that printk is not appropriate for. By scattering printk-like
 290 * tracing throughout the code, a developer can quickly see
291 * where problems are occurring.
292 *
293 * This is intended as a debugging tool for the developer only.
294 * Please refrain from leaving ftrace_printks scattered around in
295 * your code.
296 */
297# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
298extern int
299__ftrace_printk(unsigned long ip, const char *fmt, ...)
300 __attribute__ ((format (printf, 2, 3)));
301extern void ftrace_dump(void);
160#else 302#else
161static inline void 303static inline void
162ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } 304ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
305static inline int
306ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
307
308static inline void tracing_start(void) { }
309static inline void tracing_stop(void) { }
310static inline void ftrace_off_permanent(void) { }
311static inline int
312ftrace_printk(const char *fmt, ...)
313{
314 return 0;
315}
316static inline void ftrace_dump(void) { }
317#endif
318
319#ifdef CONFIG_FTRACE_MCOUNT_RECORD
320extern void ftrace_init(void);
321extern void ftrace_init_module(struct module *mod,
322 unsigned long *start, unsigned long *end);
323#else
324static inline void ftrace_init(void) { }
325static inline void
326ftrace_init_module(struct module *mod,
327 unsigned long *start, unsigned long *end) { }
328#endif
329
330enum {
331 POWER_NONE = 0,
332 POWER_CSTATE = 1,
333 POWER_PSTATE = 2,
334};
335
336struct power_trace {
337#ifdef CONFIG_POWER_TRACER
338 ktime_t stamp;
339 ktime_t end;
340 int type;
341 int state;
342#endif
343};
344
345#ifdef CONFIG_POWER_TRACER
346extern void trace_power_start(struct power_trace *it, unsigned int type,
347 unsigned int state);
348extern void trace_power_mark(struct power_trace *it, unsigned int type,
349 unsigned int state);
350extern void trace_power_end(struct power_trace *it);
351#else
352static inline void trace_power_start(struct power_trace *it, unsigned int type,
353 unsigned int state) { }
354static inline void trace_power_mark(struct power_trace *it, unsigned int type,
355 unsigned int state) { }
356static inline void trace_power_end(struct power_trace *it) { }
357#endif
358
359
360/*
361 * Structure that defines an entry function trace.
362 */
363struct ftrace_graph_ent {
364 unsigned long func; /* Current function */
365 int depth;
366};
367
368/*
369 * Structure that defines a return function trace.
370 */
371struct ftrace_graph_ret {
372 unsigned long func; /* Current function */
373 unsigned long long calltime;
374 unsigned long long rettime;
375 /* Number of functions that overran the depth limit for current task */
376 unsigned long overrun;
377 int depth;
378};
379
380#ifdef CONFIG_FUNCTION_GRAPH_TRACER
381
382/*
383 * Sometimes we don't want to trace a function with the function
 384 * graph tracer, but we still want it traced by the usual function
385 * tracer if the function graph tracer is not configured.
386 */
387#define __notrace_funcgraph notrace
388
389/*
 390 * We want to know which function is an entrypoint of a hardirq.
 391 * That will help us to put a marker in the trace output.
392 */
393#define __irq_entry __attribute__((__section__(".irqentry.text")))
394
395/* Limits of hardirq entrypoints */
396extern char __irqentry_text_start[];
397extern char __irqentry_text_end[];
398
399#define FTRACE_RETFUNC_DEPTH 50
400#define FTRACE_RETSTACK_ALLOC_SIZE 32
 401/* Type of the callback handlers for tracing function graph */
402typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
403typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
404
405extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
406 trace_func_graph_ent_t entryfunc);
407
408extern void ftrace_graph_stop(void);
409
410/* The current handlers in use */
411extern trace_func_graph_ret_t ftrace_graph_return;
412extern trace_func_graph_ent_t ftrace_graph_entry;
413
414extern void unregister_ftrace_graph(void);
415
416extern void ftrace_graph_init_task(struct task_struct *t);
417extern void ftrace_graph_exit_task(struct task_struct *t);
418
419static inline int task_curr_ret_stack(struct task_struct *t)
420{
421 return t->curr_ret_stack;
422}
423
424static inline void pause_graph_tracing(void)
425{
426 atomic_inc(&current->tracing_graph_pause);
427}
428
429static inline void unpause_graph_tracing(void)
430{
431 atomic_dec(&current->tracing_graph_pause);
432}
433#else
434
435#define __notrace_funcgraph
436#define __irq_entry
437
438static inline void ftrace_graph_init_task(struct task_struct *t) { }
439static inline void ftrace_graph_exit_task(struct task_struct *t) { }
440
441static inline int task_curr_ret_stack(struct task_struct *tsk)
442{
443 return -1;
444}
445
446static inline void pause_graph_tracing(void) { }
447static inline void unpause_graph_tracing(void) { }
163#endif 448#endif
164 449
450#ifdef CONFIG_TRACING
451#include <linux/sched.h>
452
453/* flags for current->trace */
454enum {
455 TSK_TRACE_FL_TRACE_BIT = 0,
456 TSK_TRACE_FL_GRAPH_BIT = 1,
457};
458enum {
459 TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
460 TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
461};
462
463static inline void set_tsk_trace_trace(struct task_struct *tsk)
464{
465 set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
466}
467
468static inline void clear_tsk_trace_trace(struct task_struct *tsk)
469{
470 clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
471}
472
473static inline int test_tsk_trace_trace(struct task_struct *tsk)
474{
475 return tsk->trace & TSK_TRACE_FL_TRACE;
476}
477
478static inline void set_tsk_trace_graph(struct task_struct *tsk)
479{
480 set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
481}
482
483static inline void clear_tsk_trace_graph(struct task_struct *tsk)
484{
485 clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
486}
487
488static inline int test_tsk_trace_graph(struct task_struct *tsk)
489{
490 return tsk->trace & TSK_TRACE_FL_GRAPH;
491}
492
493#endif /* CONFIG_TRACING */
494
165#endif /* _LINUX_FTRACE_H */ 495#endif /* _LINUX_FTRACE_H */
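The ftrace.h additions above (ftrace_printk and the graph-tracer pause helpers) are easiest to see in a short usage sketch. Only the ftrace interfaces come from the header; the helper names below are hypothetical.

#include <linux/ftrace.h>

/* Hypothetical debug hook: log into the ftrace ring buffer instead of printk. */
static void example_fast_path(unsigned long val)
{
	ftrace_printk("fast path hit, val=%lu\n", val);
}

/* Hypothetical wrapper: keep the function graph tracer quiet across a noisy region. */
static void example_run_untraced(void (*fn)(void))
{
	pause_graph_tracing();
	fn();
	unpause_graph_tracing();
}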
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
new file mode 100644
index 000000000000..366a054d0b05
--- /dev/null
+++ b/include/linux/ftrace_irq.h
@@ -0,0 +1,13 @@
1#ifndef _LINUX_FTRACE_IRQ_H
2#define _LINUX_FTRACE_IRQ_H
3
4
5#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
6extern void ftrace_nmi_enter(void);
7extern void ftrace_nmi_exit(void);
8#else
9static inline void ftrace_nmi_enter(void) { }
10static inline void ftrace_nmi_exit(void) { }
11#endif
12
13#endif /* _LINUX_FTRACE_IRQ_H */
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 265635dc9908..350fe9767bbc 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -17,8 +17,14 @@
17 * - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in 17 * - add lock_owner field to fuse_setattr_in, fuse_read_in and fuse_write_in
18 * - add blksize field to fuse_attr 18 * - add blksize field to fuse_attr
19 * - add file flags field to fuse_read_in and fuse_write_in 19 * - add file flags field to fuse_read_in and fuse_write_in
20 *
21 * 7.10
22 * - add nonseekable open flag
20 */ 23 */
21 24
25#ifndef _LINUX_FUSE_H
26#define _LINUX_FUSE_H
27
22#include <asm/types.h> 28#include <asm/types.h>
23#include <linux/major.h> 29#include <linux/major.h>
24 30
@@ -26,7 +32,7 @@
26#define FUSE_KERNEL_VERSION 7 32#define FUSE_KERNEL_VERSION 7
27 33
28/** Minor version number of this interface */ 34/** Minor version number of this interface */
29#define FUSE_KERNEL_MINOR_VERSION 9 35#define FUSE_KERNEL_MINOR_VERSION 10
30 36
31/** The node ID of the root inode */ 37/** The node ID of the root inode */
32#define FUSE_ROOT_ID 1 38#define FUSE_ROOT_ID 1
@@ -98,9 +104,11 @@ struct fuse_file_lock {
98 * 104 *
99 * FOPEN_DIRECT_IO: bypass page cache for this open file 105 * FOPEN_DIRECT_IO: bypass page cache for this open file
100 * FOPEN_KEEP_CACHE: don't invalidate the data cache on open 106 * FOPEN_KEEP_CACHE: don't invalidate the data cache on open
107 * FOPEN_NONSEEKABLE: the file is not seekable
101 */ 108 */
102#define FOPEN_DIRECT_IO (1 << 0) 109#define FOPEN_DIRECT_IO (1 << 0)
103#define FOPEN_KEEP_CACHE (1 << 1) 110#define FOPEN_KEEP_CACHE (1 << 1)
111#define FOPEN_NONSEEKABLE (1 << 2)
104 112
105/** 113/**
106 * INIT request/reply flags 114 * INIT request/reply flags
@@ -409,3 +417,5 @@ struct fuse_dirent {
409#define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1)) 417#define FUSE_DIRENT_ALIGN(x) (((x) + sizeof(__u64) - 1) & ~(sizeof(__u64) - 1))
410#define FUSE_DIRENT_SIZE(d) \ 418#define FUSE_DIRENT_SIZE(d) \
411 FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen) 419 FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
420
421#endif /* _LINUX_FUSE_H */
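A sketch of how a filesystem server might use the new FOPEN_NONSEEKABLE bit when answering an open request; struct fuse_open_out and the flag names come from this header, while the helper itself is made up for illustration.

#include <linux/fuse.h>

/* Hypothetical open reply for a pipe-like file that cannot be seeked. */
static void example_fill_open_reply(struct fuse_open_out *out, __u64 fh)
{
	out->fh = fh;
	out->open_flags = FOPEN_DIRECT_IO | FOPEN_NONSEEKABLE;
}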
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 586ab56a3ec3..3bf5bb5a34f9 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -25,7 +25,8 @@ union ktime;
25#define FUTEX_WAKE_BITSET 10 25#define FUTEX_WAKE_BITSET 10
26 26
27#define FUTEX_PRIVATE_FLAG 128 27#define FUTEX_PRIVATE_FLAG 128
28#define FUTEX_CMD_MASK ~FUTEX_PRIVATE_FLAG 28#define FUTEX_CLOCK_REALTIME 256
29#define FUTEX_CMD_MASK ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
29 30
30#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG) 31#define FUTEX_WAIT_PRIVATE (FUTEX_WAIT | FUTEX_PRIVATE_FLAG)
31#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG) 32#define FUTEX_WAKE_PRIVATE (FUTEX_WAKE | FUTEX_PRIVATE_FLAG)
@@ -164,6 +165,8 @@ union futex_key {
164 } both; 165 } both;
165}; 166};
166 167
168#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
169
167#ifdef CONFIG_FUTEX 170#ifdef CONFIG_FUTEX
168extern void exit_robust_list(struct task_struct *curr); 171extern void exit_robust_list(struct task_struct *curr);
169extern void exit_pi_state_list(struct task_struct *curr); 172extern void exit_pi_state_list(struct task_struct *curr);
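The new FUTEX_CLOCK_REALTIME bit rides in the op word alongside FUTEX_PRIVATE_FLAG, and the widened FUTEX_CMD_MASK strips both before the command is dispatched. A minimal sketch of that decoding (the function is illustrative, not the actual sys_futex code):

#include <linux/futex.h>

/* Illustrative decode of a futex op word into command and modifier bits. */
static int example_decode_op(int op, int *realtime, int *shared)
{
	*realtime = !!(op & FUTEX_CLOCK_REALTIME);	/* timeout is absolute CLOCK_REALTIME */
	*shared   = !(op & FUTEX_PRIVATE_FLAG);		/* key lookup crosses address spaces */

	return op & FUTEX_CMD_MASK;			/* e.g. FUTEX_WAIT_BITSET */
}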
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index f64e29c0ef3f..0cd825f7363a 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -146,10 +146,11 @@ static inline void gameport_unpin_driver(struct gameport *gameport)
146 mutex_unlock(&gameport->drv_mutex); 146 mutex_unlock(&gameport->drv_mutex);
147} 147}
148 148
149void __gameport_register_driver(struct gameport_driver *drv, struct module *owner); 149int __gameport_register_driver(struct gameport_driver *drv,
150static inline void gameport_register_driver(struct gameport_driver *drv) 150 struct module *owner, const char *mod_name);
151static inline int __must_check gameport_register_driver(struct gameport_driver *drv)
151{ 152{
152 __gameport_register_driver(drv, THIS_MODULE); 153 return __gameport_register_driver(drv, THIS_MODULE, KBUILD_MODNAME);
153} 154}
154 155
155void gameport_unregister_driver(struct gameport_driver *drv); 156void gameport_unregister_driver(struct gameport_driver *drv);
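gameport_register_driver() now returns an int and is __must_check, so module init paths have to propagate the error. A sketch assuming a made-up example_driver:

#include <linux/gameport.h>
#include <linux/module.h>

static struct gameport_driver example_driver;	/* hypothetical driver */

static int __init example_init(void)
{
	/* The return value can no longer be ignored. */
	return gameport_register_driver(&example_driver);
}

static void __exit example_exit(void)
{
	gameport_unregister_driver(&example_driver);
}

module_init(example_init);
module_exit(example_exit);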
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 206cdf96c3a7..16948eaecae3 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -25,9 +25,6 @@ extern struct device_type part_type;
25extern struct kobject *block_depr; 25extern struct kobject *block_depr;
26extern struct class block_class; 26extern struct class block_class;
27 27
28extern const struct seq_operations partitions_op;
29extern const struct seq_operations diskstats_op;
30
31enum { 28enum {
32/* These three have identical behaviour; use the second one if DOS FDISK gets 29/* These three have identical behaviour; use the second one if DOS FDISK gets
33 confused about extended/logical partitions starting past cylinder 1023. */ 30 confused about extended/logical partitions starting past cylinder 1023. */
@@ -129,6 +126,7 @@ struct blk_scsi_cmd_filter {
129struct disk_part_tbl { 126struct disk_part_tbl {
130 struct rcu_head rcu_head; 127 struct rcu_head rcu_head;
131 int len; 128 int len;
129 struct hd_struct *last_lookup;
132 struct hd_struct *part[]; 130 struct hd_struct *part[];
133}; 131};
134 132
@@ -525,7 +523,9 @@ extern char *disk_name (struct gendisk *hd, int partno, char *buf);
525 523
526extern int disk_expand_part_tbl(struct gendisk *disk, int target); 524extern int disk_expand_part_tbl(struct gendisk *disk, int target);
527extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); 525extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
528extern int __must_check add_partition(struct gendisk *, int, sector_t, sector_t, int); 526extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
527 int partno, sector_t start,
528 sector_t len, int flags);
529extern void delete_partition(struct gendisk *, int); 529extern void delete_partition(struct gendisk *, int);
530extern void printk_all_partitions(void); 530extern void printk_all_partitions(void);
531 531
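add_partition() now hands back the new struct hd_struct * (or an ERR_PTR) instead of a plain int, so callers move to the IS_ERR/PTR_ERR pattern. A hedged sketch of an adapted caller (the wrapper is invented for illustration):

#include <linux/err.h>
#include <linux/genhd.h>

/* Hypothetical caller creating one partition and propagating errors. */
static int example_add_partition(struct gendisk *disk, int partno,
				 sector_t start, sector_t len)
{
	struct hd_struct *part = add_partition(disk, partno, start, len, 0);

	if (IS_ERR(part))
		return PTR_ERR(part);

	return 0;
}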
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 730a20b83576..e10c49a5b96e 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -8,6 +8,7 @@
8 8
9#else 9#else
10 10
11#include <linux/kernel.h>
11#include <linux/types.h> 12#include <linux/types.h>
12#include <linux/errno.h> 13#include <linux/errno.h>
13 14
@@ -32,6 +33,8 @@ static inline int gpio_request(unsigned gpio, const char *label)
32 33
33static inline void gpio_free(unsigned gpio) 34static inline void gpio_free(unsigned gpio)
34{ 35{
36 might_sleep();
37
35 /* GPIO can never have been requested */ 38 /* GPIO can never have been requested */
36 WARN_ON(1); 39 WARN_ON(1);
37} 40}
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 181006cc94a0..f83288347dda 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -4,6 +4,7 @@
4#include <linux/preempt.h> 4#include <linux/preempt.h>
5#include <linux/smp_lock.h> 5#include <linux/smp_lock.h>
6#include <linux/lockdep.h> 6#include <linux/lockdep.h>
7#include <linux/ftrace_irq.h>
7#include <asm/hardirq.h> 8#include <asm/hardirq.h>
8#include <asm/system.h> 9#include <asm/system.h>
9 10
@@ -118,13 +119,17 @@ static inline void account_system_vtime(struct task_struct *tsk)
118} 119}
119#endif 120#endif
120 121
121#if defined(CONFIG_PREEMPT_RCU) && defined(CONFIG_NO_HZ) 122#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
122extern void rcu_irq_enter(void); 123extern void rcu_irq_enter(void);
123extern void rcu_irq_exit(void); 124extern void rcu_irq_exit(void);
125extern void rcu_nmi_enter(void);
126extern void rcu_nmi_exit(void);
124#else 127#else
125# define rcu_irq_enter() do { } while (0) 128# define rcu_irq_enter() do { } while (0)
126# define rcu_irq_exit() do { } while (0) 129# define rcu_irq_exit() do { } while (0)
127#endif /* CONFIG_PREEMPT_RCU */ 130# define rcu_nmi_enter() do { } while (0)
131# define rcu_nmi_exit() do { } while (0)
132#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
128 133
129/* 134/*
130 * It is safe to do non-atomic ops on ->hardirq_context, 135 * It is safe to do non-atomic ops on ->hardirq_context,
@@ -134,7 +139,6 @@ extern void rcu_irq_exit(void);
134 */ 139 */
135#define __irq_enter() \ 140#define __irq_enter() \
136 do { \ 141 do { \
137 rcu_irq_enter(); \
138 account_system_vtime(current); \ 142 account_system_vtime(current); \
139 add_preempt_count(HARDIRQ_OFFSET); \ 143 add_preempt_count(HARDIRQ_OFFSET); \
140 trace_hardirq_enter(); \ 144 trace_hardirq_enter(); \
@@ -153,7 +157,6 @@ extern void irq_enter(void);
153 trace_hardirq_exit(); \ 157 trace_hardirq_exit(); \
154 account_system_vtime(current); \ 158 account_system_vtime(current); \
155 sub_preempt_count(HARDIRQ_OFFSET); \ 159 sub_preempt_count(HARDIRQ_OFFSET); \
156 rcu_irq_exit(); \
157 } while (0) 160 } while (0)
158 161
159/* 162/*
@@ -161,7 +164,20 @@ extern void irq_enter(void);
161 */ 164 */
162extern void irq_exit(void); 165extern void irq_exit(void);
163 166
164#define nmi_enter() do { lockdep_off(); __irq_enter(); } while (0) 167#define nmi_enter() \
165#define nmi_exit() do { __irq_exit(); lockdep_on(); } while (0) 168 do { \
169 ftrace_nmi_enter(); \
170 lockdep_off(); \
171 rcu_nmi_enter(); \
172 __irq_enter(); \
173 } while (0)
174
175#define nmi_exit() \
176 do { \
177 __irq_exit(); \
178 rcu_nmi_exit(); \
179 lockdep_on(); \
180 ftrace_nmi_exit(); \
181 } while (0)
166 182
167#endif /* LINUX_HARDIRQ_H */ 183#endif /* LINUX_HARDIRQ_H */
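nmi_enter()/nmi_exit() now bracket the handler with the ftrace and RCU NMI hooks in addition to lockdep and the hardirq accounting. A minimal sketch of an arch NMI path (the handler body is hypothetical):

#include <linux/hardirq.h>

static void example_handle_nmi_source(void)
{
	/* arch-specific NMI source handling would go here */
}

static void example_do_nmi(void)
{
	nmi_enter();			/* ftrace_nmi_enter, lockdep_off, rcu_nmi_enter, __irq_enter */
	example_handle_nmi_source();
	nmi_exit();			/* tear down in the reverse order */
}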
diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h
index c59769693bee..fd47a151665e 100644
--- a/include/linux/hdlc.h
+++ b/include/linux/hdlc.h
@@ -43,7 +43,7 @@ struct hdlc_proto {
43}; 43};
44 44
45 45
46/* Pointed to by dev->priv */ 46/* Pointed to by netdev_priv(dev) */
47typedef struct hdlc_device { 47typedef struct hdlc_device {
48 /* used by HDLC layer to take control over HDLC device from hw driver*/ 48 /* used by HDLC layer to take control over HDLC device from hw driver*/
49 int (*attach)(struct net_device *dev, 49 int (*attach)(struct net_device *dev,
@@ -80,7 +80,7 @@ struct net_device *alloc_hdlcdev(void *priv);
80 80
81static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev) 81static inline struct hdlc_device* dev_to_hdlc(struct net_device *dev)
82{ 82{
83 return dev->priv; 83 return netdev_priv(dev);
84} 84}
85 85
86static __inline__ void debug_frame(const struct sk_buff *skb) 86static __inline__ void debug_frame(const struct sk_buff *skb)
diff --git a/include/linux/hid.h b/include/linux/hid.h
index f13bca2dd53b..e5780f8c934a 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -410,6 +410,7 @@ struct hid_output_fifo {
410#define HID_SUSPENDED 5 410#define HID_SUSPENDED 5
411#define HID_CLEAR_HALT 6 411#define HID_CLEAR_HALT 6
412#define HID_DISCONNECTED 7 412#define HID_DISCONNECTED 7
413#define HID_STARTED 8
413 414
414struct hid_input { 415struct hid_input {
415 struct list_head list; 416 struct list_head list;
@@ -417,6 +418,11 @@ struct hid_input {
417 struct input_dev *input; 418 struct input_dev *input;
418}; 419};
419 420
421enum hid_type {
422 HID_TYPE_OTHER = 0,
423 HID_TYPE_USBMOUSE
424};
425
420struct hid_driver; 426struct hid_driver;
421struct hid_ll_driver; 427struct hid_ll_driver;
422 428
@@ -431,6 +437,7 @@ struct hid_device { /* device report descriptor */
431 __u32 vendor; /* Vendor ID */ 437 __u32 vendor; /* Vendor ID */
432 __u32 product; /* Product ID */ 438 __u32 product; /* Product ID */
433 __u32 version; /* HID version */ 439 __u32 version; /* HID version */
440 enum hid_type type; /* device type (mouse, kbd, ...) */
434 unsigned country; /* HID country */ 441 unsigned country; /* HID country */
435 struct hid_report_enum report_enum[HID_REPORT_TYPES]; 442 struct hid_report_enum report_enum[HID_REPORT_TYPES];
436 443
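The new hid->type field lets drivers special-case device classes such as USB mice without re-parsing the report descriptor. A small illustrative check (the helper name is made up):

#include <linux/hid.h>

/* Hypothetical helper: decide whether to apply mouse-specific quirks. */
static bool example_is_usb_mouse(const struct hid_device *hdev)
{
	return hdev->type == HID_TYPE_USBMOUSE;
}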
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 7dcbc82f3b7b..13875ce9112a 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -63,12 +63,14 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx)
63#endif /* CONFIG_HIGHMEM */ 63#endif /* CONFIG_HIGHMEM */
64 64
65/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ 65/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
66#ifndef clear_user_highpage
66static inline void clear_user_highpage(struct page *page, unsigned long vaddr) 67static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
67{ 68{
68 void *addr = kmap_atomic(page, KM_USER0); 69 void *addr = kmap_atomic(page, KM_USER0);
69 clear_user_page(addr, vaddr, page); 70 clear_user_page(addr, vaddr, page);
70 kunmap_atomic(addr, KM_USER0); 71 kunmap_atomic(addr, KM_USER0);
71} 72}
73#endif
72 74
73#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE 75#ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
74/** 76/**
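clear_user_highpage() is now wrapped in #ifndef clear_user_highpage, so an architecture with cache-aliasing concerns can supply its own version. A sketch of what such an override could look like in an arch header (entirely illustrative):

/* Hypothetical asm/page.h fragment for an aliasing D-cache architecture. */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *addr = kmap_atomic(page, KM_USER0);

	clear_user_page(addr, vaddr, page);
	/* the arch would flush the aliased cache lines for vaddr here */
	kunmap_atomic(addr, KM_USER0);
}
#define clear_user_highpage clear_user_highpage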
diff --git a/include/linux/hippidevice.h b/include/linux/hippidevice.h
index bab303dafd6e..f148e4908410 100644
--- a/include/linux/hippidevice.h
+++ b/include/linux/hippidevice.h
@@ -32,7 +32,9 @@ struct hippi_cb {
32}; 32};
33 33
34extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev); 34extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
35 35extern int hippi_change_mtu(struct net_device *dev, int new_mtu);
36extern int hippi_mac_addr(struct net_device *dev, void *p);
37extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
36extern struct net_device *alloc_hippi_dev(int sizeof_priv); 38extern struct net_device *alloc_hippi_dev(int sizeof_priv);
37#endif 39#endif
38 40
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2f245fe63bda..bd37078c2d7d 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -20,6 +20,8 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/wait.h> 22#include <linux/wait.h>
23#include <linux/percpu.h>
24
23 25
24struct hrtimer_clock_base; 26struct hrtimer_clock_base;
25struct hrtimer_cpu_base; 27struct hrtimer_cpu_base;
@@ -41,31 +43,6 @@ enum hrtimer_restart {
41}; 43};
42 44
43/* 45/*
44 * hrtimer callback modes:
45 *
46 * HRTIMER_CB_SOFTIRQ: Callback must run in softirq context
47 * HRTIMER_CB_IRQSAFE: Callback may run in hardirq context
48 * HRTIMER_CB_IRQSAFE_NO_RESTART: Callback may run in hardirq context and
49 * does not restart the timer
50 * HRTIMER_CB_IRQSAFE_PERCPU: Callback must run in hardirq context
51 * Special mode for tick emulation and
52 * scheduler timer. Such timers are per
53 * cpu and not allowed to be migrated on
54 * cpu unplug.
55 * HRTIMER_CB_IRQSAFE_UNLOCKED: Callback should run in hardirq context
56 * with timer->base lock unlocked
57 * used for timers which call wakeup to
58 * avoid lock order problems with rq->lock
59 */
60enum hrtimer_cb_mode {
61 HRTIMER_CB_SOFTIRQ,
62 HRTIMER_CB_IRQSAFE,
63 HRTIMER_CB_IRQSAFE_NO_RESTART,
64 HRTIMER_CB_IRQSAFE_PERCPU,
65 HRTIMER_CB_IRQSAFE_UNLOCKED,
66};
67
68/*
69 * Values to track state of the timer 46 * Values to track state of the timer
70 * 47 *
71 * Possible states: 48 * Possible states:
@@ -73,7 +50,6 @@ enum hrtimer_cb_mode {
73 * 0x00 inactive 50 * 0x00 inactive
74 * 0x01 enqueued into rbtree 51 * 0x01 enqueued into rbtree
75 * 0x02 callback function running 52 * 0x02 callback function running
76 * 0x04 callback pending (high resolution mode)
77 * 53 *
78 * Special cases: 54 * Special cases:
79 * 0x03 callback function running and enqueued 55 * 0x03 callback function running and enqueued
@@ -95,20 +71,22 @@ enum hrtimer_cb_mode {
95#define HRTIMER_STATE_INACTIVE 0x00 71#define HRTIMER_STATE_INACTIVE 0x00
96#define HRTIMER_STATE_ENQUEUED 0x01 72#define HRTIMER_STATE_ENQUEUED 0x01
97#define HRTIMER_STATE_CALLBACK 0x02 73#define HRTIMER_STATE_CALLBACK 0x02
98#define HRTIMER_STATE_PENDING 0x04 74#define HRTIMER_STATE_MIGRATE 0x04
99#define HRTIMER_STATE_MIGRATE 0x08
100 75
101/** 76/**
102 * struct hrtimer - the basic hrtimer structure 77 * struct hrtimer - the basic hrtimer structure
103 * @node: red black tree node for time ordered insertion 78 * @node: red black tree node for time ordered insertion
104 * @expires: the absolute expiry time in the hrtimers internal 79 * @_expires: the absolute expiry time in the hrtimers internal
105 * representation. The time is related to the clock on 80 * representation. The time is related to the clock on
106 * which the timer is based. 81 * which the timer is based. It is set up by adding
 82 * slack to the _softexpires value. For non-range timers it is
 83 * identical to _softexpires.
84 * @_softexpires: the absolute earliest expiry time of the hrtimer.
85 * The time which was given as expiry time when the timer
86 * was armed.
107 * @function: timer expiry callback function 87 * @function: timer expiry callback function
108 * @base: pointer to the timer base (per cpu and per clock) 88 * @base: pointer to the timer base (per cpu and per clock)
109 * @state: state information (See bit values above) 89 * @state: state information (See bit values above)
110 * @cb_mode: high resolution timer feature to select the callback execution
111 * mode
112 * @cb_entry: list head to enqueue an expired timer into the callback list 90 * @cb_entry: list head to enqueue an expired timer into the callback list
113 * @start_site: timer statistics field to store the site where the timer 91 * @start_site: timer statistics field to store the site where the timer
114 * was started 92 * was started
@@ -121,16 +99,16 @@ enum hrtimer_cb_mode {
121 */ 99 */
122struct hrtimer { 100struct hrtimer {
123 struct rb_node node; 101 struct rb_node node;
124 ktime_t expires; 102 ktime_t _expires;
103 ktime_t _softexpires;
125 enum hrtimer_restart (*function)(struct hrtimer *); 104 enum hrtimer_restart (*function)(struct hrtimer *);
126 struct hrtimer_clock_base *base; 105 struct hrtimer_clock_base *base;
127 unsigned long state; 106 unsigned long state;
128 enum hrtimer_cb_mode cb_mode;
129 struct list_head cb_entry; 107 struct list_head cb_entry;
130#ifdef CONFIG_TIMER_STATS 108#ifdef CONFIG_TIMER_STATS
109 int start_pid;
131 void *start_site; 110 void *start_site;
132 char start_comm[16]; 111 char start_comm[16];
133 int start_pid;
134#endif 112#endif
135}; 113};
136 114
@@ -155,10 +133,8 @@ struct hrtimer_sleeper {
155 * @first: pointer to the timer node which expires first 133 * @first: pointer to the timer node which expires first
156 * @resolution: the resolution of the clock, in nanoseconds 134 * @resolution: the resolution of the clock, in nanoseconds
157 * @get_time: function to retrieve the current time of the clock 135 * @get_time: function to retrieve the current time of the clock
158 * @get_softirq_time: function to retrieve the current time from the softirq
159 * @softirq_time: the time when running the hrtimer queue in the softirq 136 * @softirq_time: the time when running the hrtimer queue in the softirq
160 * @offset: offset of this clock to the monotonic base 137 * @offset: offset of this clock to the monotonic base
161 * @reprogram: function to reprogram the timer event
162 */ 138 */
163struct hrtimer_clock_base { 139struct hrtimer_clock_base {
164 struct hrtimer_cpu_base *cpu_base; 140 struct hrtimer_cpu_base *cpu_base;
@@ -167,13 +143,9 @@ struct hrtimer_clock_base {
167 struct rb_node *first; 143 struct rb_node *first;
168 ktime_t resolution; 144 ktime_t resolution;
169 ktime_t (*get_time)(void); 145 ktime_t (*get_time)(void);
170 ktime_t (*get_softirq_time)(void);
171 ktime_t softirq_time; 146 ktime_t softirq_time;
172#ifdef CONFIG_HIGH_RES_TIMERS 147#ifdef CONFIG_HIGH_RES_TIMERS
173 ktime_t offset; 148 ktime_t offset;
174 int (*reprogram)(struct hrtimer *t,
175 struct hrtimer_clock_base *b,
176 ktime_t n);
177#endif 149#endif
178}; 150};
179 151
@@ -191,15 +163,11 @@ struct hrtimer_clock_base {
191 * @check_clocks: Indicator, when set evaluate time source and clock 163 * @check_clocks: Indicator, when set evaluate time source and clock
192 * event devices whether high resolution mode can be 164 * event devices whether high resolution mode can be
193 * activated. 165 * activated.
194 * @cb_pending: Expired timers are moved from the rbtree to this
195 * list in the timer interrupt. The list is processed
196 * in the softirq.
197 * @nr_events: Total number of timer interrupt events 166 * @nr_events: Total number of timer interrupt events
198 */ 167 */
199struct hrtimer_cpu_base { 168struct hrtimer_cpu_base {
200 spinlock_t lock; 169 spinlock_t lock;
201 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; 170 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
202 struct list_head cb_pending;
203#ifdef CONFIG_HIGH_RES_TIMERS 171#ifdef CONFIG_HIGH_RES_TIMERS
204 ktime_t expires_next; 172 ktime_t expires_next;
205 int hres_active; 173 int hres_active;
@@ -207,6 +175,71 @@ struct hrtimer_cpu_base {
207#endif 175#endif
208}; 176};
209 177
178static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
179{
180 timer->_expires = time;
181 timer->_softexpires = time;
182}
183
184static inline void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta)
185{
186 timer->_softexpires = time;
187 timer->_expires = ktime_add_safe(time, delta);
188}
189
190static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta)
191{
192 timer->_softexpires = time;
193 timer->_expires = ktime_add_safe(time, ns_to_ktime(delta));
194}
195
196static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
197{
198 timer->_expires.tv64 = tv64;
199 timer->_softexpires.tv64 = tv64;
200}
201
202static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
203{
204 timer->_expires = ktime_add_safe(timer->_expires, time);
205 timer->_softexpires = ktime_add_safe(timer->_softexpires, time);
206}
207
208static inline void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns)
209{
210 timer->_expires = ktime_add_ns(timer->_expires, ns);
211 timer->_softexpires = ktime_add_ns(timer->_softexpires, ns);
212}
213
214static inline ktime_t hrtimer_get_expires(const struct hrtimer *timer)
215{
216 return timer->_expires;
217}
218
219static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
220{
221 return timer->_softexpires;
222}
223
224static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
225{
226 return timer->_expires.tv64;
227}
228static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
229{
230 return timer->_softexpires.tv64;
231}
232
233static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
234{
235 return ktime_to_ns(timer->_expires);
236}
237
238static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
239{
240 return ktime_sub(timer->_expires, timer->base->get_time());
241}
242
210#ifdef CONFIG_HIGH_RES_TIMERS 243#ifdef CONFIG_HIGH_RES_TIMERS
211struct clock_event_device; 244struct clock_event_device;
212 245
@@ -227,6 +260,8 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
227 return timer->base->cpu_base->hres_active; 260 return timer->base->cpu_base->hres_active;
228} 261}
229 262
263extern void hrtimer_peek_ahead_timers(void);
264
230/* 265/*
231 * The resolution of the clocks. The resolution value is returned in 266 * The resolution of the clocks. The resolution value is returned in
232 * the clock_getres() system call to give application programmers an 267 * the clock_getres() system call to give application programmers an
@@ -249,6 +284,7 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
249 * is expired in the next softirq when the clock was advanced. 284 * is expired in the next softirq when the clock was advanced.
250 */ 285 */
251static inline void clock_was_set(void) { } 286static inline void clock_was_set(void) { }
287static inline void hrtimer_peek_ahead_timers(void) { }
252 288
253static inline void hres_timers_resume(void) { } 289static inline void hres_timers_resume(void) { }
254 290
@@ -270,6 +306,10 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
270extern ktime_t ktime_get(void); 306extern ktime_t ktime_get(void);
271extern ktime_t ktime_get_real(void); 307extern ktime_t ktime_get_real(void);
272 308
309
310DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
311
312
273/* Exported timer functions: */ 313/* Exported timer functions: */
274 314
275/* Initialize timers: */ 315/* Initialize timers: */
@@ -294,12 +334,25 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
294/* Basic timer operations: */ 334/* Basic timer operations: */
295extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, 335extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
296 const enum hrtimer_mode mode); 336 const enum hrtimer_mode mode);
337extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
338 unsigned long range_ns, const enum hrtimer_mode mode);
297extern int hrtimer_cancel(struct hrtimer *timer); 339extern int hrtimer_cancel(struct hrtimer *timer);
298extern int hrtimer_try_to_cancel(struct hrtimer *timer); 340extern int hrtimer_try_to_cancel(struct hrtimer *timer);
299 341
342static inline int hrtimer_start_expires(struct hrtimer *timer,
343 enum hrtimer_mode mode)
344{
345 unsigned long delta;
346 ktime_t soft, hard;
347 soft = hrtimer_get_softexpires(timer);
348 hard = hrtimer_get_expires(timer);
349 delta = ktime_to_ns(ktime_sub(hard, soft));
350 return hrtimer_start_range_ns(timer, soft, delta, mode);
351}
352
300static inline int hrtimer_restart(struct hrtimer *timer) 353static inline int hrtimer_restart(struct hrtimer *timer)
301{ 354{
302 return hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS); 355 return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
303} 356}
304 357
305/* Query timers: */ 358/* Query timers: */
@@ -322,8 +375,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
322 */ 375 */
323static inline int hrtimer_is_queued(struct hrtimer *timer) 376static inline int hrtimer_is_queued(struct hrtimer *timer)
324{ 377{
325 return timer->state & 378 return timer->state & HRTIMER_STATE_ENQUEUED;
326 (HRTIMER_STATE_ENQUEUED | HRTIMER_STATE_PENDING);
327} 379}
328 380
329/* 381/*
@@ -356,6 +408,10 @@ extern long hrtimer_nanosleep_restart(struct restart_block *restart_block);
356extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, 408extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl,
357 struct task_struct *tsk); 409 struct task_struct *tsk);
358 410
411extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
412 const enum hrtimer_mode mode);
413extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
414
359/* Soft interrupt function to run the hrtimer queues: */ 415/* Soft interrupt function to run the hrtimer queues: */
360extern void hrtimer_run_queues(void); 416extern void hrtimer_run_queues(void);
361extern void hrtimer_run_pending(void); 417extern void hrtimer_run_pending(void);
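The new expiry accessors and hrtimer_start_range_ns() separate the earliest acceptable expiry (_softexpires) from the hard deadline (_expires). A minimal sketch of arming a range timer with the helpers above (function names and the 1 ms / 500 us values are chosen purely for illustration):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static enum hrtimer_restart example_timer_fn(struct hrtimer *timer)
{
	return HRTIMER_NORESTART;
}

/* Hypothetical setup: fire no earlier than +1ms, with 500us of slack. */
static void example_arm_range_timer(struct hrtimer *timer)
{
	ktime_t soft = ktime_add_ns(ktime_get(), 1000000);

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->function = example_timer_fn;

	hrtimer_set_expires_range_ns(timer, soft, 500000);
	hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}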
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 32e0ef0f6e1f..e1c8afc002c0 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -27,7 +27,7 @@ void unmap_hugepage_range(struct vm_area_struct *,
27void __unmap_hugepage_range(struct vm_area_struct *, 27void __unmap_hugepage_range(struct vm_area_struct *,
28 unsigned long, unsigned long, struct page *); 28 unsigned long, unsigned long, struct page *);
29int hugetlb_prefault(struct address_space *, struct vm_area_struct *); 29int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
30int hugetlb_report_meminfo(char *); 30void hugetlb_report_meminfo(struct seq_file *);
31int hugetlb_report_node_meminfo(int, char *); 31int hugetlb_report_node_meminfo(int, char *);
32unsigned long hugetlb_total_pages(void); 32unsigned long hugetlb_total_pages(void);
33int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, 33int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -79,7 +79,9 @@ static inline unsigned long hugetlb_total_pages(void)
79#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; }) 79#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
80#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; }) 80#define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
81#define unmap_hugepage_range(vma, start, end, page) BUG() 81#define unmap_hugepage_range(vma, start, end, page) BUG()
82#define hugetlb_report_meminfo(buf) 0 82static inline void hugetlb_report_meminfo(struct seq_file *m)
83{
84}
83#define hugetlb_report_node_meminfo(n, buf) 0 85#define hugetlb_report_node_meminfo(n, buf) 0
84#define follow_huge_pmd(mm, addr, pmd, write) NULL 86#define follow_huge_pmd(mm, addr, pmd, write) NULL
85#define follow_huge_pud(mm, addr, pud, write) NULL 87#define follow_huge_pud(mm, addr, pud, write) NULL
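hugetlb_report_meminfo() now writes straight into a seq_file instead of filling a char buffer, matching the /proc/meminfo conversion. A minimal sketch of a seq_file show routine calling it (the function name is made up):

#include <linux/hugetlb.h>
#include <linux/seq_file.h>

static int example_meminfo_show(struct seq_file *m, void *v)
{
	hugetlb_report_meminfo(m);	/* prints the HugePages_* lines into the seq_file */
	return 0;
}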
diff --git a/include/linux/i2c-algo-pcf.h b/include/linux/i2c-algo-pcf.h
index 0177d280f733..0f91a957a690 100644
--- a/include/linux/i2c-algo-pcf.h
+++ b/include/linux/i2c-algo-pcf.h
@@ -31,7 +31,10 @@ struct i2c_algo_pcf_data {
31 int (*getpcf) (void *data, int ctl); 31 int (*getpcf) (void *data, int ctl);
32 int (*getown) (void *data); 32 int (*getown) (void *data);
33 int (*getclock) (void *data); 33 int (*getclock) (void *data);
34 void (*waitforpin) (void); 34 void (*waitforpin) (void *data);
35
36 void (*xfer_begin) (void *data);
37 void (*xfer_end) (void *data);
35 38
36 /* Multi-master lost arbitration back-off delay (msecs) 39 /* Multi-master lost arbitration back-off delay (msecs)
37 * This should be set by the bus adapter or knowledgable client 40 * This should be set by the bus adapter or knowledgable client
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 493435bcdbe5..01d67ba9e985 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -60,7 +60,7 @@
60#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */ 60#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */
61#define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */ 61#define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */
62#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */ 62#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */
63#define I2C_DRIVERID_SAA7127 72 /* saa7124 video encoder */ 63#define I2C_DRIVERID_SAA7127 72 /* saa7127 video encoder */
64#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ 64#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
65#define I2C_DRIVERID_AKITAIOEXP 74 /* IO Expander on Sharp SL-C1000 */ 65#define I2C_DRIVERID_AKITAIOEXP 74 /* IO Expander on Sharp SL-C1000 */
66#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */ 66#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 06115128047f..33a5992d4936 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -53,45 +53,44 @@ struct i2c_board_info;
53 * transmit one message at a time, a more complex version can be used to 53 * transmit one message at a time, a more complex version can be used to
54 * transmit an arbitrary number of messages without interruption. 54 * transmit an arbitrary number of messages without interruption.
55 */ 55 */
56extern int i2c_master_send(struct i2c_client *,const char* ,int); 56extern int i2c_master_send(struct i2c_client *client, const char *buf,
57extern int i2c_master_recv(struct i2c_client *,char* ,int); 57 int count);
58extern int i2c_master_recv(struct i2c_client *client, char *buf, int count);
58 59
59/* Transfer num messages. 60/* Transfer num messages.
60 */ 61 */
61extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num); 62extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
62 63 int num);
63 64
64/* This is the very generalized SMBus access routine. You probably do not 65/* This is the very generalized SMBus access routine. You probably do not
65 want to use this, though; one of the functions below may be much easier, 66 want to use this, though; one of the functions below may be much easier,
66 and probably just as fast. 67 and probably just as fast.
67 Note that we use i2c_adapter here, because you do not need a specific 68 Note that we use i2c_adapter here, because you do not need a specific
68 smbus adapter to call this function. */ 69 smbus adapter to call this function. */
69extern s32 i2c_smbus_xfer (struct i2c_adapter * adapter, u16 addr, 70extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
70 unsigned short flags, 71 unsigned short flags, char read_write, u8 command,
71 char read_write, u8 command, int size, 72 int size, union i2c_smbus_data *data);
72 union i2c_smbus_data * data);
73 73
74/* Now follow the 'nice' access routines. These also document the calling 74/* Now follow the 'nice' access routines. These also document the calling
75 conventions of i2c_smbus_xfer. */ 75 conventions of i2c_smbus_xfer. */
76 76
77extern s32 i2c_smbus_read_byte(struct i2c_client * client); 77extern s32 i2c_smbus_read_byte(struct i2c_client *client);
78extern s32 i2c_smbus_write_byte(struct i2c_client * client, u8 value); 78extern s32 i2c_smbus_write_byte(struct i2c_client *client, u8 value);
79extern s32 i2c_smbus_read_byte_data(struct i2c_client * client, u8 command); 79extern s32 i2c_smbus_read_byte_data(struct i2c_client *client, u8 command);
80extern s32 i2c_smbus_write_byte_data(struct i2c_client * client, 80extern s32 i2c_smbus_write_byte_data(struct i2c_client *client,
81 u8 command, u8 value); 81 u8 command, u8 value);
82extern s32 i2c_smbus_read_word_data(struct i2c_client * client, u8 command); 82extern s32 i2c_smbus_read_word_data(struct i2c_client *client, u8 command);
83extern s32 i2c_smbus_write_word_data(struct i2c_client * client, 83extern s32 i2c_smbus_write_word_data(struct i2c_client *client,
84 u8 command, u16 value); 84 u8 command, u16 value);
85/* Returns the number of read bytes */ 85/* Returns the number of read bytes */
86extern s32 i2c_smbus_read_block_data(struct i2c_client *client, 86extern s32 i2c_smbus_read_block_data(struct i2c_client *client,
87 u8 command, u8 *values); 87 u8 command, u8 *values);
88extern s32 i2c_smbus_write_block_data(struct i2c_client * client, 88extern s32 i2c_smbus_write_block_data(struct i2c_client *client,
89 u8 command, u8 length, 89 u8 command, u8 length, const u8 *values);
90 const u8 *values);
91/* Returns the number of read bytes */ 90/* Returns the number of read bytes */
92extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client * client, 91extern s32 i2c_smbus_read_i2c_block_data(struct i2c_client *client,
93 u8 command, u8 length, u8 *values); 92 u8 command, u8 length, u8 *values);
94extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client * client, 93extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
95 u8 command, u8 length, 94 u8 command, u8 length,
96 const u8 *values); 95 const u8 *values);
97 96
@@ -169,7 +168,7 @@ struct i2c_driver {
169 /* a ioctl like command that can be used to perform specific functions 168 /* a ioctl like command that can be used to perform specific functions
170 * with the device. 169 * with the device.
171 */ 170 */
172 int (*command)(struct i2c_client *client,unsigned int cmd, void *arg); 171 int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
173 172
174 struct device_driver driver; 173 struct device_driver driver;
175 const struct i2c_device_id *id_table; 174 const struct i2c_device_id *id_table;
@@ -224,14 +223,14 @@ static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
224 return to_i2c_client(dev); 223 return to_i2c_client(dev);
225} 224}
226 225
227static inline void *i2c_get_clientdata (struct i2c_client *dev) 226static inline void *i2c_get_clientdata(const struct i2c_client *dev)
228{ 227{
229 return dev_get_drvdata (&dev->dev); 228 return dev_get_drvdata(&dev->dev);
230} 229}
231 230
232static inline void i2c_set_clientdata (struct i2c_client *dev, void *data) 231static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
233{ 232{
234 dev_set_drvdata (&dev->dev, data); 233 dev_set_drvdata(&dev->dev, data);
235} 234}
236 235
237/** 236/**
@@ -240,6 +239,7 @@ static inline void i2c_set_clientdata (struct i2c_client *dev, void *data)
240 * @flags: to initialize i2c_client.flags 239 * @flags: to initialize i2c_client.flags
241 * @addr: stored in i2c_client.addr 240 * @addr: stored in i2c_client.addr
242 * @platform_data: stored in i2c_client.dev.platform_data 241 * @platform_data: stored in i2c_client.dev.platform_data
242 * @archdata: copied into i2c_client.dev.archdata
243 * @irq: stored in i2c_client.irq 243 * @irq: stored in i2c_client.irq
244 * 244 *
245 * I2C doesn't actually support hardware probing, although controllers and 245 * I2C doesn't actually support hardware probing, although controllers and
@@ -259,6 +259,7 @@ struct i2c_board_info {
259 unsigned short flags; 259 unsigned short flags;
260 unsigned short addr; 260 unsigned short addr;
261 void *platform_data; 261 void *platform_data;
262 struct dev_archdata *archdata;
262 int irq; 263 int irq;
263}; 264};
264 265
@@ -272,7 +273,7 @@ struct i2c_board_info {
272 * fields (such as associated irq, or device-specific platform_data) 273 * fields (such as associated irq, or device-specific platform_data)
273 * are provided using conventional syntax. 274 * are provided using conventional syntax.
274 */ 275 */
275#define I2C_BOARD_INFO(dev_type,dev_addr) \ 276#define I2C_BOARD_INFO(dev_type, dev_addr) \
276 .type = (dev_type), .addr = (dev_addr) 277 .type = (dev_type), .addr = (dev_addr)
277 278
278 279
@@ -306,10 +307,12 @@ extern void i2c_unregister_device(struct i2c_client *);
306 */ 307 */
307#ifdef CONFIG_I2C_BOARDINFO 308#ifdef CONFIG_I2C_BOARDINFO
308extern int 309extern int
309i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned n); 310i2c_register_board_info(int busnum, struct i2c_board_info const *info,
311 unsigned n);
310#else 312#else
311static inline int 313static inline int
312i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsigned n) 314i2c_register_board_info(int busnum, struct i2c_board_info const *info,
315 unsigned n)
313{ 316{
314 return 0; 317 return 0;
315} 318}
@@ -328,11 +331,11 @@ struct i2c_algorithm {
328 using common I2C messages */ 331 using common I2C messages */
329 /* master_xfer should return the number of messages successfully 332 /* master_xfer should return the number of messages successfully
330 processed, or a negative value on error */ 333 processed, or a negative value on error */
331 int (*master_xfer)(struct i2c_adapter *adap,struct i2c_msg *msgs, 334 int (*master_xfer)(struct i2c_adapter *adap, struct i2c_msg *msgs,
332 int num); 335 int num);
333 int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr, 336 int (*smbus_xfer) (struct i2c_adapter *adap, u16 addr,
334 unsigned short flags, char read_write, 337 unsigned short flags, char read_write,
335 u8 command, int size, union i2c_smbus_data * data); 338 u8 command, int size, union i2c_smbus_data *data);
336 339
337 /* To determine what the adapter supports */ 340 /* To determine what the adapter supports */
338 u32 (*functionality) (struct i2c_adapter *); 341 u32 (*functionality) (struct i2c_adapter *);
@@ -345,7 +348,7 @@ struct i2c_algorithm {
345struct i2c_adapter { 348struct i2c_adapter {
346 struct module *owner; 349 struct module *owner;
347 unsigned int id; 350 unsigned int id;
348 unsigned int class; 351 unsigned int class; /* classes to allow probing for */
349 const struct i2c_algorithm *algo; /* the algorithm to access the bus */ 352 const struct i2c_algorithm *algo; /* the algorithm to access the bus */
350 void *algo_data; 353 void *algo_data;
351 354
@@ -369,14 +372,14 @@ struct i2c_adapter {
369}; 372};
370#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) 373#define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev)
371 374
372static inline void *i2c_get_adapdata (struct i2c_adapter *dev) 375static inline void *i2c_get_adapdata(const struct i2c_adapter *dev)
373{ 376{
374 return dev_get_drvdata (&dev->dev); 377 return dev_get_drvdata(&dev->dev);
375} 378}
376 379
377static inline void i2c_set_adapdata (struct i2c_adapter *dev, void *data) 380static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
378{ 381{
379 dev_set_drvdata (&dev->dev, data); 382 dev_set_drvdata(&dev->dev, data);
380} 383}
381 384
382/*flags for the client struct: */ 385/*flags for the client struct: */
@@ -449,7 +452,7 @@ extern int i2c_probe(struct i2c_adapter *adapter,
449 const struct i2c_client_address_data *address_data, 452 const struct i2c_client_address_data *address_data,
450 int (*found_proc) (struct i2c_adapter *, int, int)); 453 int (*found_proc) (struct i2c_adapter *, int, int));
451 454
452extern struct i2c_adapter* i2c_get_adapter(int id); 455extern struct i2c_adapter *i2c_get_adapter(int id);
453extern void i2c_put_adapter(struct i2c_adapter *adap); 456extern void i2c_put_adapter(struct i2c_adapter *adap);
454 457
455 458
@@ -465,7 +468,7 @@ static inline int i2c_check_functionality(struct i2c_adapter *adap, u32 func)
465 return (func & i2c_get_functionality(adap)) == func; 468 return (func & i2c_get_functionality(adap)) == func;
466} 469}
467 470
468/* Return id number for a specific adapter */ 471/* Return the adapter number for a specific adapter */
469static inline int i2c_adapter_id(struct i2c_adapter *adap) 472static inline int i2c_adapter_id(struct i2c_adapter *adap)
470{ 473{
471 return adap->nr; 474 return adap->nr;
@@ -526,7 +529,7 @@ struct i2c_msg {
526 529
527#define I2C_FUNC_I2C 0x00000001 530#define I2C_FUNC_I2C 0x00000001
528#define I2C_FUNC_10BIT_ADDR 0x00000002 531#define I2C_FUNC_10BIT_ADDR 0x00000002
529#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_{REV_DIR_ADDR,NOSTART,..} */ 532#define I2C_FUNC_PROTOCOL_MANGLING 0x00000004 /* I2C_M_NOSTART etc. */
530#define I2C_FUNC_SMBUS_PEC 0x00000008 533#define I2C_FUNC_SMBUS_PEC 0x00000008
531#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */ 534#define I2C_FUNC_SMBUS_BLOCK_PROC_CALL 0x00008000 /* SMBus 2.0 */
532#define I2C_FUNC_SMBUS_QUICK 0x00010000 535#define I2C_FUNC_SMBUS_QUICK 0x00010000
@@ -541,30 +544,26 @@ struct i2c_msg {
541#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000 544#define I2C_FUNC_SMBUS_WRITE_BLOCK_DATA 0x02000000
542#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */ 545#define I2C_FUNC_SMBUS_READ_I2C_BLOCK 0x04000000 /* I2C-like block xfer */
543#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */ 546#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK 0x08000000 /* w/ 1-byte reg. addr. */
544#define I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 0x10000000 /* I2C-like block xfer */ 547
545#define I2C_FUNC_SMBUS_WRITE_I2C_BLOCK_2 0x20000000 /* w/ 2-byte reg. addr. */ 548#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \
546 549 I2C_FUNC_SMBUS_WRITE_BYTE)
547#define I2C_FUNC_SMBUS_BYTE (I2C_FUNC_SMBUS_READ_BYTE | \ 550#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \
548 I2C_FUNC_SMBUS_WRITE_BYTE) 551 I2C_FUNC_SMBUS_WRITE_BYTE_DATA)
549#define I2C_FUNC_SMBUS_BYTE_DATA (I2C_FUNC_SMBUS_READ_BYTE_DATA | \ 552#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \
550 I2C_FUNC_SMBUS_WRITE_BYTE_DATA) 553 I2C_FUNC_SMBUS_WRITE_WORD_DATA)
551#define I2C_FUNC_SMBUS_WORD_DATA (I2C_FUNC_SMBUS_READ_WORD_DATA | \ 554#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \
552 I2C_FUNC_SMBUS_WRITE_WORD_DATA) 555 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA)
553#define I2C_FUNC_SMBUS_BLOCK_DATA (I2C_FUNC_SMBUS_READ_BLOCK_DATA | \ 556#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \
554 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA) 557 I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)
555#define I2C_FUNC_SMBUS_I2C_BLOCK (I2C_FUNC_SMBUS_READ_I2C_BLOCK | \ 558
556 I2C_FUNC_SMBUS_WRITE_I2C_BLOCK) 559#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \
557#define I2C_FUNC_SMBUS_I2C_BLOCK_2 (I2C_FUNC_SMBUS_READ_I2C_BLOCK_2 | \ 560 I2C_FUNC_SMBUS_BYTE | \
558 I2C_FUNC_SMBUS_WRITE_I2C_BLOCK_2) 561 I2C_FUNC_SMBUS_BYTE_DATA | \
559 562 I2C_FUNC_SMBUS_WORD_DATA | \
560#define I2C_FUNC_SMBUS_EMUL (I2C_FUNC_SMBUS_QUICK | \ 563 I2C_FUNC_SMBUS_PROC_CALL | \
561 I2C_FUNC_SMBUS_BYTE | \ 564 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \
562 I2C_FUNC_SMBUS_BYTE_DATA | \ 565 I2C_FUNC_SMBUS_I2C_BLOCK | \
563 I2C_FUNC_SMBUS_WORD_DATA | \ 566 I2C_FUNC_SMBUS_PEC)
564 I2C_FUNC_SMBUS_PROC_CALL | \
565 I2C_FUNC_SMBUS_WRITE_BLOCK_DATA | \
566 I2C_FUNC_SMBUS_I2C_BLOCK | \
567 I2C_FUNC_SMBUS_PEC)
568 567
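The I2C_FUNC_* masks above are what client drivers test before attempting a given transfer type; a minimal sketch (hypothetical probe-time check, assuming an i2c_adapter pointer named adapter) using the i2c_check_functionality() helper declared earlier in this header:

	/* Bail out if the adapter cannot do SMBus byte-data transfers,
	 * whether natively or emulated by the I2C core. */
	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -ENODEV;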
569/* 568/*
570 * Data for SMBus Messages 569 * Data for SMBus Messages
@@ -574,7 +573,7 @@ union i2c_smbus_data {
574 __u8 byte; 573 __u8 byte;
575 __u16 word; 574 __u16 word;
576 __u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */ 575 __u8 block[I2C_SMBUS_BLOCK_MAX + 2]; /* block[0] is used for length */
577 /* and one more for user-space compatibility */ 576 /* and one more for user-space compatibility */
578}; 577};
579 578
580/* i2c_smbus_xfer read or write markers */ 579/* i2c_smbus_xfer read or write markers */
@@ -602,21 +601,21 @@ union i2c_smbus_data {
602 601
603/* Default fill of many variables */ 602/* Default fill of many variables */
604#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 603#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
605 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 604 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
606 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 605 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
607 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 606 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
608 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 607 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
609 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 608 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
610 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 609 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
611 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 610 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
612 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 611 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
613 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 612 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
614 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 613 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
615 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 614 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
616 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 615 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
617 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 616 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
618 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ 617 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
619 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END} 618 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
620 619
621/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the 620/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
622 module header */ 621 module header */
@@ -625,7 +624,7 @@ union i2c_smbus_data {
625 static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \ 624 static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
626 static unsigned int var##_num; \ 625 static unsigned int var##_num; \
627 module_param_array(var, short, &var##_num, 0); \ 626 module_param_array(var, short, &var##_num, 0); \
628 MODULE_PARM_DESC(var,desc) 627 MODULE_PARM_DESC(var, desc)
629 628
630#define I2C_CLIENT_MODULE_PARM_FORCE(name) \ 629#define I2C_CLIENT_MODULE_PARM_FORCE(name) \
631I2C_CLIENT_MODULE_PARM(force_##name, \ 630I2C_CLIENT_MODULE_PARM(force_##name, \
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h
new file mode 100644
index 000000000000..fb604dcd38f1
--- /dev/null
+++ b/include/linux/i2c/twl4030.h
@@ -0,0 +1,343 @@
1/*
2 * twl4030.h - header for TWL4030 PM and audio CODEC device
3 *
4 * Copyright (C) 2005-2006 Texas Instruments, Inc.
5 *
6 * Based on tlv320aic23.c:
7 * Copyright (c) by Kai Svahn <kai.svahn@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
25#ifndef __TWL4030_H_
26#define __TWL4030_H_
27
28/*
29 * Using the twl4030 core we address registers using a pair
30 * { module id, relative register offset }
31 * which that core then maps to the relevant
32 * { i2c slave, absolute register address }
33 *
34 * The module IDs are meaningful only to the twl4030 core code,
35 * which uses them as array indices to look up the first register
36 * address each module uses within a given i2c slave.
37 */
38
39/* Slave 0 (i2c address 0x48) */
40#define TWL4030_MODULE_USB 0x00
41
42/* Slave 1 (i2c address 0x49) */
43#define TWL4030_MODULE_AUDIO_VOICE 0x01
44#define TWL4030_MODULE_GPIO 0x02
45#define TWL4030_MODULE_INTBR 0x03
46#define TWL4030_MODULE_PIH 0x04
47#define TWL4030_MODULE_TEST 0x05
48
49/* Slave 2 (i2c address 0x4a) */
50#define TWL4030_MODULE_KEYPAD 0x06
51#define TWL4030_MODULE_MADC 0x07
52#define TWL4030_MODULE_INTERRUPTS 0x08
53#define TWL4030_MODULE_LED 0x09
54#define TWL4030_MODULE_MAIN_CHARGE 0x0A
55#define TWL4030_MODULE_PRECHARGE 0x0B
56#define TWL4030_MODULE_PWM0 0x0C
57#define TWL4030_MODULE_PWM1 0x0D
58#define TWL4030_MODULE_PWMA 0x0E
59#define TWL4030_MODULE_PWMB 0x0F
60
61/* Slave 3 (i2c address 0x4b) */
62#define TWL4030_MODULE_BACKUP 0x10
63#define TWL4030_MODULE_INT 0x11
64#define TWL4030_MODULE_PM_MASTER 0x12
65#define TWL4030_MODULE_PM_RECEIVER 0x13
66#define TWL4030_MODULE_RTC 0x14
67#define TWL4030_MODULE_SECURED_REG 0x15
68
69/*
70 * Read and write single 8-bit registers
71 */
72int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
73int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
74
75/*
76 * Read and write several 8-bit registers at once.
77 *
78 * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1
79 * for the value, and populate your data starting at offset 1.
80 */
81int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, u8 num_bytes);
82int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, u8 num_bytes);
83
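A minimal sketch of the num_bytes + 1 convention noted above (the register and module chosen here are purely illustrative):

	/* Hypothetical example: write two consecutive GPIO registers in one
	 * transfer.  Byte 0 is reserved for the twl4030 core's own use, so
	 * the payload starts at offset 1. */
	u8 buf[2 + 1];
	int ret;

	buf[1] = 0x01;
	buf[2] = 0x02;
	ret = twl4030_i2c_write(TWL4030_MODULE_GPIO, buf, REG_GPIODATAOUT1, 2);
	if (ret < 0)
		pr_err("twl4030 block write failed: %d\n", ret);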
84/*----------------------------------------------------------------------*/
85
86/*
87 * NOTE: at up to 1024 registers, this is a big chip.
88 *
89 * Avoid putting register declarations in this file, instead of into
90 * a driver-private file, unless some of the registers in a block
91 * need to be shared with other drivers. One example is blocks that
92 * have Secondary IRQ Handler (SIH) registers.
93 */
94
95#define TWL4030_SIH_CTRL_EXCLEN_MASK BIT(0)
96#define TWL4030_SIH_CTRL_PENDDIS_MASK BIT(1)
97#define TWL4030_SIH_CTRL_COR_MASK BIT(2)
98
99/*----------------------------------------------------------------------*/
100
101/*
102 * GPIO Block Register offsets (use TWL4030_MODULE_GPIO)
103 */
104
105#define REG_GPIODATAIN1 0x0
106#define REG_GPIODATAIN2 0x1
107#define REG_GPIODATAIN3 0x2
108#define REG_GPIODATADIR1 0x3
109#define REG_GPIODATADIR2 0x4
110#define REG_GPIODATADIR3 0x5
111#define REG_GPIODATAOUT1 0x6
112#define REG_GPIODATAOUT2 0x7
113#define REG_GPIODATAOUT3 0x8
114#define REG_CLEARGPIODATAOUT1 0x9
115#define REG_CLEARGPIODATAOUT2 0xA
116#define REG_CLEARGPIODATAOUT3 0xB
117#define REG_SETGPIODATAOUT1 0xC
118#define REG_SETGPIODATAOUT2 0xD
119#define REG_SETGPIODATAOUT3 0xE
120#define REG_GPIO_DEBEN1 0xF
121#define REG_GPIO_DEBEN2 0x10
122#define REG_GPIO_DEBEN3 0x11
123#define REG_GPIO_CTRL 0x12
124#define REG_GPIOPUPDCTR1 0x13
125#define REG_GPIOPUPDCTR2 0x14
126#define REG_GPIOPUPDCTR3 0x15
127#define REG_GPIOPUPDCTR4 0x16
128#define REG_GPIOPUPDCTR5 0x17
129#define REG_GPIO_ISR1A 0x19
130#define REG_GPIO_ISR2A 0x1A
131#define REG_GPIO_ISR3A 0x1B
132#define REG_GPIO_IMR1A 0x1C
133#define REG_GPIO_IMR2A 0x1D
134#define REG_GPIO_IMR3A 0x1E
135#define REG_GPIO_ISR1B 0x1F
136#define REG_GPIO_ISR2B 0x20
137#define REG_GPIO_ISR3B 0x21
138#define REG_GPIO_IMR1B 0x22
139#define REG_GPIO_IMR2B 0x23
140#define REG_GPIO_IMR3B 0x24
141#define REG_GPIO_EDR1 0x28
142#define REG_GPIO_EDR2 0x29
143#define REG_GPIO_EDR3 0x2A
144#define REG_GPIO_EDR4 0x2B
145#define REG_GPIO_EDR5 0x2C
146#define REG_GPIO_SIH_CTRL 0x2D
147
148/* Up to 18 signals are available as GPIOs, when their
149 * pins are not assigned to another use (such as ULPI/USB).
150 */
151#define TWL4030_GPIO_MAX 18
152
153/*----------------------------------------------------------------------*/
154
155/*
156 * Keypad register offsets (use TWL4030_MODULE_KEYPAD)
157 * ... SIH/interrupt only
158 */
159
160#define TWL4030_KEYPAD_KEYP_ISR1 0x11
161#define TWL4030_KEYPAD_KEYP_IMR1 0x12
162#define TWL4030_KEYPAD_KEYP_ISR2 0x13
163#define TWL4030_KEYPAD_KEYP_IMR2 0x14
164#define TWL4030_KEYPAD_KEYP_SIR 0x15 /* test register */
165#define TWL4030_KEYPAD_KEYP_EDR 0x16
166#define TWL4030_KEYPAD_KEYP_SIH_CTRL 0x17
167
168/*----------------------------------------------------------------------*/
169
170/*
171 * Multichannel ADC register offsets (use TWL4030_MODULE_MADC)
172 * ... SIH/interrupt only
173 */
174
175#define TWL4030_MADC_ISR1 0x61
176#define TWL4030_MADC_IMR1 0x62
177#define TWL4030_MADC_ISR2 0x63
178#define TWL4030_MADC_IMR2 0x64
179#define TWL4030_MADC_SIR 0x65 /* test register */
180#define TWL4030_MADC_EDR 0x66
181#define TWL4030_MADC_SIH_CTRL 0x67
182
183/*----------------------------------------------------------------------*/
184
185/*
186 * Battery charger register offsets (use TWL4030_MODULE_INTERRUPTS)
187 */
188
189#define TWL4030_INTERRUPTS_BCIISR1A 0x0
190#define TWL4030_INTERRUPTS_BCIISR2A 0x1
191#define TWL4030_INTERRUPTS_BCIIMR1A 0x2
192#define TWL4030_INTERRUPTS_BCIIMR2A 0x3
193#define TWL4030_INTERRUPTS_BCIISR1B 0x4
194#define TWL4030_INTERRUPTS_BCIISR2B 0x5
195#define TWL4030_INTERRUPTS_BCIIMR1B 0x6
196#define TWL4030_INTERRUPTS_BCIIMR2B 0x7
197#define TWL4030_INTERRUPTS_BCISIR1 0x8 /* test register */
198#define TWL4030_INTERRUPTS_BCISIR2 0x9 /* test register */
199#define TWL4030_INTERRUPTS_BCIEDR1 0xa
200#define TWL4030_INTERRUPTS_BCIEDR2 0xb
201#define TWL4030_INTERRUPTS_BCIEDR3 0xc
202#define TWL4030_INTERRUPTS_BCISIHCTRL 0xd
203
204/*----------------------------------------------------------------------*/
205
206/*
207 * Power Interrupt block register offsets (use TWL4030_MODULE_INT)
208 */
209
210#define TWL4030_INT_PWR_ISR1 0x0
211#define TWL4030_INT_PWR_IMR1 0x1
212#define TWL4030_INT_PWR_ISR2 0x2
213#define TWL4030_INT_PWR_IMR2 0x3
214#define TWL4030_INT_PWR_SIR 0x4 /* test register */
215#define TWL4030_INT_PWR_EDR1 0x5
216#define TWL4030_INT_PWR_EDR2 0x6
217#define TWL4030_INT_PWR_SIH_CTRL 0x7
218
219/*----------------------------------------------------------------------*/
220
221struct twl4030_bci_platform_data {
222 int *battery_tmp_tbl;
223 unsigned int tblsize;
224};
225
226/* TWL4030_GPIO_MAX (18) GPIOs, with interrupts */
227struct twl4030_gpio_platform_data {
228 int gpio_base;
229 unsigned irq_base, irq_end;
230
231 /* package the two LED signals as output-only GPIOs? */
232 bool use_leds;
233
234 /* gpio-n should control VMMC(n+1) if BIT(n) in mmc_cd is set */
235 u8 mmc_cd;
236
237 /* For gpio-N, bit (1 << N) in "pullups" is set if that pullup
238 * should be enabled. Else, if that bit is set in "pulldowns",
239 * that pulldown is enabled. Don't waste power by letting any
240 * digital inputs float...
241 */
242 u32 pullups;
243 u32 pulldowns;
244
245 int (*setup)(struct device *dev,
246 unsigned gpio, unsigned ngpio);
247 int (*teardown)(struct device *dev,
248 unsigned gpio, unsigned ngpio);
249};
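A hedged sketch of how a board file might fill this in, following the mmc_cd and pullups/pulldowns rules spelled out in the comments above (the gpio_base value and the IRQ range are assumptions, both board specific):

	static struct twl4030_gpio_platform_data board_gpio_data = {
		.gpio_base  = 192,                       /* assumed */
		.irq_base   = TWL4030_GPIO_IRQ_BASE,     /* from <mach/irqs.h> */
		.irq_end    = TWL4030_GPIO_IRQ_BASE + TWL4030_GPIO_MAX,
		.mmc_cd     = BIT(0),                    /* gpio-0 gates VMMC1 */
		.pullups    = BIT(2),
		/* pull every other line down so no digital input floats */
		.pulldowns  = ~(BIT(0) | BIT(2)) & ((1 << TWL4030_GPIO_MAX) - 1),
	};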
250
251struct twl4030_madc_platform_data {
252 int irq_line;
253};
254
255struct twl4030_keypad_data {
256 int rows;
257 int cols;
258 int *keymap;
259 int irq;
260 unsigned int keymapsize;
261 unsigned int rep:1;
262};
263
264enum twl4030_usb_mode {
265 T2_USB_MODE_ULPI = 1,
266 T2_USB_MODE_CEA2011_3PIN = 2,
267};
268
269struct twl4030_usb_data {
270 enum twl4030_usb_mode usb_mode;
271};
272
273struct twl4030_platform_data {
274 unsigned irq_base, irq_end;
275 struct twl4030_bci_platform_data *bci;
276 struct twl4030_gpio_platform_data *gpio;
277 struct twl4030_madc_platform_data *madc;
278 struct twl4030_keypad_data *keypad;
279 struct twl4030_usb_data *usb;
280
281 /* REVISIT more to come ... _nothing_ should be hard-wired */
282};
283
284/*----------------------------------------------------------------------*/
285
286int twl4030_sih_setup(int module);
287
288/*
289 * FIXME completely stop using TWL4030_IRQ_BASE ... instead, pass the
290 * IRQ data to subsidiary devices using platform device resources.
291 */
292
  293/* IRQ information - need the platform's IRQ base */
294#include <mach/irqs.h>
295/* TWL4030 interrupts */
296
297/* #define TWL4030_MODIRQ_GPIO (TWL4030_IRQ_BASE + 0) */
298#define TWL4030_MODIRQ_KEYPAD (TWL4030_IRQ_BASE + 1)
299#define TWL4030_MODIRQ_BCI (TWL4030_IRQ_BASE + 2)
300#define TWL4030_MODIRQ_MADC (TWL4030_IRQ_BASE + 3)
301/* #define TWL4030_MODIRQ_USB (TWL4030_IRQ_BASE + 4) */
302/* #define TWL4030_MODIRQ_PWR (TWL4030_IRQ_BASE + 5) */
303
304#define TWL4030_PWRIRQ_PWRBTN (TWL4030_PWR_IRQ_BASE + 0)
305/* #define TWL4030_PWRIRQ_CHG_PRES (TWL4030_PWR_IRQ_BASE + 1) */
306/* #define TWL4030_PWRIRQ_USB_PRES (TWL4030_PWR_IRQ_BASE + 2) */
307/* #define TWL4030_PWRIRQ_RTC (TWL4030_PWR_IRQ_BASE + 3) */
308/* #define TWL4030_PWRIRQ_HOT_DIE (TWL4030_PWR_IRQ_BASE + 4) */
309/* #define TWL4030_PWRIRQ_PWROK_TIMEOUT (TWL4030_PWR_IRQ_BASE + 5) */
310/* #define TWL4030_PWRIRQ_MBCHG (TWL4030_PWR_IRQ_BASE + 6) */
311/* #define TWL4030_PWRIRQ_SC_DETECT (TWL4030_PWR_IRQ_BASE + 7) */
312
  313/* Rest are unused currently */
314
315/* Offsets to Power Registers */
316#define TWL4030_VDAC_DEV_GRP 0x3B
317#define TWL4030_VDAC_DEDICATED 0x3E
318#define TWL4030_VAUX1_DEV_GRP 0x17
319#define TWL4030_VAUX1_DEDICATED 0x1A
320#define TWL4030_VAUX2_DEV_GRP 0x1B
321#define TWL4030_VAUX2_DEDICATED 0x1E
322#define TWL4030_VAUX3_DEV_GRP 0x1F
323#define TWL4030_VAUX3_DEDICATED 0x22
324
325/* TWL4030 GPIO interrupt definitions */
326
327#define TWL4030_GPIO_IRQ_NO(n) (TWL4030_GPIO_IRQ_BASE + (n))
328
329/*
330 * Exported TWL4030 GPIO APIs
331 *
332 * WARNING -- use standard GPIO and IRQ calls instead; these will vanish.
333 */
334int twl4030_set_gpio_debounce(int gpio, int enable);
335
336#if defined(CONFIG_TWL4030_BCI_BATTERY) || \
337 defined(CONFIG_TWL4030_BCI_BATTERY_MODULE)
338 extern int twl4030charger_usb_en(int enable);
339#else
340 static inline int twl4030charger_usb_en(int enable) { return 0; }
341#endif
342
  343#endif /* End of __TWL4030_H_ */
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 75ae6d8aba4f..4c4e57d1f19d 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -570,7 +570,6 @@ struct i2o_controller {
570#endif 570#endif
571 spinlock_t lock; /* lock for controller 571 spinlock_t lock; /* lock for controller
572 configuration */ 572 configuration */
573
574 void *driver_data[I2O_MAX_DRIVERS]; /* storage for drivers */ 573 void *driver_data[I2O_MAX_DRIVERS]; /* storage for drivers */
575}; 574};
576 575
@@ -691,289 +690,22 @@ static inline u32 i2o_dma_high(dma_addr_t dma_addr)
691}; 690};
692#endif 691#endif
693 692
694/** 693extern u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size);
695 * i2o_sg_tablesize - Calculate the maximum number of elements in a SGL 694extern dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
696 * @c: I2O controller for which the calculation should be done
697 * @body_size: maximum body size used for message in 32-bit words.
698 *
699 * Return the maximum number of SG elements in a SG list.
700 */
701static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
702{
703 i2o_status_block *sb = c->status_block.virt;
704 u16 sg_count =
705 (sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
706 body_size;
707
708 if (c->pae_support) {
709 /*
710 * for 64-bit a SG attribute element must be added and each
711 * SG element needs 12 bytes instead of 8.
712 */
713 sg_count -= 2;
714 sg_count /= 3;
715 } else
716 sg_count /= 2;
717
718 if (c->short_req && (sg_count > 8))
719 sg_count = 8;
720
721 return sg_count;
722};
723
724/**
725 * i2o_dma_map_single - Map pointer to controller and fill in I2O message.
726 * @c: I2O controller
727 * @ptr: pointer to the data which should be mapped
728 * @size: size of data in bytes
729 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
730 * @sg_ptr: pointer to the SG list inside the I2O message
731 *
732 * This function does all necessary DMA handling and also writes the I2O
733 * SGL elements into the I2O message. For details on DMA handling see also
734 * dma_map_single(). The pointer sg_ptr will only be set to the end of the
735 * SG list if the allocation was successful.
736 *
737 * Returns DMA address which must be checked for failures using
738 * dma_mapping_error().
739 */
740static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
741 size_t size, 695 size_t size,
742 enum dma_data_direction direction, 696 enum dma_data_direction direction,
743 u32 ** sg_ptr) 697 u32 ** sg_ptr);
744{ 698extern int i2o_dma_map_sg(struct i2o_controller *c,
745 u32 sg_flags;
746 u32 *mptr = *sg_ptr;
747 dma_addr_t dma_addr;
748
749 switch (direction) {
750 case DMA_TO_DEVICE:
751 sg_flags = 0xd4000000;
752 break;
753 case DMA_FROM_DEVICE:
754 sg_flags = 0xd0000000;
755 break;
756 default:
757 return 0;
758 }
759
760 dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
761 if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
762#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
763 if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
764 *mptr++ = cpu_to_le32(0x7C020002);
765 *mptr++ = cpu_to_le32(PAGE_SIZE);
766 }
767#endif
768
769 *mptr++ = cpu_to_le32(sg_flags | size);
770 *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
771#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
772 if ((sizeof(dma_addr_t) > 4) && c->pae_support)
773 *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
774#endif
775 *sg_ptr = mptr;
776 }
777 return dma_addr;
778};
779
780/**
781 * i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
782 * @c: I2O controller
783 * @sg: SG list to be mapped
784 * @sg_count: number of elements in the SG list
785 * @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
786 * @sg_ptr: pointer to the SG list inside the I2O message
787 *
788 * This function does all necessary DMA handling and also writes the I2O
789 * SGL elements into the I2O message. For details on DMA handling see also
790 * dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
791 * list if the allocation was successful.
792 *
793 * Returns 0 on failure or 1 on success.
794 */
795static inline int i2o_dma_map_sg(struct i2o_controller *c,
796 struct scatterlist *sg, int sg_count, 699 struct scatterlist *sg, int sg_count,
797 enum dma_data_direction direction, 700 enum dma_data_direction direction,
798 u32 ** sg_ptr) 701 u32 ** sg_ptr);
799{ 702extern int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len);
800 u32 sg_flags; 703extern void i2o_dma_free(struct device *dev, struct i2o_dma *addr);
801 u32 *mptr = *sg_ptr; 704extern int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
802 705 size_t len);
803 switch (direction) { 706extern int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
804 case DMA_TO_DEVICE: 707 size_t size, int min_nr);
805 sg_flags = 0x14000000; 708extern void i2o_pool_free(struct i2o_pool *pool);
806 break;
807 case DMA_FROM_DEVICE:
808 sg_flags = 0x10000000;
809 break;
810 default:
811 return 0;
812 }
813
814 sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
815 if (!sg_count)
816 return 0;
817
818#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
819 if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
820 *mptr++ = cpu_to_le32(0x7C020002);
821 *mptr++ = cpu_to_le32(PAGE_SIZE);
822 }
823#endif
824
825 while (sg_count-- > 0) {
826 if (!sg_count)
827 sg_flags |= 0xC0000000;
828 *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
829 *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
830#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
831 if ((sizeof(dma_addr_t) > 4) && c->pae_support)
832 *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
833#endif
834 sg = sg_next(sg);
835 }
836 *sg_ptr = mptr;
837
838 return 1;
839};
840
841/**
842 * i2o_dma_alloc - Allocate DMA memory
843 * @dev: struct device pointer to the PCI device of the I2O controller
844 * @addr: i2o_dma struct which should get the DMA buffer
845 * @len: length of the new DMA memory
846 * @gfp_mask: GFP mask
847 *
848 * Allocate a coherent DMA memory and write the pointers into addr.
849 *
850 * Returns 0 on success or -ENOMEM on failure.
851 */
852static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
853 size_t len, gfp_t gfp_mask)
854{
855 struct pci_dev *pdev = to_pci_dev(dev);
856 int dma_64 = 0;
857
858 if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
859 dma_64 = 1;
860 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
861 return -ENOMEM;
862 }
863
864 addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
865
866 if ((sizeof(dma_addr_t) > 4) && dma_64)
867 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
868 printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
869
870 if (!addr->virt)
871 return -ENOMEM;
872
873 memset(addr->virt, 0, len);
874 addr->len = len;
875
876 return 0;
877};
878
879/**
880 * i2o_dma_free - Free DMA memory
881 * @dev: struct device pointer to the PCI device of the I2O controller
882 * @addr: i2o_dma struct which contains the DMA buffer
883 *
884 * Free a coherent DMA memory and set virtual address of addr to NULL.
885 */
886static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
887{
888 if (addr->virt) {
889 if (addr->phys)
890 dma_free_coherent(dev, addr->len, addr->virt,
891 addr->phys);
892 else
893 kfree(addr->virt);
894 addr->virt = NULL;
895 }
896};
897
898/**
899 * i2o_dma_realloc - Realloc DMA memory
900 * @dev: struct device pointer to the PCI device of the I2O controller
901 * @addr: pointer to a i2o_dma struct DMA buffer
902 * @len: new length of memory
903 * @gfp_mask: GFP mask
904 *
905 * If there was something allocated in the addr, free it first. If len > 0
906 * than try to allocate it and write the addresses back to the addr
907 * structure. If len == 0 set the virtual address to NULL.
908 *
909 * Returns the 0 on success or negative error code on failure.
910 */
911static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
912 size_t len, gfp_t gfp_mask)
913{
914 i2o_dma_free(dev, addr);
915
916 if (len)
917 return i2o_dma_alloc(dev, addr, len, gfp_mask);
918
919 return 0;
920};
921
922/*
923 * i2o_pool_alloc - Allocate an slab cache and mempool
924 * @mempool: pointer to struct i2o_pool to write data into.
925 * @name: name which is used to identify cache
926 * @size: size of each object
927 * @min_nr: minimum number of objects
928 *
929 * First allocates a slab cache with name and size. Then allocates a
930 * mempool which uses the slab cache for allocation and freeing.
931 *
932 * Returns 0 on success or negative error code on failure.
933 */
934static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
935 size_t size, int min_nr)
936{
937 pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
938 if (!pool->name)
939 goto exit;
940 strcpy(pool->name, name);
941
942 pool->slab =
943 kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL);
944 if (!pool->slab)
945 goto free_name;
946
947 pool->mempool = mempool_create_slab_pool(min_nr, pool->slab);
948 if (!pool->mempool)
949 goto free_slab;
950
951 return 0;
952
953 free_slab:
954 kmem_cache_destroy(pool->slab);
955
956 free_name:
957 kfree(pool->name);
958
959 exit:
960 return -ENOMEM;
961};
962
963/*
964 * i2o_pool_free - Free slab cache and mempool again
965 * @mempool: pointer to struct i2o_pool which should be freed
966 *
967 * Note that you have to return all objects to the mempool again before
968 * calling i2o_pool_free().
969 */
970static inline void i2o_pool_free(struct i2o_pool *pool)
971{
972 mempool_destroy(pool->mempool);
973 kmem_cache_destroy(pool->slab);
974 kfree(pool->name);
975};
976
977/* I2O driver (OSM) functions */ 709/* I2O driver (OSM) functions */
978extern int i2o_driver_register(struct i2o_driver *); 710extern int i2o_driver_register(struct i2o_driver *);
979extern void i2o_driver_unregister(struct i2o_driver *); 711extern void i2o_driver_unregister(struct i2o_driver *);
diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h
new file mode 100644
index 000000000000..05a80c44513c
--- /dev/null
+++ b/include/linux/i7300_idle.h
@@ -0,0 +1,83 @@
1
2#ifndef I7300_IDLE_H
3#define I7300_IDLE_H
4
5#include <linux/pci.h>
6
7/*
8 * I/O AT controls (PCI bus 0 device 8 function 0)
9 * DIMM controls (PCI bus 0 device 16 function 1)
10 */
11#define IOAT_BUS 0
12#define IOAT_DEVFN PCI_DEVFN(8, 0)
13#define MEMCTL_BUS 0
14#define MEMCTL_DEVFN PCI_DEVFN(16, 1)
15
16struct fbd_ioat {
17 unsigned int vendor;
18 unsigned int ioat_dev;
19};
20
21/*
22 * The i5000 chip-set has the same hooks as the i7300
23 * but support is disabled by default because this driver
24 * has not been validated on that platform.
25 */
26#define SUPPORT_I5000 0
27
28static const struct fbd_ioat fbd_ioat_list[] = {
29 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB},
30#if SUPPORT_I5000
31 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT},
32#endif
33 {0, 0}
34};
35
36/* table of devices that work with this driver */
37static const struct pci_device_id pci_tbl[] = {
38 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
39#if SUPPORT_I5000
40 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
41#endif
42 { } /* Terminating entry */
43};
44
45/* Check for known platforms with I/O-AT */
46static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
47 struct pci_dev **ioat_dev)
48{
49 int i;
50 struct pci_dev *memdev, *dmadev;
51
52 memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN);
53 if (!memdev)
54 return -ENODEV;
55
56 for (i = 0; pci_tbl[i].vendor != 0; i++) {
57 if (memdev->vendor == pci_tbl[i].vendor &&
58 memdev->device == pci_tbl[i].device) {
59 break;
60 }
61 }
62 if (pci_tbl[i].vendor == 0)
63 return -ENODEV;
64
65 dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN);
66 if (!dmadev)
67 return -ENODEV;
68
69 for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
70 if (dmadev->vendor == fbd_ioat_list[i].vendor &&
71 dmadev->device == fbd_ioat_list[i].ioat_dev) {
72 if (fbd_dev)
73 *fbd_dev = memdev;
74 if (ioat_dev)
75 *ioat_dev = dmadev;
76
77 return 0;
78 }
79 }
80 return -ENODEV;
81}
82
83#endif
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index 03067443198a..a93a8dd33118 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -40,16 +40,18 @@ struct icmp6hdr {
40 struct icmpv6_nd_ra { 40 struct icmpv6_nd_ra {
41 __u8 hop_limit; 41 __u8 hop_limit;
42#if defined(__LITTLE_ENDIAN_BITFIELD) 42#if defined(__LITTLE_ENDIAN_BITFIELD)
43 __u8 reserved:4, 43 __u8 reserved:3,
44 router_pref:2, 44 router_pref:2,
45 home_agent:1,
45 other:1, 46 other:1,
46 managed:1; 47 managed:1;
47 48
48#elif defined(__BIG_ENDIAN_BITFIELD) 49#elif defined(__BIG_ENDIAN_BITFIELD)
49 __u8 managed:1, 50 __u8 managed:1,
50 other:1, 51 other:1,
52 home_agent:1,
51 router_pref:2, 53 router_pref:2,
52 reserved:4; 54 reserved:3;
53#else 55#else
54#error "Please fix <asm/byteorder.h>" 56#error "Please fix <asm/byteorder.h>"
55#endif 57#endif
diff --git a/include/linux/ide.h b/include/linux/ide.h
index c47e371554c1..e99c56de7f56 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -122,8 +122,6 @@ struct ide_io_ports {
122#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ 122#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
123#define SECTOR_SIZE 512 123#define SECTOR_SIZE 512
124 124
125#define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t)))
126
127/* 125/*
128 * Timeouts for various operations: 126 * Timeouts for various operations:
129 */ 127 */
@@ -172,9 +170,7 @@ typedef int (ide_ack_intr_t)(struct hwif_s *);
172enum { ide_unknown, ide_generic, ide_pci, 170enum { ide_unknown, ide_generic, ide_pci,
173 ide_cmd640, ide_dtc2278, ide_ali14xx, 171 ide_cmd640, ide_dtc2278, ide_ali14xx,
174 ide_qd65xx, ide_umc8672, ide_ht6560b, 172 ide_qd65xx, ide_umc8672, ide_ht6560b,
175 ide_rz1000, ide_trm290, 173 ide_4drives, ide_pmac, ide_acorn,
176 ide_cmd646, ide_cy82c693, ide_4drives,
177 ide_pmac, ide_acorn,
178 ide_au1xxx, ide_palm3710 174 ide_au1xxx, ide_palm3710
179}; 175};
180 176
@@ -461,12 +457,26 @@ struct ide_acpi_drive_link;
461struct ide_acpi_hwif_link; 457struct ide_acpi_hwif_link;
462#endif 458#endif
463 459
460struct ide_drive_s;
461
462struct ide_disk_ops {
463 int (*check)(struct ide_drive_s *, const char *);
464 int (*get_capacity)(struct ide_drive_s *);
465 void (*setup)(struct ide_drive_s *);
466 void (*flush)(struct ide_drive_s *);
467 int (*init_media)(struct ide_drive_s *, struct gendisk *);
468 int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
469 int);
470 ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
471 sector_t);
472 int (*end_request)(struct ide_drive_s *, int, int);
473 int (*ioctl)(struct ide_drive_s *, struct block_device *,
474 fmode_t, unsigned int, unsigned long);
475};
476
464/* ATAPI device flags */ 477/* ATAPI device flags */
465enum { 478enum {
466 IDE_AFLAG_DRQ_INTERRUPT = (1 << 0), 479 IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
467 IDE_AFLAG_MEDIA_CHANGED = (1 << 1),
468 /* Drive cannot lock the door. */
469 IDE_AFLAG_NO_DOORLOCK = (1 << 2),
470 480
471 /* ide-cd */ 481 /* ide-cd */
472 /* Drive cannot eject the disc. */ 482 /* Drive cannot eject the disc. */
@@ -482,8 +492,6 @@ enum {
482 * when more than one interrupt is needed. 492 * when more than one interrupt is needed.
483 */ 493 */
484 IDE_AFLAG_LIMIT_NFRAMES = (1 << 7), 494 IDE_AFLAG_LIMIT_NFRAMES = (1 << 7),
485 /* Seeking in progress. */
486 IDE_AFLAG_SEEKING = (1 << 8),
487 /* Saved TOC information is current. */ 495 /* Saved TOC information is current. */
488 IDE_AFLAG_TOC_VALID = (1 << 9), 496 IDE_AFLAG_TOC_VALID = (1 << 9),
489 /* We think that the drive door is locked. */ 497 /* We think that the drive door is locked. */
@@ -498,14 +506,10 @@ enum {
498 IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17), 506 IDE_AFLAG_LE_SPEED_FIELDS = (1 << 17),
499 507
500 /* ide-floppy */ 508 /* ide-floppy */
501 /* Format in progress */
502 IDE_AFLAG_FORMAT_IN_PROGRESS = (1 << 18),
503 /* Avoid commands not supported in Clik drive */ 509 /* Avoid commands not supported in Clik drive */
504 IDE_AFLAG_CLIK_DRIVE = (1 << 19), 510 IDE_AFLAG_CLIK_DRIVE = (1 << 19),
505 /* Requires BH algorithm for packets */ 511 /* Requires BH algorithm for packets */
506 IDE_AFLAG_ZIP_DRIVE = (1 << 20), 512 IDE_AFLAG_ZIP_DRIVE = (1 << 20),
507 /* Write protect */
508 IDE_AFLAG_WP = (1 << 21),
509 /* Supports format progress report */ 513 /* Supports format progress report */
510 IDE_AFLAG_SRFP = (1 << 22), 514 IDE_AFLAG_SRFP = (1 << 22),
511 515
@@ -578,7 +582,11 @@ enum {
578 /* don't unload heads */ 582 /* don't unload heads */
579 IDE_DFLAG_NO_UNLOAD = (1 << 27), 583 IDE_DFLAG_NO_UNLOAD = (1 << 27),
580 /* heads unloaded, please don't reset port */ 584 /* heads unloaded, please don't reset port */
581 IDE_DFLAG_PARKED = (1 << 28) 585 IDE_DFLAG_PARKED = (1 << 28),
586 IDE_DFLAG_MEDIA_CHANGED = (1 << 29),
587 /* write protect */
588 IDE_DFLAG_WP = (1 << 30),
589 IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 31),
582}; 590};
583 591
584struct ide_drive_s { 592struct ide_drive_s {
@@ -597,6 +605,8 @@ struct ide_drive_s {
597#endif 605#endif
598 struct hwif_s *hwif; /* actually (ide_hwif_t *) */ 606 struct hwif_s *hwif; /* actually (ide_hwif_t *) */
599 607
608 const struct ide_disk_ops *disk_ops;
609
600 unsigned long dev_flags; 610 unsigned long dev_flags;
601 611
602 unsigned long sleep; /* sleep until this time */ 612 unsigned long sleep; /* sleep until this time */
@@ -829,8 +839,6 @@ typedef struct hwif_s {
829 unsigned extra_ports; /* number of extra dma ports */ 839 unsigned extra_ports; /* number of extra dma ports */
830 840
831 unsigned present : 1; /* this interface exists */ 841 unsigned present : 1; /* this interface exists */
832 unsigned serialized : 1; /* serialized all channel operation */
833 unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
834 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ 842 unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
835 843
836 struct device gendev; 844 struct device gendev;
@@ -893,6 +901,8 @@ typedef struct hwgroup_s {
893 901
894 int req_gen; 902 int req_gen;
895 int req_gen_timer; 903 int req_gen_timer;
904
905 spinlock_t lock;
896} ide_hwgroup_t; 906} ide_hwgroup_t;
897 907
898typedef struct ide_driver_s ide_driver_t; 908typedef struct ide_driver_s ide_driver_t;
@@ -1106,6 +1116,14 @@ enum {
1106 IDE_PM_COMPLETED, 1116 IDE_PM_COMPLETED,
1107}; 1117};
1108 1118
1119int generic_ide_suspend(struct device *, pm_message_t);
1120int generic_ide_resume(struct device *);
1121
1122void ide_complete_power_step(ide_drive_t *, struct request *);
1123ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
1124void ide_complete_pm_request(ide_drive_t *, struct request *);
1125void ide_check_pm_state(ide_drive_t *, struct request *);
1126
1109/* 1127/*
1110 * Subdrivers support. 1128 * Subdrivers support.
1111 * 1129 *
@@ -1123,8 +1141,8 @@ struct ide_driver_s {
1123 void (*resume)(ide_drive_t *); 1141 void (*resume)(ide_drive_t *);
1124 void (*shutdown)(ide_drive_t *); 1142 void (*shutdown)(ide_drive_t *);
1125#ifdef CONFIG_IDE_PROC_FS 1143#ifdef CONFIG_IDE_PROC_FS
1126 ide_proc_entry_t *proc; 1144 ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
1127 const struct ide_proc_devset *settings; 1145 const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
1128#endif 1146#endif
1129}; 1147};
1130 1148
@@ -1142,8 +1160,7 @@ struct ide_ioctl_devset {
1142int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int, 1160int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
1143 unsigned long, const struct ide_ioctl_devset *); 1161 unsigned long, const struct ide_ioctl_devset *);
1144 1162
1145int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, 1163int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
1146 unsigned, unsigned long);
1147 1164
1148extern int ide_vlb_clk; 1165extern int ide_vlb_clk;
1149extern int ide_pci_clk; 1166extern int ide_pci_clk;
@@ -1281,6 +1298,13 @@ extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *o
1281#define ide_pci_register_driver(d) pci_register_driver(d) 1298#define ide_pci_register_driver(d) pci_register_driver(d)
1282#endif 1299#endif
1283 1300
1301static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
1302{
1303 if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
1304 return 1;
1305 return 0;
1306}
1307
1284void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, 1308void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int,
1285 hw_regs_t *, hw_regs_t **); 1309 hw_regs_t *, hw_regs_t **);
1286void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); 1310void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
@@ -1354,12 +1378,13 @@ enum {
1354 IDE_HFLAG_LEGACY_IRQS = (1 << 21), 1378 IDE_HFLAG_LEGACY_IRQS = (1 << 21),
1355 /* force use of legacy IRQs */ 1379 /* force use of legacy IRQs */
1356 IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22), 1380 IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22),
1357 /* limit LBA48 requests to 256 sectors */ 1381 /* host is TRM290 */
1358 IDE_HFLAG_RQSIZE_256 = (1 << 23), 1382 IDE_HFLAG_TRM290 = (1 << 23),
1359 /* use 32-bit I/O ops */ 1383 /* use 32-bit I/O ops */
1360 IDE_HFLAG_IO_32BIT = (1 << 24), 1384 IDE_HFLAG_IO_32BIT = (1 << 24),
1361 /* unmask IRQs */ 1385 /* unmask IRQs */
1362 IDE_HFLAG_UNMASK_IRQS = (1 << 25), 1386 IDE_HFLAG_UNMASK_IRQS = (1 << 25),
1387 IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26),
1363 /* serialize ports if DMA is possible (for sl82c105) */ 1388 /* serialize ports if DMA is possible (for sl82c105) */
1364 IDE_HFLAG_SERIALIZE_DMA = (1 << 27), 1389 IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
1365 /* force host out of "simplex" mode */ 1390 /* force host out of "simplex" mode */
@@ -1392,6 +1417,9 @@ struct ide_port_info {
1392 1417
1393 ide_pci_enablebit_t enablebits[2]; 1418 ide_pci_enablebit_t enablebits[2];
1394 hwif_chipset_t chipset; 1419 hwif_chipset_t chipset;
1420
1421 u16 max_sectors; /* if < than the default one */
1422
1395 u32 host_flags; 1423 u32 host_flags;
1396 u8 pio_mask; 1424 u8 pio_mask;
1397 u8 swdma_mask; 1425 u8 swdma_mask;
@@ -1587,13 +1615,13 @@ extern struct mutex ide_cfg_mtx;
1587/* 1615/*
1588 * Structure locking: 1616 * Structure locking:
1589 * 1617 *
1590 * ide_cfg_mtx and ide_lock together protect changes to 1618 * ide_cfg_mtx and hwgroup->lock together protect changes to
1591 * ide_hwif_t->{next,hwgroup} 1619 * ide_hwif_t->next
1592 * ide_drive_t->next 1620 * ide_drive_t->next
1593 * 1621 *
1594 * ide_hwgroup_t->busy: ide_lock 1622 * ide_hwgroup_t->busy: hwgroup->lock
1595 * ide_hwgroup_t->hwif: ide_lock 1623 * ide_hwgroup_t->hwif: hwgroup->lock
1596 * ide_hwif_t->mate: constant, no locking 1624 * ide_hwif_t->{hwgroup,mate}: constant, no locking
1597 * ide_drive_t->hwif: constant, no locking 1625 * ide_drive_t->hwif: constant, no locking
1598 */ 1626 */
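Under the revised rules above, per-hwgroup state is guarded by the group's own spinlock instead of the old global ide_lock; a minimal hedged sketch of the convention:

	/* Hypothetical illustration: ide_hwgroup_t->busy is now protected
	 * by hwgroup->lock. */
	unsigned long flags;

	spin_lock_irqsave(&hwgroup->lock, flags);
	hwgroup->busy = 1;
	spin_unlock_irqrestore(&hwgroup->lock, flags);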
1599 1627
diff --git a/include/linux/idr.h b/include/linux/idr.h
index fa035f96f2a3..dd846df8cd32 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -52,13 +52,14 @@ struct idr_layer {
52 unsigned long bitmap; /* A zero bit means "space here" */ 52 unsigned long bitmap; /* A zero bit means "space here" */
53 struct idr_layer *ary[1<<IDR_BITS]; 53 struct idr_layer *ary[1<<IDR_BITS];
54 int count; /* When zero, we can release it */ 54 int count; /* When zero, we can release it */
55 int layer; /* distance from leaf */
55 struct rcu_head rcu_head; 56 struct rcu_head rcu_head;
56}; 57};
57 58
58struct idr { 59struct idr {
59 struct idr_layer *top; 60 struct idr_layer *top;
60 struct idr_layer *id_free; 61 struct idr_layer *id_free;
61 int layers; 62 int layers; /* only valid without concurrent changes */
62 int id_free_cnt; 63 int id_free_cnt;
63 spinlock_t lock; 64 spinlock_t lock;
64}; 65};
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 14126bc36641..c4e6ca1a6306 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -12,8 +12,8 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#ifndef IEEE80211_H 15#ifndef LINUX_IEEE80211_H
16#define IEEE80211_H 16#define LINUX_IEEE80211_H
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19#include <asm/byteorder.h> 19#include <asm/byteorder.h>
@@ -97,7 +97,10 @@
97#define IEEE80211_MAX_FRAME_LEN 2352 97#define IEEE80211_MAX_FRAME_LEN 2352
98 98
99#define IEEE80211_MAX_SSID_LEN 32 99#define IEEE80211_MAX_SSID_LEN 32
100
100#define IEEE80211_MAX_MESH_ID_LEN 32 101#define IEEE80211_MAX_MESH_ID_LEN 32
102#define IEEE80211_MESH_CONFIG_LEN 19
103
101#define IEEE80211_QOS_CTL_LEN 2 104#define IEEE80211_QOS_CTL_LEN 2
102#define IEEE80211_QOS_CTL_TID_MASK 0x000F 105#define IEEE80211_QOS_CTL_TID_MASK 0x000F
103#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007 106#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
@@ -666,6 +669,13 @@ struct ieee80211_cts {
666 u8 ra[6]; 669 u8 ra[6];
667} __attribute__ ((packed)); 670} __attribute__ ((packed));
668 671
672struct ieee80211_pspoll {
673 __le16 frame_control;
674 __le16 aid;
675 u8 bssid[6];
676 u8 ta[6];
677} __attribute__ ((packed));
678
669/** 679/**
670 * struct ieee80211_bar - HT Block Ack Request 680 * struct ieee80211_bar - HT Block Ack Request
671 * 681 *
@@ -685,28 +695,88 @@ struct ieee80211_bar {
685#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000 695#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
686#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004 696#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
687 697
698
699#define IEEE80211_HT_MCS_MASK_LEN 10
700
701/**
702 * struct ieee80211_mcs_info - MCS information
703 * @rx_mask: RX mask
704 * @rx_highest: highest supported RX rate
705 * @tx_params: TX parameters
706 */
707struct ieee80211_mcs_info {
708 u8 rx_mask[IEEE80211_HT_MCS_MASK_LEN];
709 __le16 rx_highest;
710 u8 tx_params;
711 u8 reserved[3];
712} __attribute__((packed));
713
714/* 802.11n HT capability MSC set */
715#define IEEE80211_HT_MCS_RX_HIGHEST_MASK 0x3ff
716#define IEEE80211_HT_MCS_TX_DEFINED 0x01
717#define IEEE80211_HT_MCS_TX_RX_DIFF 0x02
718/* value 0 == 1 stream etc */
719#define IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK 0x0C
720#define IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT 2
721#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
722#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
723
724/*
725 * 802.11n D5.0 20.3.5 / 20.6 says:
726 * - indices 0 to 7 and 32 are single spatial stream
727 * - 8 to 31 are multiple spatial streams using equal modulation
728 * [8..15 for two streams, 16..23 for three and 24..31 for four]
729 * - remainder are multiple spatial streams using unequal modulation
730 */
731#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START 33
732#define IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE \
733 (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START / 8)
734
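A small sketch of how the index layout above maps onto the rx_mask bitmap (hypothetical helper, not part of this header):

	/* True if the peer advertises any unequal-modulation MCS (index 33
	 * and up).  Index 32 shares a byte with index 33, so mask it out. */
	static bool has_unequal_mod_mcs(const struct ieee80211_mcs_info *mcs)
	{
		int i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE;
		u8 first = mcs->rx_mask[i] &
			   (0xff << (IEEE80211_HT_MCS_UNEQUAL_MODULATION_START % 8));

		if (first)
			return true;
		for (i++; i < IEEE80211_HT_MCS_MASK_LEN; i++)
			if (mcs->rx_mask[i])
				return true;
		return false;
	}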
688/** 735/**
689 * struct ieee80211_ht_cap - HT capabilities 736 * struct ieee80211_ht_cap - HT capabilities
690 * 737 *
691 * This structure refers to "HT capabilities element" as 738 * This structure is the "HT capabilities element" as
692 * described in 802.11n draft section 7.3.2.52 739 * described in 802.11n D5.0 7.3.2.57
693 */ 740 */
694struct ieee80211_ht_cap { 741struct ieee80211_ht_cap {
695 __le16 cap_info; 742 __le16 cap_info;
696 u8 ampdu_params_info; 743 u8 ampdu_params_info;
697 u8 supp_mcs_set[16]; 744
745 /* 16 bytes MCS information */
746 struct ieee80211_mcs_info mcs;
747
698 __le16 extended_ht_cap_info; 748 __le16 extended_ht_cap_info;
699 __le32 tx_BF_cap_info; 749 __le32 tx_BF_cap_info;
700 u8 antenna_selection_info; 750 u8 antenna_selection_info;
701} __attribute__ ((packed)); 751} __attribute__ ((packed));
702 752
753/* 802.11n HT capabilities masks (for cap_info) */
754#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
755#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
756#define IEEE80211_HT_CAP_SM_PS 0x000C
757#define IEEE80211_HT_CAP_GRN_FLD 0x0010
758#define IEEE80211_HT_CAP_SGI_20 0x0020
759#define IEEE80211_HT_CAP_SGI_40 0x0040
760#define IEEE80211_HT_CAP_TX_STBC 0x0080
761#define IEEE80211_HT_CAP_RX_STBC 0x0300
762#define IEEE80211_HT_CAP_DELAY_BA 0x0400
763#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
764#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
765#define IEEE80211_HT_CAP_PSMP_SUPPORT 0x2000
766#define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000
767#define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000
768
769/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
770#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
771#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
772
703/** 773/**
704 * struct ieee80211_ht_cap - HT additional information 774 * struct ieee80211_ht_info - HT information
705 * 775 *
706 * This structure refers to "HT information element" as 776 * This structure is the "HT information element" as
707 * described in 802.11n draft section 7.3.2.53 777 * described in 802.11n D5.0 7.3.2.58
708 */ 778 */
709struct ieee80211_ht_addt_info { 779struct ieee80211_ht_info {
710 u8 control_chan; 780 u8 control_chan;
711 u8 ht_param; 781 u8 ht_param;
712 __le16 operation_mode; 782 __le16 operation_mode;
@@ -714,36 +784,33 @@ struct ieee80211_ht_addt_info {
714 u8 basic_set[16]; 784 u8 basic_set[16];
715} __attribute__ ((packed)); 785} __attribute__ ((packed));
716 786
717/* 802.11n HT capabilities masks */ 787/* for ht_param */
718#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002 788#define IEEE80211_HT_PARAM_CHA_SEC_OFFSET 0x03
719#define IEEE80211_HT_CAP_SM_PS 0x000C 789#define IEEE80211_HT_PARAM_CHA_SEC_NONE 0x00
720#define IEEE80211_HT_CAP_GRN_FLD 0x0010 790#define IEEE80211_HT_PARAM_CHA_SEC_ABOVE 0x01
721#define IEEE80211_HT_CAP_SGI_20 0x0020 791#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
722#define IEEE80211_HT_CAP_SGI_40 0x0040 792#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
723#define IEEE80211_HT_CAP_DELAY_BA 0x0400 793#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
724#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 794#define IEEE80211_HT_PARAM_SPSMP_SUPPORT 0x10
725#define IEEE80211_HT_CAP_DSSSCCK40 0x1000 795#define IEEE80211_HT_PARAM_SERV_INTERVAL_GRAN 0xE0
726/* 802.11n HT capability AMPDU settings */ 796
727#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03 797/* for operation_mode */
728#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C 798#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
729/* 802.11n HT capability MSC set */ 799#define IEEE80211_HT_OP_MODE_PROTECTION_NONE 0
730#define IEEE80211_SUPP_MCS_SET_UEQM 4 800#define IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER 1
731#define IEEE80211_HT_CAP_MAX_STREAMS 4 801#define IEEE80211_HT_OP_MODE_PROTECTION_20MHZ 2
732#define IEEE80211_SUPP_MCS_SET_LEN 10 802#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
733/* maximum streams the spec allows */ 803#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
734#define IEEE80211_HT_CAP_MCS_TX_DEFINED 0x01 804#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
735#define IEEE80211_HT_CAP_MCS_TX_RX_DIFF 0x02 805
736#define IEEE80211_HT_CAP_MCS_TX_STREAMS 0x0C 806/* for stbc_param */
737#define IEEE80211_HT_CAP_MCS_TX_UEQM 0x10 807#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
738/* 802.11n HT IE masks */ 808#define IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT 0x0080
739#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03 809#define IEEE80211_HT_STBC_PARAM_STBC_BEACON 0x0100
740#define IEEE80211_HT_IE_CHA_SEC_NONE 0x00 810#define IEEE80211_HT_STBC_PARAM_LSIG_TXOP_FULLPROT 0x0200
741#define IEEE80211_HT_IE_CHA_SEC_ABOVE 0x01 811#define IEEE80211_HT_STBC_PARAM_PCO_ACTIVE 0x0400
742#define IEEE80211_HT_IE_CHA_SEC_BELOW 0x03 812#define IEEE80211_HT_STBC_PARAM_PCO_PHASE 0x0800
743#define IEEE80211_HT_IE_CHA_WIDTH 0x04 813
744#define IEEE80211_HT_IE_HT_PROTECTION 0x0003
745#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
746#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
747 814
748/* block-ack parameters */ 815/* block-ack parameters */
749#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002 816#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
@@ -769,7 +836,6 @@ struct ieee80211_ht_addt_info {
769/* Authentication algorithms */ 836/* Authentication algorithms */
770#define WLAN_AUTH_OPEN 0 837#define WLAN_AUTH_OPEN 0
771#define WLAN_AUTH_SHARED_KEY 1 838#define WLAN_AUTH_SHARED_KEY 1
772#define WLAN_AUTH_FAST_BSS_TRANSITION 2
773#define WLAN_AUTH_LEAP 128 839#define WLAN_AUTH_LEAP 128
774 840
775#define WLAN_AUTH_CHALLENGE_LEN 128 841#define WLAN_AUTH_CHALLENGE_LEN 128
@@ -949,7 +1015,7 @@ enum ieee80211_eid {
949 WLAN_EID_EXT_SUPP_RATES = 50, 1015 WLAN_EID_EXT_SUPP_RATES = 50,
950 /* 802.11n */ 1016 /* 802.11n */
951 WLAN_EID_HT_CAPABILITY = 45, 1017 WLAN_EID_HT_CAPABILITY = 45,
952 WLAN_EID_HT_EXTRA_INFO = 61, 1018 WLAN_EID_HT_INFORMATION = 61,
953 /* 802.11i */ 1019 /* 802.11i */
954 WLAN_EID_RSN = 48, 1020 WLAN_EID_RSN = 48,
955 WLAN_EID_WPA = 221, 1021 WLAN_EID_WPA = 221,
@@ -976,6 +1042,68 @@ enum ieee80211_spectrum_mgmt_actioncode {
976 WLAN_ACTION_SPCT_CHL_SWITCH = 4, 1042 WLAN_ACTION_SPCT_CHL_SWITCH = 4,
977}; 1043};
978 1044
1045/*
1046 * IEEE 802.11-2007 7.3.2.9 Country information element
1047 *
1048 * Minimum length is 8 octets, ie len must be evenly
1049 * divisible by 2
1050 */
1051
1052/* Although the spec says 8 I'm seeing 6 in practice */
1053#define IEEE80211_COUNTRY_IE_MIN_LEN 6
1054
1055/*
1056 * For regulatory extension stuff see IEEE 802.11-2007
1057 * Annex I (page 1141) and Annex J (page 1147). Also
1058 * review 7.3.2.9.
1059 *
1060 * When dot11RegulatoryClassesRequired is true and the
1061 * first_channel/reg_extension_id is >= 201 then the IE
1062 * compromises of the 'ext' struct represented below:
1063 *
1064 * - Regulatory extension ID - when generating IE this just needs
1065 * to be monotonically increasing for each triplet passed in
1066 * the IE
1067 * - Regulatory class - index into set of rules
1068 * - Coverage class - index into air propagation time (Table 7-27),
1069 * in microseconds, you can compute the air propagation time from
 1070 * the index by multiplying by 3, so index 10 yields a propagation
 1071 * of 30 us. Valid values are 0-31, values 32-255 are not defined
 1072 * yet. A value of 0 indicates air propagation of <= 1 us.
1073 *
1074 * See also Table I.2 for Emission limit sets and table
1075 * I.3 for Behavior limit sets. Table J.1 indicates how to map
1076 * a reg_class to an emission limit set and behavior limit set.
1077 */
1078#define IEEE80211_COUNTRY_EXTENSION_ID 201
1079
1080/*
1081 * Channels numbers in the IE must be monotonically increasing
1082 * if dot11RegulatoryClassesRequired is not true.
1083 *
1084 * If dot11RegulatoryClassesRequired is true consecutive
1085 * subband triplets following a regulatory triplet shall
1086 * have monotonically increasing first_channel number fields.
1087 *
1088 * Channel numbers shall not overlap.
1089 *
1090 * Note that max_power is signed.
1091 */
1092struct ieee80211_country_ie_triplet {
1093 union {
1094 struct {
1095 u8 first_channel;
1096 u8 num_channels;
1097 s8 max_power;
1098 } __attribute__ ((packed)) chans;
1099 struct {
1100 u8 reg_extension_id;
1101 u8 reg_class;
1102 u8 coverage_class;
1103 } __attribute__ ((packed)) ext;
1104 };
1105} __attribute__ ((packed));
1106
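A brief sketch of how a parser might tell the two triplet forms apart, per the >= 201 rule above (hypothetical helper, not part of this header):

	/* A first octet of 201 or more marks a regulatory-extension triplet;
	 * anything lower is an ordinary sub-band triplet. */
	static bool triplet_is_extension(const struct ieee80211_country_ie_triplet *t)
	{
		return t->ext.reg_extension_id >= IEEE80211_COUNTRY_EXTENSION_ID;
	}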
979/* BACK action code */ 1107/* BACK action code */
980enum ieee80211_back_actioncode { 1108enum ieee80211_back_actioncode {
981 WLAN_ACTION_ADDBA_REQ = 0, 1109 WLAN_ACTION_ADDBA_REQ = 0,
@@ -1057,4 +1185,4 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
1057 return hdr->addr1; 1185 return hdr->addr1;
1058} 1186}
1059 1187
1060#endif /* IEEE80211_H */ 1188#endif /* LINUX_IEEE80211_H */
diff --git a/include/linux/if.h b/include/linux/if.h
index 65246846c844..2a6e29620a96 100644
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -65,6 +65,7 @@
65#define IFF_BONDING 0x20 /* bonding master or slave */ 65#define IFF_BONDING 0x20 /* bonding master or slave */
66#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */ 66#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */
67#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */ 67#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */
68#define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */
68 69
69#define IF_GET_IFACE 0x0001 /* for querying only */ 70#define IF_GET_IFACE 0x0001 /* for querying only */
70#define IF_GET_PROTO 0x0002 71#define IF_GET_PROTO 0x0002
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 4d3401812e6c..5ff89809a581 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -87,6 +87,9 @@
87#define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */ 87#define ARPHRD_IEEE80211_PRISM 802 /* IEEE 802.11 + Prism2 header */
88#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */ 88#define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
89 89
90#define ARPHRD_PHONET 820 /* PhoNet media type */
91#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */
92
90#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */ 93#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */
91#define ARPHRD_NONE 0xFFFE /* zero header length */ 94#define ARPHRD_NONE 0xFFFE /* zero header length */
92 95
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 9e7b49b8062d..a5cb0c3f6dcf 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -114,6 +114,8 @@ extern u16 vlan_dev_vlan_id(const struct net_device *dev);
114 114
115extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, 115extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
116 u16 vlan_tci, int polling); 116 u16 vlan_tci, int polling);
117extern int vlan_hwaccel_do_receive(struct sk_buff *skb);
118
117#else 119#else
118static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) 120static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev)
119{ 121{
@@ -133,6 +135,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
133 BUG(); 135 BUG();
134 return NET_XMIT_SUCCESS; 136 return NET_XMIT_SUCCESS;
135} 137}
138
139static inline int vlan_hwaccel_do_receive(struct sk_buff *skb)
140{
141 return 0;
142}
136#endif 143#endif
137 144
138/** 145/**
diff --git a/include/linux/in.h b/include/linux/in.h
index db458beef19d..d60122a3a088 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -80,6 +80,10 @@ struct in_addr {
80/* BSD compatibility */ 80/* BSD compatibility */
81#define IP_RECVRETOPTS IP_RETOPTS 81#define IP_RECVRETOPTS IP_RETOPTS
82 82
83/* TProxy original addresses */
84#define IP_ORIGDSTADDR 20
85#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
86
83/* IP_MTU_DISCOVER values */ 87/* IP_MTU_DISCOVER values */
84#define IP_PMTUDISC_DONT 0 /* Never send DF frames */ 88#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
85#define IP_PMTUDISC_WANT 1 /* Use per route hints */ 89#define IP_PMTUDISC_WANT 1 /* Use per route hints */
diff --git a/include/linux/init.h b/include/linux/init.h
index 93538b696e3d..68cb0265d009 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -40,7 +40,7 @@
40 40
41/* These are for everybody (although not all archs will actually 41/* These are for everybody (although not all archs will actually
42 discard it in modules) */ 42 discard it in modules) */
43#define __init __section(.init.text) __cold 43#define __init __section(.init.text) __cold notrace
44#define __initdata __section(.init.data) 44#define __initdata __section(.init.data)
45#define __initconst __section(.init.rodata) 45#define __initconst __section(.init.rodata)
46#define __exitdata __section(.exit.data) 46#define __exitdata __section(.exit.data)
@@ -112,21 +112,25 @@
112#define __FINIT .previous 112#define __FINIT .previous
113 113
114#define __INITDATA .section ".init.data","aw" 114#define __INITDATA .section ".init.data","aw"
115#define __INITRODATA .section ".init.rodata","a"
115#define __FINITDATA .previous 116#define __FINITDATA .previous
116 117
117#define __DEVINIT .section ".devinit.text", "ax" 118#define __DEVINIT .section ".devinit.text", "ax"
118#define __DEVINITDATA .section ".devinit.data", "aw" 119#define __DEVINITDATA .section ".devinit.data", "aw"
120#define __DEVINITRODATA .section ".devinit.rodata", "a"
119 121
120#define __CPUINIT .section ".cpuinit.text", "ax" 122#define __CPUINIT .section ".cpuinit.text", "ax"
121#define __CPUINITDATA .section ".cpuinit.data", "aw" 123#define __CPUINITDATA .section ".cpuinit.data", "aw"
124#define __CPUINITRODATA .section ".cpuinit.rodata", "a"
122 125
123#define __MEMINIT .section ".meminit.text", "ax" 126#define __MEMINIT .section ".meminit.text", "ax"
124#define __MEMINITDATA .section ".meminit.data", "aw" 127#define __MEMINITDATA .section ".meminit.data", "aw"
128#define __MEMINITRODATA .section ".meminit.rodata", "a"
125 129
126/* silence warnings when references are OK */ 130/* silence warnings when references are OK */
127#define __REF .section ".ref.text", "ax" 131#define __REF .section ".ref.text", "ax"
128#define __REFDATA .section ".ref.data", "aw" 132#define __REFDATA .section ".ref.data", "aw"
129#define __REFCONST .section ".ref.rodata", "aw" 133#define __REFCONST .section ".ref.rodata", "a"
130 134
131#ifndef __ASSEMBLY__ 135#ifndef __ASSEMBLY__
132/* 136/*
@@ -233,9 +237,6 @@ struct obs_kernel_param {
233 __attribute__((aligned((sizeof(long))))) \ 237 __attribute__((aligned((sizeof(long))))) \
234 = { __setup_str_##unique_id, fn, early } 238 = { __setup_str_##unique_id, fn, early }
235 239
236#define __setup_null_param(str, unique_id) \
237 __setup_param(str, unique_id, NULL, 0)
238
239#define __setup(str, fn) \ 240#define __setup(str, fn) \
240 __setup_param(str, fn, fn, 0) 241 __setup_param(str, fn, fn, 0)
241 242
@@ -296,7 +297,6 @@ void __init parse_early_param(void);
296 void cleanup_module(void) __attribute__((alias(#exitfn))); 297 void cleanup_module(void) __attribute__((alias(#exitfn)));
297 298
298#define __setup_param(str, unique_id, fn) /* nothing */ 299#define __setup_param(str, unique_id, fn) /* nothing */
299#define __setup_null_param(str, unique_id) /* nothing */
300#define __setup(str, func) /* nothing */ 300#define __setup(str, func) /* nothing */
301#endif 301#endif
302 302
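
For context, a hedged sketch of how the annotations and the __setup() macro touched above are typically used (the example_* names are made up); with __setup_null_param() gone, callers that only want to swallow a boot option now supply a trivial handler themselves.

#include <linux/init.h>
#include <linux/kernel.h>

static int example_threshold __initdata = 16;	/* lives in .init.data, freed after boot */

static int __init example_threshold_setup(char *str)
{
	get_option(&str, &example_threshold);
	return 1;				/* option consumed */
}
__setup("example_threshold=", example_threshold_setup);

static int __init example_init(void)
{
	/* __init code is discarded after boot; wired up via an initcall as usual */
	pr_info("example: threshold %d\n", example_threshold);
	return 0;
}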
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 021d8e720c79..959f5522d10a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -57,7 +57,6 @@ extern struct nsproxy init_nsproxy;
57 .mnt_ns = NULL, \ 57 .mnt_ns = NULL, \
58 INIT_NET_NS(net_ns) \ 58 INIT_NET_NS(net_ns) \
59 INIT_IPC_NS(ipc_ns) \ 59 INIT_IPC_NS(ipc_ns) \
60 .user_ns = &init_user_ns, \
61} 60}
62 61
63#define INIT_SIGHAND(sighand) { \ 62#define INIT_SIGHAND(sighand) { \
@@ -113,6 +112,8 @@ extern struct group_info init_groups;
113# define CAP_INIT_BSET CAP_INIT_EFF_SET 112# define CAP_INIT_BSET CAP_INIT_EFF_SET
114#endif 113#endif
115 114
115extern struct cred init_cred;
116
116/* 117/*
117 * INIT_TASK is used to set up the first task table, touch at 118 * INIT_TASK is used to set up the first task table, touch at
118 * your own risk!. Base=0, limit=0x1fffff (=2MB) 119 * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -147,13 +148,10 @@ extern struct group_info init_groups;
147 .children = LIST_HEAD_INIT(tsk.children), \ 148 .children = LIST_HEAD_INIT(tsk.children), \
148 .sibling = LIST_HEAD_INIT(tsk.sibling), \ 149 .sibling = LIST_HEAD_INIT(tsk.sibling), \
149 .group_leader = &tsk, \ 150 .group_leader = &tsk, \
150 .group_info = &init_groups, \ 151 .real_cred = &init_cred, \
151 .cap_effective = CAP_INIT_EFF_SET, \ 152 .cred = &init_cred, \
152 .cap_inheritable = CAP_INIT_INH_SET, \ 153 .cred_exec_mutex = \
153 .cap_permitted = CAP_FULL_SET, \ 154 __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \
154 .cap_bset = CAP_INIT_BSET, \
155 .securebits = SECUREBITS_DEFAULT, \
156 .user = INIT_USER, \
157 .comm = "swapper", \ 155 .comm = "swapper", \
158 .thread = INIT_THREAD, \ 156 .thread = INIT_THREAD, \
159 .fs = &init_fs, \ 157 .fs = &init_fs, \
@@ -170,6 +168,7 @@ extern struct group_info init_groups;
170 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 168 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
171 .fs_excl = ATOMIC_INIT(0), \ 169 .fs_excl = ATOMIC_INIT(0), \
172 .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ 170 .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
171 .timer_slack_ns = 50000, /* 50 usec default slack */ \
173 .pids = { \ 172 .pids = { \
174 [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ 173 [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
175 [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ 174 [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index bd578578a8b9..37ea2894b3c0 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -134,6 +134,8 @@ extern void inotify_remove_watch_locked(struct inotify_handle *,
134 struct inotify_watch *); 134 struct inotify_watch *);
135extern void get_inotify_watch(struct inotify_watch *); 135extern void get_inotify_watch(struct inotify_watch *);
136extern void put_inotify_watch(struct inotify_watch *); 136extern void put_inotify_watch(struct inotify_watch *);
137extern int pin_inotify_watch(struct inotify_watch *);
138extern void unpin_inotify_watch(struct inotify_watch *);
137 139
138#else 140#else
139 141
@@ -228,6 +230,15 @@ static inline void put_inotify_watch(struct inotify_watch *watch)
228{ 230{
229} 231}
230 232
 233static inline int pin_inotify_watch(struct inotify_watch *watch)
234{
235 return 0;
236}
237
 238static inline void unpin_inotify_watch(struct inotify_watch *watch)
239{
240}
241
231#endif /* CONFIG_INOTIFY */ 242#endif /* CONFIG_INOTIFY */
232 243
233#endif /* __KERNEL __ */ 244#endif /* __KERNEL __ */
diff --git a/include/linux/input.h b/include/linux/input.h
index a5802c9c81a4..9a6355f74db2 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -238,6 +238,7 @@ struct input_absinfo {
238#define KEY_KPEQUAL 117 238#define KEY_KPEQUAL 117
239#define KEY_KPPLUSMINUS 118 239#define KEY_KPPLUSMINUS 118
240#define KEY_PAUSE 119 240#define KEY_PAUSE 119
241#define KEY_SCALE 120 /* AL Compiz Scale (Expose) */
241 242
242#define KEY_KPCOMMA 121 243#define KEY_KPCOMMA 121
243#define KEY_HANGEUL 122 244#define KEY_HANGEUL 122
@@ -322,6 +323,7 @@ struct input_absinfo {
322#define KEY_PAUSECD 201 323#define KEY_PAUSECD 201
323#define KEY_PROG3 202 324#define KEY_PROG3 202
324#define KEY_PROG4 203 325#define KEY_PROG4 203
326#define KEY_DASHBOARD 204 /* AL Dashboard */
325#define KEY_SUSPEND 205 327#define KEY_SUSPEND 205
326#define KEY_CLOSE 206 /* AC Close */ 328#define KEY_CLOSE 206 /* AC Close */
327#define KEY_PLAY 207 329#define KEY_PLAY 207
@@ -577,9 +579,22 @@ struct input_absinfo {
577#define KEY_BRL_DOT9 0x1f9 579#define KEY_BRL_DOT9 0x1f9
578#define KEY_BRL_DOT10 0x1fa 580#define KEY_BRL_DOT10 0x1fa
579 581
582#define KEY_NUMERIC_0 0x200 /* used by phones, remote controls, */
583#define KEY_NUMERIC_1 0x201 /* and other keypads */
584#define KEY_NUMERIC_2 0x202
585#define KEY_NUMERIC_3 0x203
586#define KEY_NUMERIC_4 0x204
587#define KEY_NUMERIC_5 0x205
588#define KEY_NUMERIC_6 0x206
589#define KEY_NUMERIC_7 0x207
590#define KEY_NUMERIC_8 0x208
591#define KEY_NUMERIC_9 0x209
592#define KEY_NUMERIC_STAR 0x20a
593#define KEY_NUMERIC_POUND 0x20b
594
580/* We avoid low common keys in module aliases so they don't get huge. */ 595/* We avoid low common keys in module aliases so they don't get huge. */
581#define KEY_MIN_INTERESTING KEY_MUTE 596#define KEY_MIN_INTERESTING KEY_MUTE
582#define KEY_MAX 0x1ff 597#define KEY_MAX 0x2ff
583#define KEY_CNT (KEY_MAX+1) 598#define KEY_CNT (KEY_MAX+1)
584 599
585/* 600/*
@@ -644,6 +659,8 @@ struct input_absinfo {
644#define SW_RADIO SW_RFKILL_ALL /* deprecated */ 659#define SW_RADIO SW_RFKILL_ALL /* deprecated */
645#define SW_MICROPHONE_INSERT 0x04 /* set = inserted */ 660#define SW_MICROPHONE_INSERT 0x04 /* set = inserted */
646#define SW_DOCK 0x05 /* set = plugged into dock */ 661#define SW_DOCK 0x05 /* set = plugged into dock */
662#define SW_LINEOUT_INSERT 0x06 /* set = inserted */
663#define SW_JACK_PHYSICAL_INSERT 0x07 /* set = mechanical switch set */
647#define SW_MAX 0x0f 664#define SW_MAX 0x0f
648#define SW_CNT (SW_MAX+1) 665#define SW_CNT (SW_MAX+1)
649 666
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
new file mode 100644
index 000000000000..3d017cfd245b
--- /dev/null
+++ b/include/linux/intel-iommu.h
@@ -0,0 +1,363 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15 * Place - Suite 330, Boston, MA 02111-1307 USA.
16 *
17 * Copyright (C) 2006-2008 Intel Corporation
18 * Author: Ashok Raj <ashok.raj@intel.com>
19 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
20 */
21
22#ifndef _INTEL_IOMMU_H_
23#define _INTEL_IOMMU_H_
24
25#include <linux/types.h>
26#include <linux/msi.h>
27#include <linux/sysdev.h>
28#include <linux/iova.h>
29#include <linux/io.h>
30#include <linux/dma_remapping.h>
31#include <asm/cacheflush.h>
32#include <asm/iommu.h>
33
34/*
35 * Intel IOMMU register specification per version 1.0 public spec.
36 */
37
38#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */
39#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */
40#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */
41#define DMAR_GCMD_REG 0x18 /* Global command register */
42#define DMAR_GSTS_REG 0x1c /* Global status register */
43#define DMAR_RTADDR_REG 0x20 /* Root entry table */
44#define DMAR_CCMD_REG 0x28 /* Context command reg */
45#define DMAR_FSTS_REG 0x34 /* Fault Status register */
46#define DMAR_FECTL_REG 0x38 /* Fault control register */
47#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */
48#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */
49#define DMAR_FEUADDR_REG 0x44 /* Upper address register */
50#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */
51#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */
52#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */
53#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
54#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
55#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
56#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
57#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
58#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
59#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
60#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
61
62#define OFFSET_STRIDE (9)
63/*
64#define dmar_readl(dmar, reg) readl(dmar + reg)
65#define dmar_readq(dmar, reg) ({ \
66 u32 lo, hi; \
67 lo = readl(dmar + reg); \
68 hi = readl(dmar + reg + 4); \
69 (((u64) hi) << 32) + lo; })
70*/
71static inline u64 dmar_readq(void __iomem *addr)
72{
73 u32 lo, hi;
74 lo = readl(addr);
75 hi = readl(addr + 4);
76 return (((u64) hi) << 32) + lo;
77}
78
79static inline void dmar_writeq(void __iomem *addr, u64 val)
80{
81 writel((u32)val, addr);
82 writel((u32)(val >> 32), addr + 4);
83}
84
85#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
86#define DMAR_VER_MINOR(v) ((v) & 0x0f)
87
88/*
89 * Decoding Capability Register
90 */
91#define cap_read_drain(c) (((c) >> 55) & 1)
92#define cap_write_drain(c) (((c) >> 54) & 1)
93#define cap_max_amask_val(c) (((c) >> 48) & 0x3f)
94#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1)
95#define cap_pgsel_inv(c) (((c) >> 39) & 1)
96
97#define cap_super_page_val(c) (((c) >> 34) & 0xf)
98#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \
99 * OFFSET_STRIDE) + 21)
100
101#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16)
102#define cap_max_fault_reg_offset(c) \
103 (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
104
105#define cap_zlr(c) (((c) >> 22) & 1)
106#define cap_isoch(c) (((c) >> 23) & 1)
107#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1)
108#define cap_sagaw(c) (((c) >> 8) & 0x1f)
109#define cap_caching_mode(c) (((c) >> 7) & 1)
110#define cap_phmr(c) (((c) >> 6) & 1)
111#define cap_plmr(c) (((c) >> 5) & 1)
112#define cap_rwbf(c) (((c) >> 4) & 1)
113#define cap_afl(c) (((c) >> 3) & 1)
114#define cap_ndoms(c) (((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
115/*
116 * Extended Capability Register
117 */
118
119#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1)
120#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
121#define ecap_max_iotlb_offset(e) \
122 (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
123#define ecap_coherent(e) ((e) & 0x1)
124#define ecap_qis(e) ((e) & 0x2)
125#define ecap_eim_support(e) ((e >> 4) & 0x1)
126#define ecap_ir_support(e) ((e >> 3) & 0x1)
127#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
128
129
130/* IOTLB_REG */
131#define DMA_TLB_FLUSH_GRANU_OFFSET 60
132#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
133#define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
134#define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
135#define DMA_TLB_IIRG(type) ((type >> 60) & 7)
136#define DMA_TLB_IAIG(val) (((val) >> 57) & 7)
137#define DMA_TLB_READ_DRAIN (((u64)1) << 49)
138#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
139#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32)
140#define DMA_TLB_IVT (((u64)1) << 63)
141#define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
142#define DMA_TLB_MAX_SIZE (0x3f)
143
144/* INVALID_DESC */
145#define DMA_CCMD_INVL_GRANU_OFFSET 61
146#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
147#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
148#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
149#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
150#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
151#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
152#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
153#define DMA_ID_TLB_ADDR(addr) (addr)
154#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
155
156/* PMEN_REG */
157#define DMA_PMEN_EPM (((u32)1)<<31)
158#define DMA_PMEN_PRS (((u32)1)<<0)
159
160/* GCMD_REG */
161#define DMA_GCMD_TE (((u32)1) << 31)
162#define DMA_GCMD_SRTP (((u32)1) << 30)
163#define DMA_GCMD_SFL (((u32)1) << 29)
164#define DMA_GCMD_EAFL (((u32)1) << 28)
165#define DMA_GCMD_WBF (((u32)1) << 27)
166#define DMA_GCMD_QIE (((u32)1) << 26)
167#define DMA_GCMD_SIRTP (((u32)1) << 24)
168#define DMA_GCMD_IRE (((u32) 1) << 25)
169
170/* GSTS_REG */
171#define DMA_GSTS_TES (((u32)1) << 31)
172#define DMA_GSTS_RTPS (((u32)1) << 30)
173#define DMA_GSTS_FLS (((u32)1) << 29)
174#define DMA_GSTS_AFLS (((u32)1) << 28)
175#define DMA_GSTS_WBFS (((u32)1) << 27)
176#define DMA_GSTS_QIES (((u32)1) << 26)
177#define DMA_GSTS_IRTPS (((u32)1) << 24)
178#define DMA_GSTS_IRES (((u32)1) << 25)
179
180/* CCMD_REG */
181#define DMA_CCMD_ICC (((u64)1) << 63)
182#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
183#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
184#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
185#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
186#define DMA_CCMD_MASK_NOBIT 0
187#define DMA_CCMD_MASK_1BIT 1
188#define DMA_CCMD_MASK_2BIT 2
189#define DMA_CCMD_MASK_3BIT 3
190#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
191#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
192
193/* FECTL_REG */
194#define DMA_FECTL_IM (((u32)1) << 31)
195
196/* FSTS_REG */
197#define DMA_FSTS_PPF ((u32)2)
198#define DMA_FSTS_PFO ((u32)1)
199#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
200
201/* FRCD_REG, 32 bits access */
202#define DMA_FRCD_F (((u32)1) << 31)
203#define dma_frcd_type(d) ((d >> 30) & 1)
204#define dma_frcd_fault_reason(c) (c & 0xff)
205#define dma_frcd_source_id(c) (c & 0xffff)
206/* low 64 bit */
207#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
208
209#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
210do { \
211 cycles_t start_time = get_cycles(); \
212 while (1) { \
213 sts = op(iommu->reg + offset); \
214 if (cond) \
215 break; \
216 if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
217 panic("DMAR hardware is malfunctioning\n"); \
218 cpu_relax(); \
219 } \
220} while (0)
221
222#define QI_LENGTH 256 /* queue length */
223
224enum {
225 QI_FREE,
226 QI_IN_USE,
227 QI_DONE
228};
229
230#define QI_CC_TYPE 0x1
231#define QI_IOTLB_TYPE 0x2
232#define QI_DIOTLB_TYPE 0x3
233#define QI_IEC_TYPE 0x4
234#define QI_IWD_TYPE 0x5
235
236#define QI_IEC_SELECTIVE (((u64)1) << 4)
237#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
238#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
239
240#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
241#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
242
243#define QI_IOTLB_DID(did) (((u64)did) << 16)
244#define QI_IOTLB_DR(dr) (((u64)dr) << 7)
245#define QI_IOTLB_DW(dw) (((u64)dw) << 6)
246#define QI_IOTLB_GRAN(gran) (((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
247#define QI_IOTLB_ADDR(addr) (((u64)addr) & VTD_PAGE_MASK)
248#define QI_IOTLB_IH(ih) (((u64)ih) << 6)
249#define QI_IOTLB_AM(am) (((u8)am))
250
251#define QI_CC_FM(fm) (((u64)fm) << 48)
252#define QI_CC_SID(sid) (((u64)sid) << 32)
253#define QI_CC_DID(did) (((u64)did) << 16)
254#define QI_CC_GRAN(gran) (((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
255
256struct qi_desc {
257 u64 low, high;
258};
259
260struct q_inval {
261 spinlock_t q_lock;
262 struct qi_desc *desc; /* invalidation queue */
263 int *desc_status; /* desc status */
264 int free_head; /* first free entry */
265 int free_tail; /* last free entry */
266 int free_cnt;
267};
268
269#ifdef CONFIG_INTR_REMAP
270/* 1MB - maximum possible interrupt remapping table size */
271#define INTR_REMAP_PAGE_ORDER 8
272#define INTR_REMAP_TABLE_REG_SIZE 0xf
273
274#define INTR_REMAP_TABLE_ENTRIES 65536
275
276struct ir_table {
277 struct irte *base;
278};
279#endif
280
281struct iommu_flush {
282 int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
283 u64 type, int non_present_entry_flush);
284 int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
285 unsigned int size_order, u64 type, int non_present_entry_flush);
286};
287
288struct intel_iommu {
289 void __iomem *reg; /* Pointer to hardware regs, virtual addr */
290 u64 cap;
291 u64 ecap;
292 int seg;
293 u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
294 spinlock_t register_lock; /* protect register handling */
295 int seq_id; /* sequence id of the iommu */
296
297#ifdef CONFIG_DMAR
298 unsigned long *domain_ids; /* bitmap of domains */
299 struct dmar_domain **domains; /* ptr to domains */
300 spinlock_t lock; /* protect context, domain ids */
301 struct root_entry *root_entry; /* virtual address */
302
303 unsigned int irq;
304 unsigned char name[7]; /* Device Name */
305 struct msi_msg saved_msg;
306 struct sys_device sysdev;
307 struct iommu_flush flush;
308#endif
309 struct q_inval *qi; /* Queued invalidation info */
310#ifdef CONFIG_INTR_REMAP
311 struct ir_table *ir_table; /* Interrupt remapping info */
312#endif
313};
314
315static inline void __iommu_flush_cache(
316 struct intel_iommu *iommu, void *addr, int size)
317{
318 if (!ecap_coherent(iommu->ecap))
319 clflush_cache_range(addr, size);
320}
321
322extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
323
324extern int alloc_iommu(struct dmar_drhd_unit *drhd);
325extern void free_iommu(struct intel_iommu *iommu);
326extern int dmar_enable_qi(struct intel_iommu *iommu);
327extern void qi_global_iec(struct intel_iommu *iommu);
328
329extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
330 u8 fm, u64 type, int non_present_entry_flush);
331extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
332 unsigned int size_order, u64 type,
333 int non_present_entry_flush);
334
335extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
336
337void intel_iommu_domain_exit(struct dmar_domain *domain);
338struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev);
339int intel_iommu_context_mapping(struct dmar_domain *domain,
340 struct pci_dev *pdev);
341int intel_iommu_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
342 u64 hpa, size_t size, int prot);
343void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn);
344struct dmar_domain *intel_iommu_find_domain(struct pci_dev *pdev);
345u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova);
346
347#ifdef CONFIG_DMAR
348int intel_iommu_found(void);
349#else /* CONFIG_DMAR */
350static inline int intel_iommu_found(void)
351{
352 return 0;
353}
354#endif /* CONFIG_DMAR */
355
356extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
357extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
358extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
359extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
360extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
361extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
362
363#endif
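
A short usage sketch (not from the patch; example_dump_caps() is hypothetical) showing how the register accessors and the cap_*/ecap_* decode macros above compose, in the style of the DMAR driver:

static void example_dump_caps(struct intel_iommu *iommu)
{
	u32 ver;

	/* iommu->reg is assumed to already point at the remapped register page */
	ver = readl(iommu->reg + DMAR_VER_REG);
	iommu->cap  = dmar_readq(iommu->reg + DMAR_CAP_REG);
	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

	printk(KERN_INFO "DMAR %d.%d: mgaw %d, %lu domain ids, %scoherent\n",
	       DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
	       (int)cap_mgaw(iommu->cap),
	       cap_ndoms(iommu->cap),
	       ecap_coherent(iommu->ecap) ? "" : "non-");
}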
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 58ff4e74b2f3..be3c484b5242 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -8,9 +8,14 @@
8#include <linux/preempt.h> 8#include <linux/preempt.h>
9#include <linux/cpumask.h> 9#include <linux/cpumask.h>
10#include <linux/irqreturn.h> 10#include <linux/irqreturn.h>
11#include <linux/irqnr.h>
11#include <linux/hardirq.h> 12#include <linux/hardirq.h>
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/irqflags.h> 14#include <linux/irqflags.h>
15#include <linux/smp.h>
16#include <linux/percpu.h>
17#include <linux/irqnr.h>
18
14#include <asm/atomic.h> 19#include <asm/atomic.h>
15#include <asm/ptrace.h> 20#include <asm/ptrace.h>
16#include <asm/system.h> 21#include <asm/system.h>
@@ -248,10 +253,9 @@ enum
248 BLOCK_SOFTIRQ, 253 BLOCK_SOFTIRQ,
249 TASKLET_SOFTIRQ, 254 TASKLET_SOFTIRQ,
250 SCHED_SOFTIRQ, 255 SCHED_SOFTIRQ,
251#ifdef CONFIG_HIGH_RES_TIMERS
252 HRTIMER_SOFTIRQ,
253#endif
254 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ 256 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
257
258 NR_SOFTIRQS
255}; 259};
256 260
257/* softirq mask and active fields moved to irq_cpustat_t in 261/* softirq mask and active fields moved to irq_cpustat_t in
@@ -271,6 +275,25 @@ extern void softirq_init(void);
271extern void raise_softirq_irqoff(unsigned int nr); 275extern void raise_softirq_irqoff(unsigned int nr);
272extern void raise_softirq(unsigned int nr); 276extern void raise_softirq(unsigned int nr);
273 277
278/* This is the worklist that queues up per-cpu softirq work.
279 *
 280 * send_remote_softirq() adds work to these lists, and
281 * the softirq handler itself dequeues from them. The queues
282 * are protected by disabling local cpu interrupts and they must
283 * only be accessed by the local cpu that they are for.
284 */
285DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
286
287/* Try to send a softirq to a remote cpu. If this cannot be done, the
288 * work will be queued to the local cpu.
289 */
290extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
291
292/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
293 * and compute the current cpu, passed in as 'this_cpu'.
294 */
295extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
296 int this_cpu, int softirq);
274 297
275/* Tasklets --- multithreaded analogue of BHs. 298/* Tasklets --- multithreaded analogue of BHs.
276 299
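
A minimal sketch of the producer side of this interface (hypothetical: EXAMPLE_SOFTIRQ and struct example_work are not in the tree). The matching softirq handler would dequeue csd entries from its cpu's softirq_work_list[EXAMPLE_SOFTIRQ] and recover the work item with container_of().

struct example_work {
	struct call_single_data	csd;	/* list linkage used by the softirq core */
	int			payload;
};

static void example_queue_to(struct example_work *work, int target_cpu)
{
	/*
	 * Raises EXAMPLE_SOFTIRQ on target_cpu with work->csd queued on that
	 * cpu's per-cpu list; falls back to the local cpu if the remote path
	 * is unavailable (e.g. the target cpu is offline).
	 */
	send_remote_softirq(&work->csd, target_cpu, EXAMPLE_SOFTIRQ);
}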
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
new file mode 100644
index 000000000000..82df31726a54
--- /dev/null
+++ b/include/linux/io-mapping.h
@@ -0,0 +1,125 @@
1/*
2 * Copyright © 2008 Keith Packard <keithp@keithp.com>
3 *
4 * This file is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License
6 * as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
16 */
17
18#ifndef _LINUX_IO_MAPPING_H
19#define _LINUX_IO_MAPPING_H
20
21#include <linux/types.h>
22#include <asm/io.h>
23#include <asm/page.h>
24#include <asm/iomap.h>
25
26/*
27 * The io_mapping mechanism provides an abstraction for mapping
28 * individual pages from an io device to the CPU in an efficient fashion.
29 *
30 * See Documentation/io_mapping.txt
31 */
32
33/* this struct isn't actually defined anywhere */
34struct io_mapping;
35
36#ifdef CONFIG_HAVE_ATOMIC_IOMAP
37
38/*
39 * For small address space machines, mapping large objects
40 * into the kernel virtual space isn't practical. Where
41 * available, use fixmap support to dynamically map pages
42 * of the object at run time.
43 */
44
45static inline struct io_mapping *
46io_mapping_create_wc(unsigned long base, unsigned long size)
47{
48 return (struct io_mapping *) base;
49}
50
51static inline void
52io_mapping_free(struct io_mapping *mapping)
53{
54}
55
56/* Atomic map/unmap */
57static inline void *
58io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
59{
60 offset += (unsigned long) mapping;
61 return iomap_atomic_prot_pfn(offset >> PAGE_SHIFT, KM_USER0,
62 __pgprot(__PAGE_KERNEL_WC));
63}
64
65static inline void
66io_mapping_unmap_atomic(void *vaddr)
67{
68 iounmap_atomic(vaddr, KM_USER0);
69}
70
71static inline void *
72io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
73{
74 offset += (unsigned long) mapping;
75 return ioremap_wc(offset, PAGE_SIZE);
76}
77
78static inline void
79io_mapping_unmap(void *vaddr)
80{
81 iounmap(vaddr);
82}
83
84#else
85
86/* Create the io_mapping object*/
87static inline struct io_mapping *
88io_mapping_create_wc(unsigned long base, unsigned long size)
89{
90 return (struct io_mapping *) ioremap_wc(base, size);
91}
92
93static inline void
94io_mapping_free(struct io_mapping *mapping)
95{
96 iounmap(mapping);
97}
98
99/* Atomic map/unmap */
100static inline void *
101io_mapping_map_atomic_wc(struct io_mapping *mapping, unsigned long offset)
102{
103 return ((char *) mapping) + offset;
104}
105
106static inline void
107io_mapping_unmap_atomic(void *vaddr)
108{
109}
110
111/* Non-atomic map/unmap */
112static inline void *
113io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
114{
115 return ((char *) mapping) + offset;
116}
117
118static inline void
119io_mapping_unmap(void *vaddr)
120{
121}
122
123#endif /* HAVE_ATOMIC_IOMAP */
124
125#endif /* _LINUX_IO_MAPPING_H */
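
A usage sketch under assumed names (an aperture-style BAR, example_* functions): the mapping object is created once, individual pages are mapped and unmapped around each access, and the atomic variant must not sleep in between.

static struct io_mapping *example_aperture;

static int example_map_init(unsigned long base, unsigned long size)
{
	example_aperture = io_mapping_create_wc(base, size);
	return example_aperture ? 0 : -ENOMEM;
}

static void example_write_dword(unsigned long page_offset, u32 val)
{
	/* page_offset is a page-aligned byte offset into the aperture */
	void *vaddr = io_mapping_map_atomic_wc(example_aperture, page_offset);

	*(u32 *)vaddr = val;
	io_mapping_unmap_atomic(vaddr);
}

static void example_map_exit(void)
{
	io_mapping_free(example_aperture);
}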
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index a6d0586e2bf7..3b068e5b5671 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -23,4 +23,7 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
23extern void iommu_area_free(unsigned long *map, unsigned long start, 23extern void iommu_area_free(unsigned long *map, unsigned long start,
24 unsigned int nr); 24 unsigned int nr);
25 25
26extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
27 unsigned long io_page_size);
28
26#endif 29#endif
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index ee9bcc6f32b6..041e95aac2bf 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -34,7 +34,8 @@ struct resource_list {
34 */ 34 */
35#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ 35#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
36 36
37#define IORESOURCE_IO 0x00000100 /* Resource type */ 37#define IORESOURCE_TYPE_BITS 0x00000f00 /* Resource type */
38#define IORESOURCE_IO 0x00000100
38#define IORESOURCE_MEM 0x00000200 39#define IORESOURCE_MEM 0x00000200
39#define IORESOURCE_IRQ 0x00000400 40#define IORESOURCE_IRQ 0x00000400
40#define IORESOURCE_DMA 0x00000800 41#define IORESOURCE_DMA 0x00000800
@@ -126,6 +127,10 @@ static inline resource_size_t resource_size(struct resource *res)
126{ 127{
127 return res->end - res->start + 1; 128 return res->end - res->start + 1;
128} 129}
130static inline unsigned long resource_type(struct resource *res)
131{
132 return res->flags & IORESOURCE_TYPE_BITS;
133}
129 134
130/* Convenience shorthand with allocation */ 135/* Convenience shorthand with allocation */
131#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name)) 136#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
@@ -169,6 +174,7 @@ extern struct resource * __devm_request_region(struct device *dev,
169 174
170extern void __devm_release_region(struct device *dev, struct resource *parent, 175extern void __devm_release_region(struct device *dev, struct resource *parent,
171 resource_size_t start, resource_size_t n); 176 resource_size_t start, resource_size_t n);
177extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
172 178
173#endif /* __ASSEMBLY__ */ 179#endif /* __ASSEMBLY__ */
174#endif /* _LINUX_IOPORT_H */ 180#endif /* _LINUX_IOPORT_H */
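
A small sketch of the new helper (the example_* callees are hypothetical): dispatching on the kind of resource without open-coding the flags mask.

static int example_setup_resource(struct resource *res)
{
	switch (resource_type(res)) {
	case IORESOURCE_MEM:
		return example_map_mmio(res->start, resource_size(res));
	case IORESOURCE_IO:
		return example_claim_ports(res->start, resource_size(res));
	default:
		return -EINVAL;
	}
}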
diff --git a/include/linux/iova.h b/include/linux/iova.h
new file mode 100644
index 000000000000..228f6c94b69c
--- /dev/null
+++ b/include/linux/iova.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (c) 2006, Intel Corporation.
3 *
4 * This file is released under the GPLv2.
5 *
6 * Copyright (C) 2006-2008 Intel Corporation
7 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8 *
9 */
10
11#ifndef _IOVA_H_
12#define _IOVA_H_
13
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/rbtree.h>
17#include <linux/dma-mapping.h>
18
19/* IO virtual address start page frame number */
20#define IOVA_START_PFN (1)
21
22/* iova structure */
23struct iova {
24 struct rb_node node;
25 unsigned long pfn_hi; /* IOMMU dish out addr hi */
26 unsigned long pfn_lo; /* IOMMU dish out addr lo */
27};
28
29/* holds all the iova translations for a domain */
30struct iova_domain {
31 spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */
32 spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
33 struct rb_root rbroot; /* iova domain rbtree root */
34 struct rb_node *cached32_node; /* Save last alloced node */
35 unsigned long dma_32bit_pfn;
36};
37
38struct iova *alloc_iova_mem(void);
39void free_iova_mem(struct iova *iova);
40void free_iova(struct iova_domain *iovad, unsigned long pfn);
41void __free_iova(struct iova_domain *iovad, struct iova *iova);
42struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
43 unsigned long limit_pfn,
44 bool size_aligned);
45struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
46 unsigned long pfn_hi);
47void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
48void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit);
49struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
50void put_iova_domain(struct iova_domain *iovad);
51
52#endif
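
A usage sketch (not from the patch; the 32-bit limit and example_* names are illustrative): carve a DMA address range out of an iova_domain and release it again, the way an IOMMU driver would.

#define EXAMPLE_32BIT_PFN	((1UL << (32 - PAGE_SHIFT)) - 1)

static struct iova_domain example_iovad;

static void example_domain_init(void)
{
	init_iova_domain(&example_iovad, EXAMPLE_32BIT_PFN);
}

static dma_addr_t example_alloc_range(unsigned long nrpages)
{
	/* size-aligned allocation kept below the 32-bit boundary */
	struct iova *iova = alloc_iova(&example_iovad, nrpages,
				       EXAMPLE_32BIT_PFN, true);

	return iova ? (dma_addr_t)iova->pfn_lo << PAGE_SHIFT : 0;
}

static void example_free_range(dma_addr_t addr)
{
	free_iova(&example_iovad, addr >> PAGE_SHIFT);
}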
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 641e026eee8f..0b816cae533e 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -278,6 +278,7 @@ struct ipv6_pinfo {
278 struct in6_addr saddr; 278 struct in6_addr saddr;
279 struct in6_addr rcv_saddr; 279 struct in6_addr rcv_saddr;
280 struct in6_addr daddr; 280 struct in6_addr daddr;
281 struct in6_pktinfo sticky_pktinfo;
281 struct in6_addr *daddr_cache; 282 struct in6_addr *daddr_cache;
282#ifdef CONFIG_IPV6_SUBTREES 283#ifdef CONFIG_IPV6_SUBTREES
283 struct in6_addr *saddr_cache; 284 struct in6_addr *saddr_cache;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 8d9411bc60f6..98564dc64476 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -18,6 +18,7 @@
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/cpumask.h> 19#include <linux/cpumask.h>
20#include <linux/irqreturn.h> 20#include <linux/irqreturn.h>
21#include <linux/irqnr.h>
21#include <linux/errno.h> 22#include <linux/errno.h>
22 23
23#include <asm/irq.h> 24#include <asm/irq.h>
@@ -62,7 +63,8 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
62#define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */ 63#define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */
63#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */ 64#define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */
64#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ 65#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */
65#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ 66#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
67#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
66 68
67#ifdef CONFIG_IRQ_PER_CPU 69#ifdef CONFIG_IRQ_PER_CPU
68# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) 70# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -127,9 +129,14 @@ struct irq_chip {
127 const char *typename; 129 const char *typename;
128}; 130};
129 131
132struct timer_rand_state;
133struct irq_2_iommu;
130/** 134/**
131 * struct irq_desc - interrupt descriptor 135 * struct irq_desc - interrupt descriptor
132 * 136 * @irq: interrupt number for this descriptor
137 * @timer_rand_state: pointer to timer rand state struct
138 * @kstat_irqs: irq stats per cpu
139 * @irq_2_iommu: iommu with this irq
133 * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] 140 * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()]
134 * @chip: low level interrupt hardware access 141 * @chip: low level interrupt hardware access
135 * @msi_desc: MSI descriptor 142 * @msi_desc: MSI descriptor
@@ -141,17 +148,24 @@ struct irq_chip {
141 * @depth: disable-depth, for nested irq_disable() calls 148 * @depth: disable-depth, for nested irq_disable() calls
142 * @wake_depth: enable depth, for multiple set_irq_wake() callers 149 * @wake_depth: enable depth, for multiple set_irq_wake() callers
143 * @irq_count: stats field to detect stalled irqs 150 * @irq_count: stats field to detect stalled irqs
144 * @irqs_unhandled: stats field for spurious unhandled interrupts
145 * @last_unhandled: aging timer for unhandled count 151 * @last_unhandled: aging timer for unhandled count
152 * @irqs_unhandled: stats field for spurious unhandled interrupts
146 * @lock: locking for SMP 153 * @lock: locking for SMP
147 * @affinity: IRQ affinity on SMP 154 * @affinity: IRQ affinity on SMP
148 * @cpu: cpu index useful for balancing 155 * @cpu: cpu index useful for balancing
149 * @pending_mask: pending rebalanced interrupts 156 * @pending_mask: pending rebalanced interrupts
150 * @dir: /proc/irq/ procfs entry 157 * @dir: /proc/irq/ procfs entry
151 * @affinity_entry: /proc/irq/smp_affinity procfs entry on SMP
152 * @name: flow handler name for /proc/interrupts output 158 * @name: flow handler name for /proc/interrupts output
153 */ 159 */
154struct irq_desc { 160struct irq_desc {
161 unsigned int irq;
162#ifdef CONFIG_SPARSE_IRQ
163 struct timer_rand_state *timer_rand_state;
164 unsigned int *kstat_irqs;
165# ifdef CONFIG_INTR_REMAP
166 struct irq_2_iommu *irq_2_iommu;
167# endif
168#endif
155 irq_flow_handler_t handle_irq; 169 irq_flow_handler_t handle_irq;
156 struct irq_chip *chip; 170 struct irq_chip *chip;
157 struct msi_desc *msi_desc; 171 struct msi_desc *msi_desc;
@@ -163,14 +177,14 @@ struct irq_desc {
163 unsigned int depth; /* nested irq disables */ 177 unsigned int depth; /* nested irq disables */
164 unsigned int wake_depth; /* nested wake enables */ 178 unsigned int wake_depth; /* nested wake enables */
165 unsigned int irq_count; /* For detecting broken IRQs */ 179 unsigned int irq_count; /* For detecting broken IRQs */
166 unsigned int irqs_unhandled;
167 unsigned long last_unhandled; /* Aging timer for unhandled count */ 180 unsigned long last_unhandled; /* Aging timer for unhandled count */
181 unsigned int irqs_unhandled;
168 spinlock_t lock; 182 spinlock_t lock;
169#ifdef CONFIG_SMP 183#ifdef CONFIG_SMP
170 cpumask_t affinity; 184 cpumask_t affinity;
171 unsigned int cpu; 185 unsigned int cpu;
172#endif 186#endif
173#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE) 187#ifdef CONFIG_GENERIC_PENDING_IRQ
174 cpumask_t pending_mask; 188 cpumask_t pending_mask;
175#endif 189#endif
176#ifdef CONFIG_PROC_FS 190#ifdef CONFIG_PROC_FS
@@ -179,8 +193,53 @@ struct irq_desc {
179 const char *name; 193 const char *name;
180} ____cacheline_internodealigned_in_smp; 194} ____cacheline_internodealigned_in_smp;
181 195
196extern void early_irq_init(void);
197extern void arch_early_irq_init(void);
198extern void arch_init_chip_data(struct irq_desc *desc, int cpu);
199extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
200 struct irq_desc *desc, int cpu);
201extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
202
203#ifndef CONFIG_SPARSE_IRQ
182extern struct irq_desc irq_desc[NR_IRQS]; 204extern struct irq_desc irq_desc[NR_IRQS];
183 205
206static inline struct irq_desc *irq_to_desc(unsigned int irq)
207{
208 return (irq < NR_IRQS) ? irq_desc + irq : NULL;
209}
210static inline struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
211{
212 return irq_to_desc(irq);
213}
214
215#else
216
217extern struct irq_desc *irq_to_desc(unsigned int irq);
218extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
219extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
220
221# define for_each_irq_desc(irq, desc) \
222 for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; irq++, desc = irq_to_desc(irq))
223# define for_each_irq_desc_reverse(irq, desc) \
224 for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; irq--, desc = irq_to_desc(irq))
225
226#define kstat_irqs_this_cpu(DESC) \
227 ((DESC)->kstat_irqs[smp_processor_id()])
228#define kstat_incr_irqs_this_cpu(irqno, DESC) \
229 ((DESC)->kstat_irqs[smp_processor_id()]++)
230
231#endif
232
233static inline struct irq_desc *
234irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
235{
236#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
237 return irq_to_desc(irq);
238#else
239 return desc;
240#endif
241}
242
184/* 243/*
185 * Migration helpers for obsolete names, they will go away: 244 * Migration helpers for obsolete names, they will go away:
186 */ 245 */
@@ -198,19 +257,14 @@ extern int setup_irq(unsigned int irq, struct irqaction *new);
198 257
199#ifdef CONFIG_GENERIC_HARDIRQS 258#ifdef CONFIG_GENERIC_HARDIRQS
200 259
201#ifndef handle_dynamic_tick
202# define handle_dynamic_tick(a) do { } while (0)
203#endif
204
205#ifdef CONFIG_SMP 260#ifdef CONFIG_SMP
206 261
207#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE) 262#ifdef CONFIG_GENERIC_PENDING_IRQ
208 263
209void set_pending_irq(unsigned int irq, cpumask_t mask);
210void move_native_irq(int irq); 264void move_native_irq(int irq);
211void move_masked_irq(int irq); 265void move_masked_irq(int irq);
212 266
213#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */ 267#else /* CONFIG_GENERIC_PENDING_IRQ */
214 268
215static inline void move_irq(int irq) 269static inline void move_irq(int irq)
216{ 270{
@@ -224,10 +278,6 @@ static inline void move_masked_irq(int irq)
224{ 278{
225} 279}
226 280
227static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
228{
229}
230
231#endif /* CONFIG_GENERIC_PENDING_IRQ */ 281#endif /* CONFIG_GENERIC_PENDING_IRQ */
232 282
233#else /* CONFIG_SMP */ 283#else /* CONFIG_SMP */
@@ -237,19 +287,14 @@ static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
237 287
238#endif /* CONFIG_SMP */ 288#endif /* CONFIG_SMP */
239 289
240#ifdef CONFIG_IRQBALANCE
241extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
242#else
243static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
244{
245}
246#endif
247
248extern int no_irq_affinity; 290extern int no_irq_affinity;
249 291
250static inline int irq_balancing_disabled(unsigned int irq) 292static inline int irq_balancing_disabled(unsigned int irq)
251{ 293{
252 return irq_desc[irq].status & IRQ_NO_BALANCING_MASK; 294 struct irq_desc *desc;
295
296 desc = irq_to_desc(irq);
297 return desc->status & IRQ_NO_BALANCING_MASK;
253} 298}
254 299
255/* Handle irq action chains: */ 300/* Handle irq action chains: */
@@ -279,10 +324,8 @@ extern unsigned int __do_IRQ(unsigned int irq);
279 * irqchip-style controller then we call the ->handle_irq() handler, 324 * irqchip-style controller then we call the ->handle_irq() handler,
280 * and it calls __do_IRQ() if it's attached to an irqtype-style controller. 325 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
281 */ 326 */
282static inline void generic_handle_irq(unsigned int irq) 327static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
283{ 328{
284 struct irq_desc *desc = irq_desc + irq;
285
286#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 329#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
287 desc->handle_irq(irq, desc); 330 desc->handle_irq(irq, desc);
288#else 331#else
@@ -293,6 +336,11 @@ static inline void generic_handle_irq(unsigned int irq)
293#endif 336#endif
294} 337}
295 338
339static inline void generic_handle_irq(unsigned int irq)
340{
341 generic_handle_irq_desc(irq, irq_to_desc(irq));
342}
343
296/* Handling of unhandled and spurious interrupts: */ 344/* Handling of unhandled and spurious interrupts: */
297extern void note_interrupt(unsigned int irq, struct irq_desc *desc, 345extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
298 int action_ret); 346 int action_ret);
@@ -325,7 +373,10 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
325static inline void __set_irq_handler_unlocked(int irq, 373static inline void __set_irq_handler_unlocked(int irq,
326 irq_flow_handler_t handler) 374 irq_flow_handler_t handler)
327{ 375{
328 irq_desc[irq].handle_irq = handler; 376 struct irq_desc *desc;
377
378 desc = irq_to_desc(irq);
379 desc->handle_irq = handler;
329} 380}
330 381
331/* 382/*
@@ -353,13 +404,14 @@ extern void set_irq_noprobe(unsigned int irq);
353extern void set_irq_probe(unsigned int irq); 404extern void set_irq_probe(unsigned int irq);
354 405
355/* Handle dynamic irq creation and destruction */ 406/* Handle dynamic irq creation and destruction */
407extern unsigned int create_irq_nr(unsigned int irq_want);
356extern int create_irq(void); 408extern int create_irq(void);
357extern void destroy_irq(unsigned int irq); 409extern void destroy_irq(unsigned int irq);
358 410
359/* Test to see if a driver has successfully requested an irq */ 411/* Test to see if a driver has successfully requested an irq */
360static inline int irq_has_action(unsigned int irq) 412static inline int irq_has_action(unsigned int irq)
361{ 413{
362 struct irq_desc *desc = irq_desc + irq; 414 struct irq_desc *desc = irq_to_desc(irq);
363 return desc->action != NULL; 415 return desc->action != NULL;
364} 416}
365 417
@@ -374,10 +426,15 @@ extern int set_irq_chip_data(unsigned int irq, void *data);
374extern int set_irq_type(unsigned int irq, unsigned int type); 426extern int set_irq_type(unsigned int irq, unsigned int type);
375extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); 427extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
376 428
377#define get_irq_chip(irq) (irq_desc[irq].chip) 429#define get_irq_chip(irq) (irq_to_desc(irq)->chip)
378#define get_irq_chip_data(irq) (irq_desc[irq].chip_data) 430#define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data)
379#define get_irq_data(irq) (irq_desc[irq].handler_data) 431#define get_irq_data(irq) (irq_to_desc(irq)->handler_data)
380#define get_irq_msi(irq) (irq_desc[irq].msi_desc) 432#define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc)
433
434#define get_irq_desc_chip(desc) ((desc)->chip)
435#define get_irq_desc_chip_data(desc) ((desc)->chip_data)
436#define get_irq_desc_data(desc) ((desc)->handler_data)
437#define get_irq_desc_msi(desc) ((desc)->msi_desc)
381 438
382#endif /* CONFIG_GENERIC_HARDIRQS */ 439#endif /* CONFIG_GENERIC_HARDIRQS */
383 440
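
A conversion sketch (hypothetical helper): code that used to index irq_desc[] directly now goes through irq_to_desc(), which can return NULL once CONFIG_SPARSE_IRQ is enabled and the descriptor was never allocated.

static void example_report_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;		/* possible with CONFIG_SPARSE_IRQ */

	pr_debug("irq %u: chip %s, %s action\n", irq,
		 get_irq_desc_chip(desc) ? get_irq_desc_chip(desc)->name : "none",
		 desc->action ? "has" : "no");
}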
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
new file mode 100644
index 000000000000..95d2b74641f5
--- /dev/null
+++ b/include/linux/irqnr.h
@@ -0,0 +1,38 @@
1#ifndef _LINUX_IRQNR_H
2#define _LINUX_IRQNR_H
3
4/*
5 * Generic irq_desc iterators:
6 */
7#ifdef __KERNEL__
8
9#ifndef CONFIG_GENERIC_HARDIRQS
10#include <asm/irq.h>
11# define nr_irqs NR_IRQS
12
13# define for_each_irq_desc(irq, desc) \
14 for (irq = 0; irq < nr_irqs; irq++)
15
16# define for_each_irq_desc_reverse(irq, desc) \
17 for (irq = nr_irqs - 1; irq >= 0; irq--)
18#else
19
20extern int nr_irqs;
21
22#ifndef CONFIG_SPARSE_IRQ
23
24struct irq_desc;
25# define for_each_irq_desc(irq, desc) \
26 for (irq = 0, desc = irq_desc; irq < nr_irqs; irq++, desc++)
27# define for_each_irq_desc_reverse(irq, desc) \
28 for (irq = nr_irqs - 1, desc = irq_desc + (nr_irqs - 1); \
29 irq >= 0; irq--, desc--)
30#endif
31#endif
32
33#define for_each_irq_nr(irq) \
34 for (irq = 0; irq < nr_irqs; irq++)
35
36#endif /* __KERNEL__ */
37
38#endif
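
An iteration sketch (hypothetical function): the for_each_irq_desc() helpers work the same whether descriptors live in the static irq_desc[] array or are allocated sparsely; the caller just provides the two loop variables.

static void example_count_requested(void)
{
	struct irq_desc *desc;
	int requested = 0;
	int irq;

	for_each_irq_desc(irq, desc) {
		if (desc && desc->action)
			requested++;
	}
	pr_info("%d of %d irqs have handlers\n", requested, nr_irqs);
}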
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 07a9b52a2654..346e2b80be7d 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -61,7 +61,7 @@ extern u8 journal_enable_debug;
61 do { \ 61 do { \
62 if ((n) <= journal_enable_debug) { \ 62 if ((n) <= journal_enable_debug) { \
63 printk (KERN_DEBUG "(%s, %d): %s: ", \ 63 printk (KERN_DEBUG "(%s, %d): %s: ", \
64 __FILE__, __LINE__, __FUNCTION__); \ 64 __FILE__, __LINE__, __func__); \
65 printk (f, ## a); \ 65 printk (f, ## a); \
66 } \ 66 } \
67 } while (0) 67 } while (0)
@@ -816,6 +816,9 @@ struct journal_s
816#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */ 816#define JFS_FLUSHED 0x008 /* The journal superblock has been flushed */
817#define JFS_LOADED 0x010 /* The journal superblock has been loaded */ 817#define JFS_LOADED 0x010 /* The journal superblock has been loaded */
818#define JFS_BARRIER 0x020 /* Use IDE barriers */ 818#define JFS_BARRIER 0x020 /* Use IDE barriers */
819#define JFS_ABORT_ON_SYNCDATA_ERR 0x040 /* Abort the journal on file
820 * data write error in ordered
821 * mode */
819 822
820/* 823/*
821 * Function declarations for the journaling transaction and buffer 824 * Function declarations for the journaling transaction and buffer
@@ -908,7 +911,7 @@ extern int journal_set_features
908 (journal_t *, unsigned long, unsigned long, unsigned long); 911 (journal_t *, unsigned long, unsigned long, unsigned long);
909extern int journal_create (journal_t *); 912extern int journal_create (journal_t *);
910extern int journal_load (journal_t *journal); 913extern int journal_load (journal_t *journal);
911extern void journal_destroy (journal_t *); 914extern int journal_destroy (journal_t *);
912extern int journal_recover (journal_t *journal); 915extern int journal_recover (journal_t *journal);
913extern int journal_wipe (journal_t *, int); 916extern int journal_wipe (journal_t *, int);
914extern int journal_skip_recovery (journal_t *); 917extern int journal_skip_recovery (journal_t *);
@@ -984,7 +987,7 @@ extern int cleanup_journal_tail(journal_t *);
984 987
985#define jbd_ENOSYS() \ 988#define jbd_ENOSYS() \
986do { \ 989do { \
987 printk (KERN_ERR "JBD unimplemented function %s\n", __FUNCTION__); \ 990 printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
988 current->state = TASK_UNINTERRUPTIBLE; \ 991 current->state = TASK_UNINTERRUPTIBLE; \
989 schedule(); \ 992 schedule(); \
990} while (1) 993} while (1)
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index d2e91ea998fd..c7d106ef22e2 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -61,7 +61,7 @@ extern u8 jbd2_journal_enable_debug;
61 do { \ 61 do { \
62 if ((n) <= jbd2_journal_enable_debug) { \ 62 if ((n) <= jbd2_journal_enable_debug) { \
63 printk (KERN_DEBUG "(%s, %d): %s: ", \ 63 printk (KERN_DEBUG "(%s, %d): %s: ", \
64 __FILE__, __LINE__, __FUNCTION__); \ 64 __FILE__, __LINE__, __func__); \
65 printk (f, ## a); \ 65 printk (f, ## a); \
66 } \ 66 } \
67 } while (0) 67 } while (0)
@@ -641,6 +641,11 @@ struct transaction_s
641 */ 641 */
642 int t_handle_count; 642 int t_handle_count;
643 643
644 /*
645 * For use by the filesystem to store fs-specific data
646 * structures associated with the transaction
647 */
648 struct list_head t_private_list;
644}; 649};
645 650
646struct transaction_run_stats_s { 651struct transaction_run_stats_s {
@@ -935,6 +940,10 @@ struct journal_s
935 940
936 pid_t j_last_sync_writer; 941 pid_t j_last_sync_writer;
937 942
943 /* This function is called when a transaction is closed */
944 void (*j_commit_callback)(journal_t *,
945 transaction_t *);
946
938 /* 947 /*
939 * Journal statistics 948 * Journal statistics
940 */ 949 */
@@ -1143,7 +1152,7 @@ extern int jbd2_cleanup_journal_tail(journal_t *);
1143 1152
1144#define jbd_ENOSYS() \ 1153#define jbd_ENOSYS() \
1145do { \ 1154do { \
1146 printk (KERN_ERR "JBD unimplemented function %s\n", __FUNCTION__); \ 1155 printk (KERN_ERR "JBD unimplemented function %s\n", __func__); \
1147 current->state = TASK_UNINTERRUPTIBLE; \ 1156 current->state = TASK_UNINTERRUPTIBLE; \
1148 schedule(); \ 1157 schedule(); \
1149} while (1) 1158} while (1)
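
A sketch of how a filesystem might use the two jbd2 additions above (hypothetical names, modeled on ext4-style usage): the callback runs once per committed transaction and drains whatever the filesystem queued on t_private_list.

struct example_item {
	struct list_head	list;		/* chained on t_private_list */
	/* fs-specific payload ... */
};

static void example_commit_callback(journal_t *journal, transaction_t *txn)
{
	struct example_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, &txn->t_private_list, list) {
		list_del_init(&item->list);
		/* post-commit processing of 'item' goes here */
	}
}

static void example_attach_journal(journal_t *journal)
{
	journal->j_commit_callback = example_commit_callback;
}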
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index abb6ac639e8e..1a9cf78bfce5 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -115,10 +115,20 @@ static inline u64 get_jiffies_64(void)
115 ((long)(a) - (long)(b) >= 0)) 115 ((long)(a) - (long)(b) >= 0))
116#define time_before_eq(a,b) time_after_eq(b,a) 116#define time_before_eq(a,b) time_after_eq(b,a)
117 117
118/*
119 * Calculate whether a is in the range of [b, c].
120 */
118#define time_in_range(a,b,c) \ 121#define time_in_range(a,b,c) \
119 (time_after_eq(a,b) && \ 122 (time_after_eq(a,b) && \
120 time_before_eq(a,c)) 123 time_before_eq(a,c))
121 124
125/*
126 * Calculate whether a is in the range of [b, c).
127 */
128#define time_in_range_open(a,b,c) \
129 (time_after_eq(a,b) && \
130 time_before(a,c))
131
122/* Same as above, but does so with platform independent 64bit types. 132/* Same as above, but does so with platform independent 64bit types.
123 * These must be used when utilizing jiffies_64 (i.e. return value of 133 * These must be used when utilizing jiffies_64 (i.e. return value of
124 * get_jiffies_64() */ 134 * get_jiffies_64() */
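
A tiny sketch of the half-open variant added above (hypothetical wrapper): it avoids counting the boundary jiffy twice when two adjacent windows share an endpoint.

static int example_in_window(unsigned long now,
			     unsigned long start, unsigned long end)
{
	/* true for start <= now < end, using wrap-safe jiffies comparisons */
	return time_in_range_open(now, start, end);
}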
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h
index 8a62d1e84b9b..bb70ebb6a2d5 100644
--- a/include/linux/journal-head.h
+++ b/include/linux/journal-head.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * buffer_head fields for JBD 4 * buffer_head fields for JBD
5 * 5 *
6 * 27 May 2001 Andrew Morton <akpm@digeo.com> 6 * 27 May 2001 Andrew Morton
7 * Created - pulled out of fs.h 7 * Created - pulled out of fs.h
8 */ 8 */
9 9
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index b96144887444..f3fe34391d8e 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -93,12 +93,10 @@ static inline void print_symbol(const char *fmt, unsigned long addr)
93} 93}
94 94
95/* 95/*
96 * Pretty-print a function pointer. 96 * Pretty-print a function pointer. This function is deprecated.
97 * 97 * Please use the "%pF" vsprintf format instead.
98 * ia64 and ppc64 function pointers are really function descriptors,
99 * which contain a pointer the real address.
100 */ 98 */
101static inline void print_fn_descriptor_symbol(const char *fmt, void *addr) 99static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr)
102{ 100{
103#if defined(CONFIG_IA64) || defined(CONFIG_PPC64) 101#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
104 addr = *(void **)addr; 102 addr = *(void **)addr;
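
A replacement sketch (hypothetical caller): instead of print_fn_descriptor_symbol(), pass the function pointer straight to printk() with the %pF extension, which handles the ia64/ppc64 function-descriptor indirection itself.

static void example_report_handler(void (*handler)(void))
{
	printk(KERN_INFO "handler installed: %pF\n", handler);
}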
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 75d81f157d2e..ca9ff6411dfa 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -16,6 +16,7 @@
16#include <linux/log2.h> 16#include <linux/log2.h>
17#include <linux/typecheck.h> 17#include <linux/typecheck.h>
18#include <linux/ratelimit.h> 18#include <linux/ratelimit.h>
19#include <linux/dynamic_printk.h>
19#include <asm/byteorder.h> 20#include <asm/byteorder.h>
20#include <asm/bug.h> 21#include <asm/bug.h>
21 22
@@ -115,6 +116,8 @@ extern int _cond_resched(void);
115# define might_resched() do { } while (0) 116# define might_resched() do { } while (0)
116#endif 117#endif
117 118
119#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
120 void __might_sleep(char *file, int line);
118/** 121/**
119 * might_sleep - annotation for functions that can sleep 122 * might_sleep - annotation for functions that can sleep
120 * 123 *
@@ -125,8 +128,6 @@ extern int _cond_resched(void);
125 * be bitten later when the calling function happens to sleep when it is not 128 * be bitten later when the calling function happens to sleep when it is not
126 * supposed to. 129 * supposed to.
127 */ 130 */
128#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
129 void __might_sleep(char *file, int line);
130# define might_sleep() \ 131# define might_sleep() \
131 do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0) 132 do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
132#else 133#else
@@ -140,6 +141,15 @@ extern int _cond_resched(void);
140 (__x < 0) ? -__x : __x; \ 141 (__x < 0) ? -__x : __x; \
141 }) 142 })
142 143
144#ifdef CONFIG_PROVE_LOCKING
145void might_fault(void);
146#else
147static inline void might_fault(void)
148{
149 might_sleep();
150}
151#endif
152
143extern struct atomic_notifier_head panic_notifier_list; 153extern struct atomic_notifier_head panic_notifier_list;
144extern long (*panic_blink)(long time); 154extern long (*panic_blink)(long time);
145NORET_TYPE void panic(const char * fmt, ...) 155NORET_TYPE void panic(const char * fmt, ...)
@@ -187,9 +197,35 @@ extern unsigned long long memparse(const char *ptr, char **retptr);
187extern int core_kernel_text(unsigned long addr); 197extern int core_kernel_text(unsigned long addr);
188extern int __kernel_text_address(unsigned long addr); 198extern int __kernel_text_address(unsigned long addr);
189extern int kernel_text_address(unsigned long addr); 199extern int kernel_text_address(unsigned long addr);
200extern int func_ptr_is_kernel_text(void *ptr);
201
190struct pid; 202struct pid;
191extern struct pid *session_of_pgrp(struct pid *pgrp); 203extern struct pid *session_of_pgrp(struct pid *pgrp);
192 204
205/*
206 * FW_BUG
207 * Add this to a message where you are sure the firmware is buggy or behaves
208 * really stupid or out of spec. Be aware that the responsible BIOS developer
209 * should be able to fix this issue or at least get a concrete idea of the
210 * problem by reading your message without the need of looking at the kernel
211 * code.
212 *
213 * Use it for definite and high priority BIOS bugs.
214 *
215 * FW_WARN
 216 * Use it for issues that are not so clear-cut (e.g. the kernel may already
 217 * have messed things up) and for medium priority BIOS bugs.
218 *
219 * FW_INFO
220 * Use this one if you want to tell the user or vendor about something
221 * suspicious, but generally harmless related to the firmware.
222 *
223 * Use it for information or very low priority BIOS bugs.
224 */
225#define FW_BUG "[Firmware Bug]: "
226#define FW_WARN "[Firmware Warn]: "
227#define FW_INFO "[Firmware Info]: "
228
193#ifdef CONFIG_PRINTK 229#ifdef CONFIG_PRINTK
194asmlinkage int vprintk(const char *fmt, va_list args) 230asmlinkage int vprintk(const char *fmt, va_list args)
195 __attribute__ ((format (printf, 1, 0))); 231 __attribute__ ((format (printf, 1, 0)));
@@ -213,6 +249,9 @@ static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
213 { return false; } 249 { return false; }
214#endif 250#endif
215 251
252extern int printk_needs_cpu(int cpu);
253extern void printk_tick(void);
254
216extern void asmlinkage __attribute__((format(printf, 1, 2))) 255extern void asmlinkage __attribute__((format(printf, 1, 2)))
217 early_printk(const char *fmt, ...); 256 early_printk(const char *fmt, ...);
218 257
@@ -235,9 +274,10 @@ extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in
235extern int panic_timeout; 274extern int panic_timeout;
236extern int panic_on_oops; 275extern int panic_on_oops;
237extern int panic_on_unrecovered_nmi; 276extern int panic_on_unrecovered_nmi;
238extern int tainted;
239extern const char *print_tainted(void); 277extern const char *print_tainted(void);
240extern void add_taint(unsigned); 278extern void add_taint(unsigned flag);
279extern int test_taint(unsigned flag);
280extern unsigned long get_taint(void);
241extern int root_mountflags; 281extern int root_mountflags;
242 282
243/* Values used for system_state */ 283/* Values used for system_state */
@@ -250,16 +290,17 @@ extern enum system_states {
250 SYSTEM_SUSPEND_DISK, 290 SYSTEM_SUSPEND_DISK,
251} system_state; 291} system_state;
252 292
253#define TAINT_PROPRIETARY_MODULE (1<<0) 293#define TAINT_PROPRIETARY_MODULE 0
254#define TAINT_FORCED_MODULE (1<<1) 294#define TAINT_FORCED_MODULE 1
255#define TAINT_UNSAFE_SMP (1<<2) 295#define TAINT_UNSAFE_SMP 2
256#define TAINT_FORCED_RMMOD (1<<3) 296#define TAINT_FORCED_RMMOD 3
257#define TAINT_MACHINE_CHECK (1<<4) 297#define TAINT_MACHINE_CHECK 4
258#define TAINT_BAD_PAGE (1<<5) 298#define TAINT_BAD_PAGE 5
259#define TAINT_USER (1<<6) 299#define TAINT_USER 6
260#define TAINT_DIE (1<<7) 300#define TAINT_DIE 7
261#define TAINT_OVERRIDDEN_ACPI_TABLE (1<<8) 301#define TAINT_OVERRIDDEN_ACPI_TABLE 8
262#define TAINT_WARN (1<<9) 302#define TAINT_WARN 9
303#define TAINT_CRAP 10
263 304
264extern void dump_stack(void) __cold; 305extern void dump_stack(void) __cold;
265 306
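With this change the TAINT_* constants are bit numbers rather than (1 << n) masks; add_taint() and test_taint() take the bit number directly, while get_taint() still returns the combined mask. A small sketch, with the surrounding function invented:

#include <linux/kernel.h>

static void mydrv_apply_unsupported_tweak(void)
{
	add_taint(TAINT_USER);		/* pass the bit number, not a mask */
	if (test_taint(TAINT_USER))
		printk(KERN_NOTICE "kernel is now tainted: %s\n", print_tainted());
}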
@@ -288,28 +329,36 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
288 return buf; 329 return buf;
289} 330}
290 331
291#define pr_emerg(fmt, arg...) \ 332#ifndef pr_fmt
292 printk(KERN_EMERG fmt, ##arg) 333#define pr_fmt(fmt) fmt
293#define pr_alert(fmt, arg...) \ 334#endif
294 printk(KERN_ALERT fmt, ##arg) 335
295#define pr_crit(fmt, arg...) \ 336#define pr_emerg(fmt, ...) \
296 printk(KERN_CRIT fmt, ##arg) 337 printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
297#define pr_err(fmt, arg...) \ 338#define pr_alert(fmt, ...) \
298 printk(KERN_ERR fmt, ##arg) 339 printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
299#define pr_warning(fmt, arg...) \ 340#define pr_crit(fmt, ...) \
300 printk(KERN_WARNING fmt, ##arg) 341 printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
301#define pr_notice(fmt, arg...) \ 342#define pr_err(fmt, ...) \
302 printk(KERN_NOTICE fmt, ##arg) 343 printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
303#define pr_info(fmt, arg...) \ 344#define pr_warning(fmt, ...) \
304 printk(KERN_INFO fmt, ##arg) 345 printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
305 346#define pr_notice(fmt, ...) \
306#ifdef DEBUG 347 printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
348#define pr_info(fmt, ...) \
349 printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
350
307/* If you are writing a driver, please use dev_dbg instead */ 351/* If you are writing a driver, please use dev_dbg instead */
308#define pr_debug(fmt, arg...) \ 352#if defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
309 printk(KERN_DEBUG fmt, ##arg) 353#define pr_debug(fmt, ...) do { \
354 dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
355 } while (0)
356#elif defined(DEBUG)
357#define pr_debug(fmt, ...) \
358 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
310#else 359#else
311#define pr_debug(fmt, arg...) \ 360#define pr_debug(fmt, ...) \
312 ({ if (0) printk(KERN_DEBUG fmt, ##arg); 0; }) 361 ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
313#endif 362#endif
314 363
315/* 364/*
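The pr_* macros now wrap their format string in pr_fmt(), which a compilation unit can override before its first include to get a common message prefix. A sketch of the intended pattern (the "mydrv" prefix and function are illustrative):

#define pr_fmt(fmt) "mydrv: " fmt	/* must precede the kernel.h include */
#include <linux/kernel.h>

static void mydrv_report(int err)
{
	pr_info("initialised\n");		/* logs "mydrv: initialised" */
	if (err)
		pr_err("probe failed with %d\n", err);
	pr_debug("debug detail %d\n", err);	/* dynamic, DEBUG, or compiled out */
}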
@@ -323,18 +372,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
323 ((unsigned char *)&addr)[3] 372 ((unsigned char *)&addr)[3]
324#define NIPQUAD_FMT "%u.%u.%u.%u" 373#define NIPQUAD_FMT "%u.%u.%u.%u"
325 374
326#define NIP6(addr) \
327 ntohs((addr).s6_addr16[0]), \
328 ntohs((addr).s6_addr16[1]), \
329 ntohs((addr).s6_addr16[2]), \
330 ntohs((addr).s6_addr16[3]), \
331 ntohs((addr).s6_addr16[4]), \
332 ntohs((addr).s6_addr16[5]), \
333 ntohs((addr).s6_addr16[6]), \
334 ntohs((addr).s6_addr16[7])
335#define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
336#define NIP6_SEQFMT "%04x%04x%04x%04x%04x%04x%04x%04x"
337
338#if defined(__LITTLE_ENDIAN) 375#if defined(__LITTLE_ENDIAN)
339#define HIPQUAD(addr) \ 376#define HIPQUAD(addr) \
340 ((unsigned char *)&addr)[3], \ 377 ((unsigned char *)&addr)[3], \
@@ -486,4 +523,9 @@ struct sysinfo {
486#define NUMA_BUILD 0 523#define NUMA_BUILD 0
487#endif 524#endif
488 525
526/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
527#ifdef CONFIG_FTRACE_MCOUNT_RECORD
528# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
529#endif
530
489#endif 531#endif
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index cf9f40a91c9c..4ee4b3d2316f 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,7 +28,9 @@ struct cpu_usage_stat {
28 28
29struct kernel_stat { 29struct kernel_stat {
30 struct cpu_usage_stat cpustat; 30 struct cpu_usage_stat cpustat;
31 unsigned int irqs[NR_IRQS]; 31#ifndef CONFIG_SPARSE_IRQ
32 unsigned int irqs[NR_IRQS];
33#endif
32}; 34};
33 35
34DECLARE_PER_CPU(struct kernel_stat, kstat); 36DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -39,19 +41,44 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
39 41
40extern unsigned long long nr_context_switches(void); 42extern unsigned long long nr_context_switches(void);
41 43
44#ifndef CONFIG_SPARSE_IRQ
45#define kstat_irqs_this_cpu(irq) \
46 (kstat_this_cpu.irqs[irq])
47
48struct irq_desc;
49
50static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
51 struct irq_desc *desc)
52{
53 kstat_this_cpu.irqs[irq]++;
54}
55#endif
56
57
58#ifndef CONFIG_SPARSE_IRQ
59static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
60{
61 return kstat_cpu(cpu).irqs[irq];
62}
63#else
64extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
65#endif
66
42/* 67/*
43 * Number of interrupts per specific IRQ source, since bootup 68 * Number of interrupts per specific IRQ source, since bootup
44 */ 69 */
45static inline int kstat_irqs(int irq) 70static inline unsigned int kstat_irqs(unsigned int irq)
46{ 71{
47 int cpu, sum = 0; 72 unsigned int sum = 0;
73 int cpu;
48 74
49 for_each_possible_cpu(cpu) 75 for_each_possible_cpu(cpu)
50 sum += kstat_cpu(cpu).irqs[irq]; 76 sum += kstat_irqs_cpu(irq, cpu);
51 77
52 return sum; 78 return sum;
53} 79}
54 80
81extern unsigned long long task_delta_exec(struct task_struct *);
55extern void account_user_time(struct task_struct *, cputime_t); 82extern void account_user_time(struct task_struct *, cputime_t);
56extern void account_user_time_scaled(struct task_struct *, cputime_t); 83extern void account_user_time_scaled(struct task_struct *, cputime_t);
57extern void account_system_time(struct task_struct *, int, cputime_t); 84extern void account_system_time(struct task_struct *, int, cputime_t);
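With CONFIG_SPARSE_IRQ the per-IRQ counters can move out of struct kernel_stat, so callers should go through the kstat_irqs_cpu()/kstat_irqs() accessors rather than indexing kstat_cpu(cpu).irqs[] directly. A sketch of a per-CPU dump (the function name is made up):

#include <linux/kernel_stat.h>
#include <linux/cpumask.h>

static void mydrv_dump_irq_counts(unsigned int irq)
{
	int cpu;

	for_each_possible_cpu(cpu)
		printk(KERN_DEBUG "irq %u on cpu %d: %u\n",
		       irq, cpu, kstat_irqs_cpu(irq, cpu));
	printk(KERN_DEBUG "irq %u total: %u\n", irq, kstat_irqs(irq));
}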
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 17f76fc05173..adc34f2c6eff 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -100,6 +100,10 @@ struct kimage {
100#define KEXEC_TYPE_DEFAULT 0 100#define KEXEC_TYPE_DEFAULT 0
101#define KEXEC_TYPE_CRASH 1 101#define KEXEC_TYPE_CRASH 1
102 unsigned int preserve_context : 1; 102 unsigned int preserve_context : 1;
103
104#ifdef ARCH_HAS_KIMAGE_ARCH
105 struct kimage_arch arch;
106#endif
103}; 107};
104 108
105 109
diff --git a/include/linux/key-ui.h b/include/linux/key-ui.h
deleted file mode 100644
index e8b8a7a5c496..000000000000
--- a/include/linux/key-ui.h
+++ /dev/null
@@ -1,66 +0,0 @@
1/* key-ui.h: key userspace interface stuff
2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#ifndef _LINUX_KEY_UI_H
13#define _LINUX_KEY_UI_H
14
15#include <linux/key.h>
16
17/* the key tree */
18extern struct rb_root key_serial_tree;
19extern spinlock_t key_serial_lock;
20
21/* required permissions */
22#define KEY_VIEW 0x01 /* require permission to view attributes */
23#define KEY_READ 0x02 /* require permission to read content */
24#define KEY_WRITE 0x04 /* require permission to update / modify */
25#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */
26#define KEY_LINK 0x10 /* require permission to link */
27#define KEY_SETATTR 0x20 /* require permission to change attributes */
28#define KEY_ALL 0x3f /* all the above permissions */
29
30/*
31 * the keyring payload contains a list of the keys to which the keyring is
32 * subscribed
33 */
34struct keyring_list {
35 struct rcu_head rcu; /* RCU deletion hook */
36 unsigned short maxkeys; /* max keys this list can hold */
37 unsigned short nkeys; /* number of keys currently held */
38 unsigned short delkey; /* key to be unlinked by RCU */
39 struct key *keys[0];
40};
41
42/*
43 * check to see whether permission is granted to use a key in the desired way
44 */
45extern int key_task_permission(const key_ref_t key_ref,
46 struct task_struct *context,
47 key_perm_t perm);
48
49static inline int key_permission(const key_ref_t key_ref, key_perm_t perm)
50{
51 return key_task_permission(key_ref, current, perm);
52}
53
54extern key_ref_t lookup_user_key(struct task_struct *context,
55 key_serial_t id, int create, int partial,
56 key_perm_t perm);
57
58extern long join_session_keyring(const char *name);
59
60extern struct key_type *key_type_lookup(const char *type);
61extern void key_type_put(struct key_type *ktype);
62
63#define key_negative_timeout 60 /* default timeout on a negative key's existence */
64
65
66#endif /* _LINUX_KEY_UI_H */
diff --git a/include/linux/key.h b/include/linux/key.h
index 1b70e35a71e3..21d32a142c00 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -73,6 +73,7 @@ struct key;
73struct seq_file; 73struct seq_file;
74struct user_struct; 74struct user_struct;
75struct signal_struct; 75struct signal_struct;
76struct cred;
76 77
77struct key_type; 78struct key_type;
78struct key_owner; 79struct key_owner;
@@ -181,7 +182,7 @@ struct key {
181extern struct key *key_alloc(struct key_type *type, 182extern struct key *key_alloc(struct key_type *type,
182 const char *desc, 183 const char *desc,
183 uid_t uid, gid_t gid, 184 uid_t uid, gid_t gid,
184 struct task_struct *ctx, 185 const struct cred *cred,
185 key_perm_t perm, 186 key_perm_t perm,
186 unsigned long flags); 187 unsigned long flags);
187 188
@@ -249,7 +250,7 @@ extern int key_unlink(struct key *keyring,
249 struct key *key); 250 struct key *key);
250 251
251extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, 252extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
252 struct task_struct *ctx, 253 const struct cred *cred,
253 unsigned long flags, 254 unsigned long flags,
254 struct key *dest); 255 struct key *dest);
255 256
@@ -276,24 +277,11 @@ extern ctl_table key_sysctls[];
276/* 277/*
277 * the userspace interface 278 * the userspace interface
278 */ 279 */
279extern void switch_uid_keyring(struct user_struct *new_user); 280extern int install_thread_keyring_to_cred(struct cred *cred);
280extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk);
281extern int copy_thread_group_keys(struct task_struct *tsk);
282extern void exit_keys(struct task_struct *tsk);
283extern void exit_thread_group_keys(struct signal_struct *tg);
284extern int suid_keys(struct task_struct *tsk);
285extern int exec_keys(struct task_struct *tsk);
286extern void key_fsuid_changed(struct task_struct *tsk); 281extern void key_fsuid_changed(struct task_struct *tsk);
287extern void key_fsgid_changed(struct task_struct *tsk); 282extern void key_fsgid_changed(struct task_struct *tsk);
288extern void key_init(void); 283extern void key_init(void);
289 284
290#define __install_session_keyring(tsk, keyring) \
291({ \
292 struct key *old_session = tsk->signal->session_keyring; \
293 tsk->signal->session_keyring = keyring; \
294 old_session; \
295})
296
297#else /* CONFIG_KEYS */ 285#else /* CONFIG_KEYS */
298 286
299#define key_validate(k) 0 287#define key_validate(k) 0
@@ -302,17 +290,9 @@ extern void key_init(void);
302#define key_revoke(k) do { } while(0) 290#define key_revoke(k) do { } while(0)
303#define key_put(k) do { } while(0) 291#define key_put(k) do { } while(0)
304#define key_ref_put(k) do { } while(0) 292#define key_ref_put(k) do { } while(0)
305#define make_key_ref(k, p) ({ NULL; }) 293#define make_key_ref(k, p) NULL
306#define key_ref_to_ptr(k) ({ NULL; }) 294#define key_ref_to_ptr(k) NULL
307#define is_key_possessed(k) 0 295#define is_key_possessed(k) 0
308#define switch_uid_keyring(u) do { } while(0)
309#define __install_session_keyring(t, k) ({ NULL; })
310#define copy_keys(f,t) 0
311#define copy_thread_group_keys(t) 0
312#define exit_keys(t) do { } while(0)
313#define exit_thread_group_keys(tg) do { } while(0)
314#define suid_keys(t) do { } while(0)
315#define exec_keys(t) do { } while(0)
316#define key_fsuid_changed(t) do { } while(0) 296#define key_fsuid_changed(t) do { } while(0)
317#define key_fsgid_changed(t) do { } while(0) 297#define key_fsgid_changed(t) do { } while(0)
318#define key_init() do { } while(0) 298#define key_init() do { } while(0)
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h
index 656ee6b77a4a..c0688eb72093 100644
--- a/include/linux/keyctl.h
+++ b/include/linux/keyctl.h
@@ -1,6 +1,6 @@
1/* keyctl.h: keyctl command IDs 1/* keyctl.h: keyctl command IDs
2 * 2 *
3 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. 3 * Copyright (C) 2004, 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com) 4 * Written by David Howells (dhowells@redhat.com)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
@@ -20,6 +20,7 @@
20#define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */ 20#define KEY_SPEC_USER_SESSION_KEYRING -5 /* - key ID for UID-session keyring */
21#define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */ 21#define KEY_SPEC_GROUP_KEYRING -6 /* - key ID for GID-specific keyring */
22#define KEY_SPEC_REQKEY_AUTH_KEY -7 /* - key ID for assumed request_key auth key */ 22#define KEY_SPEC_REQKEY_AUTH_KEY -7 /* - key ID for assumed request_key auth key */
23#define KEY_SPEC_REQUESTOR_KEYRING -8 /* - key ID for request_key() dest keyring */
23 24
24/* request-key default keyrings */ 25/* request-key default keyrings */
25#define KEY_REQKEY_DEFL_NO_CHANGE -1 26#define KEY_REQKEY_DEFL_NO_CHANGE -1
@@ -30,6 +31,7 @@
30#define KEY_REQKEY_DEFL_USER_KEYRING 4 31#define KEY_REQKEY_DEFL_USER_KEYRING 4
31#define KEY_REQKEY_DEFL_USER_SESSION_KEYRING 5 32#define KEY_REQKEY_DEFL_USER_SESSION_KEYRING 5
32#define KEY_REQKEY_DEFL_GROUP_KEYRING 6 33#define KEY_REQKEY_DEFL_GROUP_KEYRING 6
34#define KEY_REQKEY_DEFL_REQUESTOR_KEYRING 7
33 35
34/* keyctl commands */ 36/* keyctl commands */
35#define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */ 37#define KEYCTL_GET_KEYRING_ID 0 /* ask for a keyring's ID */
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index a1a91577813c..92213a9194e1 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -99,4 +99,7 @@ struct file;
99extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[], 99extern int call_usermodehelper_pipe(char *path, char *argv[], char *envp[],
100 struct file **filp); 100 struct file **filp);
101 101
102extern int usermodehelper_disable(void);
103extern void usermodehelper_enable(void);
104
102#endif /* __LINUX_KMOD_H__ */ 105#endif /* __LINUX_KMOD_H__ */
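A hedged sketch of the new usermodehelper_disable()/usermodehelper_enable() pair: it is meant to bracket a window (suspend/resume being the motivating case) during which no usermode helpers may be spawned. The surrounding function is hypothetical:

#include <linux/kmod.h>

static int mydrv_enter_quiet_window(void)
{
	int err;

	err = usermodehelper_disable();	/* fails if helpers cannot be stopped */
	if (err)
		return err;

	/* ... work that must not race with /sbin helpers ... */

	usermodehelper_enable();
	return 0;
}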
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 0be7795655fa..497b1d1f7a05 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -29,6 +29,7 @@
29 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi 29 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
30 * <prasanna@in.ibm.com> added function-return probes. 30 * <prasanna@in.ibm.com> added function-return probes.
31 */ 31 */
32#include <linux/linkage.h>
32#include <linux/list.h> 33#include <linux/list.h>
33#include <linux/notifier.h> 34#include <linux/notifier.h>
34#include <linux/smp.h> 35#include <linux/smp.h>
@@ -47,7 +48,7 @@
47#define KPROBE_HIT_SSDONE 0x00000008 48#define KPROBE_HIT_SSDONE 0x00000008
48 49
49/* Attach to insert probes on any functions which should be ignored*/ 50/* Attach to insert probes on any functions which should be ignored*/
50#define __kprobes __attribute__((__section__(".kprobes.text"))) 51#define __kprobes __attribute__((__section__(".kprobes.text"))) notrace
51 52
52struct kprobe; 53struct kprobe;
53struct pt_regs; 54struct pt_regs;
@@ -256,7 +257,7 @@ void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
256 257
257#else /* CONFIG_KPROBES */ 258#else /* CONFIG_KPROBES */
258 259
259#define __kprobes /**/ 260#define __kprobes notrace
260struct jprobe; 261struct jprobe;
261struct kretprobe; 262struct kretprobe;
262 263
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 70a30651cd12..f18b86fa8655 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -311,22 +311,33 @@ struct kvm_s390_interrupt {
311 311
312/* This structure represents a single trace buffer record. */ 312/* This structure represents a single trace buffer record. */
313struct kvm_trace_rec { 313struct kvm_trace_rec {
314 __u32 event:28; 314 /* variable rec_val
315 __u32 extra_u32:3; 315 * is split into:
316 __u32 cycle_in:1; 316 * bits 0 - 27 -> event id
 317 * bits 28 - 30 -> number of extra data args of size u32
 318 * bit 31 -> indicates whether the tsc is included in the record
319 */
320 __u32 rec_val;
317 __u32 pid; 321 __u32 pid;
318 __u32 vcpu_id; 322 __u32 vcpu_id;
319 union { 323 union {
320 struct { 324 struct {
321 __u64 cycle_u64; 325 __u64 timestamp;
322 __u32 extra_u32[KVM_TRC_EXTRA_MAX]; 326 __u32 extra_u32[KVM_TRC_EXTRA_MAX];
323 } __attribute__((packed)) cycle; 327 } __attribute__((packed)) timestamp;
324 struct { 328 struct {
325 __u32 extra_u32[KVM_TRC_EXTRA_MAX]; 329 __u32 extra_u32[KVM_TRC_EXTRA_MAX];
326 } nocycle; 330 } notimestamp;
327 } u; 331 } u;
328}; 332};
329 333
334#define TRACE_REC_EVENT_ID(val) \
335 (0x0fffffff & (val))
336#define TRACE_REC_NUM_DATA_ARGS(val) \
337 (0x70000000 & ((val) << 28))
338#define TRACE_REC_TCS(val) \
339 (0x80000000 & ((val) << 31))
340
330#define KVMIO 0xAE 341#define KVMIO 0xAE
331 342
332/* 343/*
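The bitfields of the old kvm_trace_rec are replaced by a single rec_val word plus the TRACE_REC_* helpers above, which place each field from its natural value. A sketch of composing a record header with them (the helper and its arguments are invented):

#include <linux/kvm.h>

/* Pack event id, number of extra u32 args and the tsc-present flag into rec_val. */
static __u32 mydrv_make_rec_val(__u32 event_id, __u32 nr_extra, int has_tsc)
{
	return TRACE_REC_EVENT_ID(event_id) |
	       TRACE_REC_NUM_DATA_ARGS(nr_extra) |
	       TRACE_REC_TCS(has_tsc ? 1 : 0);
}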
@@ -372,6 +383,10 @@ struct kvm_trace_rec {
372#define KVM_CAP_MP_STATE 14 383#define KVM_CAP_MP_STATE 14
373#define KVM_CAP_COALESCED_MMIO 15 384#define KVM_CAP_COALESCED_MMIO 15
374#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */ 385#define KVM_CAP_SYNC_MMU 16 /* Changes to host mmap are reflected in guest */
386#if defined(CONFIG_X86)||defined(CONFIG_IA64)
387#define KVM_CAP_DEVICE_ASSIGNMENT 17
388#endif
389#define KVM_CAP_IOMMU 18
375 390
376/* 391/*
377 * ioctls for VM fds 392 * ioctls for VM fds
@@ -401,6 +416,10 @@ struct kvm_trace_rec {
401 _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone) 416 _IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
402#define KVM_UNREGISTER_COALESCED_MMIO \ 417#define KVM_UNREGISTER_COALESCED_MMIO \
403 _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone) 418 _IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)
419#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
420 struct kvm_assigned_pci_dev)
421#define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
422 struct kvm_assigned_irq)
404 423
405/* 424/*
406 * ioctls for vcpu fds 425 * ioctls for vcpu fds
@@ -440,4 +459,51 @@ struct kvm_trace_rec {
440#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state) 459#define KVM_GET_MP_STATE _IOR(KVMIO, 0x98, struct kvm_mp_state)
441#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state) 460#define KVM_SET_MP_STATE _IOW(KVMIO, 0x99, struct kvm_mp_state)
442 461
462#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
463#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
464#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
465#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
466#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
467#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
468#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
469#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
470#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
471#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
472#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
473#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
474#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
475#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
476#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
477#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
478#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
479#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
480#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
481#define KVM_TRC_TDP_FAULT (KVM_TRC_HANDLER + 0x15)
482#define KVM_TRC_GTLB_WRITE (KVM_TRC_HANDLER + 0x16)
483#define KVM_TRC_STLB_WRITE (KVM_TRC_HANDLER + 0x17)
484#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
485#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
486
487struct kvm_assigned_pci_dev {
488 __u32 assigned_dev_id;
489 __u32 busnr;
490 __u32 devfn;
491 __u32 flags;
492 union {
493 __u32 reserved[12];
494 };
495};
496
497struct kvm_assigned_irq {
498 __u32 assigned_dev_id;
499 __u32 host_irq;
500 __u32 guest_irq;
501 __u32 flags;
502 union {
503 __u32 reserved[12];
504 };
505};
506
507#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
508
443#endif 509#endif
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8525afc53107..bb92be2153bc 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -34,6 +34,10 @@
34#define KVM_REQ_MMU_RELOAD 3 34#define KVM_REQ_MMU_RELOAD 3
35#define KVM_REQ_TRIPLE_FAULT 4 35#define KVM_REQ_TRIPLE_FAULT 4
36#define KVM_REQ_PENDING_TIMER 5 36#define KVM_REQ_PENDING_TIMER 5
37#define KVM_REQ_UNHALT 6
38#define KVM_REQ_MMU_SYNC 7
39
40#define KVM_USERSPACE_IRQ_SOURCE_ID 0
37 41
38struct kvm_vcpu; 42struct kvm_vcpu;
39extern struct kmem_cache *kvm_vcpu_cache; 43extern struct kmem_cache *kvm_vcpu_cache;
@@ -279,12 +283,71 @@ void kvm_free_physmem(struct kvm *kvm);
279 283
280struct kvm *kvm_arch_create_vm(void); 284struct kvm *kvm_arch_create_vm(void);
281void kvm_arch_destroy_vm(struct kvm *kvm); 285void kvm_arch_destroy_vm(struct kvm *kvm);
286void kvm_free_all_assigned_devices(struct kvm *kvm);
282 287
283int kvm_cpu_get_interrupt(struct kvm_vcpu *v); 288int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
284int kvm_cpu_has_interrupt(struct kvm_vcpu *v); 289int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
285int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); 290int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
286void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 291void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
287 292
293int kvm_is_mmio_pfn(pfn_t pfn);
294
295struct kvm_irq_ack_notifier {
296 struct hlist_node link;
297 unsigned gsi;
298 void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
299};
300
301struct kvm_assigned_dev_kernel {
302 struct kvm_irq_ack_notifier ack_notifier;
303 struct work_struct interrupt_work;
304 struct list_head list;
305 int assigned_dev_id;
306 int host_busnr;
307 int host_devfn;
308 int host_irq;
309 int guest_irq;
310 int irq_requested;
311 int irq_source_id;
312 struct pci_dev *dev;
313 struct kvm *kvm;
314};
315void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
316void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
317void kvm_register_irq_ack_notifier(struct kvm *kvm,
318 struct kvm_irq_ack_notifier *kian);
319void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
320 struct kvm_irq_ack_notifier *kian);
321int kvm_request_irq_source_id(struct kvm *kvm);
322void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
323
324#ifdef CONFIG_DMAR
325int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
326 unsigned long npages);
327int kvm_iommu_map_guest(struct kvm *kvm,
328 struct kvm_assigned_dev_kernel *assigned_dev);
329int kvm_iommu_unmap_guest(struct kvm *kvm);
330#else /* CONFIG_DMAR */
331static inline int kvm_iommu_map_pages(struct kvm *kvm,
332 gfn_t base_gfn,
333 unsigned long npages)
334{
335 return 0;
336}
337
338static inline int kvm_iommu_map_guest(struct kvm *kvm,
339 struct kvm_assigned_dev_kernel
340 *assigned_dev)
341{
342 return -ENODEV;
343}
344
345static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
346{
347 return 0;
348}
349#endif /* CONFIG_DMAR */
350
288static inline void kvm_guest_enter(void) 351static inline void kvm_guest_enter(void)
289{ 352{
290 account_system_vtime(current); 353 account_system_vtime(current);
@@ -307,6 +370,11 @@ static inline gpa_t gfn_to_gpa(gfn_t gfn)
307 return (gpa_t)gfn << PAGE_SHIFT; 370 return (gpa_t)gfn << PAGE_SHIFT;
308} 371}
309 372
373static inline hpa_t pfn_to_hpa(pfn_t pfn)
374{
375 return (hpa_t)pfn << PAGE_SHIFT;
376}
377
310static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu) 378static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
311{ 379{
312 set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); 380 set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
@@ -326,6 +394,25 @@ struct kvm_stats_debugfs_item {
326extern struct kvm_stats_debugfs_item debugfs_entries[]; 394extern struct kvm_stats_debugfs_item debugfs_entries[];
327extern struct dentry *kvm_debugfs_dir; 395extern struct dentry *kvm_debugfs_dir;
328 396
397#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
398 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
399 vcpu, 5, d1, d2, d3, d4, d5)
400#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
401 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
402 vcpu, 4, d1, d2, d3, d4, 0)
403#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
404 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
405 vcpu, 3, d1, d2, d3, 0, 0)
406#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
407 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
408 vcpu, 2, d1, d2, 0, 0, 0)
409#define KVMTRACE_1D(evt, vcpu, d1, name) \
410 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
411 vcpu, 1, d1, 0, 0, 0, 0)
412#define KVMTRACE_0D(evt, vcpu, name) \
413 trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
414 vcpu, 0, 0, 0, 0, 0, 0)
415
329#ifdef CONFIG_KVM_TRACE 416#ifdef CONFIG_KVM_TRACE
330int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg); 417int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
331void kvm_trace_cleanup(void); 418void kvm_trace_cleanup(void);
diff --git a/include/linux/leds.h b/include/linux/leds.h
index d41ccb56146a..d3a73f5a48c3 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -123,7 +123,7 @@ extern void ledtrig_ide_activity(void);
123 */ 123 */
124struct led_info { 124struct led_info {
125 const char *name; 125 const char *name;
126 char *default_trigger; 126 const char *default_trigger;
127 int flags; 127 int flags;
128}; 128};
129 129
@@ -135,7 +135,7 @@ struct led_platform_data {
135/* For the leds-gpio driver */ 135/* For the leds-gpio driver */
136struct gpio_led { 136struct gpio_led {
137 const char *name; 137 const char *name;
138 char *default_trigger; 138 const char *default_trigger;
139 unsigned gpio; 139 unsigned gpio;
140 u8 active_low; 140 u8 active_low;
141}; 141};
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index e7217dc58f39..a53407a4165c 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -54,9 +54,13 @@ struct lguest_vqconfig {
54/* Write command first word is a request. */ 54/* Write command first word is a request. */
55enum lguest_req 55enum lguest_req
56{ 56{
57 LHREQ_INITIALIZE, /* + base, pfnlimit, pgdir, start */ 57 LHREQ_INITIALIZE, /* + base, pfnlimit, start */
58 LHREQ_GETDMA, /* No longer used */ 58 LHREQ_GETDMA, /* No longer used */
59 LHREQ_IRQ, /* + irq */ 59 LHREQ_IRQ, /* + irq */
60 LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ 60 LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
61}; 61};
62
63/* The alignment to use between consumer and producer parts of vring.
64 * x86 pagesize for historical reasons. */
65#define LGUEST_VRING_ALIGN 4096
62#endif /* _LINUX_LGUEST_LAUNCHER */ 66#endif /* _LINUX_LGUEST_LAUNCHER */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 947cf84e555d..3449de597eff 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -213,10 +213,11 @@ enum {
213 ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ 213 ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */
214 ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ 214 ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */
215 ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ 215 ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */
216 ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */
217 ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ 216 ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */
218 ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */ 217 ATA_PFLAG_INITIALIZING = (1 << 7), /* being initialized, don't touch */
219 ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */ 218 ATA_PFLAG_RESETTING = (1 << 8), /* reset in progress */
219 ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
220 ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
220 221
221 ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ 222 ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
222 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ 223 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
@@ -340,6 +341,9 @@ enum {
340 341
341 ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, 342 ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET,
342 343
344 /* mask of flags to transfer *to* the slave link */
345 ATA_EHI_TO_SLAVE_MASK = ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET,
346
343 /* max tries if error condition is still set after ->error_handler */ 347 /* max tries if error condition is still set after ->error_handler */
344 ATA_EH_MAX_TRIES = 5, 348 ATA_EH_MAX_TRIES = 5,
345 349
@@ -369,6 +373,10 @@ enum {
369 ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */ 373 ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */
370 ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ 374 ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */
371 ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ 375 ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */
376 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
377 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
378 not multiple of 16 bytes */
 379	ATA_HORKAGE_FIRMWARE_WARN = (1 << 12),	/* firmware update warning */
372 380
373 /* DMA mask for user DMA control: User visible values; DO NOT 381 /* DMA mask for user DMA control: User visible values; DO NOT
374 renumber */ 382 renumber */
@@ -1278,26 +1286,62 @@ static inline int ata_link_active(struct ata_link *link)
1278 return ata_tag_valid(link->active_tag) || link->sactive; 1286 return ata_tag_valid(link->active_tag) || link->sactive;
1279} 1287}
1280 1288
1281extern struct ata_link *__ata_port_next_link(struct ata_port *ap, 1289/*
1282 struct ata_link *link, 1290 * Iterators
1283 bool dev_only); 1291 *
1292 * ATA_LITER_* constants are used to select link iteration mode and
1293 * ATA_DITER_* device iteration mode.
1294 *
1295 * For a custom iteration directly using ata_{link|dev}_next(), if
1296 * @link or @dev, respectively, is NULL, the first element is
1297 * returned. @dev and @link can be any valid device or link and the
1298 * next element according to the iteration mode will be returned.
1299 * After the last element, NULL is returned.
1300 */
1301enum ata_link_iter_mode {
1302 ATA_LITER_EDGE, /* if present, PMP links only; otherwise,
1303 * host link. no slave link */
1304 ATA_LITER_HOST_FIRST, /* host link followed by PMP or slave links */
1305 ATA_LITER_PMP_FIRST, /* PMP links followed by host link,
1306 * slave link still comes after host link */
1307};
1308
1309enum ata_dev_iter_mode {
1310 ATA_DITER_ENABLED,
1311 ATA_DITER_ENABLED_REVERSE,
1312 ATA_DITER_ALL,
1313 ATA_DITER_ALL_REVERSE,
1314};
1284 1315
1285#define __ata_port_for_each_link(link, ap) \ 1316extern struct ata_link *ata_link_next(struct ata_link *link,
1286 for ((link) = __ata_port_next_link((ap), NULL, false); (link); \ 1317 struct ata_port *ap,
1287 (link) = __ata_port_next_link((ap), (link), false)) 1318 enum ata_link_iter_mode mode);
1288 1319
1289#define ata_port_for_each_link(link, ap) \ 1320extern struct ata_device *ata_dev_next(struct ata_device *dev,
1290 for ((link) = __ata_port_next_link((ap), NULL, true); (link); \ 1321 struct ata_link *link,
1291 (link) = __ata_port_next_link((ap), (link), true)) 1322 enum ata_dev_iter_mode mode);
1292 1323
1293#define ata_link_for_each_dev(dev, link) \ 1324/*
1294 for ((dev) = (link)->device; \ 1325 * Shortcut notation for iterations
1295 (dev) < (link)->device + ata_link_max_devices(link) || ((dev) = NULL); \ 1326 *
1296 (dev)++) 1327 * ata_for_each_link() iterates over each link of @ap according to
1328 * @mode. @link points to the current link in the loop. @link is
1329 * NULL after loop termination. ata_for_each_dev() works the same way
1330 * except that it iterates over each device of @link.
1331 *
1332 * Note that the mode prefixes ATA_{L|D}ITER_ shouldn't need to be
1333 * specified when using the following shorthand notations. Only the
1334 * mode itself (EDGE, HOST_FIRST, ENABLED, etc...) should be
1335 * specified. This not only increases brevity but also makes it
1336 * impossible to use ATA_LITER_* for device iteration or vice-versa.
1337 */
1338#define ata_for_each_link(link, ap, mode) \
1339 for ((link) = ata_link_next(NULL, (ap), ATA_LITER_##mode); (link); \
1340 (link) = ata_link_next((link), (ap), ATA_LITER_##mode))
1297 1341
1298#define ata_link_for_each_dev_reverse(dev, link) \ 1342#define ata_for_each_dev(dev, link, mode) \
1299 for ((dev) = (link)->device + ata_link_max_devices(link) - 1; \ 1343 for ((dev) = ata_dev_next(NULL, (link), ATA_DITER_##mode); (dev); \
1300 (dev) >= (link)->device || ((dev) = NULL); (dev)--) 1344 (dev) = ata_dev_next((dev), (link), ATA_DITER_##mode))
1301 1345
1302/** 1346/**
1303 * ata_ncq_enabled - Test whether NCQ is enabled 1347 * ata_ncq_enabled - Test whether NCQ is enabled
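A sketch of the new shorthand iterators declared above; only the mode suffix (EDGE, ENABLED, ...) is spelled out at the call site. The counting helper here is invented for illustration:

#include <linux/libata.h>

static unsigned int mydrv_count_enabled_devs(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned int count = 0;

	ata_for_each_link(link, ap, EDGE)	/* PMP links if present, else host link */
		ata_for_each_dev(dev, link, ENABLED)
			count++;

	return count;
}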
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 56ba37394656..fee9e59649c1 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -4,8 +4,6 @@
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5#include <asm/linkage.h> 5#include <asm/linkage.h>
6 6
7#define notrace __attribute__((no_instrument_function))
8
9#ifdef __cplusplus 7#ifdef __cplusplus
10#define CPP_ASMLINKAGE extern "C" 8#define CPP_ASMLINKAGE extern "C"
11#else 9#else
@@ -66,14 +64,6 @@
66 name: 64 name:
67#endif 65#endif
68 66
69#define KPROBE_ENTRY(name) \
70 .pushsection .kprobes.text, "ax"; \
71 ENTRY(name)
72
73#define KPROBE_END(name) \
74 END(name); \
75 .popsection
76
77#ifndef END 67#ifndef END
78#define END(name) \ 68#define END(name) \
79 .size name, .-name 69 .size name, .-name
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
new file mode 100644
index 000000000000..93150ecf3ea4
--- /dev/null
+++ b/include/linux/list_nulls.h
@@ -0,0 +1,94 @@
1#ifndef _LINUX_LIST_NULLS_H
2#define _LINUX_LIST_NULLS_H
3
4/*
5 * Special version of lists, where end of list is not a NULL pointer,
6 * but a 'nulls' marker, which can have many different values.
7 * (up to 2^31 different values guaranteed on all platforms)
8 *
9 * In the standard hlist, termination of a list is the NULL pointer.
10 * In this special 'nulls' variant, we use the fact that objects stored in
11 * a list are aligned on a word (4 or 8 bytes alignment).
 12 * We therefore use the least significant bit of 'ptr':
13 * Set to 1 : This is a 'nulls' end-of-list marker (ptr >> 1)
14 * Set to 0 : This is a pointer to some object (ptr)
15 */
16
17struct hlist_nulls_head {
18 struct hlist_nulls_node *first;
19};
20
21struct hlist_nulls_node {
22 struct hlist_nulls_node *next, **pprev;
23};
24#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
25 ((ptr)->first = (struct hlist_nulls_node *) (1UL | (((long)nulls) << 1)))
26
27#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
28/**
 29 * is_a_nulls - Test if a ptr is a nulls marker
30 * @ptr: ptr to be tested
31 *
32 */
33static inline int is_a_nulls(const struct hlist_nulls_node *ptr)
34{
35 return ((unsigned long)ptr & 1);
36}
37
38/**
39 * get_nulls_value - Get the 'nulls' value of the end of chain
40 * @ptr: end of chain
41 *
42 * Should be called only if is_a_nulls(ptr);
43 */
44static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
45{
46 return ((unsigned long)ptr) >> 1;
47}
48
49static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
50{
51 return !h->pprev;
52}
53
54static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
55{
56 return is_a_nulls(h->first);
57}
58
59static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
60{
61 struct hlist_nulls_node *next = n->next;
62 struct hlist_nulls_node **pprev = n->pprev;
63 *pprev = next;
64 if (!is_a_nulls(next))
65 next->pprev = pprev;
66}
67
68/**
69 * hlist_nulls_for_each_entry - iterate over list of given type
70 * @tpos: the type * to use as a loop cursor.
71 * @pos: the &struct hlist_node to use as a loop cursor.
72 * @head: the head for your list.
73 * @member: the name of the hlist_node within the struct.
74 *
75 */
76#define hlist_nulls_for_each_entry(tpos, pos, head, member) \
77 for (pos = (head)->first; \
78 (!is_a_nulls(pos)) && \
79 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
80 pos = pos->next)
81
82/**
83 * hlist_nulls_for_each_entry_from - iterate over a hlist continuing from current point
84 * @tpos: the type * to use as a loop cursor.
85 * @pos: the &struct hlist_node to use as a loop cursor.
86 * @member: the name of the hlist_node within the struct.
87 *
88 */
89#define hlist_nulls_for_each_entry_from(tpos, pos, member) \
90 for (; (!is_a_nulls(pos)) && \
91 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
92 pos = pos->next)
93
94#endif
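A minimal sketch of the new nulls lists, using only the primitives defined above; the item structure and lookup helper are hypothetical, and RCU-safe insertion is provided by a separate header:

#include <linux/list_nulls.h>

struct my_item {
	int key;
	struct hlist_nulls_node node;
};

static struct my_item *my_chain_lookup(struct hlist_nulls_head *head, int key)
{
	struct my_item *item;
	struct hlist_nulls_node *pos;

	hlist_nulls_for_each_entry(item, pos, head, node)
		if (item->key == key)
			return item;

	/* pos now points at the nulls marker; get_nulls_value(pos) recovers
	 * the value the chain was set up with via INIT_HLIST_NULLS_HEAD(). */
	return NULL;
}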
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index e5872dc994c0..fbc48f898521 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -41,6 +41,7 @@ struct nlmclnt_initdata {
41 size_t addrlen; 41 size_t addrlen;
42 unsigned short protocol; 42 unsigned short protocol;
43 u32 nfs_version; 43 u32 nfs_version;
44 int noresvport;
44}; 45};
45 46
46/* 47/*
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index b56d5aa9b194..23da3fa69efa 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -49,6 +49,7 @@ struct nlm_host {
49 unsigned short h_proto; /* transport proto */ 49 unsigned short h_proto; /* transport proto */
50 unsigned short h_reclaiming : 1, 50 unsigned short h_reclaiming : 1,
51 h_server : 1, /* server side, not client side */ 51 h_server : 1, /* server side, not client side */
52 h_noresvport : 1,
52 h_inuse : 1; 53 h_inuse : 1;
53 wait_queue_head_t h_gracewait; /* wait while reclaiming */ 54 wait_queue_head_t h_gracewait; /* wait while reclaiming */
54 struct rw_semaphore h_rwsem; /* Reboot recovery lock */ 55 struct rw_semaphore h_rwsem; /* Reboot recovery lock */
@@ -220,7 +221,8 @@ struct nlm_host *nlmclnt_lookup_host(const struct sockaddr *sap,
220 const size_t salen, 221 const size_t salen,
221 const unsigned short protocol, 222 const unsigned short protocol,
222 const u32 version, 223 const u32 version,
223 const char *hostname); 224 const char *hostname,
225 int noresvport);
224struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp, 226struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
225 const char *hostname, 227 const char *hostname,
226 const size_t hostname_len); 228 const size_t hostname_len);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 331e5f1c2d8e..23bf02fb124f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -73,6 +73,8 @@ struct lock_class_key {
73 struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; 73 struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES];
74}; 74};
75 75
76#define LOCKSTAT_POINTS 4
77
76/* 78/*
77 * The lock-class itself: 79 * The lock-class itself:
78 */ 80 */
@@ -119,7 +121,8 @@ struct lock_class {
119 int name_version; 121 int name_version;
120 122
121#ifdef CONFIG_LOCK_STAT 123#ifdef CONFIG_LOCK_STAT
122 unsigned long contention_point[4]; 124 unsigned long contention_point[LOCKSTAT_POINTS];
125 unsigned long contending_point[LOCKSTAT_POINTS];
123#endif 126#endif
124}; 127};
125 128
@@ -144,6 +147,7 @@ enum bounce_type {
144 147
145struct lock_class_stats { 148struct lock_class_stats {
146 unsigned long contention_point[4]; 149 unsigned long contention_point[4];
150 unsigned long contending_point[4];
147 struct lock_time read_waittime; 151 struct lock_time read_waittime;
148 struct lock_time write_waittime; 152 struct lock_time write_waittime;
149 struct lock_time read_holdtime; 153 struct lock_time read_holdtime;
@@ -165,6 +169,7 @@ struct lockdep_map {
165 const char *name; 169 const char *name;
166#ifdef CONFIG_LOCK_STAT 170#ifdef CONFIG_LOCK_STAT
167 int cpu; 171 int cpu;
172 unsigned long ip;
168#endif 173#endif
169}; 174};
170 175
@@ -309,8 +314,15 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
309extern void lock_release(struct lockdep_map *lock, int nested, 314extern void lock_release(struct lockdep_map *lock, int nested,
310 unsigned long ip); 315 unsigned long ip);
311 316
312extern void lock_set_subclass(struct lockdep_map *lock, unsigned int subclass, 317extern void lock_set_class(struct lockdep_map *lock, const char *name,
313 unsigned long ip); 318 struct lock_class_key *key, unsigned int subclass,
319 unsigned long ip);
320
321static inline void lock_set_subclass(struct lockdep_map *lock,
322 unsigned int subclass, unsigned long ip)
323{
324 lock_set_class(lock, lock->name, lock->key, subclass, ip);
325}
314 326
315# define INIT_LOCKDEP .lockdep_recursion = 0, 327# define INIT_LOCKDEP .lockdep_recursion = 0,
316 328
@@ -328,13 +340,15 @@ static inline void lockdep_on(void)
328 340
329# define lock_acquire(l, s, t, r, c, n, i) do { } while (0) 341# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
330# define lock_release(l, n, i) do { } while (0) 342# define lock_release(l, n, i) do { } while (0)
343# define lock_set_class(l, n, k, s, i) do { } while (0)
331# define lock_set_subclass(l, s, i) do { } while (0) 344# define lock_set_subclass(l, s, i) do { } while (0)
332# define lockdep_init() do { } while (0) 345# define lockdep_init() do { } while (0)
333# define lockdep_info() do { } while (0) 346# define lockdep_info() do { } while (0)
334# define lockdep_init_map(lock, name, key, sub) do { (void)(key); } while (0) 347# define lockdep_init_map(lock, name, key, sub) \
348 do { (void)(name); (void)(key); } while (0)
335# define lockdep_set_class(lock, key) do { (void)(key); } while (0) 349# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
336# define lockdep_set_class_and_name(lock, key, name) \ 350# define lockdep_set_class_and_name(lock, key, name) \
337 do { (void)(key); } while (0) 351 do { (void)(key); (void)(name); } while (0)
338#define lockdep_set_class_and_subclass(lock, key, sub) \ 352#define lockdep_set_class_and_subclass(lock, key, sub) \
339 do { (void)(key); } while (0) 353 do { (void)(key); } while (0)
340#define lockdep_set_subclass(lock, sub) do { } while (0) 354#define lockdep_set_subclass(lock, sub) do { } while (0)
@@ -355,7 +369,7 @@ struct lock_class_key { };
355#ifdef CONFIG_LOCK_STAT 369#ifdef CONFIG_LOCK_STAT
356 370
357extern void lock_contended(struct lockdep_map *lock, unsigned long ip); 371extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
358extern void lock_acquired(struct lockdep_map *lock); 372extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
359 373
360#define LOCK_CONTENDED(_lock, try, lock) \ 374#define LOCK_CONTENDED(_lock, try, lock) \
361do { \ 375do { \
@@ -363,20 +377,20 @@ do { \
363 lock_contended(&(_lock)->dep_map, _RET_IP_); \ 377 lock_contended(&(_lock)->dep_map, _RET_IP_); \
364 lock(_lock); \ 378 lock(_lock); \
365 } \ 379 } \
366 lock_acquired(&(_lock)->dep_map); \ 380 lock_acquired(&(_lock)->dep_map, _RET_IP_); \
367} while (0) 381} while (0)
368 382
369#else /* CONFIG_LOCK_STAT */ 383#else /* CONFIG_LOCK_STAT */
370 384
371#define lock_contended(lockdep_map, ip) do {} while (0) 385#define lock_contended(lockdep_map, ip) do {} while (0)
372#define lock_acquired(lockdep_map) do {} while (0) 386#define lock_acquired(lockdep_map, ip) do {} while (0)
373 387
374#define LOCK_CONTENDED(_lock, try, lock) \ 388#define LOCK_CONTENDED(_lock, try, lock) \
375 lock(_lock) 389 lock(_lock)
376 390
377#endif /* CONFIG_LOCK_STAT */ 391#endif /* CONFIG_LOCK_STAT */
378 392
379#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS) 393#ifdef CONFIG_GENERIC_HARDIRQS
380extern void early_init_irq_lock_class(void); 394extern void early_init_irq_lock_class(void);
381#else 395#else
382static inline void early_init_irq_lock_class(void) 396static inline void early_init_irq_lock_class(void)
@@ -480,4 +494,22 @@ static inline void print_irqtrace_events(struct task_struct *curr)
480# define lock_map_release(l) do { } while (0) 494# define lock_map_release(l) do { } while (0)
481#endif 495#endif
482 496
497#ifdef CONFIG_PROVE_LOCKING
498# define might_lock(lock) \
499do { \
500 typecheck(struct lockdep_map *, &(lock)->dep_map); \
501 lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_); \
502 lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
503} while (0)
504# define might_lock_read(lock) \
505do { \
506 typecheck(struct lockdep_map *, &(lock)->dep_map); \
507 lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_); \
508 lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
509} while (0)
510#else
511# define might_lock(lock) do { } while (0)
512# define might_lock_read(lock) do { } while (0)
513#endif
514
483#endif /* __LINUX_LOCKDEP_H */ 515#endif /* __LINUX_LOCKDEP_H */
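might_lock()/might_lock_read() let a function tell lockdep up front that it may take a lock, even on calls where it does not. A sketch with a hypothetical mutex (any lock with a dep_map works when CONFIG_PROVE_LOCKING is enabled):

#include <linux/lockdep.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(my_mutex);

static void my_maybe_locked_op(int fast_path)
{
	might_lock(&my_mutex);		/* records the dependency even on the fast path */

	if (!fast_path) {
		mutex_lock(&my_mutex);
		/* ... slow path ... */
		mutex_unlock(&my_mutex);
	}
}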
diff --git a/include/linux/map_to_7segment.h b/include/linux/map_to_7segment.h
new file mode 100644
index 000000000000..7df8432c4402
--- /dev/null
+++ b/include/linux/map_to_7segment.h
@@ -0,0 +1,187 @@
1/*
2 * Copyright (c) 2005 Henk Vergonet <Henk.Vergonet@gmail.com>
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of
7 * the License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef MAP_TO_7SEGMENT_H
20#define MAP_TO_7SEGMENT_H
21
22/* This file provides translation primitives and tables for the conversion
23 * of (ASCII) characters to a 7-segments notation.
24 *
25 * The 7 segment's wikipedia notation below is used as standard.
26 * See: http://en.wikipedia.org/wiki/Seven_segment_display
27 *
28 * Notation: +-a-+
29 * f b
30 * +-g-+
31 * e c
32 * +-d-+
33 *
34 * Usage:
35 *
36 * Register a map variable, and fill it with a character set:
37 * static SEG7_DEFAULT_MAP(map_seg7);
38 *
39 *
40 * Then use for conversion:
41 * seg7 = map_to_seg7(&map_seg7, some_char);
42 * ...
43 *
44 * In device drivers it is recommended, if required, to make the char map
45 * accessible via the sysfs interface using the following scheme:
46 *
47 * static ssize_t show_map(struct device *dev, char *buf) {
48 * memcpy(buf, &map_seg7, sizeof(map_seg7));
49 * return sizeof(map_seg7);
50 * }
51 * static ssize_t store_map(struct device *dev, const char *buf, size_t cnt) {
52 * if(cnt != sizeof(map_seg7))
53 * return -EINVAL;
54 * memcpy(&map_seg7, buf, cnt);
55 * return cnt;
56 * }
57 * static DEVICE_ATTR(map_seg7, PERMS_RW, show_map, store_map);
58 *
59 * History:
60 * 2005-05-31 RFC linux-kernel@vger.kernel.org
61 */
62#include <linux/errno.h>
63
64
65#define BIT_SEG7_A 0
66#define BIT_SEG7_B 1
67#define BIT_SEG7_C 2
68#define BIT_SEG7_D 3
69#define BIT_SEG7_E 4
70#define BIT_SEG7_F 5
71#define BIT_SEG7_G 6
72#define BIT_SEG7_RESERVED 7
73
74struct seg7_conversion_map {
75 unsigned char table[128];
76};
77
78static inline int map_to_seg7(struct seg7_conversion_map *map, int c)
79{
80 return c >= 0 && c < sizeof(map->table) ? map->table[c] : -EINVAL;
81}
82
83#define SEG7_CONVERSION_MAP(_name, _map) \
84 struct seg7_conversion_map _name = { .table = { _map } }
85
86/*
87 * It is recommended to use a facility that allows user space to redefine
88 * custom character sets for LCD devices. Please use a sysfs interface
89 * as described above.
90 */
91#define MAP_TO_SEG7_SYSFS_FILE "map_seg7"
92
93/*******************************************************************************
94 * ASCII conversion table
95 ******************************************************************************/
96
97#define _SEG7(l,a,b,c,d,e,f,g) \
98 ( a<<BIT_SEG7_A | b<<BIT_SEG7_B | c<<BIT_SEG7_C | d<<BIT_SEG7_D | \
99 e<<BIT_SEG7_E | f<<BIT_SEG7_F | g<<BIT_SEG7_G )
100
101#define _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \
102 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
103
104#define _MAP_33_47_ASCII_SEG7_SYMBOL \
105 _SEG7('!',0,0,0,0,1,1,0), _SEG7('"',0,1,0,0,0,1,0), _SEG7('#',0,1,1,0,1,1,0),\
106 _SEG7('$',1,0,1,1,0,1,1), _SEG7('%',0,0,1,0,0,1,0), _SEG7('&',1,0,1,1,1,1,1),\
107 _SEG7('\'',0,0,0,0,0,1,0),_SEG7('(',1,0,0,1,1,1,0), _SEG7(')',1,1,1,1,0,0,0),\
108 _SEG7('*',0,1,1,0,1,1,1), _SEG7('+',0,1,1,0,0,0,1), _SEG7(',',0,0,0,0,1,0,0),\
109 _SEG7('-',0,0,0,0,0,0,1), _SEG7('.',0,0,0,0,1,0,0), _SEG7('/',0,1,0,0,1,0,1),
110
111#define _MAP_48_57_ASCII_SEG7_NUMERIC \
112 _SEG7('0',1,1,1,1,1,1,0), _SEG7('1',0,1,1,0,0,0,0), _SEG7('2',1,1,0,1,1,0,1),\
113 _SEG7('3',1,1,1,1,0,0,1), _SEG7('4',0,1,1,0,0,1,1), _SEG7('5',1,0,1,1,0,1,1),\
114 _SEG7('6',1,0,1,1,1,1,1), _SEG7('7',1,1,1,0,0,0,0), _SEG7('8',1,1,1,1,1,1,1),\
115 _SEG7('9',1,1,1,1,0,1,1),
116
117#define _MAP_58_64_ASCII_SEG7_SYMBOL \
118 _SEG7(':',0,0,0,1,0,0,1), _SEG7(';',0,0,0,1,0,0,1), _SEG7('<',1,0,0,0,0,1,1),\
119 _SEG7('=',0,0,0,1,0,0,1), _SEG7('>',1,1,0,0,0,0,1), _SEG7('?',1,1,1,0,0,1,0),\
120 _SEG7('@',1,1,0,1,1,1,1),
121
122#define _MAP_65_90_ASCII_SEG7_ALPHA_UPPR \
123 _SEG7('A',1,1,1,0,1,1,1), _SEG7('B',1,1,1,1,1,1,1), _SEG7('C',1,0,0,1,1,1,0),\
124 _SEG7('D',1,1,1,1,1,1,0), _SEG7('E',1,0,0,1,1,1,1), _SEG7('F',1,0,0,0,1,1,1),\
125 _SEG7('G',1,1,1,1,0,1,1), _SEG7('H',0,1,1,0,1,1,1), _SEG7('I',0,1,1,0,0,0,0),\
126 _SEG7('J',0,1,1,1,0,0,0), _SEG7('K',0,1,1,0,1,1,1), _SEG7('L',0,0,0,1,1,1,0),\
127 _SEG7('M',1,1,1,0,1,1,0), _SEG7('N',1,1,1,0,1,1,0), _SEG7('O',1,1,1,1,1,1,0),\
128 _SEG7('P',1,1,0,0,1,1,1), _SEG7('Q',1,1,1,1,1,1,0), _SEG7('R',1,1,1,0,1,1,1),\
129 _SEG7('S',1,0,1,1,0,1,1), _SEG7('T',0,0,0,1,1,1,1), _SEG7('U',0,1,1,1,1,1,0),\
130 _SEG7('V',0,1,1,1,1,1,0), _SEG7('W',0,1,1,1,1,1,1), _SEG7('X',0,1,1,0,1,1,1),\
131 _SEG7('Y',0,1,1,0,0,1,1), _SEG7('Z',1,1,0,1,1,0,1),
132
133#define _MAP_91_96_ASCII_SEG7_SYMBOL \
134 _SEG7('[',1,0,0,1,1,1,0), _SEG7('\\',0,0,1,0,0,1,1),_SEG7(']',1,1,1,1,0,0,0),\
135 _SEG7('^',1,1,0,0,0,1,0), _SEG7('_',0,0,0,1,0,0,0), _SEG7('`',0,1,0,0,0,0,0),
136
137#define _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \
138 _SEG7('A',1,1,1,0,1,1,1), _SEG7('b',0,0,1,1,1,1,1), _SEG7('c',0,0,0,1,1,0,1),\
139 _SEG7('d',0,1,1,1,1,0,1), _SEG7('E',1,0,0,1,1,1,1), _SEG7('F',1,0,0,0,1,1,1),\
140 _SEG7('G',1,1,1,1,0,1,1), _SEG7('h',0,0,1,0,1,1,1), _SEG7('i',0,0,1,0,0,0,0),\
141 _SEG7('j',0,0,1,1,0,0,0), _SEG7('k',0,0,1,0,1,1,1), _SEG7('L',0,0,0,1,1,1,0),\
142 _SEG7('M',1,1,1,0,1,1,0), _SEG7('n',0,0,1,0,1,0,1), _SEG7('o',0,0,1,1,1,0,1),\
143 _SEG7('P',1,1,0,0,1,1,1), _SEG7('q',1,1,1,0,0,1,1), _SEG7('r',0,0,0,0,1,0,1),\
144 _SEG7('S',1,0,1,1,0,1,1), _SEG7('T',0,0,0,1,1,1,1), _SEG7('u',0,0,1,1,1,0,0),\
145 _SEG7('v',0,0,1,1,1,0,0), _SEG7('W',0,1,1,1,1,1,1), _SEG7('X',0,1,1,0,1,1,1),\
146 _SEG7('y',0,1,1,1,0,1,1), _SEG7('Z',1,1,0,1,1,0,1),
147
148#define _MAP_123_126_ASCII_SEG7_SYMBOL \
149 _SEG7('{',1,0,0,1,1,1,0), _SEG7('|',0,0,0,0,1,1,0), _SEG7('}',1,1,1,1,0,0,0),\
150 _SEG7('~',1,0,0,0,0,0,0),
151
152/* Maps */
153
154/* This set tries to map as close as possible to the visible characteristics
155 * of the ASCII symbol, lowercase and uppercase letters may differ in
156 * presentation on the display.
157 */
158#define MAP_ASCII7SEG_ALPHANUM \
159 _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \
160 _MAP_33_47_ASCII_SEG7_SYMBOL \
161 _MAP_48_57_ASCII_SEG7_NUMERIC \
162 _MAP_58_64_ASCII_SEG7_SYMBOL \
163 _MAP_65_90_ASCII_SEG7_ALPHA_UPPR \
164 _MAP_91_96_ASCII_SEG7_SYMBOL \
165 _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \
166 _MAP_123_126_ASCII_SEG7_SYMBOL
167
168/* This set tries to map as close as possible to the symbolic characteristics
169 * of the ASCII character for maximum discrimination.
170 * For now this means all alpha chars are in lower case representations.
171 * (This for example facilitates the use of hex numbers with uppercase input.)
172 */
173#define MAP_ASCII7SEG_ALPHANUM_LC \
174 _MAP_0_32_ASCII_SEG7_NON_PRINTABLE \
175 _MAP_33_47_ASCII_SEG7_SYMBOL \
176 _MAP_48_57_ASCII_SEG7_NUMERIC \
177 _MAP_58_64_ASCII_SEG7_SYMBOL \
178 _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \
179 _MAP_91_96_ASCII_SEG7_SYMBOL \
180 _MAP_97_122_ASCII_SEG7_ALPHA_LOWER \
181 _MAP_123_126_ASCII_SEG7_SYMBOL
182
183#define SEG7_DEFAULT_MAP(_name) \
184 SEG7_CONVERSION_MAP(_name,MAP_ASCII7SEG_ALPHANUM)
185
186#endif /* MAP_TO_7SEGMENT_H */
187
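Following the usage notes in the header itself, a minimal conversion helper might look like this (the wrapper name is invented):

#include <linux/map_to_7segment.h>

static SEG7_DEFAULT_MAP(map_seg7);

/* Returns the segment bit pattern for c, or -EINVAL for out-of-range input. */
static int mydrv_char_to_seg7(char c)
{
	return map_to_seg7(&map_seg7, c);
}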
diff --git a/include/linux/marker.h b/include/linux/marker.h
index 1290653f9241..b85e74ca782f 100644
--- a/include/linux/marker.h
+++ b/include/linux/marker.h
@@ -12,6 +12,7 @@
12 * See the file COPYING for more details. 12 * See the file COPYING for more details.
13 */ 13 */
14 14
15#include <stdarg.h>
15#include <linux/types.h> 16#include <linux/types.h>
16 17
17struct module; 18struct module;
@@ -48,10 +49,28 @@ struct marker {
48 void (*call)(const struct marker *mdata, void *call_private, ...); 49 void (*call)(const struct marker *mdata, void *call_private, ...);
49 struct marker_probe_closure single; 50 struct marker_probe_closure single;
50 struct marker_probe_closure *multi; 51 struct marker_probe_closure *multi;
52 const char *tp_name; /* Optional tracepoint name */
53 void *tp_cb; /* Optional tracepoint callback */
51} __attribute__((aligned(8))); 54} __attribute__((aligned(8)));
52 55
53#ifdef CONFIG_MARKERS 56#ifdef CONFIG_MARKERS
54 57
58#define _DEFINE_MARKER(name, tp_name_str, tp_cb, format) \
59 static const char __mstrtab_##name[] \
60 __attribute__((section("__markers_strings"))) \
61 = #name "\0" format; \
62 static struct marker __mark_##name \
63 __attribute__((section("__markers"), aligned(8))) = \
64 { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
65 0, 0, marker_probe_cb, { __mark_empty_function, NULL},\
66 NULL, tp_name_str, tp_cb }
67
68#define DEFINE_MARKER(name, format) \
69 _DEFINE_MARKER(name, NULL, NULL, format)
70
71#define DEFINE_MARKER_TP(name, tp_name, tp_cb, format) \
72 _DEFINE_MARKER(name, #tp_name, tp_cb, format)
73
55/* 74/*
56 * Note : the empty asm volatile with read constraint is used here instead of a 75 * Note : the empty asm volatile with read constraint is used here instead of a
57 * "used" attribute to fix a gcc 4.1.x bug. 76 * "used" attribute to fix a gcc 4.1.x bug.
@@ -65,14 +84,7 @@ struct marker {
65 */ 84 */
66#define __trace_mark(generic, name, call_private, format, args...) \ 85#define __trace_mark(generic, name, call_private, format, args...) \
67 do { \ 86 do { \
68 static const char __mstrtab_##name[] \ 87 DEFINE_MARKER(name, format); \
69 __attribute__((section("__markers_strings"))) \
70 = #name "\0" format; \
71 static struct marker __mark_##name \
72 __attribute__((section("__markers"), aligned(8))) = \
73 { __mstrtab_##name, &__mstrtab_##name[sizeof(#name)], \
74 0, 0, marker_probe_cb, \
75 { __mark_empty_function, NULL}, NULL }; \
76 __mark_check_format(format, ## args); \ 88 __mark_check_format(format, ## args); \
77 if (unlikely(__mark_##name.state)) { \ 89 if (unlikely(__mark_##name.state)) { \
78 (*__mark_##name.call) \ 90 (*__mark_##name.call) \
@@ -80,14 +92,39 @@ struct marker {
80 } \ 92 } \
81 } while (0) 93 } while (0)
82 94
95#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
96 do { \
97 void __check_tp_type(void) \
98 { \
99 register_trace_##tp_name(tp_cb); \
100 } \
101 DEFINE_MARKER_TP(name, tp_name, tp_cb, format); \
102 __mark_check_format(format, ## args); \
103 (*__mark_##name.call)(&__mark_##name, call_private, \
104 ## args); \
105 } while (0)
106
83extern void marker_update_probe_range(struct marker *begin, 107extern void marker_update_probe_range(struct marker *begin,
84 struct marker *end); 108 struct marker *end);
109
110#define GET_MARKER(name) (__mark_##name)
111
85#else /* !CONFIG_MARKERS */ 112#else /* !CONFIG_MARKERS */
113#define DEFINE_MARKER(name, tp_name, tp_cb, format)
86#define __trace_mark(generic, name, call_private, format, args...) \ 114#define __trace_mark(generic, name, call_private, format, args...) \
87 __mark_check_format(format, ## args) 115 __mark_check_format(format, ## args)
116#define __trace_mark_tp(name, call_private, tp_name, tp_cb, format, args...) \
117 do { \
118 void __check_tp_type(void) \
119 { \
120 register_trace_##tp_name(tp_cb); \
121 } \
122 __mark_check_format(format, ## args); \
123 } while (0)
88static inline void marker_update_probe_range(struct marker *begin, 124static inline void marker_update_probe_range(struct marker *begin,
89 struct marker *end) 125 struct marker *end)
90{ } 126{ }
127#define GET_MARKER(name)
91#endif /* CONFIG_MARKERS */ 128#endif /* CONFIG_MARKERS */
92 129
93/** 130/**
@@ -117,6 +154,20 @@ static inline void marker_update_probe_range(struct marker *begin,
117 __trace_mark(1, name, NULL, format, ## args) 154 __trace_mark(1, name, NULL, format, ## args)
118 155
119/** 156/**
157 * trace_mark_tp - Marker in a tracepoint callback
158 * @name: marker name, not quoted.
159 * @tp_name: tracepoint name, not quoted.
160 * @tp_cb: tracepoint callback. Should have an associated global symbol so it
161 * is not optimized away by the compiler (should not be static).
162 * @format: format string
163 * @args...: variable argument list
164 *
165 * Places a marker in a tracepoint callback.
166 */
167#define trace_mark_tp(name, tp_name, tp_cb, format, args...) \
168 __trace_mark_tp(name, NULL, tp_name, tp_cb, format, ## args)
169
170/**
120 * MARK_NOARGS - Format string for a marker with no argument. 171 * MARK_NOARGS - Format string for a marker with no argument.
121 */ 172 */
122#define MARK_NOARGS " " 173#define MARK_NOARGS " "
@@ -136,8 +187,6 @@ extern marker_probe_func __mark_empty_function;
136 187
137extern void marker_probe_cb(const struct marker *mdata, 188extern void marker_probe_cb(const struct marker *mdata,
138 void *call_private, ...); 189 void *call_private, ...);
139extern void marker_probe_cb_noarg(const struct marker *mdata,
140 void *call_private, ...);
141 190
142/* 191/*
143 * Connect a probe to a marker. 192 * Connect a probe to a marker.
@@ -160,4 +209,13 @@ extern int marker_probe_unregister_private_data(marker_probe_func *probe,
160extern void *marker_get_private_data(const char *name, marker_probe_func *probe, 209extern void *marker_get_private_data(const char *name, marker_probe_func *probe,
161 int num); 210 int num);
162 211
212/*
213 * marker_synchronize_unregister must be called between the last marker probe
214 * unregistration and the first one of
215 * - the end of module exit function
216 * - the free of any resource used by the probes
217 * to ensure the code and data are valid for any possibly running probes.
218 */
219#define marker_synchronize_unregister() synchronize_sched()
220
163#endif 221#endif
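The DEFINE_MARKER()/__trace_mark() machinery above follows a common pattern: emit one static descriptor per instrumentation site, test a state word on the fast path, and make an indirect call only when a probe is attached. A userspace sketch of that pattern, leaving out the dedicated ELF sections, RCU and varargs handling of the real code:

/*
 * Userspace analog of the marker fast path (illustrative only):
 * a per-site static descriptor plus a cheap "is anyone listening?"
 * check before an indirect call.
 */
#include <stdio.h>

struct fake_marker {
	const char *name;
	int state;				/* 0: disabled, 1: probe attached */
	void (*call)(const char *name, int value);
};

static void probe(const char *name, int value)
{
	printf("probe hit: %s value=%d\n", name, value);
}

/* One descriptor per instrumentation site; the real code places these
 * in a dedicated "__markers" section so they can be found at runtime. */
static struct fake_marker mark_demo = { "demo_event", 0, NULL };

static void do_work(int value)
{
	/* Fast path: a single load and compare when no probe is attached. */
	if (mark_demo.state)
		mark_demo.call(mark_demo.name, value);
}

int main(void)
{
	do_work(1);			/* disabled: nothing happens */

	mark_demo.call = probe;		/* "register" a probe ...    */
	mark_demo.state = 1;		/* ... then arm the marker   */
	do_work(2);			/* enabled: probe fires      */
	return 0;
}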
diff --git a/include/linux/mdio-gpio.h b/include/linux/mdio-gpio.h
new file mode 100644
index 000000000000..e9d3fdfe41d7
--- /dev/null
+++ b/include/linux/mdio-gpio.h
@@ -0,0 +1,25 @@
1/*
2 * MDIO-GPIO bus platform data structures
3 *
4 * Copyright (C) 2008, Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 */
10
11#ifndef __LINUX_MDIO_GPIO_H
12#define __LINUX_MDIO_GPIO_H
13
14#include <linux/mdio-bitbang.h>
15
16struct mdio_gpio_platform_data {
17 /* GPIO numbers for bus pins */
18 unsigned int mdc;
19 unsigned int mdio;
20
21 unsigned int phy_mask;
22 int irqs[PHY_MAX_ADDR];
23};
24
25#endif /* __LINUX_MDIO_GPIO_H */
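A hedged sketch of how board support code might hand this platform data to the bit-banged MDIO driver. The device name "mdio-gpio", the GPIO numbers and the single-PHY mask are assumptions made for illustration, not values taken from this header:

/*
 * Board-code sketch. Assumptions: the bit-banging driver registers a
 * platform driver named "mdio-gpio", and GPIOs 10/11 are free on this
 * board; adjust both for real hardware.
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
#include <linux/mdio-gpio.h>

static struct mdio_gpio_platform_data board_mdio_pdata = {
	.mdc		= 10,		/* clock line */
	.mdio		= 11,		/* data line  */
	.phy_mask	= ~(1 << 0),	/* set bits are skipped: probe only address 0 */
	/* .irqs left zeroed in this sketch; fill in per-PHY interrupts if wired */
};

static struct platform_device board_mdio_device = {
	.name	= "mdio-gpio",
	.id	= 0,
	.dev	= {
		.platform_data = &board_mdio_pdata,
	},
};

static int __init board_mdio_init(void)
{
	return platform_device_register(&board_mdio_device);
}
device_initcall(board_mdio_init);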
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index fdf3967e1397..1fbe14d39521 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -27,16 +27,13 @@ struct mm_struct;
27 27
28#ifdef CONFIG_CGROUP_MEM_RES_CTLR 28#ifdef CONFIG_CGROUP_MEM_RES_CTLR
29 29
30#define page_reset_bad_cgroup(page) ((page)->page_cgroup = 0)
31
32extern struct page_cgroup *page_get_page_cgroup(struct page *page);
33extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm, 30extern int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
34 gfp_t gfp_mask); 31 gfp_t gfp_mask);
35extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, 32extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
36 gfp_t gfp_mask); 33 gfp_t gfp_mask);
34extern void mem_cgroup_move_lists(struct page *page, enum lru_list lru);
37extern void mem_cgroup_uncharge_page(struct page *page); 35extern void mem_cgroup_uncharge_page(struct page *page);
38extern void mem_cgroup_uncharge_cache_page(struct page *page); 36extern void mem_cgroup_uncharge_cache_page(struct page *page);
39extern void mem_cgroup_move_lists(struct page *page, bool active);
40extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask); 37extern int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask);
41 38
42extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, 39extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -44,7 +41,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
44 unsigned long *scanned, int order, 41 unsigned long *scanned, int order,
45 int mode, struct zone *z, 42 int mode, struct zone *z,
46 struct mem_cgroup *mem_cont, 43 struct mem_cgroup *mem_cont,
47 int active); 44 int active, int file);
48extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); 45extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
49int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); 46int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
50 47
@@ -69,21 +66,11 @@ extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
69extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, 66extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
70 int priority); 67 int priority);
71 68
72extern long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem, 69extern long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
73 struct zone *zone, int priority); 70 int priority, enum lru_list lru);
74extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
75 struct zone *zone, int priority);
76
77#else /* CONFIG_CGROUP_MEM_RES_CTLR */
78static inline void page_reset_bad_cgroup(struct page *page)
79{
80}
81 71
82static inline struct page_cgroup *page_get_page_cgroup(struct page *page)
83{
84 return NULL;
85}
86 72
73#else /* CONFIG_CGROUP_MEM_RES_CTLR */
87static inline int mem_cgroup_charge(struct page *page, 74static inline int mem_cgroup_charge(struct page *page,
88 struct mm_struct *mm, gfp_t gfp_mask) 75 struct mm_struct *mm, gfp_t gfp_mask)
89{ 76{
@@ -159,14 +146,9 @@ static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
159{ 146{
160} 147}
161 148
162static inline long mem_cgroup_calc_reclaim_active(struct mem_cgroup *mem, 149static inline long mem_cgroup_calc_reclaim(struct mem_cgroup *mem,
163 struct zone *zone, int priority) 150 struct zone *zone, int priority,
164{ 151 enum lru_list lru)
165 return 0;
166}
167
168static inline long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
169 struct zone *zone, int priority)
170{ 152{
171 return 0; 153 return 0;
172} 154}
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 2f5f8a5ef2a0..36c82c9e6ea7 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -91,7 +91,7 @@ extern int memory_notify(unsigned long val, void *v);
91 91
92#ifdef CONFIG_MEMORY_HOTPLUG 92#ifdef CONFIG_MEMORY_HOTPLUG
93#define hotplug_memory_notifier(fn, pri) { \ 93#define hotplug_memory_notifier(fn, pri) { \
94 static struct notifier_block fn##_mem_nb = \ 94 static __meminitdata struct notifier_block fn##_mem_nb =\
95 { .notifier_call = fn, .priority = pri }; \ 95 { .notifier_call = fn, .priority = pri }; \
96 register_memory_notifier(&fn##_mem_nb); \ 96 register_memory_notifier(&fn##_mem_nb); \
97} 97}
diff --git a/include/linux/mfd/da903x.h b/include/linux/mfd/da903x.h
new file mode 100644
index 000000000000..cad314c12439
--- /dev/null
+++ b/include/linux/mfd/da903x.h
@@ -0,0 +1,201 @@
1#ifndef __LINUX_PMIC_DA903X_H
2#define __LINUX_PMIC_DA903X_H
3
4/* Unified sub device IDs for DA9030/DA9034 */
5enum {
6 DA9030_ID_LED_1,
7 DA9030_ID_LED_2,
8 DA9030_ID_LED_3,
9 DA9030_ID_LED_4,
10 DA9030_ID_LED_PC,
11 DA9030_ID_VIBRA,
12 DA9030_ID_WLED,
13 DA9030_ID_BUCK1,
14 DA9030_ID_BUCK2,
15 DA9030_ID_LDO1,
16 DA9030_ID_LDO2,
17 DA9030_ID_LDO3,
18 DA9030_ID_LDO4,
19 DA9030_ID_LDO5,
20 DA9030_ID_LDO6,
21 DA9030_ID_LDO7,
22 DA9030_ID_LDO8,
23 DA9030_ID_LDO9,
24 DA9030_ID_LDO10,
25 DA9030_ID_LDO11,
26 DA9030_ID_LDO12,
27 DA9030_ID_LDO13,
28 DA9030_ID_LDO14,
29 DA9030_ID_LDO15,
30 DA9030_ID_LDO16,
31 DA9030_ID_LDO17,
32 DA9030_ID_LDO18,
33 DA9030_ID_LDO19,
34 DA9030_ID_LDO_INT, /* LDO Internal */
35
36 DA9034_ID_LED_1,
37 DA9034_ID_LED_2,
38 DA9034_ID_VIBRA,
39 DA9034_ID_WLED,
40 DA9034_ID_TOUCH,
41
42 DA9034_ID_BUCK1,
43 DA9034_ID_BUCK2,
44 DA9034_ID_LDO1,
45 DA9034_ID_LDO2,
46 DA9034_ID_LDO3,
47 DA9034_ID_LDO4,
48 DA9034_ID_LDO5,
49 DA9034_ID_LDO6,
50 DA9034_ID_LDO7,
51 DA9034_ID_LDO8,
52 DA9034_ID_LDO9,
53 DA9034_ID_LDO10,
54 DA9034_ID_LDO11,
55 DA9034_ID_LDO12,
56 DA9034_ID_LDO13,
57 DA9034_ID_LDO14,
58 DA9034_ID_LDO15,
59};
60
61/*
62 * DA9030/DA9034 LED sub-devices use the generic "struct led_info"
63 * as the platform_data
64 */
65
66/* DA9030 flags for "struct led_info"
67 */
68#define DA9030_LED_RATE_ON (0 << 5)
69#define DA9030_LED_RATE_052S (1 << 5)
70#define DA9030_LED_DUTY_1_16 (0 << 3)
71#define DA9030_LED_DUTY_1_8 (1 << 3)
72#define DA9030_LED_DUTY_1_4 (2 << 3)
73#define DA9030_LED_DUTY_1_2 (3 << 3)
74
75#define DA9030_VIBRA_MODE_1P3V (0 << 1)
76#define DA9030_VIBRA_MODE_2P7V (1 << 1)
77#define DA9030_VIBRA_FREQ_1HZ (0 << 2)
78#define DA9030_VIBRA_FREQ_2HZ (1 << 2)
79#define DA9030_VIBRA_FREQ_4HZ (2 << 2)
80#define DA9030_VIBRA_FREQ_8HZ (3 << 2)
81#define DA9030_VIBRA_DUTY_ON (0 << 4)
82#define DA9030_VIBRA_DUTY_75P (1 << 4)
83#define DA9030_VIBRA_DUTY_50P (2 << 4)
84#define DA9030_VIBRA_DUTY_25P (3 << 4)
85
86/* DA9034 flags for "struct led_info" */
87#define DA9034_LED_RAMP (1 << 7)
88
89/* DA9034 touch screen platform data */
90struct da9034_touch_pdata {
91 int interval_ms; /* sampling interval while pen down */
92 int x_inverted;
93 int y_inverted;
94};
95
96struct da903x_subdev_info {
97 int id;
98 const char *name;
99 void *platform_data;
100};
101
102struct da903x_platform_data {
103 int num_subdevs;
104 struct da903x_subdev_info *subdevs;
105};
106
107/* bit definitions for DA9030 events */
108#define DA9030_EVENT_ONKEY (1 << 0)
109#define DA9030_EVENT_PWREN (1 << 1)
110#define DA9030_EVENT_EXTON (1 << 2)
111#define DA9030_EVENT_CHDET (1 << 3)
112#define DA9030_EVENT_TBAT (1 << 4)
113#define DA9030_EVENT_VBATMON (1 << 5)
114#define DA9030_EVENT_VBATMON_TXON (1 << 6)
115#define DA9030_EVENT_CHIOVER (1 << 7)
116#define DA9030_EVENT_TCTO (1 << 8)
117#define DA9030_EVENT_CCTO (1 << 9)
118#define DA9030_EVENT_ADC_READY (1 << 10)
119#define DA9030_EVENT_VBUS_4P4 (1 << 11)
120#define DA9030_EVENT_VBUS_4P0 (1 << 12)
121#define DA9030_EVENT_SESS_VALID (1 << 13)
122#define DA9030_EVENT_SRP_DETECT (1 << 14)
123#define DA9030_EVENT_WATCHDOG (1 << 15)
124#define DA9030_EVENT_LDO15 (1 << 16)
125#define DA9030_EVENT_LDO16 (1 << 17)
126#define DA9030_EVENT_LDO17 (1 << 18)
127#define DA9030_EVENT_LDO18 (1 << 19)
128#define DA9030_EVENT_LDO19 (1 << 20)
129#define DA9030_EVENT_BUCK2 (1 << 21)
130
131/* bit definitions for DA9034 events */
132#define DA9034_EVENT_ONKEY (1 << 0)
133#define DA9034_EVENT_EXTON (1 << 2)
134#define DA9034_EVENT_CHDET (1 << 3)
135#define DA9034_EVENT_TBAT (1 << 4)
136#define DA9034_EVENT_VBATMON (1 << 5)
137#define DA9034_EVENT_REV_IOVER (1 << 6)
138#define DA9034_EVENT_CH_IOVER (1 << 7)
139#define DA9034_EVENT_CH_TCTO (1 << 8)
140#define DA9034_EVENT_CH_CCTO (1 << 9)
141#define DA9034_EVENT_USB_DEV (1 << 10)
142#define DA9034_EVENT_OTGCP_IOVER (1 << 11)
143#define DA9034_EVENT_VBUS_4P55 (1 << 12)
144#define DA9034_EVENT_VBUS_3P8 (1 << 13)
145#define DA9034_EVENT_SESS_1P8 (1 << 14)
146#define DA9034_EVENT_SRP_READY (1 << 15)
147#define DA9034_EVENT_ADC_MAN (1 << 16)
148#define DA9034_EVENT_ADC_AUTO4 (1 << 17)
149#define DA9034_EVENT_ADC_AUTO5 (1 << 18)
150#define DA9034_EVENT_ADC_AUTO6 (1 << 19)
151#define DA9034_EVENT_PEN_DOWN (1 << 20)
152#define DA9034_EVENT_TSI_READY (1 << 21)
153#define DA9034_EVENT_UART_TX (1 << 22)
154#define DA9034_EVENT_UART_RX (1 << 23)
155#define DA9034_EVENT_HEADSET (1 << 25)
156#define DA9034_EVENT_HOOKSWITCH (1 << 26)
157#define DA9034_EVENT_WATCHDOG (1 << 27)
158
159extern int da903x_register_notifier(struct device *dev,
160 struct notifier_block *nb, unsigned int events);
161extern int da903x_unregister_notifier(struct device *dev,
162 struct notifier_block *nb, unsigned int events);
163
164/* Status Query Interface */
165#define DA9030_STATUS_ONKEY (1 << 0)
166#define DA9030_STATUS_PWREN1 (1 << 1)
167#define DA9030_STATUS_EXTON (1 << 2)
168#define DA9030_STATUS_CHDET (1 << 3)
169#define DA9030_STATUS_TBAT (1 << 4)
170#define DA9030_STATUS_VBATMON (1 << 5)
171#define DA9030_STATUS_VBATMON_TXON (1 << 6)
172#define DA9030_STATUS_MCLKDET (1 << 7)
173
174#define DA9034_STATUS_ONKEY (1 << 0)
175#define DA9034_STATUS_EXTON (1 << 2)
176#define DA9034_STATUS_CHDET (1 << 3)
177#define DA9034_STATUS_TBAT (1 << 4)
178#define DA9034_STATUS_VBATMON (1 << 5)
179#define DA9034_STATUS_PEN_DOWN (1 << 6)
180#define DA9034_STATUS_MCLKDET (1 << 7)
181#define DA9034_STATUS_USB_DEV (1 << 8)
182#define DA9034_STATUS_HEADSET (1 << 9)
183#define DA9034_STATUS_HOOKSWITCH (1 << 10)
184#define DA9034_STATUS_REMCON (1 << 11)
185#define DA9034_STATUS_VBUS_VALID_4P55 (1 << 12)
186#define DA9034_STATUS_VBUS_VALID_3P8 (1 << 13)
187#define DA9034_STATUS_SESS_VALID_1P8 (1 << 14)
188#define DA9034_STATUS_SRP_READY (1 << 15)
189
190extern int da903x_query_status(struct device *dev, unsigned int status);
191
192
193/* NOTE: the two functions below are not intended for use outside
194 * of the DA9034 sub-device drivers
195 */
196extern int da903x_write(struct device *dev, int reg, uint8_t val);
197extern int da903x_read(struct device *dev, int reg, uint8_t *val);
198extern int da903x_update(struct device *dev, int reg, uint8_t val, uint8_t mask);
199extern int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask);
200extern int da903x_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
201#endif /* __LINUX_PMIC_DA903X_H */
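A sketch of how a DA903x sub-device driver might subscribe to PMIC events with the notifier interface declared above. It assumes the sub-device's parent is the DA903x core device and that the core passes the event bit mask as the notifier action argument; both are assumptions about the driver core, not guarantees made by this header:

/*
 * Sketch: a DA9030 sub-device subscribing to ONKEY and charger-detect
 * events. Assumption: pdev->dev.parent is the DA903x core device and
 * the core delivers the event mask as the "event" argument.
 */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/mfd/da903x.h>

static int demo_da903x_event(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	if (event & DA9030_EVENT_ONKEY)
		pr_info("da903x demo: ONKEY pressed\n");
	if (event & DA9030_EVENT_CHDET)
		pr_info("da903x demo: charger detected\n");
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_da903x_event,
};

static int demo_probe(struct platform_device *pdev)
{
	return da903x_register_notifier(pdev->dev.parent, &demo_nb,
			DA9030_EVENT_ONKEY | DA9030_EVENT_CHDET);
}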
diff --git a/include/linux/mfd/t7l66xb.h b/include/linux/mfd/t7l66xb.h
index e83c7f2036f9..b4629818aea5 100644
--- a/include/linux/mfd/t7l66xb.h
+++ b/include/linux/mfd/t7l66xb.h
@@ -15,8 +15,6 @@
15#include <linux/mfd/tmio.h> 15#include <linux/mfd/tmio.h>
16 16
17struct t7l66xb_platform_data { 17struct t7l66xb_platform_data {
18 int (*enable_clk32k)(struct platform_device *dev);
19 void (*disable_clk32k)(struct platform_device *dev);
20 int (*enable)(struct platform_device *dev); 18 int (*enable)(struct platform_device *dev);
21 int (*disable)(struct platform_device *dev); 19 int (*disable)(struct platform_device *dev);
22 int (*suspend)(struct platform_device *dev); 20 int (*suspend)(struct platform_device *dev);
diff --git a/include/linux/mfd/tc6387xb.h b/include/linux/mfd/tc6387xb.h
index fa06e0610b8e..b4888209494a 100644
--- a/include/linux/mfd/tc6387xb.h
+++ b/include/linux/mfd/tc6387xb.h
@@ -11,9 +11,6 @@
11#define MFD_TC6387XB_H 11#define MFD_TC6387XB_H
12 12
13struct tc6387xb_platform_data { 13struct tc6387xb_platform_data {
14 int (*enable_clk32k)(struct platform_device *dev);
15 void (*disable_clk32k)(struct platform_device *dev);
16
17 int (*enable)(struct platform_device *dev); 14 int (*enable)(struct platform_device *dev);
18 int (*disable)(struct platform_device *dev); 15 int (*disable)(struct platform_device *dev);
19 int (*suspend)(struct platform_device *dev); 16 int (*suspend)(struct platform_device *dev);
diff --git a/include/linux/mfd/tc6393xb.h b/include/linux/mfd/tc6393xb.h
index fec7b3f7a81f..626e448205c5 100644
--- a/include/linux/mfd/tc6393xb.h
+++ b/include/linux/mfd/tc6393xb.h
@@ -17,12 +17,12 @@
17#ifndef MFD_TC6393XB_H 17#ifndef MFD_TC6393XB_H
18#define MFD_TC6393XB_H 18#define MFD_TC6393XB_H
19 19
20#include <linux/fb.h>
21
20/* Also one should provide the CK3P6MI clock */ 22/* Also one should provide the CK3P6MI clock */
21struct tc6393xb_platform_data { 23struct tc6393xb_platform_data {
22 u16 scr_pll2cr; /* PLL2 Control */ 24 u16 scr_pll2cr; /* PLL2 Control */
23 u16 scr_gper; /* GP Enable */ 25 u16 scr_gper; /* GP Enable */
24 u32 scr_gpo_doecr; /* GPO Data OE Control */
25 u32 scr_gpo_dsr; /* GPO Data Set */
26 26
27 int (*enable)(struct platform_device *dev); 27 int (*enable)(struct platform_device *dev);
28 int (*disable)(struct platform_device *dev); 28 int (*disable)(struct platform_device *dev);
@@ -31,15 +31,28 @@ struct tc6393xb_platform_data {
31 31
32 int irq_base; /* base for subdevice irqs */ 32 int irq_base; /* base for subdevice irqs */
33 int gpio_base; 33 int gpio_base;
34 int (*setup)(struct platform_device *dev);
35 void (*teardown)(struct platform_device *dev);
34 36
35 struct tmio_nand_data *nand_data; 37 struct tmio_nand_data *nand_data;
38 struct tmio_fb_data *fb_data;
39
40 unsigned resume_restore : 1; /* make special actions
41 to preserve the state
42 on suspend/resume */
36}; 43};
37 44
45extern int tc6393xb_lcd_mode(struct platform_device *fb,
46 const struct fb_videomode *mode);
47extern int tc6393xb_lcd_set_power(struct platform_device *fb, bool on);
48
38/* 49/*
39 * Relative to irq_base 50 * Relative to irq_base
40 */ 51 */
41#define IRQ_TC6393_NAND 0 52#define IRQ_TC6393_NAND 0
42#define IRQ_TC6393_MMC 1 53#define IRQ_TC6393_MMC 1
54#define IRQ_TC6393_OHCI 2
55#define IRQ_TC6393_FB 4
43 56
44#define TC6393XB_NR_IRQS 8 57#define TC6393XB_NR_IRQS 8
45 58
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index ec612e66391c..516d955ab8a1 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -1,6 +1,8 @@
1#ifndef MFD_TMIO_H 1#ifndef MFD_TMIO_H
2#define MFD_TMIO_H 2#define MFD_TMIO_H
3 3
4#include <linux/fb.h>
5
4#define tmio_ioread8(addr) readb(addr) 6#define tmio_ioread8(addr) readb(addr)
5#define tmio_ioread16(addr) readw(addr) 7#define tmio_ioread16(addr) readw(addr)
6#define tmio_ioread16_rep(r, b, l) readsw(r, b, l) 8#define tmio_ioread16_rep(r, b, l) readsw(r, b, l)
@@ -25,4 +27,21 @@ struct tmio_nand_data {
25 unsigned int num_partitions; 27 unsigned int num_partitions;
26}; 28};
27 29
30#define FBIO_TMIO_ACC_WRITE 0x7C639300
31#define FBIO_TMIO_ACC_SYNC 0x7C639301
32
33struct tmio_fb_data {
34 int (*lcd_set_power)(struct platform_device *fb_dev,
35 bool on);
36 int (*lcd_mode)(struct platform_device *fb_dev,
37 const struct fb_videomode *mode);
38 int num_modes;
39 struct fb_videomode *modes;
40
41 /* in mm: size of screen */
42 int height;
43 int width;
44};
45
46
28#endif 47#endif
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h
index 217bb22ebb8e..af95a1d2f3a1 100644
--- a/include/linux/mfd/wm8350/audio.h
+++ b/include/linux/mfd/wm8350/audio.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * audio.h -- Audio Driver for Wolfson WM8350 PMIC 2 * audio.h -- Audio Driver for Wolfson WM8350 PMIC
3 * 3 *
4 * Copyright 2007 Wolfson Microelectronics PLC 4 * Copyright 2007, 2008 Wolfson Microelectronics PLC
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -70,9 +70,9 @@
70#define WM8350_CODEC_ISEL_0_5 3 /* x0.5 */ 70#define WM8350_CODEC_ISEL_0_5 3 /* x0.5 */
71 71
72#define WM8350_VMID_OFF 0 72#define WM8350_VMID_OFF 0
73#define WM8350_VMID_500K 1 73#define WM8350_VMID_300K 1
74#define WM8350_VMID_100K 2 74#define WM8350_VMID_50K 2
75#define WM8350_VMID_10K 3 75#define WM8350_VMID_5K 3
76 76
77/* 77/*
78 * R40 (0x28) - Clock Control 1 78 * R40 (0x28) - Clock Control 1
@@ -591,8 +591,38 @@
591#define WM8350_IRQ_CODEC_MICSCD 41 591#define WM8350_IRQ_CODEC_MICSCD 41
592#define WM8350_IRQ_CODEC_MICD 42 592#define WM8350_IRQ_CODEC_MICD 42
593 593
594/*
595 * WM8350 Platform data.
596 *
597 * This must be initialised per platform for best audio performance.
598 * Please see WM8350 datasheet for information.
599 */
600struct wm8350_audio_platform_data {
601 int vmid_discharge_msecs; /* VMID --> OFF discharge time */
602 int drain_msecs; /* OFF drain time */
603 int cap_discharge_msecs; /* Cap ON (from OFF) discharge time */
604 int vmid_charge_msecs; /* vmid power up time */
605 u32 vmid_s_curve:2; /* vmid enable s curve speed */
606 u32 dis_out4:2; /* out4 discharge speed */
607 u32 dis_out3:2; /* out3 discharge speed */
608 u32 dis_out2:2; /* out2 discharge speed */
609 u32 dis_out1:2; /* out1 discharge speed */
610 u32 vroi_out4:1; /* out4 tie off */
611 u32 vroi_out3:1; /* out3 tie off */
612 u32 vroi_out2:1; /* out2 tie off */
613 u32 vroi_out1:1; /* out1 tie off */
614 u32 vroi_enable:1; /* enable tie off */
615 u32 codec_current_on:2; /* current level ON */
616 u32 codec_current_standby:2; /* current level STANDBY */
617 u32 codec_current_charge:2; /* codec current @ vmid charge */
618};
619
620struct snd_soc_codec;
621
594struct wm8350_codec { 622struct wm8350_codec {
595 struct platform_device *pdev; 623 struct platform_device *pdev;
624 struct snd_soc_codec *codec;
625 struct wm8350_audio_platform_data *platform_data;
596}; 626};
597 627
598#endif 628#endif
diff --git a/include/linux/mfd/wm8350/rtc.h b/include/linux/mfd/wm8350/rtc.h
index dfda69e9f440..24add2bef6c9 100644
--- a/include/linux/mfd/wm8350/rtc.h
+++ b/include/linux/mfd/wm8350/rtc.h
@@ -261,6 +261,8 @@
261 261
262struct wm8350_rtc { 262struct wm8350_rtc {
263 struct platform_device *pdev; 263 struct platform_device *pdev;
264 struct rtc_device *rtc;
265 int alarm_enabled; /* used over suspend/resume */
264}; 266};
265 267
266#endif 268#endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 03aea612d284..3f34005068d4 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -7,7 +7,6 @@
7typedef struct page *new_page_t(struct page *, unsigned long private, int **); 7typedef struct page *new_page_t(struct page *, unsigned long private, int **);
8 8
9#ifdef CONFIG_MIGRATION 9#ifdef CONFIG_MIGRATION
10extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
11extern int putback_lru_pages(struct list_head *l); 10extern int putback_lru_pages(struct list_head *l);
12extern int migrate_page(struct address_space *, 11extern int migrate_page(struct address_space *,
13 struct page *, struct page *); 12 struct page *, struct page *);
@@ -21,8 +20,6 @@ extern int migrate_vmas(struct mm_struct *mm,
21 const nodemask_t *from, const nodemask_t *to, 20 const nodemask_t *from, const nodemask_t *to,
22 unsigned long flags); 21 unsigned long flags);
23#else 22#else
24static inline int isolate_lru_page(struct page *p, struct list_head *list)
25 { return -ENOSYS; }
26static inline int putback_lru_pages(struct list_head *l) { return 0; } 23static inline int putback_lru_pages(struct list_head *l) { return 0; }
27static inline int migrate_pages(struct list_head *l, new_page_t x, 24static inline int migrate_pages(struct list_head *l, new_page_t x,
28 unsigned long private) { return -ENOSYS; } 25 unsigned long private) { return -ENOSYS; }
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 151b7e0182c7..ad748588faf1 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -135,6 +135,10 @@
135#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */ 135#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */
136#define LPA_1000HALF 0x0400 /* Link partner 1000BASE-T half duplex */ 136#define LPA_1000HALF 0x0400 /* Link partner 1000BASE-T half duplex */
137 137
138/* Flow control flags */
139#define FLOW_CTRL_TX 0x01
140#define FLOW_CTRL_RX 0x02
141
138/* This structure is used in all SIOCxMIIxxx ioctl calls */ 142/* This structure is used in all SIOCxMIIxxx ioctl calls */
139struct mii_ioctl_data { 143struct mii_ioctl_data {
140 __u16 phy_id; 144 __u16 phy_id;
@@ -235,5 +239,34 @@ static inline unsigned int mii_duplex (unsigned int duplex_lock,
235 return 0; 239 return 0;
236} 240}
237 241
242/**
243 * mii_resolve_flowctrl_fdx
244 * @lcladv: value of MII ADVERTISE register
245 * @rmtadv: value of MII LPA register
246 *
247 * Resolve full duplex flow control as per IEEE 802.3-2005 table 28B-3
248 */
249static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv)
250{
251 u8 cap = 0;
252
253 if (lcladv & ADVERTISE_PAUSE_CAP) {
254 if (lcladv & ADVERTISE_PAUSE_ASYM) {
255 if (rmtadv & LPA_PAUSE_CAP)
256 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
257 else if (rmtadv & LPA_PAUSE_ASYM)
258 cap = FLOW_CTRL_RX;
259 } else {
260 if (rmtadv & LPA_PAUSE_CAP)
261 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
262 }
263 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
264 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
265 cap = FLOW_CTRL_TX;
266 }
267
268 return cap;
269}
270
238#endif /* __KERNEL__ */ 271#endif /* __KERNEL__ */
239#endif /* __LINUX_MII_H__ */ 272#endif /* __LINUX_MII_H__ */
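The new mii_resolve_flowctrl_fdx() helper encodes IEEE 802.3 table 28B-3. A standalone copy of the same logic, with the usual <linux/mii.h> pause bit values duplicated locally only so the example builds on its own, shows how the local and partner advertisements combine:

/*
 * Userspace check of the pause resolution logic (IEEE 802.3 table 28B-3).
 * The constants mirror the usual <linux/mii.h> values and are repeated
 * here only so the example is self-contained.
 */
#include <stdio.h>

#define ADVERTISE_PAUSE_CAP	0x0400
#define ADVERTISE_PAUSE_ASYM	0x0800
#define LPA_PAUSE_CAP		0x0400
#define LPA_PAUSE_ASYM		0x0800
#define FLOW_CTRL_TX		0x01
#define FLOW_CTRL_RX		0x02

static unsigned char resolve_fdx(unsigned short lcladv, unsigned short rmtadv)
{
	unsigned char cap = 0;

	if (lcladv & ADVERTISE_PAUSE_CAP) {
		if (lcladv & ADVERTISE_PAUSE_ASYM) {
			if (rmtadv & LPA_PAUSE_CAP)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_PAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else if (rmtadv & LPA_PAUSE_CAP) {
			cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if ((lcladv & ADVERTISE_PAUSE_ASYM) &&
		   (rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM)) {
		cap = FLOW_CTRL_TX;
	}
	return cap;
}

int main(void)
{
	/* Both sides symmetric-pause capable: TX and RX flow control. */
	printf("sym/sym       -> %#x\n",
	       resolve_fdx(ADVERTISE_PAUSE_CAP, LPA_PAUSE_CAP));
	/* Local asym-only, partner sym+asym: local side sends pause only. */
	printf("asym/sym+asym -> %#x\n",
	       resolve_fdx(ADVERTISE_PAUSE_ASYM,
			   LPA_PAUSE_CAP | LPA_PAUSE_ASYM));
	return 0;
}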
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 77323a72dd3c..cf9c679ab38b 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -132,6 +132,15 @@ enum {
132 MLX4_MAILBOX_SIZE = 4096 132 MLX4_MAILBOX_SIZE = 4096
133}; 133};
134 134
135enum {
136 /* set port opcode modifiers */
137 MLX4_SET_PORT_GENERAL = 0x0,
138 MLX4_SET_PORT_RQP_CALC = 0x1,
139 MLX4_SET_PORT_MAC_TABLE = 0x2,
140 MLX4_SET_PORT_VLAN_TABLE = 0x3,
141 MLX4_SET_PORT_PRIO_MAP = 0x4,
142};
143
135struct mlx4_dev; 144struct mlx4_dev;
136 145
137struct mlx4_cmd_mailbox { 146struct mlx4_cmd_mailbox {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b2f944468313..8f659cc29960 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -60,6 +60,7 @@ enum {
60 MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7, 60 MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7,
61 MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, 61 MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8,
62 MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, 62 MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9,
63 MLX4_DEV_CAP_FLAG_DPDP = 1 << 12,
63 MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, 64 MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16,
64 MLX4_DEV_CAP_FLAG_APM = 1 << 17, 65 MLX4_DEV_CAP_FLAG_APM = 1 << 17,
65 MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, 66 MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
@@ -145,6 +146,29 @@ enum {
145 MLX4_MTT_FLAG_PRESENT = 1 146 MLX4_MTT_FLAG_PRESENT = 1
146}; 147};
147 148
149enum mlx4_qp_region {
150 MLX4_QP_REGION_FW = 0,
151 MLX4_QP_REGION_ETH_ADDR,
152 MLX4_QP_REGION_FC_ADDR,
153 MLX4_QP_REGION_FC_EXCH,
154 MLX4_NUM_QP_REGION
155};
156
157enum mlx4_port_type {
158 MLX4_PORT_TYPE_IB = 1 << 0,
159 MLX4_PORT_TYPE_ETH = 1 << 1,
160};
161
162enum mlx4_special_vlan_idx {
163 MLX4_NO_VLAN_IDX = 0,
164 MLX4_VLAN_MISS_IDX,
165 MLX4_VLAN_REGULAR
166};
167
168enum {
169 MLX4_NUM_FEXCH = 64 * 1024,
170};
171
148static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor) 172static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
149{ 173{
150 return (major << 32) | (minor << 16) | subminor; 174 return (major << 32) | (minor << 16) | subminor;
@@ -154,7 +178,10 @@ struct mlx4_caps {
154 u64 fw_ver; 178 u64 fw_ver;
155 int num_ports; 179 int num_ports;
156 int vl_cap[MLX4_MAX_PORTS + 1]; 180 int vl_cap[MLX4_MAX_PORTS + 1];
157 int mtu_cap[MLX4_MAX_PORTS + 1]; 181 int ib_mtu_cap[MLX4_MAX_PORTS + 1];
182 __be32 ib_port_def_cap[MLX4_MAX_PORTS + 1];
183 u64 def_mac[MLX4_MAX_PORTS + 1];
184 int eth_mtu_cap[MLX4_MAX_PORTS + 1];
158 int gid_table_len[MLX4_MAX_PORTS + 1]; 185 int gid_table_len[MLX4_MAX_PORTS + 1];
159 int pkey_table_len[MLX4_MAX_PORTS + 1]; 186 int pkey_table_len[MLX4_MAX_PORTS + 1];
160 int local_ca_ack_delay; 187 int local_ca_ack_delay;
@@ -169,7 +196,6 @@ struct mlx4_caps {
169 int max_rq_desc_sz; 196 int max_rq_desc_sz;
170 int max_qp_init_rdma; 197 int max_qp_init_rdma;
171 int max_qp_dest_rdma; 198 int max_qp_dest_rdma;
172 int reserved_qps;
173 int sqp_start; 199 int sqp_start;
174 int num_srqs; 200 int num_srqs;
175 int max_srq_wqes; 201 int max_srq_wqes;
@@ -180,6 +206,7 @@ struct mlx4_caps {
180 int reserved_cqs; 206 int reserved_cqs;
181 int num_eqs; 207 int num_eqs;
182 int reserved_eqs; 208 int reserved_eqs;
209 int num_comp_vectors;
183 int num_mpts; 210 int num_mpts;
184 int num_mtt_segs; 211 int num_mtt_segs;
185 int fmr_reserved_mtts; 212 int fmr_reserved_mtts;
@@ -201,6 +228,15 @@ struct mlx4_caps {
201 u16 stat_rate_support; 228 u16 stat_rate_support;
202 u8 port_width_cap[MLX4_MAX_PORTS + 1]; 229 u8 port_width_cap[MLX4_MAX_PORTS + 1];
203 int max_gso_sz; 230 int max_gso_sz;
231 int reserved_qps_cnt[MLX4_NUM_QP_REGION];
232 int reserved_qps;
233 int reserved_qps_base[MLX4_NUM_QP_REGION];
234 int log_num_macs;
235 int log_num_vlans;
236 int log_num_prios;
237 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
238 u8 supported_type[MLX4_MAX_PORTS + 1];
239 u32 port_mask;
204}; 240};
205 241
206struct mlx4_buf_list { 242struct mlx4_buf_list {
@@ -293,6 +329,7 @@ struct mlx4_cq {
293 int arm_sn; 329 int arm_sn;
294 330
295 int cqn; 331 int cqn;
332 unsigned vector;
296 333
297 atomic_t refcount; 334 atomic_t refcount;
298 struct completion free; 335 struct completion free;
@@ -355,6 +392,11 @@ struct mlx4_init_port_param {
355 u64 si_guid; 392 u64 si_guid;
356}; 393};
357 394
395#define mlx4_foreach_port(port, dev, type) \
396 for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
397 if (((type) == MLX4_PORT_TYPE_IB ? (dev)->caps.port_mask : \
398 ~(dev)->caps.port_mask) & 1 << ((port) - 1))
399
358int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, 400int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
359 struct mlx4_buf *buf); 401 struct mlx4_buf *buf);
360void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf); 402void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
@@ -397,10 +439,13 @@ void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
397 439
398int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, 440int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
399 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, 441 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
400 int collapsed); 442 unsigned vector, int collapsed);
401void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq); 443void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
402 444
403int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp); 445int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base);
446void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
447
448int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
404void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp); 449void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
405 450
406int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt, 451int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
@@ -416,6 +461,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
416 int block_mcast_loopback); 461 int block_mcast_loopback);
417int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]); 462int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16]);
418 463
464int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index);
465void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index);
466
467int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
468void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
469
419int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list, 470int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list,
420 int npages, u64 iova, u32 *lkey, u32 *rkey); 471 int npages, u64 iova, u32 *lkey, u32 *rkey);
421int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages, 472int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
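The mlx4_foreach_port() macro added above filters ports by type with a single bitmask: a set bit marks an IB port, a clear bit an Ethernet port. A toy standalone version of the same trick (names and structures are illustrative, not the mlx4 API):

/*
 * Toy version of the port-mask filtering used by mlx4_foreach_port():
 * ports whose bit is set in port_mask are IB, the rest are Ethernet.
 */
#include <stdio.h>

enum toy_port_type { TOY_PORT_IB = 1 << 0, TOY_PORT_ETH = 1 << 1 };

struct toy_dev {
	int num_ports;
	unsigned int port_mask;		/* bit (port - 1) set => IB port */
};

#define toy_foreach_port(port, dev, type)				\
	for ((port) = 1; (port) <= (dev)->num_ports; (port)++)		\
		if ((((type) == TOY_PORT_IB ? (dev)->port_mask :	\
		      ~(dev)->port_mask) >> ((port) - 1)) & 1)

int main(void)
{
	struct toy_dev dev = { .num_ports = 2, .port_mask = 0x1 };
	int port;

	toy_foreach_port(port, &dev, TOY_PORT_ETH)
		printf("port %d is Ethernet\n", port);	/* prints: port 2 */
	return 0;
}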
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c61ba10768ea..aaa8b843be28 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -132,6 +132,11 @@ extern unsigned int kobjsize(const void *objp);
132#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ) 132#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
133 133
134/* 134/*
135 * special vmas that are non-mergable, non-mlock()able
136 */
137#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
138
139/*
135 * mapping from the currently active vm_flags protection bits (the 140 * mapping from the currently active vm_flags protection bits (the
136 * low four bits) to a page protection mask.. 141 * low four bits) to a page protection mask..
137 */ 142 */
@@ -140,6 +145,23 @@ extern pgprot_t protection_map[16];
140#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ 145#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
141#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ 146#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
142 147
148/*
149 * This interface is used by x86 PAT code to identify a pfn mapping that is
150 * linear over entire vma. This is to optimize PAT code that deals with
151 * marking the physical region with a particular prot. This is not for generic
152 * mm use. Note also that this check will not work if the pfn mapping is
153 * linear for a vma starting at physical address 0. In which case PAT code
154 * falls back to slow path of reserving physical range page by page.
155 */
156static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
157{
158 return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
159}
160
161static inline int is_pfn_mapping(struct vm_area_struct *vma)
162{
163 return (vma->vm_flags & VM_PFNMAP);
164}
143 165
144/* 166/*
145 * vm_fault is filled by the pagefault handler and passed to the vma's 167 * vm_fault is filled by the pagefault handler and passed to the vma's
@@ -700,10 +722,10 @@ static inline int page_mapped(struct page *page)
700extern void show_free_areas(void); 722extern void show_free_areas(void);
701 723
702#ifdef CONFIG_SHMEM 724#ifdef CONFIG_SHMEM
703int shmem_lock(struct file *file, int lock, struct user_struct *user); 725extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
704#else 726#else
705static inline int shmem_lock(struct file *file, int lock, 727static inline int shmem_lock(struct file *file, int lock,
706 struct user_struct *user) 728 struct user_struct *user)
707{ 729{
708 return 0; 730 return 0;
709} 731}
@@ -776,6 +798,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
776 struct vm_area_struct *vma); 798 struct vm_area_struct *vma);
777void unmap_mapping_range(struct address_space *mapping, 799void unmap_mapping_range(struct address_space *mapping,
778 loff_t const holebegin, loff_t const holelen, int even_cows); 800 loff_t const holebegin, loff_t const holelen, int even_cows);
801int follow_phys(struct vm_area_struct *vma, unsigned long address,
802 unsigned int flags, unsigned long *prot, resource_size_t *phys);
779int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, 803int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
780 void *buf, int len, int write); 804 void *buf, int len, int write);
781 805
@@ -1281,5 +1305,7 @@ int vmemmap_populate_basepages(struct page *start_page,
1281int vmemmap_populate(struct page *start_page, unsigned long pages, int node); 1305int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
1282void vmemmap_populate_print_last(void); 1306void vmemmap_populate_print_last(void);
1283 1307
1308extern void *alloc_locked_buffer(size_t size);
1309extern void free_locked_buffer(void *buffer, size_t size);
1284#endif /* __KERNEL__ */ 1310#endif /* __KERNEL__ */
1285#endif /* _LINUX_MM_H */ 1311#endif /* _LINUX_MM_H */
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 895bc4e93039..c948350c378e 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,40 +1,100 @@
1static inline void 1#ifndef LINUX_MM_INLINE_H
2add_page_to_active_list(struct zone *zone, struct page *page) 2#define LINUX_MM_INLINE_H
3{
4 list_add(&page->lru, &zone->active_list);
5 __inc_zone_state(zone, NR_ACTIVE);
6}
7 3
8static inline void 4/**
9add_page_to_inactive_list(struct zone *zone, struct page *page) 5 * page_is_file_cache - should the page be on a file LRU or anon LRU?
6 * @page: the page to test
7 *
8 * Returns LRU_FILE if @page is page cache page backed by a regular filesystem,
9 * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
10 * Used by functions that manipulate the LRU lists, to sort a page
11 * onto the right LRU list.
12 *
13 * We would like to get this info without a page flag, but the state
14 * needs to survive until the page is last deleted from the LRU, which
15 * could be as far down as __page_cache_release.
16 */
17static inline int page_is_file_cache(struct page *page)
10{ 18{
11 list_add(&page->lru, &zone->inactive_list); 19 if (PageSwapBacked(page))
12 __inc_zone_state(zone, NR_INACTIVE); 20 return 0;
21
22 /* The page is page cache backed by a normal filesystem. */
23 return LRU_FILE;
13} 24}
14 25
15static inline void 26static inline void
16del_page_from_active_list(struct zone *zone, struct page *page) 27add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
17{ 28{
18 list_del(&page->lru); 29 list_add(&page->lru, &zone->lru[l].list);
19 __dec_zone_state(zone, NR_ACTIVE); 30 __inc_zone_state(zone, NR_LRU_BASE + l);
20} 31}
21 32
22static inline void 33static inline void
23del_page_from_inactive_list(struct zone *zone, struct page *page) 34del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
24{ 35{
25 list_del(&page->lru); 36 list_del(&page->lru);
26 __dec_zone_state(zone, NR_INACTIVE); 37 __dec_zone_state(zone, NR_LRU_BASE + l);
27} 38}
28 39
29static inline void 40static inline void
30del_page_from_lru(struct zone *zone, struct page *page) 41del_page_from_lru(struct zone *zone, struct page *page)
31{ 42{
43 enum lru_list l = LRU_BASE;
44
32 list_del(&page->lru); 45 list_del(&page->lru);
33 if (PageActive(page)) { 46 if (PageUnevictable(page)) {
34 __ClearPageActive(page); 47 __ClearPageUnevictable(page);
35 __dec_zone_state(zone, NR_ACTIVE); 48 l = LRU_UNEVICTABLE;
36 } else { 49 } else {
37 __dec_zone_state(zone, NR_INACTIVE); 50 if (PageActive(page)) {
51 __ClearPageActive(page);
52 l += LRU_ACTIVE;
53 }
54 l += page_is_file_cache(page);
55 }
56 __dec_zone_state(zone, NR_LRU_BASE + l);
57}
58
59/**
60 * page_lru - which LRU list should a page be on?
61 * @page: the page to test
62 *
63 * Returns the LRU list a page should be on, as an index
64 * into the array of LRU lists.
65 */
66static inline enum lru_list page_lru(struct page *page)
67{
68 enum lru_list lru = LRU_BASE;
69
70 if (PageUnevictable(page))
71 lru = LRU_UNEVICTABLE;
72 else {
73 if (PageActive(page))
74 lru += LRU_ACTIVE;
75 lru += page_is_file_cache(page);
38 } 76 }
77
78 return lru;
39} 79}
40 80
81/**
82 * inactive_anon_is_low - check if anonymous pages need to be deactivated
83 * @zone: zone to check
84 *
85 * Returns true if the zone does not have enough inactive anon pages,
86 * meaning some active anon pages need to be deactivated.
87 */
88static inline int inactive_anon_is_low(struct zone *zone)
89{
90 unsigned long active, inactive;
91
92 active = zone_page_state(zone, NR_ACTIVE_ANON);
93 inactive = zone_page_state(zone, NR_INACTIVE_ANON);
94
95 if (inactive * zone->inactive_ratio < active)
96 return 1;
97
98 return 0;
99}
100#endif
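The helpers above depend on simple index arithmetic: active lists sit LRU_ACTIVE above their inactive counterparts and file lists sit LRU_FILE above the anon ones, so picking a list is a sum of offsets. A userspace sketch with toy flags standing in for PageActive()/PageSwapBacked()/PageUnevictable():

/*
 * Userspace sketch of the LRU list arithmetic: the list index is
 * LRU_BASE plus LRU_ACTIVE and/or LRU_FILE, so "which list?" is a sum
 * of offsets rather than a chain of special cases.
 */
#include <stdio.h>

#define LRU_BASE	0
#define LRU_ACTIVE	1
#define LRU_FILE	2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON   = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE   = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

struct toy_page {
	int active;		/* PageActive()      */
	int swap_backed;	/* PageSwapBacked()  */
	int unevictable;	/* PageUnevictable() */
};

static enum lru_list toy_page_lru(const struct toy_page *page)
{
	enum lru_list lru = LRU_BASE;

	if (page->unevictable)
		return LRU_UNEVICTABLE;
	if (page->active)
		lru += LRU_ACTIVE;
	if (!page->swap_backed)	/* page cache backed by a filesystem */
		lru += LRU_FILE;
	return lru;
}

int main(void)
{
	struct toy_page p = { .active = 1, .swap_backed = 0 };

	printf("active file page -> list %d (LRU_ACTIVE_FILE=%d)\n",
	       toy_page_lru(&p), LRU_ACTIVE_FILE);
	return 0;
}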
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 9d49fa36bbef..9cfc9b627fdd 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -94,9 +94,6 @@ struct page {
94 void *virtual; /* Kernel virtual address (NULL if 94 void *virtual; /* Kernel virtual address (NULL if
95 not kmapped, ie. highmem) */ 95 not kmapped, ie. highmem) */
96#endif /* WANT_PAGE_VIRTUAL */ 96#endif /* WANT_PAGE_VIRTUAL */
97#ifdef CONFIG_CGROUP_MEM_RES_CTLR
98 unsigned long page_cgroup;
99#endif
100}; 97};
101 98
102/* 99/*
@@ -235,8 +232,9 @@ struct mm_struct {
235 struct core_state *core_state; /* coredumping support */ 232 struct core_state *core_state; /* coredumping support */
236 233
237 /* aio bits */ 234 /* aio bits */
238 rwlock_t ioctx_list_lock; /* aio lock */ 235 spinlock_t ioctx_lock;
239 struct kioctx *ioctx_list; 236 struct hlist_head ioctx_list;
237
240#ifdef CONFIG_MM_OWNER 238#ifdef CONFIG_MM_OWNER
241 /* 239 /*
242 * "owner" points to a task that is regarded as the canonical 240 * "owner" points to a task that is regarded as the canonical
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index ee6e822d5994..403aa505f27e 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -130,7 +130,7 @@ struct mmc_card {
130#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR) 130#define mmc_card_set_blockaddr(c) ((c)->state |= MMC_STATE_BLOCKADDR)
131 131
132#define mmc_card_name(c) ((c)->cid.prod_name) 132#define mmc_card_name(c) ((c)->cid.prod_name)
133#define mmc_card_id(c) ((c)->dev.bus_id) 133#define mmc_card_id(c) (dev_name(&(c)->dev))
134 134
135#define mmc_list_to_card(l) container_of(l, struct mmc_card, node) 135#define mmc_list_to_card(l) container_of(l, struct mmc_card, node)
136#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev) 136#define mmc_get_drvdata(c) dev_get_drvdata(&(c)->dev)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index bde891f64591..f842f234e44f 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -176,7 +176,7 @@ static inline void *mmc_priv(struct mmc_host *host)
176 176
177#define mmc_dev(x) ((x)->parent) 177#define mmc_dev(x) ((x)->parent)
178#define mmc_classdev(x) (&(x)->class_dev) 178#define mmc_classdev(x) (&(x)->class_dev)
179#define mmc_hostname(x) ((x)->class_dev.bus_id) 179#define mmc_hostname(x) (dev_name(&(x)->class_dev))
180 180
181extern int mmc_suspend_host(struct mmc_host *, pm_message_t); 181extern int mmc_suspend_host(struct mmc_host *, pm_message_t);
182extern int mmc_resume_host(struct mmc_host *); 182extern int mmc_resume_host(struct mmc_host *);
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 07bee4a0d457..451bdfc85830 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -63,7 +63,7 @@ struct sdio_func {
63 63
64#define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT) 64#define sdio_func_set_present(f) ((f)->state |= SDIO_STATE_PRESENT)
65 65
66#define sdio_func_id(f) ((f)->dev.bus_id) 66#define sdio_func_id(f) (dev_name(&(f)->dev))
67 67
68#define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev) 68#define sdio_get_drvdata(f) dev_get_drvdata(&(f)->dev)
69#define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d) 69#define sdio_set_drvdata(f,d) dev_set_drvdata(&(f)->dev, d)
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index 61d19e1b7a0b..139d7c88d9c9 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -34,11 +34,15 @@ extern void unregister_kmmio_probe(struct kmmio_probe *p);
34/* Called from page fault handler. */ 34/* Called from page fault handler. */
35extern int kmmio_handler(struct pt_regs *regs, unsigned long addr); 35extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
36 36
37/* Called from ioremap.c */
38#ifdef CONFIG_MMIOTRACE 37#ifdef CONFIG_MMIOTRACE
38/* Called from ioremap.c */
39extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size, 39extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
40 void __iomem *addr); 40 void __iomem *addr);
41extern void mmiotrace_iounmap(volatile void __iomem *addr); 41extern void mmiotrace_iounmap(volatile void __iomem *addr);
42
43/* For anyone to insert markers. Remember trailing newline. */
44extern int mmiotrace_printk(const char *fmt, ...)
45 __attribute__ ((format (printf, 1, 2)));
42#else 46#else
43static inline void mmiotrace_ioremap(resource_size_t offset, 47static inline void mmiotrace_ioremap(resource_size_t offset,
44 unsigned long size, void __iomem *addr) 48 unsigned long size, void __iomem *addr)
@@ -48,15 +52,22 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
48static inline void mmiotrace_iounmap(volatile void __iomem *addr) 52static inline void mmiotrace_iounmap(volatile void __iomem *addr)
49{ 53{
50} 54}
51#endif /* CONFIG_MMIOTRACE_HOOKS */ 55
56static inline int mmiotrace_printk(const char *fmt, ...)
57 __attribute__ ((format (printf, 1, 0)));
58
59static inline int mmiotrace_printk(const char *fmt, ...)
60{
61 return 0;
62}
63#endif /* CONFIG_MMIOTRACE */
52 64
53enum mm_io_opcode { 65enum mm_io_opcode {
54 MMIO_READ = 0x1, /* struct mmiotrace_rw */ 66 MMIO_READ = 0x1, /* struct mmiotrace_rw */
55 MMIO_WRITE = 0x2, /* struct mmiotrace_rw */ 67 MMIO_WRITE = 0x2, /* struct mmiotrace_rw */
56 MMIO_PROBE = 0x3, /* struct mmiotrace_map */ 68 MMIO_PROBE = 0x3, /* struct mmiotrace_map */
57 MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */ 69 MMIO_UNPROBE = 0x4, /* struct mmiotrace_map */
58 MMIO_MARKER = 0x5, /* raw char data */ 70 MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
59 MMIO_UNKNOWN_OP = 0x6, /* struct mmiotrace_rw */
60}; 71};
61 72
62struct mmiotrace_rw { 73struct mmiotrace_rw {
@@ -81,5 +92,6 @@ extern void enable_mmiotrace(void);
81extern void disable_mmiotrace(void); 92extern void disable_mmiotrace(void);
82extern void mmio_trace_rw(struct mmiotrace_rw *rw); 93extern void mmio_trace_rw(struct mmiotrace_rw *rw);
83extern void mmio_trace_mapping(struct mmiotrace_map *map); 94extern void mmio_trace_mapping(struct mmiotrace_map *map);
95extern int mmio_trace_printk(const char *fmt, va_list args);
84 96
85#endif /* MMIOTRACE_H */ 97#endif /* MMIOTRACE_H */
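The !CONFIG_MMIOTRACE stub above keeps a printf format attribute on its declaration, so callers still get format-string checking even though the call compiles to nothing. A standalone sketch of the same trick (GCC/Clang attribute, hypothetical function name):

/*
 * Stub for a compiled-out tracing facility: the format attribute on the
 * declaration keeps compile-time checking of the format string against
 * the arguments, while the body does nothing.
 */
static inline int trace_printk_demo(const char *fmt, ...)
	__attribute__((format(printf, 1, 2)));

static inline int trace_printk_demo(const char *fmt, ...)
{
	(void)fmt;
	return 0;		/* feature disabled: do nothing */
}

int main(void)
{
	trace_printk_demo("value = %d\n", 42);	/* %d checked against 42 */
	/* trace_printk_demo("value = %d\n", "oops"); would warn with -Wall */
	return 0;
}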
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 428328a05fa1..35a7b5e19465 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -81,21 +81,31 @@ struct zone_padding {
81enum zone_stat_item { 81enum zone_stat_item {
82 /* First 128 byte cacheline (assuming 64 bit words) */ 82 /* First 128 byte cacheline (assuming 64 bit words) */
83 NR_FREE_PAGES, 83 NR_FREE_PAGES,
84 NR_INACTIVE, 84 NR_LRU_BASE,
85 NR_ACTIVE, 85 NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
86 NR_ACTIVE_ANON, /* " " " " " */
87 NR_INACTIVE_FILE, /* " " " " " */
88 NR_ACTIVE_FILE, /* " " " " " */
89#ifdef CONFIG_UNEVICTABLE_LRU
90 NR_UNEVICTABLE, /* " " " " " */
91 NR_MLOCK, /* mlock()ed pages found and moved off LRU */
92#else
93 NR_UNEVICTABLE = NR_ACTIVE_FILE, /* avoid compiler errors in dead code */
94 NR_MLOCK = NR_ACTIVE_FILE,
95#endif
86 NR_ANON_PAGES, /* Mapped anonymous pages */ 96 NR_ANON_PAGES, /* Mapped anonymous pages */
87 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. 97 NR_FILE_MAPPED, /* pagecache pages mapped into pagetables.
88 only modified from process context */ 98 only modified from process context */
89 NR_FILE_PAGES, 99 NR_FILE_PAGES,
90 NR_FILE_DIRTY, 100 NR_FILE_DIRTY,
91 NR_WRITEBACK, 101 NR_WRITEBACK,
92 /* Second 128 byte cacheline */
93 NR_SLAB_RECLAIMABLE, 102 NR_SLAB_RECLAIMABLE,
94 NR_SLAB_UNRECLAIMABLE, 103 NR_SLAB_UNRECLAIMABLE,
95 NR_PAGETABLE, /* used for pagetables */ 104 NR_PAGETABLE, /* used for pagetables */
96 NR_UNSTABLE_NFS, /* NFS unstable pages */ 105 NR_UNSTABLE_NFS, /* NFS unstable pages */
97 NR_BOUNCE, 106 NR_BOUNCE,
98 NR_VMSCAN_WRITE, 107 NR_VMSCAN_WRITE,
108 /* Second 128 byte cacheline */
99 NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ 109 NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
100#ifdef CONFIG_NUMA 110#ifdef CONFIG_NUMA
101 NUMA_HIT, /* allocated in intended node */ 111 NUMA_HIT, /* allocated in intended node */
@@ -107,6 +117,55 @@ enum zone_stat_item {
107#endif 117#endif
108 NR_VM_ZONE_STAT_ITEMS }; 118 NR_VM_ZONE_STAT_ITEMS };
109 119
120/*
121 * We do arithmetic on the LRU lists in various places in the code,
122 * so it is important to keep the active lists LRU_ACTIVE higher in
123 * the array than the corresponding inactive lists, and to keep
124 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
125 *
126 * This has to be kept in sync with the statistics in zone_stat_item
127 * above and the descriptions in vmstat_text in mm/vmstat.c
128 */
129#define LRU_BASE 0
130#define LRU_ACTIVE 1
131#define LRU_FILE 2
132
133enum lru_list {
134 LRU_INACTIVE_ANON = LRU_BASE,
135 LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
136 LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
137 LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
138#ifdef CONFIG_UNEVICTABLE_LRU
139 LRU_UNEVICTABLE,
140#else
141 LRU_UNEVICTABLE = LRU_ACTIVE_FILE, /* avoid compiler errors in dead code */
142#endif
143 NR_LRU_LISTS
144};
145
146#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)
147
148#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)
149
150static inline int is_file_lru(enum lru_list l)
151{
152 return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
153}
154
155static inline int is_active_lru(enum lru_list l)
156{
157 return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
158}
159
160static inline int is_unevictable_lru(enum lru_list l)
161{
162#ifdef CONFIG_UNEVICTABLE_LRU
163 return (l == LRU_UNEVICTABLE);
164#else
165 return 0;
166#endif
167}
168
110struct per_cpu_pages { 169struct per_cpu_pages {
111 int count; /* number of pages in the list */ 170 int count; /* number of pages in the list */
112 int high; /* high watermark, emptying needed */ 171 int high; /* high watermark, emptying needed */
@@ -251,10 +310,22 @@ struct zone {
251 310
252 /* Fields commonly accessed by the page reclaim scanner */ 311 /* Fields commonly accessed by the page reclaim scanner */
253 spinlock_t lru_lock; 312 spinlock_t lru_lock;
254 struct list_head active_list; 313 struct {
255 struct list_head inactive_list; 314 struct list_head list;
256 unsigned long nr_scan_active; 315 unsigned long nr_scan;
257 unsigned long nr_scan_inactive; 316 } lru[NR_LRU_LISTS];
317
318 /*
319 * The pageout code in vmscan.c keeps track of how many of the
320 * mem/swap backed and file backed pages are referenced.
321 * The higher the rotated/scanned ratio, the more valuable
322 * that cache is.
323 *
324 * The anon LRU stats live in [0], file LRU stats in [1]
325 */
326 unsigned long recent_rotated[2];
327 unsigned long recent_scanned[2];
328
258 unsigned long pages_scanned; /* since last reclaim */ 329 unsigned long pages_scanned; /* since last reclaim */
259 unsigned long flags; /* zone flags, see below */ 330 unsigned long flags; /* zone flags, see below */
260 331
@@ -276,6 +347,12 @@ struct zone {
276 */ 347 */
277 int prev_priority; 348 int prev_priority;
278 349
350 /*
351 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
352 * this zone's LRU. Maintained by the pageout code.
353 */
354 unsigned int inactive_ratio;
355
279 356
280 ZONE_PADDING(_pad2_) 357 ZONE_PADDING(_pad2_)
281 /* Rarely used or read-mostly fields */ 358 /* Rarely used or read-mostly fields */
@@ -524,8 +601,11 @@ typedef struct pglist_data {
524 struct zone node_zones[MAX_NR_ZONES]; 601 struct zone node_zones[MAX_NR_ZONES];
525 struct zonelist node_zonelists[MAX_ZONELISTS]; 602 struct zonelist node_zonelists[MAX_ZONELISTS];
526 int nr_zones; 603 int nr_zones;
527#ifdef CONFIG_FLAT_NODE_MEM_MAP 604#ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */
528 struct page *node_mem_map; 605 struct page *node_mem_map;
606#ifdef CONFIG_CGROUP_MEM_RES_CTLR
607 struct page_cgroup *node_page_cgroup;
608#endif
529#endif 609#endif
530 struct bootmem_data *bdata; 610 struct bootmem_data *bdata;
531#ifdef CONFIG_MEMORY_HOTPLUG 611#ifdef CONFIG_MEMORY_HOTPLUG
@@ -854,6 +934,7 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn)
854#endif 934#endif
855 935
856struct page; 936struct page;
937struct page_cgroup;
857struct mem_section { 938struct mem_section {
858 /* 939 /*
859 * This is, logically, a pointer to an array of struct 940 * This is, logically, a pointer to an array of struct
@@ -871,6 +952,14 @@ struct mem_section {
871 952
872 /* See declaration of similar field in struct zone */ 953 /* See declaration of similar field in struct zone */
873 unsigned long *pageblock_flags; 954 unsigned long *pageblock_flags;
955#ifdef CONFIG_CGROUP_MEM_RES_CTLR
956 /*
957 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
958 * section. (see memcontrol.h/page_cgroup.h about this.)
959 */
960 struct page_cgroup *page_cgroup;
961 unsigned long pad;
962#endif
874}; 963};
875 964
876#ifdef CONFIG_SPARSEMEM_EXTREME 965#ifdef CONFIG_SPARSEMEM_EXTREME
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index d6a3f47e95cb..97b91d1abb43 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -135,6 +135,7 @@ struct usb_device_id {
135 135
136struct hid_device_id { 136struct hid_device_id {
137 __u16 bus; 137 __u16 bus;
138 __u16 pad1;
138 __u32 vendor; 139 __u32 vendor;
139 __u32 product; 140 __u32 product;
140 kernel_ulong_t driver_data 141 kernel_ulong_t driver_data
@@ -284,7 +285,7 @@ struct pcmcia_device_id {
284/* Input */ 285/* Input */
285#define INPUT_DEVICE_ID_EV_MAX 0x1f 286#define INPUT_DEVICE_ID_EV_MAX 0x1f
286#define INPUT_DEVICE_ID_KEY_MIN_INTERESTING 0x71 287#define INPUT_DEVICE_ID_KEY_MIN_INTERESTING 0x71
287#define INPUT_DEVICE_ID_KEY_MAX 0x1ff 288#define INPUT_DEVICE_ID_KEY_MAX 0x2ff
288#define INPUT_DEVICE_ID_REL_MAX 0x0f 289#define INPUT_DEVICE_ID_REL_MAX 0x0f
289#define INPUT_DEVICE_ID_ABS_MAX 0x3f 290#define INPUT_DEVICE_ID_ABS_MAX 0x3f
290#define INPUT_DEVICE_ID_MSC_MAX 0x07 291#define INPUT_DEVICE_ID_MSC_MAX 0x07
diff --git a/include/linux/module.h b/include/linux/module.h
index 68e09557c951..3bfed013350b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -16,6 +16,7 @@
16#include <linux/kobject.h> 16#include <linux/kobject.h>
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <linux/marker.h> 18#include <linux/marker.h>
19#include <linux/tracepoint.h>
19#include <asm/local.h> 20#include <asm/local.h>
20 21
21#include <asm/module.h> 22#include <asm/module.h>
@@ -28,7 +29,7 @@
28#define MODULE_SYMBOL_PREFIX "" 29#define MODULE_SYMBOL_PREFIX ""
29#endif 30#endif
30 31
31#define MODULE_NAME_LEN (64 - sizeof(unsigned long)) 32#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN
32 33
33struct kernel_symbol 34struct kernel_symbol
34{ 35{
@@ -59,6 +60,7 @@ struct module_kobject
59 struct kobject kobj; 60 struct kobject kobj;
60 struct module *mod; 61 struct module *mod;
61 struct kobject *drivers_dir; 62 struct kobject *drivers_dir;
63 struct module_param_attrs *mp;
62}; 64};
63 65
64/* These are either module local, or the kernel's dummy ones. */ 66/* These are either module local, or the kernel's dummy ones. */
@@ -241,7 +243,6 @@ struct module
241 243
242 /* Sysfs stuff. */ 244 /* Sysfs stuff. */
243 struct module_kobject mkobj; 245 struct module_kobject mkobj;
244 struct module_param_attrs *param_attrs;
245 struct module_attribute *modinfo_attrs; 246 struct module_attribute *modinfo_attrs;
246 const char *version; 247 const char *version;
247 const char *srcversion; 248 const char *srcversion;
@@ -276,7 +277,7 @@ struct module
276 277
277 /* Exception table */ 278 /* Exception table */
278 unsigned int num_exentries; 279 unsigned int num_exentries;
279 const struct exception_table_entry *extable; 280 struct exception_table_entry *extable;
280 281
281 /* Startup function. */ 282 /* Startup function. */
282 int (*init)(void); 283 int (*init)(void);
@@ -331,6 +332,10 @@ struct module
331 struct marker *markers; 332 struct marker *markers;
332 unsigned int num_markers; 333 unsigned int num_markers;
333#endif 334#endif
335#ifdef CONFIG_TRACEPOINTS
336 struct tracepoint *tracepoints;
337 unsigned int num_tracepoints;
338#endif
334 339
335#ifdef CONFIG_MODULE_UNLOAD 340#ifdef CONFIG_MODULE_UNLOAD
336 /* What modules depend on me? */ 341 /* What modules depend on me? */
@@ -345,7 +350,6 @@ struct module
345 /* Reference counts */ 350 /* Reference counts */
346 struct module_ref ref[NR_CPUS]; 351 struct module_ref ref[NR_CPUS];
347#endif 352#endif
348
349}; 353};
350#ifndef MODULE_ARCH_INIT 354#ifndef MODULE_ARCH_INIT
351#define MODULE_ARCH_INIT {} 355#define MODULE_ARCH_INIT {}
@@ -454,6 +458,9 @@ extern void print_modules(void);
454 458
455extern void module_update_markers(void); 459extern void module_update_markers(void);
456 460
461extern void module_update_tracepoints(void);
462extern int module_get_iter_tracepoints(struct tracepoint_iter *iter);
463
457#else /* !CONFIG_MODULES... */ 464#else /* !CONFIG_MODULES... */
458#define EXPORT_SYMBOL(sym) 465#define EXPORT_SYMBOL(sym)
459#define EXPORT_SYMBOL_GPL(sym) 466#define EXPORT_SYMBOL_GPL(sym)
@@ -558,6 +565,15 @@ static inline void module_update_markers(void)
558{ 565{
559} 566}
560 567
568static inline void module_update_tracepoints(void)
569{
570}
571
572static inline int module_get_iter_tracepoints(struct tracepoint_iter *iter)
573{
574 return 0;
575}
576
561#endif /* CONFIG_MODULES */ 577#endif /* CONFIG_MODULES */
562 578
563struct device_driver; 579struct device_driver;
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index ec624381c844..e4af3399ef48 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -13,6 +13,9 @@
13#define MODULE_PARAM_PREFIX KBUILD_MODNAME "." 13#define MODULE_PARAM_PREFIX KBUILD_MODNAME "."
14#endif 14#endif
15 15
16/* Chosen so that structs with an unsigned long line up. */
17#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long))
18
16#ifdef MODULE 19#ifdef MODULE
17#define ___module_cat(a,b) __mod_ ## a ## b 20#define ___module_cat(a,b) __mod_ ## a ## b
18#define __module_cat(a,b) ___module_cat(a,b) 21#define __module_cat(a,b) ___module_cat(a,b)
@@ -79,7 +82,8 @@ struct kparam_array
79#define __module_param_call(prefix, name, set, get, arg, perm) \ 82#define __module_param_call(prefix, name, set, get, arg, perm) \
80 /* Default value instead of permissions? */ \ 83 /* Default value instead of permissions? */ \
81 static int __param_perm_check_##name __attribute__((unused)) = \ 84 static int __param_perm_check_##name __attribute__((unused)) = \
82 BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)); \ 85 BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \
86 + BUILD_BUG_ON_ZERO(sizeof(""prefix) > MAX_PARAM_PREFIX_LEN); \
83 static const char __param_str_##name[] = prefix #name; \ 87 static const char __param_str_##name[] = prefix #name; \
84 static struct kernel_param __moduleparam_const __param_##name \ 88 static struct kernel_param __moduleparam_const __param_##name \
85 __used \ 89 __used \
@@ -100,6 +104,25 @@ struct kparam_array
100#define module_param(name, type, perm) \ 104#define module_param(name, type, perm) \
101 module_param_named(name, name, type, perm) 105 module_param_named(name, name, type, perm)
102 106
107#ifndef MODULE
108/**
109 * core_param - define a historical core kernel parameter.
110 * @name: the name of the cmdline and sysfs parameter (often the same as var)
111 * @var: the variable
112 * @type: the type (for param_set_##type and param_get_##type)
113 * @perm: visibility in sysfs
114 *
115 * core_param is just like module_param(), but cannot be modular and
116 * doesn't add a prefix (such as "printk."). This is for compatibility
117 * with __setup(), and it makes sense as truly core parameters aren't
118 * tied to the particular file they're in.
119 */
120#define core_param(name, var, type, perm) \
121 param_check_##type(name, &(var)); \
122 __module_param_call("", name, param_set_##type, param_get_##type, \
123 &var, perm)
124#endif /* !MODULE */
125
103/* Actually copy string: maxlen param is usually sizeof(string). */ 126/* Actually copy string: maxlen param is usually sizeof(string). */
104#define module_param_string(name, string, len, perm) \ 127#define module_param_string(name, string, len, perm) \
105 static const struct kparam_string __param_string_##name \ 128 static const struct kparam_string __param_string_##name \
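As a rough illustration of the core_param() macro documented above, a built-in (non-modular) file could register a knob like this; the variable name is purely hypothetical and only the usage pattern is the point:

#include <linux/moduleparam.h>

static int debug_early_init;	/* hypothetical built-in flag */

/*
 * Parsed from "debug_early_init=1" on the kernel command line; no
 * "modulename." prefix is applied because this code can never be
 * built as a module.
 */
core_param(debug_early_init, debug_early_init, bool, 0644);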
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 30a1d63b6fb5..cab2a85e2ee8 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -5,8 +5,6 @@
5 * 5 *
6 * Author: Marco van Wieringen <mvw@planets.elm.net> 6 * Author: Marco van Wieringen <mvw@planets.elm.net>
7 * 7 *
8 * Version: $Id: mount.h,v 2.0 1996/11/17 16:48:14 mvw Exp mvw $
9 *
10 */ 8 */
11#ifndef _LINUX_MOUNT_H 9#ifndef _LINUX_MOUNT_H
12#define _LINUX_MOUNT_H 10#define _LINUX_MOUNT_H
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 6f4c180179e2..5375faca1f72 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -117,6 +117,7 @@ struct sioc_mif_req6
117 117
118#include <linux/pim.h> 118#include <linux/pim.h>
119#include <linux/skbuff.h> /* for struct sk_buff_head */ 119#include <linux/skbuff.h> /* for struct sk_buff_head */
120#include <net/net_namespace.h>
120 121
121#ifdef CONFIG_IPV6_MROUTE 122#ifdef CONFIG_IPV6_MROUTE
122static inline int ip6_mroute_opt(int opt) 123static inline int ip6_mroute_opt(int opt)
@@ -187,6 +188,9 @@ struct mif_device
187struct mfc6_cache 188struct mfc6_cache
188{ 189{
189 struct mfc6_cache *next; /* Next entry on cache line */ 190 struct mfc6_cache *next; /* Next entry on cache line */
191#ifdef CONFIG_NET_NS
192 struct net *mfc6_net;
193#endif
190 struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */ 194 struct in6_addr mf6c_mcastgrp; /* Group the entry belongs to */
191 struct in6_addr mf6c_origin; /* Source of packet */ 195 struct in6_addr mf6c_origin; /* Source of packet */
192 mifi_t mf6c_parent; /* Source interface */ 196 mifi_t mf6c_parent; /* Source interface */
@@ -209,6 +213,18 @@ struct mfc6_cache
209 } mfc_un; 213 } mfc_un;
210}; 214};
211 215
216static inline
217struct net *mfc6_net(const struct mfc6_cache *mfc)
218{
219 return read_pnet(&mfc->mfc6_net);
220}
221
222static inline
223void mfc6_net_set(struct mfc6_cache *mfc, struct net *net)
224{
225 write_pnet(&mfc->mfc6_net, hold_net(net));
226}
227
212#define MFC_STATIC 1 228#define MFC_STATIC 1
213#define MFC_NOTIFY 2 229#define MFC_NOTIFY 2
214 230
@@ -229,13 +245,17 @@ struct mfc6_cache
229 245
230#ifdef __KERNEL__ 246#ifdef __KERNEL__
231struct rtmsg; 247struct rtmsg;
232extern int ip6mr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait); 248extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
249 struct rtmsg *rtm, int nowait);
233 250
234#ifdef CONFIG_IPV6_MROUTE 251#ifdef CONFIG_IPV6_MROUTE
235extern struct sock *mroute6_socket; 252static inline struct sock *mroute6_socket(struct net *net)
253{
254 return net->ipv6.mroute6_sk;
255}
236extern int ip6mr_sk_done(struct sock *sk); 256extern int ip6mr_sk_done(struct sock *sk);
237#else 257#else
238#define mroute6_socket NULL 258static inline struct sock *mroute6_socket(struct net *net) { return NULL; }
239static inline int ip6mr_sk_done(struct sock *sk) { return 0; } 259static inline int ip6mr_sk_done(struct sock *sk) { return 0; }
240#endif 260#endif
241#endif 261#endif
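The two inline helpers added here give mfc6_cache a per-namespace backpointer. A minimal sketch of how an entry could be bound to its namespace at creation time, assuming a hypothetical allocation helper (the real allocation path lives in net/ipv6/ip6mr.c):

#include <linux/slab.h>
#include <linux/mroute6.h>

static struct mfc6_cache *foo_mfc6_alloc(struct net *net)
{
	struct mfc6_cache *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (c == NULL)
		return NULL;
	mfc6_net_set(c, net);	/* takes a reference via hold_net() */
	return c;
}

Later users recover the owning namespace with mfc6_net(c).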
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index ba63858056c7..e0a9b207920d 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -46,11 +46,6 @@
46#define DELETED_FLAG 0xe5 /* marks file as deleted when in name[0] */ 46#define DELETED_FLAG 0xe5 /* marks file as deleted when in name[0] */
47#define IS_FREE(n) (!*(n) || *(n) == DELETED_FLAG) 47#define IS_FREE(n) (!*(n) || *(n) == DELETED_FLAG)
48 48
49/* valid file mode bits */
50#define MSDOS_VALID_MODE (S_IFREG | S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO)
51/* Convert attribute bits and a mask to the UNIX mode. */
52#define MSDOS_MKMODE(a, m) (m & (a & ATTR_RO ? S_IRUGO|S_IXUGO : S_IRWXUGO))
53
54#define MSDOS_NAME 11 /* maximum name length */ 49#define MSDOS_NAME 11 /* maximum name length */
55#define MSDOS_LONGNAME 256 /* maximum name length */ 50#define MSDOS_LONGNAME 256 /* maximum name length */
56#define MSDOS_SLOTS 21 /* max # of slots for short and long names */ 51#define MSDOS_SLOTS 21 /* max # of slots for short and long names */
@@ -167,282 +162,10 @@ struct msdos_dir_slot {
167}; 162};
168 163
169#ifdef __KERNEL__ 164#ifdef __KERNEL__
170
171#include <linux/buffer_head.h>
172#include <linux/string.h>
173#include <linux/nls.h>
174#include <linux/fs.h>
175#include <linux/mutex.h>
176
177/*
178 * vfat shortname flags
179 */
180#define VFAT_SFN_DISPLAY_LOWER 0x0001 /* convert to lowercase for display */
181#define VFAT_SFN_DISPLAY_WIN95 0x0002 /* emulate win95 rule for display */
182#define VFAT_SFN_DISPLAY_WINNT 0x0004 /* emulate winnt rule for display */
183#define VFAT_SFN_CREATE_WIN95 0x0100 /* emulate win95 rule for create */
184#define VFAT_SFN_CREATE_WINNT 0x0200 /* emulate winnt rule for create */
185
186struct fat_mount_options {
187 uid_t fs_uid;
188 gid_t fs_gid;
189 unsigned short fs_fmask;
190 unsigned short fs_dmask;
191 unsigned short codepage; /* Codepage for shortname conversions */
192 char *iocharset; /* Charset used for filename input/display */
193 unsigned short shortname; /* flags for shortname display/create rule */
194 unsigned char name_check; /* r = relaxed, n = normal, s = strict */
195 unsigned short allow_utime;/* permission for setting the [am]time */
196 unsigned quiet:1, /* set = fake successful chmods and chowns */
197 showexec:1, /* set = only set x bit for com/exe/bat */
198 sys_immutable:1, /* set = system files are immutable */
199 dotsOK:1, /* set = hidden and system files are named '.filename' */
200 isvfat:1, /* 0=no vfat long filename support, 1=vfat support */
201 utf8:1, /* Use of UTF-8 character set (Default) */
202 unicode_xlate:1, /* create escape sequences for unhandled Unicode */
203 numtail:1, /* Does first alias have a numeric '~1' type tail? */
204 flush:1, /* write things quickly */
205 nocase:1, /* Does this need case conversion? 0=need case conversion*/
206 usefree:1, /* Use free_clusters for FAT32 */
207 tz_utc:1; /* Filesystem timestamps are in UTC */
208};
209
210#define FAT_HASH_BITS 8
211#define FAT_HASH_SIZE (1UL << FAT_HASH_BITS)
212#define FAT_HASH_MASK (FAT_HASH_SIZE-1)
213
214/*
215 * MS-DOS file system in-core superblock data
216 */
217struct msdos_sb_info {
218 unsigned short sec_per_clus; /* sectors/cluster */
219 unsigned short cluster_bits; /* log2(cluster_size) */
220 unsigned int cluster_size; /* cluster size */
221 unsigned char fats,fat_bits; /* number of FATs, FAT bits (12 or 16) */
222 unsigned short fat_start;
223 unsigned long fat_length; /* FAT start & length (sec.) */
224 unsigned long dir_start;
225 unsigned short dir_entries; /* root dir start & entries */
226 unsigned long data_start; /* first data sector */
227 unsigned long max_cluster; /* maximum cluster number */
228 unsigned long root_cluster; /* first cluster of the root directory */
229 unsigned long fsinfo_sector; /* sector number of FAT32 fsinfo */
230 struct mutex fat_lock;
231 unsigned int prev_free; /* previously allocated cluster number */
232 unsigned int free_clusters; /* -1 if undefined */
233 unsigned int free_clus_valid; /* is free_clusters valid? */
234 struct fat_mount_options options;
235 struct nls_table *nls_disk; /* Codepage used on disk */
236 struct nls_table *nls_io; /* Charset used for input and display */
237 const void *dir_ops; /* Opaque; default directory operations */
238 int dir_per_block; /* dir entries per block */
239 int dir_per_block_bits; /* log2(dir_per_block) */
240
241 int fatent_shift;
242 struct fatent_operations *fatent_ops;
243
244 spinlock_t inode_hash_lock;
245 struct hlist_head inode_hashtable[FAT_HASH_SIZE];
246};
247
248#define FAT_CACHE_VALID 0 /* special case for valid cache */
249
250/*
251 * MS-DOS file system inode data in memory
252 */
253struct msdos_inode_info {
254 spinlock_t cache_lru_lock;
255 struct list_head cache_lru;
256 int nr_caches;
257 /* for avoiding the race between fat_free() and fat_get_cluster() */
258 unsigned int cache_valid_id;
259
260 loff_t mmu_private;
261 int i_start; /* first cluster or 0 */
262 int i_logstart; /* logical first cluster */
263 int i_attrs; /* unused attribute bits */
264 loff_t i_pos; /* on-disk position of directory entry or 0 */
265 struct hlist_node i_fat_hash; /* hash by i_location */
266 struct inode vfs_inode;
267};
268
269struct fat_slot_info {
270 loff_t i_pos; /* on-disk position of directory entry */
271 loff_t slot_off; /* offset for slot or de start */
272 int nr_slots; /* number of slots + 1(de) in filename */
273 struct msdos_dir_entry *de;
274 struct buffer_head *bh;
275};
276
277static inline struct msdos_sb_info *MSDOS_SB(struct super_block *sb)
278{
279 return sb->s_fs_info;
280}
281
282static inline struct msdos_inode_info *MSDOS_I(struct inode *inode)
283{
284 return container_of(inode, struct msdos_inode_info, vfs_inode);
285}
286
287/* Return the FAT attribute byte for this inode */
288static inline u8 fat_attr(struct inode *inode)
289{
290 return ((inode->i_mode & S_IWUGO) ? ATTR_NONE : ATTR_RO) |
291 (S_ISDIR(inode->i_mode) ? ATTR_DIR : ATTR_NONE) |
292 MSDOS_I(inode)->i_attrs;
293}
294
295static inline unsigned char fat_checksum(const __u8 *name)
296{
297 unsigned char s = name[0];
298 s = (s<<7) + (s>>1) + name[1]; s = (s<<7) + (s>>1) + name[2];
299 s = (s<<7) + (s>>1) + name[3]; s = (s<<7) + (s>>1) + name[4];
300 s = (s<<7) + (s>>1) + name[5]; s = (s<<7) + (s>>1) + name[6];
301 s = (s<<7) + (s>>1) + name[7]; s = (s<<7) + (s>>1) + name[8];
302 s = (s<<7) + (s>>1) + name[9]; s = (s<<7) + (s>>1) + name[10];
303 return s;
304}
305
306static inline sector_t fat_clus_to_blknr(struct msdos_sb_info *sbi, int clus)
307{
308 return ((sector_t)clus - FAT_START_ENT) * sbi->sec_per_clus
309 + sbi->data_start;
310}
311
312static inline void fat16_towchar(wchar_t *dst, const __u8 *src, size_t len)
313{
314#ifdef __BIG_ENDIAN
315 while (len--) {
316 *dst++ = src[0] | (src[1] << 8);
317 src += 2;
318 }
319#else
320 memcpy(dst, src, len * 2);
321#endif
322}
323
324static inline void fatwchar_to16(__u8 *dst, const wchar_t *src, size_t len)
325{
326#ifdef __BIG_ENDIAN
327 while (len--) {
328 dst[0] = *src & 0x00FF;
329 dst[1] = (*src & 0xFF00) >> 8;
330 dst += 2;
331 src++;
332 }
333#else
334 memcpy(dst, src, len * 2);
335#endif
336}
337
338/* media of boot sector */ 165/* media of boot sector */
339static inline int fat_valid_media(u8 media) 166static inline int fat_valid_media(u8 media)
340{ 167{
341 return 0xf8 <= media || media == 0xf0; 168 return 0xf8 <= media || media == 0xf0;
342} 169}
343 170#endif /* !__KERNEL__ */
344/* fat/cache.c */ 171#endif /* !_LINUX_MSDOS_FS_H */
345extern void fat_cache_inval_inode(struct inode *inode);
346extern int fat_get_cluster(struct inode *inode, int cluster,
347 int *fclus, int *dclus);
348extern int fat_bmap(struct inode *inode, sector_t sector, sector_t *phys,
349 unsigned long *mapped_blocks);
350
351/* fat/dir.c */
352extern const struct file_operations fat_dir_operations;
353extern int fat_search_long(struct inode *inode, const unsigned char *name,
354 int name_len, struct fat_slot_info *sinfo);
355extern int fat_dir_empty(struct inode *dir);
356extern int fat_subdirs(struct inode *dir);
357extern int fat_scan(struct inode *dir, const unsigned char *name,
358 struct fat_slot_info *sinfo);
359extern int fat_get_dotdot_entry(struct inode *dir, struct buffer_head **bh,
360 struct msdos_dir_entry **de, loff_t *i_pos);
361extern int fat_alloc_new_dir(struct inode *dir, struct timespec *ts);
362extern int fat_add_entries(struct inode *dir, void *slots, int nr_slots,
363 struct fat_slot_info *sinfo);
364extern int fat_remove_entries(struct inode *dir, struct fat_slot_info *sinfo);
365
366/* fat/fatent.c */
367struct fat_entry {
368 int entry;
369 union {
370 u8 *ent12_p[2];
371 __le16 *ent16_p;
372 __le32 *ent32_p;
373 } u;
374 int nr_bhs;
375 struct buffer_head *bhs[2];
376};
377
378static inline void fatent_init(struct fat_entry *fatent)
379{
380 fatent->nr_bhs = 0;
381 fatent->entry = 0;
382 fatent->u.ent32_p = NULL;
383 fatent->bhs[0] = fatent->bhs[1] = NULL;
384}
385
386static inline void fatent_set_entry(struct fat_entry *fatent, int entry)
387{
388 fatent->entry = entry;
389 fatent->u.ent32_p = NULL;
390}
391
392static inline void fatent_brelse(struct fat_entry *fatent)
393{
394 int i;
395 fatent->u.ent32_p = NULL;
396 for (i = 0; i < fatent->nr_bhs; i++)
397 brelse(fatent->bhs[i]);
398 fatent->nr_bhs = 0;
399 fatent->bhs[0] = fatent->bhs[1] = NULL;
400}
401
402extern void fat_ent_access_init(struct super_block *sb);
403extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
404 int entry);
405extern int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
406 int new, int wait);
407extern int fat_alloc_clusters(struct inode *inode, int *cluster,
408 int nr_cluster);
409extern int fat_free_clusters(struct inode *inode, int cluster);
410extern int fat_count_free_clusters(struct super_block *sb);
411
412/* fat/file.c */
413extern int fat_generic_ioctl(struct inode *inode, struct file *filp,
414 unsigned int cmd, unsigned long arg);
415extern const struct file_operations fat_file_operations;
416extern const struct inode_operations fat_file_inode_operations;
417extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
418extern void fat_truncate(struct inode *inode);
419extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
420 struct kstat *stat);
421
422/* fat/inode.c */
423extern void fat_attach(struct inode *inode, loff_t i_pos);
424extern void fat_detach(struct inode *inode);
425extern struct inode *fat_iget(struct super_block *sb, loff_t i_pos);
426extern struct inode *fat_build_inode(struct super_block *sb,
427 struct msdos_dir_entry *de, loff_t i_pos);
428extern int fat_sync_inode(struct inode *inode);
429extern int fat_fill_super(struct super_block *sb, void *data, int silent,
430 const struct inode_operations *fs_dir_inode_ops, int isvfat);
431
432extern int fat_flush_inodes(struct super_block *sb, struct inode *i1,
433 struct inode *i2);
434/* fat/misc.c */
435extern void fat_fs_panic(struct super_block *s, const char *fmt, ...);
436extern void fat_clusters_flush(struct super_block *sb);
437extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
438extern int date_dos2unix(unsigned short time, unsigned short date, int tz_utc);
439extern void fat_date_unix2dos(int unix_date, __le16 *time, __le16 *date,
440 int tz_utc);
441extern int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs);
442
443int fat_cache_init(void);
444void fat_cache_destroy(void);
445
446#endif /* __KERNEL__ */
447
448#endif
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8f2939227207..d2b8a1e8ca11 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -10,8 +10,11 @@ struct msi_msg {
10}; 10};
11 11
12/* Helper functions */ 12/* Helper functions */
13struct irq_desc;
13extern void mask_msi_irq(unsigned int irq); 14extern void mask_msi_irq(unsigned int irq);
14extern void unmask_msi_irq(unsigned int irq); 15extern void unmask_msi_irq(unsigned int irq);
16extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
17extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg);
15extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); 18extern void read_msi_msg(unsigned int irq, struct msi_msg *msg);
16extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); 19extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
17 20
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index d6fb115f5a07..00e2b575021f 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -12,6 +12,7 @@
12#include <linux/mtd/flashchip.h> 12#include <linux/mtd/flashchip.h>
13#include <linux/mtd/map.h> 13#include <linux/mtd/map.h>
14#include <linux/mtd/cfi_endian.h> 14#include <linux/mtd/cfi_endian.h>
15#include <linux/mtd/xip.h>
15 16
16#ifdef CONFIG_MTD_CFI_I1 17#ifdef CONFIG_MTD_CFI_I1
17#define cfi_interleave(cfi) 1 18#define cfi_interleave(cfi) 1
@@ -281,9 +282,25 @@ struct cfi_private {
281/* 282/*
282 * Returns the command address according to the given geometry. 283 * Returns the command address according to the given geometry.
283 */ 284 */
284static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type) 285static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
286 struct map_info *map, struct cfi_private *cfi)
285{ 287{
286 return (cmd_ofs * type) * interleave; 288 unsigned bankwidth = map_bankwidth(map);
289 unsigned interleave = cfi_interleave(cfi);
290 unsigned type = cfi->device_type;
291 uint32_t addr;
292
293 addr = (cmd_ofs * type) * interleave;
294
295	 /* Modify the unlock address if we are in compatibility mode.
296 * For 16bit devices on 8 bit busses
297 * and 32bit devices on 16 bit busses
298 * set the low bit of the alternating bit sequence of the address.
299 */
300 if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa))
301 addr |= (type >> 1)*interleave;
302
303 return addr;
287} 304}
288 305
289/* 306/*
@@ -429,8 +446,7 @@ static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t
429 int type, map_word *prev_val) 446 int type, map_word *prev_val)
430{ 447{
431 map_word val; 448 map_word val;
432 uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type); 449 uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
433
434 val = cfi_build_cmd(cmd, map, cfi); 450 val = cfi_build_cmd(cmd, map, cfi);
435 451
436 if (prev_val) 452 if (prev_val)
@@ -483,6 +499,13 @@ static inline void cfi_udelay(int us)
483 } 499 }
484} 500}
485 501
502int __xipram cfi_qry_present(struct map_info *map, __u32 base,
503 struct cfi_private *cfi);
504int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
505 struct cfi_private *cfi);
506void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
507 struct cfi_private *cfi);
508
486struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size, 509struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
487 const char* name); 510 const char* name);
488struct cfi_fixup { 511struct cfi_fixup {
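A worked example of the compatibility-mode adjustment performed by the reworked cfi_build_cmd_addr() (the numbers are illustrative): for an x16 chip operated in x8 mode (device_type = 2, interleave = 1, bankwidth = 1 byte), the unlock offset 0x2aa first becomes addr = 0x2aa * 2 * 1 = 0x554; because type * interleave (2) exceeds the bankwidth (1) and the low byte of the offset is 0xaa, the value (type >> 1) * interleave = 1 is ORed in, yielding the expected byte-mode unlock address 0x555.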
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index 08dd131301c1..d4f38c5fd44e 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -73,6 +73,10 @@ struct flchip {
73 int buffer_write_time; 73 int buffer_write_time;
74 int erase_time; 74 int erase_time;
75 75
76 int word_write_time_max;
77 int buffer_write_time_max;
78 int erase_time_max;
79
76 void *priv; 80 void *priv;
77}; 81};
78 82
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 922636548558..eae26bb6430a 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -25,8 +25,10 @@
25#define MTD_ERASE_DONE 0x08 25#define MTD_ERASE_DONE 0x08
26#define MTD_ERASE_FAILED 0x10 26#define MTD_ERASE_FAILED 0x10
27 27
28#define MTD_FAIL_ADDR_UNKNOWN 0xffffffff
29
28/* If the erase fails, fail_addr might indicate exactly which block failed. If 30/* If the erase fails, fail_addr might indicate exactly which block failed. If
29 fail_addr = 0xffffffff, the failure was not at the device level or was not 31 fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level or was not
30 specific to any particular block. */ 32 specific to any particular block. */
31struct erase_info { 33struct erase_info {
32 struct mtd_info *mtd; 34 struct mtd_info *mtd;
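A sketch of how a caller might act on the new MTD_FAIL_ADDR_UNKNOWN marker when an erase completes; the callback name and message wording are hypothetical:

#include <linux/mtd/mtd.h>

static void foo_erase_callback(struct erase_info *instr)
{
	if (instr->state != MTD_ERASE_FAILED)
		return;

	if (instr->fail_addr == MTD_FAIL_ADDR_UNKNOWN)
		printk(KERN_ERR "erase failed (block not identified)\n");
	else
		printk(KERN_ERR "erase failed at 0x%08lx\n",
		       (unsigned long)instr->fail_addr);
}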
diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h
new file mode 100644
index 000000000000..51534e50f7fc
--- /dev/null
+++ b/include/linux/mtd/nand-gpio.h
@@ -0,0 +1,19 @@
1#ifndef __LINUX_MTD_NAND_GPIO_H
2#define __LINUX_MTD_NAND_GPIO_H
3
4#include <linux/mtd/nand.h>
5
6struct gpio_nand_platdata {
7 int gpio_nce;
8 int gpio_nwp;
9 int gpio_cle;
10 int gpio_ale;
11 int gpio_rdy;
12 void (*adjust_parts)(struct gpio_nand_platdata *, size_t);
13 struct mtd_partition *parts;
14 unsigned int num_parts;
15 unsigned int options;
16 int chip_delay;
17};
18
19#endif
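Since gpio_nand_platdata is consumed by a board file rather than by the driver itself, a hypothetical platform description might look like the following; every GPIO number and the partition layout are made-up placeholders:

#include <linux/kernel.h>
#include <linux/mtd/nand-gpio.h>
#include <linux/mtd/partitions.h>

static struct mtd_partition board_nand_parts[] = {
	{ .name = "boot",   .offset = 0,                  .size = 0x00100000 },
	{ .name = "rootfs", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

static struct gpio_nand_platdata board_nand_pdata = {
	.gpio_nce	= 11,	/* all GPIO numbers are board specific */
	.gpio_nwp	= 12,
	.gpio_cle	= 13,
	.gpio_ale	= 14,
	.gpio_rdy	= 15,
	.parts		= board_nand_parts,
	.num_parts	= ARRAY_SIZE(board_nand_parts),
	.chip_delay	= 25,
};

The structure would then be passed as platform data of the platform device that the GPIO-NAND driver binds to.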
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 81774e5facf4..733d3f3b4eb8 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -248,6 +248,7 @@ struct nand_hw_control {
248 * @read_page_raw: function to read a raw page without ECC 248 * @read_page_raw: function to read a raw page without ECC
249 * @write_page_raw: function to write a raw page without ECC 249 * @write_page_raw: function to write a raw page without ECC
250 * @read_page: function to read a page according to the ecc generator requirements 250 * @read_page: function to read a page according to the ecc generator requirements
251 * @read_subpage: function to read parts of the page covered by ECC.
251 * @write_page: function to write a page according to the ecc generator requirements 252 * @write_page: function to write a page according to the ecc generator requirements
252 * @read_oob: function to read chip OOB data 253 * @read_oob: function to read chip OOB data
253 * @write_oob: function to write chip OOB data 254 * @write_oob: function to write chip OOB data
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index d1b310c92eb4..0c6bbe28f38c 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -152,6 +152,8 @@
152#define ONENAND_SYS_CFG1_INT (1 << 6) 152#define ONENAND_SYS_CFG1_INT (1 << 6)
153#define ONENAND_SYS_CFG1_IOBE (1 << 5) 153#define ONENAND_SYS_CFG1_IOBE (1 << 5)
154#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4) 154#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4)
155#define ONENAND_SYS_CFG1_HF (1 << 2)
156#define ONENAND_SYS_CFG1_SYNC_WRITE (1 << 1)
155 157
156/* 158/*
157 * Controller Status Register F240h (R) 159 * Controller Status Register F240h (R)
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index 5014f7a9f5df..c92b4d439609 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -73,7 +73,6 @@ struct device;
73struct device_node; 73struct device_node;
74 74
75int __devinit of_mtd_parse_partitions(struct device *dev, 75int __devinit of_mtd_parse_partitions(struct device *dev,
76 struct mtd_info *mtd,
77 struct device_node *node, 76 struct device_node *node,
78 struct mtd_partition **pparts); 77 struct mtd_partition **pparts);
79 78
diff --git a/include/linux/mtd/sh_flctl.h b/include/linux/mtd/sh_flctl.h
new file mode 100644
index 000000000000..e77c1cea404d
--- /dev/null
+++ b/include/linux/mtd/sh_flctl.h
@@ -0,0 +1,125 @@
1/*
2 * SuperH FLCTL nand controller
3 *
4 * Copyright © 2008 Renesas Solutions Corp.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
18 */
19
20#ifndef __SH_FLCTL_H__
21#define __SH_FLCTL_H__
22
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand.h>
25#include <linux/mtd/partitions.h>
26
27/* FLCTL registers */
28#define FLCMNCR(f) (f->reg + 0x0)
29#define FLCMDCR(f) (f->reg + 0x4)
30#define FLCMCDR(f) (f->reg + 0x8)
31#define FLADR(f) (f->reg + 0xC)
32#define FLADR2(f) (f->reg + 0x3C)
33#define FLDATAR(f) (f->reg + 0x10)
34#define FLDTCNTR(f) (f->reg + 0x14)
35#define FLINTDMACR(f) (f->reg + 0x18)
36#define FLBSYTMR(f) (f->reg + 0x1C)
37#define FLBSYCNT(f) (f->reg + 0x20)
38#define FLDTFIFO(f) (f->reg + 0x24)
39#define FLECFIFO(f) (f->reg + 0x28)
40#define FLTRCR(f) (f->reg + 0x2C)
41#define FL4ECCRESULT0(f) (f->reg + 0x80)
42#define FL4ECCRESULT1(f) (f->reg + 0x84)
43#define FL4ECCRESULT2(f) (f->reg + 0x88)
44#define FL4ECCRESULT3(f) (f->reg + 0x8C)
45#define FL4ECCCR(f) (f->reg + 0x90)
46#define FL4ECCCNT(f) (f->reg + 0x94)
47#define FLERRADR(f) (f->reg + 0x98)
48
49/* FLCMNCR control bits */
50#define ECCPOS2 (0x1 << 25)
51#define _4ECCCNTEN (0x1 << 24)
52#define _4ECCEN (0x1 << 23)
53#define _4ECCCORRECT (0x1 << 22)
54#define SNAND_E (0x1 << 18) /* SNAND (0=512 1=2048)*/
55#define QTSEL_E (0x1 << 17)
56#define ENDIAN (0x1 << 16) /* 1 = little endian */
57#define FCKSEL_E (0x1 << 15)
58#define ECCPOS_00 (0x00 << 12)
59#define ECCPOS_01 (0x01 << 12)
60#define ECCPOS_02 (0x02 << 12)
61#define ACM_SACCES_MODE (0x01 << 10)
62#define NANWF_E (0x1 << 9)
63#define SE_D (0x1 << 8) /* Spare area disable */
64#define CE1_ENABLE (0x1 << 4) /* Chip Enable 1 */
65#define CE0_ENABLE (0x1 << 3) /* Chip Enable 0 */
66#define TYPESEL_SET (0x1 << 0)
67
68/* FLCMDCR control bits */
69#define ADRCNT2_E (0x1 << 31) /* 5byte address enable */
70#define ADRMD_E (0x1 << 26) /* Sector address access */
71#define CDSRC_E (0x1 << 25) /* Data buffer selection */
72#define DOSR_E (0x1 << 24) /* Status read check */
73#define SELRW (0x1 << 21) /* 0:read 1:write */
74#define DOADR_E (0x1 << 20) /* Address stage execute */
75#define ADRCNT_1 (0x00 << 18) /* Address data bytes: 1byte */
76#define ADRCNT_2 (0x01 << 18) /* Address data bytes: 2byte */
77#define ADRCNT_3 (0x02 << 18) /* Address data bytes: 3byte */
78#define ADRCNT_4 (0x03 << 18) /* Address data bytes: 4byte */
79#define DOCMD2_E (0x1 << 17) /* 2nd cmd stage execute */
80#define DOCMD1_E (0x1 << 16) /* 1st cmd stage execute */
81
82/* FLTRCR control bits */
83#define TRSTRT (0x1 << 0) /* translation start */
84#define TREND (0x1 << 1) /* translation end */
85
86/* FL4ECCCR control bits */
87#define _4ECCFA (0x1 << 2) /* 4 symbols correct fault */
88#define _4ECCEND (0x1 << 1) /* 4 symbols end */
89#define _4ECCEXST (0x1 << 0) /* 4 symbols exist */
90
91#define INIT_FL4ECCRESULT_VAL 0x03FF03FF
92#define LOOP_TIMEOUT_MAX 0x00010000
93
94#define mtd_to_flctl(mtd) container_of(mtd, struct sh_flctl, mtd)
95
96struct sh_flctl {
97 struct mtd_info mtd;
98 struct nand_chip chip;
99 void __iomem *reg;
100
101 uint8_t done_buff[2048 + 64]; /* max size 2048 + 64 */
102 int read_bytes;
103 int index;
104 int seqin_column; /* column in SEQIN cmd */
105 int seqin_page_addr; /* page_addr in SEQIN cmd */
106 uint32_t seqin_read_cmd; /* read cmd in SEQIN cmd */
107 int erase1_page_addr; /* page_addr in ERASE1 cmd */
108 uint32_t erase_ADRCNT; /* bits of FLCMDCR in ERASE1 cmd */
109 uint32_t rw_ADRCNT; /* bits of FLCMDCR in READ WRITE cmd */
110
111 int hwecc_cant_correct[4];
112
113 unsigned page_size:1; /* NAND page size (0 = 512, 1 = 2048) */
114 unsigned hwecc:1; /* Hardware ECC (0 = disabled, 1 = enabled) */
115};
116
117struct sh_flctl_platform_data {
118 struct mtd_partition *parts;
119 int nr_parts;
120 unsigned long flcmncr_val;
121
122 unsigned has_hwecc:1;
123};
124
125#endif /* __SH_FLCTL_H__ */
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index bc6da10ceee0..7a0e5c4f8072 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -144,6 +144,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
144/* 144/*
145 * NOTE: mutex_trylock() follows the spin_trylock() convention, 145 * NOTE: mutex_trylock() follows the spin_trylock() convention,
146 * not the down_trylock() convention! 146 * not the down_trylock() convention!
147 *
148 * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
147 */ 149 */
148extern int mutex_trylock(struct mutex *lock); 150extern int mutex_trylock(struct mutex *lock);
149extern void mutex_unlock(struct mutex *lock); 151extern void mutex_unlock(struct mutex *lock);
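Because the convention spelled out in the new comment is the opposite of down_trylock(), callers treat a non-zero return as "lock acquired"; a minimal sketch:

#include <linux/mutex.h>

static DEFINE_MUTEX(foo_stats_lock);

static void foo_try_update_stats(void)
{
	/* mutex_trylock() returns 1 when the lock was taken, 0 on contention */
	if (!mutex_trylock(&foo_stats_lock))
		return;		/* someone else holds it; skip this round */

	/* ... update statistics under the lock ... */

	mutex_unlock(&foo_stats_lock);
}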
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 68f8c3203c89..99eb80306dc5 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -51,8 +51,10 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
51/* 51/*
52 * Intent data 52 * Intent data
53 */ 53 */
54#define LOOKUP_OPEN (0x0100) 54#define LOOKUP_OPEN 0x0100
55#define LOOKUP_CREATE (0x0200) 55#define LOOKUP_CREATE 0x0200
56#define LOOKUP_EXCL 0x0400
57#define LOOKUP_RENAME_TARGET 0x0800
56 58
57extern int user_path_at(int, const char __user *, unsigned, struct path *); 59extern int user_path_at(int, const char __user *, unsigned, struct path *);
58 60
@@ -61,6 +63,8 @@ extern int user_path_at(int, const char __user *, unsigned, struct path *);
61#define user_path_dir(name, path) \ 63#define user_path_dir(name, path) \
62 user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path) 64 user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path)
63 65
66extern int kern_path(const char *, unsigned, struct path *);
67
64extern int path_lookup(const char *, unsigned, struct nameidata *); 68extern int path_lookup(const char *, unsigned, struct nameidata *);
65extern int vfs_path_lookup(struct dentry *, struct vfsmount *, 69extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
66 const char *, unsigned int, struct nameidata *); 70 const char *, unsigned int, struct nameidata *);
diff --git a/include/linux/net.h b/include/linux/net.h
index 6dc14a240042..4515efae4c39 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -40,7 +40,7 @@
40#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */ 40#define SYS_GETSOCKOPT 15 /* sys_getsockopt(2) */
41#define SYS_SENDMSG 16 /* sys_sendmsg(2) */ 41#define SYS_SENDMSG 16 /* sys_sendmsg(2) */
42#define SYS_RECVMSG 17 /* sys_recvmsg(2) */ 42#define SYS_RECVMSG 17 /* sys_recvmsg(2) */
43#define SYS_PACCEPT 18 /* sys_paccept(2) */ 43#define SYS_ACCEPT4 18 /* sys_accept4(2) */
44 44
45typedef enum { 45typedef enum {
46 SS_FREE = 0, /* not allocated */ 46 SS_FREE = 0, /* not allocated */
@@ -100,7 +100,7 @@ enum sock_type {
100 * remaining bits are used as flags. */ 100 * remaining bits are used as flags. */
101#define SOCK_TYPE_MASK 0xf 101#define SOCK_TYPE_MASK 0xf
102 102
103/* Flags for socket, socketpair, paccept */ 103/* Flags for socket, socketpair, accept4 */
104#define SOCK_CLOEXEC O_CLOEXEC 104#define SOCK_CLOEXEC O_CLOEXEC
105#ifndef SOCK_NONBLOCK 105#ifndef SOCK_NONBLOCK
106#define SOCK_NONBLOCK O_NONBLOCK 106#define SOCK_NONBLOCK O_NONBLOCK
@@ -223,8 +223,6 @@ extern int sock_map_fd(struct socket *sock, int flags);
223extern struct socket *sockfd_lookup(int fd, int *err); 223extern struct socket *sockfd_lookup(int fd, int *err);
224#define sockfd_put(sock) fput(sock->file) 224#define sockfd_put(sock) fput(sock->file)
225extern int net_ratelimit(void); 225extern int net_ratelimit(void);
226extern long do_accept(int fd, struct sockaddr __user *upeer_sockaddr,
227 int __user *upeer_addrlen, int flags);
228 226
229#define net_random() random32() 227#define net_random() random32()
230#define net_srandom(seed) srandom32((__force u32)seed) 228#define net_srandom(seed) srandom32((__force u32)seed)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 64875859d654..41e1224651cf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -43,6 +43,9 @@
43 43
44#include <net/net_namespace.h> 44#include <net/net_namespace.h>
45#include <net/dsa.h> 45#include <net/dsa.h>
46#ifdef CONFIG_DCB
47#include <net/dcbnl.h>
48#endif
46 49
47struct vlan_group; 50struct vlan_group;
48struct ethtool_ops; 51struct ethtool_ops;
@@ -311,14 +314,16 @@ struct napi_struct {
311 spinlock_t poll_lock; 314 spinlock_t poll_lock;
312 int poll_owner; 315 int poll_owner;
313 struct net_device *dev; 316 struct net_device *dev;
314 struct list_head dev_list;
315#endif 317#endif
318 struct list_head dev_list;
319 struct sk_buff *gro_list;
316}; 320};
317 321
318enum 322enum
319{ 323{
320 NAPI_STATE_SCHED, /* Poll is scheduled */ 324 NAPI_STATE_SCHED, /* Poll is scheduled */
321 NAPI_STATE_DISABLE, /* Disable pending */ 325 NAPI_STATE_DISABLE, /* Disable pending */
326 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
322}; 327};
323 328
324extern void __napi_schedule(struct napi_struct *n); 329extern void __napi_schedule(struct napi_struct *n);
@@ -372,22 +377,8 @@ static inline int napi_reschedule(struct napi_struct *napi)
372 * 377 *
373 * Mark NAPI processing as complete. 378 * Mark NAPI processing as complete.
374 */ 379 */
375static inline void __napi_complete(struct napi_struct *n) 380extern void __napi_complete(struct napi_struct *n);
376{ 381extern void napi_complete(struct napi_struct *n);
377 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
378 list_del(&n->poll_list);
379 smp_mb__before_clear_bit();
380 clear_bit(NAPI_STATE_SCHED, &n->state);
381}
382
383static inline void napi_complete(struct napi_struct *n)
384{
385 unsigned long flags;
386
387 local_irq_save(flags);
388 __napi_complete(n);
389 local_irq_restore(flags);
390}
391 382
392/** 383/**
393 * napi_disable - prevent NAPI from scheduling 384 * napi_disable - prevent NAPI from scheduling
@@ -451,6 +442,147 @@ struct netdev_queue {
451 struct Qdisc *qdisc_sleeping; 442 struct Qdisc *qdisc_sleeping;
452} ____cacheline_aligned_in_smp; 443} ____cacheline_aligned_in_smp;
453 444
445
446/*
447 * This structure defines the management hooks for network devices.
448 * The following hooks can be defined; unless noted otherwise, they are
449 * optional and can be filled with a null pointer.
450 *
451 * int (*ndo_init)(struct net_device *dev);
452 * This function is called once when network device is registered.
453 * The network device can use this for any late stage initialization
454 * or semantic validation. It can fail with an error code which will
455 * be propagated back to register_netdev
456 *
457 * void (*ndo_uninit)(struct net_device *dev);
458 * This function is called when device is unregistered or when registration
459 * fails. It is not called if init fails.
460 *
461 * int (*ndo_open)(struct net_device *dev);
462 * This function is called when the network device transitions to the up
463 * state.
464 *
465 * int (*ndo_stop)(struct net_device *dev);
466 * This function is called when the network device transitions to the down
467 * state.
468 *
469 * int (*ndo_hard_start_xmit)(struct sk_buff *skb, struct net_device *dev);
470 * Called when a packet needs to be transmitted.
471 * Must return NETDEV_TX_OK, NETDEV_TX_BUSY, or NETDEV_TX_LOCKED.
472 * Required; can not be NULL.
473 *
474 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
475 * Called to decide which queue to use when the device supports multiple
476 * transmit queues.
477 *
478 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
479 * This function is called to allow the device receiver to make
480 * changes to configuration when multicast or promiscuous mode is enabled.
481 *
482 * void (*ndo_set_rx_mode)(struct net_device *dev);
483 * This function is called when the device changes address list filtering.
484 *
485 * void (*ndo_set_multicast_list)(struct net_device *dev);
486 * This function is called when the multicast address list changes.
487 *
488 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
489 * This function is called when the Media Access Control address
490 * needs to be changed. If this interface is not defined, the
491 * mac address can not be changed.
492 *
493 * int (*ndo_validate_addr)(struct net_device *dev);
494 * Test if Media Access Control address is valid for the device.
495 *
496 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
497 * Called when a user requests an ioctl which can't be handled by
498 * the generic interface code. If not defined, ioctls return a
499 * not supported error code.
500 *
501 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
502 * Used to set the network device's bus interface parameters. This interface
503 * is retained for legacy reasons; new devices should use the bus
504 * interface (PCI) for low level management.
505 *
506 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
507 * Called when a user wants to change the Maximum Transfer Unit
508 * of a device. If not defined, any request to change MTU will
509 * will return an error.
510 *
511 * void (*ndo_tx_timeout)(struct net_device *dev);
512 * Callback used when the transmitter has not made any progress
513 * for dev->watchdog ticks.
514 *
515 * struct net_device_stats* (*get_stats)(struct net_device *dev);
516 * Called when a user wants to get the network device usage
517 * statistics. If not defined, the counters in dev->stats will
518 * be used.
519 *
520 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
521 * If device supports VLAN receive acceleration
522 * (ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
523 * when vlan groups for the device change. Note: grp is NULL
524 * if no vlan groups are being used.
525 *
526 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
527 * If device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
528 * this function is called when a VLAN id is registered.
529 *
530 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
531 * If device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
532 * this function is called when a VLAN id is unregistered.
533 *
534 * void (*ndo_poll_controller)(struct net_device *dev);
535 */
536#define HAVE_NET_DEVICE_OPS
537struct net_device_ops {
538 int (*ndo_init)(struct net_device *dev);
539 void (*ndo_uninit)(struct net_device *dev);
540 int (*ndo_open)(struct net_device *dev);
541 int (*ndo_stop)(struct net_device *dev);
542 int (*ndo_start_xmit) (struct sk_buff *skb,
543 struct net_device *dev);
544 u16 (*ndo_select_queue)(struct net_device *dev,
545 struct sk_buff *skb);
546#define HAVE_CHANGE_RX_FLAGS
547 void (*ndo_change_rx_flags)(struct net_device *dev,
548 int flags);
549#define HAVE_SET_RX_MODE
550 void (*ndo_set_rx_mode)(struct net_device *dev);
551#define HAVE_MULTICAST
552 void (*ndo_set_multicast_list)(struct net_device *dev);
553#define HAVE_SET_MAC_ADDR
554 int (*ndo_set_mac_address)(struct net_device *dev,
555 void *addr);
556#define HAVE_VALIDATE_ADDR
557 int (*ndo_validate_addr)(struct net_device *dev);
558#define HAVE_PRIVATE_IOCTL
559 int (*ndo_do_ioctl)(struct net_device *dev,
560 struct ifreq *ifr, int cmd);
561#define HAVE_SET_CONFIG
562 int (*ndo_set_config)(struct net_device *dev,
563 struct ifmap *map);
564#define HAVE_CHANGE_MTU
565 int (*ndo_change_mtu)(struct net_device *dev,
566 int new_mtu);
567 int (*ndo_neigh_setup)(struct net_device *dev,
568 struct neigh_parms *);
569#define HAVE_TX_TIMEOUT
570 void (*ndo_tx_timeout) (struct net_device *dev);
571
572 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
573
574 void (*ndo_vlan_rx_register)(struct net_device *dev,
575 struct vlan_group *grp);
576 void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
577 unsigned short vid);
578 void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
579 unsigned short vid);
580#ifdef CONFIG_NET_POLL_CONTROLLER
581#define HAVE_NETDEV_POLL
582 void (*ndo_poll_controller)(struct net_device *dev);
583#endif
584};
585
454/* 586/*
455 * The DEVICE structure. 587 * The DEVICE structure.
456 * Actually, this whole structure is a big mistake. It mixes I/O 588 * Actually, this whole structure is a big mistake. It mixes I/O
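A driver converting to the ops table introduced above collects its callbacks in one const structure and points the device at it through the new netdev_ops member; all foo_* names below stand for a hypothetical driver's existing handlers:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open		= foo_open,
	.ndo_stop		= foo_stop,
	.ndo_start_xmit		= foo_start_xmit,
	.ndo_set_multicast_list	= foo_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= foo_tx_timeout,
};

static void foo_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->netdev_ops = &foo_netdev_ops;	/* replaces the old per-field pointers */
}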
@@ -495,14 +627,7 @@ struct net_device
495 unsigned long state; 627 unsigned long state;
496 628
497 struct list_head dev_list; 629 struct list_head dev_list;
498#ifdef CONFIG_NETPOLL
499 struct list_head napi_list; 630 struct list_head napi_list;
500#endif
501
502 /* The device initialization function. Called only once. */
503 int (*init)(struct net_device *dev);
504
505 /* ------- Fields preinitialized in Space.c finish here ------- */
506 631
507 /* Net device features */ 632 /* Net device features */
508 unsigned long features; 633 unsigned long features;
@@ -521,6 +646,7 @@ struct net_device
521#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */ 646#define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
522 /* do not use LLTX in new drivers */ 647 /* do not use LLTX in new drivers */
523#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ 648#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
649#define NETIF_F_GRO 16384 /* Generic receive offload */
524#define NETIF_F_LRO 32768 /* large receive offload */ 650#define NETIF_F_LRO 32768 /* large receive offload */
525 651
526 /* Segmentation offload features */ 652 /* Segmentation offload features */
@@ -541,12 +667,18 @@ struct net_device
541#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) 667#define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
542#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) 668#define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
543 669
670 /*
671 * If one device supports one of these features, then enable them
672 * for all in netdev_increment_features.
673 */
674#define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
675 NETIF_F_SG | NETIF_F_HIGHDMA | \
676 NETIF_F_FRAGLIST)
677
544 /* Interface index. Unique device identifier */ 678 /* Interface index. Unique device identifier */
545 int ifindex; 679 int ifindex;
546 int iflink; 680 int iflink;
547 681
548
549 struct net_device_stats* (*get_stats)(struct net_device *dev);
550 struct net_device_stats stats; 682 struct net_device_stats stats;
551 683
552#ifdef CONFIG_WIRELESS_EXT 684#ifdef CONFIG_WIRELESS_EXT
@@ -556,18 +688,13 @@ struct net_device
556 /* Instance data managed by the core of Wireless Extensions. */ 688 /* Instance data managed by the core of Wireless Extensions. */
557 struct iw_public_data * wireless_data; 689 struct iw_public_data * wireless_data;
558#endif 690#endif
691 /* Management operations */
692 const struct net_device_ops *netdev_ops;
559 const struct ethtool_ops *ethtool_ops; 693 const struct ethtool_ops *ethtool_ops;
560 694
561 /* Hardware header description */ 695 /* Hardware header description */
562 const struct header_ops *header_ops; 696 const struct header_ops *header_ops;
563 697
564 /*
565 * This marks the end of the "visible" part of the structure. All
566 * fields hereafter are internal to the system, and may change at
567 * will (read: may be cleaned up at will).
568 */
569
570
571 unsigned int flags; /* interface flags (a la BSD) */ 698 unsigned int flags; /* interface flags (a la BSD) */
572 unsigned short gflags; 699 unsigned short gflags;
573 unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ 700 unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */
@@ -626,7 +753,7 @@ struct net_device
626 unsigned long last_rx; /* Time of last Rx */ 753 unsigned long last_rx; /* Time of last Rx */
627 /* Interface address info used in eth_type_trans() */ 754 /* Interface address info used in eth_type_trans() */
628 unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast 755 unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
629 because most packets are unicast) */ 756 because most packets are unicast) */
630 757
631 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ 758 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
632 759
@@ -645,18 +772,12 @@ struct net_device
645/* 772/*
646 * One part is mostly used on xmit path (device) 773 * One part is mostly used on xmit path (device)
647 */ 774 */
648 void *priv; /* pointer to private data */
649 int (*hard_start_xmit) (struct sk_buff *skb,
650 struct net_device *dev);
651 /* These may be needed for future network-power-down code. */ 775 /* These may be needed for future network-power-down code. */
652 unsigned long trans_start; /* Time (in jiffies) of last Tx */ 776 unsigned long trans_start; /* Time (in jiffies) of last Tx */
653 777
654 int watchdog_timeo; /* used by dev_watchdog() */ 778 int watchdog_timeo; /* used by dev_watchdog() */
655 struct timer_list watchdog_timer; 779 struct timer_list watchdog_timer;
656 780
657/*
658 * refcnt is a very hot point, so align it on SMP
659 */
660 /* Number of references to this device */ 781 /* Number of references to this device */
661 atomic_t refcnt ____cacheline_aligned_in_smp; 782 atomic_t refcnt ____cacheline_aligned_in_smp;
662 783
@@ -675,56 +796,12 @@ struct net_device
675 NETREG_RELEASED, /* called free_netdev */ 796 NETREG_RELEASED, /* called free_netdev */
676 } reg_state; 797 } reg_state;
677 798
678 /* Called after device is detached from network. */ 799 /* Called from unregister, can be used to call free_netdev */
679 void (*uninit)(struct net_device *dev); 800 void (*destructor)(struct net_device *dev);
680 /* Called after last user reference disappears. */
681 void (*destructor)(struct net_device *dev);
682
683 /* Pointers to interface service routines. */
684 int (*open)(struct net_device *dev);
685 int (*stop)(struct net_device *dev);
686#define HAVE_NETDEV_POLL
687#define HAVE_CHANGE_RX_FLAGS
688 void (*change_rx_flags)(struct net_device *dev,
689 int flags);
690#define HAVE_SET_RX_MODE
691 void (*set_rx_mode)(struct net_device *dev);
692#define HAVE_MULTICAST
693 void (*set_multicast_list)(struct net_device *dev);
694#define HAVE_SET_MAC_ADDR
695 int (*set_mac_address)(struct net_device *dev,
696 void *addr);
697#define HAVE_VALIDATE_ADDR
698 int (*validate_addr)(struct net_device *dev);
699#define HAVE_PRIVATE_IOCTL
700 int (*do_ioctl)(struct net_device *dev,
701 struct ifreq *ifr, int cmd);
702#define HAVE_SET_CONFIG
703 int (*set_config)(struct net_device *dev,
704 struct ifmap *map);
705#define HAVE_CHANGE_MTU
706 int (*change_mtu)(struct net_device *dev, int new_mtu);
707 801
708#define HAVE_TX_TIMEOUT
709 void (*tx_timeout) (struct net_device *dev);
710
711 void (*vlan_rx_register)(struct net_device *dev,
712 struct vlan_group *grp);
713 void (*vlan_rx_add_vid)(struct net_device *dev,
714 unsigned short vid);
715 void (*vlan_rx_kill_vid)(struct net_device *dev,
716 unsigned short vid);
717
718 int (*neigh_setup)(struct net_device *dev, struct neigh_parms *);
719#ifdef CONFIG_NETPOLL 802#ifdef CONFIG_NETPOLL
720 struct netpoll_info *npinfo; 803 struct netpoll_info *npinfo;
721#endif 804#endif
722#ifdef CONFIG_NET_POLL_CONTROLLER
723 void (*poll_controller)(struct net_device *dev);
724#endif
725
726 u16 (*select_queue)(struct net_device *dev,
727 struct sk_buff *skb);
728 805
729#ifdef CONFIG_NET_NS 806#ifdef CONFIG_NET_NS
730 /* Network namespace this network device is inside */ 807 /* Network namespace this network device is inside */
@@ -755,6 +832,49 @@ struct net_device
755 /* for setting kernel sock attribute on TCP connection setup */ 832 /* for setting kernel sock attribute on TCP connection setup */
756#define GSO_MAX_SIZE 65536 833#define GSO_MAX_SIZE 65536
757 unsigned int gso_max_size; 834 unsigned int gso_max_size;
835
836#ifdef CONFIG_DCB
837 /* Data Center Bridging netlink ops */
838 struct dcbnl_rtnl_ops *dcbnl_ops;
839#endif
840
841#ifdef CONFIG_COMPAT_NET_DEV_OPS
842 struct {
843 int (*init)(struct net_device *dev);
844 void (*uninit)(struct net_device *dev);
845 int (*open)(struct net_device *dev);
846 int (*stop)(struct net_device *dev);
847 int (*hard_start_xmit) (struct sk_buff *skb,
848 struct net_device *dev);
849 u16 (*select_queue)(struct net_device *dev,
850 struct sk_buff *skb);
851 void (*change_rx_flags)(struct net_device *dev,
852 int flags);
853 void (*set_rx_mode)(struct net_device *dev);
854 void (*set_multicast_list)(struct net_device *dev);
855 int (*set_mac_address)(struct net_device *dev,
856 void *addr);
857 int (*validate_addr)(struct net_device *dev);
858 int (*do_ioctl)(struct net_device *dev,
859 struct ifreq *ifr, int cmd);
860 int (*set_config)(struct net_device *dev,
861 struct ifmap *map);
862 int (*change_mtu)(struct net_device *dev, int new_mtu);
863 int (*neigh_setup)(struct net_device *dev,
864 struct neigh_parms *);
865 void (*tx_timeout) (struct net_device *dev);
866 struct net_device_stats* (*get_stats)(struct net_device *dev);
867 void (*vlan_rx_register)(struct net_device *dev,
868 struct vlan_group *grp);
869 void (*vlan_rx_add_vid)(struct net_device *dev,
870 unsigned short vid);
871 void (*vlan_rx_kill_vid)(struct net_device *dev,
872 unsigned short vid);
873#ifdef CONFIG_NET_POLL_CONTROLLER
874 void (*poll_controller)(struct net_device *dev);
875#endif
876 };
877#endif
758}; 878};
759#define to_net_dev(d) container_of(d, struct net_device, dev) 879#define to_net_dev(d) container_of(d, struct net_device, dev)
760 880
@@ -850,22 +970,8 @@ static inline void *netdev_priv(const struct net_device *dev)
850 * netif_napi_add() must be used to initialize a napi context prior to calling 970 * netif_napi_add() must be used to initialize a napi context prior to calling
851 * *any* of the other napi related functions. 971 * *any* of the other napi related functions.
852 */ 972 */
853static inline void netif_napi_add(struct net_device *dev, 973void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
854 struct napi_struct *napi, 974 int (*poll)(struct napi_struct *, int), int weight);
855 int (*poll)(struct napi_struct *, int),
856 int weight)
857{
858 INIT_LIST_HEAD(&napi->poll_list);
859 napi->poll = poll;
860 napi->weight = weight;
861#ifdef CONFIG_NETPOLL
862 napi->dev = dev;
863 list_add(&napi->dev_list, &dev->napi_list);
864 spin_lock_init(&napi->poll_lock);
865 napi->poll_owner = -1;
866#endif
867 set_bit(NAPI_STATE_SCHED, &napi->state);
868}
869 975
870/** 976/**
871 * netif_napi_del - remove a napi context 977 * netif_napi_del - remove a napi context
@@ -873,12 +979,20 @@ static inline void netif_napi_add(struct net_device *dev,
873 * 979 *
874 * netif_napi_del() removes a napi context from the network device napi list 980 * netif_napi_del() removes a napi context from the network device napi list
875 */ 981 */
876static inline void netif_napi_del(struct napi_struct *napi) 982void netif_napi_del(struct napi_struct *napi);
877{ 983
878#ifdef CONFIG_NETPOLL 984struct napi_gro_cb {
879 list_del(&napi->dev_list); 985 /* This is non-zero if the packet may be of the same flow. */
880#endif 986 int same_flow;
881} 987
988 /* This is non-zero if the packet cannot be merged with the new skb. */
989 int flush;
990
991 /* Number of segments aggregated. */
992 int count;
993};
994
995#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
882 996
883struct packet_type { 997struct packet_type {
884 __be16 type; /* This is really htons(ether_type). */ 998 __be16 type; /* This is really htons(ether_type). */
@@ -890,6 +1004,9 @@ struct packet_type {
890 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 1004 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
891 int features); 1005 int features);
892 int (*gso_send_check)(struct sk_buff *skb); 1006 int (*gso_send_check)(struct sk_buff *skb);
1007 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1008 struct sk_buff *skb);
1009 int (*gro_complete)(struct sk_buff *skb);
893 void *af_packet_priv; 1010 void *af_packet_priv;
894 struct list_head list; 1011 struct list_head list;
895}; 1012};
@@ -1243,6 +1360,9 @@ extern int netif_rx(struct sk_buff *skb);
1243extern int netif_rx_ni(struct sk_buff *skb); 1360extern int netif_rx_ni(struct sk_buff *skb);
1244#define HAVE_NETIF_RECEIVE_SKB 1 1361#define HAVE_NETIF_RECEIVE_SKB 1
1245extern int netif_receive_skb(struct sk_buff *skb); 1362extern int netif_receive_skb(struct sk_buff *skb);
1363extern void napi_gro_flush(struct napi_struct *napi);
1364extern int napi_gro_receive(struct napi_struct *napi,
1365 struct sk_buff *skb);
1246extern void netif_nit_deliver(struct sk_buff *skb); 1366extern void netif_nit_deliver(struct sk_buff *skb);
1247extern int dev_valid_name(const char *name); 1367extern int dev_valid_name(const char *name);
1248extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); 1368extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
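For illustration, the three GRO additions above (the napi_gro_cb state kept in skb->cb, the gro_receive/gro_complete hooks in struct packet_type, and the napi_gro_receive() entry point for drivers) fit together roughly as in the following sketch; every foo_* name and the ethertype are hypothetical and the merge logic itself is elided:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Protocol side: a gro_receive hook that never merges; it marks the
 * packet for an immediate flush so it takes the normal receive path. */
static struct sk_buff **foo_gro_receive(struct sk_buff **head,
                                        struct sk_buff *skb)
{
        NAPI_GRO_CB(skb)->flush = 1;    /* cannot be merged */
        return NULL;                    /* nothing held back to flush */
}

/* Called once for the merged super-packet before it is passed up. */
static int foo_gro_complete(struct sk_buff *skb)
{
        return 0;                       /* header fix-ups would go here */
}

static struct packet_type foo_packet_type = {
        .type           = __constant_htons(0x88b5),    /* sample ethertype */
        .gro_receive    = foo_gro_receive,
        .gro_complete   = foo_gro_complete,
        /* .func (the regular receive handler) omitted in this sketch */
};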
@@ -1435,8 +1555,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
1435} 1555}
1436 1556
1437/* Test if receive needs to be scheduled but only if up */ 1557/* Test if receive needs to be scheduled but only if up */
1438static inline int netif_rx_schedule_prep(struct net_device *dev, 1558static inline int netif_rx_schedule_prep(struct napi_struct *napi)
1439 struct napi_struct *napi)
1440{ 1559{
1441 return napi_schedule_prep(napi); 1560 return napi_schedule_prep(napi);
1442} 1561}
@@ -1444,27 +1563,24 @@ static inline int netif_rx_schedule_prep(struct net_device *dev,
1444/* Add interface to tail of rx poll list. This assumes that _prep has 1563/* Add interface to tail of rx poll list. This assumes that _prep has
1445 * already been called and returned 1. 1564 * already been called and returned 1.
1446 */ 1565 */
1447static inline void __netif_rx_schedule(struct net_device *dev, 1566static inline void __netif_rx_schedule(struct napi_struct *napi)
1448 struct napi_struct *napi)
1449{ 1567{
1450 __napi_schedule(napi); 1568 __napi_schedule(napi);
1451} 1569}
1452 1570
1453/* Try to reschedule poll. Called by irq handler. */ 1571/* Try to reschedule poll. Called by irq handler. */
1454 1572
1455static inline void netif_rx_schedule(struct net_device *dev, 1573static inline void netif_rx_schedule(struct napi_struct *napi)
1456 struct napi_struct *napi)
1457{ 1574{
1458 if (netif_rx_schedule_prep(dev, napi)) 1575 if (netif_rx_schedule_prep(napi))
1459 __netif_rx_schedule(dev, napi); 1576 __netif_rx_schedule(napi);
1460} 1577}
1461 1578
1462/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */ 1579/* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). */
1463static inline int netif_rx_reschedule(struct net_device *dev, 1580static inline int netif_rx_reschedule(struct napi_struct *napi)
1464 struct napi_struct *napi)
1465{ 1581{
1466 if (napi_schedule_prep(napi)) { 1582 if (napi_schedule_prep(napi)) {
1467 __netif_rx_schedule(dev, napi); 1583 __netif_rx_schedule(napi);
1468 return 1; 1584 return 1;
1469 } 1585 }
1470 return 0; 1586 return 0;
@@ -1473,8 +1589,7 @@ static inline int netif_rx_reschedule(struct net_device *dev,
1473/* same as netif_rx_complete, except that local_irq_save(flags) 1589/* same as netif_rx_complete, except that local_irq_save(flags)
1474 * has already been issued 1590 * has already been issued
1475 */ 1591 */
1476static inline void __netif_rx_complete(struct net_device *dev, 1592static inline void __netif_rx_complete(struct napi_struct *napi)
1477 struct napi_struct *napi)
1478{ 1593{
1479 __napi_complete(napi); 1594 __napi_complete(napi);
1480} 1595}
@@ -1484,14 +1599,9 @@ static inline void __netif_rx_complete(struct net_device *dev,
1484 * it completes the work. The device cannot be out of poll list at this 1599 * it completes the work. The device cannot be out of poll list at this
1485 * moment, it is BUG(). 1600 * moment, it is BUG().
1486 */ 1601 */
1487static inline void netif_rx_complete(struct net_device *dev, 1602static inline void netif_rx_complete(struct napi_struct *napi)
1488 struct napi_struct *napi)
1489{ 1603{
1490 unsigned long flags; 1604 napi_complete(napi);
1491
1492 local_irq_save(flags);
1493 __netif_rx_complete(dev, napi);
1494 local_irq_restore(flags);
1495} 1605}
1496 1606
1497static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 1607static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
@@ -1529,7 +1639,6 @@ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
1529/** 1639/**
1530 * netif_tx_lock - grab network device transmit lock 1640 * netif_tx_lock - grab network device transmit lock
1531 * @dev: network device 1641 * @dev: network device
1532 * @cpu: cpu number of lock owner
1533 * 1642 *
1534 * Get network device transmit lock 1643 * Get network device transmit lock
1535 */ 1644 */
@@ -1669,6 +1778,8 @@ extern void netdev_features_change(struct net_device *dev);
1669/* Load a device via the kmod */ 1778/* Load a device via the kmod */
1670extern void dev_load(struct net *net, const char *name); 1779extern void dev_load(struct net *net, const char *name);
1671extern void dev_mcast_init(void); 1780extern void dev_mcast_init(void);
1781extern const struct net_device_stats *dev_get_stats(struct net_device *dev);
1782
1672extern int netdev_max_backlog; 1783extern int netdev_max_backlog;
1673extern int weight_p; 1784extern int weight_p;
1674extern int netdev_set_master(struct net_device *dev, struct net_device *master); 1785extern int netdev_set_master(struct net_device *dev, struct net_device *master);
@@ -1698,7 +1809,9 @@ extern char *netdev_drivername(const struct net_device *dev, char *buffer, int l
1698 1809
1699extern void linkwatch_run_queue(void); 1810extern void linkwatch_run_queue(void);
1700 1811
1701extern int netdev_compute_features(unsigned long all, unsigned long one); 1812unsigned long netdev_increment_features(unsigned long all, unsigned long one,
1813 unsigned long mask);
1814unsigned long netdev_fix_features(unsigned long features, const char *name);
1702 1815
1703static inline int net_gso_ok(int features, int gso_type) 1816static inline int net_gso_ok(int features, int gso_type)
1704{ 1817{
@@ -1715,6 +1828,8 @@ static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
1715{ 1828{
1716 return skb_is_gso(skb) && 1829 return skb_is_gso(skb) &&
1717 (!skb_gso_ok(skb, dev->features) || 1830 (!skb_gso_ok(skb, dev->features) ||
1831 (skb_shinfo(skb)->frag_list &&
1832 !(dev->features & NETIF_F_FRAGLIST)) ||
1718 unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); 1833 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
1719} 1834}
1720 1835
@@ -1733,26 +1848,31 @@ static inline int skb_bond_should_drop(struct sk_buff *skb)
1733 struct net_device *dev = skb->dev; 1848 struct net_device *dev = skb->dev;
1734 struct net_device *master = dev->master; 1849 struct net_device *master = dev->master;
1735 1850
1736 if (master && 1851 if (master) {
1737 (dev->priv_flags & IFF_SLAVE_INACTIVE)) { 1852 if (master->priv_flags & IFF_MASTER_ARPMON)
1738 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) && 1853 dev->last_rx = jiffies;
1739 skb->protocol == __constant_htons(ETH_P_ARP))
1740 return 0;
1741 1854
1742 if (master->priv_flags & IFF_MASTER_ALB) { 1855 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
1743 if (skb->pkt_type != PACKET_BROADCAST && 1856 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
1744 skb->pkt_type != PACKET_MULTICAST) 1857 skb->protocol == __constant_htons(ETH_P_ARP))
1745 return 0; 1858 return 0;
1746 }
1747 if (master->priv_flags & IFF_MASTER_8023AD &&
1748 skb->protocol == __constant_htons(ETH_P_SLOW))
1749 return 0;
1750 1859
1751 return 1; 1860 if (master->priv_flags & IFF_MASTER_ALB) {
1861 if (skb->pkt_type != PACKET_BROADCAST &&
1862 skb->pkt_type != PACKET_MULTICAST)
1863 return 0;
1864 }
1865 if (master->priv_flags & IFF_MASTER_8023AD &&
1866 skb->protocol == __constant_htons(ETH_P_SLOW))
1867 return 0;
1868
1869 return 1;
1870 }
1752 } 1871 }
1753 return 0; 1872 return 0;
1754} 1873}
1755 1874
1875extern struct pernet_operations __net_initdata loopback_net_ops;
1756#endif /* __KERNEL__ */ 1876#endif /* __KERNEL__ */
1757 1877
1758#endif /* _LINUX_DEV_H */ 1878#endif /* _LINUX_DEV_H */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 0d8424f76899..7d8e0455ccac 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -78,6 +78,9 @@ extern int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group,
78 int echo); 78 int echo);
79extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags); 79extern int nfnetlink_unicast(struct sk_buff *skb, u_int32_t pid, int flags);
80 80
81extern void nfnl_lock(void);
82extern void nfnl_unlock(void);
83
81#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \ 84#define MODULE_ALIAS_NFNL_SUBSYS(subsys) \
82 MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys)) 85 MODULE_ALIAS("nfnetlink-subsys-" __stringify(subsys))
83 86
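The newly exported nfnl_lock()/nfnl_unlock() pair lets code outside nfnetlink.c serialise against nfnetlink message processing and subsystem (un)registration; a hedged sketch of the expected usage, with the work done inside the critical section only indicated by a comment:

#include <linux/netfilter/nfnetlink.h>

static void example_sync_with_nfnetlink(void)
{
        nfnl_lock();
        /* walk or update state that nfnetlink callbacks also touch */
        nfnl_unlock();
}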
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h
index c19595c89304..29fe9ea1d346 100644
--- a/include/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/linux/netfilter/nfnetlink_conntrack.h
@@ -141,6 +141,7 @@ enum ctattr_protonat {
141#define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1) 141#define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1)
142 142
143enum ctattr_natseq { 143enum ctattr_natseq {
144 CTA_NAT_SEQ_UNSPEC,
144 CTA_NAT_SEQ_CORRECTION_POS, 145 CTA_NAT_SEQ_CORRECTION_POS,
145 CTA_NAT_SEQ_OFFSET_BEFORE, 146 CTA_NAT_SEQ_OFFSET_BEFORE,
146 CTA_NAT_SEQ_OFFSET_AFTER, 147 CTA_NAT_SEQ_OFFSET_AFTER,
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index be41b609c88f..e52ce475d19f 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -251,7 +251,7 @@ struct xt_target_param {
251 */ 251 */
252struct xt_tgchk_param { 252struct xt_tgchk_param {
253 const char *table; 253 const char *table;
254 void *entryinfo; 254 const void *entryinfo;
255 const struct xt_target *target; 255 const struct xt_target *target;
256 void *targinfo; 256 void *targinfo;
257 unsigned int hook_mask; 257 unsigned int hook_mask;
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index d45e29cd1cfb..e40ddb94b1af 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -300,7 +300,8 @@ struct ebt_table
300 300
301#define EBT_ALIGN(s) (((s) + (__alignof__(struct ebt_replace)-1)) & \ 301#define EBT_ALIGN(s) (((s) + (__alignof__(struct ebt_replace)-1)) & \
302 ~(__alignof__(struct ebt_replace)-1)) 302 ~(__alignof__(struct ebt_replace)-1))
303extern int ebt_register_table(struct ebt_table *table); 303extern struct ebt_table *ebt_register_table(struct net *net,
304 struct ebt_table *table);
304extern void ebt_unregister_table(struct ebt_table *table); 305extern void ebt_unregister_table(struct ebt_table *table);
305extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb, 306extern unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
306 const struct net_device *in, const struct net_device *out, 307 const struct net_device *in, const struct net_device *out,
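A sketch of a call site updated for the new signature, which now takes the network namespace and returns the registered table instead of an int; the ERR_PTR convention and the frame_filter table below are assumptions made for illustration, not part of this header:

#include <linux/err.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <net/net_namespace.h>

static struct ebt_table frame_filter = {
        .name = "filter",
        /* .table, .valid_hooks, .check, .me elided in this sketch */
};

static int __net_init example_frame_filter_net_init(struct net *net)
{
        struct ebt_table *t;

        /* Previously: return ebt_register_table(&frame_filter); */
        t = ebt_register_table(net, &frame_filter);
        return IS_ERR(t) ? PTR_ERR(t) : 0;      /* assuming ERR_PTR on failure */
}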
diff --git a/include/linux/netfilter_ipv4/ipt_policy.h b/include/linux/netfilter_ipv4/ipt_policy.h
index b9478a255301..1037fb2cd206 100644
--- a/include/linux/netfilter_ipv4/ipt_policy.h
+++ b/include/linux/netfilter_ipv4/ipt_policy.h
@@ -1,6 +1,8 @@
1#ifndef _IPT_POLICY_H 1#ifndef _IPT_POLICY_H
2#define _IPT_POLICY_H 2#define _IPT_POLICY_H
3 3
4#include <linux/netfilter/xt_policy.h>
5
4#define IPT_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM 6#define IPT_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM
5 7
6/* ipt_policy_flags */ 8/* ipt_policy_flags */
diff --git a/include/linux/netfilter_ipv6/ip6t_policy.h b/include/linux/netfilter_ipv6/ip6t_policy.h
index 6bab3163d2fb..b1c449d7ec89 100644
--- a/include/linux/netfilter_ipv6/ip6t_policy.h
+++ b/include/linux/netfilter_ipv6/ip6t_policy.h
@@ -1,6 +1,8 @@
1#ifndef _IP6T_POLICY_H 1#ifndef _IP6T_POLICY_H
2#define _IP6T_POLICY_H 2#define _IP6T_POLICY_H
3 3
4#include <linux/netfilter/xt_policy.h>
5
4#define IP6T_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM 6#define IP6T_POLICY_MAX_ELEM XT_POLICY_MAX_ELEM
5 7
6/* ip6t_policy_flags */ 8/* ip6t_policy_flags */
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9ff1b54908f3..51b09a1f46c3 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -242,7 +242,8 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
242 nlh->nlmsg_flags = flags; 242 nlh->nlmsg_flags = flags;
243 nlh->nlmsg_pid = pid; 243 nlh->nlmsg_pid = pid;
244 nlh->nlmsg_seq = seq; 244 nlh->nlmsg_seq = seq;
245 memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size); 245 if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
246 memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
246 return nlh; 247 return nlh;
247} 248}
248 249
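The guarded memset() above only zeroes the tail padding between the message payload and the next NLMSG_ALIGNTO (4-byte) boundary; wrapping it in __builtin_constant_p() lets the compiler drop the call entirely for constant, already-aligned sizes. A hedged illustration of the quantity being zeroed (the helper name is invented):

#include <linux/netlink.h>

static inline int example_nlmsg_pad_bytes(int payload_len)
{
        int size = NLMSG_LENGTH(payload_len);   /* header plus payload */

        return NLMSG_ALIGN(size) - size;        /* 0..NLMSG_ALIGNTO-1 bytes */
}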
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e3d79593fb3a..e38d3c9dccda 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -94,11 +94,6 @@ static inline void netpoll_poll_unlock(void *have)
94 rcu_read_unlock(); 94 rcu_read_unlock();
95} 95}
96 96
97static inline void netpoll_netdev_init(struct net_device *dev)
98{
99 INIT_LIST_HEAD(&dev->napi_list);
100}
101
102#else 97#else
103static inline int netpoll_rx(struct sk_buff *skb) 98static inline int netpoll_rx(struct sk_buff *skb)
104{ 99{
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 78a5922a2f11..db867b04ac3c 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -83,7 +83,7 @@ struct nfs_open_context {
83 struct rpc_cred *cred; 83 struct rpc_cred *cred;
84 struct nfs4_state *state; 84 struct nfs4_state *state;
85 fl_owner_t lockowner; 85 fl_owner_t lockowner;
86 int mode; 86 fmode_t mode;
87 87
88 unsigned long flags; 88 unsigned long flags;
89#define NFS_CONTEXT_ERROR_WRITE (0) 89#define NFS_CONTEXT_ERROR_WRITE (0)
@@ -130,14 +130,17 @@ struct nfs_inode {
130 * 130 *
131 * We need to revalidate the cached attrs for this inode if 131 * We need to revalidate the cached attrs for this inode if
132 * 132 *
133 * jiffies - read_cache_jiffies > attrtimeo 133 * jiffies - read_cache_jiffies >= attrtimeo
134 *
135 * Please note the comparison is greater than or equal
136 * so that zero timeout values can be specified.
134 */ 137 */
135 unsigned long read_cache_jiffies; 138 unsigned long read_cache_jiffies;
136 unsigned long attrtimeo; 139 unsigned long attrtimeo;
137 unsigned long attrtimeo_timestamp; 140 unsigned long attrtimeo_timestamp;
138 __u64 change_attr; /* v4 only */ 141 __u64 change_attr; /* v4 only */
139 142
140 unsigned long last_updated; 143 unsigned long attr_gencount;
141 /* "Generation counter" for the attribute cache. This is 144 /* "Generation counter" for the attribute cache. This is
142 * bumped whenever we update the metadata on the 145 * bumped whenever we update the metadata on the
143 * server. 146 * server.
@@ -180,7 +183,7 @@ struct nfs_inode {
180 /* NFSv4 state */ 183 /* NFSv4 state */
181 struct list_head open_states; 184 struct list_head open_states;
182 struct nfs_delegation *delegation; 185 struct nfs_delegation *delegation;
183 int delegation_state; 186 fmode_t delegation_state;
184 struct rw_semaphore rwsem; 187 struct rw_semaphore rwsem;
185#endif /* CONFIG_NFS_V4*/ 188#endif /* CONFIG_NFS_V4*/
186 struct inode vfs_inode; 189 struct inode vfs_inode;
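The revalidation rule spelled out in the nfs_inode comment above (jiffies - read_cache_jiffies >= attrtimeo) corresponds to a check along these lines; the helper name is illustrative, and time_after_eq() supplies the inclusive comparison, so attrtimeo == 0 forces revalidation on every access:

#include <linux/jiffies.h>
#include <linux/nfs_fs.h>

static inline int example_nfs_attribute_timeout(const struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);

        return time_after_eq(jiffies,
                             nfsi->read_cache_jiffies + nfsi->attrtimeo);
}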
@@ -200,11 +203,10 @@ struct nfs_inode {
200/* 203/*
201 * Bit offsets in flags field 204 * Bit offsets in flags field
202 */ 205 */
203#define NFS_INO_REVALIDATING (0) /* revalidating attrs */ 206#define NFS_INO_ADVISE_RDPLUS (0) /* advise readdirplus */
204#define NFS_INO_ADVISE_RDPLUS (1) /* advise readdirplus */ 207#define NFS_INO_STALE (1) /* possible stale inode */
205#define NFS_INO_STALE (2) /* possible stale inode */ 208#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
206#define NFS_INO_ACL_LRU_SET (3) /* Inode is on the LRU list */ 209#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */
207#define NFS_INO_MOUNTPOINT (4) /* inode is remote mountpoint */
208 210
209static inline struct nfs_inode *NFS_I(const struct inode *inode) 211static inline struct nfs_inode *NFS_I(const struct inode *inode)
210{ 212{
@@ -343,17 +345,13 @@ extern int nfs_setattr(struct dentry *, struct iattr *);
343extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); 345extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr);
344extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); 346extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
345extern void put_nfs_open_context(struct nfs_open_context *ctx); 347extern void put_nfs_open_context(struct nfs_open_context *ctx);
346extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, int mode); 348extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode);
347extern u64 nfs_compat_user_ino64(u64 fileid); 349extern u64 nfs_compat_user_ino64(u64 fileid);
350extern void nfs_fattr_init(struct nfs_fattr *fattr);
348 351
349/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */ 352/* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
350extern __be32 root_nfs_parse_addr(char *name); /*__init*/ 353extern __be32 root_nfs_parse_addr(char *name); /*__init*/
351 354extern unsigned long nfs_inc_attr_generation_counter(void);
352static inline void nfs_fattr_init(struct nfs_fattr *fattr)
353{
354 fattr->valid = 0;
355 fattr->time_start = jiffies;
356}
357 355
358/* 356/*
359 * linux/fs/nfs/file.c 357 * linux/fs/nfs/file.c
@@ -372,8 +370,12 @@ static inline struct nfs_open_context *nfs_file_open_context(struct file *filp)
372 370
373static inline struct rpc_cred *nfs_file_cred(struct file *file) 371static inline struct rpc_cred *nfs_file_cred(struct file *file)
374{ 372{
375 if (file != NULL) 373 if (file != NULL) {
376 return nfs_file_open_context(file)->cred; 374 struct nfs_open_context *ctx =
375 nfs_file_open_context(file);
376 if (ctx)
377 return ctx->cred;
378 }
377 return NULL; 379 return NULL;
378} 380}
379 381
@@ -534,12 +536,6 @@ static inline void nfs3_forget_cached_acls(struct inode *inode)
534#endif /* CONFIG_NFS_V3_ACL */ 536#endif /* CONFIG_NFS_V3_ACL */
535 537
536/* 538/*
537 * linux/fs/mount_clnt.c
538 */
539extern int nfs_mount(struct sockaddr *, size_t, char *, char *,
540 int, int, struct nfs_fh *);
541
542/*
 543 * inline functions 539 * inline functions
544 */ 540 */
545 541
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index c9beacd16c00..9bb81aec91cf 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -42,12 +42,6 @@ struct nfs_client {
42 struct rb_root cl_openowner_id; 42 struct rb_root cl_openowner_id;
43 struct rb_root cl_lockowner_id; 43 struct rb_root cl_lockowner_id;
44 44
45 /*
46 * The following rwsem ensures exclusive access to the server
47 * while we recover the state following a lease expiration.
48 */
49 struct rw_semaphore cl_sem;
50
51 struct list_head cl_delegations; 45 struct list_head cl_delegations;
52 struct rb_root cl_state_owners; 46 struct rb_root cl_state_owners;
53 spinlock_t cl_lock; 47 spinlock_t cl_lock;
@@ -119,7 +113,6 @@ struct nfs_server {
119 void (*destroy)(struct nfs_server *); 113 void (*destroy)(struct nfs_server *);
120 114
121 atomic_t active; /* Keep trace of any activity to this server */ 115 atomic_t active; /* Keep trace of any activity to this server */
122 wait_queue_head_t active_wq; /* Wait for any activity to stop */
123 116
124 /* mountd-related mount options */ 117 /* mountd-related mount options */
125 struct sockaddr_storage mountd_address; 118 struct sockaddr_storage mountd_address;
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h
index df7c6b7a7ebb..4499016e6d0d 100644
--- a/include/linux/nfs_mount.h
+++ b/include/linux/nfs_mount.h
@@ -45,7 +45,7 @@ struct nfs_mount_data {
45 char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */ 45 char context[NFS_MAX_CONTEXT_LEN + 1]; /* 6 */
46}; 46};
47 47
48/* bits in the flags field */ 48/* bits in the flags field visible to user space */
49 49
50#define NFS_MOUNT_SOFT 0x0001 /* 1 */ 50#define NFS_MOUNT_SOFT 0x0001 /* 1 */
51#define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */ 51#define NFS_MOUNT_INTR 0x0002 /* 1 */ /* now unused, but ABI */
@@ -65,4 +65,9 @@ struct nfs_mount_data {
65#define NFS_MOUNT_UNSHARED 0x8000 /* 5 */ 65#define NFS_MOUNT_UNSHARED 0x8000 /* 5 */
66#define NFS_MOUNT_FLAGMASK 0xFFFF 66#define NFS_MOUNT_FLAGMASK 0xFFFF
67 67
68/* The following are for internal use only */
69#define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000
70#define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000
71#define NFS_MOUNT_NORESVPORT 0x40000
72
68#endif 73#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 8c77c11224d1..a550b528319f 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -36,6 +36,7 @@ struct nfs_fattr {
36 __u32 nlink; 36 __u32 nlink;
37 __u32 uid; 37 __u32 uid;
38 __u32 gid; 38 __u32 gid;
39 dev_t rdev;
39 __u64 size; 40 __u64 size;
40 union { 41 union {
41 struct { 42 struct {
@@ -46,7 +47,6 @@ struct nfs_fattr {
46 __u64 used; 47 __u64 used;
47 } nfs3; 48 } nfs3;
48 } du; 49 } du;
49 dev_t rdev;
50 struct nfs_fsid fsid; 50 struct nfs_fsid fsid;
51 __u64 fileid; 51 __u64 fileid;
52 struct timespec atime; 52 struct timespec atime;
@@ -56,6 +56,7 @@ struct nfs_fattr {
56 __u64 change_attr; /* NFSv4 change attribute */ 56 __u64 change_attr; /* NFSv4 change attribute */
57 __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ 57 __u64 pre_change_attr;/* pre-op NFSv4 change attribute */
58 unsigned long time_start; 58 unsigned long time_start;
59 unsigned long gencount;
59}; 60};
60 61
61#define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */ 62#define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */
@@ -119,13 +120,14 @@ struct nfs_openargs {
119 const struct nfs_fh * fh; 120 const struct nfs_fh * fh;
120 struct nfs_seqid * seqid; 121 struct nfs_seqid * seqid;
121 int open_flags; 122 int open_flags;
123 fmode_t fmode;
122 __u64 clientid; 124 __u64 clientid;
123 __u64 id; 125 __u64 id;
124 union { 126 union {
125 struct iattr * attrs; /* UNCHECKED, GUARDED */ 127 struct iattr * attrs; /* UNCHECKED, GUARDED */
126 nfs4_verifier verifier; /* EXCLUSIVE */ 128 nfs4_verifier verifier; /* EXCLUSIVE */
127 nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ 129 nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
128 int delegation_type; /* CLAIM_PREVIOUS */ 130 fmode_t delegation_type; /* CLAIM_PREVIOUS */
129 } u; 131 } u;
130 const struct qstr * name; 132 const struct qstr * name;
131 const struct nfs_server *server; /* Needed for ID mapping */ 133 const struct nfs_server *server; /* Needed for ID mapping */
@@ -142,7 +144,7 @@ struct nfs_openres {
142 struct nfs_fattr * dir_attr; 144 struct nfs_fattr * dir_attr;
143 struct nfs_seqid * seqid; 145 struct nfs_seqid * seqid;
144 const struct nfs_server *server; 146 const struct nfs_server *server;
145 int delegation_type; 147 fmode_t delegation_type;
146 nfs4_stateid delegation; 148 nfs4_stateid delegation;
147 __u32 do_recall; 149 __u32 do_recall;
148 __u64 maxsize; 150 __u64 maxsize;
@@ -170,7 +172,7 @@ struct nfs_closeargs {
170 struct nfs_fh * fh; 172 struct nfs_fh * fh;
171 nfs4_stateid * stateid; 173 nfs4_stateid * stateid;
172 struct nfs_seqid * seqid; 174 struct nfs_seqid * seqid;
173 int open_flags; 175 fmode_t fmode;
174 const u32 * bitmask; 176 const u32 * bitmask;
175}; 177};
176 178
@@ -672,16 +674,16 @@ struct nfs4_rename_res {
672 struct nfs_fattr * new_fattr; 674 struct nfs_fattr * new_fattr;
673}; 675};
674 676
675#define NFS4_SETCLIENTID_NAMELEN (56) 677#define NFS4_SETCLIENTID_NAMELEN (127)
676struct nfs4_setclientid { 678struct nfs4_setclientid {
677 const nfs4_verifier * sc_verifier; 679 const nfs4_verifier * sc_verifier;
678 unsigned int sc_name_len; 680 unsigned int sc_name_len;
679 char sc_name[NFS4_SETCLIENTID_NAMELEN]; 681 char sc_name[NFS4_SETCLIENTID_NAMELEN + 1];
680 u32 sc_prog; 682 u32 sc_prog;
681 unsigned int sc_netid_len; 683 unsigned int sc_netid_len;
682 char sc_netid[RPCBIND_MAXNETIDLEN]; 684 char sc_netid[RPCBIND_MAXNETIDLEN + 1];
683 unsigned int sc_uaddr_len; 685 unsigned int sc_uaddr_len;
684 char sc_uaddr[RPCBIND_MAXUADDRLEN]; 686 char sc_uaddr[RPCBIND_MAXUADDRLEN + 1];
685 u32 sc_cb_ident; 687 u32 sc_cb_ident;
686}; 688};
687 689
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index d0fe2e378452..128298c0362d 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -124,6 +124,8 @@ struct nfs4_client {
124 nfs4_verifier cl_verifier; /* generated by client */ 124 nfs4_verifier cl_verifier; /* generated by client */
125 time_t cl_time; /* time of last lease renewal */ 125 time_t cl_time; /* time of last lease renewal */
126 __be32 cl_addr; /* client ipaddress */ 126 __be32 cl_addr; /* client ipaddress */
127 u32 cl_flavor; /* setclientid pseudoflavor */
128 char *cl_principal; /* setclientid principal name */
127 struct svc_cred cl_cred; /* setclientid principal */ 129 struct svc_cred cl_cred; /* setclientid principal */
128 clientid_t cl_clientid; /* generated by server */ 130 clientid_t cl_clientid; /* generated by server */
129 nfs4_verifier cl_confirm; /* generated by server */ 131 nfs4_verifier cl_confirm; /* generated by server */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 9bad65400fba..e86ed59f9ad5 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -3,7 +3,26 @@
3/* 3/*
4 * 802.11 netlink interface public header 4 * 802.11 netlink interface public header
5 * 5 *
6 * Copyright 2006, 2007 Johannes Berg <johannes@sipsolutions.net> 6 * Copyright 2006, 2007, 2008 Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2008 Michael Wu <flamingice@sourmilk.net>
8 * Copyright 2008 Luis Carlos Cobo <luisca@cozybit.com>
9 * Copyright 2008 Michael Buesch <mb@bu3sch.de>
10 * Copyright 2008 Luis R. Rodriguez <lrodriguez@atheros.com>
11 * Copyright 2008 Jouni Malinen <jouni.malinen@atheros.com>
12 * Copyright 2008 Colin McCabe <colin@cozybit.com>
13 *
14 * Permission to use, copy, modify, and/or distribute this software for any
15 * purpose with or without fee is hereby granted, provided that the above
16 * copyright notice and this permission notice appear in all copies.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
19 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
20 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
21 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
22 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
23 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
24 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
25 *
7 */ 26 */
8 27
9/** 28/**
@@ -25,8 +44,10 @@
25 * 44 *
26 * @NL80211_CMD_GET_WIPHY: request information about a wiphy or dump request 45 * @NL80211_CMD_GET_WIPHY: request information about a wiphy or dump request
27 * to get a list of all present wiphys. 46 * to get a list of all present wiphys.
28 * @NL80211_CMD_SET_WIPHY: set wiphy name, needs %NL80211_ATTR_WIPHY and 47 * @NL80211_CMD_SET_WIPHY: set wiphy parameters, needs %NL80211_ATTR_WIPHY or
29 * %NL80211_ATTR_WIPHY_NAME. 48 * %NL80211_ATTR_IFINDEX; can be used to set %NL80211_ATTR_WIPHY_NAME,
49 * %NL80211_ATTR_WIPHY_TXQ_PARAMS, %NL80211_ATTR_WIPHY_FREQ, and/or
50 * %NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET.
30 * @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request 51 * @NL80211_CMD_NEW_WIPHY: Newly created wiphy, response to get request
31 * or rename notification. Has attributes %NL80211_ATTR_WIPHY and 52 * or rename notification. Has attributes %NL80211_ATTR_WIPHY and
32 * %NL80211_ATTR_WIPHY_NAME. 53 * %NL80211_ATTR_WIPHY_NAME.
@@ -106,6 +127,12 @@
 106 * to the specified ISO/IEC 3166-1 alpha2 country code. The core will 127 * to the specified ISO/IEC 3166-1 alpha2 country code. The core will
107 * store this as a valid request and then query userspace for it. 128 * store this as a valid request and then query userspace for it.
108 * 129 *
130 * @NL80211_CMD_GET_MESH_PARAMS: Get mesh networking properties for the
131 * interface identified by %NL80211_ATTR_IFINDEX
132 *
133 * @NL80211_CMD_SET_MESH_PARAMS: Set mesh networking properties for the
134 * interface identified by %NL80211_ATTR_IFINDEX
135 *
109 * @NL80211_CMD_MAX: highest used command number 136 * @NL80211_CMD_MAX: highest used command number
110 * @__NL80211_CMD_AFTER_LAST: internal use 137 * @__NL80211_CMD_AFTER_LAST: internal use
111 */ 138 */
@@ -148,6 +175,9 @@ enum nl80211_commands {
148 NL80211_CMD_SET_REG, 175 NL80211_CMD_SET_REG,
149 NL80211_CMD_REQ_SET_REG, 176 NL80211_CMD_REQ_SET_REG,
150 177
178 NL80211_CMD_GET_MESH_PARAMS,
179 NL80211_CMD_SET_MESH_PARAMS,
180
151 /* add new commands above here */ 181 /* add new commands above here */
152 182
153 /* used to define NL80211_CMD_MAX below */ 183 /* used to define NL80211_CMD_MAX below */
@@ -169,6 +199,15 @@ enum nl80211_commands {
169 * @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf. 199 * @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf.
170 * /sys/class/ieee80211/<phyname>/index 200 * /sys/class/ieee80211/<phyname>/index
171 * @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming) 201 * @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming)
202 * @NL80211_ATTR_WIPHY_TXQ_PARAMS: a nested array of TX queue parameters
203 * @NL80211_ATTR_WIPHY_FREQ: frequency of the selected channel in MHz
204 * @NL80211_ATTR_WIPHY_CHANNEL_TYPE: included with NL80211_ATTR_WIPHY_FREQ
205 * if HT20 or HT40 are allowed (i.e., 802.11n disabled if not included):
206 * NL80211_CHAN_NO_HT = HT not allowed (i.e., same as not including
207 * this attribute)
208 * NL80211_CHAN_HT20 = HT20 only
209 * NL80211_CHAN_HT40MINUS = secondary channel is below the primary channel
210 * NL80211_CHAN_HT40PLUS = secondary channel is above the primary channel
172 * 211 *
173 * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on 212 * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on
174 * @NL80211_ATTR_IFNAME: network interface name 213 * @NL80211_ATTR_IFNAME: network interface name
@@ -234,6 +273,9 @@ enum nl80211_commands {
234 * (u8, 0 or 1) 273 * (u8, 0 or 1)
235 * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled 274 * @NL80211_ATTR_BSS_SHORT_SLOT_TIME: whether short slot time enabled
236 * (u8, 0 or 1) 275 * (u8, 0 or 1)
276 * @NL80211_ATTR_BSS_BASIC_RATES: basic rates, array of basic
277 * rates in format defined by IEEE 802.11 7.3.2.2 but without the length
278 * restriction (at most %NL80211_MAX_SUPP_RATES).
237 * 279 *
238 * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from 280 * @NL80211_ATTR_HT_CAPABILITY: HT Capability information element (from
239 * association request when used with NL80211_CMD_NEW_STATION) 281 * association request when used with NL80211_CMD_NEW_STATION)
@@ -296,6 +338,14 @@ enum nl80211_attrs {
296 NL80211_ATTR_REG_ALPHA2, 338 NL80211_ATTR_REG_ALPHA2,
297 NL80211_ATTR_REG_RULES, 339 NL80211_ATTR_REG_RULES,
298 340
341 NL80211_ATTR_MESH_PARAMS,
342
343 NL80211_ATTR_BSS_BASIC_RATES,
344
345 NL80211_ATTR_WIPHY_TXQ_PARAMS,
346 NL80211_ATTR_WIPHY_FREQ,
347 NL80211_ATTR_WIPHY_CHANNEL_TYPE,
348
299 /* add attributes here, update the policy in nl80211.c */ 349 /* add attributes here, update the policy in nl80211.c */
300 350
301 __NL80211_ATTR_AFTER_LAST, 351 __NL80211_ATTR_AFTER_LAST,
@@ -307,6 +357,10 @@ enum nl80211_attrs {
307 * here 357 * here
308 */ 358 */
309#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY 359#define NL80211_ATTR_HT_CAPABILITY NL80211_ATTR_HT_CAPABILITY
360#define NL80211_ATTR_BSS_BASIC_RATES NL80211_ATTR_BSS_BASIC_RATES
361#define NL80211_ATTR_WIPHY_TXQ_PARAMS NL80211_ATTR_WIPHY_TXQ_PARAMS
362#define NL80211_ATTR_WIPHY_FREQ NL80211_ATTR_WIPHY_FREQ
363#define NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET NL80211_ATTR_WIPHY_SEC_CHAN_OFFSET
310 364
311#define NL80211_MAX_SUPP_RATES 32 365#define NL80211_MAX_SUPP_RATES 32
312#define NL80211_MAX_SUPP_REG_RULES 32 366#define NL80211_MAX_SUPP_REG_RULES 32
@@ -371,6 +425,32 @@ enum nl80211_sta_flags {
371}; 425};
372 426
373/** 427/**
428 * enum nl80211_rate_info - bitrate information
429 *
 430 * These attribute types are used with %NL80211_STA_INFO_TX_BITRATE
431 * when getting information about the bitrate of a station.
432 *
433 * @__NL80211_RATE_INFO_INVALID: attribute number 0 is reserved
434 * @NL80211_RATE_INFO_BITRATE: total bitrate (u16, 100kbit/s)
435 * @NL80211_RATE_INFO_MCS: mcs index for 802.11n (u8)
 436 * @NL80211_RATE_INFO_40_MHZ_WIDTH: 40 MHz dual-channel bitrate
437 * @NL80211_RATE_INFO_SHORT_GI: 400ns guard interval
438 * @NL80211_RATE_INFO_MAX: highest rate_info number currently defined
439 * @__NL80211_RATE_INFO_AFTER_LAST: internal use
440 */
441enum nl80211_rate_info {
442 __NL80211_RATE_INFO_INVALID,
443 NL80211_RATE_INFO_BITRATE,
444 NL80211_RATE_INFO_MCS,
445 NL80211_RATE_INFO_40_MHZ_WIDTH,
446 NL80211_RATE_INFO_SHORT_GI,
447
448 /* keep last */
449 __NL80211_RATE_INFO_AFTER_LAST,
450 NL80211_RATE_INFO_MAX = __NL80211_RATE_INFO_AFTER_LAST - 1
451};
452
453/**
374 * enum nl80211_sta_info - station information 454 * enum nl80211_sta_info - station information
375 * 455 *
376 * These attribute types are used with %NL80211_ATTR_STA_INFO 456 * These attribute types are used with %NL80211_ATTR_STA_INFO
@@ -382,6 +462,9 @@ enum nl80211_sta_flags {
382 * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station) 462 * @NL80211_STA_INFO_TX_BYTES: total transmitted bytes (u32, to this station)
383 * @__NL80211_STA_INFO_AFTER_LAST: internal 463 * @__NL80211_STA_INFO_AFTER_LAST: internal
384 * @NL80211_STA_INFO_MAX: highest possible station info attribute 464 * @NL80211_STA_INFO_MAX: highest possible station info attribute
465 * @NL80211_STA_INFO_SIGNAL: signal strength of last received PPDU (u8, dBm)
466 * @NL80211_STA_INFO_TX_BITRATE: current unicast tx rate, nested attribute
 467 * containing as much info as possible; see &enum nl80211_rate_info.
385 */ 468 */
386enum nl80211_sta_info { 469enum nl80211_sta_info {
387 __NL80211_STA_INFO_INVALID, 470 __NL80211_STA_INFO_INVALID,
@@ -391,6 +474,8 @@ enum nl80211_sta_info {
391 NL80211_STA_INFO_LLID, 474 NL80211_STA_INFO_LLID,
392 NL80211_STA_INFO_PLID, 475 NL80211_STA_INFO_PLID,
393 NL80211_STA_INFO_PLINK_STATE, 476 NL80211_STA_INFO_PLINK_STATE,
477 NL80211_STA_INFO_SIGNAL,
478 NL80211_STA_INFO_TX_BITRATE,
394 479
395 /* keep last */ 480 /* keep last */
396 __NL80211_STA_INFO_AFTER_LAST, 481 __NL80211_STA_INFO_AFTER_LAST,
@@ -452,17 +537,29 @@ enum nl80211_mpath_info {
452 * an array of nested frequency attributes 537 * an array of nested frequency attributes
453 * @NL80211_BAND_ATTR_RATES: supported bitrates in this band, 538 * @NL80211_BAND_ATTR_RATES: supported bitrates in this band,
454 * an array of nested bitrate attributes 539 * an array of nested bitrate attributes
540 * @NL80211_BAND_ATTR_HT_MCS_SET: 16-byte attribute containing the MCS set as
541 * defined in 802.11n
542 * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE
543 * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n
544 * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n
455 */ 545 */
456enum nl80211_band_attr { 546enum nl80211_band_attr {
457 __NL80211_BAND_ATTR_INVALID, 547 __NL80211_BAND_ATTR_INVALID,
458 NL80211_BAND_ATTR_FREQS, 548 NL80211_BAND_ATTR_FREQS,
459 NL80211_BAND_ATTR_RATES, 549 NL80211_BAND_ATTR_RATES,
460 550
551 NL80211_BAND_ATTR_HT_MCS_SET,
552 NL80211_BAND_ATTR_HT_CAPA,
553 NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
554 NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
555
461 /* keep last */ 556 /* keep last */
462 __NL80211_BAND_ATTR_AFTER_LAST, 557 __NL80211_BAND_ATTR_AFTER_LAST,
463 NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1 558 NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1
464}; 559};
465 560
561#define NL80211_BAND_ATTR_HT_CAPA NL80211_BAND_ATTR_HT_CAPA
562
466/** 563/**
467 * enum nl80211_frequency_attr - frequency attributes 564 * enum nl80211_frequency_attr - frequency attributes
468 * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz 565 * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz
@@ -474,6 +571,8 @@ enum nl80211_band_attr {
474 * on this channel in current regulatory domain. 571 * on this channel in current regulatory domain.
475 * @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory 572 * @NL80211_FREQUENCY_ATTR_RADAR: Radar detection is mandatory
476 * on this channel in current regulatory domain. 573 * on this channel in current regulatory domain.
574 * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm
575 * (100 * dBm).
477 */ 576 */
478enum nl80211_frequency_attr { 577enum nl80211_frequency_attr {
479 __NL80211_FREQUENCY_ATTR_INVALID, 578 __NL80211_FREQUENCY_ATTR_INVALID,
@@ -482,12 +581,15 @@ enum nl80211_frequency_attr {
482 NL80211_FREQUENCY_ATTR_PASSIVE_SCAN, 581 NL80211_FREQUENCY_ATTR_PASSIVE_SCAN,
483 NL80211_FREQUENCY_ATTR_NO_IBSS, 582 NL80211_FREQUENCY_ATTR_NO_IBSS,
484 NL80211_FREQUENCY_ATTR_RADAR, 583 NL80211_FREQUENCY_ATTR_RADAR,
584 NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
485 585
486 /* keep last */ 586 /* keep last */
487 __NL80211_FREQUENCY_ATTR_AFTER_LAST, 587 __NL80211_FREQUENCY_ATTR_AFTER_LAST,
488 NL80211_FREQUENCY_ATTR_MAX = __NL80211_FREQUENCY_ATTR_AFTER_LAST - 1 588 NL80211_FREQUENCY_ATTR_MAX = __NL80211_FREQUENCY_ATTR_AFTER_LAST - 1
489}; 589};
490 590
591#define NL80211_FREQUENCY_ATTR_MAX_TX_POWER NL80211_FREQUENCY_ATTR_MAX_TX_POWER
592
491/** 593/**
492 * enum nl80211_bitrate_attr - bitrate attributes 594 * enum nl80211_bitrate_attr - bitrate attributes
493 * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps 595 * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps
@@ -594,4 +696,119 @@ enum nl80211_mntr_flags {
594 NL80211_MNTR_FLAG_MAX = __NL80211_MNTR_FLAG_AFTER_LAST - 1 696 NL80211_MNTR_FLAG_MAX = __NL80211_MNTR_FLAG_AFTER_LAST - 1
595}; 697};
596 698
699/**
700 * enum nl80211_meshconf_params - mesh configuration parameters
701 *
702 * Mesh configuration parameters
703 *
704 * @__NL80211_MESHCONF_INVALID: internal use
705 *
706 * @NL80211_MESHCONF_RETRY_TIMEOUT: specifies the initial retry timeout in
707 * millisecond units, used by the Peer Link Open message
708 *
 709 * @NL80211_MESHCONF_CONFIRM_TIMEOUT: specifies the initial confirm timeout, in
710 * millisecond units, used by the peer link management to close a peer link
711 *
712 * @NL80211_MESHCONF_HOLDING_TIMEOUT: specifies the holding timeout, in
713 * millisecond units
714 *
715 * @NL80211_MESHCONF_MAX_PEER_LINKS: maximum number of peer links allowed
716 * on this mesh interface
717 *
718 * @NL80211_MESHCONF_MAX_RETRIES: specifies the maximum number of peer link
719 * open retries that can be sent to establish a new peer link instance in a
720 * mesh
721 *
722 * @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh
723 * point.
724 *
725 * @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically
726 * open peer links when we detect compatible mesh peers.
727 *
728 * @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames
729 * containing a PREQ that an MP can send to a particular destination (path
730 * target)
731 *
732 * @NL80211_MESHCONF_PATH_REFRESH_TIME: how frequently to refresh mesh paths
733 * (in milliseconds)
734 *
735 * @NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT: minimum length of time to wait
736 * until giving up on a path discovery (in milliseconds)
737 *
738 * @NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT: The time (in TUs) for which mesh
739 * points receiving a PREQ shall consider the forwarding information from the
740 * root to be valid. (TU = time unit)
741 *
742 * @NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL: The minimum interval of time (in
743 * TUs) during which an MP can send only one action frame containing a PREQ
744 * reference element
745 *
746 * @NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME: The interval of time (in TUs)
747 * that it takes for an HWMP information element to propagate across the mesh
748 *
749 * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
750 *
751 * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
752 */
753enum nl80211_meshconf_params {
754 __NL80211_MESHCONF_INVALID,
755 NL80211_MESHCONF_RETRY_TIMEOUT,
756 NL80211_MESHCONF_CONFIRM_TIMEOUT,
757 NL80211_MESHCONF_HOLDING_TIMEOUT,
758 NL80211_MESHCONF_MAX_PEER_LINKS,
759 NL80211_MESHCONF_MAX_RETRIES,
760 NL80211_MESHCONF_TTL,
761 NL80211_MESHCONF_AUTO_OPEN_PLINKS,
762 NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
763 NL80211_MESHCONF_PATH_REFRESH_TIME,
764 NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
765 NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
766 NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
767 NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
768
769 /* keep last */
770 __NL80211_MESHCONF_ATTR_AFTER_LAST,
771 NL80211_MESHCONF_ATTR_MAX = __NL80211_MESHCONF_ATTR_AFTER_LAST - 1
772};
773
774/**
775 * enum nl80211_txq_attr - TX queue parameter attributes
776 * @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved
777 * @NL80211_TXQ_ATTR_QUEUE: TX queue identifier (NL80211_TXQ_Q_*)
778 * @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning
779 * disabled
780 * @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form
781 * 2^n-1 in the range 1..32767]
782 * @NL80211_TXQ_ATTR_CWMAX: Maximum contention window [a value of the form
783 * 2^n-1 in the range 1..32767]
784 * @NL80211_TXQ_ATTR_AIFS: Arbitration interframe space [0..255]
785 * @__NL80211_TXQ_ATTR_AFTER_LAST: Internal
786 * @NL80211_TXQ_ATTR_MAX: Maximum TXQ attribute number
787 */
788enum nl80211_txq_attr {
789 __NL80211_TXQ_ATTR_INVALID,
790 NL80211_TXQ_ATTR_QUEUE,
791 NL80211_TXQ_ATTR_TXOP,
792 NL80211_TXQ_ATTR_CWMIN,
793 NL80211_TXQ_ATTR_CWMAX,
794 NL80211_TXQ_ATTR_AIFS,
795
796 /* keep last */
797 __NL80211_TXQ_ATTR_AFTER_LAST,
798 NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1
799};
800
801enum nl80211_txq_q {
802 NL80211_TXQ_Q_VO,
803 NL80211_TXQ_Q_VI,
804 NL80211_TXQ_Q_BE,
805 NL80211_TXQ_Q_BK
806};
807
808enum nl80211_channel_type {
809 NL80211_CHAN_NO_HT,
810 NL80211_CHAN_HT20,
811 NL80211_CHAN_HT40MINUS,
812 NL80211_CHAN_HT40PLUS
813};
597#endif /* __LINUX_NL80211_H */ 814#endif /* __LINUX_NL80211_H */
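A few hedged helpers restating the units documented above: NL80211_FREQUENCY_ATTR_MAX_TX_POWER is carried in mBm (100 * dBm), NL80211_RATE_INFO_BITRATE in 100 kbit/s steps, and the TXQ contention windows must be of the form 2^n - 1 within 1..32767; none of these helpers is part of the nl80211 interface:

#include <linux/types.h>

static inline int example_mbm_to_dbm(int mbm)
{
        return mbm / 100;                       /* 2000 mBm -> 20 dBm */
}

static inline unsigned int example_rate_to_kbps(u16 rate)
{
        return rate * 100;                      /* rate is in 100 kbit/s units */
}

static inline int example_cw_is_valid(int cw)
{
        /* 1, 3, 7, ..., 32767: one less than a power of two */
        return cw >= 1 && cw <= 32767 && ((cw + 1) & cw) == 0;
}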
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index c8a768e59640..afad7dec1b36 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -27,7 +27,6 @@ struct nsproxy {
27 struct ipc_namespace *ipc_ns; 27 struct ipc_namespace *ipc_ns;
28 struct mnt_namespace *mnt_ns; 28 struct mnt_namespace *mnt_ns;
29 struct pid_namespace *pid_ns; 29 struct pid_namespace *pid_ns;
30 struct user_namespace *user_ns;
31 struct net *net_ns; 30 struct net *net_ns;
32}; 31};
33extern struct nsproxy init_nsproxy; 32extern struct nsproxy init_nsproxy;
diff --git a/include/linux/of.h b/include/linux/of.h
index 79886ade070f..6a7efa242f5e 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -57,6 +57,12 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
57 for (child = of_get_next_child(parent, NULL); child != NULL; \ 57 for (child = of_get_next_child(parent, NULL); child != NULL; \
58 child = of_get_next_child(parent, child)) 58 child = of_get_next_child(parent, child))
59 59
60extern struct device_node *of_find_node_with_property(
61 struct device_node *from, const char *prop_name);
62#define for_each_node_with_property(dn, prop_name) \
63 for (dn = of_find_node_with_property(NULL, prop_name); dn; \
64 dn = of_find_node_with_property(dn, prop_name))
65
60extern struct property *of_find_property(const struct device_node *np, 66extern struct property *of_find_property(const struct device_node *np,
61 const char *name, 67 const char *name,
62 int *lenp); 68 int *lenp);
@@ -71,5 +77,8 @@ extern int of_n_size_cells(struct device_node *np);
71extern const struct of_device_id *of_match_node( 77extern const struct of_device_id *of_match_node(
72 const struct of_device_id *matches, const struct device_node *node); 78 const struct of_device_id *matches, const struct device_node *node);
73extern int of_modalias_node(struct device_node *node, char *modalias, int len); 79extern int of_modalias_node(struct device_node *node, char *modalias, int len);
80extern int of_parse_phandles_with_args(struct device_node *np,
81 const char *list_name, const char *cells_name, int index,
82 struct device_node **out_node, const void **out_args);
74 83
75#endif /* _LINUX_OF_H */ 84#endif /* _LINUX_OF_H */
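A short sketch using the new for_each_node_with_property() iterator declared above; the helper itself is not part of the header:

#include <linux/of.h>

static unsigned int example_count_nodes_with_property(const char *prop_name)
{
        struct device_node *dn;
        unsigned int count = 0;

        for_each_node_with_property(dn, prop_name)
                count++;

        return count;
}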
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 67db101d0eb8..fc2472c3c254 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -14,9 +14,22 @@
14#ifndef __LINUX_OF_GPIO_H 14#ifndef __LINUX_OF_GPIO_H
15#define __LINUX_OF_GPIO_H 15#define __LINUX_OF_GPIO_H
16 16
17#include <linux/compiler.h>
18#include <linux/kernel.h>
17#include <linux/errno.h> 19#include <linux/errno.h>
18#include <linux/gpio.h> 20#include <linux/gpio.h>
19 21
22struct device_node;
23
24/*
 25 * These are Linux-specific flags. By default the controller's and Linux's
 26 * mappings match, but GPIO controllers are free to translate their own flags
 27 * to the Linux-specific ones in their .xlate callback; a 1:1 mapping is recommended.
28 */
29enum of_gpio_flags {
30 OF_GPIO_ACTIVE_LOW = 0x1,
31};
32
20#ifdef CONFIG_OF_GPIO 33#ifdef CONFIG_OF_GPIO
21 34
22/* 35/*
@@ -26,7 +39,7 @@ struct of_gpio_chip {
26 struct gpio_chip gc; 39 struct gpio_chip gc;
27 int gpio_cells; 40 int gpio_cells;
28 int (*xlate)(struct of_gpio_chip *of_gc, struct device_node *np, 41 int (*xlate)(struct of_gpio_chip *of_gc, struct device_node *np,
29 const void *gpio_spec); 42 const void *gpio_spec, enum of_gpio_flags *flags);
30}; 43};
31 44
32static inline struct of_gpio_chip *to_of_gpio_chip(struct gpio_chip *gc) 45static inline struct of_gpio_chip *to_of_gpio_chip(struct gpio_chip *gc)
@@ -50,20 +63,43 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
50 return container_of(of_gc, struct of_mm_gpio_chip, of_gc); 63 return container_of(of_gc, struct of_mm_gpio_chip, of_gc);
51} 64}
52 65
53extern int of_get_gpio(struct device_node *np, int index); 66extern int of_get_gpio_flags(struct device_node *np, int index,
67 enum of_gpio_flags *flags);
68extern unsigned int of_gpio_count(struct device_node *np);
69
54extern int of_mm_gpiochip_add(struct device_node *np, 70extern int of_mm_gpiochip_add(struct device_node *np,
55 struct of_mm_gpio_chip *mm_gc); 71 struct of_mm_gpio_chip *mm_gc);
56extern int of_gpio_simple_xlate(struct of_gpio_chip *of_gc, 72extern int of_gpio_simple_xlate(struct of_gpio_chip *of_gc,
57 struct device_node *np, 73 struct device_node *np,
58 const void *gpio_spec); 74 const void *gpio_spec,
75 enum of_gpio_flags *flags);
59#else 76#else
60 77
61/* Drivers may not strictly depend on the GPIO support, so let them link. */ 78/* Drivers may not strictly depend on the GPIO support, so let them link. */
62static inline int of_get_gpio(struct device_node *np, int index) 79static inline int of_get_gpio_flags(struct device_node *np, int index,
80 enum of_gpio_flags *flags)
63{ 81{
64 return -ENOSYS; 82 return -ENOSYS;
65} 83}
66 84
85static inline unsigned int of_gpio_count(struct device_node *np)
86{
87 return 0;
88}
89
67#endif /* CONFIG_OF_GPIO */ 90#endif /* CONFIG_OF_GPIO */
68 91
92/**
93 * of_get_gpio - Get a GPIO number to use with GPIO API
94 * @np: device node to get GPIO from
95 * @index: index of the GPIO
96 *
97 * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
98 * value on the error condition.
99 */
100static inline int of_get_gpio(struct device_node *np, int index)
101{
102 return of_get_gpio_flags(np, index, NULL);
103}
104
69#endif /* __LINUX_OF_GPIO_H */ 105#endif /* __LINUX_OF_GPIO_H */
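A hedged sketch of a consumer using of_get_gpio_flags() and honouring OF_GPIO_ACTIVE_LOW when driving the line; the generic gpio_request()/gpio_direction_output() calls are assumed and error handling is abbreviated:

#include <linux/gpio.h>
#include <linux/of_gpio.h>

static int example_gpio_assert(struct device_node *np, int index)
{
        enum of_gpio_flags flags;
        int gpio = of_get_gpio_flags(np, index, &flags);
        int err;

        if (gpio < 0)
                return gpio;

        err = gpio_request(gpio, "example");
        if (err)
                return err;

        /* Drive the logical "asserted" level, inverting for active-low lines. */
        return gpio_direction_output(gpio, (flags & OF_GPIO_ACTIVE_LOW) ? 0 : 1);
}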
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index a8efcfeea732..3d327b67d7e2 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -26,8 +26,7 @@ extern struct bus_type of_platform_bus_type;
26 26
27/* 27/*
28 * An of_platform_driver driver is attached to a basic of_device on 28 * An of_platform_driver driver is attached to a basic of_device on
29 * the "platform bus" (of_platform_bus_type) (or ISA, EBUS and SBUS 29 * the "platform bus" (of_platform_bus_type).
30 * busses on sparc).
31 */ 30 */
32struct of_platform_driver 31struct of_platform_driver
33{ 32{
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index bcb8f725427c..1ce9fe572e51 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -86,15 +86,7 @@ int oprofile_arch_init(struct oprofile_operations * ops);
86void oprofile_arch_exit(void); 86void oprofile_arch_exit(void);
87 87
88/** 88/**
89 * Add data to the event buffer. 89 * Add a sample. This may be called from any context.
90 * The data passed is free-form, but typically consists of
91 * file offsets, dcookies, context information, and ESCAPE codes.
92 */
93void add_event_entry(unsigned long data);
94
95/**
96 * Add a sample. This may be called from any context. Pass
97 * smp_processor_id() as cpu.
98 */ 90 */
99void oprofile_add_sample(struct pt_regs * const regs, unsigned long event); 91void oprofile_add_sample(struct pt_regs * const regs, unsigned long event);
100 92
@@ -162,5 +154,14 @@ int oprofilefs_ulong_from_user(unsigned long * val, char const __user * buf, siz
162 154
163/** lock for read/write safety */ 155/** lock for read/write safety */
164extern spinlock_t oprofilefs_lock; 156extern spinlock_t oprofilefs_lock;
157
158/**
159 * Add the contents of a circular buffer to the event buffer.
160 */
161void oprofile_put_buff(unsigned long *buf, unsigned int start,
162 unsigned int stop, unsigned int max);
163
164unsigned long oprofile_get_cpu_buffer_size(void);
165void oprofile_cpu_buffer_inc_smpl_lost(void);
165 166
166#endif /* OPROFILE_H */ 167#endif /* OPROFILE_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c74d3e875314..b12f93a3c345 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -93,6 +93,11 @@ enum pageflags {
93 PG_mappedtodisk, /* Has blocks allocated on-disk */ 93 PG_mappedtodisk, /* Has blocks allocated on-disk */
94 PG_reclaim, /* To be reclaimed asap */ 94 PG_reclaim, /* To be reclaimed asap */
95 PG_buddy, /* Page is free, on buddy lists */ 95 PG_buddy, /* Page is free, on buddy lists */
96 PG_swapbacked, /* Page is backed by RAM/swap */
97#ifdef CONFIG_UNEVICTABLE_LRU
98 PG_unevictable, /* Page is "unevictable" */
99 PG_mlocked, /* Page is vma mlocked */
100#endif
96#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 101#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
97 PG_uncached, /* Page has been mapped as uncached */ 102 PG_uncached, /* Page has been mapped as uncached */
98#endif 103#endif
@@ -161,6 +166,18 @@ static inline int Page##uname(struct page *page) \
161#define TESTSCFLAG(uname, lname) \ 166#define TESTSCFLAG(uname, lname) \
162 TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname) 167 TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
163 168
169#define SETPAGEFLAG_NOOP(uname) \
170static inline void SetPage##uname(struct page *page) { }
171
172#define CLEARPAGEFLAG_NOOP(uname) \
173static inline void ClearPage##uname(struct page *page) { }
174
175#define __CLEARPAGEFLAG_NOOP(uname) \
176static inline void __ClearPage##uname(struct page *page) { }
177
178#define TESTCLEARFLAG_FALSE(uname) \
179static inline int TestClearPage##uname(struct page *page) { return 0; }
180
164struct page; /* forward declaration */ 181struct page; /* forward declaration */
165 182
166TESTPAGEFLAG(Locked, locked) 183TESTPAGEFLAG(Locked, locked)
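The *_NOOP and *_FALSE helpers above exist so that callers can use the page-flag accessors unconditionally: with CONFIG_UNEVICTABLE_LRU off, SETPAGEFLAG_NOOP(Mlocked) still provides SetPageMlocked() as an empty inline and TESTCLEARFLAG_FALSE(Mlocked) makes TestClearPageMlocked() return 0. A minimal caller that compiles either way (the accounting it stands in for is hypothetical):

#include <linux/page-flags.h>

static inline void example_set_mlocked(struct page *page)
{
        SetPageMlocked(page);           /* empty inline when compiled out */
}

static inline int example_clear_mlocked(struct page *page)
{
        return TestClearPageMlocked(page);      /* always 0 when compiled out */
}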
@@ -169,6 +186,7 @@ PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
169PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) 186PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
170PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru) 187PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
171PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active) 188PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
189 TESTCLEARFLAG(Active, active)
172__PAGEFLAG(Slab, slab) 190__PAGEFLAG(Slab, slab)
173PAGEFLAG(Checked, checked) /* Used by some filesystems */ 191PAGEFLAG(Checked, checked) /* Used by some filesystems */
174PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ 192PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
@@ -176,6 +194,7 @@ PAGEFLAG(SavePinned, savepinned); /* Xen */
176PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) 194PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
177PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) 195PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
178 __SETPAGEFLAG(Private, private) 196 __SETPAGEFLAG(Private, private)
197PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
179 198
180__PAGEFLAG(SlobPage, slob_page) 199__PAGEFLAG(SlobPage, slob_page)
181__PAGEFLAG(SlobFree, slob_free) 200__PAGEFLAG(SlobFree, slob_free)
@@ -211,6 +230,25 @@ PAGEFLAG(SwapCache, swapcache)
211PAGEFLAG_FALSE(SwapCache) 230PAGEFLAG_FALSE(SwapCache)
212#endif 231#endif
213 232
233#ifdef CONFIG_UNEVICTABLE_LRU
234PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
235 TESTCLEARFLAG(Unevictable, unevictable)
236
237#define MLOCK_PAGES 1
238PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
239 TESTSCFLAG(Mlocked, mlocked)
240
241#else
242
243#define MLOCK_PAGES 0
244PAGEFLAG_FALSE(Mlocked)
245 SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
246
247PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
248 SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
249 __CLEARPAGEFLAG_NOOP(Unevictable)
250#endif
251
214#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 252#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
215PAGEFLAG(Uncached, uncached) 253PAGEFLAG(Uncached, uncached)
216#else 254#else
@@ -326,15 +364,25 @@ static inline void __ClearPageTail(struct page *page)
326 364
327#endif /* !PAGEFLAGS_EXTENDED */ 365#endif /* !PAGEFLAGS_EXTENDED */
328 366
367#ifdef CONFIG_UNEVICTABLE_LRU
368#define __PG_UNEVICTABLE (1 << PG_unevictable)
369#define __PG_MLOCKED (1 << PG_mlocked)
370#else
371#define __PG_UNEVICTABLE 0
372#define __PG_MLOCKED 0
373#endif
374
329#define PAGE_FLAGS (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ 375#define PAGE_FLAGS (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \
330 1 << PG_buddy | 1 << PG_writeback | \ 376 1 << PG_buddy | 1 << PG_writeback | \
331 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active) 377 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
378 __PG_UNEVICTABLE | __PG_MLOCKED)
332 379
333/* 380/*
334 * Flags checked in bad_page(). Pages on the free list should not have 381 * Flags checked in bad_page(). Pages on the free list should not have
335 * these flags set. It they are, there is a problem. 382 * these flags set. It they are, there is a problem.
336 */ 383 */
337#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | 1 << PG_reclaim | 1 << PG_dirty) 384#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | \
385 1 << PG_reclaim | 1 << PG_dirty | 1 << PG_swapbacked)
338 386
339/* 387/*
340 * Flags checked when a page is freed. Pages being freed should not have 388 * Flags checked when a page is freed. Pages being freed should not have
@@ -347,7 +395,8 @@ static inline void __ClearPageTail(struct page *page)
 347 * Pages being prepped should not have these flags set. If they are, there 395 * Pages being prepped should not have these flags set. If they are, there
348 * is a problem. 396 * is a problem.
349 */ 397 */
350#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | 1 << PG_reserved | 1 << PG_dirty) 398#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | \
399 1 << PG_reserved | 1 << PG_dirty | 1 << PG_swapbacked)
351 400
352#endif /* !__GENERATING_BOUNDS_H */ 401#endif /* !__GENERATING_BOUNDS_H */
353#endif /* PAGE_FLAGS_H */ 402#endif /* PAGE_FLAGS_H */
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
new file mode 100644
index 000000000000..1e6d34bfa094
--- /dev/null
+++ b/include/linux/page_cgroup.h
@@ -0,0 +1,108 @@
1#ifndef __LINUX_PAGE_CGROUP_H
2#define __LINUX_PAGE_CGROUP_H
3
4#ifdef CONFIG_CGROUP_MEM_RES_CTLR
5#include <linux/bit_spinlock.h>
6/*
7 * Page Cgroup can be considered as an extended mem_map.
 8 * A page_cgroup is associated with every page descriptor; the
 9 * page_cgroup tells us which cgroup a page belongs to.
 10 * All page cgroups are allocated at boot or at memory hotplug time,
 11 * so the page cgroup for a given pfn always exists.
12 */
13struct page_cgroup {
14 unsigned long flags;
15 struct mem_cgroup *mem_cgroup;
16 struct page *page;
17 struct list_head lru; /* per cgroup LRU list */
18};
19
20void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
21void __init page_cgroup_init(void);
22struct page_cgroup *lookup_page_cgroup(struct page *page);
23
24enum {
25 /* flags for mem_cgroup */
26 PCG_LOCK, /* page cgroup is locked */
27 PCG_CACHE, /* charged as cache */
28 PCG_USED, /* this object is in use. */
29 /* flags for LRU placement */
30 PCG_ACTIVE, /* page is active in this cgroup */
31 PCG_FILE, /* page is file system backed */
32 PCG_UNEVICTABLE, /* page is unevictableable */
33};
34
35#define TESTPCGFLAG(uname, lname) \
36static inline int PageCgroup##uname(struct page_cgroup *pc) \
37 { return test_bit(PCG_##lname, &pc->flags); }
38
39#define SETPCGFLAG(uname, lname) \
40static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
41 { set_bit(PCG_##lname, &pc->flags); }
42
43#define CLEARPCGFLAG(uname, lname) \
44static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
45 { clear_bit(PCG_##lname, &pc->flags); }
46
47/* Cache flag is set only once (at allocation) */
48TESTPCGFLAG(Cache, CACHE)
49
50TESTPCGFLAG(Used, USED)
51CLEARPCGFLAG(Used, USED)
52
53/* LRU management flags (from global-lru definition) */
54TESTPCGFLAG(File, FILE)
55SETPCGFLAG(File, FILE)
56CLEARPCGFLAG(File, FILE)
57
58TESTPCGFLAG(Active, ACTIVE)
59SETPCGFLAG(Active, ACTIVE)
60CLEARPCGFLAG(Active, ACTIVE)
61
62TESTPCGFLAG(Unevictable, UNEVICTABLE)
63SETPCGFLAG(Unevictable, UNEVICTABLE)
64CLEARPCGFLAG(Unevictable, UNEVICTABLE)
65
66static inline int page_cgroup_nid(struct page_cgroup *pc)
67{
68 return page_to_nid(pc->page);
69}
70
71static inline enum zone_type page_cgroup_zid(struct page_cgroup *pc)
72{
73 return page_zonenum(pc->page);
74}
75
76static inline void lock_page_cgroup(struct page_cgroup *pc)
77{
78 bit_spin_lock(PCG_LOCK, &pc->flags);
79}
80
81static inline int trylock_page_cgroup(struct page_cgroup *pc)
82{
83 return bit_spin_trylock(PCG_LOCK, &pc->flags);
84}
85
86static inline void unlock_page_cgroup(struct page_cgroup *pc)
87{
88 bit_spin_unlock(PCG_LOCK, &pc->flags);
89}
90
91#else /* CONFIG_CGROUP_MEM_RES_CTLR */
92struct page_cgroup;
93
94static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
95{
96}
97
98static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
99{
100 return NULL;
101}
102
103static inline void page_cgroup_init(void)
104{
105}
106
107#endif
108#endif
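
Editor's note: as a rough idea of how the memory controller consults the new structure, the hedged sketch below looks up a page's page_cgroup, takes the PCG_LOCK bit-spinlock and tests the Used flag. Only helpers declared in the header above are used; the surrounding charge/uncharge logic and reference handling are deliberately elided.

/* Sketch: inspect the page_cgroup attached to a page
 * (kernel context, CONFIG_CGROUP_MEM_RES_CTLR=y assumed). */
#include <linux/page_cgroup.h>
#include <linux/mm.h>

static struct mem_cgroup *page_mem_cgroup_sketch(struct page *page)
{
        struct page_cgroup *pc = lookup_page_cgroup(page);
        struct mem_cgroup *memcg = NULL;

        if (!pc)
                return NULL;

        lock_page_cgroup(pc);           /* bit_spin_lock on PCG_LOCK */
        if (PageCgroupUsed(pc))         /* page is charged to some cgroup */
                memcg = pc->mem_cgroup;
        unlock_page_cgroup(pc);

        /* sketch only: a real caller would pin memcg before using it */
        return memcg;
}
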
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 5da31c12101c..709742be02f0 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -32,6 +32,34 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
32 } 32 }
33} 33}
34 34
35#ifdef CONFIG_UNEVICTABLE_LRU
36#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */
37
38static inline void mapping_set_unevictable(struct address_space *mapping)
39{
40 set_bit(AS_UNEVICTABLE, &mapping->flags);
41}
42
43static inline void mapping_clear_unevictable(struct address_space *mapping)
44{
45 clear_bit(AS_UNEVICTABLE, &mapping->flags);
46}
47
48static inline int mapping_unevictable(struct address_space *mapping)
49{
50 if (likely(mapping))
51 return test_bit(AS_UNEVICTABLE, &mapping->flags);
52 return !!mapping;
53}
54#else
55static inline void mapping_set_unevictable(struct address_space *mapping) { }
56static inline void mapping_clear_unevictable(struct address_space *mapping) { }
57static inline int mapping_unevictable(struct address_space *mapping)
58{
59 return 0;
60}
61#endif
62
35static inline gfp_t mapping_gfp_mask(struct address_space * mapping) 63static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
36{ 64{
37 return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; 65 return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
@@ -271,19 +299,19 @@ extern int __lock_page_killable(struct page *page);
271extern void __lock_page_nosync(struct page *page); 299extern void __lock_page_nosync(struct page *page);
272extern void unlock_page(struct page *page); 300extern void unlock_page(struct page *page);
273 301
274static inline void set_page_locked(struct page *page) 302static inline void __set_page_locked(struct page *page)
275{ 303{
276 set_bit(PG_locked, &page->flags); 304 __set_bit(PG_locked, &page->flags);
277} 305}
278 306
279static inline void clear_page_locked(struct page *page) 307static inline void __clear_page_locked(struct page *page)
280{ 308{
281 clear_bit(PG_locked, &page->flags); 309 __clear_bit(PG_locked, &page->flags);
282} 310}
283 311
284static inline int trylock_page(struct page *page) 312static inline int trylock_page(struct page *page)
285{ 313{
286 return !test_and_set_bit(PG_locked, &page->flags); 314 return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
287} 315}
288 316
289/* 317/*
@@ -410,17 +438,17 @@ extern void __remove_from_page_cache(struct page *page);
410 438
411/* 439/*
412 * Like add_to_page_cache_locked, but used to add newly allocated pages: 440 * Like add_to_page_cache_locked, but used to add newly allocated pages:
413 * the page is new, so we can just run set_page_locked() against it. 441 * the page is new, so we can just run __set_page_locked() against it.
414 */ 442 */
415static inline int add_to_page_cache(struct page *page, 443static inline int add_to_page_cache(struct page *page,
416 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) 444 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
417{ 445{
418 int error; 446 int error;
419 447
420 set_page_locked(page); 448 __set_page_locked(page);
421 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); 449 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
422 if (unlikely(error)) 450 if (unlikely(error))
423 clear_page_locked(page); 451 __clear_page_locked(page);
424 return error; 452 return error;
425} 453}
426 454
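
Editor's note: the rename to __set_page_locked()/__clear_page_locked() signals that these helpers now use the non-atomic __set_bit()/__clear_bit(), which is only safe while the page is not yet visible to other CPUs — exactly the add_to_page_cache() case shown above — while trylock_page() switches to test_and_set_bit_lock() for acquire semantics. A hedged sketch of the contrast:

/* Sketch: atomic vs. non-atomic PG_locked manipulation (kernel context). */
#include <linux/pagemap.h>

static void lock_contrast_sketch(struct page *newpage, struct page *shared)
{
        /* newpage was just allocated and is not yet in the page cache or
         * on any LRU, so nobody else can reach it: the cheap non-atomic
         * bit operation is sufficient. */
        __set_page_locked(newpage);

        /* shared is already visible through the page cache, so the lock
         * must be taken with an atomic test-and-set (acquire semantics). */
        if (trylock_page(shared)) {
                /* ... operate on the locked page ... */
                unlock_page(shared);
        }
}
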
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 8eb7fa76c1d0..e90a2cb02915 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -23,9 +23,9 @@ struct pagevec {
23void __pagevec_release(struct pagevec *pvec); 23void __pagevec_release(struct pagevec *pvec);
24void __pagevec_release_nonlru(struct pagevec *pvec); 24void __pagevec_release_nonlru(struct pagevec *pvec);
25void __pagevec_free(struct pagevec *pvec); 25void __pagevec_free(struct pagevec *pvec);
26void __pagevec_lru_add(struct pagevec *pvec); 26void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
27void __pagevec_lru_add_active(struct pagevec *pvec);
28void pagevec_strip(struct pagevec *pvec); 27void pagevec_strip(struct pagevec *pvec);
28void pagevec_swap_free(struct pagevec *pvec);
29unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, 29unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
30 pgoff_t start, unsigned nr_pages); 30 pgoff_t start, unsigned nr_pages);
31unsigned pagevec_lookup_tag(struct pagevec *pvec, 31unsigned pagevec_lookup_tag(struct pagevec *pvec,
@@ -81,10 +81,36 @@ static inline void pagevec_free(struct pagevec *pvec)
81 __pagevec_free(pvec); 81 __pagevec_free(pvec);
82} 82}
83 83
84static inline void pagevec_lru_add(struct pagevec *pvec) 84static inline void __pagevec_lru_add_anon(struct pagevec *pvec)
85{
86 ____pagevec_lru_add(pvec, LRU_INACTIVE_ANON);
87}
88
89static inline void __pagevec_lru_add_active_anon(struct pagevec *pvec)
90{
91 ____pagevec_lru_add(pvec, LRU_ACTIVE_ANON);
92}
93
94static inline void __pagevec_lru_add_file(struct pagevec *pvec)
95{
96 ____pagevec_lru_add(pvec, LRU_INACTIVE_FILE);
97}
98
99static inline void __pagevec_lru_add_active_file(struct pagevec *pvec)
100{
101 ____pagevec_lru_add(pvec, LRU_ACTIVE_FILE);
102}
103
104static inline void pagevec_lru_add_file(struct pagevec *pvec)
105{
106 if (pagevec_count(pvec))
107 __pagevec_lru_add_file(pvec);
108}
109
110static inline void pagevec_lru_add_anon(struct pagevec *pvec)
85{ 111{
86 if (pagevec_count(pvec)) 112 if (pagevec_count(pvec))
87 __pagevec_lru_add(pvec); 113 __pagevec_lru_add_anon(pvec);
88} 114}
89 115
90#endif /* _LINUX_PAGEVEC_H */ 116#endif /* _LINUX_PAGEVEC_H */
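
Editor's note: with the split LRU lists, callers of the pagevec helpers now pick the target list explicitly. The hedged sketch below batches pages onto the inactive file LRU, using only the helpers visible in this header plus pagevec_init()/pagevec_add(), which are assumed to keep their existing signatures.

/* Sketch: batch pages onto the inactive file LRU (kernel context). */
#include <linux/pagevec.h>

static void add_pages_to_file_lru_sketch(struct page **pages, int nr)
{
        struct pagevec pvec;
        int i;

        pagevec_init(&pvec, 0);
        for (i = 0; i < nr; i++) {
                /* pagevec_add() returns the space left; 0 means full */
                if (!pagevec_add(&pvec, pages[i]))
                        __pagevec_lru_add_file(&pvec);
        }
        /* flush whatever is left (no-op when the pagevec is empty) */
        pagevec_lru_add_file(&pvec);
}
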
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 6a0d7cdb5774..e1f83c5065c5 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -1,5 +1,3 @@
1/* $Id: parport.h,v 1.1 1998/05/17 10:57:52 andrea Exp andrea $ */
2
3/* 1/*
4 * Any part of this program may be used in documents licensed under 2 * Any part of this program may be used in documents licensed under
5 * the GNU Free Documentation License, Version 1.1 or any later version 3 * the GNU Free Documentation License, Version 1.1 or any later version
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 98dc6243a706..03b0b8c3c81b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -51,6 +51,7 @@
51#include <linux/kobject.h> 51#include <linux/kobject.h>
52#include <asm/atomic.h> 52#include <asm/atomic.h>
53#include <linux/device.h> 53#include <linux/device.h>
54#include <linux/io.h>
54 55
55/* Include the ID list */ 56/* Include the ID list */
56#include <linux/pci_ids.h> 57#include <linux/pci_ids.h>
@@ -64,6 +65,11 @@ struct pci_slot {
64 struct kobject kobj; 65 struct kobject kobj;
65}; 66};
66 67
68static inline const char *pci_slot_name(const struct pci_slot *slot)
69{
70 return kobject_name(&slot->kobj);
71}
72
67/* File state for mmap()s on /proc/bus/pci/X/Y */ 73/* File state for mmap()s on /proc/bus/pci/X/Y */
68enum pci_mmap_state { 74enum pci_mmap_state {
69 pci_mmap_io, 75 pci_mmap_io,
@@ -128,6 +134,11 @@ enum pci_dev_flags {
128 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, 134 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
129}; 135};
130 136
137enum pci_irq_reroute_variant {
138 INTEL_IRQ_REROUTE_VARIANT = 1,
139 MAX_IRQ_REROUTE_VARIANTS = 3
140};
141
131typedef unsigned short __bitwise pci_bus_flags_t; 142typedef unsigned short __bitwise pci_bus_flags_t;
132enum pci_bus_flags { 143enum pci_bus_flags {
133 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, 144 PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1,
@@ -212,8 +223,10 @@ struct pci_dev {
212 unsigned int no_msi:1; /* device may not use msi */ 223 unsigned int no_msi:1; /* device may not use msi */
213 unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ 224 unsigned int block_ucfg_access:1; /* userspace config space access is blocked */
214 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 225 unsigned int broken_parity_status:1; /* Device generates false positive parity */
226 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
215 unsigned int msi_enabled:1; 227 unsigned int msi_enabled:1;
216 unsigned int msix_enabled:1; 228 unsigned int msix_enabled:1;
229 unsigned int ari_enabled:1; /* ARI forwarding */
217 unsigned int is_managed:1; 230 unsigned int is_managed:1;
218 unsigned int is_pcie:1; 231 unsigned int is_pcie:1;
219 pci_dev_flags_t dev_flags; 232 pci_dev_flags_t dev_flags;
@@ -347,7 +360,6 @@ struct pci_bus_region {
347struct pci_dynids { 360struct pci_dynids {
348 spinlock_t lock; /* protects list, index */ 361 spinlock_t lock; /* protects list, index */
349 struct list_head list; /* for IDs added at runtime */ 362 struct list_head list; /* for IDs added at runtime */
350 unsigned int use_driver_data:1; /* pci_device_id->driver_data is used */
351}; 363};
352 364
353/* ---------------------------------------------------------------- */ 365/* ---------------------------------------------------------------- */
@@ -456,8 +468,8 @@ struct pci_driver {
456 468
457/** 469/**
458 * PCI_VDEVICE - macro used to describe a specific pci device in short form 470 * PCI_VDEVICE - macro used to describe a specific pci device in short form
459 * @vend: the vendor name 471 * @vendor: the vendor name
460 * @dev: the 16 bit PCI Device ID 472 * @device: the 16 bit PCI Device ID
461 * 473 *
462 * This macro is used to create a struct pci_device_id that matches a 474 * This macro is used to create a struct pci_device_id that matches a
463 * specific PCI device. The subvendor, and subdevice fields will be set 475 * specific PCI device. The subvendor, and subdevice fields will be set
@@ -509,9 +521,10 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
509struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, 521struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
510 int busnr); 522 int busnr);
511struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, 523struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
512 const char *name); 524 const char *name,
525 struct hotplug_slot *hotplug);
513void pci_destroy_slot(struct pci_slot *slot); 526void pci_destroy_slot(struct pci_slot *slot);
514void pci_update_slot_number(struct pci_slot *slot, int slot_nr); 527void pci_renumber_slot(struct pci_slot *slot, int slot_nr);
515int pci_scan_slot(struct pci_bus *bus, int devfn); 528int pci_scan_slot(struct pci_bus *bus, int devfn);
516struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn); 529struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn);
517void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); 530void pci_device_add(struct pci_dev *dev, struct pci_bus *bus);
@@ -539,6 +552,13 @@ struct pci_dev __deprecated *pci_find_slot(unsigned int bus,
539 unsigned int devfn); 552 unsigned int devfn);
540#endif /* CONFIG_PCI_LEGACY */ 553#endif /* CONFIG_PCI_LEGACY */
541 554
555enum pci_lost_interrupt_reason {
556 PCI_LOST_IRQ_NO_INFORMATION = 0,
557 PCI_LOST_IRQ_DISABLE_MSI,
558 PCI_LOST_IRQ_DISABLE_MSIX,
559 PCI_LOST_IRQ_DISABLE_ACPI,
560};
561enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *dev);
542int pci_find_capability(struct pci_dev *dev, int cap); 562int pci_find_capability(struct pci_dev *dev, int cap);
543int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap); 563int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap);
544int pci_find_ext_capability(struct pci_dev *dev, int cap); 564int pci_find_ext_capability(struct pci_dev *dev, int cap);
@@ -626,11 +646,15 @@ int pcix_get_mmrbc(struct pci_dev *dev);
626int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc); 646int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
627int pcie_get_readrq(struct pci_dev *dev); 647int pcie_get_readrq(struct pci_dev *dev);
628int pcie_set_readrq(struct pci_dev *dev, int rq); 648int pcie_set_readrq(struct pci_dev *dev, int rq);
649int pci_reset_function(struct pci_dev *dev);
650int pci_execute_reset_function(struct pci_dev *dev);
629void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); 651void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno);
630int __must_check pci_assign_resource(struct pci_dev *dev, int i); 652int __must_check pci_assign_resource(struct pci_dev *dev, int i);
631int pci_select_bars(struct pci_dev *dev, unsigned long flags); 653int pci_select_bars(struct pci_dev *dev, unsigned long flags);
632 654
633/* ROM control related routines */ 655/* ROM control related routines */
656int pci_enable_rom(struct pci_dev *pdev);
657void pci_disable_rom(struct pci_dev *pdev);
634void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); 658void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
635void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); 659void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
636size_t pci_get_rom_size(void __iomem *rom, size_t size); 660size_t pci_get_rom_size(void __iomem *rom, size_t size);
@@ -643,6 +667,7 @@ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
643bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); 667bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
644void pci_pme_active(struct pci_dev *dev, bool enable); 668void pci_pme_active(struct pci_dev *dev, bool enable);
645int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); 669int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable);
670int pci_wake_from_d3(struct pci_dev *dev, bool enable);
646pci_power_t pci_target_state(struct pci_dev *dev); 671pci_power_t pci_target_state(struct pci_dev *dev);
647int pci_prepare_to_sleep(struct pci_dev *dev); 672int pci_prepare_to_sleep(struct pci_dev *dev);
648int pci_back_from_sleep(struct pci_dev *dev); 673int pci_back_from_sleep(struct pci_dev *dev);
@@ -723,7 +748,7 @@ enum pci_dma_burst_strategy {
723}; 748};
724 749
725struct msix_entry { 750struct msix_entry {
726 u16 vector; /* kernel uses to write allocated vector */ 751 u32 vector; /* kernel uses to write allocated vector */
727 u16 entry; /* driver uses to specify entry, OS writes */ 752 u16 entry; /* driver uses to specify entry, OS writes */
728}; 753};
729 754
@@ -1116,5 +1141,20 @@ static inline void pci_mmcfg_early_init(void) { }
1116static inline void pci_mmcfg_late_init(void) { } 1141static inline void pci_mmcfg_late_init(void) { }
1117#endif 1142#endif
1118 1143
1144#ifdef CONFIG_HAS_IOMEM
1145static inline void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
1146{
1147 /*
1148 * Make sure the BAR is actually a memory resource, not an IO resource
1149 */
1150 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
1151 WARN_ON(1);
1152 return NULL;
1153 }
1154 return ioremap_nocache(pci_resource_start(pdev, bar),
1155 pci_resource_len(pdev, bar));
1156}
1157#endif
1158
1119#endif /* __KERNEL__ */ 1159#endif /* __KERNEL__ */
1120#endif /* LINUX_PCI_H */ 1160#endif /* LINUX_PCI_H */
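
Editor's note: pci_ioremap_bar(), added above, removes the usual pci_resource_start()/pci_resource_len() boilerplate from drivers. A hedged probe-path sketch follows; the driver name and private structure are invented for illustration, and region reservation/cleanup is kept minimal.

/* Sketch: mapping BAR 0 with the new helper in a driver probe routine. */
#include <linux/pci.h>
#include <linux/slab.h>

struct foo_priv { void __iomem *regs; };        /* hypothetical driver state */

static int foo_probe_sketch(struct pci_dev *pdev)
{
        struct foo_priv *priv;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                pci_disable_device(pdev);
                return -ENOMEM;
        }

        /* Fails (returns NULL) if BAR 0 is not a memory resource. */
        priv->regs = pci_ioremap_bar(pdev, 0);
        if (!priv->regs) {
                kfree(priv);
                pci_disable_device(pdev);
                return -ENODEV;
        }

        pci_set_drvdata(pdev, priv);
        return 0;
}
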
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index a08cd06b541a..a00bd1a0f156 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -142,8 +142,6 @@ struct hotplug_slot_info {
142 142
143/** 143/**
144 * struct hotplug_slot - used to register a physical slot with the hotplug pci core 144 * struct hotplug_slot - used to register a physical slot with the hotplug pci core
145 * @name: the name of the slot being registered. This string must
146 * be unique amoung slots registered on this system.
147 * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot 145 * @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
148 * @info: pointer to the &struct hotplug_slot_info for the initial values for 146 * @info: pointer to the &struct hotplug_slot_info for the initial values for
149 * this slot. 147 * this slot.
@@ -153,7 +151,6 @@ struct hotplug_slot_info {
153 * needs. 151 * needs.
154 */ 152 */
155struct hotplug_slot { 153struct hotplug_slot {
156 char *name;
157 struct hotplug_slot_ops *ops; 154 struct hotplug_slot_ops *ops;
158 struct hotplug_slot_info *info; 155 struct hotplug_slot_info *info;
159 void (*release) (struct hotplug_slot *slot); 156 void (*release) (struct hotplug_slot *slot);
@@ -165,7 +162,13 @@ struct hotplug_slot {
165}; 162};
166#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj) 163#define to_hotplug_slot(n) container_of(n, struct hotplug_slot, kobj)
167 164
168extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr); 165static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
166{
167 return pci_slot_name(slot->pci_slot);
168}
169
170extern int pci_hp_register(struct hotplug_slot *, struct pci_bus *, int nr,
171 const char *name);
169extern int pci_hp_deregister(struct hotplug_slot *slot); 172extern int pci_hp_deregister(struct hotplug_slot *slot);
170extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot, 173extern int __must_check pci_hp_change_slot_info (struct hotplug_slot *slot,
171 struct hotplug_slot_info *info); 174 struct hotplug_slot_info *info);
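
Editor's note: the slot name moves out of struct hotplug_slot; it is now passed to pci_hp_register() and read back through hotplug_slot_name(), which forwards to the kobject name of the underlying pci_slot. A hedged sketch of the new registration call (slot name and logging are illustrative):

/* Sketch: registering a hotplug slot with the new name parameter. */
#include <linux/kernel.h>
#include <linux/pci_hotplug.h>

static int register_slot_sketch(struct hotplug_slot *slot,
                                struct pci_bus *bus, int device_nr)
{
        int ret;

        /* name is a parameter now, not a field of struct hotplug_slot */
        ret = pci_hp_register(slot, bus, device_nr, "slot0");
        if (ret)
                return ret;

        printk(KERN_INFO "registered hotplug slot %s\n",
               hotplug_slot_name(slot));
        return 0;
}
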
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 1176f1f177e2..b6e694454280 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -587,6 +587,7 @@
587#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520 587#define PCI_DEVICE_ID_MATROX_G200_PCI 0x0520
588#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521 588#define PCI_DEVICE_ID_MATROX_G200_AGP 0x0521
589#define PCI_DEVICE_ID_MATROX_G400 0x0525 589#define PCI_DEVICE_ID_MATROX_G400 0x0525
590#define PCI_DEVICE_ID_MATROX_G200EV_PCI 0x0530
590#define PCI_DEVICE_ID_MATROX_G550 0x2527 591#define PCI_DEVICE_ID_MATROX_G550 0x2527
591#define PCI_DEVICE_ID_MATROX_VIA 0x4536 592#define PCI_DEVICE_ID_MATROX_VIA 0x4536
592 593
@@ -1943,6 +1944,14 @@
1943 1944
1944#define PCI_VENDOR_ID_OXSEMI 0x1415 1945#define PCI_VENDOR_ID_OXSEMI 0x1415
1945#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403 1946#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
1947#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000
1948#define PCI_DEVICE_ID_OXSEMI_PCIe840_G 0xC004
1949#define PCI_DEVICE_ID_OXSEMI_PCIe952_0 0xC100
1950#define PCI_DEVICE_ID_OXSEMI_PCIe952_0_G 0xC104
1951#define PCI_DEVICE_ID_OXSEMI_PCIe952_1 0xC110
1952#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_G 0xC114
1953#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118
1954#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C
1946#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 1955#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
1947#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 1956#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
1948#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 1957#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
@@ -2295,6 +2304,10 @@
2295#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329 2304#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
2296#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A 2305#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
2297#define PCI_DEVICE_ID_INTEL_PXHV 0x032C 2306#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
2307#define PCI_DEVICE_ID_INTEL_80332_0 0x0330
2308#define PCI_DEVICE_ID_INTEL_80332_1 0x0332
2309#define PCI_DEVICE_ID_INTEL_80333_0 0x0370
2310#define PCI_DEVICE_ID_INTEL_80333_1 0x0372
2298#define PCI_DEVICE_ID_INTEL_82375 0x0482 2311#define PCI_DEVICE_ID_INTEL_82375 0x0482
2299#define PCI_DEVICE_ID_INTEL_82424 0x0483 2312#define PCI_DEVICE_ID_INTEL_82424 0x0483
2300#define PCI_DEVICE_ID_INTEL_82378 0x0484 2313#define PCI_DEVICE_ID_INTEL_82378 0x0484
@@ -2367,6 +2380,7 @@
2367#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4 2380#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
2368#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6 2381#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
2369#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab 2382#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
2383#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
2370#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500 2384#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
2371#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501 2385#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
2372#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530 2386#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
@@ -2447,15 +2461,16 @@
2447#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a 2461#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
2448#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e 2462#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
2449#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b 2463#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
2464#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
2450#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 2465#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
2451#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 2466#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
2452#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 2467#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
2453#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a 2468#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a
2454#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30 2469#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
2455#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60 2470#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
2456#define PCI_DEVICE_ID_INTEL_PCH_0 0x3b10 2471#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00
2457#define PCI_DEVICE_ID_INTEL_PCH_1 0x3b11 2472#define PCI_DEVICE_ID_INTEL_PCH_LPC_MAX 0x3b1f
2458#define PCI_DEVICE_ID_INTEL_PCH_2 0x3b30 2473#define PCI_DEVICE_ID_INTEL_PCH_SMBUS 0x3b30
2459#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f 2474#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
2460#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0 2475#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
2461#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5 2476#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 450684f7eaac..e5effd47ed74 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -377,6 +377,7 @@
377#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */ 377#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */
378#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */ 378#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */
379#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */ 379#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */
380#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
380#define PCI_EXP_DEVCTL 8 /* Device Control */ 381#define PCI_EXP_DEVCTL 8 /* Device Control */
381#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */ 382#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
382#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */ 383#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
@@ -389,6 +390,7 @@
389#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */ 390#define PCI_EXP_DEVCTL_AUX_PME 0x0400 /* Auxiliary Power PM Enable */
390#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */ 391#define PCI_EXP_DEVCTL_NOSNOOP_EN 0x0800 /* Enable No Snoop */
391#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */ 392#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
393#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
392#define PCI_EXP_DEVSTA 10 /* Device Status */ 394#define PCI_EXP_DEVSTA 10 /* Device Status */
393#define PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */ 395#define PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */
394#define PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */ 396#define PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */
@@ -419,6 +421,10 @@
419#define PCI_EXP_RTCTL_CRSSVE 0x10 /* CRS Software Visibility Enable */ 421#define PCI_EXP_RTCTL_CRSSVE 0x10 /* CRS Software Visibility Enable */
420#define PCI_EXP_RTCAP 30 /* Root Capabilities */ 422#define PCI_EXP_RTCAP 30 /* Root Capabilities */
421#define PCI_EXP_RTSTA 32 /* Root Status */ 423#define PCI_EXP_RTSTA 32 /* Root Status */
424#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
425#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
426#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
427#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */
422 428
423/* Extended Capabilities (PCI-X 2.0 and Express) */ 429/* Extended Capabilities (PCI-X 2.0 and Express) */
424#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff) 430#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -429,6 +435,7 @@
429#define PCI_EXT_CAP_ID_VC 2 435#define PCI_EXT_CAP_ID_VC 2
430#define PCI_EXT_CAP_ID_DSN 3 436#define PCI_EXT_CAP_ID_DSN 3
431#define PCI_EXT_CAP_ID_PWR 4 437#define PCI_EXT_CAP_ID_PWR 4
438#define PCI_EXT_CAP_ID_ARI 14
432 439
433/* Advanced Error Reporting */ 440/* Advanced Error Reporting */
434#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ 441#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
@@ -536,5 +543,14 @@
536#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */ 543#define HT_CAPTYPE_GEN3 0xD0 /* Generation 3 hypertransport configuration */
537#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */ 544#define HT_CAPTYPE_PM 0xE0 /* Hypertransport powermanagement configuration */
538 545
546/* Alternative Routing-ID Interpretation */
547#define PCI_ARI_CAP 0x04 /* ARI Capability Register */
548#define PCI_ARI_CAP_MFVC 0x0001 /* MFVC Function Groups Capability */
549#define PCI_ARI_CAP_ACS 0x0002 /* ACS Function Groups Capability */
550#define PCI_ARI_CAP_NFN(x) (((x) >> 8) & 0xff) /* Next Function Number */
551#define PCI_ARI_CTRL 0x06 /* ARI Control Register */
552#define PCI_ARI_CTRL_MFVC 0x0001 /* MFVC Function Groups Enable */
553#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
554#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
539 555
540#endif /* LINUX_PCI_REGS_H */ 556#endif /* LINUX_PCI_REGS_H */
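
Editor's note: the new ARI (Alternative Routing-ID Interpretation) register definitions pair with PCI_EXT_CAP_ID_ARI added earlier in this file. A hedged sketch of locating the capability and reading the next-function field, assuming the existing pci_find_ext_capability()/pci_read_config_word() helpers:

/* Sketch: query the ARI extended capability of a PCIe function. */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>

static int ari_next_function_sketch(struct pci_dev *dev)
{
        int pos;
        u16 cap;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
        if (!pos)
                return -ENODEV;         /* no ARI capability */

        pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);

        /* Next Function Number; 0 means this is the last function */
        return PCI_ARI_CAP_NFN(cap);
}
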
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
index bb01f8b92b56..7646637221f3 100644
--- a/include/linux/pfn.h
+++ b/include/linux/pfn.h
@@ -1,9 +1,13 @@
1#ifndef _LINUX_PFN_H_ 1#ifndef _LINUX_PFN_H_
2#define _LINUX_PFN_H_ 2#define _LINUX_PFN_H_
3 3
4#ifndef __ASSEMBLY__
5#include <linux/types.h>
6#endif
7
4#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) 8#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
5#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) 9#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
6#define PFN_DOWN(x) ((x) >> PAGE_SHIFT) 10#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
7#define PFN_PHYS(x) ((x) << PAGE_SHIFT) 11#define PFN_PHYS(x) ((phys_addr_t)(x) << PAGE_SHIFT)
8 12
9#endif 13#endif
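
Editor's note: the phys_addr_t cast in PFN_PHYS() matters on 32-bit kernels built with a 64-bit phys_addr_t (e.g. PAE): without it the shift is performed in unsigned long and the high bits are silently lost. A small worked example, assuming PAGE_SHIFT == 12:

/* Sketch: why PFN_PHYS() needs the phys_addr_t cast.
 * With PAGE_SHIFT == 12 and pfn == 0x100000 (the 4 GiB boundary):
 *
 *   (unsigned long)0x100000 << 12  == 0x0          (truncated to 32 bits)
 *   (phys_addr_t) 0x100000 << 12   == 0x100000000  (correct, 64-bit type)
 */
#include <linux/pfn.h>
#include <linux/types.h>

static inline phys_addr_t pfn_to_phys_sketch(unsigned long pfn)
{
        return PFN_PHYS(pfn);   /* now safe for pfns above the 4 GiB mark */
}
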
diff --git a/include/linux/phonet.h b/include/linux/phonet.h
index c9609f9aedac..4157faa857b6 100644
--- a/include/linux/phonet.h
+++ b/include/linux/phonet.h
@@ -72,6 +72,7 @@ struct phonetmsg {
72 } pn_msg_u; 72 } pn_msg_u;
73}; 73};
74#define PN_COMMON_MESSAGE 0xF0 74#define PN_COMMON_MESSAGE 0xF0
75#define PN_COMMGR 0x10
75#define PN_PREFIX 0xE0 /* resource for extended messages */ 76#define PN_PREFIX 0xE0 /* resource for extended messages */
76#define pn_submsg_id pn_msg_u.base.pn_submsg_id 77#define pn_submsg_id pn_msg_u.base.pn_submsg_id
77#define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id 78#define pn_e_submsg_id pn_msg_u.ext.pn_e_submsg_id
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 77c4ed60b982..d7e54d98869f 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -467,6 +467,8 @@ int genphy_restart_aneg(struct phy_device *phydev);
467int genphy_config_aneg(struct phy_device *phydev); 467int genphy_config_aneg(struct phy_device *phydev);
468int genphy_update_link(struct phy_device *phydev); 468int genphy_update_link(struct phy_device *phydev);
469int genphy_read_status(struct phy_device *phydev); 469int genphy_read_status(struct phy_device *phydev);
470int genphy_suspend(struct phy_device *phydev);
471int genphy_resume(struct phy_device *phydev);
470void phy_driver_unregister(struct phy_driver *drv); 472void phy_driver_unregister(struct phy_driver *drv);
471int phy_driver_register(struct phy_driver *new_driver); 473int phy_driver_register(struct phy_driver *new_driver);
472void phy_prepare_link(struct phy_device *phydev, 474void phy_prepare_link(struct phy_device *phydev,
diff --git a/include/linux/pid.h b/include/linux/pid.h
index d7e98ff8021e..bb206c56d1f0 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -147,9 +147,9 @@ pid_t pid_vnr(struct pid *pid);
147#define do_each_pid_task(pid, type, task) \ 147#define do_each_pid_task(pid, type, task) \
148 do { \ 148 do { \
149 struct hlist_node *pos___; \ 149 struct hlist_node *pos___; \
150 if (pid != NULL) \ 150 if ((pid) != NULL) \
151 hlist_for_each_entry_rcu((task), pos___, \ 151 hlist_for_each_entry_rcu((task), pos___, \
152 &pid->tasks[type], pids[type].node) { 152 &(pid)->tasks[type], pids[type].node) {
153 153
154 /* 154 /*
155 * Both old and new leaders may be attached to 155 * Both old and new leaders may be attached to
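
Editor's note: the extra parentheses around the macro parameter are plain macro hygiene. Without them an argument that is itself a compound expression (a conditional, for instance) binds incorrectly inside "pid != NULL" and "&pid->tasks[type]"; with them any expression is safe. The hedged sketch below shows the usual call pattern, using the existing find_vpid() and send_sig() helpers, which are assumed unchanged:

/* Sketch: signalling every task attached to a struct pid (kernel context). */
#include <linux/pid.h>
#include <linux/sched.h>

static void signal_pid_sketch(pid_t nr, int sig)
{
        struct task_struct *task;
        struct pid *pid;

        rcu_read_lock();
        pid = find_vpid(nr);
        do_each_pid_task(pid, PIDTYPE_PID, task) {
                send_sig(sig, task, 1);
        } while_each_pid_task(pid, PIDTYPE_PID, task);
        rcu_read_unlock();
}
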
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 1af82c4e17d4..d82fe825d62f 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -84,12 +84,6 @@ static inline struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
84 return tsk->nsproxy->pid_ns; 84 return tsk->nsproxy->pid_ns;
85} 85}
86 86
87static inline struct task_struct *task_child_reaper(struct task_struct *tsk)
88{
89 BUG_ON(tsk != current);
90 return tsk->nsproxy->pid_ns->child_reaper;
91}
92
93void pidhash_init(void); 87void pidhash_init(void);
94void pidmap_init(void); 88void pidmap_init(void);
95 89
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h
index 7cf7824df778..e6aa8482ad7a 100644
--- a/include/linux/pkt_cls.h
+++ b/include/linux/pkt_cls.h
@@ -394,6 +394,20 @@ enum
394 394
395#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1) 395#define TCA_BASIC_MAX (__TCA_BASIC_MAX - 1)
396 396
397
398/* Cgroup classifier */
399
400enum
401{
402 TCA_CGROUP_UNSPEC,
403 TCA_CGROUP_ACT,
404 TCA_CGROUP_POLICE,
405 TCA_CGROUP_EMATCHES,
406 __TCA_CGROUP_MAX,
407};
408
409#define TCA_CGROUP_MAX (__TCA_CGROUP_MAX - 1)
410
397/* Extended Matches */ 411/* Extended Matches */
398 412
399struct tcf_ematch_tree_hdr 413struct tcf_ematch_tree_hdr
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 5d921fa91a5b..e3f133adba78 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -500,4 +500,20 @@ struct tc_netem_corrupt
500 500
501#define NETEM_DIST_SCALE 8192 501#define NETEM_DIST_SCALE 8192
502 502
503/* DRR */
504
505enum
506{
507 TCA_DRR_UNSPEC,
508 TCA_DRR_QUANTUM,
509 __TCA_DRR_MAX
510};
511
512#define TCA_DRR_MAX (__TCA_DRR_MAX - 1)
513
514struct tc_drr_stats
515{
516 u32 deficit;
517};
518
503#endif 519#endif
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 95ac21ab3a09..4b8cc6a32479 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -37,6 +37,8 @@ extern int platform_add_devices(struct platform_device **, int);
37 37
38extern struct platform_device *platform_device_register_simple(const char *, int id, 38extern struct platform_device *platform_device_register_simple(const char *, int id,
39 struct resource *, unsigned int); 39 struct resource *, unsigned int);
40extern struct platform_device *platform_device_register_data(struct device *,
41 const char *, int, const void *, size_t);
40 42
41extern struct platform_device *platform_device_alloc(const char *name, int id); 43extern struct platform_device *platform_device_alloc(const char *name, int id);
42extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num); 44extern int platform_device_add_resources(struct platform_device *pdev, struct resource *res, unsigned int num);
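
Editor's note: platform_device_register_data() bundles allocation, platform-data attachment and registration into one call. A hedged sketch follows; the device name and data structure are invented, and the core is expected to copy the data (as platform_device_add_data() does), so a stack variable should be fine here.

/* Sketch: one-shot platform device registration with attached data. */
#include <linux/platform_device.h>

struct foo_pdata { int gpio; };         /* hypothetical platform data */

static struct platform_device *register_foo_sketch(struct device *parent)
{
        struct foo_pdata pdata = { .gpio = 42 };

        /* name, id (-1 = single instance), then the platform data blob */
        return platform_device_register_data(parent, "foo-device", -1,
                                             &pdata, sizeof(pdata));
}
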
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 4dcce54b6d76..42de4003c4ee 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -419,7 +419,7 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
419 419
420#define suspend_report_result(fn, ret) \ 420#define suspend_report_result(fn, ret) \
421 do { \ 421 do { \
422 __suspend_report_result(__FUNCTION__, fn, ret); \ 422 __suspend_report_result(__func__, fn, ret); \
423 } while (0) 423 } while (0)
424 424
425#else /* !CONFIG_PM_SLEEP */ 425#else /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index be764e514e35..ca3c88773028 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -22,9 +22,11 @@ struct pnp_dev;
22 * Resource Management 22 * Resource Management
23 */ 23 */
24#ifdef CONFIG_PNP 24#ifdef CONFIG_PNP
25struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int); 25struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned long type,
26 unsigned int num);
26#else 27#else
27static inline struct resource *pnp_get_resource(struct pnp_dev *dev, unsigned int type, unsigned int num) 28static inline struct resource *pnp_get_resource(struct pnp_dev *dev,
29 unsigned long type, unsigned int num)
28{ 30{
29 return NULL; 31 return NULL;
30} 32}
@@ -483,14 +485,4 @@ static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
483 485
484#endif /* CONFIG_PNP */ 486#endif /* CONFIG_PNP */
485 487
486#define pnp_err(format, arg...) printk(KERN_ERR "pnp: " format "\n" , ## arg)
487#define pnp_info(format, arg...) printk(KERN_INFO "pnp: " format "\n" , ## arg)
488#define pnp_warn(format, arg...) printk(KERN_WARNING "pnp: " format "\n" , ## arg)
489
490#ifdef CONFIG_PNP_DEBUG
491#define pnp_dbg(format, arg...) printk(KERN_DEBUG "pnp: " format "\n" , ## arg)
492#else
493#define pnp_dbg(format, arg...) do {} while (0)
494#endif
495
496#endif /* _LINUX_PNP_H */ 488#endif /* _LINUX_PNP_H */
diff --git a/include/linux/poll.h b/include/linux/poll.h
index ef453828877a..badd98ab06f6 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -114,11 +114,13 @@ void zero_fd_set(unsigned long nr, unsigned long *fdset)
114 114
115#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1) 115#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
116 116
117extern int do_select(int n, fd_set_bits *fds, s64 *timeout); 117extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
118extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds, 118extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
119 s64 *timeout); 119 struct timespec *end_time);
120extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, 120extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
121 fd_set __user *exp, s64 *timeout); 121 fd_set __user *exp, struct timespec *end_time);
122
123extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
122 124
123#endif /* KERNEL */ 125#endif /* KERNEL */
124 126
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index a7dd38f30ade..4f71bf4e628c 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -45,9 +45,11 @@ struct k_itimer {
45 int it_requeue_pending; /* waiting to requeue this timer */ 45 int it_requeue_pending; /* waiting to requeue this timer */
46#define REQUEUE_PENDING 1 46#define REQUEUE_PENDING 1
47 int it_sigev_notify; /* notify word of sigevent struct */ 47 int it_sigev_notify; /* notify word of sigevent struct */
48 int it_sigev_signo; /* signo word of sigevent struct */ 48 struct signal_struct *it_signal;
49 sigval_t it_sigev_value; /* value word of sigevent struct */ 49 union {
50 struct task_struct *it_process; /* process to send signal to */ 50 struct pid *it_pid; /* pid of process to send signal to */
51 struct task_struct *it_process; /* for clock_nanosleep */
52 };
51 struct sigqueue *sigq; /* signal queue entry. */ 53 struct sigqueue *sigq; /* signal queue entry. */
52 union { 54 union {
53 struct { 55 struct {
@@ -115,4 +117,6 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
115 117
116long clock_nanosleep_restart(struct restart_block *restart_block); 118long clock_nanosleep_restart(struct restart_block *restart_block);
117 119
120void update_rlimit_cpu(unsigned long rlim_new);
121
118#endif 122#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index ea96ead1d39d..f9348cba6dc1 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -165,6 +165,12 @@ struct power_supply_info {
165extern void power_supply_changed(struct power_supply *psy); 165extern void power_supply_changed(struct power_supply *psy);
166extern int power_supply_am_i_supplied(struct power_supply *psy); 166extern int power_supply_am_i_supplied(struct power_supply *psy);
167 167
168#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
169extern int power_supply_is_system_supplied(void);
170#else
171static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
172#endif
173
168extern int power_supply_register(struct device *parent, 174extern int power_supply_register(struct device *parent,
169 struct power_supply *psy); 175 struct power_supply *psy);
170extern void power_supply_unregister(struct power_supply *psy); 176extern void power_supply_unregister(struct power_supply *psy);
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 5ad79198d6f9..48d887e3c6e7 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -78,4 +78,11 @@
78#define PR_GET_SECUREBITS 27 78#define PR_GET_SECUREBITS 27
79#define PR_SET_SECUREBITS 28 79#define PR_SET_SECUREBITS 28
80 80
81/*
82 * Get/set the timerslack as used by poll/select/nanosleep
83 * A value of 0 means "use default"
84 */
85#define PR_SET_TIMERSLACK 29
86#define PR_GET_TIMERSLACK 30
87
81#endif /* _LINUX_PRCTL_H */ 88#endif /* _LINUX_PRCTL_H */
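
Editor's note: PR_SET_TIMERSLACK/PR_GET_TIMERSLACK are consumed from userspace through prctl(2); the value is the slack, in nanoseconds, that poll/select/nanosleep may add to a wakeup, and 0 restores the default. A small userspace example (the fallback defines are only needed on older headers):

/* Userspace sketch: give this task 50 microseconds of timer slack. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TIMERSLACK
#define PR_SET_TIMERSLACK 29
#define PR_GET_TIMERSLACK 30
#endif

int main(void)
{
        int slack;

        if (prctl(PR_SET_TIMERSLACK, 50000UL, 0, 0, 0) != 0)
                perror("PR_SET_TIMERSLACK");

        /* PR_GET_TIMERSLACK returns the current slack as the call result */
        slack = prctl(PR_GET_TIMERSLACK, 0, 0, 0, 0);
        printf("timer slack is now %d ns\n", slack);
        return 0;
}
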
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index fb61850d1cfc..b8bdb96eff78 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -97,12 +97,9 @@ struct vmcore {
97 97
98#ifdef CONFIG_PROC_FS 98#ifdef CONFIG_PROC_FS
99 99
100extern struct proc_dir_entry *proc_root_kcore;
101
102extern spinlock_t proc_subdir_lock; 100extern spinlock_t proc_subdir_lock;
103 101
104extern void proc_root_init(void); 102extern void proc_root_init(void);
105extern void proc_misc_init(void);
106 103
107void proc_flush_task(struct task_struct *task); 104void proc_flush_task(struct task_struct *task);
108struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *); 105struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *);
@@ -138,9 +135,6 @@ extern struct inode *proc_get_inode(struct super_block *, unsigned int, struct p
138extern int proc_readdir(struct file *, void *, filldir_t); 135extern int proc_readdir(struct file *, void *, filldir_t);
139extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); 136extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *);
140 137
141extern const struct file_operations proc_kcore_operations;
142extern const struct file_operations ppc_htab_operations;
143
144extern int pid_ns_prepare_proc(struct pid_namespace *ns); 138extern int pid_ns_prepare_proc(struct pid_namespace *ns);
145extern void pid_ns_release_proc(struct pid_namespace *ns); 139extern void pid_ns_release_proc(struct pid_namespace *ns);
146 140
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 7e7087239af5..a0fc32279fc0 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -19,10 +19,16 @@ struct notifier_block;
19 19
20#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS) 20#if defined(CONFIG_PROFILING) && defined(CONFIG_PROC_FS)
21void create_prof_cpu_mask(struct proc_dir_entry *de); 21void create_prof_cpu_mask(struct proc_dir_entry *de);
22int create_proc_profile(void);
22#else 23#else
23static inline void create_prof_cpu_mask(struct proc_dir_entry *de) 24static inline void create_prof_cpu_mask(struct proc_dir_entry *de)
24{ 25{
25} 26}
27
28static inline int create_proc_profile(void)
29{
30 return 0;
31}
26#endif 32#endif
27 33
28enum profile_type { 34enum profile_type {
@@ -35,7 +41,8 @@ enum profile_type {
35extern int prof_on __read_mostly; 41extern int prof_on __read_mostly;
36 42
37/* init basic kernel profiler */ 43/* init basic kernel profiler */
38void __init profile_init(void); 44int profile_init(void);
45int profile_setup(char *str);
39void profile_tick(int type); 46void profile_tick(int type);
40 47
41/* 48/*
@@ -84,9 +91,9 @@ struct pt_regs;
84 91
85#define prof_on 0 92#define prof_on 0
86 93
87static inline void profile_init(void) 94static inline int profile_init(void)
88{ 95{
89 return; 96 return 0;
90} 97}
91 98
92static inline void profile_tick(int type) 99static inline void profile_tick(int type)
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index ea7416c901d1..98b93ca4db06 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -94,7 +94,7 @@ extern void ptrace_notify(int exit_code);
94extern void __ptrace_link(struct task_struct *child, 94extern void __ptrace_link(struct task_struct *child,
95 struct task_struct *new_parent); 95 struct task_struct *new_parent);
96extern void __ptrace_unlink(struct task_struct *child); 96extern void __ptrace_unlink(struct task_struct *child);
97extern void ptrace_untrace(struct task_struct *child); 97extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
98#define PTRACE_MODE_READ 1 98#define PTRACE_MODE_READ 1
99#define PTRACE_MODE_ATTACH 2 99#define PTRACE_MODE_ATTACH 2
100/* Returns 0 on success, -errno on denial. */ 100/* Returns 0 on success, -errno on denial. */
@@ -314,6 +314,27 @@ static inline void user_enable_block_step(struct task_struct *task)
314#define arch_ptrace_stop(code, info) do { } while (0) 314#define arch_ptrace_stop(code, info) do { } while (0)
315#endif 315#endif
316 316
317#ifndef arch_ptrace_untrace
318/*
319 * Do machine-specific work before untracing child.
320 *
321 * This is called for a normal detach as well as from ptrace_exit()
322 * when the tracing task dies.
323 *
324 * Called with write_lock(&tasklist_lock) held.
325 */
326#define arch_ptrace_untrace(task) do { } while (0)
327#endif
328
329#ifndef arch_ptrace_fork
330/*
331 * Do machine-specific work to initialize a new task.
332 *
333 * This is called from copy_process().
334 */
335#define arch_ptrace_fork(child, clone_flags) do { } while (0)
336#endif
337
317extern int task_current_syscall(struct task_struct *target, long *callno, 338extern int task_current_syscall(struct task_struct *target, long *callno,
318 unsigned long args[6], unsigned int maxargs, 339 unsigned long args[6], unsigned int maxargs,
319 unsigned long *sp, unsigned long *pc); 340 unsigned long *sp, unsigned long *pc);
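
Editor's note: the two new hooks stay no-ops unless an architecture provides its own definitions before this header's #ifndef checks (typically from its own ptrace headers). A hedged sketch of what such an override could look like; the helper functions called here are hypothetical.

/* Sketch: an architecture overriding the default no-op ptrace hooks.
 * Would typically live in arch/<arch>/include/asm/ptrace.h. */
struct task_struct;

extern void my_arch_disable_hw_breakpoints(struct task_struct *task);
extern void my_arch_copy_debug_state(struct task_struct *child,
                                     unsigned long clone_flags);

#define arch_ptrace_untrace(task) \
        my_arch_disable_hw_breakpoints(task)

#define arch_ptrace_fork(child, clone_flags) \
        my_arch_copy_debug_state(child, clone_flags)
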
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 376a05048bc5..40401b554484 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -28,8 +28,6 @@
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE. 30 * SUCH DAMAGE.
31 *
32 * Version: $Id: quota.h,v 2.0 1996/11/17 16:48:14 mvw Exp mvw $
33 */ 31 */
34 32
35#ifndef _LINUX_QUOTA_ 33#ifndef _LINUX_QUOTA_
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index ca6b9b5c8d52..a558a4c1d35a 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -3,9 +3,6 @@
3 * macros expand to the right source-code. 3 * macros expand to the right source-code.
4 * 4 *
5 * Author: Marco van Wieringen <mvw@planets.elm.net> 5 * Author: Marco van Wieringen <mvw@planets.elm.net>
6 *
7 * Version: $Id: quotaops.h,v 1.2 1998/01/15 16:22:26 ecd Exp $
8 *
9 */ 6 */
10#ifndef _LINUX_QUOTAOPS_ 7#ifndef _LINUX_QUOTAOPS_
11#define _LINUX_QUOTAOPS_ 8#define _LINUX_QUOTAOPS_
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h
index 7e375111d007..f38b9c586afb 100644
--- a/include/linux/raid/linear.h
+++ b/include/linux/raid/linear.h
@@ -5,8 +5,8 @@
5 5
6struct dev_info { 6struct dev_info {
7 mdk_rdev_t *rdev; 7 mdk_rdev_t *rdev;
8 sector_t size; 8 sector_t num_sectors;
9 sector_t offset; 9 sector_t start_sector;
10}; 10};
11 11
12typedef struct dev_info dev_info_t; 12typedef struct dev_info dev_info_t;
@@ -15,9 +15,11 @@ struct linear_private_data
15{ 15{
16 struct linear_private_data *prev; /* earlier version */ 16 struct linear_private_data *prev; /* earlier version */
17 dev_info_t **hash_table; 17 dev_info_t **hash_table;
18 sector_t hash_spacing; 18 sector_t spacing;
19 sector_t array_sectors; 19 sector_t array_sectors;
20 int preshift; /* shift before dividing by hash_spacing */ 20 int sector_shift; /* shift before dividing
21 * by spacing
22 */
21 dev_info_t disks[0]; 23 dev_info_t disks[0];
22}; 24};
23 25
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index dc0e3fcb9f28..82bea14cae1a 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -19,27 +19,7 @@
19#define _MD_H 19#define _MD_H
20 20
21#include <linux/blkdev.h> 21#include <linux/blkdev.h>
22#include <linux/major.h>
23#include <linux/ioctl.h>
24#include <linux/types.h>
25#include <linux/bitops.h>
26#include <linux/module.h>
27#include <linux/hdreg.h>
28#include <linux/proc_fs.h>
29#include <linux/seq_file.h> 22#include <linux/seq_file.h>
30#include <linux/smp_lock.h>
31#include <linux/delay.h>
32#include <net/checksum.h>
33#include <linux/random.h>
34#include <linux/kernel_stat.h>
35#include <asm/io.h>
36#include <linux/completion.h>
37#include <linux/mempool.h>
38#include <linux/list.h>
39#include <linux/reboot.h>
40#include <linux/vmalloc.h>
41#include <linux/blkpg.h>
42#include <linux/bio.h>
43 23
44/* 24/*
45 * 'md_p.h' holds the 'physical' layout of RAID devices 25 * 'md_p.h' holds the 'physical' layout of RAID devices
@@ -74,19 +54,17 @@
74 54
75extern int mdp_major; 55extern int mdp_major;
76 56
77extern int register_md_personality (struct mdk_personality *p); 57extern int register_md_personality(struct mdk_personality *p);
78extern int unregister_md_personality (struct mdk_personality *p); 58extern int unregister_md_personality(struct mdk_personality *p);
79extern mdk_thread_t * md_register_thread (void (*run) (mddev_t *mddev), 59extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
80 mddev_t *mddev, const char *name); 60 mddev_t *mddev, const char *name);
81extern void md_unregister_thread (mdk_thread_t *thread); 61extern void md_unregister_thread(mdk_thread_t *thread);
82extern void md_wakeup_thread(mdk_thread_t *thread); 62extern void md_wakeup_thread(mdk_thread_t *thread);
83extern void md_check_recovery(mddev_t *mddev); 63extern void md_check_recovery(mddev_t *mddev);
84extern void md_write_start(mddev_t *mddev, struct bio *bi); 64extern void md_write_start(mddev_t *mddev, struct bio *bi);
85extern void md_write_end(mddev_t *mddev); 65extern void md_write_end(mddev_t *mddev);
86extern void md_handle_safemode(mddev_t *mddev);
87extern void md_done_sync(mddev_t *mddev, int blocks, int ok); 66extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
88extern void md_error (mddev_t *mddev, mdk_rdev_t *rdev); 67extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
89extern void md_unplug_mddev(mddev_t *mddev);
90 68
91extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 69extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
92 sector_t sector, int size, struct page *page); 70 sector_t sector, int size, struct page *page);
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index c200b9a34aff..8fc909ef6787 100644
--- a/include/linux/raid/md_k.h
+++ b/include/linux/raid/md_k.h
@@ -115,6 +115,9 @@ struct mdk_rdev_s
115 * in superblock. 115 * in superblock.
116 */ 116 */
117 struct work_struct del_work; /* used for delayed sysfs removal */ 117 struct work_struct del_work; /* used for delayed sysfs removal */
118
119 struct sysfs_dirent *sysfs_state; /* handle for 'state'
120 * sysfs entry */
118}; 121};
119 122
120struct mddev_s 123struct mddev_s
@@ -128,7 +131,6 @@ struct mddev_s
128#define MD_CHANGE_DEVS 0 /* Some device status has changed */ 131#define MD_CHANGE_DEVS 0 /* Some device status has changed */
129#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ 132#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
130#define MD_CHANGE_PENDING 2 /* superblock update in progress */ 133#define MD_CHANGE_PENDING 2 /* superblock update in progress */
131#define MD_NOTIFY_ARRAY_STATE 3 /* atomic context wants to notify userspace */
132 134
133 int ro; 135 int ro;
134 136
@@ -239,6 +241,10 @@ struct mddev_s
239 sector_t resync_max; /* resync should pause 241 sector_t resync_max; /* resync should pause
240 * when it gets here */ 242 * when it gets here */
241 243
244 struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
245 * file in sysfs.
246 */
247
242 spinlock_t write_lock; 248 spinlock_t write_lock;
243 wait_queue_head_t sb_wait; /* for waiting on superblock updates */ 249 wait_queue_head_t sb_wait; /* for waiting on superblock updates */
244 atomic_t pending_writes; /* number of active superblock writes */ 250 atomic_t pending_writes; /* number of active superblock writes */
diff --git a/include/linux/random.h b/include/linux/random.h
index 36f125c0c603..adbf3bd3c6b3 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -8,6 +8,7 @@
8#define _LINUX_RANDOM_H 8#define _LINUX_RANDOM_H
9 9
10#include <linux/ioctl.h> 10#include <linux/ioctl.h>
11#include <linux/irqnr.h>
11 12
12/* ioctl()'s for the random number generator */ 13/* ioctl()'s for the random number generator */
13 14
@@ -44,6 +45,56 @@ struct rand_pool_info {
44 45
45extern void rand_initialize_irq(int irq); 46extern void rand_initialize_irq(int irq);
46 47
48struct timer_rand_state;
49#ifndef CONFIG_SPARSE_IRQ
50
51extern struct timer_rand_state *irq_timer_state[];
52
53static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
54{
55 if (irq >= nr_irqs)
56 return NULL;
57
58 return irq_timer_state[irq];
59}
60
61static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
62{
63 if (irq >= nr_irqs)
64 return;
65
66 irq_timer_state[irq] = state;
67}
68
69#else
70
71#include <linux/irq.h>
72static inline struct timer_rand_state *get_timer_rand_state(unsigned int irq)
73{
74 struct irq_desc *desc;
75
76 desc = irq_to_desc(irq);
77
78 if (!desc)
79 return NULL;
80
81 return desc->timer_rand_state;
82}
83
84static inline void set_timer_rand_state(unsigned int irq, struct timer_rand_state *state)
85{
86 struct irq_desc *desc;
87
88 desc = irq_to_desc(irq);
89
90 if (!desc)
91 return;
92
93 desc->timer_rand_state = state;
94}
95#endif
96
97
47extern void add_input_randomness(unsigned int type, unsigned int code, 98extern void add_input_randomness(unsigned int type, unsigned int code,
48 unsigned int value); 99 unsigned int value);
49extern void add_interrupt_randomness(int irq); 100extern void add_interrupt_randomness(int irq);
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h
index 18a5b9ba9d40..00044b856453 100644
--- a/include/linux/ratelimit.h
+++ b/include/linux/ratelimit.h
@@ -17,11 +17,4 @@ struct ratelimit_state {
17 struct ratelimit_state name = {interval, burst,} 17 struct ratelimit_state name = {interval, burst,}
18 18
19extern int __ratelimit(struct ratelimit_state *rs); 19extern int __ratelimit(struct ratelimit_state *rs);
20
21static inline int ratelimit(void)
22{
23 static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
24 DEFAULT_RATELIMIT_BURST);
25 return __ratelimit(&rs);
26}
27#endif 20#endif
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 5f89b62e6983..301dda829e37 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -41,7 +41,7 @@
41#include <linux/seqlock.h> 41#include <linux/seqlock.h>
42 42
43#ifdef CONFIG_RCU_CPU_STALL_DETECTOR 43#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
44#define RCU_SECONDS_TILL_STALL_CHECK ( 3 * HZ) /* for rcp->jiffies_stall */ 44#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rcp->jiffies_stall */
45#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */ 45#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rcp->jiffies_stall */
46#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ 46#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
47 47
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
new file mode 100644
index 000000000000..f9ddd03961a8
--- /dev/null
+++ b/include/linux/rculist_nulls.h
@@ -0,0 +1,110 @@
1#ifndef _LINUX_RCULIST_NULLS_H
2#define _LINUX_RCULIST_NULLS_H
3
4#ifdef __KERNEL__
5
6/*
7 * RCU-protected list version
8 */
9#include <linux/list_nulls.h>
10#include <linux/rcupdate.h>
11
12/**
13 * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization
14 * @n: the element to delete from the hash list.
15 *
16 * Note: hlist_nulls_unhashed() on the node return true after this. It is
17 * useful for RCU based read lockfree traversal if the writer side
18 * must know if the list entry is still hashed or already unhashed.
19 *
20 * In particular, it means that we can not poison the forward pointers
21 * that may still be used for walking the hash list and we can only
22 * zero the pprev pointer so list_unhashed() will return true after
23 * this.
24 *
25 * The caller must take whatever precautions are necessary (such as
26 * holding appropriate locks) to avoid racing with another
27 * list-mutation primitive, such as hlist_nulls_add_head_rcu() or
28 * hlist_nulls_del_rcu(), running on this same list. However, it is
29 * perfectly legal to run concurrently with the _rcu list-traversal
30 * primitives, such as hlist_nulls_for_each_entry_rcu().
31 */
32static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
33{
34 if (!hlist_nulls_unhashed(n)) {
35 __hlist_nulls_del(n);
36 n->pprev = NULL;
37 }
38}
39
40/**
41 * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
42 * @n: the element to delete from the hash list.
43 *
44 * Note: hlist_nulls_unhashed() on entry does not return true after this,
45 * the entry is in an undefined state. It is useful for RCU based
46 * lockfree traversal.
47 *
48 * In particular, it means that we can not poison the forward
49 * pointers that may still be used for walking the hash list.
50 *
51 * The caller must take whatever precautions are necessary
52 * (such as holding appropriate locks) to avoid racing
53 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
54 * or hlist_nulls_del_rcu(), running on this same list.
55 * However, it is perfectly legal to run concurrently with
56 * the _rcu list-traversal primitives, such as
57 * hlist_nulls_for_each_entry().
58 */
59static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
60{
61 __hlist_nulls_del(n);
62 n->pprev = LIST_POISON2;
63}
64
65/**
66 * hlist_nulls_add_head_rcu
67 * @n: the element to add to the hash list.
68 * @h: the list to add to.
69 *
70 * Description:
71 * Adds the specified element to the specified hlist_nulls,
72 * while permitting racing traversals.
73 *
74 * The caller must take whatever precautions are necessary
75 * (such as holding appropriate locks) to avoid racing
76 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
77 * or hlist_nulls_del_rcu(), running on this same list.
78 * However, it is perfectly legal to run concurrently with
79 * the _rcu list-traversal primitives, such as
80 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
81 * problems on Alpha CPUs. Regardless of the type of CPU, the
82 * list-traversal primitive must be guarded by rcu_read_lock().
83 */
84static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
85 struct hlist_nulls_head *h)
86{
87 struct hlist_nulls_node *first = h->first;
88
89 n->next = first;
90 n->pprev = &h->first;
91 rcu_assign_pointer(h->first, n);
92 if (!is_a_nulls(first))
93 first->pprev = &n->next;
94}
95/**
96 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
97 * @tpos: the type * to use as a loop cursor.
98 * @pos: the &struct hlist_nulls_node to use as a loop cursor.
99 * @head: the head for your list.
100 * @member: the name of the hlist_nulls_node within the struct.
101 *
102 */
103#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
104 for (pos = rcu_dereference((head)->first); \
105 (!is_a_nulls(pos)) && \
106 ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
107 pos = rcu_dereference(pos->next))
108
109#endif
110#endif
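
Because rculist_nulls.h is new in this merge, a short usage sketch may help; the struct, field, and bucket-number names below are invented for illustration, and a real SLAB_DESTROY_BY_RCU user would also take a reference on the object before returning it:

struct item {
	struct hlist_nulls_node node;
	unsigned long key;
};

static struct item *item_lookup(struct hlist_nulls_head *bucket,
				unsigned long key, unsigned long bucket_nr)
{
	struct item *it;
	struct hlist_nulls_node *pos;

	rcu_read_lock();
begin:
	hlist_nulls_for_each_entry_rcu(it, pos, bucket, node) {
		if (it->key == key)
			goto out;
	}
	/*
	 * The chain ended in a nulls marker; if it is not the marker of
	 * the bucket we started in, an object was freed and reused on
	 * another chain while we walked, so restart the traversal.
	 */
	if (get_nulls_value(pos) != bucket_nr)
		goto begin;
	it = NULL;
out:
	rcu_read_unlock();
	return it;
}
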
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 86f1f5e43e33..1168fbcea8d4 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,11 +52,15 @@ struct rcu_head {
52 void (*func)(struct rcu_head *head); 52 void (*func)(struct rcu_head *head);
53}; 53};
54 54
55#ifdef CONFIG_CLASSIC_RCU 55#if defined(CONFIG_CLASSIC_RCU)
56#include <linux/rcuclassic.h> 56#include <linux/rcuclassic.h>
57#else /* #ifdef CONFIG_CLASSIC_RCU */ 57#elif defined(CONFIG_TREE_RCU)
58#include <linux/rcutree.h>
59#elif defined(CONFIG_PREEMPT_RCU)
58#include <linux/rcupreempt.h> 60#include <linux/rcupreempt.h>
59#endif /* #else #ifdef CONFIG_CLASSIC_RCU */ 61#else
62#error "Unknown RCU implementation specified to kernel configuration"
63#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
60 64
61#define RCU_HEAD_INIT { .next = NULL, .func = NULL } 65#define RCU_HEAD_INIT { .next = NULL, .func = NULL }
62#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT 66#define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
@@ -142,6 +146,7 @@ struct rcu_head {
142 * on the write-side to insure proper synchronization. 146 * on the write-side to insure proper synchronization.
143 */ 147 */
144#define rcu_read_lock_sched() preempt_disable() 148#define rcu_read_lock_sched() preempt_disable()
149#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
145 150
146/* 151/*
147 * rcu_read_unlock_sched - marks the end of a RCU-classic critical section 152 * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
@@ -149,6 +154,7 @@ struct rcu_head {
149 * See rcu_read_lock_sched for more information. 154 * See rcu_read_lock_sched for more information.
150 */ 155 */
151#define rcu_read_unlock_sched() preempt_enable() 156#define rcu_read_unlock_sched() preempt_enable()
157#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
152 158
153 159
154 160
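
The rcupdate.h hunk above adds _notrace variants of the sched-RCU markers (so the function tracer itself can use them without recursing into tracing) and makes the RCU flavour selection explicit. As a reminder of what these markers pair with, here is a small sched-RCU reader sketch; the structure and variable names are invented for the example, and the matching updater would use synchronize_sched() before freeing the old object:

struct config {
	int setting;
};

static struct config *active_cfg;	/* published elsewhere via rcu_assign_pointer() */

static int read_setting(void)
{
	struct config *cfg;
	int val = -1;

	rcu_read_lock_sched();		/* disables preemption */
	cfg = rcu_dereference(active_cfg);
	if (cfg)
		val = cfg->setting;
	rcu_read_unlock_sched();
	return val;
}
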
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
new file mode 100644
index 000000000000..d4368b7975c3
--- /dev/null
+++ b/include/linux/rcutree.h
@@ -0,0 +1,329 @@
1/*
2 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright IBM Corporation, 2008
19 *
20 * Author: Dipankar Sarma <dipankar@in.ibm.com>
21 * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm
22 *
23 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
24 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
25 *
26 * For detailed explanation of Read-Copy Update mechanism see -
27 * Documentation/RCU
28 */
29
30#ifndef __LINUX_RCUTREE_H
31#define __LINUX_RCUTREE_H
32
33#include <linux/cache.h>
34#include <linux/spinlock.h>
35#include <linux/threads.h>
36#include <linux/percpu.h>
37#include <linux/cpumask.h>
38#include <linux/seqlock.h>
39
40/*
41 * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
42 * In theory, it should be possible to add more levels straightforwardly.
43 * In practice, this has not been tested, so there is probably some
44 * bug somewhere.
45 */
46#define MAX_RCU_LVLS 3
47#define RCU_FANOUT (CONFIG_RCU_FANOUT)
48#define RCU_FANOUT_SQ (RCU_FANOUT * RCU_FANOUT)
49#define RCU_FANOUT_CUBE (RCU_FANOUT_SQ * RCU_FANOUT)
50
51#if NR_CPUS <= RCU_FANOUT
52# define NUM_RCU_LVLS 1
53# define NUM_RCU_LVL_0 1
54# define NUM_RCU_LVL_1 (NR_CPUS)
55# define NUM_RCU_LVL_2 0
56# define NUM_RCU_LVL_3 0
57#elif NR_CPUS <= RCU_FANOUT_SQ
58# define NUM_RCU_LVLS 2
59# define NUM_RCU_LVL_0 1
60# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
61# define NUM_RCU_LVL_2 (NR_CPUS)
62# define NUM_RCU_LVL_3 0
63#elif NR_CPUS <= RCU_FANOUT_CUBE
64# define NUM_RCU_LVLS 3
65# define NUM_RCU_LVL_0 1
66# define NUM_RCU_LVL_1 (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
67# define NUM_RCU_LVL_2 (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
68# define NUM_RCU_LVL_3 NR_CPUS
69#else
70# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
71#endif /* #if (NR_CPUS) <= RCU_FANOUT */
72
73#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
74#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
75
76/*
77 * Dynticks per-CPU state.
78 */
79struct rcu_dynticks {
80 int dynticks_nesting; /* Track nesting level, sort of. */
81 int dynticks; /* Even value for dynticks-idle, else odd. */
82 int dynticks_nmi; /* Even value for either dynticks-idle or */
83 /* not in nmi handler, else odd. So this */
84 /* remains even for nmi from irq handler. */
85};
86
87/*
88 * Definition for node within the RCU grace-period-detection hierarchy.
89 */
90struct rcu_node {
91 spinlock_t lock;
92 unsigned long qsmask; /* CPUs or groups that need to switch in */
93 /* order for current grace period to proceed.*/
94 unsigned long qsmaskinit;
95 /* Per-GP initialization for qsmask. */
96 unsigned long grpmask; /* Mask to apply to parent qsmask. */
97 int grplo; /* lowest-numbered CPU or group here. */
98 int grphi; /* highest-numbered CPU or group here. */
99 u8 grpnum; /* CPU/group number for next level up. */
100 u8 level; /* root is at level 0. */
101 struct rcu_node *parent;
102} ____cacheline_internodealigned_in_smp;
103
104/* Index values for nxttail array in struct rcu_data. */
105#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
106#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
107#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
108#define RCU_NEXT_TAIL 3
109#define RCU_NEXT_SIZE 4
110
111/* Per-CPU data for read-copy update. */
112struct rcu_data {
113 /* 1) quiescent-state and grace-period handling : */
114 long completed; /* Track rsp->completed gp number */
115 /* in order to detect GP end. */
116 long gpnum; /* Highest gp number that this CPU */
117 /* is aware of having started. */
118 long passed_quiesc_completed;
119 /* Value of completed at time of qs. */
120 bool passed_quiesc; /* User-mode/idle loop etc. */
121 bool qs_pending; /* Core waits for quiesc state. */
122 bool beenonline; /* CPU online at least once. */
123 struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
124 unsigned long grpmask; /* Mask to apply to leaf qsmask. */
125
126 /* 2) batch handling */
127 /*
128 * If nxtlist is not NULL, it is partitioned as follows.
129 * Any of the partitions might be empty, in which case the
130 * pointer to that partition will be equal to the pointer for
131 * the following partition. When the list is empty, all of
132 * the nxttail elements point to nxtlist, which is NULL.
133 *
134 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
135 * Entries that might have arrived after current GP ended
136 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
137 * Entries known to have arrived before current GP ended
138 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
139 * Entries that batch # <= ->completed - 1: waiting for current GP
140 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
141 * Entries that batch # <= ->completed
142 * The grace period for these entries has completed, and
143 * the other grace-period-completed entries may be moved
144 * here temporarily in rcu_process_callbacks().
145 */
146 struct rcu_head *nxtlist;
147 struct rcu_head **nxttail[RCU_NEXT_SIZE];
148 long qlen; /* # of queued callbacks */
149 long blimit; /* Upper limit on a processed batch */
150
151#ifdef CONFIG_NO_HZ
152 /* 3) dynticks interface. */
153 struct rcu_dynticks *dynticks; /* Shared per-CPU dynticks state. */
154 int dynticks_snap; /* Per-GP tracking for dynticks. */
155 int dynticks_nmi_snap; /* Per-GP tracking for dynticks_nmi. */
156#endif /* #ifdef CONFIG_NO_HZ */
157
158 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
159#ifdef CONFIG_NO_HZ
160 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
161#endif /* #ifdef CONFIG_NO_HZ */
162 unsigned long offline_fqs; /* Kicked due to being offline. */
163 unsigned long resched_ipi; /* Sent a resched IPI. */
164
165 /* 5) state to allow this CPU to force_quiescent_state on others */
166 long n_rcu_pending; /* rcu_pending() calls since boot. */
167 long n_rcu_pending_force_qs; /* when to force quiescent states. */
168
169 int cpu;
170};
171
172/* Values for signaled field in struct rcu_state. */
173#define RCU_GP_INIT 0 /* Grace period being initialized. */
174#define RCU_SAVE_DYNTICK 1 /* Need to scan dyntick state. */
175#define RCU_FORCE_QS 2 /* Need to force quiescent state. */
176#ifdef CONFIG_NO_HZ
177#define RCU_SIGNAL_INIT RCU_SAVE_DYNTICK
178#else /* #ifdef CONFIG_NO_HZ */
179#define RCU_SIGNAL_INIT RCU_FORCE_QS
180#endif /* #else #ifdef CONFIG_NO_HZ */
181
182#define RCU_JIFFIES_TILL_FORCE_QS 3 /* for rsp->jiffies_force_qs */
183#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
184#define RCU_SECONDS_TILL_STALL_CHECK (10 * HZ) /* for rsp->jiffies_stall */
185#define RCU_SECONDS_TILL_STALL_RECHECK (30 * HZ) /* for rsp->jiffies_stall */
186#define RCU_STALL_RAT_DELAY 2 /* Allow other CPUs time */
187 /* to take at least one */
188 /* scheduling clock irq */
189 /* before ratting on them. */
190
191#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
192
193/*
194 * RCU global state, including node hierarchy. This hierarchy is
195 * represented in "heap" form in a dense array. The root (first level)
196 * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
197 * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
198 * and the third level in ->node[m+1] and following (->node[m+1] referenced
199 * by ->level[2]). The number of levels is determined by the number of
200 * CPUs and by CONFIG_RCU_FANOUT. Small systems will have a "hierarchy"
201 * consisting of a single rcu_node.
202 */
203struct rcu_state {
204 struct rcu_node node[NUM_RCU_NODES]; /* Hierarchy. */
205 struct rcu_node *level[NUM_RCU_LVLS]; /* Hierarchy levels. */
206 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
207 u8 levelspread[NUM_RCU_LVLS]; /* kids/node in each level. */
208 struct rcu_data *rda[NR_CPUS]; /* array of rdp pointers. */
209
210 /* The following fields are guarded by the root rcu_node's lock. */
211
212 u8 signaled ____cacheline_internodealigned_in_smp;
213 /* Force QS state. */
214 long gpnum; /* Current gp number. */
215 long completed; /* # of last completed gp. */
216 spinlock_t onofflock; /* exclude on/offline and */
217 /* starting new GP. */
218 spinlock_t fqslock; /* Only one task forcing */
219 /* quiescent states. */
220 unsigned long jiffies_force_qs; /* Time at which to invoke */
221 /* force_quiescent_state(). */
222 unsigned long n_force_qs; /* Number of calls to */
223 /* force_quiescent_state(). */
224 unsigned long n_force_qs_lh; /* ~Number of calls leaving */
225 /* due to lock unavailable. */
226 unsigned long n_force_qs_ngp; /* Number of calls leaving */
227 /* due to no GP active. */
228#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
229 unsigned long gp_start; /* Time at which GP started, */
230 /* but in jiffies. */
231 unsigned long jiffies_stall; /* Time at which to check */
232 /* for CPU stalls. */
233#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
234#ifdef CONFIG_NO_HZ
235 long dynticks_completed; /* Value of completed @ snap. */
236#endif /* #ifdef CONFIG_NO_HZ */
237};
238
239extern struct rcu_state rcu_state;
240DECLARE_PER_CPU(struct rcu_data, rcu_data);
241
242extern struct rcu_state rcu_bh_state;
243DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
244
245/*
246 * Increment the quiescent state counter.
247 * The counter is somewhat degenerate: we do not need to know
248 * how many quiescent states passed, just if there was at least
249 * one since the start of the grace period. Thus just a flag.
250 */
251static inline void rcu_qsctr_inc(int cpu)
252{
253 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
254 rdp->passed_quiesc = 1;
255 rdp->passed_quiesc_completed = rdp->completed;
256}
257static inline void rcu_bh_qsctr_inc(int cpu)
258{
259 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
260 rdp->passed_quiesc = 1;
261 rdp->passed_quiesc_completed = rdp->completed;
262}
263
264extern int rcu_pending(int cpu);
265extern int rcu_needs_cpu(int cpu);
266
267#ifdef CONFIG_DEBUG_LOCK_ALLOC
268extern struct lockdep_map rcu_lock_map;
269# define rcu_read_acquire() \
270 lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
271# define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_)
272#else
273# define rcu_read_acquire() do { } while (0)
274# define rcu_read_release() do { } while (0)
275#endif
276
277static inline void __rcu_read_lock(void)
278{
279 preempt_disable();
280 __acquire(RCU);
281 rcu_read_acquire();
282}
283static inline void __rcu_read_unlock(void)
284{
285 rcu_read_release();
286 __release(RCU);
287 preempt_enable();
288}
289static inline void __rcu_read_lock_bh(void)
290{
291 local_bh_disable();
292 __acquire(RCU_BH);
293 rcu_read_acquire();
294}
295static inline void __rcu_read_unlock_bh(void)
296{
297 rcu_read_release();
298 __release(RCU_BH);
299 local_bh_enable();
300}
301
302#define __synchronize_sched() synchronize_rcu()
303
304#define call_rcu_sched(head, func) call_rcu(head, func)
305
306static inline void rcu_init_sched(void)
307{
308}
309
310extern void __rcu_init(void);
311extern void rcu_check_callbacks(int cpu, int user);
312extern void rcu_restart_cpu(int cpu);
313
314extern long rcu_batches_completed(void);
315extern long rcu_batches_completed_bh(void);
316
317#ifdef CONFIG_NO_HZ
318void rcu_enter_nohz(void);
319void rcu_exit_nohz(void);
320#else /* CONFIG_NO_HZ */
321static inline void rcu_enter_nohz(void)
322{
323}
324static inline void rcu_exit_nohz(void)
325{
326}
327#endif /* CONFIG_NO_HZ */
328
329#endif /* __LINUX_RCUTREE_H */
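
To make the fanout geometry defined near the top of this header concrete, here is the arithmetic for one illustrative configuration (the values are examples, not defaults set by this patch):

/*
 * Example: CONFIG_RCU_FANOUT = 64, NR_CPUS = 4096
 *   RCU_FANOUT_SQ  = 64 * 64 = 4096, so NR_CPUS <= RCU_FANOUT_SQ
 *   NUM_RCU_LVLS   = 2
 *   NUM_RCU_LVL_0  = 1                       (the root rcu_node)
 *   NUM_RCU_LVL_1  = (4096 + 63) / 64 = 64   (leaf rcu_node structures)
 *   NUM_RCU_LVL_2  = 4096                    (the CPUs themselves)
 *   RCU_SUM        = 1 + 64 + 4096 + 0 = 4161
 *   NUM_RCU_NODES  = RCU_SUM - NR_CPUS = 65  (one root plus 64 leaves)
 * Each leaf covers 64 CPUs through its qsmask and the root tracks the
 * 64 leaves, so no single lock is contended by all 4096 CPUs at once.
 */
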
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index e9963af16cda..bc5114d35e99 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -87,7 +87,7 @@ void reiserfs_warning(struct super_block *s, const char *fmt, ...);
87if( !( cond ) ) \ 87if( !( cond ) ) \
88 reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at " \ 88 reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at " \
89 __FILE__ ":%i:%s: " format "\n", \ 89 __FILE__ ":%i:%s: " format "\n", \
90 in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __FUNCTION__ , ##args ) 90 in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __func__ , ##args )
91 91
92#define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args) 92#define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args)
93 93
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 315517e8bfa1..bda6b562a1e0 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -178,6 +178,7 @@ struct reiserfs_journal {
178 struct reiserfs_journal_cnode *j_first; /* oldest journal block. start here for traverse */ 178 struct reiserfs_journal_cnode *j_first; /* oldest journal block. start here for traverse */
179 179
180 struct block_device *j_dev_bd; 180 struct block_device *j_dev_bd;
181 fmode_t j_dev_mode;
181 int j_1st_reserved_block; /* first block on s_dev of reserved area journal */ 182 int j_1st_reserved_block; /* first block on s_dev of reserved area journal */
182 183
183 unsigned long j_state; 184 unsigned long j_state;
diff --git a/include/linux/resource.h b/include/linux/resource.h
index aaa423a6f3d9..40fc7e626082 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -59,10 +59,10 @@ struct rlimit {
59#define _STK_LIM (8*1024*1024) 59#define _STK_LIM (8*1024*1024)
60 60
61/* 61/*
62 * GPG wants 32kB of mlocked memory, to make sure pass phrases 62 * GPG2 wants 64kB of mlocked memory, to make sure pass phrases
63 * and other sensitive information are never written to disk. 63 * and other sensitive information are never written to disk.
64 */ 64 */
65#define MLOCK_LIMIT (8 * PAGE_SIZE) 65#define MLOCK_LIMIT ((PAGE_SIZE > 64*1024) ? PAGE_SIZE : 64*1024)
66 66
67/* 67/*
68 * Due to binary compatibility, the actual resource numbers 68 * Due to binary compatibility, the actual resource numbers
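
For a quick check of what the MLOCK_LIMIT change above means in practice (page sizes here are just examples): the macro now evaluates to max(PAGE_SIZE, 64 kB), so with 4 kB pages the limit rises from the old 8 * 4 kB = 32 kB to 64 kB, and on any architecture it guarantees at least one mlockable page and at least the 64 kB that GPG2 asks for.
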
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index 4cd64b0d9825..164332cbb77c 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -108,6 +108,7 @@ struct rfkill {
108 108
109 struct device dev; 109 struct device dev;
110 struct list_head node; 110 struct list_head node;
111 enum rfkill_state state_for_resume;
111}; 112};
112#define to_rfkill(d) container_of(d, struct rfkill, dev) 113#define to_rfkill(d) container_of(d, struct rfkill, dev)
113 114
@@ -148,11 +149,4 @@ static inline char *rfkill_get_led_name(struct rfkill *rfkill)
148#endif 149#endif
149} 150}
150 151
151/* rfkill notification chain */
152#define RFKILL_STATE_CHANGED 0x0001 /* state of a normal rfkill
153 switch has changed */
154
155int register_rfkill_notifier(struct notifier_block *nb);
156int unregister_rfkill_notifier(struct notifier_block *nb);
157
158#endif /* RFKILL_H */ 152#endif /* RFKILL_H */
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
new file mode 100644
index 000000000000..b3b359660082
--- /dev/null
+++ b/include/linux/ring_buffer.h
@@ -0,0 +1,140 @@
1#ifndef _LINUX_RING_BUFFER_H
2#define _LINUX_RING_BUFFER_H
3
4#include <linux/mm.h>
5#include <linux/seq_file.h>
6
7struct ring_buffer;
8struct ring_buffer_iter;
9
10/*
11 * Don't reference this struct directly, use functions below.
12 */
13struct ring_buffer_event {
14 u32 type:2, len:3, time_delta:27;
15 u32 array[];
16};
17
18/**
19 * enum ring_buffer_type - internal ring buffer types
20 *
21 * @RINGBUF_TYPE_PADDING: Left over page padding
22 * array is ignored
23 * size is variable depending on how much
24 * padding is needed
25 *
26 * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
27 * array[0] = time delta (28 .. 59)
28 * size = 8 bytes
29 *
30 * @RINGBUF_TYPE_TIME_STAMP: Sync time stamp with external clock
31 * array[0] = tv_nsec
32 * array[1..2] = tv_sec
33 * size = 16 bytes
34 *
35 * @RINGBUF_TYPE_DATA: Data record
36 * If len is zero:
37 * array[0] holds the actual length
38 * array[1..(length+3)/4] holds data
39 * size = 4 + 4 + length (bytes)
40 * else
41 * length = len << 2
42 * array[0..(length+3)/4-1] holds data
43 * size = 4 + length (bytes)
44 */
45enum ring_buffer_type {
46 RINGBUF_TYPE_PADDING,
47 RINGBUF_TYPE_TIME_EXTEND,
48 /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
49 RINGBUF_TYPE_TIME_STAMP,
50 RINGBUF_TYPE_DATA,
51};
52
53unsigned ring_buffer_event_length(struct ring_buffer_event *event);
54void *ring_buffer_event_data(struct ring_buffer_event *event);
55
56/**
57 * ring_buffer_event_time_delta - return the delta timestamp of the event
58 * @event: the event to get the delta timestamp of
59 *
60 * The delta timestamp is the 27 bit timestamp since the last event.
61 */
62static inline unsigned
63ring_buffer_event_time_delta(struct ring_buffer_event *event)
64{
65 return event->time_delta;
66}
67
68/*
69 * size is in bytes for each per CPU buffer.
70 */
71struct ring_buffer *
72ring_buffer_alloc(unsigned long size, unsigned flags);
73void ring_buffer_free(struct ring_buffer *buffer);
74
75int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
76
77struct ring_buffer_event *
78ring_buffer_lock_reserve(struct ring_buffer *buffer,
79 unsigned long length,
80 unsigned long *flags);
81int ring_buffer_unlock_commit(struct ring_buffer *buffer,
82 struct ring_buffer_event *event,
83 unsigned long flags);
84int ring_buffer_write(struct ring_buffer *buffer,
85 unsigned long length, void *data);
86
87struct ring_buffer_event *
88ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
89struct ring_buffer_event *
90ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
91
92struct ring_buffer_iter *
93ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
94void ring_buffer_read_finish(struct ring_buffer_iter *iter);
95
96struct ring_buffer_event *
97ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
98struct ring_buffer_event *
99ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
100void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
101int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
102
103unsigned long ring_buffer_size(struct ring_buffer *buffer);
104
105void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
106void ring_buffer_reset(struct ring_buffer *buffer);
107
108int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
109 struct ring_buffer *buffer_b, int cpu);
110
111int ring_buffer_empty(struct ring_buffer *buffer);
112int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
113
114void ring_buffer_record_disable(struct ring_buffer *buffer);
115void ring_buffer_record_enable(struct ring_buffer *buffer);
116void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
117void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
118
119unsigned long ring_buffer_entries(struct ring_buffer *buffer);
120unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
121unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
122unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
123
124u64 ring_buffer_time_stamp(int cpu);
125void ring_buffer_normalize_time_stamp(int cpu, u64 *ts);
126
127void tracing_on(void);
128void tracing_off(void);
129void tracing_off_permanent(void);
130
131void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
132void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
133int ring_buffer_read_page(struct ring_buffer *buffer,
134 void **data_page, int cpu, int full);
135
136enum ring_buffer_flags {
137 RB_FL_OVERWRITE = 1 << 0,
138};
139
140#endif /* _LINUX_RING_BUFFER_H */
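
A minimal sketch of the producer/consumer call sequence declared above; error handling is elided, the payload and function names are arbitrary, and none of this is code from the patch:

static void produce(struct ring_buffer *buffer, int value)
{
	struct ring_buffer_event *event;
	unsigned long flags;
	int *payload;

	event = ring_buffer_lock_reserve(buffer, sizeof(*payload), &flags);
	if (!event)
		return;		/* recording disabled or no space reserved */
	payload = ring_buffer_event_data(event);
	*payload = value;
	ring_buffer_unlock_commit(buffer, event, flags);
}

static void drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL) {
		int *payload = ring_buffer_event_data(event);

		printk(KERN_INFO "cpu%d @%llu: %d\n",
		       cpu, (unsigned long long)ts, *payload);
	}
}

/* Setup/teardown: buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
 *                 ... ring_buffer_free(buffer); */
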
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 90987b7bcc1b..32c0547ffafc 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -427,9 +427,9 @@ void rio_dev_put(struct rio_dev *);
427 * Get the unique RIO device identifier. Returns the device 427 * Get the unique RIO device identifier. Returns the device
428 * identifier string. 428 * identifier string.
429 */ 429 */
430static inline char *rio_name(struct rio_dev *rdev) 430static inline const char *rio_name(struct rio_dev *rdev)
431{ 431{
432 return rdev->dev.bus_id; 432 return dev_name(&rdev->dev);
433} 433}
434 434
435/** 435/**
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index fed6f5e0b411..89f0564b10c8 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -39,18 +39,6 @@ struct anon_vma {
39 39
40#ifdef CONFIG_MMU 40#ifdef CONFIG_MMU
41 41
42extern struct kmem_cache *anon_vma_cachep;
43
44static inline struct anon_vma *anon_vma_alloc(void)
45{
46 return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
47}
48
49static inline void anon_vma_free(struct anon_vma *anon_vma)
50{
51 kmem_cache_free(anon_vma_cachep, anon_vma);
52}
53
54static inline void anon_vma_lock(struct vm_area_struct *vma) 42static inline void anon_vma_lock(struct vm_area_struct *vma)
55{ 43{
56 struct anon_vma *anon_vma = vma->anon_vma; 44 struct anon_vma *anon_vma = vma->anon_vma;
@@ -75,6 +63,9 @@ void anon_vma_unlink(struct vm_area_struct *);
75void anon_vma_link(struct vm_area_struct *); 63void anon_vma_link(struct vm_area_struct *);
76void __anon_vma_link(struct vm_area_struct *); 64void __anon_vma_link(struct vm_area_struct *);
77 65
66extern struct anon_vma *page_lock_anon_vma(struct page *page);
67extern void page_unlock_anon_vma(struct anon_vma *anon_vma);
68
78/* 69/*
79 * rmap interfaces called when adding or removing pte of page 70 * rmap interfaces called when adding or removing pte of page
80 */ 71 */
@@ -117,6 +108,19 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
117 */ 108 */
118int page_mkclean(struct page *); 109int page_mkclean(struct page *);
119 110
111#ifdef CONFIG_UNEVICTABLE_LRU
112/*
113 * called in munlock()/munmap() path to check for other vmas holding
114 * the page mlocked.
115 */
116int try_to_munlock(struct page *);
117#else
118static inline int try_to_munlock(struct page *page)
119{
120 return 0; /* a.k.a. SWAP_SUCCESS */
121}
122#endif
123
120#else /* !CONFIG_MMU */ 124#else /* !CONFIG_MMU */
121 125
122#define anon_vma_init() do {} while (0) 126#define anon_vma_init() do {} while (0)
@@ -140,5 +144,6 @@ static inline int page_mkclean(struct page *page)
140#define SWAP_SUCCESS 0 144#define SWAP_SUCCESS 0
141#define SWAP_AGAIN 1 145#define SWAP_AGAIN 1
142#define SWAP_FAIL 2 146#define SWAP_FAIL 2
147#define SWAP_MLOCK 3
143 148
144#endif /* _LINUX_RMAP_H */ 149#endif /* _LINUX_RMAP_H */
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 382bb7951166..f19b00b7d530 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -54,7 +54,7 @@ struct hrtimer_sleeper;
54#ifdef CONFIG_DEBUG_RT_MUTEXES 54#ifdef CONFIG_DEBUG_RT_MUTEXES
55# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ 55# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
56 , .name = #mutexname, .file = __FILE__, .line = __LINE__ 56 , .name = #mutexname, .file = __FILE__, .line = __LINE__
57# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __FUNCTION__) 57# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__)
58 extern void rt_mutex_debug_task_free(struct task_struct *tsk); 58 extern void rt_mutex_debug_task_free(struct task_struct *tsk);
59#else 59#else
60# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) 60# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2b3d51c6ec9c..e88f7058b3a1 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -107,6 +107,11 @@ enum {
107 RTM_GETADDRLABEL, 107 RTM_GETADDRLABEL,
108#define RTM_GETADDRLABEL RTM_GETADDRLABEL 108#define RTM_GETADDRLABEL RTM_GETADDRLABEL
109 109
110 RTM_GETDCB = 78,
111#define RTM_GETDCB RTM_GETDCB
112 RTM_SETDCB,
113#define RTM_SETDCB RTM_SETDCB
114
110 __RTM_MAX, 115 __RTM_MAX,
111#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1) 116#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
112}; 117};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1a7e8461db5a..bd5ff78798c2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -96,6 +96,7 @@ struct exec_domain;
96struct futex_pi_state; 96struct futex_pi_state;
97struct robust_list_head; 97struct robust_list_head;
98struct bio; 98struct bio;
99struct bts_tracer;
99 100
100/* 101/*
101 * List of flags we want to share for kernel threads, 102 * List of flags we want to share for kernel threads,
@@ -247,6 +248,7 @@ extern void init_idle(struct task_struct *idle, int cpu);
247extern void init_idle_bootup_task(struct task_struct *idle); 248extern void init_idle_bootup_task(struct task_struct *idle);
248 249
249extern int runqueue_is_locked(void); 250extern int runqueue_is_locked(void);
251extern void task_rq_unlock_wait(struct task_struct *p);
250 252
251extern cpumask_t nohz_cpu_mask; 253extern cpumask_t nohz_cpu_mask;
252#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) 254#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
@@ -258,8 +260,6 @@ static inline int select_nohz_load_balancer(int cpu)
258} 260}
259#endif 261#endif
260 262
261extern unsigned long rt_needs_cpu(int cpu);
262
263/* 263/*
264 * Only dump TASK_* tasks. (0 for all tasks) 264 * Only dump TASK_* tasks. (0 for all tasks)
265 */ 265 */
@@ -287,7 +287,6 @@ extern void trap_init(void);
287extern void account_process_tick(struct task_struct *task, int user); 287extern void account_process_tick(struct task_struct *task, int user);
288extern void update_process_times(int user); 288extern void update_process_times(int user);
289extern void scheduler_tick(void); 289extern void scheduler_tick(void);
290extern void hrtick_resched(void);
291 290
292extern void sched_show_task(struct task_struct *p); 291extern void sched_show_task(struct task_struct *p);
293 292
@@ -403,12 +402,21 @@ extern int get_dumpable(struct mm_struct *mm);
403#define MMF_DUMP_MAPPED_PRIVATE 4 402#define MMF_DUMP_MAPPED_PRIVATE 4
404#define MMF_DUMP_MAPPED_SHARED 5 403#define MMF_DUMP_MAPPED_SHARED 5
405#define MMF_DUMP_ELF_HEADERS 6 404#define MMF_DUMP_ELF_HEADERS 6
405#define MMF_DUMP_HUGETLB_PRIVATE 7
406#define MMF_DUMP_HUGETLB_SHARED 8
406#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS 407#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
407#define MMF_DUMP_FILTER_BITS 5 408#define MMF_DUMP_FILTER_BITS 7
408#define MMF_DUMP_FILTER_MASK \ 409#define MMF_DUMP_FILTER_MASK \
409 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) 410 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
410#define MMF_DUMP_FILTER_DEFAULT \ 411#define MMF_DUMP_FILTER_DEFAULT \
411 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED)) 412 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
413 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
414
415#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
416# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
417#else
418# define MMF_DUMP_MASK_DEFAULT_ELF 0
419#endif
412 420
413struct sighand_struct { 421struct sighand_struct {
414 atomic_t count; 422 atomic_t count;
@@ -425,6 +433,39 @@ struct pacct_struct {
425 unsigned long ac_minflt, ac_majflt; 433 unsigned long ac_minflt, ac_majflt;
426}; 434};
427 435
436/**
437 * struct task_cputime - collected CPU time counts
438 * @utime: time spent in user mode, in &cputime_t units
439 * @stime: time spent in kernel mode, in &cputime_t units
440 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
441 *
442 * This structure groups together three kinds of CPU time that are
443 * tracked for threads and thread groups. Most things considering
444 * CPU time want to group these counts together and treat all three
445 * of them in parallel.
446 */
447struct task_cputime {
448 cputime_t utime;
449 cputime_t stime;
450 unsigned long long sum_exec_runtime;
451};
452/* Alternate field names when used to cache expirations. */
453#define prof_exp stime
454#define virt_exp utime
455#define sched_exp sum_exec_runtime
456
457/**
458 * struct thread_group_cputime - thread group interval timer counts
459 * @totals: thread group interval timers; substructure for
460 * uniprocessor kernel, per-cpu for SMP kernel.
461 *
462 * This structure contains the version of task_cputime, above, that is
463 * used for thread group CPU clock calculations.
464 */
465struct thread_group_cputime {
466 struct task_cputime *totals;
467};
468
428/* 469/*
429 * NOTE! "signal_struct" does not have it's own 470 * NOTE! "signal_struct" does not have it's own
430 * locking, because a shared signal_struct always 471 * locking, because a shared signal_struct always
@@ -470,6 +511,17 @@ struct signal_struct {
470 cputime_t it_prof_expires, it_virt_expires; 511 cputime_t it_prof_expires, it_virt_expires;
471 cputime_t it_prof_incr, it_virt_incr; 512 cputime_t it_prof_incr, it_virt_incr;
472 513
514 /*
515 * Thread group totals for process CPU clocks.
516 * See thread_group_cputime(), et al, for details.
517 */
518 struct thread_group_cputime cputime;
519
520 /* Earliest-expiration cache. */
521 struct task_cputime cputime_expires;
522
523 struct list_head cpu_timers[3];
524
473 /* job control IDs */ 525 /* job control IDs */
474 526
475 /* 527 /*
@@ -500,7 +552,7 @@ struct signal_struct {
500 * Live threads maintain their own counters and add to these 552 * Live threads maintain their own counters and add to these
501 * in __exit_signal, except for the group leader. 553 * in __exit_signal, except for the group leader.
502 */ 554 */
503 cputime_t utime, stime, cutime, cstime; 555 cputime_t cutime, cstime;
504 cputime_t gtime; 556 cputime_t gtime;
505 cputime_t cgtime; 557 cputime_t cgtime;
506 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; 558 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
@@ -509,14 +561,6 @@ struct signal_struct {
509 struct task_io_accounting ioac; 561 struct task_io_accounting ioac;
510 562
511 /* 563 /*
512 * Cumulative ns of scheduled CPU time for dead threads in the
513 * group, not including a zombie group leader. (This only differs
514 * from jiffies_to_ns(utime + stime) if sched_clock uses something
515 * other than jiffies.)
516 */
517 unsigned long long sum_sched_runtime;
518
519 /*
520 * We don't bother to synchronize most readers of this at all, 564 * We don't bother to synchronize most readers of this at all,
521 * because there is no reader checking a limit that actually needs 565 * because there is no reader checking a limit that actually needs
522 * to get both rlim_cur and rlim_max atomically, and either one 566 * to get both rlim_cur and rlim_max atomically, and either one
@@ -527,14 +571,6 @@ struct signal_struct {
527 */ 571 */
528 struct rlimit rlim[RLIM_NLIMITS]; 572 struct rlimit rlim[RLIM_NLIMITS];
529 573
530 struct list_head cpu_timers[3];
531
532 /* keep the process-shared keyrings here so that they do the right
533 * thing in threads created with CLONE_THREAD */
534#ifdef CONFIG_KEYS
535 struct key *session_keyring; /* keyring inherited over fork */
536 struct key *process_keyring; /* keyring private to this process */
537#endif
538#ifdef CONFIG_BSD_PROCESS_ACCT 574#ifdef CONFIG_BSD_PROCESS_ACCT
539 struct pacct_struct pacct; /* per-process accounting information */ 575 struct pacct_struct pacct; /* per-process accounting information */
540#endif 576#endif
@@ -587,6 +623,10 @@ struct user_struct {
587 atomic_t inotify_watches; /* How many inotify watches does this user have? */ 623 atomic_t inotify_watches; /* How many inotify watches does this user have? */
588 atomic_t inotify_devs; /* How many inotify devs does this user have opened? */ 624 atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
589#endif 625#endif
626#ifdef CONFIG_EPOLL
627 atomic_t epoll_devs; /* The number of epoll descriptors currently open */
628 atomic_t epoll_watches; /* The number of file descriptors currently watched */
629#endif
590#ifdef CONFIG_POSIX_MQUEUE 630#ifdef CONFIG_POSIX_MQUEUE
591 /* protected by mq_lock */ 631 /* protected by mq_lock */
592 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */ 632 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
@@ -601,6 +641,7 @@ struct user_struct {
601 /* Hash table maintenance information */ 641 /* Hash table maintenance information */
602 struct hlist_node uidhash_node; 642 struct hlist_node uidhash_node;
603 uid_t uid; 643 uid_t uid;
644 struct user_namespace *user_ns;
604 645
605#ifdef CONFIG_USER_SCHED 646#ifdef CONFIG_USER_SCHED
606 struct task_group *tg; 647 struct task_group *tg;
@@ -618,6 +659,7 @@ extern struct user_struct *find_user(uid_t);
618extern struct user_struct root_user; 659extern struct user_struct root_user;
619#define INIT_USER (&root_user) 660#define INIT_USER (&root_user)
620 661
662
621struct backing_dev_info; 663struct backing_dev_info;
622struct reclaim_state; 664struct reclaim_state;
623 665
@@ -625,8 +667,7 @@ struct reclaim_state;
625struct sched_info { 667struct sched_info {
626 /* cumulative counters */ 668 /* cumulative counters */
627 unsigned long pcount; /* # of times run on this cpu */ 669 unsigned long pcount; /* # of times run on this cpu */
628 unsigned long long cpu_time, /* time spent on the cpu */ 670 unsigned long long run_delay; /* time spent waiting on a runqueue */
629 run_delay; /* time spent waiting on a runqueue */
630 671
631 /* timestamps */ 672 /* timestamps */
632 unsigned long long last_arrival,/* when we last ran on a cpu */ 673 unsigned long long last_arrival,/* when we last ran on a cpu */
@@ -638,10 +679,6 @@ struct sched_info {
638}; 679};
639#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */ 680#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
640 681
641#ifdef CONFIG_SCHEDSTATS
642extern const struct file_operations proc_schedstat_operations;
643#endif /* CONFIG_SCHEDSTATS */
644
645#ifdef CONFIG_TASK_DELAY_ACCT 682#ifdef CONFIG_TASK_DELAY_ACCT
646struct task_delay_info { 683struct task_delay_info {
647 spinlock_t lock; 684 spinlock_t lock;
@@ -845,38 +882,7 @@ partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
845#endif /* !CONFIG_SMP */ 882#endif /* !CONFIG_SMP */
846 883
847struct io_context; /* See blkdev.h */ 884struct io_context; /* See blkdev.h */
848#define NGROUPS_SMALL 32
849#define NGROUPS_PER_BLOCK ((unsigned int)(PAGE_SIZE / sizeof(gid_t)))
850struct group_info {
851 int ngroups;
852 atomic_t usage;
853 gid_t small_block[NGROUPS_SMALL];
854 int nblocks;
855 gid_t *blocks[0];
856};
857
858/*
859 * get_group_info() must be called with the owning task locked (via task_lock())
860 * when task != current. The reason being that the vast majority of callers are
861 * looking at current->group_info, which can not be changed except by the
862 * current task. Changing current->group_info requires the task lock, too.
863 */
864#define get_group_info(group_info) do { \
865 atomic_inc(&(group_info)->usage); \
866} while (0)
867 885
868#define put_group_info(group_info) do { \
869 if (atomic_dec_and_test(&(group_info)->usage)) \
870 groups_free(group_info); \
871} while (0)
872
873extern struct group_info *groups_alloc(int gidsetsize);
874extern void groups_free(struct group_info *group_info);
875extern int set_current_groups(struct group_info *group_info);
876extern int groups_search(struct group_info *group_info, gid_t grp);
877/* access the groups "array" with this macro */
878#define GROUP_AT(gi, i) \
879 ((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
880 886
881#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK 887#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
882extern void prefetch_stack(struct task_struct *t); 888extern void prefetch_stack(struct task_struct *t);
@@ -898,7 +904,6 @@ struct sched_class {
898 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); 904 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
899 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); 905 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
900 void (*yield_task) (struct rq *rq); 906 void (*yield_task) (struct rq *rq);
901 int (*select_task_rq)(struct task_struct *p, int sync);
902 907
903 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync); 908 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
904 909
@@ -906,6 +911,8 @@ struct sched_class {
906 void (*put_prev_task) (struct rq *rq, struct task_struct *p); 911 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
907 912
908#ifdef CONFIG_SMP 913#ifdef CONFIG_SMP
914 int (*select_task_rq)(struct task_struct *p, int sync);
915
909 unsigned long (*load_balance) (struct rq *this_rq, int this_cpu, 916 unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
910 struct rq *busiest, unsigned long max_load_move, 917 struct rq *busiest, unsigned long max_load_move,
911 struct sched_domain *sd, enum cpu_idle_type idle, 918 struct sched_domain *sd, enum cpu_idle_type idle,
@@ -917,16 +924,17 @@ struct sched_class {
917 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 924 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
918 void (*post_schedule) (struct rq *this_rq); 925 void (*post_schedule) (struct rq *this_rq);
919 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); 926 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
920#endif
921 927
922 void (*set_curr_task) (struct rq *rq);
923 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
924 void (*task_new) (struct rq *rq, struct task_struct *p);
925 void (*set_cpus_allowed)(struct task_struct *p, 928 void (*set_cpus_allowed)(struct task_struct *p,
926 const cpumask_t *newmask); 929 const cpumask_t *newmask);
927 930
928 void (*rq_online)(struct rq *rq); 931 void (*rq_online)(struct rq *rq);
929 void (*rq_offline)(struct rq *rq); 932 void (*rq_offline)(struct rq *rq);
933#endif
934
935 void (*set_curr_task) (struct rq *rq);
936 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
937 void (*task_new) (struct rq *rq, struct task_struct *p);
930 938
931 void (*switched_from) (struct rq *this_rq, struct task_struct *task, 939 void (*switched_from) (struct rq *this_rq, struct task_struct *task,
932 int running); 940 int running);
@@ -1119,6 +1127,19 @@ struct task_struct {
1119 struct list_head ptraced; 1127 struct list_head ptraced;
1120 struct list_head ptrace_entry; 1128 struct list_head ptrace_entry;
1121 1129
1130#ifdef CONFIG_X86_PTRACE_BTS
1131 /*
1132 * This is the tracer handle for the ptrace BTS extension.
1133 * This field actually belongs to the ptracer task.
1134 */
1135 struct bts_tracer *bts;
1136 /*
1137 * The buffer to hold the BTS data.
1138 */
1139 void *bts_buffer;
1140 size_t bts_size;
1141#endif /* CONFIG_X86_PTRACE_BTS */
1142
1122 /* PID/PID hash table linkage. */ 1143 /* PID/PID hash table linkage. */
1123 struct pid_link pids[PIDTYPE_MAX]; 1144 struct pid_link pids[PIDTYPE_MAX];
1124 struct list_head thread_group; 1145 struct list_head thread_group;
@@ -1136,22 +1157,16 @@ struct task_struct {
1136/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ 1157/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1137 unsigned long min_flt, maj_flt; 1158 unsigned long min_flt, maj_flt;
1138 1159
1139 cputime_t it_prof_expires, it_virt_expires; 1160 struct task_cputime cputime_expires;
1140 unsigned long long it_sched_expires;
1141 struct list_head cpu_timers[3]; 1161 struct list_head cpu_timers[3];
1142 1162
1143/* process credentials */ 1163/* process credentials */
1144 uid_t uid,euid,suid,fsuid; 1164 const struct cred *real_cred; /* objective and real subjective task
1145 gid_t gid,egid,sgid,fsgid; 1165 * credentials (COW) */
1146 struct group_info *group_info; 1166 const struct cred *cred; /* effective (overridable) subjective task
1147 kernel_cap_t cap_effective, cap_inheritable, cap_permitted, cap_bset; 1167 * credentials (COW) */
1148 struct user_struct *user; 1168 struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */
1149 unsigned securebits; 1169
1150#ifdef CONFIG_KEYS
1151 unsigned char jit_keyring; /* default keyring to attach requested keys to */
1152 struct key *request_key_auth; /* assumed request_key authority */
1153 struct key *thread_keyring; /* keyring private to this thread */
1154#endif
1155 char comm[TASK_COMM_LEN]; /* executable name excluding path 1170 char comm[TASK_COMM_LEN]; /* executable name excluding path
1156 - access with [gs]et_task_comm (which lock 1171 - access with [gs]et_task_comm (which lock
1157 it with task_lock()) 1172 it with task_lock())
@@ -1188,9 +1203,6 @@ struct task_struct {
1188 int (*notifier)(void *priv); 1203 int (*notifier)(void *priv);
1189 void *notifier_data; 1204 void *notifier_data;
1190 sigset_t *notifier_mask; 1205 sigset_t *notifier_mask;
1191#ifdef CONFIG_SECURITY
1192 void *security;
1193#endif
1194 struct audit_context *audit_context; 1206 struct audit_context *audit_context;
1195#ifdef CONFIG_AUDITSYSCALL 1207#ifdef CONFIG_AUDITSYSCALL
1196 uid_t loginuid; 1208 uid_t loginuid;
@@ -1303,6 +1315,31 @@ struct task_struct {
1303 int latency_record_count; 1315 int latency_record_count;
1304 struct latency_record latency_record[LT_SAVECOUNT]; 1316 struct latency_record latency_record[LT_SAVECOUNT];
1305#endif 1317#endif
1318 /*
1319 * time slack values; these are used to round up poll() and
1320 * select() etc timeout values. These are in nanoseconds.
1321 */
1322 unsigned long timer_slack_ns;
1323 unsigned long default_timer_slack_ns;
1324
1325 struct list_head *scm_work_list;
1326#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1327	/* Index of the currently stored return address in ret_stack */
1328 int curr_ret_stack;
1329 /* Stack of return addresses for return function tracing */
1330 struct ftrace_ret_stack *ret_stack;
1331 /*
1332 * Number of functions that haven't been traced
1333 * because of depth overrun.
1334 */
1335 atomic_t trace_overrun;
1336 /* Pause for the tracing */
1337 atomic_t tracing_graph_pause;
1338#endif
1339#ifdef CONFIG_TRACING
1340 /* state flags for use by tracers */
1341 unsigned long trace;
1342#endif
1306}; 1343};
1307 1344
1308/* 1345/*
@@ -1587,6 +1624,7 @@ extern unsigned long long cpu_clock(int cpu);
1587 1624
1588extern unsigned long long 1625extern unsigned long long
1589task_sched_runtime(struct task_struct *task); 1626task_sched_runtime(struct task_struct *task);
1627extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
1590 1628
1591/* sched_exec is called by processes performing an exec */ 1629/* sched_exec is called by processes performing an exec */
1592#ifdef CONFIG_SMP 1630#ifdef CONFIG_SMP
@@ -1621,6 +1659,7 @@ extern unsigned int sysctl_sched_features;
1621extern unsigned int sysctl_sched_migration_cost; 1659extern unsigned int sysctl_sched_migration_cost;
1622extern unsigned int sysctl_sched_nr_migrate; 1660extern unsigned int sysctl_sched_nr_migrate;
1623extern unsigned int sysctl_sched_shares_ratelimit; 1661extern unsigned int sysctl_sched_shares_ratelimit;
1662extern unsigned int sysctl_sched_shares_thresh;
1624 1663
1625int sched_nr_latency_handler(struct ctl_table *table, int write, 1664int sched_nr_latency_handler(struct ctl_table *table, int write,
1626 struct file *file, void __user *buffer, size_t *length, 1665 struct file *file, void __user *buffer, size_t *length,
@@ -1720,7 +1759,6 @@ static inline struct user_struct *get_uid(struct user_struct *u)
1720 return u; 1759 return u;
1721} 1760}
1722extern void free_uid(struct user_struct *); 1761extern void free_uid(struct user_struct *);
1723extern void switch_uid(struct user_struct *);
1724extern void release_uids(struct user_namespace *ns); 1762extern void release_uids(struct user_namespace *ns);
1725 1763
1726#include <asm/current.h> 1764#include <asm/current.h>
@@ -1739,9 +1777,6 @@ extern void wake_up_new_task(struct task_struct *tsk,
1739extern void sched_fork(struct task_struct *p, int clone_flags); 1777extern void sched_fork(struct task_struct *p, int clone_flags);
1740extern void sched_dead(struct task_struct *p); 1778extern void sched_dead(struct task_struct *p);
1741 1779
1742extern int in_group_p(gid_t);
1743extern int in_egroup_p(gid_t);
1744
1745extern void proc_caches_init(void); 1780extern void proc_caches_init(void);
1746extern void flush_signals(struct task_struct *); 1781extern void flush_signals(struct task_struct *);
1747extern void ignore_signals(struct task_struct *); 1782extern void ignore_signals(struct task_struct *);
@@ -1873,6 +1908,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
1873#define for_each_process(p) \ 1908#define for_each_process(p) \
1874 for (p = &init_task ; (p = next_task(p)) != &init_task ; ) 1909 for (p = &init_task ; (p = next_task(p)) != &init_task ; )
1875 1910
1911extern bool is_single_threaded(struct task_struct *);
1912
1876/* 1913/*
1877 * Careful: do_each_thread/while_each_thread is a double loop so 1914 * Careful: do_each_thread/while_each_thread is a double loop so
1878 * 'break' will not work as expected - use goto instead. 1915 * 'break' will not work as expected - use goto instead.
@@ -2097,6 +2134,30 @@ static inline int spin_needbreak(spinlock_t *lock)
2097} 2134}
2098 2135
2099/* 2136/*
2137 * Thread group CPU time accounting.
2138 */
2139
2140extern int thread_group_cputime_alloc(struct task_struct *);
2141extern void thread_group_cputime(struct task_struct *, struct task_cputime *);
2142
2143static inline void thread_group_cputime_init(struct signal_struct *sig)
2144{
2145 sig->cputime.totals = NULL;
2146}
2147
2148static inline int thread_group_cputime_clone_thread(struct task_struct *curr)
2149{
2150 if (curr->signal->cputime.totals)
2151 return 0;
2152 return thread_group_cputime_alloc(curr);
2153}
2154
2155static inline void thread_group_cputime_free(struct signal_struct *sig)
2156{
2157 free_percpu(sig->cputime.totals);
2158}
2159
2160/*
2100 * Reevaluate whether the task has signals pending delivery. 2161 * Reevaluate whether the task has signals pending delivery.
2101 * Wake the task if so. 2162 * Wake the task if so.
2102 * This is required every time the blocked sigset_t changes. 2163 * This is required every time the blocked sigset_t changes.
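
The thread-group CPU accounting helpers added a few hunks above replace the old per-signal_struct utime/stime totals. A small sketch of how a caller might read the grouped counters; the function name and the printing are invented for the example, and cputime_t is printed through a cast only because its underlying type is architecture specific:

static void report_group_times(struct task_struct *tsk)
{
	struct task_cputime times;

	thread_group_cputime(tsk, &times);
	printk(KERN_INFO "%s: utime=%lu stime=%lu runtime=%llu ns\n",
	       tsk->comm,
	       (unsigned long)times.utime,
	       (unsigned long)times.stime,
	       times.sum_exec_runtime);
}
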
@@ -2158,6 +2219,7 @@ extern void normalize_rt_tasks(void);
2158extern struct task_group init_task_group; 2219extern struct task_group init_task_group;
2159#ifdef CONFIG_USER_SCHED 2220#ifdef CONFIG_USER_SCHED
2160extern struct task_group root_task_group; 2221extern struct task_group root_task_group;
2222extern void set_tg_uid(struct user_struct *user);
2161#endif 2223#endif
2162 2224
2163extern struct task_group *sched_create_group(struct task_group *parent); 2225extern struct task_group *sched_create_group(struct task_group *parent);
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
index 92f09bdf1175..d2c5ed845bcc 100644
--- a/include/linux/securebits.h
+++ b/include/linux/securebits.h
@@ -32,7 +32,7 @@
32 setting is locked or not. A setting which is locked cannot be 32 setting is locked or not. A setting which is locked cannot be
33 changed from user-level. */ 33 changed from user-level. */
34#define issecure_mask(X) (1 << (X)) 34#define issecure_mask(X) (1 << (X))
35#define issecure(X) (issecure_mask(X) & current->securebits) 35#define issecure(X) (issecure_mask(X) & current_cred_xxx(securebits))
36 36
37#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \ 37#define SECURE_ALL_BITS (issecure_mask(SECURE_NOROOT) | \
38 issecure_mask(SECURE_NO_SETUID_FIXUP) | \ 38 issecure_mask(SECURE_NO_SETUID_FIXUP) | \
diff --git a/include/linux/security.h b/include/linux/security.h
index f5c4a51eb42e..3416cb85e77b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -37,6 +37,10 @@
37/* Maximum number of letters for an LSM name string */ 37/* Maximum number of letters for an LSM name string */
38#define SECURITY_NAME_MAX 10 38#define SECURITY_NAME_MAX 10
39 39
40/* If capable should audit the security request */
41#define SECURITY_CAP_NOAUDIT 0
42#define SECURITY_CAP_AUDIT 1
43
40struct ctl_table; 44struct ctl_table;
41struct audit_krule; 45struct audit_krule;
42 46
@@ -44,25 +48,25 @@ struct audit_krule;
44 * These functions are in security/capability.c and are used 48 * These functions are in security/capability.c and are used
45 * as the default capabilities functions 49 * as the default capabilities functions
46 */ 50 */
47extern int cap_capable(struct task_struct *tsk, int cap); 51extern int cap_capable(struct task_struct *tsk, int cap, int audit);
48extern int cap_settime(struct timespec *ts, struct timezone *tz); 52extern int cap_settime(struct timespec *ts, struct timezone *tz);
49extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode); 53extern int cap_ptrace_may_access(struct task_struct *child, unsigned int mode);
50extern int cap_ptrace_traceme(struct task_struct *parent); 54extern int cap_ptrace_traceme(struct task_struct *parent);
51extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); 55extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
52extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); 56extern int cap_capset(struct cred *new, const struct cred *old,
53extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted); 57 const kernel_cap_t *effective,
54extern int cap_bprm_set_security(struct linux_binprm *bprm); 58 const kernel_cap_t *inheritable,
55extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); 59 const kernel_cap_t *permitted);
60extern int cap_bprm_set_creds(struct linux_binprm *bprm);
56extern int cap_bprm_secureexec(struct linux_binprm *bprm); 61extern int cap_bprm_secureexec(struct linux_binprm *bprm);
57extern int cap_inode_setxattr(struct dentry *dentry, const char *name, 62extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
58 const void *value, size_t size, int flags); 63 const void *value, size_t size, int flags);
59extern int cap_inode_removexattr(struct dentry *dentry, const char *name); 64extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
60extern int cap_inode_need_killpriv(struct dentry *dentry); 65extern int cap_inode_need_killpriv(struct dentry *dentry);
61extern int cap_inode_killpriv(struct dentry *dentry); 66extern int cap_inode_killpriv(struct dentry *dentry);
62extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags); 67extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
63extern void cap_task_reparent_to_init(struct task_struct *p);
64extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, 68extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
65 unsigned long arg4, unsigned long arg5, long *rc_p); 69 unsigned long arg4, unsigned long arg5);
66extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); 70extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
67extern int cap_task_setioprio(struct task_struct *p, int ioprio); 71extern int cap_task_setioprio(struct task_struct *p, int ioprio);
68extern int cap_task_setnice(struct task_struct *p, int nice); 72extern int cap_task_setnice(struct task_struct *p, int nice);
@@ -105,7 +109,7 @@ extern unsigned long mmap_min_addr;
105struct sched_param; 109struct sched_param;
106struct request_sock; 110struct request_sock;
107 111
108/* bprm_apply_creds unsafe reasons */ 112/* bprm->unsafe reasons */
109#define LSM_UNSAFE_SHARE 1 113#define LSM_UNSAFE_SHARE 1
110#define LSM_UNSAFE_PTRACE 2 114#define LSM_UNSAFE_PTRACE 2
111#define LSM_UNSAFE_PTRACE_CAP 4 115#define LSM_UNSAFE_PTRACE_CAP 4
@@ -149,36 +153,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
149 * 153 *
150 * Security hooks for program execution operations. 154 * Security hooks for program execution operations.
151 * 155 *
152 * @bprm_alloc_security: 156 * @bprm_set_creds:
153 * Allocate and attach a security structure to the @bprm->security field.
154 * The security field is initialized to NULL when the bprm structure is
155 * allocated.
156 * @bprm contains the linux_binprm structure to be modified.
157 * Return 0 if operation was successful.
158 * @bprm_free_security:
159 * @bprm contains the linux_binprm structure to be modified.
160 * Deallocate and clear the @bprm->security field.
161 * @bprm_apply_creds:
162 * Compute and set the security attributes of a process being transformed
163 * by an execve operation based on the old attributes (current->security)
164 * and the information saved in @bprm->security by the set_security hook.
165 * Since this hook function (and its caller) are void, this hook can not
166 * return an error. However, it can leave the security attributes of the
167 * process unchanged if an access failure occurs at this point.
168 * bprm_apply_creds is called under task_lock. @unsafe indicates various
169 * reasons why it may be unsafe to change security state.
170 * @bprm contains the linux_binprm structure.
171 * @bprm_post_apply_creds:
172 * Runs after bprm_apply_creds with the task_lock dropped, so that
173 * functions which cannot be called safely under the task_lock can
174 * be used. This hook is a good place to perform state changes on
175 * the process such as closing open file descriptors to which access
176 * is no longer granted if the attributes were changed.
177 * Note that a security module might need to save state between
178 * bprm_apply_creds and bprm_post_apply_creds to store the decision
179 * on whether the process may proceed.
180 * @bprm contains the linux_binprm structure.
181 * @bprm_set_security:
182 * Save security information in the bprm->security field, typically based 157 * Save security information in the bprm->security field, typically based
183 * on information about the bprm->file, for later use by the apply_creds 158 * on information about the bprm->file, for later use by the apply_creds
184 * hook. This hook may also optionally check permissions (e.g. for 159 * hook. This hook may also optionally check permissions (e.g. for
@@ -191,15 +166,30 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
191 * @bprm contains the linux_binprm structure. 166 * @bprm contains the linux_binprm structure.
192 * Return 0 if the hook is successful and permission is granted. 167 * Return 0 if the hook is successful and permission is granted.
193 * @bprm_check_security: 168 * @bprm_check_security:
194 * This hook mediates the point when a search for a binary handler will 169 * This hook mediates the point when a search for a binary handler will
195 * begin. It allows a check of the @bprm->security value which is set in 170 * begin. It allows a check of the @bprm->security value which is set in the
196 * the preceding set_security call. The primary difference from 171 * preceding set_creds call. The primary difference from set_creds is
197 * set_security is that the argv list and envp list are reliably 172 * that the argv list and envp list are reliably available in @bprm. This
198 * available in @bprm. This hook may be called multiple times 173 * hook may be called multiple times during a single execve; and in each
199 * during a single execve; and in each pass set_security is called 174 * pass set_creds is called first.
200 * first.
201 * @bprm contains the linux_binprm structure. 175 * @bprm contains the linux_binprm structure.
202 * Return 0 if the hook is successful and permission is granted. 176 * Return 0 if the hook is successful and permission is granted.
177 * @bprm_committing_creds:
178 * Prepare to install the new security attributes of a process being
179 * transformed by an execve operation, based on the old credentials
180 * pointed to by @current->cred and the information set in @bprm->cred by
181 * the bprm_set_creds hook. @bprm points to the linux_binprm structure.
182 * This hook is a good place to perform state changes on the process such
183 * as closing open file descriptors to which access will no longer be
184 * granted when the attributes are changed. This is called immediately
185 * before commit_creds().
186 * @bprm_committed_creds:
187 * Tidy up after the installation of the new security attributes of a
188 * process being transformed by an execve operation. The new credentials
189 * have, by this point, been set to @current->cred. @bprm points to the
190 * linux_binprm structure. This hook is a good place to perform state
191 * changes on the process such as clearing out non-inheritable signal
192 * state. This is called immediately after commit_creds().
203 * @bprm_secureexec: 193 * @bprm_secureexec:
204 * Return a boolean value (0 or 1) indicating whether a "secure exec" 194 * Return a boolean value (0 or 1) indicating whether a "secure exec"
205 * is required. The flag is passed in the auxiliary table 195 * is required. The flag is passed in the auxiliary table
@@ -585,15 +575,31 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
585 * manual page for definitions of the @clone_flags. 575 * manual page for definitions of the @clone_flags.
586 * @clone_flags contains the flags indicating what should be shared. 576 * @clone_flags contains the flags indicating what should be shared.
587 * Return 0 if permission is granted. 577 * Return 0 if permission is granted.
588 * @task_alloc_security: 578 * @cred_free:
589 * @p contains the task_struct for child process. 579 * @cred points to the credentials.
590 * Allocate and attach a security structure to the p->security field. The 580 * Deallocate and clear the cred->security field in a set of credentials.
591 * security field is initialized to NULL when the task structure is 581 * @cred_prepare:
592 * allocated. 582 * @new points to the new credentials.
593 * Return 0 if operation was successful. 583 * @old points to the original credentials.
594 * @task_free_security: 584 * @gfp indicates the atomicity of any memory allocations.
595 * @p contains the task_struct for process. 585 * Prepare a new set of credentials by copying the data from the old set.
596 * Deallocate and clear the p->security field. 586 * @cred_commit:
587 * @new points to the new credentials.
588 * @old points to the original credentials.
589 * Install a new set of credentials.
590 * @kernel_act_as:
591 * Set the credentials for a kernel service to act as (subjective context).
592 * @new points to the credentials to be modified.
593 * @secid specifies the security ID to be set
594 * The current task must be the one that nominated @secid.
595 * Return 0 if successful.
596 * @kernel_create_files_as:
597 * Set the file creation context in a set of credentials to be the same as
598 * the objective context of the specified inode.
599 * @new points to the credentials to be modified.
600 * @inode points to the inode to use as a reference.
601 * The current task must be the one that nominated @inode.
602 * Return 0 if successful.
597 * @task_setuid: 603 * @task_setuid:
598 * Check permission before setting one or more of the user identity 604 * Check permission before setting one or more of the user identity
599 * attributes of the current process. The @flags parameter indicates 605 * attributes of the current process. The @flags parameter indicates
@@ -606,15 +612,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
606 * @id2 contains a uid. 612 * @id2 contains a uid.
607 * @flags contains one of the LSM_SETID_* values. 613 * @flags contains one of the LSM_SETID_* values.
608 * Return 0 if permission is granted. 614 * Return 0 if permission is granted.
609 * @task_post_setuid: 615 * @task_fix_setuid:
610 * Update the module's state after setting one or more of the user 616 * Update the module's state after setting one or more of the user
611 * identity attributes of the current process. The @flags parameter 617 * identity attributes of the current process. The @flags parameter
612 * indicates which of the set*uid system calls invoked this hook. If 618 * indicates which of the set*uid system calls invoked this hook. If
613 * @flags is LSM_SETID_FS, then @old_ruid is the old fs uid and the other 619 * @new is the set of credentials that will be installed. Modifications
614 * parameters are not used. 620 * should be made to this rather than to @current->cred.
615 * @old_ruid contains the old real uid (or fs uid if LSM_SETID_FS). 621 * @old is the set of credentials that are being replaces
616 * @old_euid contains the old effective uid (or -1 if LSM_SETID_FS).
617 * @old_suid contains the old saved uid (or -1 if LSM_SETID_FS).
618 * @flags contains one of the LSM_SETID_* values. 622 * @flags contains one of the LSM_SETID_* values.
619 * Return 0 on success. 623 * Return 0 on success.
620 * @task_setgid: 624 * @task_setgid:
@@ -717,13 +721,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
717 * @arg3 contains a argument. 721 * @arg3 contains a argument.
718 * @arg4 contains a argument. 722 * @arg4 contains a argument.
719 * @arg5 contains a argument. 723 * @arg5 contains a argument.
720 * @rc_p contains a pointer to communicate back the forced return code 724 * Return -ENOSYS if no-one wanted to handle this op, any other value to
721 * Return 0 if permission is granted, and non-zero if the security module 725 * cause prctl() to return immediately with that value.
722 * has taken responsibility (setting *rc_p) for the prctl call.
723 * @task_reparent_to_init:
724 * Set the security attributes in @p->security for a kernel thread that
725 * is being reparented to the init task.
726 * @p contains the task_struct for the kernel thread.
727 * @task_to_inode: 726 * @task_to_inode:
728 * Set the security attributes for an inode based on an associated task's 727 * Set the security attributes for an inode based on an associated task's
729 * security attributes, e.g. for /proc/pid inodes. 728 * security attributes, e.g. for /proc/pid inodes.
@@ -1000,7 +999,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1000 * See whether a specific operational right is granted to a process on a 999 * See whether a specific operational right is granted to a process on a
1001 * key. 1000 * key.
1002 * @key_ref refers to the key (key pointer + possession attribute bit). 1001 * @key_ref refers to the key (key pointer + possession attribute bit).
1003 * @context points to the process to provide the context against which to 1002 * @cred points to the credentials to provide the context against which to
1004 * evaluate the security data on the key. 1003 * evaluate the security data on the key.
1005 * @perm describes the combination of permissions required of this key. 1004 * @perm describes the combination of permissions required of this key.
1006 * Return 1 if permission granted, 0 if permission denied and -ve if the 1005 * Return 1 if permission granted, 0 if permission denied and -ve if the
@@ -1162,6 +1161,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1162 * @child process. 1161 * @child process.
1163 * Security modules may also want to perform a process tracing check 1162 * Security modules may also want to perform a process tracing check
1164 * during an execve in the set_security or apply_creds hooks of 1163 * during an execve in the bprm_set_creds hook of
1165 * binprm_security_ops if the process is being traced and its security 1165 * binprm_security_ops if the process is being traced and its security
1166 * attributes would be changed by the execve. 1166 * attributes would be changed by the execve.
1167 * @child contains the task_struct structure for the target process. 1167 * @child contains the task_struct structure for the target process.
@@ -1185,29 +1185,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
1185 * @inheritable contains the inheritable capability set. 1185 * @inheritable contains the inheritable capability set.
1186 * @permitted contains the permitted capability set. 1186 * @permitted contains the permitted capability set.
1187 * Return 0 if the capability sets were successfully obtained. 1187 * Return 0 if the capability sets were successfully obtained.
1188 * @capset_check: 1188 * @capset:
1189 * Check permission before setting the @effective, @inheritable, and
1190 * @permitted capability sets for the @target process.
1191 * Caveat: @target is also set to current if a set of processes is
1192 * specified (i.e. all processes other than current and init or a
1193 * particular process group). Hence, the capset_set hook may need to
1194 * revalidate permission to the actual target process.
1195 * @target contains the task_struct structure for target process.
1196 * @effective contains the effective capability set.
1197 * @inheritable contains the inheritable capability set.
1198 * @permitted contains the permitted capability set.
1199 * Return 0 if permission is granted.
1200 * @capset_set:
1201 * Set the @effective, @inheritable, and @permitted capability sets for 1189 * Set the @effective, @inheritable, and @permitted capability sets for
1202 * the @target process. Since capset_check cannot always check permission 1190 * the current process.
1203 * to the real @target process, this hook may also perform permission 1191 * @new contains the new credentials structure for target process.
1204 * checking to determine if the current process is allowed to set the 1192 * @old contains the current credentials structure for target process.
1205 * capability sets of the @target process. However, this hook has no way
1206 * of returning an error due to the structure of the sys_capset code.
1207 * @target contains the task_struct structure for target process.
1208 * @effective contains the effective capability set. 1193 * @effective contains the effective capability set.
1209 * @inheritable contains the inheritable capability set. 1194 * @inheritable contains the inheritable capability set.
1210 * @permitted contains the permitted capability set. 1195 * @permitted contains the permitted capability set.
1196 * Return 0 and update @new if permission is granted.
1211 * @capable: 1197 * @capable:
1212 * Check whether the @tsk process has the @cap capability. 1198 * Check whether the @tsk process has the @cap capability.
1213 * @tsk contains the task_struct for the process. 1199 * @tsk contains the task_struct for the process.
@@ -1299,15 +1285,12 @@ struct security_operations {
1299 int (*capget) (struct task_struct *target, 1285 int (*capget) (struct task_struct *target,
1300 kernel_cap_t *effective, 1286 kernel_cap_t *effective,
1301 kernel_cap_t *inheritable, kernel_cap_t *permitted); 1287 kernel_cap_t *inheritable, kernel_cap_t *permitted);
1302 int (*capset_check) (struct task_struct *target, 1288 int (*capset) (struct cred *new,
1303 kernel_cap_t *effective, 1289 const struct cred *old,
1304 kernel_cap_t *inheritable, 1290 const kernel_cap_t *effective,
1305 kernel_cap_t *permitted); 1291 const kernel_cap_t *inheritable,
1306 void (*capset_set) (struct task_struct *target, 1292 const kernel_cap_t *permitted);
1307 kernel_cap_t *effective, 1293 int (*capable) (struct task_struct *tsk, int cap, int audit);
1308 kernel_cap_t *inheritable,
1309 kernel_cap_t *permitted);
1310 int (*capable) (struct task_struct *tsk, int cap);
1311 int (*acct) (struct file *file); 1294 int (*acct) (struct file *file);
1312 int (*sysctl) (struct ctl_table *table, int op); 1295 int (*sysctl) (struct ctl_table *table, int op);
1313 int (*quotactl) (int cmds, int type, int id, struct super_block *sb); 1296 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
@@ -1316,18 +1299,16 @@ struct security_operations {
1316 int (*settime) (struct timespec *ts, struct timezone *tz); 1299 int (*settime) (struct timespec *ts, struct timezone *tz);
1317 int (*vm_enough_memory) (struct mm_struct *mm, long pages); 1300 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
1318 1301
1319 int (*bprm_alloc_security) (struct linux_binprm *bprm); 1302 int (*bprm_set_creds) (struct linux_binprm *bprm);
1320 void (*bprm_free_security) (struct linux_binprm *bprm);
1321 void (*bprm_apply_creds) (struct linux_binprm *bprm, int unsafe);
1322 void (*bprm_post_apply_creds) (struct linux_binprm *bprm);
1323 int (*bprm_set_security) (struct linux_binprm *bprm);
1324 int (*bprm_check_security) (struct linux_binprm *bprm); 1303 int (*bprm_check_security) (struct linux_binprm *bprm);
1325 int (*bprm_secureexec) (struct linux_binprm *bprm); 1304 int (*bprm_secureexec) (struct linux_binprm *bprm);
1305 void (*bprm_committing_creds) (struct linux_binprm *bprm);
1306 void (*bprm_committed_creds) (struct linux_binprm *bprm);
1326 1307
1327 int (*sb_alloc_security) (struct super_block *sb); 1308 int (*sb_alloc_security) (struct super_block *sb);
1328 void (*sb_free_security) (struct super_block *sb); 1309 void (*sb_free_security) (struct super_block *sb);
1329 int (*sb_copy_data) (char *orig, char *copy); 1310 int (*sb_copy_data) (char *orig, char *copy);
1330 int (*sb_kern_mount) (struct super_block *sb, void *data); 1311 int (*sb_kern_mount) (struct super_block *sb, int flags, void *data);
1331 int (*sb_show_options) (struct seq_file *m, struct super_block *sb); 1312 int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
1332 int (*sb_statfs) (struct dentry *dentry); 1313 int (*sb_statfs) (struct dentry *dentry);
1333 int (*sb_mount) (char *dev_name, struct path *path, 1314 int (*sb_mount) (char *dev_name, struct path *path,
@@ -1406,14 +1387,18 @@ struct security_operations {
1406 int (*file_send_sigiotask) (struct task_struct *tsk, 1387 int (*file_send_sigiotask) (struct task_struct *tsk,
1407 struct fown_struct *fown, int sig); 1388 struct fown_struct *fown, int sig);
1408 int (*file_receive) (struct file *file); 1389 int (*file_receive) (struct file *file);
1409 int (*dentry_open) (struct file *file); 1390 int (*dentry_open) (struct file *file, const struct cred *cred);
1410 1391
1411 int (*task_create) (unsigned long clone_flags); 1392 int (*task_create) (unsigned long clone_flags);
1412 int (*task_alloc_security) (struct task_struct *p); 1393 void (*cred_free) (struct cred *cred);
1413 void (*task_free_security) (struct task_struct *p); 1394 int (*cred_prepare)(struct cred *new, const struct cred *old,
1395 gfp_t gfp);
1396 void (*cred_commit)(struct cred *new, const struct cred *old);
1397 int (*kernel_act_as)(struct cred *new, u32 secid);
1398 int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
1414 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags); 1399 int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
1415 int (*task_post_setuid) (uid_t old_ruid /* or fsuid */ , 1400 int (*task_fix_setuid) (struct cred *new, const struct cred *old,
1416 uid_t old_euid, uid_t old_suid, int flags); 1401 int flags);
1417 int (*task_setgid) (gid_t id0, gid_t id1, gid_t id2, int flags); 1402 int (*task_setgid) (gid_t id0, gid_t id1, gid_t id2, int flags);
1418 int (*task_setpgid) (struct task_struct *p, pid_t pgid); 1403 int (*task_setpgid) (struct task_struct *p, pid_t pgid);
1419 int (*task_getpgid) (struct task_struct *p); 1404 int (*task_getpgid) (struct task_struct *p);
@@ -1433,8 +1418,7 @@ struct security_operations {
1433 int (*task_wait) (struct task_struct *p); 1418 int (*task_wait) (struct task_struct *p);
1434 int (*task_prctl) (int option, unsigned long arg2, 1419 int (*task_prctl) (int option, unsigned long arg2,
1435 unsigned long arg3, unsigned long arg4, 1420 unsigned long arg3, unsigned long arg4,
1436 unsigned long arg5, long *rc_p); 1421 unsigned long arg5);
1437 void (*task_reparent_to_init) (struct task_struct *p);
1438 void (*task_to_inode) (struct task_struct *p, struct inode *inode); 1422 void (*task_to_inode) (struct task_struct *p, struct inode *inode);
1439 1423
1440 int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag); 1424 int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag);
@@ -1539,10 +1523,10 @@ struct security_operations {
1539 1523
1540 /* key management security hooks */ 1524 /* key management security hooks */
1541#ifdef CONFIG_KEYS 1525#ifdef CONFIG_KEYS
1542 int (*key_alloc) (struct key *key, struct task_struct *tsk, unsigned long flags); 1526 int (*key_alloc) (struct key *key, const struct cred *cred, unsigned long flags);
1543 void (*key_free) (struct key *key); 1527 void (*key_free) (struct key *key);
1544 int (*key_permission) (key_ref_t key_ref, 1528 int (*key_permission) (key_ref_t key_ref,
1545 struct task_struct *context, 1529 const struct cred *cred,
1546 key_perm_t perm); 1530 key_perm_t perm);
1547 int (*key_getsecurity)(struct key *key, char **_buffer); 1531 int (*key_getsecurity)(struct key *key, char **_buffer);
1548#endif /* CONFIG_KEYS */ 1532#endif /* CONFIG_KEYS */
@@ -1568,15 +1552,12 @@ int security_capget(struct task_struct *target,
1568 kernel_cap_t *effective, 1552 kernel_cap_t *effective,
1569 kernel_cap_t *inheritable, 1553 kernel_cap_t *inheritable,
1570 kernel_cap_t *permitted); 1554 kernel_cap_t *permitted);
1571int security_capset_check(struct task_struct *target, 1555int security_capset(struct cred *new, const struct cred *old,
1572 kernel_cap_t *effective, 1556 const kernel_cap_t *effective,
1573 kernel_cap_t *inheritable, 1557 const kernel_cap_t *inheritable,
1574 kernel_cap_t *permitted); 1558 const kernel_cap_t *permitted);
1575void security_capset_set(struct task_struct *target,
1576 kernel_cap_t *effective,
1577 kernel_cap_t *inheritable,
1578 kernel_cap_t *permitted);
1579int security_capable(struct task_struct *tsk, int cap); 1559int security_capable(struct task_struct *tsk, int cap);
1560int security_capable_noaudit(struct task_struct *tsk, int cap);
1580int security_acct(struct file *file); 1561int security_acct(struct file *file);
1581int security_sysctl(struct ctl_table *table, int op); 1562int security_sysctl(struct ctl_table *table, int op);
1582int security_quotactl(int cmds, int type, int id, struct super_block *sb); 1563int security_quotactl(int cmds, int type, int id, struct super_block *sb);
@@ -1585,17 +1566,16 @@ int security_syslog(int type);
1585int security_settime(struct timespec *ts, struct timezone *tz); 1566int security_settime(struct timespec *ts, struct timezone *tz);
1586int security_vm_enough_memory(long pages); 1567int security_vm_enough_memory(long pages);
1587int security_vm_enough_memory_mm(struct mm_struct *mm, long pages); 1568int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
1588int security_bprm_alloc(struct linux_binprm *bprm); 1569int security_vm_enough_memory_kern(long pages);
1589void security_bprm_free(struct linux_binprm *bprm); 1570int security_bprm_set_creds(struct linux_binprm *bprm);
1590void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe);
1591void security_bprm_post_apply_creds(struct linux_binprm *bprm);
1592int security_bprm_set(struct linux_binprm *bprm);
1593int security_bprm_check(struct linux_binprm *bprm); 1571int security_bprm_check(struct linux_binprm *bprm);
1572void security_bprm_committing_creds(struct linux_binprm *bprm);
1573void security_bprm_committed_creds(struct linux_binprm *bprm);
1594int security_bprm_secureexec(struct linux_binprm *bprm); 1574int security_bprm_secureexec(struct linux_binprm *bprm);
1595int security_sb_alloc(struct super_block *sb); 1575int security_sb_alloc(struct super_block *sb);
1596void security_sb_free(struct super_block *sb); 1576void security_sb_free(struct super_block *sb);
1597int security_sb_copy_data(char *orig, char *copy); 1577int security_sb_copy_data(char *orig, char *copy);
1598int security_sb_kern_mount(struct super_block *sb, void *data); 1578int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
1599int security_sb_show_options(struct seq_file *m, struct super_block *sb); 1579int security_sb_show_options(struct seq_file *m, struct super_block *sb);
1600int security_sb_statfs(struct dentry *dentry); 1580int security_sb_statfs(struct dentry *dentry);
1601int security_sb_mount(char *dev_name, struct path *path, 1581int security_sb_mount(char *dev_name, struct path *path,
@@ -1662,13 +1642,16 @@ int security_file_set_fowner(struct file *file);
1662int security_file_send_sigiotask(struct task_struct *tsk, 1642int security_file_send_sigiotask(struct task_struct *tsk,
1663 struct fown_struct *fown, int sig); 1643 struct fown_struct *fown, int sig);
1664int security_file_receive(struct file *file); 1644int security_file_receive(struct file *file);
1665int security_dentry_open(struct file *file); 1645int security_dentry_open(struct file *file, const struct cred *cred);
1666int security_task_create(unsigned long clone_flags); 1646int security_task_create(unsigned long clone_flags);
1667int security_task_alloc(struct task_struct *p); 1647void security_cred_free(struct cred *cred);
1668void security_task_free(struct task_struct *p); 1648int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
1649void security_commit_creds(struct cred *new, const struct cred *old);
1650int security_kernel_act_as(struct cred *new, u32 secid);
1651int security_kernel_create_files_as(struct cred *new, struct inode *inode);
1669int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags); 1652int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
1670int security_task_post_setuid(uid_t old_ruid, uid_t old_euid, 1653int security_task_fix_setuid(struct cred *new, const struct cred *old,
1671 uid_t old_suid, int flags); 1654 int flags);
1672int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags); 1655int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags);
1673int security_task_setpgid(struct task_struct *p, pid_t pgid); 1656int security_task_setpgid(struct task_struct *p, pid_t pgid);
1674int security_task_getpgid(struct task_struct *p); 1657int security_task_getpgid(struct task_struct *p);
@@ -1687,8 +1670,7 @@ int security_task_kill(struct task_struct *p, struct siginfo *info,
1687 int sig, u32 secid); 1670 int sig, u32 secid);
1688int security_task_wait(struct task_struct *p); 1671int security_task_wait(struct task_struct *p);
1689int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, 1672int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
1690 unsigned long arg4, unsigned long arg5, long *rc_p); 1673 unsigned long arg4, unsigned long arg5);
1691void security_task_reparent_to_init(struct task_struct *p);
1692void security_task_to_inode(struct task_struct *p, struct inode *inode); 1674void security_task_to_inode(struct task_struct *p, struct inode *inode);
1693int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); 1675int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
1694void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid); 1676void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
@@ -1763,25 +1745,23 @@ static inline int security_capget(struct task_struct *target,
1763 return cap_capget(target, effective, inheritable, permitted); 1745 return cap_capget(target, effective, inheritable, permitted);
1764} 1746}
1765 1747
1766static inline int security_capset_check(struct task_struct *target, 1748static inline int security_capset(struct cred *new,
1767 kernel_cap_t *effective, 1749 const struct cred *old,
1768 kernel_cap_t *inheritable, 1750 const kernel_cap_t *effective,
1769 kernel_cap_t *permitted) 1751 const kernel_cap_t *inheritable,
1752 const kernel_cap_t *permitted)
1770{ 1753{
1771 return cap_capset_check(target, effective, inheritable, permitted); 1754 return cap_capset(new, old, effective, inheritable, permitted);
1772} 1755}
1773 1756
1774static inline void security_capset_set(struct task_struct *target, 1757static inline int security_capable(struct task_struct *tsk, int cap)
1775 kernel_cap_t *effective,
1776 kernel_cap_t *inheritable,
1777 kernel_cap_t *permitted)
1778{ 1758{
1779 cap_capset_set(target, effective, inheritable, permitted); 1759 return cap_capable(tsk, cap, SECURITY_CAP_AUDIT);
1780} 1760}
1781 1761
1782static inline int security_capable(struct task_struct *tsk, int cap) 1762static inline int security_capable_noaudit(struct task_struct *tsk, int cap)
1783{ 1763{
1784 return cap_capable(tsk, cap); 1764 return cap_capable(tsk, cap, SECURITY_CAP_NOAUDIT);
1785} 1765}
1786 1766
1787static inline int security_acct(struct file *file) 1767static inline int security_acct(struct file *file)
@@ -1817,40 +1797,39 @@ static inline int security_settime(struct timespec *ts, struct timezone *tz)
1817 1797
1818static inline int security_vm_enough_memory(long pages) 1798static inline int security_vm_enough_memory(long pages)
1819{ 1799{
1800 WARN_ON(current->mm == NULL);
1820 return cap_vm_enough_memory(current->mm, pages); 1801 return cap_vm_enough_memory(current->mm, pages);
1821} 1802}
1822 1803
1823static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) 1804static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
1824{ 1805{
1806 WARN_ON(mm == NULL);
1825 return cap_vm_enough_memory(mm, pages); 1807 return cap_vm_enough_memory(mm, pages);
1826} 1808}
1827 1809
1828static inline int security_bprm_alloc(struct linux_binprm *bprm) 1810static inline int security_vm_enough_memory_kern(long pages)
1829{ 1811{
1830 return 0; 1812 /* If current is a kernel thread then current->mm will be NULL;
1813 passing NULL in that specific case is fine */
1814 return cap_vm_enough_memory(current->mm, pages);
1831} 1815}
1832 1816
1833static inline void security_bprm_free(struct linux_binprm *bprm) 1817static inline int security_bprm_set_creds(struct linux_binprm *bprm)
1834{ }
1835
1836static inline void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe)
1837{ 1818{
1838 cap_bprm_apply_creds(bprm, unsafe); 1819 return cap_bprm_set_creds(bprm);
1839} 1820}
1840 1821
1841static inline void security_bprm_post_apply_creds(struct linux_binprm *bprm) 1822static inline int security_bprm_check(struct linux_binprm *bprm)
1842{ 1823{
1843 return; 1824 return 0;
1844} 1825}
1845 1826
1846static inline int security_bprm_set(struct linux_binprm *bprm) 1827static inline void security_bprm_committing_creds(struct linux_binprm *bprm)
1847{ 1828{
1848 return cap_bprm_set_security(bprm);
1849} 1829}
1850 1830
1851static inline int security_bprm_check(struct linux_binprm *bprm) 1831static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
1852{ 1832{
1853 return 0;
1854} 1833}
1855 1834
1856static inline int security_bprm_secureexec(struct linux_binprm *bprm) 1835static inline int security_bprm_secureexec(struct linux_binprm *bprm)
@@ -1871,7 +1850,7 @@ static inline int security_sb_copy_data(char *orig, char *copy)
1871 return 0; 1850 return 0;
1872} 1851}
1873 1852
1874static inline int security_sb_kern_mount(struct super_block *sb, void *data) 1853static inline int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
1875{ 1854{
1876 return 0; 1855 return 0;
1877} 1856}
@@ -2167,7 +2146,8 @@ static inline int security_file_receive(struct file *file)
2167 return 0; 2146 return 0;
2168} 2147}
2169 2148
2170static inline int security_dentry_open(struct file *file) 2149static inline int security_dentry_open(struct file *file,
2150 const struct cred *cred)
2171{ 2151{
2172 return 0; 2152 return 0;
2173} 2153}
@@ -2177,13 +2157,31 @@ static inline int security_task_create(unsigned long clone_flags)
2177 return 0; 2157 return 0;
2178} 2158}
2179 2159
2180static inline int security_task_alloc(struct task_struct *p) 2160static inline void security_cred_free(struct cred *cred)
2161{ }
2162
2163static inline int security_prepare_creds(struct cred *new,
2164 const struct cred *old,
2165 gfp_t gfp)
2181{ 2166{
2182 return 0; 2167 return 0;
2183} 2168}
2184 2169
2185static inline void security_task_free(struct task_struct *p) 2170static inline void security_commit_creds(struct cred *new,
2186{ } 2171 const struct cred *old)
2172{
2173}
2174
2175static inline int security_kernel_act_as(struct cred *cred, u32 secid)
2176{
2177 return 0;
2178}
2179
2180static inline int security_kernel_create_files_as(struct cred *cred,
2181 struct inode *inode)
2182{
2183 return 0;
2184}
2187 2185
2188static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, 2186static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
2189 int flags) 2187 int flags)
@@ -2191,10 +2189,11 @@ static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
2191 return 0; 2189 return 0;
2192} 2190}
2193 2191
2194static inline int security_task_post_setuid(uid_t old_ruid, uid_t old_euid, 2192static inline int security_task_fix_setuid(struct cred *new,
2195 uid_t old_suid, int flags) 2193 const struct cred *old,
2194 int flags)
2196{ 2195{
2197 return cap_task_post_setuid(old_ruid, old_euid, old_suid, flags); 2196 return cap_task_fix_setuid(new, old, flags);
2198} 2197}
2199 2198
2200static inline int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, 2199static inline int security_task_setgid(gid_t id0, gid_t id1, gid_t id2,
@@ -2281,14 +2280,9 @@ static inline int security_task_wait(struct task_struct *p)
2281static inline int security_task_prctl(int option, unsigned long arg2, 2280static inline int security_task_prctl(int option, unsigned long arg2,
2282 unsigned long arg3, 2281 unsigned long arg3,
2283 unsigned long arg4, 2282 unsigned long arg4,
2284 unsigned long arg5, long *rc_p) 2283 unsigned long arg5)
2285{
2286 return cap_task_prctl(option, arg2, arg3, arg4, arg5, rc_p);
2287}
2288
2289static inline void security_task_reparent_to_init(struct task_struct *p)
2290{ 2284{
2291 cap_task_reparent_to_init(p); 2285 return cap_task_prctl(option, arg2, arg3, arg4, arg5);
2292} 2286}
2293 2287
2294static inline void security_task_to_inode(struct task_struct *p, struct inode *inode) 2288static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
@@ -2714,16 +2708,16 @@ static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi
2714#ifdef CONFIG_KEYS 2708#ifdef CONFIG_KEYS
2715#ifdef CONFIG_SECURITY 2709#ifdef CONFIG_SECURITY
2716 2710
2717int security_key_alloc(struct key *key, struct task_struct *tsk, unsigned long flags); 2711int security_key_alloc(struct key *key, const struct cred *cred, unsigned long flags);
2718void security_key_free(struct key *key); 2712void security_key_free(struct key *key);
2719int security_key_permission(key_ref_t key_ref, 2713int security_key_permission(key_ref_t key_ref,
2720 struct task_struct *context, key_perm_t perm); 2714 const struct cred *cred, key_perm_t perm);
2721int security_key_getsecurity(struct key *key, char **_buffer); 2715int security_key_getsecurity(struct key *key, char **_buffer);
2722 2716
2723#else 2717#else
2724 2718
2725static inline int security_key_alloc(struct key *key, 2719static inline int security_key_alloc(struct key *key,
2726 struct task_struct *tsk, 2720 const struct cred *cred,
2727 unsigned long flags) 2721 unsigned long flags)
2728{ 2722{
2729 return 0; 2723 return 0;
@@ -2734,7 +2728,7 @@ static inline void security_key_free(struct key *key)
2734} 2728}
2735 2729
2736static inline int security_key_permission(key_ref_t key_ref, 2730static inline int security_key_permission(key_ref_t key_ref,
2737 struct task_struct *context, 2731 const struct cred *cred,
2738 key_perm_t perm) 2732 key_perm_t perm)
2739{ 2733{
2740 return 0; 2734 return 0;
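
The security.h changes above fold the old capset_check/capset_set pair and the task-based capable() hook into credential-based variants that operate on a prepared copy of the caller's credentials. A minimal sketch of how a caller would now drive the capset path, assuming the prepare_creds()/commit_creds()/abort_creds() helpers added by the companion <linux/cred.h> changes (the sketch itself is not part of this patch):

#include <linux/cred.h>
#include <linux/security.h>
#include <linux/errno.h>

static int example_set_caps(const kernel_cap_t *effective,
			    const kernel_cap_t *inheritable,
			    const kernel_cap_t *permitted)
{
	struct cred *new;
	int ret;

	new = prepare_creds();		/* writable copy of current credentials */
	if (!new)
		return -ENOMEM;

	/* One hook now both checks permission and updates the copy. */
	ret = security_capset(new, current_cred(),
			      effective, inheritable, permitted);
	if (ret < 0) {
		abort_creds(new);	/* discard the unused copy */
		return ret;
	}

	return commit_creds(new);	/* install the new credentials */
}
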
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index a1783b229ef4..b3dfa72f13b9 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -34,6 +34,7 @@ struct seq_operations {
34 34
35#define SEQ_SKIP 1 35#define SEQ_SKIP 1
36 36
37char *mangle_path(char *s, char *p, char *esc);
37int seq_open(struct file *, const struct seq_operations *); 38int seq_open(struct file *, const struct seq_operations *);
38ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); 39ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
39loff_t seq_lseek(struct file *, loff_t, int); 40loff_t seq_lseek(struct file *, loff_t, int);
@@ -60,6 +61,19 @@ static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
60 return seq_bitmap(m, mask->bits, MAX_NUMNODES); 61 return seq_bitmap(m, mask->bits, MAX_NUMNODES);
61} 62}
62 63
64int seq_bitmap_list(struct seq_file *m, unsigned long *bits,
65 unsigned int nr_bits);
66
67static inline int seq_cpumask_list(struct seq_file *m, cpumask_t *mask)
68{
69 return seq_bitmap_list(m, mask->bits, NR_CPUS);
70}
71
72static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
73{
74 return seq_bitmap_list(m, mask->bits, MAX_NUMNODES);
75}
76
63int single_open(struct file *, int (*)(struct seq_file *, void *), void *); 77int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
64int single_release(struct inode *, struct file *); 78int single_release(struct inode *, struct file *);
65void *__seq_open_private(struct file *, const struct seq_operations *, int); 79void *__seq_open_private(struct file *, const struct seq_operations *, int);
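
The new seq_bitmap_list() helpers print a bitmap as a ranged list ("0-3,5") rather than as a hex mask. A hypothetical show routine using the cpumask wrapper, which a caller would wire up with single_open() as usual (the file it backs is invented):

#include <linux/seq_file.h>
#include <linux/cpumask.h>

static int example_cpus_show(struct seq_file *m, void *v)
{
	seq_puts(m, "online: ");
	seq_cpumask_list(m, &cpu_online_map);	/* prints e.g. "0-3,6" */
	seq_putc(m, '\n');
	return 0;
}
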
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index e27f216361fc..feb3b939ec4b 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -155,6 +155,11 @@
155 155
156#define PORT_SC26XX 82 156#define PORT_SC26XX 82
157 157
158/* SH-SCI */
159#define PORT_SCIFA 83
160
161#define PORT_S3C6400 84
162
158#ifdef __KERNEL__ 163#ifdef __KERNEL__
159 164
160#include <linux/compiler.h> 165#include <linux/compiler.h>
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h
new file mode 100644
index 000000000000..68e212ff9dde
--- /dev/null
+++ b/include/linux/sh_intc.h
@@ -0,0 +1,91 @@
1#ifndef __SH_INTC_H
2#define __SH_INTC_H
3
4typedef unsigned char intc_enum;
5
6struct intc_vect {
7 intc_enum enum_id;
8 unsigned short vect;
9};
10
11#define INTC_VECT(enum_id, vect) { enum_id, vect }
12#define INTC_IRQ(enum_id, irq) INTC_VECT(enum_id, irq2evt(irq))
13
14struct intc_group {
15 intc_enum enum_id;
16 intc_enum enum_ids[32];
17};
18
19#define INTC_GROUP(enum_id, ids...) { enum_id, { ids } }
20
21struct intc_mask_reg {
22 unsigned long set_reg, clr_reg, reg_width;
23 intc_enum enum_ids[32];
24#ifdef CONFIG_SMP
25 unsigned long smp;
26#endif
27};
28
29struct intc_prio_reg {
30 unsigned long set_reg, clr_reg, reg_width, field_width;
31 intc_enum enum_ids[16];
32#ifdef CONFIG_SMP
33 unsigned long smp;
34#endif
35};
36
37struct intc_sense_reg {
38 unsigned long reg, reg_width, field_width;
39 intc_enum enum_ids[16];
40};
41
42#ifdef CONFIG_SMP
43#define INTC_SMP(stride, nr) .smp = (stride) | ((nr) << 8)
44#else
45#define INTC_SMP(stride, nr)
46#endif
47
48struct intc_desc {
49 struct intc_vect *vectors;
50 unsigned int nr_vectors;
51 struct intc_group *groups;
52 unsigned int nr_groups;
53 struct intc_mask_reg *mask_regs;
54 unsigned int nr_mask_regs;
55 struct intc_prio_reg *prio_regs;
56 unsigned int nr_prio_regs;
57 struct intc_sense_reg *sense_regs;
58 unsigned int nr_sense_regs;
59 char *name;
60#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
61 struct intc_mask_reg *ack_regs;
62 unsigned int nr_ack_regs;
63#endif
64};
65
66#define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a)
67#define DECLARE_INTC_DESC(symbol, chipname, vectors, groups, \
68 mask_regs, prio_regs, sense_regs) \
69struct intc_desc symbol __initdata = { \
70 _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \
71 _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \
72 _INTC_ARRAY(sense_regs), \
73 chipname, \
74}
75
76#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
77#define DECLARE_INTC_DESC_ACK(symbol, chipname, vectors, groups, \
78 mask_regs, prio_regs, sense_regs, ack_regs) \
79struct intc_desc symbol __initdata = { \
80 _INTC_ARRAY(vectors), _INTC_ARRAY(groups), \
81 _INTC_ARRAY(mask_regs), _INTC_ARRAY(prio_regs), \
82 _INTC_ARRAY(sense_regs), \
83 chipname, \
84 _INTC_ARRAY(ack_regs), \
85}
86#endif
87
88void __init register_intc_controller(struct intc_desc *desc);
89int intc_set_priority(unsigned int irq, unsigned int prio);
90
91#endif /* __SH_INTC_H */
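
A hypothetical controller description using the new descriptor API; the enum ids, vector numbers and register addresses below are invented for illustration, and only the structures, macros and registration call come from the header above:

#include <linux/init.h>
#include <linux/sh_intc.h>

enum { UNUSED = 0, TMU0, TMU1, RTC_ATI, RTC_PRI, RTC };

static struct intc_vect vectors[] __initdata = {
	INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420),
	INTC_VECT(RTC_ATI, 0x480), INTC_VECT(RTC_PRI, 0x4a0),
};

static struct intc_group groups[] __initdata = {
	INTC_GROUP(RTC, RTC_ATI, RTC_PRI),
};

static struct intc_mask_reg mask_registers[] __initdata = {
	/* set_reg, clr_reg, reg_width, enum_ids */
	{ 0xffd00044, 0xffd00064, 32, { TMU0, TMU1, RTC_ATI, RTC_PRI } },
};

static struct intc_prio_reg prio_registers[] __initdata = {
	/* set_reg, clr_reg, reg_width, field_width, enum_ids */
	{ 0xffd00004, 0, 16, 4, { TMU0, TMU1, RTC } },
};

static DECLARE_INTC_DESC(intc_desc, "example", vectors, groups,
			 mask_registers, prio_registers, NULL);

void __init example_plat_irq_setup(void)
{
	register_intc_controller(&intc_desc);
}
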
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 2725f4e5a9bf..cf2cb50f77d1 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -250,6 +250,9 @@ typedef unsigned char *sk_buff_data_t;
250 * @tc_verd: traffic control verdict 250 * @tc_verd: traffic control verdict
251 * @ndisc_nodetype: router type (from link layer) 251 * @ndisc_nodetype: router type (from link layer)
252 * @do_not_encrypt: set to prevent encryption of this frame 252 * @do_not_encrypt: set to prevent encryption of this frame
253 * @requeue: set to indicate that the wireless core should attempt
254 * a software retry on this frame if we failed to
255 * receive an ACK for it
253 * @dma_cookie: a cookie to one of several possible DMA operations 256 * @dma_cookie: a cookie to one of several possible DMA operations
254 * done by skb DMA functions 257 * done by skb DMA functions
255 * @secmark: security marking 258 * @secmark: security marking
@@ -269,8 +272,9 @@ struct sk_buff {
269 struct dst_entry *dst; 272 struct dst_entry *dst;
270 struct rtable *rtable; 273 struct rtable *rtable;
271 }; 274 };
275#ifdef CONFIG_XFRM
272 struct sec_path *sp; 276 struct sec_path *sp;
273 277#endif
274 /* 278 /*
275 * This is the control buffer. It is free to use for every 279 * This is the control buffer. It is free to use for every
276 * layer. Please put your private variables there. If you 280 * layer. Please put your private variables there. If you
@@ -325,6 +329,7 @@ struct sk_buff {
325#endif 329#endif
326#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE) 330#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
327 __u8 do_not_encrypt:1; 331 __u8 do_not_encrypt:1;
332 __u8 requeue:1;
328#endif 333#endif
329 /* 0/13/14 bit hole */ 334 /* 0/13/14 bit hole */
330 335
@@ -488,6 +493,19 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
488} 493}
489 494
490/** 495/**
496 * skb_queue_is_first - check if skb is the first entry in the queue
497 * @list: queue head
498 * @skb: buffer
499 *
500 * Returns true if @skb is the first buffer on the list.
501 */
502static inline bool skb_queue_is_first(const struct sk_buff_head *list,
503 const struct sk_buff *skb)
504{
505 return (skb->prev == (struct sk_buff *) list);
506}
507
508/**
491 * skb_queue_next - return the next packet in the queue 509 * skb_queue_next - return the next packet in the queue
492 * @list: queue head 510 * @list: queue head
493 * @skb: current buffer 511 * @skb: current buffer
@@ -506,6 +524,24 @@ static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
506} 524}
507 525
508/** 526/**
527 * skb_queue_prev - return the prev packet in the queue
528 * @list: queue head
529 * @skb: current buffer
530 *
531 * Return the prev packet in @list before @skb. It is only valid to
532 * call this if skb_queue_is_first() evaluates to false.
533 */
534static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
535 const struct sk_buff *skb)
536{
537 /* This BUG_ON may seem severe, but if we just return then we
538 * are going to dereference garbage.
539 */
540 BUG_ON(skb_queue_is_first(list, skb));
541 return skb->prev;
542}
543
544/**
509 * skb_get - reference buffer 545 * skb_get - reference buffer
510 * @skb: buffer to reference 546 * @skb: buffer to reference
511 * 547 *
@@ -1647,8 +1683,12 @@ extern int skb_splice_bits(struct sk_buff *skb,
1647extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to); 1683extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
1648extern void skb_split(struct sk_buff *skb, 1684extern void skb_split(struct sk_buff *skb,
1649 struct sk_buff *skb1, const u32 len); 1685 struct sk_buff *skb1, const u32 len);
1686extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
1687 int shiftlen);
1650 1688
1651extern struct sk_buff *skb_segment(struct sk_buff *skb, int features); 1689extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
1690extern int skb_gro_receive(struct sk_buff **head,
1691 struct sk_buff *skb);
1652 1692
1653static inline void *skb_header_pointer(const struct sk_buff *skb, int offset, 1693static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
1654 int len, void *buffer) 1694 int len, void *buffer)
@@ -1864,6 +1904,18 @@ static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_bu
1864 to->queue_mapping = from->queue_mapping; 1904 to->queue_mapping = from->queue_mapping;
1865} 1905}
1866 1906
1907#ifdef CONFIG_XFRM
1908static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
1909{
1910 return skb->sp;
1911}
1912#else
1913static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
1914{
1915 return NULL;
1916}
1917#endif
1918
1867static inline int skb_is_gso(const struct sk_buff *skb) 1919static inline int skb_is_gso(const struct sk_buff *skb)
1868{ 1920{
1869 return skb_shinfo(skb)->gso_size; 1921 return skb_shinfo(skb)->gso_size;
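
The new skb_queue_is_first()/skb_queue_prev() helpers mirror the existing is_last/next pair and allow a queue to be walked from the tail. A minimal sketch; locking of the queue is the caller's responsibility:

#include <linux/skbuff.h>

static void example_walk_backwards(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);

	if (!skb)
		return;				/* empty queue */

	for (;;) {
		/* ... inspect skb ... */
		if (skb_queue_is_first(list, skb))
			break;			/* skb_queue_prev() must not be called here */
		skb = skb_queue_prev(list, skb);
	}
}
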
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5ff9676c1e2c..f96d13c281e8 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -23,6 +23,34 @@
23#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ 23#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
24#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ 24#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
25#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ 25#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
26/*
27 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
28 *
29 * This delays freeing the SLAB page by a grace period, it does _NOT_
30 * delay object freeing. This means that if you do kmem_cache_free()
31 * that memory location is free to be reused at any time. Thus it may
32 * be possible to see another object there in the same RCU grace period.
33 *
34 * This feature only ensures the memory location backing the object
35 * stays valid, the trick to using this is relying on an independent
36 * object validation pass. Something like:
37 *
38 * rcu_read_lock()
39 * again:
40 * obj = lockless_lookup(key);
41 * if (obj) {
42 * if (!try_get_ref(obj)) // might fail for free objects
43 * goto again;
44 *
45 * if (obj->key != key) { // not the object we expected
46 * put_ref(obj);
47 * goto again;
48 * }
49 * }
50 * rcu_read_unlock();
51 *
52 * See also the comment on struct slab_rcu in mm/slab.c.
53 */
26#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ 54#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
27#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 55#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
28#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ 56#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
@@ -225,9 +253,9 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
225 * request comes from. 253 * request comes from.
226 */ 254 */
227#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) 255#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
228extern void *__kmalloc_track_caller(size_t, gfp_t, void*); 256extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
229#define kmalloc_track_caller(size, flags) \ 257#define kmalloc_track_caller(size, flags) \
230 __kmalloc_track_caller(size, flags, __builtin_return_address(0)) 258 __kmalloc_track_caller(size, flags, _RET_IP_)
231#else 259#else
232#define kmalloc_track_caller(size, flags) \ 260#define kmalloc_track_caller(size, flags) \
233 __kmalloc(size, flags) 261 __kmalloc(size, flags)
@@ -243,10 +271,10 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
243 * allocation request comes from. 271 * allocation request comes from.
244 */ 272 */
245#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) 273#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
246extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *); 274extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
247#define kmalloc_node_track_caller(size, flags, node) \ 275#define kmalloc_node_track_caller(size, flags, node) \
248 __kmalloc_node_track_caller(size, flags, node, \ 276 __kmalloc_node_track_caller(size, flags, node, \
249 __builtin_return_address(0)) 277 _RET_IP_)
250#else 278#else
251#define kmalloc_node_track_caller(size, flags, node) \ 279#define kmalloc_node_track_caller(size, flags, node) \
252 __kmalloc_node(size, flags, node) 280 __kmalloc_node(size, flags, node)
@@ -257,7 +285,7 @@ extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
257#define kmalloc_node_track_caller(size, flags, node) \ 285#define kmalloc_node_track_caller(size, flags, node) \
258 kmalloc_track_caller(size, flags) 286 kmalloc_track_caller(size, flags)
259 287
260#endif /* DEBUG_SLAB */ 288#endif /* CONFIG_NUMA */
261 289
262/* 290/*
263 * Shortcuts 291 * Shortcuts
@@ -288,9 +316,4 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
288 return kmalloc_node(size, flags | __GFP_ZERO, node); 316 return kmalloc_node(size, flags | __GFP_ZERO, node);
289} 317}
290 318
291#ifdef CONFIG_SLABINFO
292extern const struct seq_operations slabinfo_op;
293ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
294#endif
295
296#endif /* _LINUX_SLAB_H */ 319#endif /* _LINUX_SLAB_H */
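
The SLAB_DESTROY_BY_RCU comment added above gives the lookup pattern only as pseudo-code. A slightly more concrete sketch, with a hypothetical object type, cache and lockless_lookup()/example_put() helpers standing in for the caller's own hash table and reference drop:

#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <asm/atomic.h>

struct example_obj {
	atomic_t	refcnt;
	unsigned long	key;
	/* ... payload ... */
};

static struct kmem_cache *example_cache;

/* Hypothetical helpers: a lock-free hash lookup and a reference drop. */
extern struct example_obj *lockless_lookup(unsigned long key);
extern void example_put(struct example_obj *obj);

static int __init example_cache_init(void)
{
	/* Pages backing the objects are freed only after an RCU grace
	 * period; the objects themselves may be reused immediately. */
	example_cache = kmem_cache_create("example_obj",
					  sizeof(struct example_obj), 0,
					  SLAB_DESTROY_BY_RCU, NULL);
	return example_cache ? 0 : -ENOMEM;
}

static struct example_obj *example_get(unsigned long key)
{
	struct example_obj *obj;

	rcu_read_lock();
again:
	obj = lockless_lookup(key);
	if (obj) {
		if (!atomic_inc_not_zero(&obj->refcnt))
			goto again;		/* raced with a free: retry */
		if (obj->key != key) {		/* slot was recycled: not ours */
			example_put(obj);
			goto again;
		}
	}
	rcu_read_unlock();
	return obj;
}
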
diff --git a/include/linux/smc911x.h b/include/linux/smc911x.h
index b58f54c24183..521f37143fae 100644
--- a/include/linux/smc911x.h
+++ b/include/linux/smc911x.h
@@ -7,6 +7,7 @@
7struct smc911x_platdata { 7struct smc911x_platdata {
8 unsigned long flags; 8 unsigned long flags;
9 unsigned long irq_flags; /* IRQF_... */ 9 unsigned long irq_flags; /* IRQF_... */
10 int irq_polarity;
10}; 11};
11 12
12#endif /* __SMC911X_H__ */ 13#endif /* __SMC911X_H__ */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 66484d4a8459..6e7ba16ff454 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/errno.h> 9#include <linux/errno.h>
10#include <linux/types.h>
10#include <linux/list.h> 11#include <linux/list.h>
11#include <linux/cpumask.h> 12#include <linux/cpumask.h>
12 13
@@ -16,7 +17,8 @@ struct call_single_data {
16 struct list_head list; 17 struct list_head list;
17 void (*func) (void *info); 18 void (*func) (void *info);
18 void *info; 19 void *info;
19 unsigned int flags; 20 u16 flags;
21 u16 priv;
20}; 22};
21 23
22#ifdef CONFIG_SMP 24#ifdef CONFIG_SMP
@@ -62,8 +64,17 @@ extern void smp_cpus_done(unsigned int max_cpus);
62 * Call a function on all other processors 64 * Call a function on all other processors
63 */ 65 */
64int smp_call_function(void(*func)(void *info), void *info, int wait); 66int smp_call_function(void(*func)(void *info), void *info, int wait);
67/* Deprecated: use smp_call_function_many() which uses a cpumask ptr. */
65int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, 68int smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
66 int wait); 69 int wait);
70
71static inline void smp_call_function_many(const struct cpumask *mask,
72 void (*func)(void *info), void *info,
73 int wait)
74{
75 smp_call_function_mask(*mask, func, info, wait);
76}
77
67int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, 78int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
68 int wait); 79 int wait);
69void __smp_call_function_single(int cpuid, struct call_single_data *data); 80void __smp_call_function_single(int cpuid, struct call_single_data *data);
@@ -135,6 +146,8 @@ static inline void smp_send_reschedule(int cpu) { }
135}) 146})
136#define smp_call_function_mask(mask, func, info, wait) \ 147#define smp_call_function_mask(mask, func, info, wait) \
137 (up_smp_call_function(func, info)) 148 (up_smp_call_function(func, info))
149#define smp_call_function_many(mask, func, info, wait) \
150 (up_smp_call_function(func, info))
138static inline void init_call_single_data(void) 151static inline void init_call_single_data(void)
139{ 152{
140} 153}
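
smp_call_function_many() arrives here as a thin transitional wrapper that still copies the mask by value into smp_call_function_mask(). A hedged usage sketch; the callback and the mask handling are invented:

#include <linux/smp.h>
#include <linux/cpumask.h>

static void do_drain(void *info)
{
	/* runs on every CPU selected in the mask */
}

static void drain_other_cpus(void)
{
	int cpu = get_cpu();		/* pin this CPU while building the mask */
	cpumask_t mask = cpu_online_map;

	cpu_clear(cpu, mask);
	smp_call_function_many(&mask, do_drain, NULL, 1 /* wait */);
	put_cpu();
}
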
diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h
new file mode 100644
index 000000000000..1cbf0313adde
--- /dev/null
+++ b/include/linux/smsc911x.h
@@ -0,0 +1,47 @@
1/***************************************************************************
2 *
3 * Copyright (C) 2004-2008 SMSC
4 * Copyright (C) 2005-2008 ARM
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 *
20 ***************************************************************************/
21#ifndef __LINUX_SMSC911X_H__
22#define __LINUX_SMSC911X_H__
23
24#include <linux/phy.h>
25
26/* platform_device configuration data, should be assigned to
27 * the platform_device's dev.platform_data */
28struct smsc911x_platform_config {
29 unsigned int irq_polarity;
30 unsigned int irq_type;
31 unsigned int flags;
32 phy_interface_t phy_interface;
33};
34
35/* Constants for platform_device irq polarity configuration */
36#define SMSC911X_IRQ_POLARITY_ACTIVE_LOW 0
37#define SMSC911X_IRQ_POLARITY_ACTIVE_HIGH 1
38
39/* Constants for platform_device irq type configuration */
40#define SMSC911X_IRQ_TYPE_OPEN_DRAIN 0
41#define SMSC911X_IRQ_TYPE_PUSH_PULL 1
42
43/* Constants for flags */
44#define SMSC911X_USE_16BIT (BIT(0))
45#define SMSC911X_USE_32BIT (BIT(1))
46
47#endif /* __LINUX_SMSC911X_H__ */
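
The header only carries platform data for the smsc911x driver. A hypothetical board-support snippet wiring it up; the base address, IRQ number and resource layout are invented, and the device would be registered from board init with platform_device_register():

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/smsc911x.h>

static struct smsc911x_platform_config board_smsc911x_config = {
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
	.flags		= SMSC911X_USE_32BIT,
	.phy_interface	= PHY_INTERFACE_MODE_MII,
};

static struct resource board_smsc911x_resources[] = {
	{
		.start	= 0x18000000,		/* invented chip-select window */
		.end	= 0x18000000 + 0xff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 42,			/* invented IRQ number */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device board_smsc911x_device = {
	.name		= "smsc911x",
	.id		= -1,
	.resource	= board_smsc911x_resources,
	.num_resources	= ARRAY_SIZE(board_smsc911x_resources),
	.dev		= {
		.platform_data	= &board_smsc911x_config,
	},
};
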
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 7a6e6bba4a71..aee3f1e1d1ce 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -216,6 +216,9 @@ enum
216 LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */ 216 LINUX_MIB_TCPSPURIOUSRTOS, /* TCPSpuriousRTOs */
217 LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */ 217 LINUX_MIB_TCPMD5NOTFOUND, /* TCPMD5NotFound */
218 LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */ 218 LINUX_MIB_TCPMD5UNEXPECTED, /* TCPMD5Unexpected */
219 LINUX_MIB_SACKSHIFTED,
220 LINUX_MIB_SACKMERGED,
221 LINUX_MIB_SACKSHIFTFALLBACK,
219 __LINUX_MIB_MAX 222 __LINUX_MIB_MAX
220}; 223};
221 224
diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h
index b4d9fa6f797c..decf6d8c77b7 100644
--- a/include/linux/spi/orion_spi.h
+++ b/include/linux/spi/orion_spi.h
@@ -11,6 +11,7 @@
11 11
12struct orion_spi_info { 12struct orion_spi_info {
13 u32 tclk; /* no <linux/clk.h> support yet */ 13 u32 tclk; /* no <linux/clk.h> support yet */
14 u32 enable_clock_fix;
14}; 15};
15 16
16 17
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index b8db32cea1de..bf8de281b4ed 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -18,6 +18,9 @@
18 * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(), 18 * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(),
19 * and custom setup()/cleanup() methods. 19 * and custom setup()/cleanup() methods.
20 */ 20 */
21
22#include <linux/workqueue.h>
23
21struct spi_bitbang { 24struct spi_bitbang {
22 struct workqueue_struct *workqueue; 25 struct workqueue_struct *workqueue;
23 struct work_struct work; 26 struct work_struct work;
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index e530026eedf7..17d9b58f6379 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -427,12 +427,16 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
427{ 427{
428 switch (dev->bus->bustype) { 428 switch (dev->bus->bustype) {
429 case SSB_BUSTYPE_PCI: 429 case SSB_BUSTYPE_PCI:
430#ifdef CONFIG_SSB_PCIHOST
430 return pci_dma_mapping_error(dev->bus->host_pci, addr); 431 return pci_dma_mapping_error(dev->bus->host_pci, addr);
432#endif
433 break;
431 case SSB_BUSTYPE_SSB: 434 case SSB_BUSTYPE_SSB:
432 return dma_mapping_error(dev->dev, addr); 435 return dma_mapping_error(dev->dev, addr);
433 default: 436 default:
434 __ssb_dma_not_implemented(dev); 437 break;
435 } 438 }
439 __ssb_dma_not_implemented(dev);
436 return -ENOSYS; 440 return -ENOSYS;
437} 441}
438 442
@@ -441,12 +445,16 @@ static inline dma_addr_t ssb_dma_map_single(struct ssb_device *dev, void *p,
441{ 445{
442 switch (dev->bus->bustype) { 446 switch (dev->bus->bustype) {
443 case SSB_BUSTYPE_PCI: 447 case SSB_BUSTYPE_PCI:
448#ifdef CONFIG_SSB_PCIHOST
444 return pci_map_single(dev->bus->host_pci, p, size, dir); 449 return pci_map_single(dev->bus->host_pci, p, size, dir);
450#endif
451 break;
445 case SSB_BUSTYPE_SSB: 452 case SSB_BUSTYPE_SSB:
446 return dma_map_single(dev->dev, p, size, dir); 453 return dma_map_single(dev->dev, p, size, dir);
447 default: 454 default:
448 __ssb_dma_not_implemented(dev); 455 break;
449 } 456 }
457 __ssb_dma_not_implemented(dev);
450 return 0; 458 return 0;
451} 459}
452 460
@@ -455,14 +463,18 @@ static inline void ssb_dma_unmap_single(struct ssb_device *dev, dma_addr_t dma_a
455{ 463{
456 switch (dev->bus->bustype) { 464 switch (dev->bus->bustype) {
457 case SSB_BUSTYPE_PCI: 465 case SSB_BUSTYPE_PCI:
466#ifdef CONFIG_SSB_PCIHOST
458 pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir); 467 pci_unmap_single(dev->bus->host_pci, dma_addr, size, dir);
459 return; 468 return;
469#endif
470 break;
460 case SSB_BUSTYPE_SSB: 471 case SSB_BUSTYPE_SSB:
461 dma_unmap_single(dev->dev, dma_addr, size, dir); 472 dma_unmap_single(dev->dev, dma_addr, size, dir);
462 return; 473 return;
463 default: 474 default:
464 __ssb_dma_not_implemented(dev); 475 break;
465 } 476 }
477 __ssb_dma_not_implemented(dev);
466} 478}
467 479
468static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev, 480static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
@@ -472,15 +484,19 @@ static inline void ssb_dma_sync_single_for_cpu(struct ssb_device *dev,
472{ 484{
473 switch (dev->bus->bustype) { 485 switch (dev->bus->bustype) {
474 case SSB_BUSTYPE_PCI: 486 case SSB_BUSTYPE_PCI:
487#ifdef CONFIG_SSB_PCIHOST
475 pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, 488 pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
476 size, dir); 489 size, dir);
477 return; 490 return;
491#endif
492 break;
478 case SSB_BUSTYPE_SSB: 493 case SSB_BUSTYPE_SSB:
479 dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir); 494 dma_sync_single_for_cpu(dev->dev, dma_addr, size, dir);
480 return; 495 return;
481 default: 496 default:
482 __ssb_dma_not_implemented(dev); 497 break;
483 } 498 }
499 __ssb_dma_not_implemented(dev);
484} 500}
485 501
486static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev, 502static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
@@ -490,15 +506,19 @@ static inline void ssb_dma_sync_single_for_device(struct ssb_device *dev,
490{ 506{
491 switch (dev->bus->bustype) { 507 switch (dev->bus->bustype) {
492 case SSB_BUSTYPE_PCI: 508 case SSB_BUSTYPE_PCI:
509#ifdef CONFIG_SSB_PCIHOST
493 pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, 510 pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
494 size, dir); 511 size, dir);
495 return; 512 return;
513#endif
514 break;
496 case SSB_BUSTYPE_SSB: 515 case SSB_BUSTYPE_SSB:
497 dma_sync_single_for_device(dev->dev, dma_addr, size, dir); 516 dma_sync_single_for_device(dev->dev, dma_addr, size, dir);
498 return; 517 return;
499 default: 518 default:
500 __ssb_dma_not_implemented(dev); 519 break;
501 } 520 }
521 __ssb_dma_not_implemented(dev);
502} 522}
503 523
504static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev, 524static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
@@ -509,17 +529,21 @@ static inline void ssb_dma_sync_single_range_for_cpu(struct ssb_device *dev,
509{ 529{
510 switch (dev->bus->bustype) { 530 switch (dev->bus->bustype) {
511 case SSB_BUSTYPE_PCI: 531 case SSB_BUSTYPE_PCI:
532#ifdef CONFIG_SSB_PCIHOST
512 /* Just sync everything. That's all the PCI API can do. */ 533 /* Just sync everything. That's all the PCI API can do. */
513 pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr, 534 pci_dma_sync_single_for_cpu(dev->bus->host_pci, dma_addr,
514 offset + size, dir); 535 offset + size, dir);
515 return; 536 return;
537#endif
538 break;
516 case SSB_BUSTYPE_SSB: 539 case SSB_BUSTYPE_SSB:
517 dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset, 540 dma_sync_single_range_for_cpu(dev->dev, dma_addr, offset,
518 size, dir); 541 size, dir);
519 return; 542 return;
520 default: 543 default:
521 __ssb_dma_not_implemented(dev); 544 break;
522 } 545 }
546 __ssb_dma_not_implemented(dev);
523} 547}
524 548
525static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev, 549static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
@@ -530,17 +554,21 @@ static inline void ssb_dma_sync_single_range_for_device(struct ssb_device *dev,
530{ 554{
531 switch (dev->bus->bustype) { 555 switch (dev->bus->bustype) {
532 case SSB_BUSTYPE_PCI: 556 case SSB_BUSTYPE_PCI:
557#ifdef CONFIG_SSB_PCIHOST
533 /* Just sync everything. That's all the PCI API can do. */ 558 /* Just sync everything. That's all the PCI API can do. */
534 pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr, 559 pci_dma_sync_single_for_device(dev->bus->host_pci, dma_addr,
535 offset + size, dir); 560 offset + size, dir);
536 return; 561 return;
562#endif
563 break;
537 case SSB_BUSTYPE_SSB: 564 case SSB_BUSTYPE_SSB:
538 dma_sync_single_range_for_device(dev->dev, dma_addr, offset, 565 dma_sync_single_range_for_device(dev->dev, dma_addr, offset,
539 size, dir); 566 size, dir);
540 return; 567 return;
541 default: 568 default:
542 __ssb_dma_not_implemented(dev); 569 break;
543 } 570 }
571 __ssb_dma_not_implemented(dev);
544} 572}
545 573
546 574
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index b106fd8e0d5c..1a8cecc4f38c 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -15,9 +15,17 @@ extern void save_stack_trace_tsk(struct task_struct *tsk,
15 struct stack_trace *trace); 15 struct stack_trace *trace);
16 16
17extern void print_stack_trace(struct stack_trace *trace, int spaces); 17extern void print_stack_trace(struct stack_trace *trace, int spaces);
18
19#ifdef CONFIG_USER_STACKTRACE_SUPPORT
20extern void save_stack_trace_user(struct stack_trace *trace);
21#else
22# define save_stack_trace_user(trace) do { } while (0)
23#endif
24
18#else 25#else
19# define save_stack_trace(trace) do { } while (0) 26# define save_stack_trace(trace) do { } while (0)
20# define save_stack_trace_tsk(tsk, trace) do { } while (0) 27# define save_stack_trace_tsk(tsk, trace) do { } while (0)
28# define save_stack_trace_user(trace) do { } while (0)
21# define print_stack_trace(trace, spaces) do { } while (0) 29# define print_stack_trace(trace, spaces) do { } while (0)
22#endif 30#endif
23 31
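
A minimal sketch of how a caller might use the new save_stack_trace_user() hook together with the existing helpers (the buffer size and the use of print_stack_trace() on user addresses are illustrative choices, not taken from this patch):

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void dump_current_user_stack(void)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
	};

	/* compiles to a no-op stub unless CONFIG_USER_STACKTRACE_SUPPORT is set */
	save_stack_trace_user(&trace);
	print_stack_trace(&trace, 0);
}
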
diff --git a/include/linux/string.h b/include/linux/string.h
index 810d80df0a1d..d18fc198aa2f 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -1,7 +1,7 @@
1#ifndef _LINUX_STRING_H_ 1#ifndef _LINUX_STRING_H_
2#define _LINUX_STRING_H_ 2#define _LINUX_STRING_H_
3 3
4/* We don't want strings.h stuff being user by user stuff by accident */ 4/* We don't want strings.h stuff being used by user stuff by accident */
5 5
6#ifndef __KERNEL__ 6#ifndef __KERNEL__
7#include <string.h> 7#include <string.h>
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 6f0ee1b84a4f..c39a21040dcb 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -58,6 +58,7 @@ struct rpc_clnt {
58 struct rpc_timeout cl_timeout_default; 58 struct rpc_timeout cl_timeout_default;
59 struct rpc_program * cl_program; 59 struct rpc_program * cl_program;
60 char cl_inline_name[32]; 60 char cl_inline_name[32];
61 char *cl_principal; /* target to authenticate to */
61}; 62};
62 63
63/* 64/*
@@ -108,6 +109,7 @@ struct rpc_create_args {
108 u32 version; 109 u32 version;
109 rpc_authflavor_t authflavor; 110 rpc_authflavor_t authflavor;
110 unsigned long flags; 111 unsigned long flags;
112 char *client_name;
111}; 113};
112 114
113/* Values for "flags" field */ 115/* Values for "flags" field */
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index 51b977a4ca20..cea764c2359f 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -15,6 +15,7 @@ struct rpc_pipe_ops {
15 ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t); 15 ssize_t (*upcall)(struct file *, struct rpc_pipe_msg *, char __user *, size_t);
16 ssize_t (*downcall)(struct file *, const char __user *, size_t); 16 ssize_t (*downcall)(struct file *, const char __user *, size_t);
17 void (*release_pipe)(struct inode *); 17 void (*release_pipe)(struct inode *);
18 int (*open_pipe)(struct inode *);
18 void (*destroy_msg)(struct rpc_pipe_msg *); 19 void (*destroy_msg)(struct rpc_pipe_msg *);
19}; 20};
20 21
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 6fd7b016517f..0127daca4354 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -139,14 +139,14 @@ static inline char *__svc_print_addr(struct sockaddr *addr,
139{ 139{
140 switch (addr->sa_family) { 140 switch (addr->sa_family) {
141 case AF_INET: 141 case AF_INET:
142 snprintf(buf, len, "%u.%u.%u.%u, port=%u", 142 snprintf(buf, len, "%pI4, port=%u",
143 NIPQUAD(((struct sockaddr_in *) addr)->sin_addr), 143 &((struct sockaddr_in *)addr)->sin_addr,
144 ntohs(((struct sockaddr_in *) addr)->sin_port)); 144 ntohs(((struct sockaddr_in *) addr)->sin_port));
145 break; 145 break;
146 146
147 case AF_INET6: 147 case AF_INET6:
148 snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u", 148 snprintf(buf, len, "%pI6, port=%u",
149 NIP6(((struct sockaddr_in6 *) addr)->sin6_addr), 149 &((struct sockaddr_in6 *)addr)->sin6_addr,
150 ntohs(((struct sockaddr_in6 *) addr)->sin6_port)); 150 ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
151 break; 151 break;
152 152
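
The conversion above relies on the new pointer-based printf extensions: %pI4 takes a pointer to the 32-bit IPv4 address (hence the added address-of operator) and %pI6 a pointer to a struct in6_addr, replacing the argument-expanding NIPQUAD()/NIP6() macros. A hedged, self-contained illustration (the helper function and includes are mine, not from the patch):

#include <linux/in.h>
#include <linux/kernel.h>

static void print_peer(const struct sockaddr_in *sin)
{
	/* note: %pI4 consumes a pointer to the address, not four bytes */
	printk(KERN_INFO "peer %pI4, port=%u\n",
	       &sin->sin_addr, ntohs(sin->sin_port));
}
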
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index c9165d9771a8..ca7d725861fc 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -20,6 +20,7 @@ int gss_svc_init(void);
20void gss_svc_shutdown(void); 20void gss_svc_shutdown(void);
21int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); 21int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
22u32 svcauth_gss_flavor(struct auth_domain *dom); 22u32 svcauth_gss_flavor(struct auth_domain *dom);
23char *svc_gss_principal(struct svc_rqst *);
23 24
24#endif /* __KERNEL__ */ 25#endif /* __KERNEL__ */
25#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ 26#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index e4057d729f03..49e1eb454465 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -37,21 +37,6 @@ struct xdr_netobj {
37typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj); 37typedef int (*kxdrproc_t)(void *rqstp, __be32 *data, void *obj);
38 38
39/* 39/*
40 * We're still requiring the BKL in the xdr code until it's been
41 * more carefully audited, at which point this wrapper will become
42 * unnecessary.
43 */
44static inline int rpc_call_xdrproc(kxdrproc_t xdrproc, void *rqstp, __be32 *data, void *obj)
45{
46 int ret;
47
48 lock_kernel();
49 ret = xdrproc(rqstp, data, obj);
50 unlock_kernel();
51 return ret;
52}
53
54/*
55 * Basic structure for transmission/reception of a client XDR message. 40 * Basic structure for transmission/reception of a client XDR message.
56 * Features a header (for a linear buffer containing RPC headers 41 * Features a header (for a linear buffer containing RPC headers
57 * and the data payload for short messages), and then an array of 42 * and the data payload for short messages), and then an array of
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 4d80a118d538..11fc71d50c1e 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -76,8 +76,7 @@ struct rpc_rqst {
76 struct list_head rq_list; 76 struct list_head rq_list;
77 77
78 __u32 * rq_buffer; /* XDR encode buffer */ 78 __u32 * rq_buffer; /* XDR encode buffer */
79 size_t rq_bufsize, 79 size_t rq_callsize,
80 rq_callsize,
81 rq_rcvsize; 80 rq_rcvsize;
82 81
83 struct xdr_buf rq_private_buf; /* The receive buffer 82 struct xdr_buf rq_private_buf; /* The receive buffer
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h
index 4de56b1d372b..54a379c9e8eb 100644
--- a/include/linux/sunrpc/xprtrdma.h
+++ b/include/linux/sunrpc/xprtrdma.h
@@ -66,9 +66,6 @@
66 66
67#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */ 67#define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */
68 68
69#define RDMA_RESOLVE_TIMEOUT (5*HZ) /* TBD 5 seconds */
70#define RDMA_CONNECT_RETRY_MAX (2) /* retries if no listener backlog */
71
72/* memory registration strategies */ 69/* memory registration strategies */
73#define RPCRDMA_PERSISTENT_REGISTRATION (1) 70#define RPCRDMA_PERSISTENT_REGISTRATION (1)
74 71
@@ -78,6 +75,7 @@ enum rpcrdma_memreg {
78 RPCRDMA_MEMWINDOWS, 75 RPCRDMA_MEMWINDOWS,
79 RPCRDMA_MEMWINDOWS_ASYNC, 76 RPCRDMA_MEMWINDOWS_ASYNC,
80 RPCRDMA_MTHCAFMR, 77 RPCRDMA_MTHCAFMR,
78 RPCRDMA_FRMR,
81 RPCRDMA_ALLPHYSICAL, 79 RPCRDMA_ALLPHYSICAL,
82 RPCRDMA_LAST 80 RPCRDMA_LAST
83}; 81};
diff --git a/include/linux/swab.h b/include/linux/swab.h
index 270d5c208a89..bbed279f3b32 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -47,8 +47,6 @@ static inline __attribute_const__ __u16 ___swab16(__u16 val)
47{ 47{
48#ifdef __arch_swab16 48#ifdef __arch_swab16
49 return __arch_swab16(val); 49 return __arch_swab16(val);
50#elif defined(__arch_swab16p)
51 return __arch_swab16p(&val);
52#else 50#else
53 return __const_swab16(val); 51 return __const_swab16(val);
54#endif 52#endif
@@ -58,8 +56,6 @@ static inline __attribute_const__ __u32 ___swab32(__u32 val)
58{ 56{
59#ifdef __arch_swab32 57#ifdef __arch_swab32
60 return __arch_swab32(val); 58 return __arch_swab32(val);
61#elif defined(__arch_swab32p)
62 return __arch_swab32p(&val);
63#else 59#else
64 return __const_swab32(val); 60 return __const_swab32(val);
65#endif 61#endif
@@ -69,8 +65,6 @@ static inline __attribute_const__ __u64 ___swab64(__u64 val)
69{ 65{
70#ifdef __arch_swab64 66#ifdef __arch_swab64
71 return __arch_swab64(val); 67 return __arch_swab64(val);
72#elif defined(__arch_swab64p)
73 return __arch_swab64p(&val);
74#elif defined(__SWAB_64_THRU_32__) 68#elif defined(__SWAB_64_THRU_32__)
75 __u32 h = val >> 32; 69 __u32 h = val >> 32;
76 __u32 l = val & ((1ULL << 32) - 1); 70 __u32 l = val & ((1ULL << 32) - 1);
@@ -84,8 +78,6 @@ static inline __attribute_const__ __u32 ___swahw32(__u32 val)
84{ 78{
85#ifdef __arch_swahw32 79#ifdef __arch_swahw32
86 return __arch_swahw32(val); 80 return __arch_swahw32(val);
87#elif defined(__arch_swahw32p)
88 return __arch_swahw32p(&val);
89#else 81#else
90 return __const_swahw32(val); 82 return __const_swahw32(val);
91#endif 83#endif
@@ -95,8 +87,6 @@ static inline __attribute_const__ __u32 ___swahb32(__u32 val)
95{ 87{
96#ifdef __arch_swahb32 88#ifdef __arch_swahb32
97 return __arch_swahb32(val); 89 return __arch_swahb32(val);
98#elif defined(__arch_swahb32p)
99 return __arch_swahb32p(&val);
100#else 90#else
101 return __const_swahb32(val); 91 return __const_swahb32(val);
102#endif 92#endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index de40f169a4e4..a3af95b2cb6d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -7,6 +7,7 @@
7#include <linux/list.h> 7#include <linux/list.h>
8#include <linux/memcontrol.h> 8#include <linux/memcontrol.h>
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/node.h>
10 11
11#include <asm/atomic.h> 12#include <asm/atomic.h>
12#include <asm/page.h> 13#include <asm/page.h>
@@ -171,8 +172,10 @@ extern unsigned int nr_free_pagecache_pages(void);
171 172
172 173
173/* linux/mm/swap.c */ 174/* linux/mm/swap.c */
174extern void lru_cache_add(struct page *); 175extern void __lru_cache_add(struct page *, enum lru_list lru);
175extern void lru_cache_add_active(struct page *); 176extern void lru_cache_add_lru(struct page *, enum lru_list lru);
177extern void lru_cache_add_active_or_unevictable(struct page *,
178 struct vm_area_struct *);
176extern void activate_page(struct page *); 179extern void activate_page(struct page *);
177extern void mark_page_accessed(struct page *); 180extern void mark_page_accessed(struct page *);
178extern void lru_add_drain(void); 181extern void lru_add_drain(void);
@@ -180,12 +183,38 @@ extern int lru_add_drain_all(void);
180extern void rotate_reclaimable_page(struct page *page); 183extern void rotate_reclaimable_page(struct page *page);
181extern void swap_setup(void); 184extern void swap_setup(void);
182 185
186extern void add_page_to_unevictable_list(struct page *page);
187
188/**
189 * lru_cache_add_anon: add a page to the inactive anonymous LRU list
190 * @page: the page to add
191 */
192static inline void lru_cache_add_anon(struct page *page)
193{
194 __lru_cache_add(page, LRU_INACTIVE_ANON);
195}
196
197static inline void lru_cache_add_active_anon(struct page *page)
198{
199 __lru_cache_add(page, LRU_ACTIVE_ANON);
200}
201
202static inline void lru_cache_add_file(struct page *page)
203{
204 __lru_cache_add(page, LRU_INACTIVE_FILE);
205}
206
207static inline void lru_cache_add_active_file(struct page *page)
208{
209 __lru_cache_add(page, LRU_ACTIVE_FILE);
210}
211
183/* linux/mm/vmscan.c */ 212/* linux/mm/vmscan.c */
184extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 213extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
185 gfp_t gfp_mask); 214 gfp_t gfp_mask);
186extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 215extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
187 gfp_t gfp_mask); 216 gfp_t gfp_mask);
188extern int __isolate_lru_page(struct page *page, int mode); 217extern int __isolate_lru_page(struct page *page, int mode, int file);
189extern unsigned long shrink_all_memory(unsigned long nr_pages); 218extern unsigned long shrink_all_memory(unsigned long nr_pages);
190extern int vm_swappiness; 219extern int vm_swappiness;
191extern int remove_mapping(struct address_space *mapping, struct page *page); 220extern int remove_mapping(struct address_space *mapping, struct page *page);
@@ -204,6 +233,34 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
204} 233}
205#endif 234#endif
206 235
236#ifdef CONFIG_UNEVICTABLE_LRU
237extern int page_evictable(struct page *page, struct vm_area_struct *vma);
238extern void scan_mapping_unevictable_pages(struct address_space *);
239
240extern unsigned long scan_unevictable_pages;
241extern int scan_unevictable_handler(struct ctl_table *, int, struct file *,
242 void __user *, size_t *, loff_t *);
243extern int scan_unevictable_register_node(struct node *node);
244extern void scan_unevictable_unregister_node(struct node *node);
245#else
246static inline int page_evictable(struct page *page,
247 struct vm_area_struct *vma)
248{
249 return 1;
250}
251
252static inline void scan_mapping_unevictable_pages(struct address_space *mapping)
253{
254}
255
256static inline int scan_unevictable_register_node(struct node *node)
257{
258 return 0;
259}
260
261static inline void scan_unevictable_unregister_node(struct node *node) { }
262#endif
263
207extern int kswapd_run(int nid); 264extern int kswapd_run(int nid);
208 265
209#ifdef CONFIG_MMU 266#ifdef CONFIG_MMU
@@ -251,6 +308,7 @@ extern sector_t swapdev_block(int, pgoff_t);
251extern struct swap_info_struct *get_swap_info_struct(unsigned); 308extern struct swap_info_struct *get_swap_info_struct(unsigned);
252extern int can_share_swap_page(struct page *); 309extern int can_share_swap_page(struct page *);
253extern int remove_exclusive_swap_page(struct page *); 310extern int remove_exclusive_swap_page(struct page *);
311extern int remove_exclusive_swap_page_ref(struct page *);
254struct backing_dev_info; 312struct backing_dev_info;
255 313
256/* linux/mm/thrash.c */ 314/* linux/mm/thrash.c */
@@ -339,6 +397,11 @@ static inline int remove_exclusive_swap_page(struct page *p)
339 return 0; 397 return 0;
340} 398}
341 399
400static inline int remove_exclusive_swap_page_ref(struct page *page)
401{
402 return 0;
403}
404
342static inline swp_entry_t get_swap_page(void) 405static inline swp_entry_t get_swap_page(void)
343{ 406{
344 swp_entry_t entry; 407 swp_entry_t entry;
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
new file mode 100644
index 000000000000..325af1de0351
--- /dev/null
+++ b/include/linux/swiotlb.h
@@ -0,0 +1,105 @@
1#ifndef __LINUX_SWIOTLB_H
2#define __LINUX_SWIOTLB_H
3
4#include <linux/types.h>
5
6struct device;
7struct dma_attrs;
8struct scatterlist;
9
10/*
11 * Maximum allowable number of contiguous slabs to map,
12 * must be a power of 2. What is the appropriate value?
13 * The complexity of {map,unmap}_single is linearly dependent on this value.
14 */
15#define IO_TLB_SEGSIZE 128
16
17
18/*
19 * log of the size of each IO TLB slab. The number of slabs is command line
20 * controllable.
21 */
22#define IO_TLB_SHIFT 11
23
24extern void
25swiotlb_init(void);
26
27extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
28extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
29
30extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
31extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
32
33extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size);
34
35extern void
36*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
37 dma_addr_t *dma_handle, gfp_t flags);
38
39extern void
40swiotlb_free_coherent(struct device *hwdev, size_t size,
41 void *vaddr, dma_addr_t dma_handle);
42
43extern dma_addr_t
44swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir);
45
46extern void
47swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
48 size_t size, int dir);
49
50extern dma_addr_t
51swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
52 int dir, struct dma_attrs *attrs);
53
54extern void
55swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
56 size_t size, int dir, struct dma_attrs *attrs);
57
58extern int
59swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
60 int direction);
61
62extern void
63swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
64 int direction);
65
66extern int
67swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
68 int dir, struct dma_attrs *attrs);
69
70extern void
71swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
72 int nelems, int dir, struct dma_attrs *attrs);
73
74extern void
75swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
76 size_t size, int dir);
77
78extern void
79swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
80 int nelems, int dir);
81
82extern void
83swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
84 size_t size, int dir);
85
86extern void
87swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
88 int nelems, int dir);
89
90extern void
91swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
92 unsigned long offset, size_t size, int dir);
93
94extern void
95swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
96 unsigned long offset, size_t size,
97 int dir);
98
99extern int
100swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
101
102extern int
103swiotlb_dma_supported(struct device *hwdev, u64 mask);
104
105#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index d6ff145919ca..04fb47bfb920 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -410,8 +410,7 @@ asmlinkage long sys_getsockopt(int fd, int level, int optname,
410asmlinkage long sys_bind(int, struct sockaddr __user *, int); 410asmlinkage long sys_bind(int, struct sockaddr __user *, int);
411asmlinkage long sys_connect(int, struct sockaddr __user *, int); 411asmlinkage long sys_connect(int, struct sockaddr __user *, int);
412asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *); 412asmlinkage long sys_accept(int, struct sockaddr __user *, int __user *);
413asmlinkage long sys_paccept(int, struct sockaddr __user *, int __user *, 413asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
414 const __user sigset_t *, size_t, int);
415asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *); 414asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
416asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *); 415asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
417asmlinkage long sys_send(int, void __user *, size_t, unsigned); 416asmlinkage long sys_send(int, void __user *, size_t, unsigned);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index d0437f36921f..39d471d1163b 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -972,7 +972,7 @@ extern int sysctl_perm(struct ctl_table_root *root,
972 972
973typedef struct ctl_table ctl_table; 973typedef struct ctl_table ctl_table;
974 974
975typedef int ctl_handler (struct ctl_table *table, int __user *name, int nlen, 975typedef int ctl_handler (struct ctl_table *table,
976 void __user *oldval, size_t __user *oldlenp, 976 void __user *oldval, size_t __user *oldlenp,
977 void __user *newval, size_t newlen); 977 void __user *newval, size_t newlen);
978 978
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 37fa24152bd8..9d68fed50f11 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -21,8 +21,9 @@ struct kobject;
21struct module; 21struct module;
22 22
23/* FIXME 23/* FIXME
24 * The *owner field is no longer used, but leave around 24 * The *owner field is no longer used.
25 * until the tree gets cleaned up fully. 25 * The x86 tree has been cleaned up. The owner
26 * attribute is still left for other arches.
26 */ 27 */
27struct attribute { 28struct attribute {
28 const char *name; 29 const char *name;
@@ -78,6 +79,8 @@ struct sysfs_ops {
78 ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t); 79 ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
79}; 80};
80 81
82struct sysfs_dirent;
83
81#ifdef CONFIG_SYSFS 84#ifdef CONFIG_SYSFS
82 85
83int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *), 86int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
@@ -117,9 +120,14 @@ int sysfs_add_file_to_group(struct kobject *kobj,
117void sysfs_remove_file_from_group(struct kobject *kobj, 120void sysfs_remove_file_from_group(struct kobject *kobj,
118 const struct attribute *attr, const char *group); 121 const struct attribute *attr, const char *group);
119 122
120void sysfs_notify(struct kobject *kobj, char *dir, char *attr); 123void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr);
121 124void sysfs_notify_dirent(struct sysfs_dirent *sd);
122extern int __must_check sysfs_init(void); 125struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
126 const unsigned char *name);
127struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
128void sysfs_put(struct sysfs_dirent *sd);
129void sysfs_printk_last_file(void);
130int __must_check sysfs_init(void);
123 131
124#else /* CONFIG_SYSFS */ 132#else /* CONFIG_SYSFS */
125 133
@@ -222,7 +230,24 @@ static inline void sysfs_remove_file_from_group(struct kobject *kobj,
222{ 230{
223} 231}
224 232
225static inline void sysfs_notify(struct kobject *kobj, char *dir, char *attr) 233static inline void sysfs_notify(struct kobject *kobj, const char *dir,
234 const char *attr)
235{
236}
237static inline void sysfs_notify_dirent(struct sysfs_dirent *sd)
238{
239}
240static inline
241struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
242 const unsigned char *name)
243{
244 return NULL;
245}
246static inline struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd)
247{
248 return NULL;
249}
250static inline void sysfs_put(struct sysfs_dirent *sd)
226{ 251{
227} 252}
228 253
@@ -231,6 +256,10 @@ static inline int __must_check sysfs_init(void)
231 return 0; 256 return 0;
232} 257}
233 258
259static inline void sysfs_printk_last_file(void)
260{
261}
262
234#endif /* CONFIG_SYSFS */ 263#endif /* CONFIG_SYSFS */
235 264
236#endif /* _SYSFS_H_ */ 265#endif /* _SYSFS_H_ */
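
A short sketch of the intended use of the new dirent helpers (all driver-side names here are hypothetical; only sysfs_get_dirent(), sysfs_notify_dirent(), sysfs_get() and sysfs_put() come from this patch): a driver looks up the attribute's dirent once, then notifies pollers without repeating the name lookup that sysfs_notify() performs.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/sysfs.h>

struct my_dev {
	struct device		*dev;
	struct sysfs_dirent	*state_sd;	/* cached "state" attribute */
};

static int my_dev_setup_notify(struct my_dev *md)
{
	md->state_sd = sysfs_get_dirent(md->dev->kobj.sd, "state");
	return md->state_sd ? 0 : -ENODEV;
}

static void my_dev_state_changed(struct my_dev *md)
{
	/* wakes up anyone poll()ing the cached attribute */
	sysfs_notify_dirent(md->state_sd);
}

static void my_dev_teardown_notify(struct my_dev *md)
{
	sysfs_put(md->state_sd);
}
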
diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h
index 5e88afc9a2fb..bdf855c2856f 100644
--- a/include/linux/task_io_accounting.h
+++ b/include/linux/task_io_accounting.h
@@ -5,7 +5,7 @@
5 * Don't include this header file directly - it is designed to be dragged in via 5 * Don't include this header file directly - it is designed to be dragged in via
6 * sched.h. 6 * sched.h.
7 * 7 *
8 * Blame akpm@osdl.org for all this. 8 * Blame Andrew Morton for all this.
9 */ 9 */
10 10
11struct task_io_accounting { 11struct task_io_accounting {
diff --git a/include/linux/telephony.h b/include/linux/telephony.h
index 0d0cf2a1e7bc..f63afe330add 100644
--- a/include/linux/telephony.h
+++ b/include/linux/telephony.h
@@ -14,7 +14,7 @@
14 * Authors: Ed Okerson, <eokerson@quicknet.net> 14 * Authors: Ed Okerson, <eokerson@quicknet.net>
15 * Greg Herlein, <gherlein@quicknet.net> 15 * Greg Herlein, <gherlein@quicknet.net>
16 * 16 *
17 * Contributors: Alan Cox, <alan@redhat.com> 17 * Contributors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
18 * David W. Erhart, <derhart@quicknet.net> 18 * David W. Erhart, <derhart@quicknet.net>
19 * 19 *
20 * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR 20 * IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
@@ -28,10 +28,6 @@
28 * ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION 28 * ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION
29 * TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 29 * TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
30 * 30 *
31 * Version: $Revision: 4.2 $
32 *
33 * $Id: telephony.h,v 4.2 2001/08/06 07:09:43 craigs Exp $
34 *
35 *****************************************************************************/ 31 *****************************************************************************/
36 32
37#ifndef TELEPHONY_H 33#ifndef TELEPHONY_H
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 38a56477f27a..e6b820f8b56b 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -38,6 +38,14 @@ struct restart_block {
38#endif 38#endif
39 u64 expires; 39 u64 expires;
40 } nanosleep; 40 } nanosleep;
41 /* For poll */
42 struct {
43 struct pollfd __user *ufds;
44 int nfds;
45 int has_timeout;
46 unsigned long tv_sec;
47 unsigned long tv_nsec;
48 } poll;
41 }; 49 };
42}; 50};
43 51
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 98921a3e1aa8..b6ec8189ac0c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -96,9 +96,11 @@ extern cpumask_t *tick_get_broadcast_oneshot_mask(void);
96extern void tick_clock_notify(void); 96extern void tick_clock_notify(void);
97extern int tick_check_oneshot_change(int allow_nohz); 97extern int tick_check_oneshot_change(int allow_nohz);
98extern struct tick_sched *tick_get_tick_sched(int cpu); 98extern struct tick_sched *tick_get_tick_sched(int cpu);
99extern void tick_check_idle(int cpu);
99# else 100# else
100static inline void tick_clock_notify(void) { } 101static inline void tick_clock_notify(void) { }
101static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } 102static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
103static inline void tick_check_idle(int cpu) { }
102# endif 104# endif
103 105
104#else /* CONFIG_GENERIC_CLOCKEVENTS */ 106#else /* CONFIG_GENERIC_CLOCKEVENTS */
@@ -106,26 +108,23 @@ static inline void tick_init(void) { }
106static inline void tick_cancel_sched_timer(int cpu) { } 108static inline void tick_cancel_sched_timer(int cpu) { }
107static inline void tick_clock_notify(void) { } 109static inline void tick_clock_notify(void) { }
108static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } 110static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
111static inline void tick_check_idle(int cpu) { }
109#endif /* !CONFIG_GENERIC_CLOCKEVENTS */ 112#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
110 113
111# ifdef CONFIG_NO_HZ 114# ifdef CONFIG_NO_HZ
112extern void tick_nohz_stop_sched_tick(int inidle); 115extern void tick_nohz_stop_sched_tick(int inidle);
113extern void tick_nohz_restart_sched_tick(void); 116extern void tick_nohz_restart_sched_tick(void);
114extern void tick_nohz_update_jiffies(void);
115extern ktime_t tick_nohz_get_sleep_length(void); 117extern ktime_t tick_nohz_get_sleep_length(void);
116extern void tick_nohz_stop_idle(int cpu);
117extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); 118extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
118# else 119# else
119static inline void tick_nohz_stop_sched_tick(int inidle) { } 120static inline void tick_nohz_stop_sched_tick(int inidle) { }
120static inline void tick_nohz_restart_sched_tick(void) { } 121static inline void tick_nohz_restart_sched_tick(void) { }
121static inline void tick_nohz_update_jiffies(void) { }
122static inline ktime_t tick_nohz_get_sleep_length(void) 122static inline ktime_t tick_nohz_get_sleep_length(void)
123{ 123{
124 ktime_t len = { .tv64 = NSEC_PER_SEC/HZ }; 124 ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
125 125
126 return len; 126 return len;
127} 127}
128static inline void tick_nohz_stop_idle(int cpu) { }
129static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; } 128static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
130# endif /* !NO_HZ */ 129# endif /* !NO_HZ */
131 130
diff --git a/include/linux/time.h b/include/linux/time.h
index e15206a7e82e..ce321ac5c8f8 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -29,6 +29,8 @@ struct timezone {
29 29
30#ifdef __KERNEL__ 30#ifdef __KERNEL__
31 31
32extern struct timezone sys_tz;
33
32/* Parameters used to convert the timespec values: */ 34/* Parameters used to convert the timespec values: */
33#define MSEC_PER_SEC 1000L 35#define MSEC_PER_SEC 1000L
34#define USEC_PER_MSEC 1000L 36#define USEC_PER_MSEC 1000L
@@ -38,6 +40,8 @@ struct timezone {
38#define NSEC_PER_SEC 1000000000L 40#define NSEC_PER_SEC 1000000000L
39#define FSEC_PER_SEC 1000000000000000L 41#define FSEC_PER_SEC 1000000000000000L
40 42
43#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
44
41static inline int timespec_equal(const struct timespec *a, 45static inline int timespec_equal(const struct timespec *a,
42 const struct timespec *b) 46 const struct timespec *b)
43{ 47{
@@ -72,6 +76,8 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon,
72 const unsigned int min, const unsigned int sec); 76 const unsigned int min, const unsigned int sec);
73 77
74extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); 78extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec);
79extern struct timespec timespec_add_safe(const struct timespec lhs,
80 const struct timespec rhs);
75 81
76/* 82/*
77 * sub = lhs - rhs, in normalized form 83 * sub = lhs - rhs, in normalized form
@@ -117,6 +123,7 @@ extern int do_setitimer(int which, struct itimerval *value,
117extern unsigned int alarm_setitimer(unsigned int seconds); 123extern unsigned int alarm_setitimer(unsigned int seconds);
118extern int do_getitimer(int which, struct itimerval *value); 124extern int do_getitimer(int which, struct itimerval *value);
119extern void getnstimeofday(struct timespec *tv); 125extern void getnstimeofday(struct timespec *tv);
126extern void getrawmonotonic(struct timespec *ts);
120extern void getboottime(struct timespec *ts); 127extern void getboottime(struct timespec *ts);
121extern void monotonic_to_bootbased(struct timespec *ts); 128extern void monotonic_to_bootbased(struct timespec *ts);
122 129
@@ -125,6 +132,9 @@ extern int timekeeping_valid_for_hres(void);
125extern void update_wall_time(void); 132extern void update_wall_time(void);
126extern void update_xtime_cache(u64 nsec); 133extern void update_xtime_cache(u64 nsec);
127 134
135struct tms;
136extern void do_sys_times(struct tms *);
137
128/** 138/**
129 * timespec_to_ns - Convert timespec to nanoseconds 139 * timespec_to_ns - Convert timespec to nanoseconds
130 * @ts: pointer to the timespec variable to be converted 140 * @ts: pointer to the timespec variable to be converted
@@ -214,6 +224,7 @@ struct itimerval {
214#define CLOCK_MONOTONIC 1 224#define CLOCK_MONOTONIC 1
215#define CLOCK_PROCESS_CPUTIME_ID 2 225#define CLOCK_PROCESS_CPUTIME_ID 2
216#define CLOCK_THREAD_CPUTIME_ID 3 226#define CLOCK_THREAD_CPUTIME_ID 3
227#define CLOCK_MONOTONIC_RAW 4
217 228
218/* 229/*
219 * The IDs of various hardware clocks: 230 * The IDs of various hardware clocks:
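
The hunks above declare timespec_add_safe() and the TIME_T_MAX bound it clamps to; the implementation itself is not in this header. A sketch of the intended saturating semantics (assumed here; the real body lives in kernel/time.c):

struct timespec timespec_add_safe(const struct timespec lhs,
				  const struct timespec rhs)
{
	struct timespec res;

	set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
				lhs.tv_nsec + rhs.tv_nsec);

	/* on overflow, saturate at TIME_T_MAX instead of wrapping negative */
	if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
		res.tv_sec = TIME_T_MAX;

	return res;
}
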
diff --git a/include/linux/timer.h b/include/linux/timer.h
index d4ba79248a27..daf9685b861c 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -186,4 +186,9 @@ unsigned long __round_jiffies_relative(unsigned long j, int cpu);
186unsigned long round_jiffies(unsigned long j); 186unsigned long round_jiffies(unsigned long j);
187unsigned long round_jiffies_relative(unsigned long j); 187unsigned long round_jiffies_relative(unsigned long j);
188 188
189unsigned long __round_jiffies_up(unsigned long j, int cpu);
190unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
191unsigned long round_jiffies_up(unsigned long j);
192unsigned long round_jiffies_up_relative(unsigned long j);
193
189#endif 194#endif
diff --git a/include/linux/timex.h b/include/linux/timex.h
index fc6035d29d56..998a55d80acf 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -53,47 +53,11 @@
53#ifndef _LINUX_TIMEX_H 53#ifndef _LINUX_TIMEX_H
54#define _LINUX_TIMEX_H 54#define _LINUX_TIMEX_H
55 55
56#include <linux/compiler.h>
57#include <linux/time.h> 56#include <linux/time.h>
58 57
59#include <asm/param.h>
60
61#define NTP_API 4 /* NTP API version */ 58#define NTP_API 4 /* NTP API version */
62 59
63/* 60/*
64 * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
65 * for a slightly underdamped convergence characteristic. SHIFT_KH
66 * establishes the damping of the FLL and is chosen by wisdom and black
67 * art.
68 *
69 * MAXTC establishes the maximum time constant of the PLL. With the
70 * SHIFT_KG and SHIFT_KF values given and a time constant range from
71 * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
72 * respectively.
73 */
74#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
75#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
76#define MAXTC 10 /* maximum time constant (shift) */
77
78/*
79 * SHIFT_USEC defines the scaling (shift) of the time_freq and
80 * time_tolerance variables, which represent the current frequency
81 * offset and maximum frequency tolerance.
82 */
83#define SHIFT_USEC 16 /* frequency offset scale (shift) */
84#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
85#define PPM_SCALE_INV_SHIFT 20
86#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
87 PPM_SCALE + 1)
88
89#define MAXPHASE 500000000l /* max phase error (ns) */
90#define MAXFREQ 500000 /* max frequency error (ns/s) */
91#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
92#define MINSEC 256 /* min interval between updates (s) */
93#define MAXSEC 2048 /* max interval between updates (s) */
94#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
95
96/*
97 * syscall interface - used (mainly by NTP daemon) 61 * syscall interface - used (mainly by NTP daemon)
98 * to discipline kernel clock oscillator 62 * to discipline kernel clock oscillator
99 */ 63 */
@@ -141,8 +105,15 @@ struct timex {
141#define ADJ_MICRO 0x1000 /* select microsecond resolution */ 105#define ADJ_MICRO 0x1000 /* select microsecond resolution */
142#define ADJ_NANO 0x2000 /* select nanosecond resolution */ 106#define ADJ_NANO 0x2000 /* select nanosecond resolution */
143#define ADJ_TICK 0x4000 /* tick value */ 107#define ADJ_TICK 0x4000 /* tick value */
108
109#ifdef __KERNEL__
110#define ADJ_ADJTIME 0x8000 /* switch between adjtime/adjtimex modes */
111#define ADJ_OFFSET_SINGLESHOT 0x0001 /* old-fashioned adjtime */
112#define ADJ_OFFSET_READONLY 0x2000 /* read-only adjtime */
113#else
144#define ADJ_OFFSET_SINGLESHOT 0x8001 /* old-fashioned adjtime */ 114#define ADJ_OFFSET_SINGLESHOT 0x8001 /* old-fashioned adjtime */
145#define ADJ_OFFSET_SS_READ 0xa001 /* read-only adjtime */ 115#define ADJ_OFFSET_SS_READ 0xa001 /* read-only adjtime */
116#endif
146 117
147/* xntp 3.4 compatibility names */ 118/* xntp 3.4 compatibility names */
148#define MOD_OFFSET ADJ_OFFSET 119#define MOD_OFFSET ADJ_OFFSET
@@ -192,9 +163,46 @@ struct timex {
192#define TIME_BAD TIME_ERROR /* bw compat */ 163#define TIME_BAD TIME_ERROR /* bw compat */
193 164
194#ifdef __KERNEL__ 165#ifdef __KERNEL__
166#include <linux/compiler.h>
167#include <linux/types.h>
168#include <linux/param.h>
169
195#include <asm/timex.h> 170#include <asm/timex.h>
196 171
197/* 172/*
173 * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
174 * for a slightly underdamped convergence characteristic. SHIFT_KH
175 * establishes the damping of the FLL and is chosen by wisdom and black
176 * art.
177 *
178 * MAXTC establishes the maximum time constant of the PLL. With the
179 * SHIFT_KG and SHIFT_KF values given and a time constant range from
180 * zero to MAXTC, the PLL will converge in 15 minutes to 16 hours,
181 * respectively.
182 */
183#define SHIFT_PLL 4 /* PLL frequency factor (shift) */
184#define SHIFT_FLL 2 /* FLL frequency factor (shift) */
185#define MAXTC 10 /* maximum time constant (shift) */
186
187/*
188 * SHIFT_USEC defines the scaling (shift) of the time_freq and
189 * time_tolerance variables, which represent the current frequency
190 * offset and maximum frequency tolerance.
191 */
192#define SHIFT_USEC 16 /* frequency offset scale (shift) */
193#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
194#define PPM_SCALE_INV_SHIFT 19
195#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
196 PPM_SCALE + 1)
197
198#define MAXPHASE 500000000l /* max phase error (ns) */
199#define MAXFREQ 500000 /* max frequency error (ns/s) */
200#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
201#define MINSEC 256 /* min interval between updates (s) */
202#define MAXSEC 2048 /* max interval between updates (s) */
203#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
204
205/*
198 * kernel variables 206 * kernel variables
199 * Note: maximum error = NTP synch distance = dispersion + delay / 2; 207 * Note: maximum error = NTP synch distance = dispersion + delay / 2;
200 * estimated error = NTP dispersion. 208 * estimated error = NTP dispersion.
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 2158fc0d5a56..0c5b5ac36d8e 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -49,7 +49,7 @@
49 for_each_online_node(node) \ 49 for_each_online_node(node) \
50 if (nr_cpus_node(node)) 50 if (nr_cpus_node(node))
51 51
52void arch_update_cpu_topology(void); 52int arch_update_cpu_topology(void);
53 53
54/* Conform to ACPI 2.0 SLIT distance definitions */ 54/* Conform to ACPI 2.0 SLIT distance definitions */
55#define LOCAL_DISTANCE 10 55#define LOCAL_DISTANCE 10
@@ -99,7 +99,7 @@ void arch_update_cpu_topology(void);
99 | SD_BALANCE_FORK \ 99 | SD_BALANCE_FORK \
100 | SD_BALANCE_EXEC \ 100 | SD_BALANCE_EXEC \
101 | SD_WAKE_AFFINE \ 101 | SD_WAKE_AFFINE \
102 | SD_WAKE_IDLE \ 102 | SD_WAKE_BALANCE \
103 | SD_SHARE_CPUPOWER, \ 103 | SD_SHARE_CPUPOWER, \
104 .last_balance = jiffies, \ 104 .last_balance = jiffies, \
105 .balance_interval = 1, \ 105 .balance_interval = 1, \
@@ -120,10 +120,10 @@ void arch_update_cpu_topology(void);
120 .wake_idx = 1, \ 120 .wake_idx = 1, \
121 .forkexec_idx = 1, \ 121 .forkexec_idx = 1, \
122 .flags = SD_LOAD_BALANCE \ 122 .flags = SD_LOAD_BALANCE \
123 | SD_BALANCE_NEWIDLE \
124 | SD_BALANCE_FORK \ 123 | SD_BALANCE_FORK \
125 | SD_BALANCE_EXEC \ 124 | SD_BALANCE_EXEC \
126 | SD_WAKE_AFFINE \ 125 | SD_WAKE_AFFINE \
126 | SD_WAKE_BALANCE \
127 | SD_SHARE_PKG_RESOURCES\ 127 | SD_SHARE_PKG_RESOURCES\
128 | BALANCE_FOR_MC_POWER, \ 128 | BALANCE_FOR_MC_POWER, \
129 .last_balance = jiffies, \ 129 .last_balance = jiffies, \
@@ -146,10 +146,10 @@ void arch_update_cpu_topology(void);
146 .wake_idx = 1, \ 146 .wake_idx = 1, \
147 .forkexec_idx = 1, \ 147 .forkexec_idx = 1, \
148 .flags = SD_LOAD_BALANCE \ 148 .flags = SD_LOAD_BALANCE \
149 | SD_BALANCE_NEWIDLE \
150 | SD_BALANCE_FORK \
151 | SD_BALANCE_EXEC \ 149 | SD_BALANCE_EXEC \
150 | SD_BALANCE_FORK \
152 | SD_WAKE_AFFINE \ 151 | SD_WAKE_AFFINE \
152 | SD_WAKE_BALANCE \
153 | BALANCE_FOR_PKG_POWER,\ 153 | BALANCE_FOR_PKG_POWER,\
154 .last_balance = jiffies, \ 154 .last_balance = jiffies, \
155 .balance_interval = 1, \ 155 .balance_interval = 1, \
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
new file mode 100644
index 000000000000..757005458366
--- /dev/null
+++ b/include/linux/tracepoint.h
@@ -0,0 +1,156 @@
1#ifndef _LINUX_TRACEPOINT_H
2#define _LINUX_TRACEPOINT_H
3
4/*
5 * Kernel Tracepoint API.
6 *
7 * See Documentation/tracepoint.txt.
8 *
9 * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
10 *
11 * Heavily inspired from the Linux Kernel Markers.
12 *
13 * This file is released under the GPLv2.
14 * See the file COPYING for more details.
15 */
16
17#include <linux/types.h>
18#include <linux/rcupdate.h>
19
20struct module;
21struct tracepoint;
22
23struct tracepoint {
24 const char *name; /* Tracepoint name */
25 int state; /* State. */
26 void **funcs;
27} __attribute__((aligned(32))); /*
28 * Aligned on 32 bytes because it is
29 * globally visible and gcc happily
30 * aligns these on the structure size.
31 * Keep in sync with vmlinux.lds.h.
32 */
33
34#define TPPROTO(args...) args
35#define TPARGS(args...) args
36
37#ifdef CONFIG_TRACEPOINTS
38
39/*
40 * it_func[0] is never NULL because there is at least one element in the array
41 * when the array itself is non NULL.
42 */
43#define __DO_TRACE(tp, proto, args) \
44 do { \
45 void **it_func; \
46 \
47 rcu_read_lock_sched_notrace(); \
48 it_func = rcu_dereference((tp)->funcs); \
49 if (it_func) { \
50 do { \
51 ((void(*)(proto))(*it_func))(args); \
52 } while (*(++it_func)); \
53 } \
54 rcu_read_unlock_sched_notrace(); \
55 } while (0)
56
57/*
58 * Make sure the alignment of the structure in the __tracepoints section will
59 * not add unwanted padding between the beginning of the section and the
60 * structure. Force alignment to the same alignment as the section start.
61 */
62#define DECLARE_TRACE(name, proto, args) \
63 extern struct tracepoint __tracepoint_##name; \
64 static inline void trace_##name(proto) \
65 { \
66 if (unlikely(__tracepoint_##name.state)) \
67 __DO_TRACE(&__tracepoint_##name, \
68 TPPROTO(proto), TPARGS(args)); \
69 } \
70 static inline int register_trace_##name(void (*probe)(proto)) \
71 { \
72 return tracepoint_probe_register(#name, (void *)probe); \
73 } \
74 static inline int unregister_trace_##name(void (*probe)(proto)) \
75 { \
76 return tracepoint_probe_unregister(#name, (void *)probe);\
77 }
78
79#define DEFINE_TRACE(name) \
80 static const char __tpstrtab_##name[] \
81 __attribute__((section("__tracepoints_strings"))) = #name; \
82 struct tracepoint __tracepoint_##name \
83 __attribute__((section("__tracepoints"), aligned(32))) = \
84 { __tpstrtab_##name, 0, NULL }
85
86#define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \
87 EXPORT_SYMBOL_GPL(__tracepoint_##name)
88#define EXPORT_TRACEPOINT_SYMBOL(name) \
89 EXPORT_SYMBOL(__tracepoint_##name)
90
91extern void tracepoint_update_probe_range(struct tracepoint *begin,
92 struct tracepoint *end);
93
94#else /* !CONFIG_TRACEPOINTS */
95#define DECLARE_TRACE(name, proto, args) \
96 static inline void _do_trace_##name(struct tracepoint *tp, proto) \
97 { } \
98 static inline void trace_##name(proto) \
99 { } \
100 static inline int register_trace_##name(void (*probe)(proto)) \
101 { \
102 return -ENOSYS; \
103 } \
104 static inline int unregister_trace_##name(void (*probe)(proto)) \
105 { \
106 return -ENOSYS; \
107 }
108
109#define DEFINE_TRACE(name)
110#define EXPORT_TRACEPOINT_SYMBOL_GPL(name)
111#define EXPORT_TRACEPOINT_SYMBOL(name)
112
113static inline void tracepoint_update_probe_range(struct tracepoint *begin,
114 struct tracepoint *end)
115{ }
116#endif /* CONFIG_TRACEPOINTS */
117
118/*
119 * Connect a probe to a tracepoint.
120 * Internal API, should not be used directly.
121 */
122extern int tracepoint_probe_register(const char *name, void *probe);
123
124/*
125 * Disconnect a probe from a tracepoint.
126 * Internal API, should not be used directly.
127 */
128extern int tracepoint_probe_unregister(const char *name, void *probe);
129
130extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
131extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
132extern void tracepoint_probe_update_all(void);
133
134struct tracepoint_iter {
135 struct module *module;
136 struct tracepoint *tracepoint;
137};
138
139extern void tracepoint_iter_start(struct tracepoint_iter *iter);
140extern void tracepoint_iter_next(struct tracepoint_iter *iter);
141extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
142extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
143extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
144 struct tracepoint *begin, struct tracepoint *end);
145
146/*
147 * tracepoint_synchronize_unregister must be called between the last tracepoint
148 * probe unregistration and the end of module exit to make sure there is no
149 * caller executing a probe when it is freed.
150 */
151static inline void tracepoint_synchronize_unregister(void)
152{
153 synchronize_sched();
154}
155
156#endif
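
A condensed usage sketch of the API declared above, in the spirit of Documentation/tracepoint.txt; the tracepoint name, probe and module boilerplate are illustrative only:

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/tracepoint.h>

/* normally in a subsystem header: declares trace_subsys_event() plus the
 * register/unregister helpers */
DECLARE_TRACE(subsys_event,
	TPPROTO(int value, struct task_struct *p),
	TPARGS(value, p));

/* in exactly one C file of the subsystem: emits the tracepoint object */
DEFINE_TRACE(subsys_event);

static void do_work(struct task_struct *p)
{
	/* a predicted-not-taken branch when no probe is registered */
	trace_subsys_event(42, p);
}

/* probes run under rcu_read_lock_sched() and therefore must not sleep */
static void my_probe(int value, struct task_struct *p)
{
	printk(KERN_DEBUG "subsys_event: %d from %s\n", value, p->comm);
}

static int __init probe_init(void)
{
	return register_trace_subsys_event(my_probe);
}

static void __exit probe_exit(void)
{
	unregister_trace_subsys_event(my_probe);
	/* wait for in-flight probe calls before the module text disappears */
	tracepoint_synchronize_unregister();
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");
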
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 3b8121d4e36f..3f4954c55e53 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -325,7 +325,7 @@ extern struct class *tty_class;
325 * go away 325 * go away
326 */ 326 */
327 327
328extern inline struct tty_struct *tty_kref_get(struct tty_struct *tty) 328static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
329{ 329{
330 if (tty) 330 if (tty)
331 kref_get(&tty->kref); 331 kref_get(&tty->kref);
@@ -442,6 +442,7 @@ extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
442 size_t size); 442 size_t size);
443extern void tty_audit_exit(void); 443extern void tty_audit_exit(void);
444extern void tty_audit_fork(struct signal_struct *sig); 444extern void tty_audit_fork(struct signal_struct *sig);
445extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
445extern void tty_audit_push(struct tty_struct *tty); 446extern void tty_audit_push(struct tty_struct *tty);
446extern void tty_audit_push_task(struct task_struct *tsk, 447extern void tty_audit_push_task(struct task_struct *tsk,
447 uid_t loginuid, u32 sessionid); 448 uid_t loginuid, u32 sessionid);
@@ -450,6 +451,9 @@ static inline void tty_audit_add_data(struct tty_struct *tty,
450 unsigned char *data, size_t size) 451 unsigned char *data, size_t size)
451{ 452{
452} 453}
454static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
455{
456}
453static inline void tty_audit_exit(void) 457static inline void tty_audit_exit(void)
454{ 458{
455} 459}
diff --git a/include/linux/types.h b/include/linux/types.h
index d4a9ce6e2760..121f349cb7ec 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -135,19 +135,14 @@ typedef __s64 int64_t;
135 * 135 *
136 * Linux always considers sectors to be 512 bytes long independently 136 * Linux always considers sectors to be 512 bytes long independently
137 * of the devices real block size. 137 * of the devices real block size.
138 *
139 * blkcnt_t is the type of the inode's block count.
138 */ 140 */
139#ifdef CONFIG_LBD 141#ifdef CONFIG_LBD
140typedef u64 sector_t; 142typedef u64 sector_t;
141#else
142typedef unsigned long sector_t;
143#endif
144
145/*
146 * The type of the inode's block count.
147 */
148#ifdef CONFIG_LSF
149typedef u64 blkcnt_t; 143typedef u64 blkcnt_t;
150#else 144#else
145typedef unsigned long sector_t;
151typedef unsigned long blkcnt_t; 146typedef unsigned long blkcnt_t;
152#endif 147#endif
153 148
@@ -190,13 +185,16 @@ typedef __u32 __bitwise __wsum;
190 185
191#ifdef __KERNEL__ 186#ifdef __KERNEL__
192typedef unsigned __bitwise__ gfp_t; 187typedef unsigned __bitwise__ gfp_t;
188typedef unsigned __bitwise__ fmode_t;
193 189
194#ifdef CONFIG_RESOURCES_64BIT 190#ifdef CONFIG_PHYS_ADDR_T_64BIT
195typedef u64 resource_size_t; 191typedef u64 phys_addr_t;
196#else 192#else
197typedef u32 resource_size_t; 193typedef u32 phys_addr_t;
198#endif 194#endif
199 195
196typedef phys_addr_t resource_size_t;
197
200struct ustat { 198struct ustat {
201 __kernel_daddr_t f_tfree; 199 __kernel_daddr_t f_tfree;
202 __kernel_ino_t f_tinode; 200 __kernel_ino_t f_tinode;
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index fec6decfb983..6b58367d145e 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
78 \ 78 \
79 set_fs(KERNEL_DS); \ 79 set_fs(KERNEL_DS); \
80 pagefault_disable(); \ 80 pagefault_disable(); \
81 ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \ 81 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
82 pagefault_enable(); \ 82 pagefault_enable(); \
83 set_fs(old_fs); \ 83 set_fs(old_fs); \
84 ret; \ 84 ret; \
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 94ac74aba6b6..f72aa51f7bcd 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -108,6 +108,7 @@ enum usb_interface_condition {
108 * (in probe()), bound to a driver, or unbinding (in disconnect()) 108 * (in probe()), bound to a driver, or unbinding (in disconnect())
109 * @is_active: flag set when the interface is bound and not suspended. 109 * @is_active: flag set when the interface is bound and not suspended.
110 * @sysfs_files_created: sysfs attributes exist 110 * @sysfs_files_created: sysfs attributes exist
111 * @unregistering: flag set when the interface is being unregistered
111 * @needs_remote_wakeup: flag set when the driver requires remote-wakeup 112 * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
112 * capability during autosuspend. 113 * capability during autosuspend.
113 * @needs_altsetting0: flag set when a set-interface request for altsetting 0 114 * @needs_altsetting0: flag set when a set-interface request for altsetting 0
@@ -163,6 +164,7 @@ struct usb_interface {
163 enum usb_interface_condition condition; /* state of binding */ 164 enum usb_interface_condition condition; /* state of binding */
164 unsigned is_active:1; /* the interface is not suspended */ 165 unsigned is_active:1; /* the interface is not suspended */
165 unsigned sysfs_files_created:1; /* the sysfs attributes exist */ 166 unsigned sysfs_files_created:1; /* the sysfs attributes exist */
167 unsigned unregistering:1; /* unregistration is in progress */
166 unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */ 168 unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
167 unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ 169 unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
168 unsigned needs_binding:1; /* needs delayed unbind/rebind */ 170 unsigned needs_binding:1; /* needs delayed unbind/rebind */
@@ -1135,6 +1137,7 @@ struct usb_anchor {
1135 struct list_head urb_list; 1137 struct list_head urb_list;
1136 wait_queue_head_t wait; 1138 wait_queue_head_t wait;
1137 spinlock_t lock; 1139 spinlock_t lock;
1140 unsigned int poisoned:1;
1138}; 1141};
1139 1142
1140static inline void init_usb_anchor(struct usb_anchor *anchor) 1143static inline void init_usb_anchor(struct usb_anchor *anchor)
@@ -1459,12 +1462,18 @@ extern struct urb *usb_get_urb(struct urb *urb);
1459extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags); 1462extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags);
1460extern int usb_unlink_urb(struct urb *urb); 1463extern int usb_unlink_urb(struct urb *urb);
1461extern void usb_kill_urb(struct urb *urb); 1464extern void usb_kill_urb(struct urb *urb);
1465extern void usb_poison_urb(struct urb *urb);
1466extern void usb_unpoison_urb(struct urb *urb);
1462extern void usb_kill_anchored_urbs(struct usb_anchor *anchor); 1467extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
1468extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
1463extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor); 1469extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
1464extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor); 1470extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
1465extern void usb_unanchor_urb(struct urb *urb); 1471extern void usb_unanchor_urb(struct urb *urb);
1466extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor, 1472extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
1467 unsigned int timeout); 1473 unsigned int timeout);
1474extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor);
1475extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor);
1476extern int usb_anchor_empty(struct usb_anchor *anchor);
1468 1477
1469/** 1478/**
1470 * usb_urb_dir_in - check if an URB describes an IN transfer 1479 * usb_urb_dir_in - check if an URB describes an IN transfer
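The anchor poison helpers added above give a driver a race-free way to stop URB traffic at disconnect time. A hedged sketch of how they combine with usb_anchor_urb(); the driver structure and callbacks are made-up names:

	#include <linux/usb.h>

	/* Sketch: keep all in-flight URBs on one anchor so they can be
	 * poisoned with a single call at disconnect. */
	struct my_dev {
		struct usb_anchor submitted;	/* init_usb_anchor() at probe time */
	};

	static int my_dev_submit(struct my_dev *dev, struct urb *urb, gfp_t gfp)
	{
		int ret;

		usb_anchor_urb(urb, &dev->submitted);
		ret = usb_submit_urb(urb, gfp);
		if (ret)
			usb_unanchor_urb(urb);
		return ret;
	}

	static void my_dev_disconnect(struct usb_interface *intf)
	{
		struct my_dev *dev = usb_get_intfdata(intf);

		/* kill every anchored URB; poisoned URBs cannot be resubmitted
		 * until usb_unpoison_urb() is called on them */
		usb_poison_anchored_urbs(&dev->submitted);
	}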
diff --git a/include/linux/usb/Kbuild b/include/linux/usb/Kbuild
index 42e84fc315e3..54c446309a2a 100644
--- a/include/linux/usb/Kbuild
+++ b/include/linux/usb/Kbuild
@@ -4,4 +4,5 @@ header-y += ch9.h
4header-y += gadgetfs.h 4header-y += gadgetfs.h
5header-y += midi.h 5header-y += midi.h
6header-y += g_printer.h 6header-y += g_printer.h
7 7header-y += tmc.h
8header-y += vstusb.h
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h
index ca228bb94218..18a729343ffa 100644
--- a/include/linux/usb/cdc.h
+++ b/include/linux/usb/cdc.h
@@ -160,6 +160,15 @@ struct usb_cdc_mdlm_detail_desc {
160 __u8 bDetailData[0]; 160 __u8 bDetailData[0];
161} __attribute__ ((packed)); 161} __attribute__ ((packed));
162 162
163/* "OBEX Control Model Functional Descriptor" */
164struct usb_cdc_obex_desc {
165 __u8 bLength;
166 __u8 bDescriptorType;
167 __u8 bDescriptorSubType;
168
169 __le16 bcdVersion;
170} __attribute__ ((packed));
171
163/*-------------------------------------------------------------------------*/ 172/*-------------------------------------------------------------------------*/
164 173
165/* 174/*
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 73a2f4eb1f7a..9b42baed3900 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -158,8 +158,12 @@ struct usb_ctrlrequest {
158 * (rarely) accepted by SET_DESCRIPTOR. 158 * (rarely) accepted by SET_DESCRIPTOR.
159 * 159 *
160 * Note that all multi-byte values here are encoded in little endian 160 * Note that all multi-byte values here are encoded in little endian
161 * byte order "on the wire". But when exposed through Linux-USB APIs, 161 * byte order "on the wire". Within the kernel and when exposed
162 * they've been converted to cpu byte order. 162 * through the Linux-USB APIs, they are not converted to cpu byte
163 * order; it is the responsibility of the client code to do this.
164 * The single exception is when device and configuration descriptors (but
165 * not other descriptors) are read from usbfs (i.e. /proc/bus/usb/BBB/DDD);
166 * in this case the fields are converted to host endianness by the kernel.
163 */ 167 */
164 168
165/* 169/*
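The reworded comment clarifies that multi-byte descriptor fields stay little-endian inside the kernel, so callers convert explicitly. A small illustrative helper, assuming an endpoint descriptor is at hand:

	#include <linux/usb/ch9.h>
	#include <asm/byteorder.h>

	/* wMaxPacketSize is __le16 in struct usb_endpoint_descriptor; convert
	 * to CPU byte order before doing arithmetic on it. */
	static unsigned int ep_max_packet(const struct usb_endpoint_descriptor *epd)
	{
		return le16_to_cpu(epd->wMaxPacketSize);
	}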
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index c932390c6da0..935c380ffe47 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -130,6 +130,9 @@ struct usb_function {
130 130
131int usb_add_function(struct usb_configuration *, struct usb_function *); 131int usb_add_function(struct usb_configuration *, struct usb_function *);
132 132
133int usb_function_deactivate(struct usb_function *);
134int usb_function_activate(struct usb_function *);
135
133int usb_interface_id(struct usb_configuration *, struct usb_function *); 136int usb_interface_id(struct usb_configuration *, struct usb_function *);
134 137
135/** 138/**
@@ -316,9 +319,13 @@ struct usb_composite_dev {
316 struct usb_composite_driver *driver; 319 struct usb_composite_driver *driver;
317 u8 next_string_id; 320 u8 next_string_id;
318 321
319 spinlock_t lock; 322 /* the gadget driver won't enable the data pullup
323 * while the deactivation count is nonzero.
324 */
325 unsigned deactivations;
320 326
321 /* REVISIT use and existence of lock ... */ 327 /* protects at least deactivation count */
328 spinlock_t lock;
322}; 329};
323 330
324extern int usb_string_id(struct usb_composite_dev *c); 331extern int usb_string_id(struct usb_composite_dev *c);
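usb_function_deactivate()/usb_function_activate() and the new deactivations counter let a function keep the gadget disconnected until it is ready to service the host. A hedged sketch of the intended call pattern; the function driver and its callbacks are hypothetical:

	#include <linux/usb/composite.h>

	static int my_func_bind(struct usb_configuration *c, struct usb_function *f)
	{
		/* keep the data pullup off until we are ready to service requests;
		 * bumps cdev->deactivations under cdev->lock */
		return usb_function_deactivate(f);
	}

	static void my_func_ready(struct usb_function *f)
	{
		/* drops the deactivation count; when it reaches zero the composite
		 * core lets the gadget connect */
		usb_function_activate(f);
	}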
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 655341d0f534..0b8617a9176d 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -192,7 +192,7 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
192 * The driver.owner field should be set to the module owner of this driver. 192 * The driver.owner field should be set to the module owner of this driver.
193 * The driver.name field should be set to the name of this driver (remember 193 * The driver.name field should be set to the name of this driver (remember
194 * it will show up in sysfs, so it needs to be short and to the point. 194 * it will show up in sysfs, so it needs to be short and to the point.
195 * Useing the module name is a good idea.) 195 * Using the module name is a good idea.)
196 */ 196 */
197struct usb_serial_driver { 197struct usb_serial_driver {
198 const char *description; 198 const char *description;
diff --git a/include/linux/usb/tmc.h b/include/linux/usb/tmc.h
new file mode 100644
index 000000000000..c045ae12556c
--- /dev/null
+++ b/include/linux/usb/tmc.h
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
3 * Copyright (C) 2008 Novell, Inc.
4 * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
5 *
6 * This file holds USB constants defined by the USB Device Class
7 * Definition for Test and Measurement devices published by the USB-IF.
8 *
9 * It also has the ioctl definitions for the usbtmc kernel driver that
10 * userspace needs to know about.
11 */
12
13#ifndef __LINUX_USB_TMC_H
14#define __LINUX_USB_TMC_H
15
16/* USB TMC status values */
17#define USBTMC_STATUS_SUCCESS 0x01
18#define USBTMC_STATUS_PENDING 0x02
19#define USBTMC_STATUS_FAILED 0x80
20#define USBTMC_STATUS_TRANSFER_NOT_IN_PROGRESS 0x81
21#define USBTMC_STATUS_SPLIT_NOT_IN_PROGRESS 0x82
22#define USBTMC_STATUS_SPLIT_IN_PROGRESS 0x83
23
24/* USB TMC requests values */
25#define USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT 1
26#define USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS 2
27#define USBTMC_REQUEST_INITIATE_ABORT_BULK_IN 3
28#define USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS 4
29#define USBTMC_REQUEST_INITIATE_CLEAR 5
30#define USBTMC_REQUEST_CHECK_CLEAR_STATUS 6
31#define USBTMC_REQUEST_GET_CAPABILITIES 7
32#define USBTMC_REQUEST_INDICATOR_PULSE 64
33
34/* Request values for USBTMC driver's ioctl entry point */
35#define USBTMC_IOC_NR 91
36#define USBTMC_IOCTL_INDICATOR_PULSE _IO(USBTMC_IOC_NR, 1)
37#define USBTMC_IOCTL_CLEAR _IO(USBTMC_IOC_NR, 2)
38#define USBTMC_IOCTL_ABORT_BULK_OUT _IO(USBTMC_IOC_NR, 3)
39#define USBTMC_IOCTL_ABORT_BULK_IN _IO(USBTMC_IOC_NR, 4)
40#define USBTMC_IOCTL_CLEAR_OUT_HALT _IO(USBTMC_IOC_NR, 6)
41#define USBTMC_IOCTL_CLEAR_IN_HALT _IO(USBTMC_IOC_NR, 7)
42
43#endif
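These ioctls are issued from userspace against the character device created by the usbtmc driver. A hedged userspace sketch; the /dev/usbtmc0 node name is an assumption for illustration:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/usb/tmc.h>

	int main(void)
	{
		/* device node name is assumed */
		int fd = open("/dev/usbtmc0", O_RDWR);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, USBTMC_IOCTL_INDICATOR_PULSE) < 0)
			perror("USBTMC_IOCTL_INDICATOR_PULSE");
		if (ioctl(fd, USBTMC_IOCTL_CLEAR) < 0)
			perror("USBTMC_IOCTL_CLEAR");
		close(fd);
		return 0;
	}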
diff --git a/include/linux/usb/vstusb.h b/include/linux/usb/vstusb.h
new file mode 100644
index 000000000000..1cfac67191ff
--- /dev/null
+++ b/include/linux/usb/vstusb.h
@@ -0,0 +1,71 @@
1/*****************************************************************************
2 * File: drivers/usb/misc/vstusb.h
3 *
4 * Purpose: Support for the bulk USB Vernier Spectrophotometers
5 *
6 * Author: EQware Engineering, Inc.
7 * Oregon City, OR, USA 97045
8 *
9 * Copyright: 2007, 2008
10 * Vernier Software & Technology
11 * Beaverton, OR, USA 97005
12 *
13 * Web: www.vernier.com
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License version 2 as
17 * published by the Free Software Foundation.
18 *
19 *****************************************************************************/
20/*****************************************************************************
21 *
22 * The vstusb module is a standard usb 'client' driver running on top of the
23 * standard usb host controller stack.
24 *
25 * In general, vstusb supports standard bulk usb pipes. It supports multiple
26 * devices and multiple pipes per device.
27 *
28 * The vstusb driver supports two interfaces:
29 * 1 - ioctl SEND_PIPE/RECV_PIPE - a general bulk write/read msg
30 * interface to any pipe with timeout support;
31 * 2 - standard read/write with ioctl config - offers standard read/write
32 * interface with ioctl configured pipes and timeouts.
33 *
34 * Both interfaces can be signalled from another process, which will abort the i/o
35 * operation.
36 *
37 * A timeout of 0 means NO timeout. The user can still terminate the read via
38 * signal.
39 *
40 * If using multiple threads with this driver, the user should ensure that
41 * any reads, writes, or ioctls are complete before closing the device.
42 * Changing read/write timeouts or pipes takes effect on next read/write.
43 *
44 *****************************************************************************/
45
46struct vstusb_args {
47 union {
48 /* this struct is used for IOCTL_VSTUSB_SEND_PIPE, *
49 * IOCTL_VSTUSB_RECV_PIPE, and read()/write() fops */
50 struct {
51 void __user *buffer;
52 size_t count;
53 unsigned int timeout_ms;
54 int pipe;
55 };
56
57 /* this one is used for IOCTL_VSTUSB_CONFIG_RW */
58 struct {
59 int rd_pipe;
60 int rd_timeout_ms;
61 int wr_pipe;
62 int wr_timeout_ms;
63 };
64 };
65};
66
67#define VST_IOC_MAGIC 'L'
68#define VST_IOC_FIRST 0x20
69#define IOCTL_VSTUSB_SEND_PIPE _IO(VST_IOC_MAGIC, VST_IOC_FIRST)
70#define IOCTL_VSTUSB_RECV_PIPE _IO(VST_IOC_MAGIC, VST_IOC_FIRST + 1)
71#define IOCTL_VSTUSB_CONFIG_RW _IO(VST_IOC_MAGIC, VST_IOC_FIRST + 2)
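The union in struct vstusb_args serves both interfaces described in the header comment. A hedged userspace sketch of the SEND_PIPE path; the device node name, pipe number and command string are illustrative assumptions:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/usb/vstusb.h>

	int main(void)
	{
		char cmd[] = "start-measurement";	/* illustrative payload */
		struct vstusb_args args;
		int fd = open("/dev/vstusb0", O_RDWR);	/* node name assumed */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		memset(&args, 0, sizeof(args));
		args.buffer = cmd;
		args.count = sizeof(cmd);
		args.timeout_ms = 1000;	/* 0 would mean no timeout */
		args.pipe = 2;		/* assumed bulk OUT pipe */
		if (ioctl(fd, IOCTL_VSTUSB_SEND_PIPE, &args) < 0)
			perror("IOCTL_VSTUSB_SEND_PIPE");
		close(fd);
		return 0;
	}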
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h
new file mode 100644
index 000000000000..a102561e7026
--- /dev/null
+++ b/include/linux/usb/wusb-wa.h
@@ -0,0 +1,271 @@
1/*
2 * Wireless USB Wire Adapter constants and structures.
3 *
4 * Copyright (C) 2005-2006 Intel Corporation.
5 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * FIXME: docs
23 * FIXME: organize properly, group logically
24 *
25 * All the event structures are defined in uwb/spec.h, as they are
26 * common to the WHCI and WUSB radio control interfaces.
27 *
28 * References:
29 * [WUSB] Wireless Universal Serial Bus Specification, revision 1.0, ch8
30 */
31#ifndef __LINUX_USB_WUSB_WA_H
32#define __LINUX_USB_WUSB_WA_H
33
34/**
35 * Radio Command Request for the Radio Control Interface
36 *
37 * Radio Control Interface command and event codes are the same as
38 * WHCI, and listed in include/linux/uwb.h:UWB_RC_{CMD,EVT}_*
39 */
40enum {
41 WA_EXEC_RC_CMD = 40, /* Radio Control command Request */
42};
43
44/* Wireless Adapter Requests ([WUSB] table 8-51) */
45enum {
46 WUSB_REQ_ADD_MMC_IE = 20,
47 WUSB_REQ_REMOVE_MMC_IE = 21,
48 WUSB_REQ_SET_NUM_DNTS = 22,
49 WUSB_REQ_SET_CLUSTER_ID = 23,
50 WUSB_REQ_SET_DEV_INFO = 24,
51 WUSB_REQ_GET_TIME = 25,
52 WUSB_REQ_SET_STREAM_IDX = 26,
53 WUSB_REQ_SET_WUSB_MAS = 27,
54};
55
56
57/* Wireless Adapter WUSB Channel Time types ([WUSB] table 8-52) */
58enum {
59 WUSB_TIME_ADJ = 0,
60 WUSB_TIME_BPST = 1,
61 WUSB_TIME_WUSB = 2,
62};
63
64enum {
65 WA_ENABLE = 0x01,
66 WA_RESET = 0x02,
67 RPIPE_PAUSE = 0x1,
68};
69
70/* Responses from Get Status request ([WUSB] section 8.3.1.6) */
71enum {
72 WA_STATUS_ENABLED = 0x01,
73 WA_STATUS_RESETTING = 0x02
74};
75
76enum rpipe_crs {
77 RPIPE_CRS_CTL = 0x01,
78 RPIPE_CRS_ISO = 0x02,
79 RPIPE_CRS_BULK = 0x04,
80 RPIPE_CRS_INTR = 0x08
81};
82
83/**
84 * RPipe descriptor ([WUSB] section 8.5.2.11)
85 *
86 * FIXME: explain rpipes
87 */
88struct usb_rpipe_descriptor {
89 u8 bLength;
90 u8 bDescriptorType;
91 __le16 wRPipeIndex;
92 __le16 wRequests;
93 __le16 wBlocks; /* rw if 0 */
94 __le16 wMaxPacketSize; /* rw? */
95 u8 bHSHubAddress; /* reserved: 0 */
96 u8 bHSHubPort; /* ??? FIXME ??? */
97 u8 bSpeed; /* rw: xfer rate 'enum uwb_phy_rate' */
98 u8 bDeviceAddress; /* rw: Target device address */
99 u8 bEndpointAddress; /* rw: Target EP address */
100 u8 bDataSequence; /* ro: Current Data sequence */
101 __le32 dwCurrentWindow; /* ro */
102 u8 bMaxDataSequence; /* ro?: max supported seq */
103 u8 bInterval; /* rw: */
104 u8 bOverTheAirInterval; /* rw: */
105 u8 bmAttribute; /* ro? */
106 u8 bmCharacteristics; /* ro? enum rpipe_attr, supported xsactions */
107 u8 bmRetryOptions; /* rw? */
108 __le16 wNumTransactionErrors; /* rw */
109} __attribute__ ((packed));
110
111/**
112 * Wire Adapter Notification types ([WUSB] sections 8.4.5 & 8.5.4)
113 *
114 * These are the notifications coming on the notification endpoint of
115 * an HWA and a DWA.
116 */
117enum wa_notif_type {
118 DWA_NOTIF_RWAKE = 0x91,
119 DWA_NOTIF_PORTSTATUS = 0x92,
120 WA_NOTIF_TRANSFER = 0x93,
121 HWA_NOTIF_BPST_ADJ = 0x94,
122 HWA_NOTIF_DN = 0x95,
123};
124
125/**
126 * Wire Adapter notification header
127 *
128 * Notifications coming from a wire adapter use a common header
129 * defined in [WUSB] sections 8.4.5 & 8.5.4.
130 */
131struct wa_notif_hdr {
132 u8 bLength;
133 u8 bNotifyType; /* enum wa_notif_type */
134} __attribute__((packed));
135
136/**
137 * HWA DN Received notification ([WUSB] section 8.5.4.2)
138 *
139 * The DNData is specified in WUSB1.0[7.6]. For each device
140 * notification we received, we just need to dispatch it.
141 *
142 * @dndata: this is really an array of notifications, but all start
143 * with the same header.
144 */
145struct hwa_notif_dn {
146 struct wa_notif_hdr hdr;
147 u8 bSourceDeviceAddr; /* from errata 2005/07 */
148 u8 bmAttributes;
149 struct wusb_dn_hdr dndata[];
150} __attribute__((packed));
151
152/* [WUSB] section 8.3.3 */
153enum wa_xfer_type {
154 WA_XFER_TYPE_CTL = 0x80,
155 WA_XFER_TYPE_BI = 0x81, /* bulk/interrupt */
156 WA_XFER_TYPE_ISO = 0x82,
157 WA_XFER_RESULT = 0x83,
158 WA_XFER_ABORT = 0x84,
159};
160
161/* [WUSB] section 8.3.3 */
162struct wa_xfer_hdr {
163 u8 bLength; /* 0x18 */
164 u8 bRequestType; /* 0x80 WA_REQUEST_TYPE_CTL */
165 __le16 wRPipe; /* RPipe index */
166 __le32 dwTransferID; /* Host-assigned ID */
167 __le32 dwTransferLength; /* Length of data to xfer */
168 u8 bTransferSegment;
169} __attribute__((packed));
170
171struct wa_xfer_ctl {
172 struct wa_xfer_hdr hdr;
173 u8 bmAttribute;
174 __le16 wReserved;
175 struct usb_ctrlrequest baSetupData;
176} __attribute__((packed));
177
178struct wa_xfer_bi {
179 struct wa_xfer_hdr hdr;
180 u8 bReserved;
181 __le16 wReserved;
182} __attribute__((packed));
183
184struct wa_xfer_hwaiso {
185 struct wa_xfer_hdr hdr;
186 u8 bReserved;
187 __le16 wPresentationTime;
188 __le32 dwNumOfPackets;
189 /* FIXME: u8 pktdata[]? */
190} __attribute__((packed));
191
192/* [WUSB] section 8.3.3.5 */
193struct wa_xfer_abort {
194 u8 bLength;
195 u8 bRequestType;
196 __le16 wRPipe; /* RPipe index */
197 __le32 dwTransferID; /* Host-assigned ID */
198} __attribute__((packed));
199
200/**
201 * WA Transfer Complete notification ([WUSB] section 8.3.3.3)
202 *
203 */
204struct wa_notif_xfer {
205 struct wa_notif_hdr hdr;
206 u8 bEndpoint;
207 u8 Reserved;
208} __attribute__((packed));
209
210/** Transfer result basic codes [WUSB] table 8-15 */
211enum {
212 WA_XFER_STATUS_SUCCESS,
213 WA_XFER_STATUS_HALTED,
214 WA_XFER_STATUS_DATA_BUFFER_ERROR,
215 WA_XFER_STATUS_BABBLE,
216 WA_XFER_RESERVED,
217 WA_XFER_STATUS_NOT_FOUND,
218 WA_XFER_STATUS_INSUFFICIENT_RESOURCE,
219 WA_XFER_STATUS_TRANSACTION_ERROR,
220 WA_XFER_STATUS_ABORTED,
221 WA_XFER_STATUS_RPIPE_NOT_READY,
222 WA_XFER_INVALID_FORMAT,
223 WA_XFER_UNEXPECTED_SEGMENT_NUMBER,
224 WA_XFER_STATUS_RPIPE_TYPE_MISMATCH,
225};
226
227/** [WUSB] section 8.3.3.4 */
228struct wa_xfer_result {
229 struct wa_notif_hdr hdr;
230 __le32 dwTransferID;
231 __le32 dwTransferLength;
232 u8 bTransferSegment;
233 u8 bTransferStatus;
234 __le32 dwNumOfPackets;
235} __attribute__((packed));
236
237/**
238 * Wire Adapter Class Descriptor ([WUSB] section 8.5.2.7).
239 *
240 * NOTE: u16 fields are read Little Endian from the hardware.
241 *
242 * @bNumPorts is the original max number of devices that the host can
243 * connect; we might chop this so the stack can handle
244 * it. In case you need to access it, use wusbhc->ports_max
245 * if it is a Wireless USB WA.
246 */
247struct usb_wa_descriptor {
248 u8 bLength;
249 u8 bDescriptorType;
250 u16 bcdWAVersion;
251 u8 bNumPorts; /* don't use!! */
252 u8 bmAttributes; /* Reserved == 0 */
253 u16 wNumRPipes;
254 u16 wRPipeMaxBlock;
255 u8 bRPipeBlockSize;
256 u8 bPwrOn2PwrGood;
257 u8 bNumMMCIEs;
258 u8 DeviceRemovable; /* FIXME: in DWA this is up to 16 bytes */
259} __attribute__((packed));
260
261/**
262 * HWA Device Information Buffer (WUSB1.0[T8.54])
263 */
264struct hwa_dev_info {
265 u8 bmDeviceAvailability[32]; /* FIXME: ignored for now */
266 u8 bDeviceAddress;
267 __le16 wPHYRates;
268 u8 bmDeviceAttribute;
269} __attribute__((packed));
270
271#endif /* #ifndef __LINUX_USB_WUSB_WA_H */
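Notifications from a wire adapter arrive packed back to back on the notification endpoint, each starting with the wa_notif_hdr defined above, so consumers walk the buffer by bLength. A hedged sketch of that walk; the dispatch callback is a placeholder:

	#include <linux/types.h>
	#include <linux/usb/wusb-wa.h>

	/* Sketch: iterate the notifications packed into one transfer buffer.
	 * handle_notif() is a hypothetical dispatcher. */
	static void walk_wa_notifications(const void *buf, size_t size,
			void (*handle_notif)(const struct wa_notif_hdr *))
	{
		const u8 *itr = buf;

		while (size >= sizeof(struct wa_notif_hdr)) {
			const struct wa_notif_hdr *hdr = (const void *)itr;

			if (hdr->bLength < sizeof(*hdr) || hdr->bLength > size)
				break;	/* malformed or truncated notification */
			handle_notif(hdr);
			itr += hdr->bLength;
			size -= hdr->bLength;
		}
	}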
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
new file mode 100644
index 000000000000..5f401b644ed5
--- /dev/null
+++ b/include/linux/usb/wusb.h
@@ -0,0 +1,376 @@
1/*
2 * Wireless USB Standard Definitions
3 * Event Size Tables
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: docs
24 * FIXME: organize properly, group logically
25 *
26 * All the event structures are defined in uwb/spec.h, as they are
27 * common to the WHCI and WUSB radio control interfaces.
28 */
29
30#ifndef __WUSB_H__
31#define __WUSB_H__
32
33#include <linux/types.h>
34#include <linux/kernel.h>
35#include <linux/uwb/spec.h>
36#include <linux/usb/ch9.h>
37#include <linux/param.h>
38
39/**
40 * WUSB Information Element header
41 *
42 * I don't know why, they decided to make it different to the MBOA MAC
43 * IE Header; beats me.
44 */
45struct wuie_hdr {
46 u8 bLength;
47 u8 bIEIdentifier;
48} __attribute__((packed));
49
50enum {
51 WUIE_ID_WCTA = 0x80,
52 WUIE_ID_CONNECTACK,
53 WUIE_ID_HOST_INFO,
54 WUIE_ID_CHANGE_ANNOUNCE,
55 WUIE_ID_DEVICE_DISCONNECT,
56 WUIE_ID_HOST_DISCONNECT,
57 WUIE_ID_KEEP_ALIVE = 0x89,
58 WUIE_ID_ISOCH_DISCARD,
59 WUIE_ID_RESET_DEVICE,
60};
61
62/**
63 * Maximum number of array elements in a WUSB IE.
64 *
65 * WUSB1.0[7.5 before table 7-38] says that in WUSB IEs that
66 * are "arrays" have to limited to 4 elements. So we define it
67 * like that to ease up and submit only the neeed size.
68 */
69#define WUIE_ELT_MAX 4
70
71/**
72 * Wrapper for the data that defines a CHID, a CDID or a CK
73 *
74 * WUSB defines that CHIDs, CDIDs and CKs are a 16 byte string of
75 * data. In order to avoid confusion and enforce types, we wrap it.
76 *
77 * Make it packed, as we use it in some hw definitions.
78 */
79struct wusb_ckhdid {
80 u8 data[16];
81} __attribute__((packed));
82
83const static
84struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
85
86#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
87
88/**
89 * WUSB IE: Host Information (WUSB1.0[7.5.2])
90 *
91 * Used to provide information about the host to the Wireless USB
92 * devices in range (CHID can be used as an ASCII string).
93 */
94struct wuie_host_info {
95 struct wuie_hdr hdr;
96 __le16 attributes;
97 struct wusb_ckhdid CHID;
98} __attribute__((packed));
99
100/**
101 * WUSB IE: Connect Ack (WUSB1.0[7.5.1])
102 *
103 * Used to acknowledge device connect requests. See note for
104 * WUIE_ELT_MAX.
105 */
106struct wuie_connect_ack {
107 struct wuie_hdr hdr;
108 struct {
109 struct wusb_ckhdid CDID;
110 u8 bDeviceAddress; /* 0 means unused */
111 u8 bReserved;
112 } blk[WUIE_ELT_MAX];
113} __attribute__((packed));
114
115/**
116 * WUSB IE Host Information Element, Connect Availability
117 *
118 * WUSB1.0[7.5.2], bmAttributes description
119 */
120enum {
121 WUIE_HI_CAP_RECONNECT = 0,
122 WUIE_HI_CAP_LIMITED,
123 WUIE_HI_CAP_RESERVED,
124 WUIE_HI_CAP_ALL,
125};
126
127/**
128 * WUSB IE: Channel Stop (WUSB1.0[7.5.8])
129 *
130 * Tells devices the host is going to stop sending MMCs and will disappear.
131 */
132struct wuie_channel_stop {
133 struct wuie_hdr hdr;
134 u8 attributes;
135 u8 timestamp[3];
136} __attribute__((packed));
137
138/**
139 * WUSB IE: Keepalive (WUSB1.0[7.5.9])
140 *
141 * Ask device(s) to send keepalives.
142 */
143struct wuie_keep_alive {
144 struct wuie_hdr hdr;
145 u8 bDeviceAddress[WUIE_ELT_MAX];
146} __attribute__((packed));
147
148/**
149 * WUSB IE: Reset device (WUSB1.0[7.5.11])
150 *
151 * Tell device to reset; in all truth, we can fit 4 CDIDs, but we only
152 * use it for one at a time...
153 *
154 * In any case, this request is a wee bit silly: why don't they target
155 * by address??
156 */
157struct wuie_reset {
158 struct wuie_hdr hdr;
159 struct wusb_ckhdid CDID;
160} __attribute__((packed));
161
162/**
163 * WUSB IE: Disconnect device (WUSB1.0[7.5.11])
164 *
165 * Tell device to disconnect; we can fit 4 addresses, but we only use
166 * it for one at a time...
167 */
168struct wuie_disconnect {
169 struct wuie_hdr hdr;
170 u8 bDeviceAddress;
171 u8 padding;
172} __attribute__((packed));
173
174/**
175 * WUSB IE: Host disconnect ([WUSB] section 7.5.5)
176 *
177 * Tells all connected devices to disconnect.
178 */
179struct wuie_host_disconnect {
180 struct wuie_hdr hdr;
181} __attribute__((packed));
182
183/**
184 * WUSB Device Notification header (WUSB1.0[7.6])
185 */
186struct wusb_dn_hdr {
187 u8 bType;
188 u8 notifdata[];
189} __attribute__((packed));
190
191/** Device Notification codes (WUSB1.0[Table 7-54]) */
192enum WUSB_DN {
193 WUSB_DN_CONNECT = 0x01,
194 WUSB_DN_DISCONNECT = 0x02,
195 WUSB_DN_EPRDY = 0x03,
196 WUSB_DN_MASAVAILCHANGED = 0x04,
197 WUSB_DN_RWAKE = 0x05,
198 WUSB_DN_SLEEP = 0x06,
199 WUSB_DN_ALIVE = 0x07,
200};
201
202/** WUSB Device Notification Connect */
203struct wusb_dn_connect {
204 struct wusb_dn_hdr hdr;
205 __le16 attributes;
206 struct wusb_ckhdid CDID;
207} __attribute__((packed));
208
209static inline int wusb_dn_connect_prev_dev_addr(const struct wusb_dn_connect *dn)
210{
211 return le16_to_cpu(dn->attributes) & 0xff;
212}
213
214static inline int wusb_dn_connect_new_connection(const struct wusb_dn_connect *dn)
215{
216 return (le16_to_cpu(dn->attributes) >> 8) & 0x1;
217}
218
219static inline int wusb_dn_connect_beacon_behavior(const struct wusb_dn_connect *dn)
220{
221 return (le16_to_cpu(dn->attributes) >> 9) & 0x03;
222}
223
224/** Device is alive (aka: pong) (WUSB1.0[7.6.7]) */
225struct wusb_dn_alive {
226 struct wusb_dn_hdr hdr;
227} __attribute__((packed));
228
229/** Device is disconnecting (WUSB1.0[7.6.2]) */
230struct wusb_dn_disconnect {
231 struct wusb_dn_hdr hdr;
232} __attribute__((packed));
233
234/* General constants */
235enum {
236 WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */
237};
238
239static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size,
240 const struct wusb_ckhdid *ckhdid)
241{
242 return scnprintf(pr_ckhdid, size,
243 "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx "
244 "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx",
245 ckhdid->data[0], ckhdid->data[1],
246 ckhdid->data[2], ckhdid->data[3],
247 ckhdid->data[4], ckhdid->data[5],
248 ckhdid->data[6], ckhdid->data[7],
249 ckhdid->data[8], ckhdid->data[9],
250 ckhdid->data[10], ckhdid->data[11],
251 ckhdid->data[12], ckhdid->data[13],
252 ckhdid->data[14], ckhdid->data[15]);
253}
254
255/*
256 * WUSB Crypto stuff (WUSB1.0[6])
257 */
258
259extern const char *wusb_et_name(u8);
260
261/**
262 * WUSB key index WUSB1.0[7.3.2.4], for usage when setting keys for
263 * the host or the device.
264 */
265static inline u8 wusb_key_index(int index, int type, int originator)
266{
267 return (originator << 6) | (type << 4) | index;
268}
269
270#define WUSB_KEY_INDEX_TYPE_PTK 0 /* for HWA only */
271#define WUSB_KEY_INDEX_TYPE_ASSOC 1
272#define WUSB_KEY_INDEX_TYPE_GTK 2
273#define WUSB_KEY_INDEX_ORIGINATOR_HOST 0
274#define WUSB_KEY_INDEX_ORIGINATOR_DEVICE 1
275
276/* A CCM Nonce, defined in WUSB1.0[6.4.1] */
277struct aes_ccm_nonce {
278 u8 sfn[6]; /* Little Endian */
279 u8 tkid[3]; /* LE */
280 struct uwb_dev_addr dest_addr;
281 struct uwb_dev_addr src_addr;
282} __attribute__((packed));
283
284/* A CCM operation label, defined on WUSB1.0[6.5.x] */
285struct aes_ccm_label {
286 u8 data[14];
287} __attribute__((packed));
288
289/*
290 * Input to the key derivation sequence defined in
291 * WUSB1.0[6.5.1]. Rest of the data is in the CCM Nonce passed to the
292 * PRF function.
293 */
294struct wusb_keydvt_in {
295 u8 hnonce[16];
296 u8 dnonce[16];
297} __attribute__((packed));
298
299/*
300 * Output from the key derivation sequence defined in
301 * WUSB1.0[6.5.1].
302 */
303struct wusb_keydvt_out {
304 u8 kck[16];
305 u8 ptk[16];
306} __attribute__((packed));
307
308/* Pseudo Random Function WUSB1.0[6.5] */
309extern int wusb_crypto_init(void);
310extern void wusb_crypto_exit(void);
311extern ssize_t wusb_prf(void *out, size_t out_size,
312 const u8 key[16], const struct aes_ccm_nonce *_n,
313 const struct aes_ccm_label *a,
314 const void *b, size_t blen, size_t len);
315
316static inline int wusb_prf_64(void *out, size_t out_size, const u8 key[16],
317 const struct aes_ccm_nonce *n,
318 const struct aes_ccm_label *a,
319 const void *b, size_t blen)
320{
321 return wusb_prf(out, out_size, key, n, a, b, blen, 64);
322}
323
324static inline int wusb_prf_128(void *out, size_t out_size, const u8 key[16],
325 const struct aes_ccm_nonce *n,
326 const struct aes_ccm_label *a,
327 const void *b, size_t blen)
328{
329 return wusb_prf(out, out_size, key, n, a, b, blen, 128);
330}
331
332static inline int wusb_prf_256(void *out, size_t out_size, const u8 key[16],
333 const struct aes_ccm_nonce *n,
334 const struct aes_ccm_label *a,
335 const void *b, size_t blen)
336{
337 return wusb_prf(out, out_size, key, n, a, b, blen, 256);
338}
339
340/* Key derivation WUSB1.0[6.5.1] */
341static inline int wusb_key_derive(struct wusb_keydvt_out *keydvt_out,
342 const u8 key[16],
343 const struct aes_ccm_nonce *n,
344 const struct wusb_keydvt_in *keydvt_in)
345{
346 const struct aes_ccm_label a = { .data = "Pair-wise keys" };
347 return wusb_prf_256(keydvt_out, sizeof(*keydvt_out), key, n, &a,
348 keydvt_in, sizeof(*keydvt_in));
349}
350
351/*
352 * Out-of-band MIC Generation WUSB1.0[6.5.2]
353 *
354 * Compute the MIC over @key, @n and @hs and place it in @mic_out.
355 *
356 * @mic_out: Where to place the 8 byte MIC tag
357 * @key: KCK from the derivation process
358 * @n: CCM nonce, n->sfn == 0, TKID as established in the
359 * process.
360 * @hs: Handshake struct for phase 2 of the 4-way.
361 * hs->bStatus and hs->bReserved are zero.
362 * hs->bMessageNumber is 2 (WUSB1.0[7.3.2.5.2]
363 * hs->dest_addr is the device's USB address padded with 0
364 * hs->src_addr is the host's UWB device address
365 * hs->mic is ignored (as we compute that value).
366 */
367static inline int wusb_oob_mic(u8 mic_out[8], const u8 key[16],
368 const struct aes_ccm_nonce *n,
369 const struct usb_handshake *hs)
370{
371 const struct aes_ccm_label a = { .data = "out-of-bandMIC" };
372 return wusb_prf_64(mic_out, 8, key, n, &a,
373 hs, sizeof(*hs) - sizeof(hs->MIC));
374}
375
376#endif /* #ifndef __WUSB_H__ */
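The PRF wrappers and wusb_key_index() above fit together during key establishment: the host derives a KCK/PTK pair from the exchanged nonces and installs the PTK under a key index built with wusb_key_index(). A hedged sketch under those assumptions; the surrounding handshake state is not shown:

	#include <linux/string.h>
	#include <linux/usb/wusb.h>

	/* Sketch: ccm_nonce, hnonce and dnonce are assumed to come from the
	 * ongoing 4-way handshake; error handling is reduced to the return code. */
	static int derive_pairwise_keys(struct wusb_keydvt_out *out,
					const u8 master_key[16],
					const struct aes_ccm_nonce *ccm_nonce,
					const u8 hnonce[16], const u8 dnonce[16])
	{
		struct wusb_keydvt_in in;
		u8 key_index;

		memcpy(in.hnonce, hnonce, sizeof(in.hnonce));
		memcpy(in.dnonce, dnonce, sizeof(in.dnonce));

		/* host-originated PTK at index 0:
		 * (ORIGINATOR_HOST << 6) | (TYPE_PTK << 4) | 0 */
		key_index = wusb_key_index(0, WUSB_KEY_INDEX_TYPE_PTK,
					   WUSB_KEY_INDEX_ORIGINATOR_HOST);
		(void)key_index;	/* would be used when installing the PTK */

		return wusb_key_derive(out, master_key, ccm_nonce, &in);
	}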
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index b5f41d4c2eec..315bcd375224 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -12,7 +12,7 @@
12struct user_namespace { 12struct user_namespace {
13 struct kref kref; 13 struct kref kref;
14 struct hlist_head uidhash_table[UIDHASH_SZ]; 14 struct hlist_head uidhash_table[UIDHASH_SZ];
15 struct user_struct *root_user; 15 struct user_struct *creator;
16}; 16};
17 17
18extern struct user_namespace init_user_ns; 18extern struct user_namespace init_user_ns;
@@ -26,8 +26,7 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
26 return ns; 26 return ns;
27} 27}
28 28
29extern struct user_namespace *copy_user_ns(int flags, 29extern int create_user_ns(struct cred *new);
30 struct user_namespace *old_ns);
31extern void free_user_ns(struct kref *kref); 30extern void free_user_ns(struct kref *kref);
32 31
33static inline void put_user_ns(struct user_namespace *ns) 32static inline void put_user_ns(struct user_namespace *ns)
@@ -43,13 +42,9 @@ static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
43 return &init_user_ns; 42 return &init_user_ns;
44} 43}
45 44
46static inline struct user_namespace *copy_user_ns(int flags, 45static inline int create_user_ns(struct cred *new)
47 struct user_namespace *old_ns)
48{ 46{
49 if (flags & CLONE_NEWUSER) 47 return -EINVAL;
50 return ERR_PTR(-EINVAL);
51
52 return old_ns;
53} 48}
54 49
55static inline void put_user_ns(struct user_namespace *ns) 50static inline void put_user_ns(struct user_namespace *ns)
diff --git a/include/linux/uwb.h b/include/linux/uwb.h
new file mode 100644
index 000000000000..f9ccbd9a2ced
--- /dev/null
+++ b/include/linux/uwb.h
@@ -0,0 +1,765 @@
1/*
2 * Ultra Wide Band
3 * UWB API
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: doc: overview of the API, different parts and pointers
24 */
25
26#ifndef __LINUX__UWB_H__
27#define __LINUX__UWB_H__
28
29#include <linux/limits.h>
30#include <linux/device.h>
31#include <linux/mutex.h>
32#include <linux/timer.h>
33#include <linux/workqueue.h>
34#include <linux/uwb/spec.h>
35
36struct uwb_dev;
37struct uwb_beca_e;
38struct uwb_rc;
39struct uwb_rsv;
40struct uwb_dbg;
41
42/**
43 * struct uwb_dev - a UWB Device
44 * @rc: UWB Radio Controller that discovered the device (kind of its
45 * parent).
46 * @bce: a beacon cache entry for this device; or NULL if the device
47 * is a local radio controller.
48 * @mac_addr: the EUI-48 address of this device.
49 * @dev_addr: the current DevAddr used by this device.
50 * @beacon_slot: the slot number the beacon is using.
51 * @streams: bitmap of streams allocated to reservations targeted at
52 * this device. For an RC, this is the streams allocated for
53 * reservations targeted at DevAddrs.
54 *
55 * A UWB device may either be a neighbor or part of a local radio
56 * controller.
57 */
58struct uwb_dev {
59 struct mutex mutex;
60 struct list_head list_node;
61 struct device dev;
62 struct uwb_rc *rc; /* radio controller */
63 struct uwb_beca_e *bce; /* Beacon Cache Entry */
64
65 struct uwb_mac_addr mac_addr;
66 struct uwb_dev_addr dev_addr;
67 int beacon_slot;
68 DECLARE_BITMAP(streams, UWB_NUM_STREAMS);
69};
70#define to_uwb_dev(d) container_of(d, struct uwb_dev, dev)
71
72/**
73 * UWB HWA/WHCI Radio Control {Command|Event} Block context IDs
74 *
75 * RC[CE]Bs have a 'context ID' field that matches the command with
76 * the event received to confirm it.
77 *
78 * Maximum number of context IDs
79 */
80enum { UWB_RC_CTX_MAX = 256 };
81
82
83/** Notification chain head for UWB generated events to listeners */
84struct uwb_notifs_chain {
85 struct list_head list;
86 struct mutex mutex;
87};
88
89/**
90 * struct uwb_mas_bm - a bitmap of all MAS in a superframe
91 * @bm: a bitmap of length #UWB_NUM_MAS
92 */
93struct uwb_mas_bm {
94 DECLARE_BITMAP(bm, UWB_NUM_MAS);
95};
96
97/**
98 * uwb_rsv_state - UWB Reservation state.
99 *
100 * NONE - reservation is not active (no DRP IE being transmitted).
101 *
102 * Owner reservation states:
103 *
104 * INITIATED - owner has sent an initial DRP request.
105 * PENDING - target responded with pending Reason Code.
106 * MODIFIED - reservation manager is modifying an established
107 * reservation with a different MAS allocation.
108 * ESTABLISHED - the reservation has been successfully negotiated.
109 *
110 * Target reservation states:
111 *
112 * DENIED - request is denied.
113 * ACCEPTED - request is accepted.
114 * PENDING - PAL has yet to make a decision on whether to accept or
115 * deny.
116 *
117 * FIXME: further target states TBD.
118 */
119enum uwb_rsv_state {
120 UWB_RSV_STATE_NONE,
121 UWB_RSV_STATE_O_INITIATED,
122 UWB_RSV_STATE_O_PENDING,
123 UWB_RSV_STATE_O_MODIFIED,
124 UWB_RSV_STATE_O_ESTABLISHED,
125 UWB_RSV_STATE_T_ACCEPTED,
126 UWB_RSV_STATE_T_DENIED,
127 UWB_RSV_STATE_T_PENDING,
128
129 UWB_RSV_STATE_LAST,
130};
131
132enum uwb_rsv_target_type {
133 UWB_RSV_TARGET_DEV,
134 UWB_RSV_TARGET_DEVADDR,
135};
136
137/**
138 * struct uwb_rsv_target - the target of a reservation.
139 *
140 * Reservations are unicast and targeted at a single device
141 * (UWB_RSV_TARGET_DEV); or (e.g., in the case of WUSB) targeted at a
142 * specific (private) DevAddr (UWB_RSV_TARGET_DEVADDR).
143 */
144struct uwb_rsv_target {
145 enum uwb_rsv_target_type type;
146 union {
147 struct uwb_dev *dev;
148 struct uwb_dev_addr devaddr;
149 };
150};
151
152/*
153 * Number of streams reserved for reservations targeted at DevAddrs.
154 */
155#define UWB_NUM_GLOBAL_STREAMS 1
156
157typedef void (*uwb_rsv_cb_f)(struct uwb_rsv *rsv);
158
159/**
160 * struct uwb_rsv - a DRP reservation
161 *
162 * Data structure management:
163 *
164 * @rc: the radio controller this reservation is for
165 * (as target or owner)
166 * @rc_node: a list node for the RC
167 * @pal_node: a list node for the PAL
168 *
169 * Owner and target parameters:
170 *
171 * @owner: the UWB device owning this reservation
172 * @target: the target UWB device
173 * @type: reservation type
174 *
175 * Owner parameters:
176 *
177 * @max_mas: maximum number of MAS
178 * @min_mas: minimum number of MAS
179 * @sparsity: owner selected sparsity
180 * @is_multicast: true iff multicast
181 *
182 * @callback: callback function when the reservation completes
183 * @pal_priv: private data for the PAL making the reservation
184 *
185 * Reservation status:
186 *
187 * @status: negotiation status
188 * @stream: stream index allocated for this reservation
189 * @mas: reserved MAS
190 * @drp_ie: the DRP IE
191 * @ie_valid: true iff the DRP IE matches the reservation parameters
192 *
193 * DRP reservations are uniquely identified by the owner, target and
194 * stream index. However, when using a DevAddr as a target (e.g., for
195 * a WUSB cluster reservation) the responses may be received from
196 * devices with different DevAddrs. In this case, reservations are
197 * uniquely identified by just the stream index. A number of stream
198 * indexes (UWB_NUM_GLOBAL_STREAMS) are reserved for this.
199 */
200struct uwb_rsv {
201 struct uwb_rc *rc;
202 struct list_head rc_node;
203 struct list_head pal_node;
204
205 struct uwb_dev *owner;
206 struct uwb_rsv_target target;
207 enum uwb_drp_type type;
208 int max_mas;
209 int min_mas;
210 int sparsity;
211 bool is_multicast;
212
213 uwb_rsv_cb_f callback;
214 void *pal_priv;
215
216 enum uwb_rsv_state state;
217 u8 stream;
218 struct uwb_mas_bm mas;
219 struct uwb_ie_drp *drp_ie;
220 bool ie_valid;
221 struct timer_list timer;
222 bool expired;
223};
224
225static const
226struct uwb_mas_bm uwb_mas_bm_zero = { .bm = { 0 } };
227
228static inline void uwb_mas_bm_copy_le(void *dst, const struct uwb_mas_bm *mas)
229{
230 bitmap_copy_le(dst, mas->bm, UWB_NUM_MAS);
231}
232
233/**
234 * struct uwb_drp_avail - a radio controller's view of MAS usage
235 * @global: MAS unused by neighbors (excluding reservations targeted
236 * or owned by the local radio controller) or the beacon period
237 * @local: MAS unused by local established reservations
238 * @pending: MAS unused by local pending reservations
239 * @ie: DRP Availability IE to be included in the beacon
240 * @ie_valid: true iff @ie is valid and does not need to be regenerated from
241 * @global and @local
242 *
243 * Each radio controller maintains a view of MAS usage or
244 * availability. MAS available for a new reservation are determined
245 * from the intersection of @global, @local, and @pending.
246 *
247 * The radio controller must transmit a DRP Availability IE that's the
248 * intersection of @global and @local.
249 *
250 * A set bit indicates the MAS is unused and available.
251 *
252 * rc->rsvs_mutex should be held before accessing this data structure.
253 *
254 * [ECMA-368] section 17.4.3.
255 */
256struct uwb_drp_avail {
257 DECLARE_BITMAP(global, UWB_NUM_MAS);
258 DECLARE_BITMAP(local, UWB_NUM_MAS);
259 DECLARE_BITMAP(pending, UWB_NUM_MAS);
260 struct uwb_ie_drp_avail ie;
261 bool ie_valid;
262};
263
264
265const char *uwb_rsv_state_str(enum uwb_rsv_state state);
266const char *uwb_rsv_type_str(enum uwb_drp_type type);
267
268struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb,
269 void *pal_priv);
270void uwb_rsv_destroy(struct uwb_rsv *rsv);
271
272int uwb_rsv_establish(struct uwb_rsv *rsv);
273int uwb_rsv_modify(struct uwb_rsv *rsv,
274 int max_mas, int min_mas, int sparsity);
275void uwb_rsv_terminate(struct uwb_rsv *rsv);
276
277void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv);
278
279/**
280 * Radio Control Interface instance
281 *
282 *
283 * Life cycle rules: those of the UWB Device.
284 *
285 * @index: an index number for this radio controller, as used in the
286 * device name.
287 * @version: version of protocol supported by this device
288 * @priv: Backend implementation; rw with uwb_dev.dev.sem taken.
289 * @cmd: Backend implementation to execute commands; rw and call
290 * only with uwb_dev.dev.sem taken.
291 * @reset: Hardware reset of radio controller and any PAL controllers.
292 * @filter: Backend implementation to manipulate data to and from device
293 * to be compliant to specification assumed by driver (WHCI
294 * 0.95).
295 *
296 * uwb_dev.dev.mutex is used to execute commands and update
297 * the corresponding structures; can't use a spinlock
298 * because rc->cmd() can sleep.
299 * @ies: This is a dynamically allocated array caching the
300 * IEs (settable by the host) that the beacon of this
301 * radio controller is currently sending.
302 *
303 * In reality, we store here the full command we set to
304 * the radio controller (which is basically a command
305 * prefix followed by all the IEs the beacon currently
306 * contains). This way we don't have to realloc and
307 * memcpy when setting it.
308 *
309 * We set this up in uwb_rc_ie_setup(), where we alloc
310 * this struct, call get_ie() [so we know which IEs are
311 * currently being sent, if any].
312 *
313 * @ies_capacity:Amount of space (in bytes) allocated in @ies. The
314 * amount used is given by sizeof(*ies) plus ies->wIELength
315 * (which is a little endian quantity all the time).
316 * @ies_mutex: protect the IE cache
317 * @dbg: information for the debug interface
318 */
319struct uwb_rc {
320 struct uwb_dev uwb_dev;
321 int index;
322 u16 version;
323
324 struct module *owner;
325 void *priv;
326 int (*start)(struct uwb_rc *rc);
327 void (*stop)(struct uwb_rc *rc);
328 int (*cmd)(struct uwb_rc *, const struct uwb_rccb *, size_t);
329 int (*reset)(struct uwb_rc *rc);
330 int (*filter_cmd)(struct uwb_rc *, struct uwb_rccb **, size_t *);
331 int (*filter_event)(struct uwb_rc *, struct uwb_rceb **, const size_t,
332 size_t *, size_t *);
333
334 spinlock_t neh_lock; /* protects neh_* and ctx_* */
335 struct list_head neh_list; /* Open NE handles */
336 unsigned long ctx_bm[UWB_RC_CTX_MAX / 8 / sizeof(unsigned long)];
337 u8 ctx_roll;
338
339 int beaconing; /* Beaconing state [channel number] */
340 int scanning;
341 enum uwb_scan_type scan_type:3;
342 unsigned ready:1;
343 struct uwb_notifs_chain notifs_chain;
344
345 struct uwb_drp_avail drp_avail;
346 struct list_head reservations;
347 struct mutex rsvs_mutex;
348 struct workqueue_struct *rsv_workq;
349 struct work_struct rsv_update_work;
350
351 struct mutex ies_mutex;
352 struct uwb_rc_cmd_set_ie *ies;
353 size_t ies_capacity;
354
355 spinlock_t pal_lock;
356 struct list_head pals;
357
358 struct uwb_dbg *dbg;
359};
360
361
362/**
363 * struct uwb_pal - a UWB PAL
364 * @name: descriptive name for this PAL (wushc, wlp, etc.).
365 * @device: a device for the PAL. Used to link the PAL and the radio
366 * controller in sysfs.
367 * @new_rsv: called when a peer requests a reservation (may be NULL if
368 * the PAL cannot accept reservation requests).
369 *
370 * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB
371 * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP).
372 *
373 * The PALs using a radio controller must register themselves to
374 * permit the UWB stack to coordinate usage of the radio between the
375 * various PALs or to allow PALs to respond to certain requests from
376 * peers.
377 *
378 * A struct uwb_pal should be embedded in a containing structure
379 * belonging to the PAL and initialized with uwb_pal_init(). Fields
380 * should be set appropriately by the PAL before registering the PAL
381 * with uwb_pal_register().
382 */
383struct uwb_pal {
384 struct list_head node;
385 const char *name;
386 struct device *device;
387 void (*new_rsv)(struct uwb_rsv *rsv);
388};
389
390void uwb_pal_init(struct uwb_pal *pal);
391int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal);
392void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal);
393
394/*
395 * General public API
396 *
397 * This API can be used by UWB device drivers or by those implementing
398 * UWB Radio Controllers
399 */
400struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc,
401 const struct uwb_dev_addr *devaddr);
402struct uwb_dev *uwb_dev_get_by_rc(struct uwb_dev *, struct uwb_rc *);
403static inline void uwb_dev_get(struct uwb_dev *uwb_dev)
404{
405 get_device(&uwb_dev->dev);
406}
407static inline void uwb_dev_put(struct uwb_dev *uwb_dev)
408{
409 put_device(&uwb_dev->dev);
410}
411struct uwb_dev *uwb_dev_try_get(struct uwb_rc *rc, struct uwb_dev *uwb_dev);
412
413/**
414 * Callback function for 'uwb_{dev,rc}_foreach()'.
415 *
416 * @dev: Linux device instance
417 * 'uwb_dev = container_of(dev, struct uwb_dev, dev)'
418 * @priv: Data passed by the caller to 'uwb_{dev,rc}_foreach()'.
419 *
420 * @returns: 0 to continue the iterations, any other val to stop
421 * iterating and return the value to the caller of
422 * _foreach().
423 */
424typedef int (*uwb_dev_for_each_f)(struct device *dev, void *priv);
425int uwb_dev_for_each(struct uwb_rc *rc, uwb_dev_for_each_f func, void *priv);
426
427struct uwb_rc *uwb_rc_alloc(void);
428struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *);
429struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *);
430void uwb_rc_put(struct uwb_rc *rc);
431
432typedef void (*uwb_rc_cmd_cb_f)(struct uwb_rc *rc, void *arg,
433 struct uwb_rceb *reply, ssize_t reply_size);
434
435int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name,
436 struct uwb_rccb *cmd, size_t cmd_size,
437 u8 expected_type, u16 expected_event,
438 uwb_rc_cmd_cb_f cb, void *arg);
439ssize_t uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name,
440 struct uwb_rccb *cmd, size_t cmd_size,
441 struct uwb_rceb *reply, size_t reply_size);
442ssize_t uwb_rc_vcmd(struct uwb_rc *rc, const char *cmd_name,
443 struct uwb_rccb *cmd, size_t cmd_size,
444 u8 expected_type, u16 expected_event,
445 struct uwb_rceb **preply);
446ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **);
447int uwb_bg_joined(struct uwb_rc *rc);
448
449size_t __uwb_addr_print(char *, size_t, const unsigned char *, int);
450
451int uwb_rc_dev_addr_set(struct uwb_rc *, const struct uwb_dev_addr *);
452int uwb_rc_dev_addr_get(struct uwb_rc *, struct uwb_dev_addr *);
453int uwb_rc_mac_addr_set(struct uwb_rc *, const struct uwb_mac_addr *);
454int uwb_rc_mac_addr_get(struct uwb_rc *, struct uwb_mac_addr *);
455int __uwb_mac_addr_assigned_check(struct device *, void *);
456int __uwb_dev_addr_assigned_check(struct device *, void *);
457
458/* Print in @buf a pretty repr of @addr */
459static inline size_t uwb_dev_addr_print(char *buf, size_t buf_size,
460 const struct uwb_dev_addr *addr)
461{
462 return __uwb_addr_print(buf, buf_size, addr->data, 0);
463}
464
465/* Print in @buf a pretty repr of @addr */
466static inline size_t uwb_mac_addr_print(char *buf, size_t buf_size,
467 const struct uwb_mac_addr *addr)
468{
469 return __uwb_addr_print(buf, buf_size, addr->data, 1);
470}
471
472/* @returns 0 if device addresses @addr2 and @addr1 are equal */
473static inline int uwb_dev_addr_cmp(const struct uwb_dev_addr *addr1,
474 const struct uwb_dev_addr *addr2)
475{
476 return memcmp(addr1, addr2, sizeof(*addr1));
477}
478
479/* @returns 0 if MAC addresses @addr2 and @addr1 are equal */
480static inline int uwb_mac_addr_cmp(const struct uwb_mac_addr *addr1,
481 const struct uwb_mac_addr *addr2)
482{
483 return memcmp(addr1, addr2, sizeof(*addr1));
484}
485
486/* @returns !0 if a MAC @addr is a broadcast address */
487static inline int uwb_mac_addr_bcast(const struct uwb_mac_addr *addr)
488{
489 struct uwb_mac_addr bcast = {
490 .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }
491 };
492 return !uwb_mac_addr_cmp(addr, &bcast);
493}
494
495/* @returns !0 if a MAC @addr is all zeroes */
496static inline int uwb_mac_addr_unset(const struct uwb_mac_addr *addr)
497{
498 struct uwb_mac_addr unset = {
499 .data = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
500 };
501 return !uwb_mac_addr_cmp(addr, &unset);
502}
503
504/* @returns !0 if the address is in use. */
505static inline unsigned __uwb_dev_addr_assigned(struct uwb_rc *rc,
506 struct uwb_dev_addr *addr)
507{
508 return uwb_dev_for_each(rc, __uwb_dev_addr_assigned_check, addr);
509}
510
511/*
512 * UWB Radio Controller API
513 *
514 * This API is used (in addition to the general API) to implement UWB
515 * Radio Controllers.
516 */
517void uwb_rc_init(struct uwb_rc *);
518int uwb_rc_add(struct uwb_rc *, struct device *dev, void *rc_priv);
519void uwb_rc_rm(struct uwb_rc *);
520void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t);
521void uwb_rc_neh_error(struct uwb_rc *, int);
522void uwb_rc_reset_all(struct uwb_rc *rc);
523
524/**
525 * uwb_rsv_is_owner - is the owner of this reservation the RC?
526 * @rsv: the reservation
527 */
528static inline bool uwb_rsv_is_owner(struct uwb_rsv *rsv)
529{
530 return rsv->owner == &rsv->rc->uwb_dev;
531}
532
533/**
534 * Events generated by UWB that can be passed to any listeners
535 *
536 * Higher layers can register callback functions with the radio
537 * controller using uwb_notifs_register(). The radio controller
538 * maintains a list of all registered handlers and will notify all
539 * nodes when an event occurs.
540 */
541enum uwb_notifs {
542 UWB_NOTIF_BG_JOIN = 0, /* radio controller joined a beacon group */
543 UWB_NOTIF_BG_LEAVE = 1, /* radio controller left a beacon group */
544 UWB_NOTIF_ONAIR,
545 UWB_NOTIF_OFFAIR,
546};
547
548/* Callback function registered with UWB */
549struct uwb_notifs_handler {
550 struct list_head list_node;
551 void (*cb)(void *, struct uwb_dev *, enum uwb_notifs);
552 void *data;
553};
554
555int uwb_notifs_register(struct uwb_rc *, struct uwb_notifs_handler *);
556int uwb_notifs_deregister(struct uwb_rc *, struct uwb_notifs_handler *);
557
558
559/**
560 * UWB radio controller Event Size Entry (for creating entry tables)
561 *
562 * WUSB and WHCI define events and notifications, and they might have
563 * fixed or variable size.
564 *
565 * Each event/notification has a size which is not necessarily known
566 * in advance based on the event code. As well, vendor specific
567 * events/notifications will have a size impossible to determine
568 * unless we know about the device's specific details.
569 *
570 * It was way too smart of the spec writers not to think that it would
571 * be impossible for a generic driver to skip over vendor specific
572 * events/notifications if there are no LENGTH fields in the HEADER of
573 * each message...the transaction size cannot be counted on as the
574 * spec does not forbid to pack more than one event in a single
575 * transaction.
576 *
577 * Thus, we guess sizes with tables (or for events, when you know the
578 * size ahead of time you can use uwb_rc_neh_extra_size*()). We
579 * register tables with the known events and their sizes, and then we
580 * traverse those tables. For those with variable length, we provide a
581 * way to lookup the size inside the event/notification's
582 * payload. This allows device-specific event size tables to be
583 * registered.
584 *
585 * @size: Size of the payload
586 *
587 * @offset: if != 0, at offset @offset-1 starts a field with a length
588 * that has to be added to @size. The format of the field is
589 * given by @type.
590 *
591 * @type: Type and length of the offset field. Most common is LE 16
592 * bits (that's why that is zero); others are there mostly to
593 * cover for bugs and weirdos.
594 */
595struct uwb_est_entry {
596 size_t size;
597 unsigned offset;
598 enum { UWB_EST_16 = 0, UWB_EST_8 = 1 } type;
599};
600
601int uwb_est_register(u8 type, u8 code_high, u16 vendor, u16 product,
602 const struct uwb_est_entry *, size_t entries);
603int uwb_est_unregister(u8 type, u8 code_high, u16 vendor, u16 product,
604 const struct uwb_est_entry *, size_t entries);
605ssize_t uwb_est_find_size(struct uwb_rc *rc, const struct uwb_rceb *rceb,
606 size_t len);
607
608/* -- Misc */
609
610enum {
611 EDC_MAX_ERRORS = 10,
612 EDC_ERROR_TIMEFRAME = HZ,
613};
614
615/* error density counter */
616struct edc {
617 unsigned long timestart;
618 u16 errorcount;
619};
620
621static inline
622void edc_init(struct edc *edc)
623{
624 edc->timestart = jiffies;
625}
626
627/* Called when an error occurred.
628 * This is a way to determine if the number of acceptable errors per time
629 * period has been exceeded. It is not accurate as there are cases in which
630 * this scheme will not work, for example if there are periodic occurrences
631 * of errors that straddle updates to the start time. This scheme is
632 * sufficient for our usage.
633 *
634 * @returns 1 if maximum acceptable errors per timeframe has been exceeded.
635 */
636static inline int edc_inc(struct edc *err_hist, u16 max_err, u16 timeframe)
637{
638 unsigned long now;
639
640 now = jiffies;
641 if (now - err_hist->timestart > timeframe) {
642 err_hist->errorcount = 1;
643 err_hist->timestart = now;
644 } else if (++err_hist->errorcount > max_err) {
645 err_hist->errorcount = 0;
646 err_hist->timestart = now;
647 return 1;
648 }
649 return 0;
650}
651
652
653/* Information Element handling */
654
655/* For representing the state of writing to a buffer when iterating */
656struct uwb_buf_ctx {
657 char *buf;
658 size_t bytes, size;
659};
660
661typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *,
662 size_t, void *);
663struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
664ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data,
665 const void *buf, size_t size);
666int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *,
667 size_t, void *);
668int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *);
669struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len);
670
671
672/*
673 * Transmission statistics
674 *
675 * UWB uses LQI and RSSI (one byte values) for reporting radio signal
676 * strength and line quality indication. We do quick and dirty
677 * averages of those. They are signed values, btw.
678 *
679 * For 8 bit quantities, we keep the min, the max, an accumulator
680 * (@sigma) and a # of samples. When @samples gets to 255, we compute
681 * the average (@sigma / @samples), place it in @sigma and reset
682 * @samples to 1 (so we use it as the first sample).
683 *
684 * Now, statistically speaking, probably I am kicking the kidneys of
685 * some books I have in my shelves collecting dust, but I just want to
686 * get an approx, not the Nobel.
687 *
688 * LOCKING: there is no locking per se, but we try to keep a lockless
689 * scheme. Only _add_sample() modifies the values--as long as you
690 * have other locking on top that makes sure that no two calls of
691 * _add_sample() happen at the same time, then we are fine. Now, for
692 * resetting the values we just set @samples to 0 and that makes the
693 * next _add_sample() to start with defaults. Reading the values in
694 * _show() currently can race, so you need to make sure the calls are
695 * under the same lock that protects calls to _add_sample(). FIXME:
696 * currently unlocked (It is not ultraprecise but does the trick. Bite
697 * me).
698 */
699struct stats {
700 s8 min, max;
701 s16 sigma;
702 atomic_t samples;
703};
704
705static inline
706void stats_init(struct stats *stats)
707{
708 atomic_set(&stats->samples, 0);
709 wmb();
710}
711
712static inline
713void stats_add_sample(struct stats *stats, s8 sample)
714{
715 s8 min, max;
716 s16 sigma;
717 unsigned samples = atomic_read(&stats->samples);
718 if (samples == 0) { /* it was zero before, so we initialize */
719 min = 127;
720 max = -128;
721 sigma = 0;
722 } else {
723 min = stats->min;
724 max = stats->max;
725 sigma = stats->sigma;
726 }
727
728 if (sample < min) /* compute new values */
729 min = sample;
730 else if (sample > max)
731 max = sample;
732 sigma += sample;
733
734 stats->min = min; /* commit */
735 stats->max = max;
736 stats->sigma = sigma;
737 if (atomic_add_return(1, &stats->samples) > 255) {
738 /* wrapped around! reset */
739 stats->sigma = sigma / 256;
740 atomic_set(&stats->samples, 1);
741 }
742}
743
744static inline ssize_t stats_show(struct stats *stats, char *buf)
745{
746 int min, max, avg;
747 int samples = atomic_read(&stats->samples);
748 if (samples == 0)
749 min = max = avg = 0;
750 else {
751 min = stats->min;
752 max = stats->max;
753 avg = stats->sigma / samples;
754 }
755 return scnprintf(buf, PAGE_SIZE, "%d %d %d\n", min, max, avg);
756}
757
758static inline ssize_t stats_store(struct stats *stats, const char *buf,
759 size_t size)
760{
761 stats_init(stats);
762 return size;
763}
764
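/*
 * Usage sketch (illustrative only, not part of this header): feed
 * every reported RSSI sample into the running statistics and expose
 * the result through a sysfs attribute. The attribute wiring and the
 * use of drvdata below are hypothetical.
 */
#if 0
static void example_note_rssi(struct stats *rssi_stats, s8 rssi)
{
	stats_add_sample(rssi_stats, rssi);
}

static ssize_t example_rssi_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct stats *rssi_stats = dev_get_drvdata(dev);	/* hypothetical */

	return stats_show(rssi_stats, buf);	/* prints "min max avg\n" */
}
#endif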
765#endif /* #ifndef __LINUX__UWB_H__ */
diff --git a/include/linux/uwb/debug-cmd.h b/include/linux/uwb/debug-cmd.h
new file mode 100644
index 000000000000..1141f41bab5c
--- /dev/null
+++ b/include/linux/uwb/debug-cmd.h
@@ -0,0 +1,57 @@
1/*
2 * Ultra Wide Band
3 * Debug interface commands
4 *
5 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19#ifndef __LINUX__UWB__DEBUG_CMD_H__
20#define __LINUX__UWB__DEBUG_CMD_H__
21
22#include <linux/types.h>
23
24/*
25 * Debug interface commands
26 *
27 * UWB_DBG_CMD_RSV_ESTABLISH: Establish a new unicast reservation.
28 *
29 * UWB_DBG_CMD_RSV_TERMINATE: Terminate the Nth reservation.
30 */
31
32enum uwb_dbg_cmd_type {
33 UWB_DBG_CMD_RSV_ESTABLISH = 1,
34 UWB_DBG_CMD_RSV_TERMINATE = 2,
35};
36
37struct uwb_dbg_cmd_rsv_establish {
38 __u8 target[6];
39 __u8 type;
40 __u16 max_mas;
41 __u16 min_mas;
42 __u8 sparsity;
43};
44
45struct uwb_dbg_cmd_rsv_terminate {
46 int index;
47};
48
49struct uwb_dbg_cmd {
50 __u32 type;
51 union {
52 struct uwb_dbg_cmd_rsv_establish rsv_establish;
53 struct uwb_dbg_cmd_rsv_terminate rsv_terminate;
54 };
55};
56
57#endif /* #ifndef __LINUX__UWB__DEBUG_CMD_H__ */
diff --git a/include/linux/uwb/debug.h b/include/linux/uwb/debug.h
new file mode 100644
index 000000000000..a86a73fe303f
--- /dev/null
+++ b/include/linux/uwb/debug.h
@@ -0,0 +1,82 @@
1/*
2 * Ultra Wide Band
3 * Debug Support
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * FIXME: doc
24 * Invoke like:
25 *
26 * #define D_LOCAL 4
27 * #include <linux/uwb/debug.h>
28 *
29 * At the end of your include files.
30 */
31#include <linux/types.h>
32
33struct device;
34extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize);
35
36/* Master debug switch; !0 enables, 0 disables */
37#define D_MASTER (!0)
38
39/* Local (per-file) debug switch; #define before #including */
40#ifndef D_LOCAL
41#define D_LOCAL 0
42#endif
43
44#undef __d_printf
45#undef d_fnstart
46#undef d_fnend
47#undef d_printf
48#undef d_dump
49
50#define __d_printf(l, _tag, _dev, f, a...) \
51do { \
52 struct device *__dev = (_dev); \
53 if (D_MASTER && D_LOCAL >= (l)) { \
54 char __head[64] = ""; \
55 if (_dev != NULL) { \
56 if ((unsigned long)__dev < 4096) \
57 printk(KERN_ERR "E: Corrupt dev %p\n", \
58 __dev); \
59 else \
60 snprintf(__head, sizeof(__head), \
61 "%s %s: ", \
62 dev_driver_string(__dev), \
63 __dev->bus_id); \
64 } \
65 printk(KERN_ERR "%s%s" _tag ": " f, __head, \
66 __func__, ## a); \
67 } \
68} while (0 && _dev)
69
70#define d_fnstart(l, _dev, f, a...) \
71 __d_printf(l, " FNSTART", _dev, f, ## a)
72#define d_fnend(l, _dev, f, a...) \
73 __d_printf(l, " FNEND", _dev, f, ## a)
74#define d_printf(l, _dev, f, a...) \
75 __d_printf(l, "", _dev, f, ## a)
76#define d_dump(l, _dev, ptr, size) \
77do { \
78 struct device *__dev = _dev; \
79 if (D_MASTER && D_LOCAL >= (l)) \
80 dump_bytes(__dev, ptr, size); \
81} while (0 && _dev)
82#define d_test(l) (D_MASTER && D_LOCAL >= (l))
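/*
 * Usage sketch (illustrative only, not part of this header). A source
 * file picks its verbosity before including this header and then uses
 * the macros; the function below is hypothetical:
 *
 *	#define D_LOCAL 4
 *	#include <linux/uwb/debug.h>
 *
 *	static int example_do_cmd(struct device *dev, int arg)
 *	{
 *		int result = 0;
 *
 *		d_fnstart(3, dev, "(dev %p arg %d)\n", dev, arg);
 *		d_printf(4, dev, "working on arg %d\n", arg);
 *		if (d_test(5))
 *			d_dump(5, dev, &arg, sizeof(arg));
 *		d_fnend(3, dev, "(dev %p arg %d) = %d\n", dev, arg, result);
 *		return result;
 *	}
 */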
diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h
new file mode 100644
index 000000000000..198c15f8e251
--- /dev/null
+++ b/include/linux/uwb/spec.h
@@ -0,0 +1,727 @@
1/*
2 * Ultra Wide Band
3 * UWB Standard definitions
4 *
5 * Copyright (C) 2005-2006 Intel Corporation
6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 *
23 * All these definitions are based on the ECMA-368 standard.
24 *
25 * Note all definitions are Little Endian on the wire, and we will
26 * convert them to host order before operating on the bitfields
27 * (which, yes, we use extensively).
28 */
29
30#ifndef __LINUX__UWB_SPEC_H__
31#define __LINUX__UWB_SPEC_H__
32
33#include <linux/types.h>
34#include <linux/bitmap.h>
35
36#define i1480_FW 0x00000303
37/* #define i1480_FW 0x00000302 */
38
39/**
40 * Number of Medium Access Slots in a superframe.
41 *
42 * UWB divides time into superframes, each one divided into 256 pieces, or
43 * Medium Access Slots. See MBOA MAC[5.4.5] for details. The MAS is the
44 * basic bandwidth allocation unit in UWB.
45 */
46enum { UWB_NUM_MAS = 256 };
47
48/**
49 * Number of Zones in superframe.
50 *
51 * UWB divides the superframe into zones with numbering starting from BPST.
52 * See MBOA MAC[16.8.6]
53 */
54enum { UWB_NUM_ZONES = 16 };
55
56/*
57 * Number of MAS in a zone.
58 */
59#define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES)
60
61/*
62 * Number of streams per DRP reservation between a pair of devices.
63 *
64 * [ECMA-368] section 16.8.6.
65 */
66enum { UWB_NUM_STREAMS = 8 };
67
68/*
69 * mMasLength
70 *
71 * The length of a MAS in microseconds.
72 *
73 * [ECMA-368] section 17.16.
74 */
75enum { UWB_MAS_LENGTH_US = 256 };
76
77/*
78 * mBeaconSlotLength
79 *
80 * The length of the beacon slot in microseconds.
81 *
82 * [ECMA-368] section 17.16
83 */
84enum { UWB_BEACON_SLOT_LENGTH_US = 85 };
85
86/*
87 * mMaxLostBeacons
88 *
89 * The number of beacons missing in consecutive superframes before a
90 * device can be considered unreachable.
91 *
92 * [ECMA-368] section 17.16
93 */
94enum { UWB_MAX_LOST_BEACONS = 3 };
95
96/*
97 * Length of a superframe in microseconds.
98 */
99#define UWB_SUPERFRAME_LENGTH_US (UWB_MAS_LENGTH_US * UWB_NUM_MAS)
100
101/**
102 * UWB MAC address
103 *
104 * It is *imperative* that this struct is exactly 6 packed bytes (as
105 * it is also used to define headers sent down and up the wire/radio).
106 */
107struct uwb_mac_addr {
108 u8 data[6];
109} __attribute__((packed));
110
111
112/**
113 * UWB device address
114 *
115 * It is *imperative* that this struct is exactly 2 packed bytes (as
116 * it is also used to define headers sent down and up the wire/radio).
117 */
118struct uwb_dev_addr {
119 u8 data[2];
120} __attribute__((packed));
121
122
123/**
124 * Types of UWB addresses
125 *
126 * Order matters (by size).
127 */
128enum uwb_addr_type {
129 UWB_ADDR_DEV = 0,
130 UWB_ADDR_MAC = 1,
131};
132
133
134/** Size of a char buffer for printing a MAC/device address */
135enum { UWB_ADDR_STRSIZE = 32 };
136
137
138/** UWB WiMedia protocol IDs. */
139enum uwb_prid {
140 UWB_PRID_WLP_RESERVED = 0x0000,
141 UWB_PRID_WLP = 0x0001,
142 UWB_PRID_WUSB_BOT = 0x0010,
143 UWB_PRID_WUSB = 0x0010,
144 UWB_PRID_WUSB_TOP = 0x001F,
145};
146
147
148/** PHY Rate (MBOA MAC[7.8.12, Table 61]) */
149enum uwb_phy_rate {
150 UWB_PHY_RATE_53 = 0,
151 UWB_PHY_RATE_80,
152 UWB_PHY_RATE_106,
153 UWB_PHY_RATE_160,
154 UWB_PHY_RATE_200,
155 UWB_PHY_RATE_320,
156 UWB_PHY_RATE_400,
157 UWB_PHY_RATE_480,
158 UWB_PHY_RATE_INVALID
159};
160
161
162/**
163 * Different ways to scan (MBOA MAC[6.2.2, Table 8], WUSB[Table 8-78])
164 */
165enum uwb_scan_type {
166 UWB_SCAN_ONLY = 0,
167 UWB_SCAN_OUTSIDE_BP,
168 UWB_SCAN_WHILE_INACTIVE,
169 UWB_SCAN_DISABLED,
170 UWB_SCAN_ONLY_STARTTIME,
171 UWB_SCAN_TOP
172};
173
174
175/** ACK Policy types (MBOA MAC[7.2.1.3]) */
176enum uwb_ack_pol {
177 UWB_ACK_NO = 0,
178 UWB_ACK_INM = 1,
179 UWB_ACK_B = 2,
180 UWB_ACK_B_REQ = 3,
181};
182
183
184/** DRP reservation types ([ECMA-368] table 106) */
185enum uwb_drp_type {
186 UWB_DRP_TYPE_ALIEN_BP = 0,
187 UWB_DRP_TYPE_HARD,
188 UWB_DRP_TYPE_SOFT,
189 UWB_DRP_TYPE_PRIVATE,
190 UWB_DRP_TYPE_PCA,
191};
192
193
194/** DRP Reason Codes ([ECMA-368] table 107) */
195enum uwb_drp_reason {
196 UWB_DRP_REASON_ACCEPTED = 0,
197 UWB_DRP_REASON_CONFLICT,
198 UWB_DRP_REASON_PENDING,
199 UWB_DRP_REASON_DENIED,
200 UWB_DRP_REASON_MODIFIED,
201};
202
203/**
204 * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9])
205 */
206enum uwb_drp_notif_reason {
207 UWB_DRP_NOTIF_DRP_IE_RCVD = 0,
208 UWB_DRP_NOTIF_CONFLICT,
209 UWB_DRP_NOTIF_TERMINATE,
210};
211
212
213/** Allocation of MAS slots in a DRP request MBOA MAC[7.8.7] */
214struct uwb_drp_alloc {
215 __le16 zone_bm;
216 __le16 mas_bm;
217} __attribute__((packed));
218
219
220/** General MAC Header format (ECMA-368[16.2]) */
221struct uwb_mac_frame_hdr {
222 __le16 Frame_Control;
223 struct uwb_dev_addr DestAddr;
224 struct uwb_dev_addr SrcAddr;
225 __le16 Sequence_Control;
226 __le16 Access_Information;
227} __attribute__((packed));
228
229
230/**
231 * uwb_beacon_frame - a beacon frame including MAC headers
232 *
233 * [ECMA] section 16.3.
234 */
235struct uwb_beacon_frame {
236 struct uwb_mac_frame_hdr hdr;
237 struct uwb_mac_addr Device_Identifier; /* may be a NULL EUI-48 */
238 u8 Beacon_Slot_Number;
239 u8 Device_Control;
240 u8 IEData[];
241} __attribute__((packed));
242
243
244/** Information Element codes (MBOA MAC[T54]) */
245enum uwb_ie {
246 UWB_PCA_AVAILABILITY = 2,
247 UWB_IE_DRP_AVAILABILITY = 8,
248 UWB_IE_DRP = 9,
249 UWB_BP_SWITCH_IE = 11,
250 UWB_MAC_CAPABILITIES_IE = 12,
251 UWB_PHY_CAPABILITIES_IE = 13,
252 UWB_APP_SPEC_PROBE_IE = 15,
253 UWB_IDENTIFICATION_IE = 19,
254 UWB_MASTER_KEY_ID_IE = 20,
255 UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */
256 UWB_APP_SPEC_IE = 255,
257};
258
259
260/**
261 * Header common to all Information Elements (IEs)
262 */
263struct uwb_ie_hdr {
264 u8 element_id; /* enum uwb_ie */
265 u8 length;
266} __attribute__((packed));
267
268
269/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.6]) */
270struct uwb_ie_drp {
271 struct uwb_ie_hdr hdr;
272 __le16 drp_control;
273 struct uwb_dev_addr dev_addr;
274 struct uwb_drp_alloc allocs[];
275} __attribute__((packed));
276
277static inline int uwb_ie_drp_type(struct uwb_ie_drp *ie)
278{
279 return (le16_to_cpu(ie->drp_control) >> 0) & 0x7;
280}
281
282static inline int uwb_ie_drp_stream_index(struct uwb_ie_drp *ie)
283{
284 return (le16_to_cpu(ie->drp_control) >> 3) & 0x7;
285}
286
287static inline int uwb_ie_drp_reason_code(struct uwb_ie_drp *ie)
288{
289 return (le16_to_cpu(ie->drp_control) >> 6) & 0x7;
290}
291
292static inline int uwb_ie_drp_status(struct uwb_ie_drp *ie)
293{
294 return (le16_to_cpu(ie->drp_control) >> 9) & 0x1;
295}
296
297static inline int uwb_ie_drp_owner(struct uwb_ie_drp *ie)
298{
299 return (le16_to_cpu(ie->drp_control) >> 10) & 0x1;
300}
301
302static inline int uwb_ie_drp_tiebreaker(struct uwb_ie_drp *ie)
303{
304 return (le16_to_cpu(ie->drp_control) >> 11) & 0x1;
305}
306
307static inline int uwb_ie_drp_unsafe(struct uwb_ie_drp *ie)
308{
309 return (le16_to_cpu(ie->drp_control) >> 12) & 0x1;
310}
311
312static inline void uwb_ie_drp_set_type(struct uwb_ie_drp *ie, enum uwb_drp_type type)
313{
314 u16 drp_control = le16_to_cpu(ie->drp_control);
315 drp_control = (drp_control & ~(0x7 << 0)) | (type << 0);
316 ie->drp_control = cpu_to_le16(drp_control);
317}
318
319static inline void uwb_ie_drp_set_stream_index(struct uwb_ie_drp *ie, int stream_index)
320{
321 u16 drp_control = le16_to_cpu(ie->drp_control);
322 drp_control = (drp_control & ~(0x7 << 3)) | (stream_index << 3);
323 ie->drp_control = cpu_to_le16(drp_control);
324}
325
326static inline void uwb_ie_drp_set_reason_code(struct uwb_ie_drp *ie,
327 enum uwb_drp_reason reason_code)
328{
329 u16 drp_control = le16_to_cpu(ie->drp_control);
330	drp_control = (drp_control & ~(0x7 << 6)) | (reason_code << 6);
331 ie->drp_control = cpu_to_le16(drp_control);
332}
333
334static inline void uwb_ie_drp_set_status(struct uwb_ie_drp *ie, int status)
335{
336 u16 drp_control = le16_to_cpu(ie->drp_control);
337 drp_control = (drp_control & ~(0x1 << 9)) | (status << 9);
338 ie->drp_control = cpu_to_le16(drp_control);
339}
340
341static inline void uwb_ie_drp_set_owner(struct uwb_ie_drp *ie, int owner)
342{
343 u16 drp_control = le16_to_cpu(ie->drp_control);
344 drp_control = (drp_control & ~(0x1 << 10)) | (owner << 10);
345 ie->drp_control = cpu_to_le16(drp_control);
346}
347
348static inline void uwb_ie_drp_set_tiebreaker(struct uwb_ie_drp *ie, int tiebreaker)
349{
350 u16 drp_control = le16_to_cpu(ie->drp_control);
351 drp_control = (drp_control & ~(0x1 << 11)) | (tiebreaker << 11);
352 ie->drp_control = cpu_to_le16(drp_control);
353}
354
355static inline void uwb_ie_drp_set_unsafe(struct uwb_ie_drp *ie, int unsafe)
356{
357 u16 drp_control = le16_to_cpu(ie->drp_control);
358 drp_control = (drp_control & ~(0x1 << 12)) | (unsafe << 12);
359 ie->drp_control = cpu_to_le16(drp_control);
360}
361
362/** Dynamic Reservation Protocol IE (MBOA MAC[7.8.7]) */
363struct uwb_ie_drp_avail {
364 struct uwb_ie_hdr hdr;
365 DECLARE_BITMAP(bmp, UWB_NUM_MAS);
366} __attribute__((packed));
367
368/**
369 * The Vendor ID is set to an OUI that indicates the vendor of the device.
370 * ECMA-368 [16.8.10]
371 */
372struct uwb_vendor_id {
373 u8 data[3];
374} __attribute__((packed));
375
376/**
377 * The device type ID
378 * FIXME: clarify what this means
379 * ECMA-368 [16.8.10]
380 */
381struct uwb_device_type_id {
382 u8 data[3];
383} __attribute__((packed));
384
385
386/**
387 * UWB device information types
388 * ECMA-368 [16.8.10]
389 */
390enum uwb_dev_info_type {
391 UWB_DEV_INFO_VENDOR_ID = 0,
392 UWB_DEV_INFO_VENDOR_TYPE,
393 UWB_DEV_INFO_NAME,
394};
395
396/**
397 * UWB device information found in Identification IE
398 * ECMA-368 [16.8.10]
399 */
400struct uwb_dev_info {
401 u8 type; /* enum uwb_dev_info_type */
402 u8 length;
403 u8 data[];
404} __attribute__((packed));
405
406/**
407 * UWB Identification IE
408 * ECMA-368 [16.8.10]
409 */
410struct uwb_identification_ie {
411 struct uwb_ie_hdr hdr;
412 struct uwb_dev_info info[];
413} __attribute__((packed));
414
415/*
416 * UWB Radio Controller
417 *
418 * These definitions are common to the Radio Control layers as
419 * exported by the WUSB1.0 HWA and WHCI interfaces.
420 */
421
422/** Radio Control Command Block (WUSB1.0[Table 8-65] and WHCI 0.95) */
423struct uwb_rccb {
424 u8 bCommandType; /* enum hwa_cet */
425 __le16 wCommand; /* Command code */
426 u8 bCommandContext; /* Context ID */
427} __attribute__((packed));
428
429
430/** Radio Control Event Block (WUSB[table 8-66], WHCI 0.95) */
431struct uwb_rceb {
432 u8 bEventType; /* enum hwa_cet */
433 __le16 wEvent; /* Event code */
434 u8 bEventContext; /* Context ID */
435} __attribute__((packed));
436
437
438enum {
439 UWB_RC_CET_GENERAL = 0, /* General Command/Event type */
440 UWB_RC_CET_EX_TYPE_1 = 1, /* Extended Type 1 Command/Event type */
441};
442
443/* Commands to the radio controller */
444enum uwb_rc_cmd {
445 UWB_RC_CMD_CHANNEL_CHANGE = 16,
446 UWB_RC_CMD_DEV_ADDR_MGMT = 17, /* Device Address Management */
447 UWB_RC_CMD_GET_IE = 18, /* GET Information Elements */
448 UWB_RC_CMD_RESET = 19,
449 UWB_RC_CMD_SCAN = 20, /* Scan management */
450 UWB_RC_CMD_SET_BEACON_FILTER = 21,
451 UWB_RC_CMD_SET_DRP_IE = 22, /* Dynamic Reservation Protocol IEs */
452 UWB_RC_CMD_SET_IE = 23, /* Information Element management */
453 UWB_RC_CMD_SET_NOTIFICATION_FILTER = 24,
454 UWB_RC_CMD_SET_TX_POWER = 25,
455 UWB_RC_CMD_SLEEP = 26,
456 UWB_RC_CMD_START_BEACON = 27,
457 UWB_RC_CMD_STOP_BEACON = 28,
458 UWB_RC_CMD_BP_MERGE = 29,
459 UWB_RC_CMD_SEND_COMMAND_FRAME = 30,
460 UWB_RC_CMD_SET_ASIE_NOTIF = 31,
461};
462
463/* Notifications from the radio controller */
464enum uwb_rc_evt {
465 UWB_RC_EVT_IE_RCV = 0,
466 UWB_RC_EVT_BEACON = 1,
467 UWB_RC_EVT_BEACON_SIZE = 2,
468 UWB_RC_EVT_BPOIE_CHANGE = 3,
469 UWB_RC_EVT_BP_SLOT_CHANGE = 4,
470 UWB_RC_EVT_BP_SWITCH_IE_RCV = 5,
471 UWB_RC_EVT_DEV_ADDR_CONFLICT = 6,
472 UWB_RC_EVT_DRP_AVAIL = 7,
473 UWB_RC_EVT_DRP = 8,
474 UWB_RC_EVT_BP_SWITCH_STATUS = 9,
475 UWB_RC_EVT_CMD_FRAME_RCV = 10,
476 UWB_RC_EVT_CHANNEL_CHANGE_IE_RCV = 11,
477 /* Events (command responses) use the same code as the command */
478 UWB_RC_EVT_UNKNOWN_CMD_RCV = 65535,
479};
480
481enum uwb_rc_extended_type_1_cmd {
482 UWB_RC_SET_DAA_ENERGY_MASK = 32,
483 UWB_RC_SET_NOTIFICATION_FILTER_EX = 33,
484};
485
486enum uwb_rc_extended_type_1_evt {
487 UWB_RC_DAA_ENERGY_DETECTED = 0,
488};
489
490/* Radio Control Result Code. [WHCI] table 3-3. */
491enum {
492 UWB_RC_RES_SUCCESS = 0,
493 UWB_RC_RES_FAIL,
494 UWB_RC_RES_FAIL_HARDWARE,
495 UWB_RC_RES_FAIL_NO_SLOTS,
496 UWB_RC_RES_FAIL_BEACON_TOO_LARGE,
497 UWB_RC_RES_FAIL_INVALID_PARAMETER,
498 UWB_RC_RES_FAIL_UNSUPPORTED_PWR_LEVEL,
499 UWB_RC_RES_FAIL_INVALID_IE_DATA,
500 UWB_RC_RES_FAIL_BEACON_SIZE_EXCEEDED,
501 UWB_RC_RES_FAIL_CANCELLED,
502 UWB_RC_RES_FAIL_INVALID_STATE,
503 UWB_RC_RES_FAIL_INVALID_SIZE,
504 UWB_RC_RES_FAIL_ACK_NOT_RECEIVED,
505 UWB_RC_RES_FAIL_NO_MORE_ASIE_NOTIF,
506 UWB_RC_RES_FAIL_TIME_OUT = 255,
507};
508
509/* Confirm event. [WHCI] section 3.1.3.1 etc. */
510struct uwb_rc_evt_confirm {
511 struct uwb_rceb rceb;
512 u8 bResultCode;
513} __attribute__((packed));
514
515/* Device Address Management event. [WHCI] section 3.1.3.2. */
516struct uwb_rc_evt_dev_addr_mgmt {
517 struct uwb_rceb rceb;
518 u8 baAddr[6];
519 u8 bResultCode;
520} __attribute__((packed));
521
522
523/* Get IE Event. [WHCI] section 3.1.3.3. */
524struct uwb_rc_evt_get_ie {
525 struct uwb_rceb rceb;
526 __le16 wIELength;
527 u8 IEData[];
528} __attribute__((packed));
529
530/* Set DRP IE Event. [WHCI] section 3.1.3.7. */
531struct uwb_rc_evt_set_drp_ie {
532 struct uwb_rceb rceb;
533 __le16 wRemainingSpace;
534 u8 bResultCode;
535} __attribute__((packed));
536
537/* Set IE Event. [WHCI] section 3.1.3.8. */
538struct uwb_rc_evt_set_ie {
539 struct uwb_rceb rceb;
540 __le16 RemainingSpace;
541 u8 bResultCode;
542} __attribute__((packed));
543
544/* Scan command. [WHCI] 3.1.3.5. */
545struct uwb_rc_cmd_scan {
546 struct uwb_rccb rccb;
547 u8 bChannelNumber;
548 u8 bScanState;
549 __le16 wStartTime;
550} __attribute__((packed));
551
552/* Set DRP IE command. [WHCI] section 3.1.3.7. */
553struct uwb_rc_cmd_set_drp_ie {
554 struct uwb_rccb rccb;
555 __le16 wIELength;
556 struct uwb_ie_drp IEData[];
557} __attribute__((packed));
558
559/* Set IE command. [WHCI] section 3.1.3.8. */
560struct uwb_rc_cmd_set_ie {
561 struct uwb_rccb rccb;
562 __le16 wIELength;
563 u8 IEData[];
564} __attribute__((packed));
565
566/* Set DAA Energy Mask event. [WHCI 0.96] section 3.1.3.17. */
567struct uwb_rc_evt_set_daa_energy_mask {
568 struct uwb_rceb rceb;
569 __le16 wLength;
570 u8 result;
571} __attribute__((packed));
572
573/* Set Notification Filter Extended event. [WHCI 0.96] section 3.1.3.18. */
574struct uwb_rc_evt_set_notification_filter_ex {
575 struct uwb_rceb rceb;
576 __le16 wLength;
577 u8 result;
578} __attribute__((packed));
579
580/* IE Received notification. [WHCI] section 3.1.4.1. */
581struct uwb_rc_evt_ie_rcv {
582 struct uwb_rceb rceb;
583 struct uwb_dev_addr SrcAddr;
584 __le16 wIELength;
585 u8 IEData[];
586} __attribute__((packed));
587
588/* Type of the received beacon. [WHCI] section 3.1.4.2. */
589enum uwb_rc_beacon_type {
590 UWB_RC_BEACON_TYPE_SCAN = 0,
591 UWB_RC_BEACON_TYPE_NEIGHBOR,
592 UWB_RC_BEACON_TYPE_OL_ALIEN,
593 UWB_RC_BEACON_TYPE_NOL_ALIEN,
594};
595
596/* Beacon received notification. [WHCI] 3.1.4.2. */
597struct uwb_rc_evt_beacon {
598 struct uwb_rceb rceb;
599 u8 bChannelNumber;
600 u8 bBeaconType;
601 __le16 wBPSTOffset;
602 u8 bLQI;
603 u8 bRSSI;
604 __le16 wBeaconInfoLength;
605 u8 BeaconInfo[];
606} __attribute__((packed));
607
608
609/* Beacon Size Change notification. [WHCI] section 3.1.4.3 */
610struct uwb_rc_evt_beacon_size {
611 struct uwb_rceb rceb;
612 __le16 wNewBeaconSize;
613} __attribute__((packed));
614
615
616/* BPOIE Change notification. [WHCI] section 3.1.4.4. */
617struct uwb_rc_evt_bpoie_change {
618 struct uwb_rceb rceb;
619 __le16 wBPOIELength;
620 u8 BPOIE[];
621} __attribute__((packed));
622
623
624/* Beacon Slot Change notification. [WHCI] section 3.1.4.5. */
625struct uwb_rc_evt_bp_slot_change {
626 struct uwb_rceb rceb;
627 u8 slot_info;
628} __attribute__((packed));
629
630static inline int uwb_rc_evt_bp_slot_change_slot_num(
631 const struct uwb_rc_evt_bp_slot_change *evt)
632{
633 return evt->slot_info & 0x7f;
634}
635
636static inline int uwb_rc_evt_bp_slot_change_no_slot(
637 const struct uwb_rc_evt_bp_slot_change *evt)
638{
639 return (evt->slot_info & 0x80) >> 7;
640}
641
642/* BP Switch IE Received notification. [WHCI] section 3.1.4.6. */
643struct uwb_rc_evt_bp_switch_ie_rcv {
644 struct uwb_rceb rceb;
645 struct uwb_dev_addr wSrcAddr;
646 __le16 wIELength;
647 u8 IEData[];
648} __attribute__((packed));
649
650/* DevAddr Conflict notification. [WHCI] section 3.1.4.7. */
651struct uwb_rc_evt_dev_addr_conflict {
652 struct uwb_rceb rceb;
653} __attribute__((packed));
654
655/* DRP notification. [WHCI] section 3.1.4.9. */
656struct uwb_rc_evt_drp {
657 struct uwb_rceb rceb;
658 struct uwb_dev_addr src_addr;
659 u8 reason;
660 u8 beacon_slot_number;
661 __le16 ie_length;
662 u8 ie_data[];
663} __attribute__((packed));
664
665static inline enum uwb_drp_notif_reason uwb_rc_evt_drp_reason(struct uwb_rc_evt_drp *evt)
666{
667 return evt->reason & 0x0f;
668}
669
670
671/* DRP Availability Change notification. [WHCI] section 3.1.4.8. */
672struct uwb_rc_evt_drp_avail {
673 struct uwb_rceb rceb;
674 DECLARE_BITMAP(bmp, UWB_NUM_MAS);
675} __attribute__((packed));
676
677/* BP switch status notification. [WHCI] section 3.1.4.10. */
678struct uwb_rc_evt_bp_switch_status {
679 struct uwb_rceb rceb;
680 u8 status;
681 u8 slot_offset;
682 __le16 bpst_offset;
683 u8 move_countdown;
684} __attribute__((packed));
685
686/* Command Frame Received notification. [WHCI] section 3.1.4.11. */
687struct uwb_rc_evt_cmd_frame_rcv {
688 struct uwb_rceb rceb;
689 __le16 receive_time;
690 struct uwb_dev_addr wSrcAddr;
691 struct uwb_dev_addr wDstAddr;
692 __le16 control;
693 __le16 reserved;
694 __le16 dataLength;
695 u8 data[];
696} __attribute__((packed));
697
698/* Channel Change IE Received notification. [WHCI] section 3.1.4.12. */
699struct uwb_rc_evt_channel_change_ie_rcv {
700 struct uwb_rceb rceb;
701 struct uwb_dev_addr wSrcAddr;
702 __le16 wIELength;
703 u8 IEData[];
704} __attribute__((packed));
705
706/* DAA Energy Detected notification. [WHCI 0.96] section 3.1.4.14. */
707struct uwb_rc_evt_daa_energy_detected {
708 struct uwb_rceb rceb;
709 __le16 wLength;
710 u8 bandID;
711 u8 reserved;
712 u8 toneBmp[16];
713} __attribute__((packed));
714
715
716/**
717 * Radio Control Interface Class Descriptor
718 *
719 * WUSB 1.0 [8.6.1.2]
720 */
721struct uwb_rc_control_intf_class_desc {
722 u8 bLength;
723 u8 bDescriptorType;
724 __le16 bcdRCIVersion;
725} __attribute__((packed));
726
727#endif /* #ifndef __LINUX__UWB_SPEC_H__ */
diff --git a/include/linux/uwb/umc.h b/include/linux/uwb/umc.h
new file mode 100644
index 000000000000..36a39e34f8d7
--- /dev/null
+++ b/include/linux/uwb/umc.h
@@ -0,0 +1,194 @@
1/*
2 * UWB Multi-interface Controller support.
3 *
4 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
5 *
6 * This file is released under the GPLv2
7 *
8 * UMC (UWB Multi-interface Controller) capabilities (e.g., radio
9 * controller, host controller) are presented as devices on the "umc"
10 * bus.
11 *
12 * The radio controller is not strictly a UMC capability but it's
13 * useful to present it as such.
14 *
15 * References:
16 *
17 * [WHCI] Wireless Host Controller Interface Specification for
18 * Certified Wireless Universal Serial Bus, revision 0.95.
19 *
20 * How this works is somewhat convoluted but simple. The whci.ko
21 * driver loads when WHCI devices are detected. These WHCI devices
22 * expose many capability devices in the same PCI function (they
23 * could not have reused functions, no), so for each PCI function
24 * that exposes these capabilities, whci creates a umc_dev [whci_probe() -> whci_add_cap()]
25 * with umc_device_create() and adds it to the bus with
26 * umc_device_register().
27 *
28 * umc_device_register() calls device_register(), which causes the
29 * bus management code to call your UMC driver's something_probe()
30 * that you have registered for that capability code.
31 *
32 * Now when the WHCI device is removed, whci_remove() will go over
33 * each umc_dev assigned to each of the PCI function's capabilities
34 * and through whci_del_cap() call umc_device_unregister() on each
35 * created umc_dev. Of course, if you are bound to the device, your
36 * driver's something_remove() will be called.
37 */
38
39#ifndef _LINUX_UWB_UMC_H_
40#define _LINUX_UWB_UMC_H_
41
42#include <linux/device.h>
43#include <linux/pci.h>
44
45/*
46 * UMC capability IDs.
47 *
48 * 0x00 is reserved, so we use it for the radio controller device.
49 *
50 * [WHCI] table 2-8
51 */
52#define UMC_CAP_ID_WHCI_RC 0x00 /* radio controller */
53#define UMC_CAP_ID_WHCI_WUSB_HC 0x01 /* WUSB host controller */
54
55/**
56 * struct umc_dev - UMC capability device
57 *
58 * @version: version of the specification this capability conforms to.
59 * @cap_id: capability ID.
60 * @bar: PCI Bar (64 bit) where the resource lies
61 * @resource: register space resource.
62 * @irq: interrupt line.
63 */
64struct umc_dev {
65 u16 version;
66 u8 cap_id;
67 u8 bar;
68 struct resource resource;
69 unsigned irq;
70 struct device dev;
71};
72
73#define to_umc_dev(d) container_of(d, struct umc_dev, dev)
74
75/**
76 * struct umc_driver - UMC capability driver
77 * @cap_id: supported capability ID.
78 * @match: driver specific capability matching function.
79 * @match_data: driver specific data for match() (e.g., a
80 * table of pci_device_id's if umc_match_pci_id() is used).
81 */
82struct umc_driver {
83 char *name;
84 u8 cap_id;
85 int (*match)(struct umc_driver *, struct umc_dev *);
86 const void *match_data;
87
88 int (*probe)(struct umc_dev *);
89 void (*remove)(struct umc_dev *);
90 int (*suspend)(struct umc_dev *, pm_message_t state);
91 int (*resume)(struct umc_dev *);
92
93 struct device_driver driver;
94};
95
96#define to_umc_driver(d) container_of(d, struct umc_driver, driver)
97
98extern struct bus_type umc_bus_type;
99
100struct umc_dev *umc_device_create(struct device *parent, int n);
101int __must_check umc_device_register(struct umc_dev *umc);
102void umc_device_unregister(struct umc_dev *umc);
103
104int __must_check __umc_driver_register(struct umc_driver *umc_drv,
105 struct module *mod,
106 const char *mod_name);
107
108/**
109 * umc_driver_register - register a UMC capability driver.
110 * @umc_drv: pointer to the driver.
111 */
112static inline int __must_check umc_driver_register(struct umc_driver *umc_drv)
113{
114 return __umc_driver_register(umc_drv, THIS_MODULE, KBUILD_MODNAME);
115}
116void umc_driver_unregister(struct umc_driver *umc_drv);
117
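/*
 * Registration sketch (illustrative only, not part of this header): a
 * hypothetical driver for the WUSB host controller capability; only
 * the members needed for matching and probing are shown.
 */
#if 0
static int example_probe(struct umc_dev *umc)
{
	/* ioremap umc->resource, request umc->irq, etc. */
	return 0;
}

static void example_remove(struct umc_dev *umc)
{
}

static struct umc_driver example_umc_driver = {
	.name	= "example-whc",	/* hypothetical driver name */
	.cap_id	= UMC_CAP_ID_WHCI_WUSB_HC,
	.probe	= example_probe,
	.remove	= example_remove,
};

static int __init example_init(void)
{
	return umc_driver_register(&example_umc_driver);
}

static void __exit example_exit(void)
{
	umc_driver_unregister(&example_umc_driver);
}
#endif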
118/*
119 * Utility function you can use to match (umc_driver->match) against a
120 * null-terminated array of 'struct pci_device_id' in
121 * umc_driver->match_data.
122 */
123int umc_match_pci_id(struct umc_driver *umc_drv, struct umc_dev *umc);
124
125/**
126 * umc_parent_pci_dev - return the UMC's parent PCI device or NULL if none
127 * @umc_dev: UMC device whose parent PCI device we are looking for
128 *
129 * DIRTY!!! DON'T RELY ON THIS
130 *
131 * FIXME: This is as dirty as it gets, but we need some way to check
132 * the correct type of umc_dev->parent (so that for example, we can
133 * cast to pci_dev). Casting to pci_dev is necessary because at some
134 * point we need to request resources from the device. Mapping is
135 * easily overcome (ioremap and friends are bus agnostic), but hooking
136 * up to some error handlers (such as pci error handlers) might need
137 * this.
138 *
139 * THIS might (probably will) be removed in the future, so don't count
140 * on it.
141 */
142static inline struct pci_dev *umc_parent_pci_dev(struct umc_dev *umc_dev)
143{
144 struct pci_dev *pci_dev = NULL;
145 if (umc_dev->dev.parent->bus == &pci_bus_type)
146 pci_dev = to_pci_dev(umc_dev->dev.parent);
147 return pci_dev;
148}
149
150/**
151 * umc_dev_get() - reference a UMC device.
152 * @umc_dev: Pointer to UMC device.
153 *
154 * NOTE: we are assuming in this whole scheme that the parent device
155 * is referenced at _probe() time and unreferenced at _remove()
156 * time by the parent's subsystem.
157 */
158static inline struct umc_dev *umc_dev_get(struct umc_dev *umc_dev)
159{
160 get_device(&umc_dev->dev);
161 return umc_dev;
162}
163
164/**
165 * umc_dev_put() - unreference a UMC device.
166 * @umc_dev: Pointer to UMC device.
167 */
168static inline void umc_dev_put(struct umc_dev *umc_dev)
169{
170 put_device(&umc_dev->dev);
171}
172
173/**
174 * umc_set_drvdata - set UMC device's driver data.
175 * @umc_dev: Pointer to UMC device.
176 * @data: Data to set.
177 */
178static inline void umc_set_drvdata(struct umc_dev *umc_dev, void *data)
179{
180 dev_set_drvdata(&umc_dev->dev, data);
181}
182
183/**
184 * umc_get_drvdata - recover UMC device's driver data.
185 * @umc_dev: Pointer to UMC device.
186 */
187static inline void *umc_get_drvdata(struct umc_dev *umc_dev)
188{
189 return dev_get_drvdata(&umc_dev->dev);
190}
191
192int umc_controller_reset(struct umc_dev *umc);
193
194#endif /* #ifndef _LINUX_UWB_UMC_H_ */
diff --git a/include/linux/uwb/whci.h b/include/linux/uwb/whci.h
new file mode 100644
index 000000000000..915ec23042d4
--- /dev/null
+++ b/include/linux/uwb/whci.h
@@ -0,0 +1,117 @@
1/*
2 * Wireless Host Controller Interface for Ultra-Wide-Band and Wireless USB
3 *
4 * Copyright (C) 2005-2006 Intel Corporation
5 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 *
23 * References:
24 * [WHCI] Wireless Host Controller Interface Specification for
25 * Certified Wireless Universal Serial Bus, revision 0.95.
26 */
27#ifndef _LINUX_UWB_WHCI_H_
28#define _LINUX_UWB_WHCI_H_
29
30#include <linux/pci.h>
31
32/*
33 * UWB interface capability registers (offsets from UWBBASE)
34 *
35 * [WHCI] section 2.2
36 */
37#define UWBCAPINFO 0x00 /* == UWBCAPDATA(0) */
38# define UWBCAPINFO_TO_N_CAPS(c) (((c) >> 0) & 0xFull)
39#define UWBCAPDATA(n) (8*(n))
40# define UWBCAPDATA_TO_VERSION(c) (((c) >> 32) & 0xFFFFull)
41# define UWBCAPDATA_TO_OFFSET(c) (((c) >> 18) & 0x3FFFull)
42# define UWBCAPDATA_TO_BAR(c) (((c) >> 16) & 0x3ull)
43# define UWBCAPDATA_TO_SIZE(c) ((((c) >> 8) & 0xFFull) * sizeof(u32))
44# define UWBCAPDATA_TO_CAP_ID(c) (((c) >> 0) & 0xFFull)
45
46/* Size of the WHCI capability data (including the RC capability) for
47 a device with n capabilities. */
48#define UWBCAPDATA_SIZE(n) (8 + 8*(n))
49
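/*
 * Decoding sketch (illustrative only, not part of this header): walk
 * the capability table at UWBBASE with the accessors above. 'base' is
 * a hypothetical ioremap()ed pointer to UWBBASE, and le_readq() is the
 * helper defined further down in this file:
 *
 *	u64 capinfo = le_readq(base + UWBCAPINFO);
 *	unsigned n_caps = UWBCAPINFO_TO_N_CAPS(capinfo);
 *	unsigned n;
 *
 *	for (n = 1; n <= n_caps; n++) {
 *		u64 capdata = le_readq(base + UWBCAPDATA(n));
 *
 *		pr_info("cap %u: id 0x%02x bar %u offset 0x%04x size %u\n",
 *			n, (unsigned)UWBCAPDATA_TO_CAP_ID(capdata),
 *			(unsigned)UWBCAPDATA_TO_BAR(capdata),
 *			(unsigned)UWBCAPDATA_TO_OFFSET(capdata),
 *			(unsigned)UWBCAPDATA_TO_SIZE(capdata));
 *	}
 */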
50
51/*
52 * URC registers (offsets from URCBASE)
53 *
54 * [WHCI] section 2.3
55 */
56#define URCCMD 0x00
57# define URCCMD_RESET (1 << 31) /* UMC Hardware reset */
58# define URCCMD_RS (1 << 30) /* Run/Stop */
59# define URCCMD_EARV (1 << 29) /* Event Address Register Valid */
60# define URCCMD_ACTIVE (1 << 15) /* Command is active */
61# define URCCMD_IWR (1 << 14) /* Interrupt When Ready */
62# define URCCMD_SIZE_MASK 0x00000fff /* Command size mask */
63#define URCSTS 0x04
64# define URCSTS_EPS (1 << 17) /* Event Processing Status */
65# define URCSTS_HALTED (1 << 16) /* RC halted */
66# define URCSTS_HSE (1 << 10) /* Host System Error...fried */
67# define URCSTS_ER (1 << 9) /* Event Ready */
68# define URCSTS_RCI (1 << 8) /* Ready for Command Interrupt */
69# define URCSTS_INT_MASK 0x00000700 /* URC interrupt sources */
70# define URCSTS_ISI 0x000000ff /* Interrupt Source Identification */
71#define URCINTR 0x08
72# define URCINTR_EN_ALL 0x000007ff /* Enable all interrupt sources */
73#define URCCMDADDR 0x10
74#define URCEVTADDR 0x18
75# define URCEVTADDR_OFFSET_MASK 0xfff /* Event pointer offset mask */
76
77
78/** Write 32 bit @value to little endian register at @addr */
79static inline
80void le_writel(u32 value, void __iomem *addr)
81{
82 iowrite32(value, addr);
83}
84
85
86/** Read from 32 bit little endian register at @addr */
87static inline
88u32 le_readl(void __iomem *addr)
89{
90 return ioread32(addr);
91}
92
93
94/** Write 64 bit @value to little endian register at @addr */
95static inline
96void le_writeq(u64 value, void __iomem *addr)
97{
98 iowrite32(value, addr);
99 iowrite32(value >> 32, addr + 4);
100}
101
102
103/** Read from 64 bit little endian register at @addr */
104static inline
105u64 le_readq(void __iomem *addr)
106{
107 u64 value;
108 value = ioread32(addr);
109 value |= (u64)ioread32(addr + 4) << 32;
110 return value;
111}
112
113extern int whci_wait_for(struct device *dev, u32 __iomem *reg,
114 u32 mask, u32 result,
115 unsigned long max_ms, const char *tag);
116
117#endif /* #ifndef _LINUX_UWB_WHCI_H_ */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index d4b03034ee73..1f126e30766c 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -293,6 +293,7 @@ struct v4l2_pix_format {
293#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */ 293#define V4L2_PIX_FMT_YVU420 v4l2_fourcc('Y', 'V', '1', '2') /* 12 YVU 4:2:0 */
294#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */ 294#define V4L2_PIX_FMT_YUYV v4l2_fourcc('Y', 'U', 'Y', 'V') /* 16 YUV 4:2:2 */
295#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */ 295#define V4L2_PIX_FMT_UYVY v4l2_fourcc('U', 'Y', 'V', 'Y') /* 16 YUV 4:2:2 */
296#define V4L2_PIX_FMT_VYUY v4l2_fourcc('V', 'Y', 'U', 'Y') /* 16 YUV 4:2:2 */
296#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */ 297#define V4L2_PIX_FMT_YUV422P v4l2_fourcc('4', '2', '2', 'P') /* 16 YVU422 planar */
297#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */ 298#define V4L2_PIX_FMT_YUV411P v4l2_fourcc('4', '1', '1', 'P') /* 16 YVU411 planar */
298#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */ 299#define V4L2_PIX_FMT_Y41P v4l2_fourcc('Y', '4', '1', 'P') /* 12 YUV 4:1:1 */
@@ -304,6 +305,8 @@ struct v4l2_pix_format {
304/* two planes -- one Y, one Cr + Cb interleaved */ 305/* two planes -- one Y, one Cr + Cb interleaved */
305#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ 306#define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */
306#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ 307#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */
308#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */
309#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
307 310
308/* The following formats are not defined in the V4L2 specification */ 311/* The following formats are not defined in the V4L2 specification */
309#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ 312#define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */
@@ -315,6 +318,13 @@ struct v4l2_pix_format {
315/* see http://www.siliconimaging.com/RGB%20Bayer.htm */ 318/* see http://www.siliconimaging.com/RGB%20Bayer.htm */
316#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ 319#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
317#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ 320#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
321/*
322 * 10bit raw bayer, expanded to 16 bits
323 * xxxxrrrrrrrrrrxxxxgggggggggg xxxxggggggggggxxxxbbbbbbbbbb...
324 */
325#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0')
326/* 10bit raw bayer DPCM compressed to 8 bits */
327#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
318#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */ 328#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
319 329
320/* compressed formats */ 330/* compressed formats */
@@ -1043,7 +1053,7 @@ enum v4l2_mpeg_video_bitrate_mode {
1043#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210) 1053#define V4L2_CID_MPEG_VIDEO_MUTE (V4L2_CID_MPEG_BASE+210)
1044#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211) 1054#define V4L2_CID_MPEG_VIDEO_MUTE_YUV (V4L2_CID_MPEG_BASE+211)
1045 1055
1046/* MPEG-class control IDs specific to the CX2584x driver as defined by V4L2 */ 1056/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
1047#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000) 1057#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000)
1048#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0) 1058#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0)
1049enum v4l2_mpeg_cx2341x_video_spatial_filter_mode { 1059enum v4l2_mpeg_cx2341x_video_spatial_filter_mode {
@@ -1110,6 +1120,12 @@ enum v4l2_exposure_auto_type {
1110#define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11) 1120#define V4L2_CID_FOCUS_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+11)
1111#define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12) 1121#define V4L2_CID_FOCUS_AUTO (V4L2_CID_CAMERA_CLASS_BASE+12)
1112 1122
1123#define V4L2_CID_ZOOM_ABSOLUTE (V4L2_CID_CAMERA_CLASS_BASE+13)
1124#define V4L2_CID_ZOOM_RELATIVE (V4L2_CID_CAMERA_CLASS_BASE+14)
1125#define V4L2_CID_ZOOM_CONTINUOUS (V4L2_CID_CAMERA_CLASS_BASE+15)
1126
1127#define V4L2_CID_PRIVACY (V4L2_CID_CAMERA_CLASS_BASE+16)
1128
1113/* 1129/*
1114 * T U N I N G 1130 * T U N I N G
1115 */ 1131 */
@@ -1362,6 +1378,7 @@ struct v4l2_streamparm {
1362#define V4L2_CHIP_MATCH_HOST 0 /* Match against chip ID on host (0 for the host) */ 1378#define V4L2_CHIP_MATCH_HOST 0 /* Match against chip ID on host (0 for the host) */
1363#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver ID */ 1379#define V4L2_CHIP_MATCH_I2C_DRIVER 1 /* Match against I2C driver ID */
1364#define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */ 1380#define V4L2_CHIP_MATCH_I2C_ADDR 2 /* Match against I2C 7-bit address */
1381#define V4L2_CHIP_MATCH_AC97 3 /* Match against ancillary AC97 chip */
1365 1382
1366struct v4l2_register { 1383struct v4l2_register {
1367 __u32 match_type; /* Match type */ 1384 __u32 match_type; /* Match type */
@@ -1451,6 +1468,8 @@ struct v4l2_chip_ident {
1451#define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident) 1468#define VIDIOC_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_chip_ident)
1452#endif 1469#endif
1453#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) 1470#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
1471/* Reminder: when adding new ioctls please add support for them to
1472 drivers/media/video/v4l2-compat-ioctl32.c as well! */
1454 1473
1455#ifdef __OLD_VIDIOC_ 1474#ifdef __OLD_VIDIOC_
1456/* for compatibility, will go away some day */ 1475/* for compatibility, will go away some day */
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h
index c30c7bfbf39b..8726ff77763e 100644
--- a/include/linux/virtio_balloon.h
+++ b/include/linux/virtio_balloon.h
@@ -10,6 +10,9 @@
10/* The feature bitmap for virtio balloon */ 10/* The feature bitmap for virtio balloon */
11#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */ 11#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
12 12
13/* Size of a PFN in the balloon interface. */
14#define VIRTIO_BALLOON_PFN_SHIFT 12
15
13struct virtio_balloon_config 16struct virtio_balloon_config
14{ 17{
15 /* Number of pages host wants Guest to give up. */ 18 /* Number of pages host wants Guest to give up. */
diff --git a/include/linux/virtio_console.h b/include/linux/virtio_console.h
index 19a0da0dba41..7615ffcdd555 100644
--- a/include/linux/virtio_console.h
+++ b/include/linux/virtio_console.h
@@ -7,6 +7,17 @@
7/* The ID for virtio console */ 7/* The ID for virtio console */
8#define VIRTIO_ID_CONSOLE 3 8#define VIRTIO_ID_CONSOLE 3
9 9
10/* Feature bits */
11#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
12
13struct virtio_console_config {
14	/* columns of the screen */
15	__u16 cols;
16	/* rows of the screen */
17 __u16 rows;
18} __attribute__((packed));
19
20
10#ifdef __KERNEL__ 21#ifdef __KERNEL__
11int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int)); 22int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int));
12#endif /* __KERNEL__ */ 23#endif /* __KERNEL__ */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 5e33761b9b8a..5cdd0aa8bde9 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -20,6 +20,7 @@
20#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */ 20#define VIRTIO_NET_F_HOST_TSO6 12 /* Host can handle TSOv6 in. */
21#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */ 21#define VIRTIO_NET_F_HOST_ECN 13 /* Host can handle TSO[6] w/ ECN in. */
22#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */ 22#define VIRTIO_NET_F_HOST_UFO 14 /* Host can handle UFO in. */
23#define VIRTIO_NET_F_MRG_RXBUF 15 /* Host can merge receive buffers. */
23 24
24struct virtio_net_config 25struct virtio_net_config
25{ 26{
@@ -44,4 +45,12 @@ struct virtio_net_hdr
44 __u16 csum_start; /* Position to start checksumming from */ 45 __u16 csum_start; /* Position to start checksumming from */
45 __u16 csum_offset; /* Offset after that to place checksum */ 46 __u16 csum_offset; /* Offset after that to place checksum */
46}; 47};
48
49/* This is the version of the header to use when the MRG_RXBUF
50 * feature has been negotiated. */
51struct virtio_net_hdr_mrg_rxbuf {
52 struct virtio_net_hdr hdr;
53 __u16 num_buffers; /* Number of merged rx buffers */
54};
55
47#endif /* _LINUX_VIRTIO_NET_H */ 56#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h
index cdef35742932..cd0fd5d181a6 100644
--- a/include/linux/virtio_pci.h
+++ b/include/linux/virtio_pci.h
@@ -53,4 +53,12 @@
53 53
54/* Virtio ABI version, this must match exactly */ 54/* Virtio ABI version, this must match exactly */
55#define VIRTIO_PCI_ABI_VERSION 0 55#define VIRTIO_PCI_ABI_VERSION 0
56
57/* How many bits to shift physical queue address written to QUEUE_PFN.
58 * 12 is historical, and due to x86 page size. */
59#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
60
61/* The alignment to use between consumer and producer parts of vring.
62 * x86 pagesize again. */
63#define VIRTIO_PCI_VRING_ALIGN 4096
56#endif 64#endif
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index c4a598fb3826..71e03722fb59 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -83,7 +83,7 @@ struct vring {
83 * __u16 avail_idx; 83 * __u16 avail_idx;
84 * __u16 available[num]; 84 * __u16 available[num];
85 * 85 *
86 * // Padding to the next page boundary. 86 * // Padding to the next align boundary.
87 * char pad[]; 87 * char pad[];
88 * 88 *
89 * // A ring of used descriptor heads with free-running index. 89 * // A ring of used descriptor heads with free-running index.
@@ -93,19 +93,19 @@ struct vring {
93 * }; 93 * };
94 */ 94 */
95static inline void vring_init(struct vring *vr, unsigned int num, void *p, 95static inline void vring_init(struct vring *vr, unsigned int num, void *p,
96 unsigned long pagesize) 96 unsigned long align)
97{ 97{
98 vr->num = num; 98 vr->num = num;
99 vr->desc = p; 99 vr->desc = p;
100 vr->avail = p + num*sizeof(struct vring_desc); 100 vr->avail = p + num*sizeof(struct vring_desc);
101 vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + pagesize-1) 101 vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align-1)
102 & ~(pagesize - 1)); 102 & ~(align - 1));
103} 103}
104 104
105static inline unsigned vring_size(unsigned int num, unsigned long pagesize) 105static inline unsigned vring_size(unsigned int num, unsigned long align)
106{ 106{
107 return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num) 107 return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num)
108 + pagesize - 1) & ~(pagesize - 1)) 108 + align - 1) & ~(align - 1))
109 + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num; 109 + sizeof(__u16) * 2 + sizeof(struct vring_used_elem) * num;
110} 110}
111 111
@@ -115,6 +115,7 @@ struct virtio_device;
115struct virtqueue; 115struct virtqueue;
116 116
117struct virtqueue *vring_new_virtqueue(unsigned int num, 117struct virtqueue *vring_new_virtqueue(unsigned int num,
118 unsigned int vring_align,
118 struct virtio_device *vdev, 119 struct virtio_device *vdev,
119 void *pages, 120 void *pages,
120 void (*notify)(struct virtqueue *vq), 121 void (*notify)(struct virtqueue *vq),
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 328eb4022727..307b88577eaa 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -2,6 +2,7 @@
2#define _LINUX_VMALLOC_H 2#define _LINUX_VMALLOC_H
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include <linux/init.h>
5#include <asm/page.h> /* pgprot_t */ 6#include <asm/page.h> /* pgprot_t */
6 7
7struct vm_area_struct; /* vma defining user mapping in mm_types.h */ 8struct vm_area_struct; /* vma defining user mapping in mm_types.h */
@@ -23,7 +24,6 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
23#endif 24#endif
24 25
25struct vm_struct { 26struct vm_struct {
26 /* keep next,addr,size together to speedup lookups */
27 struct vm_struct *next; 27 struct vm_struct *next;
28 void *addr; 28 void *addr;
29 unsigned long size; 29 unsigned long size;
@@ -37,6 +37,19 @@ struct vm_struct {
37/* 37/*
38 * Highlevel APIs for driver use 38 * Highlevel APIs for driver use
39 */ 39 */
40extern void vm_unmap_ram(const void *mem, unsigned int count);
41extern void *vm_map_ram(struct page **pages, unsigned int count,
42 int node, pgprot_t prot);
43extern void vm_unmap_aliases(void);
44
45#ifdef CONFIG_MMU
46extern void __init vmalloc_init(void);
47#else
48static inline void vmalloc_init(void)
49{
50}
51#endif
52
40extern void *vmalloc(unsigned long size); 53extern void *vmalloc(unsigned long size);
41extern void *vmalloc_user(unsigned long size); 54extern void *vmalloc_user(unsigned long size);
42extern void *vmalloc_node(unsigned long size, int node); 55extern void *vmalloc_node(unsigned long size, int node);
@@ -90,6 +103,4 @@ extern void free_vm_area(struct vm_struct *area);
90extern rwlock_t vmlist_lock; 103extern rwlock_t vmlist_lock;
91extern struct vm_struct *vmlist; 104extern struct vm_struct *vmlist;
92 105
93extern const struct seq_operations vmalloc_op;
94
95#endif /* _LINUX_VMALLOC_H */ 106#endif /* _LINUX_VMALLOC_H */
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 58334d439516..524cd1b28ecb 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -41,13 +41,19 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
41#ifdef CONFIG_HUGETLB_PAGE 41#ifdef CONFIG_HUGETLB_PAGE
42 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, 42 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
43#endif 43#endif
44#ifdef CONFIG_UNEVICTABLE_LRU
45 UNEVICTABLE_PGCULLED, /* culled to noreclaim list */
46 UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */
47 UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */
48 UNEVICTABLE_PGMLOCKED,
49 UNEVICTABLE_PGMUNLOCKED,
50 UNEVICTABLE_PGCLEARED, /* on COW, page truncate */
51 UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
52 UNEVICTABLE_MLOCKFREED,
53#endif
44 NR_VM_EVENT_ITEMS 54 NR_VM_EVENT_ITEMS
45}; 55};
46 56
47extern const struct seq_operations fragmentation_op;
48extern const struct seq_operations pagetypeinfo_op;
49extern const struct seq_operations zoneinfo_op;
50extern const struct seq_operations vmstat_op;
51extern int sysctl_stat_interval; 57extern int sysctl_stat_interval;
52 58
53#ifdef CONFIG_VM_EVENT_COUNTERS 59#ifdef CONFIG_VM_EVENT_COUNTERS
@@ -159,6 +165,16 @@ static inline unsigned long zone_page_state(struct zone *zone,
159 return x; 165 return x;
160} 166}
161 167
168extern unsigned long global_lru_pages(void);
169
170static inline unsigned long zone_lru_pages(struct zone *zone)
171{
172 return (zone_page_state(zone, NR_ACTIVE_ANON)
173 + zone_page_state(zone, NR_ACTIVE_FILE)
174 + zone_page_state(zone, NR_INACTIVE_ANON)
175 + zone_page_state(zone, NR_INACTIVE_FILE));
176}
177
162#ifdef CONFIG_NUMA 178#ifdef CONFIG_NUMA
163/* 179/*
164 * Determine the per node value of a stat item. This function 180 * Determine the per node value of a stat item. This function
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 0081147a9fe8..ef609f842fac 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -108,15 +108,6 @@ static inline int waitqueue_active(wait_queue_head_t *q)
108 return !list_empty(&q->task_list); 108 return !list_empty(&q->task_list);
109} 109}
110 110
111/*
112 * Used to distinguish between sync and async io wait context:
113 * sync i/o typically specifies a NULL wait queue entry or a wait
114 * queue entry bound to a task (current task) to wake up.
115 * aio specifies a wait queue entry with an async notification
116 * callback routine, not associated with any task.
117 */
118#define is_sync_wait(wait) (!(wait) || ((wait)->private))
119
120extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); 111extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
121extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); 112extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
122extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); 113extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
diff --git a/include/linux/wlp.h b/include/linux/wlp.h
new file mode 100644
index 000000000000..033545e145c7
--- /dev/null
+++ b/include/linux/wlp.h
@@ -0,0 +1,735 @@
1/*
2 * WiMedia Logical Link Control Protocol (WLP)
3 *
4 * Copyright (C) 2005-2006 Intel Corporation
5 * Reinette Chatre <reinette.chatre@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * FIXME: docs
23 *
24 * - Does not (yet) include support for WLP control frames
25 * WLP Draft 0.99 [6.5].
26 *
27 * A visual representation of the data structures.
28 *
29 * wssidB wssidB
30 * ^ ^
31 * | |
32 * wssidA wssidA
33 * wlp interface { ^ ^
34 * ... | |
35 * ... ... wssid wssid ...
36 * wlp --- ... | |
37 * }; neighbors --> neighbA --> neighbB
38 * ...
39 * wss
40 * ...
41 * eda cache --> neighborA --> neighborB --> neighborC ...
42 */
43
44#ifndef __LINUX__WLP_H_
45#define __LINUX__WLP_H_
46
47#include <linux/netdevice.h>
48#include <linux/skbuff.h>
49#include <linux/list.h>
50#include <linux/uwb.h>
51
52/**
53 * WLP Protocol ID
54 * WLP Draft 0.99 [6.2]
55 *
56 * The MUX header for all WLP frames
57 */
58#define WLP_PROTOCOL_ID 0x0100
59
60/**
61 * WLP Version
62 * WLP version placed in the association frames (WLP 0.99 [6.6])
63 */
64#define WLP_VERSION 0x10
65
66/**
67 * Bytes needed to print UUID as string
68 */
69#define WLP_WSS_UUID_STRSIZE 48
70
71/**
72 * Bytes needed to print nonce as string
73 */
74#define WLP_WSS_NONCE_STRSIZE 48
75
76
77/**
78 * Size used for the WSS name
79 *
80 * The WSS name is set to 65 bytes, 1 byte larger than the maximum
81 * allowed by the WLP spec. This is to have a null terminated string
82 * for display to the user. A maximum of 64 bytes will still be used
83 * when placing the WSS name field in association frames.
84 */
85#define WLP_WSS_NAME_SIZE 65
86
87/**
88 * Number of bytes added by WLP to a data frame
89 *
90 * A data frame transmitted from a host will be placed in a Standard or
91 * Abbreviated WLP frame. These have an extra 4 bytes of header (struct
92 * wlp_frame_std_abbrv_hdr).
93 * When the stack sends this data frame for transmission it needs to ensure
94 * there is enough headroom for this header.
95 */
96#define WLP_DATA_HLEN 4
97
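/*
 * Headroom sketch (illustrative only, not part of this header): code
 * that hands data frames to WLP should account for the extra header,
 * roughly as follows (hypothetical snippets):
 *
 *	dev->hard_header_len += WLP_DATA_HLEN;
 *
 * or, when building an skb by hand:
 *
 *	skb = dev_alloc_skb(len + WLP_DATA_HLEN);
 *	if (skb)
 *		skb_reserve(skb, WLP_DATA_HLEN);
 */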
98/**
99 * State of device regarding WLP Service Set
100 *
101 * WLP_WSS_STATE_NONE: the host does not participate in any WSS
102 * WLP_WSS_STATE_PART_ENROLLED: used as part of the enrollment sequence
103 * ("Partial Enroll"). This state is used to
104 * indicate the first part of enrollment that is
105 * unsecure. If the WSS is unsecure then the
106 * state will promptly go to WLP_WSS_STATE_ENROLLED;
107 * if the WSS is secure then the enrollment
108 * procedure takes a few more steps before we are
109 * enrolled.
110 * WLP_WSS_STATE_ENROLLED: the host is enrolled in a WSS
111 * WLP_WSS_STATE_ACTIVE: WSS is activated
112 * WLP_WSS_STATE_CONNECTED: host is connected to neighbor in WSS
113 *
114 */
115enum wlp_wss_state {
116 WLP_WSS_STATE_NONE = 0,
117 WLP_WSS_STATE_PART_ENROLLED,
118 WLP_WSS_STATE_ENROLLED,
119 WLP_WSS_STATE_ACTIVE,
120 WLP_WSS_STATE_CONNECTED,
121};

/**
 * WSS Secure status
 * WLP 0.99 Table 6
 *
 * Set to one if the WSS is secure, zero if it is not secure
 */
enum wlp_wss_sec_status {
	WLP_WSS_UNSECURE = 0,
	WLP_WSS_SECURE,
};

/**
 * WLP frame type
 * WLP Draft 0.99 [6.2 Table 1]
 */
enum wlp_frame_type {
	WLP_FRAME_STANDARD = 0,
	WLP_FRAME_ABBREVIATED,
	WLP_FRAME_CONTROL,
	WLP_FRAME_ASSOCIATION,
};

/**
 * WLP Association Message Type
 * WLP Draft 0.99 [6.6.1.2 Table 8]
 */
enum wlp_assoc_type {
	WLP_ASSOC_D1 = 2,
	WLP_ASSOC_D2 = 3,
	WLP_ASSOC_M1 = 4,
	WLP_ASSOC_M2 = 5,
	WLP_ASSOC_M3 = 7,
	WLP_ASSOC_M4 = 8,
	WLP_ASSOC_M5 = 9,
	WLP_ASSOC_M6 = 10,
	WLP_ASSOC_M7 = 11,
	WLP_ASSOC_M8 = 12,
	WLP_ASSOC_F0 = 14,
	WLP_ASSOC_E1 = 32,
	WLP_ASSOC_E2 = 33,
	WLP_ASSOC_C1 = 34,
	WLP_ASSOC_C2 = 35,
	WLP_ASSOC_C3 = 36,
	WLP_ASSOC_C4 = 37,
};

/**
 * WLP Attribute Type
 * WLP Draft 0.99 [6.6.1 Table 6]
 */
enum wlp_attr_type {
	WLP_ATTR_AUTH		= 0x1005,	/* Authenticator */
	WLP_ATTR_DEV_NAME	= 0x1011,	/* Device Name */
	WLP_ATTR_DEV_PWD_ID	= 0x1012,	/* Device Password ID */
	WLP_ATTR_E_HASH1	= 0x1014,	/* E-Hash1 */
	WLP_ATTR_E_HASH2	= 0x1015,	/* E-Hash2 */
	WLP_ATTR_E_SNONCE1	= 0x1016,	/* E-SNonce1 */
	WLP_ATTR_E_SNONCE2	= 0x1017,	/* E-SNonce2 */
	WLP_ATTR_ENCR_SET	= 0x1018,	/* Encrypted Settings */
	WLP_ATTR_ENRL_NONCE	= 0x101A,	/* Enrollee Nonce */
	WLP_ATTR_KEYWRAP_AUTH	= 0x101E,	/* Key Wrap Authenticator */
	WLP_ATTR_MANUF		= 0x1021,	/* Manufacturer */
	WLP_ATTR_MSG_TYPE	= 0x1022,	/* Message Type */
	WLP_ATTR_MODEL_NAME	= 0x1023,	/* Model Name */
	WLP_ATTR_MODEL_NR	= 0x1024,	/* Model Number */
	WLP_ATTR_PUB_KEY	= 0x1032,	/* Public Key */
	WLP_ATTR_REG_NONCE	= 0x1039,	/* Registrar Nonce */
	WLP_ATTR_R_HASH1	= 0x103D,	/* R-Hash1 */
	WLP_ATTR_R_HASH2	= 0x103E,	/* R-Hash2 */
	WLP_ATTR_R_SNONCE1	= 0x103F,	/* R-SNonce1 */
	WLP_ATTR_R_SNONCE2	= 0x1040,	/* R-SNonce2 */
	WLP_ATTR_SERIAL		= 0x1042,	/* Serial number */
	WLP_ATTR_UUID_E		= 0x1047,	/* UUID-E */
	WLP_ATTR_UUID_R		= 0x1048,	/* UUID-R */
	WLP_ATTR_PRI_DEV_TYPE	= 0x1054,	/* Primary Device Type */
	WLP_ATTR_SEC_DEV_TYPE	= 0x1055,	/* Secondary Device Type */
	WLP_ATTR_PORT_DEV	= 0x1056,	/* Portable Device */
	WLP_ATTR_APP_EXT	= 0x1058,	/* Application Extension */
	WLP_ATTR_WLP_VER	= 0x2000,	/* WLP Version */
	WLP_ATTR_WSSID		= 0x2001,	/* WSSID */
	WLP_ATTR_WSS_NAME	= 0x2002,	/* WSS Name */
	WLP_ATTR_WSS_SEC_STAT	= 0x2003,	/* WSS Secure Status */
	WLP_ATTR_WSS_BCAST	= 0x2004,	/* WSS Broadcast Address */
	WLP_ATTR_WSS_M_KEY	= 0x2005,	/* WSS Master Key */
	WLP_ATTR_ACC_ENRL	= 0x2006,	/* Accepting Enrollment */
	WLP_ATTR_WSS_INFO	= 0x2007,	/* WSS Information */
	WLP_ATTR_WSS_SEL_MTHD	= 0x2008,	/* WSS Selection Method */
	WLP_ATTR_ASSC_MTHD_LIST	= 0x2009,	/* Association Methods List */
	WLP_ATTR_SEL_ASSC_MTHD	= 0x200A,	/* Selected Association Method */
	WLP_ATTR_ENRL_HASH_COMM	= 0x200B,	/* Enrollee Hash Commitment */
	WLP_ATTR_WSS_TAG	= 0x200C,	/* WSS Tag */
	WLP_ATTR_WSS_VIRT	= 0x200D,	/* WSS Virtual EUI-48 */
	WLP_ATTR_WLP_ASSC_ERR	= 0x200E,	/* WLP Association Error */
	WLP_ATTR_VNDR_EXT	= 0x200F,	/* Vendor Extension */
};

/**
 * WLP Category ID of primary/secondary device
 * WLP Draft 0.99 [6.6.1.8 Table 12]
 */
enum wlp_dev_category_id {
	WLP_DEV_CAT_COMPUTER = 1,
	WLP_DEV_CAT_INPUT,
	WLP_DEV_CAT_PRINT_SCAN_FAX_COPIER,
	WLP_DEV_CAT_CAMERA,
	WLP_DEV_CAT_STORAGE,
	WLP_DEV_CAT_INFRASTRUCTURE,
	WLP_DEV_CAT_DISPLAY,
	WLP_DEV_CAT_MULTIM,
	WLP_DEV_CAT_GAMING,
	WLP_DEV_CAT_TELEPHONE,
	WLP_DEV_CAT_OTHER = 65535,
};

/**
 * WLP WSS selection method
 * WLP Draft 0.99 [6.6.1.6 Table 10]
 */
enum wlp_wss_sel_mthd {
	WLP_WSS_ENRL_SELECT = 1,	/* Enrollee selects */
	WLP_WSS_REG_SELECT,		/* Registrar selects */
};

/**
 * WLP association error values
 * WLP Draft 0.99 [6.6.1.5 Table 9]
 */
enum wlp_assc_error {
	WLP_ASSOC_ERROR_NONE,
	WLP_ASSOC_ERROR_AUTH,		/* Authenticator Failure */
	WLP_ASSOC_ERROR_ROGUE,		/* Rogue activity suspected */
	WLP_ASSOC_ERROR_BUSY,		/* Device busy */
	WLP_ASSOC_ERROR_LOCK,		/* Setup Locked */
	WLP_ASSOC_ERROR_NOT_READY,	/* Registrar not ready */
	WLP_ASSOC_ERROR_INV,		/* Invalid WSS selection */
	WLP_ASSOC_ERROR_MSG_TIME,	/* Message timeout */
	WLP_ASSOC_ERROR_ENR_TIME,	/* Enrollment session timeout */
	WLP_ASSOC_ERROR_PW,		/* Device password invalid */
	WLP_ASSOC_ERROR_VER,		/* Unsupported version */
	WLP_ASSOC_ERROR_INT,		/* Internal error */
	WLP_ASSOC_ERROR_UNDEF,		/* Undefined error */
	WLP_ASSOC_ERROR_NUM,		/* Numeric comparison failure */
	WLP_ASSOC_ERROR_WAIT,		/* Waiting for user input */
};

/**
 * WLP Parameters
 * WLP 0.99 [7.7]
 */
enum wlp_parameters {
	WLP_PER_MSG_TIMEOUT = 15,	/* Seconds to wait for response to
					   association message. */
};
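
/*
 * Illustrative sketch (not part of this header): the timeout is given in
 * seconds, so code waiting for an association response would typically
 * convert it to jiffies, e.g.:
 *
 *	if (!wait_for_completion_timeout(&session_done,
 *					 WLP_PER_MSG_TIMEOUT * HZ))
 *		result = -ETIMEDOUT;
 *
 * session_done is a hypothetical completion tracking the association
 * session.
 */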

/**
 * WLP IE
 *
 * The WLP IE should be included in beacons by all devices.
 *
 * The driver can set only a few of the fields in this information element;
 * most fields are managed by the device itself.  When the driver needs to
 * set a field it will only provide values for the fields of interest, the
 * rest will be filled with zeroes.  The fields of interest are:
 *
 * Element ID
 * Length
 * Capabilities (only to include WSSID Hash list length)
 * WSSID Hash List fields
 *
 * WLP 0.99 [6.7]
 *
 * Only the fields that will be used are detailed in this structure; the
 * rest are not detailed or are marked as "notused".
 */
struct wlp_ie {
	struct uwb_ie_hdr hdr;
	__le16 capabilities;
	__le16 cycle_param;
	__le16 acw_anchor_addr;
	u8 wssid_hash_list[];
} __attribute__((packed));

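/*
 * The WSSID hash list length lives in the top four bits of the WLP IE's
 * capabilities field; these two helpers read and update it without
 * disturbing the other capability bits.
 */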
static inline int wlp_ie_hash_length(struct wlp_ie *ie)
{
	return (le16_to_cpu(ie->capabilities) >> 12) & 0xf;
}

static inline void wlp_ie_set_hash_length(struct wlp_ie *ie, int hash_length)
{
	u16 caps = le16_to_cpu(ie->capabilities);
	caps = (caps & ~(0xf << 12)) | (hash_length << 12);
	ie->capabilities = cpu_to_le16(caps);
}

/**
 * WLP nonce
 * WLP Draft 0.99 [6.6.1 Table 6]
 *
 * A 128-bit random number often used (E-SNonce1, E-SNonce2, Enrollee
 * Nonce, Registrar Nonce, R-SNonce1, R-SNonce2).  It is passed to HW so
 * it is packed.
 */
struct wlp_nonce {
	u8 data[16];
} __attribute__((packed));

/**
 * WLP UUID
 * WLP Draft 0.99 [6.6.1 Table 6]
 *
 * Universally Unique Identifier (UUID) encoded as an octet string in the
 * order the octets are shown in string representation in RFC 4122.  A UUID
 * is often used (UUID-E, UUID-R, WSSID).  It is passed to HW so it is packed.
 */
struct wlp_uuid {
	u8 data[16];
} __attribute__((packed));


/**
 * Primary and secondary device type attributes
 * WLP Draft 0.99 [6.6.1.8]
 */
struct wlp_dev_type {
	enum wlp_dev_category_id category:16;
	u8 OUI[3];
	u8 OUIsubdiv;
	__le16 subID;
} __attribute__((packed));

/**
 * WLP frame header
 * WLP Draft 0.99 [6.2]
 */
struct wlp_frame_hdr {
	__le16 mux_hdr;			/* WLP_PROTOCOL_ID */
	enum wlp_frame_type type:8;
} __attribute__((packed));

/**
 * WLP attribute field header
 * WLP Draft 0.99 [6.6.1]
 *
 * Header of each attribute found in an association frame
 */
struct wlp_attr_hdr {
	__le16 type;
	__le16 length;
} __attribute__((packed));

/**
 * Device information commonly used together
 *
 * Each of these device information elements has a specified range in which
 * it should fit (WLP 0.99 [Table 6]).  The range given in the spec does not
 * include the terminating null '\0' character (in the association protocol
 * the attribute fields are accompanied by a "length" field, so the full
 * range from the spec can be used for the value).  We thus allocate an
 * extra byte so a string of maximum length can be stored with a
 * terminating '\0'.
 */
struct wlp_device_info {
	char name[33];
	char model_name[33];
	char manufacturer[65];
	char model_nr[33];
	char serial[33];
	struct wlp_dev_type prim_dev_type;
};
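
/*
 * Illustrative sketch (not part of this header): a driver's
 * fill_device_info callback (see struct wlp below) fills one of these,
 * relying on the extra byte for the terminating '\0' in every field, e.g.:
 *
 *	static void my_fill_device_info(struct wlp *wlp,
 *					struct wlp_device_info *info)
 *	{
 *		snprintf(info->name, sizeof(info->name), "Example device");
 *		snprintf(info->manufacturer, sizeof(info->manufacturer),
 *			 "Example Corp");
 *		info->prim_dev_type.category = WLP_DEV_CAT_OTHER;
 *	}
 *
 * my_fill_device_info and the strings are hypothetical; only the types and
 * category values come from this header.
 */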

/**
 * Macros for the WLP attributes
 *
 * There are quite a few attributes (total is 43).  The attribute layout can
 * be in one of three categories: a single value, an array, or an enum forced
 * to 8 bits.  These macros help with their definitions.
 */
#define wlp_attr(type, name)						\
struct wlp_attr_##name {						\
	struct wlp_attr_hdr hdr;					\
	type name;							\
} __attribute__((packed));

#define wlp_attr_array(type, name)					\
struct wlp_attr_##name {						\
	struct wlp_attr_hdr hdr;					\
	type name[];							\
} __attribute__((packed));
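
/*
 * For example, "wlp_attr(u8, version)" further down expands to:
 *
 *	struct wlp_attr_version {
 *		struct wlp_attr_hdr hdr;
 *		u8 version;
 *	} __attribute__((packed));
 *
 * and "wlp_attr_array(u8, dev_name)" produces the same layout with a
 * flexible array member "u8 dev_name[]" in place of the single value.
 */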

/**
 * WLP association attribute fields
 * WLP Draft 0.99 [6.6.1 Table 6]
 *
 * Attributes appear in the same order as the table in the spec
 * FIXME Does not define all attributes yet
 */

/* Device name: Friendly name of sending device */
wlp_attr_array(u8, dev_name)

/* Enrollee Nonce: Random number generated by enrollee for an enrollment
 * session */
wlp_attr(struct wlp_nonce, enonce)

/* Manufacturer name: Name of manufacturer of the sending device */
wlp_attr_array(u8, manufacturer)

/* WLP Message Type */
wlp_attr(u8, msg_type)

/* WLP Model name: Model name of sending device */
wlp_attr_array(u8, model_name)

/* WLP Model number: Model number of sending device */
wlp_attr_array(u8, model_nr)

/* Registrar Nonce: Random number generated by registrar for an enrollment
 * session */
wlp_attr(struct wlp_nonce, rnonce)

/* Serial number of device */
wlp_attr_array(u8, serial)

/* UUID of enrollee */
wlp_attr(struct wlp_uuid, uuid_e)

/* UUID of registrar */
wlp_attr(struct wlp_uuid, uuid_r)

/* WLP Primary device type */
wlp_attr(struct wlp_dev_type, prim_dev_type)

/* WLP Secondary device type */
wlp_attr(struct wlp_dev_type, sec_dev_type)

/* WLP protocol version */
wlp_attr(u8, version)

/* WLP service set identifier */
wlp_attr(struct wlp_uuid, wssid)

/* WLP WSS name */
wlp_attr_array(u8, wss_name)

/* WLP WSS Secure Status */
wlp_attr(u8, wss_sec_status)

/* WSS Broadcast Address */
wlp_attr(struct uwb_mac_addr, wss_bcast)

/* WLP Accepting Enrollment */
wlp_attr(u8, accept_enrl)

/**
 * WSS information attributes
 * WLP Draft 0.99 [6.6.3 Table 15]
 */
struct wlp_wss_info {
	struct wlp_attr_wssid wssid;
	struct wlp_attr_wss_name name;
	struct wlp_attr_accept_enrl accept;
	struct wlp_attr_wss_sec_status sec_stat;
	struct wlp_attr_wss_bcast bcast;
} __attribute__((packed));

/* WLP WSS Information */
wlp_attr_array(struct wlp_wss_info, wss_info)

/* WLP WSS Selection method */
wlp_attr(u8, wss_sel_mthd)

/* WLP WSS tag */
wlp_attr(u8, wss_tag)

/* WSS Virtual Address */
wlp_attr(struct uwb_mac_addr, wss_virt)

/* WLP association error */
wlp_attr(u8, wlp_assc_err)

/**
 * WLP standard and abbreviated frames
 *
 * WLP Draft 0.99 [6.3] and [6.4]
 *
 * The difference between the WLP standard frame and the WLP abbreviated
 * frame is that the standard frame includes the src and dest addresses
 * from the Ethernet header, the abbreviated frame does not.
 * The src/dest (as well as the type/length and client data) are already
 * defined as part of the Ethernet header, so we do not repeat them here.
 * From this perspective the standard and abbreviated frames appear the
 * same - they will be treated differently though.
 *
 * The size of this header is also captured in WLP_DATA_HLEN to enable
 * interfaces to prepare their headroom.
 */
struct wlp_frame_std_abbrv_hdr {
	struct wlp_frame_hdr hdr;
	u8 tag;
} __attribute__((packed));
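
/*
 * Illustrative sketch (not part of this header): pushing this header onto
 * an outgoing data frame, assuming the sk_buff already has WLP_DATA_HLEN
 * bytes of headroom:
 *
 *	struct wlp_frame_std_abbrv_hdr *hdr;
 *
 *	hdr = (void *)__skb_push(skb, sizeof(*hdr));
 *	hdr->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID);
 *	hdr->hdr.type = WLP_FRAME_STANDARD;
 *	hdr->tag = wss->tag;
 *
 * skb and wss are hypothetical locals (the frame being sent and the active
 * struct wlp_wss).
 */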

/**
 * WLP association frames
 *
 * WLP Draft 0.99 [6.6]
 */
struct wlp_frame_assoc {
	struct wlp_frame_hdr hdr;
	enum wlp_assoc_type type:8;
	struct wlp_attr_version version;
	struct wlp_attr_msg_type msg_type;
	u8 attr[];
} __attribute__((packed));
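
/*
 * Illustrative sketch (not part of this header): basic sanity checks on a
 * received association frame before dispatching on its message type:
 *
 *	struct wlp_frame_assoc *f = (void *)skb->data;
 *
 *	if (skb->len < sizeof(*f))
 *		return -EINVAL;
 *	if (le16_to_cpu(f->hdr.mux_hdr) != WLP_PROTOCOL_ID ||
 *	    f->hdr.type != WLP_FRAME_ASSOCIATION ||
 *	    f->version.version != WLP_VERSION)
 *		return -EINVAL;
 *
 * f->type then selects the handler (WLP_ASSOC_D1, WLP_ASSOC_E1, ...);
 * skb is a hypothetical sk_buff holding the received frame.
 */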

/* Ethernet to dev address mapping */
struct wlp_eda {
	spinlock_t lock;
	struct list_head cache;	/* Eth<->Dev Addr cache */
};

/**
 * WSS information temporary storage
 *
 * This information is only stored temporarily during discovery.  It should
 * not be stored unless the device is enrolled in the advertised WSS.  This
 * is done mainly because we follow the letter of the spec in this regard.
 * See WLP 0.99 [7.2.3].
 * When the device does become enrolled in a WSS the WSS information will
 * be stored as part of the more comprehensive struct wlp_wss.
 */
struct wlp_wss_tmp_info {
	char name[WLP_WSS_NAME_SIZE];
	u8 accept_enroll;
	u8 sec_status;
	struct uwb_mac_addr bcast;
};

struct wlp_wssid_e {
	struct list_head node;
	struct wlp_uuid wssid;
	struct wlp_wss_tmp_info *info;
};

/**
 * A cache entry of WLP neighborhood
 *
 * @node: head of list is wlp->neighbors
 * @wssid: list of wssids of this neighbor, element is wlp_wssid_e
 * @info: temporary storage for information learned during discovery.  This
 *        storage is used together with the wssid_e temporary storage
 *        during discovery.
 */
struct wlp_neighbor_e {
	struct list_head node;
	struct wlp_uuid uuid;
	struct uwb_dev *uwb_dev;
	struct list_head wssid;	/* Elements are wlp_wssid_e */
	struct wlp_device_info *info;
};

struct wlp;
/**
 * Information for an association session in progress.
 *
 * @exp_message: The type of the expected message.  Both this message and
 *               an F0 message (which can be sent in response to any
 *               association frame) will be accepted as a valid message
 *               for this session.
 * @cb:          The function that will be called upon receipt of this
 *               message.
 * @cb_priv:     Private data of callback
 * @data:        Data used in association process (always a sk_buff?)
 * @neighbor_addr: Address of neighbor with which association session is
 *               in progress.
 */
struct wlp_session {
	enum wlp_assoc_type exp_message;
	void (*cb)(struct wlp *);
	void *cb_priv;
	void *data;
	struct uwb_dev_addr neighbor_addr;
};

/**
 * WLP Service Set
 *
 * @mutex: used to protect entire WSS structure.
 *
 * @name: The WSS name is set to 65 bytes, 1 byte larger than the maximum
 *        allowed by the WLP spec.  This is to have a null terminated string
 *        for display to the user.  A maximum of 64 bytes will still be used
 *        when placing the WSS name field in association frames.
 *
 * @accept_enroll: Accepting enrollment: Set to one if registrar is
 *                 accepting enrollment in WSS, or zero otherwise.
 *
 * Global and local information for each WSS in which we are enrolled.
 * WLP 0.99 Section 7.2.1 and Section 7.2.2
 */
struct wlp_wss {
	struct mutex mutex;
	struct kobject kobj;
	/* Global properties. */
	struct wlp_uuid wssid;
	u8 hash;
	char name[WLP_WSS_NAME_SIZE];
	struct uwb_mac_addr bcast;
	u8 secure_status:1;
	u8 master_key[16];
	/* Local properties. */
	u8 tag;
	struct uwb_mac_addr virtual_addr;
	/* Extra */
	u8 accept_enroll:1;
	enum wlp_wss_state state;
};

/**
 * WLP main structure
 * @mutex: protect changes to WLP structure.  We only allow changes to the
 *         uuid, so currently this mutex only protects this field.
 */
struct wlp {
	struct mutex mutex;
	struct uwb_rc *rc;		/* UWB radio controller */
	struct uwb_pal pal;
	struct wlp_eda eda;
	struct wlp_uuid uuid;
	struct wlp_session *session;
	struct wlp_wss wss;
	struct mutex nbmutex;		/* Neighbor mutex protects neighbors list */
	struct list_head neighbors;	/* Elements are wlp_neighbor_e */
	struct uwb_notifs_handler uwb_notifs_handler;
	struct wlp_device_info *dev_info;
	void (*fill_device_info)(struct wlp *wlp, struct wlp_device_info *info);
	int (*xmit_frame)(struct wlp *, struct sk_buff *,
			  struct uwb_dev_addr *);
	void (*stop_queue)(struct wlp *);
	void (*start_queue)(struct wlp *);
};
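
/*
 * Illustrative sketch (not part of this header): a driver embeds a struct
 * wlp, fills in the callbacks and registers it with the UWB radio
 * controller it sits on:
 *
 *	struct my_dev {
 *		struct wlp wlp;
 *	};
 *
 *	md->wlp.fill_device_info = my_fill_device_info;
 *	md->wlp.xmit_frame = my_xmit_frame;
 *	md->wlp.stop_queue = my_stop_queue;
 *	md->wlp.start_queue = my_start_queue;
 *	result = wlp_setup(&md->wlp, uwb_rc);
 *	...
 *	wlp_remove(&md->wlp);
 *
 * my_dev, md, uwb_rc and the my_* callbacks are hypothetical; wlp_setup()
 * and wlp_remove() are declared below.
 */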

/* sysfs */


struct wlp_wss_attribute {
	struct attribute attr;
	ssize_t (*show)(struct wlp_wss *wss, char *buf);
	ssize_t (*store)(struct wlp_wss *wss, const char *buf, size_t count);
};

#define WSS_ATTR(_name, _mode, _show, _store) \
static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \
							   _show, _store)
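
/*
 * Illustrative sketch (not part of this header): WSS_ATTR pairs one of the
 * show/store routines declared below with a sysfs attribute name, e.g.:
 *
 *	WSS_ATTR(activate, S_IRUGO | S_IWUSR,
 *		 wlp_wss_activate_show, wlp_wss_activate_store);
 *
 * The resulting wss_attr_activate can then be exposed through the kobject
 * embedded in struct wlp_wss; how the driver groups its attributes is an
 * assumption not covered by this header.
 */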

extern int wlp_setup(struct wlp *, struct uwb_rc *);
extern void wlp_remove(struct wlp *);
extern ssize_t wlp_neighborhood_show(struct wlp *, char *);
extern int wlp_wss_setup(struct net_device *, struct wlp_wss *);
extern void wlp_wss_remove(struct wlp_wss *);
extern ssize_t wlp_wss_activate_show(struct wlp_wss *, char *);
extern ssize_t wlp_wss_activate_store(struct wlp_wss *, const char *, size_t);
extern ssize_t wlp_eda_show(struct wlp *, char *);
extern ssize_t wlp_eda_store(struct wlp *, const char *, size_t);
extern ssize_t wlp_uuid_show(struct wlp *, char *);
extern ssize_t wlp_uuid_store(struct wlp *, const char *, size_t);
extern ssize_t wlp_dev_name_show(struct wlp *, char *);
extern ssize_t wlp_dev_name_store(struct wlp *, const char *, size_t);
extern ssize_t wlp_dev_manufacturer_show(struct wlp *, char *);
extern ssize_t wlp_dev_manufacturer_store(struct wlp *, const char *, size_t);
extern ssize_t wlp_dev_model_name_show(struct wlp *, char *);
extern ssize_t wlp_dev_model_name_store(struct wlp *, const char *, size_t);
extern ssize_t wlp_dev_model_nr_show(struct wlp *, char *);
extern ssize_t wlp_dev_model_nr_store(struct wlp *, const char *, size_t);
extern ssize_t wlp_dev_serial_show(struct wlp *, char *);
extern ssize_t wlp_dev_serial_store(struct wlp *, const char *, size_t);
extern ssize_t wlp_dev_prim_category_show(struct wlp *, char *);
extern ssize_t wlp_dev_prim_category_store(struct wlp *, const char *,
					    size_t);
extern ssize_t wlp_dev_prim_OUI_show(struct wlp *, char *);
extern ssize_t wlp_dev_prim_OUI_store(struct wlp *, const char *, size_t);
extern ssize_t wlp_dev_prim_OUI_sub_show(struct wlp *, char *);
extern ssize_t wlp_dev_prim_OUI_sub_store(struct wlp *, const char *,
					   size_t);
extern ssize_t wlp_dev_prim_subcat_show(struct wlp *, char *);
extern ssize_t wlp_dev_prim_subcat_store(struct wlp *, const char *,
					 size_t);
extern int wlp_receive_frame(struct device *, struct wlp *, struct sk_buff *,
			     struct uwb_dev_addr *);
extern int wlp_prepare_tx_frame(struct device *, struct wlp *,
				struct sk_buff *, struct uwb_dev_addr *);
void wlp_reset_all(struct wlp *wlp);

/**
 * Initialize WSS
 */
static inline
void wlp_wss_init(struct wlp_wss *wss)
{
	mutex_init(&wss->mutex);
}

static inline
void wlp_init(struct wlp *wlp)
{
	INIT_LIST_HEAD(&wlp->neighbors);
	mutex_init(&wlp->mutex);
	mutex_init(&wlp->nbmutex);
	wlp_wss_init(&wlp->wss);
}


#endif /* #ifndef __LINUX__WLP_H_ */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 5c158c477ac7..b36291130f22 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -149,11 +149,11 @@ struct execute_work {
 
 extern struct workqueue_struct *
 __create_workqueue_key(const char *name, int singlethread,
-		       int freezeable, struct lock_class_key *key,
+		       int freezeable, int rt, struct lock_class_key *key,
 		       const char *lock_name);
 
 #ifdef CONFIG_LOCKDEP
-#define __create_workqueue(name, singlethread, freezeable)	\
+#define __create_workqueue(name, singlethread, freezeable, rt)	\
 ({									\
 	static struct lock_class_key __key;				\
 	const char *__lock_name;					\
@@ -164,17 +164,19 @@ __create_workqueue_key(const char *name, int singlethread,
 	__lock_name = #name;						\
 									\
 	__create_workqueue_key((name), (singlethread),			\
-			       (freezeable), &__key,			\
+			       (freezeable), (rt), &__key,		\
 			       __lock_name);				\
 })
 #else
-#define __create_workqueue(name, singlethread, freezeable)	\
-	__create_workqueue_key((name), (singlethread), (freezeable), NULL, NULL)
+#define __create_workqueue(name, singlethread, freezeable, rt)	\
+	__create_workqueue_key((name), (singlethread), (freezeable), (rt), \
+			       NULL, NULL)
 #endif
 
-#define create_workqueue(name) __create_workqueue((name), 0, 0)
-#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
-#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
+#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
+#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
+#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)
+#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
@@ -238,4 +240,12 @@ void cancel_rearming_delayed_work(struct delayed_work *work)
 	cancel_delayed_work_sync(work);
 }
 
+#ifndef CONFIG_SMP
+static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+{
+	return fn(arg);
+}
+#else
+long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg);
+#endif /* CONFIG_SMP */
 #endif
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 12b15c561a1f..e585657e9831 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -63,7 +63,15 @@ struct writeback_control {
 	unsigned for_writepages:1;	/* This is a writepages() call */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
 	unsigned more_io:1;		/* more io to be dispatched */
-	unsigned range_cont:1;
+	/*
+	 * write_cache_pages() won't update wbc->nr_to_write and
+	 * mapping->writeback_index if no_nrwrite_index_update
+	 * is set. write_cache_pages() may write more than we
+	 * requested and we want to make sure nr_to_write and
+	 * writeback_index are updated in a consistent manner
+	 * so we use a single control to update them
+	 */
+	unsigned no_nrwrite_index_update:1;
 };
 
 /*
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 4bc1e6b86cb2..52f3abd453a1 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -199,6 +199,9 @@ enum {
 #define XFRM_MSG_NEWSPDINFO XFRM_MSG_NEWSPDINFO
 	XFRM_MSG_GETSPDINFO,
 #define XFRM_MSG_GETSPDINFO XFRM_MSG_GETSPDINFO
+
+	XFRM_MSG_MAPPING,
+#define XFRM_MSG_MAPPING XFRM_MSG_MAPPING
 	__XFRM_MSG_MAX
 };
 #define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -438,6 +441,15 @@ struct xfrm_user_migrate {
 	__u16				new_family;
 };
 
+struct xfrm_user_mapping {
+	struct xfrm_usersa_id		id;
+	__u32				reqid;
+	xfrm_address_t			old_saddr;
+	xfrm_address_t			new_saddr;
+	__be16				old_sport;
+	__be16				new_sport;
+};
+
 #ifndef __KERNEL__
 /* backwards compatibility for userspace */
 #define XFRMGRP_ACQUIRE 1
@@ -464,6 +476,8 @@ enum xfrm_nlgroups {
 #define XFRMNLGRP_REPORT XFRMNLGRP_REPORT
 	XFRMNLGRP_MIGRATE,
 #define XFRMNLGRP_MIGRATE XFRMNLGRP_MIGRATE
+	XFRMNLGRP_MAPPING,
+#define XFRMNLGRP_MAPPING XFRMNLGRP_MAPPING
 	__XFRMNLGRP_MAX
 };
 #define XFRMNLGRP_MAX (__XFRMNLGRP_MAX - 1)