aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2009-04-07 16:34:16 -0400
committerJeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>2009-04-07 16:34:16 -0400
commit38f4b8c0da01ae7cd9b93386842ce272d6fde9ab (patch)
tree3c8c52201aac038094bfea7efdd0984a8f62045e /include
parenta811454027352c762e0d5bba1b1d8f7d26bf96ae (diff)
parent8e2c4f2844c0e8dcdfe312e5f2204854ca8532c6 (diff)
Merge commit 'origin/master' into for-linus/xen/master
* commit 'origin/master': (4825 commits) Fix build errors due to CONFIG_BRANCH_TRACER=y parport: Use the PCI IRQ if offered tty: jsm cleanups Adjust path to gpio headers KGDB_SERIAL_CONSOLE check for module Change KCONFIG name tty: Blackin CTS/RTS Change hardware flow control from poll to interrupt driven Add support for the MAX3100 SPI UART. lanana: assign a device name and numbering for MAX3100 serqt: initial clean up pass for tty side tty: Use the generic RS485 ioctl on CRIS tty: Correct inline types for tty_driver_kref_get() splice: fix deadlock in splicing to file nilfs2: support nanosecond timestamp nilfs2: introduce secondary super block nilfs2: simplify handling of active state of segments nilfs2: mark minor flag for checkpoint created by internal operation nilfs2: clean up sketch file nilfs2: super block operations fix endian bug ... Conflicts: arch/x86/include/asm/thread_info.h arch/x86/lguest/boot.c drivers/xen/manage.c
Diffstat (limited to 'include')
-rw-r--r--include/acpi/acexcep.h6
-rw-r--r--include/acpi/acpi_bus.h18
-rw-r--r--include/acpi/acpi_drivers.h33
-rw-r--r--include/acpi/acpiosxf.h4
-rw-r--r--include/acpi/acpixf.h18
-rw-r--r--include/acpi/actbl.h74
-rw-r--r--include/acpi/actbl1.h6
-rw-r--r--include/acpi/actypes.h18
-rw-r--r--include/acpi/processor.h4
-rw-r--r--include/acpi/video.h11
-rw-r--r--include/asm-frv/ftrace.h1
-rw-r--r--include/asm-frv/highmem.h2
-rw-r--r--include/asm-generic/dma-mapping.h308
-rw-r--r--include/asm-generic/gpio.h5
-rw-r--r--include/asm-generic/topology.h10
-rw-r--r--include/asm-generic/vmlinux.lds.h29
-rw-r--r--include/asm-m32r/ftrace.h1
-rw-r--r--include/asm-m32r/spinlock.h3
-rw-r--r--include/asm-mn10300/ftrace.h1
-rw-r--r--include/asm-mn10300/highmem.h2
-rw-r--r--include/drm/drmP.h68
-rw-r--r--include/drm/drm_crtc.h6
-rw-r--r--include/drm/drm_crtc_helper.h3
-rw-r--r--include/drm/drm_os_linux.h19
-rw-r--r--include/drm/drm_pciids.h113
-rw-r--r--include/drm/radeon_drm.h5
-rw-r--r--include/linux/Kbuild3
-rw-r--r--include/linux/acpi.h35
-rw-r--r--include/linux/async_tx.h9
-rw-r--r--include/linux/ata.h41
-rw-r--r--include/linux/auto_dev-ioctl.h7
-rw-r--r--include/linux/auto_fs.h6
-rw-r--r--include/linux/backing-dev.h12
-rw-r--r--include/linux/binfmts.h3
-rw-r--r--include/linux/bio.h19
-rw-r--r--include/linux/blkdev.h57
-rw-r--r--include/linux/blktrace_api.h5
-rw-r--r--include/linux/bootmem.h6
-rw-r--r--include/linux/buffer_head.h14
-rw-r--r--include/linux/cgroup.h142
-rw-r--r--include/linux/compat.h6
-rw-r--r--include/linux/compiler.h10
-rw-r--r--include/linux/connector.h4
-rw-r--r--include/linux/cpu.h16
-rw-r--r--include/linux/cpuset.h37
-rw-r--r--include/linux/debugfs.h8
-rw-r--r--include/linux/device-mapper.h3
-rw-r--r--include/linux/dm-dirty-log.h13
-rw-r--r--include/linux/dma-debug.h174
-rw-r--r--include/linux/dma-mapping.h50
-rw-r--r--include/linux/dma_remapping.h1
-rw-r--r--include/linux/dmaengine.h30
-rw-r--r--include/linux/dmar.h63
-rw-r--r--include/linux/dmi.h7
-rw-r--r--include/linux/ds1wm.h12
-rw-r--r--include/linux/dst.h587
-rw-r--r--include/linux/dw_dmac.h19
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/eventfd.h12
-rw-r--r--include/linux/ext3_fs.h6
-rw-r--r--include/linux/fb.h11
-rw-r--r--include/linux/fdtable.h4
-rw-r--r--include/linux/fs.h63
-rw-r--r--include/linux/fs_struct.h10
-rw-r--r--include/linux/fscache-cache.h505
-rw-r--r--include/linux/fscache.h618
-rw-r--r--include/linux/fsl_devices.h29
-rw-r--r--include/linux/ftrace.h264
-rw-r--r--include/linux/ftrace_irq.h2
-rw-r--r--include/linux/gfp.h1
-rw-r--r--include/linux/hardirq.h73
-rw-r--r--include/linux/hdreg.h66
-rw-r--r--include/linux/hid.h23
-rw-r--r--include/linux/highmem.h17
-rw-r--r--include/linux/hrtimer.h5
-rw-r--r--include/linux/i2c-algo-sgi.h26
-rw-r--r--include/linux/i2c-id.h38
-rw-r--r--include/linux/i2c.h4
-rw-r--r--include/linux/i2c/at24.h4
-rw-r--r--include/linux/i2c/s6000.h10
-rw-r--r--include/linux/i2c/twl4030.h47
-rw-r--r--include/linux/ide.h285
-rw-r--r--include/linux/idr.h1
-rw-r--r--include/linux/intel-iommu.h27
-rw-r--r--include/linux/interrupt.h15
-rw-r--r--include/linux/iommu.h13
-rw-r--r--include/linux/ipc_namespace.h65
-rw-r--r--include/linux/ipmi_smi.h2
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/irqflags.h8
-rw-r--r--include/linux/ivtv.h10
-rw-r--r--include/linux/jbd.h7
-rw-r--r--include/linux/jbd2.h6
-rw-r--r--include/linux/kallsyms.h15
-rw-r--r--include/linux/kernel.h168
-rw-r--r--include/linux/key.h1
-rw-r--r--include/linux/kmod.h11
-rw-r--r--include/linux/kprobes.h52
-rw-r--r--include/linux/leds-bd2802.h26
-rw-r--r--include/linux/leds.h4
-rw-r--r--include/linux/leds_pwm.h21
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/lockd/xdr.h12
-rw-r--r--include/linux/lockd/xdr4.h10
-rw-r--r--include/linux/lockdep.h67
-rw-r--r--include/linux/loop.h1
-rw-r--r--include/linux/memcontrol.h20
-rw-r--r--include/linux/memory.h17
-rw-r--r--include/linux/mfd/ds1wm.h6
-rw-r--r--include/linux/mfd/htc-pasic3.h1
-rw-r--r--include/linux/mfd/wm8350/core.h2
-rw-r--r--include/linux/mg_disk.h206
-rw-r--r--include/linux/mm.h9
-rw-r--r--include/linux/mm_types.h4
-rw-r--r--include/linux/mmc/host.h5
-rw-r--r--include/linux/mmzone.h14
-rw-r--r--include/linux/mnt_namespace.h2
-rw-r--r--include/linux/module.h68
-rw-r--r--include/linux/moduleparam.h10
-rw-r--r--include/linux/mpage.h10
-rw-r--r--include/linux/msi.h13
-rw-r--r--include/linux/mtd/mtd.h21
-rw-r--r--include/linux/mtd/nand.h4
-rw-r--r--include/linux/mtd/partitions.h12
-rw-r--r--include/linux/mutex.h5
-rw-r--r--include/linux/netpoll.h2
-rw-r--r--include/linux/nfs.h1
-rw-r--r--include/linux/nfs4.h138
-rw-r--r--include/linux/nfs_fs.h17
-rw-r--r--include/linux/nfs_fs_sb.h16
-rw-r--r--include/linux/nfs_iostat.h12
-rw-r--r--include/linux/nfs_xdr.h59
-rw-r--r--include/linux/nfsd/cache.h8
-rw-r--r--include/linux/nfsd/nfsd.h225
-rw-r--r--include/linux/nfsd/nfsfh.h7
-rw-r--r--include/linux/nfsd/state.h84
-rw-r--r--include/linux/nfsd/stats.h9
-rw-r--r--include/linux/nfsd/xdr4.h129
-rw-r--r--include/linux/nilfs2_fs.h801
-rw-r--r--include/linux/nsproxy.h1
-rw-r--r--include/linux/page-debug-flags.h30
-rw-r--r--include/linux/page-flags.h60
-rw-r--r--include/linux/page_cgroup.h13
-rw-r--r--include/linux/pagemap.h17
-rw-r--r--include/linux/pagevec.h1
-rw-r--r--include/linux/parport_pc.h11
-rw-r--r--include/linux/pci-acpi.h67
-rw-r--r--include/linux/pci.h67
-rw-r--r--include/linux/pci_ids.h36
-rw-r--r--include/linux/pci_regs.h37
-rw-r--r--include/linux/pcieport_if.h36
-rw-r--r--include/linux/poison.h3
-rw-r--r--include/linux/power_supply.h2
-rw-r--r--include/linux/proc_fs.h4
-rw-r--r--include/linux/ptrace.h1
-rw-r--r--include/linux/pwm.h2
-rw-r--r--include/linux/raid/bitmap.h288
-rw-r--r--include/linux/raid/linear.h31
-rw-r--r--include/linux/raid/md.h81
-rw-r--r--include/linux/raid/md_k.h402
-rw-r--r--include/linux/raid/md_u.h35
-rw-r--r--include/linux/raid/multipath.h42
-rw-r--r--include/linux/raid/pq.h132
-rw-r--r--include/linux/raid/raid0.h30
-rw-r--r--include/linux/raid/raid1.h134
-rw-r--r--include/linux/raid/raid10.h123
-rw-r--r--include/linux/raid/raid5.h402
-rw-r--r--include/linux/raid/xor.h2
-rw-r--r--include/linux/rcuclassic.h16
-rw-r--r--include/linux/rcupdate.h1
-rw-r--r--include/linux/rcupreempt.h53
-rw-r--r--include/linux/rcutree.h27
-rw-r--r--include/linux/regulator/bq24022.h3
-rw-r--r--include/linux/regulator/consumer.h6
-rw-r--r--include/linux/regulator/driver.h81
-rw-r--r--include/linux/regulator/fixed.h3
-rw-r--r--include/linux/regulator/machine.h12
-rw-r--r--include/linux/reiserfs_acl.h22
-rw-r--r--include/linux/reiserfs_fs.h168
-rw-r--r--include/linux/reiserfs_fs_i.h4
-rw-r--r--include/linux/reiserfs_fs_sb.h31
-rw-r--r--include/linux/reiserfs_xattr.h154
-rw-r--r--include/linux/ring_buffer.h38
-rw-r--r--include/linux/rtc-v3020.h6
-rw-r--r--include/linux/rtc.h6
-rw-r--r--include/linux/sched.h94
-rw-r--r--include/linux/security.h24
-rw-r--r--include/linux/seq_file.h9
-rw-r--r--include/linux/serial_core.h5
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/slab_def.h68
-rw-r--r--include/linux/slob_def.h9
-rw-r--r--include/linux/slow-work.h95
-rw-r--r--include/linux/slub_def.h49
-rw-r--r--include/linux/smp.h7
-rw-r--r--include/linux/sonypi.h8
-rw-r--r--include/linux/spi/eeprom.h6
-rw-r--r--include/linux/spi/spi.h6
-rw-r--r--include/linux/spi/spi_gpio.h6
-rw-r--r--include/linux/spinlock.h6
-rw-r--r--include/linux/string.h17
-rw-r--r--include/linux/sunrpc/svc.h25
-rw-r--r--include/linux/sunrpc/svc_xprt.h52
-rw-r--r--include/linux/sunrpc/xdr.h42
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/suspend.h3
-rw-r--r--include/linux/swap.h7
-rw-r--r--include/linux/swiotlb.h41
-rw-r--r--include/linux/synclink.h1
-rw-r--r--include/linux/syscalls.h64
-rw-r--r--include/linux/thermal.h48
-rw-r--r--include/linux/timer.h93
-rw-r--r--include/linux/timeriomem-rng.h2
-rw-r--r--include/linux/topology.h11
-rw-r--r--include/linux/trace_clock.h19
-rw-r--r--include/linux/tracehook.h15
-rw-r--r--include/linux/tracepoint.h116
-rw-r--r--include/linux/tty_driver.h6
-rw-r--r--include/linux/usb/wusb.h3
-rw-r--r--include/linux/video_decoder.h48
-rw-r--r--include/linux/video_encoder.h23
-rw-r--r--include/linux/videodev.h18
-rw-r--r--include/linux/videodev2.h74
-rw-r--r--include/linux/wait.h29
-rw-r--r--include/linux/workqueue.h5
-rw-r--r--include/linux/writeback.h6
-rw-r--r--include/media/bt819.h33
-rw-r--r--include/media/cx2341x.h6
-rw-r--r--include/media/cx25840.h12
-rw-r--r--include/media/ir-common.h3
-rw-r--r--include/media/ir-kbd-i2c.h3
-rw-r--r--include/media/msp3400.h4
-rw-r--r--include/media/ov772x.h40
-rw-r--r--include/media/saa7146.h10
-rw-r--r--include/media/saa7146_vv.h17
-rw-r--r--include/media/sh_mobile_ceu.h5
-rw-r--r--include/media/soc_camera.h24
-rw-r--r--include/media/tvaudio.h19
-rw-r--r--include/media/v4l2-chip-ident.h94
-rw-r--r--include/media/v4l2-common.h159
-rw-r--r--include/media/v4l2-dev.h2
-rw-r--r--include/media/v4l2-device.h40
-rw-r--r--include/media/v4l2-i2c-drv-legacy.h152
-rw-r--r--include/media/v4l2-i2c-drv.h6
-rw-r--r--include/media/v4l2-ioctl.h2
-rw-r--r--include/media/v4l2-subdev.h137
-rw-r--r--include/media/videobuf-core.h1
-rw-r--r--include/net/tcp.h15
-rw-r--r--include/scsi/fc/fc_fip.h237
-rw-r--r--include/scsi/fc_transport_fcoe.h54
-rw-r--r--include/scsi/libfc.h45
-rw-r--r--include/scsi/libfcoe.h227
-rw-r--r--include/sound/tea575x-tuner.h8
-rw-r--r--include/trace/block.h70
-rw-r--r--include/trace/irq.h9
-rw-r--r--include/trace/irq_event_types.h55
-rw-r--r--include/trace/kmemtrace.h63
-rw-r--r--include/trace/lockdep.h9
-rw-r--r--include/trace/lockdep_event_types.h44
-rw-r--r--include/trace/power.h32
-rw-r--r--include/trace/sched.h49
-rw-r--r--include/trace/sched_event_types.h337
-rw-r--r--include/trace/skb.h4
-rw-r--r--include/trace/trace_event_types.h5
-rw-r--r--include/trace/trace_events.h5
-rw-r--r--include/trace/workqueue.h25
-rw-r--r--include/video/aty128.h2
-rw-r--r--include/video/cirrus.h2
-rw-r--r--include/video/newport.h4
-rw-r--r--include/video/radeon.h564
-rw-r--r--include/video/s1d13xxxfb.h16
-rw-r--r--include/video/tdfx.h26
272 files changed, 8797 insertions, 4536 deletions
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index eda04546cdf6..473d584b1d31 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -103,8 +103,9 @@
103#define AE_BAD_OCTAL_CONSTANT (acpi_status) (0x0006 | AE_CODE_PROGRAMMER) 103#define AE_BAD_OCTAL_CONSTANT (acpi_status) (0x0006 | AE_CODE_PROGRAMMER)
104#define AE_BAD_DECIMAL_CONSTANT (acpi_status) (0x0007 | AE_CODE_PROGRAMMER) 104#define AE_BAD_DECIMAL_CONSTANT (acpi_status) (0x0007 | AE_CODE_PROGRAMMER)
105#define AE_MISSING_ARGUMENTS (acpi_status) (0x0008 | AE_CODE_PROGRAMMER) 105#define AE_MISSING_ARGUMENTS (acpi_status) (0x0008 | AE_CODE_PROGRAMMER)
106#define AE_BAD_ADDRESS (acpi_status) (0x0009 | AE_CODE_PROGRAMMER)
106 107
107#define AE_CODE_PGM_MAX 0x0008 108#define AE_CODE_PGM_MAX 0x0009
108 109
109/* 110/*
110 * Acpi table exceptions 111 * Acpi table exceptions
@@ -224,7 +225,8 @@ char const *acpi_gbl_exception_names_pgm[] = {
224 "AE_BAD_HEX_CONSTANT", 225 "AE_BAD_HEX_CONSTANT",
225 "AE_BAD_OCTAL_CONSTANT", 226 "AE_BAD_OCTAL_CONSTANT",
226 "AE_BAD_DECIMAL_CONSTANT", 227 "AE_BAD_DECIMAL_CONSTANT",
227 "AE_MISSING_ARGUMENTS" 228 "AE_MISSING_ARGUMENTS",
229 "AE_BAD_ADDRESS"
228}; 230};
229 231
230char const *acpi_gbl_exception_names_tbl[] = { 232char const *acpi_gbl_exception_names_tbl[] = {
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index e9f6574930ef..a2228511d4be 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -88,44 +88,30 @@ struct acpi_device;
88 88
89typedef int (*acpi_op_add) (struct acpi_device * device); 89typedef int (*acpi_op_add) (struct acpi_device * device);
90typedef int (*acpi_op_remove) (struct acpi_device * device, int type); 90typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
91typedef int (*acpi_op_lock) (struct acpi_device * device, int type);
92typedef int (*acpi_op_start) (struct acpi_device * device); 91typedef int (*acpi_op_start) (struct acpi_device * device);
93typedef int (*acpi_op_stop) (struct acpi_device * device, int type); 92typedef int (*acpi_op_stop) (struct acpi_device * device, int type);
94typedef int (*acpi_op_suspend) (struct acpi_device * device, 93typedef int (*acpi_op_suspend) (struct acpi_device * device,
95 pm_message_t state); 94 pm_message_t state);
96typedef int (*acpi_op_resume) (struct acpi_device * device); 95typedef int (*acpi_op_resume) (struct acpi_device * device);
97typedef int (*acpi_op_scan) (struct acpi_device * device);
98typedef int (*acpi_op_bind) (struct acpi_device * device); 96typedef int (*acpi_op_bind) (struct acpi_device * device);
99typedef int (*acpi_op_unbind) (struct acpi_device * device); 97typedef int (*acpi_op_unbind) (struct acpi_device * device);
100typedef int (*acpi_op_shutdown) (struct acpi_device * device); 98typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event);
101 99
102struct acpi_bus_ops { 100struct acpi_bus_ops {
103 u32 acpi_op_add:1; 101 u32 acpi_op_add:1;
104 u32 acpi_op_remove:1;
105 u32 acpi_op_lock:1;
106 u32 acpi_op_start:1; 102 u32 acpi_op_start:1;
107 u32 acpi_op_stop:1;
108 u32 acpi_op_suspend:1;
109 u32 acpi_op_resume:1;
110 u32 acpi_op_scan:1;
111 u32 acpi_op_bind:1;
112 u32 acpi_op_unbind:1;
113 u32 acpi_op_shutdown:1;
114 u32 reserved:21;
115}; 103};
116 104
117struct acpi_device_ops { 105struct acpi_device_ops {
118 acpi_op_add add; 106 acpi_op_add add;
119 acpi_op_remove remove; 107 acpi_op_remove remove;
120 acpi_op_lock lock;
121 acpi_op_start start; 108 acpi_op_start start;
122 acpi_op_stop stop; 109 acpi_op_stop stop;
123 acpi_op_suspend suspend; 110 acpi_op_suspend suspend;
124 acpi_op_resume resume; 111 acpi_op_resume resume;
125 acpi_op_scan scan;
126 acpi_op_bind bind; 112 acpi_op_bind bind;
127 acpi_op_unbind unbind; 113 acpi_op_unbind unbind;
128 acpi_op_shutdown shutdown; 114 acpi_op_notify notify;
129}; 115};
130 116
131struct acpi_driver { 117struct acpi_driver {
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 5fc1bb0f4a90..0352c8f0b05b 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -67,6 +67,16 @@
67#define ACPI_BAY_HID "LNXIOBAY" 67#define ACPI_BAY_HID "LNXIOBAY"
68#define ACPI_DOCK_HID "LNXDOCK" 68#define ACPI_DOCK_HID "LNXDOCK"
69 69
70/*
71 * For fixed hardware buttons, we fabricate acpi_devices with HID
72 * ACPI_BUTTON_HID_POWERF or ACPI_BUTTON_HID_SLEEPF. Fixed hardware
73 * signals only an event; it doesn't supply a notification value.
74 * To allow drivers to treat notifications from fixed hardware the
75 * same as those from real devices, we turn the events into this
76 * notification value.
77 */
78#define ACPI_FIXED_HARDWARE_EVENT 0x100
79
70/* -------------------------------------------------------------------------- 80/* --------------------------------------------------------------------------
71 PCI 81 PCI
72 -------------------------------------------------------------------------- */ 82 -------------------------------------------------------------------------- */
@@ -99,24 +109,6 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
99 int bus); 109 int bus);
100 110
101/* -------------------------------------------------------------------------- 111/* --------------------------------------------------------------------------
102 Power Resource
103 -------------------------------------------------------------------------- */
104
105int acpi_device_sleep_wake(struct acpi_device *dev,
106 int enable, int sleep_state, int dev_state);
107int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state);
108int acpi_disable_wakeup_device_power(struct acpi_device *dev);
109int acpi_power_get_inferred_state(struct acpi_device *device);
110int acpi_power_transition(struct acpi_device *device, int state);
111extern int acpi_power_nocheck;
112
113/* --------------------------------------------------------------------------
114 Embedded Controller
115 -------------------------------------------------------------------------- */
116int acpi_ec_ecdt_probe(void);
117int acpi_boot_ec_enable(void);
118
119/* --------------------------------------------------------------------------
120 Processor 112 Processor
121 -------------------------------------------------------------------------- */ 113 -------------------------------------------------------------------------- */
122 114
@@ -165,9 +157,4 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle)
165} 157}
166#endif 158#endif
167 159
168/*--------------------------------------------------------------------------
169 Suspend/Resume
170 -------------------------------------------------------------------------- */
171extern int acpi_sleep_init(void);
172
173#endif /*__ACPI_DRIVERS_H__*/ 160#endif /*__ACPI_DRIVERS_H__*/
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index ab0b85cf21f3..3e798593b17b 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -242,10 +242,6 @@ acpi_os_derive_pci_id(acpi_handle rhandle,
242acpi_status acpi_os_validate_interface(char *interface); 242acpi_status acpi_os_validate_interface(char *interface);
243acpi_status acpi_osi_invalidate(char* interface); 243acpi_status acpi_osi_invalidate(char* interface);
244 244
245acpi_status
246acpi_os_validate_address(u8 space_id, acpi_physical_address address,
247 acpi_size length, char *name);
248
249u64 acpi_os_get_timer(void); 245u64 acpi_os_get_timer(void);
250 246
251acpi_status acpi_os_signal(u32 function, void *info); 247acpi_status acpi_os_signal(u32 function, void *info);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index cc40102fe2f3..4db89e98535d 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
47 47
48/* Current ACPICA subsystem version in YYYYMMDD format */ 48/* Current ACPICA subsystem version in YYYYMMDD format */
49 49
50#define ACPI_CA_VERSION 0x20081204 50#define ACPI_CA_VERSION 0x20090320
51 51
52#include "actypes.h" 52#include "actypes.h"
53#include "actbl.h" 53#include "actbl.h"
@@ -191,14 +191,12 @@ acpi_evaluate_object(acpi_handle object,
191 struct acpi_object_list *parameter_objects, 191 struct acpi_object_list *parameter_objects,
192 struct acpi_buffer *return_object_buffer); 192 struct acpi_buffer *return_object_buffer);
193 193
194#ifdef ACPI_FUTURE_USAGE
195acpi_status 194acpi_status
196acpi_evaluate_object_typed(acpi_handle object, 195acpi_evaluate_object_typed(acpi_handle object,
197 acpi_string pathname, 196 acpi_string pathname,
198 struct acpi_object_list *external_params, 197 struct acpi_object_list *external_params,
199 struct acpi_buffer *return_buffer, 198 struct acpi_buffer *return_buffer,
200 acpi_object_type return_type); 199 acpi_object_type return_type);
201#endif
202 200
203acpi_status 201acpi_status
204acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer); 202acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer);
@@ -349,17 +347,15 @@ acpi_resource_to_address64(struct acpi_resource *resource,
349 */ 347 */
350acpi_status acpi_reset(void); 348acpi_status acpi_reset(void);
351 349
352acpi_status acpi_get_register(u32 register_id, u32 * return_value); 350acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value);
353
354acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value);
355 351
356acpi_status acpi_set_register(u32 register_id, u32 value); 352acpi_status acpi_write_bit_register(u32 register_id, u32 value);
357 353
358acpi_status 354acpi_status acpi_set_firmware_waking_vector(u32 physical_address);
359acpi_set_firmware_waking_vector(u32 physical_address);
360 355
361acpi_status 356#if ACPI_MACHINE_WIDTH == 64
362acpi_set_firmware_waking_vector64(u64 physical_address); 357acpi_status acpi_set_firmware_waking_vector64(u64 physical_address);
358#endif
363 359
364acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg); 360acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg);
365 361
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index bf8d4cfd8cf5..222733d01f36 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -214,11 +214,11 @@ struct acpi_table_fadt {
214 u16 flush_size; /* Processor's memory cache line width, in bytes */ 214 u16 flush_size; /* Processor's memory cache line width, in bytes */
215 u16 flush_stride; /* Number of flush strides that need to be read */ 215 u16 flush_stride; /* Number of flush strides that need to be read */
216 u8 duty_offset; /* Processor duty cycle index in processor's P_CNT reg */ 216 u8 duty_offset; /* Processor duty cycle index in processor's P_CNT reg */
217 u8 duty_width; /* Processor duty cycle value bit width in P_CNT register. */ 217 u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */
218 u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ 218 u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */
219 u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ 219 u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */
220 u8 century; /* Index to century in RTC CMOS RAM */ 220 u8 century; /* Index to century in RTC CMOS RAM */
221 u16 boot_flags; /* IA-PC Boot Architecture Flags. See Table 5-10 for description */ 221 u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */
222 u8 reserved; /* Reserved, must be zero */ 222 u8 reserved; /* Reserved, must be zero */
223 u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ 223 u32 flags; /* Miscellaneous flag bits (see below for individual flags) */
224 struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ 224 struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */
@@ -236,32 +236,41 @@ struct acpi_table_fadt {
236 struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ 236 struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
237}; 237};
238 238
239/* FADT Boot Architecture Flags (boot_flags) */
240
241#define ACPI_FADT_LEGACY_DEVICES (1) /* 00: [V2] System has LPC or ISA bus devices */
242#define ACPI_FADT_8042 (1<<1) /* 01: [V3] System has an 8042 controller on port 60/64 */
243#define ACPI_FADT_NO_VGA (1<<2) /* 02: [V4] It is not safe to probe for VGA hardware */
244#define ACPI_FADT_NO_MSI (1<<3) /* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */
245#define ACPI_FADT_NO_ASPM (1<<4) /* 04: [V4] PCIe ASPM control must not be enabled */
246
247#define FADT2_REVISION_ID 3
248
239/* FADT flags */ 249/* FADT flags */
240 250
241#define ACPI_FADT_WBINVD (1) /* 00: The wbinvd instruction works properly */ 251#define ACPI_FADT_WBINVD (1) /* 00: [V1] The wbinvd instruction works properly */
242#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: The wbinvd flushes but does not invalidate */ 252#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: [V1] wbinvd flushes but does not invalidate caches */
243#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: All processors support C1 state */ 253#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: [V1] All processors support C1 state */
244#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: C2 state works on MP system */ 254#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: [V1] C2 state works on MP system */
245#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: Power button is handled as a generic feature */ 255#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: [V1] Power button is handled as a control method device */
246#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: Sleep button is handled as a generic feature, or not present */ 256#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: [V1] Sleep button is handled as a control method device */
247#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: RTC wakeup stat not in fixed register space */ 257#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: [V1] RTC wakeup status not in fixed register space */
248#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: RTC wakeup possible from S4 */ 258#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: [V1] RTC alarm can wake system from S4 */
249#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: tmr_val is 32 bits 0=24-bits */ 259#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: [V1] ACPI timer width is 32-bit (0=24-bit) */
250#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: Docking supported */ 260#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: [V1] Docking supported */
251#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: System reset via the FADT RESET_REG supported */ 261#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: [V2] System reset via the FADT RESET_REG supported */
252#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: No internal expansion capabilities and case is sealed */ 262#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: [V3] No internal expansion capabilities and case is sealed */
253#define ACPI_FADT_HEADLESS (1<<12) /* 12: No local video capabilities or local input devices */ 263#define ACPI_FADT_HEADLESS (1<<12) /* 12: [V3] No local video capabilities or local input devices */
254#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: Must execute native instruction after writing SLP_TYPx register */ 264#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: [V3] Must execute native instruction after writing SLP_TYPx register */
255#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */ 265#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: [V4] System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
256#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: OSPM should use platform-provided timer (ACPI 3.0) */ 266#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: [V4] OSPM should use platform-provided timer (ACPI 3.0) */
257#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */ 267#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: [V4] Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
258#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: System is compatible with remote power on (ACPI 3.0) */ 268#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */
259#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: All local APICs must use cluster model (ACPI 3.0) */ 269#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
260#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */ 270#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */
271
272/* FADT Prefered Power Management Profiles */
261 273
262/*
263 * FADT Prefered Power Management Profiles
264 */
265enum acpi_prefered_pm_profiles { 274enum acpi_prefered_pm_profiles {
266 PM_UNSPECIFIED = 0, 275 PM_UNSPECIFIED = 0,
267 PM_DESKTOP = 1, 276 PM_DESKTOP = 1,
@@ -272,16 +281,6 @@ enum acpi_prefered_pm_profiles {
272 PM_APPLIANCE_PC = 6 281 PM_APPLIANCE_PC = 6
273}; 282};
274 283
275/* FADT Boot Arch Flags */
276
277#define BAF_LEGACY_DEVICES 0x0001
278#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
279#define BAF_MSI_NOT_SUPPORTED 0x0008
280#define BAF_PCIE_ASPM_CONTROL 0x0010
281
282#define FADT2_REVISION_ID 3
283#define FADT2_MINUS_REVISION_ID 2
284
285/* Reset to default packing */ 284/* Reset to default packing */
286 285
287#pragma pack() 286#pragma pack()
@@ -310,8 +309,9 @@ struct acpi_table_desc {
310#define ACPI_TABLE_ORIGIN_UNKNOWN (0) 309#define ACPI_TABLE_ORIGIN_UNKNOWN (0)
311#define ACPI_TABLE_ORIGIN_MAPPED (1) 310#define ACPI_TABLE_ORIGIN_MAPPED (1)
312#define ACPI_TABLE_ORIGIN_ALLOCATED (2) 311#define ACPI_TABLE_ORIGIN_ALLOCATED (2)
313#define ACPI_TABLE_ORIGIN_MASK (3) 312#define ACPI_TABLE_ORIGIN_OVERRIDE (4)
314#define ACPI_TABLE_IS_LOADED (4) 313#define ACPI_TABLE_ORIGIN_MASK (7)
314#define ACPI_TABLE_IS_LOADED (8)
315 315
316/* 316/*
317 * Get the remaining ACPI tables 317 * Get the remaining ACPI tables
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 18963b968114..59ade0752473 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -1016,9 +1016,9 @@ struct acpi_madt_interrupt_source {
1016struct acpi_madt_local_x2apic { 1016struct acpi_madt_local_x2apic {
1017 struct acpi_subtable_header header; 1017 struct acpi_subtable_header header;
1018 u16 reserved; /* Reserved - must be zero */ 1018 u16 reserved; /* Reserved - must be zero */
1019 u32 local_apic_id; /* Processor X2_APIC ID */ 1019 u32 local_apic_id; /* Processor x2APIC ID */
1020 u32 lapic_flags; 1020 u32 lapic_flags;
1021 u32 uid; /* Extended X2_APIC processor ID */ 1021 u32 uid; /* ACPI processor UID */
1022}; 1022};
1023 1023
1024/* 10: Local X2APIC NMI (07/2008) */ 1024/* 10: Local X2APIC NMI (07/2008) */
@@ -1026,7 +1026,7 @@ struct acpi_madt_local_x2apic {
1026struct acpi_madt_local_x2apic_nmi { 1026struct acpi_madt_local_x2apic_nmi {
1027 struct acpi_subtable_header header; 1027 struct acpi_subtable_header header;
1028 u16 inti_flags; 1028 u16 inti_flags;
1029 u32 uid; /* Processor X2_APIC ID */ 1029 u32 uid; /* ACPI processor UID */
1030 u8 lint; /* LINTn to which NMI is connected */ 1030 u8 lint; /* LINTn to which NMI is connected */
1031 u8 reserved[3]; 1031 u8 reserved[3];
1032}; 1032};
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index a20aab510173..f555d927f7c0 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -777,17 +777,25 @@ typedef u8 acpi_adr_space_type;
777#define ACPI_BITREG_SCI_ENABLE 0x0E 777#define ACPI_BITREG_SCI_ENABLE 0x0E
778#define ACPI_BITREG_BUS_MASTER_RLD 0x0F 778#define ACPI_BITREG_BUS_MASTER_RLD 0x0F
779#define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10 779#define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10
780#define ACPI_BITREG_SLEEP_TYPE_A 0x11 780#define ACPI_BITREG_SLEEP_TYPE 0x11
781#define ACPI_BITREG_SLEEP_TYPE_B 0x12 781#define ACPI_BITREG_SLEEP_ENABLE 0x12
782#define ACPI_BITREG_SLEEP_ENABLE 0x13
783 782
784/* PM2 Control register */ 783/* PM2 Control register */
785 784
786#define ACPI_BITREG_ARB_DISABLE 0x14 785#define ACPI_BITREG_ARB_DISABLE 0x13
787 786
788#define ACPI_BITREG_MAX 0x14 787#define ACPI_BITREG_MAX 0x13
789#define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 788#define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1
790 789
790/* Status register values. A 1 clears a status bit. 0 = no effect */
791
792#define ACPI_CLEAR_STATUS 1
793
794/* Enable and Control register values */
795
796#define ACPI_ENABLE_EVENT 1
797#define ACPI_DISABLE_EVENT 0
798
791/* 799/*
792 * External ACPI object definition 800 * External ACPI object definition
793 */ 801 */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 0574add2a1e3..b09c4fde9725 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -322,7 +322,7 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
322int acpi_processor_tstate_has_changed(struct acpi_processor *pr); 322int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
323int acpi_processor_get_throttling_info(struct acpi_processor *pr); 323int acpi_processor_get_throttling_info(struct acpi_processor *pr);
324extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state); 324extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
325extern struct file_operations acpi_processor_throttling_fops; 325extern const struct file_operations acpi_processor_throttling_fops;
326extern void acpi_processor_throttling_init(void); 326extern void acpi_processor_throttling_init(void);
327/* in processor_idle.c */ 327/* in processor_idle.c */
328int acpi_processor_power_init(struct acpi_processor *pr, 328int acpi_processor_power_init(struct acpi_processor *pr,
@@ -336,7 +336,7 @@ extern struct cpuidle_driver acpi_idle_driver;
336 336
337/* in processor_thermal.c */ 337/* in processor_thermal.c */
338int acpi_processor_get_limit_info(struct acpi_processor *pr); 338int acpi_processor_get_limit_info(struct acpi_processor *pr);
339extern struct file_operations acpi_processor_limit_fops; 339extern const struct file_operations acpi_processor_limit_fops;
340extern struct thermal_cooling_device_ops processor_cooling_ops; 340extern struct thermal_cooling_device_ops processor_cooling_ops;
341#ifdef CONFIG_CPU_FREQ 341#ifdef CONFIG_CPU_FREQ
342void acpi_thermal_cpufreq_init(void); 342void acpi_thermal_cpufreq_init(void);
diff --git a/include/acpi/video.h b/include/acpi/video.h
new file mode 100644
index 000000000000..f0275bb79ce4
--- /dev/null
+++ b/include/acpi/video.h
@@ -0,0 +1,11 @@
1#ifndef __ACPI_VIDEO_H
2#define __ACPI_VIDEO_H
3
4#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
5extern int acpi_video_register(void);
6#else
7static inline int acpi_video_register(void) { return 0; }
8#endif
9
10#endif
11
diff --git a/include/asm-frv/ftrace.h b/include/asm-frv/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-frv/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index 26cefcde5cee..68e4677fb9e7 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -18,6 +18,7 @@
18#ifdef __KERNEL__ 18#ifdef __KERNEL__
19 19
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/highmem.h>
21#include <asm/mem-layout.h> 22#include <asm/mem-layout.h>
22#include <asm/spr-regs.h> 23#include <asm/spr-regs.h>
23#include <asm/mb-regs.h> 24#include <asm/mb-regs.h>
@@ -116,6 +117,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
116 unsigned long paddr; 117 unsigned long paddr;
117 118
118 pagefault_disable(); 119 pagefault_disable();
120 debug_kmap_atomic(type);
119 paddr = page_to_phys(page); 121 paddr = page_to_phys(page);
120 122
121 switch (type) { 123 switch (type) {
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
deleted file mode 100644
index 189486c3f92e..000000000000
--- a/include/asm-generic/dma-mapping.h
+++ /dev/null
@@ -1,308 +0,0 @@
1/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
2 *
3 * Implements the generic device dma API via the existing pci_ one
4 * for unconverted architectures
5 */
6
7#ifndef _ASM_GENERIC_DMA_MAPPING_H
8#define _ASM_GENERIC_DMA_MAPPING_H
9
10
11#ifdef CONFIG_PCI
12
13/* we implement the API below in terms of the existing PCI one,
14 * so include it */
15#include <linux/pci.h>
16/* need struct page definitions */
17#include <linux/mm.h>
18
19static inline int
20dma_supported(struct device *dev, u64 mask)
21{
22 BUG_ON(dev->bus != &pci_bus_type);
23
24 return pci_dma_supported(to_pci_dev(dev), mask);
25}
26
27static inline int
28dma_set_mask(struct device *dev, u64 dma_mask)
29{
30 BUG_ON(dev->bus != &pci_bus_type);
31
32 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
33}
34
35static inline void *
36dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
37 gfp_t flag)
38{
39 BUG_ON(dev->bus != &pci_bus_type);
40
41 return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
42}
43
44static inline void
45dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
46 dma_addr_t dma_handle)
47{
48 BUG_ON(dev->bus != &pci_bus_type);
49
50 pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
51}
52
53static inline dma_addr_t
54dma_map_single(struct device *dev, void *cpu_addr, size_t size,
55 enum dma_data_direction direction)
56{
57 BUG_ON(dev->bus != &pci_bus_type);
58
59 return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
60}
61
62static inline void
63dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
64 enum dma_data_direction direction)
65{
66 BUG_ON(dev->bus != &pci_bus_type);
67
68 pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
69}
70
71static inline dma_addr_t
72dma_map_page(struct device *dev, struct page *page,
73 unsigned long offset, size_t size,
74 enum dma_data_direction direction)
75{
76 BUG_ON(dev->bus != &pci_bus_type);
77
78 return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
79}
80
81static inline void
82dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
83 enum dma_data_direction direction)
84{
85 BUG_ON(dev->bus != &pci_bus_type);
86
87 pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
88}
89
90static inline int
91dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
92 enum dma_data_direction direction)
93{
94 BUG_ON(dev->bus != &pci_bus_type);
95
96 return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
97}
98
99static inline void
100dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
101 enum dma_data_direction direction)
102{
103 BUG_ON(dev->bus != &pci_bus_type);
104
105 pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
106}
107
108static inline void
109dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
110 enum dma_data_direction direction)
111{
112 BUG_ON(dev->bus != &pci_bus_type);
113
114 pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
115 size, (int)direction);
116}
117
118static inline void
119dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
120 enum dma_data_direction direction)
121{
122 BUG_ON(dev->bus != &pci_bus_type);
123
124 pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
125 size, (int)direction);
126}
127
128static inline void
129dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
130 enum dma_data_direction direction)
131{
132 BUG_ON(dev->bus != &pci_bus_type);
133
134 pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
135}
136
137static inline void
138dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
139 enum dma_data_direction direction)
140{
141 BUG_ON(dev->bus != &pci_bus_type);
142
143 pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
144}
145
146static inline int
147dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
148{
149 return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
150}
151
152
153#else
154
155static inline int
156dma_supported(struct device *dev, u64 mask)
157{
158 return 0;
159}
160
161static inline int
162dma_set_mask(struct device *dev, u64 dma_mask)
163{
164 BUG();
165 return 0;
166}
167
168static inline void *
169dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
170 gfp_t flag)
171{
172 BUG();
173 return NULL;
174}
175
176static inline void
177dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
178 dma_addr_t dma_handle)
179{
180 BUG();
181}
182
183static inline dma_addr_t
184dma_map_single(struct device *dev, void *cpu_addr, size_t size,
185 enum dma_data_direction direction)
186{
187 BUG();
188 return 0;
189}
190
191static inline void
192dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
193 enum dma_data_direction direction)
194{
195 BUG();
196}
197
198static inline dma_addr_t
199dma_map_page(struct device *dev, struct page *page,
200 unsigned long offset, size_t size,
201 enum dma_data_direction direction)
202{
203 BUG();
204 return 0;
205}
206
207static inline void
208dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
209 enum dma_data_direction direction)
210{
211 BUG();
212}
213
214static inline int
215dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
216 enum dma_data_direction direction)
217{
218 BUG();
219 return 0;
220}
221
222static inline void
223dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
224 enum dma_data_direction direction)
225{
226 BUG();
227}
228
229static inline void
230dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
231 enum dma_data_direction direction)
232{
233 BUG();
234}
235
236static inline void
237dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
238 enum dma_data_direction direction)
239{
240 BUG();
241}
242
243static inline void
244dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
245 enum dma_data_direction direction)
246{
247 BUG();
248}
249
250static inline void
251dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
252 enum dma_data_direction direction)
253{
254 BUG();
255}
256
257static inline int
258dma_error(dma_addr_t dma_addr)
259{
260 return 0;
261}
262
263#endif
264
265/* Now for the API extensions over the pci_ one */
266
267#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
268#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
269#define dma_is_consistent(d, h) (1)
270
271static inline int
272dma_get_cache_alignment(void)
273{
274 /* no easy way to get cache size on all processors, so return
275 * the maximum possible, to be safe */
276 return (1 << INTERNODE_CACHE_SHIFT);
277}
278
279static inline void
280dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
281 unsigned long offset, size_t size,
282 enum dma_data_direction direction)
283{
284 /* just sync everything, that's all the pci API can do */
285 dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
286}
287
288static inline void
289dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
290 unsigned long offset, size_t size,
291 enum dma_data_direction direction)
292{
293 /* just sync everything, that's all the pci API can do */
294 dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
295}
296
297static inline void
298dma_cache_sync(struct device *dev, void *vaddr, size_t size,
299 enum dma_data_direction direction)
300{
301 /* could define this in terms of the dma_cache ... operations,
302 * but if you get this on a platform, you should convert the platform
303 * to using the generic device DMA API */
304 BUG();
305}
306
307#endif
308
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 81797ec9ab29..d6c379dc64fa 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -55,6 +55,10 @@ struct module;
55 * handled is (base + ngpio - 1). 55 * handled is (base + ngpio - 1).
56 * @can_sleep: flag must be set iff get()/set() methods sleep, as they 56 * @can_sleep: flag must be set iff get()/set() methods sleep, as they
57 * must while accessing GPIO expander chips over I2C or SPI 57 * must while accessing GPIO expander chips over I2C or SPI
58 * @names: if set, must be an array of strings to use as alternative
59 * names for the GPIOs in this chip. Any entry in the array
60 * may be NULL if there is no alias for the GPIO, however the
61 * array must be @ngpio entries long.
58 * 62 *
59 * A gpio_chip can help platforms abstract various sources of GPIOs so 63 * A gpio_chip can help platforms abstract various sources of GPIOs so
60 * they can all be accessed through a common programing interface. 64 * they can all be accessed through a common programing interface.
@@ -92,6 +96,7 @@ struct gpio_chip {
92 struct gpio_chip *chip); 96 struct gpio_chip *chip);
93 int base; 97 int base;
94 u16 ngpio; 98 u16 ngpio;
99 char **names;
95 unsigned can_sleep:1; 100 unsigned can_sleep:1;
96 unsigned exported:1; 101 unsigned exported:1;
97}; 102};
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index 0e9e2bc0ee96..88bada2ebc4b 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -43,20 +43,10 @@
43#ifndef cpumask_of_node 43#ifndef cpumask_of_node
44#define cpumask_of_node(node) ((void)node, cpu_online_mask) 44#define cpumask_of_node(node) ((void)node, cpu_online_mask)
45#endif 45#endif
46#ifndef node_to_first_cpu
47#define node_to_first_cpu(node) ((void)(node),0)
48#endif
49#ifndef pcibus_to_node 46#ifndef pcibus_to_node
50#define pcibus_to_node(bus) ((void)(bus), -1) 47#define pcibus_to_node(bus) ((void)(bus), -1)
51#endif 48#endif
52 49
53#ifndef pcibus_to_cpumask
54#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
55 CPU_MASK_ALL : \
56 node_to_cpumask(pcibus_to_node(bus)) \
57 )
58#endif
59
60#ifndef cpumask_of_pcibus 50#ifndef cpumask_of_pcibus
61#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 51#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
62 cpu_all_mask : \ 52 cpu_all_mask : \
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index a654d724d3b0..7fa660fd449c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -61,6 +61,30 @@
61#define BRANCH_PROFILE() 61#define BRANCH_PROFILE()
62#endif 62#endif
63 63
64#ifdef CONFIG_EVENT_TRACER
65#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
66 *(_ftrace_events) \
67 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
68#else
69#define FTRACE_EVENTS()
70#endif
71
72#ifdef CONFIG_TRACING
73#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
74 *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
75 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
76#else
77#define TRACE_PRINTKS()
78#endif
79
80#ifdef CONFIG_FTRACE_SYSCALLS
81#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
82 *(__syscalls_metadata) \
83 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
84#else
85#define TRACE_SYSCALLS()
86#endif
87
64/* .data section */ 88/* .data section */
65#define DATA_DATA \ 89#define DATA_DATA \
66 *(.data) \ 90 *(.data) \
@@ -86,7 +110,10 @@
86 *(__verbose) \ 110 *(__verbose) \
87 VMLINUX_SYMBOL(__stop___verbose) = .; \ 111 VMLINUX_SYMBOL(__stop___verbose) = .; \
88 LIKELY_PROFILE() \ 112 LIKELY_PROFILE() \
89 BRANCH_PROFILE() 113 BRANCH_PROFILE() \
114 TRACE_PRINTKS() \
115 FTRACE_EVENTS() \
116 TRACE_SYSCALLS()
90 117
91#define RO_DATA(align) \ 118#define RO_DATA(align) \
92 . = ALIGN((align)); \ 119 . = ALIGN((align)); \
diff --git a/include/asm-m32r/ftrace.h b/include/asm-m32r/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-m32r/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/asm-m32r/spinlock.h b/include/asm-m32r/spinlock.h
index f5cfba81ee10..dded923883b2 100644
--- a/include/asm-m32r/spinlock.h
+++ b/include/asm-m32r/spinlock.h
@@ -316,6 +316,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
316 return 0; 316 return 0;
317} 317}
318 318
319#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
320#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
321
319#define _raw_spin_relax(lock) cpu_relax() 322#define _raw_spin_relax(lock) cpu_relax()
320#define _raw_read_relax(lock) cpu_relax() 323#define _raw_read_relax(lock) cpu_relax()
321#define _raw_write_relax(lock) cpu_relax() 324#define _raw_write_relax(lock) cpu_relax()
diff --git a/include/asm-mn10300/ftrace.h b/include/asm-mn10300/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-mn10300/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/asm-mn10300/highmem.h b/include/asm-mn10300/highmem.h
index 5256854c0453..90f2abb04bfd 100644
--- a/include/asm-mn10300/highmem.h
+++ b/include/asm-mn10300/highmem.h
@@ -16,6 +16,7 @@
16 16
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/highmem.h>
19#include <asm/kmap_types.h> 20#include <asm/kmap_types.h>
20#include <asm/pgtable.h> 21#include <asm/pgtable.h>
21 22
@@ -77,6 +78,7 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
77 if (page < highmem_start_page) 78 if (page < highmem_start_page)
78 return page_address(page); 79 return page_address(page);
79 80
81 debug_kmap_atomic(type);
80 idx = type + KM_TYPE_NR * smp_processor_id(); 82 idx = type + KM_TYPE_NR * smp_processor_id();
81 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 83 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
82#if HIGHMEM_DEBUG 84#if HIGHMEM_DEBUG
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c19a93c3be85..c8c422151431 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -281,16 +281,16 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
281 281
282struct drm_ioctl_desc { 282struct drm_ioctl_desc {
283 unsigned int cmd; 283 unsigned int cmd;
284 drm_ioctl_t *func;
285 int flags; 284 int flags;
285 drm_ioctl_t *func;
286}; 286};
287 287
288/** 288/**
289 * Creates a driver or general drm_ioctl_desc array entry for the given 289 * Creates a driver or general drm_ioctl_desc array entry for the given
290 * ioctl, for use by drm_ioctl(). 290 * ioctl, for use by drm_ioctl().
291 */ 291 */
292#define DRM_IOCTL_DEF(ioctl, func, flags) \ 292#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
293 [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags} 293 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags}
294 294
295struct drm_magic_entry { 295struct drm_magic_entry {
296 struct list_head head; 296 struct list_head head;
@@ -523,19 +523,32 @@ struct drm_mm {
523 523
524 524
525/** 525/**
526 * Kernel side of a mapping
527 */
528struct drm_local_map {
529 resource_size_t offset; /**< Requested physical address (0 for SAREA)*/
530 unsigned long size; /**< Requested physical size (bytes) */
531 enum drm_map_type type; /**< Type of memory to map */
532 enum drm_map_flags flags; /**< Flags */
533 void *handle; /**< User-space: "Handle" to pass to mmap() */
534 /**< Kernel-space: kernel-virtual address */
535 int mtrr; /**< MTRR slot used */
536};
537
538typedef struct drm_local_map drm_local_map_t;
539
540/**
526 * Mappings list 541 * Mappings list
527 */ 542 */
528struct drm_map_list { 543struct drm_map_list {
529 struct list_head head; /**< list head */ 544 struct list_head head; /**< list head */
530 struct drm_hash_item hash; 545 struct drm_hash_item hash;
531 struct drm_map *map; /**< mapping */ 546 struct drm_local_map *map; /**< mapping */
532 uint64_t user_token; 547 uint64_t user_token;
533 struct drm_master *master; 548 struct drm_master *master;
534 struct drm_mm_node *file_offset_node; /**< fake offset */ 549 struct drm_mm_node *file_offset_node; /**< fake offset */
535}; 550};
536 551
537typedef struct drm_map drm_local_map_t;
538
539/** 552/**
540 * Context handle list 553 * Context handle list
541 */ 554 */
@@ -560,7 +573,7 @@ struct drm_ati_pcigart_info {
560 dma_addr_t bus_addr; 573 dma_addr_t bus_addr;
561 dma_addr_t table_mask; 574 dma_addr_t table_mask;
562 struct drm_dma_handle *table_handle; 575 struct drm_dma_handle *table_handle;
563 drm_local_map_t mapping; 576 struct drm_local_map mapping;
564 int table_size; 577 int table_size;
565}; 578};
566 579
@@ -675,7 +688,6 @@ struct drm_driver {
675 int (*kernel_context_switch) (struct drm_device *dev, int old, 688 int (*kernel_context_switch) (struct drm_device *dev, int old,
676 int new); 689 int new);
677 void (*kernel_context_switch_unlock) (struct drm_device *dev); 690 void (*kernel_context_switch_unlock) (struct drm_device *dev);
678 int (*dri_library_name) (struct drm_device *dev, char *buf);
679 691
680 /** 692 /**
681 * get_vblank_counter - get raw hardware vblank counter 693 * get_vblank_counter - get raw hardware vblank counter
@@ -747,8 +759,8 @@ struct drm_driver {
747 struct drm_file *file_priv); 759 struct drm_file *file_priv);
748 void (*reclaim_buffers_idlelocked) (struct drm_device *dev, 760 void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
749 struct drm_file *file_priv); 761 struct drm_file *file_priv);
750 unsigned long (*get_map_ofs) (struct drm_map * map); 762 resource_size_t (*get_map_ofs) (struct drm_local_map * map);
751 unsigned long (*get_reg_ofs) (struct drm_device *dev); 763 resource_size_t (*get_reg_ofs) (struct drm_device *dev);
752 void (*set_version) (struct drm_device *dev, 764 void (*set_version) (struct drm_device *dev,
753 struct drm_set_version *sv); 765 struct drm_set_version *sv);
754 766
@@ -981,7 +993,7 @@ struct drm_device {
981 sigset_t sigmask; 993 sigset_t sigmask;
982 994
983 struct drm_driver *driver; 995 struct drm_driver *driver;
984 drm_local_map_t *agp_buffer_map; 996 struct drm_local_map *agp_buffer_map;
985 unsigned int agp_buffer_token; 997 unsigned int agp_buffer_token;
986 struct drm_minor *control; /**< Control node for card */ 998 struct drm_minor *control; /**< Control node for card */
987 struct drm_minor *primary; /**< render type primary screen head */ 999 struct drm_minor *primary; /**< render type primary screen head */
@@ -1098,8 +1110,8 @@ extern int drm_release(struct inode *inode, struct file *filp);
1098extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); 1110extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
1099extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); 1111extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
1100extern void drm_vm_open_locked(struct vm_area_struct *vma); 1112extern void drm_vm_open_locked(struct vm_area_struct *vma);
1101extern unsigned long drm_core_get_map_ofs(struct drm_map * map); 1113extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
1102extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); 1114extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
1103extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); 1115extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
1104 1116
1105 /* Memory management support (drm_memory.h) */ 1117 /* Memory management support (drm_memory.h) */
@@ -1202,13 +1214,13 @@ extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv
1202 /* Buffer management support (drm_bufs.h) */ 1214 /* Buffer management support (drm_bufs.h) */
1203extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); 1215extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
1204extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); 1216extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
1205extern int drm_addmap(struct drm_device *dev, unsigned int offset, 1217extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
1206 unsigned int size, enum drm_map_type type, 1218 unsigned int size, enum drm_map_type type,
1207 enum drm_map_flags flags, drm_local_map_t ** map_ptr); 1219 enum drm_map_flags flags, struct drm_local_map **map_ptr);
1208extern int drm_addmap_ioctl(struct drm_device *dev, void *data, 1220extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
1209 struct drm_file *file_priv); 1221 struct drm_file *file_priv);
1210extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); 1222extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
1211extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); 1223extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
1212extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, 1224extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
1213 struct drm_file *file_priv); 1225 struct drm_file *file_priv);
1214extern int drm_addbufs(struct drm_device *dev, void *data, 1226extern int drm_addbufs(struct drm_device *dev, void *data,
@@ -1222,10 +1234,10 @@ extern int drm_freebufs(struct drm_device *dev, void *data,
1222extern int drm_mapbufs(struct drm_device *dev, void *data, 1234extern int drm_mapbufs(struct drm_device *dev, void *data,
1223 struct drm_file *file_priv); 1235 struct drm_file *file_priv);
1224extern int drm_order(unsigned long size); 1236extern int drm_order(unsigned long size);
1225extern unsigned long drm_get_resource_start(struct drm_device *dev, 1237extern resource_size_t drm_get_resource_start(struct drm_device *dev,
1238 unsigned int resource);
1239extern resource_size_t drm_get_resource_len(struct drm_device *dev,
1226 unsigned int resource); 1240 unsigned int resource);
1227extern unsigned long drm_get_resource_len(struct drm_device *dev,
1228 unsigned int resource);
1229 1241
1230 /* DMA support (drm_dma.h) */ 1242 /* DMA support (drm_dma.h) */
1231extern int drm_dma_setup(struct drm_device *dev); 1243extern int drm_dma_setup(struct drm_device *dev);
@@ -1301,7 +1313,7 @@ extern struct drm_master *drm_master_get(struct drm_master *master);
1301extern void drm_master_put(struct drm_master **master); 1313extern void drm_master_put(struct drm_master **master);
1302extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, 1314extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
1303 struct drm_driver *driver); 1315 struct drm_driver *driver);
1304extern int drm_put_dev(struct drm_device *dev); 1316extern void drm_put_dev(struct drm_device *dev);
1305extern int drm_put_minor(struct drm_minor **minor); 1317extern int drm_put_minor(struct drm_minor **minor);
1306extern unsigned int drm_debug; 1318extern unsigned int drm_debug;
1307 1319
@@ -1311,7 +1323,7 @@ extern struct dentry *drm_debugfs_root;
1311 1323
1312extern struct idr drm_minors_idr; 1324extern struct idr drm_minors_idr;
1313 1325
1314extern drm_local_map_t *drm_getsarea(struct drm_device *dev); 1326extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
1315 1327
1316 /* Proc support (drm_proc.h) */ 1328 /* Proc support (drm_proc.h) */
1317extern int drm_proc_init(struct drm_minor *minor, int minor_id, 1329extern int drm_proc_init(struct drm_minor *minor, int minor_id,
@@ -1453,12 +1465,12 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
1453void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); 1465void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
1454void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); 1466void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
1455 1467
1456extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); 1468extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev);
1457extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev); 1469extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
1458extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); 1470extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
1459 1471
1460static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, 1472static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
1461 unsigned int token) 1473 unsigned int token)
1462{ 1474{
1463 struct drm_map_list *_entry; 1475 struct drm_map_list *_entry;
1464 list_for_each_entry(_entry, &dev->maplist, head) 1476 list_for_each_entry(_entry, &dev->maplist, head)
@@ -1485,7 +1497,7 @@ static __inline__ int drm_device_is_pcie(struct drm_device *dev)
1485 return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); 1497 return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
1486} 1498}
1487 1499
1488static __inline__ void drm_core_dropmap(struct drm_map *map) 1500static __inline__ void drm_core_dropmap(struct drm_local_map *map)
1489{ 1501{
1490} 1502}
1491 1503
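Note on the type change running through this file: drm_addmap(), drm_get_resource_start()/drm_get_resource_len() and the get_reg_ofs hook all move from unsigned long to resource_size_t because a 32-bit kernel built with 64-bit resource addresses can have PCI BARs above 4 GiB, and an unsigned long would silently truncate such an offset. A minimal standalone illustration of the truncation (the BAR value is made up; resource_size_t is only typedef'd here for the demo):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Stand-in for resource_size_t on a 32-bit kernel configured with
 * 64-bit resource addresses; the BAR offset below is invented. */
typedef uint64_t resource_size_t;

int main(void)
{
	resource_size_t bar = 0x100008000ULL;		/* BAR above 4 GiB */
	unsigned long truncated = (unsigned long)bar;

	printf("resource_size_t offset: 0x%" PRIx64 "\n", (uint64_t)bar);
	printf("unsigned long offset:   0x%lx%s\n", truncated,
	       sizeof(unsigned long) == 4 ? "  (truncated on 32-bit)" : "");
	return 0;
}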
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 5ded1acfb543..3c1924c010e8 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -550,7 +550,7 @@ struct drm_mode_config {
550 int min_width, min_height; 550 int min_width, min_height;
551 int max_width, max_height; 551 int max_width, max_height;
552 struct drm_mode_config_funcs *funcs; 552 struct drm_mode_config_funcs *funcs;
553 unsigned long fb_base; 553 resource_size_t fb_base;
554 554
555 /* pointers to standard properties */ 555 /* pointers to standard properties */
556 struct list_head property_blob_list; 556 struct list_head property_blob_list;
@@ -613,7 +613,8 @@ extern void drm_fb_release(struct drm_file *file_priv);
613extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); 613extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group);
614extern struct edid *drm_get_edid(struct drm_connector *connector, 614extern struct edid *drm_get_edid(struct drm_connector *connector,
615 struct i2c_adapter *adapter); 615 struct i2c_adapter *adapter);
616extern unsigned char *drm_do_probe_ddc_edid(struct i2c_adapter *adapter); 616extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
617 unsigned char *buf, int len);
617extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); 618extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
618extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); 619extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
619extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); 620extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
@@ -731,4 +732,5 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
731 void *data, struct drm_file *file_priv); 732 void *data, struct drm_file *file_priv);
732extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, 733extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
733 void *data, struct drm_file *file_priv); 734 void *data, struct drm_file *file_priv);
735extern bool drm_detect_hdmi_monitor(struct edid *edid);
734#endif /* __DRM_CRTC_H__ */ 736#endif /* __DRM_CRTC_H__ */
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c7d4b2e606a5..ec073d8288d9 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -33,7 +33,6 @@
33#ifndef __DRM_CRTC_HELPER_H__ 33#ifndef __DRM_CRTC_HELPER_H__
34#define __DRM_CRTC_HELPER_H__ 34#define __DRM_CRTC_HELPER_H__
35 35
36#include <linux/i2c.h>
37#include <linux/spinlock.h> 36#include <linux/spinlock.h>
38#include <linux/types.h> 37#include <linux/types.h>
39#include <linux/idr.h> 38#include <linux/idr.h>
@@ -92,7 +91,7 @@ struct drm_connector_helper_funcs {
92extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); 91extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
93extern void drm_helper_disable_unused_functions(struct drm_device *dev); 92extern void drm_helper_disable_unused_functions(struct drm_device *dev);
94extern int drm_helper_hotplug_stage_two(struct drm_device *dev); 93extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
95extern bool drm_helper_initial_config(struct drm_device *dev, bool can_grow); 94extern bool drm_helper_initial_config(struct drm_device *dev);
96extern int drm_crtc_helper_set_config(struct drm_mode_set *set); 95extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
97extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, 96extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
98 struct drm_display_mode *mode, 97 struct drm_display_mode *mode,
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 8dbd2572b7c3..26641e95e0a4 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -6,6 +6,19 @@
6#include <linux/interrupt.h> /* For task queue support */ 6#include <linux/interrupt.h> /* For task queue support */
7#include <linux/delay.h> 7#include <linux/delay.h>
8 8
9#ifndef readq
10static inline u64 readq(void __iomem *reg)
11{
12 return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
13}
14
15static inline void writeq(u64 val, void __iomem *reg)
16{
17 writel(val & 0xffffffff, reg);
18 writel(val >> 32, reg + 0x4UL);
19}
20#endif
21
9/** Current process ID */ 22/** Current process ID */
10#define DRM_CURRENTPID task_pid_nr(current) 23#define DRM_CURRENTPID task_pid_nr(current)
11#define DRM_SUSER(p) capable(CAP_SYS_ADMIN) 24#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
@@ -23,6 +36,12 @@
23/** Write a dword into a MMIO region */ 36/** Write a dword into a MMIO region */
24#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset)) 37#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
25/** Read memory barrier */ 38/** Read memory barrier */
39
40/** Read a qword from a MMIO region - be careful using these unless you really understand them */
41#define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset))
42/** Write a qword into a MMIO region */
43#define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset))
44
26#define DRM_READMEMORYBARRIER() rmb() 45#define DRM_READMEMORYBARRIER() rmb()
27/** Write memory barrier */ 46/** Write memory barrier */
28#define DRM_WRITEMEMORYBARRIER() wmb() 47#define DRM_WRITEMEMORYBARRIER() wmb()
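The readq/writeq fallback added above composes a 64-bit access out of two 32-bit MMIO accesses, low dword at reg and high dword at reg + 4, so it is neither atomic nor a single bus cycle — hence the "be careful" warning on DRM_READ64/DRM_WRITE64. A plain userspace sketch of the same composition, with an array standing in for the register window:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Two 32-bit registers standing in for an MMIO window. */
static uint32_t regs[2];

static uint64_t fake_readq(const uint32_t *reg)
{
	/* low dword first, then high dword, exactly like the fallback */
	return (uint64_t)reg[0] | ((uint64_t)reg[1] << 32);
}

static void fake_writeq(uint64_t val, uint32_t *reg)
{
	reg[0] = (uint32_t)(val & 0xffffffff);
	reg[1] = (uint32_t)(val >> 32);
}

int main(void)
{
	fake_writeq(0x1122334455667788ULL, regs);
	printf("0x%" PRIx64 "\n", fake_readq(regs));	/* 0x1122334455667788 */
	return 0;
}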
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 76c4c8243038..2df74eb09563 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -239,10 +239,123 @@
239 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 239 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
240 {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 240 {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
241 {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 241 {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
242 {0x1002, 0x793f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
243 {0x1002, 0x7941, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
244 {0x1002, 0x7942, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS600|RADEON_IS_IGP|RADEON_NEW_MEMMAP}, \
242 {0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 245 {0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
243 {0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 246 {0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
244 {0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 247 {0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
245 {0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 248 {0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \
249 {0x1002, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
250 {0x1002, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
251 {0x1002, 0x9402, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
252 {0x1002, 0x9403, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
253 {0x1002, 0x9405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
254 {0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
255 {0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
256 {0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
257 {0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
258 {0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
259 {0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
260 {0x1002, 0x9444, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
261 {0x1002, 0x9446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
262 {0x1002, 0x944A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
263 {0x1002, 0x944B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
264 {0x1002, 0x944C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
265 {0x1002, 0x944E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
266 {0x1002, 0x9450, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
267 {0x1002, 0x9452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
268 {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
269 {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
270 {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
271 {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
272 {0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
273 {0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
274 {0x1002, 0x947B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
275 {0x1002, 0x9480, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
276 {0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
277 {0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
278 {0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
279 {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
280 {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
281 {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
282 {0x1002, 0x9498, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
283 {0x1002, 0x949C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
284 {0x1002, 0x949E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
285 {0x1002, 0x949F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
286 {0x1002, 0x94C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
287 {0x1002, 0x94C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
288 {0x1002, 0x94C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
289 {0x1002, 0x94C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
290 {0x1002, 0x94C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
291 {0x1002, 0x94C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
292 {0x1002, 0x94C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
293 {0x1002, 0x94C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
294 {0x1002, 0x94C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
295 {0x1002, 0x94CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
296 {0x1002, 0x94CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
297 {0x1002, 0x94CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV610|RADEON_NEW_MEMMAP}, \
298 {0x1002, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
299 {0x1002, 0x9501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
300 {0x1002, 0x9504, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
301 {0x1002, 0x9505, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
302 {0x1002, 0x9506, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
303 {0x1002, 0x9507, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
304 {0x1002, 0x9508, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
305 {0x1002, 0x9509, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
306 {0x1002, 0x950F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
307 {0x1002, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
308 {0x1002, 0x9515, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
309 {0x1002, 0x9517, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
310 {0x1002, 0x9519, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV670|RADEON_NEW_MEMMAP}, \
311 {0x1002, 0x9540, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
312 {0x1002, 0x9541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
313 {0x1002, 0x9542, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
314 {0x1002, 0x954E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
315 {0x1002, 0x954F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_NEW_MEMMAP}, \
316 {0x1002, 0x9552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
317 {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
318 {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
319 {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
320 {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
321 {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
322 {0x1002, 0x9586, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
323 {0x1002, 0x9587, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
324 {0x1002, 0x9588, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
325 {0x1002, 0x9589, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
326 {0x1002, 0x958A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
327 {0x1002, 0x958B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
328 {0x1002, 0x958C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
329 {0x1002, 0x958D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
330 {0x1002, 0x958E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
331 {0x1002, 0x958F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
332 {0x1002, 0x9590, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
333 {0x1002, 0x9591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
334 {0x1002, 0x9593, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
335 {0x1002, 0x9595, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
336 {0x1002, 0x9596, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
337 {0x1002, 0x9597, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
338 {0x1002, 0x9598, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
339 {0x1002, 0x9599, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_NEW_MEMMAP}, \
340 {0x1002, 0x959B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV635|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
341 {0x1002, 0x95C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
342 {0x1002, 0x95C5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
343 {0x1002, 0x95C6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
344 {0x1002, 0x95C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
345 {0x1002, 0x95C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
346 {0x1002, 0x95C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
347 {0x1002, 0x95C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
348 {0x1002, 0x95CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
349 {0x1002, 0x95CD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
350 {0x1002, 0x95CE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
351 {0x1002, 0x95CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV620|RADEON_NEW_MEMMAP}, \
352 {0x1002, 0x9610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
353 {0x1002, 0x9611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
354 {0x1002, 0x9612, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
355 {0x1002, 0x9613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
356 {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
357 {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
358 {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
246 {0, 0, 0} 359 {0, 0, 0}
247 360
248#define r128_PCI_IDS \ 361#define r128_PCI_IDS \
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 72ecf67ad3ec..fe3e3a4b4aed 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -306,6 +306,8 @@ typedef union {
306 306
307#define RADEON_SCRATCH_REG_OFFSET 32 307#define RADEON_SCRATCH_REG_OFFSET 32
308 308
309#define R600_SCRATCH_REG_OFFSET 256
310
309#define RADEON_NR_SAREA_CLIPRECTS 12 311#define RADEON_NR_SAREA_CLIPRECTS 12
310 312
311/* There are 2 heaps (local/GART). Each region within a heap is a 313/* There are 2 heaps (local/GART). Each region within a heap is a
@@ -528,7 +530,8 @@ typedef struct drm_radeon_init {
528 RADEON_INIT_CP = 0x01, 530 RADEON_INIT_CP = 0x01,
529 RADEON_CLEANUP_CP = 0x02, 531 RADEON_CLEANUP_CP = 0x02,
530 RADEON_INIT_R200_CP = 0x03, 532 RADEON_INIT_R200_CP = 0x03,
531 RADEON_INIT_R300_CP = 0x04 533 RADEON_INIT_R300_CP = 0x04,
534 RADEON_INIT_R600_CP = 0x05
532 } func; 535 } func;
533 unsigned long sarea_priv_offset; 536 unsigned long sarea_priv_offset;
534 int is_pci; 537 int is_pci;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index e9581fd9fb66..ca9b9b9bd331 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -67,6 +67,7 @@ header-y += falloc.h
67header-y += fd.h 67header-y += fd.h
68header-y += fdreg.h 68header-y += fdreg.h
69header-y += fib_rules.h 69header-y += fib_rules.h
70header-y += fiemap.h
70header-y += firewire-cdev.h 71header-y += firewire-cdev.h
71header-y += firewire-constants.h 72header-y += firewire-constants.h
72header-y += fuse.h 73header-y += fuse.h
@@ -158,8 +159,6 @@ header-y += ultrasound.h
158header-y += un.h 159header-y += un.h
159header-y += utime.h 160header-y += utime.h
160header-y += veth.h 161header-y += veth.h
161header-y += video_decoder.h
162header-y += video_encoder.h
163header-y += videotext.h 162header-y += videotext.h
164header-y += x25.h 163header-y += x25.h
165 164
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 78199151c00b..6586cbd0d4af 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -97,6 +97,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
97/* the following four functions are architecture-dependent */ 97/* the following four functions are architecture-dependent */
98void acpi_numa_slit_init (struct acpi_table_slit *slit); 98void acpi_numa_slit_init (struct acpi_table_slit *slit);
99void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); 99void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
100void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
100void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); 101void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
101void acpi_numa_arch_fixup(void); 102void acpi_numa_arch_fixup(void);
102 103
@@ -257,6 +258,40 @@ void __init acpi_no_s4_hw_signature(void);
257void __init acpi_old_suspend_ordering(void); 258void __init acpi_old_suspend_ordering(void);
258void __init acpi_s4_no_nvs(void); 259void __init acpi_s4_no_nvs(void);
259#endif /* CONFIG_PM_SLEEP */ 260#endif /* CONFIG_PM_SLEEP */
261
262#define OSC_QUERY_TYPE 0
263#define OSC_SUPPORT_TYPE 1
264#define OSC_CONTROL_TYPE 2
265#define OSC_SUPPORT_MASKS 0x1f
266
267/* _OSC DW0 Definition */
268#define OSC_QUERY_ENABLE 1
269#define OSC_REQUEST_ERROR 2
270#define OSC_INVALID_UUID_ERROR 4
271#define OSC_INVALID_REVISION_ERROR 8
272#define OSC_CAPABILITIES_MASK_ERROR 16
273
274/* _OSC DW1 Definition (OS Support Fields) */
275#define OSC_EXT_PCI_CONFIG_SUPPORT 1
276#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
277#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
278#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
279#define OSC_MSI_SUPPORT 16
280
281/* _OSC DW1 Definition (OS Control Fields) */
282#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
283#define OSC_SHPC_NATIVE_HP_CONTROL 2
284#define OSC_PCI_EXPRESS_PME_CONTROL 4
285#define OSC_PCI_EXPRESS_AER_CONTROL 8
286#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
287
288#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
289 OSC_SHPC_NATIVE_HP_CONTROL | \
290 OSC_PCI_EXPRESS_PME_CONTROL | \
291 OSC_PCI_EXPRESS_AER_CONTROL | \
292 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
293
294extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
260#else /* CONFIG_ACPI */ 295#else /* CONFIG_ACPI */
261 296
262static inline int early_acpi_boot_init(void) 297static inline int early_acpi_boot_init(void)
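The _OSC constants added above follow the usual three-DWORD capabilities buffer (query, support, control) handed to platform firmware. A rough sketch of how a caller might assemble such a buffer with these masks before passing the result to something like acpi_pci_osc_control_set(); the index layout mirrors the OSC_*_TYPE values, and the particular capability bits chosen here are only an example:

#include <stdint.h>
#include <stdio.h>

#define OSC_QUERY_TYPE				0
#define OSC_SUPPORT_TYPE			1
#define OSC_CONTROL_TYPE			2

#define OSC_QUERY_ENABLE			1
#define OSC_EXT_PCI_CONFIG_SUPPORT		1
#define OSC_MSI_SUPPORT				16
#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL	1
#define OSC_PCI_EXPRESS_AER_CONTROL		8

/* Assemble a query-style _OSC argument buffer: advertise extended config
 * space and MSI support, and ask about native PCIe hotplug + AER control. */
static void fill_osc_args(uint32_t caps[3])
{
	caps[OSC_QUERY_TYPE]   = OSC_QUERY_ENABLE;
	caps[OSC_SUPPORT_TYPE] = OSC_EXT_PCI_CONFIG_SUPPORT | OSC_MSI_SUPPORT;
	caps[OSC_CONTROL_TYPE] = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
				 OSC_PCI_EXPRESS_AER_CONTROL;
}

int main(void)
{
	uint32_t caps[3];

	fill_osc_args(caps);
	printf("query %u support 0x%x control 0x%x\n",
	       caps[OSC_QUERY_TYPE], caps[OSC_SUPPORT_TYPE],
	       caps[OSC_CONTROL_TYPE]);
	return 0;
}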
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 45f6297821bd..5fc2ef8d97fa 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -21,6 +21,15 @@
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23 23
24/* on architectures without dma-mapping capabilities we need to ensure
25 * that the asynchronous path compiles away
26 */
27#ifdef CONFIG_HAS_DMA
28#define __async_inline
29#else
30#define __async_inline __always_inline
31#endif
32
24/** 33/**
25 * dma_chan_ref - object used to manage dma channels received from the 34 * dma_chan_ref - object used to manage dma channels received from the
26 * dmaengine core. 35 * dmaengine core.
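__async_inline above leans on __always_inline so that, when dma-mapping support is absent, the helper is folded into its caller and the dma-engine path becomes dead code the compiler can discard. A contrived userspace analogue of that pattern (HAS_DMA is only a stand-in for CONFIG_HAS_DMA):

#include <stdio.h>

#define HAS_DMA 0	/* stand-in for CONFIG_HAS_DMA (illustration only) */

#if HAS_DMA
#define __async_inline
#else
#define __async_inline __attribute__((always_inline)) inline
#endif

static __async_inline int async_add(int have_channel, int a, int b)
{
	if (have_channel)
		return -1;	/* would queue a descriptor on real hardware */
	return a + b;		/* synchronous fallback */
}

int main(void)
{
	/* With HAS_DMA == 0 the call is forced inline, have_channel is a
	 * compile-time 0, and the "async" branch vanishes from the binary. */
	printf("%d\n", async_add(HAS_DMA, 2, 3));
	return 0;
}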
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6617c9f8f2ca..cb79b7a208e1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -29,6 +29,8 @@
29#ifndef __LINUX_ATA_H__ 29#ifndef __LINUX_ATA_H__
30#define __LINUX_ATA_H__ 30#define __LINUX_ATA_H__
31 31
32#include <linux/kernel.h>
33#include <linux/string.h>
32#include <linux/types.h> 34#include <linux/types.h>
33#include <asm/byteorder.h> 35#include <asm/byteorder.h>
34 36
@@ -91,6 +93,7 @@ enum {
91 ATA_ID_CFA_POWER = 160, 93 ATA_ID_CFA_POWER = 160,
92 ATA_ID_CFA_KEY_MGMT = 162, 94 ATA_ID_CFA_KEY_MGMT = 162,
93 ATA_ID_CFA_MODES = 163, 95 ATA_ID_CFA_MODES = 163,
96 ATA_ID_DATA_SET_MGMT = 169,
94 ATA_ID_ROT_SPEED = 217, 97 ATA_ID_ROT_SPEED = 217,
95 ATA_ID_PIO4 = (1 << 1), 98 ATA_ID_PIO4 = (1 << 1),
96 99
@@ -248,6 +251,7 @@ enum {
248 ATA_CMD_SMART = 0xB0, 251 ATA_CMD_SMART = 0xB0,
249 ATA_CMD_MEDIA_LOCK = 0xDE, 252 ATA_CMD_MEDIA_LOCK = 0xDE,
250 ATA_CMD_MEDIA_UNLOCK = 0xDF, 253 ATA_CMD_MEDIA_UNLOCK = 0xDF,
254 ATA_CMD_DSM = 0x06,
251 /* marked obsolete in the ATA/ATAPI-7 spec */ 255 /* marked obsolete in the ATA/ATAPI-7 spec */
252 ATA_CMD_RESTORE = 0x10, 256 ATA_CMD_RESTORE = 0x10,
253 257
@@ -321,6 +325,9 @@ enum {
321 ATA_SMART_READ_VALUES = 0xD0, 325 ATA_SMART_READ_VALUES = 0xD0,
322 ATA_SMART_READ_THRESHOLDS = 0xD1, 326 ATA_SMART_READ_THRESHOLDS = 0xD1,
323 327
328 /* feature values for Data Set Management */
329 ATA_DSM_TRIM = 0x01,
330
324 /* password used in LBA Mid / LBA High for executing SMART commands */ 331 /* password used in LBA Mid / LBA High for executing SMART commands */
325 ATA_SMART_LBAM_PASS = 0x4F, 332 ATA_SMART_LBAM_PASS = 0x4F,
326 ATA_SMART_LBAH_PASS = 0xC2, 333 ATA_SMART_LBAH_PASS = 0xC2,
@@ -723,6 +730,14 @@ static inline int ata_id_has_unload(const u16 *id)
723 return 0; 730 return 0;
724} 731}
725 732
733static inline int ata_id_has_trim(const u16 *id)
734{
735 if (ata_id_major_version(id) >= 7 &&
736 (id[ATA_ID_DATA_SET_MGMT] & 1))
737 return 1;
738 return 0;
739}
740
726static inline int ata_id_current_chs_valid(const u16 *id) 741static inline int ata_id_current_chs_valid(const u16 *id)
727{ 742{
728 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command 743 /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -863,6 +878,32 @@ static inline void ata_id_to_hd_driveid(u16 *id)
863#endif 878#endif
864} 879}
865 880
881/*
882 * Write up to 'max' LBA Range Entries to the buffer that will cover the
883 * extent from sector to sector + count. This is used for TRIM and for
884 * ADD LBA(S) TO NV CACHE PINNED SET.
885 */
886static inline unsigned ata_set_lba_range_entries(void *_buffer, unsigned max,
887 u64 sector, unsigned long count)
888{
889 __le64 *buffer = _buffer;
890 unsigned i = 0;
891
892 while (i < max) {
893 u64 entry = sector |
894 ((u64)(count > 0xffff ? 0xffff : count) << 48);
895 buffer[i++] = __cpu_to_le64(entry);
896 if (count <= 0xffff)
897 break;
898 count -= 0xffff;
899 sector += 0xffff;
900 }
901
902 max = ALIGN(i * 8, 512);
903 memset(buffer + i, 0, max - i * 8);
904 return max;
905}
906
866static inline int is_multi_taskfile(struct ata_taskfile *tf) 907static inline int is_multi_taskfile(struct ata_taskfile *tf)
867{ 908{
868 return (tf->command == ATA_CMD_READ_MULTI) || 909 return (tf->command == ATA_CMD_READ_MULTI) ||
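ata_id_has_trim() and ata_set_lba_range_entries() added above encode the Data Set Management / TRIM payload: word 169 bit 0 of the IDENTIFY data advertises TRIM, and each 8-byte range entry packs the start LBA in the low 48 bits with a count of at most 0xffff sectors in the top 16, the whole buffer padded to a 512-byte multiple. A standalone restatement of that packing (host-endian only for illustration; the real helper stores little-endian values):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#define ALIGN_512(x) (((x) + 511u) & ~511u)

/* Pack "count" sectors starting at "sector" into up to "max" LBA Range
 * Entries; returns the padded payload size in bytes. */
static unsigned set_lba_range_entries(uint64_t *buf, unsigned max,
				      uint64_t sector, unsigned long count)
{
	unsigned i = 0;

	while (i < max) {
		uint64_t chunk = count > 0xffff ? 0xffff : count;

		buf[i++] = sector | (chunk << 48);
		if (count <= 0xffff)
			break;
		count -= 0xffff;
		sector += 0xffff;
	}

	max = ALIGN_512(i * 8);		/* payload is a multiple of 512 bytes */
	memset(buf + i, 0, max - i * 8);
	return max;
}

int main(void)
{
	uint64_t buf[64];	/* room for one 512-byte payload */
	unsigned len = set_lba_range_entries(buf, 64, 0x1000, 0x18000);

	printf("payload %u bytes, first entry 0x%016" PRIx64 "\n", len, buf[0]);
	return 0;
}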
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 91a773993a5c..850f39b33e74 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -10,8 +10,13 @@
10#ifndef _LINUX_AUTO_DEV_IOCTL_H 10#ifndef _LINUX_AUTO_DEV_IOCTL_H
11#define _LINUX_AUTO_DEV_IOCTL_H 11#define _LINUX_AUTO_DEV_IOCTL_H
12 12
13#include <linux/auto_fs.h>
14
15#ifdef __KERNEL__
13#include <linux/string.h> 16#include <linux/string.h>
14#include <linux/types.h> 17#else
18#include <string.h>
19#endif /* __KERNEL__ */
15 20
16#define AUTOFS_DEVICE_NAME "autofs" 21#define AUTOFS_DEVICE_NAME "autofs"
17 22
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index c21e5972a3e8..63265852b7d1 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -17,11 +17,13 @@
17#ifdef __KERNEL__ 17#ifdef __KERNEL__
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/limits.h> 19#include <linux/limits.h>
20#include <linux/types.h>
21#include <linux/ioctl.h>
22#else
20#include <asm/types.h> 23#include <asm/types.h>
24#include <sys/ioctl.h>
21#endif /* __KERNEL__ */ 25#endif /* __KERNEL__ */
22 26
23#include <linux/ioctl.h>
24
25/* This file describes autofs v3 */ 27/* This file describes autofs v3 */
26#define AUTOFS_PROTO_VERSION 3 28#define AUTOFS_PROTO_VERSION 3
27 29
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bee52abb8a4d..0ec2c594868e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,8 +24,8 @@ struct dentry;
24 */ 24 */
25enum bdi_state { 25enum bdi_state {
26 BDI_pdflush, /* A pdflush thread is working this device */ 26 BDI_pdflush, /* A pdflush thread is working this device */
27 BDI_write_congested, /* The write queue is getting full */ 27 BDI_async_congested, /* The async (write) queue is getting full */
28 BDI_read_congested, /* The read queue is getting full */ 28 BDI_sync_congested, /* The sync queue is getting full */
29 BDI_unused, /* Available bits start here */ 29 BDI_unused, /* Available bits start here */
30}; 30};
31 31
@@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
215 215
216static inline int bdi_read_congested(struct backing_dev_info *bdi) 216static inline int bdi_read_congested(struct backing_dev_info *bdi)
217{ 217{
218 return bdi_congested(bdi, 1 << BDI_read_congested); 218 return bdi_congested(bdi, 1 << BDI_sync_congested);
219} 219}
220 220
221static inline int bdi_write_congested(struct backing_dev_info *bdi) 221static inline int bdi_write_congested(struct backing_dev_info *bdi)
222{ 222{
223 return bdi_congested(bdi, 1 << BDI_write_congested); 223 return bdi_congested(bdi, 1 << BDI_async_congested);
224} 224}
225 225
226static inline int bdi_rw_congested(struct backing_dev_info *bdi) 226static inline int bdi_rw_congested(struct backing_dev_info *bdi)
227{ 227{
228 return bdi_congested(bdi, (1 << BDI_read_congested)| 228 return bdi_congested(bdi, (1 << BDI_sync_congested) |
229 (1 << BDI_write_congested)); 229 (1 << BDI_async_congested));
230} 230}
231 231
232void clear_bdi_congested(struct backing_dev_info *bdi, int rw); 232void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
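The backing-dev change above reties the congestion bits to sync/async instead of read/write, but the inlines still reduce to a bit test against the bdi state word. A small illustration of that test with the renamed bits (the state value here is invented, and the kernel versions take a struct backing_dev_info * rather than a raw word):

#include <stdio.h>

enum bdi_state_bits {
	BDI_pdflush,
	BDI_async_congested,	/* the async (write) queue is getting full */
	BDI_sync_congested,	/* the sync queue is getting full */
};

static int bdi_congested(unsigned long state, int bdi_bits)
{
	return (state & bdi_bits) != 0;
}

int main(void)
{
	unsigned long state = 1UL << BDI_async_congested;	/* made-up state */

	printf("read  congested: %d\n",
	       bdi_congested(state, 1 << BDI_sync_congested));
	printf("write congested: %d\n",
	       bdi_congested(state, 1 << BDI_async_congested));
	printf("rw    congested: %d\n",
	       bdi_congested(state, (1 << BDI_sync_congested) |
				    (1 << BDI_async_congested)));
	return 0;
}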
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 77b4a9e46004..6638b8148de7 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -35,8 +35,7 @@ struct linux_binprm{
35#endif 35#endif
36 struct mm_struct *mm; 36 struct mm_struct *mm;
37 unsigned long p; /* current top of mem */ 37 unsigned long p; /* current top of mem */
38 unsigned int sh_bang:1, 38 unsigned int
39 misc_bang:1,
40 cred_prepared:1,/* true if creds already prepared (multiple 39 cred_prepared:1,/* true if creds already prepared (multiple
41 * preps happen for interpreters) */ 40 * preps happen for interpreters) */
42 cap_effective:1;/* true if has elevated effective capabilities, 41 cap_effective:1;/* true if has elevated effective capabilities,
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b05b1d4d17d2..b900d2c67d29 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -145,20 +145,21 @@ struct bio {
145 * bit 2 -- barrier 145 * bit 2 -- barrier
146 * Insert a serialization point in the IO queue, forcing previously 146 * Insert a serialization point in the IO queue, forcing previously
147 * submitted IO to be completed before this one is issued. 147 * submitted IO to be completed before this one is issued.
148 * bit 3 -- synchronous I/O hint: the block layer will unplug immediately 148 * bit 3 -- synchronous I/O hint.
149 * Note that this does NOT indicate that the IO itself is sync, just 149 * bit 4 -- Unplug the device immediately after submitting this bio.
150 * that the block layer will not postpone issue of this IO by plugging. 150 * bit 5 -- metadata request
151 * bit 4 -- metadata request
152 * Used for tracing to differentiate metadata and data IO. May also 151 * Used for tracing to differentiate metadata and data IO. May also
153 * get some preferential treatment in the IO scheduler 152 * get some preferential treatment in the IO scheduler
154 * bit 5 -- discard sectors 153 * bit 6 -- discard sectors
155 * Informs the lower level device that this range of sectors is no longer 154 * Informs the lower level device that this range of sectors is no longer
156 * used by the file system and may thus be freed by the device. Used 155 * used by the file system and may thus be freed by the device. Used
157 * for flash based storage. 156 * for flash based storage.
158 * bit 6 -- fail fast device errors 157 * bit 7 -- fail fast device errors
159 * bit 7 -- fail fast transport errors 158 * bit 8 -- fail fast transport errors
160 * bit 8 -- fail fast driver errors 159 * bit 9 -- fail fast driver errors
161 * Don't want driver retries for any fast fail whatever the reason. 160 * Don't want driver retries for any fast fail whatever the reason.
161 * bit 10 -- Tell the IO scheduler not to wait for more requests after this
162 one has been submitted, even if it is a SYNC request.
162 */ 163 */
163#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */ 164#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
164#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */ 165#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
@@ -170,6 +171,7 @@ struct bio {
170#define BIO_RW_FAILFAST_DEV 7 171#define BIO_RW_FAILFAST_DEV 7
171#define BIO_RW_FAILFAST_TRANSPORT 8 172#define BIO_RW_FAILFAST_TRANSPORT 8
172#define BIO_RW_FAILFAST_DRIVER 9 173#define BIO_RW_FAILFAST_DRIVER 9
174#define BIO_RW_NOIDLE 10
173 175
174#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag))) 176#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))
175 177
@@ -188,6 +190,7 @@ struct bio {
188#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD) 190#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD)
189#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META) 191#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META)
190#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD) 192#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD)
193#define bio_noidle(bio) bio_rw_flagged(bio, BIO_RW_NOIDLE)
191 194
192/* 195/*
193 * upper 16 bits of bi_rw define the io priority of this bio 196 * upper 16 bits of bi_rw define the io priority of this bio
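With BIO_RW_NOIDLE taking bit 10, the renumbered comment above now covers bits 0-10 of bi_rw, and helpers such as bio_noidle() are all the same bio_rw_flagged() bit test. A minimal userspace restatement of that test, using only flag numbers visible in the hunk (the bi_rw value is invented):

#include <stdio.h>

/* Bit numbers taken from the hunk above. */
#define BIO_RW		0
#define BIO_RW_AHEAD	1
#define BIO_RW_NOIDLE	10

struct bio { unsigned long bi_rw; };	/* trimmed to the one field used here */

#define bio_rw_flagged(bio, flag)	((bio)->bi_rw & (1 << (flag)))
#define bio_noidle(bio)			bio_rw_flagged(bio, BIO_RW_NOIDLE)

int main(void)
{
	struct bio bio = { .bi_rw = (1 << BIO_RW) | (1 << BIO_RW_NOIDLE) };

	printf("write: %d, noidle: %d, readahead: %d\n",
	       !!bio_rw_flagged(&bio, BIO_RW), !!bio_noidle(&bio),
	       !!bio_rw_flagged(&bio, BIO_RW_AHEAD));
	return 0;
}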
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847..ba54c834a590 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
38typedef void (rq_end_io_fn)(struct request *, int); 38typedef void (rq_end_io_fn)(struct request *, int);
39 39
40struct request_list { 40struct request_list {
41 /*
42 * count[], starved[], and wait[] are indexed by
43 * BLK_RW_SYNC/BLK_RW_ASYNC
44 */
41 int count[2]; 45 int count[2];
42 int starved[2]; 46 int starved[2];
43 int elvpriv; 47 int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
66 REQ_TYPE_ATA_PC, 70 REQ_TYPE_ATA_PC,
67}; 71};
68 72
73enum {
74 BLK_RW_ASYNC = 0,
75 BLK_RW_SYNC = 1,
76};
77
69/* 78/*
70 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being 79 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
71 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a 80 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -103,12 +112,12 @@ enum rq_flag_bits {
103 __REQ_QUIET, /* don't worry about errors */ 112 __REQ_QUIET, /* don't worry about errors */
104 __REQ_PREEMPT, /* set for "ide_preempt" requests */ 113 __REQ_PREEMPT, /* set for "ide_preempt" requests */
105 __REQ_ORDERED_COLOR, /* is before or after barrier */ 114 __REQ_ORDERED_COLOR, /* is before or after barrier */
106 __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ 115 __REQ_RW_SYNC, /* request is sync (sync write or read) */
107 __REQ_ALLOCED, /* request came from our alloc pool */ 116 __REQ_ALLOCED, /* request came from our alloc pool */
108 __REQ_RW_META, /* metadata io request */ 117 __REQ_RW_META, /* metadata io request */
109 __REQ_COPY_USER, /* contains copies of user pages */ 118 __REQ_COPY_USER, /* contains copies of user pages */
110 __REQ_INTEGRITY, /* integrity metadata has been remapped */ 119 __REQ_INTEGRITY, /* integrity metadata has been remapped */
111 __REQ_UNPLUG, /* unplug queue on submission */ 120 __REQ_NOIDLE, /* Don't anticipate more IO after this one */
112 __REQ_NR_BITS, /* stops here */ 121 __REQ_NR_BITS, /* stops here */
113}; 122};
114 123
@@ -135,7 +144,7 @@ enum rq_flag_bits {
135#define REQ_RW_META (1 << __REQ_RW_META) 144#define REQ_RW_META (1 << __REQ_RW_META)
136#define REQ_COPY_USER (1 << __REQ_COPY_USER) 145#define REQ_COPY_USER (1 << __REQ_COPY_USER)
137#define REQ_INTEGRITY (1 << __REQ_INTEGRITY) 146#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
138#define REQ_UNPLUG (1 << __REQ_UNPLUG) 147#define REQ_NOIDLE (1 << __REQ_NOIDLE)
139 148
140#define BLK_MAX_CDB 16 149#define BLK_MAX_CDB 16
141 150
@@ -438,8 +447,8 @@ struct request_queue
438#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ 447#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
439#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 448#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
440#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ 449#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
441#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */ 450#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
442#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */ 451#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
443#define QUEUE_FLAG_DEAD 5 /* queue being torn down */ 452#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
444#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ 453#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
445#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ 454#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
@@ -611,32 +620,42 @@ enum {
611#define rq_data_dir(rq) ((rq)->cmd_flags & 1) 620#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
612 621
613/* 622/*
614 * We regard a request as sync, if it's a READ or a SYNC write. 623 * We regard a request as sync, if either a read or a sync write
615 */ 624 */
616#define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC) 625static inline bool rw_is_sync(unsigned int rw_flags)
626{
627 return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
628}
629
630static inline bool rq_is_sync(struct request *rq)
631{
632 return rw_is_sync(rq->cmd_flags);
633}
634
617#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) 635#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
636#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE)
618 637
619static inline int blk_queue_full(struct request_queue *q, int rw) 638static inline int blk_queue_full(struct request_queue *q, int sync)
620{ 639{
621 if (rw == READ) 640 if (sync)
622 return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags); 641 return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
623 return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); 642 return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
624} 643}
625 644
626static inline void blk_set_queue_full(struct request_queue *q, int rw) 645static inline void blk_set_queue_full(struct request_queue *q, int sync)
627{ 646{
628 if (rw == READ) 647 if (sync)
629 queue_flag_set(QUEUE_FLAG_READFULL, q); 648 queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
630 else 649 else
631 queue_flag_set(QUEUE_FLAG_WRITEFULL, q); 650 queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
632} 651}
633 652
634static inline void blk_clear_queue_full(struct request_queue *q, int rw) 653static inline void blk_clear_queue_full(struct request_queue *q, int sync)
635{ 654{
636 if (rw == READ) 655 if (sync)
637 queue_flag_clear(QUEUE_FLAG_READFULL, q); 656 queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
638 else 657 else
639 queue_flag_clear(QUEUE_FLAG_WRITEFULL, q); 658 queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
640} 659}
641 660
642 661
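The blkdev.h hunk above replaces the old rq_is_sync macro with rw_is_sync(): a request counts as sync if it is not a write at all, or if it is a write carrying REQ_RW_SYNC; REQ_NOIDLE is queried separately through rq_noidle(). A standalone restatement of the predicate — the flag values below are invented for the demo, since the kernel derives them from enum rq_flag_bits:

#include <stdbool.h>
#include <stdio.h>

/* Invented bit values; the kernel generates these from enum rq_flag_bits. */
#define REQ_RW		(1u << 0)	/* request is a write */
#define REQ_RW_SYNC	(1u << 1)	/* sync write (or read) */
#define REQ_NOIDLE	(1u << 2)	/* don't anticipate more IO after this */

static bool rw_is_sync(unsigned int rw_flags)
{
	/* reads are always sync; writes only when explicitly marked */
	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

int main(void)
{
	printf("read:        %d\n", rw_is_sync(0));
	printf("async write: %d\n", rw_is_sync(REQ_RW));
	printf("sync write:  %d\n", rw_is_sync(REQ_RW | REQ_RW_SYNC));
	printf("noidle sync: %d\n", rw_is_sync(REQ_RW | REQ_RW_SYNC | REQ_NOIDLE));
	return 0;
}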
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 6e915878e88c..d960889e92ef 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
144 144
145#ifdef __KERNEL__ 145#ifdef __KERNEL__
146#if defined(CONFIG_BLK_DEV_IO_TRACE) 146#if defined(CONFIG_BLK_DEV_IO_TRACE)
147
148#include <linux/sysfs.h>
149
147struct blk_trace { 150struct blk_trace {
148 int trace_state; 151 int trace_state;
149 struct rchan *rchan; 152 struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
194extern int blk_trace_startstop(struct request_queue *q, int start); 197extern int blk_trace_startstop(struct request_queue *q, int start);
195extern int blk_trace_remove(struct request_queue *q); 198extern int blk_trace_remove(struct request_queue *q);
196 199
200extern struct attribute_group blk_trace_attr_group;
201
197#else /* !CONFIG_BLK_DEV_IO_TRACE */ 202#else /* !CONFIG_BLK_DEV_IO_TRACE */
198#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) 203#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
199#define blk_trace_shutdown(q) do { } while (0) 204#define blk_trace_shutdown(q) do { } while (0)
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 455d83219fae..bc3ab7073695 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -146,10 +146,10 @@ extern void *alloc_large_system_hash(const char *tablename,
146 146
147#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ 147#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
148 148
149/* Only NUMA needs hash distribution. 149/* Only NUMA needs hash distribution. 64bit NUMA architectures have
150 * IA64 and x86_64 have sufficient vmalloc space. 150 * sufficient vmalloc space.
151 */ 151 */
152#if defined(CONFIG_NUMA) && (defined(CONFIG_IA64) || defined(CONFIG_X86_64)) 152#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
153#define HASHDIST_DEFAULT 1 153#define HASHDIST_DEFAULT 1
154#else 154#else
155#define HASHDIST_DEFAULT 0 155#define HASHDIST_DEFAULT 0
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index f19fd9045ea0..7b73bb8f1970 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -216,7 +216,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
216 get_block_t *, loff_t *); 216 get_block_t *, loff_t *);
217int generic_cont_expand_simple(struct inode *inode, loff_t size); 217int generic_cont_expand_simple(struct inode *inode, loff_t size);
218int block_commit_write(struct page *page, unsigned from, unsigned to); 218int block_commit_write(struct page *page, unsigned from, unsigned to);
219int block_page_mkwrite(struct vm_area_struct *vma, struct page *page, 219int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
220 get_block_t get_block); 220 get_block_t get_block);
221void block_sync_page(struct page *); 221void block_sync_page(struct page *);
222sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); 222sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
@@ -332,22 +332,10 @@ extern int __set_page_dirty_buffers(struct page *page);
332 332
333static inline void buffer_init(void) {} 333static inline void buffer_init(void) {}
334static inline int try_to_free_buffers(struct page *page) { return 1; } 334static inline int try_to_free_buffers(struct page *page) { return 1; }
335static inline int sync_blockdev(struct block_device *bdev) { return 0; }
336static inline int inode_has_buffers(struct inode *inode) { return 0; } 335static inline int inode_has_buffers(struct inode *inode) { return 0; }
337static inline void invalidate_inode_buffers(struct inode *inode) {} 336static inline void invalidate_inode_buffers(struct inode *inode) {}
338static inline int remove_inode_buffers(struct inode *inode) { return 1; } 337static inline int remove_inode_buffers(struct inode *inode) { return 1; }
339static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } 338static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
340static inline void invalidate_bdev(struct block_device *bdev) {}
341
342static inline struct super_block *freeze_bdev(struct block_device *sb)
343{
344 return NULL;
345}
346
347static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
348{
349 return 0;
350}
351 339
352#endif /* CONFIG_BLOCK */ 340#endif /* CONFIG_BLOCK */
353#endif /* _LINUX_BUFFER_HEAD_H */ 341#endif /* _LINUX_BUFFER_HEAD_H */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 499900d0cee7..665fa70e4094 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -15,6 +15,7 @@
15#include <linux/cgroupstats.h> 15#include <linux/cgroupstats.h>
16#include <linux/prio_heap.h> 16#include <linux/prio_heap.h>
17#include <linux/rwsem.h> 17#include <linux/rwsem.h>
18#include <linux/idr.h>
18 19
19#ifdef CONFIG_CGROUPS 20#ifdef CONFIG_CGROUPS
20 21
@@ -22,6 +23,7 @@ struct cgroupfs_root;
22struct cgroup_subsys; 23struct cgroup_subsys;
23struct inode; 24struct inode;
24struct cgroup; 25struct cgroup;
26struct css_id;
25 27
26extern int cgroup_init_early(void); 28extern int cgroup_init_early(void);
27extern int cgroup_init(void); 29extern int cgroup_init(void);
@@ -47,18 +49,24 @@ enum cgroup_subsys_id {
47 49
48/* Per-subsystem/per-cgroup state maintained by the system. */ 50/* Per-subsystem/per-cgroup state maintained by the system. */
49struct cgroup_subsys_state { 51struct cgroup_subsys_state {
50 /* The cgroup that this subsystem is attached to. Useful 52 /*
53 * The cgroup that this subsystem is attached to. Useful
51 * for subsystems that want to know about the cgroup 54 * for subsystems that want to know about the cgroup
52 * hierarchy structure */ 55 * hierarchy structure
56 */
53 struct cgroup *cgroup; 57 struct cgroup *cgroup;
54 58
55 /* State maintained by the cgroup system to allow subsystems 59 /*
60 * State maintained by the cgroup system to allow subsystems
56 * to be "busy". Should be accessed via css_get(), 61 * to be "busy". Should be accessed via css_get(),
 57 * css_tryget() and css_put(). */ 62 * css_tryget() and css_put().
63 */
58 64
59 atomic_t refcnt; 65 atomic_t refcnt;
60 66
61 unsigned long flags; 67 unsigned long flags;
68 /* ID for this css, if possible */
69 struct css_id *id;
62}; 70};
63 71
64/* bits in struct cgroup_subsys_state flags field */ 72/* bits in struct cgroup_subsys_state flags field */
@@ -120,19 +128,26 @@ static inline void css_put(struct cgroup_subsys_state *css)
120enum { 128enum {
121 /* Control Group is dead */ 129 /* Control Group is dead */
122 CGRP_REMOVED, 130 CGRP_REMOVED,
123 /* Control Group has previously had a child cgroup or a task, 131 /*
124 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */ 132 * Control Group has previously had a child cgroup or a task,
133 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
134 */
125 CGRP_RELEASABLE, 135 CGRP_RELEASABLE,
126 /* Control Group requires release notifications to userspace */ 136 /* Control Group requires release notifications to userspace */
127 CGRP_NOTIFY_ON_RELEASE, 137 CGRP_NOTIFY_ON_RELEASE,
138 /*
 139 * A thread in rmdir() is waiting for this cgroup.
140 */
141 CGRP_WAIT_ON_RMDIR,
128}; 142};
129 143
130struct cgroup { 144struct cgroup {
131 unsigned long flags; /* "unsigned long" so bitops work */ 145 unsigned long flags; /* "unsigned long" so bitops work */
132 146
133 /* count users of this cgroup. >0 means busy, but doesn't 147 /*
134 * necessarily indicate the number of tasks in the 148 * count users of this cgroup. >0 means busy, but doesn't
135 * cgroup */ 149 * necessarily indicate the number of tasks in the cgroup
150 */
136 atomic_t count; 151 atomic_t count;
137 152
138 /* 153 /*
@@ -142,7 +157,7 @@ struct cgroup {
142 struct list_head sibling; /* my parent's children */ 157 struct list_head sibling; /* my parent's children */
143 struct list_head children; /* my children */ 158 struct list_head children; /* my children */
144 159
145 struct cgroup *parent; /* my parent */ 160 struct cgroup *parent; /* my parent */
146 struct dentry *dentry; /* cgroup fs entry, RCU protected */ 161 struct dentry *dentry; /* cgroup fs entry, RCU protected */
147 162
148 /* Private pointers for each registered subsystem */ 163 /* Private pointers for each registered subsystem */
@@ -177,11 +192,12 @@ struct cgroup {
177 struct rcu_head rcu_head; 192 struct rcu_head rcu_head;
178}; 193};
179 194
180/* A css_set is a structure holding pointers to a set of 195/*
196 * A css_set is a structure holding pointers to a set of
181 * cgroup_subsys_state objects. This saves space in the task struct 197 * cgroup_subsys_state objects. This saves space in the task struct
182 * object and speeds up fork()/exit(), since a single inc/dec and a 198 * object and speeds up fork()/exit(), since a single inc/dec and a
183 * list_add()/del() can bump the reference count on the entire 199 * list_add()/del() can bump the reference count on the entire cgroup
184 * cgroup set for a task. 200 * set for a task.
185 */ 201 */
186 202
187struct css_set { 203struct css_set {
@@ -226,13 +242,8 @@ struct cgroup_map_cb {
226 void *state; 242 void *state;
227}; 243};
228 244
229/* struct cftype: 245/*
230 * 246 * struct cftype: handler definitions for cgroup control files
231 * The files in the cgroup filesystem mostly have a very simple read/write
232 * handling, some common function will take care of it. Nevertheless some cases
233 * (read tasks) are special and therefore I define this structure for every
234 * kind of file.
235 *
236 * 247 *
237 * When reading/writing to a file: 248 * When reading/writing to a file:
238 * - the cgroup to use is file->f_dentry->d_parent->d_fsdata 249 * - the cgroup to use is file->f_dentry->d_parent->d_fsdata
@@ -241,10 +252,17 @@ struct cgroup_map_cb {
241 252
242#define MAX_CFTYPE_NAME 64 253#define MAX_CFTYPE_NAME 64
243struct cftype { 254struct cftype {
244 /* By convention, the name should begin with the name of the 255 /*
245 * subsystem, followed by a period */ 256 * By convention, the name should begin with the name of the
257 * subsystem, followed by a period
258 */
246 char name[MAX_CFTYPE_NAME]; 259 char name[MAX_CFTYPE_NAME];
247 int private; 260 int private;
261 /*
262 * If not 0, file mode is set to this value, otherwise it will
263 * be figured out automatically
264 */
265 mode_t mode;
248 266
249 /* 267 /*
250 * If non-zero, defines the maximum length of string that can 268 * If non-zero, defines the maximum length of string that can
@@ -319,15 +337,20 @@ struct cgroup_scanner {
319 void (*process_task)(struct task_struct *p, 337 void (*process_task)(struct task_struct *p,
320 struct cgroup_scanner *scan); 338 struct cgroup_scanner *scan);
321 struct ptr_heap *heap; 339 struct ptr_heap *heap;
340 void *data;
322}; 341};
323 342
324/* Add a new file to the given cgroup directory. Should only be 343/*
325 * called by subsystems from within a populate() method */ 344 * Add a new file to the given cgroup directory. Should only be
345 * called by subsystems from within a populate() method
346 */
326int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, 347int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
327 const struct cftype *cft); 348 const struct cftype *cft);
328 349
329/* Add a set of new files to the given cgroup directory. Should 350/*
330 * only be called by subsystems from within a populate() method */ 351 * Add a set of new files to the given cgroup directory. Should
352 * only be called by subsystems from within a populate() method
353 */
331int cgroup_add_files(struct cgroup *cgrp, 354int cgroup_add_files(struct cgroup *cgrp,
332 struct cgroup_subsys *subsys, 355 struct cgroup_subsys *subsys,
333 const struct cftype cft[], 356 const struct cftype cft[],
@@ -339,15 +362,18 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
339 362
340int cgroup_task_count(const struct cgroup *cgrp); 363int cgroup_task_count(const struct cgroup *cgrp);
341 364
342/* Return true if the cgroup is a descendant of the current cgroup */ 365/* Return true if cgrp is a descendant of the task's cgroup */
343int cgroup_is_descendant(const struct cgroup *cgrp); 366int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
344 367
345/* Control Group subsystem type. See Documentation/cgroups.txt for details */ 368/*
369 * Control Group subsystem type.
370 * See Documentation/cgroups/cgroups.txt for details
371 */
346 372
347struct cgroup_subsys { 373struct cgroup_subsys {
348 struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss, 374 struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
349 struct cgroup *cgrp); 375 struct cgroup *cgrp);
350 void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); 376 int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
351 void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); 377 void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
352 int (*can_attach)(struct cgroup_subsys *ss, 378 int (*can_attach)(struct cgroup_subsys *ss,
353 struct cgroup *cgrp, struct task_struct *tsk); 379 struct cgroup *cgrp, struct task_struct *tsk);
@@ -364,6 +390,11 @@ struct cgroup_subsys {
364 int active; 390 int active;
365 int disabled; 391 int disabled;
366 int early_init; 392 int early_init;
393 /*
394 * True if this subsys uses ID. ID is not available before cgroup_init()
395 * (not available in early_init time.)
396 */
397 bool use_id;
367#define MAX_CGROUP_TYPE_NAMELEN 32 398#define MAX_CGROUP_TYPE_NAMELEN 32
368 const char *name; 399 const char *name;
369 400
@@ -386,6 +417,9 @@ struct cgroup_subsys {
386 */ 417 */
387 struct cgroupfs_root *root; 418 struct cgroupfs_root *root;
388 struct list_head sibling; 419 struct list_head sibling;
420 /* used when use_id == true */
421 struct idr idr;
422 spinlock_t id_lock;
389}; 423};
390 424
391#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys; 425#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
@@ -419,7 +453,8 @@ struct cgroup_iter {
419 struct list_head *task; 453 struct list_head *task;
420}; 454};
421 455
422/* To iterate across the tasks in a cgroup: 456/*
457 * To iterate across the tasks in a cgroup:
423 * 458 *
424 * 1) call cgroup_iter_start to intialize an iterator 459 * 1) call cgroup_iter_start to intialize an iterator
425 * 460 *
@@ -428,9 +463,10 @@ struct cgroup_iter {
428 * 463 *
429 * 3) call cgroup_iter_end() to destroy the iterator. 464 * 3) call cgroup_iter_end() to destroy the iterator.
430 * 465 *
431 * Or, call cgroup_scan_tasks() to iterate through every task in a cpuset. 466 * Or, call cgroup_scan_tasks() to iterate through every task in a
432 * - cgroup_scan_tasks() holds the css_set_lock when calling the test_task() 467 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
433 * callback, but not while calling the process_task() callback. 468 * the test_task() callback, but not while calling the process_task()
469 * callback.
434 */ 470 */
435void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it); 471void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
436struct task_struct *cgroup_iter_next(struct cgroup *cgrp, 472struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
@@ -439,6 +475,44 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
439int cgroup_scan_tasks(struct cgroup_scanner *scan); 475int cgroup_scan_tasks(struct cgroup_scanner *scan);
440int cgroup_attach_task(struct cgroup *, struct task_struct *); 476int cgroup_attach_task(struct cgroup *, struct task_struct *);
441 477
478/*
479 * A CSS ID is an ID for cgroup_subsys_state structs under a subsystem. It only
480 * works if cgroup_subsys.use_id == true, and can be used for lookup and scanning.
481 * A CSS ID is assigned automatically at cgroup allocation (create)
482 * and removed when the subsystem calls free_css_id(). This is because
483 * the lifetime of cgroup_subsys_state is the subsystem's responsibility.
484 *
485 * Lookup and scanning functions should be called under rcu_read_lock().
486 * Taking cgroup_mutex()/hierarchy_mutex() is not necessary for these calls,
487 * but the css returned can be "not populated yet" or "being destroyed".
488 * The caller should check the css and cgroup status.
489 */
490
491/*
492 * Typically called from ->destroy(), or wherever the subsystem frees
493 * its cgroup_subsys_state.
494 */
495void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
496
497/* Find a cgroup_subsys_state which has the given ID */
498
499struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
500
501/*
502 * Get a css whose id is greater than or equal to id, under the tree of root.
503 * Returns a cgroup_subsys_state or NULL.
504 */
505struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id,
506 struct cgroup_subsys_state *root, int *foundid);
507
508/* Returns true if root is ancestor of cg */
509bool css_is_ancestor(struct cgroup_subsys_state *cg,
510 const struct cgroup_subsys_state *root);
511
512/* Get id and depth of css */
513unsigned short css_id(struct cgroup_subsys_state *css);
514unsigned short css_depth(struct cgroup_subsys_state *css);
515
442#else /* !CONFIG_CGROUPS */ 516#else /* !CONFIG_CGROUPS */
443 517
444static inline int cgroup_init_early(void) { return 0; } 518static inline int cgroup_init_early(void) { return 0; }
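The CSS ID block above declares a lookup/scan API for subsystems that set use_id. A minimal sketch of how such a subsystem might walk its states with css_get_next(); the function name scan_css_tree and the starting id of 1 are illustrative, not part of this patch.

#include <linux/cgroup.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/* Hypothetical walker: visit every cgroup_subsys_state under @root for
 * subsystem @ss.  Assumes ss->use_id was set before cgroup_init(). */
static void scan_css_tree(struct cgroup_subsys *ss,
                          struct cgroup_subsys_state *root)
{
        struct cgroup_subsys_state *css;
        int id = 1, found;

        rcu_read_lock();
        while ((css = css_get_next(ss, id, root, &found)) != NULL) {
                /* css may be half-created or dying; a real user checks
                 * its cgroup's state before touching it */
                pr_info("css id=%d depth=%d\n", css_id(css), css_depth(css));
                id = found + 1;
        }
        rcu_read_unlock();
}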
diff --git a/include/linux/compat.h b/include/linux/compat.h
index b880864672de..f2ded21f9a3c 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -191,6 +191,12 @@ asmlinkage ssize_t compat_sys_readv(unsigned long fd,
191 const struct compat_iovec __user *vec, unsigned long vlen); 191 const struct compat_iovec __user *vec, unsigned long vlen);
192asmlinkage ssize_t compat_sys_writev(unsigned long fd, 192asmlinkage ssize_t compat_sys_writev(unsigned long fd,
193 const struct compat_iovec __user *vec, unsigned long vlen); 193 const struct compat_iovec __user *vec, unsigned long vlen);
194asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
195 const struct compat_iovec __user *vec,
196 unsigned long vlen, u32 pos_low, u32 pos_high);
197asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
198 const struct compat_iovec __user *vec,
199 unsigned long vlen, u32 pos_low, u32 pos_high);
194 200
195int compat_do_execve(char * filename, compat_uptr_t __user *argv, 201int compat_do_execve(char * filename, compat_uptr_t __user *argv,
196 compat_uptr_t __user *envp, struct pt_regs * regs); 202 compat_uptr_t __user *envp, struct pt_regs * regs);
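The new compat entry points above take the 64-bit file position as two 32-bit halves (pos_low/pos_high). A hedged sketch of the reassembly the implementation has to do; the helper name is made up for illustration.

#include <linux/types.h>

/* Illustrative only: rebuild the 64-bit position from the halves a
 * 32-bit userspace passes to preadv/pwritev. */
static inline loff_t compat_vec_pos(u32 pos_low, u32 pos_high)
{
        return ((loff_t)pos_high << 32) | pos_low;
}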
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d95da1020f1c..cebfdcd3dbdd 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -68,6 +68,7 @@ struct ftrace_branch_data {
68 unsigned long miss; 68 unsigned long miss;
69 unsigned long hit; 69 unsigned long hit;
70 }; 70 };
71 unsigned long miss_hit[2];
71 }; 72 };
72}; 73};
73 74
@@ -113,7 +114,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
113 * "Define 'is'", Bill Clinton 114 * "Define 'is'", Bill Clinton
114 * "Define 'if'", Steven Rostedt 115 * "Define 'if'", Steven Rostedt
115 */ 116 */
116#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \ 117#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
118#define __trace_if(cond) \
119 if (__builtin_constant_p((cond)) ? !!(cond) : \
117 ({ \ 120 ({ \
118 int ______r; \ 121 int ______r; \
119 static struct ftrace_branch_data \ 122 static struct ftrace_branch_data \
@@ -125,10 +128,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
125 .line = __LINE__, \ 128 .line = __LINE__, \
126 }; \ 129 }; \
127 ______r = !!(cond); \ 130 ______r = !!(cond); \
128 if (______r) \ 131 ______f.miss_hit[______r]++; \
129 ______f.hit++; \
130 else \
131 ______f.miss++; \
132 ______r; \ 132 ______r; \
133 })) 133 }))
134#endif /* CONFIG_PROFILE_ALL_BRANCHES */ 134#endif /* CONFIG_PROFILE_ALL_BRANCHES */
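The branch profiler above replaces the separate hit/miss counters with a two-element miss_hit[] array indexed by the normalized condition, so the bookkeeping itself no longer branches. A stand-alone illustration of the same trick; the struct and function here are not kernel code.

struct branch_counters {
        unsigned long miss_hit[2];      /* [0] = miss, [1] = hit */
};

static inline void count_branch(struct branch_counters *c, int cond)
{
        int r = !!cond;                 /* normalize to 0 or 1 */

        c->miss_hit[r]++;               /* one unconditional update */
}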
diff --git a/include/linux/connector.h b/include/linux/connector.h
index fc65d219d88c..b9966e64604e 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -39,8 +39,10 @@
39#define CN_IDX_V86D 0x4 39#define CN_IDX_V86D 0x4
40#define CN_VAL_V86D_UVESAFB 0x1 40#define CN_VAL_V86D_UVESAFB 0x1
41#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ 41#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */
42#define CN_DST_IDX 0x6
43#define CN_DST_VAL 0x1
42 44
43#define CN_NETLINK_USERS 6 45#define CN_NETLINK_USERS 7
44 46
45/* 47/*
46 * Maximum connector's message size. 48 * Maximum connector's message size.
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c2747ac2ae43..2643d848df90 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -23,7 +23,6 @@
23#include <linux/node.h> 23#include <linux/node.h>
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25#include <linux/cpumask.h> 25#include <linux/cpumask.h>
26#include <linux/mutex.h>
27 26
28struct cpu { 27struct cpu {
29 int node_id; /* The node which contains the CPU */ 28 int node_id; /* The node which contains the CPU */
@@ -103,16 +102,6 @@ extern struct sysdev_class cpu_sysdev_class;
103#ifdef CONFIG_HOTPLUG_CPU 102#ifdef CONFIG_HOTPLUG_CPU
104/* Stop CPUs going up and down. */ 103/* Stop CPUs going up and down. */
105 104
106static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
107{
108 mutex_lock(cpu_hp_mutex);
109}
110
111static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
112{
113 mutex_unlock(cpu_hp_mutex);
114}
115
116extern void get_online_cpus(void); 105extern void get_online_cpus(void);
117extern void put_online_cpus(void); 106extern void put_online_cpus(void);
118#define hotcpu_notifier(fn, pri) { \ 107#define hotcpu_notifier(fn, pri) { \
@@ -126,11 +115,6 @@ int cpu_down(unsigned int cpu);
126 115
127#else /* CONFIG_HOTPLUG_CPU */ 116#else /* CONFIG_HOTPLUG_CPU */
128 117
129static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
130{ }
131static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
132{ }
133
134#define get_online_cpus() do { } while (0) 118#define get_online_cpus() do { } while (0)
135#define put_online_cpus() do { } while (0) 119#define put_online_cpus() do { } while (0)
136#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) 120#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
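With the cpuhotplug_mutex_lock()/unlock() helpers removed, callers are expected to pin the set of online CPUs with get_online_cpus()/put_online_cpus(). A minimal sketch of that pattern; the walker function itself is hypothetical.

#include <linux/cpu.h>
#include <linux/cpumask.h>

static void for_each_online_cpu_do(void (*fn)(int cpu))
{
        int cpu;

        get_online_cpus();              /* hold off CPU hotplug */
        for_each_online_cpu(cpu)
                fn(cpu);
        put_online_cpus();
}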
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 90c6074a36ca..05ea1dd7d681 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -12,6 +12,7 @@
12#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/nodemask.h> 13#include <linux/nodemask.h>
14#include <linux/cgroup.h> 14#include <linux/cgroup.h>
15#include <linux/mm.h>
15 16
16#ifdef CONFIG_CPUSETS 17#ifdef CONFIG_CPUSETS
17 18
@@ -29,19 +30,29 @@ void cpuset_init_current_mems_allowed(void);
29void cpuset_update_task_memory_state(void); 30void cpuset_update_task_memory_state(void);
30int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); 31int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
31 32
32extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask); 33extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
33extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask); 34extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
34 35
35static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) 36static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
36{ 37{
37 return number_of_cpusets <= 1 || 38 return number_of_cpusets <= 1 ||
38 __cpuset_zone_allowed_softwall(z, gfp_mask); 39 __cpuset_node_allowed_softwall(node, gfp_mask);
39} 40}
40 41
41static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) 42static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
42{ 43{
43 return number_of_cpusets <= 1 || 44 return number_of_cpusets <= 1 ||
44 __cpuset_zone_allowed_hardwall(z, gfp_mask); 45 __cpuset_node_allowed_hardwall(node, gfp_mask);
46}
47
48static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
49{
50 return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
51}
52
53static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
54{
55 return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
45} 56}
46 57
47extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, 58extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -90,12 +101,12 @@ static inline void cpuset_init_smp(void) {}
90static inline void cpuset_cpus_allowed(struct task_struct *p, 101static inline void cpuset_cpus_allowed(struct task_struct *p,
91 struct cpumask *mask) 102 struct cpumask *mask)
92{ 103{
93 *mask = cpu_possible_map; 104 cpumask_copy(mask, cpu_possible_mask);
94} 105}
95static inline void cpuset_cpus_allowed_locked(struct task_struct *p, 106static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
96 struct cpumask *mask) 107 struct cpumask *mask)
97{ 108{
98 *mask = cpu_possible_map; 109 cpumask_copy(mask, cpu_possible_mask);
99} 110}
100 111
101static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) 112static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
@@ -112,6 +123,16 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
112 return 1; 123 return 1;
113} 124}
114 125
126static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
127{
128 return 1;
129}
130
131static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
132{
133 return 1;
134}
135
115static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) 136static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
116{ 137{
117 return 1; 138 return 1;
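The node-based checks above are now the primary interface, with the zone variants reduced to wrappers via zone_to_nid(). A small sketch of both call styles; the wrapper names are illustrative.

#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>

/* Caller that only knows a node id can skip the zone lookup entirely. */
static int may_alloc_on_node(int nid, gfp_t gfp_mask)
{
        return cpuset_node_allowed_hardwall(nid, gfp_mask);
}

/* Existing zone-based callers keep working through the wrapper. */
static int may_alloc_in_zone(struct zone *z, gfp_t gfp_mask)
{
        return cpuset_zone_allowed_hardwall(z, gfp_mask);
}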
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index af0e01d4c663..eb5c2ba2f81a 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
71struct dentry *debugfs_create_blob(const char *name, mode_t mode, 71struct dentry *debugfs_create_blob(const char *name, mode_t mode,
72 struct dentry *parent, 72 struct dentry *parent,
73 struct debugfs_blob_wrapper *blob); 73 struct debugfs_blob_wrapper *blob);
74
75bool debugfs_initialized(void);
76
74#else 77#else
75 78
76#include <linux/err.h> 79#include <linux/err.h>
@@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
183 return ERR_PTR(-ENODEV); 186 return ERR_PTR(-ENODEV);
184} 187}
185 188
189static inline bool debugfs_initialized(void)
190{
191 return false;
192}
193
186#endif 194#endif
187 195
188#endif 196#endif
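debugfs_initialized() lets code that runs very early (or on kernels where debugfs is built but never set up) skip file creation cleanly. A minimal sketch; the directory name is an example.

#include <linux/debugfs.h>

static struct dentry *example_debug_dir(void)
{
        if (!debugfs_initialized())     /* core not ready yet */
                return NULL;
        return debugfs_create_dir("example", NULL);
}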
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 8209e08969f9..66ec05a57955 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -139,6 +139,9 @@ struct target_type {
139 dm_ioctl_fn ioctl; 139 dm_ioctl_fn ioctl;
140 dm_merge_fn merge; 140 dm_merge_fn merge;
141 dm_busy_fn busy; 141 dm_busy_fn busy;
142
143 /* For internal device-mapper use. */
144 struct list_head list;
142}; 145};
143 146
144struct io_restrictions { 147struct io_restrictions {
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 600c5fb2daad..5e8b11d88f6f 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -28,6 +28,9 @@ struct dm_dirty_log_type {
28 const char *name; 28 const char *name;
29 struct module *module; 29 struct module *module;
30 30
31 /* For internal device-mapper use */
32 struct list_head list;
33
31 int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti, 34 int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
32 unsigned argc, char **argv); 35 unsigned argc, char **argv);
33 void (*dtr)(struct dm_dirty_log *log); 36 void (*dtr)(struct dm_dirty_log *log);
@@ -113,6 +116,16 @@ struct dm_dirty_log_type {
113 */ 116 */
114 int (*status)(struct dm_dirty_log *log, status_type_t status_type, 117 int (*status)(struct dm_dirty_log *log, status_type_t status_type,
115 char *result, unsigned maxlen); 118 char *result, unsigned maxlen);
119
120 /*
121 * is_remote_recovering is necessary for cluster mirroring. It provides
122 * a way to detect recovery on another node, so we aren't writing
123 * concurrently. This function is likely to block (when a cluster log
124 * is used).
125 *
126 * Returns: 0, 1
127 */
128 int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
116}; 129};
117 130
118int dm_dirty_log_type_register(struct dm_dirty_log_type *type); 131int dm_dirty_log_type_register(struct dm_dirty_log_type *type);
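The new is_remote_recovering() hook only matters for cluster-aware logs; a purely local log can answer immediately. A hedged sketch of the trivial local implementation (function name is illustrative).

#include <linux/dm-dirty-log.h>

/* A local-only log: nothing else can be recovering this region.  A
 * cluster log would query its peers here and may block while doing so. */
static int local_is_remote_recovering(struct dm_dirty_log *log,
                                      region_t region)
{
        return 0;
}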
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
new file mode 100644
index 000000000000..28d53cb7b5a2
--- /dev/null
+++ b/include/linux/dma-debug.h
@@ -0,0 +1,174 @@
1/*
2 * Copyright (C) 2008 Advanced Micro Devices, Inc.
3 *
4 * Author: Joerg Roedel <joerg.roedel@amd.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#ifndef __DMA_DEBUG_H
21#define __DMA_DEBUG_H
22
23#include <linux/types.h>
24
25struct device;
26struct scatterlist;
27struct bus_type;
28
29#ifdef CONFIG_DMA_API_DEBUG
30
31extern void dma_debug_add_bus(struct bus_type *bus);
32
33extern void dma_debug_init(u32 num_entries);
34
35extern void debug_dma_map_page(struct device *dev, struct page *page,
36 size_t offset, size_t size,
37 int direction, dma_addr_t dma_addr,
38 bool map_single);
39
40extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
41 size_t size, int direction, bool map_single);
42
43extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
44 int nents, int mapped_ents, int direction);
45
46extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
47 int nelems, int dir);
48
49extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
50 dma_addr_t dma_addr, void *virt);
51
52extern void debug_dma_free_coherent(struct device *dev, size_t size,
53 void *virt, dma_addr_t addr);
54
55extern void debug_dma_sync_single_for_cpu(struct device *dev,
56 dma_addr_t dma_handle, size_t size,
57 int direction);
58
59extern void debug_dma_sync_single_for_device(struct device *dev,
60 dma_addr_t dma_handle,
61 size_t size, int direction);
62
63extern void debug_dma_sync_single_range_for_cpu(struct device *dev,
64 dma_addr_t dma_handle,
65 unsigned long offset,
66 size_t size,
67 int direction);
68
69extern void debug_dma_sync_single_range_for_device(struct device *dev,
70 dma_addr_t dma_handle,
71 unsigned long offset,
72 size_t size, int direction);
73
74extern void debug_dma_sync_sg_for_cpu(struct device *dev,
75 struct scatterlist *sg,
76 int nelems, int direction);
77
78extern void debug_dma_sync_sg_for_device(struct device *dev,
79 struct scatterlist *sg,
80 int nelems, int direction);
81
82extern void debug_dma_dump_mappings(struct device *dev);
83
84#else /* CONFIG_DMA_API_DEBUG */
85
86static inline void dma_debug_add_bus(struct bus_type *bus)
87{
88}
89
90static inline void dma_debug_init(u32 num_entries)
91{
92}
93
94static inline void debug_dma_map_page(struct device *dev, struct page *page,
95 size_t offset, size_t size,
96 int direction, dma_addr_t dma_addr,
97 bool map_single)
98{
99}
100
101static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
102 size_t size, int direction,
103 bool map_single)
104{
105}
106
107static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
108 int nents, int mapped_ents, int direction)
109{
110}
111
112static inline void debug_dma_unmap_sg(struct device *dev,
113 struct scatterlist *sglist,
114 int nelems, int dir)
115{
116}
117
118static inline void debug_dma_alloc_coherent(struct device *dev, size_t size,
119 dma_addr_t dma_addr, void *virt)
120{
121}
122
123static inline void debug_dma_free_coherent(struct device *dev, size_t size,
124 void *virt, dma_addr_t addr)
125{
126}
127
128static inline void debug_dma_sync_single_for_cpu(struct device *dev,
129 dma_addr_t dma_handle,
130 size_t size, int direction)
131{
132}
133
134static inline void debug_dma_sync_single_for_device(struct device *dev,
135 dma_addr_t dma_handle,
136 size_t size, int direction)
137{
138}
139
140static inline void debug_dma_sync_single_range_for_cpu(struct device *dev,
141 dma_addr_t dma_handle,
142 unsigned long offset,
143 size_t size,
144 int direction)
145{
146}
147
148static inline void debug_dma_sync_single_range_for_device(struct device *dev,
149 dma_addr_t dma_handle,
150 unsigned long offset,
151 size_t size,
152 int direction)
153{
154}
155
156static inline void debug_dma_sync_sg_for_cpu(struct device *dev,
157 struct scatterlist *sg,
158 int nelems, int direction)
159{
160}
161
162static inline void debug_dma_sync_sg_for_device(struct device *dev,
163 struct scatterlist *sg,
164 int nelems, int direction)
165{
166}
167
168static inline void debug_dma_dump_mappings(struct device *dev)
169{
170}
171
172#endif /* CONFIG_DMA_API_DEBUG */
173
174#endif /* __DMA_DEBUG_H */
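The new header is consumed by architecture code: allocate the tracking entries once, then register the buses whose devices should be checked. A sketch of that wiring, loosely modelled on how a PCI-based architecture might do it; the entry count and function name are illustrative, and with CONFIG_DMA_API_DEBUG=n everything collapses to the empty stubs above.

#include <linux/init.h>
#include <linux/dma-debug.h>
#include <linux/pci.h>

#define EXAMPLE_DMA_DEBUG_ENTRIES       4096    /* illustrative pool size */

static int __init example_dma_debug_init(void)
{
        dma_debug_init(EXAMPLE_DMA_DEBUG_ENTRIES);      /* allocate tracking pool */
        dma_debug_add_bus(&pci_bus_type);               /* watch PCI devices */
        return 0;
}
fs_initcall(example_dma_debug_init);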
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ba9114ec5d3a..8083b6a36a38 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -3,6 +3,8 @@
3 3
4#include <linux/device.h> 4#include <linux/device.h>
5#include <linux/err.h> 5#include <linux/err.h>
6#include <linux/dma-attrs.h>
7#include <linux/scatterlist.h>
6 8
7/* These definitions mirror those in pci.h, so they can be used 9/* These definitions mirror those in pci.h, so they can be used
8 * interchangeably with their PCI_ counterparts */ 10 * interchangeably with their PCI_ counterparts */
@@ -13,6 +15,52 @@ enum dma_data_direction {
13 DMA_NONE = 3, 15 DMA_NONE = 3,
14}; 16};
15 17
18struct dma_map_ops {
19 void* (*alloc_coherent)(struct device *dev, size_t size,
20 dma_addr_t *dma_handle, gfp_t gfp);
21 void (*free_coherent)(struct device *dev, size_t size,
22 void *vaddr, dma_addr_t dma_handle);
23 dma_addr_t (*map_page)(struct device *dev, struct page *page,
24 unsigned long offset, size_t size,
25 enum dma_data_direction dir,
26 struct dma_attrs *attrs);
27 void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
28 size_t size, enum dma_data_direction dir,
29 struct dma_attrs *attrs);
30 int (*map_sg)(struct device *dev, struct scatterlist *sg,
31 int nents, enum dma_data_direction dir,
32 struct dma_attrs *attrs);
33 void (*unmap_sg)(struct device *dev,
34 struct scatterlist *sg, int nents,
35 enum dma_data_direction dir,
36 struct dma_attrs *attrs);
37 void (*sync_single_for_cpu)(struct device *dev,
38 dma_addr_t dma_handle, size_t size,
39 enum dma_data_direction dir);
40 void (*sync_single_for_device)(struct device *dev,
41 dma_addr_t dma_handle, size_t size,
42 enum dma_data_direction dir);
43 void (*sync_single_range_for_cpu)(struct device *dev,
44 dma_addr_t dma_handle,
45 unsigned long offset,
46 size_t size,
47 enum dma_data_direction dir);
48 void (*sync_single_range_for_device)(struct device *dev,
49 dma_addr_t dma_handle,
50 unsigned long offset,
51 size_t size,
52 enum dma_data_direction dir);
53 void (*sync_sg_for_cpu)(struct device *dev,
54 struct scatterlist *sg, int nents,
55 enum dma_data_direction dir);
56 void (*sync_sg_for_device)(struct device *dev,
57 struct scatterlist *sg, int nents,
58 enum dma_data_direction dir);
59 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
60 int (*dma_supported)(struct device *dev, u64 mask);
61 int is_phys;
62};
63
16#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) 64#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
17 65
18/* 66/*
@@ -67,7 +115,7 @@ static inline u64 dma_get_mask(struct device *dev)
67{ 115{
68 if (dev && dev->dma_mask && *dev->dma_mask) 116 if (dev && dev->dma_mask && *dev->dma_mask)
69 return *dev->dma_mask; 117 return *dev->dma_mask;
70 return DMA_32BIT_MASK; 118 return DMA_BIT_MASK(32);
71} 119}
72 120
73extern u64 dma_get_required_mask(struct device *dev); 121extern u64 dma_get_required_mask(struct device *dev);
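struct dma_map_ops turns the per-architecture DMA API into a table of callbacks. A hypothetical minimal instance for hardware where bus addresses equal physical addresses; the names here are made up, and page_to_phys() is assumed to be provided by the architecture. Callbacks that are not needed simply stay NULL.

#include <linux/dma-mapping.h>
#include <asm/io.h>

static dma_addr_t flat_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        return page_to_phys(page) + offset;     /* identity mapping */
}

static struct dma_map_ops flat_dma_ops = {
        .map_page       = flat_map_page,
        .is_phys        = 1,
};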
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index af1dab41674b..1a455f1f86d7 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -11,6 +11,7 @@
11 11
12#define DMA_PTE_READ (1) 12#define DMA_PTE_READ (1)
13#define DMA_PTE_WRITE (2) 13#define DMA_PTE_WRITE (2)
14#define DMA_PTE_SNP (1 << 11)
14 15
15struct intel_iommu; 16struct intel_iommu;
16struct dmar_domain; 17struct dmar_domain;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1956c8d46d32..2e2aa3df170c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -23,9 +23,6 @@
23 23
24#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/uio.h> 25#include <linux/uio.h>
26#include <linux/kref.h>
27#include <linux/completion.h>
28#include <linux/rcupdate.h>
29#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
30 27
31/** 28/**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
205/** 202/**
206 * struct dma_device - info on the entity supplying DMA services 203 * struct dma_device - info on the entity supplying DMA services
207 * @chancnt: how many DMA channels are supported 204 * @chancnt: how many DMA channels are supported
205 * @privatecnt: how many DMA channels are requested by dma_request_channel
208 * @channels: the list of struct dma_chan 206 * @channels: the list of struct dma_chan
209 * @global_node: list_head for global dma_device_list 207 * @global_node: list_head for global dma_device_list
210 * @cap_mask: one or more dma_capability flags 208 * @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
227struct dma_device { 225struct dma_device {
228 226
229 unsigned int chancnt; 227 unsigned int chancnt;
228 unsigned int privatecnt;
230 struct list_head channels; 229 struct list_head channels;
231 struct list_head global_node; 230 struct list_head global_node;
232 dma_cap_mask_t cap_mask; 231 dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
291} 290}
292#endif 291#endif
293 292
293#ifdef CONFIG_ASYNC_TX_DMA
294#define async_dmaengine_get() dmaengine_get()
295#define async_dmaengine_put() dmaengine_put()
296#define async_dma_find_channel(type) dma_find_channel(type)
297#else
298static inline void async_dmaengine_get(void)
299{
300}
301static inline void async_dmaengine_put(void)
302{
303}
304static inline struct dma_chan *
305async_dma_find_channel(enum dma_transaction_type type)
306{
307 return NULL;
308}
309#endif
310
294dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, 311dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
295 void *dest, void *src, size_t len); 312 void *dest, void *src, size_t len);
296dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, 313dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
337 set_bit(tx_type, dstp->bits); 354 set_bit(tx_type, dstp->bits);
338} 355}
339 356
357#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
358static inline void
359__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
360{
361 clear_bit(tx_type, dstp->bits);
362}
363
340#define dma_cap_zero(mask) __dma_cap_zero(&(mask)) 364#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
341static inline void __dma_cap_zero(dma_cap_mask_t *dstp) 365static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
342{ 366{
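dma_cap_clear() completes the capability-mask helpers (zero/set/clear). A short sketch of building a mask and privately requesting a channel; channels obtained this way are what the new privatecnt field counts. The function name is illustrative.

#include <linux/dmaengine.h>

static struct dma_chan *example_grab_memcpy_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        dma_cap_set(DMA_XOR, mask);
        dma_cap_clear(DMA_XOR, mask);           /* keep only DMA_MEMCPY */

        return dma_request_channel(mask, NULL, NULL);
}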
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index f28440784cf0..e397dc342cda 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -24,16 +24,17 @@
24#include <linux/acpi.h> 24#include <linux/acpi.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/msi.h> 26#include <linux/msi.h>
27#include <linux/irqreturn.h>
27 28
28#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
29struct intel_iommu; 29struct intel_iommu;
30 30#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
31struct dmar_drhd_unit { 31struct dmar_drhd_unit {
32 struct list_head list; /* list of drhd units */ 32 struct list_head list; /* list of drhd units */
33 struct acpi_dmar_header *hdr; /* ACPI header */ 33 struct acpi_dmar_header *hdr; /* ACPI header */
34 u64 reg_base_addr; /* register base address*/ 34 u64 reg_base_addr; /* register base address*/
35 struct pci_dev **devices; /* target device array */ 35 struct pci_dev **devices; /* target device array */
36 int devices_cnt; /* target device count */ 36 int devices_cnt; /* target device count */
37 u16 segment; /* PCI domain */
37 u8 ignored:1; /* ignore drhd */ 38 u8 ignored:1; /* ignore drhd */
38 u8 include_all:1; 39 u8 include_all:1;
39 struct intel_iommu *iommu; 40 struct intel_iommu *iommu;
@@ -44,12 +45,20 @@ extern struct list_head dmar_drhd_units;
44#define for_each_drhd_unit(drhd) \ 45#define for_each_drhd_unit(drhd) \
45 list_for_each_entry(drhd, &dmar_drhd_units, list) 46 list_for_each_entry(drhd, &dmar_drhd_units, list)
46 47
48#define for_each_active_iommu(i, drhd) \
49 list_for_each_entry(drhd, &dmar_drhd_units, list) \
50 if (i=drhd->iommu, drhd->ignored) {} else
51
52#define for_each_iommu(i, drhd) \
53 list_for_each_entry(drhd, &dmar_drhd_units, list) \
54 if (i=drhd->iommu, 0) {} else
55
47extern int dmar_table_init(void); 56extern int dmar_table_init(void);
48extern int dmar_dev_scope_init(void); 57extern int dmar_dev_scope_init(void);
49 58
50/* Intel IOMMU detection */ 59/* Intel IOMMU detection */
51extern void detect_intel_iommu(void); 60extern void detect_intel_iommu(void);
52 61extern int enable_drhd_fault_handling(void);
53 62
54extern int parse_ioapics_under_ir(void); 63extern int parse_ioapics_under_ir(void);
55extern int alloc_iommu(struct dmar_drhd_unit *); 64extern int alloc_iommu(struct dmar_drhd_unit *);
@@ -63,12 +72,12 @@ static inline int dmar_table_init(void)
63{ 72{
64 return -ENODEV; 73 return -ENODEV;
65} 74}
75static inline int enable_drhd_fault_handling(void)
76{
77 return -1;
78}
66#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */ 79#endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */
67 80
68#ifdef CONFIG_INTR_REMAP
69extern int intr_remapping_enabled;
70extern int enable_intr_remapping(int);
71
72struct irte { 81struct irte {
73 union { 82 union {
74 struct { 83 struct {
@@ -97,6 +106,12 @@ struct irte {
97 __u64 high; 106 __u64 high;
98 }; 107 };
99}; 108};
109#ifdef CONFIG_INTR_REMAP
110extern int intr_remapping_enabled;
111extern int enable_intr_remapping(int);
112extern void disable_intr_remapping(void);
113extern int reenable_intr_remapping(int);
114
100extern int get_irte(int irq, struct irte *entry); 115extern int get_irte(int irq, struct irte *entry);
101extern int modify_irte(int irq, struct irte *irte_modified); 116extern int modify_irte(int irq, struct irte *irte_modified);
102extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); 117extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count);
@@ -111,14 +126,40 @@ extern int irq_remapped(int irq);
111extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); 126extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
112extern struct intel_iommu *map_ioapic_to_ir(int apic); 127extern struct intel_iommu *map_ioapic_to_ir(int apic);
113#else 128#else
129static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
130{
131 return -1;
132}
133static inline int modify_irte(int irq, struct irte *irte_modified)
134{
135 return -1;
136}
137static inline int free_irte(int irq)
138{
139 return -1;
140}
141static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle)
142{
143 return -1;
144}
145static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index,
146 u16 sub_handle)
147{
148 return -1;
149}
150static inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
151{
152 return NULL;
153}
154static inline struct intel_iommu *map_ioapic_to_ir(int apic)
155{
156 return NULL;
157}
114#define irq_remapped(irq) (0) 158#define irq_remapped(irq) (0)
115#define enable_intr_remapping(mode) (-1) 159#define enable_intr_remapping(mode) (-1)
116#define intr_remapping_enabled (0) 160#define intr_remapping_enabled (0)
117#endif 161#endif
118 162
119#ifdef CONFIG_DMAR
120extern const char *dmar_get_fault_reason(u8 fault_reason);
121
122/* Can't use the common MSI interrupt functions 163/* Can't use the common MSI interrupt functions
123 * since DMAR is not a pci device 164 * since DMAR is not a pci device
124 */ 165 */
@@ -127,8 +168,10 @@ extern void dmar_msi_mask(unsigned int irq);
127extern void dmar_msi_read(int irq, struct msi_msg *msg); 168extern void dmar_msi_read(int irq, struct msi_msg *msg);
128extern void dmar_msi_write(int irq, struct msi_msg *msg); 169extern void dmar_msi_write(int irq, struct msi_msg *msg);
129extern int dmar_set_interrupt(struct intel_iommu *iommu); 170extern int dmar_set_interrupt(struct intel_iommu *iommu);
171extern irqreturn_t dmar_fault(int irq, void *dev_id);
130extern int arch_setup_dmar_msi(unsigned int irq); 172extern int arch_setup_dmar_msi(unsigned int irq);
131 173
174#ifdef CONFIG_DMAR
132extern int iommu_detected, no_iommu; 175extern int iommu_detected, no_iommu;
133extern struct list_head dmar_rmrr_units; 176extern struct list_head dmar_rmrr_units;
134struct dmar_rmrr_unit { 177struct dmar_rmrr_unit {
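for_each_iommu()/for_each_active_iommu() walk the DRHD list declared above; the active variant skips units whose ignored flag is set. A minimal sketch of a walker that also uses the new segment field (the function name is illustrative).

#include <linux/dmar.h>
#include <linux/kernel.h>

static void example_print_active_drhds(void)
{
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;

        for_each_active_iommu(iommu, drhd)
                pr_info("DRHD at 0x%llx, PCI segment %d\n",
                        (unsigned long long)drhd->reg_base_addr,
                        drhd->segment);
}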
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index d741b9ceb0e0..bb5489c82c99 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -47,7 +47,8 @@ extern int dmi_get_year(int field);
47extern int dmi_name_in_vendors(const char *str); 47extern int dmi_name_in_vendors(const char *str);
48extern int dmi_name_in_serial(const char *str); 48extern int dmi_name_in_serial(const char *str);
49extern int dmi_available; 49extern int dmi_available;
50extern int dmi_walk(void (*decode)(const struct dmi_header *)); 50extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
51 void *private_data);
51extern bool dmi_match(enum dmi_field f, const char *str); 52extern bool dmi_match(enum dmi_field f, const char *str);
52 53
53#else 54#else
@@ -61,8 +62,8 @@ static inline int dmi_get_year(int year) { return 0; }
61static inline int dmi_name_in_vendors(const char *s) { return 0; } 62static inline int dmi_name_in_vendors(const char *s) { return 0; }
62static inline int dmi_name_in_serial(const char *s) { return 0; } 63static inline int dmi_name_in_serial(const char *s) { return 0; }
63#define dmi_available 0 64#define dmi_available 0
64static inline int dmi_walk(void (*decode)(const struct dmi_header *)) 65static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *),
65 { return -1; } 66 void *private_data) { return -1; }
66static inline bool dmi_match(enum dmi_field f, const char *str) 67static inline bool dmi_match(enum dmi_field f, const char *str)
67 { return false; } 68 { return false; }
68static inline const struct dmi_system_id * 69static inline const struct dmi_system_id *
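dmi_walk() now hands a caller-supplied pointer to the decode callback, so per-walk state needs no globals. A hedged sketch that just counts DMI records (the names are illustrative).

#include <linux/dmi.h>

struct dmi_counter {
        int seen;
};

static void example_count_entry(const struct dmi_header *dh, void *priv)
{
        struct dmi_counter *c = priv;

        c->seen++;                      /* one more DMI record */
}

static int example_count_dmi_entries(void)
{
        struct dmi_counter c = { .seen = 0 };

        if (dmi_walk(example_count_entry, &c) < 0)
                return -1;              /* DMI tables not available */
        return c.seen;
}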
diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h
deleted file mode 100644
index d3c65e48a2e7..000000000000
--- a/include/linux/ds1wm.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/* platform data for the DS1WM driver */
2
3struct ds1wm_platform_data {
4 int bus_shift; /* number of shifts needed to calculate the
5 * offset between DS1WM registers;
6 * e.g. on h5xxx and h2200 this is 2
7 * (registers aligned to 4-byte boundaries),
8 * while on hx4700 this is 1 */
9 int active_high;
10 void (*enable)(struct platform_device *pdev);
11 void (*disable)(struct platform_device *pdev);
12};
diff --git a/include/linux/dst.h b/include/linux/dst.h
new file mode 100644
index 000000000000..e26fed84b1aa
--- /dev/null
+++ b/include/linux/dst.h
@@ -0,0 +1,587 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __DST_H
17#define __DST_H
18
19#include <linux/types.h>
20#include <linux/connector.h>
21
22#define DST_NAMELEN 32
23#define DST_NAME "dst"
24
25enum {
26 /* Remove node with given id from storage */
27 DST_DEL_NODE = 0,
28 /* Add remote node with given id to the storage */
29 DST_ADD_REMOTE,
30 /* Add local node with given id to the storage to be exported and used by remote peers */
31 DST_ADD_EXPORT,
32 /* Crypto initialization command (hash/cipher used to protect the connection) */
33 DST_CRYPTO,
34 /* Security attributes for given connection (permissions for example) */
35 DST_SECURITY,
36 /* Register given node in the block layer subsystem */
37 DST_START,
38 DST_CMD_MAX
39};
40
41struct dst_ctl
42{
43 /* Storage name */
44 char name[DST_NAMELEN];
45 /* Command flags */
46 __u32 flags;
47 /* Command itself (see above) */
48 __u32 cmd;
49 /* Maximum number of pages per single request in this device */
50 __u32 max_pages;
51 /* Stale/error transaction scanning timeout in milliseconds */
52 __u32 trans_scan_timeout;
53 /* Maximum number of retry sends before completing transaction as broken */
54 __u32 trans_max_retries;
55 /* Storage size */
56 __u64 size;
57};
58
59/* Reply command carries completion status */
60struct dst_ctl_ack
61{
62 struct cn_msg msg;
63 int error;
64 int unused[3];
65};
66
67/*
68 * Unfortunately the socket address structure is not exported to userspace
69 * and is redefined there.
70 */
71#define SADDR_MAX_DATA 128
72
73struct saddr {
74 /* address family, AF_xxx */
75 unsigned short sa_family;
76 /* protocol address, up to SADDR_MAX_DATA bytes */
77 char sa_data[SADDR_MAX_DATA];
78 /* Number of bytes used in sa_data */
79 unsigned short sa_data_len;
80};
81
82/* Address structure */
83struct dst_network_ctl
84{
85 /* Socket type: datagram, stream...*/
86 unsigned int type;
87 /* Let me guess, is it a Jupiter diameter? */
88 unsigned int proto;
89 /* Peer's address */
90 struct saddr addr;
91};
92
93struct dst_crypto_ctl
94{
95 /* Cipher and hash names */
96 char cipher_algo[DST_NAMELEN];
97 char hash_algo[DST_NAMELEN];
98
99 /* Key sizes. Can be zero for digest for example */
100 unsigned int cipher_keysize, hash_keysize;
101 /* Alignment. Calculated by the DST itself. */
102 unsigned int crypto_attached_size;
103 /* Number of threads to perform crypto operations */
104 int thread_num;
105};
106
107/* Export security attributes have these bits checked when a client connects */
108#define DST_PERM_READ (1<<0)
109#define DST_PERM_WRITE (1<<1)
110
111/*
112 * Right now it is a simple model, where each remote address
113 * is assigned a set of permissions it is allowed to perform.
114 * In the real world a block device does not know anything but
115 * reading and writing, so this should be more than enough.
116 */
117struct dst_secure_user
118{
119 unsigned int permissions;
120 struct saddr addr;
121};
122
123/*
124 * Export control command: device to export and network address to accept
125 * clients to work with given device
126 */
127struct dst_export_ctl
128{
129 char device[DST_NAMELEN];
130 struct dst_network_ctl ctl;
131};
132
133enum {
134 DST_CFG = 1, /* Request remote configuration */
135 DST_IO, /* IO command */
136 DST_IO_RESPONSE, /* IO response */
137 DST_PING, /* Keepalive message */
138 DST_NCMD_MAX,
139};
140
141struct dst_cmd
142{
143 /* Network command itself, see above */
144 __u32 cmd;
145 /*
146 * Size of the attached data
147 * (in most cases, for READ command it means how many bytes were requested)
148 */
149 __u32 size;
150 /* Crypto size: number of attached bytes with digest/hmac */
151 __u32 csize;
152 /* Here we can carry secret data */
153 __u32 reserved;
154 /* Read/write bits, see how they are encoded in bio structure */
155 __u64 rw;
156 /* BIO flags */
157 __u64 flags;
158 /* Unique command id (like transaction ID) */
159 __u64 id;
160 /* Sector to start IO from */
161 __u64 sector;
162 /* Hash data is placed after this header */
163 __u8 hash[0];
164};
165
166/*
167 * Convert command to/from network byte order.
168 * We do not use hton*() functions, since there is
169 * no 64-bit implementation.
170 */
171static inline void dst_convert_cmd(struct dst_cmd *c)
172{
173 c->cmd = __cpu_to_be32(c->cmd);
174 c->csize = __cpu_to_be32(c->csize);
175 c->size = __cpu_to_be32(c->size);
176 c->sector = __cpu_to_be64(c->sector);
177 c->id = __cpu_to_be64(c->id);
178 c->flags = __cpu_to_be64(c->flags);
179 c->rw = __cpu_to_be64(c->rw);
180}
181
182/* Transaction id */
183typedef __u64 dst_gen_t;
184
185#ifdef __KERNEL__
186
187#include <linux/blkdev.h>
188#include <linux/bio.h>
189#include <linux/device.h>
190#include <linux/mempool.h>
191#include <linux/net.h>
192#include <linux/poll.h>
193#include <linux/rbtree.h>
194
195#ifdef CONFIG_DST_DEBUG
196#define dprintk(f, a...) printk(KERN_NOTICE f, ##a)
197#else
198static inline void __attribute__ ((format (printf, 1, 2)))
199 dprintk(const char *fmt, ...) {}
200#endif
201
202struct dst_node;
203
204struct dst_trans
205{
206 /* DST node we are working with */
207 struct dst_node *n;
208
209 /* Entry inside transaction tree */
210 struct rb_node trans_entry;
211
212 /* Merlin kills this transaction when this memory cell equals zero */
213 atomic_t refcnt;
214
215 /* How this transaction should be processed by crypto engine */
216 short enc;
217 /* How many times this transaction was resent */
218 short retries;
219 /* Completion status */
220 int error;
221
222 /* When did we send it to the remote peer */
223 long send_time;
224
225 /* My name is...
226 * Well, computers do not speak; they have a unique id instead */
227 dst_gen_t gen;
228
229 /* Block IO we are working with */
230 struct bio *bio;
231
232 /* Network command for above block IO request */
233 struct dst_cmd cmd;
234};
235
236struct dst_crypto_engine
237{
238 /* What should we do with all block requests */
239 struct crypto_hash *hash;
240 struct crypto_ablkcipher *cipher;
241
242 /* Pool of pages used to encrypt data into before sending */
243 int page_num;
244 struct page **pages;
245
246 /* What to do with current request */
247 int enc;
248 /* Who we are and where do we go */
249 struct scatterlist *src, *dst;
250
251 /* Maximum timeout waiting for encryption to be completed */
252 long timeout;
253 /* IV is a 64-bit sequential counter */
254 u64 iv;
255
256 /* Secret data */
257 void *private;
258
259 /* Cached temporary data lives here */
260 int size;
261 void *data;
262};
263
264struct dst_state
265{
266 /* The main state protection */
267 struct mutex state_lock;
268
269 /* Polling machinery for sockets */
270 wait_queue_t wait;
271 wait_queue_head_t *whead;
272 /* Most of events are being waited here */
273 wait_queue_head_t thread_wait;
274
275 /* Who owns this? */
276 struct dst_node *node;
277
278 /* Network address for this state */
279 struct dst_network_ctl ctl;
280
281 /* Permissions to work with: read-only or rw connection */
282 u32 permissions;
283
284 /* Called when we need to clean private data */
285 void (* cleanup)(struct dst_state *st);
286
287 /* Used by the server: BIO completion queues BIOs here */
288 struct list_head request_list;
289 spinlock_t request_lock;
290
291 /* Guess what? No, it is not number of planets */
292 atomic_t refcnt;
293
294 /* This flags is set when connection should be dropped */
295 int need_exit;
296
297 /*
298 * Socket to work with. Second pointer is used for
299 * lockless check if socket was changed before performing
300 * next action (like working with cached polling result)
301 */
302 struct socket *socket, *read_socket;
303
304 /* Cached preallocated data */
305 void *data;
306 unsigned int size;
307
308 /* Currently processed command */
309 struct dst_cmd cmd;
310};
311
312struct dst_info
313{
314 /* Device size */
315 u64 size;
316
317 /* Local device name for export devices */
318 char local[DST_NAMELEN];
319
320 /* Network setup */
321 struct dst_network_ctl net;
322
323 /* Sysfs bits use this */
324 struct device device;
325};
326
327struct dst_node
328{
329 struct list_head node_entry;
330
331 /* Hi, my name is stored here */
332 char name[DST_NAMELEN];
333 /* My cache name is stored here */
334 char cache_name[DST_NAMELEN];
335
336 /* Block device attached to given node.
337 * Only valid for exporting nodes */
338 struct block_device *bdev;
339 /* Network state machine for given peer */
340 struct dst_state *state;
341
342 /* Block IO machinery */
343 struct request_queue *queue;
344 struct gendisk *disk;
345
346 /* Number of threads in processing pool */
347 int thread_num;
348 /* Maximum number of pages in single IO */
349 int max_pages;
350
351 /* I'm that big in bytes */
352 loff_t size;
353
354 /* Exported to userspace node information */
355 struct dst_info *info;
356
357 /*
358 * Security attribute list.
359 * Used only by exporting node currently.
360 */
361 struct list_head security_list;
362 struct mutex security_lock;
363
364 /*
365 * When this underflows below zero, the universe collapses.
366 * But this will not happen, since node will be freed,
367 * when reference counter reaches zero.
368 */
369 atomic_t refcnt;
370
371 /* How precisely should I be started? */
372 int (*start)(struct dst_node *);
373
374 /* Crypto capabilities */
375 struct dst_crypto_ctl crypto;
376 u8 *hash_key;
377 u8 *cipher_key;
378
379 /* Pool of processing thread */
380 struct thread_pool *pool;
381
382 /* Transaction IDs live here */
383 atomic_long_t gen;
384
385 /*
386 * How frequently and how many times transaction
387 * tree should be scanned to drop stale objects.
388 */
389 long trans_scan_timeout;
390 int trans_max_retries;
391
392 /* Small gnomes live here */
393 struct rb_root trans_root;
394 struct mutex trans_lock;
395
396 /*
397 * Transaction cache/memory pool.
398 * It is big enough to contain not only transaction
399 * itself, but additional crypto data (digest/hmac).
400 */
401 struct kmem_cache *trans_cache;
402 mempool_t *trans_pool;
403
404 /* This entity scans transaction tree */
405 struct delayed_work trans_work;
406
407 wait_queue_head_t wait;
408};
409
410/* Kernel representation of the security attribute */
411struct dst_secure
412{
413 struct list_head sec_entry;
414 struct dst_secure_user sec;
415};
416
417int dst_process_bio(struct dst_node *n, struct bio *bio);
418
419int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r);
420int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le);
421
422static inline struct dst_state *dst_state_get(struct dst_state *st)
423{
424 BUG_ON(atomic_read(&st->refcnt) == 0);
425 atomic_inc(&st->refcnt);
426 return st;
427}
428
429void dst_state_put(struct dst_state *st);
430
431struct dst_state *dst_state_alloc(struct dst_node *n);
432int dst_state_socket_create(struct dst_state *st);
433void dst_state_socket_release(struct dst_state *st);
434
435void dst_state_exit_connected(struct dst_state *st);
436
437int dst_state_schedule_receiver(struct dst_state *st);
438
439void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str);
440
441static inline void dst_state_lock(struct dst_state *st)
442{
443 mutex_lock(&st->state_lock);
444}
445
446static inline void dst_state_unlock(struct dst_state *st)
447{
448 mutex_unlock(&st->state_lock);
449}
450
451void dst_poll_exit(struct dst_state *st);
452int dst_poll_init(struct dst_state *st);
453
454static inline unsigned int dst_state_poll(struct dst_state *st)
455{
456 unsigned int revents = POLLHUP | POLLERR;
457
458 dst_state_lock(st);
459 if (st->socket)
460 revents = st->socket->ops->poll(NULL, st->socket, NULL);
461 dst_state_unlock(st);
462
463 return revents;
464}
465
466static inline int dst_thread_setup(void *private, void *data)
467{
468 return 0;
469}
470
471void dst_node_put(struct dst_node *n);
472
473static inline struct dst_node *dst_node_get(struct dst_node *n)
474{
475 atomic_inc(&n->refcnt);
476 return n;
477}
478
479int dst_data_recv(struct dst_state *st, void *data, unsigned int size);
480int dst_recv_cdata(struct dst_state *st, void *cdata);
481int dst_data_send_header(struct socket *sock,
482 void *data, unsigned int size, int more);
483
484int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio);
485
486int dst_process_io(struct dst_state *st);
487int dst_export_crypto(struct dst_node *n, struct bio *bio);
488int dst_export_send_bio(struct bio *bio);
489int dst_start_export(struct dst_node *n);
490
491int __init dst_export_init(void);
492void dst_export_exit(void);
493
494/* Private structure for export block IO requests */
495struct dst_export_priv
496{
497 struct list_head request_entry;
498 struct dst_state *state;
499 struct bio *bio;
500 struct dst_cmd cmd;
501};
502
503static inline void dst_trans_get(struct dst_trans *t)
504{
505 atomic_inc(&t->refcnt);
506}
507
508struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen);
509int dst_trans_remove(struct dst_trans *t);
510int dst_trans_remove_nolock(struct dst_trans *t);
511void dst_trans_put(struct dst_trans *t);
512
513/*
514 * Convert bio into network command.
515 */
516static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd,
517 u32 command, u64 id)
518{
519 cmd->cmd = command;
520 cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS;
521 cmd->rw = bio->bi_rw;
522 cmd->size = bio->bi_size;
523 cmd->csize = 0;
524 cmd->id = id;
525 cmd->sector = bio->bi_sector;
526};
527
528int dst_trans_send(struct dst_trans *t);
529int dst_trans_crypto(struct dst_trans *t);
530
531int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl);
532void dst_node_crypto_exit(struct dst_node *n);
533
534static inline int dst_need_crypto(struct dst_node *n)
535{
536 struct dst_crypto_ctl *c = &n->crypto;
537 /*
538 * Logical OR would be appropriate here, but the bitwise one produces
539 * more optimal code (no branch), so it is used instead.
540 */
541 return (c->hash_algo[0] | c->cipher_algo[0]);
542}
543
544int dst_node_trans_init(struct dst_node *n, unsigned int size);
545void dst_node_trans_exit(struct dst_node *n);
546
547/*
548 * Pool of threads.
549 * Ready list contains threads currently free to be used,
550 * active one contains threads with some work scheduled for them.
551 * Caller can wait in given queue when thread is ready.
552 */
553struct thread_pool
554{
555 int thread_num;
556 struct mutex thread_lock;
557 struct list_head ready_list, active_list;
558
559 wait_queue_head_t wait;
560};
561
562void thread_pool_del_worker(struct thread_pool *p);
563void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id);
564int thread_pool_add_worker(struct thread_pool *p,
565 char *name,
566 unsigned int id,
567 void *(* init)(void *data),
568 void (* cleanup)(void *data),
569 void *data);
570
571void thread_pool_destroy(struct thread_pool *p);
572struct thread_pool *thread_pool_create(int num, char *name,
573 void *(* init)(void *data),
574 void (* cleanup)(void *data),
575 void *data);
576
577int thread_pool_schedule(struct thread_pool *p,
578 int (* setup)(void *stored_private, void *setup_data),
579 int (* action)(void *stored_private, void *setup_data),
580 void *setup_data, long timeout);
581int thread_pool_schedule_private(struct thread_pool *p,
582 int (* setup)(void *private, void *data),
583 int (* action)(void *private, void *data),
584 void *data, long timeout, void *id);
585
586#endif /* __KERNEL__ */
587#endif /* __DST_H */
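Most of dst.h is the on-wire protocol plus the kernel-side node/state machinery. A small sketch of the client-side step the header already provides helpers for: packaging a bio into a dst_cmd and flipping it to the big-endian wire format (the wrapper name and transaction id handling are illustrative).

#include <linux/bio.h>
#include <linux/dst.h>

static void example_prepare_io_cmd(struct bio *bio, struct dst_cmd *cmd,
                                   u64 trans_id)
{
        dst_bio_to_cmd(bio, cmd, DST_IO, trans_id);     /* fill from bio */
        dst_convert_cmd(cmd);                           /* host -> wire order */
}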
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde247f7..c8aad713a046 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
74#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ 74#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
75#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ 75#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
76 76
77/* DMA API extensions */
78struct dw_cyclic_desc {
79 struct dw_desc **desc;
80 unsigned long periods;
81 void (*period_callback)(void *param);
82 void *period_callback_param;
83};
84
85struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
86 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
87 enum dma_data_direction direction);
88void dw_dma_cyclic_free(struct dma_chan *chan);
89int dw_dma_cyclic_start(struct dma_chan *chan);
90void dw_dma_cyclic_stop(struct dma_chan *chan);
91
92dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
93
94dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
95
77#endif /* DW_DMAC_H */ 96#endif /* DW_DMAC_H */
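The cyclic extension above prepares a ring of periods that the controller replays from the buffer start automatically. A hedged sketch of bringing up a cyclic receive, assuming dw_dma_cyclic_prep() reports failure via ERR_PTR(); buffer and period sizes come from the caller.

#include <linux/err.h>
#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>

static int example_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
                                   size_t buf_len, size_t period_len)
{
        struct dw_cyclic_desc *cdesc;

        cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
                                   DMA_FROM_DEVICE);
        if (IS_ERR(cdesc))
                return PTR_ERR(cdesc);

        return dw_dma_cyclic_start(chan);
}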
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 7a204256b155..c59b769f62b0 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -116,6 +116,7 @@ extern void elv_abort_queue(struct request_queue *);
116extern void elv_completed_request(struct request_queue *, struct request *); 116extern void elv_completed_request(struct request_queue *, struct request *);
117extern int elv_set_request(struct request_queue *, struct request *, gfp_t); 117extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
118extern void elv_put_request(struct request_queue *, struct request *); 118extern void elv_put_request(struct request_queue *, struct request *);
119extern void elv_drain_elevator(struct request_queue *);
119 120
120/* 121/*
121 * io scheduler registration 122 * io scheduler registration
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index a667637b54e3..f45a8ae5f828 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -13,10 +13,20 @@
13/* For O_CLOEXEC and O_NONBLOCK */ 13/* For O_CLOEXEC and O_NONBLOCK */
14#include <linux/fcntl.h> 14#include <linux/fcntl.h>
15 15
16/* Flags for eventfd2. */ 16/*
17 * CAREFUL: Check include/asm-generic/fcntl.h when defining
18 * new flags, since they might collide with O_* ones. We want
19 * to re-use O_* flags that couldn't possibly have a meaning
20 * from eventfd, in order to leave a free define-space for
21 * shared O_* flags.
22 */
23#define EFD_SEMAPHORE (1 << 0)
17#define EFD_CLOEXEC O_CLOEXEC 24#define EFD_CLOEXEC O_CLOEXEC
18#define EFD_NONBLOCK O_NONBLOCK 25#define EFD_NONBLOCK O_NONBLOCK
19 26
27#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
28#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
29
20struct file *eventfd_fget(int fd); 30struct file *eventfd_fget(int fd);
21int eventfd_signal(struct file *file, int n); 31int eventfd_signal(struct file *file, int n);
22 32
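EFD_SEMAPHORE changes read() semantics: each read returns 1 and decrements the counter by one instead of resetting it. A userspace sketch, assuming a libc that already exposes the eventfd() wrapper and this flag.

#include <sys/eventfd.h>

/* Create a semaphore-style, non-blocking eventfd with @initval tokens. */
static int make_eventfd_semaphore(unsigned int initval)
{
        return eventfd(initval, EFD_SEMAPHORE | EFD_NONBLOCK);
}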
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index dd495b8c3091..634a5e5aba3e 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -208,6 +208,7 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
208#define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */ 208#define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */
209#define EXT3_STATE_NEW 0x00000002 /* inode is newly created */ 209#define EXT3_STATE_NEW 0x00000002 /* inode is newly created */
210#define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */ 210#define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */
211#define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008
211 212
212/* Used to pass group descriptor data when online resize is done */ 213/* Used to pass group descriptor data when online resize is done */
213struct ext3_new_group_input { 214struct ext3_new_group_input {
@@ -893,9 +894,8 @@ extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
893 u64 start, u64 len); 894 u64 start, u64 len);
894 895
895/* ioctl.c */ 896/* ioctl.c */
896extern int ext3_ioctl (struct inode *, struct file *, unsigned int, 897extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
897 unsigned long); 898extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
898extern long ext3_compat_ioctl (struct file *, unsigned int, unsigned long);
899 899
900/* namei.c */ 900/* namei.c */
901extern int ext3_orphan_add(handle_t *, struct inode *); 901extern int ext3_orphan_add(handle_t *, struct inode *);
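The ioctl prototypes now match the BKL-free unlocked_ioctl signature. A sketch of how they slot into a file_operations table; the structure shown is illustrative rather than copied from fs/ext3/file.c.

#include <linux/fs.h>
#include <linux/ext3_fs.h>

static const struct file_operations ext3_like_file_ops = {
        .unlocked_ioctl = ext3_ioctl,           /* no BKL taken by the VFS */
#ifdef CONFIG_COMPAT
        .compat_ioctl   = ext3_compat_ioctl,
#endif
};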
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 31527e17076b..f563c5013932 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -123,6 +123,7 @@ struct dentry;
123#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ 123#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */
124#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ 124#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */
125#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ 125#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */
126#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */
126#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ 127#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */
127#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ 128#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */
128#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ 129#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */
@@ -960,15 +961,7 @@ extern struct fb_info *registered_fb[FB_MAX];
960extern int num_registered_fb; 961extern int num_registered_fb;
961extern struct class *fb_class; 962extern struct class *fb_class;
962 963
963static inline int lock_fb_info(struct fb_info *info) 964extern int lock_fb_info(struct fb_info *info);
964{
965 mutex_lock(&info->lock);
966 if (!info->fbops) {
967 mutex_unlock(&info->lock);
968 return 0;
969 }
970 return 1;
971}
972 965
973static inline void unlock_fb_info(struct fb_info *info) 966static inline void unlock_fb_info(struct fb_info *info)
974{ 967{
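
lock_fb_info() keeps its 0/1 return convention while moving out of line; a hedged sketch of the usual call pattern around it (the fb_do_something() wrapper is hypothetical):

#include <linux/fb.h>
#include <linux/errno.h>

static int fb_do_something(struct fb_info *info)
{
        int ret = 0;

        if (!lock_fb_info(info))        /* returns 0 if info->fbops is gone */
                return -ENODEV;

        /* info->fbops stays valid while the mutex is held */
        if (info->fbops->fb_blank)
                ret = info->fbops->fb_blank(FB_BLANK_UNBLANK, info);

        unlock_fb_info(info);
        return ret;
}
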
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 09d6c5bbdddd..a2ec74bc4812 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -5,12 +5,14 @@
5#ifndef __LINUX_FDTABLE_H 5#ifndef __LINUX_FDTABLE_H
6#define __LINUX_FDTABLE_H 6#define __LINUX_FDTABLE_H
7 7
8#include <asm/atomic.h>
9#include <linux/posix_types.h> 8#include <linux/posix_types.h>
10#include <linux/compiler.h> 9#include <linux/compiler.h>
11#include <linux/spinlock.h> 10#include <linux/spinlock.h>
12#include <linux/rcupdate.h> 11#include <linux/rcupdate.h>
13#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/init.h>
14
15#include <asm/atomic.h>
14 16
15/* 17/*
16 * The default fd array needs to be at least BITS_PER_LONG, 18 * The default fd array needs to be at least BITS_PER_LONG,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 42436ae42f70..562d2855cf30 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -95,8 +95,12 @@ struct inodes_stat_t {
95#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ 95#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
96#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) 96#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
97#define READ_META (READ | (1 << BIO_RW_META)) 97#define READ_META (READ | (1 << BIO_RW_META))
98#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) 98#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
99#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) 99#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
100#define WRITE_ODIRECT (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
101#define SWRITE_SYNC_PLUG \
102 (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
103#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
100#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) 104#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
101#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) 105#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
102#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) 106#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
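
The hunk above splits the sync write flags into a plugging variant and an immediately-unplugging one, and adds BIO_RW_NOIDLE to both. A hedged sketch of how the SWRITE variants are consumed through ll_rw_block(); buffer-head setup and the helper name are illustrative:

#include <linux/fs.h>
#include <linux/buffer_head.h>

static void flush_one_buffer(struct buffer_head *bh, int wait)
{
        /* SWRITE_SYNC unplugs the queue right away; SWRITE_SYNC_PLUG leaves
         * the queue plugged so further writes can still be merged */
        ll_rw_block(wait ? SWRITE_SYNC : SWRITE_SYNC_PLUG, 1, &bh);
        if (wait)
                wait_on_buffer(bh);
}
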
@@ -849,7 +853,7 @@ struct file {
849#define f_dentry f_path.dentry 853#define f_dentry f_path.dentry
850#define f_vfsmnt f_path.mnt 854#define f_vfsmnt f_path.mnt
851 const struct file_operations *f_op; 855 const struct file_operations *f_op;
852 spinlock_t f_lock; /* f_ep_links, f_flags */ 856 spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */
853 atomic_long_t f_count; 857 atomic_long_t f_count;
854 unsigned int f_flags; 858 unsigned int f_flags;
855 fmode_t f_mode; 859 fmode_t f_mode;
@@ -1695,6 +1699,9 @@ struct file_system_type {
1695 struct lock_class_key i_alloc_sem_key; 1699 struct lock_class_key i_alloc_sem_key;
1696}; 1700};
1697 1701
1702extern int get_sb_ns(struct file_system_type *fs_type, int flags, void *data,
1703 int (*fill_super)(struct super_block *, void *, int),
1704 struct vfsmount *mnt);
1698extern int get_sb_bdev(struct file_system_type *fs_type, 1705extern int get_sb_bdev(struct file_system_type *fs_type,
1699 int flags, const char *dev_name, void *data, 1706 int flags, const char *dev_name, void *data,
1700 int (*fill_super)(struct super_block *, void *, int), 1707 int (*fill_super)(struct super_block *, void *, int),
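
get_sb_ns() is a helper for namespace-keyed filesystems: mounts sharing the same data pointer share one superblock. A hedged sketch of a hypothetical filesystem using it (the myfs_* names are not real):

#include <linux/fs.h>

static int myfs_fill_super(struct super_block *sb, void *data, int silent)
{
        /* set s_op, allocate the root inode, d_alloc_root(), etc. */
        return 0;
}

static int myfs_get_sb(struct file_system_type *fs_type, int flags,
                       const char *dev_name, void *data, struct vfsmount *mnt)
{
        /* 'data' acts as the namespace key */
        return get_sb_ns(fs_type, flags, data, myfs_fill_super, mnt);
}
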
@@ -1741,6 +1748,8 @@ extern void drop_collected_mounts(struct vfsmount *);
1741 1748
1742extern int vfs_statfs(struct dentry *, struct kstatfs *); 1749extern int vfs_statfs(struct dentry *, struct kstatfs *);
1743 1750
1751extern int current_umask(void);
1752
1744/* /sys/fs */ 1753/* /sys/fs */
1745extern struct kobject *fs_kobj; 1754extern struct kobject *fs_kobj;
1746 1755
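
current_umask() replaces open-coded current->fs->umask dereferences in filesystems. A hedged sketch of the typical call site when a filesystem has no POSIX ACL support (names hypothetical):

#include <linux/fs.h>

static void myfs_apply_umask(struct inode *inode, int mode)
{
        /* strip the caller's umask bits before storing the mode */
        inode->i_mode = mode & ~current_umask();
}
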
@@ -1878,12 +1887,25 @@ extern struct block_device *open_by_devnum(dev_t, fmode_t);
1878extern void invalidate_bdev(struct block_device *); 1887extern void invalidate_bdev(struct block_device *);
1879extern int sync_blockdev(struct block_device *bdev); 1888extern int sync_blockdev(struct block_device *bdev);
1880extern struct super_block *freeze_bdev(struct block_device *); 1889extern struct super_block *freeze_bdev(struct block_device *);
1890extern void emergency_thaw_all(void);
1881extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); 1891extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
1882extern int fsync_bdev(struct block_device *); 1892extern int fsync_bdev(struct block_device *);
1883extern int fsync_super(struct super_block *); 1893extern int fsync_super(struct super_block *);
1884extern int fsync_no_super(struct block_device *); 1894extern int fsync_no_super(struct block_device *);
1885#else 1895#else
1886static inline void bd_forget(struct inode *inode) {} 1896static inline void bd_forget(struct inode *inode) {}
1897static inline int sync_blockdev(struct block_device *bdev) { return 0; }
1898static inline void invalidate_bdev(struct block_device *bdev) {}
1899
1900static inline struct super_block *freeze_bdev(struct block_device *sb)
1901{
1902 return NULL;
1903}
1904
1905static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
1906{
1907 return 0;
1908}
1887#endif 1909#endif
1888extern const struct file_operations def_blk_fops; 1910extern const struct file_operations def_blk_fops;
1889extern const struct file_operations def_chr_fops; 1911extern const struct file_operations def_chr_fops;
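
A hedged sketch of the freeze/thaw pairing these declarations (and their new !CONFIG_BLOCK stubs) support; the assumption that freeze_bdev() reports failure via ERR_PTR in this tree should be checked against fs/buffer.c, and the snapshot helper is hypothetical:

#include <linux/fs.h>
#include <linux/err.h>

static int snapshot_block_device(struct block_device *bdev)
{
        struct super_block *sb;

        sb = freeze_bdev(bdev);         /* quiesce and sync the filesystem */
        if (IS_ERR(sb))
                return PTR_ERR(sb);

        /* take the snapshot while writes are blocked */

        return thaw_bdev(bdev, sb);     /* resume normal operation */
}
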
@@ -2322,19 +2344,7 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf,
2322 size_t size, loff_t *pos); 2344 size_t size, loff_t *pos);
2323int simple_transaction_release(struct inode *inode, struct file *file); 2345int simple_transaction_release(struct inode *inode, struct file *file);
2324 2346
2325static inline void simple_transaction_set(struct file *file, size_t n) 2347void simple_transaction_set(struct file *file, size_t n);
2326{
2327 struct simple_transaction_argresp *ar = file->private_data;
2328
2329 BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
2330
2331 /*
2332 * The barrier ensures that ar->size will really remain zero until
2333 * ar->data is ready for reading.
2334 */
2335 smp_mb();
2336 ar->size = n;
2337}
2338 2348
2339/* 2349/*
2340 * simple attribute files 2350 * simple attribute files
@@ -2381,27 +2391,6 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
2381ssize_t simple_attr_write(struct file *file, const char __user *buf, 2391ssize_t simple_attr_write(struct file *file, const char __user *buf,
2382 size_t len, loff_t *ppos); 2392 size_t len, loff_t *ppos);
2383 2393
2384
2385#ifdef CONFIG_SECURITY
2386static inline char *alloc_secdata(void)
2387{
2388 return (char *)get_zeroed_page(GFP_KERNEL);
2389}
2390
2391static inline void free_secdata(void *secdata)
2392{
2393 free_page((unsigned long)secdata);
2394}
2395#else
2396static inline char *alloc_secdata(void)
2397{
2398 return (char *)1;
2399}
2400
2401static inline void free_secdata(void *secdata)
2402{ }
2403#endif /* CONFIG_SECURITY */
2404
2405struct ctl_table; 2394struct ctl_table;
2406int proc_nr_files(struct ctl_table *table, int write, struct file *filp, 2395int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
2407 void __user *buffer, size_t *lenp, loff_t *ppos); 2396 void __user *buffer, size_t *lenp, loff_t *ppos);
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 18b467dbe278..78a05bfcd8eb 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -4,12 +4,10 @@
4#include <linux/path.h> 4#include <linux/path.h>
5 5
6struct fs_struct { 6struct fs_struct {
7 atomic_t count; /* This usage count is used by check_unsafe_exec() for 7 int users;
8 * security checking purposes - therefore it may not be
9 * incremented, except by clone(CLONE_FS).
10 */
11 rwlock_t lock; 8 rwlock_t lock;
12 int umask; 9 int umask;
10 int in_exec;
13 struct path root, pwd; 11 struct path root, pwd;
14}; 12};
15 13
@@ -19,6 +17,8 @@ extern void exit_fs(struct task_struct *);
19extern void set_fs_root(struct fs_struct *, struct path *); 17extern void set_fs_root(struct fs_struct *, struct path *);
20extern void set_fs_pwd(struct fs_struct *, struct path *); 18extern void set_fs_pwd(struct fs_struct *, struct path *);
21extern struct fs_struct *copy_fs_struct(struct fs_struct *); 19extern struct fs_struct *copy_fs_struct(struct fs_struct *);
22extern void put_fs_struct(struct fs_struct *); 20extern void free_fs_struct(struct fs_struct *);
21extern void daemonize_fs_struct(void);
22extern int unshare_fs_struct(void);
23 23
24#endif /* _LINUX_FS_STRUCT_H */ 24#endif /* _LINUX_FS_STRUCT_H */
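
The atomic count becomes a plain integer protected by fs->lock, with in_exec guarding the exec-vs-clone race. A hedged, simplified sketch of how a CLONE_FS-style sharer would now take a reference; this is a conceptual outline, not the kernel's actual copy_fs():

#include <linux/fs_struct.h>
#include <linux/spinlock.h>

static struct fs_struct *share_or_copy_fs(struct fs_struct *fs, int clone_fs)
{
        if (clone_fs) {
                write_lock(&fs->lock);
                fs->users++;            /* counted under the lock, not atomically */
                write_unlock(&fs->lock);
                return fs;
        }
        return copy_fs_struct(fs);      /* private copy with users == 1 */
}
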
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
new file mode 100644
index 000000000000..84d3532dd3ea
--- /dev/null
+++ b/include/linux/fscache-cache.h
@@ -0,0 +1,505 @@
1/* General filesystem caching backing cache interface
2 *
3 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * NOTE!!! See:
12 *
13 * Documentation/filesystems/caching/backend-api.txt
14 *
15 * for a description of the cache backend interface declared here.
16 */
17
18#ifndef _LINUX_FSCACHE_CACHE_H
19#define _LINUX_FSCACHE_CACHE_H
20
21#include <linux/fscache.h>
22#include <linux/sched.h>
23#include <linux/slow-work.h>
24
25#define NR_MAXCACHES BITS_PER_LONG
26
27struct fscache_cache;
28struct fscache_cache_ops;
29struct fscache_object;
30struct fscache_operation;
31
32/*
33 * cache tag definition
34 */
35struct fscache_cache_tag {
36 struct list_head link;
37 struct fscache_cache *cache; /* cache referred to by this tag */
38 unsigned long flags;
39#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
40 atomic_t usage;
41 char name[0]; /* tag name */
42};
43
44/*
45 * cache definition
46 */
47struct fscache_cache {
48 const struct fscache_cache_ops *ops;
49 struct fscache_cache_tag *tag; /* tag representing this cache */
50 struct kobject *kobj; /* system representation of this cache */
51 struct list_head link; /* link in list of caches */
52 size_t max_index_size; /* maximum size of index data */
53 char identifier[36]; /* cache label */
54
55 /* node management */
56 struct work_struct op_gc; /* operation garbage collector */
57 struct list_head object_list; /* list of data/index objects */
58 struct list_head op_gc_list; /* list of ops to be deleted */
59 spinlock_t object_list_lock;
60 spinlock_t op_gc_list_lock;
61 atomic_t object_count; /* no. of live objects in this cache */
62 struct fscache_object *fsdef; /* object for the fsdef index */
63 unsigned long flags;
64#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
65#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
66};
67
68extern wait_queue_head_t fscache_cache_cleared_wq;
69
70/*
71 * operation to be applied to a cache object
72 * - retrieval initiation operations are done in the context of the process
73 * that issued them, and not in an async thread pool
74 */
75typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
76typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
77
78struct fscache_operation {
79 union {
80 struct work_struct fast_work; /* record for fast ops */
81 struct slow_work slow_work; /* record for (very) slow ops */
82 };
83 struct list_head pend_link; /* link in object->pending_ops */
84 struct fscache_object *object; /* object to be operated upon */
85
86 unsigned long flags;
87#define FSCACHE_OP_TYPE 0x000f /* operation type */
88#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */
89#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */
 90#define FSCACHE_OP_MYTHREAD	0x0003	/* - processing is done by issuing thread, not pool */
91#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
92#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
93#define FSCACHE_OP_DEAD 6 /* op is now dead */
94
95 atomic_t usage;
96 unsigned debug_id; /* debugging ID */
97
98 /* operation processor callback
99 * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
100 * the op in a non-pool thread */
101 fscache_operation_processor_t processor;
102
103 /* operation releaser */
104 fscache_operation_release_t release;
105};
106
107extern atomic_t fscache_op_debug_id;
108extern const struct slow_work_ops fscache_op_slow_work_ops;
109
110extern void fscache_enqueue_operation(struct fscache_operation *);
111extern void fscache_put_operation(struct fscache_operation *);
112
113/**
114 * fscache_operation_init - Do basic initialisation of an operation
115 * @op: The operation to initialise
116 * @release: The release function to assign
117 *
118 * Do basic initialisation of an operation. The caller must still set flags,
119 * object, either fast_work or slow_work if necessary, and processor if needed.
120 */
121static inline void fscache_operation_init(struct fscache_operation *op,
122 fscache_operation_release_t release)
123{
124 atomic_set(&op->usage, 1);
125 op->debug_id = atomic_inc_return(&fscache_op_debug_id);
126 op->release = release;
127 INIT_LIST_HEAD(&op->pend_link);
128}
129
130/**
131 * fscache_operation_init_slow - Do additional initialisation of a slow op
132 * @op: The operation to initialise
133 * @processor: The processor function to assign
134 *
135 * Do additional initialisation of an operation as required for slow work.
136 */
137static inline
138void fscache_operation_init_slow(struct fscache_operation *op,
139 fscache_operation_processor_t processor)
140{
141 op->processor = processor;
142 slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
143}
144
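
A hedged sketch of a cache backend using the two helpers above to prepare a slow operation; the my_* names and the wrapper struct are hypothetical, and as the kernel-doc notes the caller still has to set op->object before enqueueing:

#include <linux/fscache-cache.h>
#include <linux/slab.h>

struct my_backend_op {
        struct fscache_operation op;
        /* backend-private state would follow */
};

static void my_op_release(struct fscache_operation *op)
{
        kfree(container_of(op, struct my_backend_op, op));
}

static void my_op_processor(struct fscache_operation *op)
{
        /* runs from the slow-work pool once the op is enqueued */
}

static void my_backend_prepare_op(struct my_backend_op *bop)
{
        fscache_operation_init(&bop->op, my_op_release);
        bop->op.flags = FSCACHE_OP_SLOW;
        fscache_operation_init_slow(&bop->op, my_op_processor);
        /* bop->op.object must still be set before fscache_enqueue_operation() */
}
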
145/*
146 * data read operation
147 */
148struct fscache_retrieval {
149 struct fscache_operation op;
150 struct address_space *mapping; /* netfs pages */
151 fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
152 void *context; /* netfs read context (pinned) */
153 struct list_head to_do; /* list of things to be done by the backend */
154 unsigned long start_time; /* time at which retrieval started */
155};
156
157typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
158 struct page *page,
159 gfp_t gfp);
160
161typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
162 struct list_head *pages,
163 unsigned *nr_pages,
164 gfp_t gfp);
165
166/**
167 * fscache_get_retrieval - Get an extra reference on a retrieval operation
168 * @op: The retrieval operation to get a reference on
169 *
170 * Get an extra reference on a retrieval operation.
171 */
172static inline
173struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
174{
175 atomic_inc(&op->op.usage);
176 return op;
177}
178
179/**
180 * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
181 * @op: The retrieval operation affected
182 *
183 * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
184 */
185static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
186{
187 fscache_enqueue_operation(&op->op);
188}
189
190/**
191 * fscache_put_retrieval - Drop a reference to a retrieval operation
192 * @op: The retrieval operation affected
193 *
194 * Drop a reference to a retrieval operation.
195 */
196static inline void fscache_put_retrieval(struct fscache_retrieval *op)
197{
198 fscache_put_operation(&op->op);
199}
200
201/*
202 * cached page storage work item
203 * - used to do three things:
204 * - batch writes to the cache
205 * - do cache writes asynchronously
206 * - defer writes until cache object lookup completion
207 */
208struct fscache_storage {
209 struct fscache_operation op;
210 pgoff_t store_limit; /* don't write more than this */
211};
212
213/*
214 * cache operations
215 */
216struct fscache_cache_ops {
217 /* name of cache provider */
218 const char *name;
219
220 /* allocate an object record for a cookie */
221 struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
222 struct fscache_cookie *cookie);
223
224 /* look up the object for a cookie */
225 void (*lookup_object)(struct fscache_object *object);
226
227 /* finished looking up */
228 void (*lookup_complete)(struct fscache_object *object);
229
230 /* increment the usage count on this object (may fail if unmounting) */
231 struct fscache_object *(*grab_object)(struct fscache_object *object);
232
233 /* pin an object in the cache */
234 int (*pin_object)(struct fscache_object *object);
235
236 /* unpin an object in the cache */
237 void (*unpin_object)(struct fscache_object *object);
238
 239	/* store the updated auxiliary data on an object */
240 void (*update_object)(struct fscache_object *object);
241
242 /* discard the resources pinned by an object and effect retirement if
243 * necessary */
244 void (*drop_object)(struct fscache_object *object);
245
246 /* dispose of a reference to an object */
247 void (*put_object)(struct fscache_object *object);
248
249 /* sync a cache */
250 void (*sync_cache)(struct fscache_cache *cache);
251
252 /* notification that the attributes of a non-index object (such as
253 * i_size) have changed */
254 int (*attr_changed)(struct fscache_object *object);
255
256 /* reserve space for an object's data and associated metadata */
257 int (*reserve_space)(struct fscache_object *object, loff_t i_size);
258
259 /* request a backing block for a page be read or allocated in the
260 * cache */
261 fscache_page_retrieval_func_t read_or_alloc_page;
262
263 /* request backing blocks for a list of pages be read or allocated in
264 * the cache */
265 fscache_pages_retrieval_func_t read_or_alloc_pages;
266
267 /* request a backing block for a page be allocated in the cache so that
268 * it can be written directly */
269 fscache_page_retrieval_func_t allocate_page;
270
271 /* request backing blocks for pages be allocated in the cache so that
272 * they can be written directly */
273 fscache_pages_retrieval_func_t allocate_pages;
274
275 /* write a page to its backing block in the cache */
276 int (*write_page)(struct fscache_storage *op, struct page *page);
277
278 /* detach backing block from a page (optional)
279 * - must release the cookie lock before returning
280 * - may sleep
281 */
282 void (*uncache_page)(struct fscache_object *object,
283 struct page *page);
284
285 /* dissociate a cache from all the pages it was backing */
286 void (*dissociate_pages)(struct fscache_cache *cache);
287};
288
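
A hedged sketch of how a backend (CacheFiles-like, though every my_* symbol here is hypothetical) would publish this operations table; the handlers are assumed to be defined elsewhere in the backend:

#include <linux/fscache-cache.h>

static const struct fscache_cache_ops my_cache_ops = {
        .name            = "mycache",
        .alloc_object    = my_alloc_object,
        .lookup_object   = my_lookup_object,
        .lookup_complete = my_lookup_complete,
        .grab_object     = my_grab_object,
        .drop_object     = my_drop_object,
        .put_object      = my_put_object,
        .write_page      = my_write_page,
        /* sync_cache, attr_changed, read/alloc and uncache hooks elided */
};
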
289/*
290 * data file or index object cookie
291 * - a file will only appear in one cache
292 * - a request to cache a file may or may not be honoured, subject to
293 * constraints such as disk space
294 * - indices are created on disk just-in-time
295 */
296struct fscache_cookie {
297 atomic_t usage; /* number of users of this cookie */
298 atomic_t n_children; /* number of children of this cookie */
299 spinlock_t lock;
300 struct hlist_head backing_objects; /* object(s) backing this file/index */
301 const struct fscache_cookie_def *def; /* definition */
302 struct fscache_cookie *parent; /* parent of this entry */
303 void *netfs_data; /* back pointer to netfs */
304 struct radix_tree_root stores; /* pages to be stored on this cookie */
305#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
306
307 unsigned long flags;
308#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
309#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */
310#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */
311#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */
312#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */
313#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */
314};
315
316extern struct fscache_cookie fscache_fsdef_index;
317
318/*
319 * on-disk cache file or index handle
320 */
321struct fscache_object {
322 enum fscache_object_state {
323 FSCACHE_OBJECT_INIT, /* object in initial unbound state */
324 FSCACHE_OBJECT_LOOKING_UP, /* looking up object */
325 FSCACHE_OBJECT_CREATING, /* creating object */
326
327 /* active states */
328 FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */
329 FSCACHE_OBJECT_ACTIVE, /* object is usable */
330 FSCACHE_OBJECT_UPDATING, /* object is updating */
331
332 /* terminal states */
333 FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */
334 FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */
335 FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */
336 FSCACHE_OBJECT_RELEASING, /* releasing object */
337 FSCACHE_OBJECT_RECYCLING, /* retiring object */
338 FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
339 FSCACHE_OBJECT_DEAD, /* object is now dead */
340 } state;
341
342 int debug_id; /* debugging ID */
343 int n_children; /* number of child objects */
344 int n_ops; /* number of ops outstanding on object */
345 int n_obj_ops; /* number of object ops outstanding on object */
346 int n_in_progress; /* number of ops in progress */
347 int n_exclusive; /* number of exclusive ops queued */
348 spinlock_t lock; /* state and operations lock */
349
350 unsigned long lookup_jif; /* time at which lookup started */
351 unsigned long event_mask; /* events this object is interested in */
352 unsigned long events; /* events to be processed by this object
353 * (order is important - using fls) */
354#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */
355#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */
356#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */
357#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */
358#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
359#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
360#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
361
362 unsigned long flags;
363#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
364#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
365#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
366
367 struct list_head cache_link; /* link in cache->object_list */
368 struct hlist_node cookie_link; /* link in cookie->backing_objects */
369 struct fscache_cache *cache; /* cache that supplied this object */
370 struct fscache_cookie *cookie; /* netfs's file/index object */
371 struct fscache_object *parent; /* parent object */
372 struct slow_work work; /* attention scheduling record */
373 struct list_head dependents; /* FIFO of dependent objects */
374 struct list_head dep_link; /* link in parent's dependents list */
375 struct list_head pending_ops; /* unstarted operations on this object */
376 pgoff_t store_limit; /* current storage limit */
377};
378
379extern const char *fscache_object_states[];
380
381#define fscache_object_is_active(obj) \
382 (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
383 (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
384 (obj)->state < FSCACHE_OBJECT_DYING)
385
386extern const struct slow_work_ops fscache_object_slow_work_ops;
387
388/**
389 * fscache_object_init - Initialise a cache object description
390 * @object: Object description
391 *
392 * Initialise a cache object description to its basic values.
393 *
394 * See Documentation/filesystems/caching/backend-api.txt for a complete
395 * description.
396 */
397static inline
398void fscache_object_init(struct fscache_object *object,
399 struct fscache_cookie *cookie,
400 struct fscache_cache *cache)
401{
402 atomic_inc(&cache->object_count);
403
404 object->state = FSCACHE_OBJECT_INIT;
405 spin_lock_init(&object->lock);
406 INIT_LIST_HEAD(&object->cache_link);
407 INIT_HLIST_NODE(&object->cookie_link);
408 vslow_work_init(&object->work, &fscache_object_slow_work_ops);
409 INIT_LIST_HEAD(&object->dependents);
410 INIT_LIST_HEAD(&object->dep_link);
411 INIT_LIST_HEAD(&object->pending_ops);
412 object->n_children = 0;
413 object->n_ops = object->n_in_progress = object->n_exclusive = 0;
414 object->events = object->event_mask = 0;
415 object->flags = 0;
416 object->store_limit = 0;
417 object->cache = cache;
418 object->cookie = cookie;
419 object->parent = NULL;
420}
421
422extern void fscache_object_lookup_negative(struct fscache_object *object);
423extern void fscache_obtained_object(struct fscache_object *object);
424
425/**
426 * fscache_object_destroyed - Note destruction of an object in a cache
427 * @cache: The cache from which the object came
428 *
429 * Note the destruction and deallocation of an object record in a cache.
430 */
431static inline void fscache_object_destroyed(struct fscache_cache *cache)
432{
433 if (atomic_dec_and_test(&cache->object_count))
434 wake_up_all(&fscache_cache_cleared_wq);
435}
436
437/**
438 * fscache_object_lookup_error - Note an object encountered an error
439 * @object: The object on which the error was encountered
440 *
441 * Note that an object encountered a fatal error (usually an I/O error) and
442 * that it should be withdrawn as soon as possible.
443 */
444static inline void fscache_object_lookup_error(struct fscache_object *object)
445{
446 set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
447}
448
449/**
450 * fscache_set_store_limit - Set the maximum size to be stored in an object
451 * @object: The object to set the maximum on
452 * @i_size: The limit to set in bytes
453 *
454 * Set the maximum size an object is permitted to reach, implying the highest
455 * byte that may be written. Intended to be called by the attr_changed() op.
456 *
457 * See Documentation/filesystems/caching/backend-api.txt for a complete
458 * description.
459 */
460static inline
461void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
462{
463 object->store_limit = i_size >> PAGE_SHIFT;
464 if (i_size & ~PAGE_MASK)
465 object->store_limit++;
466}
467
468/**
469 * fscache_end_io - End a retrieval operation on a page
470 * @op: The FS-Cache operation covering the retrieval
471 * @page: The page that was to be fetched
472 * @error: The error code (0 if successful)
473 *
474 * Note the end of an operation to retrieve a page, as covered by a particular
475 * operation record.
476 */
477static inline void fscache_end_io(struct fscache_retrieval *op,
478 struct page *page, int error)
479{
480 op->end_io_func(page, op->context, error);
481}
482
483/*
484 * out-of-line cache backend functions
485 */
486extern void fscache_init_cache(struct fscache_cache *cache,
487 const struct fscache_cache_ops *ops,
488 const char *idfmt,
489 ...) __attribute__ ((format (printf, 3, 4)));
490
491extern int fscache_add_cache(struct fscache_cache *cache,
492 struct fscache_object *fsdef,
493 const char *tagname);
494extern void fscache_withdraw_cache(struct fscache_cache *cache);
495
496extern void fscache_io_error(struct fscache_cache *cache);
497
498extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
499 struct pagevec *pagevec);
500
501extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
502 const void *data,
503 uint16_t datalen);
504
505#endif /* _LINUX_FSCACHE_CACHE_H */
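
Pulling the out-of-line declarations together, a hedged sketch of the registration sequence a backend would run when it is brought online; my_cache and my_fsdef are hypothetical, my_cache_ops is the table sketched above, and error handling is trimmed:

#include <linux/fscache-cache.h>

static struct fscache_cache my_cache;

static int my_backend_bring_online(struct fscache_object *my_fsdef,
                                   const char *ident)
{
        /* label the cache; the identifier string is printf-formatted */
        fscache_init_cache(&my_cache, &my_cache_ops, "mycache-%s", ident);

        /* hand over the fsdef index object and pick a default tag name */
        return fscache_add_cache(&my_cache, my_fsdef, "mycache");
}
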
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
new file mode 100644
index 000000000000..6d8ee466e0a0
--- /dev/null
+++ b/include/linux/fscache.h
@@ -0,0 +1,618 @@
1/* General filesystem caching interface
2 *
3 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * NOTE!!! See:
12 *
13 * Documentation/filesystems/caching/netfs-api.txt
14 *
15 * for a description of the network filesystem interface declared here.
16 */
17
18#ifndef _LINUX_FSCACHE_H
19#define _LINUX_FSCACHE_H
20
21#include <linux/fs.h>
22#include <linux/list.h>
23#include <linux/pagemap.h>
24#include <linux/pagevec.h>
25
26#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
27#define fscache_available() (1)
28#define fscache_cookie_valid(cookie) (cookie)
29#else
30#define fscache_available() (0)
31#define fscache_cookie_valid(cookie) (0)
32#endif
33
34
35/*
36 * overload PG_private_2 to give us PG_fscache - this is used to indicate that
37 * a page is currently backed by a local disk cache
38 */
39#define PageFsCache(page) PagePrivate2((page))
40#define SetPageFsCache(page) SetPagePrivate2((page))
41#define ClearPageFsCache(page) ClearPagePrivate2((page))
42#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
43#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
44
45/* pattern used to fill dead space in an index entry */
46#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
47
48struct pagevec;
49struct fscache_cache_tag;
50struct fscache_cookie;
51struct fscache_netfs;
52
53typedef void (*fscache_rw_complete_t)(struct page *page,
54 void *context,
55 int error);
56
57/* result of index entry consultation */
58enum fscache_checkaux {
59 FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
60 FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
61 FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
62};
63
64/*
65 * fscache cookie definition
66 */
67struct fscache_cookie_def {
68 /* name of cookie type */
69 char name[16];
70
71 /* cookie type */
72 uint8_t type;
73#define FSCACHE_COOKIE_TYPE_INDEX 0
74#define FSCACHE_COOKIE_TYPE_DATAFILE 1
75
76 /* select the cache into which to insert an entry in this index
77 * - optional
78 * - should return a cache identifier or NULL to cause the cache to be
79 * inherited from the parent if possible or the first cache picked
80 * for a non-index file if not
81 */
82 struct fscache_cache_tag *(*select_cache)(
83 const void *parent_netfs_data,
84 const void *cookie_netfs_data);
85
86 /* get an index key
87 * - should store the key data in the buffer
 88	 *   - should return the amount of data stored
89 * - not permitted to return an error
90 * - the netfs data from the cookie being used as the source is
91 * presented
92 */
93 uint16_t (*get_key)(const void *cookie_netfs_data,
94 void *buffer,
95 uint16_t bufmax);
96
97 /* get certain file attributes from the netfs data
98 * - this function can be absent for an index
99 * - not permitted to return an error
100 * - the netfs data from the cookie being used as the source is
101 * presented
102 */
103 void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
104
 105	/* get the auxiliary data from netfs data
 106	 * - this function can be absent if the index carries no state data
 107	 * - should store the auxiliary data in the buffer
 108	 * - should return the amount of data stored
109 * - not permitted to return an error
110 * - the netfs data from the cookie being used as the source is
111 * presented
112 */
113 uint16_t (*get_aux)(const void *cookie_netfs_data,
114 void *buffer,
115 uint16_t bufmax);
116
117 /* consult the netfs about the state of an object
118 * - this function can be absent if the index carries no state data
119 * - the netfs data from the cookie being used as the target is
 120	 *   presented, as is the auxiliary data
121 */
122 enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
123 const void *data,
124 uint16_t datalen);
125
126 /* get an extra reference on a read context
127 * - this function can be absent if the completion function doesn't
128 * require a context
129 */
130 void (*get_context)(void *cookie_netfs_data, void *context);
131
132 /* release an extra reference on a read context
133 * - this function can be absent if the completion function doesn't
134 * require a context
135 */
136 void (*put_context)(void *cookie_netfs_data, void *context);
137
138 /* indicate pages that now have cache metadata retained
139 * - this function should mark the specified pages as now being cached
140 * - the pages will have been marked with PG_fscache before this is
141 * called, so this is optional
142 */
143 void (*mark_pages_cached)(void *cookie_netfs_data,
144 struct address_space *mapping,
145 struct pagevec *cached_pvec);
146
147 /* indicate the cookie is no longer cached
148 * - this function is called when the backing store currently caching
149 * a cookie is removed
150 * - the netfs should use this to clean up any markers indicating
151 * cached pages
152 * - this is mandatory for any object that may have data
153 */
154 void (*now_uncached)(void *cookie_netfs_data);
155};
156
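
A hedged sketch of a netfs filling in this definition for its data files; every myfs_* symbol is hypothetical, the key is simply the remote inode number, and the now_uncached hook (mandatory for data objects) is omitted for brevity:

#include <linux/fscache.h>
#include <linux/string.h>

struct myfs_inode {
        u64 remote_ino;
        u64 remote_size;
};

static uint16_t myfs_get_key(const void *cookie_netfs_data,
                             void *buffer, uint16_t bufmax)
{
        const struct myfs_inode *mi = cookie_netfs_data;

        memcpy(buffer, &mi->remote_ino, sizeof(mi->remote_ino));
        return sizeof(mi->remote_ino);  /* number of key bytes stored */
}

static void myfs_get_attr(const void *cookie_netfs_data, uint64_t *size)
{
        *size = ((const struct myfs_inode *)cookie_netfs_data)->remote_size;
}

static const struct fscache_cookie_def myfs_file_cookie_def = {
        .name     = "myfs.data",
        .type     = FSCACHE_COOKIE_TYPE_DATAFILE,
        .get_key  = myfs_get_key,
        .get_attr = myfs_get_attr,
};
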
157/*
158 * fscache cached network filesystem type
159 * - name, version and ops must be filled in before registration
160 * - all other fields will be set during registration
161 */
162struct fscache_netfs {
163 uint32_t version; /* indexing version */
164 const char *name; /* filesystem name */
165 struct fscache_cookie *primary_index;
166 struct list_head link; /* internal link */
167};
168
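
And a hedged sketch of the same hypothetical netfs declaring and registering itself at module init time (module_init/module_exit wiring elided):

#include <linux/fscache.h>
#include <linux/init.h>

static struct fscache_netfs myfs_cache_netfs = {
        .name    = "myfs",
        .version = 0,   /* bump when the index layout changes */
};

static int __init myfs_cache_init(void)
{
        /* creates myfs' primary index cookie if FS-Cache is available */
        return fscache_register_netfs(&myfs_cache_netfs);
}

static void __exit myfs_cache_exit(void)
{
        fscache_unregister_netfs(&myfs_cache_netfs);
}
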
169/*
170 * slow-path functions for when there is actually caching available, and the
171 * netfs does actually have a valid token
172 * - these are not to be called directly
173 * - these are undefined symbols when FS-Cache is not configured and the
174 * optimiser takes care of not using them
175 */
176extern int __fscache_register_netfs(struct fscache_netfs *);
177extern void __fscache_unregister_netfs(struct fscache_netfs *);
178extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
179extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
180
181extern struct fscache_cookie *__fscache_acquire_cookie(
182 struct fscache_cookie *,
183 const struct fscache_cookie_def *,
184 void *);
185extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
186extern void __fscache_update_cookie(struct fscache_cookie *);
187extern int __fscache_attr_changed(struct fscache_cookie *);
188extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
189 struct page *,
190 fscache_rw_complete_t,
191 void *,
192 gfp_t);
193extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
194 struct address_space *,
195 struct list_head *,
196 unsigned *,
197 fscache_rw_complete_t,
198 void *,
199 gfp_t);
200extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
201extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
202extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
203extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
204extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
205
206/**
207 * fscache_register_netfs - Register a filesystem as desiring caching services
208 * @netfs: The description of the filesystem
209 *
210 * Register a filesystem as desiring caching services if they're available.
211 *
212 * See Documentation/filesystems/caching/netfs-api.txt for a complete
213 * description.
214 */
215static inline
216int fscache_register_netfs(struct fscache_netfs *netfs)
217{
218 if (fscache_available())
219 return __fscache_register_netfs(netfs);
220 else
221 return 0;
222}
223
224/**
225 * fscache_unregister_netfs - Indicate that a filesystem no longer desires
226 * caching services
227 * @netfs: The description of the filesystem
228 *
229 * Indicate that a filesystem no longer desires caching services for the
230 * moment.
231 *
232 * See Documentation/filesystems/caching/netfs-api.txt for a complete
233 * description.
234 */
235static inline
236void fscache_unregister_netfs(struct fscache_netfs *netfs)
237{
238 if (fscache_available())
239 __fscache_unregister_netfs(netfs);
240}
241
242/**
243 * fscache_lookup_cache_tag - Look up a cache tag
244 * @name: The name of the tag to search for
245 *
246 * Acquire a specific cache referral tag that can be used to select a specific
247 * cache in which to cache an index.
248 *
249 * See Documentation/filesystems/caching/netfs-api.txt for a complete
250 * description.
251 */
252static inline
253struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
254{
255 if (fscache_available())
256 return __fscache_lookup_cache_tag(name);
257 else
258 return NULL;
259}
260
261/**
262 * fscache_release_cache_tag - Release a cache tag
263 * @tag: The tag to release
264 *
265 * Release a reference to a cache referral tag previously looked up.
266 *
267 * See Documentation/filesystems/caching/netfs-api.txt for a complete
268 * description.
269 */
270static inline
271void fscache_release_cache_tag(struct fscache_cache_tag *tag)
272{
273 if (fscache_available())
274 __fscache_release_cache_tag(tag);
275}
276
277/**
278 * fscache_acquire_cookie - Acquire a cookie to represent a cache object
279 * @parent: The cookie that's to be the parent of this one
280 * @def: A description of the cache object, including callback operations
281 * @netfs_data: An arbitrary piece of data to be kept in the cookie to
282 * represent the cache object to the netfs
283 *
284 * This function is used to inform FS-Cache about part of an index hierarchy
285 * that can be used to locate files. This is done by requesting a cookie for
286 * each index in the path to the file.
287 *
288 * See Documentation/filesystems/caching/netfs-api.txt for a complete
289 * description.
290 */
291static inline
292struct fscache_cookie *fscache_acquire_cookie(
293 struct fscache_cookie *parent,
294 const struct fscache_cookie_def *def,
295 void *netfs_data)
296{
297 if (fscache_cookie_valid(parent))
298 return __fscache_acquire_cookie(parent, def, netfs_data);
299 else
300 return NULL;
301}
302
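
A hedged sketch of cookie acquisition at inode instantiation and release at eviction, reusing the hypothetical myfs_* definitions sketched earlier and extending myfs_inode with a cookie pointer:

#include <linux/fscache.h>

struct myfs_inode {
        u64 remote_ino;
        u64 remote_size;
        struct fscache_cookie *cache_cookie;
};

static void myfs_set_cache_cookie(struct myfs_inode *mi)
{
        /* parent is the netfs' primary index; NULL is returned if no cache */
        mi->cache_cookie =
                fscache_acquire_cookie(myfs_cache_netfs.primary_index,
                                       &myfs_file_cookie_def, mi);
}

static void myfs_drop_cache_cookie(struct myfs_inode *mi, int retire)
{
        /* retire != 0 discards the on-disk cache object as well */
        fscache_relinquish_cookie(mi->cache_cookie, retire);
        mi->cache_cookie = NULL;
}
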
303/**
304 * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
305 * it
306 * @cookie: The cookie being returned
307 * @retire: True if the cache object the cookie represents is to be discarded
308 *
309 * This function returns a cookie to the cache, forcibly discarding the
310 * associated cache object if retire is set to true.
311 *
312 * See Documentation/filesystems/caching/netfs-api.txt for a complete
313 * description.
314 */
315static inline
316void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
317{
318 if (fscache_cookie_valid(cookie))
319 __fscache_relinquish_cookie(cookie, retire);
320}
321
322/**
323 * fscache_update_cookie - Request that a cache object be updated
324 * @cookie: The cookie representing the cache object
325 *
326 * Request an update of the index data for the cache object associated with the
327 * cookie.
328 *
329 * See Documentation/filesystems/caching/netfs-api.txt for a complete
330 * description.
331 */
332static inline
333void fscache_update_cookie(struct fscache_cookie *cookie)
334{
335 if (fscache_cookie_valid(cookie))
336 __fscache_update_cookie(cookie);
337}
338
339/**
340 * fscache_pin_cookie - Pin a data-storage cache object in its cache
341 * @cookie: The cookie representing the cache object
342 *
343 * Permit data-storage cache objects to be pinned in the cache.
344 *
345 * See Documentation/filesystems/caching/netfs-api.txt for a complete
346 * description.
347 */
348static inline
349int fscache_pin_cookie(struct fscache_cookie *cookie)
350{
351 return -ENOBUFS;
352}
353
354/**
 355 * fscache_unpin_cookie - Unpin a data-storage cache object in its cache
356 * @cookie: The cookie representing the cache object
357 *
358 * Permit data-storage cache objects to be unpinned from the cache.
359 *
360 * See Documentation/filesystems/caching/netfs-api.txt for a complete
361 * description.
362 */
363static inline
364void fscache_unpin_cookie(struct fscache_cookie *cookie)
365{
366}
367
368/**
369 * fscache_attr_changed - Notify cache that an object's attributes changed
370 * @cookie: The cookie representing the cache object
371 *
372 * Send a notification to the cache indicating that an object's attributes have
373 * changed. This includes the data size. These attributes will be obtained
374 * through the get_attr() cookie definition op.
375 *
376 * See Documentation/filesystems/caching/netfs-api.txt for a complete
377 * description.
378 */
379static inline
380int fscache_attr_changed(struct fscache_cookie *cookie)
381{
382 if (fscache_cookie_valid(cookie))
383 return __fscache_attr_changed(cookie);
384 else
385 return -ENOBUFS;
386}
387
388/**
389 * fscache_reserve_space - Reserve data space for a cached object
390 * @cookie: The cookie representing the cache object
391 * @i_size: The amount of space to be reserved
392 *
393 * Reserve an amount of space in the cache for the cache object attached to a
394 * cookie so that a write to that object within the space can always be
395 * honoured.
396 *
397 * See Documentation/filesystems/caching/netfs-api.txt for a complete
398 * description.
399 */
400static inline
401int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
402{
403 return -ENOBUFS;
404}
405
406/**
407 * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
408 * in which to store it
409 * @cookie: The cookie representing the cache object
410 * @page: The netfs page to fill if possible
411 * @end_io_func: The callback to invoke when and if the page is filled
412 * @context: An arbitrary piece of data to pass on to end_io_func()
413 * @gfp: The conditions under which memory allocation should be made
414 *
415 * Read a page from the cache, or if that's not possible make a potential
416 * one-block reservation in the cache into which the page may be stored once
417 * fetched from the server.
418 *
 419 * If the page is not backed by the cache object, or if there's some reason
420 * it can't be, -ENOBUFS will be returned and nothing more will be done for
421 * that page.
422 *
423 * Else, if that page is backed by the cache, a read will be initiated directly
424 * to the netfs's page and 0 will be returned by this function. The
425 * end_io_func() callback will be invoked when the operation terminates on a
426 * completion or failure. Note that the callback may be invoked before the
427 * return.
428 *
429 * Else, if the page is unbacked, -ENODATA is returned and a block may have
430 * been allocated in the cache.
431 *
432 * See Documentation/filesystems/caching/netfs-api.txt for a complete
433 * description.
434 */
435static inline
436int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
437 struct page *page,
438 fscache_rw_complete_t end_io_func,
439 void *context,
440 gfp_t gfp)
441{
442 if (fscache_cookie_valid(cookie))
443 return __fscache_read_or_alloc_page(cookie, page, end_io_func,
444 context, gfp);
445 else
446 return -ENOBUFS;
447}
448
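
A hedged sketch of the three-way return handling in a netfs ->readpage() path; myfs_readpage_from_server() and the completion callback are hypothetical placeholders:

#include <linux/fscache.h>

static int myfs_readpage_from_server(struct page *page);

static void myfs_read_complete(struct page *page, void *ctx, int error)
{
        if (!error)
                SetPageUptodate(page);
        unlock_page(page);
}

static int myfs_readpage_cached(struct fscache_cookie *cookie,
                                struct page *page)
{
        int ret = fscache_read_or_alloc_page(cookie, page,
                                             myfs_read_complete, NULL,
                                             GFP_KERNEL);
        switch (ret) {
        case 0:         /* read submitted; the completion callback finishes it */
                return 0;
        case -ENODATA:  /* block reserved, but data must come from the server */
        case -ENOBUFS:  /* no cache available for this page */
        default:
                return myfs_readpage_from_server(page);
        }
}
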
449/**
450 * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
451 * blocks in which to store them
452 * @cookie: The cookie representing the cache object
453 * @mapping: The netfs inode mapping to which the pages will be attached
454 * @pages: A list of potential netfs pages to be filled
455 * @end_io_func: The callback to invoke when and if each page is filled
456 * @context: An arbitrary piece of data to pass on to end_io_func()
457 * @gfp: The conditions under which memory allocation should be made
458 *
459 * Read a set of pages from the cache, or if that's not possible, attempt to
460 * make a potential one-block reservation for each page in the cache into which
461 * that page may be stored once fetched from the server.
462 *
 463 * If some pages are not backed by the cache object, or if there's some
 464 * reason they can't be, -ENOBUFS will be returned and nothing more will be
 465 * done for those pages.
466 *
467 * Else, if some of the pages are backed by the cache, a read will be initiated
468 * directly to the netfs's page and 0 will be returned by this function. The
469 * end_io_func() callback will be invoked when the operation terminates on a
470 * completion or failure. Note that the callback may be invoked before the
471 * return.
472 *
473 * Else, if a page is unbacked, -ENODATA is returned and a block may have
474 * been allocated in the cache.
475 *
476 * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
477 * regard to different pages, the return values are prioritised in that order.
478 * Any pages submitted for reading are removed from the pages list.
479 *
480 * See Documentation/filesystems/caching/netfs-api.txt for a complete
481 * description.
482 */
483static inline
484int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
485 struct address_space *mapping,
486 struct list_head *pages,
487 unsigned *nr_pages,
488 fscache_rw_complete_t end_io_func,
489 void *context,
490 gfp_t gfp)
491{
492 if (fscache_cookie_valid(cookie))
493 return __fscache_read_or_alloc_pages(cookie, mapping, pages,
494 nr_pages, end_io_func,
495 context, gfp);
496 else
497 return -ENOBUFS;
498}
499
500/**
501 * fscache_alloc_page - Allocate a block in which to store a page
502 * @cookie: The cookie representing the cache object
503 * @page: The netfs page to allocate a page for
504 * @gfp: The conditions under which memory allocation should be made
505 *
 506 * Request allocation of a block in the cache in which to store a netfs page
507 * without retrieving any contents from the cache.
508 *
509 * If the page is not backed by a file then -ENOBUFS will be returned and
510 * nothing more will be done, and no reservation will be made.
511 *
512 * Else, a block will be allocated if one wasn't already, and 0 will be
 513 * returned.
514 *
515 * See Documentation/filesystems/caching/netfs-api.txt for a complete
516 * description.
517 */
518static inline
519int fscache_alloc_page(struct fscache_cookie *cookie,
520 struct page *page,
521 gfp_t gfp)
522{
523 if (fscache_cookie_valid(cookie))
524 return __fscache_alloc_page(cookie, page, gfp);
525 else
526 return -ENOBUFS;
527}
528
529/**
530 * fscache_write_page - Request storage of a page in the cache
531 * @cookie: The cookie representing the cache object
532 * @page: The netfs page to store
533 * @gfp: The conditions under which memory allocation should be made
534 *
535 * Request the contents of the netfs page be written into the cache. This
536 * request may be ignored if no cache block is currently allocated, in which
537 * case it will return -ENOBUFS.
538 *
539 * If a cache block was already allocated, a write will be initiated and 0 will
540 * be returned. The PG_fscache_write page bit is set immediately and will then
541 * be cleared at the completion of the write to indicate the success or failure
542 * of the operation. Note that the completion may happen before the return.
543 *
544 * See Documentation/filesystems/caching/netfs-api.txt for a complete
545 * description.
546 */
547static inline
548int fscache_write_page(struct fscache_cookie *cookie,
549 struct page *page,
550 gfp_t gfp)
551{
552 if (fscache_cookie_valid(cookie))
553 return __fscache_write_page(cookie, page, gfp);
554 else
555 return -ENOBUFS;
556}
557
558/**
559 * fscache_uncache_page - Indicate that caching is no longer required on a page
560 * @cookie: The cookie representing the cache object
561 * @page: The netfs page that was being cached.
562 *
563 * Tell the cache that we no longer want a page to be cached and that it should
564 * remove any knowledge of the netfs page it may have.
565 *
566 * Note that this cannot cancel any outstanding I/O operations between this
567 * page and the cache.
568 *
569 * See Documentation/filesystems/caching/netfs-api.txt for a complete
570 * description.
571 */
572static inline
573void fscache_uncache_page(struct fscache_cookie *cookie,
574 struct page *page)
575{
576 if (fscache_cookie_valid(cookie))
577 __fscache_uncache_page(cookie, page);
578}
579
580/**
 581 * fscache_check_page_write - Ask if a page is being written to the cache
582 * @cookie: The cookie representing the cache object
583 * @page: The netfs page that is being cached.
584 *
585 * Ask the cache if a page is being written to the cache.
586 *
587 * See Documentation/filesystems/caching/netfs-api.txt for a complete
588 * description.
589 */
590static inline
591bool fscache_check_page_write(struct fscache_cookie *cookie,
592 struct page *page)
593{
594 if (fscache_cookie_valid(cookie))
595 return __fscache_check_page_write(cookie, page);
596 return false;
597}
598
599/**
600 * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
601 * @cookie: The cookie representing the cache object
602 * @page: The netfs page that is being cached.
603 *
604 * Ask the cache to wake us up when a page is no longer being written to the
605 * cache.
606 *
607 * See Documentation/filesystems/caching/netfs-api.txt for a complete
608 * description.
609 */
610static inline
611void fscache_wait_on_page_write(struct fscache_cookie *cookie,
612 struct page *page)
613{
614 if (fscache_cookie_valid(cookie))
615 __fscache_wait_on_page_write(cookie, page);
616}
617
618#endif /* _LINUX_FSCACHE_H */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index d9051d717d27..f2a78b5e8b55 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -18,7 +18,6 @@
18#define _FSL_DEVICE_H_ 18#define _FSL_DEVICE_H_
19 19
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/phy.h>
22 21
23/* 22/*
24 * Some conventions on how we handle peripherals on Freescale chips 23 * Some conventions on how we handle peripherals on Freescale chips
@@ -44,27 +43,6 @@
44 * 43 *
45 */ 44 */
46 45
47struct gianfar_platform_data {
48 /* device specific information */
49 u32 device_flags;
50 char bus_id[BUS_ID_SIZE];
51 phy_interface_t interface;
52};
53
54struct gianfar_mdio_data {
55 /* board specific information */
56 int irq[32];
57};
58
59/* Flags in gianfar_platform_data */
60#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */
61#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */
62
63struct fsl_i2c_platform_data {
64 /* device specific information */
65 u32 device_flags;
66};
67
68/* Flags related to I2C device features */ 46/* Flags related to I2C device features */
69#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001 47#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001
70#define FSL_I2C_DEV_CLOCK_5200 0x00000002 48#define FSL_I2C_DEV_CLOCK_5200 0x00000002
@@ -95,14 +73,15 @@ struct fsl_usb2_platform_data {
95#define FSL_USB2_PORT0_ENABLED 0x00000001 73#define FSL_USB2_PORT0_ENABLED 0x00000001
96#define FSL_USB2_PORT1_ENABLED 0x00000002 74#define FSL_USB2_PORT1_ENABLED 0x00000002
97 75
76struct spi_device;
77
98struct fsl_spi_platform_data { 78struct fsl_spi_platform_data {
99 u32 initial_spmode; /* initial SPMODE value */ 79 u32 initial_spmode; /* initial SPMODE value */
100 u16 bus_num; 80 s16 bus_num;
101 bool qe_mode; 81 bool qe_mode;
102 /* board specific information */ 82 /* board specific information */
103 u16 max_chipselect; 83 u16 max_chipselect;
104 void (*activate_cs)(u8 cs, u8 polarity); 84 void (*cs_control)(struct spi_device *spi, bool on);
105 void (*deactivate_cs)(u8 cs, u8 polarity);
106 u32 sysclk; 85 u32 sysclk;
107}; 86};
108 87
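
The per-chipselect activate/deactivate pair collapses into a single cs_control() hook taking the spi_device. A hedged sketch of board code providing it through a GPIO; the pin number, clock rate, and names are made up:

#include <linux/fsl_devices.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>

#define MY_BOARD_SPI_CS_GPIO    42      /* hypothetical chip-select line */

static void my_board_spi_cs_control(struct spi_device *spi, bool on)
{
        /* active-low chip select: drive low to assert */
        gpio_set_value(MY_BOARD_SPI_CS_GPIO, on ? 0 : 1);
}

static struct fsl_spi_platform_data my_board_spi_pdata = {
        .bus_num        = 0,
        .max_chipselect = 1,
        .sysclk         = 66000000,
        .cs_control     = my_board_spi_cs_control,
};
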
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 677432b9cb7e..015a3d22cf74 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,15 +1,18 @@
1#ifndef _LINUX_FTRACE_H 1#ifndef _LINUX_FTRACE_H
2#define _LINUX_FTRACE_H 2#define _LINUX_FTRACE_H
3 3
4#include <linux/linkage.h> 4#include <linux/trace_clock.h>
5#include <linux/fs.h>
6#include <linux/ktime.h>
7#include <linux/init.h>
8#include <linux/types.h>
9#include <linux/module.h>
10#include <linux/kallsyms.h> 5#include <linux/kallsyms.h>
6#include <linux/linkage.h>
11#include <linux/bitops.h> 7#include <linux/bitops.h>
8#include <linux/module.h>
9#include <linux/ktime.h>
12#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/fs.h>
14
15#include <asm/ftrace.h>
13 16
14#ifdef CONFIG_FUNCTION_TRACER 17#ifdef CONFIG_FUNCTION_TRACER
15 18
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write,
95 loff_t *ppos); 98 loff_t *ppos);
96#endif 99#endif
97 100
101struct ftrace_func_command {
102 struct list_head list;
103 char *name;
104 int (*func)(char *func, char *cmd,
105 char *params, int enable);
106};
107
98#ifdef CONFIG_DYNAMIC_FTRACE 108#ifdef CONFIG_DYNAMIC_FTRACE
99/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ 109
100#include <asm/ftrace.h> 110int ftrace_arch_code_modify_prepare(void);
111int ftrace_arch_code_modify_post_process(void);
112
113struct seq_file;
114
115struct ftrace_probe_ops {
116 void (*func)(unsigned long ip,
117 unsigned long parent_ip,
118 void **data);
119 int (*callback)(unsigned long ip, void **data);
120 void (*free)(void **data);
121 int (*print)(struct seq_file *m,
122 unsigned long ip,
123 struct ftrace_probe_ops *ops,
124 void *data);
125};
126
127extern int
128register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
129 void *data);
130extern void
131unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
132 void *data);
133extern void
134unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
135extern void unregister_ftrace_function_probe_all(char *glob);
101 136
102enum { 137enum {
103 FTRACE_FL_FREE = (1 << 0), 138 FTRACE_FL_FREE = (1 << 0),
@@ -110,15 +145,23 @@ enum {
110}; 145};
111 146
112struct dyn_ftrace { 147struct dyn_ftrace {
113 struct list_head list; 148 union {
114 unsigned long ip; /* address of mcount call-site */ 149 unsigned long ip; /* address of mcount call-site */
115 unsigned long flags; 150 struct dyn_ftrace *freelist;
116 struct dyn_arch_ftrace arch; 151 };
152 union {
153 unsigned long flags;
154 struct dyn_ftrace *newlist;
155 };
156 struct dyn_arch_ftrace arch;
117}; 157};
118 158
119int ftrace_force_update(void); 159int ftrace_force_update(void);
120void ftrace_set_filter(unsigned char *buf, int len, int reset); 160void ftrace_set_filter(unsigned char *buf, int len, int reset);
121 161
162int register_ftrace_command(struct ftrace_func_command *cmd);
163int unregister_ftrace_command(struct ftrace_func_command *cmd);
164
122/* defined in arch */ 165/* defined in arch */
123extern int ftrace_ip_converted(unsigned long ip); 166extern int ftrace_ip_converted(unsigned long ip);
124extern int ftrace_dyn_arch_init(void *data); 167extern int ftrace_dyn_arch_init(void *data);
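
A hedged sketch of hooking a new command into the function-filter parser via the register_ftrace_command() interface added above; the command name and its (empty) behaviour are placeholders:

#include <linux/ftrace.h>
#include <linux/init.h>

static int my_cmd_func(char *func, char *cmd, char *params, int enable)
{
        /* invoked for lines like "do_fork:mycmd:arg" written to
         * set_ftrace_filter (enable=1) or set_ftrace_notrace (enable=0) */
        return 0;
}

static struct ftrace_func_command my_cmd = {
        .name = "mycmd",
        .func = my_cmd_func,
};

static int __init my_cmd_init(void)
{
        return register_ftrace_command(&my_cmd);
}
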
@@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
126extern void ftrace_caller(void); 169extern void ftrace_caller(void);
127extern void ftrace_call(void); 170extern void ftrace_call(void);
128extern void mcount_call(void); 171extern void mcount_call(void);
172
173#ifndef FTRACE_ADDR
174#define FTRACE_ADDR ((unsigned long)ftrace_caller)
175#endif
129#ifdef CONFIG_FUNCTION_GRAPH_TRACER 176#ifdef CONFIG_FUNCTION_GRAPH_TRACER
130extern void ftrace_graph_caller(void); 177extern void ftrace_graph_caller(void);
131extern int ftrace_enable_ftrace_graph_caller(void); 178extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
136#endif 183#endif
137 184
138/** 185/**
139 * ftrace_make_nop - convert code into top 186 * ftrace_make_nop - convert code into nop
140 * @mod: module structure if called by module load initialization 187 * @mod: module structure if called by module load initialization
141 * @rec: the mcount call site record 188 * @rec: the mcount call site record
142 * @addr: the address that the call site should be calling 189 * @addr: the address that the call site should be calling
@@ -181,7 +228,6 @@ extern int ftrace_make_nop(struct module *mod,
181 */ 228 */
182extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); 229extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
183 230
184
185/* May be defined in arch */ 231/* May be defined in arch */
186extern int ftrace_arch_read_dyn_info(char *buf, int size); 232extern int ftrace_arch_read_dyn_info(char *buf, int size);
187 233
@@ -198,6 +244,14 @@ extern void ftrace_enable_daemon(void);
198# define ftrace_disable_daemon() do { } while (0) 244# define ftrace_disable_daemon() do { } while (0)
199# define ftrace_enable_daemon() do { } while (0) 245# define ftrace_enable_daemon() do { } while (0)
200static inline void ftrace_release(void *start, unsigned long size) { } 246static inline void ftrace_release(void *start, unsigned long size) { }
247static inline int register_ftrace_command(struct ftrace_func_command *cmd)
248{
249 return -EINVAL;
250}
251static inline int unregister_ftrace_command(char *cmd_name)
252{
253 return -EINVAL;
254}
201#endif /* CONFIG_DYNAMIC_FTRACE */ 255#endif /* CONFIG_DYNAMIC_FTRACE */
202 256
203/* totally disable ftrace - can not re-enable after this */ 257/* totally disable ftrace - can not re-enable after this */
@@ -233,24 +287,25 @@ static inline void __ftrace_enabled_restore(int enabled)
233#endif 287#endif
234} 288}
235 289
236#ifdef CONFIG_FRAME_POINTER 290#ifndef HAVE_ARCH_CALLER_ADDR
237/* TODO: need to fix this for ARM */ 291# ifdef CONFIG_FRAME_POINTER
238# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) 292# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
239# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) 293# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
240# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) 294# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
241# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) 295# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
242# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) 296# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
243# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) 297# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
244# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) 298# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
245#else 299# else
246# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) 300# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
247# define CALLER_ADDR1 0UL 301# define CALLER_ADDR1 0UL
248# define CALLER_ADDR2 0UL 302# define CALLER_ADDR2 0UL
249# define CALLER_ADDR3 0UL 303# define CALLER_ADDR3 0UL
250# define CALLER_ADDR4 0UL 304# define CALLER_ADDR4 0UL
251# define CALLER_ADDR5 0UL 305# define CALLER_ADDR5 0UL
252# define CALLER_ADDR6 0UL 306# define CALLER_ADDR6 0UL
253#endif 307# endif
308#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
254 309
255#ifdef CONFIG_IRQSOFF_TRACER 310#ifdef CONFIG_IRQSOFF_TRACER
256 extern void time_hardirqs_on(unsigned long a0, unsigned long a1); 311 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
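The CALLER_ADDRn block is now wrapped in HAVE_ARCH_CALLER_ADDR so an architecture (the dropped "TODO: need to fix this for ARM" comment suggests ARM) can supply its own definitions. A rough sketch of what an arch override might look like; arch_return_address() is an invented placeholder, and which arch header the define belongs in is not shown by this patch.

/*
 * Hypothetical arch override.  arch_return_address() stands in for whatever
 * unwinder helper the architecture actually provides.
 */
#define HAVE_ARCH_CALLER_ADDR

#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
/* deeper frames from an arch unwinder instead of __builtin_return_address() */
#define CALLER_ADDR1 ((unsigned long)arch_return_address(1))
#define CALLER_ADDR2 ((unsigned long)arch_return_address(2))
/* CALLER_ADDR3..CALLER_ADDR6 defined the same way */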
@@ -268,54 +323,6 @@ static inline void __ftrace_enabled_restore(int enabled)
268# define trace_preempt_off(a0, a1) do { } while (0) 323# define trace_preempt_off(a0, a1) do { } while (0)
269#endif 324#endif
270 325
271#ifdef CONFIG_TRACING
272extern int ftrace_dump_on_oops;
273
274extern void tracing_start(void);
275extern void tracing_stop(void);
276extern void ftrace_off_permanent(void);
277
278extern void
279ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
280
281/**
282 * ftrace_printk - printf formatting in the ftrace buffer
283 * @fmt: the printf format for printing
284 *
285 * Note: __ftrace_printk is an internal function for ftrace_printk and
286 * the @ip is passed in via the ftrace_printk macro.
287 *
288 * This function allows a kernel developer to debug fast path sections
289 * that printk is not appropriate for. By scattering in various
290 * printk like tracing in the code, a developer can quickly see
291 * where problems are occurring.
292 *
293 * This is intended as a debugging tool for the developer only.
294 * Please refrain from leaving ftrace_printks scattered around in
295 * your code.
296 */
297# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
298extern int
299__ftrace_printk(unsigned long ip, const char *fmt, ...)
300 __attribute__ ((format (printf, 2, 3)));
301extern void ftrace_dump(void);
302#else
303static inline void
304ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
305static inline int
306ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
307
308static inline void tracing_start(void) { }
309static inline void tracing_stop(void) { }
310static inline void ftrace_off_permanent(void) { }
311static inline int
312ftrace_printk(const char *fmt, ...)
313{
314 return 0;
315}
316static inline void ftrace_dump(void) { }
317#endif
318
319#ifdef CONFIG_FTRACE_MCOUNT_RECORD 326#ifdef CONFIG_FTRACE_MCOUNT_RECORD
320extern void ftrace_init(void); 327extern void ftrace_init(void);
321extern void ftrace_init_module(struct module *mod, 328extern void ftrace_init_module(struct module *mod,
@@ -327,36 +334,6 @@ ftrace_init_module(struct module *mod,
327 unsigned long *start, unsigned long *end) { } 334 unsigned long *start, unsigned long *end) { }
328#endif 335#endif
329 336
330enum {
331 POWER_NONE = 0,
332 POWER_CSTATE = 1,
333 POWER_PSTATE = 2,
334};
335
336struct power_trace {
337#ifdef CONFIG_POWER_TRACER
338 ktime_t stamp;
339 ktime_t end;
340 int type;
341 int state;
342#endif
343};
344
345#ifdef CONFIG_POWER_TRACER
346extern void trace_power_start(struct power_trace *it, unsigned int type,
347 unsigned int state);
348extern void trace_power_mark(struct power_trace *it, unsigned int type,
349 unsigned int state);
350extern void trace_power_end(struct power_trace *it);
351#else
352static inline void trace_power_start(struct power_trace *it, unsigned int type,
353 unsigned int state) { }
354static inline void trace_power_mark(struct power_trace *it, unsigned int type,
355 unsigned int state) { }
356static inline void trace_power_end(struct power_trace *it) { }
357#endif
358
359
360/* 337/*
361 * Structure that defines an entry function trace. 338 * Structure that defines an entry function trace.
362 */ 339 */
@@ -380,6 +357,29 @@ struct ftrace_graph_ret {
380#ifdef CONFIG_FUNCTION_GRAPH_TRACER 357#ifdef CONFIG_FUNCTION_GRAPH_TRACER
381 358
382/* 359/*
360 * Stack of return addresses for functions
361 * of a thread.
362 * Used in struct thread_info
363 */
364struct ftrace_ret_stack {
365 unsigned long ret;
366 unsigned long func;
367 unsigned long long calltime;
368};
369
370/*
371 * Primary handler of a function return.
372 * It relays on ftrace_return_to_handler.
373 * Defined in entry_32/64.S
374 */
375extern void return_to_handler(void);
376
377extern int
378ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
379extern void
380ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
381
382/*
383 * Sometimes we don't want to trace a function with the function 383 * Sometimes we don't want to trace a function with the function
384 * graph tracer but we want them to keep traced by the usual function 384 * graph tracer but we want them to keep traced by the usual function
385 * tracer if the function graph tracer is not configured. 385 * tracer if the function graph tracer is not configured.
@@ -490,6 +490,50 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
490 return tsk->trace & TSK_TRACE_FL_GRAPH; 490 return tsk->trace & TSK_TRACE_FL_GRAPH;
491} 491}
492 492
493extern int ftrace_dump_on_oops;
494
493#endif /* CONFIG_TRACING */ 495#endif /* CONFIG_TRACING */
494 496
497
498#ifdef CONFIG_HW_BRANCH_TRACER
499
500void trace_hw_branch(u64 from, u64 to);
501void trace_hw_branch_oops(void);
502
503#else /* CONFIG_HW_BRANCH_TRACER */
504
505static inline void trace_hw_branch(u64 from, u64 to) {}
506static inline void trace_hw_branch_oops(void) {}
507
508#endif /* CONFIG_HW_BRANCH_TRACER */
509
510/*
511 * A syscall entry in the ftrace syscalls array.
512 *
513 * @name: name of the syscall
514 * @nb_args: number of parameters it takes
515 * @types: list of types as strings
516 * @args: list of args as strings (args[i] matches types[i])
517 */
518struct syscall_metadata {
519 const char *name;
520 int nb_args;
521 const char **types;
522 const char **args;
523};
524
525#ifdef CONFIG_FTRACE_SYSCALLS
526extern void arch_init_ftrace_syscalls(void);
527extern struct syscall_metadata *syscall_nr_to_meta(int nr);
528extern void start_ftrace_syscalls(void);
529extern void stop_ftrace_syscalls(void);
530extern void ftrace_syscall_enter(struct pt_regs *regs);
531extern void ftrace_syscall_exit(struct pt_regs *regs);
532#else
533static inline void start_ftrace_syscalls(void) { }
534static inline void stop_ftrace_syscalls(void) { }
535static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
536static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
537#endif
538
495#endif /* _LINUX_FTRACE_H */ 539#endif /* _LINUX_FTRACE_H */
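struct syscall_metadata describes one syscall for the new syscall tracer: its name, argument count, and parallel arrays of type and argument-name strings. Real tables are produced by arch_init_ftrace_syscalls(); the hand-written entry below only illustrates the layout (the struct is repeated so the snippet stands alone, and the sys_open values are an example, not taken from this patch).

/* Illustration only: one entry in the shape the syscall tracer expects. */
#include <stdio.h>

struct syscall_metadata {
	const char *name;
	int nb_args;
	const char **types;
	const char **args;
};

static const char *open_types[] = { "const char *", "int", "int" };
static const char *open_args[]  = { "filename", "flags", "mode" };

static struct syscall_metadata open_meta = {
	.name    = "sys_open",
	.nb_args = 3,
	.types   = open_types,
	.args    = open_args,
};

int main(void)
{
	int i;

	for (i = 0; i < open_meta.nb_args; i++)
		printf("%s: %s %s\n", open_meta.name,
		       open_meta.types[i], open_meta.args[i]);
	return 0;
}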
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 366a054d0b05..dca7bf8cffe2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
2#define _LINUX_FTRACE_IRQ_H 2#define _LINUX_FTRACE_IRQ_H
3 3
4 4
5#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) 5#ifdef CONFIG_FTRACE_NMI_ENTER
6extern void ftrace_nmi_enter(void); 6extern void ftrace_nmi_enter(void);
7extern void ftrace_nmi_exit(void); 7extern void ftrace_nmi_exit(void);
8#else 8#else
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index dd20cd78faa8..0bbc15f54536 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -4,6 +4,7 @@
4#include <linux/mmzone.h> 4#include <linux/mmzone.h>
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6#include <linux/linkage.h> 6#include <linux/linkage.h>
7#include <linux/topology.h>
7 8
8struct vm_area_struct; 9struct vm_area_struct;
9 10
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dda..faa1cf848bcd 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,55 +15,61 @@
15 * - bits 0-7 are the preemption count (max preemption depth: 256) 15 * - bits 0-7 are the preemption count (max preemption depth: 256)
16 * - bits 8-15 are the softirq count (max # of softirqs: 256) 16 * - bits 8-15 are the softirq count (max # of softirqs: 256)
17 * 17 *
18 * The hardirq count can be overridden per architecture, the default is: 18 * The hardirq count can in theory reach the same as NR_IRQS.
19 * In reality, the number of nested IRQS is limited to the stack
20 * size as well. For archs with over 1000 IRQS it is not practical
21 * to expect that they will all nest. We give a max of 10 bits for
22 * hardirq nesting. An arch may choose to give less than 10 bits.
23 * m68k expects it to be 8.
19 * 24 *
20 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096) 25 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
21 * - ( bit 28 is the PREEMPT_ACTIVE flag. ) 26 * - bit 26 is the NMI_MASK
27 * - bit 28 is the PREEMPT_ACTIVE flag
22 * 28 *
23 * PREEMPT_MASK: 0x000000ff 29 * PREEMPT_MASK: 0x000000ff
24 * SOFTIRQ_MASK: 0x0000ff00 30 * SOFTIRQ_MASK: 0x0000ff00
25 * HARDIRQ_MASK: 0x0fff0000 31 * HARDIRQ_MASK: 0x03ff0000
32 * NMI_MASK: 0x04000000
26 */ 33 */
27#define PREEMPT_BITS 8 34#define PREEMPT_BITS 8
28#define SOFTIRQ_BITS 8 35#define SOFTIRQ_BITS 8
36#define NMI_BITS 1
29 37
30#ifndef HARDIRQ_BITS 38#define MAX_HARDIRQ_BITS 10
31#define HARDIRQ_BITS 12
32 39
33#ifndef MAX_HARDIRQS_PER_CPU 40#ifndef HARDIRQ_BITS
34#define MAX_HARDIRQS_PER_CPU NR_IRQS 41# define HARDIRQ_BITS MAX_HARDIRQ_BITS
35#endif 42#endif
36 43
37/* 44#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
38 * The hardirq mask has to be large enough to have space for potentially 45#error HARDIRQ_BITS too high!
39 * all IRQ sources in the system nesting on a single CPU.
40 */
41#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
42# error HARDIRQ_BITS is too low!
43#endif
44#endif 46#endif
45 47
46#define PREEMPT_SHIFT 0 48#define PREEMPT_SHIFT 0
47#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) 49#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
48#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) 50#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
51#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
49 52
50#define __IRQ_MASK(x) ((1UL << (x))-1) 53#define __IRQ_MASK(x) ((1UL << (x))-1)
51 54
52#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) 55#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
53#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) 56#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
54#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) 57#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
58#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
55 59
56#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) 60#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
57#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) 61#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
58#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) 62#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
63#define NMI_OFFSET (1UL << NMI_SHIFT)
59 64
60#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) 65#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
61#error PREEMPT_ACTIVE is too low! 66#error PREEMPT_ACTIVE is too low!
62#endif 67#endif
63 68
64#define hardirq_count() (preempt_count() & HARDIRQ_MASK) 69#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
65#define softirq_count() (preempt_count() & SOFTIRQ_MASK) 70#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
66#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) 71#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
72 | NMI_MASK))
67 73
68/* 74/*
69 * Are we doing bottom half or hardware interrupt processing? 75 * Are we doing bottom half or hardware interrupt processing?
@@ -73,6 +79,11 @@
73#define in_softirq() (softirq_count()) 79#define in_softirq() (softirq_count())
74#define in_interrupt() (irq_count()) 80#define in_interrupt() (irq_count())
75 81
82/*
83 * Are we in NMI context?
84 */
85#define in_nmi() (preempt_count() & NMI_MASK)
86
76#if defined(CONFIG_PREEMPT) 87#if defined(CONFIG_PREEMPT)
77# define PREEMPT_INATOMIC_BASE kernel_locked() 88# define PREEMPT_INATOMIC_BASE kernel_locked()
78# define PREEMPT_CHECK_OFFSET 1 89# define PREEMPT_CHECK_OFFSET 1
@@ -164,20 +175,24 @@ extern void irq_enter(void);
164 */ 175 */
165extern void irq_exit(void); 176extern void irq_exit(void);
166 177
167#define nmi_enter() \ 178#define nmi_enter() \
168 do { \ 179 do { \
169 ftrace_nmi_enter(); \ 180 ftrace_nmi_enter(); \
170 lockdep_off(); \ 181 BUG_ON(in_nmi()); \
171 rcu_nmi_enter(); \ 182 add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
172 __irq_enter(); \ 183 lockdep_off(); \
184 rcu_nmi_enter(); \
185 trace_hardirq_enter(); \
173 } while (0) 186 } while (0)
174 187
175#define nmi_exit() \ 188#define nmi_exit() \
176 do { \ 189 do { \
177 __irq_exit(); \ 190 trace_hardirq_exit(); \
178 rcu_nmi_exit(); \ 191 rcu_nmi_exit(); \
179 lockdep_on(); \ 192 lockdep_on(); \
180 ftrace_nmi_exit(); \ 193 BUG_ON(!in_nmi()); \
194 sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
195 ftrace_nmi_exit(); \
181 } while (0) 196 } while (0)
182 197
183#endif /* LINUX_HARDIRQ_H */ 198#endif /* LINUX_HARDIRQ_H */
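The preempt_count rework gives hardirqs 10 bits, adds a single NMI bit at bit 26, and folds NMI_MASK into irq_count()/in_interrupt(). The standalone snippet below mirrors the header's shift and width definitions (rather than including it) and checks that the masks match the values in the comment: PREEMPT 0x000000ff, SOFTIRQ 0x0000ff00, HARDIRQ 0x03ff0000, NMI 0x04000000.

/* Standalone check of the reworked preempt_count layout described above. */
#include <assert.h>
#include <stdio.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	10
#define NMI_BITS	1

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)	((1UL << (x)) - 1)
#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

int main(void)
{
	/* one nested hardirq, no softirq, no preemption, no NMI */
	unsigned long count = 1UL << HARDIRQ_SHIFT;

	assert(PREEMPT_MASK == 0x000000ffUL);
	assert(SOFTIRQ_MASK == 0x0000ff00UL);
	assert(HARDIRQ_MASK == 0x03ff0000UL);
	assert(NMI_MASK     == 0x04000000UL);

	/* same test the new irq_count()/in_interrupt() performs */
	printf("irq_count bits: 0x%08lx\n",
	       count & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK));
	return 0;
}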
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index ed21bd3dbd25..29ee2873f4a8 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -1,68 +1,6 @@
1#ifndef _LINUX_HDREG_H 1#ifndef _LINUX_HDREG_H
2#define _LINUX_HDREG_H 2#define _LINUX_HDREG_H
3 3
4#ifdef __KERNEL__
5#include <linux/ata.h>
6
7/*
8 * This file contains some defines for the AT-hd-controller.
9 * Various sources.
10 */
11
12/* ide.c has its own port definitions in "ide.h" */
13
14#define HD_IRQ 14
15
16/* Hd controller regs. Ref: IBM AT Bios-listing */
17#define HD_DATA 0x1f0 /* _CTL when writing */
18#define HD_ERROR 0x1f1 /* see err-bits */
19#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */
20#define HD_SECTOR 0x1f3 /* starting sector */
21#define HD_LCYL 0x1f4 /* starting cylinder */
22#define HD_HCYL 0x1f5 /* high byte of starting cyl */
23#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */
24#define HD_STATUS 0x1f7 /* see status-bits */
25#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */
26#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */
27#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */
28
29#define HD_CMD 0x3f6 /* used for resets */
30#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */
31
32/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */
33
34/* Bits of HD_STATUS */
35#define ERR_STAT 0x01
36#define INDEX_STAT 0x02
37#define ECC_STAT 0x04 /* Corrected error */
38#define DRQ_STAT 0x08
39#define SEEK_STAT 0x10
40#define SRV_STAT 0x10
41#define WRERR_STAT 0x20
42#define READY_STAT 0x40
43#define BUSY_STAT 0x80
44
45/* Bits for HD_ERROR */
46#define MARK_ERR 0x01 /* Bad address mark */
47#define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */
48#define TRK0_ERR 0x02 /* couldn't find track 0 */
49#define EOM_ERR 0x02 /* End Of Media (ATAPI) */
50#define ABRT_ERR 0x04 /* Command aborted */
51#define MCR_ERR 0x08 /* media change request */
52#define ID_ERR 0x10 /* ID field not found */
53#define MC_ERR 0x20 /* media changed */
54#define ECC_ERR 0x40 /* Uncorrectable ECC error */
55#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
56#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
57#define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */
58
59/* Bits of HD_NSECTOR */
60#define CD 0x01
61#define IO 0x02
62#define REL 0x04
63#define TAG_MASK 0xf8
64#endif /* __KERNEL__ */
65
66#include <linux/types.h> 4#include <linux/types.h>
67 5
68/* 6/*
@@ -191,6 +129,7 @@ typedef struct hd_drive_hob_hdr {
191#define TASKFILE_INVALID 0x7fff 129#define TASKFILE_INVALID 0x7fff
192#endif 130#endif
193 131
132#ifndef __KERNEL__
194/* ATA/ATAPI Commands pre T13 Spec */ 133/* ATA/ATAPI Commands pre T13 Spec */
195#define WIN_NOP 0x00 134#define WIN_NOP 0x00
196/* 135/*
@@ -379,6 +318,7 @@ typedef struct hd_drive_hob_hdr {
379#define SECURITY_ERASE_UNIT 0xBD 318#define SECURITY_ERASE_UNIT 0xBD
380#define SECURITY_FREEZE_LOCK 0xBE 319#define SECURITY_FREEZE_LOCK 0xBE
381#define SECURITY_DISABLE_PASSWORD 0xBF 320#define SECURITY_DISABLE_PASSWORD 0xBF
321#endif /* __KERNEL__ */
382 322
383struct hd_geometry { 323struct hd_geometry {
384 unsigned char heads; 324 unsigned char heads;
@@ -448,6 +388,7 @@ enum {
448 388
449#define __NEW_HD_DRIVE_ID 389#define __NEW_HD_DRIVE_ID
450 390
391#ifndef __KERNEL__
451/* 392/*
452 * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec. 393 * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec.
453 * 394 *
@@ -699,6 +640,7 @@ struct hd_driveid {
699 * 7:0 Signature 640 * 7:0 Signature
700 */ 641 */
701}; 642};
643#endif /* __KERNEL__ */
702 644
703/* 645/*
704 * IDE "nice" flags. These are used on a per drive basis to determine 646 * IDE "nice" flags. These are used on a per drive basis to determine
diff --git a/include/linux/hid.h b/include/linux/hid.h
index fa8ee9cef7be..a72876e43589 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -270,6 +270,7 @@ struct hid_item {
270 270
271#define HID_QUIRK_INVERT 0x00000001 271#define HID_QUIRK_INVERT 0x00000001
272#define HID_QUIRK_NOTOUCH 0x00000002 272#define HID_QUIRK_NOTOUCH 0x00000002
273#define HID_QUIRK_IGNORE 0x00000004
273#define HID_QUIRK_NOGET 0x00000008 274#define HID_QUIRK_NOGET 0x00000008
274#define HID_QUIRK_BADPAD 0x00000020 275#define HID_QUIRK_BADPAD 0x00000020
275#define HID_QUIRK_MULTI_INPUT 0x00000040 276#define HID_QUIRK_MULTI_INPUT 0x00000040
@@ -603,12 +604,17 @@ struct hid_ll_driver {
603 int (*open)(struct hid_device *hdev); 604 int (*open)(struct hid_device *hdev);
604 void (*close)(struct hid_device *hdev); 605 void (*close)(struct hid_device *hdev);
605 606
607 int (*power)(struct hid_device *hdev, int level);
608
606 int (*hidinput_input_event) (struct input_dev *idev, unsigned int type, 609 int (*hidinput_input_event) (struct input_dev *idev, unsigned int type,
607 unsigned int code, int value); 610 unsigned int code, int value);
608 611
609 int (*parse)(struct hid_device *hdev); 612 int (*parse)(struct hid_device *hdev);
610}; 613};
611 614
615#define PM_HINT_FULLON 1<<5
616#define PM_HINT_NORMAL 1<<1
617
612/* Applications from HID Usage Tables 4/8/99 Version 1.1 */ 618/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
613/* We ignore a few input applications that are not widely used */ 619/* We ignore a few input applications that are not widely used */
614#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) 620#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
@@ -641,6 +647,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
641void hid_output_report(struct hid_report *report, __u8 *data); 647void hid_output_report(struct hid_report *report, __u8 *data);
642struct hid_device *hid_allocate_device(void); 648struct hid_device *hid_allocate_device(void);
643int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); 649int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
650int hid_check_keys_pressed(struct hid_device *hid);
644int hid_connect(struct hid_device *hid, unsigned int connect_mask); 651int hid_connect(struct hid_device *hid, unsigned int connect_mask);
645 652
646/** 653/**
@@ -791,21 +798,5 @@ dbg_hid(const char *fmt, ...)
791 __FILE__ , ## arg) 798 __FILE__ , ## arg)
792#endif /* HID_FF */ 799#endif /* HID_FF */
793 800
794#ifdef __KERNEL__
795#ifdef CONFIG_HID_COMPAT
796#define HID_COMPAT_LOAD_DRIVER(name) \
797/* prototype to avoid sparse warning */ \
798extern void hid_compat_##name(void); \
799void hid_compat_##name(void) { } \
800EXPORT_SYMBOL(hid_compat_##name)
801#else
802#define HID_COMPAT_LOAD_DRIVER(name)
803#endif /* HID_COMPAT */
804#define HID_COMPAT_CALL_DRIVER(name) do { \
805 extern void hid_compat_##name(void); \
806 hid_compat_##name(); \
807} while (0)
808#endif /* __KERNEL__ */
809
810#endif 801#endif
811 802
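hid_ll_driver grows an optional power() hook plus PM_HINT_FULLON/PM_HINT_NORMAL levels, and hid_check_keys_pressed() is exported. Below is a hedged sketch of how a transport driver might wire the new hook up; every my_* identifier is hypothetical and the other mandatory ll_driver callbacks are not shown in full.

/* Sketch only: a hypothetical transport driver implementing ->power(). */
static int my_hid_power(struct hid_device *hdev, int level)
{
	switch (level) {
	case PM_HINT_FULLON:
		return my_hid_set_power(hdev, 1);	/* full power */
	case PM_HINT_NORMAL:
		return my_hid_set_power(hdev, 0);	/* normal/idle */
	default:
		return 0;
	}
}

static struct hid_ll_driver my_hid_ll_driver = {
	.open  = my_hid_open,
	.close = my_hid_close,
	.power = my_hid_power,		/* optional; may be left NULL */
	.parse = my_hid_parse,
};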
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 13875ce9112a..1fcb7126a01f 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -19,8 +19,21 @@ static inline void flush_kernel_dcache_page(struct page *page)
19} 19}
20#endif 20#endif
21 21
22#ifdef CONFIG_HIGHMEM 22#include <asm/kmap_types.h>
23
24#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
25
26void debug_kmap_atomic(enum km_type type);
23 27
28#else
29
30static inline void debug_kmap_atomic(enum km_type type)
31{
32}
33
34#endif
35
36#ifdef CONFIG_HIGHMEM
24#include <asm/highmem.h> 37#include <asm/highmem.h>
25 38
26/* declarations for linux/mm/highmem.c */ 39/* declarations for linux/mm/highmem.c */
@@ -44,8 +57,6 @@ static inline void *kmap(struct page *page)
44 57
45#define kunmap(page) do { (void) (page); } while (0) 58#define kunmap(page) do { (void) (page); } while (0)
46 59
47#include <asm/kmap_types.h>
48
49static inline void *kmap_atomic(struct page *page, enum km_type idx) 60static inline void *kmap_atomic(struct page *page, enum km_type idx)
50{ 61{
51 pagefault_disable(); 62 pagefault_disable();
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index bd37078c2d7d..0d2f7c8a33d6 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -336,6 +336,11 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
336 const enum hrtimer_mode mode); 336 const enum hrtimer_mode mode);
337extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, 337extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
338 unsigned long range_ns, const enum hrtimer_mode mode); 338 unsigned long range_ns, const enum hrtimer_mode mode);
339extern int
340__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
341 unsigned long delta_ns,
342 const enum hrtimer_mode mode, int wakeup);
343
339extern int hrtimer_cancel(struct hrtimer *timer); 344extern int hrtimer_cancel(struct hrtimer *timer);
340extern int hrtimer_try_to_cancel(struct hrtimer *timer); 345extern int hrtimer_try_to_cancel(struct hrtimer *timer);
341 346
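__hrtimer_start_range_ns() is now exported with an extra wakeup argument, apparently for callers that cannot safely raise the hrtimer softirq themselves; ordinary users keep calling hrtimer_start_range_ns(). A short sketch of arming a timer with slack via the existing API; the my_timer* names are illustrative.

/* Sketch of arming a range ("slack") hrtimer. */
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer my_timer;

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void my_timer_arm(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_timer_fn;
	/* expire 10 ms from now, tolerating up to 1 ms of extra delay */
	hrtimer_start_range_ns(&my_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
			       NSEC_PER_MSEC, HRTIMER_MODE_REL);
}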
diff --git a/include/linux/i2c-algo-sgi.h b/include/linux/i2c-algo-sgi.h
deleted file mode 100644
index 3b7715024e69..000000000000
--- a/include/linux/i2c-algo-sgi.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License version 2 as published by the Free Software Foundation.
4 *
5 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
6 */
7
8#ifndef I2C_ALGO_SGI_H
9#define I2C_ALGO_SGI_H 1
10
11#include <linux/i2c.h>
12
13struct i2c_algo_sgi_data {
14 void *data; /* private data for lowlevel routines */
15 unsigned (*getctrl)(void *data);
16 void (*setctrl)(void *data, unsigned val);
17 unsigned (*rdata)(void *data);
18 void (*wdata)(void *data, unsigned val);
19
20 int xfer_timeout;
21 int ack_timeout;
22};
23
24int i2c_sgi_add_bus(struct i2c_adapter *);
25
26#endif /* I2C_ALGO_SGI_H */
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index 1ffc23bc5d1e..c9087de5c6c6 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -33,46 +33,10 @@
33 33
34#define I2C_DRIVERID_MSP3400 1 34#define I2C_DRIVERID_MSP3400 1
35#define I2C_DRIVERID_TUNER 2 35#define I2C_DRIVERID_TUNER 2
36#define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */
37#define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */
38#define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */
39#define I2C_DRIVERID_SAA7111A 8 /* video input processor */
40#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
41#define I2C_DRIVERID_SAA7110 22 /* video decoder */
42#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */
43#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ 36#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */
44#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ 37#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */
45#define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */
46#define I2C_DRIVERID_BT819 40 /* video decoder */
47#define I2C_DRIVERID_BT856 41 /* video encoder */
48#define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */
49#define I2C_DRIVERID_ADV7175 48 /* ADV 7175/7176 video encoder */
50#define I2C_DRIVERID_SAA7114 49 /* video decoder */
51#define I2C_DRIVERID_ADV7170 54 /* video encoder */
52#define I2C_DRIVERID_SAA7191 57 /* video decoder */
53#define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */
54#define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. */
55#define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */
56#define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */
57#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */
58#define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */
59#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */
60#define I2C_DRIVERID_SAA7127 72 /* saa7127 video encoder */
61#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ 38#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
62#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */ 39#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */
63#define I2C_DRIVERID_TVP5150 76 /* TVP5150 video decoder */
64#define I2C_DRIVERID_WM8739 77 /* wm8739 audio processor */
65#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */
66#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */
67#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */
68#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */
69#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */
70#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */
71#define I2C_DRIVERID_VP27SMPX 93 /* Panasonic VP27s tuner internal MPX */
72#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */
73#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */
74
75#define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */
76 40
77/* 41/*
78 * ---- Adapter types ---------------------------------------------------- 42 * ---- Adapter types ----------------------------------------------------
@@ -87,6 +51,8 @@
87#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */ 51#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */
88#define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ 52#define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */
89#define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ 53#define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */
54#define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */
55#define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */
90 56
91/* --- SGI adapters */ 57/* --- SGI adapters */
92#define I2C_HW_SGI_VINO 0x160000 58#define I2C_HW_SGI_VINO 0x160000
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index c86c3b07604c..00ee11eb9092 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -353,8 +353,8 @@ struct i2c_adapter {
353 void *algo_data; 353 void *algo_data;
354 354
355 /* --- administration stuff. */ 355 /* --- administration stuff. */
356 int (*client_register)(struct i2c_client *); 356 int (*client_register)(struct i2c_client *) __deprecated;
357 int (*client_unregister)(struct i2c_client *); 357 int (*client_unregister)(struct i2c_client *) __deprecated;
358 358
359 /* data fields that are valid for all devices */ 359 /* data fields that are valid for all devices */
360 u8 level; /* nesting level for lockdep */ 360 u8 level; /* nesting level for lockdep */
diff --git a/include/linux/i2c/at24.h b/include/linux/i2c/at24.h
index f6edd522a929..8ace93024d60 100644
--- a/include/linux/i2c/at24.h
+++ b/include/linux/i2c/at24.h
@@ -2,6 +2,7 @@
2#define _LINUX_AT24_H 2#define _LINUX_AT24_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/memory.h>
5 6
6/* 7/*
7 * As seen through Linux I2C, differences between the most common types of I2C 8 * As seen through Linux I2C, differences between the most common types of I2C
@@ -23,6 +24,9 @@ struct at24_platform_data {
23#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ 24#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */
24#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ 25#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */
25#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ 26#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */
27
28 void (*setup)(struct memory_accessor *, void *context);
29 void *context;
26}; 30};
27 31
28#endif /* _LINUX_AT24_H */ 32#endif /* _LINUX_AT24_H */
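at24_platform_data gains a setup() callback that hands board code a struct memory_accessor once the EEPROM has probed, typically to pull out stored configuration such as a MAC address. A board-file sketch follows; the byte_len/page_size fields and memory_accessor->read() come from the wider at24/memory API rather than this hunk, and board_set_mac_address() is a made-up helper.

/* Board-file sketch: consuming the EEPROM contents from the setup() hook. */
#include <linux/i2c/at24.h>

static void board_eeprom_setup(struct memory_accessor *ma, void *context)
{
	char mac[6];

	/* e.g. a MAC address assumed to live at offset 0 of the EEPROM */
	if (ma->read(ma, mac, 0, sizeof(mac)) == sizeof(mac))
		board_set_mac_address(mac);
}

static struct at24_platform_data board_eeprom = {
	.byte_len  = 4096,	/* assumed 24c32 part */
	.page_size = 32,
	.setup     = board_eeprom_setup,
	.context   = NULL,
};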
diff --git a/include/linux/i2c/s6000.h b/include/linux/i2c/s6000.h
new file mode 100644
index 000000000000..d9b34bfdae76
--- /dev/null
+++ b/include/linux/i2c/s6000.h
@@ -0,0 +1,10 @@
1#ifndef __LINUX_I2C_S6000_H
2#define __LINUX_I2C_S6000_H
3
4struct s6_i2c_platform_data {
5 const char *clock; /* the clock to use */
6 int bus_num; /* the bus number to register */
7};
8
9#endif
10
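The new header only carries a clock name and a bus number for the s6000 I2C controller. A tiny board-code sketch of passing it along; the "s6000-i2c" device name and the clock string are guesses, not taken from this patch.

/* Board-code sketch: handing s6_i2c_platform_data to the driver. */
#include <linux/platform_device.h>
#include <linux/i2c/s6000.h>

static struct s6_i2c_platform_data s6_i2c_pdata = {
	.clock   = "i2c_clk",	/* assumed clock name */
	.bus_num = 0,
};

static struct platform_device s6_i2c_device = {
	.name = "s6000-i2c",	/* assumed driver name */
	.id   = 0,
	.dev  = {
		.platform_data = &s6_i2c_pdata,
	},
};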
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h
index 8137f660a5cc..0dc80ef24975 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl4030.h
@@ -218,6 +218,53 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
218 218
219/*----------------------------------------------------------------------*/ 219/*----------------------------------------------------------------------*/
220 220
221/* Power bus message definitions */
222
223#define DEV_GRP_NULL 0x0
224#define DEV_GRP_P1 0x1
225#define DEV_GRP_P2 0x2
226#define DEV_GRP_P3 0x4
227
228#define RES_GRP_RES 0x0
229#define RES_GRP_PP 0x1
230#define RES_GRP_RC 0x2
231#define RES_GRP_PP_RC 0x3
232#define RES_GRP_PR 0x4
233#define RES_GRP_PP_PR 0x5
234#define RES_GRP_RC_PR 0x6
235#define RES_GRP_ALL 0x7
236
237#define RES_TYPE2_R0 0x0
238
239#define RES_TYPE_ALL 0x7
240
241#define RES_STATE_WRST 0xF
242#define RES_STATE_ACTIVE 0xE
243#define RES_STATE_SLEEP 0x8
244#define RES_STATE_OFF 0x0
245
246/*
247 * Power Bus Message Format ... these can be sent individually by Linux,
248 * but are usually part of downloaded scripts that are run when various
249 * power events are triggered.
250 *
251 * Broadcast Message (16 Bits):
252 * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4]
253 * RES_STATE[3:0]
254 *
255 * Singular Message (16 Bits):
256 * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0]
257 */
258
259#define MSG_BROADCAST(devgrp, grp, type, type2, state) \
260 ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \
261 | (type) << 4 | (state))
262
263#define MSG_SINGULAR(devgrp, id, state) \
264 ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
265
266/*----------------------------------------------------------------------*/
267
221struct twl4030_bci_platform_data { 268struct twl4030_bci_platform_data {
222 int *battery_tmp_tbl; 269 int *battery_tmp_tbl;
223 unsigned int tblsize; 270 unsigned int tblsize;
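The new comment documents two 16-bit power-bus message layouts, packed by MSG_BROADCAST() and MSG_SINGULAR(). The standalone snippet below copies the two macros plus a few of the field constants and checks that the packing matches the documented bit positions; the resource id 0x17 in the singular message is an arbitrary example.

/* Standalone check of the TWL4030 power-bus message packing shown above. */
#include <assert.h>
#include <stdio.h>

#define DEV_GRP_P1		0x1
#define RES_GRP_ALL		0x7
#define RES_TYPE2_R0		0x0
#define RES_TYPE_ALL		0x7
#define RES_STATE_SLEEP		0x8

#define MSG_BROADCAST(devgrp, grp, type, type2, state) \
	( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \
	| (type) << 4 | (state))

#define MSG_SINGULAR(devgrp, id, state) \
	((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))

int main(void)
{
	/* DEV_GRP_P1 | MT=1 | RES_GRP_ALL | type2=0 | RES_TYPE_ALL | SLEEP */
	unsigned int bcast = MSG_BROADCAST(DEV_GRP_P1, RES_GRP_ALL,
					   RES_TYPE_ALL, RES_TYPE2_R0,
					   RES_STATE_SLEEP);
	/* DEV_GRP_P1 | MT=0 | resource id 0x17 | SLEEP */
	unsigned int single = MSG_SINGULAR(DEV_GRP_P1, 0x17, RES_STATE_SLEEP);

	assert(bcast  == 0x3e78);	/* 001 1 111 00 111 1000 */
	assert(single == 0x2178);	/* 001 0 00010111 1000 */
	printf("broadcast=0x%04x singular=0x%04x\n", bcast, single);
	return 0;
}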
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 854eba8b2ba3..a5d26f66ef78 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -40,6 +40,13 @@
40#define ERROR_RESET 3 /* Reset controller every 4th retry */ 40#define ERROR_RESET 3 /* Reset controller every 4th retry */
41#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ 41#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
42 42
43/* Error codes returned in rq->errors to the higher part of the driver. */
44enum {
45 IDE_DRV_ERROR_GENERAL = 101,
46 IDE_DRV_ERROR_FILEMARK = 102,
47 IDE_DRV_ERROR_EOD = 103,
48};
49
43/* 50/*
44 * Definitions for accessing IDE controller registers 51 * Definitions for accessing IDE controller registers
45 */ 52 */
@@ -193,26 +200,8 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
193 hw->io_ports.ctl_addr = ctl_addr; 200 hw->io_ports.ctl_addr = ctl_addr;
194} 201}
195 202
196#if defined(CONFIG_ARM) || defined(CONFIG_M68K) || defined(CONFIG_MIPS) || \
197 defined(CONFIG_PARISC) || defined(CONFIG_PPC) || defined(CONFIG_SPARC)
198#include <asm/ide.h>
199#else
200#include <asm-generic/ide_iops.h>
201#endif
202
203#define MAX_HWIFS 10 203#define MAX_HWIFS 10
204 204
205/* Currently only m68k, apus and m8xx need it */
206#ifndef IDE_ARCH_ACK_INTR
207# define ide_ack_intr(hwif) (1)
208#endif
209
210/* Currently only Atari needs it */
211#ifndef IDE_ARCH_LOCK
212# define ide_release_lock() do {} while (0)
213# define ide_get_lock(hdlr, data) do {} while (0)
214#endif /* IDE_ARCH_LOCK */
215
216/* 205/*
217 * Now for the data we need to maintain per-drive: ide_drive_t 206 * Now for the data we need to maintain per-drive: ide_drive_t
218 */ 207 */
@@ -252,56 +241,52 @@ typedef enum {
252 241
253enum { 242enum {
254 IDE_TFLAG_LBA48 = (1 << 0), 243 IDE_TFLAG_LBA48 = (1 << 0),
255 IDE_TFLAG_FLAGGED = (1 << 2), 244 IDE_TFLAG_OUT_HOB_FEATURE = (1 << 1),
256 IDE_TFLAG_OUT_DATA = (1 << 3), 245 IDE_TFLAG_OUT_HOB_NSECT = (1 << 2),
257 IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4), 246 IDE_TFLAG_OUT_HOB_LBAL = (1 << 3),
258 IDE_TFLAG_OUT_HOB_NSECT = (1 << 5), 247 IDE_TFLAG_OUT_HOB_LBAM = (1 << 4),
259 IDE_TFLAG_OUT_HOB_LBAL = (1 << 6), 248 IDE_TFLAG_OUT_HOB_LBAH = (1 << 5),
260 IDE_TFLAG_OUT_HOB_LBAM = (1 << 7),
261 IDE_TFLAG_OUT_HOB_LBAH = (1 << 8),
262 IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE | 249 IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE |
263 IDE_TFLAG_OUT_HOB_NSECT | 250 IDE_TFLAG_OUT_HOB_NSECT |
264 IDE_TFLAG_OUT_HOB_LBAL | 251 IDE_TFLAG_OUT_HOB_LBAL |
265 IDE_TFLAG_OUT_HOB_LBAM | 252 IDE_TFLAG_OUT_HOB_LBAM |
266 IDE_TFLAG_OUT_HOB_LBAH, 253 IDE_TFLAG_OUT_HOB_LBAH,
267 IDE_TFLAG_OUT_FEATURE = (1 << 9), 254 IDE_TFLAG_OUT_FEATURE = (1 << 6),
268 IDE_TFLAG_OUT_NSECT = (1 << 10), 255 IDE_TFLAG_OUT_NSECT = (1 << 7),
269 IDE_TFLAG_OUT_LBAL = (1 << 11), 256 IDE_TFLAG_OUT_LBAL = (1 << 8),
270 IDE_TFLAG_OUT_LBAM = (1 << 12), 257 IDE_TFLAG_OUT_LBAM = (1 << 9),
271 IDE_TFLAG_OUT_LBAH = (1 << 13), 258 IDE_TFLAG_OUT_LBAH = (1 << 10),
272 IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE | 259 IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE |
273 IDE_TFLAG_OUT_NSECT | 260 IDE_TFLAG_OUT_NSECT |
274 IDE_TFLAG_OUT_LBAL | 261 IDE_TFLAG_OUT_LBAL |
275 IDE_TFLAG_OUT_LBAM | 262 IDE_TFLAG_OUT_LBAM |
276 IDE_TFLAG_OUT_LBAH, 263 IDE_TFLAG_OUT_LBAH,
277 IDE_TFLAG_OUT_DEVICE = (1 << 14), 264 IDE_TFLAG_OUT_DEVICE = (1 << 11),
278 IDE_TFLAG_WRITE = (1 << 15), 265 IDE_TFLAG_WRITE = (1 << 12),
279 IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16), 266 IDE_TFLAG_CUSTOM_HANDLER = (1 << 13),
280 IDE_TFLAG_IN_DATA = (1 << 17), 267 IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 14),
281 IDE_TFLAG_CUSTOM_HANDLER = (1 << 18), 268 IDE_TFLAG_IN_HOB_ERROR = (1 << 15),
282 IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19), 269 IDE_TFLAG_IN_HOB_NSECT = (1 << 16),
283 IDE_TFLAG_IN_HOB_FEATURE = (1 << 20), 270 IDE_TFLAG_IN_HOB_LBAL = (1 << 17),
284 IDE_TFLAG_IN_HOB_NSECT = (1 << 21), 271 IDE_TFLAG_IN_HOB_LBAM = (1 << 18),
285 IDE_TFLAG_IN_HOB_LBAL = (1 << 22), 272 IDE_TFLAG_IN_HOB_LBAH = (1 << 19),
286 IDE_TFLAG_IN_HOB_LBAM = (1 << 23),
287 IDE_TFLAG_IN_HOB_LBAH = (1 << 24),
288 IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL | 273 IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL |
289 IDE_TFLAG_IN_HOB_LBAM | 274 IDE_TFLAG_IN_HOB_LBAM |
290 IDE_TFLAG_IN_HOB_LBAH, 275 IDE_TFLAG_IN_HOB_LBAH,
291 IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | 276 IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_ERROR |
292 IDE_TFLAG_IN_HOB_NSECT | 277 IDE_TFLAG_IN_HOB_NSECT |
293 IDE_TFLAG_IN_HOB_LBA, 278 IDE_TFLAG_IN_HOB_LBA,
294 IDE_TFLAG_IN_FEATURE = (1 << 1), 279 IDE_TFLAG_IN_ERROR = (1 << 20),
295 IDE_TFLAG_IN_NSECT = (1 << 25), 280 IDE_TFLAG_IN_NSECT = (1 << 21),
296 IDE_TFLAG_IN_LBAL = (1 << 26), 281 IDE_TFLAG_IN_LBAL = (1 << 22),
297 IDE_TFLAG_IN_LBAM = (1 << 27), 282 IDE_TFLAG_IN_LBAM = (1 << 23),
298 IDE_TFLAG_IN_LBAH = (1 << 28), 283 IDE_TFLAG_IN_LBAH = (1 << 24),
299 IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL | 284 IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL |
300 IDE_TFLAG_IN_LBAM | 285 IDE_TFLAG_IN_LBAM |
301 IDE_TFLAG_IN_LBAH, 286 IDE_TFLAG_IN_LBAH,
302 IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT | 287 IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT |
303 IDE_TFLAG_IN_LBA, 288 IDE_TFLAG_IN_LBA,
304 IDE_TFLAG_IN_DEVICE = (1 << 29), 289 IDE_TFLAG_IN_DEVICE = (1 << 25),
305 IDE_TFLAG_HOB = IDE_TFLAG_OUT_HOB | 290 IDE_TFLAG_HOB = IDE_TFLAG_OUT_HOB |
306 IDE_TFLAG_IN_HOB, 291 IDE_TFLAG_IN_HOB,
307 IDE_TFLAG_TF = IDE_TFLAG_OUT_TF | 292 IDE_TFLAG_TF = IDE_TFLAG_OUT_TF |
@@ -309,15 +294,28 @@ enum {
309 IDE_TFLAG_DEVICE = IDE_TFLAG_OUT_DEVICE | 294 IDE_TFLAG_DEVICE = IDE_TFLAG_OUT_DEVICE |
310 IDE_TFLAG_IN_DEVICE, 295 IDE_TFLAG_IN_DEVICE,
311 /* force 16-bit I/O operations */ 296 /* force 16-bit I/O operations */
312 IDE_TFLAG_IO_16BIT = (1 << 30), 297 IDE_TFLAG_IO_16BIT = (1 << 26),
313 /* ide_task_t was allocated using kmalloc() */ 298 /* struct ide_cmd was allocated using kmalloc() */
314 IDE_TFLAG_DYN = (1 << 31), 299 IDE_TFLAG_DYN = (1 << 27),
300 IDE_TFLAG_FS = (1 << 28),
301 IDE_TFLAG_MULTI_PIO = (1 << 29),
302};
303
304enum {
305 IDE_FTFLAG_FLAGGED = (1 << 0),
306 IDE_FTFLAG_SET_IN_FLAGS = (1 << 1),
307 IDE_FTFLAG_OUT_DATA = (1 << 2),
308 IDE_FTFLAG_IN_DATA = (1 << 3),
315}; 309};
316 310
317struct ide_taskfile { 311struct ide_taskfile {
318 u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */ 312 u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */
313 /* 1-5: additional data to support LBA48 */
314 union {
315 u8 hob_error; /* read: error */
316 u8 hob_feature; /* write: feature */
317 };
319 318
320 u8 hob_feature; /* 1-5: additional data to support LBA48 */
321 u8 hob_nsect; 319 u8 hob_nsect;
322 u8 hob_lbal; 320 u8 hob_lbal;
323 u8 hob_lbam; 321 u8 hob_lbam;
@@ -343,16 +341,29 @@ struct ide_taskfile {
343 }; 341 };
344}; 342};
345 343
346typedef struct ide_task_s { 344struct ide_cmd {
347 union { 345 union {
348 struct ide_taskfile tf; 346 struct ide_taskfile tf;
349 u8 tf_array[14]; 347 u8 tf_array[14];
350 }; 348 };
349 u8 ftf_flags; /* for TASKFILE ioctl */
351 u32 tf_flags; 350 u32 tf_flags;
352 int data_phase; 351 int protocol;
352
353 int sg_nents; /* number of sg entries */
354 int orig_sg_nents;
355 int sg_dma_direction; /* DMA transfer direction */
356
357 unsigned int nbytes;
358 unsigned int nleft;
359 unsigned int last_xfer_len;
360
361 struct scatterlist *cursg;
362 unsigned int cursg_ofs;
363
353 struct request *rq; /* copy of request */ 364 struct request *rq; /* copy of request */
354 void *special; /* valid_t generally */ 365 void *special; /* valid_t generally */
355} ide_task_t; 366};
356 367
357/* ATAPI packet command flags */ 368/* ATAPI packet command flags */
358enum { 369enum {
@@ -364,15 +375,13 @@ enum {
364 PC_FLAG_DMA_IN_PROGRESS = (1 << 4), 375 PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
365 PC_FLAG_DMA_ERROR = (1 << 5), 376 PC_FLAG_DMA_ERROR = (1 << 5),
366 PC_FLAG_WRITING = (1 << 6), 377 PC_FLAG_WRITING = (1 << 6),
367 /* command timed out */
368 PC_FLAG_TIMEDOUT = (1 << 7),
369}; 378};
370 379
371/* 380/*
372 * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes. 381 * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes.
373 * This is used for several packet commands (not for READ/WRITE commands). 382 * This is used for several packet commands (not for READ/WRITE commands).
374 */ 383 */
375#define IDE_PC_BUFFER_SIZE 256 384#define IDE_PC_BUFFER_SIZE 64
376#define ATAPI_WAIT_PC (60 * HZ) 385#define ATAPI_WAIT_PC (60 * HZ)
377 386
378struct ide_atapi_pc { 387struct ide_atapi_pc {
@@ -410,9 +419,6 @@ struct ide_atapi_pc {
410 struct idetape_bh *bh; 419 struct idetape_bh *bh;
411 char *b_data; 420 char *b_data;
412 421
413 struct scatterlist *sg;
414 unsigned int sg_cnt;
415
416 unsigned long timeout; 422 unsigned long timeout;
417}; 423};
418 424
@@ -436,7 +442,6 @@ struct ide_disk_ops {
436 int); 442 int);
437 ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *, 443 ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
438 sector_t); 444 sector_t);
439 int (*end_request)(struct ide_drive_s *, int, int);
440 int (*ioctl)(struct ide_drive_s *, struct block_device *, 445 int (*ioctl)(struct ide_drive_s *, struct block_device *,
441 fmode_t, unsigned int, unsigned long); 446 fmode_t, unsigned int, unsigned long);
442}; 447};
@@ -454,11 +459,6 @@ enum {
454 IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), 459 IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3),
455 /* TOC track numbers are in BCD. */ 460 /* TOC track numbers are in BCD. */
456 IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), 461 IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
457 /*
458 * Drive does not provide data in multiples of SECTOR_SIZE
459 * when more than one interrupt is needed.
460 */
461 IDE_AFLAG_LIMIT_NFRAMES = (1 << 5),
462 /* Saved TOC information is current. */ 462 /* Saved TOC information is current. */
463 IDE_AFLAG_TOC_VALID = (1 << 6), 463 IDE_AFLAG_TOC_VALID = (1 << 6),
464 /* We think that the drive door is locked. */ 464 /* We think that the drive door is locked. */
@@ -512,8 +512,6 @@ enum {
512 IDE_DFLAG_NICE1 = (1 << 5), 512 IDE_DFLAG_NICE1 = (1 << 5),
513 /* device is physically present */ 513 /* device is physically present */
514 IDE_DFLAG_PRESENT = (1 << 6), 514 IDE_DFLAG_PRESENT = (1 << 6),
515 /* device ejected hint */
516 IDE_DFLAG_DEAD = (1 << 7),
517 /* id read from device (synthetic if not set) */ 515 /* id read from device (synthetic if not set) */
518 IDE_DFLAG_ID_READ = (1 << 8), 516 IDE_DFLAG_ID_READ = (1 << 8),
519 IDE_DFLAG_NOPROBE = (1 << 9), 517 IDE_DFLAG_NOPROBE = (1 << 9),
@@ -605,7 +603,7 @@ struct ide_drive_s {
605 603
606 unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */ 604 unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
607 unsigned int cyl; /* "real" number of cyls */ 605 unsigned int cyl; /* "real" number of cyls */
608 unsigned int drive_data; /* used by set_pio_mode/selectproc */ 606 unsigned int drive_data; /* used by set_pio_mode/dev_select() */
609 unsigned int failures; /* current failure count */ 607 unsigned int failures; /* current failure count */
610 unsigned int max_failures; /* maximum allowed failure count */ 608 unsigned int max_failures; /* maximum allowed failure count */
611 u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */ 609 u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */
@@ -627,8 +625,11 @@ struct ide_drive_s {
627 /* current packet command */ 625 /* current packet command */
628 struct ide_atapi_pc *pc; 626 struct ide_atapi_pc *pc;
629 627
628 /* last failed packet command */
629 struct ide_atapi_pc *failed_pc;
630
630 /* callback for packet commands */ 631 /* callback for packet commands */
631 void (*pc_callback)(struct ide_drive_s *, int); 632 int (*pc_callback)(struct ide_drive_s *, int);
632 633
633 void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *); 634 void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *);
634 int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *, 635 int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *,
@@ -658,16 +659,16 @@ struct ide_tp_ops {
658 void (*exec_command)(struct hwif_s *, u8); 659 void (*exec_command)(struct hwif_s *, u8);
659 u8 (*read_status)(struct hwif_s *); 660 u8 (*read_status)(struct hwif_s *);
660 u8 (*read_altstatus)(struct hwif_s *); 661 u8 (*read_altstatus)(struct hwif_s *);
662 void (*write_devctl)(struct hwif_s *, u8);
661 663
662 void (*set_irq)(struct hwif_s *, int); 664 void (*dev_select)(ide_drive_t *);
663 665 void (*tf_load)(ide_drive_t *, struct ide_cmd *);
664 void (*tf_load)(ide_drive_t *, struct ide_task_s *); 666 void (*tf_read)(ide_drive_t *, struct ide_cmd *);
665 void (*tf_read)(ide_drive_t *, struct ide_task_s *);
666 667
667 void (*input_data)(ide_drive_t *, struct request *, void *, 668 void (*input_data)(ide_drive_t *, struct ide_cmd *,
668 unsigned int); 669 void *, unsigned int);
669 void (*output_data)(ide_drive_t *, struct request *, void *, 670 void (*output_data)(ide_drive_t *, struct ide_cmd *,
670 unsigned int); 671 void *, unsigned int);
671}; 672};
672 673
673extern const struct ide_tp_ops default_tp_ops; 674extern const struct ide_tp_ops default_tp_ops;
@@ -678,7 +679,6 @@ extern const struct ide_tp_ops default_tp_ops;
678 * @init_dev: host specific initialization of a device 679 * @init_dev: host specific initialization of a device
679 * @set_pio_mode: routine to program host for PIO mode 680 * @set_pio_mode: routine to program host for PIO mode
680 * @set_dma_mode: routine to program host for DMA mode 681 * @set_dma_mode: routine to program host for DMA mode
681 * @selectproc: tweaks hardware to select drive
682 * @reset_poll: chipset polling based on hba specifics 682 * @reset_poll: chipset polling based on hba specifics
683 * @pre_reset: chipset specific changes to default for device-hba resets 683 * @pre_reset: chipset specific changes to default for device-hba resets
684 * @resetproc: routine to reset controller after a disk reset 684 * @resetproc: routine to reset controller after a disk reset
@@ -695,7 +695,6 @@ struct ide_port_ops {
695 void (*init_dev)(ide_drive_t *); 695 void (*init_dev)(ide_drive_t *);
696 void (*set_pio_mode)(ide_drive_t *, const u8); 696 void (*set_pio_mode)(ide_drive_t *, const u8);
697 void (*set_dma_mode)(ide_drive_t *, const u8); 697 void (*set_dma_mode)(ide_drive_t *, const u8);
698 void (*selectproc)(ide_drive_t *);
699 int (*reset_poll)(ide_drive_t *); 698 int (*reset_poll)(ide_drive_t *);
700 void (*pre_reset)(ide_drive_t *); 699 void (*pre_reset)(ide_drive_t *);
701 void (*resetproc)(ide_drive_t *); 700 void (*resetproc)(ide_drive_t *);
@@ -711,13 +710,15 @@ struct ide_port_ops {
711 710
712struct ide_dma_ops { 711struct ide_dma_ops {
713 void (*dma_host_set)(struct ide_drive_s *, int); 712 void (*dma_host_set)(struct ide_drive_s *, int);
714 int (*dma_setup)(struct ide_drive_s *); 713 int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
715 void (*dma_exec_cmd)(struct ide_drive_s *, u8);
716 void (*dma_start)(struct ide_drive_s *); 714 void (*dma_start)(struct ide_drive_s *);
717 int (*dma_end)(struct ide_drive_s *); 715 int (*dma_end)(struct ide_drive_s *);
718 int (*dma_test_irq)(struct ide_drive_s *); 716 int (*dma_test_irq)(struct ide_drive_s *);
719 void (*dma_lost_irq)(struct ide_drive_s *); 717 void (*dma_lost_irq)(struct ide_drive_s *);
720 void (*dma_timeout)(struct ide_drive_s *); 718 /* below ones are optional */
719 int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
720 int (*dma_timer_expiry)(struct ide_drive_s *);
721 void (*dma_clear)(struct ide_drive_s *);
721 /* 722 /*
722 * The following method is optional and only required to be 723 * The following method is optional and only required to be
723 * implemented for the SFF-8038i compatible controllers. 724 * implemented for the SFF-8038i compatible controllers.
@@ -780,19 +781,8 @@ typedef struct hwif_s {
780 /* Scatter-gather list used to build the above */ 781 /* Scatter-gather list used to build the above */
781 struct scatterlist *sg_table; 782 struct scatterlist *sg_table;
782 int sg_max_nents; /* Maximum number of entries in it */ 783 int sg_max_nents; /* Maximum number of entries in it */
783 int sg_nents; /* Current number of entries in it */
784 int orig_sg_nents;
785 int sg_dma_direction; /* dma transfer direction */
786
787 /* data phase of the active command (currently only valid for PIO/DMA) */
788 int data_phase;
789
790 struct ide_task_s task; /* current command */
791 784
792 unsigned int nsect; 785 struct ide_cmd cmd; /* current command */
793 unsigned int nleft;
794 struct scatterlist *cursg;
795 unsigned int cursg_ofs;
796 786
797 int rqsize; /* max sectors per request */ 787 int rqsize; /* max sectors per request */
798 int irq; /* our irq number */ 788 int irq; /* our irq number */
@@ -850,9 +840,18 @@ struct ide_host {
850 ide_hwif_t *ports[MAX_HOST_PORTS + 1]; 840 ide_hwif_t *ports[MAX_HOST_PORTS + 1];
851 unsigned int n_ports; 841 unsigned int n_ports;
852 struct device *dev[2]; 842 struct device *dev[2];
843
853 int (*init_chipset)(struct pci_dev *); 844 int (*init_chipset)(struct pci_dev *);
845
846 void (*get_lock)(irq_handler_t, void *);
847 void (*release_lock)(void);
848
854 irq_handler_t irq_handler; 849 irq_handler_t irq_handler;
850
855 unsigned long host_flags; 851 unsigned long host_flags;
852
853 int irq_flags;
854
856 void *host_priv; 855 void *host_priv;
857 ide_hwif_t *cur_port; /* for hosts requiring serialization */ 856 ide_hwif_t *cur_port; /* for hosts requiring serialization */
858 857
@@ -869,7 +868,7 @@ typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
869typedef int (ide_expiry_t)(ide_drive_t *); 868typedef int (ide_expiry_t)(ide_drive_t *);
870 869
871/* used by ide-cd, ide-floppy, etc. */ 870/* used by ide-cd, ide-floppy, etc. */
872typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned); 871typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);
873 872
874extern struct mutex ide_setting_mtx; 873extern struct mutex ide_setting_mtx;
875 874
@@ -1045,10 +1044,11 @@ enum {
1045}; 1044};
1046 1045
1047/* DRV_NAME has to be defined in the driver before using the macro below */ 1046/* DRV_NAME has to be defined in the driver before using the macro below */
1048#define __ide_debug_log(lvl, fmt, args...) \ 1047#define __ide_debug_log(lvl, fmt, args...) \
1049{ \ 1048{ \
1050 if (unlikely(drive->debug_mask & lvl)) \ 1049 if (unlikely(drive->debug_mask & lvl)) \
1051 printk(KERN_INFO DRV_NAME ": " fmt, ## args); \ 1050 printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
1051 __func__, ## args); \
1052} 1052}
1053 1053
1054/* 1054/*
@@ -1087,7 +1087,7 @@ int generic_ide_resume(struct device *);
1087 1087
1088void ide_complete_power_step(ide_drive_t *, struct request *); 1088void ide_complete_power_step(ide_drive_t *, struct request *);
1089ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *); 1089ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
1090void ide_complete_pm_request(ide_drive_t *, struct request *); 1090void ide_complete_pm_rq(ide_drive_t *, struct request *);
1091void ide_check_pm_state(ide_drive_t *, struct request *); 1091void ide_check_pm_state(ide_drive_t *, struct request *);
1092 1092
1093/* 1093/*
@@ -1099,7 +1099,6 @@ void ide_check_pm_state(ide_drive_t *, struct request *);
1099struct ide_driver { 1099struct ide_driver {
1100 const char *version; 1100 const char *version;
1101 ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); 1101 ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
1102 int (*end_request)(ide_drive_t *, int, int);
1103 struct device_driver gen_driver; 1102 struct device_driver gen_driver;
1104 int (*probe)(ide_drive_t *); 1103 int (*probe)(ide_drive_t *);
1105 void (*remove)(ide_drive_t *); 1104 void (*remove)(ide_drive_t *);
@@ -1130,19 +1129,15 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l
1130extern int ide_vlb_clk; 1129extern int ide_vlb_clk;
1131extern int ide_pci_clk; 1130extern int ide_pci_clk;
1132 1131
1133int ide_end_request(ide_drive_t *, int, int); 1132unsigned int ide_rq_bytes(struct request *);
1134int ide_end_dequeued_request(ide_drive_t *, struct request *, int, int); 1133int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
1135void ide_kill_rq(ide_drive_t *, struct request *); 1134void ide_kill_rq(ide_drive_t *, struct request *);
1136 1135
1137void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int, 1136void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
1138 ide_expiry_t *); 1137void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
1139void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int,
1140 ide_expiry_t *);
1141
1142void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int,
1143 ide_expiry_t *);
1144 1138
1145void ide_execute_pkt_cmd(ide_drive_t *); 1139void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
1140 unsigned int);
1146 1141
1147void ide_pad_transfer(ide_drive_t *, int, int); 1142void ide_pad_transfer(ide_drive_t *, int, int);
1148 1143
@@ -1164,25 +1159,23 @@ extern ide_startstop_t ide_do_reset (ide_drive_t *);
1164extern int ide_devset_execute(ide_drive_t *drive, 1159extern int ide_devset_execute(ide_drive_t *drive,
1165 const struct ide_devset *setting, int arg); 1160 const struct ide_devset *setting, int arg);
1166 1161
1167extern void ide_end_drive_cmd(ide_drive_t *, u8, u8); 1162void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
1163int ide_complete_rq(ide_drive_t *, int, unsigned int);
1168 1164
1169void ide_tf_dump(const char *, struct ide_taskfile *); 1165void ide_tf_dump(const char *, struct ide_taskfile *);
1170 1166
1171void ide_exec_command(ide_hwif_t *, u8); 1167void ide_exec_command(ide_hwif_t *, u8);
1172u8 ide_read_status(ide_hwif_t *); 1168u8 ide_read_status(ide_hwif_t *);
1173u8 ide_read_altstatus(ide_hwif_t *); 1169u8 ide_read_altstatus(ide_hwif_t *);
1170void ide_write_devctl(ide_hwif_t *, u8);
1174 1171
1175void ide_set_irq(ide_hwif_t *, int); 1172void ide_dev_select(ide_drive_t *);
1176 1173void ide_tf_load(ide_drive_t *, struct ide_cmd *);
1177void ide_tf_load(ide_drive_t *, ide_task_t *); 1174void ide_tf_read(ide_drive_t *, struct ide_cmd *);
1178void ide_tf_read(ide_drive_t *, ide_task_t *);
1179 1175
1180void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int); 1176void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
1181void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int); 1177void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
1182 1178
1183int ide_io_buffers(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int);
1184
1185extern void SELECT_DRIVE(ide_drive_t *);
1186void SELECT_MASK(ide_drive_t *, int); 1179void SELECT_MASK(ide_drive_t *, int);
1187 1180
1188u8 ide_read_error(ide_drive_t *); 1181u8 ide_read_error(ide_drive_t *);
@@ -1224,16 +1217,18 @@ int ide_cd_expiry(ide_drive_t *);
1224 1217
1225int ide_cd_get_xferlen(struct request *); 1218int ide_cd_get_xferlen(struct request *);
1226 1219
1227ide_startstop_t ide_issue_pc(ide_drive_t *); 1220ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
1221
1222ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
1228 1223
1229ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); 1224void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
1230 1225
1231void task_end_request(ide_drive_t *, struct request *, u8); 1226void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
1232 1227
1233int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16); 1228int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
1234int ide_no_data_taskfile(ide_drive_t *, ide_task_t *); 1229int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);
1235 1230
1236int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long); 1231int ide_taskfile_ioctl(ide_drive_t *, unsigned long);
1237 1232
1238int ide_dev_read_id(ide_drive_t *, u8, u16 *); 1233int ide_dev_read_id(ide_drive_t *, u8, u16 *);
1239 1234
@@ -1335,6 +1330,10 @@ enum {
1335 IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19), 1330 IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19),
1336 /* serialize ports */ 1331 /* serialize ports */
1337 IDE_HFLAG_SERIALIZE = (1 << 20), 1332 IDE_HFLAG_SERIALIZE = (1 << 20),
1333 /* host is DTC2278 */
1334 IDE_HFLAG_DTC2278 = (1 << 21),
1335 /* 4 devices on a single set of I/O ports */
1336 IDE_HFLAG_4DRIVES = (1 << 22),
1338 /* host is TRM290 */ 1337 /* host is TRM290 */
1339 IDE_HFLAG_TRM290 = (1 << 23), 1338 IDE_HFLAG_TRM290 = (1 << 23),
1340 /* use 32-bit I/O ops */ 1339 /* use 32-bit I/O ops */
@@ -1362,7 +1361,12 @@ enum {
1362 1361
1363struct ide_port_info { 1362struct ide_port_info {
1364 char *name; 1363 char *name;
1364
1365 int (*init_chipset)(struct pci_dev *); 1365 int (*init_chipset)(struct pci_dev *);
1366
1367 void (*get_lock)(irq_handler_t, void *);
1368 void (*release_lock)(void);
1369
1366 void (*init_iops)(ide_hwif_t *); 1370 void (*init_iops)(ide_hwif_t *);
1367 void (*init_hwif)(ide_hwif_t *); 1371 void (*init_hwif)(ide_hwif_t *);
1368 int (*init_dma)(ide_hwif_t *, 1372 int (*init_dma)(ide_hwif_t *,
@@ -1379,6 +1383,9 @@ struct ide_port_info {
1379 u16 max_sectors; /* if < than the default one */ 1383 u16 max_sectors; /* if < than the default one */
1380 1384
1381 u32 host_flags; 1385 u32 host_flags;
1386
1387 int irq_flags;
1388
1382 u8 pio_mask; 1389 u8 pio_mask;
1383 u8 swdma_mask; 1390 u8 swdma_mask;
1384 u8 mwdma_mask; 1391 u8 mwdma_mask;
@@ -1398,8 +1405,8 @@ int ide_pci_resume(struct pci_dev *);
1398#define ide_pci_resume NULL 1405#define ide_pci_resume NULL
1399#endif 1406#endif
1400 1407
1401void ide_map_sg(ide_drive_t *, struct request *); 1408void ide_map_sg(ide_drive_t *, struct ide_cmd *);
1402void ide_init_sg_cmd(ide_drive_t *, struct request *); 1409void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
1403 1410
1404#define BAD_DMA_DRIVE 0 1411#define BAD_DMA_DRIVE 0
1405#define GOOD_DMA_DRIVE 1 1412#define GOOD_DMA_DRIVE 1
@@ -1433,18 +1440,18 @@ ide_startstop_t ide_dma_intr(ide_drive_t *);
1433int ide_allocate_dma_engine(ide_hwif_t *); 1440int ide_allocate_dma_engine(ide_hwif_t *);
1434void ide_release_dma_engine(ide_hwif_t *); 1441void ide_release_dma_engine(ide_hwif_t *);
1435 1442
1436int ide_build_sglist(ide_drive_t *, struct request *); 1443int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
1437void ide_destroy_dmatable(ide_drive_t *); 1444void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
1438 1445
1439#ifdef CONFIG_BLK_DEV_IDEDMA_SFF 1446#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
1440int config_drive_for_dma(ide_drive_t *); 1447int config_drive_for_dma(ide_drive_t *);
1441extern int ide_build_dmatable(ide_drive_t *, struct request *); 1448int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
1442void ide_dma_host_set(ide_drive_t *, int); 1449void ide_dma_host_set(ide_drive_t *, int);
1443extern int ide_dma_setup(ide_drive_t *); 1450int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
1444void ide_dma_exec_cmd(ide_drive_t *, u8);
1445extern void ide_dma_start(ide_drive_t *); 1451extern void ide_dma_start(ide_drive_t *);
1446int ide_dma_end(ide_drive_t *); 1452int ide_dma_end(ide_drive_t *);
1447int ide_dma_test_irq(ide_drive_t *); 1453int ide_dma_test_irq(ide_drive_t *);
1454int ide_dma_sff_timer_expiry(ide_drive_t *);
1448u8 ide_dma_sff_read_status(ide_hwif_t *); 1455u8 ide_dma_sff_read_status(ide_hwif_t *);
1449extern const struct ide_dma_ops sff_dma_ops; 1456extern const struct ide_dma_ops sff_dma_ops;
1450#else 1457#else
@@ -1452,7 +1459,6 @@ static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
1452#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ 1459#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
1453 1460
1454void ide_dma_lost_irq(ide_drive_t *); 1461void ide_dma_lost_irq(ide_drive_t *);
1455void ide_dma_timeout(ide_drive_t *);
1456ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int); 1462ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
1457 1463
1458#else 1464#else
@@ -1465,8 +1471,13 @@ static inline void ide_dma_on(ide_drive_t *drive) { ; }
1465static inline void ide_dma_verbose(ide_drive_t *drive) { ; } 1471static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
1466static inline int ide_set_dma(ide_drive_t *drive) { return 1; } 1472static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
1467static inline void ide_check_dma_crc(ide_drive_t *drive) { ; } 1473static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
1474static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
1468static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; } 1475static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
1469static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; } 1476static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
1477static inline int ide_dma_prepare(ide_drive_t *drive,
1478 struct ide_cmd *cmd) { return 1; }
1479static inline void ide_dma_unmap_sg(ide_drive_t *drive,
1480 struct ide_cmd *cmd) { ; }
1470#endif /* CONFIG_BLK_DEV_IDEDMA */ 1481#endif /* CONFIG_BLK_DEV_IDEDMA */
1471 1482
1472#ifdef CONFIG_BLK_DEV_IDEACPI 1483#ifdef CONFIG_BLK_DEV_IDEACPI
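
The ide.h hunks above rework the host-driver interface around struct ide_cmd and extend struct ide_port_info with get_lock()/release_lock() hooks and an irq_flags field. As a rough sketch only, a host driver might now describe itself as below; the driver name, flag and mask choices are illustrative, not taken from any real driver.

#include <linux/ide.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

/* Illustrative chipset hook; a real driver would program its chip here. */
static int example_init_chipset(struct pci_dev *dev)
{
	return 0;
}

static const struct ide_port_info example_port_info = {
	.name		= "example-ide",
	.init_chipset	= example_init_chipset,	/* now takes only the pci_dev */
	.host_flags	= IDE_HFLAG_SERIALIZE,
	.irq_flags	= IRQF_SHARED,		/* new field added above */
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
};
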
diff --git a/include/linux/idr.h b/include/linux/idr.h
index dd846df8cd32..e968db71e33a 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -106,6 +106,7 @@ int idr_get_new(struct idr *idp, void *ptr, int *id);
106int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); 106int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
107int idr_for_each(struct idr *idp, 107int idr_for_each(struct idr *idp,
108 int (*fn)(int id, void *p, void *data), void *data); 108 int (*fn)(int id, void *p, void *data), void *data);
109void *idr_get_next(struct idr *idp, int *nextid);
109void *idr_replace(struct idr *idp, void *ptr, int id); 110void *idr_replace(struct idr *idp, void *ptr, int id);
110void idr_remove(struct idr *idp, int id); 111void idr_remove(struct idr *idp, int id);
111void idr_remove_all(struct idr *idp); 112void idr_remove_all(struct idr *idp);
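
The new idr_get_next() above makes it possible to walk every pointer stored in an idr without knowing the ids in advance. A minimal sketch, assuming a hypothetical payload type and the usual convention that *nextid is advanced to the id that was found:

#include <linux/idr.h>

struct example_obj {		/* hypothetical payload type */
	int payload;
};

static DEFINE_IDR(example_idr);

static void example_walk(void)
{
	struct example_obj *obj;
	int id = 0;

	while ((obj = idr_get_next(&example_idr, &id)) != NULL) {
		/* ... use obj ... */
		id++;		/* step past the id just returned */
	}
}
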
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index d2e3cbfba14f..aa8c53171233 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
123#define ecap_eim_support(e) ((e >> 4) & 0x1) 123#define ecap_eim_support(e) ((e >> 4) & 0x1)
124#define ecap_ir_support(e) ((e >> 3) & 0x1) 124#define ecap_ir_support(e) ((e >> 3) & 0x1)
125#define ecap_max_handle_mask(e) ((e >> 20) & 0xf) 125#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
126 126#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
127 127
128/* IOTLB_REG */ 128/* IOTLB_REG */
129#define DMA_TLB_FLUSH_GRANU_OFFSET 60 129#define DMA_TLB_FLUSH_GRANU_OFFSET 60
@@ -164,6 +164,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
164#define DMA_GCMD_QIE (((u32)1) << 26) 164#define DMA_GCMD_QIE (((u32)1) << 26)
165#define DMA_GCMD_SIRTP (((u32)1) << 24) 165#define DMA_GCMD_SIRTP (((u32)1) << 24)
166#define DMA_GCMD_IRE (((u32) 1) << 25) 166#define DMA_GCMD_IRE (((u32) 1) << 25)
167#define DMA_GCMD_CFI (((u32) 1) << 23)
167 168
168/* GSTS_REG */ 169/* GSTS_REG */
169#define DMA_GSTS_TES (((u32)1) << 31) 170#define DMA_GSTS_TES (((u32)1) << 31)
@@ -174,6 +175,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
174#define DMA_GSTS_QIES (((u32)1) << 26) 175#define DMA_GSTS_QIES (((u32)1) << 26)
175#define DMA_GSTS_IRTPS (((u32)1) << 24) 176#define DMA_GSTS_IRTPS (((u32)1) << 24)
176#define DMA_GSTS_IRES (((u32)1) << 25) 177#define DMA_GSTS_IRES (((u32)1) << 25)
178#define DMA_GSTS_CFIS (((u32)1) << 23)
177 179
178/* CCMD_REG */ 180/* CCMD_REG */
179#define DMA_CCMD_ICC (((u64)1) << 63) 181#define DMA_CCMD_ICC (((u64)1) << 63)
@@ -284,6 +286,14 @@ struct iommu_flush {
284 unsigned int size_order, u64 type, int non_present_entry_flush); 286 unsigned int size_order, u64 type, int non_present_entry_flush);
285}; 287};
286 288
289enum {
290 SR_DMAR_FECTL_REG,
291 SR_DMAR_FEDATA_REG,
292 SR_DMAR_FEADDR_REG,
293 SR_DMAR_FEUADDR_REG,
294 MAX_SR_DMAR_REGS
295};
296
287struct intel_iommu { 297struct intel_iommu {
288 void __iomem *reg; /* Pointer to hardware regs, virtual addr */ 298 void __iomem *reg; /* Pointer to hardware regs, virtual addr */
289 u64 cap; 299 u64 cap;
@@ -292,6 +302,8 @@ struct intel_iommu {
292 spinlock_t register_lock; /* protect register handling */ 302 spinlock_t register_lock; /* protect register handling */
293 int seq_id; /* sequence id of the iommu */ 303 int seq_id; /* sequence id of the iommu */
294 int agaw; /* agaw of this iommu */ 304 int agaw; /* agaw of this iommu */
305 unsigned int irq;
306 unsigned char name[13]; /* Device Name */
295 307
296#ifdef CONFIG_DMAR 308#ifdef CONFIG_DMAR
297 unsigned long *domain_ids; /* bitmap of domains */ 309 unsigned long *domain_ids; /* bitmap of domains */
@@ -299,11 +311,11 @@ struct intel_iommu {
299 spinlock_t lock; /* protect context, domain ids */ 311 spinlock_t lock; /* protect context, domain ids */
300 struct root_entry *root_entry; /* virtual address */ 312 struct root_entry *root_entry; /* virtual address */
301 313
302 unsigned int irq;
303 unsigned char name[7]; /* Device Name */
304 struct iommu_flush flush; 314 struct iommu_flush flush;
305#endif 315#endif
306 struct q_inval *qi; /* Queued invalidation info */ 316 struct q_inval *qi; /* Queued invalidation info */
317 u32 *iommu_state; /* Store iommu states between suspend and resume.*/
318
307#ifdef CONFIG_INTR_REMAP 319#ifdef CONFIG_INTR_REMAP
308 struct ir_table *ir_table; /* Interrupt remapping info */ 320 struct ir_table *ir_table; /* Interrupt remapping info */
309#endif 321#endif
@@ -321,6 +333,8 @@ extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
321extern int alloc_iommu(struct dmar_drhd_unit *drhd); 333extern int alloc_iommu(struct dmar_drhd_unit *drhd);
322extern void free_iommu(struct intel_iommu *iommu); 334extern void free_iommu(struct intel_iommu *iommu);
323extern int dmar_enable_qi(struct intel_iommu *iommu); 335extern int dmar_enable_qi(struct intel_iommu *iommu);
336extern void dmar_disable_qi(struct intel_iommu *iommu);
337extern int dmar_reenable_qi(struct intel_iommu *iommu);
324extern void qi_global_iec(struct intel_iommu *iommu); 338extern void qi_global_iec(struct intel_iommu *iommu);
325 339
326extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, 340extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
@@ -331,11 +345,4 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
331 345
332extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); 346extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
333 347
334extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
335extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
336extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
337extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
338extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
339extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
340
341#endif 348#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0c9cb63e6895..8a9613d0c674 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -117,6 +117,15 @@ extern void disable_irq_nosync(unsigned int irq);
117extern void disable_irq(unsigned int irq); 117extern void disable_irq(unsigned int irq);
118extern void enable_irq(unsigned int irq); 118extern void enable_irq(unsigned int irq);
119 119
120/* The following three functions are for the core kernel use only. */
121extern void suspend_device_irqs(void);
122extern void resume_device_irqs(void);
123#ifdef CONFIG_PM_SLEEP
124extern int check_wakeup_irqs(void);
125#else
126static inline int check_wakeup_irqs(void) { return 0; }
127#endif
128
120#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) 129#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
121 130
122extern cpumask_var_t irq_default_affinity; 131extern cpumask_var_t irq_default_affinity;
@@ -269,6 +278,11 @@ enum
269 NR_SOFTIRQS 278 NR_SOFTIRQS
270}; 279};
271 280
281/* map softirq index to softirq name. update 'softirq_to_name' in
282 * kernel/softirq.c when adding a new softirq.
283 */
284extern char *softirq_to_name[NR_SOFTIRQS];
285
272/* softirq mask and active fields moved to irq_cpustat_t in 286/* softirq mask and active fields moved to irq_cpustat_t in
273 * asm/hardirq.h to get better cache usage. KAO 287 * asm/hardirq.h to get better cache usage. KAO
274 */ 288 */
@@ -285,6 +299,7 @@ extern void softirq_init(void);
285#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) 299#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
286extern void raise_softirq_irqoff(unsigned int nr); 300extern void raise_softirq_irqoff(unsigned int nr);
287extern void raise_softirq(unsigned int nr); 301extern void raise_softirq(unsigned int nr);
302extern void wakeup_softirqd(void);
288 303
289/* This is the worklist that queues up per-cpu softirq work. 304/* This is the worklist that queues up per-cpu softirq work.
290 * 305 *
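
suspend_device_irqs(), resume_device_irqs() and check_wakeup_irqs() above are explicitly for core kernel use, so the sketch below only illustrates the ordering a suspend path might follow (disable device interrupts, bail out if a wakeup interrupt is already pending, re-enable on the way out); it is not a driver-callable API and the error handling is reduced to the minimum.

#include <linux/errno.h>
#include <linux/interrupt.h>

static int example_enter_suspend(void)
{
	int error = 0;

	suspend_device_irqs();		/* device interrupts off, marked IRQ_SUSPENDED */

	if (check_wakeup_irqs()) {
		/* a wakeup interrupt is already pending: abort the transition */
		error = -EBUSY;
	} else {
		/* ... platform-specific low-power entry would happen here ... */
	}

	resume_device_irqs();		/* device interrupts back on */
	return error;
}
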
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8a7bfb1b6ca0..3af4ffd591b9 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,6 +21,7 @@
21 21
22#define IOMMU_READ (1) 22#define IOMMU_READ (1)
23#define IOMMU_WRITE (2) 23#define IOMMU_WRITE (2)
24#define IOMMU_CACHE (4) /* DMA cache coherency */
24 25
25struct device; 26struct device;
26 27
@@ -28,6 +29,8 @@ struct iommu_domain {
28 void *priv; 29 void *priv;
29}; 30};
30 31
32#define IOMMU_CAP_CACHE_COHERENCY 0x1
33
31struct iommu_ops { 34struct iommu_ops {
32 int (*domain_init)(struct iommu_domain *domain); 35 int (*domain_init)(struct iommu_domain *domain);
33 void (*domain_destroy)(struct iommu_domain *domain); 36 void (*domain_destroy)(struct iommu_domain *domain);
@@ -39,6 +42,8 @@ struct iommu_ops {
39 size_t size); 42 size_t size);
40 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, 43 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
41 unsigned long iova); 44 unsigned long iova);
45 int (*domain_has_cap)(struct iommu_domain *domain,
46 unsigned long cap);
42}; 47};
43 48
44#ifdef CONFIG_IOMMU_API 49#ifdef CONFIG_IOMMU_API
@@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
57 size_t size); 62 size_t size);
58extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, 63extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
59 unsigned long iova); 64 unsigned long iova);
65extern int iommu_domain_has_cap(struct iommu_domain *domain,
66 unsigned long cap);
60 67
61#else /* CONFIG_IOMMU_API */ 68#else /* CONFIG_IOMMU_API */
62 69
@@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
107 return 0; 114 return 0;
108} 115}
109 116
117static inline int domain_has_cap(struct iommu_domain *domain,
118 unsigned long cap)
119{
120 return 0;
121}
122
110#endif /* CONFIG_IOMMU_API */ 123#endif /* CONFIG_IOMMU_API */
111 124
112#endif /* __LINUX_IOMMU_H */ 125#endif /* __LINUX_IOMMU_H */
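
The IOMMU_CACHE protection bit and the domain_has_cap() hook added above let a caller ask for snooped (cache-coherent) DMA mappings only when the hardware supports them. A small sketch of the intended pattern, using only the declarations shown here:

#include <linux/iommu.h>

static int example_prot_for(struct iommu_domain *domain)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
		prot |= IOMMU_CACHE;	/* request coherent (snooped) mappings */

	return prot;			/* pass to the mapping call of choice */
}
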
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index ea330f9e7100..3bf40e246a80 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -25,7 +25,7 @@ struct ipc_ids {
25}; 25};
26 26
27struct ipc_namespace { 27struct ipc_namespace {
28 struct kref kref; 28 atomic_t count;
29 struct ipc_ids ids[3]; 29 struct ipc_ids ids[3];
30 30
31 int sem_ctls[4]; 31 int sem_ctls[4];
@@ -44,25 +44,57 @@ struct ipc_namespace {
44 int shm_tot; 44 int shm_tot;
45 45
46 struct notifier_block ipcns_nb; 46 struct notifier_block ipcns_nb;
47
48 /* The kern_mount of the mqueuefs sb. We take a ref on it */
49 struct vfsmount *mq_mnt;
50
51 /* # queues in this ns, protected by mq_lock */
52 unsigned int mq_queues_count;
53
54 /* next fields are set through sysctl */
55 unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */
56 unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */
57 unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */
58
47}; 59};
48 60
49extern struct ipc_namespace init_ipc_ns; 61extern struct ipc_namespace init_ipc_ns;
50extern atomic_t nr_ipc_ns; 62extern atomic_t nr_ipc_ns;
51 63
52#ifdef CONFIG_SYSVIPC 64extern spinlock_t mq_lock;
65#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
53#define INIT_IPC_NS(ns) .ns = &init_ipc_ns, 66#define INIT_IPC_NS(ns) .ns = &init_ipc_ns,
67#else
68#define INIT_IPC_NS(ns)
69#endif
54 70
71#ifdef CONFIG_SYSVIPC
55extern int register_ipcns_notifier(struct ipc_namespace *); 72extern int register_ipcns_notifier(struct ipc_namespace *);
56extern int cond_register_ipcns_notifier(struct ipc_namespace *); 73extern int cond_register_ipcns_notifier(struct ipc_namespace *);
57extern void unregister_ipcns_notifier(struct ipc_namespace *); 74extern void unregister_ipcns_notifier(struct ipc_namespace *);
58extern int ipcns_notify(unsigned long); 75extern int ipcns_notify(unsigned long);
59
60#else /* CONFIG_SYSVIPC */ 76#else /* CONFIG_SYSVIPC */
61#define INIT_IPC_NS(ns) 77static inline int register_ipcns_notifier(struct ipc_namespace *ns)
78{ return 0; }
79static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns)
80{ return 0; }
81static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { }
82static inline int ipcns_notify(unsigned long l) { return 0; }
62#endif /* CONFIG_SYSVIPC */ 83#endif /* CONFIG_SYSVIPC */
63 84
64#if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS) 85#ifdef CONFIG_POSIX_MQUEUE
65extern void free_ipc_ns(struct kref *kref); 86extern int mq_init_ns(struct ipc_namespace *ns);
87/* default values */
88#define DFLT_QUEUESMAX 256 /* max number of message queues */
89#define DFLT_MSGMAX 10 /* max number of messages in each queue */
90#define HARD_MSGMAX (131072/sizeof(void *))
91#define DFLT_MSGSIZEMAX 8192 /* max message size */
92#else
93static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
94#endif
95
96#if defined(CONFIG_IPC_NS)
97extern void free_ipc_ns(struct ipc_namespace *ns);
66extern struct ipc_namespace *copy_ipcs(unsigned long flags, 98extern struct ipc_namespace *copy_ipcs(unsigned long flags,
67 struct ipc_namespace *ns); 99 struct ipc_namespace *ns);
68extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, 100extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
@@ -72,14 +104,11 @@ extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
72static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) 104static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
73{ 105{
74 if (ns) 106 if (ns)
75 kref_get(&ns->kref); 107 atomic_inc(&ns->count);
76 return ns; 108 return ns;
77} 109}
78 110
79static inline void put_ipc_ns(struct ipc_namespace *ns) 111extern void put_ipc_ns(struct ipc_namespace *ns);
80{
81 kref_put(&ns->kref, free_ipc_ns);
82}
83#else 112#else
84static inline struct ipc_namespace *copy_ipcs(unsigned long flags, 113static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
85 struct ipc_namespace *ns) 114 struct ipc_namespace *ns)
@@ -99,4 +128,18 @@ static inline void put_ipc_ns(struct ipc_namespace *ns)
99{ 128{
100} 129}
101#endif 130#endif
131
132#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
133
134struct ctl_table_header;
135extern struct ctl_table_header *mq_register_sysctl_table(void);
136
137#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
138
139static inline struct ctl_table_header *mq_register_sysctl_table(void)
140{
141 return NULL;
142}
143
144#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
102#endif 145#endif
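
With the kref replaced by a bare atomic_t count and put_ipc_ns() moved out of line, reference handling keeps the same shape from a caller's point of view. A sketch, with the function name and the work done under the reference purely illustrative:

#include <linux/ipc_namespace.h>

static void example_use_ns(struct ipc_namespace *ns)
{
	ns = get_ipc_ns(ns);	/* atomic_inc(&ns->count) */

	/* ... consult ns->mq_queues_max, ns->ids[], and so on ... */

	put_ipc_ns(ns);		/* out of line now; frees the ns when count hits zero */
}
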
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 62b73668b602..f7c9c75a2775 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -230,6 +230,6 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
230 automatically be dstroyed when the interface is destroyed. */ 230 automatically be dstroyed when the interface is destroyed. */
231int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, 231int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
232 read_proc_t *read_proc, 232 read_proc_t *read_proc,
233 void *data, struct module *owner); 233 void *data);
234 234
235#endif /* __LINUX_IPMI_SMI_H */ 235#endif /* __LINUX_IPMI_SMI_H */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 9c62fbe2ef30..974890b3c52f 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -67,6 +67,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
67#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ 67#define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */
68#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ 68#define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */
69#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/ 69#define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/
70#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
70 71
71#ifdef CONFIG_IRQ_PER_CPU 72#ifdef CONFIG_IRQ_PER_CPU
72# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) 73# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 74bde13224c9..b02a3f1d46a0 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -24,8 +24,8 @@
24# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) 24# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
25# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) 25# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
26# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) 26# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
27# define trace_softirq_enter() do { current->softirq_context++; } while (0) 27# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
28# define trace_softirq_exit() do { current->softirq_context--; } while (0) 28# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
29# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, 29# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
30#else 30#else
31# define trace_hardirqs_on() do { } while (0) 31# define trace_hardirqs_on() do { } while (0)
@@ -38,8 +38,8 @@
38# define trace_softirqs_enabled(p) 0 38# define trace_softirqs_enabled(p) 0
39# define trace_hardirq_enter() do { } while (0) 39# define trace_hardirq_enter() do { } while (0)
40# define trace_hardirq_exit() do { } while (0) 40# define trace_hardirq_exit() do { } while (0)
41# define trace_softirq_enter() do { } while (0) 41# define lockdep_softirq_enter() do { } while (0)
42# define trace_softirq_exit() do { } while (0) 42# define lockdep_softirq_exit() do { } while (0)
43# define INIT_TRACE_IRQFLAGS 43# define INIT_TRACE_IRQFLAGS
44#endif 44#endif
45 45
diff --git a/include/linux/ivtv.h b/include/linux/ivtv.h
index f2720280b9ec..062d20f74322 100644
--- a/include/linux/ivtv.h
+++ b/include/linux/ivtv.h
@@ -60,10 +60,10 @@ struct ivtv_dma_frame {
60 60
61#define IVTV_IOC_DMA_FRAME _IOW ('V', BASE_VIDIOC_PRIVATE+0, struct ivtv_dma_frame) 61#define IVTV_IOC_DMA_FRAME _IOW ('V', BASE_VIDIOC_PRIVATE+0, struct ivtv_dma_frame)
62 62
63/* These are the VBI types as they appear in the embedded VBI private packets. */ 63/* Deprecated defines: applications should use the defines from videodev2.h */
64#define IVTV_SLICED_TYPE_TELETEXT_B (1) 64#define IVTV_SLICED_TYPE_TELETEXT_B V4L2_MPEG_VBI_IVTV_TELETEXT_B
65#define IVTV_SLICED_TYPE_CAPTION_525 (4) 65#define IVTV_SLICED_TYPE_CAPTION_525 V4L2_MPEG_VBI_IVTV_CAPTION_525
66#define IVTV_SLICED_TYPE_WSS_625 (5) 66#define IVTV_SLICED_TYPE_WSS_625 V4L2_MPEG_VBI_IVTV_WSS_625
67#define IVTV_SLICED_TYPE_VPS (7) 67#define IVTV_SLICED_TYPE_VPS V4L2_MPEG_VBI_IVTV_VPS
68 68
69#endif /* _LINUX_IVTV_H */ 69#endif /* _LINUX_IVTV_H */
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 64246dce5663..53ae4399da2d 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -35,7 +35,7 @@
35#define journal_oom_retry 1 35#define journal_oom_retry 1
36 36
37/* 37/*
38 * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds 38 * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
39 * certain classes of error which can occur due to failed IOs. Under 39 * certain classes of error which can occur due to failed IOs. Under
40 * normal use we want ext3 to continue after such errors, because 40 * normal use we want ext3 to continue after such errors, because
41 * hardware _can_ fail, but for debugging purposes when running tests on 41 * hardware _can_ fail, but for debugging purposes when running tests on
@@ -552,6 +552,11 @@ struct transaction_s
552 */ 552 */
553 int t_handle_count; 553 int t_handle_count;
554 554
555 /*
556 * This transaction is being forced and some process is
557 * waiting for it to finish.
558 */
559 int t_synchronous_commit:1;
555}; 560};
556 561
557/** 562/**
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 4d248b3f1323..8815a3456b3b 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -649,6 +649,12 @@ struct transaction_s
649 int t_handle_count; 649 int t_handle_count;
650 650
651 /* 651 /*
652 * This transaction is being forced and some process is
653 * waiting for it to finish.
654 */
655 int t_synchronous_commit:1;
656
657 /*
652 * For use by the filesystem to store fs-specific data 658 * For use by the filesystem to store fs-specific data
653 * structures associated with the transaction 659 * structures associated with the transaction
654 */ 660 */
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index f3fe34391d8e..792274269f2b 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -13,10 +13,17 @@
13#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 13#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
14 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) 14 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
15 15
16struct module;
17
16#ifdef CONFIG_KALLSYMS 18#ifdef CONFIG_KALLSYMS
17/* Lookup the address for a symbol. Returns 0 if not found. */ 19/* Lookup the address for a symbol. Returns 0 if not found. */
18unsigned long kallsyms_lookup_name(const char *name); 20unsigned long kallsyms_lookup_name(const char *name);
19 21
22/* Call a function on each kallsyms symbol in the core kernel */
23int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
24 unsigned long),
25 void *data);
26
20extern int kallsyms_lookup_size_offset(unsigned long addr, 27extern int kallsyms_lookup_size_offset(unsigned long addr,
21 unsigned long *symbolsize, 28 unsigned long *symbolsize,
22 unsigned long *offset); 29 unsigned long *offset);
@@ -43,6 +50,14 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
43 return 0; 50 return 0;
44} 51}
45 52
53static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
54 struct module *,
55 unsigned long),
56 void *data)
57{
58 return 0;
59}
60
46static inline int kallsyms_lookup_size_offset(unsigned long addr, 61static inline int kallsyms_lookup_size_offset(unsigned long addr,
47 unsigned long *symbolsize, 62 unsigned long *symbolsize,
48 unsigned long *offset) 63 unsigned long *offset)
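
kallsyms_on_each_symbol() above gives in-kernel users an iterator over the core kernel's symbol table. A hedged sketch, assuming the usual convention that a non-zero return from the callback stops the walk; the symbol being searched for is arbitrary:

#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>

static int example_find_sym(void *data, const char *name,
			    struct module *mod, unsigned long addr)
{
	if (strcmp(name, data) == 0) {
		pr_info("found %s at %#lx\n", name, addr);
		return 1;	/* stop iterating */
	}
	return 0;
}

static void example_lookup(void)
{
	kallsyms_on_each_symbol(example_find_sym, (void *)"schedule");
}
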
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 914918abfdd1..d9e75ec7def5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -242,6 +242,20 @@ extern struct ratelimit_state printk_ratelimit_state;
242extern int printk_ratelimit(void); 242extern int printk_ratelimit(void);
243extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, 243extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
244 unsigned int interval_msec); 244 unsigned int interval_msec);
245
246/*
247 * Print a one-time message (analogous to WARN_ONCE() et al):
248 */
249#define printk_once(x...) ({ \
250 static int __print_once = 1; \
251 \
252 if (__print_once) { \
253 __print_once = 0; \
254 printk(x); \
255 } \
256})
257
258void log_buf_kexec_setup(void);
245#else 259#else
246static inline int vprintk(const char *s, va_list args) 260static inline int vprintk(const char *s, va_list args)
247 __attribute__ ((format (printf, 1, 0))); 261 __attribute__ ((format (printf, 1, 0)));
@@ -253,6 +267,13 @@ static inline int printk_ratelimit(void) { return 0; }
253static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ 267static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
254 unsigned int interval_msec) \ 268 unsigned int interval_msec) \
255 { return false; } 269 { return false; }
270
271/* No effect, but we still get type checking even in the !PRINTK case: */
272#define printk_once(x...) printk(x)
273
274static inline void log_buf_kexec_setup(void)
275{
276}
256#endif 277#endif
257 278
258extern int printk_needs_cpu(int cpu); 279extern int printk_needs_cpu(int cpu);
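
printk_once() above is the printk counterpart of WARN_ONCE(): the message is emitted on the first hit only, while the format string is still type-checked when printk is configured out. For instance (the deprecation message and ioctl wrapper are made up):

#include <linux/kernel.h>

static long example_ioctl(unsigned int cmd)
{
	printk_once(KERN_WARNING "example: ioctl %u is deprecated\n", cmd);
	return 0;
}
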
@@ -353,6 +374,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
353 printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) 374 printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
354#define pr_info(fmt, ...) \ 375#define pr_info(fmt, ...) \
355 printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) 376 printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
377#define pr_cont(fmt, ...) \
378 printk(KERN_CONT fmt, ##__VA_ARGS__)
356 379
357/* If you are writing a driver, please use dev_dbg instead */ 380/* If you are writing a driver, please use dev_dbg instead */
358#if defined(DEBUG) 381#if defined(DEBUG)
@@ -369,6 +392,139 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
369#endif 392#endif
370 393
371/* 394/*
395 * General tracing related utility functions - trace_printk(),
396 * tracing_on/tracing_off and tracing_start()/tracing_stop
397 *
398 * Use tracing_on/tracing_off when you want to quickly turn on or off
399 * tracing. It simply enables or disables the recording of the trace events.
400 * This also corresponds to the user space debugfs/tracing/tracing_on
401 * file, which gives a means for the kernel and userspace to interact.
402 * Place a tracing_off() in the kernel where you want tracing to end.
403 * From user space, examine the trace, and then echo 1 > tracing_on
404 * to continue tracing.
405 *
406 * tracing_stop/tracing_start has slightly more overhead. It is used
407 * by things like suspend to ram where disabling the recording of the
408 * trace is not enough, but tracing must actually stop because things
409 * like calling smp_processor_id() may crash the system.
410 *
411 * Most likely, you want to use tracing_on/tracing_off.
412 */
413#ifdef CONFIG_RING_BUFFER
414void tracing_on(void);
415void tracing_off(void);
416/* trace_off_permanent stops recording with no way to bring it back */
417void tracing_off_permanent(void);
418int tracing_is_on(void);
419#else
420static inline void tracing_on(void) { }
421static inline void tracing_off(void) { }
422static inline void tracing_off_permanent(void) { }
423static inline int tracing_is_on(void) { return 0; }
424#endif
425#ifdef CONFIG_TRACING
426extern void tracing_start(void);
427extern void tracing_stop(void);
428extern void ftrace_off_permanent(void);
429
430extern void
431ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
432
433static inline void __attribute__ ((format (printf, 1, 2)))
434____trace_printk_check_format(const char *fmt, ...)
435{
436}
437#define __trace_printk_check_format(fmt, args...) \
438do { \
439 if (0) \
440 ____trace_printk_check_format(fmt, ##args); \
441} while (0)
442
443/**
444 * trace_printk - printf formatting in the ftrace buffer
445 * @fmt: the printf format for printing
446 *
447 * Note: __trace_printk is an internal function for trace_printk and
448 * the @ip is passed in via the trace_printk macro.
449 *
450 * This function allows a kernel developer to debug fast path sections
451 * that printk is not appropriate for. By scattering in various
452 * printk like tracing in the code, a developer can quickly see
453 * where problems are occurring.
454 *
455 * This is intended as a debugging tool for the developer only.
456 * Please refrain from leaving trace_printks scattered around in
457 * your code.
458 */
459
460#define trace_printk(fmt, args...) \
461do { \
462 __trace_printk_check_format(fmt, ##args); \
463 if (__builtin_constant_p(fmt)) { \
464 static const char *trace_printk_fmt \
465 __attribute__((section("__trace_printk_fmt"))) = \
466 __builtin_constant_p(fmt) ? fmt : NULL; \
467 \
468 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
469 } else \
470 __trace_printk(_THIS_IP_, fmt, ##args); \
471} while (0)
472
473extern int
474__trace_bprintk(unsigned long ip, const char *fmt, ...)
475 __attribute__ ((format (printf, 2, 3)));
476
477extern int
478__trace_printk(unsigned long ip, const char *fmt, ...)
479 __attribute__ ((format (printf, 2, 3)));
480
481/*
482 * The double __builtin_constant_p is because gcc will give us an error
483 * if we try to allocate the static variable to fmt if it is not a
484 * constant. Even with the outer if statement.
485 */
486#define ftrace_vprintk(fmt, vargs) \
487do { \
488 if (__builtin_constant_p(fmt)) { \
489 static const char *trace_printk_fmt \
490 __attribute__((section("__trace_printk_fmt"))) = \
491 __builtin_constant_p(fmt) ? fmt : NULL; \
492 \
493 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
494 } else \
495 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
496} while (0)
497
498extern int
499__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
500
501extern int
502__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
503
504extern void ftrace_dump(void);
505#else
506static inline void
507ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
508static inline int
509trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
510
511static inline void tracing_start(void) { }
512static inline void tracing_stop(void) { }
513static inline void ftrace_off_permanent(void) { }
514static inline int
515trace_printk(const char *fmt, ...)
516{
517 return 0;
518}
519static inline int
520ftrace_vprintk(const char *fmt, va_list ap)
521{
522 return 0;
523}
524static inline void ftrace_dump(void) { }
525#endif /* CONFIG_TRACING */
526
527/*
372 * Display an IP address in readable format. 528 * Display an IP address in readable format.
373 */ 529 */
374 530
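
As the comment block above explains, trace_printk() writes into the ftrace ring buffer instead of the printk log, and tracing_on()/tracing_off() resume or freeze recording. A small debugging sketch along those lines; the threshold and names are invented:

#include <linux/kernel.h>
#include <linux/types.h>

static void example_fast_path(int cpu, u64 delta_ns)
{
	trace_printk("cpu %d took %llu ns\n", cpu, (unsigned long long)delta_ns);

	if (delta_ns > 1000000)
		tracing_off();	/* keep the interesting window in the buffer */
}
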
@@ -379,18 +535,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
379 ((unsigned char *)&addr)[3] 535 ((unsigned char *)&addr)[3]
380#define NIPQUAD_FMT "%u.%u.%u.%u" 536#define NIPQUAD_FMT "%u.%u.%u.%u"
381 537
382#if defined(__LITTLE_ENDIAN)
383#define HIPQUAD(addr) \
384 ((unsigned char *)&addr)[3], \
385 ((unsigned char *)&addr)[2], \
386 ((unsigned char *)&addr)[1], \
387 ((unsigned char *)&addr)[0]
388#elif defined(__BIG_ENDIAN)
389#define HIPQUAD NIPQUAD
390#else
391#error "Please fix asm/byteorder.h"
392#endif /* __LITTLE_ENDIAN */
393
394/* 538/*
395 * min()/max()/clamp() macros that also do 539 * min()/max()/clamp() macros that also do
396 * strict type-checking.. See the 540 * strict type-checking.. See the
diff --git a/include/linux/key.h b/include/linux/key.h
index 21d32a142c00..e544f466d69a 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -20,6 +20,7 @@
20#include <linux/rbtree.h> 20#include <linux/rbtree.h>
21#include <linux/rcupdate.h> 21#include <linux/rcupdate.h>
22#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/rwsem.h>
23#include <asm/atomic.h> 24#include <asm/atomic.h>
24 25
25#ifdef __KERNEL__ 26#ifdef __KERNEL__
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 92213a9194e1..d5fa565086d1 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -29,10 +29,15 @@
29#ifdef CONFIG_MODULES 29#ifdef CONFIG_MODULES
30/* modprobe exit status on success, -ve on error. Return value 30/* modprobe exit status on success, -ve on error. Return value
31 * usually useless though. */ 31 * usually useless though. */
32extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2))); 32extern int __request_module(bool wait, const char *name, ...) \
33#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) 33 __attribute__((format(printf, 2, 3)));
34#define request_module(mod...) __request_module(true, mod)
35#define request_module_nowait(mod...) __request_module(false, mod)
36#define try_then_request_module(x, mod...) \
37 ((x) ?: (__request_module(false, mod), (x)))
34#else 38#else
35static inline int request_module(const char * name, ...) { return -ENOSYS; } 39static inline int request_module(const char *name, ...) { return -ENOSYS; }
40static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; }
36#define try_then_request_module(x, mod...) (x) 41#define try_then_request_module(x, mod...) (x)
37#endif 42#endif
38 43
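
request_module() now wraps __request_module(true, ...), and the new request_module_nowait() fires modprobe without waiting for it to finish, which suits callers that must not block on module loading. A one-line usage sketch with an invented module alias:

#include <linux/kmod.h>

static void example_load_proto(int proto)
{
	/* kick off modprobe and return immediately; the old call would wait */
	request_module_nowait("example-proto-%d", proto);
}
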
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 2ec6cc14a114..bcd9c07848be 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -94,12 +94,16 @@ struct kprobe {
94 /* Called after addr is executed, unless... */ 94 /* Called after addr is executed, unless... */
95 kprobe_post_handler_t post_handler; 95 kprobe_post_handler_t post_handler;
96 96
97 /* ... called if executing addr causes a fault (eg. page fault). 97 /*
98 * Return 1 if it handled fault, otherwise kernel will see it. */ 98 * ... called if executing addr causes a fault (eg. page fault).
99 * Return 1 if it handled fault, otherwise kernel will see it.
100 */
99 kprobe_fault_handler_t fault_handler; 101 kprobe_fault_handler_t fault_handler;
100 102
101 /* ... called if breakpoint trap occurs in probe handler. 103 /*
102 * Return 1 if it handled break, otherwise kernel will see it. */ 104 * ... called if breakpoint trap occurs in probe handler.
105 * Return 1 if it handled break, otherwise kernel will see it.
106 */
103 kprobe_break_handler_t break_handler; 107 kprobe_break_handler_t break_handler;
104 108
105 /* Saved opcode (which has been replaced with breakpoint) */ 109 /* Saved opcode (which has been replaced with breakpoint) */
@@ -108,18 +112,28 @@ struct kprobe {
108 /* copy of the original instruction */ 112 /* copy of the original instruction */
109 struct arch_specific_insn ainsn; 113 struct arch_specific_insn ainsn;
110 114
111 /* Indicates various status flags. Protected by kprobe_mutex. */ 115 /*
116 * Indicates various status flags.
117 * Protected by kprobe_mutex after this kprobe is registered.
118 */
112 u32 flags; 119 u32 flags;
113}; 120};
114 121
115/* Kprobe status flags */ 122/* Kprobe status flags */
116#define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ 123#define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */
124#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */
117 125
126/* Has this kprobe gone ? */
118static inline int kprobe_gone(struct kprobe *p) 127static inline int kprobe_gone(struct kprobe *p)
119{ 128{
120 return p->flags & KPROBE_FLAG_GONE; 129 return p->flags & KPROBE_FLAG_GONE;
121} 130}
122 131
132/* Is this kprobe disabled ? */
133static inline int kprobe_disabled(struct kprobe *p)
134{
135 return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
136}
123/* 137/*
124 * Special probe type that uses setjmp-longjmp type tricks to resume 138 * Special probe type that uses setjmp-longjmp type tricks to resume
125 * execution at a specified entry with a matching prototype corresponding 139 * execution at a specified entry with a matching prototype corresponding
@@ -279,6 +293,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num);
279void kprobe_flush_task(struct task_struct *tk); 293void kprobe_flush_task(struct task_struct *tk);
280void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); 294void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
281 295
296int disable_kprobe(struct kprobe *kp);
297int enable_kprobe(struct kprobe *kp);
298
282#else /* !CONFIG_KPROBES: */ 299#else /* !CONFIG_KPROBES: */
283 300
284static inline int kprobes_built_in(void) 301static inline int kprobes_built_in(void)
@@ -345,5 +362,30 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
345static inline void kprobe_flush_task(struct task_struct *tk) 362static inline void kprobe_flush_task(struct task_struct *tk)
346{ 363{
347} 364}
365static inline int disable_kprobe(struct kprobe *kp)
366{
367 return -ENOSYS;
368}
369static inline int enable_kprobe(struct kprobe *kp)
370{
371 return -ENOSYS;
372}
348#endif /* CONFIG_KPROBES */ 373#endif /* CONFIG_KPROBES */
374static inline int disable_kretprobe(struct kretprobe *rp)
375{
376 return disable_kprobe(&rp->kp);
377}
378static inline int enable_kretprobe(struct kretprobe *rp)
379{
380 return enable_kprobe(&rp->kp);
381}
382static inline int disable_jprobe(struct jprobe *jp)
383{
384 return disable_kprobe(&jp->kp);
385}
386static inline int enable_jprobe(struct jprobe *jp)
387{
388 return enable_kprobe(&jp->kp);
389}
390
349#endif /* _LINUX_KPROBES_H */ 391#endif /* _LINUX_KPROBES_H */
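
disable_kprobe()/enable_kprobe() above (plus the kretprobe/jprobe wrappers) let a registered probe be switched off and back on without the cost of unregistering it. A sketch, assuming register_kprobe(&example_kp) has already succeeded; the probed symbol is only an example:

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/types.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= example_pre,
};

static int example_toggle(bool on)
{
	return on ? enable_kprobe(&example_kp) : disable_kprobe(&example_kp);
}
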
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h
new file mode 100644
index 000000000000..42f854a1a199
--- /dev/null
+++ b/include/linux/leds-bd2802.h
@@ -0,0 +1,26 @@
1/*
2 * leds-bd2802.h - RGB LED Driver
3 *
4 * Copyright (C) 2009 Samsung Electronics
5 * Kim Kyuwon <q1.kim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf
12 *
13 */
14#ifndef _LEDS_BD2802_H_
15#define _LEDS_BD2802_H_
16
17struct bd2802_led_platform_data{
18 int reset_gpio;
19 u8 rgb_time;
20};
21
22#define RGB_TIME(slopedown, slopeup, waveform) \
23 ((slopedown) << 6 | (slopeup) << 4 | (waveform))
24
25#endif /* _LEDS_BD2802_H_ */
26
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 24489da701e3..376fe07732ea 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -30,6 +30,7 @@ enum led_brightness {
30struct led_classdev { 30struct led_classdev {
31 const char *name; 31 const char *name;
32 int brightness; 32 int brightness;
33 int max_brightness;
33 int flags; 34 int flags;
34 35
35 /* Lower 16 bits reflect status */ 36 /* Lower 16 bits reflect status */
@@ -140,7 +141,8 @@ struct gpio_led {
140 const char *name; 141 const char *name;
141 const char *default_trigger; 142 const char *default_trigger;
142 unsigned gpio; 143 unsigned gpio;
143 u8 active_low; 144 u8 active_low : 1;
145 u8 retain_state_suspended : 1;
144}; 146};
145 147
146struct gpio_led_platform_data { 148struct gpio_led_platform_data {
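
struct gpio_led grows a retain_state_suspended bit next to active_low, so a board can keep, say, a charging indicator lit across suspend. A platform-data sketch; the GPIO number, names and trigger are illustrative, and the gpio_led_platform_data fields beyond the line shown above are assumed:

#include <linux/kernel.h>
#include <linux/leds.h>

static struct gpio_led example_leds[] = {
	{
		.name			= "example:green:charging",
		.default_trigger	= "battery-charging",	/* invented trigger */
		.gpio			= 42,
		.active_low		= 1,
		.retain_state_suspended	= 1,	/* new: stays lit over suspend */
	},
};

static struct gpio_led_platform_data example_led_data = {
	.num_leds	= ARRAY_SIZE(example_leds),
	.leds		= example_leds,
};
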
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
new file mode 100644
index 000000000000..33a071167489
--- /dev/null
+++ b/include/linux/leds_pwm.h
@@ -0,0 +1,21 @@
1/*
2 * PWM LED driver data - see drivers/leds/leds-pwm.c
3 */
4#ifndef __LINUX_LEDS_PWM_H
5#define __LINUX_LEDS_PWM_H
6
7struct led_pwm {
8 const char *name;
9 const char *default_trigger;
10 unsigned pwm_id;
11 u8 active_low;
12 unsigned max_brightness;
13 unsigned pwm_period_ns;
14};
15
16struct led_pwm_platform_data {
17 int num_leds;
18 struct led_pwm *leds;
19};
20
21#endif
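
The new leds_pwm.h carries the platform data consumed by drivers/leds/leds-pwm.c. A board file might describe one PWM-driven LED as below; pwm_id and the period are board specific and chosen arbitrarily here:

#include <linux/kernel.h>
#include <linux/leds_pwm.h>

static struct led_pwm example_pwm_leds[] = {
	{
		.name		= "example:white:backlight",
		.pwm_id		= 0,
		.max_brightness	= 255,
		.pwm_period_ns	= 1000000,	/* 1 ms period, i.e. 1 kHz */
	},
};

static struct led_pwm_platform_data example_pwm_led_data = {
	.num_leds	= ARRAY_SIZE(example_pwm_leds),
	.leds		= example_pwm_leds,
};
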
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 76262d83656b..b450a2628855 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -379,7 +379,7 @@ enum {
379 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ 379 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
380 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands 380 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
381 not multiple of 16 bytes */ 381 not multiple of 16 bytes */
382 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */ 382 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
383 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ 383 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
384 384
385 /* DMA mask for user DMA control: User visible values; DO NOT 385 /* DMA mask for user DMA control: User visible values; DO NOT
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 7dc5b6cb44cd..d39ed1cc5fbf 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -25,13 +25,13 @@ struct svc_rqst;
25#define NLM_MAXCOOKIELEN 32 25#define NLM_MAXCOOKIELEN 32
26#define NLM_MAXSTRLEN 1024 26#define NLM_MAXSTRLEN 1024
27 27
28#define nlm_granted __constant_htonl(NLM_LCK_GRANTED) 28#define nlm_granted cpu_to_be32(NLM_LCK_GRANTED)
29#define nlm_lck_denied __constant_htonl(NLM_LCK_DENIED) 29#define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED)
30#define nlm_lck_denied_nolocks __constant_htonl(NLM_LCK_DENIED_NOLOCKS) 30#define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS)
31#define nlm_lck_blocked __constant_htonl(NLM_LCK_BLOCKED) 31#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED)
32#define nlm_lck_denied_grace_period __constant_htonl(NLM_LCK_DENIED_GRACE_PERIOD) 32#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD)
33 33
34#define nlm_drop_reply __constant_htonl(30000) 34#define nlm_drop_reply cpu_to_be32(30000)
35 35
36/* Lock info passed via NLM */ 36/* Lock info passed via NLM */
37struct nlm_lock { 37struct nlm_lock {
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index 12bfe09de2b1..7353821341ed 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -15,11 +15,11 @@
15#include <linux/lockd/xdr.h> 15#include <linux/lockd/xdr.h>
16 16
17/* error codes new to NLMv4 */ 17/* error codes new to NLMv4 */
18#define nlm4_deadlock __constant_htonl(NLM_DEADLCK) 18#define nlm4_deadlock cpu_to_be32(NLM_DEADLCK)
19#define nlm4_rofs __constant_htonl(NLM_ROFS) 19#define nlm4_rofs cpu_to_be32(NLM_ROFS)
20#define nlm4_stale_fh __constant_htonl(NLM_STALE_FH) 20#define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH)
21#define nlm4_fbig __constant_htonl(NLM_FBIG) 21#define nlm4_fbig cpu_to_be32(NLM_FBIG)
22#define nlm4_failed __constant_htonl(NLM_FAILED) 22#define nlm4_failed cpu_to_be32(NLM_FAILED)
23 23
24 24
25 25
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 23bf02fb124f..da5a5a1f4cd2 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -20,43 +20,10 @@ struct lockdep_map;
20#include <linux/stacktrace.h> 20#include <linux/stacktrace.h>
21 21
22/* 22/*
23 * Lock-class usage-state bits: 23 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
24 * the total number of states... :-(
24 */ 25 */
25enum lock_usage_bit 26#define XXX_LOCK_USAGE_STATES (1+3*4)
26{
27 LOCK_USED = 0,
28 LOCK_USED_IN_HARDIRQ,
29 LOCK_USED_IN_SOFTIRQ,
30 LOCK_ENABLED_SOFTIRQS,
31 LOCK_ENABLED_HARDIRQS,
32 LOCK_USED_IN_HARDIRQ_READ,
33 LOCK_USED_IN_SOFTIRQ_READ,
34 LOCK_ENABLED_SOFTIRQS_READ,
35 LOCK_ENABLED_HARDIRQS_READ,
36 LOCK_USAGE_STATES
37};
38
39/*
40 * Usage-state bitmasks:
41 */
42#define LOCKF_USED (1 << LOCK_USED)
43#define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ)
44#define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ)
45#define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS)
46#define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS)
47
48#define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
49#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
50
51#define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ)
52#define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ)
53#define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ)
54#define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ)
55
56#define LOCKF_ENABLED_IRQS_READ \
57 (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
58#define LOCKF_USED_IN_IRQ_READ \
59 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
60 27
61#define MAX_LOCKDEP_SUBCLASSES 8UL 28#define MAX_LOCKDEP_SUBCLASSES 8UL
62 29
@@ -97,7 +64,7 @@ struct lock_class {
97 * IRQ/softirq usage tracking bits: 64 * IRQ/softirq usage tracking bits:
98 */ 65 */
99 unsigned long usage_mask; 66 unsigned long usage_mask;
100 struct stack_trace usage_traces[LOCK_USAGE_STATES]; 67 struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
101 68
102 /* 69 /*
103 * These fields represent a directed graph of lock dependencies, 70 * These fields represent a directed graph of lock dependencies,
@@ -324,7 +291,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
324 lock_set_class(lock, lock->name, lock->key, subclass, ip); 291 lock_set_class(lock, lock->name, lock->key, subclass, ip);
325} 292}
326 293
327# define INIT_LOCKDEP .lockdep_recursion = 0, 294extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
295extern void lockdep_clear_current_reclaim_state(void);
296extern void lockdep_trace_alloc(gfp_t mask);
297
298# define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,
328 299
329#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) 300#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
330 301
@@ -342,6 +313,9 @@ static inline void lockdep_on(void)
342# define lock_release(l, n, i) do { } while (0) 313# define lock_release(l, n, i) do { } while (0)
343# define lock_set_class(l, n, k, s, i) do { } while (0) 314# define lock_set_class(l, n, k, s, i) do { } while (0)
344# define lock_set_subclass(l, s, i) do { } while (0) 315# define lock_set_subclass(l, s, i) do { } while (0)
316# define lockdep_set_current_reclaim_state(g) do { } while (0)
317# define lockdep_clear_current_reclaim_state() do { } while (0)
318# define lockdep_trace_alloc(g) do { } while (0)
345# define lockdep_init() do { } while (0) 319# define lockdep_init() do { } while (0)
346# define lockdep_info() do { } while (0) 320# define lockdep_info() do { } while (0)
347# define lockdep_init_map(lock, name, key, sub) \ 321# define lockdep_init_map(lock, name, key, sub) \
@@ -390,6 +364,23 @@ do { \
390 364
391#endif /* CONFIG_LOCK_STAT */ 365#endif /* CONFIG_LOCK_STAT */
392 366
367#ifdef CONFIG_LOCKDEP
368
369/*
370 * On lockdep we dont want the hand-coded irq-enable of
371 * _raw_*_lock_flags() code, because lockdep assumes
372 * that interrupts are not re-enabled during lock-acquire:
373 */
374#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
375 LOCK_CONTENDED((_lock), (try), (lock))
376
377#else /* CONFIG_LOCKDEP */
378
379#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
380 lockfl((_lock), (flags))
381
382#endif /* CONFIG_LOCKDEP */
383
393#ifdef CONFIG_GENERIC_HARDIRQS 384#ifdef CONFIG_GENERIC_HARDIRQS
394extern void early_init_irq_lock_class(void); 385extern void early_init_irq_lock_class(void);
395#else 386#else
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 6ffd6db5bb0d..40725447f5e0 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -160,5 +160,6 @@ int loop_unregister_transfer(int number);
160#define LOOP_SET_STATUS64 0x4C04 160#define LOOP_SET_STATUS64 0x4C04
161#define LOOP_GET_STATUS64 0x4C05 161#define LOOP_GET_STATUS64 0x4C05
162#define LOOP_CHANGE_FD 0x4C06 162#define LOOP_CHANGE_FD 0x4C06
163#define LOOP_SET_CAPACITY 0x4C07
163 164
164#endif 165#endif
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 326f45c86530..18146c980b68 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -88,9 +88,6 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
88/* 88/*
89 * For memory reclaim. 89 * For memory reclaim.
90 */ 90 */
91extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
92extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);
93
94extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem); 91extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
95extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, 92extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
96 int priority); 93 int priority);
@@ -104,6 +101,8 @@ struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
104 struct zone *zone); 101 struct zone *zone);
105struct zone_reclaim_stat* 102struct zone_reclaim_stat*
106mem_cgroup_get_reclaim_stat_from_page(struct page *page); 103mem_cgroup_get_reclaim_stat_from_page(struct page *page);
104extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
105 struct task_struct *p);
107 106
108#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 107#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
109extern int do_swap_account; 108extern int do_swap_account;
@@ -209,16 +208,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
209{ 208{
210} 209}
211 210
212static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
213{
214 return 0;
215}
216
217static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
218{
219 return 0;
220}
221
222static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) 211static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
223{ 212{
224 return 0; 213 return 0;
@@ -270,6 +259,11 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
270 return NULL; 259 return NULL;
271} 260}
272 261
262static inline void
263mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
264{
265}
266
273#endif /* CONFIG_CGROUP_MEM_CONT */ 267#endif /* CONFIG_CGROUP_MEM_CONT */
274 268
275#endif /* _LINUX_MEMCONTROL_H */ 269#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 3fdc10806d31..37fa19b34ef5 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -99,4 +99,21 @@ enum mem_add_context { BOOT, HOTPLUG };
99#define hotplug_memory_notifier(fn, pri) do { } while (0) 99#define hotplug_memory_notifier(fn, pri) do { } while (0)
100#endif 100#endif
101 101
102/*
103 * 'struct memory_accessor' is a generic interface to provide
104 * in-kernel access to persistent memory such as i2c or SPI EEPROMs
105 */
106struct memory_accessor {
107 ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset,
108 size_t count);
109 ssize_t (*write)(struct memory_accessor *, const char *buf,
110 off_t offset, size_t count);
111};
112
113/*
114 * Kernel text modification mutex, used for code patching. Users of this lock
115 * can sleep.
116 */
117extern struct mutex text_mutex;
118
102#endif /* _LINUX_MEMORY_H_ */ 119#endif /* _LINUX_MEMORY_H_ */
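
struct memory_accessor gives other kernel code direct read/write access to an EEPROM-like device. The consumer side is just an indirect call; in the sketch below the accessor is assumed to have been handed over by the EEPROM driver's setup callback, and the MAC-address layout is invented:

#include <linux/errno.h>
#include <linux/memory.h>
#include <linux/types.h>

static int example_read_mac(struct memory_accessor *mem, u8 *mac)
{
	ssize_t ret = mem->read(mem, (char *)mac, 0, 6);	/* 6 bytes at offset 0 */

	return ret == 6 ? 0 : -EIO;
}
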
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
new file mode 100644
index 000000000000..be469a357cbb
--- /dev/null
+++ b/include/linux/mfd/ds1wm.h
@@ -0,0 +1,6 @@
1/* MFD cell driver data for the DS1WM driver */
2
3struct ds1wm_driver_data {
4 int active_high;
5 int clock_rate;
6};
diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h
index b4294f12c4f8..3d3ed67bd969 100644
--- a/include/linux/mfd/htc-pasic3.h
+++ b/include/linux/mfd/htc-pasic3.h
@@ -48,7 +48,6 @@ struct pasic3_leds_machinfo {
48 48
49struct pasic3_platform_data { 49struct pasic3_platform_data {
50 struct pasic3_leds_machinfo *led_pdata; 50 struct pasic3_leds_machinfo *led_pdata;
51 unsigned int bus_shift;
52 unsigned int clock_rate; 51 unsigned int clock_rate;
53}; 52};
54 53
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 980669d50dca..42cca672f340 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -640,9 +640,11 @@ struct wm8350 {
640 * 640 *
641 * @init: Function called during driver initialisation. Should be 641 * @init: Function called during driver initialisation. Should be
642 * used by the platform to configure GPIO functions and similar. 642 * used by the platform to configure GPIO functions and similar.
643 * @irq_high: Set if WM8350 IRQ is active high.
643 */ 644 */
644struct wm8350_platform_data { 645struct wm8350_platform_data {
645 int (*init)(struct wm8350 *wm8350); 646 int (*init)(struct wm8350 *wm8350);
647 int irq_high;
646}; 648};
647 649
648 650
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
new file mode 100644
index 000000000000..1f76b1ebf627
--- /dev/null
+++ b/include/linux/mg_disk.h
@@ -0,0 +1,206 @@
1/*
 2 * include/linux/mg_disk.h
3 *
4 * Support for the mGine m[g]flash IO mode.
5 * Based on legacy hd.c
6 *
7 * (c) 2008 mGine Co.,LTD
8 * (c) 2008 unsik Kim <donari75@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef __MG_DISK_H__
16#define __MG_DISK_H__
17
18#include <linux/blkdev.h>
19#include <linux/ata.h>
20
21/* name for block device */
22#define MG_DISK_NAME "mgd"
23/* name for platform device */
24#define MG_DEV_NAME "mg_disk"
25
26#define MG_DISK_MAJ 0
27#define MG_DISK_MAX_PART 16
28#define MG_SECTOR_SIZE 512
29#define MG_MAX_SECTS 256
30
31/* Register offsets */
32#define MG_BUFF_OFFSET 0x8000
33#define MG_STORAGE_BUFFER_SIZE 0x200
34#define MG_REG_OFFSET 0xC000
35#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
36#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
37#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
38#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
39#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
40#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
41#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
42#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
43#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
44#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
45#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
46
47/* "Drive Select/Head Register" bit values */
48#define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */
49#define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON)
50#define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON)
51#define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON)
52
53
54/* "Device Control Register" bit values */
55#define MG_REG_CTRL_INTR_ENABLE 0x0
56#define MG_REG_CTRL_INTR_DISABLE (0x1<<1)
57#define MG_REG_CTRL_RESET (0x1<<2)
58#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0
59#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4)
60#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0
61#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5)
62#define MG_REG_CTRL_DPD_DISABLE 0x0
63#define MG_REG_CTRL_DPD_ENABLE (0x1<<6)
64
65/* Status register bit */
66/* error bit in status register */
67#define MG_REG_STATUS_BIT_ERROR 0x01
68/* corrected error in status register */
69#define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04
70/* data request bit in status register */
71#define MG_REG_STATUS_BIT_DATA_REQ 0x08
72/* DSC - Drive Seek Complete */
73#define MG_REG_STATUS_BIT_SEEK_DONE 0x10
74/* DWF - Drive Write Fault */
75#define MG_REG_STATUS_BIT_WRITE_FAULT 0x20
76#define MG_REG_STATUS_BIT_READY 0x40
77#define MG_REG_STATUS_BIT_BUSY 0x80
78
79/* handy status */
80#define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
81#define MG_READY_OK(s) (((s) & (MG_STAT_READY | \
82 (MG_REG_STATUS_BIT_BUSY | \
83 MG_REG_STATUS_BIT_WRITE_FAULT | \
84 MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
85
86/* Error register */
87#define MG_REG_ERR_AMNF 0x01
88#define MG_REG_ERR_ABRT 0x04
89#define MG_REG_ERR_IDNF 0x10
90#define MG_REG_ERR_UNC 0x40
91#define MG_REG_ERR_BBK 0x80
92
93/* error code for others */
94#define MG_ERR_NONE 0
95#define MG_ERR_TIMEOUT 0x100
96#define MG_ERR_INIT_STAT 0x101
97#define MG_ERR_TRANSLATION 0x102
98#define MG_ERR_CTRL_RST 0x103
99#define MG_ERR_INV_STAT 0x104
100#define MG_ERR_RSTOUT 0x105
101
102#define MG_MAX_ERRORS 6 /* Max read/write errors */
103
104/* command */
105#define MG_CMD_RD 0x20
106#define MG_CMD_WR 0x30
107#define MG_CMD_SLEEP 0x99
108#define MG_CMD_WAKEUP 0xC3
109#define MG_CMD_ID 0xEC
110#define MG_CMD_WR_CONF 0x3C
111#define MG_CMD_RD_CONF 0x40
112
113/* operation mode */
114#define MG_OP_CASCADE (1 << 0)
115#define MG_OP_CASCADE_SYNC_RD (1 << 1)
116#define MG_OP_CASCADE_SYNC_WR (1 << 2)
117#define MG_OP_INTERLEAVE (1 << 3)
118
119/* synchronous */
120#define MG_BURST_LAT_4 (3 << 4)
121#define MG_BURST_LAT_5 (4 << 4)
122#define MG_BURST_LAT_6 (5 << 4)
123#define MG_BURST_LAT_7 (6 << 4)
124#define MG_BURST_LAT_8 (7 << 4)
125#define MG_BURST_LEN_4 (1 << 1)
126#define MG_BURST_LEN_8 (2 << 1)
127#define MG_BURST_LEN_16 (3 << 1)
128#define MG_BURST_LEN_32 (4 << 1)
129#define MG_BURST_LEN_CONT (0 << 1)
130
131/* timeout value (unit: ms) */
132#define MG_TMAX_CONF_TO_CMD 1
133#define MG_TMAX_WAIT_RD_DRQ 10
134#define MG_TMAX_WAIT_WR_DRQ 500
135#define MG_TMAX_RST_TO_BUSY 10
136#define MG_TMAX_HDRST_TO_RDY 500
137#define MG_TMAX_SWRST_TO_RDY 500
138#define MG_TMAX_RSTOUT 3000
139
140/* device attribution */
141/* use mflash as boot device */
142#define MG_BOOT_DEV (1 << 0)
143/* use mflash as storage device */
144#define MG_STORAGE_DEV (1 << 1)
 145/* same as MG_STORAGE_DEV, but the bootloader has already done the reset sequence */
146#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
147
148#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
149
150/* names of GPIO resource */
151#define MG_RST_PIN "mg_rst"
152/* except MG_BOOT_DEV, reset-out pin should be assigned */
153#define MG_RSTOUT_PIN "mg_rstout"
154
155/* private driver data */
156struct mg_drv_data {
157 /* disk resource */
158 u32 use_polling;
159
160 /* device attribution */
161 u32 dev_attr;
162
163 /* internally used */
164 struct mg_host *host;
165};
166
167/* main structure for mflash driver */
168struct mg_host {
169 struct device *dev;
170
171 struct request_queue *breq;
172 spinlock_t lock;
173 struct gendisk *gd;
174
175 struct timer_list timer;
176 void (*mg_do_intr) (struct mg_host *);
177
178 u16 id[ATA_ID_WORDS];
179
180 u16 cyls;
181 u16 heads;
182 u16 sectors;
183 u32 n_sectors;
184 u32 nres_sectors;
185
186 void __iomem *dev_base;
187 unsigned int irq;
188 unsigned int rst;
189 unsigned int rstout;
190
191 u32 major;
192 u32 error;
193};
194
195/*
196 * Debugging macro and defines
197 */
198#undef DO_MG_DEBUG
199#ifdef DO_MG_DEBUG
200# define MG_DBG(fmt, args...) \
201 printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
 202#else /* DO_MG_DEBUG */
 203# define MG_DBG(fmt, args...) do { } while (0)
 204#endif /* DO_MG_DEBUG */
205
206#endif
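The register and status-bit definitions in the new mg_disk.h suggest a simple ready poll. The sketch below is illustrative only: the readb()-based access and the millisecond timeout loop are assumptions, while the register offset, masks and error codes come from the header.

/* Hypothetical ready-wait loop built from the mg_disk.h definitions above. */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mg_disk.h>

static int mg_wait_ready(void __iomem *dev_base, unsigned int timeout_ms)
{
	u8 status;

	while (timeout_ms--) {
		status = readb(dev_base + MG_REG_STATUS);
		/* ready and seek-done set, busy/write-fault/error clear */
		if (MG_READY_OK(status))
			return MG_ERR_NONE;
		msleep(1);
	}

	return MG_ERR_TIMEOUT;
}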
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 065cdf8c09fb..bff1f0d475c7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -104,6 +104,7 @@ extern unsigned int kobjsize(const void *objp);
104#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ 104#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
105#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ 105#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
106#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ 106#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
107#define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
107 108
108#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ 109#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
109#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS 110#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -134,6 +135,7 @@ extern pgprot_t protection_map[16];
134 135
135#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ 136#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
136#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ 137#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
138#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
137 139
138/* 140/*
139 * This interface is used by x86 PAT code to identify a pfn mapping that is 141 * This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -145,7 +147,7 @@ extern pgprot_t protection_map[16];
145 */ 147 */
146static inline int is_linear_pfn_mapping(struct vm_area_struct *vma) 148static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
147{ 149{
148 return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff); 150 return (vma->vm_flags & VM_PFN_AT_MMAP);
149} 151}
150 152
151static inline int is_pfn_mapping(struct vm_area_struct *vma) 153static inline int is_pfn_mapping(struct vm_area_struct *vma)
@@ -186,7 +188,7 @@ struct vm_operations_struct {
186 188
187 /* notification that a previously read-only page is about to become 189 /* notification that a previously read-only page is about to become
188 * writable, if an error is returned it will cause a SIGBUS */ 190 * writable, if an error is returned it will cause a SIGBUS */
189 int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); 191 int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
190 192
191 /* called by access_process_vm when get_user_pages() fails, typically 193 /* called by access_process_vm when get_user_pages() fails, typically
192 * for use by special VMAs that can switch between memory and hardware 194 * for use by special VMAs that can switch between memory and hardware
@@ -833,6 +835,7 @@ int __set_page_dirty_nobuffers(struct page *page);
833int __set_page_dirty_no_writeback(struct page *page); 835int __set_page_dirty_no_writeback(struct page *page);
834int redirty_page_for_writepage(struct writeback_control *wbc, 836int redirty_page_for_writepage(struct writeback_control *wbc,
835 struct page *page); 837 struct page *page);
838void account_page_dirtied(struct page *page, struct address_space *mapping);
836int set_page_dirty(struct page *page); 839int set_page_dirty(struct page *page);
837int set_page_dirty_lock(struct page *page); 840int set_page_dirty_lock(struct page *page);
838int clear_page_dirty_for_io(struct page *page); 841int clear_page_dirty_for_io(struct page *page);
@@ -1076,7 +1079,7 @@ static inline void setup_per_cpu_pageset(void) {}
1076#endif 1079#endif
1077 1080
1078/* nommu.c */ 1081/* nommu.c */
1079extern atomic_t mmap_pages_allocated; 1082extern atomic_long_t mmap_pages_allocated;
1080 1083
1081/* prio_tree.c */ 1084/* prio_tree.c */
1082void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); 1085void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
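The mm.h hunk changes ->page_mkwrite() to take a struct vm_fault instead of a bare page pointer. A sketch of a handler under the new prototype follows; the body is a placeholder, not any real filesystem's implementation.

/* Placeholder ->page_mkwrite() handler using the new vm_fault-based
 * prototype shown above; the locking and the empty "make writable"
 * step are illustrative only.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

static int example_page_mkwrite(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page = vmf->page;	/* previously passed directly */

	lock_page(page);
	/* ... reserve space / mark the page writable here ... */
	unlock_page(page);

	return 0;	/* an error return is reported as SIGBUS */
}

static const struct vm_operations_struct example_vm_ops = {
	.page_mkwrite	= example_page_mkwrite,
};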
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d84feb7bdbf0..0e80e26ecf21 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -11,6 +11,7 @@
11#include <linux/rwsem.h> 11#include <linux/rwsem.h>
12#include <linux/completion.h> 12#include <linux/completion.h>
13#include <linux/cpumask.h> 13#include <linux/cpumask.h>
14#include <linux/page-debug-flags.h>
14#include <asm/page.h> 15#include <asm/page.h>
15#include <asm/mmu.h> 16#include <asm/mmu.h>
16 17
@@ -94,6 +95,9 @@ struct page {
94 void *virtual; /* Kernel virtual address (NULL if 95 void *virtual; /* Kernel virtual address (NULL if
95 not kmapped, ie. highmem) */ 96 not kmapped, ie. highmem) */
96#endif /* WANT_PAGE_VIRTUAL */ 97#endif /* WANT_PAGE_VIRTUAL */
98#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
99 unsigned long debug_flags; /* Use atomic bitops on this */
100#endif
97}; 101};
98 102
99/* 103/*
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4e457256bd33..3e7615e9087e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -192,5 +192,10 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
192 wake_up_process(host->sdio_irq_thread); 192 wake_up_process(host->sdio_irq_thread);
193} 193}
194 194
195struct regulator;
196
197int mmc_regulator_get_ocrmask(struct regulator *supply);
198int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit);
199
195#endif 200#endif
196 201
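The mmc/host.h hunk declares the regulator-to-OCR helpers. The sketch below shows one plausible way a host driver could use them; the "vmmc" supply name, the probe-time plumbing and the omitted regulator_put() are assumptions of the example.

/* Hypothetical probe-time hookup of a supply using the helpers declared
 * above; error paths and regulator release are omitted for brevity.
 */
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/regulator/consumer.h>

static int example_attach_supply(struct mmc_host *mmc, struct device *dev)
{
	struct regulator *supply;
	int ocr;

	supply = regulator_get(dev, "vmmc");
	if (IS_ERR(supply))
		return PTR_ERR(supply);

	/* translate the regulator's voltage range into MMC OCR bits */
	ocr = mmc_regulator_get_ocrmask(supply);
	if (ocr > 0)
		mmc->ocr_avail = ocr;

	return 0;
}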
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1aca6cebbb78..186ec6ab334d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -764,12 +764,6 @@ extern int numa_zonelist_order_handler(struct ctl_table *, int,
764extern char numa_zonelist_order[]; 764extern char numa_zonelist_order[];
765#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ 765#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
766 766
767#include <linux/topology.h>
768/* Returns the number of the current Node. */
769#ifndef numa_node_id
770#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
771#endif
772
773#ifndef CONFIG_NEED_MULTIPLE_NODES 767#ifndef CONFIG_NEED_MULTIPLE_NODES
774 768
775extern struct pglist_data contig_page_data; 769extern struct pglist_data contig_page_data;
@@ -806,6 +800,14 @@ extern struct zone *next_zone(struct zone *zone);
806 zone; \ 800 zone; \
807 zone = next_zone(zone)) 801 zone = next_zone(zone))
808 802
803#define for_each_populated_zone(zone) \
804 for (zone = (first_online_pgdat())->node_zones; \
805 zone; \
806 zone = next_zone(zone)) \
807 if (!populated_zone(zone)) \
808 ; /* do nothing */ \
809 else
810
809static inline struct zone *zonelist_zone(struct zoneref *zoneref) 811static inline struct zone *zonelist_zone(struct zoneref *zoneref)
810{ 812{
811 return zoneref->zone; 813 return zoneref->zone;
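for_each_populated_zone(), added above, folds the populated_zone() test into the iteration itself. A trivial illustrative walk (the counting is arbitrary):

/* Illustrative use of the new iterator from mmzone.h. */
#include <linux/mmzone.h>

static unsigned long example_count_present_pages(void)
{
	struct zone *zone;
	unsigned long pages = 0;

	/* unpopulated zones are skipped by the macro itself */
	for_each_populated_zone(zone)
		pages += zone->present_pages;

	return pages;
}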
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 830bbcd449d6..3a059298cc19 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -22,6 +22,8 @@ struct proc_mounts {
22 int event; 22 int event;
23}; 23};
24 24
25struct fs_struct;
26
25extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, 27extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
26 struct fs_struct *); 28 struct fs_struct *);
27extern void __put_mnt_ns(struct mnt_namespace *ns); 29extern void __put_mnt_ns(struct mnt_namespace *ns);
diff --git a/include/linux/module.h b/include/linux/module.h
index 145a75528cc1..627ac082e2a6 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -248,6 +248,10 @@ struct module
248 const unsigned long *crcs; 248 const unsigned long *crcs;
249 unsigned int num_syms; 249 unsigned int num_syms;
250 250
251 /* Kernel parameters. */
252 struct kernel_param *kp;
253 unsigned int num_kp;
254
251 /* GPL-only exported symbols. */ 255 /* GPL-only exported symbols. */
252 unsigned int num_gpl_syms; 256 unsigned int num_gpl_syms;
253 const struct kernel_symbol *gpl_syms; 257 const struct kernel_symbol *gpl_syms;
@@ -329,6 +333,11 @@ struct module
329 unsigned int num_tracepoints; 333 unsigned int num_tracepoints;
330#endif 334#endif
331 335
336#ifdef CONFIG_TRACING
337 const char **trace_bprintk_fmt_start;
338 unsigned int num_trace_bprintk_fmt;
339#endif
340
332#ifdef CONFIG_MODULE_UNLOAD 341#ifdef CONFIG_MODULE_UNLOAD
333 /* What modules depend on me? */ 342 /* What modules depend on me? */
334 struct list_head modules_which_use_me; 343 struct list_head modules_which_use_me;
@@ -350,6 +359,8 @@ struct module
350#define MODULE_ARCH_INIT {} 359#define MODULE_ARCH_INIT {}
351#endif 360#endif
352 361
362extern struct mutex module_mutex;
363
353/* FIXME: It'd be nice to isolate modules during init, too, so they 364/* FIXME: It'd be nice to isolate modules during init, too, so they
354 aren't used before they (may) fail. But presently too much code 365 aren't used before they (may) fail. But presently too much code
355 (IDE & SCSI) require entry into the module during init.*/ 366 (IDE & SCSI) require entry into the module during init.*/
@@ -358,10 +369,10 @@ static inline int module_is_live(struct module *mod)
358 return mod->state != MODULE_STATE_GOING; 369 return mod->state != MODULE_STATE_GOING;
359} 370}
360 371
361/* Is this address in a module? (second is with no locks, for oops) */
362struct module *module_text_address(unsigned long addr);
363struct module *__module_text_address(unsigned long addr); 372struct module *__module_text_address(unsigned long addr);
364int is_module_address(unsigned long addr); 373struct module *__module_address(unsigned long addr);
374bool is_module_address(unsigned long addr);
375bool is_module_text_address(unsigned long addr);
365 376
366static inline int within_module_core(unsigned long addr, struct module *mod) 377static inline int within_module_core(unsigned long addr, struct module *mod)
367{ 378{
@@ -375,6 +386,31 @@ static inline int within_module_init(unsigned long addr, struct module *mod)
375 addr < (unsigned long)mod->module_init + mod->init_size; 386 addr < (unsigned long)mod->module_init + mod->init_size;
376} 387}
377 388
389/* Search for module by name: must hold module_mutex. */
390struct module *find_module(const char *name);
391
392struct symsearch {
393 const struct kernel_symbol *start, *stop;
394 const unsigned long *crcs;
395 enum {
396 NOT_GPL_ONLY,
397 GPL_ONLY,
398 WILL_BE_GPL_ONLY,
399 } licence;
400 bool unused;
401};
402
403/* Search for an exported symbol by name. */
404const struct kernel_symbol *find_symbol(const char *name,
405 struct module **owner,
406 const unsigned long **crc,
407 bool gplok,
408 bool warn);
409
410/* Walk the exported symbol table */
411bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
412 unsigned int symnum, void *data), void *data);
413
378/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if 414/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
379 symnum out of range. */ 415 symnum out of range. */
380int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 416int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
@@ -383,6 +419,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
383/* Look for this name: can be of form module:name. */ 419/* Look for this name: can be of form module:name. */
384unsigned long module_kallsyms_lookup_name(const char *name); 420unsigned long module_kallsyms_lookup_name(const char *name);
385 421
422int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
423 struct module *, unsigned long),
424 void *data);
425
386extern void __module_put_and_exit(struct module *mod, long code) 426extern void __module_put_and_exit(struct module *mod, long code)
387 __attribute__((noreturn)); 427 __attribute__((noreturn));
388#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); 428#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code);
@@ -444,6 +484,7 @@ static inline void __module_get(struct module *module)
444#define symbol_put_addr(p) do { } while(0) 484#define symbol_put_addr(p) do { } while(0)
445 485
446#endif /* CONFIG_MODULE_UNLOAD */ 486#endif /* CONFIG_MODULE_UNLOAD */
487int use_module(struct module *a, struct module *b);
447 488
448/* This is a #define so the string doesn't get put in every .o file */ 489/* This is a #define so the string doesn't get put in every .o file */
449#define module_name(mod) \ 490#define module_name(mod) \
@@ -490,21 +531,24 @@ search_module_extables(unsigned long addr)
490 return NULL; 531 return NULL;
491} 532}
492 533
493/* Is this address in a module? */ 534static inline struct module *__module_address(unsigned long addr)
494static inline struct module *module_text_address(unsigned long addr)
495{ 535{
496 return NULL; 536 return NULL;
497} 537}
498 538
499/* Is this address in a module? (don't take a lock, we're oopsing) */
500static inline struct module *__module_text_address(unsigned long addr) 539static inline struct module *__module_text_address(unsigned long addr)
501{ 540{
502 return NULL; 541 return NULL;
503} 542}
504 543
505static inline int is_module_address(unsigned long addr) 544static inline bool is_module_address(unsigned long addr)
506{ 545{
507 return 0; 546 return false;
547}
548
549static inline bool is_module_text_address(unsigned long addr)
550{
551 return false;
508} 552}
509 553
510/* Get/put a kernel symbol (calls should be symmetric) */ 554/* Get/put a kernel symbol (calls should be symmetric) */
@@ -559,6 +603,14 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name)
559 return 0; 603 return 0;
560} 604}
561 605
606static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
607 struct module *,
608 unsigned long),
609 void *data)
610{
611 return 0;
612}
613
562static inline int register_module_notifier(struct notifier_block * nb) 614static inline int register_module_notifier(struct notifier_block * nb)
563{ 615{
564 /* no events will happen anyway, so this can always succeed */ 616 /* no events will happen anyway, so this can always succeed */
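The module.h hunk exports find_module(), find_symbol() and the each_symbol() walker. A sketch of an each_symbol() callback follows, based only on the prototypes shown; the stop-on-true convention and any locking requirements (e.g. module_mutex) are assumptions here.

/* Hypothetical walker counting exported symbols via each_symbol(). */
#include <linux/kernel.h>
#include <linux/module.h>

static bool example_count_symbol(const struct symsearch *arr,
				 struct module *owner,
				 unsigned int symnum, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return false;	/* assumed: returning true stops the walk */
}

static unsigned int example_count_exported_symbols(void)
{
	unsigned int count = 0;

	each_symbol(example_count_symbol, &count);
	return count;
}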
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index e4af3399ef48..a4f0b931846c 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -138,6 +138,16 @@ extern int parse_args(const char *name,
138 unsigned num, 138 unsigned num,
139 int (*unknown)(char *param, char *val)); 139 int (*unknown)(char *param, char *val));
140 140
141/* Called by module remove. */
142#ifdef CONFIG_SYSFS
143extern void destroy_params(const struct kernel_param *params, unsigned num);
144#else
145static inline void destroy_params(const struct kernel_param *params,
146 unsigned num)
147{
148}
149#endif /* !CONFIG_SYSFS */
150
141/* All the helper functions */ 151/* All the helper functions */
142/* The macros to do compile-time type checking stolen from Jakub 152/* The macros to do compile-time type checking stolen from Jakub
143 Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ 153 Jelinek, who IIRC came up with this idea for the 2.4 module init code. */
diff --git a/include/linux/mpage.h b/include/linux/mpage.h
index 5c42821da2d1..068a0c9946af 100644
--- a/include/linux/mpage.h
+++ b/include/linux/mpage.h
@@ -11,21 +11,11 @@
11 */ 11 */
12#ifdef CONFIG_BLOCK 12#ifdef CONFIG_BLOCK
13 13
14struct mpage_data {
15 struct bio *bio;
16 sector_t last_block_in_bio;
17 get_block_t *get_block;
18 unsigned use_writepage;
19};
20
21struct writeback_control; 14struct writeback_control;
22 15
23struct bio *mpage_bio_submit(int rw, struct bio *bio);
24int mpage_readpages(struct address_space *mapping, struct list_head *pages, 16int mpage_readpages(struct address_space *mapping, struct list_head *pages,
25 unsigned nr_pages, get_block_t get_block); 17 unsigned nr_pages, get_block_t get_block);
26int mpage_readpage(struct page *page, get_block_t get_block); 18int mpage_readpage(struct page *page, get_block_t get_block);
27int __mpage_writepage(struct page *page, struct writeback_control *wbc,
28 void *data);
29int mpage_writepages(struct address_space *mapping, 19int mpage_writepages(struct address_space *mapping,
30 struct writeback_control *wbc, get_block_t get_block); 20 struct writeback_control *wbc, get_block_t get_block);
31int mpage_writepage(struct page *page, get_block_t *get_block, 21int mpage_writepage(struct page *page, get_block_t *get_block,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index d2b8a1e8ca11..6991ab5b24d1 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -20,20 +20,23 @@ extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
20 20
21struct msi_desc { 21struct msi_desc {
22 struct { 22 struct {
23 __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */ 23 __u8 is_msix : 1;
24 __u8 multiple: 3; /* log2 number of messages */
24 __u8 maskbit : 1; /* mask-pending bit supported ? */ 25 __u8 maskbit : 1; /* mask-pending bit supported ? */
25 __u8 masked : 1;
26 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 26 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
27 __u8 pos; /* Location of the msi capability */ 27 __u8 pos; /* Location of the msi capability */
28 __u32 maskbits_mask; /* mask bits mask */
29 __u16 entry_nr; /* specific enabled entry */ 28 __u16 entry_nr; /* specific enabled entry */
30 unsigned default_irq; /* default pre-assigned irq */ 29 unsigned default_irq; /* default pre-assigned irq */
31 }msi_attrib; 30 } msi_attrib;
32 31
32 u32 masked; /* mask bits */
33 unsigned int irq; 33 unsigned int irq;
34 struct list_head list; 34 struct list_head list;
35 35
36 void __iomem *mask_base; 36 union {
37 void __iomem *mask_base;
38 u8 mask_pos;
39 };
37 struct pci_dev *dev; 40 struct pci_dev *dev;
38 41
39 /* Last set MSI message */ 42 /* Last set MSI message */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 3aa5d77c2cdb..5675b63a0631 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/uio.h> 12#include <linux/uio.h>
13#include <linux/notifier.h> 13#include <linux/notifier.h>
14#include <linux/device.h>
14 15
15#include <linux/mtd/compatmac.h> 16#include <linux/mtd/compatmac.h>
16#include <mtd/mtd-abi.h> 17#include <mtd/mtd-abi.h>
@@ -162,6 +163,20 @@ struct mtd_info {
162 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ 163 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
163 void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len); 164 void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
164 165
166 /* Allow NOMMU mmap() to directly map the device (if not NULL)
167 * - return the address to which the offset maps
168 * - return -ENOSYS to indicate refusal to do the mapping
169 */
170 unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
171 unsigned long len,
172 unsigned long offset,
173 unsigned long flags);
174
175 /* Backing device capabilities for this device
176 * - provides mmap capabilities
177 */
178 struct backing_dev_info *backing_dev_info;
179
165 180
166 int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 181 int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
167 int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); 182 int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
@@ -223,6 +238,7 @@ struct mtd_info {
223 void *priv; 238 void *priv;
224 239
225 struct module *owner; 240 struct module *owner;
241 struct device dev;
226 int usecount; 242 int usecount;
227 243
228 /* If the driver is something smart, like UBI, it may need to maintain 244 /* If the driver is something smart, like UBI, it may need to maintain
@@ -233,6 +249,11 @@ struct mtd_info {
233 void (*put_device) (struct mtd_info *mtd); 249 void (*put_device) (struct mtd_info *mtd);
234}; 250};
235 251
252static inline struct mtd_info *dev_to_mtd(struct device *dev)
253{
254 return dev ? container_of(dev, struct mtd_info, dev) : NULL;
255}
256
236static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) 257static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
237{ 258{
238 if (mtd->erasesize_shift) 259 if (mtd->erasesize_shift)
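With struct device embedded in mtd_info and the dev_to_mtd() helper above, driver-model callbacks can recover the owning MTD. A sketch in the usual sysfs-attribute shape; the attribute itself is hypothetical.

/* Hypothetical sysfs show routine using dev_to_mtd() from the hunk above. */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mtd/mtd.h>

static ssize_t example_name_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_to_mtd(dev);

	if (!mtd)
		return -ENODEV;

	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
}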
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index db5b63da2a7e..7efb9be34662 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -43,8 +43,8 @@ extern void nand_wait_ready(struct mtd_info *mtd);
43 * is supported now. If you add a chip with bigger oobsize/page 43 * is supported now. If you add a chip with bigger oobsize/page
44 * adjust this accordingly. 44 * adjust this accordingly.
45 */ 45 */
46#define NAND_MAX_OOBSIZE 64 46#define NAND_MAX_OOBSIZE 128
47#define NAND_MAX_PAGESIZE 2048 47#define NAND_MAX_PAGESIZE 4096
48 48
49/* 49/*
50 * Constants for hardware specific CLE/ALE/NCE function 50 * Constants for hardware specific CLE/ALE/NCE function
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index a45dd831b3f8..7535a74083b9 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -76,4 +76,16 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
76 struct device_node *node, 76 struct device_node *node,
77 struct mtd_partition **pparts); 77 struct mtd_partition **pparts);
78 78
79#ifdef CONFIG_MTD_PARTITIONS
80static inline int mtd_has_partitions(void) { return 1; }
81#else
82static inline int mtd_has_partitions(void) { return 0; }
83#endif
84
85#ifdef CONFIG_MTD_CMDLINE_PARTS
86static inline int mtd_has_cmdlinepart(void) { return 1; }
87#else
88static inline int mtd_has_cmdlinepart(void) { return 0; }
89#endif
90
79#endif 91#endif
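mtd_has_partitions() and mtd_has_cmdlinepart() let map drivers keep one registration path whether or not CONFIG_MTD_PARTITIONS is set. A rough sketch, with the surrounding parser logic and the two helpers' differing return conventions glossed over:

/* Sketch: register partitions when partition support is built in,
 * otherwise register the whole device.  Error handling is simplified.
 */
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static int example_add_device(struct mtd_info *mtd,
			      struct mtd_partition *parts, int nr_parts)
{
	if (mtd_has_partitions() && nr_parts > 0)
		return add_mtd_partitions(mtd, parts, nr_parts);

	return add_mtd_device(mtd);
}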
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 7a0e5c4f8072..3069ec7e0ab8 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -50,8 +50,10 @@ struct mutex {
50 atomic_t count; 50 atomic_t count;
51 spinlock_t wait_lock; 51 spinlock_t wait_lock;
52 struct list_head wait_list; 52 struct list_head wait_list;
53#ifdef CONFIG_DEBUG_MUTEXES 53#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
54 struct thread_info *owner; 54 struct thread_info *owner;
55#endif
56#ifdef CONFIG_DEBUG_MUTEXES
55 const char *name; 57 const char *name;
56 void *magic; 58 void *magic;
57#endif 59#endif
@@ -68,7 +70,6 @@ struct mutex_waiter {
68 struct list_head list; 70 struct list_head list;
69 struct task_struct *task; 71 struct task_struct *task;
70#ifdef CONFIG_DEBUG_MUTEXES 72#ifdef CONFIG_DEBUG_MUTEXES
71 struct mutex *lock;
72 void *magic; 73 void *magic;
73#endif 74#endif
74}; 75};
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index de99025f2c5d..2524267210d3 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -18,7 +18,7 @@ struct netpoll {
18 const char *name; 18 const char *name;
19 void (*rx_hook)(struct netpoll *, int, char *, int); 19 void (*rx_hook)(struct netpoll *, int, char *, int);
20 20
21 u32 local_ip, remote_ip; 21 __be32 local_ip, remote_ip;
22 u16 local_port, remote_port; 22 u16 local_port, remote_port;
23 u8 remote_mac[ETH_ALEN]; 23 u8 remote_mac[ETH_ALEN];
24}; 24};
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 54af92c1c70b..214d499718f7 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -109,7 +109,6 @@
109 NFSERR_FILE_OPEN = 10046, /* v4 */ 109 NFSERR_FILE_OPEN = 10046, /* v4 */
110 NFSERR_ADMIN_REVOKED = 10047, /* v4 */ 110 NFSERR_ADMIN_REVOKED = 10047, /* v4 */
111 NFSERR_CB_PATH_DOWN = 10048, /* v4 */ 111 NFSERR_CB_PATH_DOWN = 10048, /* v4 */
112 NFSERR_REPLAY_ME = 10049 /* v4 */
113}; 112};
114 113
115/* NFSv2 file types - beware, these are not the same in NFSv3 */ 114/* NFSv2 file types - beware, these are not the same in NFSv3 */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index b912311a56b1..e3f0cbcbd0db 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -21,6 +21,7 @@
21#define NFS4_FHSIZE 128 21#define NFS4_FHSIZE 128
22#define NFS4_MAXPATHLEN PATH_MAX 22#define NFS4_MAXPATHLEN PATH_MAX
23#define NFS4_MAXNAMLEN NAME_MAX 23#define NFS4_MAXNAMLEN NAME_MAX
24#define NFS4_MAX_SESSIONID_LEN 16
24 25
25#define NFS4_ACCESS_READ 0x0001 26#define NFS4_ACCESS_READ 0x0001
26#define NFS4_ACCESS_LOOKUP 0x0002 27#define NFS4_ACCESS_LOOKUP 0x0002
@@ -38,6 +39,7 @@
38#define NFS4_OPEN_RESULT_CONFIRM 0x0002 39#define NFS4_OPEN_RESULT_CONFIRM 0x0002
39#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 40#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
40 41
42#define NFS4_SHARE_ACCESS_MASK 0x000F
41#define NFS4_SHARE_ACCESS_READ 0x0001 43#define NFS4_SHARE_ACCESS_READ 0x0001
42#define NFS4_SHARE_ACCESS_WRITE 0x0002 44#define NFS4_SHARE_ACCESS_WRITE 0x0002
43#define NFS4_SHARE_ACCESS_BOTH 0x0003 45#define NFS4_SHARE_ACCESS_BOTH 0x0003
@@ -45,6 +47,19 @@
45#define NFS4_SHARE_DENY_WRITE 0x0002 47#define NFS4_SHARE_DENY_WRITE 0x0002
46#define NFS4_SHARE_DENY_BOTH 0x0003 48#define NFS4_SHARE_DENY_BOTH 0x0003
47 49
50/* nfs41 */
51#define NFS4_SHARE_WANT_MASK 0xFF00
52#define NFS4_SHARE_WANT_NO_PREFERENCE 0x0000
53#define NFS4_SHARE_WANT_READ_DELEG 0x0100
54#define NFS4_SHARE_WANT_WRITE_DELEG 0x0200
55#define NFS4_SHARE_WANT_ANY_DELEG 0x0300
56#define NFS4_SHARE_WANT_NO_DELEG 0x0400
57#define NFS4_SHARE_WANT_CANCEL 0x0500
58
59#define NFS4_SHARE_WHEN_MASK 0xF0000
60#define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000
61#define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000
62
48#define NFS4_SET_TO_SERVER_TIME 0 63#define NFS4_SET_TO_SERVER_TIME 0
49#define NFS4_SET_TO_CLIENT_TIME 1 64#define NFS4_SET_TO_CLIENT_TIME 1
50 65
@@ -88,6 +103,31 @@
88#define NFS4_ACE_GENERIC_EXECUTE 0x001200A0 103#define NFS4_ACE_GENERIC_EXECUTE 0x001200A0
89#define NFS4_ACE_MASK_ALL 0x001F01FF 104#define NFS4_ACE_MASK_ALL 0x001F01FF
90 105
106#define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001
107#define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002
108#define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000
109#define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000
110#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000
111#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
112#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
113/*
114 * Since the validity of these bits depends on whether
115 * they're set in the argument or response, have separate
116 * invalid flag masks for arg (_A) and resp (_R).
117 */
118#define EXCHGID4_FLAG_MASK_A 0x40070003
119#define EXCHGID4_FLAG_MASK_R 0x80070003
120
121#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
122#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
123#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED 0x00000004
124#define SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED 0x00000008
125#define SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED 0x00000010
126#define SEQ4_STATUS_ADMIN_STATE_REVOKED 0x00000020
127#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
128#define SEQ4_STATUS_LEASE_MOVED 0x00000080
129#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
130
91#define NFS4_MAX_UINT64 (~(u64)0) 131#define NFS4_MAX_UINT64 (~(u64)0)
92 132
93enum nfs4_acl_whotype { 133enum nfs4_acl_whotype {
@@ -154,6 +194,28 @@ enum nfs_opnum4 {
154 OP_VERIFY = 37, 194 OP_VERIFY = 37,
155 OP_WRITE = 38, 195 OP_WRITE = 38,
156 OP_RELEASE_LOCKOWNER = 39, 196 OP_RELEASE_LOCKOWNER = 39,
197
198 /* nfs41 */
199 OP_BACKCHANNEL_CTL = 40,
200 OP_BIND_CONN_TO_SESSION = 41,
201 OP_EXCHANGE_ID = 42,
202 OP_CREATE_SESSION = 43,
203 OP_DESTROY_SESSION = 44,
204 OP_FREE_STATEID = 45,
205 OP_GET_DIR_DELEGATION = 46,
206 OP_GETDEVICEINFO = 47,
207 OP_GETDEVICELIST = 48,
208 OP_LAYOUTCOMMIT = 49,
209 OP_LAYOUTGET = 50,
210 OP_LAYOUTRETURN = 51,
211 OP_SECINFO_NO_NAME = 52,
212 OP_SEQUENCE = 53,
213 OP_SET_SSV = 54,
214 OP_TEST_STATEID = 55,
215 OP_WANT_DELEGATION = 56,
216 OP_DESTROY_CLIENTID = 57,
217 OP_RECLAIM_COMPLETE = 58,
218
157 OP_ILLEGAL = 10044, 219 OP_ILLEGAL = 10044,
158}; 220};
159 221
@@ -230,7 +292,48 @@ enum nfsstat4 {
230 NFS4ERR_DEADLOCK = 10045, 292 NFS4ERR_DEADLOCK = 10045,
231 NFS4ERR_FILE_OPEN = 10046, 293 NFS4ERR_FILE_OPEN = 10046,
232 NFS4ERR_ADMIN_REVOKED = 10047, 294 NFS4ERR_ADMIN_REVOKED = 10047,
233 NFS4ERR_CB_PATH_DOWN = 10048 295 NFS4ERR_CB_PATH_DOWN = 10048,
296
297 /* nfs41 */
298 NFS4ERR_BADIOMODE = 10049,
299 NFS4ERR_BADLAYOUT = 10050,
300 NFS4ERR_BAD_SESSION_DIGEST = 10051,
301 NFS4ERR_BADSESSION = 10052,
302 NFS4ERR_BADSLOT = 10053,
303 NFS4ERR_COMPLETE_ALREADY = 10054,
304 NFS4ERR_CONN_NOT_BOUND_TO_SESSION = 10055,
305 NFS4ERR_DELEG_ALREADY_WANTED = 10056,
306 NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */
307 NFS4ERR_LAYOUTTRYLATER = 10058,
308 NFS4ERR_LAYOUTUNAVAILABLE = 10059,
309 NFS4ERR_NOMATCHING_LAYOUT = 10060,
310 NFS4ERR_RECALLCONFLICT = 10061,
311 NFS4ERR_UNKNOWN_LAYOUTTYPE = 10062,
312 NFS4ERR_SEQ_MISORDERED = 10063, /* unexpected seq.id in req */
313 NFS4ERR_SEQUENCE_POS = 10064, /* [CB_]SEQ. op not 1st op */
314 NFS4ERR_REQ_TOO_BIG = 10065, /* request too big */
315 NFS4ERR_REP_TOO_BIG = 10066, /* reply too big */
316 NFS4ERR_REP_TOO_BIG_TO_CACHE = 10067, /* rep. not all cached */
317 NFS4ERR_RETRY_UNCACHED_REP = 10068, /* retry & rep. uncached */
318 NFS4ERR_UNSAFE_COMPOUND = 10069, /* retry/recovery too hard */
319 NFS4ERR_TOO_MANY_OPS = 10070, /* too many ops in [CB_]COMP */
320 NFS4ERR_OP_NOT_IN_SESSION = 10071, /* op needs [CB_]SEQ. op */
321 NFS4ERR_HASH_ALG_UNSUPP = 10072, /* hash alg. not supp. */
322 /* Error 10073 is unused. */
323 NFS4ERR_CLIENTID_BUSY = 10074, /* clientid has state */
324 NFS4ERR_PNFS_IO_HOLE = 10075, /* IO to _SPARSE file hole */
 325 NFS4ERR_SEQ_FALSE_RETRY = 10076, /* retry not original */

326 NFS4ERR_BAD_HIGH_SLOT = 10077, /* sequence arg bad */
327 NFS4ERR_DEADSESSION = 10078, /* persistent session dead */
328 NFS4ERR_ENCR_ALG_UNSUPP = 10079, /* SSV alg mismatch */
329 NFS4ERR_PNFS_NO_LAYOUT = 10080, /* direct I/O with no layout */
330 NFS4ERR_NOT_ONLY_OP = 10081, /* bad compound */
331 NFS4ERR_WRONG_CRED = 10082, /* permissions:state change */
332 NFS4ERR_WRONG_TYPE = 10083, /* current operation mismatch */
333 NFS4ERR_DIRDELEG_UNAVAIL = 10084, /* no directory delegation */
334 NFS4ERR_REJECT_DELEG = 10085, /* on callback */
335 NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */
336 NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */
234}; 337};
235 338
236/* 339/*
@@ -265,7 +368,13 @@ enum opentype4 {
265enum createmode4 { 368enum createmode4 {
266 NFS4_CREATE_UNCHECKED = 0, 369 NFS4_CREATE_UNCHECKED = 0,
267 NFS4_CREATE_GUARDED = 1, 370 NFS4_CREATE_GUARDED = 1,
268 NFS4_CREATE_EXCLUSIVE = 2 371 NFS4_CREATE_EXCLUSIVE = 2,
372 /*
373 * New to NFSv4.1. If session is persistent,
374 * GUARDED4 MUST be used. Otherwise, use
375 * EXCLUSIVE4_1 instead of EXCLUSIVE4.
376 */
377 NFS4_CREATE_EXCLUSIVE4_1 = 3
269}; 378};
270 379
271enum limit_by4 { 380enum limit_by4 {
@@ -301,6 +410,8 @@ enum lock_type4 {
301#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) 410#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
302#define FATTR4_WORD0_LEASE_TIME (1UL << 10) 411#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
303#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) 412#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
413/* Mandatory in NFSv4.1 */
414#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
304 415
305/* Recommended Attributes */ 416/* Recommended Attributes */
306#define FATTR4_WORD0_ACL (1UL << 12) 417#define FATTR4_WORD0_ACL (1UL << 12)
@@ -391,6 +502,29 @@ enum {
391 NFSPROC4_CLNT_GETACL, 502 NFSPROC4_CLNT_GETACL,
392 NFSPROC4_CLNT_SETACL, 503 NFSPROC4_CLNT_SETACL,
393 NFSPROC4_CLNT_FS_LOCATIONS, 504 NFSPROC4_CLNT_FS_LOCATIONS,
505
506 /* nfs41 */
507 NFSPROC4_CLNT_EXCHANGE_ID,
508 NFSPROC4_CLNT_CREATE_SESSION,
509 NFSPROC4_CLNT_DESTROY_SESSION,
510 NFSPROC4_CLNT_SEQUENCE,
511 NFSPROC4_CLNT_GET_LEASE_TIME,
512};
513
514/* nfs41 types */
515struct nfs4_sessionid {
516 unsigned char data[NFS4_MAX_SESSIONID_LEN];
517};
518
519/* Create Session Flags */
520#define SESSION4_PERSIST 0x001
521#define SESSION4_BACK_CHAN 0x002
522#define SESSION4_RDMA 0x004
523
524enum state_protect_how4 {
525 SP4_NONE = 0,
526 SP4_MACH_CRED = 1,
527 SP4_SSV = 2
394}; 528};
395 529
396#endif 530#endif
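The comment above notes that EXCHANGE_ID flag validity differs between argument and response, hence the separate _A and _R masks. A one-line argument-side validity check, as a sketch (the function name is illustrative):

/* Illustrative argument-side flag check built on EXCHGID4_FLAG_MASK_A. */
#include <linux/types.h>
#include <linux/nfs4.h>

static bool example_exchid_arg_flags_valid(u32 flags)
{
	/* any bit outside the argument mask is invalid in a request */
	return (flags & ~EXCHGID4_FLAG_MASK_A) == 0;
}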
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 8cc8807f77d6..fdffb413b192 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -166,8 +166,7 @@ struct nfs_inode {
166 */ 166 */
167 struct radix_tree_root nfs_page_tree; 167 struct radix_tree_root nfs_page_tree;
168 168
169 unsigned long ncommit, 169 unsigned long npages;
170 npages;
171 170
172 /* Open contexts for shared mmap writes */ 171 /* Open contexts for shared mmap writes */
173 struct list_head open_files; 172 struct list_head open_files;
@@ -186,6 +185,9 @@ struct nfs_inode {
186 fmode_t delegation_state; 185 fmode_t delegation_state;
187 struct rw_semaphore rwsem; 186 struct rw_semaphore rwsem;
188#endif /* CONFIG_NFS_V4*/ 187#endif /* CONFIG_NFS_V4*/
188#ifdef CONFIG_NFS_FSCACHE
189 struct fscache_cookie *fscache;
190#endif
189 struct inode vfs_inode; 191 struct inode vfs_inode;
190}; 192};
191 193
@@ -207,6 +209,9 @@ struct nfs_inode {
207#define NFS_INO_STALE (1) /* possible stale inode */ 209#define NFS_INO_STALE (1) /* possible stale inode */
208#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ 210#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
209#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */ 211#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */
212#define NFS_INO_FLUSHING (4) /* inode is flushing out data */
213#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
214#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
210 215
211static inline struct nfs_inode *NFS_I(const struct inode *inode) 216static inline struct nfs_inode *NFS_I(const struct inode *inode)
212{ 217{
@@ -260,6 +265,11 @@ static inline int NFS_STALE(const struct inode *inode)
260 return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); 265 return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
261} 266}
262 267
268static inline int NFS_FSCACHE(const struct inode *inode)
269{
270 return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
271}
272
263static inline __u64 NFS_FILEID(const struct inode *inode) 273static inline __u64 NFS_FILEID(const struct inode *inode)
264{ 274{
265 return NFS_I(inode)->fileid; 275 return NFS_I(inode)->fileid;
@@ -506,6 +516,8 @@ extern int nfs_readpages(struct file *, struct address_space *,
506 struct list_head *, unsigned); 516 struct list_head *, unsigned);
507extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); 517extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
508extern void nfs_readdata_release(void *data); 518extern void nfs_readdata_release(void *data);
519extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
520 struct page *);
509 521
510/* 522/*
511 * Allocate nfs_read_data structures 523 * Allocate nfs_read_data structures
@@ -583,6 +595,7 @@ extern void * nfs_root_data(void);
583#define NFSDBG_CALLBACK 0x0100 595#define NFSDBG_CALLBACK 0x0100
584#define NFSDBG_CLIENT 0x0200 596#define NFSDBG_CLIENT 0x0200
585#define NFSDBG_MOUNT 0x0400 597#define NFSDBG_MOUNT 0x0400
598#define NFSDBG_FSCACHE 0x0800
586#define NFSDBG_ALL 0xFFFF 599#define NFSDBG_ALL 0xFFFF
587 600
588#ifdef __KERNEL__ 601#ifdef __KERNEL__
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 9bb81aec91cf..6ad75948cbf7 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -64,6 +64,10 @@ struct nfs_client {
64 char cl_ipaddr[48]; 64 char cl_ipaddr[48];
65 unsigned char cl_id_uniquifier; 65 unsigned char cl_id_uniquifier;
66#endif 66#endif
67
68#ifdef CONFIG_NFS_FSCACHE
69 struct fscache_cookie *fscache; /* client index cache cookie */
70#endif
67}; 71};
68 72
69/* 73/*
@@ -96,16 +100,28 @@ struct nfs_server {
96 unsigned int acdirmin; 100 unsigned int acdirmin;
97 unsigned int acdirmax; 101 unsigned int acdirmax;
98 unsigned int namelen; 102 unsigned int namelen;
103 unsigned int options; /* extra options enabled by mount */
104#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
99 105
100 struct nfs_fsid fsid; 106 struct nfs_fsid fsid;
101 __u64 maxfilesize; /* maximum file size */ 107 __u64 maxfilesize; /* maximum file size */
102 unsigned long mount_time; /* when this fs was mounted */ 108 unsigned long mount_time; /* when this fs was mounted */
103 dev_t s_dev; /* superblock dev numbers */ 109 dev_t s_dev; /* superblock dev numbers */
104 110
111#ifdef CONFIG_NFS_FSCACHE
112 struct nfs_fscache_key *fscache_key; /* unique key for superblock */
113 struct fscache_cookie *fscache; /* superblock cookie */
114#endif
115
105#ifdef CONFIG_NFS_V4 116#ifdef CONFIG_NFS_V4
106 u32 attr_bitmask[2];/* V4 bitmask representing the set 117 u32 attr_bitmask[2];/* V4 bitmask representing the set
107 of attributes supported on this 118 of attributes supported on this
108 filesystem */ 119 filesystem */
120 u32 cache_consistency_bitmask[2];
121 /* V4 bitmask representing the subset
122 of change attribute, size, ctime
123 and mtime attributes supported by
124 the server */
109 u32 acl_bitmask; /* V4 bitmask representing the ACEs 125 u32 acl_bitmask; /* V4 bitmask representing the ACEs
110 that are supported on this 126 that are supported on this
111 filesystem */ 127 filesystem */
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
index 1cb9a3fed2b3..68b10f5f8907 100644
--- a/include/linux/nfs_iostat.h
+++ b/include/linux/nfs_iostat.h
@@ -116,4 +116,16 @@ enum nfs_stat_eventcounters {
116 __NFSIOS_COUNTSMAX, 116 __NFSIOS_COUNTSMAX,
117}; 117};
118 118
119/*
120 * NFS local caching servicing counters
121 */
122enum nfs_stat_fscachecounters {
123 NFSIOS_FSCACHE_PAGES_READ_OK,
124 NFSIOS_FSCACHE_PAGES_READ_FAIL,
125 NFSIOS_FSCACHE_PAGES_WRITTEN_OK,
126 NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL,
127 NFSIOS_FSCACHE_PAGES_UNCACHED,
128 __NFSIOS_FSCACHEMAX,
129};
130
119#endif /* _LINUX_NFS_IOSTAT */ 131#endif /* _LINUX_NFS_IOSTAT */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 43a713fce11c..b89c34e40bc2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -27,12 +27,8 @@ static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid
27} 27}
28 28
29struct nfs_fattr { 29struct nfs_fattr {
30 unsigned short valid; /* which fields are valid */ 30 unsigned int valid; /* which fields are valid */
31 __u64 pre_size; /* pre_op_attr.size */ 31 umode_t mode;
32 struct timespec pre_mtime; /* pre_op_attr.mtime */
33 struct timespec pre_ctime; /* pre_op_attr.ctime */
34 enum nfs_ftype type; /* always use NFSv2 types */
35 __u32 mode;
36 __u32 nlink; 32 __u32 nlink;
37 __u32 uid; 33 __u32 uid;
38 __u32 gid; 34 __u32 gid;
@@ -52,19 +48,55 @@ struct nfs_fattr {
52 struct timespec atime; 48 struct timespec atime;
53 struct timespec mtime; 49 struct timespec mtime;
54 struct timespec ctime; 50 struct timespec ctime;
55 __u32 bitmap[2]; /* NFSv4 returned attribute bitmap */
56 __u64 change_attr; /* NFSv4 change attribute */ 51 __u64 change_attr; /* NFSv4 change attribute */
57 __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ 52 __u64 pre_change_attr;/* pre-op NFSv4 change attribute */
53 __u64 pre_size; /* pre_op_attr.size */
54 struct timespec pre_mtime; /* pre_op_attr.mtime */
55 struct timespec pre_ctime; /* pre_op_attr.ctime */
58 unsigned long time_start; 56 unsigned long time_start;
59 unsigned long gencount; 57 unsigned long gencount;
60}; 58};
61 59
62#define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */ 60#define NFS_ATTR_FATTR_TYPE (1U << 0)
63#define NFS_ATTR_FATTR 0x0002 /* post-op attributes */ 61#define NFS_ATTR_FATTR_MODE (1U << 1)
64#define NFS_ATTR_FATTR_V3 0x0004 /* NFSv3 attributes */ 62#define NFS_ATTR_FATTR_NLINK (1U << 2)
65#define NFS_ATTR_FATTR_V4 0x0008 /* NFSv4 change attribute */ 63#define NFS_ATTR_FATTR_OWNER (1U << 3)
66#define NFS_ATTR_WCC_V4 0x0010 /* pre-op change attribute */ 64#define NFS_ATTR_FATTR_GROUP (1U << 4)
67#define NFS_ATTR_FATTR_V4_REFERRAL 0x0020 /* NFSv4 referral */ 65#define NFS_ATTR_FATTR_RDEV (1U << 5)
66#define NFS_ATTR_FATTR_SIZE (1U << 6)
67#define NFS_ATTR_FATTR_PRESIZE (1U << 7)
68#define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8)
69#define NFS_ATTR_FATTR_SPACE_USED (1U << 9)
70#define NFS_ATTR_FATTR_FSID (1U << 10)
71#define NFS_ATTR_FATTR_FILEID (1U << 11)
72#define NFS_ATTR_FATTR_ATIME (1U << 12)
73#define NFS_ATTR_FATTR_MTIME (1U << 13)
74#define NFS_ATTR_FATTR_CTIME (1U << 14)
75#define NFS_ATTR_FATTR_PREMTIME (1U << 15)
76#define NFS_ATTR_FATTR_PRECTIME (1U << 16)
77#define NFS_ATTR_FATTR_CHANGE (1U << 17)
78#define NFS_ATTR_FATTR_PRECHANGE (1U << 18)
79#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 19) /* NFSv4 referral */
80
81#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \
82 | NFS_ATTR_FATTR_MODE \
83 | NFS_ATTR_FATTR_NLINK \
84 | NFS_ATTR_FATTR_OWNER \
85 | NFS_ATTR_FATTR_GROUP \
86 | NFS_ATTR_FATTR_RDEV \
87 | NFS_ATTR_FATTR_SIZE \
88 | NFS_ATTR_FATTR_FSID \
89 | NFS_ATTR_FATTR_FILEID \
90 | NFS_ATTR_FATTR_ATIME \
91 | NFS_ATTR_FATTR_MTIME \
92 | NFS_ATTR_FATTR_CTIME)
93#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \
94 | NFS_ATTR_FATTR_BLOCKS_USED)
95#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \
96 | NFS_ATTR_FATTR_SPACE_USED)
97#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
98 | NFS_ATTR_FATTR_SPACE_USED \
99 | NFS_ATTR_FATTR_CHANGE)
68 100
69/* 101/*
70 * Info on the file system 102 * Info on the file system
@@ -836,6 +868,7 @@ struct nfs_rpc_ops {
836 int (*lock)(struct file *, int, struct file_lock *); 868 int (*lock)(struct file *, int, struct file_lock *);
837 int (*lock_check_bounds)(const struct file_lock *); 869 int (*lock_check_bounds)(const struct file_lock *);
838 void (*clear_acl_cache)(struct inode *); 870 void (*clear_acl_cache)(struct inode *);
871 void (*close_context)(struct nfs_open_context *ctx, int);
839}; 872};
840 873
841/* 874/*
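The nfs_xdr.h hunk replaces the coarse NFS_ATTR_* validity flags with one bit per attribute plus composite masks. Two small illustrative checks (the helper names are hypothetical):

/* Illustrative consumers of the per-attribute validity bits above. */
#include <linux/types.h>
#include <linux/nfs_xdr.h>

static bool example_fattr_has_mtime(const struct nfs_fattr *fattr)
{
	/* each attribute now carries its own validity bit */
	return (fattr->valid & NFS_ATTR_FATTR_MTIME) != 0;
}

static bool example_fattr_complete(const struct nfs_fattr *fattr)
{
	/* all of the classic post-op attributes are present */
	return (fattr->valid & NFS_ATTR_FATTR) == NFS_ATTR_FATTR;
}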
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h
index 04b355c801d8..5bccaab81056 100644
--- a/include/linux/nfsd/cache.h
+++ b/include/linux/nfsd/cache.h
@@ -76,4 +76,12 @@ void nfsd_reply_cache_shutdown(void);
76int nfsd_cache_lookup(struct svc_rqst *, int); 76int nfsd_cache_lookup(struct svc_rqst *, int);
77void nfsd_cache_update(struct svc_rqst *, int, __be32 *); 77void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
78 78
79#ifdef CONFIG_NFSD_V4
80void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp);
81#else /* CONFIG_NFSD_V4 */
82static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
83{
84}
85#endif /* CONFIG_NFSD_V4 */
86
79#endif /* NFSCACHE_H */ 87#endif /* NFSCACHE_H */
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index e19f45991b2e..2b49d676d0c9 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -23,7 +23,7 @@
23/* 23/*
24 * nfsd version 24 * nfsd version
25 */ 25 */
26#define NFSD_SUPPORTED_MINOR_VERSION 0 26#define NFSD_SUPPORTED_MINOR_VERSION 1
27 27
28/* 28/*
29 * Flags for nfsd_permission 29 * Flags for nfsd_permission
@@ -53,6 +53,7 @@ typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int);
53extern struct svc_program nfsd_program; 53extern struct svc_program nfsd_program;
54extern struct svc_version nfsd_version2, nfsd_version3, 54extern struct svc_version nfsd_version2, nfsd_version3,
55 nfsd_version4; 55 nfsd_version4;
56extern u32 nfsd_supported_minorversion;
56extern struct mutex nfsd_mutex; 57extern struct mutex nfsd_mutex;
57extern struct svc_serv *nfsd_serv; 58extern struct svc_serv *nfsd_serv;
58 59
@@ -105,7 +106,7 @@ void nfsd_close(struct file *);
105__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *, 106__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *,
106 loff_t, struct kvec *, int, unsigned long *); 107 loff_t, struct kvec *, int, unsigned long *);
107__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, 108__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *,
108 loff_t, struct kvec *,int, unsigned long, int *); 109 loff_t, struct kvec *,int, unsigned long *, int *);
109__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, 110__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
110 char *, int *); 111 char *, int *);
111__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, 112__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
@@ -149,6 +150,7 @@ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
149 150
150enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; 151enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
151int nfsd_vers(int vers, enum vers_op change); 152int nfsd_vers(int vers, enum vers_op change);
153int nfsd_minorversion(u32 minorversion, enum vers_op change);
152void nfsd_reset_versions(void); 154void nfsd_reset_versions(void);
153int nfsd_create_serv(void); 155int nfsd_create_serv(void);
154 156
@@ -186,78 +188,119 @@ void nfsd_lockd_shutdown(void);
186/* 188/*
187 * These macros provide pre-xdr'ed values for faster operation. 189 * These macros provide pre-xdr'ed values for faster operation.
188 */ 190 */
189#define nfs_ok __constant_htonl(NFS_OK) 191#define nfs_ok cpu_to_be32(NFS_OK)
190#define nfserr_perm __constant_htonl(NFSERR_PERM) 192#define nfserr_perm cpu_to_be32(NFSERR_PERM)
191#define nfserr_noent __constant_htonl(NFSERR_NOENT) 193#define nfserr_noent cpu_to_be32(NFSERR_NOENT)
192#define nfserr_io __constant_htonl(NFSERR_IO) 194#define nfserr_io cpu_to_be32(NFSERR_IO)
193#define nfserr_nxio __constant_htonl(NFSERR_NXIO) 195#define nfserr_nxio cpu_to_be32(NFSERR_NXIO)
194#define nfserr_eagain __constant_htonl(NFSERR_EAGAIN) 196#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN)
195#define nfserr_acces __constant_htonl(NFSERR_ACCES) 197#define nfserr_acces cpu_to_be32(NFSERR_ACCES)
196#define nfserr_exist __constant_htonl(NFSERR_EXIST) 198#define nfserr_exist cpu_to_be32(NFSERR_EXIST)
197#define nfserr_xdev __constant_htonl(NFSERR_XDEV) 199#define nfserr_xdev cpu_to_be32(NFSERR_XDEV)
198#define nfserr_nodev __constant_htonl(NFSERR_NODEV) 200#define nfserr_nodev cpu_to_be32(NFSERR_NODEV)
199#define nfserr_notdir __constant_htonl(NFSERR_NOTDIR) 201#define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR)
200#define nfserr_isdir __constant_htonl(NFSERR_ISDIR) 202#define nfserr_isdir cpu_to_be32(NFSERR_ISDIR)
201#define nfserr_inval __constant_htonl(NFSERR_INVAL) 203#define nfserr_inval cpu_to_be32(NFSERR_INVAL)
202#define nfserr_fbig __constant_htonl(NFSERR_FBIG) 204#define nfserr_fbig cpu_to_be32(NFSERR_FBIG)
203#define nfserr_nospc __constant_htonl(NFSERR_NOSPC) 205#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC)
204#define nfserr_rofs __constant_htonl(NFSERR_ROFS) 206#define nfserr_rofs cpu_to_be32(NFSERR_ROFS)
205#define nfserr_mlink __constant_htonl(NFSERR_MLINK) 207#define nfserr_mlink cpu_to_be32(NFSERR_MLINK)
206#define nfserr_opnotsupp __constant_htonl(NFSERR_OPNOTSUPP) 208#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP)
207#define nfserr_nametoolong __constant_htonl(NFSERR_NAMETOOLONG) 209#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG)
208#define nfserr_notempty __constant_htonl(NFSERR_NOTEMPTY) 210#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY)
209#define nfserr_dquot __constant_htonl(NFSERR_DQUOT) 211#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT)
210#define nfserr_stale __constant_htonl(NFSERR_STALE) 212#define nfserr_stale cpu_to_be32(NFSERR_STALE)
211#define nfserr_remote __constant_htonl(NFSERR_REMOTE) 213#define nfserr_remote cpu_to_be32(NFSERR_REMOTE)
212#define nfserr_wflush __constant_htonl(NFSERR_WFLUSH) 214#define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH)
213#define nfserr_badhandle __constant_htonl(NFSERR_BADHANDLE) 215#define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE)
214#define nfserr_notsync __constant_htonl(NFSERR_NOT_SYNC) 216#define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC)
215#define nfserr_badcookie __constant_htonl(NFSERR_BAD_COOKIE) 217#define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE)
216#define nfserr_notsupp __constant_htonl(NFSERR_NOTSUPP) 218#define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP)
217#define nfserr_toosmall __constant_htonl(NFSERR_TOOSMALL) 219#define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL)
218#define nfserr_serverfault __constant_htonl(NFSERR_SERVERFAULT) 220#define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT)
219#define nfserr_badtype __constant_htonl(NFSERR_BADTYPE) 221#define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE)
220#define nfserr_jukebox __constant_htonl(NFSERR_JUKEBOX) 222#define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX)
221#define nfserr_denied __constant_htonl(NFSERR_DENIED) 223#define nfserr_denied cpu_to_be32(NFSERR_DENIED)
222#define nfserr_deadlock __constant_htonl(NFSERR_DEADLOCK) 224#define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK)
223#define nfserr_expired __constant_htonl(NFSERR_EXPIRED) 225#define nfserr_expired cpu_to_be32(NFSERR_EXPIRED)
224#define nfserr_bad_cookie __constant_htonl(NFSERR_BAD_COOKIE) 226#define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE)
225#define nfserr_same __constant_htonl(NFSERR_SAME) 227#define nfserr_same cpu_to_be32(NFSERR_SAME)
226#define nfserr_clid_inuse __constant_htonl(NFSERR_CLID_INUSE) 228#define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE)
227#define nfserr_stale_clientid __constant_htonl(NFSERR_STALE_CLIENTID) 229#define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID)
228#define nfserr_resource __constant_htonl(NFSERR_RESOURCE) 230#define nfserr_resource cpu_to_be32(NFSERR_RESOURCE)
229#define nfserr_moved __constant_htonl(NFSERR_MOVED) 231#define nfserr_moved cpu_to_be32(NFSERR_MOVED)
230#define nfserr_nofilehandle __constant_htonl(NFSERR_NOFILEHANDLE) 232#define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE)
231#define nfserr_minor_vers_mismatch __constant_htonl(NFSERR_MINOR_VERS_MISMATCH) 233#define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH)
232#define nfserr_share_denied __constant_htonl(NFSERR_SHARE_DENIED) 234#define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED)
233#define nfserr_stale_stateid __constant_htonl(NFSERR_STALE_STATEID) 235#define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID)
234#define nfserr_old_stateid __constant_htonl(NFSERR_OLD_STATEID) 236#define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID)
235#define nfserr_bad_stateid __constant_htonl(NFSERR_BAD_STATEID) 237#define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID)
236#define nfserr_bad_seqid __constant_htonl(NFSERR_BAD_SEQID) 238#define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID)
237#define nfserr_symlink __constant_htonl(NFSERR_SYMLINK) 239#define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK)
238#define nfserr_not_same __constant_htonl(NFSERR_NOT_SAME) 240#define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME)
239#define nfserr_restorefh __constant_htonl(NFSERR_RESTOREFH) 241#define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH)
240#define nfserr_attrnotsupp __constant_htonl(NFSERR_ATTRNOTSUPP) 242#define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP)
241#define nfserr_bad_xdr __constant_htonl(NFSERR_BAD_XDR) 243#define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR)
242#define nfserr_openmode __constant_htonl(NFSERR_OPENMODE) 244#define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE)
243#define nfserr_locks_held __constant_htonl(NFSERR_LOCKS_HELD) 245#define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD)
244#define nfserr_op_illegal __constant_htonl(NFSERR_OP_ILLEGAL) 246#define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL)
245#define nfserr_grace __constant_htonl(NFSERR_GRACE) 247#define nfserr_grace cpu_to_be32(NFSERR_GRACE)
246#define nfserr_no_grace __constant_htonl(NFSERR_NO_GRACE) 248#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE)
247#define nfserr_reclaim_bad __constant_htonl(NFSERR_RECLAIM_BAD) 249#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD)
248#define nfserr_badname __constant_htonl(NFSERR_BADNAME) 250#define nfserr_badname cpu_to_be32(NFSERR_BADNAME)
249#define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN) 251#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN)
250#define nfserr_locked __constant_htonl(NFSERR_LOCKED) 252#define nfserr_locked cpu_to_be32(NFSERR_LOCKED)
251#define nfserr_wrongsec __constant_htonl(NFSERR_WRONGSEC) 253#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC)
252#define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME) 254#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE)
255#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT)
256#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST)
257#define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION)
258#define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT)
259#define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY)
260#define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
261#define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED)
262#define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY)
263#define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER)
264#define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE)
265#define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT)
266#define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT)
267#define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE)
268#define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED)
269#define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS)
270#define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG)
271#define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG)
272#define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE)
273#define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP)
274#define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND)
275#define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS)
276#define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION)
277#define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP)
278#define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY)
279#define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE)
280#define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY)
281#define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT)
282#define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION)
283#define nfserr_encr_alg_unsupp cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP)
284#define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT)
285#define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP)
286#define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED)
287#define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE)
288#define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL)
289#define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG)
290#define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT)
291#define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED)
253 292
254/* error codes for internal use */ 293/* error codes for internal use */
255/* if a request fails due to kmalloc failure, it gets dropped. 294/* if a request fails due to kmalloc failure, it gets dropped.
256 * Client should resend eventually 295 * Client should resend eventually
257 */ 296 */
258#define nfserr_dropit __constant_htonl(30000) 297#define nfserr_dropit cpu_to_be32(30000)
259/* end-of-file indicator in readdir */ 298/* end-of-file indicator in readdir */
260#define nfserr_eof __constant_htonl(30001) 299#define nfserr_eof cpu_to_be32(30001)
300/* replay detected */
301#define nfserr_replay_me cpu_to_be32(11001)
302/* nfs41 replay detected */
303#define nfserr_replay_cache cpu_to_be32(11002)
261 304
262/* Check for dir entries '.' and '..' */ 305/* Check for dir entries '.' and '..' */
263#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.')) 306#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))
@@ -300,7 +343,7 @@ extern struct timeval nfssvc_boot;
300 * TIME_BACKUP (unlikely to be supported any time soon) 343 * TIME_BACKUP (unlikely to be supported any time soon)
301 * TIME_CREATE (unlikely to be supported any time soon) 344 * TIME_CREATE (unlikely to be supported any time soon)
302 */ 345 */
303#define NFSD_SUPPORTED_ATTRS_WORD0 \ 346#define NFSD4_SUPPORTED_ATTRS_WORD0 \
304(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \ 347(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \
305 | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \ 348 | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \
306 | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \ 349 | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \
@@ -312,7 +355,7 @@ extern struct timeval nfssvc_boot;
312 | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ 355 | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \
313 | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) 356 | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL)
314 357
315#define NFSD_SUPPORTED_ATTRS_WORD1 \ 358#define NFSD4_SUPPORTED_ATTRS_WORD1 \
316(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \ 359(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \
317 | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \ 360 | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \
318 | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \ 361 | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \
@@ -320,6 +363,35 @@ extern struct timeval nfssvc_boot;
320 | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \ 363 | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \
321 | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID) 364 | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID)
322 365
366#define NFSD4_SUPPORTED_ATTRS_WORD2 0
367
368#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \
369 NFSD4_SUPPORTED_ATTRS_WORD0
370
371#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \
372 NFSD4_SUPPORTED_ATTRS_WORD1
373
374#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \
375 (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
376
377static inline u32 nfsd_suppattrs0(u32 minorversion)
378{
379 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
380 : NFSD4_SUPPORTED_ATTRS_WORD0;
381}
382
383static inline u32 nfsd_suppattrs1(u32 minorversion)
384{
385 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1
386 : NFSD4_SUPPORTED_ATTRS_WORD1;
387}
388
389static inline u32 nfsd_suppattrs2(u32 minorversion)
390{
391 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2
392 : NFSD4_SUPPORTED_ATTRS_WORD2;
393}
394
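The nfsd_suppattrs{0,1,2}() helpers above pick the advertised attribute bitmap by minor version: NFSv4.0 callers get the base NFSD4_SUPPORTED_ATTRS_WORD* masks, while any non-zero minorversion gets the 4.1 masks, which add FATTR4_WORD2_SUPPATTR_EXCLCREAT in word 2. A minimal userspace sketch of the same selection pattern; the DEMO_* constants and demo_suppattrs2() are made-up stand-ins, not the real FATTR4 values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the FATTR4_* constants (not the real values). */
#define DEMO_SUPPORTED_ATTRS_WORD2     0x0u
#define DEMO_SUPPATTR_EXCLCREAT        0x1u
#define DEMO_41_SUPPORTED_ATTRS_WORD2  (DEMO_SUPPORTED_ATTRS_WORD2 | DEMO_SUPPATTR_EXCLCREAT)

/* Same shape as nfsd_suppattrs2(): pick the mask by minor version. */
static uint32_t demo_suppattrs2(uint32_t minorversion)
{
    return minorversion ? DEMO_41_SUPPORTED_ATTRS_WORD2
                        : DEMO_SUPPORTED_ATTRS_WORD2;
}

int main(void)
{
    printf("v4.0 word2: %#x\n", (unsigned)demo_suppattrs2(0));  /* 0x0 */
    printf("v4.1 word2: %#x\n", (unsigned)demo_suppattrs2(1));  /* 0x1 */
    return 0;
}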
323/* These will return ERR_INVAL if specified in GETATTR or READDIR. */ 395/* These will return ERR_INVAL if specified in GETATTR or READDIR. */
324#define NFSD_WRITEONLY_ATTRS_WORD1 \ 396#define NFSD_WRITEONLY_ATTRS_WORD1 \
325(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) 397(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
@@ -330,6 +402,19 @@ extern struct timeval nfssvc_boot;
330#define NFSD_WRITEABLE_ATTRS_WORD1 \ 402#define NFSD_WRITEABLE_ATTRS_WORD1 \
331(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ 403(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \
332 | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) 404 | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
405#define NFSD_WRITEABLE_ATTRS_WORD2 0
406
407#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \
408 NFSD_WRITEABLE_ATTRS_WORD0
409/*
410 * we currently store the exclusive create verifier in the v_{a,m}time
411 * attributes so the client can't set these at create time using EXCLUSIVE4_1
412 */
413#define NFSD_SUPPATTR_EXCLCREAT_WORD1 \
414 (NFSD_WRITEABLE_ATTRS_WORD1 & \
415 ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET))
416#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \
417 NFSD_WRITEABLE_ATTRS_WORD2
333 418
334#endif /* CONFIG_NFSD_V4 */ 419#endif /* CONFIG_NFSD_V4 */
335 420
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index fa317f6c154b..afa19016c4a8 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -269,6 +269,13 @@ fh_copy(struct svc_fh *dst, struct svc_fh *src)
269 return dst; 269 return dst;
270} 270}
271 271
272static inline void
273fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
274{
275 dst->fh_size = src->fh_size;
276 memcpy(&dst->fh_base, &src->fh_base, src->fh_size);
277}
278
272static __inline__ struct svc_fh * 279static __inline__ struct svc_fh *
273fh_init(struct svc_fh *fhp, int maxsize) 280fh_init(struct svc_fh *fhp, int maxsize)
274{ 281{
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 128298c0362d..4d61c873feed 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -66,8 +66,7 @@ struct nfs4_cb_recall {
66 u32 cbr_ident; 66 u32 cbr_ident;
67 int cbr_trunc; 67 int cbr_trunc;
68 stateid_t cbr_stateid; 68 stateid_t cbr_stateid;
69 u32 cbr_fhlen; 69 struct knfsd_fh cbr_fh;
70 char cbr_fhval[NFS4_FHSIZE];
71 struct nfs4_delegation *cbr_dp; 70 struct nfs4_delegation *cbr_dp;
72}; 71};
73 72
@@ -86,8 +85,7 @@ struct nfs4_delegation {
86}; 85};
87 86
88#define dl_stateid dl_recall.cbr_stateid 87#define dl_stateid dl_recall.cbr_stateid
89#define dl_fhlen dl_recall.cbr_fhlen 88#define dl_fh dl_recall.cbr_fh
90#define dl_fhval dl_recall.cbr_fhval
91 89
92/* client delegation callback info */ 90/* client delegation callback info */
93struct nfs4_callback { 91struct nfs4_callback {
@@ -101,6 +99,64 @@ struct nfs4_callback {
101 struct rpc_clnt * cb_client; 99 struct rpc_clnt * cb_client;
102}; 100};
103 101
102/* Maximum number of slots per session. 128 is useful for long haul TCP */
103#define NFSD_MAX_SLOTS_PER_SESSION 128
104/* Maximum number of pages per slot cache entry */
105#define NFSD_PAGES_PER_SLOT 1
106/* Maximum number of operations per session compound */
107#define NFSD_MAX_OPS_PER_COMPOUND 16
108
109struct nfsd4_cache_entry {
110 __be32 ce_status;
111 struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */
112 struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1];
113 int ce_cachethis;
114 short ce_resused;
115 int ce_opcnt;
116 int ce_rpchdrlen;
117};
118
119struct nfsd4_slot {
120 bool sl_inuse;
121 u32 sl_seqid;
122 struct nfsd4_cache_entry sl_cache_entry;
123};
124
125struct nfsd4_session {
126 struct kref se_ref;
127 struct list_head se_hash; /* hash by sessionid */
128 struct list_head se_perclnt;
129 u32 se_flags;
130 struct nfs4_client *se_client; /* for expire_client */
131 struct nfs4_sessionid se_sessionid;
132 u32 se_fmaxreq_sz;
133 u32 se_fmaxresp_sz;
134 u32 se_fmaxresp_cached;
135 u32 se_fmaxops;
136 u32 se_fnumslots;
137 struct nfsd4_slot se_slots[]; /* forward channel slots */
138};
139
140static inline void
141nfsd4_put_session(struct nfsd4_session *ses)
142{
143 extern void free_session(struct kref *kref);
144 kref_put(&ses->se_ref, free_session);
145}
146
147static inline void
148nfsd4_get_session(struct nfsd4_session *ses)
149{
150 kref_get(&ses->se_ref);
151}
152
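nfsd4_get_session()/nfsd4_put_session() wrap a kref on the session so that the last reference dropped runs free_session(). A rough userspace analogue of that get/put lifetime pattern (plain counter instead of struct kref; the demo_* names are hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct demo_session {
    int refcount;            /* the kernel uses struct kref + kref_get/kref_put */
    /* ... per-session state would live here ... */
};

static struct demo_session *demo_session_alloc(void)
{
    struct demo_session *s = calloc(1, sizeof(*s));
    if (s)
        s->refcount = 1;     /* creator holds the first reference */
    return s;
}

static void demo_get_session(struct demo_session *s)
{
    s->refcount++;
}

static void demo_put_session(struct demo_session *s)
{
    if (--s->refcount == 0) {    /* last reference: free, like free_session() */
        free(s);
        printf("session freed\n");
    }
}

int main(void)
{
    struct demo_session *s = demo_session_alloc();
    if (!s)
        return 1;
    demo_get_session(s);    /* e.g. a compound in flight pins the session */
    demo_put_session(s);    /* compound done */
    demo_put_session(s);    /* table reference dropped -> freed */
    return 0;
}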
153/* formatted contents of nfs4_sessionid */
154struct nfsd4_sessionid {
155 clientid_t clientid;
156 u32 sequence;
157 u32 reserved;
158};
159
104#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */ 160#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */
105 161
106/* 162/*
@@ -132,6 +188,12 @@ struct nfs4_client {
132 struct nfs4_callback cl_callback; /* callback info */ 188 struct nfs4_callback cl_callback; /* callback info */
133 atomic_t cl_count; /* ref count */ 189 atomic_t cl_count; /* ref count */
134 u32 cl_firststate; /* recovery dir creation */ 190 u32 cl_firststate; /* recovery dir creation */
191
192 /* for nfs41 */
193 struct list_head cl_sessions;
194 struct nfsd4_slot cl_slot; /* create_session slot */
195 u32 cl_exchange_flags;
196 struct nfs4_sessionid cl_sessionid;
135}; 197};
136 198
137/* struct nfs4_client_reset 199/* struct nfs4_client_reset
@@ -168,8 +230,7 @@ struct nfs4_replay {
168 unsigned int rp_buflen; 230 unsigned int rp_buflen;
169 char *rp_buf; 231 char *rp_buf;
170 unsigned int rp_allocated; 232 unsigned int rp_allocated;
171 int rp_openfh_len; 233 struct knfsd_fh rp_openfh;
172 char rp_openfh[NFS4_FHSIZE];
173 char rp_ibuf[NFSD4_REPLAY_ISIZE]; 234 char rp_ibuf[NFSD4_REPLAY_ISIZE];
174}; 235};
175 236
@@ -217,7 +278,7 @@ struct nfs4_stateowner {
217* share_access, share_deny on the file. 278* share_access, share_deny on the file.
218*/ 279*/
219struct nfs4_file { 280struct nfs4_file {
220 struct kref fi_ref; 281 atomic_t fi_ref;
221 struct list_head fi_hash; /* hash by "struct inode *" */ 282 struct list_head fi_hash; /* hash by "struct inode *" */
222 struct list_head fi_stateids; 283 struct list_head fi_stateids;
223 struct list_head fi_delegations; 284 struct list_head fi_delegations;
@@ -259,14 +320,13 @@ struct nfs4_stateid {
259}; 320};
260 321
261/* flags for preprocess_seqid_op() */ 322/* flags for preprocess_seqid_op() */
262#define CHECK_FH 0x00000001 323#define HAS_SESSION 0x00000001
263#define CONFIRM 0x00000002 324#define CONFIRM 0x00000002
264#define OPEN_STATE 0x00000004 325#define OPEN_STATE 0x00000004
265#define LOCK_STATE 0x00000008 326#define LOCK_STATE 0x00000008
266#define RD_STATE 0x00000010 327#define RD_STATE 0x00000010
267#define WR_STATE 0x00000020 328#define WR_STATE 0x00000020
268#define CLOSE_STATE 0x00000040 329#define CLOSE_STATE 0x00000040
269#define DELEG_RET 0x00000080
270 330
271#define seqid_mutating_err(err) \ 331#define seqid_mutating_err(err) \
272 (((err) != nfserr_stale_clientid) && \ 332 (((err) != nfserr_stale_clientid) && \
@@ -274,7 +334,9 @@ struct nfs4_stateid {
274 ((err) != nfserr_stale_stateid) && \ 334 ((err) != nfserr_stale_stateid) && \
275 ((err) != nfserr_bad_stateid)) 335 ((err) != nfserr_bad_stateid))
276 336
277extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh, 337struct nfsd4_compound_state;
338
339extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
278 stateid_t *stateid, int flags, struct file **filp); 340 stateid_t *stateid, int flags, struct file **filp);
279extern void nfs4_lock_state(void); 341extern void nfs4_lock_state(void);
280extern void nfs4_unlock_state(void); 342extern void nfs4_unlock_state(void);
@@ -290,7 +352,7 @@ extern void nfsd4_init_recdir(char *recdir_name);
290extern int nfsd4_recdir_load(void); 352extern int nfsd4_recdir_load(void);
291extern void nfsd4_shutdown_recdir(void); 353extern void nfsd4_shutdown_recdir(void);
292extern int nfs4_client_to_reclaim(const char *name); 354extern int nfs4_client_to_reclaim(const char *name);
293extern int nfs4_has_reclaimed_state(const char *name); 355extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
294extern void nfsd4_recdir_purge_old(void); 356extern void nfsd4_recdir_purge_old(void);
295extern int nfsd4_create_clid_dir(struct nfs4_client *clp); 357extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
296extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); 358extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
diff --git a/include/linux/nfsd/stats.h b/include/linux/nfsd/stats.h
index 7678cfbe9960..2693ef647df6 100644
--- a/include/linux/nfsd/stats.h
+++ b/include/linux/nfsd/stats.h
@@ -11,6 +11,11 @@
11 11
12#include <linux/nfs4.h> 12#include <linux/nfs4.h>
13 13
14/* thread usage wraps every million seconds (approx one fortnight) */
15#define NFSD_USAGE_WRAP (HZ*1000000)
16
17#ifdef __KERNEL__
18
14struct nfsd_stats { 19struct nfsd_stats {
15 unsigned int rchits; /* repcache hits */ 20 unsigned int rchits; /* repcache hits */
16 unsigned int rcmisses; /* repcache misses */ 21 unsigned int rcmisses; /* repcache misses */
@@ -35,10 +40,6 @@ struct nfsd_stats {
35 40
36}; 41};
37 42
38/* thread usage wraps every million seconds (approx one fortnight) */
39#define NFSD_USAGE_WRAP (HZ*1000000)
40
41#ifdef __KERNEL__
42 43
43extern struct nfsd_stats nfsdstats; 44extern struct nfsd_stats nfsdstats;
44extern struct svc_stat nfsd_svcstats; 45extern struct svc_stat nfsd_svcstats;
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 27bd3e38ec5a..f80d6013fdc3 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -45,10 +45,22 @@
45#define XDR_LEN(n) (((n) + 3) & ~3) 45#define XDR_LEN(n) (((n) + 3) & ~3)
46 46
47struct nfsd4_compound_state { 47struct nfsd4_compound_state {
48 struct svc_fh current_fh; 48 struct svc_fh current_fh;
49 struct svc_fh save_fh; 49 struct svc_fh save_fh;
50 struct nfs4_stateowner *replay_owner; 50 struct nfs4_stateowner *replay_owner;
51}; 51 /* For sessions DRC */
52 struct nfsd4_session *session;
53 struct nfsd4_slot *slot;
54 __be32 *statp;
55 size_t iovlen;
56 u32 minorversion;
57 u32 status;
58};
59
60static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs)
61{
62 return cs->slot != NULL;
63}
52 64
53struct nfsd4_change_info { 65struct nfsd4_change_info {
54 u32 atomic; 66 u32 atomic;
@@ -90,7 +102,7 @@ struct nfsd4_create {
90 u32 specdata2; 102 u32 specdata2;
91 } dev; /* NF4BLK, NF4CHR */ 103 } dev; /* NF4BLK, NF4CHR */
92 } u; 104 } u;
93 u32 cr_bmval[2]; /* request */ 105 u32 cr_bmval[3]; /* request */
94 struct iattr cr_iattr; /* request */ 106 struct iattr cr_iattr; /* request */
95 struct nfsd4_change_info cr_cinfo; /* response */ 107 struct nfsd4_change_info cr_cinfo; /* response */
96 struct nfs4_acl *cr_acl; 108 struct nfs4_acl *cr_acl;
@@ -105,7 +117,7 @@ struct nfsd4_delegreturn {
105}; 117};
106 118
107struct nfsd4_getattr { 119struct nfsd4_getattr {
108 u32 ga_bmval[2]; /* request */ 120 u32 ga_bmval[3]; /* request */
109 struct svc_fh *ga_fhp; /* response */ 121 struct svc_fh *ga_fhp; /* response */
110}; 122};
111 123
@@ -206,11 +218,9 @@ struct nfsd4_open {
206 stateid_t op_delegate_stateid; /* request - response */ 218 stateid_t op_delegate_stateid; /* request - response */
207 u32 op_create; /* request */ 219 u32 op_create; /* request */
208 u32 op_createmode; /* request */ 220 u32 op_createmode; /* request */
209 u32 op_bmval[2]; /* request */ 221 u32 op_bmval[3]; /* request */
210 union { /* request */ 222 struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
211 struct iattr iattr; /* UNCHECKED4,GUARDED4 */ 223 nfs4_verifier verf; /* EXCLUSIVE4 */
212 nfs4_verifier verf; /* EXCLUSIVE4 */
213 } u;
214 clientid_t op_clientid; /* request */ 224 clientid_t op_clientid; /* request */
215 struct xdr_netobj op_owner; /* request */ 225 struct xdr_netobj op_owner; /* request */
216 u32 op_seqid; /* request */ 226 u32 op_seqid; /* request */
@@ -224,8 +234,8 @@ struct nfsd4_open {
224 struct nfs4_stateowner *op_stateowner; /* used during processing */ 234 struct nfs4_stateowner *op_stateowner; /* used during processing */
225 struct nfs4_acl *op_acl; 235 struct nfs4_acl *op_acl;
226}; 236};
227#define op_iattr u.iattr 237#define op_iattr iattr
228#define op_verf u.verf 238#define op_verf verf
229 239
230struct nfsd4_open_confirm { 240struct nfsd4_open_confirm {
231 stateid_t oc_req_stateid /* request */; 241 stateid_t oc_req_stateid /* request */;
@@ -259,7 +269,7 @@ struct nfsd4_readdir {
259 nfs4_verifier rd_verf; /* request */ 269 nfs4_verifier rd_verf; /* request */
260 u32 rd_dircount; /* request */ 270 u32 rd_dircount; /* request */
261 u32 rd_maxcount; /* request */ 271 u32 rd_maxcount; /* request */
262 u32 rd_bmval[2]; /* request */ 272 u32 rd_bmval[3]; /* request */
263 struct svc_rqst *rd_rqstp; /* response */ 273 struct svc_rqst *rd_rqstp; /* response */
264 struct svc_fh * rd_fhp; /* response */ 274 struct svc_fh * rd_fhp; /* response */
265 275
@@ -301,7 +311,7 @@ struct nfsd4_secinfo {
301 311
302struct nfsd4_setattr { 312struct nfsd4_setattr {
303 stateid_t sa_stateid; /* request */ 313 stateid_t sa_stateid; /* request */
304 u32 sa_bmval[2]; /* request */ 314 u32 sa_bmval[3]; /* request */
305 struct iattr sa_iattr; /* request */ 315 struct iattr sa_iattr; /* request */
306 struct nfs4_acl *sa_acl; 316 struct nfs4_acl *sa_acl;
307}; 317};
@@ -327,7 +337,7 @@ struct nfsd4_setclientid_confirm {
327 337
328/* also used for NVERIFY */ 338/* also used for NVERIFY */
329struct nfsd4_verify { 339struct nfsd4_verify {
330 u32 ve_bmval[2]; /* request */ 340 u32 ve_bmval[3]; /* request */
331 u32 ve_attrlen; /* request */ 341 u32 ve_attrlen; /* request */
332 char * ve_attrval; /* request */ 342 char * ve_attrval; /* request */
333}; 343};
@@ -344,6 +354,54 @@ struct nfsd4_write {
344 nfs4_verifier wr_verifier; /* response */ 354 nfs4_verifier wr_verifier; /* response */
345}; 355};
346 356
357struct nfsd4_exchange_id {
358 nfs4_verifier verifier;
359 struct xdr_netobj clname;
360 u32 flags;
361 clientid_t clientid;
362 u32 seqid;
363 int spa_how;
364};
365
366struct nfsd4_channel_attrs {
367 u32 headerpadsz;
368 u32 maxreq_sz;
369 u32 maxresp_sz;
370 u32 maxresp_cached;
371 u32 maxops;
372 u32 maxreqs;
373 u32 nr_rdma_attrs;
374 u32 rdma_attrs;
375};
376
377struct nfsd4_create_session {
378 clientid_t clientid;
379 struct nfs4_sessionid sessionid;
380 u32 seqid;
381 u32 flags;
382 struct nfsd4_channel_attrs fore_channel;
383 struct nfsd4_channel_attrs back_channel;
384 u32 callback_prog;
385 u32 uid;
386 u32 gid;
387};
388
389struct nfsd4_sequence {
390 struct nfs4_sessionid sessionid; /* request/response */
391 u32 seqid; /* request/response */
392 u32 slotid; /* request/response */
393 u32 maxslots; /* request/response */
394 u32 cachethis; /* request */
395#if 0
396 u32 target_maxslots; /* response */
397 u32 status_flags; /* response */
398#endif /* not yet */
399};
400
401struct nfsd4_destroy_session {
402 struct nfs4_sessionid sessionid;
403};
404
347struct nfsd4_op { 405struct nfsd4_op {
348 int opnum; 406 int opnum;
349 __be32 status; 407 __be32 status;
@@ -378,6 +436,12 @@ struct nfsd4_op {
378 struct nfsd4_verify verify; 436 struct nfsd4_verify verify;
379 struct nfsd4_write write; 437 struct nfsd4_write write;
380 struct nfsd4_release_lockowner release_lockowner; 438 struct nfsd4_release_lockowner release_lockowner;
439
440 /* NFSv4.1 */
441 struct nfsd4_exchange_id exchange_id;
442 struct nfsd4_create_session create_session;
443 struct nfsd4_destroy_session destroy_session;
444 struct nfsd4_sequence sequence;
381 } u; 445 } u;
382 struct nfs4_replay * replay; 446 struct nfs4_replay * replay;
383}; 447};
@@ -416,9 +480,22 @@ struct nfsd4_compoundres {
416 u32 taglen; 480 u32 taglen;
417 char * tag; 481 char * tag;
418 u32 opcnt; 482 u32 opcnt;
419 __be32 * tagp; /* where to encode tag and opcount */ 483 __be32 * tagp; /* tag, opcount encode location */
484 struct nfsd4_compound_state cstate;
420}; 485};
421 486
487static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
488{
489 struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
490 return args->opcnt == 1;
491}
492
493static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
494{
495 return !resp->cstate.slot->sl_cache_entry.ce_cachethis ||
496 nfsd4_is_solo_sequence(resp);
497}
498
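nfsd4_not_cached() above combines two conditions: the reply is not cached when the client's SEQUENCE set cachethis to zero, or when the compound is a lone SEQUENCE op (nothing worth replaying). A compact restatement of that decision with hypothetical plain-bool inputs:

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the nfsd4_not_cached() logic: skip the reply cache when the
 * client did not request caching, or when the compound is SEQUENCE alone. */
static bool demo_not_cached(bool cachethis, int opcnt)
{
    bool solo_sequence = (opcnt == 1);
    return !cachethis || solo_sequence;
}

int main(void)
{
    printf("%d\n", demo_not_cached(true, 3));   /* 0: cache the reply */
    printf("%d\n", demo_not_cached(false, 3));  /* 1: client opted out */
    printf("%d\n", demo_not_cached(true, 1));   /* 1: solo SEQUENCE */
    return 0;
}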
422#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) 499#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
423 500
424static inline void 501static inline void
@@ -448,7 +525,23 @@ extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
448extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 525extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
449 struct nfsd4_compound_state *, 526 struct nfsd4_compound_state *,
450 struct nfsd4_setclientid_confirm *setclientid_confirm); 527 struct nfsd4_setclientid_confirm *setclientid_confirm);
451extern __be32 nfsd4_process_open1(struct nfsd4_open *open); 528extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp);
529extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
530 struct nfsd4_sequence *seq);
531extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
532 struct nfsd4_compound_state *,
533struct nfsd4_exchange_id *);
534 extern __be32 nfsd4_create_session(struct svc_rqst *,
535 struct nfsd4_compound_state *,
536 struct nfsd4_create_session *);
537extern __be32 nfsd4_sequence(struct svc_rqst *,
538 struct nfsd4_compound_state *,
539 struct nfsd4_sequence *);
540extern __be32 nfsd4_destroy_session(struct svc_rqst *,
541 struct nfsd4_compound_state *,
542 struct nfsd4_destroy_session *);
543extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
544 struct nfsd4_open *open);
452extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, 545extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
453 struct svc_fh *current_fh, struct nfsd4_open *open); 546 struct svc_fh *current_fh, struct nfsd4_open *open);
454extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, 547extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
new file mode 100644
index 000000000000..79fec6af3f9f
--- /dev/null
+++ b/include/linux/nilfs2_fs.h
@@ -0,0 +1,801 @@
1/*
2 * nilfs2_fs.h - NILFS2 on-disk structures and common declarations.
3 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 *
20 * Written by Koji Sato <koji@osrg.net>
21 * Ryusuke Konishi <ryusuke@osrg.net>
22 */
23/*
24 * linux/include/linux/ext2_fs.h
25 *
26 * Copyright (C) 1992, 1993, 1994, 1995
27 * Remy Card (card@masi.ibp.fr)
28 * Laboratoire MASI - Institut Blaise Pascal
29 * Universite Pierre et Marie Curie (Paris VI)
30 *
31 * from
32 *
33 * linux/include/linux/minix_fs.h
34 *
35 * Copyright (C) 1991, 1992 Linus Torvalds
36 */
37
38#ifndef _LINUX_NILFS_FS_H
39#define _LINUX_NILFS_FS_H
40
41#include <linux/types.h>
42#include <linux/ioctl.h>
43
44/*
45 * Inode flags stored in nilfs_inode and on-memory nilfs inode
46 *
47 * We define these flags based on ext2-fs for compatibility
48 * reasons; this avoids problems with chattr(1)
49 */
50#define NILFS_SECRM_FL 0x00000001 /* Secure deletion */
51#define NILFS_UNRM_FL 0x00000002 /* Undelete */
52#define NILFS_SYNC_FL 0x00000008 /* Synchronous updates */
53#define NILFS_IMMUTABLE_FL 0x00000010 /* Immutable file */
54#define NILFS_APPEND_FL 0x00000020 /* writes to file may only append */
55#define NILFS_NODUMP_FL 0x00000040 /* do not dump file */
56#define NILFS_NOATIME_FL 0x00000080 /* do not update atime */
57/* Reserved for compression usage... */
58#define NILFS_NOTAIL_FL 0x00008000 /* file tail should not be merged */
59#define NILFS_DIRSYNC_FL 0x00010000 /* dirsync behaviour */
60
61#define NILFS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
62#define NILFS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
63
64
65#define NILFS_INODE_BMAP_SIZE 7
66/**
67 * struct nilfs_inode - structure of an inode on disk
68 * @i_blocks: blocks count
69 * @i_size: size in bytes
70 * @i_ctime: creation time (seconds)
71 * @i_mtime: modification time (seconds)
72 * @i_ctime_nsec: creation time (nano seconds)
73 * @i_mtime_nsec: modification time (nano seconds)
74 * @i_uid: user id
75 * @i_gid: group id
76 * @i_mode: file mode
77 * @i_links_count: links count
78 * @i_flags: file flags
79 * @i_bmap: block mapping
80 * @i_xattr: extended attributes
81 * @i_generation: file generation (for NFS)
82 * @i_pad: padding
83 */
84struct nilfs_inode {
85 __le64 i_blocks;
86 __le64 i_size;
87 __le64 i_ctime;
88 __le64 i_mtime;
89 __le32 i_ctime_nsec;
90 __le32 i_mtime_nsec;
91 __le32 i_uid;
92 __le32 i_gid;
93 __le16 i_mode;
94 __le16 i_links_count;
95 __le32 i_flags;
96 __le64 i_bmap[NILFS_INODE_BMAP_SIZE];
97#define i_device_code i_bmap[0]
98 __le64 i_xattr;
99 __le32 i_generation;
100 __le32 i_pad;
101};
102
103/**
104 * struct nilfs_super_root - structure of super root
105 * @sr_sum: check sum
106 * @sr_bytes: byte count of the structure
107 * @sr_flags: flags (reserved)
108 * @sr_nongc_ctime: write time of the last segment not for cleaner operation
109 * @sr_dat: DAT file inode
110 * @sr_cpfile: checkpoint file inode
111 * @sr_sufile: segment usage file inode
112 */
113struct nilfs_super_root {
114 __le32 sr_sum;
115 __le16 sr_bytes;
116 __le16 sr_flags;
117 __le64 sr_nongc_ctime;
118 struct nilfs_inode sr_dat;
119 struct nilfs_inode sr_cpfile;
120 struct nilfs_inode sr_sufile;
121};
122
123#define NILFS_SR_MDT_OFFSET(inode_size, i) \
124 ((unsigned long)&((struct nilfs_super_root *)0)->sr_dat + \
125 (inode_size) * (i))
126#define NILFS_SR_DAT_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 0)
127#define NILFS_SR_CPFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 1)
128#define NILFS_SR_SUFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 2)
129#define NILFS_SR_BYTES (sizeof(struct nilfs_super_root))
130
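NILFS_SR_MDT_OFFSET() computes the byte offset of the i-th metadata inode inside the super root by taking the offset of sr_dat and adding i on-disk inode sizes; the cast-through-NULL idiom is a hand-rolled offsetof(). A simplified illustration using offsetof() on a mock layout (the mock_* types and field sizes are placeholders, not the real on-disk structures):

#include <stddef.h>
#include <stdio.h>

/* Mock layout with the same shape as struct nilfs_super_root:
 * a small header followed by three inode slots. */
struct mock_inode { char bytes[128]; };   /* stand-in for struct nilfs_inode */
struct mock_super_root {
    unsigned int sum;
    unsigned short bytes, flags;
    unsigned long long nongc_ctime;
    struct mock_inode dat, cpfile, sufile;
};

#define MOCK_SR_MDT_OFFSET(inode_size, i) \
    (offsetof(struct mock_super_root, dat) + (size_t)(inode_size) * (i))

int main(void)
{
    size_t isz = sizeof(struct mock_inode);
    printf("DAT    at %zu\n", MOCK_SR_MDT_OFFSET(isz, 0));
    printf("cpfile at %zu\n", MOCK_SR_MDT_OFFSET(isz, 1));
    printf("sufile at %zu\n", MOCK_SR_MDT_OFFSET(isz, 2));
    return 0;
}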
131/*
132 * Maximal mount counts
133 */
134#define NILFS_DFL_MAX_MNT_COUNT 50 /* 50 mounts */
135
136/*
137 * File system states (sbp->s_state, nilfs->ns_mount_state)
138 */
139#define NILFS_VALID_FS 0x0001 /* Unmounted cleanly */
140#define NILFS_ERROR_FS 0x0002 /* Errors detected */
141#define NILFS_RESIZE_FS 0x0004 /* Resize required */
142
143/*
144 * Mount flags (sbi->s_mount_opt)
145 */
146#define NILFS_MOUNT_ERROR_MODE 0x0070 /* Error mode mask */
147#define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */
148#define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */
149#define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */
150#define NILFS_MOUNT_SNAPSHOT 0x0080 /* Snapshot flag */
151#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */
152#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order
153 semantics also for data */
154
155
156/**
157 * struct nilfs_super_block - structure of super block on disk
158 */
159struct nilfs_super_block {
160 __le32 s_rev_level; /* Revision level */
161 __le16 s_minor_rev_level; /* minor revision level */
162 __le16 s_magic; /* Magic signature */
163
164 __le16 s_bytes; /* Bytes count of CRC calculation
165 for this structure. s_reserved
166 is excluded. */
167 __le16 s_flags; /* flags */
168 __le32 s_crc_seed; /* Seed value of CRC calculation */
169 __le32 s_sum; /* Check sum of super block */
170
171 __le32 s_log_block_size; /* Block size represented as follows
172 blocksize =
173 1 << (s_log_block_size + 10) */
174 __le64 s_nsegments; /* Number of segments in filesystem */
175 __le64 s_dev_size; /* block device size in bytes */
176 __le64 s_first_data_block; /* 1st seg disk block number */
177 __le32 s_blocks_per_segment; /* number of blocks per full segment */
178 __le32 s_r_segments_percentage; /* Reserved segments percentage */
179
180 __le64 s_last_cno; /* Last checkpoint number */
181 __le64 s_last_pseg; /* disk block addr pseg written last */
182 __le64 s_last_seq; /* seq. number of seg written last */
183 __le64 s_free_blocks_count; /* Free blocks count */
184
185 __le64 s_ctime; /* Creation time (execution time of
186 newfs) */
187 __le64 s_mtime; /* Mount time */
188 __le64 s_wtime; /* Write time */
189 __le16 s_mnt_count; /* Mount count */
190 __le16 s_max_mnt_count; /* Maximal mount count */
191 __le16 s_state; /* File system state */
192 __le16 s_errors; /* Behaviour when detecting errors */
193 __le64 s_lastcheck; /* time of last check */
194
195 __le32 s_checkinterval; /* max. time between checks */
196 __le32 s_creator_os; /* OS */
197 __le16 s_def_resuid; /* Default uid for reserved blocks */
198 __le16 s_def_resgid; /* Default gid for reserved blocks */
199 __le32 s_first_ino; /* First non-reserved inode */
200
201 __le16 s_inode_size; /* Size of an inode */
202 __le16 s_dat_entry_size; /* Size of a dat entry */
203 __le16 s_checkpoint_size; /* Size of a checkpoint */
204 __le16 s_segment_usage_size; /* Size of a segment usage */
205
206 __u8 s_uuid[16]; /* 128-bit uuid for volume */
207 char s_volume_name[16]; /* volume name */
208 char s_last_mounted[64]; /* directory where last mounted */
209
210 __le32 s_c_interval; /* Commit interval of segment */
211 __le32 s_c_block_max; /* Threshold of data amount for
212 the segment construction */
213 __u32 s_reserved[192]; /* padding to the end of the block */
214};
215
216/*
217 * Codes for operating systems
218 */
219#define NILFS_OS_LINUX 0
220/* Codes from 1 to 4 are reserved to keep compatibility with ext2 creator-OS */
221
222/*
223 * Revision levels
224 */
225#define NILFS_CURRENT_REV 2 /* current major revision */
226#define NILFS_MINOR_REV 0 /* minor revision */
227
228/*
229 * Bytes count of super_block for CRC-calculation
230 */
231#define NILFS_SB_BYTES \
232 ((long)&((struct nilfs_super_block *)0)->s_reserved)
233
234/*
235 * Special inode number
236 */
237#define NILFS_ROOT_INO 2 /* Root file inode */
238#define NILFS_DAT_INO 3 /* DAT file */
239#define NILFS_CPFILE_INO 4 /* checkpoint file */
240#define NILFS_SUFILE_INO 5 /* segment usage file */
241#define NILFS_IFILE_INO 6 /* ifile */
242#define NILFS_ATIME_INO 7 /* Atime file (reserved) */
243#define NILFS_XATTR_INO 8 /* Xattribute file (reserved) */
244#define NILFS_SKETCH_INO 10 /* Sketch file */
245#define NILFS_USER_INO 11 /* First user's file inode number */
246
247#define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */
248#define NILFS_SUPER_MAGIC 0x3434 /* NILFS filesystem magic number */
249
250#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in
251 a full segment */
252#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in
253 a partial segment */
254#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved
255 segments */
256
257/*
258 * bytes offset of secondary super block
259 */
260#define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12)
261
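NILFS_SB2_OFFSET_BYTES() rounds the device size down to a multiple of 4096 and then backs off one 4 KiB block, so the secondary super block sits at the start of the last full 4 KiB block of the device. A quick check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Same formula as NILFS_SB2_OFFSET_BYTES(devsize). */
static uint64_t sb2_offset_bytes(uint64_t devsize)
{
    return ((devsize >> 12) - 1) << 12;
}

int main(void)
{
    /* 8 GiB device: the last full 4 KiB block starts 4096 bytes from the end. */
    uint64_t devsize = 8ULL << 30;
    printf("%llu\n", (unsigned long long)sb2_offset_bytes(devsize)); /* 8589930496 */

    /* Device size not a multiple of 4 KiB: the trailing partial block is skipped. */
    printf("%llu\n", (unsigned long long)sb2_offset_bytes(10000));   /* 4096 */
    return 0;
}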
262/*
263 * Maximal count of links to a file
264 */
265#define NILFS_LINK_MAX 32000
266
267/*
268 * Structure of a directory entry
269 * (Same as ext2)
270 */
271
272#define NILFS_NAME_LEN 255
273
274/*
275 * The new version of the directory entry. Since V0 structures are
276 * stored in intel byte order, and the name_len field could never be
277 * bigger than 255 chars, it's safe to reclaim the extra byte for the
278 * file_type field.
279 */
280struct nilfs_dir_entry {
281 __le64 inode; /* Inode number */
282 __le16 rec_len; /* Directory entry length */
283 __u8 name_len; /* Name length */
284 __u8 file_type;
285 char name[NILFS_NAME_LEN]; /* File name */
286 char pad;
287};
288
289/*
290 * NILFS directory file types. Only the low 3 bits are used. The
291 * other bits are reserved for now.
292 */
293enum {
294 NILFS_FT_UNKNOWN,
295 NILFS_FT_REG_FILE,
296 NILFS_FT_DIR,
297 NILFS_FT_CHRDEV,
298 NILFS_FT_BLKDEV,
299 NILFS_FT_FIFO,
300 NILFS_FT_SOCK,
301 NILFS_FT_SYMLINK,
302 NILFS_FT_MAX
303};
304
305/*
306 * NILFS_DIR_PAD defines the directory entries boundaries
307 *
308 * NOTE: It must be a multiple of 8
309 */
310#define NILFS_DIR_PAD 8
311#define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1)
312#define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \
313 ~NILFS_DIR_ROUND)
314
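NILFS_DIR_REC_LEN() sizes a directory entry as the 12 fixed bytes of struct nilfs_dir_entry (inode, rec_len, name_len, file_type) plus the name, rounded up to the 8-byte NILFS_DIR_PAD boundary. A small worked check of the rounding, using DEMO_* stand-ins for the macros above:

#include <stdio.h>

#define DEMO_DIR_PAD   8
#define DEMO_DIR_ROUND (DEMO_DIR_PAD - 1)
/* 12 fixed bytes (inode + rec_len + name_len + file_type) + name, rounded to 8. */
#define DEMO_DIR_REC_LEN(name_len) \
    (((name_len) + 12 + DEMO_DIR_ROUND) & ~DEMO_DIR_ROUND)

int main(void)
{
    printf("%d\n", DEMO_DIR_REC_LEN(1));   /* 16 */
    printf("%d\n", DEMO_DIR_REC_LEN(4));   /* 16 */
    printf("%d\n", DEMO_DIR_REC_LEN(5));   /* 24 */
    printf("%d\n", DEMO_DIR_REC_LEN(255)); /* 272 */
    return 0;
}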
315
316/**
317 * struct nilfs_finfo - file information
318 * @fi_ino: inode number
319 * @fi_cno: checkpoint number
320 * @fi_nblocks: number of blocks (including intermediate blocks)
321 * @fi_ndatablk: number of file data blocks
322 */
323struct nilfs_finfo {
324 __le64 fi_ino;
325 __le64 fi_cno;
326 __le32 fi_nblocks;
327 __le32 fi_ndatablk;
328 /* array of virtual block numbers */
329};
330
331/**
332 * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned
333 * @bi_vblocknr: virtual block number
334 * @bi_blkoff: block offset
335 */
336struct nilfs_binfo_v {
337 __le64 bi_vblocknr;
338 __le64 bi_blkoff;
339};
340
341/**
342 * struct nilfs_binfo_dat - information for the block which belongs to the DAT file
343 * @bi_blkoff: block offset
344 * @bi_level: level
345 * @bi_pad: padding
346 */
347struct nilfs_binfo_dat {
348 __le64 bi_blkoff;
349 __u8 bi_level;
350 __u8 bi_pad[7];
351};
352
353/**
354 * union nilfs_binfo: block information
355 * @bi_v: nilfs_binfo_v structure
356 * @bi_dat: nilfs_binfo_dat structure
357 */
358union nilfs_binfo {
359 struct nilfs_binfo_v bi_v;
360 struct nilfs_binfo_dat bi_dat;
361};
362
363/**
364 * struct nilfs_segment_summary - segment summary
365 * @ss_datasum: checksum of data
366 * @ss_sumsum: checksum of segment summary
367 * @ss_magic: magic number
368 * @ss_bytes: size of this structure in bytes
369 * @ss_flags: flags
370 * @ss_seq: sequence number
371 * @ss_create: creation timestamp
372 * @ss_next: next segment
373 * @ss_nblocks: number of blocks
374 * @ss_nfinfo: number of finfo structures
375 * @ss_sumbytes: total size of segment summary in bytes
376 * @ss_pad: padding
377 */
378struct nilfs_segment_summary {
379 __le32 ss_datasum;
380 __le32 ss_sumsum;
381 __le32 ss_magic;
382 __le16 ss_bytes;
383 __le16 ss_flags;
384 __le64 ss_seq;
385 __le64 ss_create;
386 __le64 ss_next;
387 __le32 ss_nblocks;
388 __le32 ss_nfinfo;
389 __le32 ss_sumbytes;
390 __le32 ss_pad;
391 /* array of finfo structures */
392};
393
394#define NILFS_SEGSUM_MAGIC 0x1eaffa11 /* segment summary magic number */
395
396/*
397 * Segment summary flags
398 */
399#define NILFS_SS_LOGBGN 0x0001 /* begins a logical segment */
400#define NILFS_SS_LOGEND 0x0002 /* ends a logical segment */
401#define NILFS_SS_SR 0x0004 /* has super root */
402#define NILFS_SS_SYNDT 0x0008 /* includes data only updates */
403#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */
404
405/**
406 * struct nilfs_palloc_group_desc - block group descriptor
407 * @pg_nfrees: number of free entries in block group
408 */
409struct nilfs_palloc_group_desc {
410 __le32 pg_nfrees;
411};
412
413/**
414 * struct nilfs_dat_entry - disk address translation entry
415 * @de_blocknr: block number
416 * @de_start: start checkpoint number
417 * @de_end: end checkpoint number
418 * @de_rsv: reserved for future use
419 */
420struct nilfs_dat_entry {
421 __le64 de_blocknr;
422 __le64 de_start;
423 __le64 de_end;
424 __le64 de_rsv;
425};
426
427/**
428 * struct nilfs_dat_group_desc - block group descriptor
429 * @dg_nfrees: number of free virtual block numbers in block group
430 */
431struct nilfs_dat_group_desc {
432 __le32 dg_nfrees;
433};
434
435
436/**
437 * struct nilfs_snapshot_list - snapshot list
438 * @ssl_next: next checkpoint number on snapshot list
439 * @ssl_prev: previous checkpoint number on snapshot list
440 */
441struct nilfs_snapshot_list {
442 __le64 ssl_next;
443 __le64 ssl_prev;
444};
445
446/**
447 * struct nilfs_checkpoint - checkpoint structure
448 * @cp_flags: flags
449 * @cp_checkpoints_count: checkpoints count in a block
450 * @cp_snapshot_list: snapshot list
451 * @cp_cno: checkpoint number
452 * @cp_create: creation timestamp
453 * @cp_nblk_inc: number of blocks incremented by this checkpoint
454 * @cp_inodes_count: inodes count
455 * @cp_blocks_count: blocks count
456 * @cp_ifile_inode: inode of ifile
457 */
458struct nilfs_checkpoint {
459 __le32 cp_flags;
460 __le32 cp_checkpoints_count;
461 struct nilfs_snapshot_list cp_snapshot_list;
462 __le64 cp_cno;
463 __le64 cp_create;
464 __le64 cp_nblk_inc;
465 __le64 cp_inodes_count;
466 __le64 cp_blocks_count; /* Reserved (might be deleted) */
467
468 /* Do not change the byte offset of ifile inode.
469 To preserve the compatibility of the disk format,
470 additional fields should be added after cp_ifile_inode. */
471 struct nilfs_inode cp_ifile_inode;
472};
473
474/* checkpoint flags */
475enum {
476 NILFS_CHECKPOINT_SNAPSHOT,
477 NILFS_CHECKPOINT_INVALID,
478 NILFS_CHECKPOINT_SKETCH,
479 NILFS_CHECKPOINT_MINOR,
480};
481
482#define NILFS_CHECKPOINT_FNS(flag, name) \
483static inline void \
484nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \
485{ \
486 cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \
487 (1UL << NILFS_CHECKPOINT_##flag)); \
488} \
489static inline void \
490nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \
491{ \
492 cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \
493 ~(1UL << NILFS_CHECKPOINT_##flag)); \
494} \
495static inline int \
496nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \
497{ \
498 return !!(le32_to_cpu(cp->cp_flags) & \
499 (1UL << NILFS_CHECKPOINT_##flag)); \
500}
501
502NILFS_CHECKPOINT_FNS(SNAPSHOT, snapshot)
503NILFS_CHECKPOINT_FNS(INVALID, invalid)
504NILFS_CHECKPOINT_FNS(MINOR, minor)
505
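NILFS_CHECKPOINT_FNS() stamps out set/clear/test helpers for one bit of the little-endian cp_flags word; each invocation above generates three inline functions. A stripped-down userspace version of the same generator pattern, operating on a plain uint32_t flags field (no le32 conversion here; the demo_* names are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct demo_checkpoint { uint32_t flags; };

enum { DEMO_SNAPSHOT, DEMO_INVALID, DEMO_MINOR };

/* One macro invocation emits set_/clear_/test helpers for one flag bit. */
#define DEMO_CHECKPOINT_FNS(flag, name)                              \
static inline void demo_set_##name(struct demo_checkpoint *cp)      \
{ cp->flags |= (1UL << DEMO_##flag); }                               \
static inline void demo_clear_##name(struct demo_checkpoint *cp)    \
{ cp->flags &= ~(1UL << DEMO_##flag); }                              \
static inline int demo_##name(const struct demo_checkpoint *cp)     \
{ return !!(cp->flags & (1UL << DEMO_##flag)); }

DEMO_CHECKPOINT_FNS(SNAPSHOT, snapshot)
DEMO_CHECKPOINT_FNS(MINOR, minor)

int main(void)
{
    struct demo_checkpoint cp = { 0 };
    demo_set_snapshot(&cp);
    printf("snapshot=%d minor=%d\n", demo_snapshot(&cp), demo_minor(&cp)); /* 1 0 */
    demo_clear_snapshot(&cp);
    printf("snapshot=%d\n", demo_snapshot(&cp));                           /* 0 */
    return 0;
}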
506/**
507 * struct nilfs_cpinfo - checkpoint information
508 * @ci_flags: flags
509 * @ci_pad: padding
510 * @ci_cno: checkpoint number
511 * @ci_create: creation timestamp
512 * @ci_nblk_inc: number of blocks incremented by this checkpoint
513 * @ci_inodes_count: inodes count
514 * @ci_blocks_count: blocks count
515 * @ci_next: next checkpoint number in snapshot list
516 */
517struct nilfs_cpinfo {
518 __u32 ci_flags;
519 __u32 ci_pad;
520 __u64 ci_cno;
521 __u64 ci_create;
522 __u64 ci_nblk_inc;
523 __u64 ci_inodes_count;
524 __u64 ci_blocks_count;
525 __u64 ci_next;
526};
527
528#define NILFS_CPINFO_FNS(flag, name) \
529static inline int \
530nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \
531{ \
532 return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \
533}
534
535NILFS_CPINFO_FNS(SNAPSHOT, snapshot)
536NILFS_CPINFO_FNS(INVALID, invalid)
537NILFS_CPINFO_FNS(MINOR, minor)
538
539
540/**
541 * struct nilfs_cpfile_header - checkpoint file header
542 * @ch_ncheckpoints: number of checkpoints
543 * @ch_nsnapshots: number of snapshots
544 * @ch_snapshot_list: snapshot list
545 */
546struct nilfs_cpfile_header {
547 __le64 ch_ncheckpoints;
548 __le64 ch_nsnapshots;
549 struct nilfs_snapshot_list ch_snapshot_list;
550};
551
552#define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \
553 ((sizeof(struct nilfs_cpfile_header) + \
554 sizeof(struct nilfs_checkpoint) - 1) / \
555 sizeof(struct nilfs_checkpoint))
556
557/**
558 * struct nilfs_segment_usage - segment usage
559 * @su_lastmod: last modified timestamp
560 * @su_nblocks: number of blocks in segment
561 * @su_flags: flags
562 */
563struct nilfs_segment_usage {
564 __le64 su_lastmod;
565 __le32 su_nblocks;
566 __le32 su_flags;
567};
568
569/* segment usage flag */
570enum {
571 NILFS_SEGMENT_USAGE_ACTIVE,
572 NILFS_SEGMENT_USAGE_DIRTY,
573 NILFS_SEGMENT_USAGE_ERROR,
574
575 /* ... */
576};
577
578#define NILFS_SEGMENT_USAGE_FNS(flag, name) \
579static inline void \
580nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \
581{ \
582 su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \
583 (1UL << NILFS_SEGMENT_USAGE_##flag));\
584} \
585static inline void \
586nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \
587{ \
588 su->su_flags = \
589 cpu_to_le32(le32_to_cpu(su->su_flags) & \
590 ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \
591} \
592static inline int \
593nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \
594{ \
595 return !!(le32_to_cpu(su->su_flags) & \
596 (1UL << NILFS_SEGMENT_USAGE_##flag)); \
597}
598
599NILFS_SEGMENT_USAGE_FNS(ACTIVE, active)
600NILFS_SEGMENT_USAGE_FNS(DIRTY, dirty)
601NILFS_SEGMENT_USAGE_FNS(ERROR, error)
602
603static inline void
604nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
605{
606 su->su_lastmod = cpu_to_le64(0);
607 su->su_nblocks = cpu_to_le32(0);
608 su->su_flags = cpu_to_le32(0);
609}
610
611static inline int
612nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
613{
614 return !le32_to_cpu(su->su_flags);
615}
616
617/**
618 * struct nilfs_sufile_header - segment usage file header
619 * @sh_ncleansegs: number of clean segments
620 * @sh_ndirtysegs: number of dirty segments
621 * @sh_last_alloc: last allocated segment number
622 */
623struct nilfs_sufile_header {
624 __le64 sh_ncleansegs;
625 __le64 sh_ndirtysegs;
626 __le64 sh_last_alloc;
627 /* ... */
628};
629
630#define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \
631 ((sizeof(struct nilfs_sufile_header) + \
632 sizeof(struct nilfs_segment_usage) - 1) / \
633 sizeof(struct nilfs_segment_usage))
634
635/**
636 * struct nilfs_suinfo - segment usage information
637 * @sui_lastmod: last modified timestamp
638 * @sui_nblocks: number of blocks in segment
639 * @sui_flags: flags
640 */
641struct nilfs_suinfo {
642 __u64 sui_lastmod;
643 __u32 sui_nblocks;
644 __u32 sui_flags;
645};
646
647#define NILFS_SUINFO_FNS(flag, name) \
648static inline int \
649nilfs_suinfo_##name(const struct nilfs_suinfo *si) \
650{ \
651 return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag); \
652}
653
654NILFS_SUINFO_FNS(ACTIVE, active)
655NILFS_SUINFO_FNS(DIRTY, dirty)
656NILFS_SUINFO_FNS(ERROR, error)
657
658static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
659{
660 return !si->sui_flags;
661}
662
663/* ioctl */
664enum {
665 NILFS_CHECKPOINT,
666 NILFS_SNAPSHOT,
667};
668
669/**
670 * struct nilfs_cpmode - change checkpoint mode structure
671 * @cm_cno: checkpoint number
672 * @cm_mode: checkpoint mode (NILFS_CHECKPOINT or NILFS_SNAPSHOT)
673 */
674struct nilfs_cpmode {
675 __u64 cm_cno;
676 __u32 cm_mode;
677 __u32 cm_pad;
678};
679
680/**
681 * struct nilfs_argv - argument vector
682 * @v_base:
683 * @v_nmembs:
684 * @v_size:
685 * @v_flags:
686 * @v_index:
687 */
688struct nilfs_argv {
689 __u64 v_base;
690 __u32 v_nmembs; /* number of members */
691 __u16 v_size; /* size of members */
692 __u16 v_flags;
693 __u64 v_index;
694};
695
696/**
697 * struct nilfs_period -
698 * @p_start:
699 * @p_end:
700 */
701struct nilfs_period {
702 __u64 p_start;
703 __u64 p_end;
704};
705
706/**
707 * struct nilfs_cpstat -
708 * @cs_cno: checkpoint number
709 * @cs_ncps: number of checkpoints
710 * @cs_nsss: number of snapshots
711 */
712struct nilfs_cpstat {
713 __u64 cs_cno;
714 __u64 cs_ncps;
715 __u64 cs_nsss;
716};
717
718/**
719 * struct nilfs_sustat -
720 * @ss_nsegs: number of segments
721 * @ss_ncleansegs: number of clean segments
722 * @ss_ndirtysegs: number of dirty segments
723 * @ss_ctime: creation time of the last segment
724 * @ss_nongc_ctime: creation time of the last segment not for GC
725 * @ss_prot_seq: least sequence number of segments which must not be reclaimed
726 */
727struct nilfs_sustat {
728 __u64 ss_nsegs;
729 __u64 ss_ncleansegs;
730 __u64 ss_ndirtysegs;
731 __u64 ss_ctime;
732 __u64 ss_nongc_ctime;
733 __u64 ss_prot_seq;
734};
735
736/**
737 * struct nilfs_vinfo - virtual block number information
738 * @vi_vblocknr:
739 * @vi_start:
740 * @vi_end:
741 * @vi_blocknr:
742 */
743struct nilfs_vinfo {
744 __u64 vi_vblocknr;
745 __u64 vi_start;
746 __u64 vi_end;
747 __u64 vi_blocknr;
748};
749
750/**
751 * struct nilfs_vdesc -
752 */
753struct nilfs_vdesc {
754 __u64 vd_ino;
755 __u64 vd_cno;
756 __u64 vd_vblocknr;
757 struct nilfs_period vd_period;
758 __u64 vd_blocknr;
759 __u64 vd_offset;
760 __u32 vd_flags;
761 __u32 vd_pad;
762};
763
764/**
765 * struct nilfs_bdesc -
766 */
767struct nilfs_bdesc {
768 __u64 bd_ino;
769 __u64 bd_oblocknr;
770 __u64 bd_blocknr;
771 __u64 bd_offset;
772 __u32 bd_level;
773 __u32 bd_pad;
774};
775
776#define NILFS_IOCTL_IDENT 'n'
777
778#define NILFS_IOCTL_CHANGE_CPMODE \
779 _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode)
780#define NILFS_IOCTL_DELETE_CHECKPOINT \
781 _IOW(NILFS_IOCTL_IDENT, 0x81, __u64)
782#define NILFS_IOCTL_GET_CPINFO \
783 _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv)
784#define NILFS_IOCTL_GET_CPSTAT \
785 _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat)
786#define NILFS_IOCTL_GET_SUINFO \
787 _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv)
788#define NILFS_IOCTL_GET_SUSTAT \
789 _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat)
790#define NILFS_IOCTL_GET_VINFO \
791 _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv)
792#define NILFS_IOCTL_GET_BDESCS \
793 _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv)
794#define NILFS_IOCTL_CLEAN_SEGMENTS \
795 _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5])
796#define NILFS_IOCTL_SYNC \
797 _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64)
798#define NILFS_IOCTL_RESIZE \
799 _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64)
800
801#endif /* _LINUX_NILFS_FS_H */
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index afad7dec1b36..7b370c7cfeff 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -8,6 +8,7 @@ struct mnt_namespace;
8struct uts_namespace; 8struct uts_namespace;
9struct ipc_namespace; 9struct ipc_namespace;
10struct pid_namespace; 10struct pid_namespace;
11struct fs_struct;
11 12
12/* 13/*
13 * A structure to contain pointers to all per-process 14 * A structure to contain pointers to all per-process
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
new file mode 100644
index 000000000000..b0638fd91e92
--- /dev/null
+++ b/include/linux/page-debug-flags.h
@@ -0,0 +1,30 @@
1#ifndef LINUX_PAGE_DEBUG_FLAGS_H
2#define LINUX_PAGE_DEBUG_FLAGS_H
3
4/*
5 * page->debug_flags bits:
6 *
7 * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to
8 * implement generic debug pagealloc feature. The pages are filled with
9 * poison patterns and set this flag after free_pages(). The poisoned
10 * pages are verified whether the patterns are not corrupted and clear
11 * the flag before alloc_pages().
12 */
13
14enum page_debug_flags {
15 PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */
16};
17
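The comment above describes the debug-pagealloc idea: on free, fill the page with a known poison pattern and mark it; on the next allocation, verify the pattern survived (catching writes to freed memory) and clear the mark. A toy userspace illustration of that check on a byte buffer; 0xaa is an arbitrary pattern for this sketch, and the demo_* names are hypothetical:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define POISON_BYTE 0xaa          /* arbitrary pattern for this sketch */
#define BUF_SIZE    64

static unsigned char buf[BUF_SIZE];
static bool poisoned;             /* stands in for PAGE_DEBUG_FLAG_POISON */

static void demo_free(void)
{
    memset(buf, POISON_BYTE, sizeof(buf));   /* poison on free */
    poisoned = true;
}

static bool demo_alloc(void)
{
    if (poisoned) {
        for (size_t i = 0; i < sizeof(buf); i++)
            if (buf[i] != POISON_BYTE) {
                fprintf(stderr, "corruption at byte %zu\n", i);
                return false;                /* someone wrote to freed memory */
            }
        poisoned = false;                    /* clear the flag before reuse */
    }
    return true;
}

int main(void)
{
    demo_free();
    buf[10] = 0;                             /* simulated use-after-free write */
    printf("alloc ok: %d\n", demo_alloc());  /* 0: corruption detected */
    return 0;
}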
18/*
19 * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably
20 * gets turned off when no debug features are enabling it!
21 */
22
23#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
24#if !defined(CONFIG_PAGE_POISONING) \
25/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
26#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
27#endif
28#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */
29
30#endif /* LINUX_PAGE_DEBUG_FLAGS_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 219a523ecdb0..62214c7d2d93 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -82,6 +82,7 @@ enum pageflags {
82 PG_arch_1, 82 PG_arch_1,
83 PG_reserved, 83 PG_reserved,
84 PG_private, /* If pagecache, has fs-private data */ 84 PG_private, /* If pagecache, has fs-private data */
85 PG_private_2, /* If pagecache, has fs aux data */
85 PG_writeback, /* Page is under writeback */ 86 PG_writeback, /* Page is under writeback */
86#ifdef CONFIG_PAGEFLAGS_EXTENDED 87#ifdef CONFIG_PAGEFLAGS_EXTENDED
87 PG_head, /* A head page */ 88 PG_head, /* A head page */
@@ -96,6 +97,8 @@ enum pageflags {
96 PG_swapbacked, /* Page is backed by RAM/swap */ 97 PG_swapbacked, /* Page is backed by RAM/swap */
97#ifdef CONFIG_UNEVICTABLE_LRU 98#ifdef CONFIG_UNEVICTABLE_LRU
98 PG_unevictable, /* Page is "unevictable" */ 99 PG_unevictable, /* Page is "unevictable" */
100#endif
101#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
99 PG_mlocked, /* Page is vma mlocked */ 102 PG_mlocked, /* Page is vma mlocked */
100#endif 103#endif
101#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 104#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -106,6 +109,12 @@ enum pageflags {
106 /* Filesystems */ 109 /* Filesystems */
107 PG_checked = PG_owner_priv_1, 110 PG_checked = PG_owner_priv_1,
108 111
112 /* Two page bits are conscripted by FS-Cache to maintain local caching
113 * state. These bits are set on pages belonging to the netfs's inodes
114 * when those inodes are being locally cached.
115 */
116 PG_fscache = PG_private_2, /* page backed by cache */
117
109 /* XEN */ 118 /* XEN */
110 PG_pinned = PG_owner_priv_1, 119 PG_pinned = PG_owner_priv_1,
111 PG_savepinned = PG_dirty, 120 PG_savepinned = PG_dirty,
@@ -180,7 +189,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
180 189
181struct page; /* forward declaration */ 190struct page; /* forward declaration */
182 191
183TESTPAGEFLAG(Locked, locked) 192TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked)
184PAGEFLAG(Error, error) 193PAGEFLAG(Error, error)
185PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) 194PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
186PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) 195PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
@@ -192,8 +201,6 @@ PAGEFLAG(Checked, checked) /* Used by some filesystems */
192PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ 201PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
193PAGEFLAG(SavePinned, savepinned); /* Xen */ 202PAGEFLAG(SavePinned, savepinned); /* Xen */
194PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) 203PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
195PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
196 __SETPAGEFLAG(Private, private)
197PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) 204PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
198 205
199__PAGEFLAG(SlobPage, slob_page) 206__PAGEFLAG(SlobPage, slob_page)
@@ -203,6 +210,16 @@ __PAGEFLAG(SlubFrozen, slub_frozen)
203__PAGEFLAG(SlubDebug, slub_debug) 210__PAGEFLAG(SlubDebug, slub_debug)
204 211
205/* 212/*
213 * Private page markings that may be used by the filesystem that owns the page
214 * for its own purposes.
215 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
216 */
217PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
218 __CLEARPAGEFLAG(Private, private)
219PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
220PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
221
222/*
206 * Only test-and-set exist for PG_writeback. The unconditional operators are 223 * Only test-and-set exist for PG_writeback. The unconditional operators are
207 * risky: they bypass page accounting. 224 * risky: they bypass page accounting.
208 */ 225 */
@@ -234,20 +251,20 @@ PAGEFLAG_FALSE(SwapCache)
234#ifdef CONFIG_UNEVICTABLE_LRU 251#ifdef CONFIG_UNEVICTABLE_LRU
235PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) 252PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
236 TESTCLEARFLAG(Unevictable, unevictable) 253 TESTCLEARFLAG(Unevictable, unevictable)
254#else
255PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
256 SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
257 __CLEARPAGEFLAG_NOOP(Unevictable)
258#endif
237 259
260#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
238#define MLOCK_PAGES 1 261#define MLOCK_PAGES 1
239PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) 262PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
240 TESTSCFLAG(Mlocked, mlocked) 263 TESTSCFLAG(Mlocked, mlocked)
241
242#else 264#else
243
244#define MLOCK_PAGES 0 265#define MLOCK_PAGES 0
245PAGEFLAG_FALSE(Mlocked) 266PAGEFLAG_FALSE(Mlocked)
246 SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) 267 SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
247
248PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
249 SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
250 __CLEARPAGEFLAG_NOOP(Unevictable)
251#endif 268#endif
252 269
253#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 270#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -367,9 +384,13 @@ static inline void __ClearPageTail(struct page *page)
367 384
368#ifdef CONFIG_UNEVICTABLE_LRU 385#ifdef CONFIG_UNEVICTABLE_LRU
369#define __PG_UNEVICTABLE (1 << PG_unevictable) 386#define __PG_UNEVICTABLE (1 << PG_unevictable)
370#define __PG_MLOCKED (1 << PG_mlocked)
371#else 387#else
372#define __PG_UNEVICTABLE 0 388#define __PG_UNEVICTABLE 0
389#endif
390
391#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
392#define __PG_MLOCKED (1 << PG_mlocked)
393#else
373#define __PG_MLOCKED 0 394#define __PG_MLOCKED 0
374#endif 395#endif
375 396
@@ -378,9 +399,10 @@ static inline void __ClearPageTail(struct page *page)
378 * these flags set. It they are, there is a problem. 399 * these flags set. It they are, there is a problem.
379 */ 400 */
380#define PAGE_FLAGS_CHECK_AT_FREE \ 401#define PAGE_FLAGS_CHECK_AT_FREE \
381 (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ 402 (1 << PG_lru | 1 << PG_locked | \
382 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ 403 1 << PG_private | 1 << PG_private_2 | \
383 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ 404 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
405 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
384 __PG_UNEVICTABLE | __PG_MLOCKED) 406 __PG_UNEVICTABLE | __PG_MLOCKED)
385 407
386/* 408/*
@@ -391,4 +413,16 @@ static inline void __ClearPageTail(struct page *page)
391#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) 413#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
392 414
393#endif /* !__GENERATING_BOUNDS_H */ 415#endif /* !__GENERATING_BOUNDS_H */
416
417/**
418 * page_has_private - Determine if page has private stuff
419 * @page: The page to be checked
420 *
421 * Determine if a page has private stuff, indicating that release routines
422 * should be invoked upon it.
423 */
424#define page_has_private(page) \
425 ((page)->flags & ((1 << PG_private) | \
426 (1 << PG_private_2)))
427
394#endif /* PAGE_FLAGS_H */ 428#endif /* PAGE_FLAGS_H */
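
The page-flags.h hunks above add PG_private_2 (aliased as PG_fscache) and the page_has_private() helper. A minimal sketch of how a filesystem might consult them follows; the helper name and the clearing policy are illustrative assumptions, only the macros come from the header itself.

#include <linux/mm.h>
#include <linux/page-flags.h>

/*
 * Sketch only: either PG_private (fs-private data) or PG_private_2
 * (fs aux data, aliased as PG_fscache above) makes page_has_private()
 * true, so release routines get a chance to run.
 */
static int example_is_releasable(struct page *page)
{
	if (!page_has_private(page))
		return 1;			/* nothing attached at all */

	if (PagePrivate2(page))
		ClearPagePrivate2(page);	/* drop aux (cache) state */

	return !PagePrivate(page);		/* PG_private needs ->releasepage() */
}
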
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 602cc1fdee90..7339c7bf7331 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -91,24 +91,23 @@ static inline void page_cgroup_init(void)
91 91
92#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 92#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
93#include <linux/swap.h> 93#include <linux/swap.h>
94extern struct mem_cgroup * 94extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
95swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem); 95extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
96extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent);
97extern int swap_cgroup_swapon(int type, unsigned long max_pages); 96extern int swap_cgroup_swapon(int type, unsigned long max_pages);
98extern void swap_cgroup_swapoff(int type); 97extern void swap_cgroup_swapoff(int type);
99#else 98#else
100#include <linux/swap.h> 99#include <linux/swap.h>
101 100
102static inline 101static inline
103struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem) 102unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
104{ 103{
105 return NULL; 104 return 0;
106} 105}
107 106
108static inline 107static inline
109struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent) 108unsigned short lookup_swap_cgroup(swp_entry_t ent)
110{ 109{
111 return NULL; 110 return 0;
112} 111}
113 112
114static inline int 113static inline int
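
The page_cgroup.h change above switches the swap-cgroup record from a mem_cgroup pointer to a compact unsigned short id. A hedged sketch of the record/lookup round-trip; the id value and the calling context are invented, and the meaning of the value returned by swap_cgroup_record() is deliberately not assumed.

#include <linux/swap.h>
#include <linux/page_cgroup.h>

/*
 * Illustration of the id-based interface.  With
 * CONFIG_CGROUP_MEM_RES_CTLR_SWAP disabled, both calls are the stubs
 * above and simply return 0.
 */
static void swap_cgroup_example(swp_entry_t ent, unsigned short id)
{
	unsigned short prev, cur;

	prev = swap_cgroup_record(ent, id);	/* return value not interpreted here */
	cur  = lookup_swap_cgroup(ent);		/* id now recorded for 'ent' */

	(void)prev;
	(void)cur;
}
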
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 01ca0856caff..34da5230faab 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -18,9 +18,14 @@
18 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page 18 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
19 * allocation mode flags. 19 * allocation mode flags.
20 */ 20 */
21#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ 21enum mapping_flags {
22#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ 22 AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
23#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ 23 AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
24 AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
25#ifdef CONFIG_UNEVICTABLE_LRU
26 AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
27#endif
28};
24 29
25static inline void mapping_set_error(struct address_space *mapping, int error) 30static inline void mapping_set_error(struct address_space *mapping, int error)
26{ 31{
@@ -33,7 +38,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
33} 38}
34 39
35#ifdef CONFIG_UNEVICTABLE_LRU 40#ifdef CONFIG_UNEVICTABLE_LRU
36#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */
37 41
38static inline void mapping_set_unevictable(struct address_space *mapping) 42static inline void mapping_set_unevictable(struct address_space *mapping)
39{ 43{
@@ -380,6 +384,11 @@ static inline void wait_on_page_writeback(struct page *page)
380extern void end_page_writeback(struct page *page); 384extern void end_page_writeback(struct page *page);
381 385
382/* 386/*
387 * Add an arbitrary waiter to a page's wait queue
388 */
389extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
390
391/*
383 * Fault a userspace page into pagetables. Return non-zero on a fault. 392 * Fault a userspace page into pagetables. Return non-zero on a fault.
384 * 393 *
385 * This assumes that two userspace pages are always sufficient. That's 394 * This assumes that two userspace pages are always sufficient. That's
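
pagemap.h above turns the AS_* bits into an enum and exports add_page_wait_queue() for attaching arbitrary waiters to a page. A sketch of one plausible use, assuming a custom monitor structure and wake callback that are not part of this patch:

#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/wait.h>

/* Hypothetical monitor: its callback runs whenever the page's wait
 * queue is woken (unlock, end of writeback, ...). */
struct page_monitor {
	wait_queue_t	wait;
	struct page	*page;
};

static int page_monitor_wake(wait_queue_t *wait, unsigned mode,
			     int sync, void *key)
{
	struct page_monitor *mon = container_of(wait, struct page_monitor, wait);

	/* react to the event here (this sketch does nothing) */
	(void)mon;
	return 0;
}

static void monitor_page(struct page_monitor *mon, struct page *page)
{
	mon->page = page;
	init_waitqueue_func_entry(&mon->wait, page_monitor_wake);
	add_page_wait_queue(page, &mon->wait);
}
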
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 7b2886fa7fdc..bab82f4c571c 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -24,7 +24,6 @@ void __pagevec_release(struct pagevec *pvec);
24void __pagevec_free(struct pagevec *pvec); 24void __pagevec_free(struct pagevec *pvec);
25void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru); 25void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
26void pagevec_strip(struct pagevec *pvec); 26void pagevec_strip(struct pagevec *pvec);
27void pagevec_swap_free(struct pagevec *pvec);
28unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, 27unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
29 pgoff_t start, unsigned nr_pages); 28 pgoff_t start, unsigned nr_pages);
30unsigned pagevec_lookup_tag(struct pagevec *pvec, 29unsigned pagevec_lookup_tag(struct pagevec *pvec,
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index ea8c6d84996d..cc1767f5cca8 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -228,10 +228,11 @@ extern void parport_pc_release_resources(struct parport *p);
228extern int parport_pc_claim_resources(struct parport *p); 228extern int parport_pc_claim_resources(struct parport *p);
229 229
230/* PCMCIA code will want to get us to look at a port. Provide a mechanism. */ 230/* PCMCIA code will want to get us to look at a port. Provide a mechanism. */
231extern struct parport *parport_pc_probe_port (unsigned long base, 231extern struct parport *parport_pc_probe_port(unsigned long base,
232 unsigned long base_hi, 232 unsigned long base_hi,
233 int irq, int dma, 233 int irq, int dma,
234 struct device *dev); 234 struct device *dev,
235extern void parport_pc_unregister_port (struct parport *p); 235 int irqflags);
236extern void parport_pc_unregister_port(struct parport *p);
236 237
237#endif 238#endif
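
parport_pc_probe_port() above gains an irqflags argument. A hypothetical caller might pass IRQF_SHARED when the interrupt line is shared with other devices; the base addresses, IRQ number and DMA setting below are placeholders only.

#include <linux/interrupt.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>

/* Placeholder base/IRQ values; IRQF_SHARED is just one plausible flag. */
static struct parport *probe_example(struct device *dev)
{
	return parport_pc_probe_port(0x378, 0x778, 7, PARPORT_DMA_NONE,
				     dev, IRQF_SHARED);
}
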
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 042c166f65d5..092e82e0048c 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -10,72 +10,25 @@
10 10
11#include <linux/acpi.h> 11#include <linux/acpi.h>
12 12
13#define OSC_QUERY_TYPE 0
14#define OSC_SUPPORT_TYPE 1
15#define OSC_CONTROL_TYPE 2
16#define OSC_SUPPORT_MASKS 0x1f
17
18/*
19 * _OSC DW0 Definition
20 */
21#define OSC_QUERY_ENABLE 1
22#define OSC_REQUEST_ERROR 2
23#define OSC_INVALID_UUID_ERROR 4
24#define OSC_INVALID_REVISION_ERROR 8
25#define OSC_CAPABILITIES_MASK_ERROR 16
26
27/*
28 * _OSC DW1 Definition (OS Support Fields)
29 */
30#define OSC_EXT_PCI_CONFIG_SUPPORT 1
31#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
32#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
33#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
34#define OSC_MSI_SUPPORT 16
35
36/*
37 * _OSC DW1 Definition (OS Control Fields)
38 */
39#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
40#define OSC_SHPC_NATIVE_HP_CONTROL 2
41#define OSC_PCI_EXPRESS_PME_CONTROL 4
42#define OSC_PCI_EXPRESS_AER_CONTROL 8
43#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
44
45#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
46 OSC_SHPC_NATIVE_HP_CONTROL | \
47 OSC_PCI_EXPRESS_PME_CONTROL | \
48 OSC_PCI_EXPRESS_AER_CONTROL | \
49 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
50
51#ifdef CONFIG_ACPI 13#ifdef CONFIG_ACPI
52extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
53int pci_acpi_osc_support(acpi_handle handle, u32 flags);
54static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 14static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
55{ 15{
56 /* Find root host bridge */ 16 struct pci_bus *pbus = pdev->bus;
57 while (pdev->bus->self) 17 /* Find a PCI root bus */
58 pdev = pdev->bus->self; 18 while (pbus->parent)
59 19 pbus = pbus->parent;
60 return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus), 20 return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
61 pdev->bus->number); 21 pbus->number);
62} 22}
63 23
64static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) 24static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
65{ 25{
66 int seg = pci_domain_nr(pbus), busnr = pbus->number; 26 if (pbus->parent)
67 struct pci_dev *bridge = pbus->self; 27 return DEVICE_ACPI_HANDLE(&(pbus->self->dev));
68 if (bridge) 28 return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
69 return DEVICE_ACPI_HANDLE(&(bridge->dev)); 29 pbus->number);
70 return acpi_get_pci_rootbridge_handle(seg, busnr);
71} 30}
72#else 31#else
73#if !defined(AE_ERROR)
74typedef u32 acpi_status;
75#define AE_ERROR (acpi_status) (0x0001)
76#endif
77static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
78{return AE_ERROR;}
79static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 32static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
80{ return NULL; } 33{ return NULL; }
81#endif 34#endif
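
The rewritten acpi_find_root_bridge_handle() above walks pci_bus->parent up to the root bus instead of chasing pdev->bus->self. The same walk, pulled out as a stand-alone helper purely for illustration (the function name is ours):

#include <linux/pci.h>

static struct pci_bus *find_root_bus(struct pci_dev *pdev)
{
	struct pci_bus *pbus = pdev->bus;

	while (pbus->parent)		/* climb until the root bus */
		pbus = pbus->parent;

	return pbus;
}

The stop condition is the same test that the pci_is_root_bus() helper added to pci.h below wraps up.
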
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 7bd624bfdcfd..72698d89e767 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -52,6 +52,7 @@
52#include <asm/atomic.h> 52#include <asm/atomic.h>
53#include <linux/device.h> 53#include <linux/device.h>
54#include <linux/io.h> 54#include <linux/io.h>
55#include <linux/irqreturn.h>
55 56
56/* Include the ID list */ 57/* Include the ID list */
57#include <linux/pci_ids.h> 58#include <linux/pci_ids.h>
@@ -93,6 +94,12 @@ enum {
93 /* #6: expansion ROM resource */ 94 /* #6: expansion ROM resource */
94 PCI_ROM_RESOURCE, 95 PCI_ROM_RESOURCE,
95 96
97 /* device specific resources */
98#ifdef CONFIG_PCI_IOV
99 PCI_IOV_RESOURCES,
100 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
101#endif
102
96 /* resources assigned to buses behind the bridge */ 103 /* resources assigned to buses behind the bridge */
97#define PCI_BRIDGE_RESOURCE_NUM 4 104#define PCI_BRIDGE_RESOURCE_NUM 4
98 105
@@ -180,6 +187,7 @@ struct pci_cap_saved_state {
180 187
181struct pcie_link_state; 188struct pcie_link_state;
182struct pci_vpd; 189struct pci_vpd;
190struct pci_sriov;
183 191
184/* 192/*
185 * The pci_dev structure is used to describe PCI devices. 193 * The pci_dev structure is used to describe PCI devices.
@@ -257,6 +265,8 @@ struct pci_dev {
257 unsigned int is_managed:1; 265 unsigned int is_managed:1;
258 unsigned int is_pcie:1; 266 unsigned int is_pcie:1;
259 unsigned int state_saved:1; 267 unsigned int state_saved:1;
268 unsigned int is_physfn:1;
269 unsigned int is_virtfn:1;
260 pci_dev_flags_t dev_flags; 270 pci_dev_flags_t dev_flags;
261 atomic_t enable_cnt; /* pci_enable_device has been called */ 271 atomic_t enable_cnt; /* pci_enable_device has been called */
262 272
@@ -270,6 +280,12 @@ struct pci_dev {
270 struct list_head msi_list; 280 struct list_head msi_list;
271#endif 281#endif
272 struct pci_vpd *vpd; 282 struct pci_vpd *vpd;
283#ifdef CONFIG_PCI_IOV
284 union {
285 struct pci_sriov *sriov; /* SR-IOV capability related */
286 struct pci_dev *physfn; /* the PF this VF is associated with */
287 };
288#endif
273}; 289};
274 290
275extern struct pci_dev *alloc_pci_dev(void); 291extern struct pci_dev *alloc_pci_dev(void);
@@ -341,6 +357,15 @@ struct pci_bus {
341#define pci_bus_b(n) list_entry(n, struct pci_bus, node) 357#define pci_bus_b(n) list_entry(n, struct pci_bus, node)
342#define to_pci_bus(n) container_of(n, struct pci_bus, dev) 358#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
343 359
360/*
361 * Returns true if the pci bus is root (behind host-pci bridge),
362 * false otherwise
363 */
364static inline bool pci_is_root_bus(struct pci_bus *pbus)
365{
366 return !(pbus->parent);
367}
368
344#ifdef CONFIG_PCI_MSI 369#ifdef CONFIG_PCI_MSI
345static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) 370static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
346{ 371{
@@ -528,7 +553,7 @@ void pcibios_update_irq(struct pci_dev *, int irq);
528/* Generic PCI functions used internally */ 553/* Generic PCI functions used internally */
529 554
530extern struct pci_bus *pci_find_bus(int domain, int busnr); 555extern struct pci_bus *pci_find_bus(int domain, int busnr);
531void pci_bus_add_devices(struct pci_bus *bus); 556void pci_bus_add_devices(const struct pci_bus *bus);
532struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, 557struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
533 struct pci_ops *ops, void *sysdata); 558 struct pci_ops *ops, void *sysdata);
534static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, 559static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
@@ -649,6 +674,11 @@ int __must_check pci_reenable_device(struct pci_dev *);
649int __must_check pcim_enable_device(struct pci_dev *pdev); 674int __must_check pcim_enable_device(struct pci_dev *pdev);
650void pcim_pin_device(struct pci_dev *pdev); 675void pcim_pin_device(struct pci_dev *pdev);
651 676
677static inline int pci_is_enabled(struct pci_dev *pdev)
678{
679 return (atomic_read(&pdev->enable_cnt) > 0);
680}
681
652static inline int pci_is_managed(struct pci_dev *pdev) 682static inline int pci_is_managed(struct pci_dev *pdev)
653{ 683{
654 return pdev->is_managed; 684 return pdev->is_managed;
@@ -689,6 +719,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
689/* Power management related routines */ 719/* Power management related routines */
690int pci_save_state(struct pci_dev *dev); 720int pci_save_state(struct pci_dev *dev);
691int pci_restore_state(struct pci_dev *dev); 721int pci_restore_state(struct pci_dev *dev);
722int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
692int pci_set_power_state(struct pci_dev *dev, pci_power_t state); 723int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
693pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); 724pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
694bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); 725bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
@@ -701,6 +732,9 @@ int pci_back_from_sleep(struct pci_dev *dev);
701 732
702/* Functions for PCI Hotplug drivers to use */ 733/* Functions for PCI Hotplug drivers to use */
703int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); 734int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
735#ifdef CONFIG_HOTPLUG
736unsigned int pci_rescan_bus(struct pci_bus *bus);
737#endif
704 738
705/* Vital product data routines */ 739/* Vital product data routines */
706ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 740ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
@@ -708,7 +742,7 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
708int pci_vpd_truncate(struct pci_dev *dev, size_t size); 742int pci_vpd_truncate(struct pci_dev *dev, size_t size);
709 743
710/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 744/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
711void pci_bus_assign_resources(struct pci_bus *bus); 745void pci_bus_assign_resources(const struct pci_bus *bus);
712void pci_bus_size_bridges(struct pci_bus *bus); 746void pci_bus_size_bridges(struct pci_bus *bus);
713int pci_claim_resource(struct pci_dev *, int); 747int pci_claim_resource(struct pci_dev *, int);
714void pci_assign_unassigned_resources(void); 748void pci_assign_unassigned_resources(void);
@@ -789,7 +823,7 @@ struct msix_entry {
789 823
790 824
791#ifndef CONFIG_PCI_MSI 825#ifndef CONFIG_PCI_MSI
792static inline int pci_enable_msi(struct pci_dev *dev) 826static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
793{ 827{
794 return -1; 828 return -1;
795} 829}
@@ -799,6 +833,10 @@ static inline void pci_msi_shutdown(struct pci_dev *dev)
799static inline void pci_disable_msi(struct pci_dev *dev) 833static inline void pci_disable_msi(struct pci_dev *dev)
800{ } 834{ }
801 835
836static inline int pci_msix_table_size(struct pci_dev *dev)
837{
838 return 0;
839}
802static inline int pci_enable_msix(struct pci_dev *dev, 840static inline int pci_enable_msix(struct pci_dev *dev,
803 struct msix_entry *entries, int nvec) 841 struct msix_entry *entries, int nvec)
804{ 842{
@@ -820,9 +858,10 @@ static inline int pci_msi_enabled(void)
820 return 0; 858 return 0;
821} 859}
822#else 860#else
823extern int pci_enable_msi(struct pci_dev *dev); 861extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
824extern void pci_msi_shutdown(struct pci_dev *dev); 862extern void pci_msi_shutdown(struct pci_dev *dev);
825extern void pci_disable_msi(struct pci_dev *dev); 863extern void pci_disable_msi(struct pci_dev *dev);
864extern int pci_msix_table_size(struct pci_dev *dev);
826extern int pci_enable_msix(struct pci_dev *dev, 865extern int pci_enable_msix(struct pci_dev *dev,
827 struct msix_entry *entries, int nvec); 866 struct msix_entry *entries, int nvec);
828extern void pci_msix_shutdown(struct pci_dev *dev); 867extern void pci_msix_shutdown(struct pci_dev *dev);
@@ -841,6 +880,8 @@ static inline int pcie_aspm_enabled(void)
841extern int pcie_aspm_enabled(void); 880extern int pcie_aspm_enabled(void);
842#endif 881#endif
843 882
883#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1)
884
844#ifdef CONFIG_HT_IRQ 885#ifdef CONFIG_HT_IRQ
845/* The functions a driver should call */ 886/* The functions a driver should call */
846int ht_create_irq(struct pci_dev *dev, int idx); 887int ht_create_irq(struct pci_dev *dev, int idx);
@@ -1194,5 +1235,23 @@ int pci_ext_cfg_avail(struct pci_dev *dev);
1194 1235
1195void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); 1236void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
1196 1237
1238#ifdef CONFIG_PCI_IOV
1239extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
1240extern void pci_disable_sriov(struct pci_dev *dev);
1241extern irqreturn_t pci_sriov_migration(struct pci_dev *dev);
1242#else
1243static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
1244{
1245 return -ENODEV;
1246}
1247static inline void pci_disable_sriov(struct pci_dev *dev)
1248{
1249}
1250static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev)
1251{
1252 return IRQ_NONE;
1253}
1254#endif
1255
1197#endif /* __KERNEL__ */ 1256#endif /* __KERNEL__ */
1198#endif /* LINUX_PCI_H */ 1257#endif /* LINUX_PCI_H */
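
pci.h above introduces the SR-IOV entry points pci_enable_sriov()/pci_disable_sriov(). A sketch of how a physical-function driver might call them from its probe/remove pair; the VF count and the surrounding driver glue are placeholders, not taken from this patch.

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_enable_sriov(pdev, 4);	/* -ENODEV without CONFIG_PCI_IOV */
	if (err)
		dev_warn(&pdev->dev, "SR-IOV not enabled: %d\n", err);

	return 0;
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_sriov(pdev);
	pci_disable_device(pdev);
}
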
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 2c9e8080da5e..ee98cd570885 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -526,6 +526,7 @@
526#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 526#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
527#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 527#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
528#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 528#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
529#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
529#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 530#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
530#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 531#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
531#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a 532#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a
@@ -943,6 +944,32 @@
943#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 944#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801
944#define PCI_DEVICE_ID_SUN_CASSINI 0xabba 945#define PCI_DEVICE_ID_SUN_CASSINI 0xabba
945 946
947#define PCI_VENDOR_ID_NI 0x1093
948#define PCI_DEVICE_ID_NI_PCI2322 0xd130
949#define PCI_DEVICE_ID_NI_PCI2324 0xd140
950#define PCI_DEVICE_ID_NI_PCI2328 0xd150
951#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190
952#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0
953#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0
954#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0
955#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0
956#define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1
957#define PCI_DEVICE_ID_NI_PCI2322I 0xd250
958#define PCI_DEVICE_ID_NI_PCI2324I 0xd270
959#define PCI_DEVICE_ID_NI_PCI23216 0xd2b0
960#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080
961#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db
962#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd
963#define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df
964#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2
965#define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4
966#define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6
967#define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7
968#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8
969#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea
970#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec
971#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee
972
946#define PCI_VENDOR_ID_CMD 0x1095 973#define PCI_VENDOR_ID_CMD 0x1095
947#define PCI_DEVICE_ID_CMD_643 0x0643 974#define PCI_DEVICE_ID_CMD_643 0x0643
948#define PCI_DEVICE_ID_CMD_646 0x0646 975#define PCI_DEVICE_ID_CMD_646 0x0646
@@ -2232,6 +2259,14 @@
2232#define PCI_DEVICE_ID_TDI_EHCI 0x0101 2259#define PCI_DEVICE_ID_TDI_EHCI 0x0101
2233 2260
2234#define PCI_VENDOR_ID_FREESCALE 0x1957 2261#define PCI_VENDOR_ID_FREESCALE 0x1957
2262#define PCI_DEVICE_ID_MPC8315E 0x00b4
2263#define PCI_DEVICE_ID_MPC8315 0x00b5
2264#define PCI_DEVICE_ID_MPC8314E 0x00b6
2265#define PCI_DEVICE_ID_MPC8314 0x00b7
2266#define PCI_DEVICE_ID_MPC8378E 0x00c4
2267#define PCI_DEVICE_ID_MPC8378 0x00c5
2268#define PCI_DEVICE_ID_MPC8377E 0x00c6
2269#define PCI_DEVICE_ID_MPC8377 0x00c7
2235#define PCI_DEVICE_ID_MPC8548E 0x0012 2270#define PCI_DEVICE_ID_MPC8548E 0x0012
2236#define PCI_DEVICE_ID_MPC8548 0x0013 2271#define PCI_DEVICE_ID_MPC8548 0x0013
2237#define PCI_DEVICE_ID_MPC8543E 0x0014 2272#define PCI_DEVICE_ID_MPC8543E 0x0014
@@ -2388,6 +2423,7 @@
2388#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c 2423#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c
2389#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 2424#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0
2390#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 2425#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1
2426#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2
2391#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 2427#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3
2392#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 2428#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5
2393#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 2429#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 027815b4635e..e4d08c1b2e0b 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -235,7 +235,7 @@
235#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ 235#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
236#define PCI_PM_CTRL 4 /* PM control and status register */ 236#define PCI_PM_CTRL 4 /* PM control and status register */
237#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ 237#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
238#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ 238#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */
239#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */ 239#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
240#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */ 240#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */
241#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */ 241#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */
@@ -375,6 +375,7 @@
375#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ 375#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
376#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ 376#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
377#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */ 377#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
378#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
378#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ 379#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
379#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ 380#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
380#define PCI_EXP_DEVCAP 4 /* Device capabilities */ 381#define PCI_EXP_DEVCAP 4 /* Device capabilities */
@@ -487,6 +488,8 @@
487#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */ 488#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
488#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ 489#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
489#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */ 490#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */
491#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
492#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
490 493
491/* Extended Capabilities (PCI-X 2.0 and Express) */ 494/* Extended Capabilities (PCI-X 2.0 and Express) */
492#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff) 495#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -498,6 +501,7 @@
498#define PCI_EXT_CAP_ID_DSN 3 501#define PCI_EXT_CAP_ID_DSN 3
499#define PCI_EXT_CAP_ID_PWR 4 502#define PCI_EXT_CAP_ID_PWR 4
500#define PCI_EXT_CAP_ID_ARI 14 503#define PCI_EXT_CAP_ID_ARI 14
504#define PCI_EXT_CAP_ID_SRIOV 16
501 505
502/* Advanced Error Reporting */ 506/* Advanced Error Reporting */
503#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ 507#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
@@ -615,4 +619,35 @@
615#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */ 619#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
616#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */ 620#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
617 621
622/* Single Root I/O Virtualization */
623#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
624#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
625#define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */
626#define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */
627#define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */
628#define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */
629#define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */
630#define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */
631#define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */
632#define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */
633#define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */
634#define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */
635#define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
636#define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */
637#define PCI_SRIOV_FUNC_LINK 0x12 /* Function Dependency Link */
638#define PCI_SRIOV_VF_OFFSET 0x14 /* First VF Offset */
639#define PCI_SRIOV_VF_STRIDE 0x16 /* Following VF Stride */
640#define PCI_SRIOV_VF_DID 0x1a /* VF Device ID */
641#define PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */
642#define PCI_SRIOV_SYS_PGSIZE 0x20 /* System Page Size */
643#define PCI_SRIOV_BAR 0x24 /* VF BAR0 */
644#define PCI_SRIOV_NUM_BARS 6 /* Number of VF BARs */
645#define PCI_SRIOV_VFM 0x3c /* VF Migration State Array Offset*/
646#define PCI_SRIOV_VFM_BIR(x) ((x) & 7) /* State BIR */
647#define PCI_SRIOV_VFM_OFFSET(x) ((x) & ~7) /* State Offset */
648#define PCI_SRIOV_VFM_UA 0x0 /* Inactive.Unavailable */
649#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
650#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
651#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
652
618#endif /* LINUX_PCI_REGS_H */ 653#endif /* LINUX_PCI_REGS_H */
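
pci_regs.h above defines the SR-IOV extended-capability layout. A small sketch that locates the capability and reads the TotalVFs field with the standard config-space accessors; error handling is intentionally minimal.

#include <linux/pci.h>
#include <linux/pci_regs.h>

static u16 read_total_vfs(struct pci_dev *dev)
{
	int pos;
	u16 total = 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (pos)
		pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);

	return total;			/* 0 if SR-IOV is absent */
}
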
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 6cd91e3f9820..b4c79545330b 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -16,29 +16,30 @@
16#define PCIE_ANY_PORT 7 16#define PCIE_ANY_PORT 7
17 17
18/* Service Type */ 18/* Service Type */
19#define PCIE_PORT_SERVICE_PME 1 /* Power Management Event */ 19#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
20#define PCIE_PORT_SERVICE_AER 2 /* Advanced Error Reporting */ 20#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
21#define PCIE_PORT_SERVICE_HP 4 /* Native Hotplug */ 21#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */
22#define PCIE_PORT_SERVICE_VC 8 /* Virtual Channel */ 22#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT)
23#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */
24#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
25#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
26#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
23 27
24/* Root/Upstream/Downstream Port's Interrupt Mode */ 28/* Root/Upstream/Downstream Port's Interrupt Mode */
29#define PCIE_PORT_NO_IRQ (-1)
25#define PCIE_PORT_INTx_MODE 0 30#define PCIE_PORT_INTx_MODE 0
26#define PCIE_PORT_MSI_MODE 1 31#define PCIE_PORT_MSI_MODE 1
27#define PCIE_PORT_MSIX_MODE 2 32#define PCIE_PORT_MSIX_MODE 2
28 33
29struct pcie_port_service_id { 34struct pcie_port_data {
30 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ 35 int port_type; /* Type of the port */
31 __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ 36 int port_irq_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
32 __u32 class, class_mask; /* (class,subclass,prog-if) triplet */
33 __u32 port_type, service_type; /* Port Entity */
34 kernel_ulong_t driver_data;
35}; 37};
36 38
37struct pcie_device { 39struct pcie_device {
38 int irq; /* Service IRQ/MSI/MSI-X Vector */ 40 int irq; /* Service IRQ/MSI/MSI-X Vector */
39 int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */ 41 struct pci_dev *port; /* Root/Upstream/Downstream Port */
40 struct pcie_port_service_id id; /* Service ID */ 42 u32 service; /* Port service this device represents */
41 struct pci_dev *port; /* Root/Upstream/Downstream Port */
42 void *priv_data; /* Service Private Data */ 43 void *priv_data; /* Service Private Data */
43 struct device device; /* Generic Device Interface */ 44 struct device device; /* Generic Device Interface */
44}; 45};
@@ -56,10 +57,9 @@ static inline void* get_service_data(struct pcie_device *dev)
56 57
57struct pcie_port_service_driver { 58struct pcie_port_service_driver {
58 const char *name; 59 const char *name;
59 int (*probe) (struct pcie_device *dev, 60 int (*probe) (struct pcie_device *dev);
60 const struct pcie_port_service_id *id);
61 void (*remove) (struct pcie_device *dev); 61 void (*remove) (struct pcie_device *dev);
62 int (*suspend) (struct pcie_device *dev, pm_message_t state); 62 int (*suspend) (struct pcie_device *dev);
63 int (*resume) (struct pcie_device *dev); 63 int (*resume) (struct pcie_device *dev);
64 64
65 /* Service Error Recovery Handler */ 65 /* Service Error Recovery Handler */
@@ -68,7 +68,9 @@ struct pcie_port_service_driver {
68 /* Link Reset Capability - AER service driver specific */ 68 /* Link Reset Capability - AER service driver specific */
69 pci_ers_result_t (*reset_link) (struct pci_dev *dev); 69 pci_ers_result_t (*reset_link) (struct pci_dev *dev);
70 70
71 const struct pcie_port_service_id *id_table; 71 int port_type; /* Type of the port this driver can handle */
72 u32 service; /* Port service this device represents */
73
72 struct device_driver driver; 74 struct device_driver driver;
73}; 75};
74#define to_service_driver(d) \ 76#define to_service_driver(d) \
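
With pcie_port_service_id gone, port service drivers above now express their matching through the port_type and service fields directly. A hypothetical declaration using the new fields (the names and the AER/any-port choice are illustrative, not from this patch):

#include <linux/pcieport_if.h>

static int example_service_probe(struct pcie_device *dev)
{
	/* dev->port and dev->service replace the old id-table matching */
	return 0;
}

static void example_service_remove(struct pcie_device *dev)
{
}

static struct pcie_port_service_driver example_service_driver = {
	.name		= "example_service",
	.port_type	= PCIE_ANY_PORT,		/* wildcard port type */
	.service	= PCIE_PORT_SERVICE_AER,	/* illustrative choice */
	.probe		= example_service_probe,
	.remove		= example_service_remove,
};
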
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 9f31683728fd..6729f7dcd60e 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -17,6 +17,9 @@
17 */ 17 */
18#define TIMER_ENTRY_STATIC ((void *) 0x74737461) 18#define TIMER_ENTRY_STATIC ((void *) 0x74737461)
19 19
20/********** mm/debug-pagealloc.c **********/
21#define PAGE_POISON 0xaa
22
20/********** mm/slab.c **********/ 23/********** mm/slab.c **********/
21/* 24/*
22 * Magic nums for obj red zoning. 25 * Magic nums for obj red zoning.
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 8ff25e0e7f7a..594c494ac3f0 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -73,6 +73,8 @@ enum power_supply_property {
73 POWER_SUPPLY_PROP_VOLTAGE_AVG, 73 POWER_SUPPLY_PROP_VOLTAGE_AVG,
74 POWER_SUPPLY_PROP_CURRENT_NOW, 74 POWER_SUPPLY_PROP_CURRENT_NOW,
75 POWER_SUPPLY_PROP_CURRENT_AVG, 75 POWER_SUPPLY_PROP_CURRENT_AVG,
76 POWER_SUPPLY_PROP_POWER_NOW,
77 POWER_SUPPLY_PROP_POWER_AVG,
76 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 78 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
77 POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN, 79 POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN,
78 POWER_SUPPLY_PROP_CHARGE_FULL, 80 POWER_SUPPLY_PROP_CHARGE_FULL,
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index b8bdb96eff78..fbfa3d44d33d 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -41,9 +41,6 @@ enum {
41 * while parent/subdir create the directory structure (every 41 * while parent/subdir create the directory structure (every
42 * /proc file has a parent, but "subdir" is NULL for all 42 * /proc file has a parent, but "subdir" is NULL for all
43 * non-directory entries). 43 * non-directory entries).
44 *
45 * "owner" is used to protect module
46 * from unloading while proc_dir_entry is in use
47 */ 44 */
48 45
49typedef int (read_proc_t)(char *page, char **start, off_t off, 46typedef int (read_proc_t)(char *page, char **start, off_t off,
@@ -70,7 +67,6 @@ struct proc_dir_entry {
70 * somewhere. 67 * somewhere.
71 */ 68 */
72 const struct file_operations *proc_fops; 69 const struct file_operations *proc_fops;
73 struct module *owner;
74 struct proc_dir_entry *next, *parent, *subdir; 70 struct proc_dir_entry *next, *parent, *subdir;
75 void *data; 71 void *data;
76 read_proc_t *read_proc; 72 read_proc_t *read_proc;
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 98b93ca4db06..67c15653fc23 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -94,6 +94,7 @@ extern void ptrace_notify(int exit_code);
94extern void __ptrace_link(struct task_struct *child, 94extern void __ptrace_link(struct task_struct *child,
95 struct task_struct *new_parent); 95 struct task_struct *new_parent);
96extern void __ptrace_unlink(struct task_struct *child); 96extern void __ptrace_unlink(struct task_struct *child);
97extern void exit_ptrace(struct task_struct *tracer);
97extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); 98extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
98#define PTRACE_MODE_READ 1 99#define PTRACE_MODE_READ 1
99#define PTRACE_MODE_ATTACH 2 100#define PTRACE_MODE_ATTACH 2
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 3945f803d514..7c775751392c 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -28,4 +28,4 @@ int pwm_enable(struct pwm_device *pwm);
28 */ 28 */
29void pwm_disable(struct pwm_device *pwm); 29void pwm_disable(struct pwm_device *pwm);
30 30
31#endif /* __ASM_ARCH_PWM_H */ 31#endif /* __LINUX_PWM_H */
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
deleted file mode 100644
index e98900671ca9..000000000000
--- a/include/linux/raid/bitmap.h
+++ /dev/null
@@ -1,288 +0,0 @@
1/*
2 * bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
3 *
4 * additions: Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
5 */
6#ifndef BITMAP_H
7#define BITMAP_H 1
8
9#define BITMAP_MAJOR_LO 3
10/* version 4 insists the bitmap is in little-endian order
11 * with version 3, it is host-endian which is non-portable
12 */
13#define BITMAP_MAJOR_HI 4
14#define BITMAP_MAJOR_HOSTENDIAN 3
15
16#define BITMAP_MINOR 39
17
18/*
19 * in-memory bitmap:
20 *
21 * Use 16 bit block counters to track pending writes to each "chunk".
22 * The 2 high order bits are special-purpose, the first is a flag indicating
23 * whether a resync is needed. The second is a flag indicating whether a
24 * resync is active.
25 * This means that the counter is actually 14 bits:
26 *
27 * +--------+--------+------------------------------------------------+
28 * | resync | resync | counter |
29 * | needed | active | |
30 * | (0-1) | (0-1) | (0-16383) |
31 * +--------+--------+------------------------------------------------+
32 *
33 * The "resync needed" bit is set when:
34 * a '1' bit is read from storage at startup.
35 * a write request fails on some drives
36 * a resync is aborted on a chunk with 'resync active' set
37 * It is cleared (and resync-active set) when a resync starts across all drives
38 * of the chunk.
39 *
40 *
41 * The "resync active" bit is set when:
42 * a resync is started on all drives, and resync_needed is set.
43 * resync_needed will be cleared (as long as resync_active wasn't already set).
44 * It is cleared when a resync completes.
45 *
46 * The counter counts pending write requests, plus the on-disk bit.
47 * When the counter is '1' and the resync bits are clear, the on-disk
48 * bit can be cleared as well, thus setting the counter to 0.
49 * When we set a bit, or in the counter (to start a write), if the fields is
50 * 0, we first set the disk bit and set the counter to 1.
51 *
52 * If the counter is 0, the on-disk bit is clear and the stripe is clean
53 * Anything that dirties the stripe pushes the counter to 2 (at least)
54 * and sets the on-disk bit (lazily).
55 * If a periodic sweep finds the counter at 2, it is decremented to 1.
56 * If the sweep finds the counter at 1, the on-disk bit is cleared and the
57 * counter goes to zero.
58 *
59 * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block
60 * counters as a fallback when "page" memory cannot be allocated:
61 *
62 * Normal case (page memory allocated):
63 *
64 * page pointer (32-bit)
65 *
66 * [ ] ------+
67 * |
68 * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters)
69 * c1 c2 c2048
70 *
71 * Hijacked case (page memory allocation failed):
72 *
73 * hijacked page pointer (32-bit)
74 *
75 * [ ][ ] (no page memory allocated)
76 * counter #1 (16-bit) counter #2 (16-bit)
77 *
78 */
79
80#ifdef __KERNEL__
81
82#define PAGE_BITS (PAGE_SIZE << 3)
83#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
84
85typedef __u16 bitmap_counter_t;
86#define COUNTER_BITS 16
87#define COUNTER_BIT_SHIFT 4
88#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
89#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
90
91#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
92#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))
93#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)
94#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
95#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
96#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
97
98/* how many counters per page? */
99#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS)
100/* same, except a shift value for more efficient bitops */
101#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT)
102/* same, except a mask value for more efficient bitops */
103#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1)
104
105#define BITMAP_BLOCK_SIZE 512
106#define BITMAP_BLOCK_SHIFT 9
107
108/* how many blocks per chunk? (this is variable) */
109#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT)
110#define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT)
111#define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1)
112
113/* when hijacked, the counters and bits represent even larger "chunks" */
114/* there will be 1024 chunks represented by each counter in the page pointers */
115#define PAGEPTR_BLOCK_RATIO(bitmap) \
116 (CHUNK_BLOCK_RATIO(bitmap) << PAGE_COUNTER_SHIFT >> 1)
117#define PAGEPTR_BLOCK_SHIFT(bitmap) \
118 (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1)
119#define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1)
120
121/*
122 * on-disk bitmap:
123 *
124 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
125 * file a page at a time. There's a superblock at the start of the file.
126 */
127
128/* map chunks (bits) to file pages - offset by the size of the superblock */
129#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3))
130
131#endif
132
133/*
134 * bitmap structures:
135 */
136
137#define BITMAP_MAGIC 0x6d746962
138
139/* use these for bitmap->flags and bitmap->sb->state bit-fields */
140enum bitmap_state {
141 BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */
142 BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */
143 BITMAP_HOSTENDIAN = 0x8000,
144};
145
146/* the superblock at the front of the bitmap file -- little endian */
147typedef struct bitmap_super_s {
148 __le32 magic; /* 0 BITMAP_MAGIC */
149 __le32 version; /* 4 the bitmap major for now, could change... */
150 __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */
151 __le64 events; /* 24 event counter for the bitmap (1)*/
152 __le64 events_cleared;/*32 event counter when last bit cleared (2) */
153 __le64 sync_size; /* 40 the size of the md device's sync range(3) */
154 __le32 state; /* 48 bitmap state information */
155 __le32 chunksize; /* 52 the bitmap chunk size in bytes */
156 __le32 daemon_sleep; /* 56 seconds between disk flushes */
157 __le32 write_behind; /* 60 number of outstanding write-behind writes */
158
159 __u8 pad[256 - 64]; /* set to zero */
160} bitmap_super_t;
161
162/* notes:
163 * (1) This event counter is updated before the eventcounter in the md superblock
164 * When a bitmap is loaded, it is only accepted if this event counter is equal
165 * to, or one greater than, the event counter in the superblock.
166 * (2) This event counter is updated when the other one is *if*and*only*if* the
167 * array is not degraded. As bits are not cleared when the array is degraded,
168 * this represents the last time that any bits were cleared.
169 * If a device is being added that has an event count with this value or
170 * higher, it is accepted as conforming to the bitmap.
171 * (3)This is the number of sectors represented by the bitmap, and is the range that
172 * resync happens across. For raid1 and raid5/6 it is the size of individual
173 * devices. For raid10 it is the size of the array.
174 */
175
176#ifdef __KERNEL__
177
178/* the in-memory bitmap is represented by bitmap_pages */
179struct bitmap_page {
180 /*
181 * map points to the actual memory page
182 */
183 char *map;
184 /*
185 * in emergencies (when map cannot be alloced), hijack the map
186 * pointer and use it as two counters itself
187 */
188 unsigned int hijacked:1;
189 /*
190 * count of dirty bits on the page
191 */
192 unsigned int count:31;
193};
194
195/* keep track of bitmap file pages that have pending writes on them */
196struct page_list {
197 struct list_head list;
198 struct page *page;
199};
200
201/* the main bitmap structure - one per mddev */
202struct bitmap {
203 struct bitmap_page *bp;
204 unsigned long pages; /* total number of pages in the bitmap */
205 unsigned long missing_pages; /* number of pages not yet allocated */
206
207 mddev_t *mddev; /* the md device that the bitmap is for */
208
209 int counter_bits; /* how many bits per block counter */
210
211 /* bitmap chunksize -- how much data does each bit represent? */
212 unsigned long chunksize;
213 unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
214 unsigned long chunks; /* total number of data chunks for the array */
215
216 /* We hold a count on the chunk currently being synced, and drop
217 * it when the last block is started. If the resync is aborted
218 * midway, we need to be able to drop that count, so we remember
219 * the counted chunk..
220 */
221 unsigned long syncchunk;
222
223 __u64 events_cleared;
224 int need_sync;
225
226 /* bitmap spinlock */
227 spinlock_t lock;
228
229 long offset; /* offset from superblock if file is NULL */
230 struct file *file; /* backing disk file */
231 struct page *sb_page; /* cached copy of the bitmap file superblock */
232 struct page **filemap; /* list of cache pages for the file */
233 unsigned long *filemap_attr; /* attributes associated w/ filemap pages */
234 unsigned long file_pages; /* number of pages in the file */
235 int last_page_size; /* bytes in the last page */
236
237 unsigned long flags;
238
239 int allclean;
240
241 unsigned long max_write_behind; /* write-behind mode */
242 atomic_t behind_writes;
243
244 /*
245 * the bitmap daemon - periodically wakes up and sweeps the bitmap
246 * file, cleaning up bits and flushing out pages to disk as necessary
247 */
248 unsigned long daemon_lastrun; /* jiffies of last run */
249 unsigned long daemon_sleep; /* how many seconds between updates? */
250 unsigned long last_end_sync; /* when we lasted called end_sync to
251 * update bitmap with resync progress */
252
253 atomic_t pending_writes; /* pending writes to the bitmap file */
254 wait_queue_head_t write_wait;
255 wait_queue_head_t overflow_wait;
256
257};
258
259/* the bitmap API */
260
261/* these are used only by md/bitmap */
262int bitmap_create(mddev_t *mddev);
263void bitmap_flush(mddev_t *mddev);
264void bitmap_destroy(mddev_t *mddev);
265
266void bitmap_print_sb(struct bitmap *bitmap);
267void bitmap_update_sb(struct bitmap *bitmap);
268
269int bitmap_setallbits(struct bitmap *bitmap);
270void bitmap_write_all(struct bitmap *bitmap);
271
272void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
273
274/* these are exported */
275int bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
276 unsigned long sectors, int behind);
277void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
278 unsigned long sectors, int success, int behind);
279int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded);
280void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
281void bitmap_close_sync(struct bitmap *bitmap);
282void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
283
284void bitmap_unplug(struct bitmap *bitmap);
285void bitmap_daemon_work(struct bitmap *bitmap);
286#endif
287
288#endif
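
The comment block in the removed bitmap.h documents a 16-bit in-memory counter split into two flag bits and a 14-bit pending-write count. Purely to illustrate that layout (whether these definitions reappear elsewhere is not visible in this hunk), a decoding sketch whose masks mirror NEEDED_MASK/RESYNC_MASK/COUNTER_MAX:

#include <linux/types.h>

/*
 * Bit 15 = "resync needed", bit 14 = "resync active", bits 13..0 =
 * pending-write counter (0-16383), as described in the comment above.
 */
static void decode_bitmap_counter(u16 raw, int *needed, int *active,
				  u16 *count)
{
	*needed = !!(raw & (1 << 15));		/* NEEDED_MASK  */
	*active = !!(raw & (1 << 14));		/* RESYNC_MASK  */
	*count  = raw & ((1 << 14) - 1);	/* COUNTER_MAX  */
}
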
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h
deleted file mode 100644
index f38b9c586afb..000000000000
--- a/include/linux/raid/linear.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _LINEAR_H
2#define _LINEAR_H
3
4#include <linux/raid/md.h>
5
6struct dev_info {
7 mdk_rdev_t *rdev;
8 sector_t num_sectors;
9 sector_t start_sector;
10};
11
12typedef struct dev_info dev_info_t;
13
14struct linear_private_data
15{
16 struct linear_private_data *prev; /* earlier version */
17 dev_info_t **hash_table;
18 sector_t spacing;
19 sector_t array_sectors;
20 int sector_shift; /* shift before dividing
21 * by spacing
22 */
23 dev_info_t disks[0];
24};
25
26
27typedef struct linear_private_data linear_conf_t;
28
29#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private)
30
31#endif
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
deleted file mode 100644
index 82bea14cae1a..000000000000
--- a/include/linux/raid/md.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 md.h : Multiple Devices driver for Linux
3 Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
4 Copyright (C) 1994-96 Marc ZYNGIER
5 <zyngier@ufr-info-p7.ibp.fr> or
6 <maz@gloups.fdn.fr>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 You should have received a copy of the GNU General Public License
14 (for example /usr/src/linux/COPYING); if not, write to the Free
15 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
16*/
17
18#ifndef _MD_H
19#define _MD_H
20
21#include <linux/blkdev.h>
22#include <linux/seq_file.h>
23
24/*
25 * 'md_p.h' holds the 'physical' layout of RAID devices
26 * 'md_u.h' holds the user <=> kernel API
27 *
28 * 'md_k.h' holds kernel internal definitions
29 */
30
31#include <linux/raid/md_p.h>
32#include <linux/raid/md_u.h>
33#include <linux/raid/md_k.h>
34
35#ifdef CONFIG_MD
36
37/*
38 * Different major versions are not compatible.
39 * Different minor versions are only downward compatible.
40 * Different patchlevel versions are downward and upward compatible.
41 */
42#define MD_MAJOR_VERSION 0
43#define MD_MINOR_VERSION 90
44/*
45 * MD_PATCHLEVEL_VERSION indicates kernel functionality.
46 * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
47 * and major_version/minor_version accordingly
48 * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
49 * in the super status byte
50 * >=3 means that bitmap superblock version 4 is supported, which uses
51 * little-endian representation rather than host-endian
52 */
53#define MD_PATCHLEVEL_VERSION 3
54
55extern int mdp_major;
56
57extern int register_md_personality(struct mdk_personality *p);
58extern int unregister_md_personality(struct mdk_personality *p);
59extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
60 mddev_t *mddev, const char *name);
61extern void md_unregister_thread(mdk_thread_t *thread);
62extern void md_wakeup_thread(mdk_thread_t *thread);
63extern void md_check_recovery(mddev_t *mddev);
64extern void md_write_start(mddev_t *mddev, struct bio *bi);
65extern void md_write_end(mddev_t *mddev);
66extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
67extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
68
69extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
70 sector_t sector, int size, struct page *page);
71extern void md_super_wait(mddev_t *mddev);
72extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
73 struct page *page, int rw);
74extern void md_do_sync(mddev_t *mddev);
75extern void md_new_event(mddev_t *mddev);
76extern int md_allow_write(mddev_t *mddev);
77extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
78
79#endif /* CONFIG_MD */
80#endif
81
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
deleted file mode 100644
index 9743e4dbc918..000000000000
--- a/include/linux/raid/md_k.h
+++ /dev/null
@@ -1,402 +0,0 @@
1/*
2 md_k.h : kernel internal structure of the Linux MD driver
3 Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2, or (at your option)
8 any later version.
9
10 You should have received a copy of the GNU General Public License
11 (for example /usr/src/linux/COPYING); if not, write to the Free
12 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
13*/
14
15#ifndef _MD_K_H
16#define _MD_K_H
17
18/* and dm-bio-list.h is not under include/linux because.... ??? */
19#include "../../../drivers/md/dm-bio-list.h"
20
21#ifdef CONFIG_BLOCK
22
23#define LEVEL_MULTIPATH (-4)
24#define LEVEL_LINEAR (-1)
25#define LEVEL_FAULTY (-5)
26
27/* we need a value for 'no level specified' and 0
28 * means 'raid0', so we need something else. This is
29 * for internal use only
30 */
31#define LEVEL_NONE (-1000000)
32
33#define MaxSector (~(sector_t)0)
34
35typedef struct mddev_s mddev_t;
36typedef struct mdk_rdev_s mdk_rdev_t;
37
38/*
39 * options passed in raidrun:
40 */
41
42/* Currently this must fit in an 'int' */
43#define MAX_CHUNK_SIZE (1<<30)
44
45/*
46 * MD's 'extended' device
47 */
48struct mdk_rdev_s
49{
50 struct list_head same_set; /* RAID devices within the same set */
51
52 sector_t size; /* Device size (in blocks) */
53 mddev_t *mddev; /* RAID array if running */
54 long last_events; /* IO event timestamp */
55
56 struct block_device *bdev; /* block device handle */
57
58 struct page *sb_page;
59 int sb_loaded;
60 __u64 sb_events;
61 sector_t data_offset; /* start of data in array */
62 sector_t sb_start; /* offset of the super block (in 512byte sectors) */
63 int sb_size; /* bytes in the superblock */
64 int preferred_minor; /* autorun support */
65
66 struct kobject kobj;
67
68 /* A device can be in one of three states based on two flags:
69 * Not working: faulty==1 in_sync==0
70 * Fully working: faulty==0 in_sync==1
71 * Working, but not
72 * in sync with array
73 * faulty==0 in_sync==0
74 *
75 * It can never have faulty==1, in_sync==1
76 * This reduces the burden of testing multiple flags in many cases
77 */
78
79 unsigned long flags;
80#define Faulty 1 /* device is known to have a fault */
81#define In_sync 2 /* device is in_sync with rest of array */
82#define WriteMostly 4 /* Avoid reading if at all possible */
83#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */
84#define AllReserved 6 /* If whole device is reserved for
85 * one array */
86#define AutoDetected 7 /* added by auto-detect */
87#define Blocked 8 /* An error occurred on an externally
88 * managed array, don't allow writes
89 * until it is cleared */
90#define StateChanged 9 /* Faulty or Blocked has changed during
91 * interrupt, so it needs to be
92 * notified by the thread */
93 wait_queue_head_t blocked_wait;
94
95 int desc_nr; /* descriptor index in the superblock */
96 int raid_disk; /* role of device in array */
97 int saved_raid_disk; /* role that device used to have in the
98 * array and could again if we did a partial
99 * resync from the bitmap
100 */
101 sector_t recovery_offset;/* If this device has been partially
102 * recovered, this is where we were
103 * up to.
104 */
105
106 atomic_t nr_pending; /* number of pending requests.
107 * only maintained for arrays that
108 * support hot removal
109 */
110 atomic_t read_errors; /* number of consecutive read errors that
111 * we have tried to ignore.
112 */
113 atomic_t corrected_errors; /* number of corrected read errors,
114 * for reporting to userspace and storing
115 * in superblock.
116 */
117 struct work_struct del_work; /* used for delayed sysfs removal */
118
119 struct sysfs_dirent *sysfs_state; /* handle for 'state'
120 * sysfs entry */
121};
122
123struct mddev_s
124{
125 void *private;
126 struct mdk_personality *pers;
127 dev_t unit;
128 int md_minor;
129 struct list_head disks;
130 unsigned long flags;
131#define MD_CHANGE_DEVS 0 /* Some device status has changed */
132#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
133#define MD_CHANGE_PENDING 2 /* superblock update in progress */
134
135 int ro;
136
137 struct gendisk *gendisk;
138
139 struct kobject kobj;
140 int hold_active;
141#define UNTIL_IOCTL 1
142#define UNTIL_STOP 2
143
144 /* Superblock information */
145 int major_version,
146 minor_version,
147 patch_version;
148 int persistent;
149 int external; /* metadata is
150 * managed externally */
 151 char metadata_type[17]; /* externally set */
152 int chunk_size;
153 time_t ctime, utime;
154 int level, layout;
155 char clevel[16];
156 int raid_disks;
157 int max_disks;
158 sector_t size; /* used size of component devices */
159 sector_t array_sectors; /* exported array size */
160 __u64 events;
161
162 char uuid[16];
163
164 /* If the array is being reshaped, we need to record the
165 * new shape and an indication of where we are up to.
166 * This is written to the superblock.
167 * If reshape_position is MaxSector, then no reshape is happening (yet).
168 */
169 sector_t reshape_position;
170 int delta_disks, new_level, new_layout, new_chunk;
171
172 struct mdk_thread_s *thread; /* management thread */
173 struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
174 sector_t curr_resync; /* last block scheduled */
175 unsigned long resync_mark; /* a recent timestamp */
176 sector_t resync_mark_cnt;/* blocks written at resync_mark */
177 sector_t curr_mark_cnt; /* blocks scheduled now */
178
179 sector_t resync_max_sectors; /* may be set by personality */
180
181 sector_t resync_mismatches; /* count of sectors where
182 * parity/replica mismatch found
183 */
184
185 /* allow user-space to request suspension of IO to regions of the array */
186 sector_t suspend_lo;
187 sector_t suspend_hi;
188 /* if zero, use the system-wide default */
189 int sync_speed_min;
190 int sync_speed_max;
191
192 /* resync even though the same disks are shared among md-devices */
193 int parallel_resync;
194
195 int ok_start_degraded;
196 /* recovery/resync flags
197 * NEEDED: we might need to start a resync/recover
198 * RUNNING: a thread is running, or about to be started
199 * SYNC: actually doing a resync, not a recovery
200 * RECOVER: doing recovery, or need to try it.
201 * INTR: resync needs to be aborted for some reason
202 * DONE: thread is done and is waiting to be reaped
203 * REQUEST: user-space has requested a sync (used with SYNC)
 204 * CHECK: user-space request for check-only, no repair
205 * RESHAPE: A reshape is happening
206 *
 207 * If neither SYNC nor RESHAPE is set, then it is a recovery.
208 */
209#define MD_RECOVERY_RUNNING 0
210#define MD_RECOVERY_SYNC 1
211#define MD_RECOVERY_RECOVER 2
212#define MD_RECOVERY_INTR 3
213#define MD_RECOVERY_DONE 4
214#define MD_RECOVERY_NEEDED 5
215#define MD_RECOVERY_REQUESTED 6
216#define MD_RECOVERY_CHECK 7
217#define MD_RECOVERY_RESHAPE 8
218#define MD_RECOVERY_FROZEN 9
219
220 unsigned long recovery;
221 int recovery_disabled; /* if we detect that recovery
222 * will always fail, set this
223 * so we don't loop trying */
224
225 int in_sync; /* know to not need resync */
226 struct mutex reconfig_mutex;
227 atomic_t active; /* general refcount */
228 atomic_t openers; /* number of active opens */
229
230 int changed; /* true if we might need to reread partition info */
231 int degraded; /* whether md should consider
232 * adding a spare
233 */
234 int barriers_work; /* initialised to true, cleared as soon
235 * as a barrier request to slave
236 * fails. Only supported
237 */
238 struct bio *biolist; /* bios that need to be retried
239 * because BIO_RW_BARRIER is not supported
240 */
241
242 atomic_t recovery_active; /* blocks scheduled, but not written */
243 wait_queue_head_t recovery_wait;
244 sector_t recovery_cp;
245 sector_t resync_min; /* user requested sync
246 * starts here */
247 sector_t resync_max; /* resync should pause
248 * when it gets here */
249
250 struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
251 * file in sysfs.
252 */
253 struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */
254
255 struct work_struct del_work; /* used for delayed sysfs removal */
256
257 spinlock_t write_lock;
258 wait_queue_head_t sb_wait; /* for waiting on superblock updates */
259 atomic_t pending_writes; /* number of active superblock writes */
260
261 unsigned int safemode; /* if set, update "clean" superblock
262 * when no writes pending.
263 */
264 unsigned int safemode_delay;
265 struct timer_list safemode_timer;
266 atomic_t writes_pending;
267 struct request_queue *queue; /* for plugging ... */
268
269 atomic_t write_behind; /* outstanding async IO */
270 unsigned int max_write_behind; /* 0 = sync */
271
272 struct bitmap *bitmap; /* the bitmap for the device */
273 struct file *bitmap_file; /* the bitmap file */
274 long bitmap_offset; /* offset from superblock of
275 * start of bitmap. May be
276 * negative, but not '0'
277 */
278 long default_bitmap_offset; /* this is the offset to use when
279 * hot-adding a bitmap. It should
280 * eventually be settable by sysfs.
281 */
282
283 struct list_head all_mddevs;
284};
285
286
287static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
288{
289 int faulty = test_bit(Faulty, &rdev->flags);
290 if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
291 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
292}
293
294static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
295{
296 atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
297}
298
299struct mdk_personality
300{
301 char *name;
302 int level;
303 struct list_head list;
304 struct module *owner;
305 int (*make_request)(struct request_queue *q, struct bio *bio);
306 int (*run)(mddev_t *mddev);
307 int (*stop)(mddev_t *mddev);
308 void (*status)(struct seq_file *seq, mddev_t *mddev);
309 /* error_handler must set ->faulty and clear ->in_sync
310 * if appropriate, and should abort recovery if needed
311 */
312 void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
313 int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
314 int (*hot_remove_disk) (mddev_t *mddev, int number);
315 int (*spare_active) (mddev_t *mddev);
316 sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
317 int (*resize) (mddev_t *mddev, sector_t sectors);
318 int (*check_reshape) (mddev_t *mddev);
319 int (*start_reshape) (mddev_t *mddev);
320 int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
321 /* quiesce moves between quiescence states
322 * 0 - fully active
323 * 1 - no new requests allowed
324 * others - reserved
325 */
326 void (*quiesce) (mddev_t *mddev, int state);
327};
328
329
330struct md_sysfs_entry {
331 struct attribute attr;
332 ssize_t (*show)(mddev_t *, char *);
333 ssize_t (*store)(mddev_t *, const char *, size_t);
334};
335
336
337static inline char * mdname (mddev_t * mddev)
338{
339 return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
340}
341
342/*
343 * iterates through some rdev ringlist. It's safe to remove the
 344 * current 'rdev'. Don't touch 'tmp' though.
345 */
346#define rdev_for_each_list(rdev, tmp, head) \
347 list_for_each_entry_safe(rdev, tmp, head, same_set)
348
349/*
350 * iterates through the 'same array disks' ringlist
351 */
352#define rdev_for_each(rdev, tmp, mddev) \
353 list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
354
355#define rdev_for_each_rcu(rdev, mddev) \
356 list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
357
358typedef struct mdk_thread_s {
359 void (*run) (mddev_t *mddev);
360 mddev_t *mddev;
361 wait_queue_head_t wqueue;
362 unsigned long flags;
363 struct task_struct *tsk;
364 unsigned long timeout;
365} mdk_thread_t;
366
367#define THREAD_WAKEUP 0
368
369#define __wait_event_lock_irq(wq, condition, lock, cmd) \
370do { \
371 wait_queue_t __wait; \
372 init_waitqueue_entry(&__wait, current); \
373 \
374 add_wait_queue(&wq, &__wait); \
375 for (;;) { \
376 set_current_state(TASK_UNINTERRUPTIBLE); \
377 if (condition) \
378 break; \
379 spin_unlock_irq(&lock); \
380 cmd; \
381 schedule(); \
382 spin_lock_irq(&lock); \
383 } \
384 current->state = TASK_RUNNING; \
385 remove_wait_queue(&wq, &__wait); \
386} while (0)
387
388#define wait_event_lock_irq(wq, condition, lock, cmd) \
389do { \
390 if (condition) \
391 break; \
392 __wait_event_lock_irq(wq, condition, lock, cmd); \
393} while (0)
394
395static inline void safe_put_page(struct page *p)
396{
397 if (p) put_page(p);
398}
399
400#endif /* CONFIG_BLOCK */
401#endif
402
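The wait_event_lock_irq() helpers above let md sleep on a condition while temporarily dropping a spinlock around schedule(). A minimal usage sketch follows; every name in it (pending, my_lock, my_wq, kick_worker) is hypothetical and only illustrates the calling convention, which is roughly how md itself waits for pending superblock updates to drain.

#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/sched.h>

static int pending;			/* hypothetical shared state */
static spinlock_t my_lock;		/* hypothetical lock protecting 'pending' */
static wait_queue_head_t my_wq;		/* hypothetical waitqueue, woken when 'pending' changes */

static void kick_worker(void)		/* hypothetical: nudge whoever clears 'pending' */
{
}

/* Sleep until 'pending' drops to zero.  The macro releases my_lock
 * (with IRQs re-enabled) around schedule(), runs kick_worker() first,
 * and re-takes the lock before re-testing the condition.
 */
static void wait_until_idle(void)
{
	spin_lock_irq(&my_lock);
	wait_event_lock_irq(my_wq, pending == 0, my_lock, kick_worker());
	spin_unlock_irq(&my_lock);
}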
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
index 7192035fc4b0..fb1abb3367e9 100644
--- a/include/linux/raid/md_u.h
+++ b/include/linux/raid/md_u.h
@@ -15,6 +15,24 @@
15#ifndef _MD_U_H 15#ifndef _MD_U_H
16#define _MD_U_H 16#define _MD_U_H
17 17
18/*
19 * Different major versions are not compatible.
20 * Different minor versions are only downward compatible.
21 * Different patchlevel versions are downward and upward compatible.
22 */
23#define MD_MAJOR_VERSION 0
24#define MD_MINOR_VERSION 90
25/*
26 * MD_PATCHLEVEL_VERSION indicates kernel functionality.
27 * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
28 * and major_version/minor_version accordingly
29 * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
30 * in the super status byte
31 * >=3 means that bitmap superblock version 4 is supported, which uses
 32 * little-endian representation rather than host-endian
33 */
34#define MD_PATCHLEVEL_VERSION 3
35
18/* ioctls */ 36/* ioctls */
19 37
20/* status */ 38/* status */
@@ -46,6 +64,12 @@
46#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) 64#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
47#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) 65#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
48 66
67/* 63 partitions with the alternate major number (mdp) */
68#define MdpMinorShift 6
69#ifdef __KERNEL__
70extern int mdp_major;
71#endif
72
49typedef struct mdu_version_s { 73typedef struct mdu_version_s {
50 int major; 74 int major;
51 int minor; 75 int minor;
@@ -85,6 +109,17 @@ typedef struct mdu_array_info_s {
85 109
86} mdu_array_info_t; 110} mdu_array_info_t;
87 111
112/* non-obvious values for 'level' */
113#define LEVEL_MULTIPATH (-4)
114#define LEVEL_LINEAR (-1)
115#define LEVEL_FAULTY (-5)
116
117/* we need a value for 'no level specified' and 0
118 * means 'raid0', so we need something else. This is
119 * for internal use only
120 */
121#define LEVEL_NONE (-1000000)
122
88typedef struct mdu_disk_info_s { 123typedef struct mdu_disk_info_s {
89 /* 124 /*
90 * configuration/status of one particular disk 125 * configuration/status of one particular disk
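MdpMinorShift reserves the low 6 bits of an mdp device's minor number for partitions, which is where the "63 partitions" in the comment comes from (partition 0 is the whole array). A small sketch of that encoding, assuming mdp_major has already been looked up at run time (it is allocated dynamically, e.g. visible in /proc/devices); the function name is invented for illustration.

#include <sys/types.h>
#include <sys/sysmacros.h>	/* makedev() */

#define MdpMinorShift 6

/* Build the dev_t for partition 'part' (0 = whole device) of mdp
 * array 'unit'.  'mdp_major' must be discovered at run time; nothing
 * here is a fixed constant.
 */
static dev_t mdp_dev(int mdp_major, int unit, int part)
{
	return makedev(mdp_major, (unit << MdpMinorShift) | part);
}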
diff --git a/include/linux/raid/multipath.h b/include/linux/raid/multipath.h
deleted file mode 100644
index 6f53fc177a47..000000000000
--- a/include/linux/raid/multipath.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef _MULTIPATH_H
2#define _MULTIPATH_H
3
4#include <linux/raid/md.h>
5
6struct multipath_info {
7 mdk_rdev_t *rdev;
8};
9
10struct multipath_private_data {
11 mddev_t *mddev;
12 struct multipath_info *multipaths;
13 int raid_disks;
14 int working_disks;
15 spinlock_t device_lock;
16 struct list_head retry_list;
17
18 mempool_t *pool;
19};
20
21typedef struct multipath_private_data multipath_conf_t;
22
23/*
24 * this is the only point in the RAID code where we violate
25 * C type safety. mddev->private is an 'opaque' pointer.
26 */
27#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private)
28
29/*
30 * this is our 'private' 'collective' MULTIPATH buffer head.
31 * it contains information about what kind of IO operations were started
32 * for this MULTIPATH operation, and about their status:
33 */
34
35struct multipath_bh {
36 mddev_t *mddev;
37 struct bio *master_bio;
38 struct bio bio;
39 int path;
40 struct list_head retry_list;
41};
42#endif
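mddev->private is an opaque pointer that each personality fills with its own conf structure and recovers through a cast like the mddev_to_conf() above. A minimal sketch of the pattern as a personality method might use it; the function is hypothetical, the fields are the ones declared in this header.

/* Sketch: retrieve the multipath conf hung off mddev->private and
 * report whether any path is still usable.
 */
static int multipath_all_paths_failed(mddev_t *mddev)
{
	multipath_conf_t *conf = mddev_to_conf(mddev);

	return conf->working_disks == 0;
}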
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
new file mode 100644
index 000000000000..d92480f8285c
--- /dev/null
+++ b/include/linux/raid/pq.h
@@ -0,0 +1,132 @@
1/* -*- linux-c -*- ------------------------------------------------------- *
2 *
3 * Copyright 2003 H. Peter Anvin - All Rights Reserved
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
8 * Boston MA 02111-1307, USA; either version 2 of the License, or
9 * (at your option) any later version; incorporated herein by reference.
10 *
11 * ----------------------------------------------------------------------- */
12
13#ifndef LINUX_RAID_RAID6_H
14#define LINUX_RAID_RAID6_H
15
16#ifdef __KERNEL__
17
18/* Set to 1 to use kernel-wide empty_zero_page */
19#define RAID6_USE_EMPTY_ZERO_PAGE 0
20#include <linux/blkdev.h>
21
22/* We need a pre-zeroed page... if we don't want to use the kernel-provided
23 one define it here */
24#if RAID6_USE_EMPTY_ZERO_PAGE
25# define raid6_empty_zero_page empty_zero_page
26#else
27extern const char raid6_empty_zero_page[PAGE_SIZE];
28#endif
29
30#else /* ! __KERNEL__ */
31/* Used for testing in user space */
32
33#include <errno.h>
34#include <inttypes.h>
35#include <limits.h>
36#include <stddef.h>
37#include <sys/mman.h>
38#include <sys/types.h>
39
40/* Not standard, but glibc defines it */
41#define BITS_PER_LONG __WORDSIZE
42
43typedef uint8_t u8;
44typedef uint16_t u16;
45typedef uint32_t u32;
46typedef uint64_t u64;
47
48#ifndef PAGE_SIZE
49# define PAGE_SIZE 4096
50#endif
51extern const char raid6_empty_zero_page[PAGE_SIZE];
52
53#define __init
54#define __exit
55#define __attribute_const__ __attribute__((const))
56#define noinline __attribute__((noinline))
57
58#define preempt_enable()
59#define preempt_disable()
60#define cpu_has_feature(x) 1
61#define enable_kernel_altivec()
62#define disable_kernel_altivec()
63
64#define EXPORT_SYMBOL(sym)
65#define MODULE_LICENSE(licence)
66#define subsys_initcall(x)
67#define module_exit(x)
68#endif /* __KERNEL__ */
69
70/* Routine choices */
71struct raid6_calls {
72 void (*gen_syndrome)(int, size_t, void **);
73 int (*valid)(void); /* Returns 1 if this routine set is usable */
74 const char *name; /* Name of this routine set */
75 int prefer; /* Has special performance attribute */
76};
77
78/* Selected algorithm */
79extern struct raid6_calls raid6_call;
80
81/* Algorithm list */
82extern const struct raid6_calls * const raid6_algos[];
83int raid6_select_algo(void);
84
85/* Return values from chk_syndrome */
86#define RAID6_OK 0
87#define RAID6_P_BAD 1
88#define RAID6_Q_BAD 2
89#define RAID6_PQ_BAD 3
90
91/* Galois field tables */
92extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
93extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
94extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
95extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
96
97/* Recovery routines */
98void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
99 void **ptrs);
100void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs);
101void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
102 void **ptrs);
103
104/* Some definitions to allow code to be compiled for testing in userspace */
105#ifndef __KERNEL__
106
107# define jiffies raid6_jiffies()
108# define printk printf
109# define GFP_KERNEL 0
110# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
111 PROT_READ|PROT_WRITE, \
112 MAP_PRIVATE|MAP_ANONYMOUS,\
113 0, 0))
114# define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE)
115
116static inline void cpu_relax(void)
117{
118 /* Nothing */
119}
120
121#undef HZ
122#define HZ 1000
123static inline uint32_t raid6_jiffies(void)
124{
125 struct timeval tv;
126 gettimeofday(&tv, NULL);
127 return tv.tv_sec*1000 + tv.tv_usec/1000;
128}
129
130#endif /* ! __KERNEL__ */
131
132#endif /* LINUX_RAID_RAID6_H */
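raid6_call is filled in by raid6_select_algo() with the fastest routine set from raid6_algos[] whose valid() hook succeeds. A hedged sketch of driving it, assuming the usual block layout of data pointers followed by the P and Q destinations; both prototypes are taken from the declarations above.

/* Sketch: generate P/Q syndrome pages, then recover two lost data
 * blocks.  Assumed layout: ptrs[0..ndisks-3] are data, ptrs[ndisks-2]
 * is P, ptrs[ndisks-1] is Q, each block PAGE_SIZE bytes.
 * raid6_select_algo() must have run once beforehand.
 */
static void compute_pq_sketch(int ndisks, void **ptrs)
{
	raid6_call.gen_syndrome(ndisks, PAGE_SIZE, ptrs);
}

static void recover_two_sketch(int ndisks, int faila, int failb, void **ptrs)
{
	raid6_2data_recov(ndisks, PAGE_SIZE, faila, failb, ptrs);
}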
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h
deleted file mode 100644
index fd42aa87c391..000000000000
--- a/include/linux/raid/raid0.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _RAID0_H
2#define _RAID0_H
3
4#include <linux/raid/md.h>
5
6struct strip_zone
7{
8 sector_t zone_start; /* Zone offset in md_dev (in sectors) */
9 sector_t dev_start; /* Zone offset in real dev (in sectors) */
10 sector_t sectors; /* Zone size in sectors */
11 int nb_dev; /* # of devices attached to the zone */
12 mdk_rdev_t **dev; /* Devices attached to the zone */
13};
14
15struct raid0_private_data
16{
17 struct strip_zone **hash_table; /* Table of indexes into strip_zone */
18 struct strip_zone *strip_zone;
19 mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
20 int nr_strip_zones;
21
22 sector_t spacing;
23 int sector_shift; /* shift this before divide by spacing */
24};
25
26typedef struct raid0_private_data raid0_conf_t;
27
28#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
29
30#endif
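The hash_table/spacing/sector_shift trio gives raid0 a cheap sector-to-zone lookup: shift the sector, divide by spacing, and index hash_table. A sketch of just that index calculation, hedged because the bounds check on the resulting zone lives in drivers/md/raid0.c rather than here.

#include <linux/kernel.h>	/* sector_div() */

/* Sketch: hash a sector to a candidate strip_zone.  The caller is
 * still expected to verify zone_start <= sector < zone_start + sectors
 * before trusting the result.
 */
static struct strip_zone *find_zone_sketch(raid0_conf_t *conf, sector_t sector)
{
	sector_t x = sector >> conf->sector_shift;

	sector_div(x, (u32)conf->spacing);	/* x /= spacing, per the comment above */
	return conf->hash_table[x];
}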
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
deleted file mode 100644
index 0a9ba7c3302e..000000000000
--- a/include/linux/raid/raid1.h
+++ /dev/null
@@ -1,134 +0,0 @@
1#ifndef _RAID1_H
2#define _RAID1_H
3
4#include <linux/raid/md.h>
5
6typedef struct mirror_info mirror_info_t;
7
8struct mirror_info {
9 mdk_rdev_t *rdev;
10 sector_t head_position;
11};
12
13/*
14 * memory pools need a pointer to the mddev, so they can force an unplug
15 * when memory is tight, and a count of the number of drives that the
16 * pool was allocated for, so they know how much to allocate and free.
17 * mddev->raid_disks cannot be used, as it can change while a pool is active
 18 * These two values are stored in a kmalloced struct.
19 */
20
21struct pool_info {
22 mddev_t *mddev;
23 int raid_disks;
24};
25
26
27typedef struct r1bio_s r1bio_t;
28
29struct r1_private_data_s {
30 mddev_t *mddev;
31 mirror_info_t *mirrors;
32 int raid_disks;
33 int last_used;
34 sector_t next_seq_sect;
35 spinlock_t device_lock;
36
37 struct list_head retry_list;
38 /* queue pending writes and submit them on unplug */
39 struct bio_list pending_bio_list;
40 /* queue of writes that have been unplugged */
41 struct bio_list flushing_bio_list;
42
43 /* for use when syncing mirrors: */
44
45 spinlock_t resync_lock;
46 int nr_pending;
47 int nr_waiting;
48 int nr_queued;
49 int barrier;
50 sector_t next_resync;
51 int fullsync; /* set to 1 if a full sync is needed,
52 * (fresh device added).
53 * Cleared when a sync completes.
54 */
55
56 wait_queue_head_t wait_barrier;
57
58 struct pool_info *poolinfo;
59
60 struct page *tmppage;
61
62 mempool_t *r1bio_pool;
63 mempool_t *r1buf_pool;
64};
65
66typedef struct r1_private_data_s conf_t;
67
68/*
69 * this is the only point in the RAID code where we violate
70 * C type safety. mddev->private is an 'opaque' pointer.
71 */
72#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
73
74/*
75 * this is our 'private' RAID1 bio.
76 *
77 * it contains information about what kind of IO operations were started
78 * for this RAID1 operation, and about their status:
79 */
80
81struct r1bio_s {
82 atomic_t remaining; /* 'have we finished' count,
83 * used from IRQ handlers
84 */
85 atomic_t behind_remaining; /* number of write-behind ios remaining
86 * in this BehindIO request
87 */
88 sector_t sector;
89 int sectors;
90 unsigned long state;
91 mddev_t *mddev;
92 /*
93 * original bio going to /dev/mdx
94 */
95 struct bio *master_bio;
96 /*
97 * if the IO is in READ direction, then this is where we read
98 */
99 int read_disk;
100
101 struct list_head retry_list;
102 struct bitmap_update *bitmap_update;
103 /*
104 * if the IO is in WRITE direction, then multiple bios are used.
105 * We choose the number when they are allocated.
106 */
107 struct bio *bios[0];
108 /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
109};
110
111/* when we get a read error on a read-only array, we redirect to another
112 * device without failing the first device, or trying to over-write to
113 * correct the read error. To keep track of bad blocks on a per-bio
114 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
115 */
116#define IO_BLOCKED ((struct bio*)1)
117
118/* bits for r1bio.state */
119#define R1BIO_Uptodate 0
120#define R1BIO_IsSync 1
121#define R1BIO_Degraded 2
122#define R1BIO_BehindIO 3
123#define R1BIO_Barrier 4
124#define R1BIO_BarrierRetry 5
125/* For write-behind requests, we call bi_end_io when
126 * the last non-write-behind device completes, providing
127 * any write was successful. Otherwise we call when
128 * any write-behind write succeeds, otherwise we call
129 * with failure when last write completes (and all failed).
130 * Record that bi_end_io was called with this flag...
131 */
132#define R1BIO_Returned 6
133
134#endif
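IO_BLOCKED is a sentinel pointer, not a real bio, so code walking r1bio->bios[] must special-case it alongside NULL (mirror not used for this request). A minimal sketch of the test; the helper name is invented.

/* Sketch: count mirrors that are still candidates for retrying this
 * r1bio.  NULL means the mirror was never used for the request,
 * IO_BLOCKED means it is known bad for this sector range.
 */
static int usable_mirrors_sketch(conf_t *conf, r1bio_t *r1_bio)
{
	int i, usable = 0;

	for (i = 0; i < conf->raid_disks; i++)
		if (r1_bio->bios[i] && r1_bio->bios[i] != IO_BLOCKED)
			usable++;
	return usable;
}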
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
deleted file mode 100644
index e9091cfeb286..000000000000
--- a/include/linux/raid/raid10.h
+++ /dev/null
@@ -1,123 +0,0 @@
1#ifndef _RAID10_H
2#define _RAID10_H
3
4#include <linux/raid/md.h>
5
6typedef struct mirror_info mirror_info_t;
7
8struct mirror_info {
9 mdk_rdev_t *rdev;
10 sector_t head_position;
11};
12
13typedef struct r10bio_s r10bio_t;
14
15struct r10_private_data_s {
16 mddev_t *mddev;
17 mirror_info_t *mirrors;
18 int raid_disks;
19 spinlock_t device_lock;
20
21 /* geometry */
 22 int near_copies; /* number of copies laid out raid0 style */
 23 int far_copies; /* number of copies laid out
24 * at large strides across drives
25 */
26 int far_offset; /* far_copies are offset by 1 stripe
27 * instead of many
28 */
29 int copies; /* near_copies * far_copies.
30 * must be <= raid_disks
31 */
32 sector_t stride; /* distance between far copies.
33 * This is size / far_copies unless
34 * far_offset, in which case it is
35 * 1 stripe.
36 */
37
38 int chunk_shift; /* shift from chunks to sectors */
39 sector_t chunk_mask;
40
41 struct list_head retry_list;
42 /* queue pending writes and submit them on unplug */
43 struct bio_list pending_bio_list;
44
45
46 spinlock_t resync_lock;
47 int nr_pending;
48 int nr_waiting;
49 int nr_queued;
50 int barrier;
51 sector_t next_resync;
52 int fullsync; /* set to 1 if a full sync is needed,
53 * (fresh device added).
54 * Cleared when a sync completes.
55 */
56
57 wait_queue_head_t wait_barrier;
58
59 mempool_t *r10bio_pool;
60 mempool_t *r10buf_pool;
61 struct page *tmppage;
62};
63
64typedef struct r10_private_data_s conf_t;
65
66/*
67 * this is the only point in the RAID code where we violate
68 * C type safety. mddev->private is an 'opaque' pointer.
69 */
70#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
71
72/*
73 * this is our 'private' RAID10 bio.
74 *
75 * it contains information about what kind of IO operations were started
76 * for this RAID10 operation, and about their status:
77 */
78
79struct r10bio_s {
80 atomic_t remaining; /* 'have we finished' count,
81 * used from IRQ handlers
82 */
83 sector_t sector; /* virtual sector number */
84 int sectors;
85 unsigned long state;
86 mddev_t *mddev;
87 /*
88 * original bio going to /dev/mdx
89 */
90 struct bio *master_bio;
91 /*
92 * if the IO is in READ direction, then this is where we read
93 */
94 int read_slot;
95
96 struct list_head retry_list;
97 /*
98 * if the IO is in WRITE direction, then multiple bios are used,
99 * one for each copy.
100 * When resyncing we also use one for each copy.
101 * When reconstructing, we use 2 bios, one for read, one for write.
102 * We choose the number when they are allocated.
103 */
104 struct {
105 struct bio *bio;
106 sector_t addr;
107 int devnum;
108 } devs[0];
109};
110
111/* when we get a read error on a read-only array, we redirect to another
112 * device without failing the first device, or trying to over-write to
113 * correct the read error. To keep track of bad blocks on a per-bio
114 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
115 */
116#define IO_BLOCKED ((struct bio*)1)
117
118/* bits for r10bio.state */
119#define R10BIO_Uptodate 0
120#define R10BIO_IsSync 1
121#define R10BIO_IsRecover 2
122#define R10BIO_Degraded 3
123#endif
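The raid10 geometry fields are interdependent: copies must equal near_copies * far_copies and may not exceed raid_disks, and stride is the per-device distance between far copies (a single stripe when far_offset is set). A small consistency-check sketch built only from those stated relationships; the helper name is invented.

#include <linux/errno.h>

/* Sketch: verify a conf_t's geometry against the invariants documented
 * in the structure above.
 */
static int raid10_check_geometry_sketch(conf_t *conf)
{
	if (conf->copies != conf->near_copies * conf->far_copies)
		return -EINVAL;
	if (conf->copies > conf->raid_disks)
		return -EINVAL;
	return 0;
}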
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
deleted file mode 100644
index 3b2672792457..000000000000
--- a/include/linux/raid/raid5.h
+++ /dev/null
@@ -1,402 +0,0 @@
1#ifndef _RAID5_H
2#define _RAID5_H
3
4#include <linux/raid/md.h>
5#include <linux/raid/xor.h>
6
7/*
8 *
9 * Each stripe contains one buffer per disc. Each buffer can be in
10 * one of a number of states stored in "flags". Changes between
11 * these states happen *almost* exclusively under a per-stripe
12 * spinlock. Some very specific changes can happen in bi_end_io, and
13 * these are not protected by the spin lock.
14 *
15 * The flag bits that are used to represent these states are:
16 * R5_UPTODATE and R5_LOCKED
17 *
18 * State Empty == !UPTODATE, !LOCK
19 * We have no data, and there is no active request
20 * State Want == !UPTODATE, LOCK
21 * A read request is being submitted for this block
22 * State Dirty == UPTODATE, LOCK
23 * Some new data is in this buffer, and it is being written out
24 * State Clean == UPTODATE, !LOCK
25 * We have valid data which is the same as on disc
26 *
27 * The possible state transitions are:
28 *
29 * Empty -> Want - on read or write to get old data for parity calc
30 * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE)
31 * Empty -> Clean - on compute_block when computing a block for failed drive
32 * Want -> Empty - on failed read
33 * Want -> Clean - on successful completion of read request
34 * Dirty -> Clean - on successful completion of write request
35 * Dirty -> Clean - on failed write
36 * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
37 *
 38 * The Want->Empty, Want->Clean, Dirty->Clean transitions
39 * all happen in b_end_io at interrupt time.
40 * Each sets the Uptodate bit before releasing the Lock bit.
41 * This leaves one multi-stage transition:
42 * Want->Dirty->Clean
43 * This is safe because thinking that a Clean buffer is actually dirty
44 * will at worst delay some action, and the stripe will be scheduled
45 * for attention after the transition is complete.
46 *
47 * There is one possibility that is not covered by these states. That
48 * is if one drive has failed and there is a spare being rebuilt. We
49 * can't distinguish between a clean block that has been generated
50 * from parity calculations, and a clean block that has been
51 * successfully written to the spare ( or to parity when resyncing).
 52 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
53 * is set whenever a write is scheduled to the spare, or to the parity
54 * disc if there is no spare. A sync request clears this bit, and
55 * when we find it set with no buffers locked, we know the sync is
56 * complete.
57 *
58 * Buffers for the md device that arrive via make_request are attached
59 * to the appropriate stripe in one of two lists linked on b_reqnext.
60 * One list (bh_read) for read requests, one (bh_write) for write.
61 * There should never be more than one buffer on the two lists
62 * together, but we are not guaranteed of that so we allow for more.
63 *
64 * If a buffer is on the read list when the associated cache buffer is
 65 * Uptodate, the data is copied into the read buffer and its b_end_io
66 * routine is called. This may happen in the end_request routine only
67 * if the buffer has just successfully been read. end_request should
68 * remove the buffers from the list and then set the Uptodate bit on
69 * the buffer. Other threads may do this only if they first check
70 * that the Uptodate bit is set. Once they have checked that they may
71 * take buffers off the read queue.
72 *
73 * When a buffer on the write list is committed for write it is copied
74 * into the cache buffer, which is then marked dirty, and moved onto a
75 * third list, the written list (bh_written). Once both the parity
76 * block and the cached buffer are successfully written, any buffer on
77 * a written list can be returned with b_end_io.
78 *
79 * The write list and read list both act as fifos. The read list is
80 * protected by the device_lock. The write and written lists are
81 * protected by the stripe lock. The device_lock, which can be
 82 * claimed while the stripe lock is held, is only for list
83 * manipulations and will only be held for a very short time. It can
84 * be claimed from interrupts.
85 *
86 *
87 * Stripes in the stripe cache can be on one of two lists (or on
88 * neither). The "inactive_list" contains stripes which are not
89 * currently being used for any request. They can freely be reused
90 * for another stripe. The "handle_list" contains stripes that need
91 * to be handled in some way. Both of these are fifo queues. Each
92 * stripe is also (potentially) linked to a hash bucket in the hash
93 * table so that it can be found by sector number. Stripes that are
94 * not hashed must be on the inactive_list, and will normally be at
95 * the front. All stripes start life this way.
96 *
97 * The inactive_list, handle_list and hash bucket lists are all protected by the
98 * device_lock.
99 * - stripes on the inactive_list never have their stripe_lock held.
100 * - stripes have a reference counter. If count==0, they are on a list.
101 * - If a stripe might need handling, STRIPE_HANDLE is set.
102 * - When refcount reaches zero, then if STRIPE_HANDLE it is put on
103 * handle_list else inactive_list
104 *
105 * This, combined with the fact that STRIPE_HANDLE is only ever
106 * cleared while a stripe has a non-zero count means that if the
107 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 108 * handle_list and if refcount is 0 and STRIPE_HANDLE is not set, then
109 * the stripe is on inactive_list.
110 *
111 * The possible transitions are:
112 * activate an unhashed/inactive stripe (get_active_stripe())
113 * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
114 * activate a hashed, possibly active stripe (get_active_stripe())
115 * lockdev check-hash if(!cnt++)unlink-stripe unlockdev
116 * attach a request to an active stripe (add_stripe_bh())
117 * lockdev attach-buffer unlockdev
118 * handle a stripe (handle_stripe())
119 * lockstripe clrSTRIPE_HANDLE ...
120 * (lockdev check-buffers unlockdev) ..
121 * change-state ..
122 * record io/ops needed unlockstripe schedule io/ops
123 * release an active stripe (release_stripe())
124 * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
125 *
 126 * The refcount counts each thread that has activated the stripe,
127 * plus raid5d if it is handling it, plus one for each active request
128 * on a cached buffer, and plus one if the stripe is undergoing stripe
129 * operations.
130 *
131 * Stripe operations are performed outside the stripe lock,
132 * the stripe operations are:
133 * -copying data between the stripe cache and user application buffers
134 * -computing blocks to save a disk access, or to recover a missing block
135 * -updating the parity on a write operation (reconstruct write and
136 * read-modify-write)
137 * -checking parity correctness
138 * -running i/o to disk
139 * These operations are carried out by raid5_run_ops which uses the async_tx
140 * api to (optionally) offload operations to dedicated hardware engines.
141 * When requesting an operation handle_stripe sets the pending bit for the
142 * operation and increments the count. raid5_run_ops is then run whenever
143 * the count is non-zero.
144 * There are some critical dependencies between the operations that prevent some
145 * from being requested while another is in flight.
146 * 1/ Parity check operations destroy the in cache version of the parity block,
147 * so we prevent parity dependent operations like writes and compute_blocks
148 * from starting while a check is in progress. Some dma engines can perform
149 * the check without damaging the parity block, in these cases the parity
150 * block is re-marked up to date (assuming the check was successful) and is
151 * not re-read from disk.
152 * 2/ When a write operation is requested we immediately lock the affected
153 * blocks, and mark them as not up to date. This causes new read requests
154 * to be held off, as well as parity checks and compute block operations.
155 * 3/ Once a compute block operation has been requested handle_stripe treats
 156 * that block as if it is up to date. raid5_run_ops guarantees that any
157 * operation that is dependent on the compute block result is initiated after
158 * the compute block completes.
159 */
160
161/*
162 * Operations state - intermediate states that are visible outside of sh->lock
163 * In general _idle indicates nothing is running, _run indicates a data
164 * processing operation is active, and _result means the data processing result
165 * is stable and can be acted upon. For simple operations like biofill and
166 * compute that only have an _idle and _run state they are indicated with
167 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
168 */
169/**
170 * enum check_states - handles syncing / repairing a stripe
171 * @check_state_idle - check operations are quiesced
172 * @check_state_run - check operation is running
173 * @check_state_result - set outside lock when check result is valid
174 * @check_state_compute_run - check failed and we are repairing
175 * @check_state_compute_result - set outside lock when compute result is valid
176 */
177enum check_states {
178 check_state_idle = 0,
179 check_state_run, /* parity check */
180 check_state_check_result,
181 check_state_compute_run, /* parity repair */
182 check_state_compute_result,
183};
184
185/**
186 * enum reconstruct_states - handles writing or expanding a stripe
187 */
188enum reconstruct_states {
189 reconstruct_state_idle = 0,
190 reconstruct_state_prexor_drain_run, /* prexor-write */
191 reconstruct_state_drain_run, /* write */
192 reconstruct_state_run, /* expand */
193 reconstruct_state_prexor_drain_result,
194 reconstruct_state_drain_result,
195 reconstruct_state_result,
196};
197
198struct stripe_head {
199 struct hlist_node hash;
200 struct list_head lru; /* inactive_list or handle_list */
201 struct raid5_private_data *raid_conf;
202 sector_t sector; /* sector of this row */
203 int pd_idx; /* parity disk index */
204 unsigned long state; /* state flags */
205 atomic_t count; /* nr of active thread/requests */
206 spinlock_t lock;
207 int bm_seq; /* sequence number for bitmap flushes */
208 int disks; /* disks in stripe */
209 enum check_states check_state;
210 enum reconstruct_states reconstruct_state;
211 /* stripe_operations
212 * @target - STRIPE_OP_COMPUTE_BLK target
213 */
214 struct stripe_operations {
215 int target;
216 u32 zero_sum_result;
217 } ops;
218 struct r5dev {
219 struct bio req;
220 struct bio_vec vec;
221 struct page *page;
222 struct bio *toread, *read, *towrite, *written;
223 sector_t sector; /* sector of this page */
224 unsigned long flags;
 225 } dev[1]; /* allocated with extra space depending on RAID geometry */
226};
227
228/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
229 * for handle_stripe. It is only valid under spin_lock(sh->lock);
230 */
231struct stripe_head_state {
232 int syncing, expanding, expanded;
233 int locked, uptodate, to_read, to_write, failed, written;
234 int to_fill, compute, req_compute, non_overwrite;
235 int failed_num;
236 unsigned long ops_request;
237};
238
239/* r6_state - extra state data only relevant to r6 */
240struct r6_state {
241 int p_failed, q_failed, qd_idx, failed_num[2];
242};
243
244/* Flags */
245#define R5_UPTODATE 0 /* page contains current data */
246#define R5_LOCKED 1 /* IO has been submitted on "req" */
247#define R5_OVERWRITE 2 /* towrite covers whole page */
248/* and some that are internal to handle_stripe */
249#define R5_Insync 3 /* rdev && rdev->in_sync at start */
250#define R5_Wantread 4 /* want to schedule a read */
251#define R5_Wantwrite 5
252#define R5_Overlap 7 /* There is a pending overlapping request on this block */
253#define R5_ReadError 8 /* seen a read error here recently */
254#define R5_ReWrite 9 /* have tried to over-write the readerror */
255
256#define R5_Expanded 10 /* This block now has post-expand data */
257#define R5_Wantcompute 11 /* compute_block in progress treat as
258 * uptodate
259 */
260#define R5_Wantfill 12 /* dev->toread contains a bio that needs
261 * filling
262 */
263#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
264/*
265 * Write method
266 */
267#define RECONSTRUCT_WRITE 1
268#define READ_MODIFY_WRITE 2
269/* not a write method, but a compute_parity mode */
270#define CHECK_PARITY 3
271
272/*
273 * Stripe state
274 */
275#define STRIPE_HANDLE 2
276#define STRIPE_SYNCING 3
277#define STRIPE_INSYNC 4
278#define STRIPE_PREREAD_ACTIVE 5
279#define STRIPE_DELAYED 6
280#define STRIPE_DEGRADED 7
281#define STRIPE_BIT_DELAY 8
282#define STRIPE_EXPANDING 9
283#define STRIPE_EXPAND_SOURCE 10
284#define STRIPE_EXPAND_READY 11
285#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
286#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
287#define STRIPE_BIOFILL_RUN 14
288#define STRIPE_COMPUTE_RUN 15
289/*
290 * Operation request flags
291 */
292#define STRIPE_OP_BIOFILL 0
293#define STRIPE_OP_COMPUTE_BLK 1
294#define STRIPE_OP_PREXOR 2
295#define STRIPE_OP_BIODRAIN 3
296#define STRIPE_OP_POSTXOR 4
297#define STRIPE_OP_CHECK 5
298
299/*
300 * Plugging:
301 *
302 * To improve write throughput, we need to delay the handling of some
303 * stripes until there has been a chance that several write requests
304 * for the one stripe have all been collected.
305 * In particular, any write request that would require pre-reading
306 * is put on a "delayed" queue until there are no stripes currently
307 * in a pre-read phase. Further, if the "delayed" queue is empty when
308 * a stripe is put on it then we "plug" the queue and do not process it
309 * until an unplug call is made. (the unplug_io_fn() is called).
310 *
311 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
312 * it to the count of prereading stripes.
313 * When write is initiated, or the stripe refcnt == 0 (just in case) we
314 * clear the PREREAD_ACTIVE flag and decrement the count
315 * Whenever the 'handle' queue is empty and the device is not plugged, we
316 * move any strips from delayed to handle and clear the DELAYED flag and set
317 * PREREAD_ACTIVE.
318 * In stripe_handle, if we find pre-reading is necessary, we do it if
319 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
 320 * HANDLE gets cleared if stripe_handle leaves nothing locked.
321 */
322
323
324struct disk_info {
325 mdk_rdev_t *rdev;
326};
327
328struct raid5_private_data {
329 struct hlist_head *stripe_hashtbl;
330 mddev_t *mddev;
331 struct disk_info *spare;
332 int chunk_size, level, algorithm;
333 int max_degraded;
334 int raid_disks;
335 int max_nr_stripes;
336
337 /* used during an expand */
338 sector_t expand_progress; /* MaxSector when no expand happening */
 339 sector_t expand_lo; /* from here up to expand_progress is out-of-bounds
340 * as we haven't flushed the metadata yet
341 */
342 int previous_raid_disks;
343
344 struct list_head handle_list; /* stripes needing handling */
345 struct list_head hold_list; /* preread ready stripes */
346 struct list_head delayed_list; /* stripes that have plugged requests */
347 struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
348 struct bio *retry_read_aligned; /* currently retrying aligned bios */
349 struct bio *retry_read_aligned_list; /* aligned bios retry list */
350 atomic_t preread_active_stripes; /* stripes with scheduled io */
351 atomic_t active_aligned_reads;
352 atomic_t pending_full_writes; /* full write backlog */
353 int bypass_count; /* bypassed prereads */
354 int bypass_threshold; /* preread nice */
355 struct list_head *last_hold; /* detect hold_list promotions */
356
357 atomic_t reshape_stripes; /* stripes with pending writes for reshape */
358 /* unfortunately we need two cache names as we temporarily have
359 * two caches.
360 */
361 int active_name;
362 char cache_name[2][20];
363 struct kmem_cache *slab_cache; /* for allocating stripes */
364
365 int seq_flush, seq_write;
366 int quiesce;
367
368 int fullsync; /* set to 1 if a full sync is needed,
369 * (fresh device added).
370 * Cleared when a sync completes.
371 */
372
373 struct page *spare_page; /* Used when checking P/Q in raid6 */
374
375 /*
376 * Free stripes pool
377 */
378 atomic_t active_stripes;
379 struct list_head inactive_list;
380 wait_queue_head_t wait_for_stripe;
381 wait_queue_head_t wait_for_overlap;
382 int inactive_blocked; /* release of inactive stripes blocked,
383 * waiting for 25% to be free
384 */
385 int pool_size; /* number of disks in stripeheads in pool */
386 spinlock_t device_lock;
387 struct disk_info *disks;
388};
389
390typedef struct raid5_private_data raid5_conf_t;
391
392#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
393
394/*
395 * Our supported algorithms
396 */
397#define ALGORITHM_LEFT_ASYMMETRIC 0
398#define ALGORITHM_RIGHT_ASYMMETRIC 1
399#define ALGORITHM_LEFT_SYMMETRIC 2
400#define ALGORITHM_RIGHT_SYMMETRIC 3
401
402#endif
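The list rules spelled out in the long comment (a stripe with count == 0 sits on exactly one of handle_list or inactive_list, chosen by STRIPE_HANDLE) reduce to a short release path. A simplified sketch of that logic under device_lock; the real drivers/md/raid5.c version adds more bookkeeping, so treat this as an illustration of the invariant rather than the actual function.

/* Sketch: drop a reference to a stripe_head and park it on the correct
 * list once the count reaches zero, per the rules documented above.
 */
static void release_stripe_sketch(raid5_conf_t *conf, struct stripe_head *sh)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	if (atomic_dec_and_test(&sh->count)) {
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			list_add_tail(&sh->lru, &conf->handle_list);
			md_wakeup_thread(conf->mddev->thread);
		} else {
			atomic_dec(&conf->active_stripes);
			list_add_tail(&sh->lru, &conf->inactive_list);
			wake_up(&conf->wait_for_stripe);
		}
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}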
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 3e120587eada..5a210959e3f8 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -1,8 +1,6 @@
1#ifndef _XOR_H 1#ifndef _XOR_H
2#define _XOR_H 2#define _XOR_H
3 3
4#include <linux/raid/md.h>
5
6#define MAX_XOR_BLOCKS 4 4#define MAX_XOR_BLOCKS 4
7 5
8extern void xor_blocks(unsigned int count, unsigned int bytes, 6extern void xor_blocks(unsigned int count, unsigned int bytes,
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 80044a4f3ab9..bfd92e1e5d2c 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -36,7 +36,6 @@
36#include <linux/cache.h> 36#include <linux/cache.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/threads.h> 38#include <linux/threads.h>
39#include <linux/percpu.h>
40#include <linux/cpumask.h> 39#include <linux/cpumask.h>
41#include <linux/seqlock.h> 40#include <linux/seqlock.h>
42 41
@@ -108,25 +107,14 @@ struct rcu_data {
108 struct rcu_head barrier; 107 struct rcu_head barrier;
109}; 108};
110 109
111DECLARE_PER_CPU(struct rcu_data, rcu_data);
112DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
113
114/* 110/*
115 * Increment the quiescent state counter. 111 * Increment the quiescent state counter.
116 * The counter is a bit degenerated: We do not need to know 112 * The counter is a bit degenerated: We do not need to know
117 * how many quiescent states passed, just if there was at least 113 * how many quiescent states passed, just if there was at least
118 * one since the start of the grace period. Thus just a flag. 114 * one since the start of the grace period. Thus just a flag.
119 */ 115 */
120static inline void rcu_qsctr_inc(int cpu) 116extern void rcu_qsctr_inc(int cpu);
121{ 117extern void rcu_bh_qsctr_inc(int cpu);
122 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
123 rdp->passed_quiesc = 1;
124}
125static inline void rcu_bh_qsctr_inc(int cpu)
126{
127 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
128 rdp->passed_quiesc = 1;
129}
130 118
131extern int rcu_pending(int cpu); 119extern int rcu_pending(int cpu);
132extern int rcu_needs_cpu(int cpu); 120extern int rcu_needs_cpu(int cpu);
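Note the two helpers are not removed, only un-inlined: the per-CPU rcu_data access that forced this header to include <linux/percpu.h> now lives out of line, presumably in kernel/rcuclassic.c. A sketch of what the moved definition amounts to, reconstructed from the inline body deleted above.

/* Out-of-line form of the inline removed from the header; the body is
 * unchanged, only its home moves so the header can drop percpu.h.
 */
void rcu_qsctr_inc(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	rdp->passed_quiesc = 1;
}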
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 528343e6da51..15fbb3ca634d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -36,7 +36,6 @@
36#include <linux/cache.h> 36#include <linux/cache.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/threads.h> 38#include <linux/threads.h>
39#include <linux/percpu.h>
40#include <linux/cpumask.h> 39#include <linux/cpumask.h>
41#include <linux/seqlock.h> 40#include <linux/seqlock.h>
42#include <linux/lockdep.h> 41#include <linux/lockdep.h>
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 74304b4538d8..fce522782ffa 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -36,34 +36,19 @@
36#include <linux/cache.h> 36#include <linux/cache.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/threads.h> 38#include <linux/threads.h>
39#include <linux/percpu.h> 39#include <linux/smp.h>
40#include <linux/cpumask.h> 40#include <linux/cpumask.h>
41#include <linux/seqlock.h> 41#include <linux/seqlock.h>
42 42
43struct rcu_dyntick_sched { 43extern void rcu_qsctr_inc(int cpu);
44 int dynticks; 44static inline void rcu_bh_qsctr_inc(int cpu) { }
45 int dynticks_snap;
46 int sched_qs;
47 int sched_qs_snap;
48 int sched_dynticks_snap;
49};
50
51DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
52
53static inline void rcu_qsctr_inc(int cpu)
54{
55 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
56
57 rdssp->sched_qs++;
58}
59#define rcu_bh_qsctr_inc(cpu)
60 45
61/* 46/*
62 * Someone might want to pass call_rcu_bh as a function pointer. 47 * Someone might want to pass call_rcu_bh as a function pointer.
63 * So this needs to just be a rename and not a macro function. 48 * So this needs to just be a rename and not a macro function.
64 * (no parentheses) 49 * (no parentheses)
65 */ 50 */
66#define call_rcu_bh call_rcu 51#define call_rcu_bh call_rcu
67 52
68/** 53/**
69 * call_rcu_sched - Queue RCU callback for invocation after sched grace period. 54 * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
@@ -117,30 +102,12 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
117struct softirq_action; 102struct softirq_action;
118 103
119#ifdef CONFIG_NO_HZ 104#ifdef CONFIG_NO_HZ
120 105extern void rcu_enter_nohz(void);
121static inline void rcu_enter_nohz(void) 106extern void rcu_exit_nohz(void);
122{ 107#else
123 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); 108# define rcu_enter_nohz() do { } while (0)
124 109# define rcu_exit_nohz() do { } while (0)
125 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ 110#endif
126 __get_cpu_var(rcu_dyntick_sched).dynticks++;
127 WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
128}
129
130static inline void rcu_exit_nohz(void)
131{
132 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
133
134 __get_cpu_var(rcu_dyntick_sched).dynticks++;
135 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
136 WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
137 &rs);
138}
139
140#else /* CONFIG_NO_HZ */
141#define rcu_enter_nohz() do { } while (0)
142#define rcu_exit_nohz() do { } while (0)
143#endif /* CONFIG_NO_HZ */
144 111
145/* 112/*
146 * A context switch is a grace period for rcupreempt synchronize_rcu() 113 * A context switch is a grace period for rcupreempt synchronize_rcu()
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index a722fb67bb2d..0cdda00f2b2a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -33,7 +33,6 @@
33#include <linux/cache.h> 33#include <linux/cache.h>
34#include <linux/spinlock.h> 34#include <linux/spinlock.h>
35#include <linux/threads.h> 35#include <linux/threads.h>
36#include <linux/percpu.h>
37#include <linux/cpumask.h> 36#include <linux/cpumask.h>
38#include <linux/seqlock.h> 37#include <linux/seqlock.h>
39 38
@@ -236,30 +235,8 @@ struct rcu_state {
236#endif /* #ifdef CONFIG_NO_HZ */ 235#endif /* #ifdef CONFIG_NO_HZ */
237}; 236};
238 237
239extern struct rcu_state rcu_state; 238extern void rcu_qsctr_inc(int cpu);
240DECLARE_PER_CPU(struct rcu_data, rcu_data); 239extern void rcu_bh_qsctr_inc(int cpu);
241
242extern struct rcu_state rcu_bh_state;
243DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
244
245/*
246 * Increment the quiescent state counter.
247 * The counter is a bit degenerated: We do not need to know
248 * how many quiescent states passed, just if there was at least
249 * one since the start of the grace period. Thus just a flag.
250 */
251static inline void rcu_qsctr_inc(int cpu)
252{
253 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
254 rdp->passed_quiesc = 1;
255 rdp->passed_quiesc_completed = rdp->completed;
256}
257static inline void rcu_bh_qsctr_inc(int cpu)
258{
259 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
260 rdp->passed_quiesc = 1;
261 rdp->passed_quiesc_completed = rdp->completed;
262}
263 240
264extern int rcu_pending(int cpu); 241extern int rcu_pending(int cpu);
265extern int rcu_needs_cpu(int cpu); 242extern int rcu_needs_cpu(int cpu);
diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h
index e84b0a9feda5..a6d014005d49 100644
--- a/include/linux/regulator/bq24022.h
+++ b/include/linux/regulator/bq24022.h
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13struct regulator_init_data;
14
13/** 15/**
14 * bq24022_mach_info - platform data for bq24022 16 * bq24022_mach_info - platform data for bq24022
15 * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging 17 * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging
@@ -18,4 +20,5 @@
18struct bq24022_mach_info { 20struct bq24022_mach_info {
19 int gpio_nce; 21 int gpio_nce;
20 int gpio_iset2; 22 int gpio_iset2;
23 struct regulator_init_data *init_data;
21}; 24};
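With the new init_data member, board code registering a bq24022 passes its regulator constraints through the platform data rather than relying on driver defaults. A board-file sketch; the GPIO numbers and current limit are invented, and the constraint fields come from linux/regulator/machine.h rather than anything shown here.

static struct regulator_init_data bq24022_init_sketch = {
	.constraints = {
		.max_uA		= 500000,			/* illustrative limit */
		.valid_ops_mask	= REGULATOR_CHANGE_CURRENT,
	},
};

static struct bq24022_mach_info bq24022_info_sketch = {
	.gpio_nce	= 72,		/* hypothetical GPIO wiring */
	.gpio_iset2	= 96,
	.init_data	= &bq24022_init_sketch,
};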
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 801bf77ff4e2..277f4b964df5 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -88,6 +88,7 @@
88 * FAIL Regulator output has failed. 88 * FAIL Regulator output has failed.
89 * OVER_TEMP Regulator over temp. 89 * OVER_TEMP Regulator over temp.
90 * FORCE_DISABLE Regulator shut down by software. 90 * FORCE_DISABLE Regulator shut down by software.
91 * VOLTAGE_CHANGE Regulator voltage changed.
91 * 92 *
92 * NOTE: These events can be OR'ed together when passed into handler. 93 * NOTE: These events can be OR'ed together when passed into handler.
93 */ 94 */
@@ -98,6 +99,7 @@
98#define REGULATOR_EVENT_FAIL 0x08 99#define REGULATOR_EVENT_FAIL 0x08
99#define REGULATOR_EVENT_OVER_TEMP 0x10 100#define REGULATOR_EVENT_OVER_TEMP 0x10
100#define REGULATOR_EVENT_FORCE_DISABLE 0x20 101#define REGULATOR_EVENT_FORCE_DISABLE 0x20
102#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
101 103
102struct regulator; 104struct regulator;
103 105
@@ -140,6 +142,8 @@ int regulator_bulk_disable(int num_consumers,
140void regulator_bulk_free(int num_consumers, 142void regulator_bulk_free(int num_consumers,
141 struct regulator_bulk_data *consumers); 143 struct regulator_bulk_data *consumers);
142 144
145int regulator_count_voltages(struct regulator *regulator);
146int regulator_list_voltage(struct regulator *regulator, unsigned selector);
143int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); 147int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV);
144int regulator_get_voltage(struct regulator *regulator); 148int regulator_get_voltage(struct regulator *regulator);
145int regulator_set_current_limit(struct regulator *regulator, 149int regulator_set_current_limit(struct regulator *regulator,
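regulator_count_voltages() and regulator_list_voltage() let a consumer enumerate the discrete voltages a supply offers before committing with regulator_set_voltage(). A consumer-side sketch of that pattern, assuming the struct regulator handle was obtained elsewhere (e.g. via regulator_get()); the helper name is invented.

#include <linux/errno.h>
#include <linux/regulator/consumer.h>

/* Sketch: select the lowest supported voltage inside [min_uV, max_uV].
 * Selectors reported as 0 are unusable on this system and are skipped,
 * matching the list_voltage() contract documented in this patch.
 */
static int pick_voltage_sketch(struct regulator *reg, int min_uV, int max_uV)
{
	int i, n = regulator_count_voltages(reg);
	int best = -1;

	for (i = 0; i < n; i++) {
		int uV = regulator_list_voltage(reg, i);

		if (uV <= 0 || uV < min_uV || uV > max_uV)
			continue;
		if (best < 0 || uV < best)
			best = uV;
	}
	return best < 0 ? -EINVAL : regulator_set_voltage(reg, best, best);
}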
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 2dae05705f13..4848d8dacd90 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
  *
- * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -21,25 +21,38 @@
 struct regulator_dev;
 struct regulator_init_data;
 
+enum regulator_status {
+	REGULATOR_STATUS_OFF,
+	REGULATOR_STATUS_ON,
+	REGULATOR_STATUS_ERROR,
+	/* fast/normal/idle/standby are flavors of "on" */
+	REGULATOR_STATUS_FAST,
+	REGULATOR_STATUS_NORMAL,
+	REGULATOR_STATUS_IDLE,
+	REGULATOR_STATUS_STANDBY,
+};
+
 /**
  * struct regulator_ops - regulator operations.
  *
- * This struct describes regulator operations which can be implemented by
- * regulator chip drivers.
- *
- * @enable: Enable the regulator.
- * @disable: Disable the regulator.
+ * @enable: Configure the regulator as enabled.
+ * @disable: Configure the regulator as disabled.
  * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise.
  *
  * @set_voltage: Set the voltage for the regulator within the range specified.
  *               The driver should select the voltage closest to min_uV.
  * @get_voltage: Return the currently configured voltage for the regulator.
+ * @list_voltage: Return one of the supported voltages, in microvolts; zero
+ *	if the selector indicates a voltage that is unusable on this system;
+ *	or negative errno.  Selectors range from zero to one less than
+ *	regulator_desc.n_voltages.  Voltages may be reported in any order.
  *
  * @set_current_limit: Configure a limit for a current-limited regulator.
- * @get_current_limit: Get the limit for a current-limited regulator.
+ * @get_current_limit: Get the configured limit for a current-limited regulator.
 *
- * @set_mode: Set the operating mode for the regulator.
- * @get_mode: Get the current operating mode for the regulator.
+ * @get_mode: Get the configured operating mode for the regulator.
+ * @get_status: Return actual (not as-configured) status of regulator, as a
+ *	REGULATOR_STATUS value (or negative errno)
 * @get_optimum_mode: Get the most efficient operating mode for the regulator
 *                    when running with the specified parameters.
 *
@@ -51,9 +64,15 @@ struct regulator_init_data;
 *	suspended.
 * @set_suspend_mode: Set the operating mode for the regulator when the
 *	system is suspended.
+ *
+ * This struct describes regulator operations which can be implemented by
+ * regulator chip drivers.
 */
 struct regulator_ops {
 
+	/* enumerate supported voltages */
+	int (*list_voltage) (struct regulator_dev *, unsigned selector);
+
	/* get/set regulator voltage */
	int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV);
	int (*get_voltage) (struct regulator_dev *);
@@ -72,6 +91,13 @@ struct regulator_ops {
	int (*set_mode) (struct regulator_dev *, unsigned int mode);
	unsigned int (*get_mode) (struct regulator_dev *);
 
+	/* report regulator status ... most other accessors report
+	 * control inputs, this reports results of combining inputs
+	 * from Linux (and other sources) with the actual load.
+	 * returns REGULATOR_STATUS_* or negative errno.
+	 */
+	int (*get_status)(struct regulator_dev *);
+
	/* get most efficient regulator operating mode for load */
	unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV,
					  int output_uV, int load_uA);
@@ -106,6 +132,7 @@ enum regulator_type {
 *
 * @name: Identifying name for the regulator.
 * @id: Numerical identifier for the regulator.
+ * @n_voltages: Number of selectors available for ops.list_voltage().
 * @ops: Regulator operations table.
 * @irq: Interrupt number for the regulator.
 * @type: Indicates if the regulator is a voltage or current regulator.
@@ -114,14 +141,48 @@ enum regulator_type {
 struct regulator_desc {
	const char *name;
	int id;
+	unsigned n_voltages;
	struct regulator_ops *ops;
	int irq;
	enum regulator_type type;
	struct module *owner;
 };
 
+/*
+ * struct regulator_dev
+ *
+ * Voltage / Current regulator class device. One for each
+ * regulator.
+ *
+ * This should *not* be used directly by anything except the regulator
+ * core and notification injection (which should take the mutex and do
+ * no other direct access).
+ */
+struct regulator_dev {
+	struct regulator_desc *desc;
+	int use_count;
+
+	/* lists we belong to */
+	struct list_head list; /* list of all regulators */
+	struct list_head slist; /* list of supplied regulators */
+
+	/* lists we own */
+	struct list_head consumer_list; /* consumers we supply */
+	struct list_head supply_list; /* regulators we supply */
+
+	struct blocking_notifier_head notifier;
+	struct mutex mutex; /* consumer lock */
+	struct module *owner;
+	struct device dev;
+	struct regulation_constraints *constraints;
+	struct regulator_dev *supply;	/* for tree */
+
+	void *reg_data;		/* regulator_dev data */
+};
+
 struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
-	struct device *dev, void *driver_data);
+	struct device *dev, struct regulator_init_data *init_data,
+	void *driver_data);
 void regulator_unregister(struct regulator_dev *rdev);
 
 int regulator_notifier_call_chain(struct regulator_dev *rdev,
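The hunks above add voltage enumeration (n_voltages plus list_voltage()), a get_status() callback returning REGULATOR_STATUS_* values, and an init_data parameter to regulator_register(). Below is a minimal, hedged sketch of how a chip driver might use them; the single 3.3V voltage table, the vreg3v3_* names and the probe wiring are illustrative assumptions and are not part of this diff.

/* Illustrative sketch only: a one-voltage regulator using the new
 * list_voltage()/get_status() hooks and the 4-argument regulator_register().
 * All vreg3v3_* names are made up for this example.
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>

static int vreg3v3_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
	return selector == 0 ? 3300000 : -EINVAL;	/* microvolts */
}

static int vreg3v3_get_status(struct regulator_dev *rdev)
{
	/* a real driver would read a status register here */
	return REGULATOR_STATUS_NORMAL;
}

static struct regulator_ops vreg3v3_ops = {
	.list_voltage	= vreg3v3_list_voltage,
	.get_status	= vreg3v3_get_status,
};

static struct regulator_desc vreg3v3_desc = {
	.name		= "vreg3v3",
	.id		= 0,
	.n_voltages	= 1,		/* selectors run 0 .. n_voltages - 1 */
	.ops		= &vreg3v3_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
};

static int vreg3v3_probe(struct platform_device *pdev)
{
	/* init_data is now passed straight through from the board code */
	struct regulator_dev *rdev =
		regulator_register(&vreg3v3_desc, &pdev->dev,
				   pdev->dev.platform_data, NULL);

	return IS_ERR(rdev) ? PTR_ERR(rdev) : 0;
}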
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 1387a5d2190e..91b4da31f1b5 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -14,9 +14,12 @@
 #ifndef __REGULATOR_FIXED_H
 #define __REGULATOR_FIXED_H
 
+struct regulator_init_data;
+
 struct fixed_voltage_config {
	const char *supply_name;
	int microvolts;
+	struct regulator_init_data *init_data;
 };
 
 #endif
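With the new init_data member, board code can hand the fixed regulator its constraints directly. A hedged sketch follows; the "reg-fixed-voltage" platform-device name and the surrounding board structures are assumptions about the fixed-voltage driver of this era and are not shown in the hunk above.

/* Sketch: board-level description of a fixed 1.8V rail.
 * Assumes the "reg-fixed-voltage" platform driver name; verify against
 * drivers/regulator/fixed.c in your tree. */
#include <linux/platform_device.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/fixed.h>

static struct regulator_init_data board_vdd18_init_data = {
	.constraints = {
		.always_on = 1,		/* never disabled */
		.boot_on   = 1,		/* enabled when constraints are applied */
	},
};

static struct fixed_voltage_config board_vdd18_config = {
	.supply_name = "vdd_1v8",
	.microvolts  = 1800000,
	.init_data   = &board_vdd18_init_data,	/* new member added above */
};

static struct platform_device board_vdd18_device = {
	.name = "reg-fixed-voltage",
	.id   = 0,
	.dev  = {
		.platform_data = &board_vdd18_config,
	},
};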
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 3794773b23d2..bac64fa390f2 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
  *
- * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com>
+ * Author: Liam Girdwood <lrg@slimlogic.co.uk>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -73,7 +73,9 @@ struct regulator_state {
 *
 * @always_on: Set if the regulator should never be disabled.
 * @boot_on: Set if the regulator is enabled when the system is initially
- *           started.
+ *           started.  If the regulator is not enabled by the hardware or
+ *           bootloader then it will be enabled when the constraints are
+ *           applied.
 * @apply_uV: Apply the voltage constraint when initialising.
 *
 * @input_uV: Input voltage for regulator when supplied by another regulator.
@@ -83,6 +85,7 @@ struct regulator_state {
 * @state_standby: State for regulator when system is suspended in standby
 *                 mode.
 * @initial_state: Suspend state to set by default.
+ * @initial_mode: Mode to set at startup.
 */
 struct regulation_constraints {
 
@@ -111,6 +114,9 @@ struct regulation_constraints {
	struct regulator_state state_standby;
	suspend_state_t initial_state;	/* suspend state to set at init */
 
+	/* mode to set on startup */
+	unsigned int initial_mode;
+
	/* constriant flags */
	unsigned always_on:1;	/* regulator never off when system is on */
	unsigned boot_on:1;	/* bootloader/firmware enabled regulator */
@@ -160,4 +166,6 @@ struct regulator_init_data {
 
 int regulator_suspend_prepare(suspend_state_t state);
 
+void regulator_has_full_constraints(void);
+
 #endif
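The machine.h hunks add an initial_mode constraint and a regulator_has_full_constraints() call for boards whose supplies are completely described. A hedged sketch of how board code might use both is below; REGULATOR_MODE_NORMAL comes from linux/regulator/consumer.h and the valid_modes_mask note are assumptions, not shown in this diff.

/* Sketch: board init code declaring full regulator constraints.
 * Only boot_on/initial_mode and regulator_has_full_constraints() are taken
 * from the hunks above; a real board would usually also fill in
 * valid_modes_mask and voltage limits. */
#include <linux/init.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/machine.h>

static struct regulator_init_data board_vcore_init_data = {
	.constraints = {
		.boot_on      = 1,			/* left on by the bootloader */
		.initial_mode = REGULATOR_MODE_NORMAL,	/* applied at startup */
	},
};

static void __init board_regulator_init(void)
{
	/* ... register the PMIC / regulator platform devices here ... */

	/* every supply on this board is described, so the core may
	 * power down anything no consumer ever claims */
	regulator_has_full_constraints();
}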
diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h
index fe00f781a622..8cc65757e47a 100644
--- a/include/linux/reiserfs_acl.h
+++ b/include/linux/reiserfs_acl.h
@@ -49,13 +49,12 @@ static inline int reiserfs_acl_count(size_t size)
 #ifdef CONFIG_REISERFS_FS_POSIX_ACL
 struct posix_acl *reiserfs_get_acl(struct inode *inode, int type);
 int reiserfs_acl_chmod(struct inode *inode);
-int reiserfs_inherit_default_acl(struct inode *dir, struct dentry *dentry,
+int reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
+				 struct inode *dir, struct dentry *dentry,
				 struct inode *inode);
 int reiserfs_cache_default_acl(struct inode *dir);
-extern int reiserfs_xattr_posix_acl_init(void) __init;
-extern int reiserfs_xattr_posix_acl_exit(void);
-extern struct reiserfs_xattr_handler posix_acl_default_handler;
-extern struct reiserfs_xattr_handler posix_acl_access_handler;
+extern struct xattr_handler reiserfs_posix_acl_default_handler;
+extern struct xattr_handler reiserfs_posix_acl_access_handler;
 
 static inline void reiserfs_init_acl_access(struct inode *inode)
 {
@@ -75,23 +74,14 @@ static inline struct posix_acl *reiserfs_get_acl(struct inode *inode, int type)
	return NULL;
 }
 
-static inline int reiserfs_xattr_posix_acl_init(void)
-{
-	return 0;
-}
-
-static inline int reiserfs_xattr_posix_acl_exit(void)
-{
-	return 0;
-}
-
 static inline int reiserfs_acl_chmod(struct inode *inode)
 {
	return 0;
 }
 
 static inline int
-reiserfs_inherit_default_acl(const struct inode *dir, struct dentry *dentry,
+reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
+			     const struct inode *dir, struct dentry *dentry,
			     struct inode *inode)
 {
	return 0;
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index e356c99f0659..2245c78d5876 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -58,8 +58,6 @@
 #define reiserfs_write_lock( sb ) lock_kernel()
 #define reiserfs_write_unlock( sb ) unlock_kernel()
 
-/* xattr stuff */
-#define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem)
 struct fid;
 
 /* in reading the #defines, it may help to understand that they employ
@@ -104,15 +102,21 @@ struct fid;
 */
 #define REISERFS_DEBUG_CODE 5	/* extra messages to help find/debug errors */
 
-void reiserfs_warning(struct super_block *s, const char *fmt, ...);
+void __reiserfs_warning(struct super_block *s, const char *id,
+			 const char *func, const char *fmt, ...);
+#define reiserfs_warning(s, id, fmt, args...) \
+	 __reiserfs_warning(s, id, __func__, fmt, ##args)
 /* assertions handling */
 
 /** always check a condition and panic if it's false. */
-#define __RASSERT( cond, scond, format, args... )			\
-if( !( cond ) ) 							\
-  reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at "	\
-		  __FILE__ ":%i:%s: " format "\n",			\
-		  in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __func__ , ##args )
+#define __RASSERT(cond, scond, format, args...)			\
+do {									\
+	if (!(cond))							\
+		reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \
			       __FILE__ ":%i:%s: " format "\n",		\
+			       in_interrupt() ? -1 : task_pid_nr(current), \
+			       __LINE__, __func__ , ##args);		\
+} while (0)
 
 #define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args)
 
@@ -196,7 +200,11 @@ struct reiserfs_super_block {
	__le32 s_flags;		/* Right now used only by inode-attributes, if enabled */
	unsigned char s_uuid[16];	/* filesystem unique identifier */
	unsigned char s_label[16];	/* filesystem volume label */
-	char s_unused[88];	/* zero filled by mkreiserfs and
+	__le16 s_mnt_count;		/* Count of mounts since last fsck */
+	__le16 s_max_mnt_count;		/* Maximum mounts before check */
+	__le32 s_lastcheck;		/* Timestamp of last fsck */
+	__le32 s_check_interval;	/* Interval between checks */
+	char s_unused[76];	/* zero filled by mkreiserfs and
				 * reiserfs_convert_objectid_map_v1()
				 * so any additions must be updated
				 * there as well. */
@@ -578,10 +586,8 @@ static inline int uniqueness2type(__u32 uniqueness)
		return TYPE_DIRECT;
	case V1_DIRENTRY_UNIQUENESS:
		return TYPE_DIRENTRY;
-	default:
-		reiserfs_warning(NULL, "vs-500: unknown uniqueness %d",
-				 uniqueness);
	case V1_ANY_UNIQUENESS:
+	default:
		return TYPE_ANY;
	}
 }
@@ -598,9 +604,8 @@ static inline __u32 type2uniqueness(int type)
		return V1_DIRECT_UNIQUENESS;
	case TYPE_DIRENTRY:
		return V1_DIRENTRY_UNIQUENESS;
-	default:
-		reiserfs_warning(NULL, "vs-501: unknown type %d", type);
	case TYPE_ANY:
+	default:
		return V1_ANY_UNIQUENESS;
	}
 }
@@ -712,9 +717,9 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key)
 #define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key)))
 #define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key)))
 
-#define I_K_KEY_IN_ITEM(p_s_ih, p_s_key, n_blocksize) \
-    ( ! COMP_SHORT_KEYS(p_s_ih, p_s_key) && \
-          I_OFF_BYTE_IN_ITEM(p_s_ih, k_offset (p_s_key), n_blocksize) )
+#define I_K_KEY_IN_ITEM(ih, key, n_blocksize) \
+	(!COMP_SHORT_KEYS(ih, key) && \
+	  I_OFF_BYTE_IN_ITEM(ih, k_offset(key), n_blocksize))
 
 /* maximal length of item */
 #define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE)
@@ -770,25 +775,25 @@ struct block_head {
 #define DISK_LEAF_NODE_LEVEL  1	/* Leaf node level. */
 
 /* Given the buffer head of a formatted node, resolve to the block head of that node. */
-#define B_BLK_HEAD(p_s_bh)  ((struct block_head *)((p_s_bh)->b_data))
+#define B_BLK_HEAD(bh)		((struct block_head *)((bh)->b_data))
 /* Number of items that are in buffer. */
-#define B_NR_ITEMS(p_s_bh)  (blkh_nr_item(B_BLK_HEAD(p_s_bh)))
-#define B_LEVEL(p_s_bh)  (blkh_level(B_BLK_HEAD(p_s_bh)))
-#define B_FREE_SPACE(p_s_bh)  (blkh_free_space(B_BLK_HEAD(p_s_bh)))
+#define B_NR_ITEMS(bh)		(blkh_nr_item(B_BLK_HEAD(bh)))
+#define B_LEVEL(bh)		(blkh_level(B_BLK_HEAD(bh)))
+#define B_FREE_SPACE(bh)	(blkh_free_space(B_BLK_HEAD(bh)))
 
-#define PUT_B_NR_ITEMS(p_s_bh,val) do { set_blkh_nr_item(B_BLK_HEAD(p_s_bh),val); } while (0)
-#define PUT_B_LEVEL(p_s_bh,val) do { set_blkh_level(B_BLK_HEAD(p_s_bh),val); } while (0)
-#define PUT_B_FREE_SPACE(p_s_bh,val) do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0)
+#define PUT_B_NR_ITEMS(bh, val)	do { set_blkh_nr_item(B_BLK_HEAD(bh), val); } while (0)
+#define PUT_B_LEVEL(bh, val)	do { set_blkh_level(B_BLK_HEAD(bh), val); } while (0)
+#define PUT_B_FREE_SPACE(bh, val) do { set_blkh_free_space(B_BLK_HEAD(bh), val); } while (0)
 
 /* Get right delimiting key. -- little endian */
-#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))))
+#define B_PRIGHT_DELIM_KEY(bh)	(&(blk_right_delim_key(B_BLK_HEAD(bh))))
 
 /* Does the buffer contain a disk leaf. */
-#define B_IS_ITEMS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL)
+#define B_IS_ITEMS_LEVEL(bh)	(B_LEVEL(bh) == DISK_LEAF_NODE_LEVEL)
 
 /* Does the buffer contain a disk internal node */
-#define B_IS_KEYS_LEVEL(p_s_bh)      (B_LEVEL(p_s_bh) > DISK_LEAF_NODE_LEVEL \
-                                            && B_LEVEL(p_s_bh) <= MAX_HEIGHT)
+#define B_IS_KEYS_LEVEL(bh)	(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \
+					&& B_LEVEL(bh) <= MAX_HEIGHT)
 
 /***************************************************************************/
 /*                             STAT DATA                                   */
@@ -1138,12 +1143,13 @@ struct disk_child {
 #define put_dc_size(dc_p, val)   do { (dc_p)->dc_size = cpu_to_le16(val); } while(0)
 
 /* Get disk child by buffer header and position in the tree node. */
-#define B_N_CHILD(p_s_bh,n_pos)  ((struct disk_child *)\
-((p_s_bh)->b_data+BLKH_SIZE+B_NR_ITEMS(p_s_bh)*KEY_SIZE+DC_SIZE*(n_pos)))
+#define B_N_CHILD(bh, n_pos)  ((struct disk_child *)\
+((bh)->b_data + BLKH_SIZE + B_NR_ITEMS(bh) * KEY_SIZE + DC_SIZE * (n_pos)))
 
 /* Get disk child number by buffer header and position in the tree node. */
-#define B_N_CHILD_NUM(p_s_bh,n_pos) (dc_block_number(B_N_CHILD(p_s_bh,n_pos)))
-#define PUT_B_N_CHILD_NUM(p_s_bh,n_pos, val) (put_dc_block_number(B_N_CHILD(p_s_bh,n_pos), val ))
+#define B_N_CHILD_NUM(bh, n_pos) (dc_block_number(B_N_CHILD(bh, n_pos)))
+#define PUT_B_N_CHILD_NUM(bh, n_pos, val) \
+				(put_dc_block_number(B_N_CHILD(bh, n_pos), val))
 
  /* maximal value of field child_size in structure disk_child */
  /* child size is the combined size of all items and their headers */
@@ -1214,33 +1220,33 @@ struct treepath {
 struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
 
 /* Get path element by path and path position. */
-#define PATH_OFFSET_PELEMENT(p_s_path,n_offset) ((p_s_path)->path_elements +(n_offset))
+#define PATH_OFFSET_PELEMENT(path, n_offset) ((path)->path_elements + (n_offset))
 
 /* Get buffer header at the path by path and path position. */
-#define PATH_OFFSET_PBUFFER(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_buffer)
+#define PATH_OFFSET_PBUFFER(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_buffer)
 
 /* Get position in the element at the path by path and path position. */
-#define PATH_OFFSET_POSITION(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_position)
+#define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position)
 
-#define PATH_PLAST_BUFFER(p_s_path) (PATH_OFFSET_PBUFFER((p_s_path), (p_s_path)->path_length))
+#define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length))
				/* you know, to the person who didn't
				   write this the macro name does not
				   at first suggest what it does.
				   Maybe POSITION_FROM_PATH_END? Or
				   maybe we should just focus on
				   dumping paths... -Hans */
-#define PATH_LAST_POSITION(p_s_path) (PATH_OFFSET_POSITION((p_s_path), (p_s_path)->path_length))
+#define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length))
 
-#define PATH_PITEM_HEAD(p_s_path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_path),PATH_LAST_POSITION(p_s_path))
+#define PATH_PITEM_HEAD(path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path))
 
 /* in do_balance leaf has h == 0 in contrast with path structure,
    where root has level == 0. That is why we need these defines */
-#define PATH_H_PBUFFER(p_s_path, h) PATH_OFFSET_PBUFFER (p_s_path, p_s_path->path_length - (h))	/* tb->S[h] */
+#define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h))	/* tb->S[h] */
 #define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1)	/* tb->F[h] or tb->S[0]->b_parent */
 #define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h))
 #define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1)	/* tb->S[h]->b_item_order */
 
-#define PATH_H_PATH_OFFSET(p_s_path, n_h) ((p_s_path)->path_length - (n_h))
+#define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h))
 
 #define get_last_bh(path) PATH_PLAST_BUFFER(path)
 #define get_ih(path) PATH_PITEM_HEAD(path)
@@ -1470,6 +1476,16 @@ struct buffer_info {
	int bi_position;
 };
 
+static inline struct super_block *sb_from_tb(struct tree_balance *tb)
+{
+	return tb ? tb->tb_sb : NULL;
+}
+
+static inline struct super_block *sb_from_bi(struct buffer_info *bi)
+{
+	return bi ? sb_from_tb(bi->tb) : NULL;
+}
+
 /* there are 4 types of items: stat data, directory item, indirect, direct.
 +-------------------+------------+--------------+------------+
 |                   |  k_offset  | k_uniqueness | mergeable? |
@@ -1520,7 +1536,7 @@ extern struct item_operations *item_ops[TYPE_ANY + 1];
 #define COMP_SHORT_KEYS comp_short_keys
 
 /* number of blocks pointed to by the indirect item */
-#define I_UNFM_NUM(p_s_ih)	( ih_item_len(p_s_ih) / UNFM_P_SIZE )
+#define I_UNFM_NUM(ih)	(ih_item_len(ih) / UNFM_P_SIZE)
 
 /* the used space within the unformatted node corresponding to pos within the item pointed to by ih */
 #define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - ih_free_space(ih) : (size))
@@ -1623,6 +1639,10 @@ struct reiserfs_journal_header {
 #define JOURNAL_MAX_COMMIT_AGE 30
 #define JOURNAL_MAX_TRANS_AGE 30
 #define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9)
+#define JOURNAL_BLOCKS_PER_OBJECT(sb)  (JOURNAL_PER_BALANCE_CNT * 3 + \
+					 2 * (REISERFS_QUOTA_INIT_BLOCKS(sb) + \
+					      REISERFS_QUOTA_TRANS_BLOCKS(sb)))
+
 #ifdef CONFIG_QUOTA
 /* We need to update data and inode (atime) */
 #define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? 2 : 0)
@@ -1697,7 +1717,7 @@ struct reiserfs_transaction_handle {
	int t_refcount;
	int t_blocks_logged;	/* number of blocks this writer has logged */
	int t_blocks_allocated;	/* number of blocks this writer allocated */
-	unsigned long t_trans_id;	/* sanity check, equals the current trans id */
+	unsigned int t_trans_id;	/* sanity check, equals the current trans id */
	void *t_handle_save;	/* save existing current->journal_info */
	unsigned displace_new_blocks:1;	/* if new block allocation occurres, that block
					   should be displaced from others */
@@ -1773,13 +1793,13 @@ int journal_end_sync(struct reiserfs_transaction_handle *, struct super_block *,
 int journal_mark_freed(struct reiserfs_transaction_handle *,
		       struct super_block *, b_blocknr_t blocknr);
 int journal_transaction_should_end(struct reiserfs_transaction_handle *, int);
-int reiserfs_in_journal(struct super_block *p_s_sb, unsigned int bmap_nr,
+int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr,
			int bit_nr, int searchall, b_blocknr_t *next);
 int journal_begin(struct reiserfs_transaction_handle *,
-		  struct super_block *p_s_sb, unsigned long);
+		  struct super_block *sb, unsigned long);
 int journal_join_abort(struct reiserfs_transaction_handle *,
-		       struct super_block *p_s_sb, unsigned long);
-void reiserfs_journal_abort(struct super_block *sb, int errno);
+		       struct super_block *sb, unsigned long);
+void reiserfs_abort_journal(struct super_block *sb, int errno);
 void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...);
 int reiserfs_allocate_list_bitmaps(struct super_block *s,
				   struct reiserfs_list_bitmap *, unsigned int);
@@ -1796,8 +1816,8 @@ int reiserfs_convert_objectid_map_v1(struct super_block *);
 
 /* stree.c */
 int B_IS_IN_TREE(const struct buffer_head *);
-extern void copy_item_head(struct item_head *p_v_to,
-			   const struct item_head *p_v_from);
+extern void copy_item_head(struct item_head *to,
+			   const struct item_head *from);
 
 // first key is in cpu form, second - le
 extern int comp_short_keys(const struct reiserfs_key *le_key,
@@ -1832,20 +1852,20 @@ static inline void copy_key(struct reiserfs_key *to,
	memcpy(to, from, KEY_SIZE);
 }
 
-int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path);
-const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path,
-				    const struct super_block *p_s_sb);
+int comp_items(const struct item_head *stored_ih, const struct treepath *path);
+const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
+				    const struct super_block *sb);
 int search_by_key(struct super_block *, const struct cpu_key *,
		  struct treepath *, int);
 #define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL)
-int search_for_position_by_key(struct super_block *p_s_sb,
-			       const struct cpu_key *p_s_cpu_key,
-			       struct treepath *p_s_search_path);
-extern void decrement_bcount(struct buffer_head *p_s_bh);
-void decrement_counters_in_path(struct treepath *p_s_search_path);
-void pathrelse(struct treepath *p_s_search_path);
+int search_for_position_by_key(struct super_block *sb,
+			       const struct cpu_key *cpu_key,
+			       struct treepath *search_path);
+extern void decrement_bcount(struct buffer_head *bh);
+void decrement_counters_in_path(struct treepath *search_path);
+void pathrelse(struct treepath *search_path);
 int reiserfs_check_path(struct treepath *p);
-void pathrelse_and_restore(struct super_block *s, struct treepath *p_s_search_path);
+void pathrelse_and_restore(struct super_block *s, struct treepath *search_path);
 
 int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
			 struct treepath *path,
@@ -1868,14 +1888,14 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
 int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
			 struct treepath *path,
			 const struct cpu_key *key,
-			 struct inode *inode, struct buffer_head *p_s_un_bh);
+			 struct inode *inode, struct buffer_head *un_bh);
 
 void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
				struct inode *inode, struct reiserfs_key *key);
 int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
-			   struct inode *p_s_inode);
+			   struct inode *inode);
 int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
-			 struct inode *p_s_inode, struct page *,
+			 struct inode *inode, struct page *,
			 int update_timestamps);
 
 #define i_block_size(inode) ((inode)->i_sb->s_blocksize)
@@ -1919,10 +1939,12 @@ void make_le_item_head(struct item_head *ih, const struct cpu_key *key,
		       loff_t offset, int type, int length, int entry_count);
 struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key);
 
+struct reiserfs_security_handle;
 int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
		       struct inode *dir, int mode,
		       const char *symname, loff_t i_size,
-		       struct dentry *dentry, struct inode *inode);
+		       struct dentry *dentry, struct inode *inode,
+		       struct reiserfs_security_handle *security);
 
 void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
			     struct inode *inode, loff_t size);
@@ -1980,7 +2002,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
 #define PROC_INFO_MAX( sb, field, value ) VOID_V
 #define PROC_INFO_INC( sb, field ) VOID_V
 #define PROC_INFO_ADD( sb, field, val ) VOID_V
-#define PROC_INFO_BH_STAT( p_s_sb, p_s_bh, n_node_level ) VOID_V
+#define PROC_INFO_BH_STAT(sb, bh, n_node_level) VOID_V
 #endif
 
 /* dir.c */
@@ -1988,6 +2010,7 @@ extern const struct inode_operations reiserfs_dir_inode_operations;
 extern const struct inode_operations reiserfs_symlink_inode_operations;
 extern const struct inode_operations reiserfs_special_inode_operations;
 extern const struct file_operations reiserfs_dir_operations;
+int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t *);
 
 /* tail_conversion.c */
 int direct2indirect(struct reiserfs_transaction_handle *, struct inode *,
@@ -2004,13 +2027,20 @@ extern const struct address_space_operations reiserfs_address_space_operations;
 
 /* fix_nodes.c */
 
-int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb,
-	      struct item_head *p_s_ins_ih, const void *);
+int fix_nodes(int n_op_mode, struct tree_balance *tb,
+	      struct item_head *ins_ih, const void *);
 void unfix_nodes(struct tree_balance *);
 
 /* prints.c */
-void reiserfs_panic(struct super_block *s, const char *fmt, ...)
+void __reiserfs_panic(struct super_block *s, const char *id,
+		      const char *function, const char *fmt, ...)
    __attribute__ ((noreturn));
+#define reiserfs_panic(s, id, fmt, args...) \
+	__reiserfs_panic(s, id, __func__, fmt, ##args)
+void __reiserfs_error(struct super_block *s, const char *id,
+		      const char *function, const char *fmt, ...);
+#define reiserfs_error(s, id, fmt, args...) \
+	__reiserfs_error(s, id, __func__, fmt, ##args)
 void reiserfs_info(struct super_block *s, const char *fmt, ...);
 void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...);
 void print_indirect_item(struct buffer_head *bh, int item_num);
@@ -2047,7 +2077,7 @@ void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num,
			  int zeros_number);
 void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
			  int pos_in_item, int cut_size);
-void leaf_paste_entries(struct buffer_head *bh, int item_num, int before,
+void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
			int new_entry_count, struct reiserfs_de_head *new_dehs,
			const char *records, int paste_size);
 /* ibalance.c */
@@ -2203,6 +2233,6 @@ long reiserfs_compat_ioctl(struct file *filp,
			   unsigned int cmd, unsigned long arg);
 int reiserfs_unpack(struct inode *inode, struct file *filp);
 
-
 #endif	/* __KERNEL__ */
+
 #endif	/* _LINUX_REISER_FS_H */
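The reiserfs_warning()/reiserfs_panic() changes above turn both into macros that pass an id string and __func__ to double-underscore helpers, and add reiserfs_error() with the same shape, so callers no longer embed the "vs-500:" style prefix in the format string. A hedged sketch of the new calling convention (the surrounding function and the "jdm-20001" id are made up for illustration):

/* Sketch only: shows the (sb, id, fmt, args...) macro form from the hunks
 * above; the helper and the id value are not from this diff. */
#include <linux/errno.h>
#include <linux/reiserfs_fs.h>

static int example_check_block(struct super_block *sb, unsigned long block)
{
	if (block == 0) {
		/* non-fatal: id and __func__ are supplied by the macro */
		reiserfs_warning(sb, "jdm-20001", "block %lu is not valid",
				 block);
		return -EIO;
	}

	RASSERT(block != 1, "block %lu unexpected", block);
	return 0;
}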
diff --git a/include/linux/reiserfs_fs_i.h b/include/linux/reiserfs_fs_i.h
index ce3663fb0101..76360b36ac33 100644
--- a/include/linux/reiserfs_fs_i.h
+++ b/include/linux/reiserfs_fs_i.h
@@ -51,7 +51,7 @@ struct reiserfs_inode_info {
	/* we use these for fsync or O_SYNC to decide which transaction
	** needs to be committed in order for this inode to be properly
	** flushed */
-	unsigned long i_trans_id;
+	unsigned int i_trans_id;
	struct reiserfs_journal_list *i_jl;
	struct mutex i_mmap;
 #ifdef CONFIG_REISERFS_FS_POSIX_ACL
@@ -59,7 +59,7 @@ struct reiserfs_inode_info {
	struct posix_acl *i_acl_default;
 #endif
 #ifdef CONFIG_REISERFS_FS_XATTR
-	struct rw_semaphore xattr_sem;
+	struct rw_semaphore i_xattr_sem;
 #endif
	struct inode vfs_inode;
 };
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index bda6b562a1e0..5621d87c4479 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -14,7 +14,7 @@ typedef enum {
 } reiserfs_super_block_flags;
 
 /* struct reiserfs_super_block accessors/mutators
 * since this is a disk structure, it will always be in
 * little endian format. */
 #define sb_block_count(sbp)        (le32_to_cpu((sbp)->s_v1.s_block_count))
 #define set_sb_block_count(sbp,v)  ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
@@ -73,6 +73,9 @@ typedef enum {
 #define sb_version(sbp)            (le16_to_cpu((sbp)->s_v1.s_version))
 #define set_sb_version(sbp,v)      ((sbp)->s_v1.s_version = cpu_to_le16(v))
 
+#define sb_mnt_count(sbp)	   (le16_to_cpu((sbp)->s_mnt_count))
+#define set_sb_mnt_count(sbp, v)   ((sbp)->s_mnt_count = cpu_to_le16(v))
+
 #define sb_reserved_for_journal(sbp) \
              (le16_to_cpu((sbp)->s_v1.s_reserved_for_journal))
 #define set_sb_reserved_for_journal(sbp,v) \
@@ -80,16 +83,16 @@ typedef enum {
 
 /* LOGGING -- */
 
 /* These all interelate for performance.
 **
 ** If the journal block count is smaller than n transactions, you lose speed.
 ** I don't know what n is yet, I'm guessing 8-16.
 **
 ** typical transaction size depends on the application, how often fsync is
 ** called, and how many metadata blocks you dirty in a 30 second period.
 ** The more small files (<16k) you use, the larger your transactions will
 ** be.
 **
 ** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
 ** to wrap, which slows things down.  If you need high speed meta data updates, the journal should be big enough
 ** to prevent wrapping before dirty meta blocks get to disk.
@@ -153,7 +156,7 @@ struct reiserfs_journal_list {
	atomic_t j_commit_left;
	atomic_t j_older_commits_done;	/* all commits older than this on disk */
	struct mutex j_commit_mutex;
-	unsigned long j_trans_id;
+	unsigned int j_trans_id;
	time_t j_timestamp;
	struct reiserfs_list_bitmap *j_list_bitmap;
	struct buffer_head *j_commit_bh;	/* commit buffer head */
@@ -182,7 +185,7 @@ struct reiserfs_journal {
	int j_1st_reserved_block;	/* first block on s_dev of reserved area journal */
 
	unsigned long j_state;
-	unsigned long j_trans_id;
+	unsigned int j_trans_id;
	unsigned long j_mount_id;
	unsigned long j_start;	/* start of current waiting commit (index into j_ap_blocks) */
	unsigned long j_len;	/* length of current waiting commit */
@@ -223,10 +226,10 @@ struct reiserfs_journal {
	int j_num_work_lists;	/* number that need attention from kreiserfsd */
 
	/* debugging to make sure things are flushed in order */
-	int j_last_flush_id;
+	unsigned int j_last_flush_id;
 
	/* debugging to make sure things are committed in order */
-	int j_last_commit_id;
+	unsigned int j_last_commit_id;
 
	struct list_head j_bitmap_nodes;
	struct list_head j_dirty_buffers;
@@ -239,7 +242,7 @@ struct reiserfs_journal {
 
	struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS];	/* array of bitmaps to record the deleted blocks */
	struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE];	/* hash table for real buffer heads in current trans */
	struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE];	/* hash table for all the real buffer heads in all
										   the transactions */
	struct list_head j_prealloc_list;	/* list of inodes which have preallocated blocks */
	int j_persistent_trans;
@@ -399,10 +402,7 @@ struct reiserfs_sb_info {
	int reserved_blocks;	/* amount of blocks reserved for further allocations */
	spinlock_t bitmap_lock;	/* this lock on now only used to protect reserved_blocks variable */
	struct dentry *priv_root;	/* root of /.reiserfs_priv */
-#ifdef CONFIG_REISERFS_FS_XATTR
	struct dentry *xattr_root;	/* root of /.reiserfs_priv/.xa */
-	struct rw_semaphore xattr_dir_sem;
-#endif
	int j_errno;
 #ifdef CONFIG_QUOTA
	char *s_qf_names[MAXQUOTAS];
@@ -426,7 +426,7 @@ enum reiserfs_mount_options {
				   partition will be dealt with in a
				   manner of 3.5.x */
 
 /* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
 ** reiserfs disks from 3.5.19 or earlier.  99% of the time, this option
 ** is not required.  If the normal autodection code can't determine which
 ** hash to use (because both hashes had the same value for a file)
@@ -451,7 +451,6 @@ enum reiserfs_mount_options {
	REISERFS_NO_UNHASHED_RELOCATION,
	REISERFS_HASHED_RELOCATION,
	REISERFS_ATTRS,
-	REISERFS_XATTRS,
	REISERFS_XATTRS_USER,
	REISERFS_POSIXACL,
	REISERFS_BARRIER_NONE,
@@ -489,7 +488,7 @@ enum reiserfs_mount_options {
 #define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG))
 #define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED))
 #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK))
-#define reiserfs_xattrs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS))
+#define reiserfs_xattrs(s) ((s)->s_xattr != NULL)
 #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER))
 #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL))
 #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s))
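Together with the new s_mnt_count/s_max_mnt_count fields added to struct reiserfs_super_block earlier in this series, the sb_mnt_count()/set_sb_mnt_count() accessors let mount-time code maintain an fsck-style mount counter. A small hedged sketch (the helper name is invented, and SB_DISK_SUPER_BLOCK is assumed to be the existing reiserfs accessor for the in-memory copy of the on-disk super block):

/* Sketch only: bump the new mount counter at mount time.
 * Assumes SB_DISK_SUPER_BLOCK(s) from the reiserfs headers; the caller is
 * expected to journal/flush the super block buffer afterwards. */
static void example_bump_mnt_count(struct super_block *s)
{
	struct reiserfs_super_block *rs = SB_DISK_SUPER_BLOCK(s);

	set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
}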
diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h
index af135ae895db..dcae01e63e40 100644
--- a/include/linux/reiserfs_xattr.h
+++ b/include/linux/reiserfs_xattr.h
@@ -15,6 +15,12 @@ struct reiserfs_xattr_header {
	__le32 h_hash;		/* hash of the value */
 };
 
+struct reiserfs_security_handle {
+	char *name;
+	void *value;
+	size_t length;
+};
+
 #ifdef __KERNEL__
 
 #include <linux/init.h>
@@ -29,22 +35,13 @@ struct iattr;
 struct super_block;
 struct nameidata;
 
-struct reiserfs_xattr_handler {
-	char *prefix;
-	int (*init) (void);
-	void (*exit) (void);
-	int (*get) (struct inode * inode, const char *name, void *buffer,
-		    size_t size);
-	int (*set) (struct inode * inode, const char *name, const void *buffer,
-		    size_t size, int flags);
-	int (*del) (struct inode * inode, const char *name);
-	int (*list) (struct inode * inode, const char *name, int namelen,
-		     char *out);
-	struct list_head handlers;
-};
+int reiserfs_xattr_register_handlers(void) __init;
+void reiserfs_xattr_unregister_handlers(void);
+int reiserfs_xattr_init(struct super_block *sb, int mount_flags);
+int reiserfs_delete_xattrs(struct inode *inode);
+int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
 
 #ifdef CONFIG_REISERFS_FS_XATTR
-#define is_reiserfs_priv_object(inode) IS_PRIVATE(inode)
 #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir)
 ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name,
			  void *buffer, size_t size);
@@ -52,104 +49,97 @@ int reiserfs_setxattr(struct dentry *dentry, const char *name,
		      const void *value, size_t size, int flags);
 ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
 int reiserfs_removexattr(struct dentry *dentry, const char *name);
-int reiserfs_delete_xattrs(struct inode *inode);
-int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
-int reiserfs_xattr_init(struct super_block *sb, int mount_flags);
 int reiserfs_permission(struct inode *inode, int mask);
 
-int reiserfs_xattr_del(struct inode *, const char *);
-int reiserfs_xattr_get(const struct inode *, const char *, void *, size_t);
+int reiserfs_xattr_get(struct inode *, const char *, void *, size_t);
 int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int);
-
-extern struct reiserfs_xattr_handler user_handler;
-extern struct reiserfs_xattr_handler trusted_handler;
-extern struct reiserfs_xattr_handler security_handler;
-
-int reiserfs_xattr_register_handlers(void) __init;
-void reiserfs_xattr_unregister_handlers(void);
-
-static inline void reiserfs_write_lock_xattrs(struct super_block *sb)
-{
-	down_write(&REISERFS_XATTR_DIR_SEM(sb));
-}
-static inline void reiserfs_write_unlock_xattrs(struct super_block *sb)
-{
-	up_write(&REISERFS_XATTR_DIR_SEM(sb));
-}
-static inline void reiserfs_read_lock_xattrs(struct super_block *sb)
-{
-	down_read(&REISERFS_XATTR_DIR_SEM(sb));
-}
-
-static inline void reiserfs_read_unlock_xattrs(struct super_block *sb)
-{
-	up_read(&REISERFS_XATTR_DIR_SEM(sb));
-}
-
-static inline void reiserfs_write_lock_xattr_i(struct inode *inode)
-{
-	down_write(&REISERFS_I(inode)->xattr_sem);
-}
-static inline void reiserfs_write_unlock_xattr_i(struct inode *inode)
-{
-	up_write(&REISERFS_I(inode)->xattr_sem);
-}
-static inline void reiserfs_read_lock_xattr_i(struct inode *inode)
-{
-	down_read(&REISERFS_I(inode)->xattr_sem);
-}
-
-static inline void reiserfs_read_unlock_xattr_i(struct inode *inode)
-{
-	up_read(&REISERFS_I(inode)->xattr_sem);
-}
-
-static inline void reiserfs_mark_inode_private(struct inode *inode)
-{
-	inode->i_flags |= S_PRIVATE;
-}
-
+int reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *,
+			      struct inode *, const char *, const void *,
+			      size_t, int);
+
+extern struct xattr_handler reiserfs_xattr_user_handler;
+extern struct xattr_handler reiserfs_xattr_trusted_handler;
+extern struct xattr_handler reiserfs_xattr_security_handler;
+#ifdef CONFIG_REISERFS_FS_SECURITY
+int reiserfs_security_init(struct inode *dir, struct inode *inode,
+			   struct reiserfs_security_handle *sec);
+int reiserfs_security_write(struct reiserfs_transaction_handle *th,
+			    struct inode *inode,
+			    struct reiserfs_security_handle *sec);
+void reiserfs_security_free(struct reiserfs_security_handle *sec);
+#endif
+
+#define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
+static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size)
+{
+	loff_t ret = 0;
+	if (reiserfs_file_data_log(inode)) {
+		ret = _ROUND_UP(xattr_size(size), inode->i_sb->s_blocksize);
+		ret >>= inode->i_sb->s_blocksize_bits;
+	}
+	return ret;
+}
+
+/* We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
+ * Let's try to be smart about it.
+ * xattr root: We cache it. If it's not cached, we may need to create it.
+ * xattr dir: If anything has been loaded for this inode, we can set a flag
+ *            saying so.
+ * xattr file: Since we don't cache xattrs, we can't tell. We always include
+ *             blocks for it.
+ *
+ * However, since root and dir can be created between calls - YOU MUST SAVE
+ * THIS VALUE.
+ */
+static inline size_t reiserfs_xattr_jcreate_nblocks(struct inode *inode)
+{
+	size_t nblocks = JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
+
+	if ((REISERFS_I(inode)->i_flags & i_has_xattr_dir) == 0) {
+		nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
+		if (REISERFS_SB(inode->i_sb)->xattr_root == NULL)
+			nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
+	}
+
+	return nblocks;
+}
+
 static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
 {
-	init_rwsem(&REISERFS_I(inode)->xattr_sem);
+	init_rwsem(&REISERFS_I(inode)->i_xattr_sem);
 }
 
 #else
 
-#define is_reiserfs_priv_object(inode) 0
-#define reiserfs_mark_inode_private(inode) do {;} while(0)
 #define reiserfs_getxattr NULL
 #define reiserfs_setxattr NULL
 #define reiserfs_listxattr NULL
 #define reiserfs_removexattr NULL
-#define reiserfs_write_lock_xattrs(sb) do {;} while(0)
-#define reiserfs_write_unlock_xattrs(sb) do {;} while(0)
-#define reiserfs_read_lock_xattrs(sb)
-#define reiserfs_read_unlock_xattrs(sb)
 
 #define reiserfs_permission NULL
 
-#define reiserfs_xattr_register_handlers() 0
-#define reiserfs_xattr_unregister_handlers()
-
-static inline int reiserfs_delete_xattrs(struct inode *inode)
+static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
 {
-	return 0;
-};
-static inline int reiserfs_chown_xattrs(struct inode *inode,
-					struct iattr *attrs)
+}
+#endif  /*  CONFIG_REISERFS_FS_XATTR  */
+
+#ifndef CONFIG_REISERFS_FS_SECURITY
+static inline int reiserfs_security_init(struct inode *dir,
+					 struct inode *inode,
+					 struct reiserfs_security_handle *sec)
 {
	return 0;
-};
-static inline int reiserfs_xattr_init(struct super_block *sb, int mount_flags)
+}
+static inline int
+reiserfs_security_write(struct reiserfs_transaction_handle *th,
+			struct inode *inode,
+			struct reiserfs_security_handle *sec)
 {
-	sb->s_flags = (sb->s_flags & ~MS_POSIXACL);	/* to be sure */
	return 0;
-};
-static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
-{
 }
-#endif  /*  CONFIG_REISERFS_FS_XATTR  */
+static inline void reiserfs_security_free(struct reiserfs_security_handle *sec)
+{}
+#endif
 
 #endif  /*  __KERNEL__  */
 
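reiserfs_xattr_jcreate_nblocks() exists so callers can reserve enough journal blocks before starting a transaction that may have to create the xattr root, dir and file; the comment above it stresses saving the estimate, since those objects can appear between calls. A hedged sketch of the intended pattern is below; journal_begin()/journal_end() come from reiserfs_fs.h, error handling is trimmed, and the wrapper itself is invented (the real callers live in fs/reiserfs/xattr.c).

/* Sketch only: set one xattr inside a single transaction sized with the
 * new helpers. */
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_xattr.h>

static int example_set_xattr(struct inode *inode, const char *name,
			     const void *value, size_t size)
{
	struct reiserfs_transaction_handle th;
	/* compute the estimate once, before the objects can be created */
	size_t jbegin_count = reiserfs_xattr_jcreate_nblocks(inode) +
			      reiserfs_xattr_nblocks(inode, size) * 2;
	int err;

	err = journal_begin(&th, inode->i_sb, jbegin_count);
	if (err)
		return err;

	err = reiserfs_xattr_set_handle(&th, inode, name, value, size, 0);

	return journal_end(&th, inode->i_sb, jbegin_count) ?: err;
}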
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b3b359660082..e1b7b2173885 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -8,7 +8,7 @@ struct ring_buffer;
 struct ring_buffer_iter;
 
 /*
- * Don't reference this struct directly, use functions below.
+ * Don't refer to this struct directly, use functions below.
 */
 struct ring_buffer_event {
	u32		type:2, len:3, time_delta:27;
@@ -18,10 +18,13 @@ struct ring_buffer_event {
 /**
 * enum ring_buffer_type - internal ring buffer types
 *
- * @RINGBUF_TYPE_PADDING:	Left over page padding
- *				 array is ignored
- *				 size is variable depending on how much
+ * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
+ *				 If time_delta is 0:
+ *				  array is ignored
+ *				  size is variable depending on how much
 *				 padding is needed
+ *				 If time_delta is non zero:
+ *				 everything else same as RINGBUF_TYPE_DATA
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
@@ -65,6 +68,8 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
	return event->time_delta;
 }
 
+void ring_buffer_event_discard(struct ring_buffer_event *event);
+
 /*
 * size is in bytes for each per CPU buffer.
 */
@@ -74,13 +79,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
 
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
 
-struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer,
-			 unsigned long length,
-			 unsigned long *flags);
+struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
						   unsigned long length);
 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
-			      struct ring_buffer_event *event,
-			      unsigned long flags);
+			      struct ring_buffer_event *event);
 int ring_buffer_write(struct ring_buffer *buffer,
		      unsigned long length, void *data);
 
@@ -121,17 +123,19 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
121unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); 123unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
122unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); 124unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
123 125
124u64 ring_buffer_time_stamp(int cpu); 126u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
125void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); 127void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
128 int cpu, u64 *ts);
129void ring_buffer_set_clock(struct ring_buffer *buffer,
130 u64 (*clock)(void));
131
132size_t ring_buffer_page_len(void *page);
126 133
127void tracing_on(void);
128void tracing_off(void);
129void tracing_off_permanent(void);
130 134
131void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); 135void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
132void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); 136void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
133int ring_buffer_read_page(struct ring_buffer *buffer, 137int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
134 void **data_page, int cpu, int full); 138 size_t len, int cpu, int full);
135 139
136enum ring_buffer_flags { 140enum ring_buffer_flags {
137 RB_FL_OVERWRITE = 1 << 0, 141 RB_FL_OVERWRITE = 1 << 0,
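
The reserve/commit prototypes above lose their flags argument. A minimal producer sketch under the new signatures, assuming a hypothetical u32 payload; a caller could instead drop a reserved event with the newly declared ring_buffer_event_discard():

#include <linux/ring_buffer.h>

static void example_write_event(struct ring_buffer *buffer, u32 value)
{
        struct ring_buffer_event *event;
        u32 *entry;

        /* no flags argument any more */
        event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        *entry = value;

        /* commit no longer takes flags either */
        ring_buffer_unlock_commit(buffer, event);
}
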
diff --git a/include/linux/rtc-v3020.h b/include/linux/rtc-v3020.h
index bf74e63c98fe..8ba646e610d9 100644
--- a/include/linux/rtc-v3020.h
+++ b/include/linux/rtc-v3020.h
@@ -14,6 +14,12 @@
14 * is used depends on the board. */ 14 * is used depends on the board. */
15struct v3020_platform_data { 15struct v3020_platform_data {
16 int leftshift; /* (1<<(leftshift)) & readl() */ 16 int leftshift; /* (1<<(leftshift)) & readl() */
17
18 int use_gpio:1;
19 unsigned int gpio_cs;
20 unsigned int gpio_wr;
21 unsigned int gpio_rd;
22 unsigned int gpio_io;
17}; 23};
18 24
19#define V3020_STATUS_0 0x00 25#define V3020_STATUS_0 0x00
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 4046b75563c1..60f88a7fb13d 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -99,6 +99,7 @@ struct rtc_pll_info {
99 99
100#ifdef __KERNEL__ 100#ifdef __KERNEL__
101 101
102#include <linux/types.h>
102#include <linux/interrupt.h> 103#include <linux/interrupt.h>
103 104
104extern int rtc_month_days(unsigned int month, unsigned int year); 105extern int rtc_month_days(unsigned int month, unsigned int year);
@@ -232,6 +233,11 @@ int rtc_register(rtc_task_t *task);
232int rtc_unregister(rtc_task_t *task); 233int rtc_unregister(rtc_task_t *task);
233int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); 234int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
234 235
236static inline bool is_leap_year(unsigned int year)
237{
238 return (!(year % 4) && (year % 100)) || !(year % 400);
239}
240
235#endif /* __KERNEL__ */ 241#endif /* __KERNEL__ */
236 242
237#endif /* _LINUX_RTC_H_ */ 243#endif /* _LINUX_RTC_H_ */
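
The new kernel-internal is_leap_year() encodes the Gregorian rule (divisible by 4 and not by 100, unless divisible by 400), so 1900 is not a leap year while 2000 is. A trivial, hypothetical use:

#include <linux/rtc.h>

/* 1900 -> 28 days in February, 2000 and 2004 -> 29 */
static int example_feb_days(unsigned int year)
{
        return is_leap_year(year) ? 29 : 28;
}
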
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1d19c025f9d2..b94f3541f67b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -68,7 +68,7 @@ struct sched_param {
68#include <linux/smp.h> 68#include <linux/smp.h>
69#include <linux/sem.h> 69#include <linux/sem.h>
70#include <linux/signal.h> 70#include <linux/signal.h>
71#include <linux/fs_struct.h> 71#include <linux/path.h>
72#include <linux/compiler.h> 72#include <linux/compiler.h>
73#include <linux/completion.h> 73#include <linux/completion.h>
74#include <linux/pid.h> 74#include <linux/pid.h>
@@ -97,6 +97,7 @@ struct futex_pi_state;
97struct robust_list_head; 97struct robust_list_head;
98struct bio; 98struct bio;
99struct bts_tracer; 99struct bts_tracer;
100struct fs_struct;
100 101
101/* 102/*
102 * List of flags we want to share for kernel threads, 103 * List of flags we want to share for kernel threads,
@@ -137,6 +138,8 @@ extern unsigned long nr_uninterruptible(void);
137extern unsigned long nr_active(void); 138extern unsigned long nr_active(void);
138extern unsigned long nr_iowait(void); 139extern unsigned long nr_iowait(void);
139 140
141extern unsigned long get_parent_ip(unsigned long addr);
142
140struct seq_file; 143struct seq_file;
141struct cfs_rq; 144struct cfs_rq;
142struct task_group; 145struct task_group;
@@ -331,7 +334,9 @@ extern signed long schedule_timeout(signed long timeout);
331extern signed long schedule_timeout_interruptible(signed long timeout); 334extern signed long schedule_timeout_interruptible(signed long timeout);
332extern signed long schedule_timeout_killable(signed long timeout); 335extern signed long schedule_timeout_killable(signed long timeout);
333extern signed long schedule_timeout_uninterruptible(signed long timeout); 336extern signed long schedule_timeout_uninterruptible(signed long timeout);
337asmlinkage void __schedule(void);
334asmlinkage void schedule(void); 338asmlinkage void schedule(void);
339extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
335 340
336struct nsproxy; 341struct nsproxy;
337struct user_namespace; 342struct user_namespace;
@@ -389,8 +394,15 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
389 (mm)->hiwater_vm = (mm)->total_vm; \ 394 (mm)->hiwater_vm = (mm)->total_vm; \
390} while (0) 395} while (0)
391 396
392#define get_mm_hiwater_rss(mm) max((mm)->hiwater_rss, get_mm_rss(mm)) 397static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
393#define get_mm_hiwater_vm(mm) max((mm)->hiwater_vm, (mm)->total_vm) 398{
399 return max(mm->hiwater_rss, get_mm_rss(mm));
400}
401
402static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
403{
404 return max(mm->hiwater_vm, mm->total_vm);
405}
394 406
395extern void set_dumpable(struct mm_struct *mm, int value); 407extern void set_dumpable(struct mm_struct *mm, int value);
396extern int get_dumpable(struct mm_struct *mm); 408extern int get_dumpable(struct mm_struct *mm);
@@ -538,25 +550,8 @@ struct signal_struct {
538 550
539 struct list_head cpu_timers[3]; 551 struct list_head cpu_timers[3];
540 552
541 /* job control IDs */
542
543 /*
544 * pgrp and session fields are deprecated.
545 * use the task_session_Xnr and task_pgrp_Xnr routines below
546 */
547
548 union {
549 pid_t pgrp __deprecated;
550 pid_t __pgrp;
551 };
552
553 struct pid *tty_old_pgrp; 553 struct pid *tty_old_pgrp;
554 554
555 union {
556 pid_t session __deprecated;
557 pid_t __session;
558 };
559
560 /* boolean value for session group leader */ 555 /* boolean value for session group leader */
561 int leader; 556 int leader;
562 557
@@ -1334,6 +1329,7 @@ struct task_struct {
1334 int lockdep_depth; 1329 int lockdep_depth;
1335 unsigned int lockdep_recursion; 1330 unsigned int lockdep_recursion;
1336 struct held_lock held_locks[MAX_LOCK_DEPTH]; 1331 struct held_lock held_locks[MAX_LOCK_DEPTH];
1332 gfp_t lockdep_reclaim_gfp;
1337#endif 1333#endif
1338 1334
1339/* journalling filesystem info */ 1335/* journalling filesystem info */
@@ -1411,6 +1407,8 @@ struct task_struct {
1411 int curr_ret_stack; 1407 int curr_ret_stack;
1412 /* Stack of return addresses for return function tracing */ 1408 /* Stack of return addresses for return function tracing */
1413 struct ftrace_ret_stack *ret_stack; 1409 struct ftrace_ret_stack *ret_stack;
1410 /* time stamp for last schedule */
1411 unsigned long long ftrace_timestamp;
1414 /* 1412 /*
1415 * Number of functions that haven't been traced 1413 * Number of functions that haven't been traced
1416 * because of depth overrun. 1414 * because of depth overrun.
@@ -1459,16 +1457,6 @@ static inline int rt_task(struct task_struct *p)
1459 return rt_prio(p->prio); 1457 return rt_prio(p->prio);
1460} 1458}
1461 1459
1462static inline void set_task_session(struct task_struct *tsk, pid_t session)
1463{
1464 tsk->signal->__session = session;
1465}
1466
1467static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
1468{
1469 tsk->signal->__pgrp = pgrp;
1470}
1471
1472static inline struct pid *task_pid(struct task_struct *task) 1460static inline struct pid *task_pid(struct task_struct *task)
1473{ 1461{
1474 return task->pids[PIDTYPE_PID].pid; 1462 return task->pids[PIDTYPE_PID].pid;
@@ -1479,6 +1467,11 @@ static inline struct pid *task_tgid(struct task_struct *task)
1479 return task->group_leader->pids[PIDTYPE_PID].pid; 1467 return task->group_leader->pids[PIDTYPE_PID].pid;
1480} 1468}
1481 1469
1470/*
1471 * Without tasklist or rcu lock it is not safe to dereference
1472 * the result of task_pgrp/task_session even if task == current,
1473 * we can race with another thread doing sys_setsid/sys_setpgid.
1474 */
1482static inline struct pid *task_pgrp(struct task_struct *task) 1475static inline struct pid *task_pgrp(struct task_struct *task)
1483{ 1476{
1484 return task->group_leader->pids[PIDTYPE_PGID].pid; 1477 return task->group_leader->pids[PIDTYPE_PGID].pid;
@@ -1504,17 +1497,23 @@ struct pid_namespace;
1504 * 1497 *
1505 * see also pid_nr() etc in include/linux/pid.h 1498 * see also pid_nr() etc in include/linux/pid.h
1506 */ 1499 */
1500pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1501 struct pid_namespace *ns);
1507 1502
1508static inline pid_t task_pid_nr(struct task_struct *tsk) 1503static inline pid_t task_pid_nr(struct task_struct *tsk)
1509{ 1504{
1510 return tsk->pid; 1505 return tsk->pid;
1511} 1506}
1512 1507
1513pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); 1508static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1509 struct pid_namespace *ns)
1510{
1511 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1512}
1514 1513
1515static inline pid_t task_pid_vnr(struct task_struct *tsk) 1514static inline pid_t task_pid_vnr(struct task_struct *tsk)
1516{ 1515{
1517 return pid_vnr(task_pid(tsk)); 1516 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1518} 1517}
1519 1518
1520 1519
@@ -1531,31 +1530,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1531} 1530}
1532 1531
1533 1532
1534static inline pid_t task_pgrp_nr(struct task_struct *tsk) 1533static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1534 struct pid_namespace *ns)
1535{ 1535{
1536 return tsk->signal->__pgrp; 1536 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1537} 1537}
1538 1538
1539pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1540
1541static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 1539static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1542{ 1540{
1543 return pid_vnr(task_pgrp(tsk)); 1541 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1544} 1542}
1545 1543
1546 1544
1547static inline pid_t task_session_nr(struct task_struct *tsk) 1545static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1546 struct pid_namespace *ns)
1548{ 1547{
1549 return tsk->signal->__session; 1548 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1550} 1549}
1551 1550
1552pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1553
1554static inline pid_t task_session_vnr(struct task_struct *tsk) 1551static inline pid_t task_session_vnr(struct task_struct *tsk)
1555{ 1552{
1556 return pid_vnr(task_session(tsk)); 1553 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1557} 1554}
1558 1555
1556/* obsolete, do not use */
1557static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1558{
1559 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1560}
1559 1561
1560/** 1562/**
1561 * pid_alive - check that a task structure is not stale 1563 * pid_alive - check that a task structure is not stale
@@ -1965,7 +1967,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *);
1965/* Allocate a new mm structure and copy contents from tsk->mm */ 1967/* Allocate a new mm structure and copy contents from tsk->mm */
1966extern struct mm_struct *dup_mm(struct task_struct *tsk); 1968extern struct mm_struct *dup_mm(struct task_struct *tsk);
1967 1969
1968extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); 1970extern int copy_thread(unsigned long, unsigned long, unsigned long,
1971 struct task_struct *, struct pt_regs *);
1969extern void flush_thread(void); 1972extern void flush_thread(void);
1970extern void exit_thread(void); 1973extern void exit_thread(void);
1971 1974
@@ -2050,6 +2053,11 @@ static inline int thread_group_empty(struct task_struct *p)
2050#define delay_group_leader(p) \ 2053#define delay_group_leader(p) \
2051 (thread_group_leader(p) && !thread_group_empty(p)) 2054 (thread_group_leader(p) && !thread_group_empty(p))
2052 2055
2056static inline int task_detached(struct task_struct *p)
2057{
2058 return p->exit_signal == -1;
2059}
2060
2053/* 2061/*
2054 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring 2062 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2055 * subscriptions and synchronises with wait4(). Also used in procfs. Also 2063 * subscriptions and synchronises with wait4(). Also used in procfs. Also
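
With the pid helpers above consolidated onto __task_pid_nr_ns(), callers read namespace-relative ids through the *_vnr()/*_nr_ns() wrappers instead of the removed __pgrp/__session fields. A hedged sketch; example_report_ids() is hypothetical:

#include <linux/kernel.h>
#include <linux/sched.h>

static void example_report_ids(struct task_struct *tsk)
{
        /* each wrapper now resolves through __task_pid_nr_ns() */
        pid_t pid  = task_pid_vnr(tsk);     /* PIDTYPE_PID  */
        pid_t pgrp = task_pgrp_vnr(tsk);    /* PIDTYPE_PGID */
        pid_t sid  = task_session_vnr(tsk); /* PIDTYPE_SID  */

        printk(KERN_DEBUG "%s: pid=%d pgrp=%d sid=%d\n",
               tsk->comm, pid, pgrp, sid);
}
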
diff --git a/include/linux/security.h b/include/linux/security.h
index 54ed15799a83..d5fd6163606f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/key.h> 33#include <linux/key.h>
34#include <linux/xfrm.h> 34#include <linux/xfrm.h>
35#include <linux/gfp.h>
35#include <net/flow.h> 36#include <net/flow.h>
36 37
37/* Maximum number of letters for an LSM name string */ 38/* Maximum number of letters for an LSM name string */
@@ -2953,5 +2954,28 @@ static inline void securityfs_remove(struct dentry *dentry)
2953 2954
2954#endif 2955#endif
2955 2956
2957#ifdef CONFIG_SECURITY
2958
2959static inline char *alloc_secdata(void)
2960{
2961 return (char *)get_zeroed_page(GFP_KERNEL);
2962}
2963
2964static inline void free_secdata(void *secdata)
2965{
2966 free_page((unsigned long)secdata);
2967}
2968
2969#else
2970
2971static inline char *alloc_secdata(void)
2972{
2973 return (char *)1;
2974}
2975
2976static inline void free_secdata(void *secdata)
2977{ }
2978#endif /* CONFIG_SECURITY */
2979
2956#endif /* ! __LINUX_SECURITY_H */ 2980#endif /* ! __LINUX_SECURITY_H */
2957 2981
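
alloc_secdata()/free_secdata() now live here; with CONFIG_SECURITY=n they collapse to a non-NULL dummy, so callers need no #ifdef. A hedged sketch of the usual pattern, with the actual option handling elided:

#include <linux/errno.h>
#include <linux/security.h>

static int example_copy_sec_opts(void)
{
        /* non-NULL dummy when CONFIG_SECURITY=n */
        char *secdata = alloc_secdata();

        if (!secdata)
                return -ENOMEM;

        /* ... hand secdata to e.g. security_sb_copy_data() ... */

        free_secdata(secdata);
        return 0;
}
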
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index f616f31576d7..004f3b3342c5 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -55,7 +55,7 @@ int seq_bitmap(struct seq_file *m, const unsigned long *bits,
55 unsigned int nr_bits); 55 unsigned int nr_bits);
56static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) 56static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
57{ 57{
58 return seq_bitmap(m, mask->bits, nr_cpu_ids); 58 return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids);
59} 59}
60 60
61static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) 61static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
@@ -63,12 +63,13 @@ static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
63 return seq_bitmap(m, mask->bits, MAX_NUMNODES); 63 return seq_bitmap(m, mask->bits, MAX_NUMNODES);
64} 64}
65 65
66int seq_bitmap_list(struct seq_file *m, unsigned long *bits, 66int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
67 unsigned int nr_bits); 67 unsigned int nr_bits);
68 68
69static inline int seq_cpumask_list(struct seq_file *m, cpumask_t *mask) 69static inline int seq_cpumask_list(struct seq_file *m,
70 const struct cpumask *mask)
70{ 71{
71 return seq_bitmap_list(m, mask->bits, NR_CPUS); 72 return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids);
72} 73}
73 74
74static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask) 75static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
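
seq_cpumask_list() now takes a const struct cpumask * and clips at nr_cpu_ids. A minimal, hypothetical show() routine using it:

#include <linux/cpumask.h>
#include <linux/seq_file.h>

static int example_show_online(struct seq_file *m, void *v)
{
        seq_puts(m, "online: ");
        seq_cpumask_list(m, cpu_online_mask);
        seq_putc(m, '\n');
        return 0;
}
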
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index df9245c7bd3b..57a97e52e58d 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -164,6 +164,9 @@
164/* NWPSERIAL */ 164/* NWPSERIAL */
165#define PORT_NWPSERIAL 85 165#define PORT_NWPSERIAL 85
166 166
167/* MAX3100 */
168#define PORT_MAX3100 86
169
167#ifdef __KERNEL__ 170#ifdef __KERNEL__
168 171
169#include <linux/compiler.h> 172#include <linux/compiler.h>
@@ -277,7 +280,7 @@ struct uart_port {
277 struct uart_icount icount; /* statistics */ 280 struct uart_icount icount; /* statistics */
278 281
279 struct console *cons; /* struct console, if any */ 282 struct console *cons; /* struct console, if any */
280#ifdef CONFIG_SERIAL_CORE_CONSOLE 283#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ)
281 unsigned long sysrq; /* sysrq timeout */ 284 unsigned long sysrq; /* sysrq timeout */
282#endif 285#endif
283 286
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 55d67300fa10..5fd389162f01 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -168,6 +168,7 @@ struct skb_shared_hwtstamps {
168 * @software: generate software time stamp 168 * @software: generate software time stamp
169 * @in_progress: device driver is going to provide 169 * @in_progress: device driver is going to provide
170 * hardware time stamp 170 * hardware time stamp
171 * @flags: all shared_tx flags
171 * 172 *
172 * These flags are attached to packets as part of the 173 * These flags are attached to packets as part of the
173 * &skb_shared_info. Use skb_tx() to get a pointer. 174 * &skb_shared_info. Use skb_tx() to get a pointer.
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 6ca6a7b66d75..5ac9b0bcaf9a 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ 14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ 15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <trace/kmemtrace.h>
17 18
18/* Size description struct for general caches. */ 19/* Size description struct for general caches. */
19struct cache_sizes { 20struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
28void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 29void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
29void *__kmalloc(size_t size, gfp_t flags); 30void *__kmalloc(size_t size, gfp_t flags);
30 31
31static inline void *kmalloc(size_t size, gfp_t flags) 32#ifdef CONFIG_KMEMTRACE
33extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
34extern size_t slab_buffer_size(struct kmem_cache *cachep);
35#else
36static __always_inline void *
37kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
32{ 38{
39 return kmem_cache_alloc(cachep, flags);
40}
41static inline size_t slab_buffer_size(struct kmem_cache *cachep)
42{
43 return 0;
44}
45#endif
46
47static __always_inline void *kmalloc(size_t size, gfp_t flags)
48{
49 struct kmem_cache *cachep;
50 void *ret;
51
33 if (__builtin_constant_p(size)) { 52 if (__builtin_constant_p(size)) {
34 int i = 0; 53 int i = 0;
35 54
@@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
47found: 66found:
48#ifdef CONFIG_ZONE_DMA 67#ifdef CONFIG_ZONE_DMA
49 if (flags & GFP_DMA) 68 if (flags & GFP_DMA)
50 return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, 69 cachep = malloc_sizes[i].cs_dmacachep;
51 flags); 70 else
52#endif 71#endif
53 return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); 72 cachep = malloc_sizes[i].cs_cachep;
73
74 ret = kmem_cache_alloc_notrace(cachep, flags);
75
76 trace_kmalloc(_THIS_IP_, ret,
77 size, slab_buffer_size(cachep), flags);
78
79 return ret;
54 } 80 }
55 return __kmalloc(size, flags); 81 return __kmalloc(size, flags);
56} 82}
@@ -59,8 +85,25 @@ found:
59extern void *__kmalloc_node(size_t size, gfp_t flags, int node); 85extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
60extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 86extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
61 87
62static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 88#ifdef CONFIG_KMEMTRACE
89extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
90 gfp_t flags,
91 int nodeid);
92#else
93static __always_inline void *
94kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
95 gfp_t flags,
96 int nodeid)
97{
98 return kmem_cache_alloc_node(cachep, flags, nodeid);
99}
100#endif
101
102static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63{ 103{
104 struct kmem_cache *cachep;
105 void *ret;
106
64 if (__builtin_constant_p(size)) { 107 if (__builtin_constant_p(size)) {
65 int i = 0; 108 int i = 0;
66 109
@@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
78found: 121found:
79#ifdef CONFIG_ZONE_DMA 122#ifdef CONFIG_ZONE_DMA
80 if (flags & GFP_DMA) 123 if (flags & GFP_DMA)
81 return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, 124 cachep = malloc_sizes[i].cs_dmacachep;
82 flags, node); 125 else
83#endif 126#endif
84 return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, 127 cachep = malloc_sizes[i].cs_cachep;
85 flags, node); 128
129 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
130
131 trace_kmalloc_node(_THIS_IP_, ret,
132 size, slab_buffer_size(cachep),
133 flags, node);
134
135 return ret;
86 } 136 }
87 return __kmalloc_node(size, flags, node); 137 return __kmalloc_node(size, flags, node);
88} 138}
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab9..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
3 3
4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
5 5
6static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 6static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
7 gfp_t flags)
7{ 8{
8 return kmem_cache_alloc_node(cachep, flags, -1); 9 return kmem_cache_alloc_node(cachep, flags, -1);
9} 10}
10 11
11void *__kmalloc_node(size_t size, gfp_t flags, int node); 12void *__kmalloc_node(size_t size, gfp_t flags, int node);
12 13
13static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 14static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
14{ 15{
15 return __kmalloc_node(size, flags, node); 16 return __kmalloc_node(size, flags, node);
16} 17}
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
23 * kmalloc is the normal method of allocating memory 24 * kmalloc is the normal method of allocating memory
24 * in the kernel. 25 * in the kernel.
25 */ 26 */
26static inline void *kmalloc(size_t size, gfp_t flags) 27static __always_inline void *kmalloc(size_t size, gfp_t flags)
27{ 28{
28 return __kmalloc_node(size, flags, -1); 29 return __kmalloc_node(size, flags, -1);
29} 30}
30 31
31static inline void *__kmalloc(size_t size, gfp_t flags) 32static __always_inline void *__kmalloc(size_t size, gfp_t flags)
32{ 33{
33 return kmalloc(size, flags); 34 return kmalloc(size, flags);
34} 35}
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
new file mode 100644
index 000000000000..85958277f83d
--- /dev/null
+++ b/include/linux/slow-work.h
@@ -0,0 +1,95 @@
1/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 *
11 * See Documentation/slow-work.txt
12 */
13
14#ifndef _LINUX_SLOW_WORK_H
15#define _LINUX_SLOW_WORK_H
16
17#ifdef CONFIG_SLOW_WORK
18
19#include <linux/sysctl.h>
20
21struct slow_work;
22
23/*
24 * The operations used to support slow work items
25 */
26struct slow_work_ops {
27 /* get a ref on a work item
28 * - return 0 if successful, -ve if not
29 */
30 int (*get_ref)(struct slow_work *work);
31
32 /* discard a ref to a work item */
33 void (*put_ref)(struct slow_work *work);
34
35 /* execute a work item */
36 void (*execute)(struct slow_work *work);
37};
38
39/*
40 * A slow work item
41 * - A reference is held on the parent object by the thread pool when it is
42 * queued
43 */
44struct slow_work {
45 unsigned long flags;
46#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
47#define SLOW_WORK_EXECUTING 1 /* item currently executing */
48#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
49#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
50 const struct slow_work_ops *ops; /* operations table for this item */
51 struct list_head link; /* link in queue */
52};
53
54/**
55 * slow_work_init - Initialise a slow work item
56 * @work: The work item to initialise
57 * @ops: The operations to use to handle the slow work item
58 *
59 * Initialise a slow work item.
60 */
61static inline void slow_work_init(struct slow_work *work,
62 const struct slow_work_ops *ops)
63{
64 work->flags = 0;
65 work->ops = ops;
66 INIT_LIST_HEAD(&work->link);
67}
68
69/**
70 * vslow_work_init - Initialise a very slow work item
71 * @work: The work item to initialise
72 * @ops: The operations to use to handle the slow work item
73 *
74 * Initialise a very slow work item. This item will be restricted such that
75 * only a certain number of the pool threads will be able to execute items of
76 * this type.
77 */
78static inline void vslow_work_init(struct slow_work *work,
79 const struct slow_work_ops *ops)
80{
81 work->flags = 1 << SLOW_WORK_VERY_SLOW;
82 work->ops = ops;
83 INIT_LIST_HEAD(&work->link);
84}
85
86extern int slow_work_enqueue(struct slow_work *work);
87extern int slow_work_register_user(void);
88extern void slow_work_unregister_user(void);
89
90#ifdef CONFIG_SYSCTL
91extern ctl_table slow_work_sysctls[];
92#endif
93
94#endif /* CONFIG_SLOW_WORK */
95#endif /* _LINUX_SLOW_WORK_H */
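
The header above only declares the API. A minimal sketch of a user, assuming a hypothetical refcounted object that embeds a struct slow_work; names prefixed example_ are not part of this patch:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/slow-work.h>

struct example_item {
        struct kref             ref;
        struct slow_work        work;
};

static void example_item_release(struct kref *ref)
{
        kfree(container_of(ref, struct example_item, ref));
}

/* the pool holds a reference while the item is queued or executing */
static int example_get_ref(struct slow_work *work)
{
        kref_get(&container_of(work, struct example_item, work)->ref);
        return 0;
}

static void example_put_ref(struct slow_work *work)
{
        kref_put(&container_of(work, struct example_item, work)->ref,
                 example_item_release);
}

static void example_execute(struct slow_work *work)
{
        /* the slow operation (lookup, mkdir, ...) runs in a pool thread */
}

static const struct slow_work_ops example_ops = {
        .get_ref = example_get_ref,
        .put_ref = example_put_ref,
        .execute = example_execute,
};

/*
 * Usage: slow_work_register_user() once at module init, then per item
 * slow_work_init(&item->work, &example_ops) followed by
 * slow_work_enqueue(&item->work).
 */
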
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e37b6aa8a9fb..5046f90c1171 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/kobject.h> 12#include <linux/kobject.h>
13#include <trace/kmemtrace.h>
13 14
14enum stat_item { 15enum stat_item {
15 ALLOC_FASTPATH, /* Allocation from cpu slab */ 16 ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -217,13 +218,30 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
217void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 218void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
218void *__kmalloc(size_t size, gfp_t flags); 219void *__kmalloc(size_t size, gfp_t flags);
219 220
221#ifdef CONFIG_KMEMTRACE
222extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
223#else
224static __always_inline void *
225kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
226{
227 return kmem_cache_alloc(s, gfpflags);
228}
229#endif
230
220static __always_inline void *kmalloc_large(size_t size, gfp_t flags) 231static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
221{ 232{
222 return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); 233 unsigned int order = get_order(size);
234 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
235
236 trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
237
238 return ret;
223} 239}
224 240
225static __always_inline void *kmalloc(size_t size, gfp_t flags) 241static __always_inline void *kmalloc(size_t size, gfp_t flags)
226{ 242{
243 void *ret;
244
227 if (__builtin_constant_p(size)) { 245 if (__builtin_constant_p(size)) {
228 if (size > SLUB_MAX_SIZE) 246 if (size > SLUB_MAX_SIZE)
229 return kmalloc_large(size, flags); 247 return kmalloc_large(size, flags);
@@ -234,7 +252,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
234 if (!s) 252 if (!s)
235 return ZERO_SIZE_PTR; 253 return ZERO_SIZE_PTR;
236 254
237 return kmem_cache_alloc(s, flags); 255 ret = kmem_cache_alloc_notrace(s, flags);
256
257 trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
258
259 return ret;
238 } 260 }
239 } 261 }
240 return __kmalloc(size, flags); 262 return __kmalloc(size, flags);
@@ -244,8 +266,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
244void *__kmalloc_node(size_t size, gfp_t flags, int node); 266void *__kmalloc_node(size_t size, gfp_t flags, int node);
245void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 267void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
246 268
269#ifdef CONFIG_KMEMTRACE
270extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
271 gfp_t gfpflags,
272 int node);
273#else
274static __always_inline void *
275kmem_cache_alloc_node_notrace(struct kmem_cache *s,
276 gfp_t gfpflags,
277 int node)
278{
279 return kmem_cache_alloc_node(s, gfpflags, node);
280}
281#endif
282
247static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) 283static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
248{ 284{
285 void *ret;
286
249 if (__builtin_constant_p(size) && 287 if (__builtin_constant_p(size) &&
250 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { 288 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
251 struct kmem_cache *s = kmalloc_slab(size); 289 struct kmem_cache *s = kmalloc_slab(size);
@@ -253,7 +291,12 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
253 if (!s) 291 if (!s)
254 return ZERO_SIZE_PTR; 292 return ZERO_SIZE_PTR;
255 293
256 return kmem_cache_alloc_node(s, flags, node); 294 ret = kmem_cache_alloc_node_notrace(s, flags, node);
295
296 trace_kmalloc_node(_THIS_IP_, ret,
297 size, s->size, flags, node);
298
299 return ret;
257 } 300 }
258 return __kmalloc_node(size, flags, node); 301 return __kmalloc_node(size, flags, node);
259} 302}
diff --git a/include/linux/smp.h b/include/linux/smp.h
index bbacb7baa446..a69db820eed6 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -38,7 +38,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
38/* 38/*
39 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. 39 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
40 * (defined in asm header): 40 * (defined in asm header):
41 */ 41 */
42 42
43/* 43/*
44 * stops all CPUs but the current one: 44 * stops all CPUs but the current one:
@@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
82 return 0; 82 return 0;
83} 83}
84 84
85void __smp_call_function_single(int cpuid, struct call_single_data *data); 85void __smp_call_function_single(int cpuid, struct call_single_data *data,
86 int wait);
86 87
87/* 88/*
88 * Generic and arch helpers 89 * Generic and arch helpers
@@ -121,6 +122,8 @@ extern unsigned int setup_max_cpus;
121 122
122#else /* !SMP */ 123#else /* !SMP */
123 124
125static inline void smp_send_stop(void) { }
126
124/* 127/*
125 * These macros fold the SMP functionality into a single CPU system 128 * These macros fold the SMP functionality into a single CPU system
126 */ 129 */
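
__smp_call_function_single() above gains an explicit wait flag. A hedged sketch of kicking one CPU without waiting; example_ipi()/example_kick() are hypothetical, and the static call_single_data must not be reused while a previous call is still in flight:

#include <linux/smp.h>

static void example_ipi(void *info)
{
        /* runs on the target cpu */
}

static struct call_single_data example_csd = {
        .func = example_ipi,
};

static void example_kick(int cpu)
{
        /* last argument is the new 'wait' flag: 0 == do not wait */
        __smp_call_function_single(cpu, &example_csd, 0);
}
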
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index f41ffd7c2dd9..34c4475ac4a2 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -103,6 +103,14 @@
103#define SONYPI_EVENT_WIRELESS_OFF 61 103#define SONYPI_EVENT_WIRELESS_OFF 61
104#define SONYPI_EVENT_ZOOM_IN_PRESSED 62 104#define SONYPI_EVENT_ZOOM_IN_PRESSED 62
105#define SONYPI_EVENT_ZOOM_OUT_PRESSED 63 105#define SONYPI_EVENT_ZOOM_OUT_PRESSED 63
106#define SONYPI_EVENT_CD_EJECT_PRESSED 64
107#define SONYPI_EVENT_MODEKEY_PRESSED 65
108#define SONYPI_EVENT_PKEY_P4 66
109#define SONYPI_EVENT_PKEY_P5 67
110#define SONYPI_EVENT_SETTINGKEY_PRESSED 68
111#define SONYPI_EVENT_VOLUME_INC_PRESSED 69
112#define SONYPI_EVENT_VOLUME_DEC_PRESSED 70
113#define SONYPI_EVENT_BRIGHTNESS_PRESSED 71
106 114
107/* get/set brightness */ 115/* get/set brightness */
108#define SONYPI_IOCGBRT _IOR('v', 0, __u8) 116#define SONYPI_IOCGBRT _IOR('v', 0, __u8)
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h
index 1085212c446e..306e7b1c69ed 100644
--- a/include/linux/spi/eeprom.h
+++ b/include/linux/spi/eeprom.h
@@ -1,6 +1,8 @@
1#ifndef __LINUX_SPI_EEPROM_H 1#ifndef __LINUX_SPI_EEPROM_H
2#define __LINUX_SPI_EEPROM_H 2#define __LINUX_SPI_EEPROM_H
3 3
4#include <linux/memory.h>
5
4/* 6/*
5 * Put one of these structures in platform_data for SPI EEPROMS handled 7 * Put one of these structures in platform_data for SPI EEPROMS handled
6 * by the "at25" driver. On SPI, most EEPROMS understand the same core 8 * by the "at25" driver. On SPI, most EEPROMS understand the same core
@@ -17,6 +19,10 @@ struct spi_eeprom {
17#define EE_ADDR2 0x0002 /* 16 bit addrs */ 19#define EE_ADDR2 0x0002 /* 16 bit addrs */
18#define EE_ADDR3 0x0004 /* 24 bit addrs */ 20#define EE_ADDR3 0x0004 /* 24 bit addrs */
19#define EE_READONLY 0x0008 /* disallow writes */ 21#define EE_READONLY 0x0008 /* disallow writes */
22
23 /* for exporting this chip's data to other kernel code */
24 void (*setup)(struct memory_accessor *mem, void *context);
25 void *context;
20}; 26};
21 27
22#endif /* __LINUX_SPI_EEPROM_H */ 28#endif /* __LINUX_SPI_EEPROM_H */
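
The new setup()/context members let other kernel code receive the chip's memory_accessor once the at25 driver has bound. A hedged board-data sketch; the part name and sizes are illustrative only:

#include <linux/memory.h>
#include <linux/spi/eeprom.h>

static void example_eeprom_ready(struct memory_accessor *mem, void *context)
{
        /* stash 'mem'; other code can later use mem->read()/mem->write() */
}

static struct spi_eeprom example_at25 = {
        .byte_len       = 32768,        /* 256 Kbit part */
        .name           = "at25256",
        .page_size      = 64,
        .flags          = EE_ADDR2,     /* 16 bit addressing */
        .setup          = example_eeprom_ready,
        .context        = NULL,
};
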
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 68bb1c501d0d..2cc43fa380cb 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -204,6 +204,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
204 * SPI slaves, and are numbered from zero to num_chipselects. 204 * SPI slaves, and are numbered from zero to num_chipselects.
205 * each slave has a chipselect signal, but it's common that not 205 * each slave has a chipselect signal, but it's common that not
206 * every chipselect is connected to a slave. 206 * every chipselect is connected to a slave.
207 * @dma_alignment: SPI controller constraint on DMA buffers alignment.
207 * @setup: updates the device mode and clocking records used by a 208 * @setup: updates the device mode and clocking records used by a
208 * device's SPI controller; protocol code may call this. This 209 * device's SPI controller; protocol code may call this. This
209 * must fail if an unrecognized or unsupported mode is requested. 210 * must fail if an unrecognized or unsupported mode is requested.
@@ -239,6 +240,11 @@ struct spi_master {
239 */ 240 */
240 u16 num_chipselect; 241 u16 num_chipselect;
241 242
243 /* some SPI controllers pose alignment requirements on DMAable
244 * buffers; let protocol drivers know about these requirements.
245 */
246 u16 dma_alignment;
247
242 /* setup mode and clock, etc (spi driver may call many times) */ 248 /* setup mode and clock, etc (spi driver may call many times) */
243 int (*setup)(struct spi_device *spi); 249 int (*setup)(struct spi_device *spi);
244 250
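
The new dma_alignment field lets a controller advertise its DMA buffer constraint to protocol drivers. A hedged sketch of a hypothetical controller driver filling it in during probe():

#include <linux/spi/spi.h>

static void example_setup_master(struct spi_master *master)
{
        master->num_chipselect = 4;
        master->dma_alignment  = 4;     /* engine needs 32-bit aligned buffers */
}
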
diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h
index 0f01a0f1f40c..ca6782ee4b9f 100644
--- a/include/linux/spi/spi_gpio.h
+++ b/include/linux/spi/spi_gpio.h
@@ -25,10 +25,16 @@
25 * ... 25 * ...
26 * }; 26 * };
27 * 27 *
28 * If chipselect is not used (there's only one device on the bus), assign
29 * SPI_GPIO_NO_CHIPSELECT to the controller_data:
30 * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT;
31 *
28 * If the bitbanged bus is later switched to a "native" controller, 32 * If the bitbanged bus is later switched to a "native" controller,
29 * that platform_device and controller_data should be removed. 33 * that platform_device and controller_data should be removed.
30 */ 34 */
31 35
36#define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l)
37
32/** 38/**
33 * struct spi_gpio_platform_data - parameter for bitbanged SPI master 39 * struct spi_gpio_platform_data - parameter for bitbanged SPI master
34 * @sck: number of the GPIO used for clock output 40 * @sck: number of the GPIO used for clock output
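
Following the comment above, a hypothetical board-info entry for a bitbanged bus with a single slave and no chipselect line; the modalias and bus number are illustrative:

#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>

static struct spi_board_info example_board_info[] = {
        {
                .modalias        = "example-sensor",
                .max_speed_hz    = 1000000,
                .bus_num         = 2,   /* must match the spi_gpio pdev id */
                /* sole device on the bus, so no chipselect GPIO */
                .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT,
        },
};
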
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index a0c66a2e00ad..252b245cfcf4 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -153,9 +153,11 @@ do { \
153 extern int _raw_spin_trylock(spinlock_t *lock); 153 extern int _raw_spin_trylock(spinlock_t *lock);
154 extern void _raw_spin_unlock(spinlock_t *lock); 154 extern void _raw_spin_unlock(spinlock_t *lock);
155 extern void _raw_read_lock(rwlock_t *lock); 155 extern void _raw_read_lock(rwlock_t *lock);
156#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
156 extern int _raw_read_trylock(rwlock_t *lock); 157 extern int _raw_read_trylock(rwlock_t *lock);
157 extern void _raw_read_unlock(rwlock_t *lock); 158 extern void _raw_read_unlock(rwlock_t *lock);
158 extern void _raw_write_lock(rwlock_t *lock); 159 extern void _raw_write_lock(rwlock_t *lock);
160#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
159 extern int _raw_write_trylock(rwlock_t *lock); 161 extern int _raw_write_trylock(rwlock_t *lock);
160 extern void _raw_write_unlock(rwlock_t *lock); 162 extern void _raw_write_unlock(rwlock_t *lock);
161#else 163#else
@@ -165,9 +167,13 @@ do { \
165# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) 167# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
166# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) 168# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
167# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) 169# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
170# define _raw_read_lock_flags(lock, flags) \
171 __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
168# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) 172# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
169# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) 173# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
170# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) 174# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
175# define _raw_write_lock_flags(lock, flags) \
176 __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
171# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) 177# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
172# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) 178# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
173#endif 179#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index d18fc198aa2f..489019ef1694 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -10,8 +10,10 @@
10#include <linux/compiler.h> /* for inline */ 10#include <linux/compiler.h> /* for inline */
11#include <linux/types.h> /* for size_t */ 11#include <linux/types.h> /* for size_t */
12#include <linux/stddef.h> /* for NULL */ 12#include <linux/stddef.h> /* for NULL */
13#include <stdarg.h>
13 14
14extern char *strndup_user(const char __user *, long); 15extern char *strndup_user(const char __user *, long);
16extern void *memdup_user(const void __user *, size_t);
15 17
16/* 18/*
17 * Include machine specific inline routines 19 * Include machine specific inline routines
@@ -111,8 +113,23 @@ extern void argv_free(char **argv);
111 113
112extern bool sysfs_streq(const char *s1, const char *s2); 114extern bool sysfs_streq(const char *s1, const char *s2);
113 115
116#ifdef CONFIG_BINARY_PRINTF
117int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
118int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
119int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
120#endif
121
114extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, 122extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
115 const void *from, size_t available); 123 const void *from, size_t available);
116 124
125/**
126 * strstarts - does @str start with @prefix?
127 * @str: string to examine
128 * @prefix: prefix to look for.
129 */
130static inline bool strstarts(const char *str, const char *prefix)
131{
132 return strncmp(str, prefix, strlen(prefix)) == 0;
133}
117#endif 134#endif
118#endif /* _LINUX_STRING_H_ */ 135#endif /* _LINUX_STRING_H_ */
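
strstarts() is a thin strncmp() wrapper for prefix tests. A trivial, hypothetical use:

#include <linux/string.h>

static bool example_is_ours(const char *param)
{
        /* matches "example.debug", "example.mode", ... */
        return strstarts(param, "example.");
}
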
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 3435d24bfe55..2a30775959e9 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -24,6 +24,15 @@
24 */ 24 */
25typedef int (*svc_thread_fn)(void *); 25typedef int (*svc_thread_fn)(void *);
26 26
27/* statistics for svc_pool structures */
28struct svc_pool_stats {
29 unsigned long packets;
30 unsigned long sockets_queued;
31 unsigned long threads_woken;
32 unsigned long overloads_avoided;
33 unsigned long threads_timedout;
34};
35
27/* 36/*
28 * 37 *
29 * RPC service thread pool. 38 * RPC service thread pool.
@@ -41,6 +50,8 @@ struct svc_pool {
41 struct list_head sp_sockets; /* pending sockets */ 50 struct list_head sp_sockets; /* pending sockets */
42 unsigned int sp_nrthreads; /* # of threads in pool */ 51 unsigned int sp_nrthreads; /* # of threads in pool */
43 struct list_head sp_all_threads; /* all server threads */ 52 struct list_head sp_all_threads; /* all server threads */
53 int sp_nwaking; /* number of threads woken but not yet active */
54 struct svc_pool_stats sp_stats; /* statistics on pool operation */
44} ____cacheline_aligned_in_smp; 55} ____cacheline_aligned_in_smp;
45 56
46/* 57/*
@@ -69,7 +80,6 @@ struct svc_serv {
69 struct list_head sv_tempsocks; /* all temporary sockets */ 80 struct list_head sv_tempsocks; /* all temporary sockets */
70 int sv_tmpcnt; /* count of temporary sockets */ 81 int sv_tmpcnt; /* count of temporary sockets */
71 struct timer_list sv_temptimer; /* timer for aging temporary sockets */ 82 struct timer_list sv_temptimer; /* timer for aging temporary sockets */
72 sa_family_t sv_family; /* listener's address family */
73 83
74 char * sv_name; /* service name */ 84 char * sv_name; /* service name */
75 85
@@ -84,6 +94,8 @@ struct svc_serv {
84 struct module * sv_module; /* optional module to count when 94 struct module * sv_module; /* optional module to count when
85 * adding threads */ 95 * adding threads */
86 svc_thread_fn sv_function; /* main function for threads */ 96 svc_thread_fn sv_function; /* main function for threads */
97 unsigned int sv_drc_max_pages; /* Total pages for DRC */
98 unsigned int sv_drc_pages_used;/* DRC pages used */
87}; 99};
88 100
89/* 101/*
@@ -219,6 +231,7 @@ struct svc_rqst {
219 struct svc_cred rq_cred; /* auth info */ 231 struct svc_cred rq_cred; /* auth info */
220 void * rq_xprt_ctxt; /* transport specific context ptr */ 232 void * rq_xprt_ctxt; /* transport specific context ptr */
221 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ 233 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
234 int rq_usedeferral; /* use deferral */
222 235
223 size_t rq_xprt_hlen; /* xprt header len */ 236 size_t rq_xprt_hlen; /* xprt header len */
224 struct xdr_buf rq_arg; 237 struct xdr_buf rq_arg;
@@ -264,6 +277,7 @@ struct svc_rqst {
264 * cache pages */ 277 * cache pages */
265 wait_queue_head_t rq_wait; /* synchronization */ 278 wait_queue_head_t rq_wait; /* synchronization */
266 struct task_struct *rq_task; /* service thread */ 279 struct task_struct *rq_task; /* service thread */
280 int rq_waking; /* 1 if thread is being woken */
267}; 281};
268 282
269/* 283/*
@@ -385,19 +399,20 @@ struct svc_procedure {
385/* 399/*
386 * Function prototypes. 400 * Function prototypes.
387 */ 401 */
388struct svc_serv *svc_create(struct svc_program *, unsigned int, sa_family_t, 402struct svc_serv *svc_create(struct svc_program *, unsigned int,
389 void (*shutdown)(struct svc_serv *)); 403 void (*shutdown)(struct svc_serv *));
390struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, 404struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
391 struct svc_pool *pool); 405 struct svc_pool *pool);
392void svc_exit_thread(struct svc_rqst *); 406void svc_exit_thread(struct svc_rqst *);
393struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, 407struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
394 sa_family_t, void (*shutdown)(struct svc_serv *), 408 void (*shutdown)(struct svc_serv *),
395 svc_thread_fn, struct module *); 409 svc_thread_fn, struct module *);
396int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); 410int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
411int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
397void svc_destroy(struct svc_serv *); 412void svc_destroy(struct svc_serv *);
398int svc_process(struct svc_rqst *); 413int svc_process(struct svc_rqst *);
399int svc_register(const struct svc_serv *, const unsigned short, 414int svc_register(const struct svc_serv *, const int,
400 const unsigned short); 415 const unsigned short, const unsigned short);
401 416
402void svc_wake_up(struct svc_serv *); 417void svc_wake_up(struct svc_serv *);
403void svc_reserve(struct svc_rqst *rqstp, int space); 418void svc_reserve(struct svc_rqst *rqstp, int space);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 0127daca4354..0d9cb6ef28b0 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -71,7 +71,8 @@ int svc_reg_xprt_class(struct svc_xprt_class *);
71void svc_unreg_xprt_class(struct svc_xprt_class *); 71void svc_unreg_xprt_class(struct svc_xprt_class *);
72void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *, 72void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *,
73 struct svc_serv *); 73 struct svc_serv *);
74int svc_create_xprt(struct svc_serv *, char *, unsigned short, int); 74int svc_create_xprt(struct svc_serv *, const char *, const int,
75 const unsigned short, int);
75void svc_xprt_enqueue(struct svc_xprt *xprt); 76void svc_xprt_enqueue(struct svc_xprt *xprt);
76void svc_xprt_received(struct svc_xprt *); 77void svc_xprt_received(struct svc_xprt *);
77void svc_xprt_put(struct svc_xprt *xprt); 78void svc_xprt_put(struct svc_xprt *xprt);
@@ -80,7 +81,8 @@ void svc_close_xprt(struct svc_xprt *xprt);
80void svc_delete_xprt(struct svc_xprt *xprt); 81void svc_delete_xprt(struct svc_xprt *xprt);
81int svc_port_is_privileged(struct sockaddr *sin); 82int svc_port_is_privileged(struct sockaddr *sin);
82int svc_print_xprts(char *buf, int maxlen); 83int svc_print_xprts(char *buf, int maxlen);
83struct svc_xprt *svc_find_xprt(struct svc_serv *, char *, int, int); 84struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
85 const sa_family_t af, const unsigned short port);
84int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen); 86int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen);
85 87
86static inline void svc_xprt_get(struct svc_xprt *xprt) 88static inline void svc_xprt_get(struct svc_xprt *xprt)
@@ -88,29 +90,32 @@ static inline void svc_xprt_get(struct svc_xprt *xprt)
88 kref_get(&xprt->xpt_ref); 90 kref_get(&xprt->xpt_ref);
89} 91}
90static inline void svc_xprt_set_local(struct svc_xprt *xprt, 92static inline void svc_xprt_set_local(struct svc_xprt *xprt,
91 struct sockaddr *sa, int salen) 93 const struct sockaddr *sa,
94 const size_t salen)
92{ 95{
93 memcpy(&xprt->xpt_local, sa, salen); 96 memcpy(&xprt->xpt_local, sa, salen);
94 xprt->xpt_locallen = salen; 97 xprt->xpt_locallen = salen;
95} 98}
96static inline void svc_xprt_set_remote(struct svc_xprt *xprt, 99static inline void svc_xprt_set_remote(struct svc_xprt *xprt,
97 struct sockaddr *sa, int salen) 100 const struct sockaddr *sa,
101 const size_t salen)
98{ 102{
99 memcpy(&xprt->xpt_remote, sa, salen); 103 memcpy(&xprt->xpt_remote, sa, salen);
100 xprt->xpt_remotelen = salen; 104 xprt->xpt_remotelen = salen;
101} 105}
102static inline unsigned short svc_addr_port(struct sockaddr *sa) 106static inline unsigned short svc_addr_port(const struct sockaddr *sa)
103{ 107{
104 unsigned short ret = 0; 108 const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;
109 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa;
110
105 switch (sa->sa_family) { 111 switch (sa->sa_family) {
106 case AF_INET: 112 case AF_INET:
107 ret = ntohs(((struct sockaddr_in *)sa)->sin_port); 113 return ntohs(sin->sin_port);
108 break;
109 case AF_INET6: 114 case AF_INET6:
110 ret = ntohs(((struct sockaddr_in6 *)sa)->sin6_port); 115 return ntohs(sin6->sin6_port);
111 break;
112 } 116 }
113 return ret; 117
118 return 0;
114} 119}
115 120
116static inline size_t svc_addr_len(struct sockaddr *sa) 121static inline size_t svc_addr_len(struct sockaddr *sa)
@@ -124,36 +129,39 @@ static inline size_t svc_addr_len(struct sockaddr *sa)
124 return -EAFNOSUPPORT; 129 return -EAFNOSUPPORT;
125} 130}
126 131
127static inline unsigned short svc_xprt_local_port(struct svc_xprt *xprt) 132static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt)
128{ 133{
129 return svc_addr_port((struct sockaddr *)&xprt->xpt_local); 134 return svc_addr_port((const struct sockaddr *)&xprt->xpt_local);
130} 135}
131 136
132static inline unsigned short svc_xprt_remote_port(struct svc_xprt *xprt) 137static inline unsigned short svc_xprt_remote_port(const struct svc_xprt *xprt)
133{ 138{
134 return svc_addr_port((struct sockaddr *)&xprt->xpt_remote); 139 return svc_addr_port((const struct sockaddr *)&xprt->xpt_remote);
135} 140}
136 141
137static inline char *__svc_print_addr(struct sockaddr *addr, 142static inline char *__svc_print_addr(const struct sockaddr *addr,
138 char *buf, size_t len) 143 char *buf, const size_t len)
139{ 144{
145 const struct sockaddr_in *sin = (const struct sockaddr_in *)addr;
146 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr;
147
140 switch (addr->sa_family) { 148 switch (addr->sa_family) {
141 case AF_INET: 149 case AF_INET:
142 snprintf(buf, len, "%pI4, port=%u", 150 snprintf(buf, len, "%pI4, port=%u", &sin->sin_addr,
143 &((struct sockaddr_in *)addr)->sin_addr, 151 ntohs(sin->sin_port));
144 ntohs(((struct sockaddr_in *) addr)->sin_port));
145 break; 152 break;
146 153
147 case AF_INET6: 154 case AF_INET6:
148 snprintf(buf, len, "%pI6, port=%u", 155 snprintf(buf, len, "%pI6, port=%u",
149 &((struct sockaddr_in6 *)addr)->sin6_addr, 156 &sin6->sin6_addr,
150 ntohs(((struct sockaddr_in6 *) addr)->sin6_port)); 157 ntohs(sin6->sin6_port));
151 break; 158 break;
152 159
153 default: 160 default:
154 snprintf(buf, len, "unknown address type: %d", addr->sa_family); 161 snprintf(buf, len, "unknown address type: %d", addr->sa_family);
155 break; 162 break;
156 } 163 }
164
157 return buf; 165 return buf;
158} 166}
159#endif /* SUNRPC_SVC_XPRT_H */ 167#endif /* SUNRPC_SVC_XPRT_H */
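
svc_create_xprt() now takes the protocol family explicitly, alongside the const-qualified address helpers above. A hedged sketch of opening a listener; the transport name, family and port are illustrative, and SVC_SOCK_DEFAULTS comes from sunrpc/svcsock.h:

#include <linux/socket.h>
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/svcsock.h>

static int example_listen(struct svc_serv *serv)
{
        /* the protocol family is now passed explicitly */
        int err = svc_create_xprt(serv, "tcp", PF_INET, 2049,
                                  SVC_SOCK_DEFAULTS);

        return err < 0 ? err : 0;
}
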
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 49e1eb454465..d8910b68e1bd 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -69,27 +69,27 @@ struct xdr_buf {
69 * pre-xdr'ed macros. 69 * pre-xdr'ed macros.
70 */ 70 */
71 71
72#define xdr_zero __constant_htonl(0) 72#define xdr_zero cpu_to_be32(0)
73#define xdr_one __constant_htonl(1) 73#define xdr_one cpu_to_be32(1)
74#define xdr_two __constant_htonl(2) 74#define xdr_two cpu_to_be32(2)
75 75
76#define rpc_success __constant_htonl(RPC_SUCCESS) 76#define rpc_success cpu_to_be32(RPC_SUCCESS)
77#define rpc_prog_unavail __constant_htonl(RPC_PROG_UNAVAIL) 77#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL)
78#define rpc_prog_mismatch __constant_htonl(RPC_PROG_MISMATCH) 78#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH)
79#define rpc_proc_unavail __constant_htonl(RPC_PROC_UNAVAIL) 79#define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL)
80#define rpc_garbage_args __constant_htonl(RPC_GARBAGE_ARGS) 80#define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS)
81#define rpc_system_err __constant_htonl(RPC_SYSTEM_ERR) 81#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR)
82#define rpc_drop_reply __constant_htonl(RPC_DROP_REPLY) 82#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY)
83 83
84#define rpc_auth_ok __constant_htonl(RPC_AUTH_OK) 84#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK)
85#define rpc_autherr_badcred __constant_htonl(RPC_AUTH_BADCRED) 85#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED)
86#define rpc_autherr_rejectedcred __constant_htonl(RPC_AUTH_REJECTEDCRED) 86#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED)
87#define rpc_autherr_badverf __constant_htonl(RPC_AUTH_BADVERF) 87#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF)
88#define rpc_autherr_rejectedverf __constant_htonl(RPC_AUTH_REJECTEDVERF) 88#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF)
89#define rpc_autherr_tooweak __constant_htonl(RPC_AUTH_TOOWEAK) 89#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK)
90#define rpcsec_gsserr_credproblem __constant_htonl(RPCSEC_GSS_CREDPROBLEM) 90#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM)
91#define rpcsec_gsserr_ctxproblem __constant_htonl(RPCSEC_GSS_CTXPROBLEM) 91#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM)
92#define rpc_autherr_oldseqnum __constant_htonl(101) 92#define rpc_autherr_oldseqnum cpu_to_be32(101)
93 93
94/* 94/*
95 * Miscellaneous XDR helper functions 95 * Miscellaneous XDR helper functions
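
Switching the pre-encoded words from __constant_htonl() to cpu_to_be32() gives them the __be32 type, so sparse can check endianness at the point of use. A trivial, hypothetical comparison:

#include <linux/sunrpc/xdr.h>

static bool example_is_garbage(__be32 status)
{
        /* both sides are typed __be32, so no byte-order confusion */
        return status == rpc_garbage_args;
}
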
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 11fc71d50c1e..1758d9f5b5c3 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -235,6 +235,7 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *
235 */ 235 */
236int xprt_register_transport(struct xprt_class *type); 236int xprt_register_transport(struct xprt_class *type);
237int xprt_unregister_transport(struct xprt_class *type); 237int xprt_unregister_transport(struct xprt_class *type);
238int xprt_load_transport(const char *);
238void xprt_set_retrans_timeout_def(struct rpc_task *task); 239void xprt_set_retrans_timeout_def(struct rpc_task *task);
239void xprt_set_retrans_timeout_rtt(struct rpc_task *task); 240void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
240void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); 241void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
@@ -259,6 +260,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
259#define XPRT_BOUND (4) 260#define XPRT_BOUND (4)
260#define XPRT_BINDING (5) 261#define XPRT_BINDING (5)
261#define XPRT_CLOSING (6) 262#define XPRT_CLOSING (6)
263#define XPRT_CONNECTION_ABORT (7)
262 264
263static inline void xprt_set_connected(struct rpc_xprt *xprt) 265static inline void xprt_set_connected(struct rpc_xprt *xprt)
264{ 266{
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index c7d9bb1832ba..3e3a4364cbff 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -1,9 +1,6 @@
1#ifndef _LINUX_SUSPEND_H 1#ifndef _LINUX_SUSPEND_H
2#define _LINUX_SUSPEND_H 2#define _LINUX_SUSPEND_H
3 3
4#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
5#include <asm/suspend.h>
6#endif
7#include <linux/swap.h> 4#include <linux/swap.h>
8#include <linux/notifier.h> 5#include <linux/notifier.h>
9#include <linux/init.h> 6#include <linux/init.h>
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d30215578877..62d81435347a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -212,7 +212,7 @@ static inline void lru_cache_add_active_file(struct page *page)
212 212
213/* linux/mm/vmscan.c */ 213/* linux/mm/vmscan.c */
214extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 214extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
215 gfp_t gfp_mask); 215 gfp_t gfp_mask, nodemask_t *mask);
216extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 216extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
217 gfp_t gfp_mask, bool noswap, 217 gfp_t gfp_mask, bool noswap,
218 unsigned int swappiness); 218 unsigned int swappiness);
@@ -382,6 +382,11 @@ static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
382 return NULL; 382 return NULL;
383} 383}
384 384
385static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
386{
387 return 0;
388}
389
385static inline struct page *lookup_swap_cache(swp_entry_t swp) 390static inline struct page *lookup_swap_cache(swp_entry_t swp)
386{ 391{
387 return NULL; 392 return NULL;
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index dedd3c0cfe30..ac9ff54f7cb3 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -31,7 +31,7 @@ extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev,
31 phys_addr_t address); 31 phys_addr_t address);
32extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); 32extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
33 33
34extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); 34extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size);
35 35
36extern void 36extern void
37*swiotlb_alloc_coherent(struct device *hwdev, size_t size, 37*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ -41,20 +41,13 @@ extern void
41swiotlb_free_coherent(struct device *hwdev, size_t size, 41swiotlb_free_coherent(struct device *hwdev, size_t size,
42 void *vaddr, dma_addr_t dma_handle); 42 void *vaddr, dma_addr_t dma_handle);
43 43
44extern dma_addr_t 44extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
45swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir); 45 unsigned long offset, size_t size,
46 46 enum dma_data_direction dir,
47extern void 47 struct dma_attrs *attrs);
48swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, 48extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
49 size_t size, int dir); 49 size_t size, enum dma_data_direction dir,
50 50 struct dma_attrs *attrs);
51extern dma_addr_t
52swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
53 int dir, struct dma_attrs *attrs);
54
55extern void
56swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
57 size_t size, int dir, struct dma_attrs *attrs);
58 51
59extern int 52extern int
60swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, 53swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
@@ -66,36 +59,38 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
66 59
67extern int 60extern int
68swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, 61swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
69 int dir, struct dma_attrs *attrs); 62 enum dma_data_direction dir, struct dma_attrs *attrs);
70 63
71extern void 64extern void
72swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, 65swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
73 int nelems, int dir, struct dma_attrs *attrs); 66 int nelems, enum dma_data_direction dir,
67 struct dma_attrs *attrs);
74 68
75extern void 69extern void
76swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, 70swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
77 size_t size, int dir); 71 size_t size, enum dma_data_direction dir);
78 72
79extern void 73extern void
80swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, 74swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
81 int nelems, int dir); 75 int nelems, enum dma_data_direction dir);
82 76
83extern void 77extern void
84swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, 78swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
85 size_t size, int dir); 79 size_t size, enum dma_data_direction dir);
86 80
87extern void 81extern void
88swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, 82swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
89 int nelems, int dir); 83 int nelems, enum dma_data_direction dir);
90 84
91extern void 85extern void
92swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, 86swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
93 unsigned long offset, size_t size, int dir); 87 unsigned long offset, size_t size,
88 enum dma_data_direction dir);
94 89
95extern void 90extern void
96swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, 91swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
97 unsigned long offset, size_t size, 92 unsigned long offset, size_t size,
98 int dir); 93 enum dma_data_direction dir);
99 94
100extern int 95extern int
101swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); 96swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
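
The single-buffer map/unmap helpers give way to a page-based interface that takes an explicit enum dma_data_direction and optional attributes. A rough sketch of the new calling convention (hypothetical caller; error handling simplified, with 0 used as an error sentinel only for illustration):

    #include <linux/dma-mapping.h>
    #include <linux/swiotlb.h>
    #include <linux/mm.h>

    /* hypothetical: map one page for a device read, returning 0 on failure */
    static dma_addr_t demo_map_one_page(struct device *dev, struct page *page)
    {
            dma_addr_t handle = swiotlb_map_page(dev, page, 0, PAGE_SIZE,
                                                 DMA_TO_DEVICE, NULL);

            if (swiotlb_dma_mapping_error(dev, handle))
                    return 0;
            return handle;
    }

    /* ...and the matching teardown */
    static void demo_unmap_one_page(struct device *dev, dma_addr_t handle)
    {
            swiotlb_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE, NULL);
    }
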
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
index 99b8bdb17b2b..0ff2779c44d0 100644
--- a/include/linux/synclink.h
+++ b/include/linux/synclink.h
@@ -125,6 +125,7 @@
125#define MGSL_MODE_MONOSYNC 3 125#define MGSL_MODE_MONOSYNC 3
126#define MGSL_MODE_BISYNC 4 126#define MGSL_MODE_BISYNC 4
127#define MGSL_MODE_RAW 6 127#define MGSL_MODE_RAW 6
128#define MGSL_MODE_BASE_CLOCK 7
128 129
129#define MGSL_BUS_TYPE_ISA 1 130#define MGSL_BUS_TYPE_ISA 1
130#define MGSL_BUS_TYPE_EISA 2 131#define MGSL_BUS_TYPE_EISA 2
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index f9f900cfd066..6470f74074af 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -65,6 +65,7 @@ struct old_linux_dirent;
65#include <asm/signal.h> 65#include <asm/signal.h>
66#include <linux/quota.h> 66#include <linux/quota.h>
67#include <linux/key.h> 67#include <linux/key.h>
68#include <linux/ftrace.h>
68 69
69#define __SC_DECL1(t1, a1) t1 a1 70#define __SC_DECL1(t1, a1) t1 a1
70#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) 71#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__)
@@ -95,7 +96,46 @@ struct old_linux_dirent;
95#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) 96#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
96#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) 97#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
97 98
99#ifdef CONFIG_FTRACE_SYSCALLS
100#define __SC_STR_ADECL1(t, a) #a
101#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
102#define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__)
103#define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__)
104#define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__)
105#define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__)
106
107#define __SC_STR_TDECL1(t, a) #t
108#define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__)
109#define __SC_STR_TDECL3(t, a, ...) #t, __SC_STR_TDECL2(__VA_ARGS__)
110#define __SC_STR_TDECL4(t, a, ...) #t, __SC_STR_TDECL3(__VA_ARGS__)
111#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
112#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
113
114#define SYSCALL_METADATA(sname, nb) \
115 static const struct syscall_metadata __used \
116 __attribute__((__aligned__(4))) \
117 __attribute__((section("__syscalls_metadata"))) \
118 __syscall_meta_##sname = { \
119 .name = "sys"#sname, \
120 .nb_args = nb, \
121 .types = types_##sname, \
122 .args = args_##sname, \
123 }
124
125#define SYSCALL_DEFINE0(sname) \
126 static const struct syscall_metadata __used \
127 __attribute__((__aligned__(4))) \
128 __attribute__((section("__syscalls_metadata"))) \
129 __syscall_meta_##sname = { \
130 .name = "sys_"#sname, \
131 .nb_args = 0, \
132 }; \
133 asmlinkage long sys_##sname(void)
134
135#else
98#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) 136#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
137#endif
138
99#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) 139#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
100#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) 140#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
101#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) 141#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
@@ -117,10 +157,26 @@ struct old_linux_dirent;
117#endif 157#endif
118#endif 158#endif
119 159
160#ifdef CONFIG_FTRACE_SYSCALLS
161#define SYSCALL_DEFINEx(x, sname, ...) \
162 static const char *types_##sname[] = { \
163 __SC_STR_TDECL##x(__VA_ARGS__) \
164 }; \
165 static const char *args_##sname[] = { \
166 __SC_STR_ADECL##x(__VA_ARGS__) \
167 }; \
168 SYSCALL_METADATA(sname, x); \
169 __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
170#else
171#define SYSCALL_DEFINEx(x, sname, ...) \
172 __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
173#endif
174
120#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS 175#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
121 176
122#define SYSCALL_DEFINE(name) static inline long SYSC_##name 177#define SYSCALL_DEFINE(name) static inline long SYSC_##name
123#define SYSCALL_DEFINEx(x, name, ...) \ 178
179#define __SYSCALL_DEFINEx(x, name, ...) \
124 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ 180 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \
125 static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ 181 static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \
126 asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ 182 asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \
@@ -134,7 +190,7 @@ struct old_linux_dirent;
134#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ 190#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
135 191
136#define SYSCALL_DEFINE(name) asmlinkage long sys_##name 192#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
137#define SYSCALL_DEFINEx(x, name, ...) \ 193#define __SYSCALL_DEFINEx(x, name, ...) \
138 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) 194 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))
139 195
140#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ 196#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
@@ -461,6 +517,10 @@ asmlinkage long sys_pread64(unsigned int fd, char __user *buf,
461 size_t count, loff_t pos); 517 size_t count, loff_t pos);
462asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, 518asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
463 size_t count, loff_t pos); 519 size_t count, loff_t pos);
520asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
521 unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
522asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
523 unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
464asmlinkage long sys_getcwd(char __user *buf, unsigned long size); 524asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
465asmlinkage long sys_mkdir(const char __user *pathname, int mode); 525asmlinkage long sys_mkdir(const char __user *pathname, int mode);
466asmlinkage long sys_chdir(const char __user *filename); 526asmlinkage long sys_chdir(const char __user *filename);
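
With SYSCALL_DEFINEx() now layered on __SYSCALL_DEFINEx(), an ordinary definition picks up the ftrace metadata automatically when CONFIG_FTRACE_SYSCALLS is set. A hypothetical example (illustrative only, not a real syscall):

    /* hypothetical syscall, shown only to illustrate the macro expansion */
    SYSCALL_DEFINE2(demo_add, int, a, int, b)
    {
            /*
             * Expands via SYSCALL_DEFINEx(2, _demo_add, ...): with
             * CONFIG_FTRACE_SYSCALLS=y this also emits types__demo_add[],
             * args__demo_add[] and a __syscall_meta__demo_add entry in the
             * "__syscalls_metadata" section before defining sys_demo_add().
             */
            return a + b;
    }
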
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 917707e6151d..1de8b9eb841b 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -27,27 +27,46 @@
27 27
28#include <linux/idr.h> 28#include <linux/idr.h>
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/workqueue.h>
30 31
31struct thermal_zone_device; 32struct thermal_zone_device;
32struct thermal_cooling_device; 33struct thermal_cooling_device;
33 34
35enum thermal_device_mode {
36 THERMAL_DEVICE_DISABLED = 0,
37 THERMAL_DEVICE_ENABLED,
38};
39
40enum thermal_trip_type {
41 THERMAL_TRIP_ACTIVE = 0,
42 THERMAL_TRIP_PASSIVE,
43 THERMAL_TRIP_HOT,
44 THERMAL_TRIP_CRITICAL,
45};
46
34struct thermal_zone_device_ops { 47struct thermal_zone_device_ops {
35 int (*bind) (struct thermal_zone_device *, 48 int (*bind) (struct thermal_zone_device *,
36 struct thermal_cooling_device *); 49 struct thermal_cooling_device *);
37 int (*unbind) (struct thermal_zone_device *, 50 int (*unbind) (struct thermal_zone_device *,
38 struct thermal_cooling_device *); 51 struct thermal_cooling_device *);
39 int (*get_temp) (struct thermal_zone_device *, char *); 52 int (*get_temp) (struct thermal_zone_device *, unsigned long *);
40 int (*get_mode) (struct thermal_zone_device *, char *); 53 int (*get_mode) (struct thermal_zone_device *,
41 int (*set_mode) (struct thermal_zone_device *, const char *); 54 enum thermal_device_mode *);
42 int (*get_trip_type) (struct thermal_zone_device *, int, char *); 55 int (*set_mode) (struct thermal_zone_device *,
43 int (*get_trip_temp) (struct thermal_zone_device *, int, char *); 56 enum thermal_device_mode);
57 int (*get_trip_type) (struct thermal_zone_device *, int,
58 enum thermal_trip_type *);
59 int (*get_trip_temp) (struct thermal_zone_device *, int,
60 unsigned long *);
44 int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); 61 int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
62 int (*notify) (struct thermal_zone_device *, int,
63 enum thermal_trip_type);
45}; 64};
46 65
47struct thermal_cooling_device_ops { 66struct thermal_cooling_device_ops {
48 int (*get_max_state) (struct thermal_cooling_device *, char *); 67 int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
49 int (*get_cur_state) (struct thermal_cooling_device *, char *); 68 int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
50 int (*set_cur_state) (struct thermal_cooling_device *, unsigned int); 69 int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
51}; 70};
52 71
53#define THERMAL_TRIPS_NONE -1 72#define THERMAL_TRIPS_NONE -1
@@ -88,11 +107,19 @@ struct thermal_zone_device {
88 struct device device; 107 struct device device;
89 void *devdata; 108 void *devdata;
90 int trips; 109 int trips;
110 int tc1;
111 int tc2;
112 int passive_delay;
113 int polling_delay;
114 int last_temperature;
115 bool passive;
116 unsigned int forced_passive;
91 struct thermal_zone_device_ops *ops; 117 struct thermal_zone_device_ops *ops;
92 struct list_head cooling_devices; 118 struct list_head cooling_devices;
93 struct idr idr; 119 struct idr idr;
94 struct mutex lock; /* protect cooling devices list */ 120 struct mutex lock; /* protect cooling devices list */
95 struct list_head node; 121 struct list_head node;
122 struct delayed_work poll_queue;
96#if defined(CONFIG_THERMAL_HWMON) 123#if defined(CONFIG_THERMAL_HWMON)
97 struct list_head hwmon_node; 124 struct list_head hwmon_node;
98 struct thermal_hwmon_device *hwmon; 125 struct thermal_hwmon_device *hwmon;
@@ -104,13 +131,16 @@ struct thermal_zone_device {
104struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, 131struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
105 struct 132 struct
106 thermal_zone_device_ops 133 thermal_zone_device_ops
107 *); 134 *, int tc1, int tc2,
135 int passive_freq,
136 int polling_freq);
108void thermal_zone_device_unregister(struct thermal_zone_device *); 137void thermal_zone_device_unregister(struct thermal_zone_device *);
109 138
110int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, 139int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
111 struct thermal_cooling_device *); 140 struct thermal_cooling_device *);
112int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, 141int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
113 struct thermal_cooling_device *); 142 struct thermal_cooling_device *);
143void thermal_zone_device_update(struct thermal_zone_device *);
114struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, 144struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
115 struct 145 struct
116 thermal_cooling_device_ops 146 thermal_cooling_device_ops
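
The ops now return numeric readings and enum-typed modes/trip types instead of formatting strings themselves. A minimal driver-side sketch against the new prototypes (hypothetical driver, fixed values for illustration):

    #include <linux/thermal.h>

    static int demo_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
    {
            *temp = 45000;  /* hypothetical reading */
            return 0;
    }

    static int demo_get_mode(struct thermal_zone_device *tz,
                             enum thermal_device_mode *mode)
    {
            *mode = THERMAL_DEVICE_ENABLED;
            return 0;
    }

    static struct thermal_zone_device_ops demo_ops = {
            .get_temp = demo_get_temp,
            .get_mode = demo_get_mode,
    };
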
diff --git a/include/linux/timer.h b/include/linux/timer.h
index e2d662e3416e..6cdb6f3331f1 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -5,6 +5,7 @@
5#include <linux/ktime.h> 5#include <linux/ktime.h>
6#include <linux/stddef.h> 6#include <linux/stddef.h>
7#include <linux/debugobjects.h> 7#include <linux/debugobjects.h>
8#include <linux/stringify.h>
8 9
9struct tvec_base; 10struct tvec_base;
10 11
@@ -21,52 +22,126 @@ struct timer_list {
21 char start_comm[16]; 22 char start_comm[16];
22 int start_pid; 23 int start_pid;
23#endif 24#endif
25#ifdef CONFIG_LOCKDEP
26 struct lockdep_map lockdep_map;
27#endif
24}; 28};
25 29
26extern struct tvec_base boot_tvec_bases; 30extern struct tvec_base boot_tvec_bases;
27 31
32#ifdef CONFIG_LOCKDEP
33/*
34 * NB: because we have to copy the lockdep_map, setting the lockdep_map key
35 * (second argument) here is required, otherwise it could be initialised to
36 * the copy of the lockdep_map later! We use the address of the string
37 * "<file>:<line>" as the key and the string itself as the name of the lockdep_map.
38 */
39#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \
40 .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn),
41#else
42#define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn)
43#endif
44
28#define TIMER_INITIALIZER(_function, _expires, _data) { \ 45#define TIMER_INITIALIZER(_function, _expires, _data) { \
29 .entry = { .prev = TIMER_ENTRY_STATIC }, \ 46 .entry = { .prev = TIMER_ENTRY_STATIC }, \
30 .function = (_function), \ 47 .function = (_function), \
31 .expires = (_expires), \ 48 .expires = (_expires), \
32 .data = (_data), \ 49 .data = (_data), \
33 .base = &boot_tvec_bases, \ 50 .base = &boot_tvec_bases, \
51 __TIMER_LOCKDEP_MAP_INITIALIZER( \
52 __FILE__ ":" __stringify(__LINE__)) \
34 } 53 }
35 54
36#define DEFINE_TIMER(_name, _function, _expires, _data) \ 55#define DEFINE_TIMER(_name, _function, _expires, _data) \
37 struct timer_list _name = \ 56 struct timer_list _name = \
38 TIMER_INITIALIZER(_function, _expires, _data) 57 TIMER_INITIALIZER(_function, _expires, _data)
39 58
40void init_timer(struct timer_list *timer); 59void init_timer_key(struct timer_list *timer,
41void init_timer_deferrable(struct timer_list *timer); 60 const char *name,
61 struct lock_class_key *key);
62void init_timer_deferrable_key(struct timer_list *timer,
63 const char *name,
64 struct lock_class_key *key);
65
66#ifdef CONFIG_LOCKDEP
67#define init_timer(timer) \
68 do { \
69 static struct lock_class_key __key; \
70 init_timer_key((timer), #timer, &__key); \
71 } while (0)
72
73#define init_timer_deferrable(timer) \
74 do { \
75 static struct lock_class_key __key; \
76 init_timer_deferrable_key((timer), #timer, &__key); \
77 } while (0)
78
79#define init_timer_on_stack(timer) \
80 do { \
81 static struct lock_class_key __key; \
82 init_timer_on_stack_key((timer), #timer, &__key); \
83 } while (0)
84
85#define setup_timer(timer, fn, data) \
86 do { \
87 static struct lock_class_key __key; \
88 setup_timer_key((timer), #timer, &__key, (fn), (data));\
89 } while (0)
90
91#define setup_timer_on_stack(timer, fn, data) \
92 do { \
93 static struct lock_class_key __key; \
94 setup_timer_on_stack_key((timer), #timer, &__key, \
95 (fn), (data)); \
96 } while (0)
97#else
98#define init_timer(timer)\
99 init_timer_key((timer), NULL, NULL)
100#define init_timer_deferrable(timer)\
101 init_timer_deferrable_key((timer), NULL, NULL)
102#define init_timer_on_stack(timer)\
103 init_timer_on_stack_key((timer), NULL, NULL)
104#define setup_timer(timer, fn, data)\
105 setup_timer_key((timer), NULL, NULL, (fn), (data))
106#define setup_timer_on_stack(timer, fn, data)\
107 setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data))
108#endif
42 109
43#ifdef CONFIG_DEBUG_OBJECTS_TIMERS 110#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
44extern void init_timer_on_stack(struct timer_list *timer); 111extern void init_timer_on_stack_key(struct timer_list *timer,
112 const char *name,
113 struct lock_class_key *key);
45extern void destroy_timer_on_stack(struct timer_list *timer); 114extern void destroy_timer_on_stack(struct timer_list *timer);
46#else 115#else
47static inline void destroy_timer_on_stack(struct timer_list *timer) { } 116static inline void destroy_timer_on_stack(struct timer_list *timer) { }
48static inline void init_timer_on_stack(struct timer_list *timer) 117static inline void init_timer_on_stack_key(struct timer_list *timer,
118 const char *name,
119 struct lock_class_key *key)
49{ 120{
50 init_timer(timer); 121 init_timer_key(timer, name, key);
51} 122}
52#endif 123#endif
53 124
54static inline void setup_timer(struct timer_list * timer, 125static inline void setup_timer_key(struct timer_list * timer,
126 const char *name,
127 struct lock_class_key *key,
55 void (*function)(unsigned long), 128 void (*function)(unsigned long),
56 unsigned long data) 129 unsigned long data)
57{ 130{
58 timer->function = function; 131 timer->function = function;
59 timer->data = data; 132 timer->data = data;
60 init_timer(timer); 133 init_timer_key(timer, name, key);
61} 134}
62 135
63static inline void setup_timer_on_stack(struct timer_list *timer, 136static inline void setup_timer_on_stack_key(struct timer_list *timer,
137 const char *name,
138 struct lock_class_key *key,
64 void (*function)(unsigned long), 139 void (*function)(unsigned long),
65 unsigned long data) 140 unsigned long data)
66{ 141{
67 timer->function = function; 142 timer->function = function;
68 timer->data = data; 143 timer->data = data;
69 init_timer_on_stack(timer); 144 init_timer_on_stack_key(timer, name, key);
70} 145}
71 146
72/** 147/**
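
Call sites keep the old interface; with CONFIG_LOCKDEP=y each init/setup macro now plants a per-site static lock_class_key and forwards to the corresponding *_key() variant. Typical usage is unchanged (sketch, hypothetical callback):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void demo_timeout(unsigned long data)
    {
            /* runs when the timer expires */
    }

    static struct timer_list demo_timer;

    static void demo_arm(void)
    {
            /* under CONFIG_LOCKDEP this becomes setup_timer_key(..., &__key, ...) */
            setup_timer(&demo_timer, demo_timeout, 0);
            mod_timer(&demo_timer, jiffies + HZ);
    }
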
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h
index dd253177f65f..3e08a1c86830 100644
--- a/include/linux/timeriomem-rng.h
+++ b/include/linux/timeriomem-rng.h
@@ -14,7 +14,7 @@ struct timeriomem_rng_data {
14 struct completion completion; 14 struct completion completion;
15 unsigned int present:1; 15 unsigned int present:1;
16 16
17 u32 __iomem *address; 17 void __iomem *address;
18 18
19 /* measures in usecs */ 19 /* measures in usecs */
20 unsigned int period; 20 unsigned int period;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a16b9e06f2e5..7402c1a27c4f 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -38,11 +38,7 @@
38#endif 38#endif
39 39
40#ifndef nr_cpus_node 40#ifndef nr_cpus_node
41#define nr_cpus_node(node) \ 41#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
42 ({ \
43 node_to_cpumask_ptr(__tmp__, node); \
44 cpus_weight(*__tmp__); \
45 })
46#endif 42#endif
47 43
48#define for_each_node_with_cpus(node) \ 44#define for_each_node_with_cpus(node) \
@@ -200,4 +196,9 @@ int arch_update_cpu_topology(void);
200#define topology_core_cpumask(cpu) cpumask_of(cpu) 196#define topology_core_cpumask(cpu) cpumask_of(cpu)
201#endif 197#endif
202 198
199/* Returns the number of the current Node. */
200#ifndef numa_node_id
201#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
202#endif
203
203#endif /* _LINUX_TOPOLOGY_H */ 204#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
new file mode 100644
index 000000000000..7a8130384087
--- /dev/null
+++ b/include/linux/trace_clock.h
@@ -0,0 +1,19 @@
1#ifndef _LINUX_TRACE_CLOCK_H
2#define _LINUX_TRACE_CLOCK_H
3
4/*
5 * 3 trace clock variants, with differing scalability/precision
6 * tradeoffs:
7 *
8 * - local: CPU-local trace clock
9 * - medium: scalable global clock with some jitter
10 * - global: globally monotonic, serialized clock
11 */
12#include <linux/compiler.h>
13#include <linux/types.h>
14
15extern u64 notrace trace_clock_local(void);
16extern u64 notrace trace_clock(void);
17extern u64 notrace trace_clock_global(void);
18
19#endif /* _LINUX_TRACE_CLOCK_H */
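
Callers pick the variant matching their ordering needs; for a purely CPU-local timestamp the cheapest option suffices (illustrative sketch):

    #include <linux/trace_clock.h>

    static u64 demo_timestamp(void)
    {
            /* cheapest variant; not guaranteed monotonic across CPUs */
            return trace_clock_local();
    }
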
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 6186a789d6c7..c7aa154f4bfc 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -388,17 +388,14 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
388 * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal 388 * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
389 * @task: task receiving the signal 389 * @task: task receiving the signal
390 * @sig: signal number being sent 390 * @sig: signal number being sent
391 * @handler: %SIG_IGN or %SIG_DFL
392 * 391 *
393 * Return zero iff tracing doesn't care to examine this ignored signal, 392 * Return zero iff tracing doesn't care to examine this ignored signal,
394 * so it can short-circuit normal delivery and never even get queued. 393 * so it can short-circuit normal delivery and never even get queued.
395 * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN.
396 * 394 *
397 * Called with @task->sighand->siglock held. 395 * Called with @task->sighand->siglock held.
398 */ 396 */
399static inline int tracehook_consider_ignored_signal(struct task_struct *task, 397static inline int tracehook_consider_ignored_signal(struct task_struct *task,
400 int sig, 398 int sig)
401 void __user *handler)
402{ 399{
403 return (task_ptrace(task) & PT_PTRACED) != 0; 400 return (task_ptrace(task) & PT_PTRACED) != 0;
404} 401}
@@ -407,19 +404,17 @@ static inline int tracehook_consider_ignored_signal(struct task_struct *task,
407 * tracehook_consider_fatal_signal - suppress special handling of fatal signal 404 * tracehook_consider_fatal_signal - suppress special handling of fatal signal
408 * @task: task receiving the signal 405 * @task: task receiving the signal
409 * @sig: signal number being sent 406 * @sig: signal number being sent
410 * @handler: %SIG_DFL or %SIG_IGN
411 * 407 *
412 * Return nonzero to prevent special handling of this termination signal. 408 * Return nonzero to prevent special handling of this termination signal.
413 * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored, 409 * Normally the handler for the signal is %SIG_DFL. It can be %SIG_IGN if @sig is
414 * in which case force_sig() is about to reset it to %SIG_DFL. 410 * ignored, in which case force_sig() is about to reset it to %SIG_DFL.
415 * When this returns zero, this signal might cause a quick termination 411 * When this returns zero, this signal might cause a quick termination
416 * that does not give the debugger a chance to intercept the signal. 412 * that does not give the debugger a chance to intercept the signal.
417 * 413 *
418 * Called with or without @task->sighand->siglock held. 414 * Called with or without @task->sighand->siglock held.
419 */ 415 */
420static inline int tracehook_consider_fatal_signal(struct task_struct *task, 416static inline int tracehook_consider_fatal_signal(struct task_struct *task,
421 int sig, 417 int sig)
422 void __user *handler)
423{ 418{
424 return (task_ptrace(task) & PT_PTRACED) != 0; 419 return (task_ptrace(task) & PT_PTRACED) != 0;
425} 420}
@@ -507,7 +502,7 @@ static inline int tracehook_notify_jctl(int notify, int why)
507static inline int tracehook_notify_death(struct task_struct *task, 502static inline int tracehook_notify_death(struct task_struct *task,
508 void **death_cookie, int group_dead) 503 void **death_cookie, int group_dead)
509{ 504{
510 if (task->exit_signal == -1) 505 if (task_detached(task))
511 return task->ptrace ? SIGCHLD : DEATH_REAP; 506 return task->ptrace ? SIGCHLD : DEATH_REAP;
512 507
513 /* 508 /*
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 757005458366..d35a7ee7611f 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -31,8 +31,8 @@ struct tracepoint {
31 * Keep in sync with vmlinux.lds.h. 31 * Keep in sync with vmlinux.lds.h.
32 */ 32 */
33 33
34#define TPPROTO(args...) args 34#define TP_PROTO(args...) args
35#define TPARGS(args...) args 35#define TP_ARGS(args...) args
36 36
37#ifdef CONFIG_TRACEPOINTS 37#ifdef CONFIG_TRACEPOINTS
38 38
@@ -65,7 +65,7 @@ struct tracepoint {
65 { \ 65 { \
66 if (unlikely(__tracepoint_##name.state)) \ 66 if (unlikely(__tracepoint_##name.state)) \
67 __DO_TRACE(&__tracepoint_##name, \ 67 __DO_TRACE(&__tracepoint_##name, \
68 TPPROTO(proto), TPARGS(args)); \ 68 TP_PROTO(proto), TP_ARGS(args)); \
69 } \ 69 } \
70 static inline int register_trace_##name(void (*probe)(proto)) \ 70 static inline int register_trace_##name(void (*probe)(proto)) \
71 { \ 71 { \
@@ -153,4 +153,114 @@ static inline void tracepoint_synchronize_unregister(void)
153 synchronize_sched(); 153 synchronize_sched();
154} 154}
155 155
156#define PARAMS(args...) args
157#define TRACE_FORMAT(name, proto, args, fmt) \
158 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
159
160
161/*
162 * For use with the TRACE_EVENT macro:
163 *
164 * We define a tracepoint, its arguments, its printk format
165 * and its 'fast binary record' layout.
166 *
167 * Firstly, name your tracepoint via TRACE_EVENT(name, ...): the
168 * 'subsystem_event' notation is fine.
169 *
170 * Think about this whole construct as the
171 * 'trace_sched_switch() function' from now on.
172 *
173 *
174 * TRACE_EVENT(sched_switch,
175 *
176 * *
177 * * A function has a regular function-arguments
178 * * prototype; declare it via TP_PROTO():
179 * *
180 *
181 * TP_PROTO(struct rq *rq, struct task_struct *prev,
182 * struct task_struct *next),
183 *
184 * *
185 * * Define the call signature of the 'function'.
186 * * (Design sidenote: we use this instead of a
187 * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
188 * *
189 *
190 * TP_ARGS(rq, prev, next),
191 *
192 * *
193 * * Fast binary tracing: define the trace record via
194 * * TP_STRUCT__entry(). You can think about it like a
195 * * regular C structure local variable definition.
196 * *
197 * * This is how the trace record is structured and will
198 * * be saved into the ring buffer. These are the fields
199 * * that will be exposed to user-space in
200 * * /debug/tracing/events/<*>/format.
201 * *
202 * * The declared 'local variable' is called '__entry'
203 * *
204 * * __field(pid_t, prev_pid) is equivalent to a standard declaration:
205 * *
206 * * pid_t prev_pid;
207 * *
208 * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to:
209 * *
210 * * char prev_comm[TASK_COMM_LEN];
211 * *
212 *
213 * TP_STRUCT__entry(
214 * __array( char, prev_comm, TASK_COMM_LEN )
215 * __field( pid_t, prev_pid )
216 * __field( int, prev_prio )
217 * __array( char, next_comm, TASK_COMM_LEN )
218 * __field( pid_t, next_pid )
219 * __field( int, next_prio )
220 * ),
221 *
222 * *
223 * * Assign the entry into the trace record, by embedding
224 * * a full C statement block into TP_fast_assign(). You
225 * * can refer to the trace record as '__entry' -
226 * * otherwise you can put arbitrary C code in here.
227 * *
228 * * Note: this C code will execute every time a trace event
229 * * happens, on an active tracepoint.
230 * *
231 *
232 * TP_fast_assign(
233 * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
234 * __entry->prev_pid = prev->pid;
235 * __entry->prev_prio = prev->prio;
236 * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
237 * __entry->next_pid = next->pid;
238 * __entry->next_prio = next->prio;
239 * )
240 *
241 * *
242 * * Formatted output of a trace record via TP_printk().
243 * * This is how the tracepoint will appear under ftrace
244 * * plugins that make use of this tracepoint.
245 * *
246 * * (raw-binary tracing won't actually perform this step.)
247 * *
248 *
249 * TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
250 * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
251 * __entry->next_comm, __entry->next_pid, __entry->next_prio),
252 *
253 * );
254 *
255 * This macro construct is thus used for the regular printk-format
256 * tracing setup, to construct a function-pointer-based
257 * tracepoint callback (used by programmatic plugins and
258 * also by generic instrumentation such as SystemTap), and
259 * to expose a structured trace record in
260 * /debug/tracing/events/.
261 */
262
263#define TRACE_EVENT(name, proto, args, struct, assign, print) \
264 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
265
156#endif 266#endif
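
For the sched_switch example above, the generated entry points follow the usual DECLARE_TRACE() naming, so a tracer attaches a probe as shown below (sketch based on the example's TP_PROTO(); struct rq is scheduler-internal, hence only forward-declared here):

    #include <linux/sched.h>

    struct rq;      /* private to the scheduler; pointer use only */

    /* probe prototype must match TP_PROTO(struct rq *, task_struct *, task_struct *) */
    static void probe_sched_switch(struct rq *rq, struct task_struct *prev,
                                   struct task_struct *next)
    {
            /* runs each time the scheduler hits trace_sched_switch(rq, prev, next) */
    }

    static int demo_attach(void)
    {
            return register_trace_sched_switch(probe_sched_switch);
    }
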
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 08e088334dba..bcba84ea2d86 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -252,8 +252,6 @@ struct tty_operations {
252 void (*set_ldisc)(struct tty_struct *tty); 252 void (*set_ldisc)(struct tty_struct *tty);
253 void (*wait_until_sent)(struct tty_struct *tty, int timeout); 253 void (*wait_until_sent)(struct tty_struct *tty, int timeout);
254 void (*send_xchar)(struct tty_struct *tty, char ch); 254 void (*send_xchar)(struct tty_struct *tty, char ch);
255 int (*read_proc)(char *page, char **start, off_t off,
256 int count, int *eof, void *data);
257 int (*tiocmget)(struct tty_struct *tty, struct file *file); 255 int (*tiocmget)(struct tty_struct *tty, struct file *file);
258 int (*tiocmset)(struct tty_struct *tty, struct file *file, 256 int (*tiocmset)(struct tty_struct *tty, struct file *file,
259 unsigned int set, unsigned int clear); 257 unsigned int set, unsigned int clear);
@@ -264,6 +262,7 @@ struct tty_operations {
264 int (*poll_get_char)(struct tty_driver *driver, int line); 262 int (*poll_get_char)(struct tty_driver *driver, int line);
265 void (*poll_put_char)(struct tty_driver *driver, int line, char ch); 263 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
266#endif 264#endif
265 const struct file_operations *proc_fops;
267}; 266};
268 267
269struct tty_driver { 268struct tty_driver {
@@ -310,7 +309,8 @@ extern void tty_set_operations(struct tty_driver *driver,
310extern struct tty_driver *tty_find_polling_driver(char *name, int *line); 309extern struct tty_driver *tty_find_polling_driver(char *name, int *line);
311 310
312extern void tty_driver_kref_put(struct tty_driver *driver); 311extern void tty_driver_kref_put(struct tty_driver *driver);
313extern inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) 312
313static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d)
314{ 314{
315 kref_get(&d->kref); 315 kref_get(&d->kref);
316 return d; 316 return d;
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 5f401b644ed5..429c631d2aad 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -80,8 +80,7 @@ struct wusb_ckhdid {
80 u8 data[16]; 80 u8 data[16];
81} __attribute__((packed)); 81} __attribute__((packed));
82 82
83const static 83static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
84struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
85 84
86#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) 85#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
87 86
diff --git a/include/linux/video_decoder.h b/include/linux/video_decoder.h
deleted file mode 100644
index e26c0c86a6ea..000000000000
--- a/include/linux/video_decoder.h
+++ /dev/null
@@ -1,48 +0,0 @@
1#ifndef _LINUX_VIDEO_DECODER_H
2#define _LINUX_VIDEO_DECODER_H
3
4#include <linux/types.h>
5
6#define HAVE_VIDEO_DECODER 1
7
8struct video_decoder_capability { /* this name is too long */
9 __u32 flags;
10#define VIDEO_DECODER_PAL 1 /* can decode PAL signal */
11#define VIDEO_DECODER_NTSC 2 /* can decode NTSC */
12#define VIDEO_DECODER_SECAM 4 /* can decode SECAM */
13#define VIDEO_DECODER_AUTO 8 /* can autosense norm */
14#define VIDEO_DECODER_CCIR 16 /* CCIR-601 pixel rate (720 pixels per line) instead of square pixel rate */
15 int inputs; /* number of inputs */
16 int outputs; /* number of outputs */
17};
18
19/*
20DECODER_GET_STATUS returns the following flags. The only one you need is
21DECODER_STATUS_GOOD, the others are just nice things to know.
22*/
23#define DECODER_STATUS_GOOD 1 /* receiving acceptable input */
24#define DECODER_STATUS_COLOR 2 /* receiving color information */
25#define DECODER_STATUS_PAL 4 /* auto detected */
26#define DECODER_STATUS_NTSC 8 /* auto detected */
27#define DECODER_STATUS_SECAM 16 /* auto detected */
28
29struct video_decoder_init {
30 unsigned char len;
31 const unsigned char *data;
32};
33
34#define DECODER_GET_CAPABILITIES _IOR('d', 1, struct video_decoder_capability)
35#define DECODER_GET_STATUS _IOR('d', 2, int)
36#define DECODER_SET_NORM _IOW('d', 3, int)
37#define DECODER_SET_INPUT _IOW('d', 4, int) /* 0 <= input < #inputs */
38#define DECODER_SET_OUTPUT _IOW('d', 5, int) /* 0 <= output < #outputs */
39#define DECODER_ENABLE_OUTPUT _IOW('d', 6, int) /* boolean output enable control */
40#define DECODER_SET_PICTURE _IOW('d', 7, struct video_picture)
41#define DECODER_SET_GPIO _IOW('d', 8, int) /* switch general purpose pin */
42#define DECODER_INIT _IOW('d', 9, struct video_decoder_init) /* init internal registers at once */
43#define DECODER_SET_VBI_BYPASS _IOW('d', 10, int) /* switch vbi bypass */
44
45#define DECODER_DUMP _IO('d', 192) /* debug hook */
46
47
48#endif
diff --git a/include/linux/video_encoder.h b/include/linux/video_encoder.h
deleted file mode 100644
index b7b6423bbb8a..000000000000
--- a/include/linux/video_encoder.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef _LINUX_VIDEO_ENCODER_H
2#define _LINUX_VIDEO_ENCODER_H
3
4#include <linux/types.h>
5
6struct video_encoder_capability { /* this name is too long */
7 __u32 flags;
8#define VIDEO_ENCODER_PAL 1 /* can encode PAL signal */
9#define VIDEO_ENCODER_NTSC 2 /* can encode NTSC */
10#define VIDEO_ENCODER_SECAM 4 /* can encode SECAM */
11#define VIDEO_ENCODER_CCIR 16 /* CCIR-601 pixel rate (720 pixels per line) instead of square pixel rate */
12 int inputs; /* number of inputs */
13 int outputs; /* number of outputs */
14};
15
16#define ENCODER_GET_CAPABILITIES _IOR('e', 1, struct video_encoder_capability)
17#define ENCODER_SET_NORM _IOW('e', 2, int)
18#define ENCODER_SET_INPUT _IOW('e', 3, int) /* 0 <= input < #inputs */
19#define ENCODER_SET_OUTPUT _IOW('e', 4, int) /* 0 <= output < #outputs */
20#define ENCODER_ENABLE_OUTPUT _IOW('e', 5, int) /* boolean output enable control */
21
22
23#endif
diff --git a/include/linux/videodev.h b/include/linux/videodev.h
index 837f392fbe97..b19eab140977 100644
--- a/include/linux/videodev.h
+++ b/include/linux/videodev.h
@@ -16,6 +16,23 @@
16#include <linux/ioctl.h> 16#include <linux/ioctl.h>
17#include <linux/videodev2.h> 17#include <linux/videodev2.h>
18 18
19#if defined(__MIN_V4L1) && defined (__KERNEL__)
20
21/*
22 * Used by those V4L2 core functions that need minimal V4L1 support,
23 * in order to allow V4L1 compatibility code compilation.
24 */
25
26struct video_mbuf
27{
28 int size; /* Total memory to map */
29 int frames; /* Frames */
30 int offsets[VIDEO_MAX_FRAME];
31};
32
33#define VIDIOCGMBUF _IOR('v',20, struct video_mbuf) /* Memory map buffer info */
34
35#else
19#if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__) 36#if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__)
20 37
21#define VID_TYPE_CAPTURE 1 /* Can capture */ 38#define VID_TYPE_CAPTURE 1 /* Can capture */
@@ -312,6 +329,7 @@ struct video_code
312#define VID_PLAY_END_MARK 14 329#define VID_PLAY_END_MARK 14
313 330
314#endif /* CONFIG_VIDEO_V4L1_COMPAT */ 331#endif /* CONFIG_VIDEO_V4L1_COMPAT */
332#endif /* __MIN_V4L1 */
315 333
316#endif /* __LINUX_VIDEODEV_H */ 334#endif /* __LINUX_VIDEODEV_H */
317 335
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 5571dbe1c0ad..ebb2ea6b4995 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -344,6 +344,8 @@ struct v4l2_pix_format {
344#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ 344#define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */
345#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ 345#define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */
346#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ 346#define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */
347#define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */
348#define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */
347#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */ 349#define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */
348#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */ 350#define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */
349 351
@@ -735,6 +737,11 @@ struct v4l2_input {
735#define V4L2_IN_ST_NO_SIGNAL 0x00000002 737#define V4L2_IN_ST_NO_SIGNAL 0x00000002
736#define V4L2_IN_ST_NO_COLOR 0x00000004 738#define V4L2_IN_ST_NO_COLOR 0x00000004
737 739
740/* field 'status' - sensor orientation */
741/* If sensor is mounted upside down set both bits */
742#define V4L2_IN_ST_HFLIP 0x00000010 /* Frames are flipped horizontally */
743#define V4L2_IN_ST_VFLIP 0x00000020 /* Frames are flipped vertically */
744
738/* field 'status' - analog */ 745/* field 'status' - analog */
739#define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */ 746#define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */
740#define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */ 747#define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */
@@ -829,6 +836,7 @@ struct v4l2_querymenu {
829#define V4L2_CTRL_FLAG_UPDATE 0x0008 836#define V4L2_CTRL_FLAG_UPDATE 0x0008
830#define V4L2_CTRL_FLAG_INACTIVE 0x0010 837#define V4L2_CTRL_FLAG_INACTIVE 0x0010
831#define V4L2_CTRL_FLAG_SLIDER 0x0020 838#define V4L2_CTRL_FLAG_SLIDER 0x0020
839#define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040
832 840
833/* Query flag, to be ORed with the control ID */ 841/* Query flag, to be ORed with the control ID */
834#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000 842#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
@@ -879,8 +887,15 @@ enum v4l2_power_line_frequency {
879#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28) 887#define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28)
880#define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29) 888#define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29)
881#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30) 889#define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30)
890#define V4L2_CID_COLORFX (V4L2_CID_BASE+31)
891enum v4l2_colorfx {
892 V4L2_COLORFX_NONE = 0,
893 V4L2_COLORFX_BW = 1,
894 V4L2_COLORFX_SEPIA = 2,
895};
896
882/* last CID + 1 */ 897/* last CID + 1 */
883#define V4L2_CID_LASTP1 (V4L2_CID_BASE+31) 898#define V4L2_CID_LASTP1 (V4L2_CID_BASE+32)
884 899
885/* MPEG-class control IDs defined by V4L2 */ 900/* MPEG-class control IDs defined by V4L2 */
886#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900) 901#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
@@ -1339,6 +1354,53 @@ struct v4l2_sliced_vbi_data {
1339}; 1354};
1340 1355
1341/* 1356/*
1357 * Sliced VBI data inserted into MPEG Streams
1358 */
1359
1360/*
1361 * V4L2_MPEG_STREAM_VBI_FMT_IVTV:
1362 *
1363 * Structure of the payload contained in an MPEG-2 Private Stream 1 PES packet in an
1364 * MPEG-2 Program Pack that contains V4L2_MPEG_STREAM_VBI_FMT_IVTV Sliced VBI
1365 * data
1366 *
1367 * Note, the MPEG-2 Program Pack and Private Stream 1 PES packet header
1368 * definitions are not included here. See the MPEG-2 specifications for details
1369 * on these headers.
1370 */
1371
1372/* Line type IDs */
1373#define V4L2_MPEG_VBI_IVTV_TELETEXT_B (1)
1374#define V4L2_MPEG_VBI_IVTV_CAPTION_525 (4)
1375#define V4L2_MPEG_VBI_IVTV_WSS_625 (5)
1376#define V4L2_MPEG_VBI_IVTV_VPS (7)
1377
1378struct v4l2_mpeg_vbi_itv0_line {
1379 __u8 id; /* One of V4L2_MPEG_VBI_IVTV_* above */
1380 __u8 data[42]; /* Sliced VBI data for the line */
1381} __attribute__ ((packed));
1382
1383struct v4l2_mpeg_vbi_itv0 {
1384 __le32 linemask[2]; /* Bitmasks of VBI service lines present */
1385 struct v4l2_mpeg_vbi_itv0_line line[35];
1386} __attribute__ ((packed));
1387
1388struct v4l2_mpeg_vbi_ITV0 {
1389 struct v4l2_mpeg_vbi_itv0_line line[36];
1390} __attribute__ ((packed));
1391
1392#define V4L2_MPEG_VBI_IVTV_MAGIC0 "itv0"
1393#define V4L2_MPEG_VBI_IVTV_MAGIC1 "ITV0"
1394
1395struct v4l2_mpeg_vbi_fmt_ivtv {
1396 __u8 magic[4];
1397 union {
1398 struct v4l2_mpeg_vbi_itv0 itv0;
1399 struct v4l2_mpeg_vbi_ITV0 ITV0;
1400 };
1401} __attribute__ ((packed));
1402
1403/*
1342 * A G G R E G A T E S T R U C T U R E S 1404 * A G G R E G A T E S T R U C T U R E S
1343 */ 1405 */
1344 1406
@@ -1403,14 +1465,6 @@ struct v4l2_dbg_chip_ident {
1403 __u32 revision; /* chip revision, chip specific */ 1465 __u32 revision; /* chip revision, chip specific */
1404} __attribute__ ((packed)); 1466} __attribute__ ((packed));
1405 1467
1406/* VIDIOC_G_CHIP_IDENT_OLD: Deprecated, do not use */
1407struct v4l2_chip_ident_old {
1408 __u32 match_type; /* Match type */
1409 __u32 match_chip; /* Match this chip, meaning determined by match_type */
1410 __u32 ident; /* chip identifier as specified in <media/v4l2-chip-ident.h> */
1411 __u32 revision; /* chip revision, chip specific */
1412};
1413
1414/* 1468/*
1415 * I O C T L C O D E S F O R V I D E O D E V I C E S 1469 * I O C T L C O D E S F O R V I D E O D E V I C E S
1416 * 1470 *
@@ -1488,8 +1542,6 @@ struct v4l2_chip_ident_old {
1488/* Experimental, meant for debugging, testing and internal use. 1542/* Experimental, meant for debugging, testing and internal use.
1489 Never use this ioctl in applications! */ 1543 Never use this ioctl in applications! */
1490#define VIDIOC_DBG_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_dbg_chip_ident) 1544#define VIDIOC_DBG_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_dbg_chip_ident)
1491/* This is deprecated and will go away in 2.6.30 */
1492#define VIDIOC_G_CHIP_IDENT_OLD _IOWR('V', 81, struct v4l2_chip_ident_old)
1493#endif 1545#endif
1494 1546
1495#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) 1547#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
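
Consumers of the embedded sliced VBI payload distinguish the two layouts by the leading magic bytes; a minimal check might look like this (hypothetical helper):

    #include <linux/string.h>
    #include <linux/videodev2.h>

    /* hypothetical: true if the payload uses the "itv0" (bitmask) layout */
    static int demo_vbi_is_itv0(const struct v4l2_mpeg_vbi_fmt_ivtv *vbi)
    {
            return memcmp(vbi->magic, V4L2_MPEG_VBI_IVTV_MAGIC0, 4) == 0;
    }
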
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a210ede73b56..5d631c17eaee 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -135,8 +135,11 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
135void __wake_up_common(wait_queue_head_t *q, unsigned int mode, 135void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
136 int nr_exclusive, int sync, void *key); 136 int nr_exclusive, int sync, void *key);
137void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 137void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
138extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode); 138void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
139extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); 139void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
140 void *key);
141void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
142void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
140void __wake_up_bit(wait_queue_head_t *, void *, int); 143void __wake_up_bit(wait_queue_head_t *, void *, int);
141int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); 144int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
142int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); 145int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
@@ -155,21 +158,17 @@ wait_queue_head_t *bit_waitqueue(void *, int);
155#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) 158#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
156#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) 159#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
157 160
158#ifdef CONFIG_DEBUG_LOCK_ALLOC
159/* 161/*
160 * macro to avoid include hell 162 * Wakeup macros to be used to report events to the targets.
161 */ 163 */
162#define wake_up_nested(x, s) \ 164#define wake_up_poll(x, m) \
163do { \ 165 __wake_up(x, TASK_NORMAL, 1, (void *) (m))
164 unsigned long flags; \ 166#define wake_up_locked_poll(x, m) \
165 \ 167 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
166 spin_lock_irqsave_nested(&(x)->lock, flags, (s)); \ 168#define wake_up_interruptible_poll(x, m) \
167 wake_up_locked(x); \ 169 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
168 spin_unlock_irqrestore(&(x)->lock, flags); \ 170#define wake_up_interruptible_sync_poll(x, m) \
169} while (0) 171 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
170#else
171#define wake_up_nested(x, s) wake_up(x)
172#endif
173 172
174#define __wait_event(wq, condition) \ 173#define __wait_event(wq, condition) \
175do { \ 174do { \
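
The new *_poll variants pass the event mask through the wakeup key, letting poll/epoll waiters filter out uninteresting wakeups. A sketch of reporting readable data (hypothetical driver):

    #include <linux/wait.h>
    #include <linux/poll.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

    static void demo_data_ready(void)
    {
            /* wake only waiters that asked for readable events */
            wake_up_interruptible_poll(&demo_wq, POLLIN | POLLRDNORM);
    }
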
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 3cd51e579ab1..13e1adf55c4c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -41,6 +41,11 @@ struct delayed_work {
41 struct timer_list timer; 41 struct timer_list timer;
42}; 42};
43 43
44static inline struct delayed_work *to_delayed_work(struct work_struct *work)
45{
46 return container_of(work, struct delayed_work, work);
47}
48
44struct execute_work { 49struct execute_work {
45 struct work_struct work; 50 struct work_struct work;
46}; 51};
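
The new helper recovers the containing delayed_work from the work_struct a handler receives, which is the common first step in periodic work functions (sketch, hypothetical driver state):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct demo_dev {
            struct delayed_work poll_work;
            int polls;
    };

    static void demo_poll(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work);
            struct demo_dev *dev = container_of(dwork, struct demo_dev, poll_work);

            dev->polls++;
            schedule_delayed_work(dwork, HZ);       /* re-arm, 1s period */
    }
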
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 7300ecdc480c..9c1ed1fb6ddb 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -109,8 +109,8 @@ extern int dirty_background_ratio;
109extern unsigned long dirty_background_bytes; 109extern unsigned long dirty_background_bytes;
110extern int vm_dirty_ratio; 110extern int vm_dirty_ratio;
111extern unsigned long vm_dirty_bytes; 111extern unsigned long vm_dirty_bytes;
112extern int dirty_writeback_interval; 112extern unsigned int dirty_writeback_interval;
113extern int dirty_expire_interval; 113extern unsigned int dirty_expire_interval;
114extern int vm_highmem_is_dirtyable; 114extern int vm_highmem_is_dirtyable;
115extern int block_dump; 115extern int block_dump;
116extern int laptop_mode; 116extern int laptop_mode;
@@ -168,6 +168,8 @@ void writeback_set_ratelimit(void);
168/* pdflush.c */ 168/* pdflush.c */
169extern int nr_pdflush_threads; /* Global so it can be exported to sysctl 169extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
170 read-only. */ 170 read-only. */
171extern int nr_pdflush_threads_max; /* Global so it can be exported to sysctl */
172extern int nr_pdflush_threads_min; /* Global so it can be exported to sysctl */
171 173
172 174
173#endif /* WRITEBACK_H */ 175#endif /* WRITEBACK_H */
diff --git a/include/media/bt819.h b/include/media/bt819.h
new file mode 100644
index 000000000000..38f666bde77a
--- /dev/null
+++ b/include/media/bt819.h
@@ -0,0 +1,33 @@
1/*
2 bt819.h - bt819 notifications
3
4 Copyright (C) 2009 Hans Verkuil (hverkuil@xs4all.nl)
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/
20
21#ifndef _BT819_H_
22#define _BT819_H_
23
24#include <linux/ioctl.h>
25
26/* v4l2_device notifications. */
27
28/* Needed to reset the FIFO buffer when changing the input
29 or the video standard. */
30#define BT819_FIFO_RESET_LOW _IO('b', 0)
31#define BT819_FIFO_RESET_HIGH _IO('b', 1)
32
33#endif
diff --git a/include/media/cx2341x.h b/include/media/cx2341x.h
index 9ec4d5889ef5..9ebe8558b9b6 100644
--- a/include/media/cx2341x.h
+++ b/include/media/cx2341x.h
@@ -1,5 +1,5 @@
1/* 1/*
2 cx23415/6 header containing common defines. 2 cx23415/6/8 header containing common defines.
3 3
4 This program is free software; you can redistribute it and/or modify 4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by 5 it under the terms of the GNU General Public License as published by
@@ -28,6 +28,7 @@ enum cx2341x_port {
28enum cx2341x_cap { 28enum cx2341x_cap {
29 CX2341X_CAP_HAS_SLICED_VBI = 1 << 0, 29 CX2341X_CAP_HAS_SLICED_VBI = 1 << 0,
30 CX2341X_CAP_HAS_TS = 1 << 1, 30 CX2341X_CAP_HAS_TS = 1 << 1,
31 CX2341X_CAP_HAS_AC3 = 1 << 2,
31}; 32};
32 33
33struct cx2341x_mpeg_params { 34struct cx2341x_mpeg_params {
@@ -47,11 +48,12 @@ struct cx2341x_mpeg_params {
47 enum v4l2_mpeg_audio_sampling_freq audio_sampling_freq; 48 enum v4l2_mpeg_audio_sampling_freq audio_sampling_freq;
48 enum v4l2_mpeg_audio_encoding audio_encoding; 49 enum v4l2_mpeg_audio_encoding audio_encoding;
49 enum v4l2_mpeg_audio_l2_bitrate audio_l2_bitrate; 50 enum v4l2_mpeg_audio_l2_bitrate audio_l2_bitrate;
51 enum v4l2_mpeg_audio_ac3_bitrate audio_ac3_bitrate;
50 enum v4l2_mpeg_audio_mode audio_mode; 52 enum v4l2_mpeg_audio_mode audio_mode;
51 enum v4l2_mpeg_audio_mode_extension audio_mode_extension; 53 enum v4l2_mpeg_audio_mode_extension audio_mode_extension;
52 enum v4l2_mpeg_audio_emphasis audio_emphasis; 54 enum v4l2_mpeg_audio_emphasis audio_emphasis;
53 enum v4l2_mpeg_audio_crc audio_crc; 55 enum v4l2_mpeg_audio_crc audio_crc;
54 u16 audio_properties; 56 u32 audio_properties;
55 u16 audio_mute; 57 u16 audio_mute;
56 58
57 /* video */ 59 /* video */
diff --git a/include/media/cx25840.h b/include/media/cx25840.h
index db431d513f2f..2c3fbaa33f74 100644
--- a/include/media/cx25840.h
+++ b/include/media/cx25840.h
@@ -21,6 +21,18 @@
21#ifndef _CX25840_H_ 21#ifndef _CX25840_H_
22#define _CX25840_H_ 22#define _CX25840_H_
23 23
24/* Note that the cx25840 driver requires that the bridge driver calls the
25 v4l2_subdev's init operation in order to load the driver's firmware.
26 Without this the audio standard detection will fail and you will
27 only get mono.
28
29 Since loading the firmware is often problematic when the driver is
30 compiled into the kernel, I recommend postponing calling this function
31 until the first open of the video device. Another reason for
32 postponing it is that loading this firmware takes a long time (seconds)
33 due to the slow i2c bus speed. So it will speed up the boot process if
 34 you can avoid loading the firmware as long as the video device isn't used. */
35
24enum cx25840_video_input { 36enum cx25840_video_input {
25 /* Composite video inputs In1-In8 */ 37 /* Composite video inputs In1-In8 */
26 CX25840_COMPOSITE1 = 1, 38 CX25840_COMPOSITE1 = 1,
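A minimal sketch (not part of the patch) of the deferred firmware load recommended in the comment above, assuming a hypothetical bridge driver that embeds a struct v4l2_device. The v4l2_device_call_all() helper and the core init op are the ones declared elsewhere in this series; struct mydev, mydev_first_open() and the fw_loaded flag are illustrative names.

struct mydev {
	struct v4l2_device v4l2_dev;
	bool fw_loaded;		/* set once the cx25840 firmware is loaded */
};

static int mydev_first_open(struct mydev *dev)
{
	if (!dev->fw_loaded) {
		/* Ask the registered subdevs (here: the cx25840) to
		   initialize and load their firmware, only on first open. */
		v4l2_device_call_all(&dev->v4l2_dev, 0, core, init, 0);
		dev->fw_loaded = true;
	}
	return 0;
}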
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index 5bf2ea00678c..7b5b91f60425 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -111,6 +111,7 @@ extern IR_KEYTAB_TYPE ir_codes_empty[IR_KEYTAB_SIZE];
111extern IR_KEYTAB_TYPE ir_codes_avermedia[IR_KEYTAB_SIZE]; 111extern IR_KEYTAB_TYPE ir_codes_avermedia[IR_KEYTAB_SIZE];
112extern IR_KEYTAB_TYPE ir_codes_avermedia_dvbt[IR_KEYTAB_SIZE]; 112extern IR_KEYTAB_TYPE ir_codes_avermedia_dvbt[IR_KEYTAB_SIZE];
113extern IR_KEYTAB_TYPE ir_codes_avermedia_m135a[IR_KEYTAB_SIZE]; 113extern IR_KEYTAB_TYPE ir_codes_avermedia_m135a[IR_KEYTAB_SIZE];
114extern IR_KEYTAB_TYPE ir_codes_avermedia_cardbus[IR_KEYTAB_SIZE];
114extern IR_KEYTAB_TYPE ir_codes_apac_viewcomp[IR_KEYTAB_SIZE]; 115extern IR_KEYTAB_TYPE ir_codes_apac_viewcomp[IR_KEYTAB_SIZE];
115extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE]; 116extern IR_KEYTAB_TYPE ir_codes_pixelview[IR_KEYTAB_SIZE];
116extern IR_KEYTAB_TYPE ir_codes_pixelview_new[IR_KEYTAB_SIZE]; 117extern IR_KEYTAB_TYPE ir_codes_pixelview_new[IR_KEYTAB_SIZE];
@@ -159,6 +160,8 @@ extern IR_KEYTAB_TYPE ir_codes_real_audio_220_32_keys[IR_KEYTAB_SIZE];
159extern IR_KEYTAB_TYPE ir_codes_msi_tvanywhere_plus[IR_KEYTAB_SIZE]; 160extern IR_KEYTAB_TYPE ir_codes_msi_tvanywhere_plus[IR_KEYTAB_SIZE];
160extern IR_KEYTAB_TYPE ir_codes_ati_tv_wonder_hd_600[IR_KEYTAB_SIZE]; 161extern IR_KEYTAB_TYPE ir_codes_ati_tv_wonder_hd_600[IR_KEYTAB_SIZE];
161extern IR_KEYTAB_TYPE ir_codes_kworld_plus_tv_analog[IR_KEYTAB_SIZE]; 162extern IR_KEYTAB_TYPE ir_codes_kworld_plus_tv_analog[IR_KEYTAB_SIZE];
163extern IR_KEYTAB_TYPE ir_codes_kaiomy[IR_KEYTAB_SIZE];
164extern IR_KEYTAB_TYPE ir_codes_dm1105_nec[IR_KEYTAB_SIZE];
162#endif 165#endif
163 166
164/* 167/*
diff --git a/include/media/ir-kbd-i2c.h b/include/media/ir-kbd-i2c.h
index 00fa57eb9fde..07963d705400 100644
--- a/include/media/ir-kbd-i2c.h
+++ b/include/media/ir-kbd-i2c.h
@@ -14,8 +14,7 @@ struct IR_i2c {
14 /* Used to avoid fast repeating */ 14 /* Used to avoid fast repeating */
15 unsigned char old; 15 unsigned char old;
16 16
17 struct work_struct work; 17 struct delayed_work work;
18 struct timer_list timer;
19 char phys[32]; 18 char phys[32];
20 int (*get_key)(struct IR_i2c*, u32*, u32*); 19 int (*get_key)(struct IR_i2c*, u32*, u32*);
21}; 20};
diff --git a/include/media/msp3400.h b/include/media/msp3400.h
index 6ab854931c05..90cf22ada8b4 100644
--- a/include/media/msp3400.h
+++ b/include/media/msp3400.h
@@ -54,13 +54,13 @@
54 ======= 54 =======
55 55
56 So to specify a complete routing scheme for the msp3400 you will have to 56 So to specify a complete routing scheme for the msp3400 you will have to
57 specify in the 'input' field of the v4l2_routing struct: 57 specify in the 'input' arg of the s_routing function:
58 58
59 1) which tuner input to use 59 1) which tuner input to use
60 2) which SCART input to use 60 2) which SCART input to use
61 3) which DSP input to use for each DSP output 61 3) which DSP input to use for each DSP output
62 62
63 And in the 'output' field of the v4l2_routing struct you specify: 63 And in the 'output' arg of the s_routing function you specify:
64 64
65 1) which SCART input to use for each SCART output 65 1) which SCART input to use for each SCART output
66 66
diff --git a/include/media/ov772x.h b/include/media/ov772x.h
index e391d55edb95..30d9629198ef 100644
--- a/include/media/ov772x.h
+++ b/include/media/ov772x.h
@@ -13,9 +13,49 @@
13 13
14#include <media/soc_camera.h> 14#include <media/soc_camera.h>
15 15
16/* for flags */
17#define OV772X_FLAG_VFLIP 0x00000001 /* Vertical flip image */
18#define OV772X_FLAG_HFLIP 0x00000002 /* Horizontal flip image */
19
20/*
21 * for Edge ctrl
22 *
 23 * strength also controls Auto or Manual Edge Control Mode
24 * see also OV772X_MANUAL_EDGE_CTRL
25 */
26struct ov772x_edge_ctrl {
27 unsigned char strength;
28 unsigned char threshold;
29 unsigned char upper;
30 unsigned char lower;
31};
32
33#define OV772X_MANUAL_EDGE_CTRL 0x80 /* un-used bit of strength */
34#define EDGE_STRENGTH_MASK 0x1F
35#define EDGE_THRESHOLD_MASK 0x0F
36#define EDGE_UPPER_MASK 0xFF
37#define EDGE_LOWER_MASK 0xFF
38
39#define OV772X_AUTO_EDGECTRL(u, l) \
40{ \
41 .upper = (u & EDGE_UPPER_MASK), \
42 .lower = (l & EDGE_LOWER_MASK), \
43}
44
45#define OV772X_MANUAL_EDGECTRL(s, t) \
46{ \
47 .strength = (s & EDGE_STRENGTH_MASK) | OV772X_MANUAL_EDGE_CTRL,\
48 .threshold = (t & EDGE_THRESHOLD_MASK), \
49}
50
51/*
52 * ov772x camera info
53 */
16struct ov772x_camera_info { 54struct ov772x_camera_info {
17 unsigned long buswidth; 55 unsigned long buswidth;
56 unsigned long flags;
18 struct soc_camera_link link; 57 struct soc_camera_link link;
58 struct ov772x_edge_ctrl edgectrl;
19}; 59};
20 60
21#endif /* __OV772X_H__ */ 61#endif /* __OV772X_H__ */
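For illustration, a hedged sketch of how board code might fill in ov772x_camera_info using the new flags and edge-control macros; the values are placeholders and the soc_camera_link wiring is left out.

static struct ov772x_camera_info my_ov772x_info = {
	.buswidth = 8,				/* illustrative bus width */
	.flags	  = OV772X_FLAG_VFLIP,		/* sensor mounted upside down */
	/* manual edge control: strength 2, threshold 3 (example values) */
	.edgectrl = OV772X_MANUAL_EDGECTRL(2, 3),
	/* .link = { .bus_id = ..., ... } omitted here */
};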
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index c5a6e22a4b37..7a9f76ecbbbd 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -13,11 +13,12 @@
13#include <linux/stringify.h> 13#include <linux/stringify.h>
14#include <linux/mutex.h> 14#include <linux/mutex.h>
15#include <linux/scatterlist.h> 15#include <linux/scatterlist.h>
16#include <media/v4l2-device.h>
16 17
17#include <linux/vmalloc.h> /* for vmalloc() */ 18#include <linux/vmalloc.h> /* for vmalloc() */
18#include <linux/mm.h> /* for vmalloc_to_page() */ 19#include <linux/mm.h> /* for vmalloc_to_page() */
19 20
20#define SAA7146_VERSION_CODE 0x000500 /* 0.5.0 */ 21#define SAA7146_VERSION_CODE 0x000600 /* 0.6.0 */
21 22
22#define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr))) 23#define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr)))
23#define saa7146_read(sxy,adr) readl(sxy->mem+(adr)) 24#define saa7146_read(sxy,adr) readl(sxy->mem+(adr))
@@ -110,6 +111,8 @@ struct saa7146_dev
110 111
111 struct list_head item; 112 struct list_head item;
112 113
114 struct v4l2_device v4l2_dev;
115
113 /* different device locks */ 116 /* different device locks */
114 spinlock_t slock; 117 spinlock_t slock;
115 struct mutex lock; 118 struct mutex lock;
@@ -145,6 +148,11 @@ struct saa7146_dev
145 struct saa7146_dma d_rps1; 148 struct saa7146_dma d_rps1;
146}; 149};
147 150
151static inline struct saa7146_dev *to_saa7146_dev(struct v4l2_device *v4l2_dev)
152{
153 return container_of(v4l2_dev, struct saa7146_dev, v4l2_dev);
154}
155
148/* from saa7146_i2c.c */ 156/* from saa7146_i2c.c */
149int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c_adapter, u32 bitrate); 157int saa7146_i2c_adapter_prepare(struct saa7146_dev *dev, struct i2c_adapter *i2c_adapter, u32 bitrate);
150 158
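A hedged example of the new container_of() helper: given a struct v4l2_device, for instance inside the notify callback added to v4l2-device.h by this series, the surrounding saa7146 device can be recovered. The function name is hypothetical and assumes the subdev's v4l2_dev back-pointer has been set at registration time.

static void my_saa7146_notify(struct v4l2_subdev *sd,
			      unsigned int notification, void *arg)
{
	struct saa7146_dev *dev = to_saa7146_dev(sd->v4l2_dev);

	/* react to sub-device notifications here,
	   e.g. BT819_FIFO_RESET_LOW/HIGH from the bt819 decoder */
	(void)dev;
}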
diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
index c8d0b23fde29..eed5fccc83f3 100644
--- a/include/media/saa7146_vv.h
+++ b/include/media/saa7146_vv.h
@@ -150,16 +150,6 @@ struct saa7146_vv
150 unsigned int resources; /* resource management for device */ 150 unsigned int resources; /* resource management for device */
151}; 151};
152 152
153#define SAA7146_EXCLUSIVE 0x1
154#define SAA7146_BEFORE 0x2
155#define SAA7146_AFTER 0x4
156
157struct saa7146_extension_ioctls
158{
159 unsigned int cmd;
160 int flags;
161};
162
163/* flags */ 153/* flags */
164#define SAA7146_USE_PORT_B_FOR_VBI 0x2 /* use input port b for vbi hardware bug workaround */ 154#define SAA7146_USE_PORT_B_FOR_VBI 0x2 /* use input port b for vbi hardware bug workaround */
165 155
@@ -176,8 +166,10 @@ struct saa7146_ext_vv
176 int num_stds; 166 int num_stds;
177 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *); 167 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
178 168
179 struct saa7146_extension_ioctls *ioctls; 169 /* the extension can override this */
180 long (*ioctl)(struct saa7146_fh *, unsigned int cmd, void *arg); 170 struct v4l2_ioctl_ops ops;
171 /* pointer to the saa7146 core ops */
172 const struct v4l2_ioctl_ops *core_ops;
181 173
182 struct v4l2_file_operations vbi_fops; 174 struct v4l2_file_operations vbi_fops;
183}; 175};
@@ -213,6 +205,7 @@ void saa7146_set_hps_source_and_sync(struct saa7146_dev *saa, int source, int sy
213void saa7146_set_gpio(struct saa7146_dev *saa, u8 pin, u8 data); 205void saa7146_set_gpio(struct saa7146_dev *saa, u8 pin, u8 data);
214 206
215/* from saa7146_video.c */ 207/* from saa7146_video.c */
208extern const struct v4l2_ioctl_ops saa7146_video_ioctl_ops;
216extern struct saa7146_use_ops saa7146_video_uops; 209extern struct saa7146_use_ops saa7146_video_uops;
217int saa7146_start_preview(struct saa7146_fh *fh); 210int saa7146_start_preview(struct saa7146_fh *fh);
218int saa7146_stop_preview(struct saa7146_fh *fh); 211int saa7146_stop_preview(struct saa7146_fh *fh);
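A hedged sketch of what the new ops/core_ops pair is for: an extension can start from the exported saa7146_video_ioctl_ops and override individual handlers, instead of funnelling everything through the removed extension-ioctl hook. The handler and extension names are hypothetical.

static int my_vidioc_g_tuner(struct file *file, void *fh,
			     struct v4l2_tuner *t)
{
	/* extension-specific tuner handling goes here */
	return 0;
}

static struct saa7146_ext_vv my_ext_vv;

static void my_ext_setup_ioctls(void)
{
	my_ext_vv.ops = saa7146_video_ioctl_ops;	  /* copy core defaults */
	my_ext_vv.ops.vidioc_g_tuner = my_vidioc_g_tuner; /* override one op */
	my_ext_vv.core_ops = &saa7146_video_ioctl_ops;	  /* keep the originals */
}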
diff --git a/include/media/sh_mobile_ceu.h b/include/media/sh_mobile_ceu.h
index b5dbefea3740..0f3524cff435 100644
--- a/include/media/sh_mobile_ceu.h
+++ b/include/media/sh_mobile_ceu.h
@@ -1,10 +1,11 @@
1#ifndef __ASM_SH_MOBILE_CEU_H__ 1#ifndef __ASM_SH_MOBILE_CEU_H__
2#define __ASM_SH_MOBILE_CEU_H__ 2#define __ASM_SH_MOBILE_CEU_H__
3 3
4#include <media/soc_camera.h> 4#define SH_CEU_FLAG_USE_8BIT_BUS (1 << 0) /* use 8bit bus width */
5#define SH_CEU_FLAG_USE_16BIT_BUS (1 << 1) /* use 16bit bus width */
5 6
6struct sh_mobile_ceu_info { 7struct sh_mobile_ceu_info {
7 unsigned long flags; /* SOCAM_... */ 8 unsigned long flags;
8}; 9};
9 10
10#endif /* __ASM_SH_MOBILE_CEU_H__ */ 11#endif /* __ASM_SH_MOBILE_CEU_H__ */
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 7440d9250665..37013688af44 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -45,6 +45,7 @@ struct soc_camera_device {
45 int num_formats; 45 int num_formats;
46 struct soc_camera_format_xlate *user_formats; 46 struct soc_camera_format_xlate *user_formats;
47 int num_user_formats; 47 int num_user_formats;
48 enum v4l2_field field; /* Preserve field over close() */
48 struct module *owner; 49 struct module *owner;
49 void *host_priv; /* Per-device host private data */ 50 void *host_priv; /* Per-device host private data */
50 /* soc_camera.c private count. Only accessed with .video_lock held */ 51 /* soc_camera.c private count. Only accessed with .video_lock held */
@@ -74,7 +75,8 @@ struct soc_camera_host_ops {
74 int (*resume)(struct soc_camera_device *); 75 int (*resume)(struct soc_camera_device *);
75 int (*get_formats)(struct soc_camera_device *, int, 76 int (*get_formats)(struct soc_camera_device *, int,
76 struct soc_camera_format_xlate *); 77 struct soc_camera_format_xlate *);
77 int (*set_fmt)(struct soc_camera_device *, __u32, struct v4l2_rect *); 78 int (*set_crop)(struct soc_camera_device *, struct v4l2_rect *);
79 int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *);
78 int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *); 80 int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *);
79 void (*init_videobuf)(struct videobuf_queue *, 81 void (*init_videobuf)(struct videobuf_queue *,
80 struct soc_camera_device *); 82 struct soc_camera_device *);
@@ -93,13 +95,18 @@ struct soc_camera_host_ops {
93struct soc_camera_link { 95struct soc_camera_link {
94 /* Camera bus id, used to match a camera and a bus */ 96 /* Camera bus id, used to match a camera and a bus */
95 int bus_id; 97 int bus_id;
96 /* GPIO number to switch between 8 and 10 bit modes */
97 unsigned int gpio;
98 /* Per camera SOCAM_SENSOR_* bus flags */ 98 /* Per camera SOCAM_SENSOR_* bus flags */
99 unsigned long flags; 99 unsigned long flags;
100 /* Optional callbacks to power on or off and reset the sensor */ 100 /* Optional callbacks to power on or off and reset the sensor */
101 int (*power)(struct device *, int); 101 int (*power)(struct device *, int);
102 int (*reset)(struct device *); 102 int (*reset)(struct device *);
103 /*
 104 * Some platforms may support different data widths than the sensor's
 105 * native ones due to different data line routing. Let the board code
 106 * override the width flags.
107 */
108 int (*set_bus_param)(struct soc_camera_link *, unsigned long flags);
109 unsigned long (*query_bus_param)(struct soc_camera_link *);
103}; 110};
104 111
105static inline struct soc_camera_device *to_soc_camera_dev(struct device *dev) 112static inline struct soc_camera_device *to_soc_camera_dev(struct device *dev)
@@ -159,7 +166,8 @@ struct soc_camera_ops {
159 int (*release)(struct soc_camera_device *); 166 int (*release)(struct soc_camera_device *);
160 int (*start_capture)(struct soc_camera_device *); 167 int (*start_capture)(struct soc_camera_device *);
161 int (*stop_capture)(struct soc_camera_device *); 168 int (*stop_capture)(struct soc_camera_device *);
162 int (*set_fmt)(struct soc_camera_device *, __u32, struct v4l2_rect *); 169 int (*set_crop)(struct soc_camera_device *, struct v4l2_rect *);
170 int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *);
163 int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *); 171 int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *);
164 unsigned long (*query_bus_param)(struct soc_camera_device *); 172 unsigned long (*query_bus_param)(struct soc_camera_device *);
165 int (*set_bus_param)(struct soc_camera_device *, unsigned long); 173 int (*set_bus_param)(struct soc_camera_device *, unsigned long);
@@ -239,15 +247,19 @@ static inline struct v4l2_queryctrl const *soc_camera_find_qctrl(
239static inline unsigned long soc_camera_bus_param_compatible( 247static inline unsigned long soc_camera_bus_param_compatible(
240 unsigned long camera_flags, unsigned long bus_flags) 248 unsigned long camera_flags, unsigned long bus_flags)
241{ 249{
242 unsigned long common_flags, hsync, vsync, pclk; 250 unsigned long common_flags, hsync, vsync, pclk, data, buswidth, mode;
243 251
244 common_flags = camera_flags & bus_flags; 252 common_flags = camera_flags & bus_flags;
245 253
246 hsync = common_flags & (SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW); 254 hsync = common_flags & (SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW);
247 vsync = common_flags & (SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW); 255 vsync = common_flags & (SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW);
248 pclk = common_flags & (SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING); 256 pclk = common_flags & (SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING);
257 data = common_flags & (SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATA_ACTIVE_LOW);
258 mode = common_flags & (SOCAM_MASTER | SOCAM_SLAVE);
259 buswidth = common_flags & SOCAM_DATAWIDTH_MASK;
249 260
250 return (!hsync || !vsync || !pclk) ? 0 : common_flags; 261 return (!hsync || !vsync || !pclk || !data || !mode || !buswidth) ? 0 :
262 common_flags;
251} 263}
252 264
253extern unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl, 265extern unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl,
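Hedged sketch of how a camera host driver might use the stricter check: with this change the data polarity, master/slave mode and bus width must also overlap, otherwise 0 is returned. The host capability flags below are illustrative.

static int my_host_try_bus_param(struct soc_camera_device *icd)
{
	unsigned long camera_flags = icd->ops->query_bus_param(icd);
	unsigned long bus_flags = SOCAM_MASTER | SOCAM_DATAWIDTH_8 |
		SOCAM_PCLK_SAMPLE_RISING |
		SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |
		SOCAM_DATA_ACTIVE_HIGH;

	return soc_camera_bus_param_compatible(camera_flags, bus_flags) ?
		0 : -EINVAL;
}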
diff --git a/include/media/tvaudio.h b/include/media/tvaudio.h
index 6915aafc875a..1ac8184693f8 100644
--- a/include/media/tvaudio.h
+++ b/include/media/tvaudio.h
@@ -21,10 +21,29 @@
21#ifndef _TVAUDIO_H 21#ifndef _TVAUDIO_H
22#define _TVAUDIO_H 22#define _TVAUDIO_H
23 23
24#include <media/i2c-addr.h>
25
24/* The tvaudio module accepts the following inputs: */ 26/* The tvaudio module accepts the following inputs: */
25#define TVAUDIO_INPUT_TUNER 0 27#define TVAUDIO_INPUT_TUNER 0
26#define TVAUDIO_INPUT_RADIO 1 28#define TVAUDIO_INPUT_RADIO 1
27#define TVAUDIO_INPUT_EXTERN 2 29#define TVAUDIO_INPUT_EXTERN 2
28#define TVAUDIO_INPUT_INTERN 3 30#define TVAUDIO_INPUT_INTERN 3
29 31
32static inline const unsigned short *tvaudio_addrs(void)
33{
34 static const unsigned short addrs[] = {
35 I2C_ADDR_TDA8425 >> 1,
36 I2C_ADDR_TEA6300 >> 1,
37 I2C_ADDR_TEA6420 >> 1,
38 I2C_ADDR_TDA9840 >> 1,
39 I2C_ADDR_TDA985x_L >> 1,
40 I2C_ADDR_TDA985x_H >> 1,
41 I2C_ADDR_TDA9874 >> 1,
42 I2C_ADDR_PIC16C54 >> 1,
43 I2C_CLIENT_END
44 };
45
46 return addrs;
47}
48
30#endif 49#endif
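A hedged fragment showing what the new helper is meant for: probing the tvaudio chip at its known addresses with the probed-subdev loader that this series converts to take a struct v4l2_device. Here dev and i2c_adap are hypothetical bridge-driver fields.

	struct v4l2_subdev *sd;

	sd = v4l2_i2c_new_probed_subdev(&dev->v4l2_dev, i2c_adap,
					"tvaudio", "tvaudio", tvaudio_addrs());
	/* sd is NULL if no supported audio chip answered on the bus */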
diff --git a/include/media/v4l2-chip-ident.h b/include/media/v4l2-chip-ident.h
index 9aaf652b20ef..1be461a29077 100644
--- a/include/media/v4l2-chip-ident.h
+++ b/include/media/v4l2-chip-ident.h
@@ -37,10 +37,8 @@ enum {
37 /* module saa7110: just ident 100 */ 37 /* module saa7110: just ident 100 */
38 V4L2_IDENT_SAA7110 = 100, 38 V4L2_IDENT_SAA7110 = 100,
39 39
40 /* module saa7111: just ident 101 */ 40 /* module saa7115: reserved range 101-149 */
41 V4L2_IDENT_SAA7111 = 101, 41 V4L2_IDENT_SAA7111 = 101,
42
43 /* module saa7115: reserved range 102-149 */
44 V4L2_IDENT_SAA7113 = 103, 42 V4L2_IDENT_SAA7113 = 103,
45 V4L2_IDENT_SAA7114 = 104, 43 V4L2_IDENT_SAA7114 = 104,
46 V4L2_IDENT_SAA7115 = 105, 44 V4L2_IDENT_SAA7115 = 105,
@@ -63,44 +61,96 @@ enum {
63 V4L2_IDENT_OV7720 = 251, 61 V4L2_IDENT_OV7720 = 251,
64 V4L2_IDENT_OV7725 = 252, 62 V4L2_IDENT_OV7725 = 252,
65 63
66 /* Conexant MPEG encoder/decoders: reserved range 410-420 */ 64 /* module saa7146: reserved range 300-309 */
65 V4L2_IDENT_SAA7146 = 300,
66
67 /* Conexant MPEG encoder/decoders: reserved range 400-420 */
68 V4L2_IDENT_CX23418_843 = 403, /* Integrated A/V Decoder on the '418 */
67 V4L2_IDENT_CX23415 = 415, 69 V4L2_IDENT_CX23415 = 415,
68 V4L2_IDENT_CX23416 = 416, 70 V4L2_IDENT_CX23416 = 416,
69 V4L2_IDENT_CX23418 = 418, 71 V4L2_IDENT_CX23418 = 418,
70 72
73 /* module au0828 */
74 V4L2_IDENT_AU0828 = 828,
75
76 /* module indycam: just ident 2000 */
77 V4L2_IDENT_INDYCAM = 2000,
78
79 /* module bt819: reserved range 810-819 */
80 V4L2_IDENT_BT815A = 815,
81 V4L2_IDENT_BT817A = 817,
82 V4L2_IDENT_BT819A = 819,
83
84 /* module bt856: just ident 856 */
85 V4L2_IDENT_BT856 = 856,
86
87 /* module bt866: just ident 866 */
88 V4L2_IDENT_BT866 = 866,
89
90 /* module ks0127: reserved range 1120-1129 */
91 V4L2_IDENT_KS0122S = 1122,
92 V4L2_IDENT_KS0127 = 1127,
93 V4L2_IDENT_KS0127B = 1128,
94
71 /* module vp27smpx: just ident 2700 */ 95 /* module vp27smpx: just ident 2700 */
72 V4L2_IDENT_VP27SMPX = 2700, 96 V4L2_IDENT_VP27SMPX = 2700,
73 97
98 /* module vpx3220: reserved range: 3210-3229 */
99 V4L2_IDENT_VPX3214C = 3214,
100 V4L2_IDENT_VPX3216B = 3216,
101 V4L2_IDENT_VPX3220A = 3220,
102
74 /* module tvp5150 */ 103 /* module tvp5150 */
75 V4L2_IDENT_TVP5150 = 5150, 104 V4L2_IDENT_TVP5150 = 5150,
76 105
106 /* module saa5246a: just ident 5246 */
107 V4L2_IDENT_SAA5246A = 5246,
108
109 /* module saa5249: just ident 5249 */
110 V4L2_IDENT_SAA5249 = 5249,
111
77 /* module cs5345: just ident 5345 */ 112 /* module cs5345: just ident 5345 */
78 V4L2_IDENT_CS5345 = 5345, 113 V4L2_IDENT_CS5345 = 5345,
79 114
115 /* module tea6415c: just ident 6415 */
116 V4L2_IDENT_TEA6415C = 6415,
117
118 /* module tea6420: just ident 6420 */
119 V4L2_IDENT_TEA6420 = 6420,
120
121 /* module saa6588: just ident 6588 */
122 V4L2_IDENT_SAA6588 = 6588,
123
80 /* module saa6752hs: reserved range 6750-6759 */ 124 /* module saa6752hs: reserved range 6750-6759 */
81 V4L2_IDENT_SAA6752HS = 6752, 125 V4L2_IDENT_SAA6752HS = 6752,
82 V4L2_IDENT_SAA6752HS_AC3 = 6753, 126 V4L2_IDENT_SAA6752HS_AC3 = 6753,
83 127
128 /* module adv7170: just ident 7170 */
129 V4L2_IDENT_ADV7170 = 7170,
130
131 /* module adv7175: just ident 7175 */
132 V4L2_IDENT_ADV7175 = 7175,
133
134 /* module saa7185: just ident 7185 */
135 V4L2_IDENT_SAA7185 = 7185,
136
137 /* module saa7191: just ident 7191 */
138 V4L2_IDENT_SAA7191 = 7191,
139
84 /* module wm8739: just ident 8739 */ 140 /* module wm8739: just ident 8739 */
85 V4L2_IDENT_WM8739 = 8739, 141 V4L2_IDENT_WM8739 = 8739,
86 142
87 /* module wm8775: just ident 8775 */ 143 /* module wm8775: just ident 8775 */
88 V4L2_IDENT_WM8775 = 8775, 144 V4L2_IDENT_WM8775 = 8775,
89 145
90 /* module tw9910: just ident 9910 */ 146 /* module tda9840: just ident 9840 */
91 V4L2_IDENT_TW9910 = 9910, 147 V4L2_IDENT_TDA9840 = 9840,
92
93 /* module cs53132a: just ident 53132 */
94 V4L2_IDENT_CS53l32A = 53132,
95
96 /* module upd64031a: just ident 64031 */
97 V4L2_IDENT_UPD64031A = 64031,
98 148
99 /* module upd64083: just ident 64083 */ 149 /* module cafe_ccic, just ident 8801 */
100 V4L2_IDENT_UPD64083 = 64083, 150 V4L2_IDENT_CAFE = 8801,
101 151
102 /* module m52790: just ident 52790 */ 152 /* module tw9910: just ident 9910 */
103 V4L2_IDENT_M52790 = 52790, 153 V4L2_IDENT_TW9910 = 9910,
104 154
105 /* module msp3400: reserved range 34000-34999 and 44000-44999 */ 155 /* module msp3400: reserved range 34000-34999 and 44000-44999 */
106 V4L2_IDENT_MSPX4XX = 34000, /* generic MSPX4XX identifier, only 156 V4L2_IDENT_MSPX4XX = 34000, /* generic MSPX4XX identifier, only
@@ -178,6 +228,18 @@ enum {
178 V4L2_IDENT_MT9V022IX7ATC = 45010, /* No way to detect "normal" I77ATx */ 228 V4L2_IDENT_MT9V022IX7ATC = 45010, /* No way to detect "normal" I77ATx */
179 V4L2_IDENT_MT9V022IX7ATM = 45015, /* and "lead free" IA7ATx chips */ 229 V4L2_IDENT_MT9V022IX7ATM = 45015, /* and "lead free" IA7ATx chips */
180 V4L2_IDENT_MT9T031 = 45020, 230 V4L2_IDENT_MT9T031 = 45020,
231
232 /* module cs53132a: just ident 53132 */
233 V4L2_IDENT_CS53l32A = 53132,
234
235 /* module upd64031a: just ident 64031 */
236 V4L2_IDENT_UPD64031A = 64031,
237
238 /* module upd64083: just ident 64083 */
239 V4L2_IDENT_UPD64083 = 64083,
240
241 /* module m52790: just ident 52790 */
242 V4L2_IDENT_M52790 = 52790,
181}; 243};
182 244
183#endif 245#endif
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 95e74f1874e1..c48c24e4d0fa 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -102,11 +102,15 @@ int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
102const char *v4l2_ctrl_get_name(u32 id); 102const char *v4l2_ctrl_get_name(u32 id);
103const char **v4l2_ctrl_get_menu(u32 id); 103const char **v4l2_ctrl_get_menu(u32 id);
104int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def); 104int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def);
105int v4l2_ctrl_query_fill_std(struct v4l2_queryctrl *qctrl);
106int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu, 105int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu,
107 struct v4l2_queryctrl *qctrl, const char **menu_items); 106 struct v4l2_queryctrl *qctrl, const char **menu_items);
108#define V4L2_CTRL_MENU_IDS_END (0xffffffff) 107#define V4L2_CTRL_MENU_IDS_END (0xffffffff)
109int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids); 108int v4l2_ctrl_query_menu_valid_items(struct v4l2_querymenu *qmenu, const u32 *ids);
109
110/* Note: ctrl_classes points to an array of u32 pointers. Each u32 array is a
111 0-terminated array of control IDs. Each array must be sorted low to high
112 and belong to the same control class. The array of u32 pointers must also
113 be sorted, from low class IDs to high class IDs. */
110u32 v4l2_ctrl_next(const u32 * const *ctrl_classes, u32 id); 114u32 v4l2_ctrl_next(const u32 * const *ctrl_classes, u32 id);
111 115
112/* ------------------------------------------------------------------------- */ 116/* ------------------------------------------------------------------------- */
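A hedged example of the table layout the note above describes, plus a minimal VIDIOC_QUERYCTRL handler built on v4l2_ctrl_next() and v4l2_ctrl_query_fill(); the control set and the 0..255 range are purely illustrative.

static const u32 my_user_ctrls[] = {
	V4L2_CID_BRIGHTNESS,	/* sorted low to high, all in the USER class */
	V4L2_CID_CONTRAST,
	V4L2_CID_AUDIO_VOLUME,
	V4L2_CID_AUDIO_MUTE,
	0			/* 0-terminated */
};

static const u32 *my_ctrl_classes[] = {
	my_user_ctrls,		/* one array per class, sorted by class ID */
	NULL
};

static int my_vidioc_queryctrl(struct file *file, void *fh,
			       struct v4l2_queryctrl *qc)
{
	qc->id = v4l2_ctrl_next(my_ctrl_classes, qc->id);
	if (qc->id == 0)
		return -EINVAL;
	return v4l2_ctrl_query_fill(qc, 0, 255, 1, 128); /* example range */
}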
@@ -121,7 +125,7 @@ int v4l2_chip_match_host(const struct v4l2_dbg_match *match);
121 125
122/* ------------------------------------------------------------------------- */ 126/* ------------------------------------------------------------------------- */
123 127
124/* Helper function for I2C legacy drivers */ 128/* I2C Helper functions */
125 129
126struct i2c_driver; 130struct i2c_driver;
127struct i2c_adapter; 131struct i2c_adapter;
@@ -131,157 +135,62 @@ struct v4l2_device;
131struct v4l2_subdev; 135struct v4l2_subdev;
132struct v4l2_subdev_ops; 136struct v4l2_subdev_ops;
133 137
134int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver *driver,
135 const char *name,
136 int (*probe)(struct i2c_client *, const struct i2c_device_id *));
137 138
138/* Load an i2c module and return an initialized v4l2_subdev struct. 139/* Load an i2c module and return an initialized v4l2_subdev struct.
139 Only call request_module if module_name != NULL. 140 Only call request_module if module_name != NULL.
140 The client_type argument is the name of the chip that's on the adapter. */ 141 The client_type argument is the name of the chip that's on the adapter. */
141struct v4l2_subdev *v4l2_i2c_new_subdev(struct i2c_adapter *adapter, 142struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
143 struct i2c_adapter *adapter,
142 const char *module_name, const char *client_type, u8 addr); 144 const char *module_name, const char *client_type, u8 addr);
143/* Probe and load an i2c module and return an initialized v4l2_subdev struct. 145/* Probe and load an i2c module and return an initialized v4l2_subdev struct.
144 Only call request_module if module_name != NULL. 146 Only call request_module if module_name != NULL.
145 The client_type argument is the name of the chip that's on the adapter. */ 147 The client_type argument is the name of the chip that's on the adapter. */
146struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct i2c_adapter *adapter, 148struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct v4l2_device *v4l2_dev,
149 struct i2c_adapter *adapter,
147 const char *module_name, const char *client_type, 150 const char *module_name, const char *client_type,
148 const unsigned short *addrs); 151 const unsigned short *addrs);
152/* Like v4l2_i2c_new_probed_subdev, except probe for a single address. */
153struct v4l2_subdev *v4l2_i2c_new_probed_subdev_addr(struct v4l2_device *v4l2_dev,
154 struct i2c_adapter *adapter,
155 const char *module_name, const char *client_type, u8 addr);
149/* Initialize an v4l2_subdev with data from an i2c_client struct */ 156/* Initialize an v4l2_subdev with data from an i2c_client struct */
150void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, 157void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
151 const struct v4l2_subdev_ops *ops); 158 const struct v4l2_subdev_ops *ops);
159/* Return i2c client address of v4l2_subdev. */
160unsigned short v4l2_i2c_subdev_addr(struct v4l2_subdev *sd);
161
162enum v4l2_i2c_tuner_type {
163 ADDRS_RADIO, /* Radio tuner addresses */
164 ADDRS_DEMOD, /* Demod tuner addresses */
165 ADDRS_TV, /* TV tuner addresses */
166 /* TV tuner addresses if demod is present, this excludes
167 addresses used by the demodulator from the list of
168 candidates. */
169 ADDRS_TV_WITH_DEMOD,
170};
171/* Return a list of I2C tuner addresses to probe. Use only if the tuner
172 addresses are unknown. */
173const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type);
152 174
153/* ------------------------------------------------------------------------- */ 175/* ------------------------------------------------------------------------- */
154 176
155/* Internal ioctls */ 177/* Note: these remaining ioctls/structs should be removed as well, but they are
156 178 still used in tuner-simple.c (TUNER_SET_CONFIG), cx18/ivtv (RESET) and
157/* VIDIOC_INT_DECODE_VBI_LINE */ 179 v4l2-int-device.h (v4l2_routing). To remove these ioctls some more cleanup
158struct v4l2_decode_vbi_line { 180 is needed in those modules. */
159 u32 is_second_field; /* Set to 0 for the first (odd) field,
160 set to 1 for the second (even) field. */
161 u8 *p; /* Pointer to the sliced VBI data from the decoder.
162 On exit points to the start of the payload. */
163 u32 line; /* Line number of the sliced VBI data (1-23) */
164 u32 type; /* VBI service type (V4L2_SLICED_*). 0 if no service found */
165};
166 181
182/* s_config */
167struct v4l2_priv_tun_config { 183struct v4l2_priv_tun_config {
168 int tuner; 184 int tuner;
169 void *priv; 185 void *priv;
170}; 186};
171
172/* audio ioctls */
173
174/* v4l device was opened in Radio mode, to be replaced by VIDIOC_INT_S_TUNER_MODE */
175#define AUDC_SET_RADIO _IO('d',88)
176
177/* tuner ioctls */
178
179/* Sets tuner type and its I2C addr */
180#define TUNER_SET_TYPE_ADDR _IOW('d', 90, int)
181
182/* Puts tuner on powersaving state, disabling it, except for i2c. To be replaced
183 by VIDIOC_INT_S_STANDBY. */
184#define TUNER_SET_STANDBY _IOW('d', 91, int)
185
186/* Sets tda9887 specific stuff, like port1, port2 and qss */
187#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config) 187#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config)
188 188
189/* Switch the tuner to a specific tuner mode. Replacement of AUDC_SET_RADIO */
190#define VIDIOC_INT_S_TUNER_MODE _IOW('d', 93, enum v4l2_tuner_type)
191
192/* Generic standby command. Passing -1 (all bits set to 1) will put the whole
193 chip into standby mode, value 0 will make the chip fully active. Specific
194 bits can be used by certain chips to enable/disable specific subsystems.
195 Replacement of TUNER_SET_STANDBY. */
196#define VIDIOC_INT_S_STANDBY _IOW('d', 94, u32)
197
198/* 100, 101 used by VIDIOC_DBG_[SG]_REGISTER */
199
200/* Generic reset command. The argument selects which subsystems to reset.
201 Passing 0 will always reset the whole chip. */
202#define VIDIOC_INT_RESET _IOW ('d', 102, u32) 189#define VIDIOC_INT_RESET _IOW ('d', 102, u32)
203 190
204/* Set the frequency (in Hz) of the audio clock output.
205 Used to slave an audio processor to the video decoder, ensuring that audio
206 and video remain synchronized.
207 Usual values for the frequency are 48000, 44100 or 32000 Hz.
208 If the frequency is not supported, then -EINVAL is returned. */
209#define VIDIOC_INT_AUDIO_CLOCK_FREQ _IOW ('d', 103, u32)
210
211/* Video decoders that support sliced VBI need to implement this ioctl.
212 Field p of the v4l2_sliced_vbi_line struct is set to the start of the VBI
213 data that was generated by the decoder. The driver then parses the sliced
214 VBI data and sets the other fields in the struct accordingly. The pointer p
215 is updated to point to the start of the payload which can be copied
216 verbatim into the data field of the v4l2_sliced_vbi_data struct. If no
217 valid VBI data was found, then the type field is set to 0 on return. */
218#define VIDIOC_INT_DECODE_VBI_LINE _IOWR('d', 104, struct v4l2_decode_vbi_line)
219
220/* Used to generate VBI signals on a video signal. v4l2_sliced_vbi_data is
221 filled with the data packets that should be output. Note that if you set
222 the line field to 0, then that VBI signal is disabled. If no
223 valid VBI data was found, then the type field is set to 0 on return. */
224#define VIDIOC_INT_S_VBI_DATA _IOW ('d', 105, struct v4l2_sliced_vbi_data)
225
226/* Used to obtain the sliced VBI packet from a readback register. Not all
227 video decoders support this. If no data is available because the readback
228 register contains invalid or erroneous data -EIO is returned. Note that
229 you must fill in the 'id' member and the 'field' member (to determine
230 whether CC data from the first or second field should be obtained). */
231#define VIDIOC_INT_G_VBI_DATA _IOWR('d', 106, struct v4l2_sliced_vbi_data)
232
233/* Sets I2S speed in bps. This is used to provide a standard way to select I2S
234 clock used by driving digital audio streams at some board designs.
235 Usual values for the frequency are 1024000 and 2048000.
236 If the frequency is not supported, then -EINVAL is returned. */
237#define VIDIOC_INT_I2S_CLOCK_FREQ _IOW ('d', 108, u32)
238
239/* Routing definition, device dependent. It specifies which inputs (if any)
240 should be routed to which outputs (if any). */
241struct v4l2_routing { 191struct v4l2_routing {
242 u32 input; 192 u32 input;
243 u32 output; 193 u32 output;
244}; 194};
245 195
246/* These internal commands should be used to define the inputs and outputs
247 of an audio/video chip. They will replace the v4l2 API commands
248 VIDIOC_S/G_INPUT, VIDIOC_S/G_OUTPUT, VIDIOC_S/G_AUDIO and VIDIOC_S/G_AUDOUT
249 that are meant to be used by the user.
250 The internal commands should be used to switch inputs/outputs
251 because only the driver knows how to map a 'Television' input to the precise
252 input/output routing of an A/D converter, or a DSP, or a video digitizer.
253 These four commands should only be sent directly to an i2c device, they
254 should not be broadcast as the routing is very device specific. */
255#define VIDIOC_INT_S_AUDIO_ROUTING _IOW ('d', 109, struct v4l2_routing)
256#define VIDIOC_INT_G_AUDIO_ROUTING _IOR ('d', 110, struct v4l2_routing)
257#define VIDIOC_INT_S_VIDEO_ROUTING _IOW ('d', 111, struct v4l2_routing)
258#define VIDIOC_INT_G_VIDEO_ROUTING _IOR ('d', 112, struct v4l2_routing)
259
260struct v4l2_crystal_freq {
261 u32 freq; /* frequency in Hz of the crystal */
262 u32 flags; /* device specific flags */
263};
264
265/* Sets the frequency of the crystal used to generate the clocks.
266 An extra flags field allows device specific configuration regarding
267 clock frequency dividers, etc. If not used, then set flags to 0.
268 If the frequency is not supported, then -EINVAL is returned. */
269#define VIDIOC_INT_S_CRYSTAL_FREQ _IOW('d', 113, struct v4l2_crystal_freq)
270
271/* Initialize the sensor registors to some sort of reasonable
272 default values. */
273#define VIDIOC_INT_INIT _IOW('d', 114, u32)
274
275/* Set v4l2_std_id for video OUTPUT devices. This is ignored by
276 video input devices. */
277#define VIDIOC_INT_S_STD_OUTPUT _IOW('d', 115, v4l2_std_id)
278
279/* Get v4l2_std_id for video OUTPUT devices. This is ignored by
280 video input devices. */
281#define VIDIOC_INT_G_STD_OUTPUT _IOW('d', 116, v4l2_std_id)
282
283/* Set GPIO pins. Very simple right now, might need to be extended with
284 a v4l2_gpio struct if a direction is also needed. */
285#define VIDIOC_INT_S_GPIO _IOW('d', 117, u32)
286
287#endif /* V4L2_COMMON_H_ */ 196#endif /* V4L2_COMMON_H_ */
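To make the new signatures concrete, a hedged sketch of a bridge probe loading its sub-devices: a decoder at a known address and a tuner probed via the new v4l2_i2c_tuner_addrs() helper. The chip name and I2C address are illustrative.

static int my_bridge_load_subdevs(struct v4l2_device *v4l2_dev,
				  struct i2c_adapter *adap)
{
	struct v4l2_subdev *decoder, *tuner;

	/* known address: request the module and instantiate the subdev */
	decoder = v4l2_i2c_new_subdev(v4l2_dev, adap,
				      "saa7115", "saa7115", 0x21);

	/* unknown address: probe the standard TV tuner address list */
	tuner = v4l2_i2c_new_probed_subdev(v4l2_dev, adap, "tuner", "tuner",
					   v4l2_i2c_tuner_addrs(ADDRS_TV));

	return decoder && tuner ? 0 : -ENODEV;
}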
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index e36faab8459b..2058dd45e915 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -40,6 +40,8 @@ struct v4l2_file_operations {
40 unsigned int (*poll) (struct file *, struct poll_table_struct *); 40 unsigned int (*poll) (struct file *, struct poll_table_struct *);
41 long (*ioctl) (struct file *, unsigned int, unsigned long); 41 long (*ioctl) (struct file *, unsigned int, unsigned long);
42 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); 42 long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
43 unsigned long (*get_unmapped_area) (struct file *, unsigned long,
44 unsigned long, unsigned long, unsigned long);
43 int (*mmap) (struct file *, struct vm_area_struct *); 45 int (*mmap) (struct file *, struct vm_area_struct *);
44 int (*open) (struct file *); 46 int (*open) (struct file *);
45 int (*release) (struct file *); 47 int (*release) (struct file *);
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index 55e41afd95ef..0dd3e8e8653e 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -33,7 +33,9 @@
33#define V4L2_DEVICE_NAME_SIZE (BUS_ID_SIZE + 16) 33#define V4L2_DEVICE_NAME_SIZE (BUS_ID_SIZE + 16)
34 34
35struct v4l2_device { 35struct v4l2_device {
36 /* dev->driver_data points to this struct */ 36 /* dev->driver_data points to this struct.
37 Note: dev might be NULL if there is no parent device
38 as is the case with e.g. ISA devices. */
37 struct device *dev; 39 struct device *dev;
38 /* used to keep track of the registered subdevs */ 40 /* used to keep track of the registered subdevs */
39 struct list_head subdevs; 41 struct list_head subdevs;
@@ -42,33 +44,43 @@ struct v4l2_device {
42 spinlock_t lock; 44 spinlock_t lock;
43 /* unique device name, by default the driver name + bus ID */ 45 /* unique device name, by default the driver name + bus ID */
44 char name[V4L2_DEVICE_NAME_SIZE]; 46 char name[V4L2_DEVICE_NAME_SIZE];
47 /* notify callback called by some sub-devices. */
48 void (*notify)(struct v4l2_subdev *sd,
49 unsigned int notification, void *arg);
45}; 50};
46 51
47/* Initialize v4l2_dev and make dev->driver_data point to v4l2_dev */ 52/* Initialize v4l2_dev and make dev->driver_data point to v4l2_dev.
53 dev may be NULL in rare cases (ISA devices). In that case you
54 must fill in the v4l2_dev->name field before calling this function. */
48int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev); 55int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev);
49/* Set v4l2_dev->dev->driver_data to NULL and unregister all sub-devices */ 56/* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
57 Since the parent disappears this ensures that v4l2_dev doesn't have an
58 invalid parent pointer. */
59void v4l2_device_disconnect(struct v4l2_device *v4l2_dev);
60/* Unregister all sub-devices and any other resources related to v4l2_dev. */
50void v4l2_device_unregister(struct v4l2_device *v4l2_dev); 61void v4l2_device_unregister(struct v4l2_device *v4l2_dev);
51 62
52/* Register a subdev with a v4l2 device. While registered the subdev module 63/* Register a subdev with a v4l2 device. While registered the subdev module
53 is marked as in-use. An error is returned if the module is no longer 64 is marked as in-use. An error is returned if the module is no longer
54 loaded when you attempt to register it. */ 65 loaded when you attempt to register it. */
55int __must_check v4l2_device_register_subdev(struct v4l2_device *dev, struct v4l2_subdev *sd); 66int __must_check v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
67 struct v4l2_subdev *sd);
56/* Unregister a subdev with a v4l2 device. Can also be called if the subdev 68/* Unregister a subdev with a v4l2 device. Can also be called if the subdev
57 wasn't registered. In that case it will do nothing. */ 69 wasn't registered. In that case it will do nothing. */
58void v4l2_device_unregister_subdev(struct v4l2_subdev *sd); 70void v4l2_device_unregister_subdev(struct v4l2_subdev *sd);
59 71
60/* Iterate over all subdevs. */ 72/* Iterate over all subdevs. */
61#define v4l2_device_for_each_subdev(sd, dev) \ 73#define v4l2_device_for_each_subdev(sd, v4l2_dev) \
62 list_for_each_entry(sd, &(dev)->subdevs, list) 74 list_for_each_entry(sd, &(v4l2_dev)->subdevs, list)
63 75
64/* Call the specified callback for all subdevs matching the condition. 76/* Call the specified callback for all subdevs matching the condition.
65 Ignore any errors. Note that you cannot add or delete a subdev 77 Ignore any errors. Note that you cannot add or delete a subdev
66 while walking the subdevs list. */ 78 while walking the subdevs list. */
67#define __v4l2_device_call_subdevs(dev, cond, o, f, args...) \ 79#define __v4l2_device_call_subdevs(v4l2_dev, cond, o, f, args...) \
68 do { \ 80 do { \
69 struct v4l2_subdev *sd; \ 81 struct v4l2_subdev *sd; \
70 \ 82 \
71 list_for_each_entry(sd, &(dev)->subdevs, list) \ 83 list_for_each_entry(sd, &(v4l2_dev)->subdevs, list) \
72 if ((cond) && sd->ops->o && sd->ops->o->f) \ 84 if ((cond) && sd->ops->o && sd->ops->o->f) \
73 sd->ops->o->f(sd , ##args); \ 85 sd->ops->o->f(sd , ##args); \
74 } while (0) 86 } while (0)
@@ -77,12 +89,12 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd);
77 If the callback returns an error other than 0 or -ENOIOCTLCMD, then 89 If the callback returns an error other than 0 or -ENOIOCTLCMD, then
78 return with that error code. Note that you cannot add or delete a 90 return with that error code. Note that you cannot add or delete a
79 subdev while walking the subdevs list. */ 91 subdev while walking the subdevs list. */
80#define __v4l2_device_call_subdevs_until_err(dev, cond, o, f, args...) \ 92#define __v4l2_device_call_subdevs_until_err(v4l2_dev, cond, o, f, args...) \
81({ \ 93({ \
82 struct v4l2_subdev *sd; \ 94 struct v4l2_subdev *sd; \
83 long err = 0; \ 95 long err = 0; \
84 \ 96 \
85 list_for_each_entry(sd, &(dev)->subdevs, list) { \ 97 list_for_each_entry(sd, &(v4l2_dev)->subdevs, list) { \
86 if ((cond) && sd->ops->o && sd->ops->o->f) \ 98 if ((cond) && sd->ops->o && sd->ops->o->f) \
87 err = sd->ops->o->f(sd , ##args); \ 99 err = sd->ops->o->f(sd , ##args); \
88 if (err && err != -ENOIOCTLCMD) \ 100 if (err && err != -ENOIOCTLCMD) \
@@ -94,16 +106,16 @@ void v4l2_device_unregister_subdev(struct v4l2_subdev *sd);
94/* Call the specified callback for all subdevs matching grp_id (if 0, then 106/* Call the specified callback for all subdevs matching grp_id (if 0, then
95 match them all). Ignore any errors. Note that you cannot add or delete 107 match them all). Ignore any errors. Note that you cannot add or delete
96 a subdev while walking the subdevs list. */ 108 a subdev while walking the subdevs list. */
97#define v4l2_device_call_all(dev, grpid, o, f, args...) \ 109#define v4l2_device_call_all(v4l2_dev, grpid, o, f, args...) \
98 __v4l2_device_call_subdevs(dev, \ 110 __v4l2_device_call_subdevs(v4l2_dev, \
99 !(grpid) || sd->grp_id == (grpid), o, f , ##args) 111 !(grpid) || sd->grp_id == (grpid), o, f , ##args)
100 112
101/* Call the specified callback for all subdevs matching grp_id (if 0, then 113/* Call the specified callback for all subdevs matching grp_id (if 0, then
102 match them all). If the callback returns an error other than 0 or 114 match them all). If the callback returns an error other than 0 or
103 -ENOIOCTLCMD, then return with that error code. Note that you cannot 115 -ENOIOCTLCMD, then return with that error code. Note that you cannot
104 add or delete a subdev while walking the subdevs list. */ 116 add or delete a subdev while walking the subdevs list. */
105#define v4l2_device_call_until_err(dev, grpid, o, f, args...) \ 117#define v4l2_device_call_until_err(v4l2_dev, grpid, o, f, args...) \
106 __v4l2_device_call_subdevs_until_err(dev, \ 118 __v4l2_device_call_subdevs_until_err(v4l2_dev, \
107 !(grpid) || sd->grp_id == (grpid), o, f , ##args) 119 !(grpid) || sd->grp_id == (grpid), o, f , ##args)
108 120
109#endif 121#endif
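A hedged sketch tying the v4l2-device.h additions together: install a notify callback after registering, and call the new v4l2_device_disconnect() when a hot-unpluggable parent (e.g. USB) goes away, before the eventual unregister. struct my_usb_dev and its callbacks are hypothetical.

struct my_usb_dev {
	struct v4l2_device v4l2_dev;
};

static void my_subdev_notify(struct v4l2_subdev *sd,
			     unsigned int notification, void *arg)
{
	/* handle sub-device notifications, e.g. BT819_FIFO_RESET_LOW/HIGH */
}

static int my_usb_dev_init(struct my_usb_dev *dev, struct device *parent)
{
	int err = v4l2_device_register(parent, &dev->v4l2_dev);

	if (err)
		return err;
	dev->v4l2_dev.notify = my_subdev_notify;
	/* sub-devices are added here with v4l2_device_register_subdev() */
	return 0;
}

static void my_usb_dev_disconnect(struct my_usb_dev *dev)
{
	/* the parent is going away: clear the stale pointer first ... */
	v4l2_device_disconnect(&dev->v4l2_dev);
	/* ... and unregister everything once the last user is gone */
	v4l2_device_unregister(&dev->v4l2_dev);
}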
diff --git a/include/media/v4l2-i2c-drv-legacy.h b/include/media/v4l2-i2c-drv-legacy.h
deleted file mode 100644
index e65dd9d84e8b..000000000000
--- a/include/media/v4l2-i2c-drv-legacy.h
+++ /dev/null
@@ -1,152 +0,0 @@
1/*
2 * v4l2-i2c-drv-legacy.h - contains I2C handling code that's identical
3 * for all V4L2 I2C drivers. Use this header if the
4 * I2C driver is used by both legacy drivers and
5 * drivers converted to the bus-based I2C API.
6 *
7 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24/* NOTE: the full version of this header is in the v4l-dvb repository
25 * and allows v4l i2c drivers to be compiled on older kernels as well.
26 * The version of this header as it appears in the kernel is a stripped
27 * version (without all the backwards compatibility stuff) and so it
28 * looks a bit odd.
29 *
30 * If you look at the full version then you will understand the reason
31 * for introducing this header since you really don't want to have all
32 * the tricky backwards compatibility code in each and every i2c driver.
33 */
34
35struct v4l2_i2c_driver_data {
36 const char * const name;
37 int driverid;
38 int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
39 int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
40 int (*remove)(struct i2c_client *client);
41 int (*suspend)(struct i2c_client *client, pm_message_t state);
42 int (*resume)(struct i2c_client *client);
43 int (*legacy_probe)(struct i2c_adapter *adapter);
44 int legacy_class;
45 const struct i2c_device_id *id_table;
46};
47
48static struct v4l2_i2c_driver_data v4l2_i2c_data;
49static const struct i2c_client_address_data addr_data;
50static struct i2c_driver v4l2_i2c_driver_legacy;
51static char v4l2_i2c_drv_name_legacy[32];
52
53static int v4l2_i2c_drv_attach_legacy(struct i2c_adapter *adapter, int address, int kind)
54{
55 return v4l2_i2c_attach(adapter, address, &v4l2_i2c_driver_legacy,
56 v4l2_i2c_drv_name_legacy, v4l2_i2c_data.probe);
57}
58
59static int v4l2_i2c_drv_probe_legacy(struct i2c_adapter *adapter)
60{
61 if (v4l2_i2c_data.legacy_probe) {
62 if (v4l2_i2c_data.legacy_probe(adapter))
63 return i2c_probe(adapter, &addr_data, v4l2_i2c_drv_attach_legacy);
64 return 0;
65 }
66 if (adapter->class & v4l2_i2c_data.legacy_class)
67 return i2c_probe(adapter, &addr_data, v4l2_i2c_drv_attach_legacy);
68 return 0;
69}
70
71static int v4l2_i2c_drv_detach_legacy(struct i2c_client *client)
72{
73 int err;
74
75 if (v4l2_i2c_data.remove)
76 v4l2_i2c_data.remove(client);
77
78 err = i2c_detach_client(client);
79 if (err)
80 return err;
81 kfree(client);
82 return 0;
83}
84
85static int v4l2_i2c_drv_suspend_helper(struct i2c_client *client, pm_message_t state)
86{
87 return v4l2_i2c_data.suspend ? v4l2_i2c_data.suspend(client, state) : 0;
88}
89
90static int v4l2_i2c_drv_resume_helper(struct i2c_client *client)
91{
92 return v4l2_i2c_data.resume ? v4l2_i2c_data.resume(client) : 0;
93}
94
95/* ----------------------------------------------------------------------- */
96
97/* i2c implementation */
98static struct i2c_driver v4l2_i2c_driver_legacy = {
99 .driver = {
100 .owner = THIS_MODULE,
101 },
102 .attach_adapter = v4l2_i2c_drv_probe_legacy,
103 .detach_client = v4l2_i2c_drv_detach_legacy,
104 .suspend = v4l2_i2c_drv_suspend_helper,
105 .resume = v4l2_i2c_drv_resume_helper,
106};
107
108/* ----------------------------------------------------------------------- */
109
110/* i2c implementation */
111static struct i2c_driver v4l2_i2c_driver = {
112 .suspend = v4l2_i2c_drv_suspend_helper,
113 .resume = v4l2_i2c_drv_resume_helper,
114};
115
116static int __init v4l2_i2c_drv_init(void)
117{
118 int err;
119
120 strlcpy(v4l2_i2c_drv_name_legacy, v4l2_i2c_data.name, sizeof(v4l2_i2c_drv_name_legacy));
121 strlcat(v4l2_i2c_drv_name_legacy, "'", sizeof(v4l2_i2c_drv_name_legacy));
122
123 if (v4l2_i2c_data.legacy_class == 0)
124 v4l2_i2c_data.legacy_class = I2C_CLASS_TV_ANALOG;
125
126 v4l2_i2c_driver_legacy.driver.name = v4l2_i2c_drv_name_legacy;
127 v4l2_i2c_driver_legacy.id = v4l2_i2c_data.driverid;
128 v4l2_i2c_driver_legacy.command = v4l2_i2c_data.command;
129 err = i2c_add_driver(&v4l2_i2c_driver_legacy);
130
131 if (err)
132 return err;
133 v4l2_i2c_driver.driver.name = v4l2_i2c_data.name;
134 v4l2_i2c_driver.id = v4l2_i2c_data.driverid;
135 v4l2_i2c_driver.command = v4l2_i2c_data.command;
136 v4l2_i2c_driver.probe = v4l2_i2c_data.probe;
137 v4l2_i2c_driver.remove = v4l2_i2c_data.remove;
138 v4l2_i2c_driver.id_table = v4l2_i2c_data.id_table;
139 err = i2c_add_driver(&v4l2_i2c_driver);
140 if (err)
141 i2c_del_driver(&v4l2_i2c_driver_legacy);
142 return err;
143}
144
145static void __exit v4l2_i2c_drv_cleanup(void)
146{
147 i2c_del_driver(&v4l2_i2c_driver_legacy);
148 i2c_del_driver(&v4l2_i2c_driver);
149}
150
151module_init(v4l2_i2c_drv_init);
152module_exit(v4l2_i2c_drv_cleanup);
diff --git a/include/media/v4l2-i2c-drv.h b/include/media/v4l2-i2c-drv.h
index efdc8bf27f87..10a2882c3cbf 100644
--- a/include/media/v4l2-i2c-drv.h
+++ b/include/media/v4l2-i2c-drv.h
@@ -39,14 +39,11 @@
39 39
40struct v4l2_i2c_driver_data { 40struct v4l2_i2c_driver_data {
41 const char * const name; 41 const char * const name;
42 int driverid;
43 int (*command)(struct i2c_client *client, unsigned int cmd, void *arg); 42 int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
44 int (*probe)(struct i2c_client *client, const struct i2c_device_id *id); 43 int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
45 int (*remove)(struct i2c_client *client); 44 int (*remove)(struct i2c_client *client);
46 int (*suspend)(struct i2c_client *client, pm_message_t state); 45 int (*suspend)(struct i2c_client *client, pm_message_t state);
47 int (*resume)(struct i2c_client *client); 46 int (*resume)(struct i2c_client *client);
48 int (*legacy_probe)(struct i2c_adapter *adapter);
49 int legacy_class;
50 const struct i2c_device_id *id_table; 47 const struct i2c_device_id *id_table;
51}; 48};
52 49
@@ -54,12 +51,11 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data;
54static struct i2c_driver v4l2_i2c_driver; 51static struct i2c_driver v4l2_i2c_driver;
55 52
56 53
57/* Bus-based I2C implementation for kernels >= 2.6.22 */ 54/* Bus-based I2C implementation for kernels >= 2.6.26 */
58 55
59static int __init v4l2_i2c_drv_init(void) 56static int __init v4l2_i2c_drv_init(void)
60{ 57{
61 v4l2_i2c_driver.driver.name = v4l2_i2c_data.name; 58 v4l2_i2c_driver.driver.name = v4l2_i2c_data.name;
62 v4l2_i2c_driver.id = v4l2_i2c_data.driverid;
63 v4l2_i2c_driver.command = v4l2_i2c_data.command; 59 v4l2_i2c_driver.command = v4l2_i2c_data.command;
64 v4l2_i2c_driver.probe = v4l2_i2c_data.probe; 60 v4l2_i2c_driver.probe = v4l2_i2c_data.probe;
65 v4l2_i2c_driver.remove = v4l2_i2c_data.remove; 61 v4l2_i2c_driver.remove = v4l2_i2c_data.remove;
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index b01c044868d0..7a4529defa88 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -15,6 +15,7 @@
15#include <linux/mutex.h> 15#include <linux/mutex.h>
16#include <linux/compiler.h> /* need __user */ 16#include <linux/compiler.h> /* need __user */
17#ifdef CONFIG_VIDEO_V4L1_COMPAT 17#ifdef CONFIG_VIDEO_V4L1_COMPAT
18#define __MIN_V4L1
18#include <linux/videodev.h> 19#include <linux/videodev.h>
19#else 20#else
20#include <linux/videodev2.h> 21#include <linux/videodev2.h>
@@ -267,6 +268,7 @@ struct v4l2_ioctl_ops {
267 268
268/* Video standard functions */ 269/* Video standard functions */
269extern const char *v4l2_norm_to_name(v4l2_std_id id); 270extern const char *v4l2_norm_to_name(v4l2_std_id id);
271extern void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod);
270extern int v4l2_video_std_construct(struct v4l2_standard *vs, 272extern int v4l2_video_std_construct(struct v4l2_standard *vs,
271 int id, const char *name); 273 int id, const char *name);
272/* Prints the ioctl in a human-readable format */ 274/* Prints the ioctl in a human-readable format */
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 37b09e56e943..17856081c809 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -27,6 +27,16 @@ struct v4l2_device;
27struct v4l2_subdev; 27struct v4l2_subdev;
28struct tuner_setup; 28struct tuner_setup;
29 29
30/* decode_vbi_line */
31struct v4l2_decode_vbi_line {
32 u32 is_second_field; /* Set to 0 for the first (odd) field,
33 set to 1 for the second (even) field. */
34 u8 *p; /* Pointer to the sliced VBI data from the decoder.
35 On exit points to the start of the payload. */
36 u32 line; /* Line number of the sliced VBI data (1-23) */
37 u32 type; /* VBI service type (V4L2_SLICED_*). 0 if no service found */
38};
39
30/* Sub-devices are devices that are connected somehow to the main bridge 40/* Sub-devices are devices that are connected somehow to the main bridge
31 device. These devices are usually audio/video muxers/encoders/decoders or 41 device. These devices are usually audio/video muxers/encoders/decoders or
32 sensors and webcam controllers. 42 sensors and webcam controllers.
@@ -68,17 +78,36 @@ struct tuner_setup;
68 the use-case it might be better to use subdev-specific ops (currently 78 the use-case it might be better to use subdev-specific ops (currently
69 not yet implemented) since ops provide proper type-checking. 79 not yet implemented) since ops provide proper type-checking.
70 */ 80 */
81
 82/* init: initialize the sensor registers to some sort of reasonable default
 83 values. Do not use in new drivers; it should be removed from existing
 84 drivers.
85
86 load_fw: load firmware.
87
88 reset: generic reset command. The argument selects which subsystems to
 89 reset. Passing 0 will always reset the whole chip. Do not use in new
 90 drivers without discussing this first on the linux-media mailing list.
 91 There should normally be no reason to reset a device.
92
 93 s_gpio: set GPIO pins. Very simple right now; it might need to be extended
 94 with a direction argument later.
95 */
71struct v4l2_subdev_core_ops { 96struct v4l2_subdev_core_ops {
72 int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip); 97 int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip);
73 int (*log_status)(struct v4l2_subdev *sd); 98 int (*log_status)(struct v4l2_subdev *sd);
74 int (*init)(struct v4l2_subdev *sd, u32 val); 99 int (*init)(struct v4l2_subdev *sd, u32 val);
75 int (*s_standby)(struct v4l2_subdev *sd, u32 standby); 100 int (*load_fw)(struct v4l2_subdev *sd);
76 int (*reset)(struct v4l2_subdev *sd, u32 val); 101 int (*reset)(struct v4l2_subdev *sd, u32 val);
77 int (*s_gpio)(struct v4l2_subdev *sd, u32 val); 102 int (*s_gpio)(struct v4l2_subdev *sd, u32 val);
78 int (*queryctrl)(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc); 103 int (*queryctrl)(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc);
79 int (*g_ctrl)(struct v4l2_subdev *sd, struct v4l2_control *ctrl); 104 int (*g_ctrl)(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
80 int (*s_ctrl)(struct v4l2_subdev *sd, struct v4l2_control *ctrl); 105 int (*s_ctrl)(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
106 int (*g_ext_ctrls)(struct v4l2_subdev *sd, struct v4l2_ext_controls *ctrls);
107 int (*s_ext_ctrls)(struct v4l2_subdev *sd, struct v4l2_ext_controls *ctrls);
108 int (*try_ext_ctrls)(struct v4l2_subdev *sd, struct v4l2_ext_controls *ctrls);
81 int (*querymenu)(struct v4l2_subdev *sd, struct v4l2_querymenu *qm); 109 int (*querymenu)(struct v4l2_subdev *sd, struct v4l2_querymenu *qm);
110 int (*s_std)(struct v4l2_subdev *sd, v4l2_std_id norm);
82 long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); 111 long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
83#ifdef CONFIG_VIDEO_ADV_DEBUG 112#ifdef CONFIG_VIDEO_ADV_DEBUG
84 int (*g_register)(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg); 113 int (*g_register)(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg);
@@ -86,6 +115,16 @@ struct v4l2_subdev_core_ops {
86#endif 115#endif
87}; 116};
88 117
118/* s_mode: switch the tuner to a specific tuner mode. Replacement of s_radio.
119
120 s_radio: v4l device was opened in Radio mode, to be replaced by s_mode.
121
122 s_type_addr: sets tuner type and its I2C addr.
123
 124 s_config: sets tda9887-specific settings, such as port1, port2 and qss.
125
126 s_standby: puts tuner on powersaving state, disabling it, except for i2c.
127 */
89struct v4l2_subdev_tuner_ops { 128struct v4l2_subdev_tuner_ops {
90 int (*s_mode)(struct v4l2_subdev *sd, enum v4l2_tuner_type); 129 int (*s_mode)(struct v4l2_subdev *sd, enum v4l2_tuner_type);
91 int (*s_radio)(struct v4l2_subdev *sd); 130 int (*s_radio)(struct v4l2_subdev *sd);
@@ -93,28 +132,93 @@ struct v4l2_subdev_tuner_ops {
93 int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq); 132 int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq);
94 int (*g_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt); 133 int (*g_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt);
95 int (*s_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt); 134 int (*s_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt);
96 int (*s_std)(struct v4l2_subdev *sd, v4l2_std_id norm);
97 int (*s_type_addr)(struct v4l2_subdev *sd, struct tuner_setup *type); 135 int (*s_type_addr)(struct v4l2_subdev *sd, struct tuner_setup *type);
98 int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config); 136 int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config);
137 int (*s_standby)(struct v4l2_subdev *sd);
99}; 138};
100 139
140/* s_clock_freq: set the frequency (in Hz) of the audio clock output.
141 Used to slave an audio processor to the video decoder, ensuring that
142 audio and video remain synchronized. Usual values for the frequency
143 are 48000, 44100 or 32000 Hz. If the frequency is not supported, then
144 -EINVAL is returned.
145
146 s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard
 147 way to select the I2S clock used to drive digital audio streams on some
148 board designs. Usual values for the frequency are 1024000 and 2048000.
149 If the frequency is not supported, then -EINVAL is returned.
150
151 s_routing: used to define the input and/or output pins of an audio chip,
152 and any additional configuration data.
153 Never attempt to use user-level input IDs (e.g. Composite, S-Video,
154 Tuner) at this level. An i2c device shouldn't know about whether an
155 input pin is connected to a Composite connector, become on another
156 board or platform it might be connected to something else entirely.
157 The calling driver is responsible for mapping a user-level input to
158 the right pins on the i2c device.
159 */
101struct v4l2_subdev_audio_ops { 160struct v4l2_subdev_audio_ops {
102 int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq); 161 int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq);
103 int (*s_i2s_clock_freq)(struct v4l2_subdev *sd, u32 freq); 162 int (*s_i2s_clock_freq)(struct v4l2_subdev *sd, u32 freq);
104 int (*s_routing)(struct v4l2_subdev *sd, const struct v4l2_routing *route); 163 int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
105}; 164};
106 165
166/*
167 decode_vbi_line: video decoders that support sliced VBI need to implement
168 this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
169 start of the VBI data that was generated by the decoder. The driver
170 then parses the sliced VBI data and sets the other fields in the
171 struct accordingly. The pointer p is updated to point to the start of
172 the payload which can be copied verbatim into the data field of the
173 v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
174 type field is set to 0 on return.
175
176 s_vbi_data: used to generate VBI signals on a video signal.
177 v4l2_sliced_vbi_data is filled with the data packets that should be
178 output. Note that if you set the line field to 0, then that VBI signal
179 is disabled. If no valid VBI data was found, then the type field is
180 set to 0 on return.
181
182 g_vbi_data: used to obtain the sliced VBI packet from a readback register.
183 Not all video decoders support this. If no data is available because
 184 the readback register contains invalid or erroneous data, -EIO is
185 returned. Note that you must fill in the 'id' member and the 'field'
186 member (to determine whether CC data from the first or second field
187 should be obtained).
188
189 s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
190 video input devices.
191
192 s_crystal_freq: sets the frequency of the crystal used to generate the
193 clocks in Hz. An extra flags field allows device specific configuration
194 regarding clock frequency dividers, etc. If not used, then set flags
195 to 0. If the frequency is not supported, then -EINVAL is returned.
196
197 g_input_status: get input status. Same as the status field in the v4l2_input
198 struct.
199
200 s_routing: see s_routing in audio_ops, except this version is for video
201 devices.
202 */
107struct v4l2_subdev_video_ops { 203struct v4l2_subdev_video_ops {
108 int (*s_routing)(struct v4l2_subdev *sd, const struct v4l2_routing *route); 204 int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
109 int (*s_crystal_freq)(struct v4l2_subdev *sd, struct v4l2_crystal_freq *freq); 205 int (*s_crystal_freq)(struct v4l2_subdev *sd, u32 freq, u32 flags);
110 int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line); 206 int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line);
111 int (*s_vbi_data)(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *vbi_data); 207 int (*s_vbi_data)(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *vbi_data);
112 int (*g_vbi_data)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *vbi_data); 208 int (*g_vbi_data)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *vbi_data);
113 int (*g_sliced_vbi_cap)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_cap *cap); 209 int (*g_sliced_vbi_cap)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_cap *cap);
114 int (*s_std_output)(struct v4l2_subdev *sd, v4l2_std_id std); 210 int (*s_std_output)(struct v4l2_subdev *sd, v4l2_std_id std);
211 int (*querystd)(struct v4l2_subdev *sd, v4l2_std_id *std);
212 int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
115 int (*s_stream)(struct v4l2_subdev *sd, int enable); 213 int (*s_stream)(struct v4l2_subdev *sd, int enable);
116 int (*s_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt); 214 int (*enum_fmt)(struct v4l2_subdev *sd, struct v4l2_fmtdesc *fmtdesc);
117 int (*g_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt); 215 int (*g_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt);
216 int (*try_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt);
217 int (*s_fmt)(struct v4l2_subdev *sd, struct v4l2_format *fmt);
218 int (*g_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
219 int (*s_parm)(struct v4l2_subdev *sd, struct v4l2_streamparm *param);
220 int (*enum_framesizes)(struct v4l2_subdev *sd, struct v4l2_frmsizeenum *fsize);
221 int (*enum_frameintervals)(struct v4l2_subdev *sd, struct v4l2_frmivalenum *fival);
118}; 222};
119 223
120struct v4l2_subdev_ops { 224struct v4l2_subdev_ops {
@@ -132,7 +236,7 @@ struct v4l2_subdev_ops {
132struct v4l2_subdev { 236struct v4l2_subdev {
133 struct list_head list; 237 struct list_head list;
134 struct module *owner; 238 struct module *owner;
135 struct v4l2_device *dev; 239 struct v4l2_device *v4l2_dev;
136 const struct v4l2_subdev_ops *ops; 240 const struct v4l2_subdev_ops *ops;
137 /* name must be unique */ 241 /* name must be unique */
138 char name[V4L2_SUBDEV_NAME_SIZE]; 242 char name[V4L2_SUBDEV_NAME_SIZE];
@@ -152,18 +256,6 @@ static inline void *v4l2_get_subdevdata(const struct v4l2_subdev *sd)
152 return sd->priv; 256 return sd->priv;
153} 257}
154 258
155/* Convert an ioctl-type command to the proper v4l2_subdev_ops function call.
156 This is used by subdev modules that can be called by both old-style ioctl
157 commands and through the v4l2_subdev_ops.
158
159 The ioctl API of the subdev driver can call this function to call the
160 right ops based on the ioctl cmd and arg.
161
162 Once all subdev drivers have been converted and all drivers no longer
163 use the ioctl interface, then this function can be removed.
164 */
165int v4l2_subdev_command(struct v4l2_subdev *sd, unsigned cmd, void *arg);
166
167static inline void v4l2_subdev_init(struct v4l2_subdev *sd, 259static inline void v4l2_subdev_init(struct v4l2_subdev *sd,
168 const struct v4l2_subdev_ops *ops) 260 const struct v4l2_subdev_ops *ops)
169{ 261{
@@ -171,7 +263,7 @@ static inline void v4l2_subdev_init(struct v4l2_subdev *sd,
171 /* ops->core MUST be set */ 263 /* ops->core MUST be set */
172 BUG_ON(!ops || !ops->core); 264 BUG_ON(!ops || !ops->core);
173 sd->ops = ops; 265 sd->ops = ops;
174 sd->dev = NULL; 266 sd->v4l2_dev = NULL;
175 sd->name[0] = '\0'; 267 sd->name[0] = '\0';
176 sd->grp_id = 0; 268 sd->grp_id = 0;
177 sd->priv = NULL; 269 sd->priv = NULL;
@@ -186,4 +278,9 @@ static inline void v4l2_subdev_init(struct v4l2_subdev *sd,
186 (!(sd) ? -ENODEV : (((sd) && (sd)->ops->o && (sd)->ops->o->f) ? \ 278 (!(sd) ? -ENODEV : (((sd) && (sd)->ops->o && (sd)->ops->o->f) ? \
187 (sd)->ops->o->f((sd) , ##args) : -ENOIOCTLCMD)) 279 (sd)->ops->o->f((sd) , ##args) : -ENOIOCTLCMD))
188 280
281/* Send a notification to v4l2_device. */
282#define v4l2_subdev_notify(sd, notification, arg) \
283 ((!(sd) || !(sd)->v4l2_dev || !(sd)->v4l2_dev->notify) ? -ENODEV : \
284 (sd)->v4l2_dev->notify((sd), (notification), (arg)))
285
189#endif 286#endif
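
For context, a minimal sketch (not part of the patch) of how a bridge driver reaches a subdev operation through the macro whose body appears above (v4l2_subdev_call in this header); the bridge helper itself is hypothetical:

/* Sketch only: hypothetical bridge helper; needs <media/v4l2-subdev.h>. */
static int bridge_set_std(struct v4l2_subdev *sd, v4l2_std_id norm)
{
	int err;

	/* s_std moved into the core ops in this patch, so the group is
	 * "core"; an unimplemented op yields -ENOIOCTLCMD. */
	err = v4l2_subdev_call(sd, core, s_std, norm);
	if (err == -ENOIOCTLCMD)
		err = 0;	/* treat "not implemented" as non-fatal here */
	return err;
}

The new v4l2_subdev_notify() macro above works the same way, except that it forwards to sd->v4l2_dev->notify and evaluates to -ENODEV when no notify callback is registered.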
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
index 874f1340d049..1c5946c44758 100644
--- a/include/media/videobuf-core.h
+++ b/include/media/videobuf-core.h
@@ -18,6 +18,7 @@
18 18
19#include <linux/poll.h> 19#include <linux/poll.h>
20#ifdef CONFIG_VIDEO_V4L1_COMPAT 20#ifdef CONFIG_VIDEO_V4L1_COMPAT
21#define __MIN_V4L1
21#include <linux/videodev.h> 22#include <linux/videodev.h>
22#endif 23#endif
23#include <linux/videodev2.h> 24#include <linux/videodev2.h>
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e54c76d75495..1b94b9bfe2dc 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -616,21 +616,6 @@ static inline int tcp_skb_mss(const struct sk_buff *skb)
616 return skb_shinfo(skb)->gso_size; 616 return skb_shinfo(skb)->gso_size;
617} 617}
618 618
619static inline void tcp_dec_pcount_approx_int(__u32 *count, const int decr)
620{
621 if (*count) {
622 *count -= decr;
623 if ((int)*count < 0)
624 *count = 0;
625 }
626}
627
628static inline void tcp_dec_pcount_approx(__u32 *count,
629 const struct sk_buff *skb)
630{
631 tcp_dec_pcount_approx_int(count, tcp_skb_pcount(skb));
632}
633
634/* Events passed to congestion control interface */ 619/* Events passed to congestion control interface */
635enum tcp_ca_event { 620enum tcp_ca_event {
636 CA_EVENT_TX_START, /* first transmit when no packets in flight */ 621 CA_EVENT_TX_START, /* first transmit when no packets in flight */
diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h
new file mode 100644
index 000000000000..0627a9ae6347
--- /dev/null
+++ b/include/scsi/fc/fc_fip.h
@@ -0,0 +1,237 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17#ifndef _FC_FIP_H_
18#define _FC_FIP_H_
19
20/*
21 * This version is based on:
22 * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf
23 */
24
25/*
26 * The FIP ethertype eventually goes in net/if_ether.h.
27 */
28#ifndef ETH_P_FIP
29#define ETH_P_FIP 0x8914 /* FIP Ethertype */
30#endif
31
32#define FIP_DEF_PRI 128 /* default selection priority */
33#define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */
34#define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */
35#define FIP_VN_KA_PERIOD 90000 /* required VN_port keep-alive period (mS) */
36#define FIP_FCF_FUZZ 100 /* random time added by FCF (mS) */
37
38/*
39 * Multicast MAC addresses. T11-adopted.
40 */
41#define FIP_ALL_FCOE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 0 })
42#define FIP_ALL_ENODE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 1 })
43#define FIP_ALL_FCF_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
44
45#define FIP_VER 1 /* version for fip_header */
46
47struct fip_header {
48 __u8 fip_ver; /* upper 4 bits are the version */
49 __u8 fip_resv1; /* reserved */
50 __be16 fip_op; /* operation code */
51 __u8 fip_resv2; /* reserved */
52 __u8 fip_subcode; /* lower 4 bits are sub-code */
53 __be16 fip_dl_len; /* length of descriptors in words */
54 __be16 fip_flags; /* header flags */
55} __attribute__((packed));
56
57#define FIP_VER_SHIFT 4
58#define FIP_VER_ENCAPS(v) ((v) << FIP_VER_SHIFT)
59#define FIP_VER_DECAPS(v) ((v) >> FIP_VER_SHIFT)
60#define FIP_BPW 4 /* bytes per word for lengths */
61
62/*
63 * fip_op.
64 */
65enum fip_opcode {
66 FIP_OP_DISC = 1, /* discovery, advertisement, etc. */
67 FIP_OP_LS = 2, /* Link Service request or reply */
68 FIP_OP_CTRL = 3, /* Keep Alive / Link Reset */
69 FIP_OP_VLAN = 4, /* VLAN discovery */
70 FIP_OP_VENDOR_MIN = 0xfff8, /* min vendor-specific opcode */
71 FIP_OP_VENDOR_MAX = 0xfffe, /* max vendor-specific opcode */
72};
73
74/*
75 * Subcodes for FIP_OP_DISC.
76 */
77enum fip_disc_subcode {
78 FIP_SC_SOL = 1, /* solicitation */
79 FIP_SC_ADV = 2, /* advertisement */
80};
81
82/*
83 * Subcodes for FIP_OP_LS.
84 */
85enum fip_trans_subcode {
86 FIP_SC_REQ = 1, /* request */
87 FIP_SC_REP = 2, /* reply */
88};
89
90/*
 91 * Subcodes for FIP_OP_CTRL (keep-alive / link reset).
92 */
93enum fip_reset_subcode {
94 FIP_SC_KEEP_ALIVE = 1, /* keep-alive from VN_Port */
95 FIP_SC_CLR_VLINK = 2, /* clear virtual link from VF_Port */
96};
97
98/*
99 * Subcodes for FIP_OP_VLAN.
100 */
101enum fip_vlan_subcode {
102 FIP_SC_VL_REQ = 1, /* request */
103 FIP_SC_VL_REP = 2, /* reply */
104};
105
106/*
107 * flags in header fip_flags.
108 */
109enum fip_flag {
110 FIP_FL_FPMA = 0x8000, /* supports FPMA fabric-provided MACs */
111 FIP_FL_SPMA = 0x4000, /* supports SPMA server-provided MACs */
112 FIP_FL_AVAIL = 0x0004, /* available for FLOGI/ELP */
113 FIP_FL_SOL = 0x0002, /* this is a solicited message */
114 FIP_FL_FPORT = 0x0001, /* sent from an F port */
115};
116
117/*
118 * Common descriptor header format.
119 */
120struct fip_desc {
121 __u8 fip_dtype; /* type - see below */
122 __u8 fip_dlen; /* length - in 32-bit words */
123};
124
125enum fip_desc_type {
126 FIP_DT_PRI = 1, /* priority for forwarder selection */
127 FIP_DT_MAC = 2, /* MAC address */
128 FIP_DT_MAP_OUI = 3, /* FC-MAP OUI */
129 FIP_DT_NAME = 4, /* switch name or node name */
130 FIP_DT_FAB = 5, /* fabric descriptor */
131 FIP_DT_FCOE_SIZE = 6, /* max FCoE frame size */
132 FIP_DT_FLOGI = 7, /* FLOGI request or response */
133 FIP_DT_FDISC = 8, /* FDISC request or response */
134 FIP_DT_LOGO = 9, /* LOGO request or response */
135 FIP_DT_ELP = 10, /* ELP request or response */
136 FIP_DT_VN_ID = 11, /* VN_Node Identifier */
137 FIP_DT_FKA = 12, /* advertisement keep-alive period */
138 FIP_DT_VENDOR = 13, /* vendor ID */
139 FIP_DT_VLAN = 14, /* vlan number */
140 FIP_DT_LIMIT, /* max defined desc_type + 1 */
141 FIP_DT_VENDOR_BASE = 128, /* first vendor-specific desc_type */
142};
143
144/*
145 * FIP_DT_PRI - priority descriptor.
146 */
147struct fip_pri_desc {
148 struct fip_desc fd_desc;
149 __u8 fd_resvd;
150 __u8 fd_pri; /* FCF priority: higher is better */
151} __attribute__((packed));
152
153/*
154 * FIP_DT_MAC - MAC address descriptor.
155 */
156struct fip_mac_desc {
157 struct fip_desc fd_desc;
158 __u8 fd_mac[ETH_ALEN];
159} __attribute__((packed));
160
161/*
162 * FIP_DT_MAP - descriptor.
163 */
164struct fip_map_desc {
165 struct fip_desc fd_desc;
166 __u8 fd_resvd[3];
167 __u8 fd_map[3];
168} __attribute__((packed));
169
170/*
171 * FIP_DT_NAME descriptor.
172 */
173struct fip_wwn_desc {
174 struct fip_desc fd_desc;
175 __u8 fd_resvd[2];
176 __be64 fd_wwn; /* 64-bit WWN, unaligned */
177} __attribute__((packed));
178
179/*
180 * FIP_DT_FAB descriptor.
181 */
182struct fip_fab_desc {
183 struct fip_desc fd_desc;
184 __be16 fd_vfid; /* virtual fabric ID */
185 __u8 fd_resvd;
186 __u8 fd_map[3]; /* FC-MAP value */
187 __be64 fd_wwn; /* fabric name, unaligned */
188} __attribute__((packed));
189
190/*
191 * FIP_DT_FCOE_SIZE descriptor.
192 */
193struct fip_size_desc {
194 struct fip_desc fd_desc;
195 __be16 fd_size;
196} __attribute__((packed));
197
198/*
199 * Descriptor that encapsulates an ELS or ILS frame.
200 * The encapsulated frame immediately follows this header, without
201 * SOF, EOF, or CRC.
202 */
203struct fip_encaps {
204 struct fip_desc fd_desc;
205 __u8 fd_resvd[2];
206} __attribute__((packed));
207
208/*
209 * FIP_DT_VN_ID - VN_Node Identifier descriptor.
210 */
211struct fip_vn_desc {
212 struct fip_desc fd_desc;
213 __u8 fd_mac[ETH_ALEN];
214 __u8 fd_resvd;
215 __u8 fd_fc_id[3];
216 __be64 fd_wwpn; /* port name, unaligned */
217} __attribute__((packed));
218
219/*
220 * FIP_DT_FKA - Advertisement keep-alive period.
221 */
222struct fip_fka_desc {
223 struct fip_desc fd_desc;
224 __u8 fd_resvd[2];
225 __be32 fd_fka_period; /* adv./keep-alive period in mS */
226} __attribute__((packed));
227
228/*
229 * FIP_DT_VENDOR descriptor.
230 */
231struct fip_vendor_desc {
232 struct fip_desc fd_desc;
233 __u8 fd_resvd[2];
234 __u8 fd_vendor_id[8];
235} __attribute__((packed));
236
237#endif /* _FC_FIP_H_ */
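
As a worked example of the frame layout defined above, a hedged sketch (not part of the patch) that fills in a FIP discovery/solicitation header; the caller and the descriptor list that would follow the header are hypothetical:

#include <linux/types.h>
#include <linux/if_ether.h>
#include <asm/byteorder.h>
#include <scsi/fc/fc_fip.h>

/* Sketch only: dlen_bytes is the size of the descriptor list that will
 * follow the header; it must be a multiple of FIP_BPW (4 bytes per word). */
static void fip_fill_sol_header(struct fip_header *fh, size_t dlen_bytes)
{
	fh->fip_ver = FIP_VER_ENCAPS(FIP_VER);	/* version in the upper 4 bits */
	fh->fip_resv1 = 0;
	fh->fip_op = cpu_to_be16(FIP_OP_DISC);	/* discovery operation */
	fh->fip_resv2 = 0;
	fh->fip_subcode = FIP_SC_SOL;		/* solicitation sub-code */
	fh->fip_dl_len = cpu_to_be16(dlen_bytes / FIP_BPW); /* in 32-bit words */
	fh->fip_flags = cpu_to_be16(FIP_FL_FPMA); /* e.g. advertise FPMA support */
}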
diff --git a/include/scsi/fc_transport_fcoe.h b/include/scsi/fc_transport_fcoe.h
deleted file mode 100644
index 8dca2af14ffc..000000000000
--- a/include/scsi/fc_transport_fcoe.h
+++ /dev/null
@@ -1,54 +0,0 @@
1#ifndef FC_TRANSPORT_FCOE_H
2#define FC_TRANSPORT_FCOE_H
3
4#include <linux/device.h>
5#include <linux/netdevice.h>
6#include <scsi/scsi_host.h>
7#include <scsi/libfc.h>
8
9/**
10 * struct fcoe_transport - FCoE transport struct for generic transport
11 * for Ethernet devices as well as pure HBAs
12 *
 13 * @name: name for this transport
14 * @bus: physical bus type (pci_bus_type)
15 * @driver: physical bus driver for network device
16 * @create: entry create function
17 * @destroy: exit destroy function
18 * @list: list of transports
19 */
20struct fcoe_transport {
21 char *name;
22 unsigned short vendor;
23 unsigned short device;
24 struct bus_type *bus;
25 struct device_driver *driver;
26 int (*create)(struct net_device *device);
27 int (*destroy)(struct net_device *device);
28 bool (*match)(struct net_device *device);
29 struct list_head list;
30 struct list_head devlist;
31 struct mutex devlock;
32};
33
34/**
35 * MODULE_ALIAS_FCOE_PCI
36 *
37 * some care must be taken with this, vendor and device MUST be a hex value
38 * preceded with 0x and with letters in lower case (0x12ab, not 0x12AB or 12AB)
39 */
40#define MODULE_ALIAS_FCOE_PCI(vendor, device) \
41 MODULE_ALIAS("fcoe-pci-" __stringify(vendor) "-" __stringify(device))
42
43/* exported funcs */
44int fcoe_transport_attach(struct net_device *netdev);
45int fcoe_transport_release(struct net_device *netdev);
46int fcoe_transport_register(struct fcoe_transport *t);
47int fcoe_transport_unregister(struct fcoe_transport *t);
48int fcoe_load_transport_driver(struct net_device *netdev);
49int __init fcoe_transport_init(void);
50int __exit fcoe_transport_exit(void);
51
 52/* fcoe_sw is the default transport */
53extern struct fcoe_transport fcoe_sw_transport;
54#endif /* FC_TRANSPORT_FCOE_H */
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index a70eafaad084..0303a6a098cc 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -22,6 +22,7 @@
22 22
23#include <linux/timer.h> 23#include <linux/timer.h>
24#include <linux/if.h> 24#include <linux/if.h>
25#include <linux/percpu.h>
25 26
26#include <scsi/scsi_transport.h> 27#include <scsi/scsi_transport.h>
27#include <scsi/scsi_transport_fc.h> 28#include <scsi/scsi_transport_fc.h>
@@ -661,7 +662,8 @@ struct fc_lport {
661 unsigned long boot_time; 662 unsigned long boot_time;
662 663
663 struct fc_host_statistics host_stats; 664 struct fc_host_statistics host_stats;
664 struct fcoe_dev_stats *dev_stats[NR_CPUS]; 665 struct fcoe_dev_stats *dev_stats;
666
665 u64 wwpn; 667 u64 wwpn;
666 u64 wwnn; 668 u64 wwnn;
667 u8 retry_count; 669 u8 retry_count;
@@ -694,11 +696,6 @@ struct fc_lport {
694/* 696/*
695 * FC_LPORT HELPER FUNCTIONS 697 * FC_LPORT HELPER FUNCTIONS
696 *****************************/ 698 *****************************/
697static inline void *lport_priv(const struct fc_lport *lp)
698{
699 return (void *)(lp + 1);
700}
701
702static inline int fc_lport_test_ready(struct fc_lport *lp) 699static inline int fc_lport_test_ready(struct fc_lport *lp)
703{ 700{
704 return lp->state == LPORT_ST_READY; 701 return lp->state == LPORT_ST_READY;
@@ -722,6 +719,42 @@ static inline void fc_lport_state_enter(struct fc_lport *lp,
722 lp->state = state; 719 lp->state = state;
723} 720}
724 721
722static inline int fc_lport_init_stats(struct fc_lport *lp)
723{
724 /* allocate per cpu stats block */
725 lp->dev_stats = alloc_percpu(struct fcoe_dev_stats);
726 if (!lp->dev_stats)
727 return -ENOMEM;
728 return 0;
729}
730
731static inline void fc_lport_free_stats(struct fc_lport *lp)
732{
733 free_percpu(lp->dev_stats);
734}
735
736static inline struct fcoe_dev_stats *fc_lport_get_stats(struct fc_lport *lp)
737{
738 return per_cpu_ptr(lp->dev_stats, smp_processor_id());
739}
740
741static inline void *lport_priv(const struct fc_lport *lp)
742{
743 return (void *)(lp + 1);
744}
745
746/**
747 * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport
 748 * @sht: ptr to the scsi host template
749 * @priv_size: size of private data after fc_lport
750 *
751 * Returns: ptr to Scsi_Host
752 */
753static inline struct Scsi_Host *
754libfc_host_alloc(struct scsi_host_template *sht, int priv_size)
755{
756 return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
757}
725 758
726/* 759/*
727 * LOCAL PORT LAYER 760 * LOCAL PORT LAYER
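
To show how the per-cpu statistics helpers and libfc_host_alloc() added above fit together, a hedged sketch follows; the driver template, the shost_priv() placement of the fc_lport and the error path mirror common libfc usage but are not part of this patch:

#include <scsi/scsi_host.h>
#include <scsi/libfc.h>

/* Sketch only: allocation path of a hypothetical libfc-based LLD. */
struct my_port_priv {			/* hypothetical per-port private data */
	int example_field;
};

static struct fc_lport *my_alloc_lport(struct scsi_host_template *sht)
{
	struct Scsi_Host *shost;
	struct fc_lport *lp;

	shost = libfc_host_alloc(sht, sizeof(struct my_port_priv));
	if (!shost)
		return NULL;
	lp = shost_priv(shost);		/* the fc_lport heads the host private area */

	if (fc_lport_init_stats(lp)) {	/* allocates the per-cpu stats block */
		scsi_host_put(shost);
		return NULL;
	}
	return lp;
}

static void my_update_stats(struct fc_lport *lp)
{
	/* fc_lport_get_stats() uses smp_processor_id(), so callers are
	 * expected to run with preemption disabled */
	struct fcoe_dev_stats *stats = fc_lport_get_stats(lp);

	/* ... bump the relevant counters in *stats here ... */
	(void)stats;
}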
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index c41f7d0c6efc..666cc131732e 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. 2 * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007-2008 Intel Corporation. All rights reserved.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -20,134 +21,144 @@
20#ifndef _LIBFCOE_H 21#ifndef _LIBFCOE_H
21#define _LIBFCOE_H 22#define _LIBFCOE_H
22 23
24#include <linux/etherdevice.h>
25#include <linux/if_ether.h>
23#include <linux/netdevice.h> 26#include <linux/netdevice.h>
24#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/workqueue.h>
25#include <scsi/fc/fc_fcoe.h> 29#include <scsi/fc/fc_fcoe.h>
26#include <scsi/libfc.h> 30#include <scsi/libfc.h>
27 31
28/* 32/*
29 * this percpu struct for fcoe 33 * FIP tunable parameters.
30 */ 34 */
31struct fcoe_percpu_s { 35#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */
32 int cpu; 36#define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */
33 struct task_struct *thread; 37#define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */
34 struct sk_buff_head fcoe_rx_list; 38
35 struct page *crc_eof_page; 39/**
36 int crc_eof_offset; 40 * enum fip_state - internal state of FCoE controller.
41 * @FIP_ST_DISABLED: controller has been disabled or not yet enabled.
42 * @FIP_ST_LINK_WAIT: the physical link is down or unusable.
43 * @FIP_ST_AUTO: determining whether to use FIP or non-FIP mode.
44 * @FIP_ST_NON_FIP: non-FIP mode selected.
45 * @FIP_ST_ENABLED: FIP mode selected.
46 */
47enum fip_state {
48 FIP_ST_DISABLED,
49 FIP_ST_LINK_WAIT,
50 FIP_ST_AUTO,
51 FIP_ST_NON_FIP,
52 FIP_ST_ENABLED,
37}; 53};
38 54
39/* 55/**
40 * the fcoe sw transport private data 56 * struct fcoe_ctlr - FCoE Controller and FIP state.
57 * @state: internal FIP state for network link and FIP or non-FIP mode.
58 * @lp: &fc_lport: libfc local port.
59 * @sel_fcf: currently selected FCF, or NULL.
60 * @fcfs: list of discovered FCFs.
61 * @fcf_count: number of discovered FCF entries.
62 * @sol_time: time when a multicast solicitation was last sent.
63 * @sel_time: time after which to select an FCF.
64 * @port_ka_time: time of next port keep-alive.
65 * @ctlr_ka_time: time of next controller keep-alive.
66 * @timer: timer struct used for all delayed events.
67 * @link_work: &work_struct for doing FCF selection.
68 * @recv_work: &work_struct for receiving FIP frames.
69 * @fip_recv_list: list of received FIP frames.
70 * @user_mfs: configured maximum FC frame size, including FC header.
71 * @flogi_oxid: exchange ID of most recent fabric login.
72 * @flogi_count: number of FLOGI attempts in AUTO mode.
73 * @link: current link status for libfc.
74 * @last_link: last link state reported to libfc.
75 * @map_dest: use the FC_MAP mode for destination MAC addresses.
76 * @dest_addr: MAC address of the selected FC forwarder.
77 * @ctl_src_addr: the native MAC address of our local port.
78 * @data_src_addr: the assigned MAC address for the local port after FLOGI.
79 * @send: LLD-supplied function to handle sending of FIP Ethernet frames.
80 * @update_mac: LLD-supplied function to handle changes to MAC addresses.
81 * @lock: lock protecting this structure.
82 *
83 * This structure is used by all FCoE drivers. It contains information
84 * needed by all FCoE low-level drivers (LLDs) as well as internal state
 85 * for FIP, and fields shared with the LLDs.
41 */ 86 */
42struct fcoe_softc { 87struct fcoe_ctlr {
43 struct list_head list; 88 enum fip_state state;
44 struct fc_lport *lp; 89 struct fc_lport *lp;
45 struct net_device *real_dev; 90 struct fcoe_fcf *sel_fcf;
46 struct net_device *phys_dev; /* device with ethtool_ops */ 91 struct list_head fcfs;
47 struct packet_type fcoe_packet_type; 92 u16 fcf_count;
48 struct sk_buff_head fcoe_pending_queue; 93 unsigned long sol_time;
49 u8 fcoe_pending_queue_active; 94 unsigned long sel_time;
50 95 unsigned long port_ka_time;
96 unsigned long ctlr_ka_time;
97 struct timer_list timer;
98 struct work_struct link_work;
99 struct work_struct recv_work;
100 struct sk_buff_head fip_recv_list;
101 u16 user_mfs;
102 u16 flogi_oxid;
103 u8 flogi_count;
104 u8 link;
105 u8 last_link;
106 u8 map_dest;
51 u8 dest_addr[ETH_ALEN]; 107 u8 dest_addr[ETH_ALEN];
52 u8 ctl_src_addr[ETH_ALEN]; 108 u8 ctl_src_addr[ETH_ALEN];
53 u8 data_src_addr[ETH_ALEN]; 109 u8 data_src_addr[ETH_ALEN];
54 /*
55 * fcoe protocol address learning related stuff
56 */
57 u16 flogi_oxid;
58 u8 flogi_progress;
59 u8 address_mode;
60};
61
62static inline struct net_device *fcoe_netdev(
63 const struct fc_lport *lp)
64{
65 return ((struct fcoe_softc *)lport_priv(lp))->real_dev;
66}
67
68static inline struct fcoe_hdr *skb_fcoe_header(const struct sk_buff *skb)
69{
70 return (struct fcoe_hdr *)skb_network_header(skb);
71}
72
73static inline int skb_fcoe_offset(const struct sk_buff *skb)
74{
75 return skb_network_offset(skb);
76}
77
78static inline struct fc_frame_header *skb_fc_header(const struct sk_buff *skb)
79{
80 return (struct fc_frame_header *)skb_transport_header(skb);
81}
82
83static inline int skb_fc_offset(const struct sk_buff *skb)
84{
85 return skb_transport_offset(skb);
86}
87 110
88static inline void skb_reset_fc_header(struct sk_buff *skb) 111 void (*send)(struct fcoe_ctlr *, struct sk_buff *);
89{ 112 void (*update_mac)(struct fcoe_ctlr *, u8 *old, u8 *new);
90 skb_reset_network_header(skb); 113 spinlock_t lock;
91 skb_set_transport_header(skb, skb_network_offset(skb) + 114};
92 sizeof(struct fcoe_hdr));
93}
94
95static inline bool skb_fc_is_data(const struct sk_buff *skb)
96{
97 return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_SOL_DATA;
98}
99
100static inline bool skb_fc_is_cmd(const struct sk_buff *skb)
101{
102 return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD;
103}
104 115
105static inline bool skb_fc_has_exthdr(const struct sk_buff *skb) 116/*
106{ 117 * struct fcoe_fcf - Fibre-Channel Forwarder.
107 return (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_VFTH) || 118 * @list: list linkage.
108 (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_IFRH) || 119 * @time: system time (jiffies) when an advertisement was last received.
109 (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_ENCH); 120 * @switch_name: WWN of switch from advertisement.
110} 121 * @fabric_name: WWN of fabric from advertisement.
122 * @fc_map: FC_MAP value from advertisement.
123 * @fcf_mac: Ethernet address of the FCF.
124 * @vfid: virtual fabric ID.
 125 * @pri: selection priority, smaller values are better.
126 * @flags: flags received from advertisement.
127 * @fka_period: keep-alive period, in jiffies.
128 *
129 * A Fibre-Channel Forwarder (FCF) is the entity on the Ethernet that
130 * passes FCoE frames on to an FC fabric. This structure represents
131 * one FCF from which advertisements have been received.
132 *
133 * When looking up an FCF, @switch_name, @fabric_name, @fc_map, @vfid, and
134 * @fcf_mac together form the lookup key.
135 */
136struct fcoe_fcf {
137 struct list_head list;
138 unsigned long time;
111 139
112static inline bool skb_fc_is_roff(const struct sk_buff *skb) 140 u64 switch_name;
113{ 141 u64 fabric_name;
114 return skb_fc_header(skb)->fh_f_ctl[2] & FC_FC_REL_OFF; 142 u32 fc_map;
115} 143 u16 vfid;
144 u8 fcf_mac[ETH_ALEN];
116 145
117static inline u16 skb_fc_oxid(const struct sk_buff *skb) 146 u8 pri;
118{ 147 u16 flags;
119 return be16_to_cpu(skb_fc_header(skb)->fh_ox_id); 148 u32 fka_period;
120} 149};
121 150
122static inline u16 skb_fc_rxid(const struct sk_buff *skb) 151/* FIP API functions */
123{ 152void fcoe_ctlr_init(struct fcoe_ctlr *);
124 return be16_to_cpu(skb_fc_header(skb)->fh_rx_id); 153void fcoe_ctlr_destroy(struct fcoe_ctlr *);
125} 154void fcoe_ctlr_link_up(struct fcoe_ctlr *);
155int fcoe_ctlr_link_down(struct fcoe_ctlr *);
156int fcoe_ctlr_els_send(struct fcoe_ctlr *, struct sk_buff *);
157void fcoe_ctlr_recv(struct fcoe_ctlr *, struct sk_buff *);
158int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_frame *fp, u8 *sa);
126 159
127/* libfcoe funcs */ 160/* libfcoe funcs */
128int fcoe_reset(struct Scsi_Host *shost); 161u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
129u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
130 unsigned int scheme, unsigned int port);
131
132u32 fcoe_fc_crc(struct fc_frame *fp);
133int fcoe_xmit(struct fc_lport *, struct fc_frame *);
134int fcoe_rcv(struct sk_buff *, struct net_device *,
135 struct packet_type *, struct net_device *);
136
137int fcoe_percpu_receive_thread(void *arg);
138void fcoe_clean_pending_queue(struct fc_lport *lp);
139void fcoe_percpu_clean(struct fc_lport *lp);
140void fcoe_watchdog(ulong vp);
141int fcoe_link_ok(struct fc_lport *lp);
142
143struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
144int fcoe_hostlist_add(const struct fc_lport *);
145int fcoe_hostlist_remove(const struct fc_lport *);
146
147struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *, int);
148int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *); 162int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *);
149 163
150/* fcoe sw hba */
151int __init fcoe_sw_init(void);
152int __exit fcoe_sw_exit(void);
153#endif /* _LIBFCOE_H */ 164#endif /* _LIBFCOE_H */
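
A hedged sketch of how a low-level driver might drive the new fcoe_ctlr interface; the netdev handling, the callbacks and their bodies are hypothetical placeholders, while the fcoe_ctlr fields and functions come from this header:

#include <linux/string.h>
#include <linux/netdevice.h>
#include <scsi/libfcoe.h>

static struct net_device *my_netdev;	/* hypothetical underlying Ethernet device */

static void my_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	skb->dev = my_netdev;		/* LLD picks the egress device */
	dev_queue_xmit(skb);		/* FIP frames go straight onto the wire */
}

static void my_update_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
{
	/* reprogram unicast filters for the newly granted MAC (omitted) */
}

static void my_ctlr_setup(struct fcoe_ctlr *fip, struct fc_lport *lp)
{
	fcoe_ctlr_init(fip);		/* set up internal FIP state */
	fip->lp = lp;
	fip->send = my_fip_send;
	fip->update_mac = my_update_mac;
	memcpy(fip->ctl_src_addr, my_netdev->dev_addr, ETH_ALEN);
	fcoe_ctlr_link_up(fip);		/* kick off solicitation/FCF selection */
}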
diff --git a/include/sound/tea575x-tuner.h b/include/sound/tea575x-tuner.h
index 426899e529c5..5718a02d3afb 100644
--- a/include/sound/tea575x-tuner.h
+++ b/include/sound/tea575x-tuner.h
@@ -22,8 +22,9 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/videodev.h> 25#include <linux/videodev2.h>
26#include <media/v4l2-dev.h> 26#include <media/v4l2-dev.h>
27#include <media/v4l2-ioctl.h>
27 28
28struct snd_tea575x; 29struct snd_tea575x;
29 30
@@ -35,11 +36,10 @@ struct snd_tea575x_ops {
35 36
36struct snd_tea575x { 37struct snd_tea575x {
37 struct snd_card *card; 38 struct snd_card *card;
38 struct video_device vd; /* video device */ 39 struct video_device *vd; /* video device */
39 struct v4l2_file_operations fops;
40 int dev_nr; /* requested device number + 1 */ 40 int dev_nr; /* requested device number + 1 */
41 int vd_registered; /* video device is registered */
42 int tea5759; /* 5759 chip is present */ 41 int tea5759; /* 5759 chip is present */
42 int mute; /* Device is muted? */
43 unsigned int freq_fixup; /* crystal onboard */ 43 unsigned int freq_fixup; /* crystal onboard */
44 unsigned int val; /* hw value */ 44 unsigned int val; /* hw value */
45 unsigned long freq; /* frequency */ 45 unsigned long freq; /* frequency */
diff --git a/include/trace/block.h b/include/trace/block.h
index 25c6a1fd5b77..25b7068b819e 100644
--- a/include/trace/block.h
+++ b/include/trace/block.h
@@ -5,72 +5,72 @@
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(block_rq_abort, 7DECLARE_TRACE(block_rq_abort,
8 TPPROTO(struct request_queue *q, struct request *rq), 8 TP_PROTO(struct request_queue *q, struct request *rq),
9 TPARGS(q, rq)); 9 TP_ARGS(q, rq));
10 10
11DECLARE_TRACE(block_rq_insert, 11DECLARE_TRACE(block_rq_insert,
12 TPPROTO(struct request_queue *q, struct request *rq), 12 TP_PROTO(struct request_queue *q, struct request *rq),
13 TPARGS(q, rq)); 13 TP_ARGS(q, rq));
14 14
15DECLARE_TRACE(block_rq_issue, 15DECLARE_TRACE(block_rq_issue,
16 TPPROTO(struct request_queue *q, struct request *rq), 16 TP_PROTO(struct request_queue *q, struct request *rq),
17 TPARGS(q, rq)); 17 TP_ARGS(q, rq));
18 18
19DECLARE_TRACE(block_rq_requeue, 19DECLARE_TRACE(block_rq_requeue,
20 TPPROTO(struct request_queue *q, struct request *rq), 20 TP_PROTO(struct request_queue *q, struct request *rq),
21 TPARGS(q, rq)); 21 TP_ARGS(q, rq));
22 22
23DECLARE_TRACE(block_rq_complete, 23DECLARE_TRACE(block_rq_complete,
24 TPPROTO(struct request_queue *q, struct request *rq), 24 TP_PROTO(struct request_queue *q, struct request *rq),
25 TPARGS(q, rq)); 25 TP_ARGS(q, rq));
26 26
27DECLARE_TRACE(block_bio_bounce, 27DECLARE_TRACE(block_bio_bounce,
28 TPPROTO(struct request_queue *q, struct bio *bio), 28 TP_PROTO(struct request_queue *q, struct bio *bio),
29 TPARGS(q, bio)); 29 TP_ARGS(q, bio));
30 30
31DECLARE_TRACE(block_bio_complete, 31DECLARE_TRACE(block_bio_complete,
32 TPPROTO(struct request_queue *q, struct bio *bio), 32 TP_PROTO(struct request_queue *q, struct bio *bio),
33 TPARGS(q, bio)); 33 TP_ARGS(q, bio));
34 34
35DECLARE_TRACE(block_bio_backmerge, 35DECLARE_TRACE(block_bio_backmerge,
36 TPPROTO(struct request_queue *q, struct bio *bio), 36 TP_PROTO(struct request_queue *q, struct bio *bio),
37 TPARGS(q, bio)); 37 TP_ARGS(q, bio));
38 38
39DECLARE_TRACE(block_bio_frontmerge, 39DECLARE_TRACE(block_bio_frontmerge,
40 TPPROTO(struct request_queue *q, struct bio *bio), 40 TP_PROTO(struct request_queue *q, struct bio *bio),
41 TPARGS(q, bio)); 41 TP_ARGS(q, bio));
42 42
43DECLARE_TRACE(block_bio_queue, 43DECLARE_TRACE(block_bio_queue,
44 TPPROTO(struct request_queue *q, struct bio *bio), 44 TP_PROTO(struct request_queue *q, struct bio *bio),
45 TPARGS(q, bio)); 45 TP_ARGS(q, bio));
46 46
47DECLARE_TRACE(block_getrq, 47DECLARE_TRACE(block_getrq,
48 TPPROTO(struct request_queue *q, struct bio *bio, int rw), 48 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
49 TPARGS(q, bio, rw)); 49 TP_ARGS(q, bio, rw));
50 50
51DECLARE_TRACE(block_sleeprq, 51DECLARE_TRACE(block_sleeprq,
52 TPPROTO(struct request_queue *q, struct bio *bio, int rw), 52 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
53 TPARGS(q, bio, rw)); 53 TP_ARGS(q, bio, rw));
54 54
55DECLARE_TRACE(block_plug, 55DECLARE_TRACE(block_plug,
56 TPPROTO(struct request_queue *q), 56 TP_PROTO(struct request_queue *q),
57 TPARGS(q)); 57 TP_ARGS(q));
58 58
59DECLARE_TRACE(block_unplug_timer, 59DECLARE_TRACE(block_unplug_timer,
60 TPPROTO(struct request_queue *q), 60 TP_PROTO(struct request_queue *q),
61 TPARGS(q)); 61 TP_ARGS(q));
62 62
63DECLARE_TRACE(block_unplug_io, 63DECLARE_TRACE(block_unplug_io,
64 TPPROTO(struct request_queue *q), 64 TP_PROTO(struct request_queue *q),
65 TPARGS(q)); 65 TP_ARGS(q));
66 66
67DECLARE_TRACE(block_split, 67DECLARE_TRACE(block_split,
68 TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), 68 TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
69 TPARGS(q, bio, pdu)); 69 TP_ARGS(q, bio, pdu));
70 70
71DECLARE_TRACE(block_remap, 71DECLARE_TRACE(block_remap,
72 TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev, 72 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
73 sector_t from, sector_t to), 73 sector_t from, sector_t to),
74 TPARGS(q, bio, dev, from, to)); 74 TP_ARGS(q, bio, dev, from, to));
75 75
76#endif 76#endif
diff --git a/include/trace/irq.h b/include/trace/irq.h
new file mode 100644
index 000000000000..ff5d4495dc37
--- /dev/null
+++ b/include/trace/irq.h
@@ -0,0 +1,9 @@
1#ifndef _TRACE_IRQ_H
2#define _TRACE_IRQ_H
3
4#include <linux/interrupt.h>
5#include <linux/tracepoint.h>
6
7#include <trace/irq_event_types.h>
8
9#endif
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h
new file mode 100644
index 000000000000..85964ebd47ec
--- /dev/null
+++ b/include/trace/irq_event_types.h
@@ -0,0 +1,55 @@
1
2/* use <trace/irq.h> instead */
3#ifndef TRACE_FORMAT
4# error Do not include this file directly.
5# error Unless you know what you are doing.
6#endif
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM irq
10
11/*
12 * Tracepoint for entry of interrupt handler:
13 */
14TRACE_FORMAT(irq_handler_entry,
15 TP_PROTO(int irq, struct irqaction *action),
16 TP_ARGS(irq, action),
17 TP_FMT("irq=%d handler=%s", irq, action->name)
18 );
19
20/*
21 * Tracepoint for return of an interrupt handler:
22 */
23TRACE_EVENT(irq_handler_exit,
24
25 TP_PROTO(int irq, struct irqaction *action, int ret),
26
27 TP_ARGS(irq, action, ret),
28
29 TP_STRUCT__entry(
30 __field( int, irq )
31 __field( int, ret )
32 ),
33
34 TP_fast_assign(
35 __entry->irq = irq;
36 __entry->ret = ret;
37 ),
38
39 TP_printk("irq=%d return=%s",
40 __entry->irq, __entry->ret ? "handled" : "unhandled")
41);
42
43TRACE_FORMAT(softirq_entry,
44 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
45 TP_ARGS(h, vec),
46 TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
47 );
48
49TRACE_FORMAT(softirq_exit,
50 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
51 TP_ARGS(h, vec),
52 TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
53 );
54
55#undef TRACE_SYSTEM
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
new file mode 100644
index 000000000000..28ee69f9cd46
--- /dev/null
+++ b/include/trace/kmemtrace.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2008 Eduard - Gabriel Munteanu
3 *
4 * This file is released under GPL version 2.
5 */
6
7#ifndef _LINUX_KMEMTRACE_H
8#define _LINUX_KMEMTRACE_H
9
10#ifdef __KERNEL__
11
12#include <linux/tracepoint.h>
13#include <linux/types.h>
14
15#ifdef CONFIG_KMEMTRACE
16extern void kmemtrace_init(void);
17#else
18static inline void kmemtrace_init(void)
19{
20}
21#endif
22
23DECLARE_TRACE(kmalloc,
24 TP_PROTO(unsigned long call_site,
25 const void *ptr,
26 size_t bytes_req,
27 size_t bytes_alloc,
28 gfp_t gfp_flags),
29 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
30DECLARE_TRACE(kmem_cache_alloc,
31 TP_PROTO(unsigned long call_site,
32 const void *ptr,
33 size_t bytes_req,
34 size_t bytes_alloc,
35 gfp_t gfp_flags),
36 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
37DECLARE_TRACE(kmalloc_node,
38 TP_PROTO(unsigned long call_site,
39 const void *ptr,
40 size_t bytes_req,
41 size_t bytes_alloc,
42 gfp_t gfp_flags,
43 int node),
44 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
45DECLARE_TRACE(kmem_cache_alloc_node,
46 TP_PROTO(unsigned long call_site,
47 const void *ptr,
48 size_t bytes_req,
49 size_t bytes_alloc,
50 gfp_t gfp_flags,
51 int node),
52 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
53DECLARE_TRACE(kfree,
54 TP_PROTO(unsigned long call_site, const void *ptr),
55 TP_ARGS(call_site, ptr));
56DECLARE_TRACE(kmem_cache_free,
57 TP_PROTO(unsigned long call_site, const void *ptr),
58 TP_ARGS(call_site, ptr));
59
60#endif /* __KERNEL__ */
61
62#endif /* _LINUX_KMEMTRACE_H */
63
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h
new file mode 100644
index 000000000000..5ca67df87f2a
--- /dev/null
+++ b/include/trace/lockdep.h
@@ -0,0 +1,9 @@
1#ifndef _TRACE_LOCKDEP_H
2#define _TRACE_LOCKDEP_H
3
4#include <linux/lockdep.h>
5#include <linux/tracepoint.h>
6
7#include <trace/lockdep_event_types.h>
8
9#endif
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h
new file mode 100644
index 000000000000..adccfcd2ec8f
--- /dev/null
+++ b/include/trace/lockdep_event_types.h
@@ -0,0 +1,44 @@
1
2#ifndef TRACE_FORMAT
3# error Do not include this file directly.
4# error Unless you know what you are doing.
5#endif
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM lock
9
10#ifdef CONFIG_LOCKDEP
11
12TRACE_FORMAT(lock_acquire,
13 TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
14 int trylock, int read, int check,
15 struct lockdep_map *next_lock, unsigned long ip),
16 TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
17 TP_FMT("%s%s%s", trylock ? "try " : "",
18 read ? "read " : "", lock->name)
19 );
20
21TRACE_FORMAT(lock_release,
22 TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
23 TP_ARGS(lock, nested, ip),
24 TP_FMT("%s", lock->name)
25 );
26
27#ifdef CONFIG_LOCK_STAT
28
29TRACE_FORMAT(lock_contended,
30 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
31 TP_ARGS(lock, ip),
32 TP_FMT("%s", lock->name)
33 );
34
35TRACE_FORMAT(lock_acquired,
36 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
37 TP_ARGS(lock, ip),
38 TP_FMT("%s", lock->name)
39 );
40
41#endif
42#endif
43
44#undef TRACE_SYSTEM
diff --git a/include/trace/power.h b/include/trace/power.h
new file mode 100644
index 000000000000..ef204666e983
--- /dev/null
+++ b/include/trace/power.h
@@ -0,0 +1,32 @@
1#ifndef _TRACE_POWER_H
2#define _TRACE_POWER_H
3
4#include <linux/ktime.h>
5#include <linux/tracepoint.h>
6
7enum {
8 POWER_NONE = 0,
9 POWER_CSTATE = 1,
10 POWER_PSTATE = 2,
11};
12
13struct power_trace {
14 ktime_t stamp;
15 ktime_t end;
16 int type;
17 int state;
18};
19
20DECLARE_TRACE(power_start,
21 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
22 TP_ARGS(it, type, state));
23
24DECLARE_TRACE(power_mark,
25 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
26 TP_ARGS(it, type, state));
27
28DECLARE_TRACE(power_end,
29 TP_PROTO(struct power_trace *it),
30 TP_ARGS(it));
31
32#endif /* _TRACE_POWER_H */
diff --git a/include/trace/sched.h b/include/trace/sched.h
index 0d81098ee9fc..4e372a1a29bf 100644
--- a/include/trace/sched.h
+++ b/include/trace/sched.h
@@ -4,53 +4,6 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(sched_kthread_stop, 7#include <trace/sched_event_types.h>
8 TPPROTO(struct task_struct *t),
9 TPARGS(t));
10
11DECLARE_TRACE(sched_kthread_stop_ret,
12 TPPROTO(int ret),
13 TPARGS(ret));
14
15DECLARE_TRACE(sched_wait_task,
16 TPPROTO(struct rq *rq, struct task_struct *p),
17 TPARGS(rq, p));
18
19DECLARE_TRACE(sched_wakeup,
20 TPPROTO(struct rq *rq, struct task_struct *p, int success),
21 TPARGS(rq, p, success));
22
23DECLARE_TRACE(sched_wakeup_new,
24 TPPROTO(struct rq *rq, struct task_struct *p, int success),
25 TPARGS(rq, p, success));
26
27DECLARE_TRACE(sched_switch,
28 TPPROTO(struct rq *rq, struct task_struct *prev,
29 struct task_struct *next),
30 TPARGS(rq, prev, next));
31
32DECLARE_TRACE(sched_migrate_task,
33 TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
34 TPARGS(p, orig_cpu, dest_cpu));
35
36DECLARE_TRACE(sched_process_free,
37 TPPROTO(struct task_struct *p),
38 TPARGS(p));
39
40DECLARE_TRACE(sched_process_exit,
41 TPPROTO(struct task_struct *p),
42 TPARGS(p));
43
44DECLARE_TRACE(sched_process_wait,
45 TPPROTO(struct pid *pid),
46 TPARGS(pid));
47
48DECLARE_TRACE(sched_process_fork,
49 TPPROTO(struct task_struct *parent, struct task_struct *child),
50 TPARGS(parent, child));
51
52DECLARE_TRACE(sched_signal_send,
53 TPPROTO(int sig, struct task_struct *p),
54 TPARGS(sig, p));
55 8
56#endif 9#endif
diff --git a/include/trace/sched_event_types.h b/include/trace/sched_event_types.h
new file mode 100644
index 000000000000..63547dc1125f
--- /dev/null
+++ b/include/trace/sched_event_types.h
@@ -0,0 +1,337 @@
1
2/* use <trace/sched.h> instead */
3#ifndef TRACE_EVENT
4# error Do not include this file directly.
5# error Unless you know what you are doing.
6#endif
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM sched
10
11/*
12 * Tracepoint for calling kthread_stop, performed to end a kthread:
13 */
14TRACE_EVENT(sched_kthread_stop,
15
16 TP_PROTO(struct task_struct *t),
17
18 TP_ARGS(t),
19
20 TP_STRUCT__entry(
21 __array( char, comm, TASK_COMM_LEN )
22 __field( pid_t, pid )
23 ),
24
25 TP_fast_assign(
26 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
27 __entry->pid = t->pid;
28 ),
29
30 TP_printk("task %s:%d", __entry->comm, __entry->pid)
31);
32
33/*
34 * Tracepoint for the return value of the kthread stopping:
35 */
36TRACE_EVENT(sched_kthread_stop_ret,
37
38 TP_PROTO(int ret),
39
40 TP_ARGS(ret),
41
42 TP_STRUCT__entry(
43 __field( int, ret )
44 ),
45
46 TP_fast_assign(
47 __entry->ret = ret;
48 ),
49
50 TP_printk("ret %d", __entry->ret)
51);
52
53/*
54 * Tracepoint for waiting on task to unschedule:
55 *
56 * (NOTE: the 'rq' argument is not used by generic trace events,
57 * but used by the latency tracer plugin. )
58 */
59TRACE_EVENT(sched_wait_task,
60
61 TP_PROTO(struct rq *rq, struct task_struct *p),
62
63 TP_ARGS(rq, p),
64
65 TP_STRUCT__entry(
66 __array( char, comm, TASK_COMM_LEN )
67 __field( pid_t, pid )
68 __field( int, prio )
69 ),
70
71 TP_fast_assign(
72 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
73 __entry->pid = p->pid;
74 __entry->prio = p->prio;
75 ),
76
77 TP_printk("task %s:%d [%d]",
78 __entry->comm, __entry->pid, __entry->prio)
79);
80
81/*
82 * Tracepoint for waking up a task:
83 *
84 * (NOTE: the 'rq' argument is not used by generic trace events,
85 * but used by the latency tracer plugin. )
86 */
87TRACE_EVENT(sched_wakeup,
88
89 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
90
91 TP_ARGS(rq, p, success),
92
93 TP_STRUCT__entry(
94 __array( char, comm, TASK_COMM_LEN )
95 __field( pid_t, pid )
96 __field( int, prio )
97 __field( int, success )
98 ),
99
100 TP_fast_assign(
101 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
102 __entry->pid = p->pid;
103 __entry->prio = p->prio;
104 __entry->success = success;
105 ),
106
107 TP_printk("task %s:%d [%d] success=%d",
108 __entry->comm, __entry->pid, __entry->prio,
109 __entry->success)
110);
111
112/*
113 * Tracepoint for waking up a new task:
114 *
115 * (NOTE: the 'rq' argument is not used by generic trace events,
116 * but used by the latency tracer plugin. )
117 */
118TRACE_EVENT(sched_wakeup_new,
119
120 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
121
122 TP_ARGS(rq, p, success),
123
124 TP_STRUCT__entry(
125 __array( char, comm, TASK_COMM_LEN )
126 __field( pid_t, pid )
127 __field( int, prio )
128 __field( int, success )
129 ),
130
131 TP_fast_assign(
132 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
133 __entry->pid = p->pid;
134 __entry->prio = p->prio;
135 __entry->success = success;
136 ),
137
138 TP_printk("task %s:%d [%d] success=%d",
139 __entry->comm, __entry->pid, __entry->prio,
140 __entry->success)
141);
142
143/*
144 * Tracepoint for task switches, performed by the scheduler:
145 *
146 * (NOTE: the 'rq' argument is not used by generic trace events,
147 * but used by the latency tracer plugin. )
148 */
149TRACE_EVENT(sched_switch,
150
151 TP_PROTO(struct rq *rq, struct task_struct *prev,
152 struct task_struct *next),
153
154 TP_ARGS(rq, prev, next),
155
156 TP_STRUCT__entry(
157 __array( char, prev_comm, TASK_COMM_LEN )
158 __field( pid_t, prev_pid )
159 __field( int, prev_prio )
160 __array( char, next_comm, TASK_COMM_LEN )
161 __field( pid_t, next_pid )
162 __field( int, next_prio )
163 ),
164
165 TP_fast_assign(
166 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
167 __entry->prev_pid = prev->pid;
168 __entry->prev_prio = prev->prio;
169 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
170 __entry->next_pid = next->pid;
171 __entry->next_prio = next->prio;
172 ),
173
174 TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
175 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
176 __entry->next_comm, __entry->next_pid, __entry->next_prio)
177);
178
179/*
180 * Tracepoint for a task being migrated:
181 */
182TRACE_EVENT(sched_migrate_task,
183
184 TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
185
186 TP_ARGS(p, orig_cpu, dest_cpu),
187
188 TP_STRUCT__entry(
189 __array( char, comm, TASK_COMM_LEN )
190 __field( pid_t, pid )
191 __field( int, prio )
192 __field( int, orig_cpu )
193 __field( int, dest_cpu )
194 ),
195
196 TP_fast_assign(
197 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
198 __entry->pid = p->pid;
199 __entry->prio = p->prio;
200 __entry->orig_cpu = orig_cpu;
201 __entry->dest_cpu = dest_cpu;
202 ),
203
204 TP_printk("task %s:%d [%d] from: %d to: %d",
205 __entry->comm, __entry->pid, __entry->prio,
206 __entry->orig_cpu, __entry->dest_cpu)
207);
208
209/*
210 * Tracepoint for freeing a task:
211 */
212TRACE_EVENT(sched_process_free,
213
214 TP_PROTO(struct task_struct *p),
215
216 TP_ARGS(p),
217
218 TP_STRUCT__entry(
219 __array( char, comm, TASK_COMM_LEN )
220 __field( pid_t, pid )
221 __field( int, prio )
222 ),
223
224 TP_fast_assign(
225 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
226 __entry->pid = p->pid;
227 __entry->prio = p->prio;
228 ),
229
230 TP_printk("task %s:%d [%d]",
231 __entry->comm, __entry->pid, __entry->prio)
232);
233
234/*
235 * Tracepoint for a task exiting:
236 */
237TRACE_EVENT(sched_process_exit,
238
239 TP_PROTO(struct task_struct *p),
240
241 TP_ARGS(p),
242
243 TP_STRUCT__entry(
244 __array( char, comm, TASK_COMM_LEN )
245 __field( pid_t, pid )
246 __field( int, prio )
247 ),
248
249 TP_fast_assign(
250 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
251 __entry->pid = p->pid;
252 __entry->prio = p->prio;
253 ),
254
255 TP_printk("task %s:%d [%d]",
256 __entry->comm, __entry->pid, __entry->prio)
257);
258
259/*
260 * Tracepoint for a waiting task:
261 */
262TRACE_EVENT(sched_process_wait,
263
264 TP_PROTO(struct pid *pid),
265
266 TP_ARGS(pid),
267
268 TP_STRUCT__entry(
269 __array( char, comm, TASK_COMM_LEN )
270 __field( pid_t, pid )
271 __field( int, prio )
272 ),
273
274 TP_fast_assign(
275 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
276 __entry->pid = pid_nr(pid);
277 __entry->prio = current->prio;
278 ),
279
280 TP_printk("task %s:%d [%d]",
281 __entry->comm, __entry->pid, __entry->prio)
282);
283
284/*
285 * Tracepoint for do_fork:
286 */
287TRACE_EVENT(sched_process_fork,
288
289 TP_PROTO(struct task_struct *parent, struct task_struct *child),
290
291 TP_ARGS(parent, child),
292
293 TP_STRUCT__entry(
294 __array( char, parent_comm, TASK_COMM_LEN )
295 __field( pid_t, parent_pid )
296 __array( char, child_comm, TASK_COMM_LEN )
297 __field( pid_t, child_pid )
298 ),
299
300 TP_fast_assign(
301 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
302 __entry->parent_pid = parent->pid;
303 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
304 __entry->child_pid = child->pid;
305 ),
306
307 TP_printk("parent %s:%d child %s:%d",
308 __entry->parent_comm, __entry->parent_pid,
309 __entry->child_comm, __entry->child_pid)
310);
311
312/*
313 * Tracepoint for sending a signal:
314 */
315TRACE_EVENT(sched_signal_send,
316
317 TP_PROTO(int sig, struct task_struct *p),
318
319 TP_ARGS(sig, p),
320
321 TP_STRUCT__entry(
322 __field( int, sig )
323 __array( char, comm, TASK_COMM_LEN )
324 __field( pid_t, pid )
325 ),
326
327 TP_fast_assign(
328 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
329 __entry->pid = p->pid;
330 __entry->sig = sig;
331 ),
332
333 TP_printk("sig: %d task %s:%d",
334 __entry->sig, __entry->comm, __entry->pid)
335);
336
337#undef TRACE_SYSTEM
diff --git a/include/trace/skb.h b/include/trace/skb.h
index a96610f92f69..b66206d9be72 100644
--- a/include/trace/skb.h
+++ b/include/trace/skb.h
@@ -5,7 +5,7 @@
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(kfree_skb, 7DECLARE_TRACE(kfree_skb,
8 TPPROTO(struct sk_buff *skb, void *location), 8 TP_PROTO(struct sk_buff *skb, void *location),
9 TPARGS(skb, location)); 9 TP_ARGS(skb, location));
10 10
11#endif 11#endif
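
For reference, a small sketch of a module attaching a probe to the kfree_skb tracepoint declared above with the renamed TP_PROTO()/TP_ARGS() macros; register_trace_kfree_skb()/unregister_trace_kfree_skb() are the helpers generated by DECLARE_TRACE(), and the probe body is hypothetical:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <trace/skb.h>

/* Sketch only: the probe signature must match TP_PROTO() above. */
static void my_kfree_skb_probe(struct sk_buff *skb, void *location)
{
	pr_debug("skb %p freed from %pS\n", skb, location);
}

static int __init my_probe_init(void)
{
	return register_trace_kfree_skb(my_kfree_skb_probe);
}

static void __exit my_probe_exit(void)
{
	unregister_trace_kfree_skb(my_kfree_skb_probe);
	tracepoint_synchronize_unregister();	/* wait for in-flight probes */
}

module_init(my_probe_init);
module_exit(my_probe_exit);
MODULE_LICENSE("GPL");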
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h
new file mode 100644
index 000000000000..df56f5694be6
--- /dev/null
+++ b/include/trace/trace_event_types.h
@@ -0,0 +1,5 @@
1/* trace/<type>_event_types.h here */
2
3#include <trace/sched_event_types.h>
4#include <trace/irq_event_types.h>
5#include <trace/lockdep_event_types.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
new file mode 100644
index 000000000000..fd13750ca4ba
--- /dev/null
+++ b/include/trace/trace_events.h
@@ -0,0 +1,5 @@
1/* trace/<type>.h here */
2
3#include <trace/sched.h>
4#include <trace/irq.h>
5#include <trace/lockdep.h>
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h
new file mode 100644
index 000000000000..7626523deeba
--- /dev/null
+++ b/include/trace/workqueue.h
@@ -0,0 +1,25 @@
1#ifndef __TRACE_WORKQUEUE_H
2#define __TRACE_WORKQUEUE_H
3
4#include <linux/tracepoint.h>
5#include <linux/workqueue.h>
6#include <linux/sched.h>
7
8DECLARE_TRACE(workqueue_insertion,
9 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
10 TP_ARGS(wq_thread, work));
11
12DECLARE_TRACE(workqueue_execution,
13 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
14 TP_ARGS(wq_thread, work));
15
16/* Trace the creation of one workqueue thread on a cpu */
17DECLARE_TRACE(workqueue_creation,
18 TP_PROTO(struct task_struct *wq_thread, int cpu),
19 TP_ARGS(wq_thread, cpu));
20
21DECLARE_TRACE(workqueue_destruction,
22 TP_PROTO(struct task_struct *wq_thread),
23 TP_ARGS(wq_thread));
24
25#endif /* __TRACE_WORKQUEUE_H */
diff --git a/include/video/aty128.h b/include/video/aty128.h
index 51ac69f05bdc..f0851e3bb7cc 100644
--- a/include/video/aty128.h
+++ b/include/video/aty128.h
@@ -415,7 +415,7 @@
415#define PWR_MGT_SLOWDOWN_MCLK 0x00002000 415#define PWR_MGT_SLOWDOWN_MCLK 0x00002000
416 416
417#define PMI_PMSCR_REG 0x60 417#define PMI_PMSCR_REG 0x60
418 418
419/* used by ATI bug fix for hardware ROM */ 419/* used by ATI bug fix for hardware ROM */
420#define RAGE128_MPP_TB_CONFIG 0x01c0 420#define RAGE128_MPP_TB_CONFIG 0x01c0
421 421
diff --git a/include/video/cirrus.h b/include/video/cirrus.h
index b2776b6c8679..9a5e9ee30782 100644
--- a/include/video/cirrus.h
+++ b/include/video/cirrus.h
@@ -32,7 +32,6 @@
32#define CL_VSSM2 0x3c3 /* Motherboard Sleep */ 32#define CL_VSSM2 0x3c3 /* Motherboard Sleep */
33 33
34/*** VGA Sequencer Registers ***/ 34/*** VGA Sequencer Registers ***/
35#define CL_SEQR0 0x0 /* Reset */
36/* the following are from the "extension registers" group */ 35/* the following are from the "extension registers" group */
37#define CL_SEQR6 0x6 /* Unlock ALL Extensions */ 36#define CL_SEQR6 0x6 /* Unlock ALL Extensions */
38#define CL_SEQR7 0x7 /* Extended Sequencer Mode */ 37#define CL_SEQR7 0x7 /* Extended Sequencer Mode */
@@ -71,6 +70,7 @@
71#define CL_CRT1B 0x1b /* Extended Display Controls */ 70#define CL_CRT1B 0x1b /* Extended Display Controls */
72#define CL_CRT1C 0x1c /* Sync adjust and genlock register */ 71#define CL_CRT1C 0x1c /* Sync adjust and genlock register */
73#define CL_CRT1D 0x1d /* Overlay Extended Control register */ 72#define CL_CRT1D 0x1d /* Overlay Extended Control register */
73#define CL_CRT1E 0x1e /* Another overflow register */
74#define CL_CRT25 0x25 /* Part Status Register */ 74#define CL_CRT25 0x25 /* Part Status Register */
75#define CL_CRT27 0x27 /* ID Register */ 75#define CL_CRT27 0x27 /* ID Register */
76#define CL_CRT51 0x51 /* P4 disable "flicker fixer" */ 76#define CL_CRT51 0x51 /* P4 disable "flicker fixer" */
diff --git a/include/video/newport.h b/include/video/newport.h
index 1f5ebeaa818f..001b935e71c4 100644
--- a/include/video/newport.h
+++ b/include/video/newport.h
@@ -453,7 +453,7 @@ static __inline__ int newport_wait(struct newport_regs *regs)
453{ 453{
454 int t = BUSY_TIMEOUT; 454 int t = BUSY_TIMEOUT;
455 455
456 while (t--) 456 while (--t)
457 if (!(regs->cset.status & NPORT_STAT_GBUSY)) 457 if (!(regs->cset.status & NPORT_STAT_GBUSY))
458 break; 458 break;
459 return !t; 459 return !t;
@@ -463,7 +463,7 @@ static __inline__ int newport_bfwait(struct newport_regs *regs)
463{ 463{
464 int t = BUSY_TIMEOUT; 464 int t = BUSY_TIMEOUT;
465 465
466 while (t--) 466 while (--t)
467 if(!(regs->cset.status & NPORT_STAT_BBUSY)) 467 if(!(regs->cset.status & NPORT_STAT_BBUSY))
468 break; 468 break;
469 return !t; 469 return !t;
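
The while (t--) to while (--t) change above is a behavioural fix: with the post-decrement form the counter ends at -1 once the busy bit never clears, so the return !t in newport_wait()/newport_bfwait() could never report a timeout. A standalone sketch (plain C, not part of the patch) of the two loop forms:

#include <stdio.h>

/* Sketch only: final counter value when the loop runs to exhaustion. */
int main(void)
{
	int t;

	t = 3;
	while (t--)
		;	/* never breaks: simulates a device that stays busy */
	printf("post-decrement: t=%d, !t=%d (timeout not reported)\n", t, !t);

	t = 3;
	while (--t)
		;
	printf("pre-decrement:  t=%d, !t=%d (timeout reported)\n", t, !t);
	return 0;
}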
diff --git a/include/video/radeon.h b/include/video/radeon.h
index e072b16b39ab..56b188abfb54 100644
--- a/include/video/radeon.h
+++ b/include/video/radeon.h
@@ -5,12 +5,12 @@
 #define RADEON_REGSIZE 0x4000
 
 
 #define MM_INDEX 0x0000
 #define MM_DATA 0x0004
 #define BUS_CNTL 0x0030
 #define HI_STAT 0x004C
 #define BUS_CNTL1 0x0034
 #define I2C_CNTL_1 0x0094
 #define CNFG_CNTL 0x00E0
 #define CNFG_MEMSIZE 0x00F8
 #define CNFG_APER_0_BASE 0x0100
@@ -18,8 +18,8 @@
 #define CNFG_APER_SIZE 0x0108
 #define CNFG_REG_1_BASE 0x010C
 #define CNFG_REG_APER_SIZE 0x0110
 #define PAD_AGPINPUT_DELAY 0x0164
 #define PAD_CTLR_STRENGTH 0x0168
 #define PAD_CTLR_UPDATE 0x016C
 #define PAD_CTLR_MISC 0x0aa0
 #define AGP_CNTL 0x0174
@@ -27,171 +27,171 @@
 #define CAP0_TRIG_CNTL 0x0950
 #define CAP1_TRIG_CNTL 0x09c0
 #define VIPH_CONTROL 0x0C40
 #define VENDOR_ID 0x0F00
 #define DEVICE_ID 0x0F02
 #define COMMAND 0x0F04
 #define STATUS 0x0F06
 #define REVISION_ID 0x0F08
 #define REGPROG_INF 0x0F09
 #define SUB_CLASS 0x0F0A
 #define BASE_CODE 0x0F0B
 #define CACHE_LINE 0x0F0C
 #define LATENCY 0x0F0D
 #define HEADER 0x0F0E
 #define BIST 0x0F0F
 #define REG_MEM_BASE 0x0F10
 #define REG_IO_BASE 0x0F14
 #define REG_REG_BASE 0x0F18
 #define ADAPTER_ID 0x0F2C
 #define BIOS_ROM 0x0F30
 #define CAPABILITIES_PTR 0x0F34
 #define INTERRUPT_LINE 0x0F3C
 #define INTERRUPT_PIN 0x0F3D
 #define MIN_GRANT 0x0F3E
 #define MAX_LATENCY 0x0F3F
 #define ADAPTER_ID_W 0x0F4C
 #define PMI_CAP_ID 0x0F50
 #define PMI_NXT_CAP_PTR 0x0F51
 #define PMI_PMC_REG 0x0F52
 #define PM_STATUS 0x0F54
 #define PMI_DATA 0x0F57
 #define AGP_CAP_ID 0x0F58
 #define AGP_STATUS 0x0F5C
 #define AGP_COMMAND 0x0F60
 #define AIC_CTRL 0x01D0
 #define AIC_STAT 0x01D4
 #define AIC_PT_BASE 0x01D8
 #define AIC_LO_ADDR 0x01DC
 #define AIC_HI_ADDR 0x01E0
 #define AIC_TLB_ADDR 0x01E4
 #define AIC_TLB_DATA 0x01E8
 #define DAC_CNTL 0x0058
 #define DAC_CNTL2 0x007c
 #define CRTC_GEN_CNTL 0x0050
 #define MEM_CNTL 0x0140
 #define MC_CNTL 0x0140
 #define EXT_MEM_CNTL 0x0144
 #define MC_TIMING_CNTL 0x0144
 #define MC_AGP_LOCATION 0x014C
 #define MEM_IO_CNTL_A0 0x0178
 #define MEM_REFRESH_CNTL 0x0178
 #define MEM_INIT_LATENCY_TIMER 0x0154
 #define MC_INIT_GFX_LAT_TIMER 0x0154
 #define MEM_SDRAM_MODE_REG 0x0158
 #define AGP_BASE 0x0170
 #define MEM_IO_CNTL_A1 0x017C
 #define MC_READ_CNTL_AB 0x017C
 #define MEM_IO_CNTL_B0 0x0180
 #define MC_INIT_MISC_LAT_TIMER 0x0180
 #define MEM_IO_CNTL_B1 0x0184
 #define MC_IOPAD_CNTL 0x0184
 #define MC_DEBUG 0x0188
 #define MC_STATUS 0x0150
 #define MEM_IO_OE_CNTL 0x018C
 #define MC_CHIP_IO_OE_CNTL_AB 0x018C
 #define MC_FB_LOCATION 0x0148
 #define HOST_PATH_CNTL 0x0130
 #define MEM_VGA_WP_SEL 0x0038
 #define MEM_VGA_RP_SEL 0x003C
 #define HDP_DEBUG 0x0138
 #define SW_SEMAPHORE 0x013C
 #define CRTC2_GEN_CNTL 0x03f8
 #define CRTC2_DISPLAY_BASE_ADDR 0x033c
 #define SURFACE_CNTL 0x0B00
 #define SURFACE0_LOWER_BOUND 0x0B04
 #define SURFACE1_LOWER_BOUND 0x0B14
 #define SURFACE2_LOWER_BOUND 0x0B24
 #define SURFACE3_LOWER_BOUND 0x0B34
 #define SURFACE4_LOWER_BOUND 0x0B44
 #define SURFACE5_LOWER_BOUND 0x0B54
 #define SURFACE6_LOWER_BOUND 0x0B64
 #define SURFACE7_LOWER_BOUND 0x0B74
 #define SURFACE0_UPPER_BOUND 0x0B08
 #define SURFACE1_UPPER_BOUND 0x0B18
 #define SURFACE2_UPPER_BOUND 0x0B28
 #define SURFACE3_UPPER_BOUND 0x0B38
 #define SURFACE4_UPPER_BOUND 0x0B48
 #define SURFACE5_UPPER_BOUND 0x0B58
 #define SURFACE6_UPPER_BOUND 0x0B68
 #define SURFACE7_UPPER_BOUND 0x0B78
 #define SURFACE0_INFO 0x0B0C
 #define SURFACE1_INFO 0x0B1C
 #define SURFACE2_INFO 0x0B2C
 #define SURFACE3_INFO 0x0B3C
 #define SURFACE4_INFO 0x0B4C
 #define SURFACE5_INFO 0x0B5C
 #define SURFACE6_INFO 0x0B6C
 #define SURFACE7_INFO 0x0B7C
 #define SURFACE_ACCESS_FLAGS 0x0BF8
 #define SURFACE_ACCESS_CLR 0x0BFC
 #define GEN_INT_CNTL 0x0040
 #define GEN_INT_STATUS 0x0044
 #define CRTC_EXT_CNTL 0x0054
 #define RB3D_CNTL 0x1C3C
 #define WAIT_UNTIL 0x1720
 #define ISYNC_CNTL 0x1724
 #define RBBM_GUICNTL 0x172C
 #define RBBM_STATUS 0x0E40
 #define RBBM_STATUS_alt_1 0x1740
 #define RBBM_CNTL 0x00EC
 #define RBBM_CNTL_alt_1 0x0E44
 #define RBBM_SOFT_RESET 0x00F0
 #define RBBM_SOFT_RESET_alt_1 0x0E48
 #define NQWAIT_UNTIL 0x0E50
 #define RBBM_DEBUG 0x0E6C
 #define RBBM_CMDFIFO_ADDR 0x0E70
 #define RBBM_CMDFIFO_DATAL 0x0E74
 #define RBBM_CMDFIFO_DATAH 0x0E78
 #define RBBM_CMDFIFO_STAT 0x0E7C
 #define CRTC_STATUS 0x005C
 #define GPIO_VGA_DDC 0x0060
 #define GPIO_DVI_DDC 0x0064
 #define GPIO_MONID 0x0068
 #define GPIO_CRT2_DDC 0x006c
 #define PALETTE_INDEX 0x00B0
 #define PALETTE_DATA 0x00B4
 #define PALETTE_30_DATA 0x00B8
 #define CRTC_H_TOTAL_DISP 0x0200
 #define CRTC_H_SYNC_STRT_WID 0x0204
 #define CRTC_V_TOTAL_DISP 0x0208
 #define CRTC_V_SYNC_STRT_WID 0x020C
 #define CRTC_VLINE_CRNT_VLINE 0x0210
 #define CRTC_CRNT_FRAME 0x0214
 #define CRTC_GUI_TRIG_VLINE 0x0218
 #define CRTC_DEBUG 0x021C
 #define CRTC_OFFSET_RIGHT 0x0220
 #define CRTC_OFFSET 0x0224
 #define CRTC_OFFSET_CNTL 0x0228
 #define CRTC_PITCH 0x022C
 #define OVR_CLR 0x0230
 #define OVR_WID_LEFT_RIGHT 0x0234
 #define OVR_WID_TOP_BOTTOM 0x0238
 #define DISPLAY_BASE_ADDR 0x023C
 #define SNAPSHOT_VH_COUNTS 0x0240
 #define SNAPSHOT_F_COUNT 0x0244
 #define N_VIF_COUNT 0x0248
 #define SNAPSHOT_VIF_COUNT 0x024C
 #define FP_CRTC_H_TOTAL_DISP 0x0250
 #define FP_CRTC_V_TOTAL_DISP 0x0254
 #define CRT_CRTC_H_SYNC_STRT_WID 0x0258
 #define CRT_CRTC_V_SYNC_STRT_WID 0x025C
 #define CUR_OFFSET 0x0260
 #define CUR_HORZ_VERT_POSN 0x0264
 #define CUR_HORZ_VERT_OFF 0x0268
 #define CUR_CLR0 0x026C
 #define CUR_CLR1 0x0270
 #define FP_HORZ_VERT_ACTIVE 0x0278
 #define CRTC_MORE_CNTL 0x027C
 #define CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
 #define CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
 #define DAC_EXT_CNTL 0x0280
 #define FP_GEN_CNTL 0x0284
 #define FP_HORZ_STRETCH 0x028C
 #define FP_VERT_STRETCH 0x0290
 #define FP_H_SYNC_STRT_WID 0x02C4
 #define FP_V_SYNC_STRT_WID 0x02C8
 #define AUX_WINDOW_HORZ_CNTL 0x02D8
 #define AUX_WINDOW_VERT_CNTL 0x02DC
 //#define DDA_CONFIG 0x02e0
 //#define DDA_ON_OFF 0x02e4
 #define DVI_I2C_CNTL_1 0x02e4
@@ -199,192 +199,192 @@
 #define GRPH2_BUFFER_CNTL 0x03F0
 #define VGA_BUFFER_CNTL 0x02F4
 #define OV0_Y_X_START 0x0400
 #define OV0_Y_X_END 0x0404
 #define OV0_PIPELINE_CNTL 0x0408
 #define OV0_REG_LOAD_CNTL 0x0410
 #define OV0_SCALE_CNTL 0x0420
 #define OV0_V_INC 0x0424
 #define OV0_P1_V_ACCUM_INIT 0x0428
 #define OV0_P23_V_ACCUM_INIT 0x042C
 #define OV0_P1_BLANK_LINES_AT_TOP 0x0430
 #define OV0_P23_BLANK_LINES_AT_TOP 0x0434
 #define OV0_BASE_ADDR 0x043C
 #define OV0_VID_BUF0_BASE_ADRS 0x0440
 #define OV0_VID_BUF1_BASE_ADRS 0x0444
 #define OV0_VID_BUF2_BASE_ADRS 0x0448
 #define OV0_VID_BUF3_BASE_ADRS 0x044C
 #define OV0_VID_BUF4_BASE_ADRS 0x0450
 #define OV0_VID_BUF5_BASE_ADRS 0x0454
 #define OV0_VID_BUF_PITCH0_VALUE 0x0460
 #define OV0_VID_BUF_PITCH1_VALUE 0x0464
 #define OV0_AUTO_FLIP_CNTRL 0x0470
 #define OV0_DEINTERLACE_PATTERN 0x0474
 #define OV0_SUBMIT_HISTORY 0x0478
 #define OV0_H_INC 0x0480
 #define OV0_STEP_BY 0x0484
 #define OV0_P1_H_ACCUM_INIT 0x0488
 #define OV0_P23_H_ACCUM_INIT 0x048C
 #define OV0_P1_X_START_END 0x0494
 #define OV0_P2_X_START_END 0x0498
 #define OV0_P3_X_START_END 0x049C
 #define OV0_FILTER_CNTL 0x04A0
 #define OV0_FOUR_TAP_COEF_0 0x04B0
 #define OV0_FOUR_TAP_COEF_1 0x04B4
 #define OV0_FOUR_TAP_COEF_2 0x04B8
 #define OV0_FOUR_TAP_COEF_3 0x04BC
 #define OV0_FOUR_TAP_COEF_4 0x04C0
 #define OV0_FLAG_CNTRL 0x04DC
 #define OV0_SLICE_CNTL 0x04E0
 #define OV0_VID_KEY_CLR_LOW 0x04E4
 #define OV0_VID_KEY_CLR_HIGH 0x04E8
 #define OV0_GRPH_KEY_CLR_LOW 0x04EC
 #define OV0_GRPH_KEY_CLR_HIGH 0x04F0
 #define OV0_KEY_CNTL 0x04F4
 #define OV0_TEST 0x04F8
 #define SUBPIC_CNTL 0x0540
 #define SUBPIC_DEFCOLCON 0x0544
 #define SUBPIC_Y_X_START 0x054C
 #define SUBPIC_Y_X_END 0x0550
 #define SUBPIC_V_INC 0x0554
 #define SUBPIC_H_INC 0x0558
 #define SUBPIC_BUF0_OFFSET 0x055C
 #define SUBPIC_BUF1_OFFSET 0x0560
 #define SUBPIC_LC0_OFFSET 0x0564
 #define SUBPIC_LC1_OFFSET 0x0568
 #define SUBPIC_PITCH 0x056C
 #define SUBPIC_BTN_HLI_COLCON 0x0570
 #define SUBPIC_BTN_HLI_Y_X_START 0x0574
 #define SUBPIC_BTN_HLI_Y_X_END 0x0578
 #define SUBPIC_PALETTE_INDEX 0x057C
 #define SUBPIC_PALETTE_DATA 0x0580
 #define SUBPIC_H_ACCUM_INIT 0x0584
 #define SUBPIC_V_ACCUM_INIT 0x0588
 #define DISP_MISC_CNTL 0x0D00
 #define DAC_MACRO_CNTL 0x0D04
 #define DISP_PWR_MAN 0x0D08
 #define DISP_TEST_DEBUG_CNTL 0x0D10
 #define DISP_HW_DEBUG 0x0D14
 #define DAC_CRC_SIG1 0x0D18
 #define DAC_CRC_SIG2 0x0D1C
 #define OV0_LIN_TRANS_A 0x0D20
 #define OV0_LIN_TRANS_B 0x0D24
 #define OV0_LIN_TRANS_C 0x0D28
 #define OV0_LIN_TRANS_D 0x0D2C
 #define OV0_LIN_TRANS_E 0x0D30
 #define OV0_LIN_TRANS_F 0x0D34
 #define OV0_GAMMA_0_F 0x0D40
 #define OV0_GAMMA_10_1F 0x0D44
 #define OV0_GAMMA_20_3F 0x0D48
 #define OV0_GAMMA_40_7F 0x0D4C
 #define OV0_GAMMA_380_3BF 0x0D50
 #define OV0_GAMMA_3C0_3FF 0x0D54
 #define DISP_MERGE_CNTL 0x0D60
 #define DISP_OUTPUT_CNTL 0x0D64
 #define DISP_LIN_TRANS_GRPH_A 0x0D80
 #define DISP_LIN_TRANS_GRPH_B 0x0D84
 #define DISP_LIN_TRANS_GRPH_C 0x0D88
 #define DISP_LIN_TRANS_GRPH_D 0x0D8C
 #define DISP_LIN_TRANS_GRPH_E 0x0D90
 #define DISP_LIN_TRANS_GRPH_F 0x0D94
 #define DISP_LIN_TRANS_VID_A 0x0D98
 #define DISP_LIN_TRANS_VID_B 0x0D9C
 #define DISP_LIN_TRANS_VID_C 0x0DA0
 #define DISP_LIN_TRANS_VID_D 0x0DA4
 #define DISP_LIN_TRANS_VID_E 0x0DA8
 #define DISP_LIN_TRANS_VID_F 0x0DAC
 #define RMX_HORZ_FILTER_0TAP_COEF 0x0DB0
 #define RMX_HORZ_FILTER_1TAP_COEF 0x0DB4
 #define RMX_HORZ_FILTER_2TAP_COEF 0x0DB8
 #define RMX_HORZ_PHASE 0x0DBC
 #define DAC_EMBEDDED_SYNC_CNTL 0x0DC0
 #define DAC_BROAD_PULSE 0x0DC4
 #define DAC_SKEW_CLKS 0x0DC8
 #define DAC_INCR 0x0DCC
 #define DAC_NEG_SYNC_LEVEL 0x0DD0
 #define DAC_POS_SYNC_LEVEL 0x0DD4
 #define DAC_BLANK_LEVEL 0x0DD8
 #define CLOCK_CNTL_INDEX 0x0008
 #define CLOCK_CNTL_DATA 0x000C
 #define CP_RB_CNTL 0x0704
 #define CP_RB_BASE 0x0700
 #define CP_RB_RPTR_ADDR 0x070C
 #define CP_RB_RPTR 0x0710
 #define CP_RB_WPTR 0x0714
 #define CP_RB_WPTR_DELAY 0x0718
 #define CP_IB_BASE 0x0738
 #define CP_IB_BUFSZ 0x073C
 #define SCRATCH_REG0 0x15E0
 #define GUI_SCRATCH_REG0 0x15E0
 #define SCRATCH_REG1 0x15E4
 #define GUI_SCRATCH_REG1 0x15E4
 #define SCRATCH_REG2 0x15E8
 #define GUI_SCRATCH_REG2 0x15E8
 #define SCRATCH_REG3 0x15EC
 #define GUI_SCRATCH_REG3 0x15EC
 #define SCRATCH_REG4 0x15F0
 #define GUI_SCRATCH_REG4 0x15F0
 #define SCRATCH_REG5 0x15F4
 #define GUI_SCRATCH_REG5 0x15F4
 #define SCRATCH_UMSK 0x0770
 #define SCRATCH_ADDR 0x0774
 #define DP_BRUSH_FRGD_CLR 0x147C
 #define DP_BRUSH_BKGD_CLR 0x1478
 #define DST_LINE_START 0x1600
 #define DST_LINE_END 0x1604
 #define SRC_OFFSET 0x15AC
 #define SRC_PITCH 0x15B0
 #define SRC_TILE 0x1704
 #define SRC_PITCH_OFFSET 0x1428
 #define SRC_X 0x1414
 #define SRC_Y 0x1418
 #define SRC_X_Y 0x1590
 #define SRC_Y_X 0x1434
 #define DST_Y_X 0x1438
 #define DST_WIDTH_HEIGHT 0x1598
 #define DST_HEIGHT_WIDTH 0x143c
 #define DST_OFFSET 0x1404
 #define SRC_CLUT_ADDRESS 0x1780
 #define SRC_CLUT_DATA 0x1784
 #define SRC_CLUT_DATA_RD 0x1788
 #define HOST_DATA0 0x17C0
 #define HOST_DATA1 0x17C4
 #define HOST_DATA2 0x17C8
 #define HOST_DATA3 0x17CC
 #define HOST_DATA4 0x17D0
 #define HOST_DATA5 0x17D4
 #define HOST_DATA6 0x17D8
 #define HOST_DATA7 0x17DC
 #define HOST_DATA_LAST 0x17E0
 #define DP_SRC_ENDIAN 0x15D4
 #define DP_SRC_FRGD_CLR 0x15D8
 #define DP_SRC_BKGD_CLR 0x15DC
 #define SC_LEFT 0x1640
 #define SC_RIGHT 0x1644
 #define SC_TOP 0x1648
 #define SC_BOTTOM 0x164C
 #define SRC_SC_RIGHT 0x1654
 #define SRC_SC_BOTTOM 0x165C
 #define DP_CNTL 0x16C0
 #define DP_CNTL_XDIR_YDIR_YMAJOR 0x16D0
 #define DP_DATATYPE 0x16C4
 #define DP_MIX 0x16C8
 #define DP_WRITE_MSK 0x16CC
 #define DP_XOP 0x17F8
 #define CLR_CMP_CLR_SRC 0x15C4
 #define CLR_CMP_CLR_DST 0x15C8
 #define CLR_CMP_CNTL 0x15C0
 #define CLR_CMP_MSK 0x15CC
 #define DSTCACHE_MODE 0x1710
 #define DSTCACHE_CTLSTAT 0x1714
 #define DEFAULT_PITCH_OFFSET 0x16E0
 #define DEFAULT_SC_BOTTOM_RIGHT 0x16E8
 #define DEFAULT_SC_TOP_LEFT 0x16EC
 #define SRC_PITCH_OFFSET 0x1428
 #define DST_PITCH_OFFSET 0x142C
 #define DP_GUI_MASTER_CNTL 0x146C
 #define SC_TOP_LEFT 0x16EC
 #define SC_BOTTOM_RIGHT 0x16F0
 #define SRC_SC_BOTTOM_RIGHT 0x16F4
 #define RB2D_DSTCACHE_MODE 0x3428
 #define RB2D_DSTCACHE_CTLSTAT_broken 0x342C /* do not use */
 #define LVDS_GEN_CNTL 0x02d0
@@ -686,7 +686,7 @@
 #define VERT_FP_LOOP_STRETCH (0x7 << 28)
 #define VERT_STRETCH_RESERVED 0xf1000000
 
 /* DAC_CNTL bit constants */
 #define DAC_8BIT_EN 0x00000100
 #define DAC_4BPP_PIX_ORDER 0x00000200
 #define DAC_CRC_EN 0x00080000
@@ -700,7 +700,7 @@
 #define DAC_CMP_EN (1 << 3)
 #define DAC_CMP_OUTPUT (1 << 7)
 
 /* DAC_CNTL2 bit constants */
 #define DAC2_EXPAND_MODE (1 << 14)
 #define DAC2_CMP_EN (1 << 7)
 #define DAC2_PALETTE_ACCESS_CNTL (1 << 5)
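
One detail worth calling out in radeon.h is the MM_INDEX/MM_DATA pair near the top of the file: it is the chip's indirect register window, where the driver writes a register offset to MM_INDEX and then reads or writes the value through MM_DATA. The fragment below is a sketch of that access pattern, not code from this patch; the mmio base pointer and the readl()/writel() usage are assumptions about the surrounding driver.

/* Sketch: indirect register read through the MM_INDEX/MM_DATA window.
 * Assumes the usual <linux/io.h> accessors and an ioremapped mmio base. */
static u32 radeon_indexed_read(void __iomem *mmio, u32 offset)
{
	writel(offset, mmio + MM_INDEX);	/* select the target register */
	return readl(mmio + MM_DATA);		/* read it through the window */
}
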
diff --git a/include/video/s1d13xxxfb.h b/include/video/s1d13xxxfb.h
index fe41b8407946..c3b2a2aa7140 100644
--- a/include/video/s1d13xxxfb.h
+++ b/include/video/s1d13xxxfb.h
@@ -14,13 +14,16 @@
 #define S1D13XXXFB_H
 
 #define S1D_PALETTE_SIZE 256
-#define S1D13506_CHIP_REV 4 /* expected chip revision number for s1d13506 */
-#define S1D13806_CHIP_REV 7 /* expected chip revision number for s1d13806 */
-#define S1D_FBID "S1D13806"
-#define S1D_DEVICENAME "s1d13806fb"
+#define S1D_FBID "S1D13xxx"
+#define S1D_DEVICENAME "s1d13xxxfb"
+
+/* S1DREG_REV_CODE register = prod_id (6 bits) + revision (2 bits) */
+#define S1D13505_PROD_ID 0x3 /* 000011 */
+#define S1D13506_PROD_ID 0x4 /* 000100 */
+#define S1D13806_PROD_ID 0x7 /* 000111 */
 
 /* register definitions (tested on s1d13896) */
-#define S1DREG_REV_CODE 0x0000 /* Revision Code Register */
+#define S1DREG_REV_CODE 0x0000 /* Prod + Rev Code Register */
 #define S1DREG_MISC 0x0001 /* Miscellaneous Register */
 #define S1DREG_GPIO_CNF0 0x0004 /* General IO Pins Configuration Register 0 */
 #define S1DREG_GPIO_CNF1 0x0005 /* General IO Pins Configuration Register 1 */
@@ -141,10 +144,11 @@ struct s1d13xxxfb_regval {
 	u8 value;
 };
 
-
 struct s1d13xxxfb_par {
 	void __iomem *regs;
 	unsigned char display;
+	unsigned char prod_id;
+	unsigned char revision;
 
 	unsigned int pseudo_palette[16];
 #ifdef CONFIG_PM
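
The s1d13xxxfb.h change replaces the fixed per-chip revision constants with product-id values and records both fields in struct s1d13xxxfb_par. Below is a sketch of how a driver might split S1DREG_REV_CODE into those fields, assuming the product id occupies the upper six bits as the new comment indicates; the function name and error handling are illustrative only, readb() stands in for the driver's register accessor, and the fragment presumes the usual <linux/io.h>/<linux/errno.h> includes.

/* Sketch: decode the product id and silicon revision from S1DREG_REV_CODE. */
static int s1d13xxx_identify(struct s1d13xxxfb_par *par)
{
	u8 rev_code = readb(par->regs + S1DREG_REV_CODE);

	par->prod_id  = rev_code >> 2;	/* upper 6 bits: product id  */
	par->revision = rev_code & 0x3;	/* lower 2 bits: silicon rev */

	switch (par->prod_id) {
	case S1D13505_PROD_ID:
	case S1D13506_PROD_ID:
	case S1D13806_PROD_ID:
		return 0;
	default:
		return -ENODEV;		/* unrecognised Epson chip */
	}
}
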
diff --git a/include/video/tdfx.h b/include/video/tdfx.h
index 7431d9681e57..befbaf0a92d8 100644
--- a/include/video/tdfx.h
+++ b/include/video/tdfx.h
@@ -1,6 +1,9 @@
 #ifndef _TDFX_H
 #define _TDFX_H
 
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+
 /* membase0 register offsets */
 #define STATUS 0x00
 #define PCIINIT0 0x04
@@ -123,6 +126,18 @@
 #define VIDCFG_PIXFMT_SHIFT 18
 #define DACMODE_2X BIT(0)
 
+/* I2C bit locations in the VIDSERPARPORT register */
+#define DDC_ENAB 0x00040000
+#define DDC_SCL_OUT 0x00080000
+#define DDC_SDA_OUT 0x00100000
+#define DDC_SCL_IN 0x00200000
+#define DDC_SDA_IN 0x00400000
+#define I2C_ENAB 0x00800000
+#define I2C_SCL_OUT 0x01000000
+#define I2C_SDA_OUT 0x02000000
+#define I2C_SCL_IN 0x04000000
+#define I2C_SDA_IN 0x08000000
+
 /* VGA rubbish, need to change this for multihead support */
 #define MISC_W 0x3c2
 #define MISC_R 0x3cc
@@ -168,12 +183,23 @@ struct banshee_reg {
 	unsigned long miscinit0;
 };
 
+struct tdfx_par;
+
+struct tdfxfb_i2c_chan {
+	struct tdfx_par *par;
+	struct i2c_adapter adapter;
+	struct i2c_algo_bit_data algo;
+};
+
 struct tdfx_par {
 	u32 max_pixclock;
 	u32 palette[16];
 	void __iomem *regbase_virt;
 	unsigned long iobase;
 	int mtrr_handle;
+#ifdef CONFIG_FB_3DFX_I2C
+	struct tdfxfb_i2c_chan chan[2];
+#endif
 };
 
 #endif /* __KERNEL__ */
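
The tdfx.h additions give the driver per-bus tdfxfb_i2c_chan state plus bit positions for the DDC and I2C signals in the VIDSERPARPORT register, ready to be plugged into i2c-algo-bit. The fragment below sketches that wiring for the DDC pair; the register access through par->regbase_virt, the open-drain convention (set the *_OUT bit to release a line, clear it to drive it low), the adapter name and the udelay/timeout values are assumptions for illustration, not taken from this header.

/* Sketch: bit-banged DDC bus on top of the new DDC_* bits and
 * tdfxfb_i2c_chan, using the i2c-algo-bit callbacks.  Assumes the usual
 * <linux/io.h>/<linux/module.h> includes and that VIDSERPARPORT is the
 * register offset named in the comment above. */
static void tdfx_ddc_setbit(struct tdfxfb_i2c_chan *chan, u32 bit, int state)
{
	u32 reg = readl(chan->par->regbase_virt + VIDSERPARPORT) | DDC_ENAB;

	if (state)
		reg |= bit;		/* release the open-drain line */
	else
		reg &= ~bit;		/* drive it low */
	writel(reg, chan->par->regbase_virt + VIDSERPARPORT);
}

static void tdfx_ddc_setsda(void *data, int state)
{
	tdfx_ddc_setbit(data, DDC_SDA_OUT, state);
}

static void tdfx_ddc_setscl(void *data, int state)
{
	tdfx_ddc_setbit(data, DDC_SCL_OUT, state);
}

static int tdfx_ddc_getsda(void *data)
{
	struct tdfxfb_i2c_chan *chan = data;

	return !!(readl(chan->par->regbase_virt + VIDSERPARPORT) & DDC_SDA_IN);
}

static int tdfx_ddc_getscl(void *data)
{
	struct tdfxfb_i2c_chan *chan = data;

	return !!(readl(chan->par->regbase_virt + VIDSERPARPORT) & DDC_SCL_IN);
}

static int tdfx_setup_ddc_bus(struct tdfxfb_i2c_chan *chan, struct device *dev)
{
	strlcpy(chan->adapter.name, "tdfx DDC", sizeof(chan->adapter.name));
	chan->adapter.owner = THIS_MODULE;
	chan->adapter.algo_data = &chan->algo;
	chan->adapter.dev.parent = dev;
	chan->algo.setsda = tdfx_ddc_setsda;
	chan->algo.setscl = tdfx_ddc_setscl;
	chan->algo.getsda = tdfx_ddc_getsda;
	chan->algo.getscl = tdfx_ddc_getscl;
	chan->algo.udelay = 10;
	chan->algo.timeout = msecs_to_jiffies(100);
	chan->algo.data = chan;

	return i2c_bit_add_bus(&chan->adapter);
}
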