Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acexcep.h | 6
-rw-r--r--  include/acpi/acpi_bus.h | 18
-rw-r--r--  include/acpi/acpi_drivers.h | 33
-rw-r--r--  include/acpi/acpiosxf.h | 4
-rw-r--r--  include/acpi/acpixf.h | 16
-rw-r--r--  include/acpi/actbl.h | 74
-rw-r--r--  include/acpi/actbl1.h | 6
-rw-r--r--  include/acpi/actypes.h | 18
-rw-r--r--  include/acpi/processor.h | 4
-rw-r--r--  include/acpi/video.h | 11
-rw-r--r--  include/asm-frv/ftrace.h | 1
-rw-r--r--  include/asm-frv/highmem.h | 2
-rw-r--r--  include/asm-generic/dma-mapping.h | 308
-rw-r--r--  include/asm-generic/gpio.h | 5
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 29
-rw-r--r--  include/asm-m32r/ftrace.h | 1
-rw-r--r--  include/asm-m32r/spinlock.h | 3
-rw-r--r--  include/asm-mn10300/ftrace.h | 1
-rw-r--r--  include/asm-mn10300/highmem.h | 2
-rw-r--r--  include/drm/drm_crtc_helper.h | 3
-rw-r--r--  include/drm/drm_os_linux.h | 4
-rw-r--r--  include/linux/Kbuild | 1
-rw-r--r--  include/linux/acpi.h | 35
-rw-r--r--  include/linux/async_tx.h | 9
-rw-r--r--  include/linux/auto_dev-ioctl.h | 7
-rw-r--r--  include/linux/auto_fs.h | 6
-rw-r--r--  include/linux/backing-dev.h | 12
-rw-r--r--  include/linux/binfmts.h | 3
-rw-r--r--  include/linux/bio.h | 19
-rw-r--r--  include/linux/blkdev.h | 55
-rw-r--r--  include/linux/blktrace_api.h | 5
-rw-r--r--  include/linux/bootmem.h | 6
-rw-r--r--  include/linux/buffer_head.h | 14
-rw-r--r--  include/linux/cgroup.h | 142
-rw-r--r--  include/linux/compat.h | 6
-rw-r--r--  include/linux/compiler.h | 6
-rw-r--r--  include/linux/connector.h | 4
-rw-r--r--  include/linux/cpu.h | 16
-rw-r--r--  include/linux/cpuset.h | 33
-rw-r--r--  include/linux/debugfs.h | 8
-rw-r--r--  include/linux/device-mapper.h | 3
-rw-r--r--  include/linux/dm-dirty-log.h | 13
-rw-r--r--  include/linux/dma_remapping.h | 1
-rw-r--r--  include/linux/dmaengine.h | 30
-rw-r--r--  include/linux/ds1wm.h | 12
-rw-r--r--  include/linux/dst.h | 587
-rw-r--r--  include/linux/dw_dmac.h | 19
-rw-r--r--  include/linux/eventfd.h | 12
-rw-r--r--  include/linux/ext3_fs.h | 6
-rw-r--r--  include/linux/fb.h | 11
-rw-r--r--  include/linux/fdtable.h | 4
-rw-r--r--  include/linux/fs.h | 58
-rw-r--r--  include/linux/fs_struct.h | 10
-rw-r--r--  include/linux/fscache-cache.h | 505
-rw-r--r--  include/linux/fscache.h | 618
-rw-r--r--  include/linux/fsl_devices.h | 7
-rw-r--r--  include/linux/ftrace.h | 244
-rw-r--r--  include/linux/ftrace_irq.h | 2
-rw-r--r--  include/linux/gfp.h | 1
-rw-r--r--  include/linux/hardirq.h | 73
-rw-r--r--  include/linux/hdreg.h | 66
-rw-r--r--  include/linux/hid.h | 23
-rw-r--r--  include/linux/highmem.h | 17
-rw-r--r--  include/linux/i2c-algo-sgi.h | 26
-rw-r--r--  include/linux/i2c-id.h | 37
-rw-r--r--  include/linux/i2c.h | 4
-rw-r--r--  include/linux/i2c/at24.h | 4
-rw-r--r--  include/linux/i2c/twl4030.h | 47
-rw-r--r--  include/linux/ide.h | 56
-rw-r--r--  include/linux/idr.h | 1
-rw-r--r--  include/linux/intel-iommu.h | 2
-rw-r--r--  include/linux/interrupt.h | 5
-rw-r--r--  include/linux/iommu.h | 13
-rw-r--r--  include/linux/jbd.h | 7
-rw-r--r--  include/linux/jbd2.h | 6
-rw-r--r--  include/linux/kallsyms.h | 15
-rw-r--r--  include/linux/kernel.h | 156
-rw-r--r--  include/linux/key.h | 1
-rw-r--r--  include/linux/kmod.h | 11
-rw-r--r--  include/linux/leds-bd2802.h | 26
-rw-r--r--  include/linux/leds.h | 4
-rw-r--r--  include/linux/leds_pwm.h | 21
-rw-r--r--  include/linux/libata.h | 2
-rw-r--r--  include/linux/lockd/xdr.h | 12
-rw-r--r--  include/linux/lockd/xdr4.h | 10
-rw-r--r--  include/linux/lockdep.h | 17
-rw-r--r--  include/linux/loop.h | 1
-rw-r--r--  include/linux/memcontrol.h | 20
-rw-r--r--  include/linux/memory.h | 17
-rw-r--r--  include/linux/mfd/ds1wm.h | 6
-rw-r--r--  include/linux/mfd/htc-pasic3.h | 1
-rw-r--r--  include/linux/mfd/wm8350/core.h | 2
-rw-r--r--  include/linux/mm.h | 6
-rw-r--r--  include/linux/mm_types.h | 4
-rw-r--r--  include/linux/mmc/host.h | 5
-rw-r--r--  include/linux/mmzone.h | 14
-rw-r--r--  include/linux/mnt_namespace.h | 2
-rw-r--r--  include/linux/module.h | 68
-rw-r--r--  include/linux/moduleparam.h | 10
-rw-r--r--  include/linux/mpage.h | 10
-rw-r--r--  include/linux/msi.h | 13
-rw-r--r--  include/linux/nfs.h | 1
-rw-r--r--  include/linux/nfs4.h | 138
-rw-r--r--  include/linux/nfs_fs.h | 17
-rw-r--r--  include/linux/nfs_fs_sb.h | 16
-rw-r--r--  include/linux/nfs_iostat.h | 12
-rw-r--r--  include/linux/nfs_xdr.h | 59
-rw-r--r--  include/linux/nfsd/cache.h | 8
-rw-r--r--  include/linux/nfsd/nfsd.h | 225
-rw-r--r--  include/linux/nfsd/nfsfh.h | 7
-rw-r--r--  include/linux/nfsd/state.h | 84
-rw-r--r--  include/linux/nfsd/stats.h | 9
-rw-r--r--  include/linux/nfsd/xdr4.h | 129
-rw-r--r--  include/linux/nsproxy.h | 1
-rw-r--r--  include/linux/page-debug-flags.h | 30
-rw-r--r--  include/linux/page-flags.h | 60
-rw-r--r--  include/linux/page_cgroup.h | 13
-rw-r--r--  include/linux/pagemap.h | 17
-rw-r--r--  include/linux/pagevec.h | 1
-rw-r--r--  include/linux/pci-acpi.h | 67
-rw-r--r--  include/linux/pci.h | 61
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/pci_regs.h | 37
-rw-r--r--  include/linux/pcieport_if.h | 36
-rw-r--r--  include/linux/poison.h | 3
-rw-r--r--  include/linux/power_supply.h | 2
-rw-r--r--  include/linux/ptrace.h | 1
-rw-r--r--  include/linux/pwm.h | 2
-rw-r--r--  include/linux/raid/bitmap.h | 288
-rw-r--r--  include/linux/raid/linear.h | 31
-rw-r--r--  include/linux/raid/md.h | 81
-rw-r--r--  include/linux/raid/md_k.h | 402
-rw-r--r--  include/linux/raid/md_u.h | 35
-rw-r--r--  include/linux/raid/multipath.h | 42
-rw-r--r--  include/linux/raid/pq.h | 132
-rw-r--r--  include/linux/raid/raid0.h | 30
-rw-r--r--  include/linux/raid/raid1.h | 134
-rw-r--r--  include/linux/raid/raid10.h | 123
-rw-r--r--  include/linux/raid/raid5.h | 402
-rw-r--r--  include/linux/raid/xor.h | 2
-rw-r--r--  include/linux/rcuclassic.h | 16
-rw-r--r--  include/linux/rcupdate.h | 1
-rw-r--r--  include/linux/rcupreempt.h | 53
-rw-r--r--  include/linux/rcutree.h | 27
-rw-r--r--  include/linux/regulator/bq24022.h | 3
-rw-r--r--  include/linux/regulator/consumer.h | 6
-rw-r--r--  include/linux/regulator/driver.h | 81
-rw-r--r--  include/linux/regulator/fixed.h | 3
-rw-r--r--  include/linux/regulator/machine.h | 12
-rw-r--r--  include/linux/ring_buffer.h | 38
-rw-r--r--  include/linux/rtc-v3020.h | 6
-rw-r--r--  include/linux/rtc.h | 6
-rw-r--r--  include/linux/sched.h | 91
-rw-r--r--  include/linux/security.h | 24
-rw-r--r--  include/linux/slab_def.h | 68
-rw-r--r--  include/linux/slob_def.h | 9
-rw-r--r--  include/linux/slow-work.h | 95
-rw-r--r--  include/linux/slub_def.h | 49
-rw-r--r--  include/linux/smp.h | 7
-rw-r--r--  include/linux/sonypi.h | 8
-rw-r--r--  include/linux/spi/eeprom.h | 6
-rw-r--r--  include/linux/spi/spi_gpio.h | 6
-rw-r--r--  include/linux/spinlock.h | 6
-rw-r--r--  include/linux/string.h | 17
-rw-r--r--  include/linux/sunrpc/svc.h | 25
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 52
-rw-r--r--  include/linux/sunrpc/xdr.h | 42
-rw-r--r--  include/linux/sunrpc/xprt.h | 2
-rw-r--r--  include/linux/suspend.h | 3
-rw-r--r--  include/linux/swap.h | 7
-rw-r--r--  include/linux/synclink.h | 1
-rw-r--r--  include/linux/syscalls.h | 64
-rw-r--r--  include/linux/thermal.h | 48
-rw-r--r--  include/linux/timeriomem-rng.h | 2
-rw-r--r--  include/linux/topology.h | 11
-rw-r--r--  include/linux/trace_clock.h | 19
-rw-r--r--  include/linux/tracehook.h | 15
-rw-r--r--  include/linux/tracepoint.h | 116
-rw-r--r--  include/linux/tty_driver.h | 3
-rw-r--r--  include/linux/usb/wusb.h | 3
-rw-r--r--  include/linux/wait.h | 29
-rw-r--r--  include/linux/workqueue.h | 5
-rw-r--r--  include/linux/writeback.h | 4
-rw-r--r--  include/net/tcp.h | 15
-rw-r--r--  include/scsi/fc/fc_fip.h | 237
-rw-r--r--  include/scsi/fc_transport_fcoe.h | 54
-rw-r--r--  include/scsi/libfc.h | 45
-rw-r--r--  include/scsi/libfcoe.h | 227
-rw-r--r--  include/trace/block.h | 70
-rw-r--r--  include/trace/irq.h | 9
-rw-r--r--  include/trace/irq_event_types.h | 55
-rw-r--r--  include/trace/kmemtrace.h | 63
-rw-r--r--  include/trace/lockdep.h | 9
-rw-r--r--  include/trace/lockdep_event_types.h | 44
-rw-r--r--  include/trace/power.h | 32
-rw-r--r--  include/trace/sched.h | 49
-rw-r--r--  include/trace/sched_event_types.h | 337
-rw-r--r--  include/trace/skb.h | 4
-rw-r--r--  include/trace/trace_event_types.h | 5
-rw-r--r--  include/trace/trace_events.h | 5
-rw-r--r--  include/trace/workqueue.h | 25
-rw-r--r--  include/video/aty128.h | 2
-rw-r--r--  include/video/cirrus.h | 2
-rw-r--r--  include/video/newport.h | 4
-rw-r--r--  include/video/radeon.h | 564
-rw-r--r--  include/video/s1d13xxxfb.h | 16
206 files changed, 6044 insertions, 3578 deletions
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index eda04546cdf6..473d584b1d31 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -103,8 +103,9 @@
103#define AE_BAD_OCTAL_CONSTANT (acpi_status) (0x0006 | AE_CODE_PROGRAMMER) 103#define AE_BAD_OCTAL_CONSTANT (acpi_status) (0x0006 | AE_CODE_PROGRAMMER)
104#define AE_BAD_DECIMAL_CONSTANT (acpi_status) (0x0007 | AE_CODE_PROGRAMMER) 104#define AE_BAD_DECIMAL_CONSTANT (acpi_status) (0x0007 | AE_CODE_PROGRAMMER)
105#define AE_MISSING_ARGUMENTS (acpi_status) (0x0008 | AE_CODE_PROGRAMMER) 105#define AE_MISSING_ARGUMENTS (acpi_status) (0x0008 | AE_CODE_PROGRAMMER)
106#define AE_BAD_ADDRESS (acpi_status) (0x0009 | AE_CODE_PROGRAMMER)
106 107
107#define AE_CODE_PGM_MAX 0x0008 108#define AE_CODE_PGM_MAX 0x0009
108 109
109/* 110/*
110 * Acpi table exceptions 111 * Acpi table exceptions
@@ -224,7 +225,8 @@ char const *acpi_gbl_exception_names_pgm[] = {
224 "AE_BAD_HEX_CONSTANT", 225 "AE_BAD_HEX_CONSTANT",
225 "AE_BAD_OCTAL_CONSTANT", 226 "AE_BAD_OCTAL_CONSTANT",
226 "AE_BAD_DECIMAL_CONSTANT", 227 "AE_BAD_DECIMAL_CONSTANT",
227 "AE_MISSING_ARGUMENTS" 228 "AE_MISSING_ARGUMENTS",
229 "AE_BAD_ADDRESS"
228}; 230};
229 231
230char const *acpi_gbl_exception_names_tbl[] = { 232char const *acpi_gbl_exception_names_tbl[] = {
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index e9f6574930ef..a2228511d4be 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -88,44 +88,30 @@ struct acpi_device;
88 88
89typedef int (*acpi_op_add) (struct acpi_device * device); 89typedef int (*acpi_op_add) (struct acpi_device * device);
90typedef int (*acpi_op_remove) (struct acpi_device * device, int type); 90typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
91typedef int (*acpi_op_lock) (struct acpi_device * device, int type);
92typedef int (*acpi_op_start) (struct acpi_device * device); 91typedef int (*acpi_op_start) (struct acpi_device * device);
93typedef int (*acpi_op_stop) (struct acpi_device * device, int type); 92typedef int (*acpi_op_stop) (struct acpi_device * device, int type);
94typedef int (*acpi_op_suspend) (struct acpi_device * device, 93typedef int (*acpi_op_suspend) (struct acpi_device * device,
95 pm_message_t state); 94 pm_message_t state);
96typedef int (*acpi_op_resume) (struct acpi_device * device); 95typedef int (*acpi_op_resume) (struct acpi_device * device);
97typedef int (*acpi_op_scan) (struct acpi_device * device);
98typedef int (*acpi_op_bind) (struct acpi_device * device); 96typedef int (*acpi_op_bind) (struct acpi_device * device);
99typedef int (*acpi_op_unbind) (struct acpi_device * device); 97typedef int (*acpi_op_unbind) (struct acpi_device * device);
100typedef int (*acpi_op_shutdown) (struct acpi_device * device); 98typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event);
101 99
102struct acpi_bus_ops { 100struct acpi_bus_ops {
103 u32 acpi_op_add:1; 101 u32 acpi_op_add:1;
104 u32 acpi_op_remove:1;
105 u32 acpi_op_lock:1;
106 u32 acpi_op_start:1; 102 u32 acpi_op_start:1;
107 u32 acpi_op_stop:1;
108 u32 acpi_op_suspend:1;
109 u32 acpi_op_resume:1;
110 u32 acpi_op_scan:1;
111 u32 acpi_op_bind:1;
112 u32 acpi_op_unbind:1;
113 u32 acpi_op_shutdown:1;
114 u32 reserved:21;
115}; 103};
116 104
117struct acpi_device_ops { 105struct acpi_device_ops {
118 acpi_op_add add; 106 acpi_op_add add;
119 acpi_op_remove remove; 107 acpi_op_remove remove;
120 acpi_op_lock lock;
121 acpi_op_start start; 108 acpi_op_start start;
122 acpi_op_stop stop; 109 acpi_op_stop stop;
123 acpi_op_suspend suspend; 110 acpi_op_suspend suspend;
124 acpi_op_resume resume; 111 acpi_op_resume resume;
125 acpi_op_scan scan;
126 acpi_op_bind bind; 112 acpi_op_bind bind;
127 acpi_op_unbind unbind; 113 acpi_op_unbind unbind;
128 acpi_op_shutdown shutdown; 114 acpi_op_notify notify;
129}; 115};
130 116
131struct acpi_driver { 117struct acpi_driver {
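The hunk above removes the unused lock/scan/shutdown operations (and the per-op bitfield struct) and adds a notify callback to struct acpi_device_ops. A minimal illustrative sketch of a driver built against the reworked ops, not part of the patch; every example_* identifier is hypothetical:

#include <linux/acpi.h>
#include <acpi/acpi_bus.h>

static const struct acpi_device_id example_ids[] = {
	{"EXAMPLE1", 0},
	{"", 0},
};

static int example_add(struct acpi_device *device)
{
	return 0;	/* set up per-device state here */
}

static int example_remove(struct acpi_device *device, int type)
{
	return 0;
}

/* new in this interface: a per-driver handler for Notify() events */
static void example_notify(struct acpi_device *device, u32 event)
{
	dev_info(&device->dev, "ACPI notify event 0x%x\n", event);
}

static struct acpi_driver example_driver = {
	.name  = "example",
	.class = "example",
	.ids   = example_ids,
	.ops = {
		.add    = example_add,
		.remove = example_remove,
		.notify = example_notify,
	},
};

Registration would still go through acpi_bus_register_driver(&example_driver); only the set of available callbacks changes.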
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 5fc1bb0f4a90..0352c8f0b05b 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -67,6 +67,16 @@
67#define ACPI_BAY_HID "LNXIOBAY" 67#define ACPI_BAY_HID "LNXIOBAY"
68#define ACPI_DOCK_HID "LNXDOCK" 68#define ACPI_DOCK_HID "LNXDOCK"
69 69
70/*
71 * For fixed hardware buttons, we fabricate acpi_devices with HID
72 * ACPI_BUTTON_HID_POWERF or ACPI_BUTTON_HID_SLEEPF. Fixed hardware
73 * signals only an event; it doesn't supply a notification value.
74 * To allow drivers to treat notifications from fixed hardware the
75 * same as those from real devices, we turn the events into this
76 * notification value.
77 */
78#define ACPI_FIXED_HARDWARE_EVENT 0x100
79
70/* -------------------------------------------------------------------------- 80/* --------------------------------------------------------------------------
71 PCI 81 PCI
72 -------------------------------------------------------------------------- */ 82 -------------------------------------------------------------------------- */
@@ -99,24 +109,6 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
99 int bus); 109 int bus);
100 110
101/* -------------------------------------------------------------------------- 111/* --------------------------------------------------------------------------
102 Power Resource
103 -------------------------------------------------------------------------- */
104
105int acpi_device_sleep_wake(struct acpi_device *dev,
106 int enable, int sleep_state, int dev_state);
107int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state);
108int acpi_disable_wakeup_device_power(struct acpi_device *dev);
109int acpi_power_get_inferred_state(struct acpi_device *device);
110int acpi_power_transition(struct acpi_device *device, int state);
111extern int acpi_power_nocheck;
112
113/* --------------------------------------------------------------------------
114 Embedded Controller
115 -------------------------------------------------------------------------- */
116int acpi_ec_ecdt_probe(void);
117int acpi_boot_ec_enable(void);
118
119/* --------------------------------------------------------------------------
120 Processor 112 Processor
121 -------------------------------------------------------------------------- */ 113 -------------------------------------------------------------------------- */
122 114
@@ -165,9 +157,4 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle)
165} 157}
166#endif 158#endif
167 159
168/*--------------------------------------------------------------------------
169 Suspend/Resume
170 -------------------------------------------------------------------------- */
171extern int acpi_sleep_init(void);
172
173#endif /*__ACPI_DRIVERS_H__*/ 160#endif /*__ACPI_DRIVERS_H__*/
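As the new comment explains, a fixed-hardware button signals only an event, so the core substitutes the fabricated ACPI_FIXED_HARDWARE_EVENT value before calling the driver. A hedged sketch (hypothetical example_* name) of a notify handler that treats both sources alike:

static void example_button_notify(struct acpi_device *device, u32 event)
{
	/*
	 * A fixed-feature button arrives with the fabricated
	 * ACPI_FIXED_HARDWARE_EVENT value; a control-method button
	 * arrives with whatever value the AML Notify() supplied.
	 */
	if (event == ACPI_FIXED_HARDWARE_EVENT) {
		/* handle exactly like the device-specific notify value */
	}
}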
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index ab0b85cf21f3..3e798593b17b 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -242,10 +242,6 @@ acpi_os_derive_pci_id(acpi_handle rhandle,
242acpi_status acpi_os_validate_interface(char *interface); 242acpi_status acpi_os_validate_interface(char *interface);
243acpi_status acpi_osi_invalidate(char* interface); 243acpi_status acpi_osi_invalidate(char* interface);
244 244
245acpi_status
246acpi_os_validate_address(u8 space_id, acpi_physical_address address,
247 acpi_size length, char *name);
248
249u64 acpi_os_get_timer(void); 245u64 acpi_os_get_timer(void);
250 246
251acpi_status acpi_os_signal(u32 function, void *info); 247acpi_status acpi_os_signal(u32 function, void *info);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index cc40102fe2f3..aeaf7cd41dc7 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@
47 47
48/* Current ACPICA subsystem version in YYYYMMDD format */ 48/* Current ACPICA subsystem version in YYYYMMDD format */
49 49
50#define ACPI_CA_VERSION 0x20081204 50#define ACPI_CA_VERSION 0x20090320
51 51
52#include "actypes.h" 52#include "actypes.h"
53#include "actbl.h" 53#include "actbl.h"
@@ -349,17 +349,15 @@ acpi_resource_to_address64(struct acpi_resource *resource,
349 */ 349 */
350acpi_status acpi_reset(void); 350acpi_status acpi_reset(void);
351 351
352acpi_status acpi_get_register(u32 register_id, u32 * return_value); 352acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value);
353 353
354acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value); 354acpi_status acpi_write_bit_register(u32 register_id, u32 value);
355 355
356acpi_status acpi_set_register(u32 register_id, u32 value); 356acpi_status acpi_set_firmware_waking_vector(u32 physical_address);
357 357
358acpi_status 358#if ACPI_MACHINE_WIDTH == 64
359acpi_set_firmware_waking_vector(u32 physical_address); 359acpi_status acpi_set_firmware_waking_vector64(u64 physical_address);
360 360#endif
361acpi_status
362acpi_set_firmware_waking_vector64(u64 physical_address);
363 361
364acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg); 362acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg);
365 363
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index bf8d4cfd8cf5..222733d01f36 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -214,11 +214,11 @@ struct acpi_table_fadt {
214 u16 flush_size; /* Processor's memory cache line width, in bytes */ 214 u16 flush_size; /* Processor's memory cache line width, in bytes */
215 u16 flush_stride; /* Number of flush strides that need to be read */ 215 u16 flush_stride; /* Number of flush strides that need to be read */
216 u8 duty_offset; /* Processor duty cycle index in processor's P_CNT reg */ 216 u8 duty_offset; /* Processor duty cycle index in processor's P_CNT reg */
217 u8 duty_width; /* Processor duty cycle value bit width in P_CNT register. */ 217 u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */
218 u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ 218 u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */
219 u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ 219 u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */
220 u8 century; /* Index to century in RTC CMOS RAM */ 220 u8 century; /* Index to century in RTC CMOS RAM */
221 u16 boot_flags; /* IA-PC Boot Architecture Flags. See Table 5-10 for description */ 221 u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */
222 u8 reserved; /* Reserved, must be zero */ 222 u8 reserved; /* Reserved, must be zero */
223 u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ 223 u32 flags; /* Miscellaneous flag bits (see below for individual flags) */
224 struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ 224 struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */
@@ -236,32 +236,41 @@ struct acpi_table_fadt {
236 struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ 236 struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
237}; 237};
238 238
239/* FADT Boot Architecture Flags (boot_flags) */
240
241#define ACPI_FADT_LEGACY_DEVICES (1) /* 00: [V2] System has LPC or ISA bus devices */
242#define ACPI_FADT_8042 (1<<1) /* 01: [V3] System has an 8042 controller on port 60/64 */
243#define ACPI_FADT_NO_VGA (1<<2) /* 02: [V4] It is not safe to probe for VGA hardware */
244#define ACPI_FADT_NO_MSI (1<<3) /* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */
245#define ACPI_FADT_NO_ASPM (1<<4) /* 04: [V4] PCIe ASPM control must not be enabled */
246
247#define FADT2_REVISION_ID 3
248
239/* FADT flags */ 249/* FADT flags */
240 250
241#define ACPI_FADT_WBINVD (1) /* 00: The wbinvd instruction works properly */ 251#define ACPI_FADT_WBINVD (1) /* 00: [V1] The wbinvd instruction works properly */
242#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: The wbinvd flushes but does not invalidate */ 252#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: [V1] wbinvd flushes but does not invalidate caches */
243#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: All processors support C1 state */ 253#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: [V1] All processors support C1 state */
244#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: C2 state works on MP system */ 254#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: [V1] C2 state works on MP system */
245#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: Power button is handled as a generic feature */ 255#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: [V1] Power button is handled as a control method device */
246#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: Sleep button is handled as a generic feature, or not present */ 256#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: [V1] Sleep button is handled as a control method device */
247#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: RTC wakeup stat not in fixed register space */ 257#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: [V1] RTC wakeup status not in fixed register space */
248#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: RTC wakeup possible from S4 */ 258#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: [V1] RTC alarm can wake system from S4 */
249#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: tmr_val is 32 bits 0=24-bits */ 259#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: [V1] ACPI timer width is 32-bit (0=24-bit) */
250#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: Docking supported */ 260#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: [V1] Docking supported */
251#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: System reset via the FADT RESET_REG supported */ 261#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: [V2] System reset via the FADT RESET_REG supported */
252#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: No internal expansion capabilities and case is sealed */ 262#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: [V3] No internal expansion capabilities and case is sealed */
253#define ACPI_FADT_HEADLESS (1<<12) /* 12: No local video capabilities or local input devices */ 263#define ACPI_FADT_HEADLESS (1<<12) /* 12: [V3] No local video capabilities or local input devices */
254#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: Must execute native instruction after writing SLP_TYPx register */ 264#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: [V3] Must execute native instruction after writing SLP_TYPx register */
255#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */ 265#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: [V4] System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
256#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: OSPM should use platform-provided timer (ACPI 3.0) */ 266#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: [V4] OSPM should use platform-provided timer (ACPI 3.0) */
257#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */ 267#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: [V4] Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
258#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: System is compatible with remote power on (ACPI 3.0) */ 268#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */
259#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: All local APICs must use cluster model (ACPI 3.0) */ 269#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
260#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */ 270#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */
271
272/* FADT Prefered Power Management Profiles */
261 273
262/*
263 * FADT Prefered Power Management Profiles
264 */
265enum acpi_prefered_pm_profiles { 274enum acpi_prefered_pm_profiles {
266 PM_UNSPECIFIED = 0, 275 PM_UNSPECIFIED = 0,
267 PM_DESKTOP = 1, 276 PM_DESKTOP = 1,
@@ -272,16 +281,6 @@ enum acpi_prefered_pm_profiles {
272 PM_APPLIANCE_PC = 6 281 PM_APPLIANCE_PC = 6
273}; 282};
274 283
275/* FADT Boot Arch Flags */
276
277#define BAF_LEGACY_DEVICES 0x0001
278#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
279#define BAF_MSI_NOT_SUPPORTED 0x0008
280#define BAF_PCIE_ASPM_CONTROL 0x0010
281
282#define FADT2_REVISION_ID 3
283#define FADT2_MINUS_REVISION_ID 2
284
285/* Reset to default packing */ 284/* Reset to default packing */
286 285
287#pragma pack() 286#pragma pack()
@@ -310,8 +309,9 @@ struct acpi_table_desc {
310#define ACPI_TABLE_ORIGIN_UNKNOWN (0) 309#define ACPI_TABLE_ORIGIN_UNKNOWN (0)
311#define ACPI_TABLE_ORIGIN_MAPPED (1) 310#define ACPI_TABLE_ORIGIN_MAPPED (1)
312#define ACPI_TABLE_ORIGIN_ALLOCATED (2) 311#define ACPI_TABLE_ORIGIN_ALLOCATED (2)
313#define ACPI_TABLE_ORIGIN_MASK (3) 312#define ACPI_TABLE_ORIGIN_OVERRIDE (4)
314#define ACPI_TABLE_IS_LOADED (4) 313#define ACPI_TABLE_ORIGIN_MASK (7)
314#define ACPI_TABLE_IS_LOADED (8)
315 315
316/* 316/*
317 * Get the remaining ACPI tables 317 * Get the remaining ACPI tables
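The BAF_* names above are replaced by ACPI_FADT_* boot-architecture flags kept next to the other FADT flag definitions. Purely as an illustration (not part of the patch, and assuming the usual acpi_gbl_FADT global copy of the FADT), platform code could test them like this:

#include <linux/acpi.h>

static int example_have_8042(void)
{
	/* boot_flags is the IA-PC Boot Architecture Flags word of the FADT */
	return !!(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042);
}

static int example_msi_allowed(void)
{
	/* firmware sets ACPI_FADT_NO_MSI when MSI must not be enabled */
	return !(acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI);
}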
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 18963b968114..59ade0752473 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -1016,9 +1016,9 @@ struct acpi_madt_interrupt_source {
1016struct acpi_madt_local_x2apic { 1016struct acpi_madt_local_x2apic {
1017 struct acpi_subtable_header header; 1017 struct acpi_subtable_header header;
1018 u16 reserved; /* Reserved - must be zero */ 1018 u16 reserved; /* Reserved - must be zero */
1019 u32 local_apic_id; /* Processor X2_APIC ID */ 1019 u32 local_apic_id; /* Processor x2APIC ID */
1020 u32 lapic_flags; 1020 u32 lapic_flags;
1021 u32 uid; /* Extended X2_APIC processor ID */ 1021 u32 uid; /* ACPI processor UID */
1022}; 1022};
1023 1023
1024/* 10: Local X2APIC NMI (07/2008) */ 1024/* 10: Local X2APIC NMI (07/2008) */
@@ -1026,7 +1026,7 @@ struct acpi_madt_local_x2apic {
1026struct acpi_madt_local_x2apic_nmi { 1026struct acpi_madt_local_x2apic_nmi {
1027 struct acpi_subtable_header header; 1027 struct acpi_subtable_header header;
1028 u16 inti_flags; 1028 u16 inti_flags;
1029 u32 uid; /* Processor X2_APIC ID */ 1029 u32 uid; /* ACPI processor UID */
1030 u8 lint; /* LINTn to which NMI is connected */ 1030 u8 lint; /* LINTn to which NMI is connected */
1031 u8 reserved[3]; 1031 u8 reserved[3];
1032}; 1032};
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index a20aab510173..f555d927f7c0 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -777,17 +777,25 @@ typedef u8 acpi_adr_space_type;
777#define ACPI_BITREG_SCI_ENABLE 0x0E 777#define ACPI_BITREG_SCI_ENABLE 0x0E
778#define ACPI_BITREG_BUS_MASTER_RLD 0x0F 778#define ACPI_BITREG_BUS_MASTER_RLD 0x0F
779#define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10 779#define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10
780#define ACPI_BITREG_SLEEP_TYPE_A 0x11 780#define ACPI_BITREG_SLEEP_TYPE 0x11
781#define ACPI_BITREG_SLEEP_TYPE_B 0x12 781#define ACPI_BITREG_SLEEP_ENABLE 0x12
782#define ACPI_BITREG_SLEEP_ENABLE 0x13
783 782
784/* PM2 Control register */ 783/* PM2 Control register */
785 784
786#define ACPI_BITREG_ARB_DISABLE 0x14 785#define ACPI_BITREG_ARB_DISABLE 0x13
787 786
788#define ACPI_BITREG_MAX 0x14 787#define ACPI_BITREG_MAX 0x13
789#define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1 788#define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1
790 789
790/* Status register values. A 1 clears a status bit. 0 = no effect */
791
792#define ACPI_CLEAR_STATUS 1
793
794/* Enable and Control register values */
795
796#define ACPI_ENABLE_EVENT 1
797#define ACPI_DISABLE_EVENT 0
798
791/* 799/*
792 * External ACPI object definition 800 * External ACPI object definition
793 */ 801 */
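Together with the renamed acpi_read_bit_register()/acpi_write_bit_register() interfaces from acpixf.h, the new ACPI_CLEAR_STATUS and ACPI_ENABLE_EVENT values spell out what gets written to status and enable bits. An illustrative sketch only (the power-button bit-register IDs are assumed from the unchanged part of this header):

#include <linux/acpi.h>

static acpi_status example_ack_power_button(void)
{
	acpi_status status;
	u32 value;

	/* read the fixed power-button status bit */
	status = acpi_read_bit_register(ACPI_BITREG_POWER_BUTTON_STATUS, &value);
	if (ACPI_FAILURE(status))
		return status;

	if (value) {
		/* writing ACPI_CLEAR_STATUS (1) to a status bit clears it */
		status = acpi_write_bit_register(ACPI_BITREG_POWER_BUTTON_STATUS,
						 ACPI_CLEAR_STATUS);
		if (ACPI_FAILURE(status))
			return status;
	}

	/* re-arm the corresponding fixed event */
	return acpi_write_bit_register(ACPI_BITREG_POWER_BUTTON_ENABLE,
				       ACPI_ENABLE_EVENT);
}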
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 0574add2a1e3..b09c4fde9725 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -322,7 +322,7 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
322int acpi_processor_tstate_has_changed(struct acpi_processor *pr); 322int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
323int acpi_processor_get_throttling_info(struct acpi_processor *pr); 323int acpi_processor_get_throttling_info(struct acpi_processor *pr);
324extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state); 324extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
325extern struct file_operations acpi_processor_throttling_fops; 325extern const struct file_operations acpi_processor_throttling_fops;
326extern void acpi_processor_throttling_init(void); 326extern void acpi_processor_throttling_init(void);
327/* in processor_idle.c */ 327/* in processor_idle.c */
328int acpi_processor_power_init(struct acpi_processor *pr, 328int acpi_processor_power_init(struct acpi_processor *pr,
@@ -336,7 +336,7 @@ extern struct cpuidle_driver acpi_idle_driver;
336 336
337/* in processor_thermal.c */ 337/* in processor_thermal.c */
338int acpi_processor_get_limit_info(struct acpi_processor *pr); 338int acpi_processor_get_limit_info(struct acpi_processor *pr);
339extern struct file_operations acpi_processor_limit_fops; 339extern const struct file_operations acpi_processor_limit_fops;
340extern struct thermal_cooling_device_ops processor_cooling_ops; 340extern struct thermal_cooling_device_ops processor_cooling_ops;
341#ifdef CONFIG_CPU_FREQ 341#ifdef CONFIG_CPU_FREQ
342void acpi_thermal_cpufreq_init(void); 342void acpi_thermal_cpufreq_init(void);
diff --git a/include/acpi/video.h b/include/acpi/video.h
new file mode 100644
index 000000000000..f0275bb79ce4
--- /dev/null
+++ b/include/acpi/video.h
@@ -0,0 +1,11 @@
1#ifndef __ACPI_VIDEO_H
2#define __ACPI_VIDEO_H
3
4#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
5extern int acpi_video_register(void);
6#else
7static inline int acpi_video_register(void) { return 0; }
8#endif
9
10#endif
11
diff --git a/include/asm-frv/ftrace.h b/include/asm-frv/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-frv/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index 26cefcde5cee..68e4677fb9e7 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -18,6 +18,7 @@
18#ifdef __KERNEL__ 18#ifdef __KERNEL__
19 19
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/highmem.h>
21#include <asm/mem-layout.h> 22#include <asm/mem-layout.h>
22#include <asm/spr-regs.h> 23#include <asm/spr-regs.h>
23#include <asm/mb-regs.h> 24#include <asm/mb-regs.h>
@@ -116,6 +117,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
116 unsigned long paddr; 117 unsigned long paddr;
117 118
118 pagefault_disable(); 119 pagefault_disable();
120 debug_kmap_atomic(type);
119 paddr = page_to_phys(page); 121 paddr = page_to_phys(page);
120 122
121 switch (type) { 123 switch (type) {
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
deleted file mode 100644
index 189486c3f92e..000000000000
--- a/include/asm-generic/dma-mapping.h
+++ /dev/null
@@ -1,308 +0,0 @@
1/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
2 *
3 * Implements the generic device dma API via the existing pci_ one
4 * for unconverted architectures
5 */
6
7#ifndef _ASM_GENERIC_DMA_MAPPING_H
8#define _ASM_GENERIC_DMA_MAPPING_H
9
10
11#ifdef CONFIG_PCI
12
13/* we implement the API below in terms of the existing PCI one,
14 * so include it */
15#include <linux/pci.h>
16/* need struct page definitions */
17#include <linux/mm.h>
18
19static inline int
20dma_supported(struct device *dev, u64 mask)
21{
22 BUG_ON(dev->bus != &pci_bus_type);
23
24 return pci_dma_supported(to_pci_dev(dev), mask);
25}
26
27static inline int
28dma_set_mask(struct device *dev, u64 dma_mask)
29{
30 BUG_ON(dev->bus != &pci_bus_type);
31
32 return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
33}
34
35static inline void *
36dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
37 gfp_t flag)
38{
39 BUG_ON(dev->bus != &pci_bus_type);
40
41 return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
42}
43
44static inline void
45dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
46 dma_addr_t dma_handle)
47{
48 BUG_ON(dev->bus != &pci_bus_type);
49
50 pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
51}
52
53static inline dma_addr_t
54dma_map_single(struct device *dev, void *cpu_addr, size_t size,
55 enum dma_data_direction direction)
56{
57 BUG_ON(dev->bus != &pci_bus_type);
58
59 return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
60}
61
62static inline void
63dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
64 enum dma_data_direction direction)
65{
66 BUG_ON(dev->bus != &pci_bus_type);
67
68 pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
69}
70
71static inline dma_addr_t
72dma_map_page(struct device *dev, struct page *page,
73 unsigned long offset, size_t size,
74 enum dma_data_direction direction)
75{
76 BUG_ON(dev->bus != &pci_bus_type);
77
78 return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
79}
80
81static inline void
82dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
83 enum dma_data_direction direction)
84{
85 BUG_ON(dev->bus != &pci_bus_type);
86
87 pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
88}
89
90static inline int
91dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
92 enum dma_data_direction direction)
93{
94 BUG_ON(dev->bus != &pci_bus_type);
95
96 return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
97}
98
99static inline void
100dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
101 enum dma_data_direction direction)
102{
103 BUG_ON(dev->bus != &pci_bus_type);
104
105 pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
106}
107
108static inline void
109dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
110 enum dma_data_direction direction)
111{
112 BUG_ON(dev->bus != &pci_bus_type);
113
114 pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
115 size, (int)direction);
116}
117
118static inline void
119dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
120 enum dma_data_direction direction)
121{
122 BUG_ON(dev->bus != &pci_bus_type);
123
124 pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
125 size, (int)direction);
126}
127
128static inline void
129dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
130 enum dma_data_direction direction)
131{
132 BUG_ON(dev->bus != &pci_bus_type);
133
134 pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
135}
136
137static inline void
138dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
139 enum dma_data_direction direction)
140{
141 BUG_ON(dev->bus != &pci_bus_type);
142
143 pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
144}
145
146static inline int
147dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
148{
149 return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
150}
151
152
153#else
154
155static inline int
156dma_supported(struct device *dev, u64 mask)
157{
158 return 0;
159}
160
161static inline int
162dma_set_mask(struct device *dev, u64 dma_mask)
163{
164 BUG();
165 return 0;
166}
167
168static inline void *
169dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
170 gfp_t flag)
171{
172 BUG();
173 return NULL;
174}
175
176static inline void
177dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
178 dma_addr_t dma_handle)
179{
180 BUG();
181}
182
183static inline dma_addr_t
184dma_map_single(struct device *dev, void *cpu_addr, size_t size,
185 enum dma_data_direction direction)
186{
187 BUG();
188 return 0;
189}
190
191static inline void
192dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
193 enum dma_data_direction direction)
194{
195 BUG();
196}
197
198static inline dma_addr_t
199dma_map_page(struct device *dev, struct page *page,
200 unsigned long offset, size_t size,
201 enum dma_data_direction direction)
202{
203 BUG();
204 return 0;
205}
206
207static inline void
208dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
209 enum dma_data_direction direction)
210{
211 BUG();
212}
213
214static inline int
215dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
216 enum dma_data_direction direction)
217{
218 BUG();
219 return 0;
220}
221
222static inline void
223dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
224 enum dma_data_direction direction)
225{
226 BUG();
227}
228
229static inline void
230dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
231 enum dma_data_direction direction)
232{
233 BUG();
234}
235
236static inline void
237dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
238 enum dma_data_direction direction)
239{
240 BUG();
241}
242
243static inline void
244dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
245 enum dma_data_direction direction)
246{
247 BUG();
248}
249
250static inline void
251dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
252 enum dma_data_direction direction)
253{
254 BUG();
255}
256
257static inline int
258dma_error(dma_addr_t dma_addr)
259{
260 return 0;
261}
262
263#endif
264
265/* Now for the API extensions over the pci_ one */
266
267#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
268#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
269#define dma_is_consistent(d, h) (1)
270
271static inline int
272dma_get_cache_alignment(void)
273{
274 /* no easy way to get cache size on all processors, so return
275 * the maximum possible, to be safe */
276 return (1 << INTERNODE_CACHE_SHIFT);
277}
278
279static inline void
280dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
281 unsigned long offset, size_t size,
282 enum dma_data_direction direction)
283{
284 /* just sync everything, that's all the pci API can do */
285 dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
286}
287
288static inline void
289dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
290 unsigned long offset, size_t size,
291 enum dma_data_direction direction)
292{
293 /* just sync everything, that's all the pci API can do */
294 dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
295}
296
297static inline void
298dma_cache_sync(struct device *dev, void *vaddr, size_t size,
299 enum dma_data_direction direction)
300{
301 /* could define this in terms of the dma_cache ... operations,
302 * but if you get this on a platform, you should convert the platform
303 * to using the generic device DMA API */
304 BUG();
305}
306
307#endif
308
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index 81797ec9ab29..d6c379dc64fa 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -55,6 +55,10 @@ struct module;
55 * handled is (base + ngpio - 1). 55 * handled is (base + ngpio - 1).
56 * @can_sleep: flag must be set iff get()/set() methods sleep, as they 56 * @can_sleep: flag must be set iff get()/set() methods sleep, as they
57 * must while accessing GPIO expander chips over I2C or SPI 57 * must while accessing GPIO expander chips over I2C or SPI
58 * @names: if set, must be an array of strings to use as alternative
59 * names for the GPIOs in this chip. Any entry in the array
60 * may be NULL if there is no alias for the GPIO, however the
61 * array must be @ngpio entries long.
58 * 62 *
59 * A gpio_chip can help platforms abstract various sources of GPIOs so 63 * A gpio_chip can help platforms abstract various sources of GPIOs so
60 * they can all be accessed through a common programing interface. 64 * they can all be accessed through a common programing interface.
@@ -92,6 +96,7 @@ struct gpio_chip {
92 struct gpio_chip *chip); 96 struct gpio_chip *chip);
93 int base; 97 int base;
94 u16 ngpio; 98 u16 ngpio;
99 char **names;
95 unsigned can_sleep:1; 100 unsigned can_sleep:1;
96 unsigned exported:1; 101 unsigned exported:1;
97}; 102};
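The new optional names[] field gives each GPIO an alias; per the kernel-doc above, the array must be ngpio entries long and NULL entries simply have no alias. A minimal sketch of a chip driver attaching it (example_* names are hypothetical, not part of the patch):

#include <linux/kernel.h>
#include <linux/gpio.h>

/* alias names; array length must equal ngpio, NULL means "no alias" */
static char *example_gpio_names[] = {
	"power_led",
	NULL,
	"sd_card_detect",
	"wifi_reset",
};

static struct gpio_chip example_chip = {
	.label	= "example-gpio",
	.base	= -1,			/* let gpiolib assign a base */
	.ngpio	= ARRAY_SIZE(example_gpio_names),
	.names	= example_gpio_names,
};

A real driver would also fill in the direction_input/direction_output/get/set callbacks before registering the chip with gpiochip_add().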
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index a654d724d3b0..7fa660fd449c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -61,6 +61,30 @@
61#define BRANCH_PROFILE() 61#define BRANCH_PROFILE()
62#endif 62#endif
63 63
64#ifdef CONFIG_EVENT_TRACER
65#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
66 *(_ftrace_events) \
67 VMLINUX_SYMBOL(__stop_ftrace_events) = .;
68#else
69#define FTRACE_EVENTS()
70#endif
71
72#ifdef CONFIG_TRACING
73#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
74 *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
75 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
76#else
77#define TRACE_PRINTKS()
78#endif
79
80#ifdef CONFIG_FTRACE_SYSCALLS
81#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
82 *(__syscalls_metadata) \
83 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
84#else
85#define TRACE_SYSCALLS()
86#endif
87
64/* .data section */ 88/* .data section */
65#define DATA_DATA \ 89#define DATA_DATA \
66 *(.data) \ 90 *(.data) \
@@ -86,7 +110,10 @@
86 *(__verbose) \ 110 *(__verbose) \
87 VMLINUX_SYMBOL(__stop___verbose) = .; \ 111 VMLINUX_SYMBOL(__stop___verbose) = .; \
88 LIKELY_PROFILE() \ 112 LIKELY_PROFILE() \
89 BRANCH_PROFILE() 113 BRANCH_PROFILE() \
114 TRACE_PRINTKS() \
115 FTRACE_EVENTS() \
116 TRACE_SYSCALLS()
90 117
91#define RO_DATA(align) \ 118#define RO_DATA(align) \
92 . = ALIGN((align)); \ 119 . = ALIGN((align)); \
diff --git a/include/asm-m32r/ftrace.h b/include/asm-m32r/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-m32r/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/asm-m32r/spinlock.h b/include/asm-m32r/spinlock.h
index f5cfba81ee10..dded923883b2 100644
--- a/include/asm-m32r/spinlock.h
+++ b/include/asm-m32r/spinlock.h
@@ -316,6 +316,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
316 return 0; 316 return 0;
317} 317}
318 318
319#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
320#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
321
319#define _raw_spin_relax(lock) cpu_relax() 322#define _raw_spin_relax(lock) cpu_relax()
320#define _raw_read_relax(lock) cpu_relax() 323#define _raw_read_relax(lock) cpu_relax()
321#define _raw_write_relax(lock) cpu_relax() 324#define _raw_write_relax(lock) cpu_relax()
diff --git a/include/asm-mn10300/ftrace.h b/include/asm-mn10300/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-mn10300/ftrace.h
@@ -0,0 +1 @@
/* empty */
diff --git a/include/asm-mn10300/highmem.h b/include/asm-mn10300/highmem.h
index 5256854c0453..90f2abb04bfd 100644
--- a/include/asm-mn10300/highmem.h
+++ b/include/asm-mn10300/highmem.h
@@ -16,6 +16,7 @@
16 16
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/highmem.h>
19#include <asm/kmap_types.h> 20#include <asm/kmap_types.h>
20#include <asm/pgtable.h> 21#include <asm/pgtable.h>
21 22
@@ -77,6 +78,7 @@ static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
77 if (page < highmem_start_page) 78 if (page < highmem_start_page)
78 return page_address(page); 79 return page_address(page);
79 80
81 debug_kmap_atomic(type);
80 idx = type + KM_TYPE_NR * smp_processor_id(); 82 idx = type + KM_TYPE_NR * smp_processor_id();
81 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); 83 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
82#if HIGHMEM_DEBUG 84#if HIGHMEM_DEBUG
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index c7d4b2e606a5..ec073d8288d9 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -33,7 +33,6 @@
33#ifndef __DRM_CRTC_HELPER_H__ 33#ifndef __DRM_CRTC_HELPER_H__
34#define __DRM_CRTC_HELPER_H__ 34#define __DRM_CRTC_HELPER_H__
35 35
36#include <linux/i2c.h>
37#include <linux/spinlock.h> 36#include <linux/spinlock.h>
38#include <linux/types.h> 37#include <linux/types.h>
39#include <linux/idr.h> 38#include <linux/idr.h>
@@ -92,7 +91,7 @@ struct drm_connector_helper_funcs {
92extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); 91extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY);
93extern void drm_helper_disable_unused_functions(struct drm_device *dev); 92extern void drm_helper_disable_unused_functions(struct drm_device *dev);
94extern int drm_helper_hotplug_stage_two(struct drm_device *dev); 93extern int drm_helper_hotplug_stage_two(struct drm_device *dev);
95extern bool drm_helper_initial_config(struct drm_device *dev, bool can_grow); 94extern bool drm_helper_initial_config(struct drm_device *dev);
96extern int drm_crtc_helper_set_config(struct drm_mode_set *set); 95extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
97extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, 96extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
98 struct drm_display_mode *mode, 97 struct drm_display_mode *mode,
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 013551d03c03..26641e95e0a4 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -7,12 +7,12 @@
7#include <linux/delay.h> 7#include <linux/delay.h>
8 8
9#ifndef readq 9#ifndef readq
10static u64 readq(void __iomem *reg) 10static inline u64 readq(void __iomem *reg)
11{ 11{
12 return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32); 12 return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
13} 13}
14 14
15static void writeq(u64 val, void __iomem *reg) 15static inline void writeq(u64 val, void __iomem *reg)
16{ 16{
17 writel(val & 0xffffffff, reg); 17 writel(val & 0xffffffff, reg);
18 writel(val >> 32, reg + 0x4UL); 18 writel(val >> 32, reg + 0x4UL);
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index a67b6227d272..ca9b9b9bd331 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -67,6 +67,7 @@ header-y += falloc.h
67header-y += fd.h 67header-y += fd.h
68header-y += fdreg.h 68header-y += fdreg.h
69header-y += fib_rules.h 69header-y += fib_rules.h
70header-y += fiemap.h
70header-y += firewire-cdev.h 71header-y += firewire-cdev.h
71header-y += firewire-constants.h 72header-y += firewire-constants.h
72header-y += fuse.h 73header-y += fuse.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 78199151c00b..6586cbd0d4af 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -97,6 +97,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
97/* the following four functions are architecture-dependent */ 97/* the following four functions are architecture-dependent */
98void acpi_numa_slit_init (struct acpi_table_slit *slit); 98void acpi_numa_slit_init (struct acpi_table_slit *slit);
99void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); 99void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
100void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
100void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); 101void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
101void acpi_numa_arch_fixup(void); 102void acpi_numa_arch_fixup(void);
102 103
@@ -257,6 +258,40 @@ void __init acpi_no_s4_hw_signature(void);
257void __init acpi_old_suspend_ordering(void); 258void __init acpi_old_suspend_ordering(void);
258void __init acpi_s4_no_nvs(void); 259void __init acpi_s4_no_nvs(void);
259#endif /* CONFIG_PM_SLEEP */ 260#endif /* CONFIG_PM_SLEEP */
261
262#define OSC_QUERY_TYPE 0
263#define OSC_SUPPORT_TYPE 1
264#define OSC_CONTROL_TYPE 2
265#define OSC_SUPPORT_MASKS 0x1f
266
267/* _OSC DW0 Definition */
268#define OSC_QUERY_ENABLE 1
269#define OSC_REQUEST_ERROR 2
270#define OSC_INVALID_UUID_ERROR 4
271#define OSC_INVALID_REVISION_ERROR 8
272#define OSC_CAPABILITIES_MASK_ERROR 16
273
274/* _OSC DW1 Definition (OS Support Fields) */
275#define OSC_EXT_PCI_CONFIG_SUPPORT 1
276#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
277#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
278#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
279#define OSC_MSI_SUPPORT 16
280
281/* _OSC DW1 Definition (OS Control Fields) */
282#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
283#define OSC_SHPC_NATIVE_HP_CONTROL 2
284#define OSC_PCI_EXPRESS_PME_CONTROL 4
285#define OSC_PCI_EXPRESS_AER_CONTROL 8
286#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
287
288#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
289 OSC_SHPC_NATIVE_HP_CONTROL | \
290 OSC_PCI_EXPRESS_PME_CONTROL | \
291 OSC_PCI_EXPRESS_AER_CONTROL | \
292 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
293
294extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
260#else /* CONFIG_ACPI */ 295#else /* CONFIG_ACPI */
261 296
262static inline int early_acpi_boot_init(void) 297static inline int early_acpi_boot_init(void)
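The _OSC definitions and acpi_pci_osc_control_set() added above let PCI code negotiate control of PCIe features with firmware. A hedged sketch (hypothetical example_* name; the handle is assumed to be the root bridge's ACPI handle, and the error mapping is illustrative):

#include <linux/acpi.h>
#include <linux/errno.h>

static int example_claim_native_pcie(acpi_handle root_bridge)
{
	acpi_status status;
	u32 flags = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
		    OSC_PCI_EXPRESS_PME_CONTROL;

	/* ask firmware, via _OSC, to hand these features to the OS */
	status = acpi_pci_osc_control_set(root_bridge, flags);
	if (ACPI_FAILURE(status))
		return -ENODEV;		/* firmware keeps control */

	return 0;
}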
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 45f6297821bd..5fc2ef8d97fa 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -21,6 +21,15 @@
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23 23
24/* on architectures without dma-mapping capabilities we need to ensure
25 * that the asynchronous path compiles away
26 */
27#ifdef CONFIG_HAS_DMA
28#define __async_inline
29#else
30#define __async_inline __always_inline
31#endif
32
24/** 33/**
25 * dma_chan_ref - object used to manage dma channels received from the 34 * dma_chan_ref - object used to manage dma channels received from the
26 * dmaengine core. 35 * dmaengine core.
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 91a773993a5c..850f39b33e74 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -10,8 +10,13 @@
10#ifndef _LINUX_AUTO_DEV_IOCTL_H 10#ifndef _LINUX_AUTO_DEV_IOCTL_H
11#define _LINUX_AUTO_DEV_IOCTL_H 11#define _LINUX_AUTO_DEV_IOCTL_H
12 12
13#include <linux/auto_fs.h>
14
15#ifdef __KERNEL__
13#include <linux/string.h> 16#include <linux/string.h>
14#include <linux/types.h> 17#else
18#include <string.h>
19#endif /* __KERNEL__ */
15 20
16#define AUTOFS_DEVICE_NAME "autofs" 21#define AUTOFS_DEVICE_NAME "autofs"
17 22
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index c21e5972a3e8..63265852b7d1 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -17,11 +17,13 @@
17#ifdef __KERNEL__ 17#ifdef __KERNEL__
18#include <linux/fs.h> 18#include <linux/fs.h>
19#include <linux/limits.h> 19#include <linux/limits.h>
20#include <linux/types.h>
21#include <linux/ioctl.h>
22#else
20#include <asm/types.h> 23#include <asm/types.h>
24#include <sys/ioctl.h>
21#endif /* __KERNEL__ */ 25#endif /* __KERNEL__ */
22 26
23#include <linux/ioctl.h>
24
25/* This file describes autofs v3 */ 27/* This file describes autofs v3 */
26#define AUTOFS_PROTO_VERSION 3 28#define AUTOFS_PROTO_VERSION 3
27 29
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bee52abb8a4d..0ec2c594868e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,8 +24,8 @@ struct dentry;
24 */ 24 */
25enum bdi_state { 25enum bdi_state {
26 BDI_pdflush, /* A pdflush thread is working this device */ 26 BDI_pdflush, /* A pdflush thread is working this device */
27 BDI_write_congested, /* The write queue is getting full */ 27 BDI_async_congested, /* The async (write) queue is getting full */
28 BDI_read_congested, /* The read queue is getting full */ 28 BDI_sync_congested, /* The sync queue is getting full */
29 BDI_unused, /* Available bits start here */ 29 BDI_unused, /* Available bits start here */
30}; 30};
31 31
@@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
215 215
216static inline int bdi_read_congested(struct backing_dev_info *bdi) 216static inline int bdi_read_congested(struct backing_dev_info *bdi)
217{ 217{
218 return bdi_congested(bdi, 1 << BDI_read_congested); 218 return bdi_congested(bdi, 1 << BDI_sync_congested);
219} 219}
220 220
221static inline int bdi_write_congested(struct backing_dev_info *bdi) 221static inline int bdi_write_congested(struct backing_dev_info *bdi)
222{ 222{
223 return bdi_congested(bdi, 1 << BDI_write_congested); 223 return bdi_congested(bdi, 1 << BDI_async_congested);
224} 224}
225 225
226static inline int bdi_rw_congested(struct backing_dev_info *bdi) 226static inline int bdi_rw_congested(struct backing_dev_info *bdi)
227{ 227{
228 return bdi_congested(bdi, (1 << BDI_read_congested)| 228 return bdi_congested(bdi, (1 << BDI_sync_congested) |
229 (1 << BDI_write_congested)); 229 (1 << BDI_async_congested));
230} 230}
231 231
232void clear_bdi_congested(struct backing_dev_info *bdi, int rw); 232void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 77b4a9e46004..6638b8148de7 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -35,8 +35,7 @@ struct linux_binprm{
35#endif 35#endif
36 struct mm_struct *mm; 36 struct mm_struct *mm;
37 unsigned long p; /* current top of mem */ 37 unsigned long p; /* current top of mem */
38 unsigned int sh_bang:1, 38 unsigned int
39 misc_bang:1,
40 cred_prepared:1,/* true if creds already prepared (multiple 39 cred_prepared:1,/* true if creds already prepared (multiple
41 * preps happen for interpreters) */ 40 * preps happen for interpreters) */
42 cap_effective:1;/* true if has elevated effective capabilities, 41 cap_effective:1;/* true if has elevated effective capabilities,
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b05b1d4d17d2..b900d2c67d29 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -145,20 +145,21 @@ struct bio {
145 * bit 2 -- barrier 145 * bit 2 -- barrier
146 * Insert a serialization point in the IO queue, forcing previously 146 * Insert a serialization point in the IO queue, forcing previously
147 * submitted IO to be completed before this one is issued. 147 * submitted IO to be completed before this one is issued.
148 * bit 3 -- synchronous I/O hint: the block layer will unplug immediately 148 * bit 3 -- synchronous I/O hint.
149 * Note that this does NOT indicate that the IO itself is sync, just 149 * bit 4 -- Unplug the device immediately after submitting this bio.
150 * that the block layer will not postpone issue of this IO by plugging. 150 * bit 5 -- metadata request
151 * bit 4 -- metadata request
152 * Used for tracing to differentiate metadata and data IO. May also 151 * Used for tracing to differentiate metadata and data IO. May also
153 * get some preferential treatment in the IO scheduler 152 * get some preferential treatment in the IO scheduler
154 * bit 5 -- discard sectors 153 * bit 6 -- discard sectors
155 * Informs the lower level device that this range of sectors is no longer 154 * Informs the lower level device that this range of sectors is no longer
156 * used by the file system and may thus be freed by the device. Used 155 * used by the file system and may thus be freed by the device. Used
157 * for flash based storage. 156 * for flash based storage.
158 * bit 6 -- fail fast device errors 157 * bit 7 -- fail fast device errors
159 * bit 7 -- fail fast transport errors 158 * bit 8 -- fail fast transport errors
160 * bit 8 -- fail fast driver errors 159 * bit 9 -- fail fast driver errors
161 * Don't want driver retries for any fast fail whatever the reason. 160 * Don't want driver retries for any fast fail whatever the reason.
161 * bit 10 -- Tell the IO scheduler not to wait for more requests after this
162 one has been submitted, even if it is a SYNC request.
162 */ 163 */
163#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */ 164#define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
164#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */ 165#define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
@@ -170,6 +171,7 @@ struct bio {
170#define BIO_RW_FAILFAST_DEV 7 171#define BIO_RW_FAILFAST_DEV 7
171#define BIO_RW_FAILFAST_TRANSPORT 8 172#define BIO_RW_FAILFAST_TRANSPORT 8
172#define BIO_RW_FAILFAST_DRIVER 9 173#define BIO_RW_FAILFAST_DRIVER 9
174#define BIO_RW_NOIDLE 10
173 175
174#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag))) 176#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))
175 177
@@ -188,6 +190,7 @@ struct bio {
188#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD) 190#define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD)
189#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META) 191#define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META)
190#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD) 192#define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD)
193#define bio_noidle(bio) bio_rw_flagged(bio, BIO_RW_NOIDLE)
191 194
192/* 195/*
193 * upper 16 bits of bi_rw define the io priority of this bio 196 * upper 16 bits of bi_rw define the io priority of this bio
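
The bio.h hunk above adds a BIO_RW_NOIDLE bit (bit 10) and a bio_noidle() helper built on bio_rw_flagged(). As a rough, standalone sketch of how these one-bit flags in bi_rw are set and tested — the stripped-down struct and main() are illustrative only, not kernel code:

/*
 * Illustrative sketch only (not kernel code): how the BIO_RW_* bit
 * positions above translate into flag tests via bio_rw_flagged().
 */
#include <stdio.h>

#define BIO_RW          0    /* read/write direction, as in the header */
#define BIO_RW_NOIDLE  10    /* new: don't idle waiting for more IO    */

struct bio { unsigned long bi_rw; };

#define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1UL << (flag)))
#define bio_noidle(bio)           bio_rw_flagged(bio, BIO_RW_NOIDLE)

int main(void)
{
    struct bio b = { .bi_rw = (1UL << BIO_RW) | (1UL << BIO_RW_NOIDLE) };

    printf("write: %d, noidle: %d\n",
           !!bio_rw_flagged(&b, BIO_RW), !!bio_noidle(&b));
    return 0;
}
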
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847..e03660964e02 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
38typedef void (rq_end_io_fn)(struct request *, int); 38typedef void (rq_end_io_fn)(struct request *, int);
39 39
40struct request_list { 40struct request_list {
41 /*
42 * count[], starved[], and wait[] are indexed by
43 * BLK_RW_SYNC/BLK_RW_ASYNC
44 */
41 int count[2]; 45 int count[2];
42 int starved[2]; 46 int starved[2];
43 int elvpriv; 47 int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
66 REQ_TYPE_ATA_PC, 70 REQ_TYPE_ATA_PC,
67}; 71};
68 72
73enum {
74 BLK_RW_ASYNC = 0,
75 BLK_RW_SYNC = 1,
76};
77
69/* 78/*
70 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being 79 * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
71 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a 80 * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -103,12 +112,13 @@ enum rq_flag_bits {
103 __REQ_QUIET, /* don't worry about errors */ 112 __REQ_QUIET, /* don't worry about errors */
104 __REQ_PREEMPT, /* set for "ide_preempt" requests */ 113 __REQ_PREEMPT, /* set for "ide_preempt" requests */
105 __REQ_ORDERED_COLOR, /* is before or after barrier */ 114 __REQ_ORDERED_COLOR, /* is before or after barrier */
106 __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ 115 __REQ_RW_SYNC, /* request is sync (sync write or read) */
107 __REQ_ALLOCED, /* request came from our alloc pool */ 116 __REQ_ALLOCED, /* request came from our alloc pool */
108 __REQ_RW_META, /* metadata io request */ 117 __REQ_RW_META, /* metadata io request */
109 __REQ_COPY_USER, /* contains copies of user pages */ 118 __REQ_COPY_USER, /* contains copies of user pages */
110 __REQ_INTEGRITY, /* integrity metadata has been remapped */ 119 __REQ_INTEGRITY, /* integrity metadata has been remapped */
111 __REQ_UNPLUG, /* unplug queue on submission */ 120 __REQ_UNPLUG, /* unplug queue on submission */
121 __REQ_NOIDLE, /* Don't anticipate more IO after this one */
112 __REQ_NR_BITS, /* stops here */ 122 __REQ_NR_BITS, /* stops here */
113}; 123};
114 124
@@ -136,6 +146,7 @@ enum rq_flag_bits {
136#define REQ_COPY_USER (1 << __REQ_COPY_USER) 146#define REQ_COPY_USER (1 << __REQ_COPY_USER)
137#define REQ_INTEGRITY (1 << __REQ_INTEGRITY) 147#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
138#define REQ_UNPLUG (1 << __REQ_UNPLUG) 148#define REQ_UNPLUG (1 << __REQ_UNPLUG)
149#define REQ_NOIDLE (1 << __REQ_NOIDLE)
139 150
140#define BLK_MAX_CDB 16 151#define BLK_MAX_CDB 16
141 152
@@ -438,8 +449,8 @@ struct request_queue
438#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ 449#define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
439#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 450#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
440#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ 451#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
441#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */ 452#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
442#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */ 453#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
443#define QUEUE_FLAG_DEAD 5 /* queue being torn down */ 454#define QUEUE_FLAG_DEAD 5 /* queue being torn down */
444#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ 455#define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
445#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ 456#define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
@@ -611,32 +622,42 @@ enum {
611#define rq_data_dir(rq) ((rq)->cmd_flags & 1) 622#define rq_data_dir(rq) ((rq)->cmd_flags & 1)
612 623
613/* 624/*
614 * We regard a request as sync, if it's a READ or a SYNC write. 625 * We regard a request as sync, if either a read or a sync write
615 */ 626 */
616#define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC) 627static inline bool rw_is_sync(unsigned int rw_flags)
628{
629 return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
630}
631
632static inline bool rq_is_sync(struct request *rq)
633{
634 return rw_is_sync(rq->cmd_flags);
635}
636
617#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) 637#define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
638#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE)
618 639
619static inline int blk_queue_full(struct request_queue *q, int rw) 640static inline int blk_queue_full(struct request_queue *q, int sync)
620{ 641{
621 if (rw == READ) 642 if (sync)
622 return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags); 643 return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
623 return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); 644 return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
624} 645}
625 646
626static inline void blk_set_queue_full(struct request_queue *q, int rw) 647static inline void blk_set_queue_full(struct request_queue *q, int sync)
627{ 648{
628 if (rw == READ) 649 if (sync)
629 queue_flag_set(QUEUE_FLAG_READFULL, q); 650 queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
630 else 651 else
631 queue_flag_set(QUEUE_FLAG_WRITEFULL, q); 652 queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
632} 653}
633 654
634static inline void blk_clear_queue_full(struct request_queue *q, int rw) 655static inline void blk_clear_queue_full(struct request_queue *q, int sync)
635{ 656{
636 if (rw == READ) 657 if (sync)
637 queue_flag_clear(QUEUE_FLAG_READFULL, q); 658 queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
638 else 659 else
639 queue_flag_clear(QUEUE_FLAG_WRITEFULL, q); 660 queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
640} 661}
641 662
642 663
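
The blkdev.h changes above replace the read/write split with a sync/async one: rw_is_sync() treats any read, or a write carrying REQ_RW_SYNC, as synchronous, and that boolean now selects the BLK_RW_SYNC/BLK_RW_ASYNC slot of the per-queue count[]/starved[]/wait[] arrays. A minimal standalone sketch of that classification (the flag values and the tiny request list are invented for the example):

/*
 * Sketch (not kernel code) of the sync/async classification above:
 * a request is "sync" if it is a read, or a write with the sync flag,
 * and the result indexes BLK_RW_SYNC/BLK_RW_ASYNC counters.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_RW       (1u << 0)   /* set for writes (illustrative value) */
#define REQ_RW_SYNC  (1u << 1)   /* sync write (illustrative value)     */

enum { BLK_RW_ASYNC = 0, BLK_RW_SYNC = 1 };

static bool rw_is_sync(unsigned int rw_flags)
{
    return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

int main(void)
{
    int count[2] = { 0, 0 };   /* indexed by BLK_RW_* */
    unsigned int reqs[] = { 0, REQ_RW, REQ_RW | REQ_RW_SYNC };

    for (unsigned int i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++)
        count[rw_is_sync(reqs[i]) ? BLK_RW_SYNC : BLK_RW_ASYNC]++;

    printf("sync=%d async=%d\n", count[BLK_RW_SYNC], count[BLK_RW_ASYNC]);
    return 0;
}
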
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 6e915878e88c..d960889e92ef 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
144 144
145#ifdef __KERNEL__ 145#ifdef __KERNEL__
146#if defined(CONFIG_BLK_DEV_IO_TRACE) 146#if defined(CONFIG_BLK_DEV_IO_TRACE)
147
148#include <linux/sysfs.h>
149
147struct blk_trace { 150struct blk_trace {
148 int trace_state; 151 int trace_state;
149 struct rchan *rchan; 152 struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
194extern int blk_trace_startstop(struct request_queue *q, int start); 197extern int blk_trace_startstop(struct request_queue *q, int start);
195extern int blk_trace_remove(struct request_queue *q); 198extern int blk_trace_remove(struct request_queue *q);
196 199
200extern struct attribute_group blk_trace_attr_group;
201
197#else /* !CONFIG_BLK_DEV_IO_TRACE */ 202#else /* !CONFIG_BLK_DEV_IO_TRACE */
198#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) 203#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
199#define blk_trace_shutdown(q) do { } while (0) 204#define blk_trace_shutdown(q) do { } while (0)
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 455d83219fae..bc3ab7073695 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -146,10 +146,10 @@ extern void *alloc_large_system_hash(const char *tablename,
146 146
147#define HASH_EARLY 0x00000001 /* Allocating during early boot? */ 147#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
148 148
149/* Only NUMA needs hash distribution. 149/* Only NUMA needs hash distribution. 64bit NUMA architectures have
150 * IA64 and x86_64 have sufficient vmalloc space. 150 * sufficient vmalloc space.
151 */ 151 */
152#if defined(CONFIG_NUMA) && (defined(CONFIG_IA64) || defined(CONFIG_X86_64)) 152#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
153#define HASHDIST_DEFAULT 1 153#define HASHDIST_DEFAULT 1
154#else 154#else
155#define HASHDIST_DEFAULT 0 155#define HASHDIST_DEFAULT 0
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index f19fd9045ea0..7b73bb8f1970 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -216,7 +216,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
216 get_block_t *, loff_t *); 216 get_block_t *, loff_t *);
217int generic_cont_expand_simple(struct inode *inode, loff_t size); 217int generic_cont_expand_simple(struct inode *inode, loff_t size);
218int block_commit_write(struct page *page, unsigned from, unsigned to); 218int block_commit_write(struct page *page, unsigned from, unsigned to);
219int block_page_mkwrite(struct vm_area_struct *vma, struct page *page, 219int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
220 get_block_t get_block); 220 get_block_t get_block);
221void block_sync_page(struct page *); 221void block_sync_page(struct page *);
222sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); 222sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
@@ -332,22 +332,10 @@ extern int __set_page_dirty_buffers(struct page *page);
332 332
333static inline void buffer_init(void) {} 333static inline void buffer_init(void) {}
334static inline int try_to_free_buffers(struct page *page) { return 1; } 334static inline int try_to_free_buffers(struct page *page) { return 1; }
335static inline int sync_blockdev(struct block_device *bdev) { return 0; }
336static inline int inode_has_buffers(struct inode *inode) { return 0; } 335static inline int inode_has_buffers(struct inode *inode) { return 0; }
337static inline void invalidate_inode_buffers(struct inode *inode) {} 336static inline void invalidate_inode_buffers(struct inode *inode) {}
338static inline int remove_inode_buffers(struct inode *inode) { return 1; } 337static inline int remove_inode_buffers(struct inode *inode) { return 1; }
339static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } 338static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
340static inline void invalidate_bdev(struct block_device *bdev) {}
341
342static inline struct super_block *freeze_bdev(struct block_device *sb)
343{
344 return NULL;
345}
346
347static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
348{
349 return 0;
350}
351 339
352#endif /* CONFIG_BLOCK */ 340#endif /* CONFIG_BLOCK */
353#endif /* _LINUX_BUFFER_HEAD_H */ 341#endif /* _LINUX_BUFFER_HEAD_H */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 499900d0cee7..665fa70e4094 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -15,6 +15,7 @@
15#include <linux/cgroupstats.h> 15#include <linux/cgroupstats.h>
16#include <linux/prio_heap.h> 16#include <linux/prio_heap.h>
17#include <linux/rwsem.h> 17#include <linux/rwsem.h>
18#include <linux/idr.h>
18 19
19#ifdef CONFIG_CGROUPS 20#ifdef CONFIG_CGROUPS
20 21
@@ -22,6 +23,7 @@ struct cgroupfs_root;
22struct cgroup_subsys; 23struct cgroup_subsys;
23struct inode; 24struct inode;
24struct cgroup; 25struct cgroup;
26struct css_id;
25 27
26extern int cgroup_init_early(void); 28extern int cgroup_init_early(void);
27extern int cgroup_init(void); 29extern int cgroup_init(void);
@@ -47,18 +49,24 @@ enum cgroup_subsys_id {
47 49
48/* Per-subsystem/per-cgroup state maintained by the system. */ 50/* Per-subsystem/per-cgroup state maintained by the system. */
49struct cgroup_subsys_state { 51struct cgroup_subsys_state {
50 /* The cgroup that this subsystem is attached to. Useful 52 /*
53 * The cgroup that this subsystem is attached to. Useful
51 * for subsystems that want to know about the cgroup 54 * for subsystems that want to know about the cgroup
52 * hierarchy structure */ 55 * hierarchy structure
56 */
53 struct cgroup *cgroup; 57 struct cgroup *cgroup;
54 58
55 /* State maintained by the cgroup system to allow subsystems 59 /*
60 * State maintained by the cgroup system to allow subsystems
56 * to be "busy". Should be accessed via css_get(), 61 * to be "busy". Should be accessed via css_get(),
57 * css_tryget() and and css_put(). */ 62 * css_tryget() and and css_put().
63 */
58 64
59 atomic_t refcnt; 65 atomic_t refcnt;
60 66
61 unsigned long flags; 67 unsigned long flags;
68 /* ID for this css, if possible */
69 struct css_id *id;
62}; 70};
63 71
64/* bits in struct cgroup_subsys_state flags field */ 72/* bits in struct cgroup_subsys_state flags field */
@@ -120,19 +128,26 @@ static inline void css_put(struct cgroup_subsys_state *css)
120enum { 128enum {
121 /* Control Group is dead */ 129 /* Control Group is dead */
122 CGRP_REMOVED, 130 CGRP_REMOVED,
123 /* Control Group has previously had a child cgroup or a task, 131 /*
124 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */ 132 * Control Group has previously had a child cgroup or a task,
133 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
134 */
125 CGRP_RELEASABLE, 135 CGRP_RELEASABLE,
126 /* Control Group requires release notifications to userspace */ 136 /* Control Group requires release notifications to userspace */
127 CGRP_NOTIFY_ON_RELEASE, 137 CGRP_NOTIFY_ON_RELEASE,
138 /*
 139 * A thread in rmdir() is waiting for this cgroup.
140 */
141 CGRP_WAIT_ON_RMDIR,
128}; 142};
129 143
130struct cgroup { 144struct cgroup {
131 unsigned long flags; /* "unsigned long" so bitops work */ 145 unsigned long flags; /* "unsigned long" so bitops work */
132 146
133 /* count users of this cgroup. >0 means busy, but doesn't 147 /*
134 * necessarily indicate the number of tasks in the 148 * count users of this cgroup. >0 means busy, but doesn't
135 * cgroup */ 149 * necessarily indicate the number of tasks in the cgroup
150 */
136 atomic_t count; 151 atomic_t count;
137 152
138 /* 153 /*
@@ -142,7 +157,7 @@ struct cgroup {
142 struct list_head sibling; /* my parent's children */ 157 struct list_head sibling; /* my parent's children */
143 struct list_head children; /* my children */ 158 struct list_head children; /* my children */
144 159
145 struct cgroup *parent; /* my parent */ 160 struct cgroup *parent; /* my parent */
146 struct dentry *dentry; /* cgroup fs entry, RCU protected */ 161 struct dentry *dentry; /* cgroup fs entry, RCU protected */
147 162
148 /* Private pointers for each registered subsystem */ 163 /* Private pointers for each registered subsystem */
@@ -177,11 +192,12 @@ struct cgroup {
177 struct rcu_head rcu_head; 192 struct rcu_head rcu_head;
178}; 193};
179 194
180/* A css_set is a structure holding pointers to a set of 195/*
196 * A css_set is a structure holding pointers to a set of
181 * cgroup_subsys_state objects. This saves space in the task struct 197 * cgroup_subsys_state objects. This saves space in the task struct
182 * object and speeds up fork()/exit(), since a single inc/dec and a 198 * object and speeds up fork()/exit(), since a single inc/dec and a
183 * list_add()/del() can bump the reference count on the entire 199 * list_add()/del() can bump the reference count on the entire cgroup
184 * cgroup set for a task. 200 * set for a task.
185 */ 201 */
186 202
187struct css_set { 203struct css_set {
@@ -226,13 +242,8 @@ struct cgroup_map_cb {
226 void *state; 242 void *state;
227}; 243};
228 244
229/* struct cftype: 245/*
230 * 246 * struct cftype: handler definitions for cgroup control files
231 * The files in the cgroup filesystem mostly have a very simple read/write
232 * handling, some common function will take care of it. Nevertheless some cases
233 * (read tasks) are special and therefore I define this structure for every
234 * kind of file.
235 *
236 * 247 *
237 * When reading/writing to a file: 248 * When reading/writing to a file:
238 * - the cgroup to use is file->f_dentry->d_parent->d_fsdata 249 * - the cgroup to use is file->f_dentry->d_parent->d_fsdata
@@ -241,10 +252,17 @@ struct cgroup_map_cb {
241 252
242#define MAX_CFTYPE_NAME 64 253#define MAX_CFTYPE_NAME 64
243struct cftype { 254struct cftype {
244 /* By convention, the name should begin with the name of the 255 /*
245 * subsystem, followed by a period */ 256 * By convention, the name should begin with the name of the
257 * subsystem, followed by a period
258 */
246 char name[MAX_CFTYPE_NAME]; 259 char name[MAX_CFTYPE_NAME];
247 int private; 260 int private;
261 /*
262 * If not 0, file mode is set to this value, otherwise it will
263 * be figured out automatically
264 */
265 mode_t mode;
248 266
249 /* 267 /*
250 * If non-zero, defines the maximum length of string that can 268 * If non-zero, defines the maximum length of string that can
@@ -319,15 +337,20 @@ struct cgroup_scanner {
319 void (*process_task)(struct task_struct *p, 337 void (*process_task)(struct task_struct *p,
320 struct cgroup_scanner *scan); 338 struct cgroup_scanner *scan);
321 struct ptr_heap *heap; 339 struct ptr_heap *heap;
340 void *data;
322}; 341};
323 342
324/* Add a new file to the given cgroup directory. Should only be 343/*
325 * called by subsystems from within a populate() method */ 344 * Add a new file to the given cgroup directory. Should only be
345 * called by subsystems from within a populate() method
346 */
326int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, 347int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
327 const struct cftype *cft); 348 const struct cftype *cft);
328 349
329/* Add a set of new files to the given cgroup directory. Should 350/*
330 * only be called by subsystems from within a populate() method */ 351 * Add a set of new files to the given cgroup directory. Should
352 * only be called by subsystems from within a populate() method
353 */
331int cgroup_add_files(struct cgroup *cgrp, 354int cgroup_add_files(struct cgroup *cgrp,
332 struct cgroup_subsys *subsys, 355 struct cgroup_subsys *subsys,
333 const struct cftype cft[], 356 const struct cftype cft[],
@@ -339,15 +362,18 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
339 362
340int cgroup_task_count(const struct cgroup *cgrp); 363int cgroup_task_count(const struct cgroup *cgrp);
341 364
342/* Return true if the cgroup is a descendant of the current cgroup */ 365/* Return true if cgrp is a descendant of the task's cgroup */
343int cgroup_is_descendant(const struct cgroup *cgrp); 366int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
344 367
345/* Control Group subsystem type. See Documentation/cgroups.txt for details */ 368/*
369 * Control Group subsystem type.
370 * See Documentation/cgroups/cgroups.txt for details
371 */
346 372
347struct cgroup_subsys { 373struct cgroup_subsys {
348 struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss, 374 struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
349 struct cgroup *cgrp); 375 struct cgroup *cgrp);
350 void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); 376 int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
351 void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); 377 void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
352 int (*can_attach)(struct cgroup_subsys *ss, 378 int (*can_attach)(struct cgroup_subsys *ss,
353 struct cgroup *cgrp, struct task_struct *tsk); 379 struct cgroup *cgrp, struct task_struct *tsk);
@@ -364,6 +390,11 @@ struct cgroup_subsys {
364 int active; 390 int active;
365 int disabled; 391 int disabled;
366 int early_init; 392 int early_init;
393 /*
394 * True if this subsys uses ID. ID is not available before cgroup_init()
395 * (not available in early_init time.)
396 */
397 bool use_id;
367#define MAX_CGROUP_TYPE_NAMELEN 32 398#define MAX_CGROUP_TYPE_NAMELEN 32
368 const char *name; 399 const char *name;
369 400
@@ -386,6 +417,9 @@ struct cgroup_subsys {
386 */ 417 */
387 struct cgroupfs_root *root; 418 struct cgroupfs_root *root;
388 struct list_head sibling; 419 struct list_head sibling;
420 /* used when use_id == true */
421 struct idr idr;
422 spinlock_t id_lock;
389}; 423};
390 424
391#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys; 425#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
@@ -419,7 +453,8 @@ struct cgroup_iter {
419 struct list_head *task; 453 struct list_head *task;
420}; 454};
421 455
422/* To iterate across the tasks in a cgroup: 456/*
457 * To iterate across the tasks in a cgroup:
423 * 458 *
424 * 1) call cgroup_iter_start to intialize an iterator 459 * 1) call cgroup_iter_start to intialize an iterator
425 * 460 *
@@ -428,9 +463,10 @@ struct cgroup_iter {
428 * 463 *
429 * 3) call cgroup_iter_end() to destroy the iterator. 464 * 3) call cgroup_iter_end() to destroy the iterator.
430 * 465 *
431 * Or, call cgroup_scan_tasks() to iterate through every task in a cpuset. 466 * Or, call cgroup_scan_tasks() to iterate through every task in a
432 * - cgroup_scan_tasks() holds the css_set_lock when calling the test_task() 467 * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
433 * callback, but not while calling the process_task() callback. 468 * the test_task() callback, but not while calling the process_task()
469 * callback.
434 */ 470 */
435void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it); 471void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
436struct task_struct *cgroup_iter_next(struct cgroup *cgrp, 472struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
@@ -439,6 +475,44 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
439int cgroup_scan_tasks(struct cgroup_scanner *scan); 475int cgroup_scan_tasks(struct cgroup_scanner *scan);
440int cgroup_attach_task(struct cgroup *, struct task_struct *); 476int cgroup_attach_task(struct cgroup *, struct task_struct *);
441 477
478/*
479 * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
480 * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
481 * CSS ID is assigned at cgroup allocation (create) automatically
482 * and removed when subsys calls free_css_id() function. This is because
483 * the lifetime of cgroup_subsys_state is subsys's matter.
484 *
485 * Looking up and scanning function should be called under rcu_read_lock().
486 * Taking cgroup_mutex()/hierarchy_mutex() is not necessary for following calls.
487 * But the css returned by this routine can be "not populated yet" or "being
488 * destroyed". The caller should check css and cgroup's status.
489 */
490
491/*
 492 * Typically called at ->destroy(), or somewhere the subsys frees
493 * cgroup_subsys_state.
494 */
495void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
496
497/* Find a cgroup_subsys_state which has given ID */
498
499struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
500
501/*
502 * Get a cgroup whose id is greater than or equal to id under tree of root.
503 * Returning a cgroup_subsys_state or NULL.
504 */
505struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id,
506 struct cgroup_subsys_state *root, int *foundid);
507
508/* Returns true if root is ancestor of cg */
509bool css_is_ancestor(struct cgroup_subsys_state *cg,
510 const struct cgroup_subsys_state *root);
511
512/* Get id and depth of css */
513unsigned short css_id(struct cgroup_subsys_state *css);
514unsigned short css_depth(struct cgroup_subsys_state *css);
515
442#else /* !CONFIG_CGROUPS */ 516#else /* !CONFIG_CGROUPS */
443 517
444static inline int cgroup_init_early(void) { return 0; } 518static inline int cgroup_init_early(void) { return 0; }
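
The new CSS ID interface above (css_lookup(), css_get_next(), css_id(), css_depth()) lets a subsystem with use_id set walk its cgroup_subsys_state objects by ID under RCU. A hedged usage sketch, not taken from the patch — the walker function and its printout are assumptions built only on the declarations and the comment's locking rule:

/*
 * Hedged fragment (assumes kernel context): scan all css objects of a
 * subsystem by ID. The css returned may still be half-created or dying,
 * per the comment above, so real callers must check its state.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/cgroup.h>

static void walk_css_ids(struct cgroup_subsys *ss,
                         struct cgroup_subsys_state *root)
{
    int id = 1;          /* scan upward from the first possible id */
    int found;

    rcu_read_lock();     /* lookups/scans must run under RCU */
    for (;;) {
        struct cgroup_subsys_state *css;

        css = css_get_next(ss, id, root, &found);
        if (!css)
            break;
        printk(KERN_INFO "css id=%d depth=%d\n",
               css_id(css), css_depth(css));
        id = found + 1;  /* continue after the one just returned */
    }
    rcu_read_unlock();
}
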
diff --git a/include/linux/compat.h b/include/linux/compat.h
index b880864672de..f2ded21f9a3c 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -191,6 +191,12 @@ asmlinkage ssize_t compat_sys_readv(unsigned long fd,
191 const struct compat_iovec __user *vec, unsigned long vlen); 191 const struct compat_iovec __user *vec, unsigned long vlen);
192asmlinkage ssize_t compat_sys_writev(unsigned long fd, 192asmlinkage ssize_t compat_sys_writev(unsigned long fd,
193 const struct compat_iovec __user *vec, unsigned long vlen); 193 const struct compat_iovec __user *vec, unsigned long vlen);
194asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
195 const struct compat_iovec __user *vec,
196 unsigned long vlen, u32 pos_low, u32 pos_high);
197asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
198 const struct compat_iovec __user *vec,
199 unsigned long vlen, u32 pos_low, u32 pos_high);
194 200
195int compat_do_execve(char * filename, compat_uptr_t __user *argv, 201int compat_do_execve(char * filename, compat_uptr_t __user *argv,
196 compat_uptr_t __user *envp, struct pt_regs * regs); 202 compat_uptr_t __user *envp, struct pt_regs * regs);
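
compat_sys_preadv()/compat_sys_pwritev() above take the 64-bit file position as two u32 halves (pos_low/pos_high), since a 32-bit syscall ABI cannot pass a 64-bit argument directly. A standalone sketch of the recombination arithmetic (the exact helper used in fs/compat.c may differ):

/*
 * Sketch only: rejoin a 64-bit offset from the two 32-bit halves a
 * compat syscall receives.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t join_pos(uint32_t pos_low, uint32_t pos_high)
{
    return ((uint64_t)pos_high << 32) | pos_low;
}

int main(void)
{
    uint64_t pos = 0x123456789abcdef0ULL;
    uint32_t lo = (uint32_t)pos, hi = (uint32_t)(pos >> 32);

    printf("rejoined ok: %d\n", join_pos(lo, hi) == pos);
    return 0;
}
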
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d95da1020f1c..6faa7e549de4 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -68,6 +68,7 @@ struct ftrace_branch_data {
68 unsigned long miss; 68 unsigned long miss;
69 unsigned long hit; 69 unsigned long hit;
70 }; 70 };
71 unsigned long miss_hit[2];
71 }; 72 };
72}; 73};
73 74
@@ -125,10 +126,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
125 .line = __LINE__, \ 126 .line = __LINE__, \
126 }; \ 127 }; \
127 ______r = !!(cond); \ 128 ______r = !!(cond); \
128 if (______r) \ 129 ______f.miss_hit[______r]++; \
129 ______f.hit++; \
130 else \
131 ______f.miss++; \
132 ______r; \ 130 ______r; \
133 })) 131 }))
134#endif /* CONFIG_PROFILE_ALL_BRANCHES */ 132#endif /* CONFIG_PROFILE_ALL_BRANCHES */
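
The compiler.h hunk above replaces the if/else hit/miss counters with a two-element miss_hit[] array indexed by the truth value of the condition, so the profiling macro itself no longer contains a branch. A standalone sketch of the same idea (the GCC statement-expression form mirrors the original macro):

/*
 * Sketch only: count branch outcomes by indexing an array with the
 * 0/1 truth value instead of branching on it.
 */
#include <stdio.h>

struct branch_data {
    unsigned long miss_hit[2];   /* [0] = miss, [1] = hit */
};

static struct branch_data bd;

#define profiled_if(cond) ({            \
    int ______r = !!(cond);             \
    bd.miss_hit[______r]++;             \
    ______r;                            \
})

int main(void)
{
    for (int i = 0; i < 10; i++)
        if (profiled_if(i % 3 == 0))
            ;   /* body irrelevant; only the counters matter here */

    printf("miss=%lu hit=%lu\n", bd.miss_hit[0], bd.miss_hit[1]);
    return 0;
}
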
diff --git a/include/linux/connector.h b/include/linux/connector.h
index fc65d219d88c..b9966e64604e 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -39,8 +39,10 @@
39#define CN_IDX_V86D 0x4 39#define CN_IDX_V86D 0x4
40#define CN_VAL_V86D_UVESAFB 0x1 40#define CN_VAL_V86D_UVESAFB 0x1
41#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ 41#define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */
42#define CN_DST_IDX 0x6
43#define CN_DST_VAL 0x1
42 44
43#define CN_NETLINK_USERS 6 45#define CN_NETLINK_USERS 7
44 46
45/* 47/*
46 * Maximum connector's message size. 48 * Maximum connector's message size.
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index c2747ac2ae43..2643d848df90 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -23,7 +23,6 @@
23#include <linux/node.h> 23#include <linux/node.h>
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25#include <linux/cpumask.h> 25#include <linux/cpumask.h>
26#include <linux/mutex.h>
27 26
28struct cpu { 27struct cpu {
29 int node_id; /* The node which contains the CPU */ 28 int node_id; /* The node which contains the CPU */
@@ -103,16 +102,6 @@ extern struct sysdev_class cpu_sysdev_class;
103#ifdef CONFIG_HOTPLUG_CPU 102#ifdef CONFIG_HOTPLUG_CPU
104/* Stop CPUs going up and down. */ 103/* Stop CPUs going up and down. */
105 104
106static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
107{
108 mutex_lock(cpu_hp_mutex);
109}
110
111static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
112{
113 mutex_unlock(cpu_hp_mutex);
114}
115
116extern void get_online_cpus(void); 105extern void get_online_cpus(void);
117extern void put_online_cpus(void); 106extern void put_online_cpus(void);
118#define hotcpu_notifier(fn, pri) { \ 107#define hotcpu_notifier(fn, pri) { \
@@ -126,11 +115,6 @@ int cpu_down(unsigned int cpu);
126 115
127#else /* CONFIG_HOTPLUG_CPU */ 116#else /* CONFIG_HOTPLUG_CPU */
128 117
129static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
130{ }
131static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
132{ }
133
134#define get_online_cpus() do { } while (0) 118#define get_online_cpus() do { } while (0)
135#define put_online_cpus() do { } while (0) 119#define put_online_cpus() do { } while (0)
136#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) 120#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2e0d79678deb..05ea1dd7d681 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -12,6 +12,7 @@
12#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/nodemask.h> 13#include <linux/nodemask.h>
14#include <linux/cgroup.h> 14#include <linux/cgroup.h>
15#include <linux/mm.h>
15 16
16#ifdef CONFIG_CPUSETS 17#ifdef CONFIG_CPUSETS
17 18
@@ -29,19 +30,29 @@ void cpuset_init_current_mems_allowed(void);
29void cpuset_update_task_memory_state(void); 30void cpuset_update_task_memory_state(void);
30int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); 31int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
31 32
32extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask); 33extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
33extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask); 34extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
34 35
35static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) 36static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
36{ 37{
37 return number_of_cpusets <= 1 || 38 return number_of_cpusets <= 1 ||
38 __cpuset_zone_allowed_softwall(z, gfp_mask); 39 __cpuset_node_allowed_softwall(node, gfp_mask);
39} 40}
40 41
41static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) 42static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
42{ 43{
43 return number_of_cpusets <= 1 || 44 return number_of_cpusets <= 1 ||
44 __cpuset_zone_allowed_hardwall(z, gfp_mask); 45 __cpuset_node_allowed_hardwall(node, gfp_mask);
46}
47
48static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
49{
50 return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
51}
52
53static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
54{
55 return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
45} 56}
46 57
47extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, 58extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -112,6 +123,16 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
112 return 1; 123 return 1;
113} 124}
114 125
126static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
127{
128 return 1;
129}
130
131static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
132{
133 return 1;
134}
135
115static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) 136static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
116{ 137{
117 return 1; 138 return 1;
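
The cpuset.h change above makes the node-based checks the primitives and keeps the old zone-based entry points as one-line wrappers that translate a zone to its node via zone_to_nid(). A minimal standalone sketch of that wrapper pattern (the types and the allow policy are invented):

/*
 * Sketch only: legacy zone-based API kept as a thin wrapper around a
 * new node-based primitive.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone { int nid; };                 /* stand-in for struct zone */

static int zone_to_nid(const struct zone *z) { return z->nid; }

static bool node_allowed(int node)        /* new primitive */
{
    return node >= 0 && node < 2;         /* pretend nodes 0 and 1 are allowed */
}

static bool zone_allowed(const struct zone *z)  /* legacy wrapper */
{
    return node_allowed(zone_to_nid(z));
}

int main(void)
{
    struct zone z = { .nid = 3 };
    printf("zone on node %d allowed: %d\n", z.nid, zone_allowed(&z));
    return 0;
}
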
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index af0e01d4c663..eb5c2ba2f81a 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
71struct dentry *debugfs_create_blob(const char *name, mode_t mode, 71struct dentry *debugfs_create_blob(const char *name, mode_t mode,
72 struct dentry *parent, 72 struct dentry *parent,
73 struct debugfs_blob_wrapper *blob); 73 struct debugfs_blob_wrapper *blob);
74
75bool debugfs_initialized(void);
76
74#else 77#else
75 78
76#include <linux/err.h> 79#include <linux/err.h>
@@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
183 return ERR_PTR(-ENODEV); 186 return ERR_PTR(-ENODEV);
184} 187}
185 188
189static inline bool debugfs_initialized(void)
190{
191 return false;
192}
193
186#endif 194#endif
187 195
188#endif 196#endif
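
debugfs_initialized() above lets code that may run before the debugfs core is up skip creating its files rather than treating the failure as fatal, and the !CONFIG_DEBUG_FS stub simply returns false. A hedged kernel-style fragment — the mydrv names are invented for illustration:

/*
 * Hedged fragment (assumes kernel context): only create debugfs
 * entries once the debugfs core reports it is initialized.
 */
#include <linux/debugfs.h>

static struct dentry *mydrv_dir;

static void mydrv_debugfs_setup(void)
{
    if (!debugfs_initialized())
        return;                         /* debugfs core not up yet */

    mydrv_dir = debugfs_create_dir("mydrv", NULL);
}
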
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 8209e08969f9..66ec05a57955 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -139,6 +139,9 @@ struct target_type {
139 dm_ioctl_fn ioctl; 139 dm_ioctl_fn ioctl;
140 dm_merge_fn merge; 140 dm_merge_fn merge;
141 dm_busy_fn busy; 141 dm_busy_fn busy;
142
143 /* For internal device-mapper use. */
144 struct list_head list;
142}; 145};
143 146
144struct io_restrictions { 147struct io_restrictions {
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 600c5fb2daad..5e8b11d88f6f 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -28,6 +28,9 @@ struct dm_dirty_log_type {
28 const char *name; 28 const char *name;
29 struct module *module; 29 struct module *module;
30 30
31 /* For internal device-mapper use */
32 struct list_head list;
33
31 int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti, 34 int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
32 unsigned argc, char **argv); 35 unsigned argc, char **argv);
33 void (*dtr)(struct dm_dirty_log *log); 36 void (*dtr)(struct dm_dirty_log *log);
@@ -113,6 +116,16 @@ struct dm_dirty_log_type {
113 */ 116 */
114 int (*status)(struct dm_dirty_log *log, status_type_t status_type, 117 int (*status)(struct dm_dirty_log *log, status_type_t status_type,
115 char *result, unsigned maxlen); 118 char *result, unsigned maxlen);
119
120 /*
121 * is_remote_recovering is necessary for cluster mirroring. It provides
122 * a way to detect recovery on another node, so we aren't writing
123 * concurrently. This function is likely to block (when a cluster log
124 * is used).
125 *
126 * Returns: 0, 1
127 */
128 int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
116}; 129};
117 130
118int dm_dirty_log_type_register(struct dm_dirty_log_type *type); 131int dm_dirty_log_type_register(struct dm_dirty_log_type *type);
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index af1dab41674b..1a455f1f86d7 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -11,6 +11,7 @@
11 11
12#define DMA_PTE_READ (1) 12#define DMA_PTE_READ (1)
13#define DMA_PTE_WRITE (2) 13#define DMA_PTE_WRITE (2)
14#define DMA_PTE_SNP (1 << 11)
14 15
15struct intel_iommu; 16struct intel_iommu;
16struct dmar_domain; 17struct dmar_domain;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1956c8d46d32..2e2aa3df170c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -23,9 +23,6 @@
23 23
24#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/uio.h> 25#include <linux/uio.h>
26#include <linux/kref.h>
27#include <linux/completion.h>
28#include <linux/rcupdate.h>
29#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
30 27
31/** 28/**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
205/** 202/**
206 * struct dma_device - info on the entity supplying DMA services 203 * struct dma_device - info on the entity supplying DMA services
207 * @chancnt: how many DMA channels are supported 204 * @chancnt: how many DMA channels are supported
205 * @privatecnt: how many DMA channels are requested by dma_request_channel
208 * @channels: the list of struct dma_chan 206 * @channels: the list of struct dma_chan
209 * @global_node: list_head for global dma_device_list 207 * @global_node: list_head for global dma_device_list
210 * @cap_mask: one or more dma_capability flags 208 * @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
227struct dma_device { 225struct dma_device {
228 226
229 unsigned int chancnt; 227 unsigned int chancnt;
228 unsigned int privatecnt;
230 struct list_head channels; 229 struct list_head channels;
231 struct list_head global_node; 230 struct list_head global_node;
232 dma_cap_mask_t cap_mask; 231 dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
291} 290}
292#endif 291#endif
293 292
293#ifdef CONFIG_ASYNC_TX_DMA
294#define async_dmaengine_get() dmaengine_get()
295#define async_dmaengine_put() dmaengine_put()
296#define async_dma_find_channel(type) dma_find_channel(type)
297#else
298static inline void async_dmaengine_get(void)
299{
300}
301static inline void async_dmaengine_put(void)
302{
303}
304static inline struct dma_chan *
305async_dma_find_channel(enum dma_transaction_type type)
306{
307 return NULL;
308}
309#endif
310
294dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, 311dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
295 void *dest, void *src, size_t len); 312 void *dest, void *src, size_t len);
296dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, 313dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
337 set_bit(tx_type, dstp->bits); 354 set_bit(tx_type, dstp->bits);
338} 355}
339 356
357#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
358static inline void
359__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
360{
361 clear_bit(tx_type, dstp->bits);
362}
363
340#define dma_cap_zero(mask) __dma_cap_zero(&(mask)) 364#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
341static inline void __dma_cap_zero(dma_cap_mask_t *dstp) 365static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
342{ 366{
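
dma_cap_clear() above completes the capability-mask helpers alongside the existing dma_cap_zero()/dma_cap_set(). A short kernel-style fragment showing how a mask for channel filtering might be built and trimmed (illustrative only; DMA_MEMCPY and DMA_XOR are existing dma_transaction_type values):

/*
 * Hedged fragment (assumes kernel context): build a capability mask,
 * then drop one capability with the new dma_cap_clear().
 */
#include <linux/dmaengine.h>

static void build_mask_example(void)
{
    dma_cap_mask_t mask;

    dma_cap_zero(mask);             /* start empty             */
    dma_cap_set(DMA_MEMCPY, mask);  /* want memcpy offload     */
    dma_cap_set(DMA_XOR, mask);     /* ...and xor              */
    dma_cap_clear(DMA_XOR, mask);   /* changed our mind: drop xor */
}
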
diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h
deleted file mode 100644
index d3c65e48a2e7..000000000000
--- a/include/linux/ds1wm.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/* platform data for the DS1WM driver */
2
3struct ds1wm_platform_data {
4 int bus_shift; /* number of shifts needed to calculate the
5 * offset between DS1WM registers;
6 * e.g. on h5xxx and h2200 this is 2
7 * (registers aligned to 4-byte boundaries),
8 * while on hx4700 this is 1 */
9 int active_high;
10 void (*enable)(struct platform_device *pdev);
11 void (*disable)(struct platform_device *pdev);
12};
diff --git a/include/linux/dst.h b/include/linux/dst.h
new file mode 100644
index 000000000000..e26fed84b1aa
--- /dev/null
+++ b/include/linux/dst.h
@@ -0,0 +1,587 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __DST_H
17#define __DST_H
18
19#include <linux/types.h>
20#include <linux/connector.h>
21
22#define DST_NAMELEN 32
23#define DST_NAME "dst"
24
25enum {
26 /* Remove node with given id from storage */
27 DST_DEL_NODE = 0,
28 /* Add remote node with given id to the storage */
29 DST_ADD_REMOTE,
30 /* Add local node with given id to the storage to be exported and used by remote peers */
31 DST_ADD_EXPORT,
32 /* Crypto initialization command (hash/cipher used to protect the connection) */
33 DST_CRYPTO,
34 /* Security attributes for given connection (permissions for example) */
35 DST_SECURITY,
36 /* Register given node in the block layer subsystem */
37 DST_START,
38 DST_CMD_MAX
39};
40
41struct dst_ctl
42{
43 /* Storage name */
44 char name[DST_NAMELEN];
45 /* Command flags */
46 __u32 flags;
47 /* Command itself (see above) */
48 __u32 cmd;
49 /* Maximum number of pages per single request in this device */
50 __u32 max_pages;
51 /* Stale/error transaction scanning timeout in milliseconds */
52 __u32 trans_scan_timeout;
53 /* Maximum number of retry sends before completing transaction as broken */
54 __u32 trans_max_retries;
55 /* Storage size */
56 __u64 size;
57};
58
59/* Reply command carries completion status */
60struct dst_ctl_ack
61{
62 struct cn_msg msg;
63 int error;
64 int unused[3];
65};
66
67/*
 68 * Unfortunately the socket address structure is not exported to userspace
69 * and is redefined there.
70 */
71#define SADDR_MAX_DATA 128
72
73struct saddr {
74 /* address family, AF_xxx */
75 unsigned short sa_family;
76 /* 14 bytes of protocol address */
77 char sa_data[SADDR_MAX_DATA];
78 /* Number of bytes used in sa_data */
79 unsigned short sa_data_len;
80};
81
82/* Address structure */
83struct dst_network_ctl
84{
85 /* Socket type: datagram, stream...*/
86 unsigned int type;
87 /* Let me guess, is it a Jupiter diameter? */
88 unsigned int proto;
89 /* Peer's address */
90 struct saddr addr;
91};
92
93struct dst_crypto_ctl
94{
95 /* Cipher and hash names */
96 char cipher_algo[DST_NAMELEN];
97 char hash_algo[DST_NAMELEN];
98
99 /* Key sizes. Can be zero for digest for example */
100 unsigned int cipher_keysize, hash_keysize;
101 /* Alignment. Calculated by the DST itself. */
102 unsigned int crypto_attached_size;
103 /* Number of threads to perform crypto operations */
104 int thread_num;
105};
106
107/* Export security attributes have these bits checked when a client connects */
108#define DST_PERM_READ (1<<0)
109#define DST_PERM_WRITE (1<<1)
110
111/*
112 * Right now it is simple model, where each remote address
113 * is assigned to set of permissions it is allowed to perform.
114 * In real world block device does not know anything but
115 * reading and writing, so it should be more than enough.
116 */
117struct dst_secure_user
118{
119 unsigned int permissions;
120 struct saddr addr;
121};
122
123/*
124 * Export control command: device to export and network address to accept
125 * clients to work with given device
126 */
127struct dst_export_ctl
128{
129 char device[DST_NAMELEN];
130 struct dst_network_ctl ctl;
131};
132
133enum {
134 DST_CFG = 1, /* Request remote configuration */
135 DST_IO, /* IO command */
136 DST_IO_RESPONSE, /* IO response */
137 DST_PING, /* Keepalive message */
138 DST_NCMD_MAX,
139};
140
141struct dst_cmd
142{
143 /* Network command itself, see above */
144 __u32 cmd;
145 /*
146 * Size of the attached data
147 * (in most cases, for READ command it means how many bytes were requested)
148 */
149 __u32 size;
150 /* Crypto size: number of attached bytes with digest/hmac */
151 __u32 csize;
152 /* Here we can carry secret data */
153 __u32 reserved;
154 /* Read/write bits, see how they are encoded in bio structure */
155 __u64 rw;
156 /* BIO flags */
157 __u64 flags;
158 /* Unique command id (like transaction ID) */
159 __u64 id;
160 /* Sector to start IO from */
161 __u64 sector;
162 /* Hash data is placed after this header */
163 __u8 hash[0];
164};
165
166/*
167 * Convert command to/from network byte order.
168 * We do not use hton*() functions, since there is
169 * no 64-bit implementation.
170 */
171static inline void dst_convert_cmd(struct dst_cmd *c)
172{
173 c->cmd = __cpu_to_be32(c->cmd);
174 c->csize = __cpu_to_be32(c->csize);
175 c->size = __cpu_to_be32(c->size);
176 c->sector = __cpu_to_be64(c->sector);
177 c->id = __cpu_to_be64(c->id);
178 c->flags = __cpu_to_be64(c->flags);
179 c->rw = __cpu_to_be64(c->rw);
180}
181
182/* Transaction id */
183typedef __u64 dst_gen_t;
184
185#ifdef __KERNEL__
186
187#include <linux/blkdev.h>
188#include <linux/bio.h>
189#include <linux/device.h>
190#include <linux/mempool.h>
191#include <linux/net.h>
192#include <linux/poll.h>
193#include <linux/rbtree.h>
194
195#ifdef CONFIG_DST_DEBUG
196#define dprintk(f, a...) printk(KERN_NOTICE f, ##a)
197#else
198static inline void __attribute__ ((format (printf, 1, 2)))
199 dprintk(const char *fmt, ...) {}
200#endif
201
202struct dst_node;
203
204struct dst_trans
205{
206 /* DST node we are working with */
207 struct dst_node *n;
208
209 /* Entry inside transaction tree */
210 struct rb_node trans_entry;
211
212 /* Merlin kills this transaction when this memory cell equals zero */
213 atomic_t refcnt;
214
215 /* How this transaction should be processed by crypto engine */
216 short enc;
217 /* How many times this transaction was resent */
218 short retries;
219 /* Completion status */
220 int error;
221
222 /* When did we send it to the remote peer */
223 long send_time;
224
225 /* My name is...
 226 * Well, computers do not speak, they have a unique id instead */
227 dst_gen_t gen;
228
229 /* Block IO we are working with */
230 struct bio *bio;
231
232 /* Network command for above block IO request */
233 struct dst_cmd cmd;
234};
235
236struct dst_crypto_engine
237{
238 /* What should we do with all block requests */
239 struct crypto_hash *hash;
240 struct crypto_ablkcipher *cipher;
241
242 /* Pool of pages used to encrypt data into before sending */
243 int page_num;
244 struct page **pages;
245
246 /* What to do with current request */
247 int enc;
248 /* Who we are and where do we go */
249 struct scatterlist *src, *dst;
250
251 /* Maximum timeout waiting for encryption to be completed */
252 long timeout;
253 /* IV is a 64-bit sequential counter */
254 u64 iv;
255
256 /* Secret data */
257 void *private;
258
259 /* Cached temporary data lives here */
260 int size;
261 void *data;
262};
263
264struct dst_state
265{
266 /* The main state protection */
267 struct mutex state_lock;
268
269 /* Polling machinery for sockets */
270 wait_queue_t wait;
271 wait_queue_head_t *whead;
272 /* Most of events are being waited here */
273 wait_queue_head_t thread_wait;
274
275 /* Who owns this? */
276 struct dst_node *node;
277
278 /* Network address for this state */
279 struct dst_network_ctl ctl;
280
281 /* Permissions to work with: read-only or rw connection */
282 u32 permissions;
283
284 /* Called when we need to clean private data */
285 void (* cleanup)(struct dst_state *st);
286
287 /* Used by the server: BIO completion queues BIOs here */
288 struct list_head request_list;
289 spinlock_t request_lock;
290
291 /* Guess what? No, it is not number of planets */
292 atomic_t refcnt;
293
 294 /* This flag is set when the connection should be dropped */
295 int need_exit;
296
297 /*
298 * Socket to work with. Second pointer is used for
299 * lockless check if socket was changed before performing
300 * next action (like working with cached polling result)
301 */
302 struct socket *socket, *read_socket;
303
304 /* Cached preallocated data */
305 void *data;
306 unsigned int size;
307
308 /* Currently processed command */
309 struct dst_cmd cmd;
310};
311
312struct dst_info
313{
314 /* Device size */
315 u64 size;
316
317 /* Local device name for export devices */
318 char local[DST_NAMELEN];
319
320 /* Network setup */
321 struct dst_network_ctl net;
322
323 /* Sysfs bits use this */
324 struct device device;
325};
326
327struct dst_node
328{
329 struct list_head node_entry;
330
331 /* Hi, my name is stored here */
332 char name[DST_NAMELEN];
333 /* My cache name is stored here */
334 char cache_name[DST_NAMELEN];
335
336 /* Block device attached to given node.
337 * Only valid for exporting nodes */
338 struct block_device *bdev;
339 /* Network state machine for given peer */
340 struct dst_state *state;
341
342 /* Block IO machinery */
343 struct request_queue *queue;
344 struct gendisk *disk;
345
346 /* Number of threads in processing pool */
347 int thread_num;
348 /* Maximum number of pages in single IO */
349 int max_pages;
350
351 /* I'm that big in bytes */
352 loff_t size;
353
354 /* Exported to userspace node information */
355 struct dst_info *info;
356
357 /*
358 * Security attribute list.
359 * Used only by exporting node currently.
360 */
361 struct list_head security_list;
362 struct mutex security_lock;
363
364 /*
 365 * When this underflows below zero, university collapses.
366 * But this will not happen, since node will be freed,
367 * when reference counter reaches zero.
368 */
369 atomic_t refcnt;
370
371 /* How precisely should I be started? */
372 int (*start)(struct dst_node *);
373
374 /* Crypto capabilities */
375 struct dst_crypto_ctl crypto;
376 u8 *hash_key;
377 u8 *cipher_key;
378
379 /* Pool of processing thread */
380 struct thread_pool *pool;
381
382 /* Transaction IDs live here */
383 atomic_long_t gen;
384
385 /*
386 * How frequently and how many times transaction
387 * tree should be scanned to drop stale objects.
388 */
389 long trans_scan_timeout;
390 int trans_max_retries;
391
392 /* Small gnomes live here */
393 struct rb_root trans_root;
394 struct mutex trans_lock;
395
396 /*
397 * Transaction cache/memory pool.
398 * It is big enough to contain not only transaction
399 * itself, but additional crypto data (digest/hmac).
400 */
401 struct kmem_cache *trans_cache;
402 mempool_t *trans_pool;
403
404 /* This entity scans transaction tree */
405 struct delayed_work trans_work;
406
407 wait_queue_head_t wait;
408};
409
410/* Kernel representation of the security attribute */
411struct dst_secure
412{
413 struct list_head sec_entry;
414 struct dst_secure_user sec;
415};
416
417int dst_process_bio(struct dst_node *n, struct bio *bio);
418
419int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r);
420int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le);
421
422static inline struct dst_state *dst_state_get(struct dst_state *st)
423{
424 BUG_ON(atomic_read(&st->refcnt) == 0);
425 atomic_inc(&st->refcnt);
426 return st;
427}
428
429void dst_state_put(struct dst_state *st);
430
431struct dst_state *dst_state_alloc(struct dst_node *n);
432int dst_state_socket_create(struct dst_state *st);
433void dst_state_socket_release(struct dst_state *st);
434
435void dst_state_exit_connected(struct dst_state *st);
436
437int dst_state_schedule_receiver(struct dst_state *st);
438
439void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str);
440
441static inline void dst_state_lock(struct dst_state *st)
442{
443 mutex_lock(&st->state_lock);
444}
445
446static inline void dst_state_unlock(struct dst_state *st)
447{
448 mutex_unlock(&st->state_lock);
449}
450
451void dst_poll_exit(struct dst_state *st);
452int dst_poll_init(struct dst_state *st);
453
454static inline unsigned int dst_state_poll(struct dst_state *st)
455{
456 unsigned int revents = POLLHUP | POLLERR;
457
458 dst_state_lock(st);
459 if (st->socket)
460 revents = st->socket->ops->poll(NULL, st->socket, NULL);
461 dst_state_unlock(st);
462
463 return revents;
464}
465
466static inline int dst_thread_setup(void *private, void *data)
467{
468 return 0;
469}
470
471void dst_node_put(struct dst_node *n);
472
473static inline struct dst_node *dst_node_get(struct dst_node *n)
474{
475 atomic_inc(&n->refcnt);
476 return n;
477}
478
479int dst_data_recv(struct dst_state *st, void *data, unsigned int size);
480int dst_recv_cdata(struct dst_state *st, void *cdata);
481int dst_data_send_header(struct socket *sock,
482 void *data, unsigned int size, int more);
483
484int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio);
485
486int dst_process_io(struct dst_state *st);
487int dst_export_crypto(struct dst_node *n, struct bio *bio);
488int dst_export_send_bio(struct bio *bio);
489int dst_start_export(struct dst_node *n);
490
491int __init dst_export_init(void);
492void dst_export_exit(void);
493
494/* Private structure for export block IO requests */
495struct dst_export_priv
496{
497 struct list_head request_entry;
498 struct dst_state *state;
499 struct bio *bio;
500 struct dst_cmd cmd;
501};
502
503static inline void dst_trans_get(struct dst_trans *t)
504{
505 atomic_inc(&t->refcnt);
506}
507
508struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen);
509int dst_trans_remove(struct dst_trans *t);
510int dst_trans_remove_nolock(struct dst_trans *t);
511void dst_trans_put(struct dst_trans *t);
512
513/*
514 * Convert bio into network command.
515 */
516static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd,
517 u32 command, u64 id)
518{
519 cmd->cmd = command;
520 cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS;
521 cmd->rw = bio->bi_rw;
522 cmd->size = bio->bi_size;
523 cmd->csize = 0;
524 cmd->id = id;
525 cmd->sector = bio->bi_sector;
526};
527
528int dst_trans_send(struct dst_trans *t);
529int dst_trans_crypto(struct dst_trans *t);
530
531int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl);
532void dst_node_crypto_exit(struct dst_node *n);
533
534static inline int dst_need_crypto(struct dst_node *n)
535{
536 struct dst_crypto_ctl *c = &n->crypto;
537 /*
538 * Logical OR is appropriate here, but boolean one produces
539 * more optimal code, so it is used instead.
540 */
541 return (c->hash_algo[0] | c->cipher_algo[0]);
542}
543
544int dst_node_trans_init(struct dst_node *n, unsigned int size);
545void dst_node_trans_exit(struct dst_node *n);
546
547/*
548 * Pool of threads.
549 * Ready list contains threads currently free to be used,
550 * active one contains threads with some work scheduled for them.
551 * Caller can wait in given queue when thread is ready.
552 */
553struct thread_pool
554{
555 int thread_num;
556 struct mutex thread_lock;
557 struct list_head ready_list, active_list;
558
559 wait_queue_head_t wait;
560};
561
562void thread_pool_del_worker(struct thread_pool *p);
563void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id);
564int thread_pool_add_worker(struct thread_pool *p,
565 char *name,
566 unsigned int id,
567 void *(* init)(void *data),
568 void (* cleanup)(void *data),
569 void *data);
570
571void thread_pool_destroy(struct thread_pool *p);
572struct thread_pool *thread_pool_create(int num, char *name,
573 void *(* init)(void *data),
574 void (* cleanup)(void *data),
575 void *data);
576
577int thread_pool_schedule(struct thread_pool *p,
578 int (* setup)(void *stored_private, void *setup_data),
579 int (* action)(void *stored_private, void *setup_data),
580 void *setup_data, long timeout);
581int thread_pool_schedule_private(struct thread_pool *p,
582 int (* setup)(void *private, void *data),
583 int (* action)(void *private, void *data),
584 void *data, long timeout, void *id);
585
586#endif /* __KERNEL__ */
587#endif /* __DST_H */
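
Reading the new dst.h as a whole, the send path appears to pack a bio into a struct dst_cmd with dst_bio_to_cmd() and then byte-swap it with dst_convert_cmd() before it goes on the wire. A hedged fragment based only on the header above, not on the DST driver itself:

/*
 * Hedged fragment (assumes kernel context): fill a network command
 * from a bio and convert it to wire (big-endian) byte order.
 */
#include <linux/dst.h>
#include <linux/bio.h>

static void prepare_io_cmd(struct dst_cmd *cmd, struct bio *bio, u64 trans_id)
{
    /* Copy sector, size, rw bits and flags out of the bio */
    dst_bio_to_cmd(bio, cmd, DST_IO, trans_id);

    /* Everything on the wire is big-endian; swap in place */
    dst_convert_cmd(cmd);
}
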
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde247f7..c8aad713a046 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
74#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ 74#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
75#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ 75#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
76 76
77/* DMA API extensions */
78struct dw_cyclic_desc {
79 struct dw_desc **desc;
80 unsigned long periods;
81 void (*period_callback)(void *param);
82 void *period_callback_param;
83};
84
85struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
86 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
87 enum dma_data_direction direction);
88void dw_dma_cyclic_free(struct dma_chan *chan);
89int dw_dma_cyclic_start(struct dma_chan *chan);
90void dw_dma_cyclic_stop(struct dma_chan *chan);
91
92dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
93
94dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
95
77#endif /* DW_DMAC_H */ 96#endif /* DW_DMAC_H */
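
The cyclic extensions above describe a transfer that loops over a buffer split into periods, invoking period_callback once per period. A hedged usage sketch built only from the prototypes; it assumes dw_dma_cyclic_prep() follows the usual ERR_PTR convention on failure, which the header itself does not state:

/*
 * Hedged fragment (assumes kernel/driver context): set up and start a
 * cyclic receive over an already-mapped DMA buffer.
 */
#include <linux/err.h>
#include <linux/dmaengine.h>
#include <linux/dw_dmac.h>

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
                           size_t buf_len, size_t period_len)
{
    struct dw_cyclic_desc *cdesc;

    cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
                               DMA_FROM_DEVICE);
    if (IS_ERR(cdesc))                  /* assumed ERR_PTR on failure */
        return PTR_ERR(cdesc);

    return dw_dma_cyclic_start(chan);   /* runs until dw_dma_cyclic_stop() */
}
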
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index a667637b54e3..f45a8ae5f828 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -13,10 +13,20 @@
13/* For O_CLOEXEC and O_NONBLOCK */ 13/* For O_CLOEXEC and O_NONBLOCK */
14#include <linux/fcntl.h> 14#include <linux/fcntl.h>
15 15
16/* Flags for eventfd2. */ 16/*
17 * CAREFUL: Check include/asm-generic/fcntl.h when defining
18 * new flags, since they might collide with O_* ones. We want
19 * to re-use O_* flags that couldn't possibly have a meaning
20 * from eventfd, in order to leave a free define-space for
21 * shared O_* flags.
22 */
23#define EFD_SEMAPHORE (1 << 0)
17#define EFD_CLOEXEC O_CLOEXEC 24#define EFD_CLOEXEC O_CLOEXEC
18#define EFD_NONBLOCK O_NONBLOCK 25#define EFD_NONBLOCK O_NONBLOCK
19 26
27#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
28#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
29
20struct file *eventfd_fget(int fd); 30struct file *eventfd_fget(int fd);
21int eventfd_signal(struct file *file, int n); 31int eventfd_signal(struct file *file, int n);
22 32
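
The eventfd.h comment above explains that eventfd-private flags must live in bits the shared O_* flags do not occupy, with EFD_FLAGS_SET as the mask of everything accepted. A standalone sketch of that flag-space check (the numeric values merely stand in for the real O_* encodings, and the kernel would return -EINVAL rather than -1):

/*
 * Sketch only: reject any flag bit outside the accepted set.
 */
#include <stdio.h>

#define EFD_SEMAPHORE (1 << 0)
#define EFD_CLOEXEC   02000000     /* stands in for O_CLOEXEC  */
#define EFD_NONBLOCK  00004000     /* stands in for O_NONBLOCK */

#define EFD_SHARED_FCNTL_FLAGS (EFD_CLOEXEC | EFD_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)

static int check_flags(int flags)
{
    return (flags & ~EFD_FLAGS_SET) ? -1 : 0;
}

int main(void)
{
    printf("valid: %d, bogus: %d\n",
           check_flags(EFD_SEMAPHORE | EFD_NONBLOCK),
           check_flags(1 << 9));
    return 0;
}
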
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index dd495b8c3091..634a5e5aba3e 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -208,6 +208,7 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
208#define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */ 208#define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */
209#define EXT3_STATE_NEW 0x00000002 /* inode is newly created */ 209#define EXT3_STATE_NEW 0x00000002 /* inode is newly created */
210#define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */ 210#define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */
211#define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008
211 212
212/* Used to pass group descriptor data when online resize is done */ 213/* Used to pass group descriptor data when online resize is done */
213struct ext3_new_group_input { 214struct ext3_new_group_input {
@@ -893,9 +894,8 @@ extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
893 u64 start, u64 len); 894 u64 start, u64 len);
894 895
895/* ioctl.c */ 896/* ioctl.c */
896extern int ext3_ioctl (struct inode *, struct file *, unsigned int, 897extern long ext3_ioctl(struct file *, unsigned int, unsigned long);
897 unsigned long); 898extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long);
898extern long ext3_compat_ioctl (struct file *, unsigned int, unsigned long);
899 899
900/* namei.c */ 900/* namei.c */
901extern int ext3_orphan_add(handle_t *, struct inode *); 901extern int ext3_orphan_add(handle_t *, struct inode *);
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 31527e17076b..f563c5013932 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -123,6 +123,7 @@ struct dentry;
123#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ 123#define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */
124#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ 124#define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */
125#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ 125#define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */
126#define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */
126#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ 127#define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */
127#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ 128#define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */
128#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ 129#define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */
@@ -960,15 +961,7 @@ extern struct fb_info *registered_fb[FB_MAX];
960extern int num_registered_fb; 961extern int num_registered_fb;
961extern struct class *fb_class; 962extern struct class *fb_class;
962 963
963static inline int lock_fb_info(struct fb_info *info) 964extern int lock_fb_info(struct fb_info *info);
964{
965 mutex_lock(&info->lock);
966 if (!info->fbops) {
967 mutex_unlock(&info->lock);
968 return 0;
969 }
970 return 1;
971}
972 965
973static inline void unlock_fb_info(struct fb_info *info) 966static inline void unlock_fb_info(struct fb_info *info)
974{ 967{
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 09d6c5bbdddd..a2ec74bc4812 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -5,12 +5,14 @@
5#ifndef __LINUX_FDTABLE_H 5#ifndef __LINUX_FDTABLE_H
6#define __LINUX_FDTABLE_H 6#define __LINUX_FDTABLE_H
7 7
8#include <asm/atomic.h>
9#include <linux/posix_types.h> 8#include <linux/posix_types.h>
10#include <linux/compiler.h> 9#include <linux/compiler.h>
11#include <linux/spinlock.h> 10#include <linux/spinlock.h>
12#include <linux/rcupdate.h> 11#include <linux/rcupdate.h>
13#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/init.h>
14
15#include <asm/atomic.h>
14 16
15/* 17/*
16 * The default fd array needs to be at least BITS_PER_LONG, 18 * The default fd array needs to be at least BITS_PER_LONG,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 87e7bfc5ebd7..bce40a2207ee 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -95,8 +95,12 @@ struct inodes_stat_t {
95#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ 95#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
96#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) 96#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
97#define READ_META (READ | (1 << BIO_RW_META)) 97#define READ_META (READ | (1 << BIO_RW_META))
98#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) 98#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
99#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) 99#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
100#define WRITE_ODIRECT (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
101#define SWRITE_SYNC_PLUG \
102 (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
103#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
100#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) 104#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
101#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) 105#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
102#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) 106#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
@@ -1741,6 +1745,8 @@ extern void drop_collected_mounts(struct vfsmount *);
1741 1745
1742extern int vfs_statfs(struct dentry *, struct kstatfs *); 1746extern int vfs_statfs(struct dentry *, struct kstatfs *);
1743 1747
1748extern int current_umask(void);
1749
1744/* /sys/fs */ 1750/* /sys/fs */
1745extern struct kobject *fs_kobj; 1751extern struct kobject *fs_kobj;
1746 1752
@@ -1878,12 +1884,25 @@ extern struct block_device *open_by_devnum(dev_t, fmode_t);
1878extern void invalidate_bdev(struct block_device *); 1884extern void invalidate_bdev(struct block_device *);
1879extern int sync_blockdev(struct block_device *bdev); 1885extern int sync_blockdev(struct block_device *bdev);
1880extern struct super_block *freeze_bdev(struct block_device *); 1886extern struct super_block *freeze_bdev(struct block_device *);
1887extern void emergency_thaw_all(void);
1881extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); 1888extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
1882extern int fsync_bdev(struct block_device *); 1889extern int fsync_bdev(struct block_device *);
1883extern int fsync_super(struct super_block *); 1890extern int fsync_super(struct super_block *);
1884extern int fsync_no_super(struct block_device *); 1891extern int fsync_no_super(struct block_device *);
1885#else 1892#else
1886static inline void bd_forget(struct inode *inode) {} 1893static inline void bd_forget(struct inode *inode) {}
1894static inline int sync_blockdev(struct block_device *bdev) { return 0; }
1895static inline void invalidate_bdev(struct block_device *bdev) {}
1896
1897static inline struct super_block *freeze_bdev(struct block_device *sb)
1898{
1899 return NULL;
1900}
1901
1902static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
1903{
1904 return 0;
1905}
1887#endif 1906#endif
1888extern const struct file_operations def_blk_fops; 1907extern const struct file_operations def_blk_fops;
1889extern const struct file_operations def_chr_fops; 1908extern const struct file_operations def_chr_fops;
@@ -2322,19 +2341,7 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf,
2322 size_t size, loff_t *pos); 2341 size_t size, loff_t *pos);
2323int simple_transaction_release(struct inode *inode, struct file *file); 2342int simple_transaction_release(struct inode *inode, struct file *file);
2324 2343
2325static inline void simple_transaction_set(struct file *file, size_t n) 2344void simple_transaction_set(struct file *file, size_t n);
2326{
2327 struct simple_transaction_argresp *ar = file->private_data;
2328
2329 BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
2330
2331 /*
2332 * The barrier ensures that ar->size will really remain zero until
2333 * ar->data is ready for reading.
2334 */
2335 smp_mb();
2336 ar->size = n;
2337}
2338 2345
2339/* 2346/*
2340 * simple attribute files 2347 * simple attribute files
@@ -2381,27 +2388,6 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
2381ssize_t simple_attr_write(struct file *file, const char __user *buf, 2388ssize_t simple_attr_write(struct file *file, const char __user *buf,
2382 size_t len, loff_t *ppos); 2389 size_t len, loff_t *ppos);
2383 2390
2384
2385#ifdef CONFIG_SECURITY
2386static inline char *alloc_secdata(void)
2387{
2388 return (char *)get_zeroed_page(GFP_KERNEL);
2389}
2390
2391static inline void free_secdata(void *secdata)
2392{
2393 free_page((unsigned long)secdata);
2394}
2395#else
2396static inline char *alloc_secdata(void)
2397{
2398 return (char *)1;
2399}
2400
2401static inline void free_secdata(void *secdata)
2402{ }
2403#endif /* CONFIG_SECURITY */
2404
2405struct ctl_table; 2391struct ctl_table;
2406int proc_nr_files(struct ctl_table *table, int write, struct file *filp, 2392int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
2407 void __user *buffer, size_t *lenp, loff_t *ppos); 2393 void __user *buffer, size_t *lenp, loff_t *ppos);
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index 18b467dbe278..78a05bfcd8eb 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -4,12 +4,10 @@
4#include <linux/path.h> 4#include <linux/path.h>
5 5
6struct fs_struct { 6struct fs_struct {
7 atomic_t count; /* This usage count is used by check_unsafe_exec() for 7 int users;
8 * security checking purposes - therefore it may not be
9 * incremented, except by clone(CLONE_FS).
10 */
11 rwlock_t lock; 8 rwlock_t lock;
12 int umask; 9 int umask;
10 int in_exec;
13 struct path root, pwd; 11 struct path root, pwd;
14}; 12};
15 13
@@ -19,6 +17,8 @@ extern void exit_fs(struct task_struct *);
19extern void set_fs_root(struct fs_struct *, struct path *); 17extern void set_fs_root(struct fs_struct *, struct path *);
20extern void set_fs_pwd(struct fs_struct *, struct path *); 18extern void set_fs_pwd(struct fs_struct *, struct path *);
21extern struct fs_struct *copy_fs_struct(struct fs_struct *); 19extern struct fs_struct *copy_fs_struct(struct fs_struct *);
22extern void put_fs_struct(struct fs_struct *); 20extern void free_fs_struct(struct fs_struct *);
21extern void daemonize_fs_struct(void);
22extern int unshare_fs_struct(void);
23 23
24#endif /* _LINUX_FS_STRUCT_H */ 24#endif /* _LINUX_FS_STRUCT_H */
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
new file mode 100644
index 000000000000..84d3532dd3ea
--- /dev/null
+++ b/include/linux/fscache-cache.h
@@ -0,0 +1,505 @@
1/* General filesystem caching backing cache interface
2 *
3 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * NOTE!!! See:
12 *
13 * Documentation/filesystems/caching/backend-api.txt
14 *
15 * for a description of the cache backend interface declared here.
16 */
17
18#ifndef _LINUX_FSCACHE_CACHE_H
19#define _LINUX_FSCACHE_CACHE_H
20
21#include <linux/fscache.h>
22#include <linux/sched.h>
23#include <linux/slow-work.h>
24
25#define NR_MAXCACHES BITS_PER_LONG
26
27struct fscache_cache;
28struct fscache_cache_ops;
29struct fscache_object;
30struct fscache_operation;
31
32/*
33 * cache tag definition
34 */
35struct fscache_cache_tag {
36 struct list_head link;
37 struct fscache_cache *cache; /* cache referred to by this tag */
38 unsigned long flags;
39#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
40 atomic_t usage;
41 char name[0]; /* tag name */
42};
43
44/*
45 * cache definition
46 */
47struct fscache_cache {
48 const struct fscache_cache_ops *ops;
49 struct fscache_cache_tag *tag; /* tag representing this cache */
50 struct kobject *kobj; /* system representation of this cache */
51 struct list_head link; /* link in list of caches */
52 size_t max_index_size; /* maximum size of index data */
53 char identifier[36]; /* cache label */
54
55 /* node management */
56 struct work_struct op_gc; /* operation garbage collector */
57 struct list_head object_list; /* list of data/index objects */
58 struct list_head op_gc_list; /* list of ops to be deleted */
59 spinlock_t object_list_lock;
60 spinlock_t op_gc_list_lock;
61 atomic_t object_count; /* no. of live objects in this cache */
62 struct fscache_object *fsdef; /* object for the fsdef index */
63 unsigned long flags;
64#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
65#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
66};
67
68extern wait_queue_head_t fscache_cache_cleared_wq;
69
70/*
71 * operation to be applied to a cache object
72 * - retrieval initiation operations are done in the context of the process
73 * that issued them, and not in an async thread pool
74 */
75typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
76typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
77
78struct fscache_operation {
79 union {
80 struct work_struct fast_work; /* record for fast ops */
81 struct slow_work slow_work; /* record for (very) slow ops */
82 };
83 struct list_head pend_link; /* link in object->pending_ops */
84 struct fscache_object *object; /* object to be operated upon */
85
86 unsigned long flags;
87#define FSCACHE_OP_TYPE 0x000f /* operation type */
88#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */
89#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */
90#define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done by issuing thread, not pool */
91#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
92#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
93#define FSCACHE_OP_DEAD 6 /* op is now dead */
94
95 atomic_t usage;
96 unsigned debug_id; /* debugging ID */
97
98 /* operation processor callback
99 * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
100 * the op in a non-pool thread */
101 fscache_operation_processor_t processor;
102
103 /* operation releaser */
104 fscache_operation_release_t release;
105};
106
107extern atomic_t fscache_op_debug_id;
108extern const struct slow_work_ops fscache_op_slow_work_ops;
109
110extern void fscache_enqueue_operation(struct fscache_operation *);
111extern void fscache_put_operation(struct fscache_operation *);
112
113/**
114 * fscache_operation_init - Do basic initialisation of an operation
115 * @op: The operation to initialise
116 * @release: The release function to assign
117 *
118 * Do basic initialisation of an operation. The caller must still set flags,
119 * object, either fast_work or slow_work if necessary, and processor if needed.
120 */
121static inline void fscache_operation_init(struct fscache_operation *op,
122 fscache_operation_release_t release)
123{
124 atomic_set(&op->usage, 1);
125 op->debug_id = atomic_inc_return(&fscache_op_debug_id);
126 op->release = release;
127 INIT_LIST_HEAD(&op->pend_link);
128}
129
130/**
131 * fscache_operation_init_slow - Do additional initialisation of a slow op
132 * @op: The operation to initialise
133 * @processor: The processor function to assign
134 *
135 * Do additional initialisation of an operation as required for slow work.
136 */
137static inline
138void fscache_operation_init_slow(struct fscache_operation *op,
139 fscache_operation_processor_t processor)
140{
141 op->processor = processor;
142 slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
143}
144
145/*
146 * data read operation
147 */
148struct fscache_retrieval {
149 struct fscache_operation op;
150 struct address_space *mapping; /* netfs pages */
151 fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
152 void *context; /* netfs read context (pinned) */
153 struct list_head to_do; /* list of things to be done by the backend */
154 unsigned long start_time; /* time at which retrieval started */
155};
156
157typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
158 struct page *page,
159 gfp_t gfp);
160
161typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
162 struct list_head *pages,
163 unsigned *nr_pages,
164 gfp_t gfp);
165
166/**
167 * fscache_get_retrieval - Get an extra reference on a retrieval operation
168 * @op: The retrieval operation to get a reference on
169 *
170 * Get an extra reference on a retrieval operation.
171 */
172static inline
173struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
174{
175 atomic_inc(&op->op.usage);
176 return op;
177}
178
179/**
180 * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
181 * @op: The retrieval operation affected
182 *
183 * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
184 */
185static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
186{
187 fscache_enqueue_operation(&op->op);
188}
189
190/**
191 * fscache_put_retrieval - Drop a reference to a retrieval operation
192 * @op: The retrieval operation affected
193 *
194 * Drop a reference to a retrieval operation.
195 */
196static inline void fscache_put_retrieval(struct fscache_retrieval *op)
197{
198 fscache_put_operation(&op->op);
199}
200
201/*
202 * cached page storage work item
203 * - used to do three things:
204 * - batch writes to the cache
205 * - do cache writes asynchronously
206 * - defer writes until cache object lookup completion
207 */
208struct fscache_storage {
209 struct fscache_operation op;
210 pgoff_t store_limit; /* don't write more than this */
211};
212
213/*
214 * cache operations
215 */
216struct fscache_cache_ops {
217 /* name of cache provider */
218 const char *name;
219
220 /* allocate an object record for a cookie */
221 struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
222 struct fscache_cookie *cookie);
223
224 /* look up the object for a cookie */
225 void (*lookup_object)(struct fscache_object *object);
226
227 /* finished looking up */
228 void (*lookup_complete)(struct fscache_object *object);
229
230 /* increment the usage count on this object (may fail if unmounting) */
231 struct fscache_object *(*grab_object)(struct fscache_object *object);
232
233 /* pin an object in the cache */
234 int (*pin_object)(struct fscache_object *object);
235
236 /* unpin an object in the cache */
237 void (*unpin_object)(struct fscache_object *object);
238
239 /* store the updated auxiliary data on an object */
240 void (*update_object)(struct fscache_object *object);
241
242 /* discard the resources pinned by an object and effect retirement if
243 * necessary */
244 void (*drop_object)(struct fscache_object *object);
245
246 /* dispose of a reference to an object */
247 void (*put_object)(struct fscache_object *object);
248
249 /* sync a cache */
250 void (*sync_cache)(struct fscache_cache *cache);
251
252 /* notification that the attributes of a non-index object (such as
253 * i_size) have changed */
254 int (*attr_changed)(struct fscache_object *object);
255
256 /* reserve space for an object's data and associated metadata */
257 int (*reserve_space)(struct fscache_object *object, loff_t i_size);
258
259 /* request a backing block for a page be read or allocated in the
260 * cache */
261 fscache_page_retrieval_func_t read_or_alloc_page;
262
263 /* request backing blocks for a list of pages be read or allocated in
264 * the cache */
265 fscache_pages_retrieval_func_t read_or_alloc_pages;
266
267 /* request a backing block for a page be allocated in the cache so that
268 * it can be written directly */
269 fscache_page_retrieval_func_t allocate_page;
270
271 /* request backing blocks for pages be allocated in the cache so that
272 * they can be written directly */
273 fscache_pages_retrieval_func_t allocate_pages;
274
275 /* write a page to its backing block in the cache */
276 int (*write_page)(struct fscache_storage *op, struct page *page);
277
278 /* detach backing block from a page (optional)
279 * - must release the cookie lock before returning
280 * - may sleep
281 */
282 void (*uncache_page)(struct fscache_object *object,
283 struct page *page);
284
285 /* dissociate a cache from all the pages it was backing */
286 void (*dissociate_pages)(struct fscache_cache *cache);
287};
288
289/*
290 * data file or index object cookie
291 * - a file will only appear in one cache
292 * - a request to cache a file may or may not be honoured, subject to
293 * constraints such as disk space
294 * - indices are created on disk just-in-time
295 */
296struct fscache_cookie {
297 atomic_t usage; /* number of users of this cookie */
298 atomic_t n_children; /* number of children of this cookie */
299 spinlock_t lock;
300 struct hlist_head backing_objects; /* object(s) backing this file/index */
301 const struct fscache_cookie_def *def; /* definition */
302 struct fscache_cookie *parent; /* parent of this entry */
303 void *netfs_data; /* back pointer to netfs */
304 struct radix_tree_root stores; /* pages to be stored on this cookie */
305#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
306
307 unsigned long flags;
308#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
309#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */
310#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */
311#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */
312#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */
313#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */
314};
315
316extern struct fscache_cookie fscache_fsdef_index;
317
318/*
319 * on-disk cache file or index handle
320 */
321struct fscache_object {
322 enum fscache_object_state {
323 FSCACHE_OBJECT_INIT, /* object in initial unbound state */
324 FSCACHE_OBJECT_LOOKING_UP, /* looking up object */
325 FSCACHE_OBJECT_CREATING, /* creating object */
326
327 /* active states */
328 FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */
329 FSCACHE_OBJECT_ACTIVE, /* object is usable */
330 FSCACHE_OBJECT_UPDATING, /* object is updating */
331
332 /* terminal states */
333 FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */
334 FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */
335 FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */
336 FSCACHE_OBJECT_RELEASING, /* releasing object */
337 FSCACHE_OBJECT_RECYCLING, /* retiring object */
338 FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
339 FSCACHE_OBJECT_DEAD, /* object is now dead */
340 } state;
341
342 int debug_id; /* debugging ID */
343 int n_children; /* number of child objects */
344 int n_ops; /* number of ops outstanding on object */
345 int n_obj_ops; /* number of object ops outstanding on object */
346 int n_in_progress; /* number of ops in progress */
347 int n_exclusive; /* number of exclusive ops queued */
348 spinlock_t lock; /* state and operations lock */
349
350 unsigned long lookup_jif; /* time at which lookup started */
351 unsigned long event_mask; /* events this object is interested in */
352 unsigned long events; /* events to be processed by this object
353 * (order is important - using fls) */
354#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */
355#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */
356#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */
357#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */
358#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
359#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
360#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
361
362 unsigned long flags;
363#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
364#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
365#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
366
367 struct list_head cache_link; /* link in cache->object_list */
368 struct hlist_node cookie_link; /* link in cookie->backing_objects */
369 struct fscache_cache *cache; /* cache that supplied this object */
370 struct fscache_cookie *cookie; /* netfs's file/index object */
371 struct fscache_object *parent; /* parent object */
372 struct slow_work work; /* attention scheduling record */
373 struct list_head dependents; /* FIFO of dependent objects */
374 struct list_head dep_link; /* link in parent's dependents list */
375 struct list_head pending_ops; /* unstarted operations on this object */
376 pgoff_t store_limit; /* current storage limit */
377};
378
379extern const char *fscache_object_states[];
380
381#define fscache_object_is_active(obj) \
382 (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
383 (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
384 (obj)->state < FSCACHE_OBJECT_DYING)
385
386extern const struct slow_work_ops fscache_object_slow_work_ops;
387
388/**
389 * fscache_object_init - Initialise a cache object description
390 * @object: Object description
391 *
392 * Initialise a cache object description to its basic values.
393 *
394 * See Documentation/filesystems/caching/backend-api.txt for a complete
395 * description.
396 */
397static inline
398void fscache_object_init(struct fscache_object *object,
399 struct fscache_cookie *cookie,
400 struct fscache_cache *cache)
401{
402 atomic_inc(&cache->object_count);
403
404 object->state = FSCACHE_OBJECT_INIT;
405 spin_lock_init(&object->lock);
406 INIT_LIST_HEAD(&object->cache_link);
407 INIT_HLIST_NODE(&object->cookie_link);
408 vslow_work_init(&object->work, &fscache_object_slow_work_ops);
409 INIT_LIST_HEAD(&object->dependents);
410 INIT_LIST_HEAD(&object->dep_link);
411 INIT_LIST_HEAD(&object->pending_ops);
412 object->n_children = 0;
413 object->n_ops = object->n_in_progress = object->n_exclusive = 0;
414 object->events = object->event_mask = 0;
415 object->flags = 0;
416 object->store_limit = 0;
417 object->cache = cache;
418 object->cookie = cookie;
419 object->parent = NULL;
420}
421
422extern void fscache_object_lookup_negative(struct fscache_object *object);
423extern void fscache_obtained_object(struct fscache_object *object);
424
425/**
426 * fscache_object_destroyed - Note destruction of an object in a cache
427 * @cache: The cache from which the object came
428 *
429 * Note the destruction and deallocation of an object record in a cache.
430 */
431static inline void fscache_object_destroyed(struct fscache_cache *cache)
432{
433 if (atomic_dec_and_test(&cache->object_count))
434 wake_up_all(&fscache_cache_cleared_wq);
435}
436
437/**
438 * fscache_object_lookup_error - Note an object encountered an error
439 * @object: The object on which the error was encountered
440 *
441 * Note that an object encountered a fatal error (usually an I/O error) and
442 * that it should be withdrawn as soon as possible.
443 */
444static inline void fscache_object_lookup_error(struct fscache_object *object)
445{
446 set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
447}
448
449/**
450 * fscache_set_store_limit - Set the maximum size to be stored in an object
451 * @object: The object to set the maximum on
452 * @i_size: The limit to set in bytes
453 *
454 * Set the maximum size an object is permitted to reach, implying the highest
455 * byte that may be written. Intended to be called by the attr_changed() op.
456 *
457 * See Documentation/filesystems/caching/backend-api.txt for a complete
458 * description.
459 */
460static inline
461void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
462{
463 object->store_limit = i_size >> PAGE_SHIFT;
464 if (i_size & ~PAGE_MASK)
465 object->store_limit++;
466}
467
468/**
469 * fscache_end_io - End a retrieval operation on a page
470 * @op: The FS-Cache operation covering the retrieval
471 * @page: The page that was to be fetched
472 * @error: The error code (0 if successful)
473 *
474 * Note the end of an operation to retrieve a page, as covered by a particular
475 * operation record.
476 */
477static inline void fscache_end_io(struct fscache_retrieval *op,
478 struct page *page, int error)
479{
480 op->end_io_func(page, op->context, error);
481}
482
483/*
484 * out-of-line cache backend functions
485 */
486extern void fscache_init_cache(struct fscache_cache *cache,
487 const struct fscache_cache_ops *ops,
488 const char *idfmt,
489 ...) __attribute__ ((format (printf, 3, 4)));
490
491extern int fscache_add_cache(struct fscache_cache *cache,
492 struct fscache_object *fsdef,
493 const char *tagname);
494extern void fscache_withdraw_cache(struct fscache_cache *cache);
495
496extern void fscache_io_error(struct fscache_cache *cache);
497
498extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
499 struct pagevec *pagevec);
500
501extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
502 const void *data,
503 uint16_t datalen);
504
505#endif /* _LINUX_FSCACHE_CACHE_H */
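
For backend authors, the operation helpers above compose roughly as follows. This is a hedged, simplified sketch rather than code from any real cache backend: the allocation, the slow-op type and the direct call to fscache_enqueue_operation() are illustrative, and the release/processor callbacks are placeholders.

#include <linux/fscache-cache.h>
#include <linux/slab.h>

static void demo_op_release(struct fscache_operation *op)
{
	kfree(op);			/* paired with the kzalloc() below */
}

static void demo_op_processor(struct fscache_operation *op)
{
	/* the deferred work runs here in the slow-work thread pool */
	fscache_put_operation(op);
}

static int demo_queue_slow_op(struct fscache_object *object)
{
	struct fscache_operation *op;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	fscache_operation_init(op, demo_op_release);
	fscache_operation_init_slow(op, demo_op_processor);
	op->flags = FSCACHE_OP_SLOW;
	op->object = object;

	fscache_enqueue_operation(op);
	return 0;
}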
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
new file mode 100644
index 000000000000..6d8ee466e0a0
--- /dev/null
+++ b/include/linux/fscache.h
@@ -0,0 +1,618 @@
1/* General filesystem caching interface
2 *
3 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * NOTE!!! See:
12 *
13 * Documentation/filesystems/caching/netfs-api.txt
14 *
15 * for a description of the network filesystem interface declared here.
16 */
17
18#ifndef _LINUX_FSCACHE_H
19#define _LINUX_FSCACHE_H
20
21#include <linux/fs.h>
22#include <linux/list.h>
23#include <linux/pagemap.h>
24#include <linux/pagevec.h>
25
26#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
27#define fscache_available() (1)
28#define fscache_cookie_valid(cookie) (cookie)
29#else
30#define fscache_available() (0)
31#define fscache_cookie_valid(cookie) (0)
32#endif
33
34
35/*
36 * overload PG_private_2 to give us PG_fscache - this is used to indicate that
37 * a page is currently backed by a local disk cache
38 */
39#define PageFsCache(page) PagePrivate2((page))
40#define SetPageFsCache(page) SetPagePrivate2((page))
41#define ClearPageFsCache(page) ClearPagePrivate2((page))
42#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
43#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
44
45/* pattern used to fill dead space in an index entry */
46#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
47
48struct pagevec;
49struct fscache_cache_tag;
50struct fscache_cookie;
51struct fscache_netfs;
52
53typedef void (*fscache_rw_complete_t)(struct page *page,
54 void *context,
55 int error);
56
57/* result of index entry consultation */
58enum fscache_checkaux {
59 FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
60 FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
61 FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
62};
63
64/*
65 * fscache cookie definition
66 */
67struct fscache_cookie_def {
68 /* name of cookie type */
69 char name[16];
70
71 /* cookie type */
72 uint8_t type;
73#define FSCACHE_COOKIE_TYPE_INDEX 0
74#define FSCACHE_COOKIE_TYPE_DATAFILE 1
75
76 /* select the cache into which to insert an entry in this index
77 * - optional
78 * - should return a cache identifier or NULL to cause the cache to be
79 * inherited from the parent if possible or the first cache picked
80 * for a non-index file if not
81 */
82 struct fscache_cache_tag *(*select_cache)(
83 const void *parent_netfs_data,
84 const void *cookie_netfs_data);
85
86 /* get an index key
87 * - should store the key data in the buffer
88 * - should return the amount of data stored
89 * - not permitted to return an error
90 * - the netfs data from the cookie being used as the source is
91 * presented
92 */
93 uint16_t (*get_key)(const void *cookie_netfs_data,
94 void *buffer,
95 uint16_t bufmax);
96
97 /* get certain file attributes from the netfs data
98 * - this function can be absent for an index
99 * - not permitted to return an error
100 * - the netfs data from the cookie being used as the source is
101 * presented
102 */
103 void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
104
105 /* get the auxiliary data from netfs data
106 * - this function can be absent if the index carries no state data
107 * - should store the auxiliary data in the buffer
108 * - should return the amount of data stored
109 * - not permitted to return an error
110 * - the netfs data from the cookie being used as the source is
111 * presented
112 */
113 uint16_t (*get_aux)(const void *cookie_netfs_data,
114 void *buffer,
115 uint16_t bufmax);
116
117 /* consult the netfs about the state of an object
118 * - this function can be absent if the index carries no state data
119 * - the netfs data from the cookie being used as the target is
120 * presented, as is the auxiliary data
121 */
122 enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
123 const void *data,
124 uint16_t datalen);
125
126 /* get an extra reference on a read context
127 * - this function can be absent if the completion function doesn't
128 * require a context
129 */
130 void (*get_context)(void *cookie_netfs_data, void *context);
131
132 /* release an extra reference on a read context
133 * - this function can be absent if the completion function doesn't
134 * require a context
135 */
136 void (*put_context)(void *cookie_netfs_data, void *context);
137
138 /* indicate pages that now have cache metadata retained
139 * - this function should mark the specified pages as now being cached
140 * - the pages will have been marked with PG_fscache before this is
141 * called, so this is optional
142 */
143 void (*mark_pages_cached)(void *cookie_netfs_data,
144 struct address_space *mapping,
145 struct pagevec *cached_pvec);
146
147 /* indicate the cookie is no longer cached
148 * - this function is called when the backing store currently caching
149 * a cookie is removed
150 * - the netfs should use this to clean up any markers indicating
151 * cached pages
152 * - this is mandatory for any object that may have data
153 */
154 void (*now_uncached)(void *cookie_netfs_data);
155};
156
157/*
158 * fscache cached network filesystem type
159 * - name, version and ops must be filled in before registration
160 * - all other fields will be set during registration
161 */
162struct fscache_netfs {
163 uint32_t version; /* indexing version */
164 const char *name; /* filesystem name */
165 struct fscache_cookie *primary_index;
166 struct list_head link; /* internal link */
167};
168
169/*
170 * slow-path functions for when there is actually caching available, and the
171 * netfs does actually have a valid token
172 * - these are not to be called directly
173 * - these are undefined symbols when FS-Cache is not configured and the
174 * optimiser takes care of not using them
175 */
176extern int __fscache_register_netfs(struct fscache_netfs *);
177extern void __fscache_unregister_netfs(struct fscache_netfs *);
178extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
179extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
180
181extern struct fscache_cookie *__fscache_acquire_cookie(
182 struct fscache_cookie *,
183 const struct fscache_cookie_def *,
184 void *);
185extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
186extern void __fscache_update_cookie(struct fscache_cookie *);
187extern int __fscache_attr_changed(struct fscache_cookie *);
188extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
189 struct page *,
190 fscache_rw_complete_t,
191 void *,
192 gfp_t);
193extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
194 struct address_space *,
195 struct list_head *,
196 unsigned *,
197 fscache_rw_complete_t,
198 void *,
199 gfp_t);
200extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
201extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
202extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
203extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
204extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
205
206/**
207 * fscache_register_netfs - Register a filesystem as desiring caching services
208 * @netfs: The description of the filesystem
209 *
210 * Register a filesystem as desiring caching services if they're available.
211 *
212 * See Documentation/filesystems/caching/netfs-api.txt for a complete
213 * description.
214 */
215static inline
216int fscache_register_netfs(struct fscache_netfs *netfs)
217{
218 if (fscache_available())
219 return __fscache_register_netfs(netfs);
220 else
221 return 0;
222}
223
224/**
225 * fscache_unregister_netfs - Indicate that a filesystem no longer desires
226 * caching services
227 * @netfs: The description of the filesystem
228 *
229 * Indicate that a filesystem no longer desires caching services for the
230 * moment.
231 *
232 * See Documentation/filesystems/caching/netfs-api.txt for a complete
233 * description.
234 */
235static inline
236void fscache_unregister_netfs(struct fscache_netfs *netfs)
237{
238 if (fscache_available())
239 __fscache_unregister_netfs(netfs);
240}
241
242/**
243 * fscache_lookup_cache_tag - Look up a cache tag
244 * @name: The name of the tag to search for
245 *
246 * Acquire a specific cache referral tag that can be used to select a specific
247 * cache in which to cache an index.
248 *
249 * See Documentation/filesystems/caching/netfs-api.txt for a complete
250 * description.
251 */
252static inline
253struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
254{
255 if (fscache_available())
256 return __fscache_lookup_cache_tag(name);
257 else
258 return NULL;
259}
260
261/**
262 * fscache_release_cache_tag - Release a cache tag
263 * @tag: The tag to release
264 *
265 * Release a reference to a cache referral tag previously looked up.
266 *
267 * See Documentation/filesystems/caching/netfs-api.txt for a complete
268 * description.
269 */
270static inline
271void fscache_release_cache_tag(struct fscache_cache_tag *tag)
272{
273 if (fscache_available())
274 __fscache_release_cache_tag(tag);
275}
276
277/**
278 * fscache_acquire_cookie - Acquire a cookie to represent a cache object
279 * @parent: The cookie that's to be the parent of this one
280 * @def: A description of the cache object, including callback operations
281 * @netfs_data: An arbitrary piece of data to be kept in the cookie to
282 * represent the cache object to the netfs
283 *
284 * This function is used to inform FS-Cache about part of an index hierarchy
285 * that can be used to locate files. This is done by requesting a cookie for
286 * each index in the path to the file.
287 *
288 * See Documentation/filesystems/caching/netfs-api.txt for a complete
289 * description.
290 */
291static inline
292struct fscache_cookie *fscache_acquire_cookie(
293 struct fscache_cookie *parent,
294 const struct fscache_cookie_def *def,
295 void *netfs_data)
296{
297 if (fscache_cookie_valid(parent))
298 return __fscache_acquire_cookie(parent, def, netfs_data);
299 else
300 return NULL;
301}
302
303/**
304 * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
305 * it
306 * @cookie: The cookie being returned
307 * @retire: True if the cache object the cookie represents is to be discarded
308 *
309 * This function returns a cookie to the cache, forcibly discarding the
310 * associated cache object if retire is set to true.
311 *
312 * See Documentation/filesystems/caching/netfs-api.txt for a complete
313 * description.
314 */
315static inline
316void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
317{
318 if (fscache_cookie_valid(cookie))
319 __fscache_relinquish_cookie(cookie, retire);
320}
321
322/**
323 * fscache_update_cookie - Request that a cache object be updated
324 * @cookie: The cookie representing the cache object
325 *
326 * Request an update of the index data for the cache object associated with the
327 * cookie.
328 *
329 * See Documentation/filesystems/caching/netfs-api.txt for a complete
330 * description.
331 */
332static inline
333void fscache_update_cookie(struct fscache_cookie *cookie)
334{
335 if (fscache_cookie_valid(cookie))
336 __fscache_update_cookie(cookie);
337}
338
339/**
340 * fscache_pin_cookie - Pin a data-storage cache object in its cache
341 * @cookie: The cookie representing the cache object
342 *
343 * Permit data-storage cache objects to be pinned in the cache.
344 *
345 * See Documentation/filesystems/caching/netfs-api.txt for a complete
346 * description.
347 */
348static inline
349int fscache_pin_cookie(struct fscache_cookie *cookie)
350{
351 return -ENOBUFS;
352}
353
354/**
355 * fscache_unpin_cookie - Unpin a data-storage cache object in its cache
356 * @cookie: The cookie representing the cache object
357 *
358 * Permit data-storage cache objects to be unpinned from the cache.
359 *
360 * See Documentation/filesystems/caching/netfs-api.txt for a complete
361 * description.
362 */
363static inline
364void fscache_unpin_cookie(struct fscache_cookie *cookie)
365{
366}
367
368/**
369 * fscache_attr_changed - Notify cache that an object's attributes changed
370 * @cookie: The cookie representing the cache object
371 *
372 * Send a notification to the cache indicating that an object's attributes have
373 * changed. This includes the data size. These attributes will be obtained
374 * through the get_attr() cookie definition op.
375 *
376 * See Documentation/filesystems/caching/netfs-api.txt for a complete
377 * description.
378 */
379static inline
380int fscache_attr_changed(struct fscache_cookie *cookie)
381{
382 if (fscache_cookie_valid(cookie))
383 return __fscache_attr_changed(cookie);
384 else
385 return -ENOBUFS;
386}
387
388/**
389 * fscache_reserve_space - Reserve data space for a cached object
390 * @cookie: The cookie representing the cache object
391 * @size: The amount of space to be reserved
392 *
393 * Reserve an amount of space in the cache for the cache object attached to a
394 * cookie so that a write to that object within the space can always be
395 * honoured.
396 *
397 * See Documentation/filesystems/caching/netfs-api.txt for a complete
398 * description.
399 */
400static inline
401int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
402{
403 return -ENOBUFS;
404}
405
406/**
407 * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
408 * in which to store it
409 * @cookie: The cookie representing the cache object
410 * @page: The netfs page to fill if possible
411 * @end_io_func: The callback to invoke when and if the page is filled
412 * @context: An arbitrary piece of data to pass on to end_io_func()
413 * @gfp: The conditions under which memory allocation should be made
414 *
415 * Read a page from the cache, or if that's not possible make a potential
416 * one-block reservation in the cache into which the page may be stored once
417 * fetched from the server.
418 *
419 * If the page is not backed by the cache object, or if there's some reason
420 * it can't be, -ENOBUFS will be returned and nothing more will be done for
421 * that page.
422 *
423 * Else, if that page is backed by the cache, a read will be initiated directly
424 * to the netfs's page and 0 will be returned by this function. The
425 * end_io_func() callback will be invoked when the operation terminates on a
426 * completion or failure. Note that the callback may be invoked before the
427 * return.
428 *
429 * Else, if the page is unbacked, -ENODATA is returned and a block may have
430 * been allocated in the cache.
431 *
432 * See Documentation/filesystems/caching/netfs-api.txt for a complete
433 * description.
434 */
435static inline
436int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
437 struct page *page,
438 fscache_rw_complete_t end_io_func,
439 void *context,
440 gfp_t gfp)
441{
442 if (fscache_cookie_valid(cookie))
443 return __fscache_read_or_alloc_page(cookie, page, end_io_func,
444 context, gfp);
445 else
446 return -ENOBUFS;
447}
448
449/**
450 * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
451 * blocks in which to store them
452 * @cookie: The cookie representing the cache object
453 * @mapping: The netfs inode mapping to which the pages will be attached
454 * @pages: A list of potential netfs pages to be filled
455 * @end_io_func: The callback to invoke when and if each page is filled
456 * @context: An arbitrary piece of data to pass on to end_io_func()
457 * @gfp: The conditions under which memory allocation should be made
458 *
459 * Read a set of pages from the cache, or if that's not possible, attempt to
460 * make a potential one-block reservation for each page in the cache into which
461 * that page may be stored once fetched from the server.
462 *
463 * If some pages are not backed by the cache object, or if there's some
464 * reason they can't be, -ENOBUFS will be returned and nothing more will be
465 * done for those pages.
466 *
467 * Else, if some of the pages are backed by the cache, a read will be initiated
468 * directly to the netfs's page and 0 will be returned by this function. The
469 * end_io_func() callback will be invoked when the operation terminates on a
470 * completion or failure. Note that the callback may be invoked before the
471 * return.
472 *
473 * Else, if a page is unbacked, -ENODATA is returned and a block may have
474 * been allocated in the cache.
475 *
476 * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
477 * regard to different pages, the return values are prioritised in that order.
478 * Any pages submitted for reading are removed from the pages list.
479 *
480 * See Documentation/filesystems/caching/netfs-api.txt for a complete
481 * description.
482 */
483static inline
484int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
485 struct address_space *mapping,
486 struct list_head *pages,
487 unsigned *nr_pages,
488 fscache_rw_complete_t end_io_func,
489 void *context,
490 gfp_t gfp)
491{
492 if (fscache_cookie_valid(cookie))
493 return __fscache_read_or_alloc_pages(cookie, mapping, pages,
494 nr_pages, end_io_func,
495 context, gfp);
496 else
497 return -ENOBUFS;
498}
499
500/**
501 * fscache_alloc_page - Allocate a block in which to store a page
502 * @cookie: The cookie representing the cache object
503 * @page: The netfs page to allocate a page for
504 * @gfp: The conditions under which memory allocation should be made
505 *
506 * Request allocation of a block in the cache in which to store a netfs page
507 * without retrieving any contents from the cache.
508 *
509 * If the page is not backed by a file then -ENOBUFS will be returned and
510 * nothing more will be done, and no reservation will be made.
511 *
512 * Else, a block will be allocated if one wasn't already, and 0 will be
513 * returned.
514 *
515 * See Documentation/filesystems/caching/netfs-api.txt for a complete
516 * description.
517 */
518static inline
519int fscache_alloc_page(struct fscache_cookie *cookie,
520 struct page *page,
521 gfp_t gfp)
522{
523 if (fscache_cookie_valid(cookie))
524 return __fscache_alloc_page(cookie, page, gfp);
525 else
526 return -ENOBUFS;
527}
528
529/**
530 * fscache_write_page - Request storage of a page in the cache
531 * @cookie: The cookie representing the cache object
532 * @page: The netfs page to store
533 * @gfp: The conditions under which memory allocation should be made
534 *
535 * Request the contents of the netfs page be written into the cache. This
536 * request may be ignored if no cache block is currently allocated, in which
537 * case it will return -ENOBUFS.
538 *
539 * If a cache block was already allocated, a write will be initiated and 0 will
540 * be returned. The PG_fscache_write page bit is set immediately and will then
541 * be cleared at the completion of the write to indicate the success or failure
542 * of the operation. Note that the completion may happen before the return.
543 *
544 * See Documentation/filesystems/caching/netfs-api.txt for a complete
545 * description.
546 */
547static inline
548int fscache_write_page(struct fscache_cookie *cookie,
549 struct page *page,
550 gfp_t gfp)
551{
552 if (fscache_cookie_valid(cookie))
553 return __fscache_write_page(cookie, page, gfp);
554 else
555 return -ENOBUFS;
556}
557
558/**
559 * fscache_uncache_page - Indicate that caching is no longer required on a page
560 * @cookie: The cookie representing the cache object
561 * @page: The netfs page that was being cached.
562 *
563 * Tell the cache that we no longer want a page to be cached and that it should
564 * remove any knowledge of the netfs page it may have.
565 *
566 * Note that this cannot cancel any outstanding I/O operations between this
567 * page and the cache.
568 *
569 * See Documentation/filesystems/caching/netfs-api.txt for a complete
570 * description.
571 */
572static inline
573void fscache_uncache_page(struct fscache_cookie *cookie,
574 struct page *page)
575{
576 if (fscache_cookie_valid(cookie))
577 __fscache_uncache_page(cookie, page);
578}
579
580/**
581 * fscache_check_page_write - Ask if a page is being written to the cache
582 * @cookie: The cookie representing the cache object
583 * @page: The netfs page that is being cached.
584 *
585 * Ask the cache if a page is being written to the cache.
586 *
587 * See Documentation/filesystems/caching/netfs-api.txt for a complete
588 * description.
589 */
590static inline
591bool fscache_check_page_write(struct fscache_cookie *cookie,
592 struct page *page)
593{
594 if (fscache_cookie_valid(cookie))
595 return __fscache_check_page_write(cookie, page);
596 return false;
597}
598
599/**
600 * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
601 * @cookie: The cookie representing the cache object
602 * @page: The netfs page that is being cached.
603 *
604 * Ask the cache to wake us up when a page is no longer being written to the
605 * cache.
606 *
607 * See Documentation/filesystems/caching/netfs-api.txt for a complete
608 * description.
609 */
610static inline
611void fscache_wait_on_page_write(struct fscache_cookie *cookie,
612 struct page *page)
613{
614 if (fscache_cookie_valid(cookie))
615 __fscache_wait_on_page_write(cookie, page);
616}
617
618#endif /* _LINUX_FSCACHE_H */
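
From the netfs side, the useful property of the wrappers above is that they compile down to constants when FS-Cache is not configured, so callers need no #ifdefs. A hedged end-to-end sketch that compresses registration, cookie acquisition and a page read into one function for brevity; the cookie definition, names and completion handler are placeholders, not any real filesystem's code:

#include <linux/fscache.h>
#include <linux/pagemap.h>

static void demo_read_done(struct page *page, void *context, int error)
{
	/* complete/unlock the netfs page here once the cache read finishes */
}

static const struct fscache_cookie_def demo_file_def = {
	.name = "demo-file",
	.type = FSCACHE_COOKIE_TYPE_DATAFILE,
};

static struct fscache_netfs demo_netfs = {
	.name    = "demofs",
	.version = 0,
};

static int demo_cache_read(struct page *page, void *inode_netfs_data)
{
	struct fscache_cookie *cookie;
	int ret;

	ret = fscache_register_netfs(&demo_netfs);
	if (ret < 0)
		return ret;

	cookie = fscache_acquire_cookie(demo_netfs.primary_index,
					&demo_file_def, inode_netfs_data);

	ret = fscache_read_or_alloc_page(cookie, page, demo_read_done,
					 NULL, GFP_KERNEL);
	/* 0: read in flight; -ENODATA: block reserved, fetch from the server;
	 * -ENOBUFS: not cached, fall back to a plain server read */

	fscache_relinquish_cookie(cookie, 0);
	fscache_unregister_netfs(&demo_netfs);
	return ret;
}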
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index d9051d717d27..7ef1caf50269 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -95,14 +95,15 @@ struct fsl_usb2_platform_data {
95#define FSL_USB2_PORT0_ENABLED 0x00000001 95#define FSL_USB2_PORT0_ENABLED 0x00000001
96#define FSL_USB2_PORT1_ENABLED 0x00000002 96#define FSL_USB2_PORT1_ENABLED 0x00000002
97 97
98struct spi_device;
99
98struct fsl_spi_platform_data { 100struct fsl_spi_platform_data {
99 u32 initial_spmode; /* initial SPMODE value */ 101 u32 initial_spmode; /* initial SPMODE value */
100 u16 bus_num; 102 s16 bus_num;
101 bool qe_mode; 103 bool qe_mode;
102 /* board specific information */ 104 /* board specific information */
103 u16 max_chipselect; 105 u16 max_chipselect;
104 void (*activate_cs)(u8 cs, u8 polarity); 106 void (*cs_control)(struct spi_device *spi, bool on);
105 void (*deactivate_cs)(u8 cs, u8 polarity);
106 u32 sysclk; 107 u32 sysclk;
107}; 108};
108 109
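
The activate_cs()/deactivate_cs() pair collapses into a single cs_control() hook that receives the spi_device and an assert/deassert flag. A hypothetical board-code sketch; the GPIO number, polarity and platform data values are made up for illustration:

#include <linux/spi/spi.h>
#include <linux/fsl_devices.h>
#include <linux/gpio.h>

#define DEMO_SPI_CS_GPIO	42	/* placeholder chip-select line */

static void demo_cs_control(struct spi_device *spi, bool on)
{
	/* active-low chip select: drive the line low to assert it */
	gpio_set_value(DEMO_SPI_CS_GPIO, on ? 0 : 1);
}

static struct fsl_spi_platform_data demo_spi_pdata = {
	.bus_num	= 0,
	.max_chipselect	= 1,
	.sysclk		= 66000000,
	.cs_control	= demo_cs_control,
};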
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a7f8134c594e..015a3d22cf74 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,15 +1,18 @@
1#ifndef _LINUX_FTRACE_H 1#ifndef _LINUX_FTRACE_H
2#define _LINUX_FTRACE_H 2#define _LINUX_FTRACE_H
3 3
4#include <linux/linkage.h> 4#include <linux/trace_clock.h>
5#include <linux/fs.h>
6#include <linux/ktime.h>
7#include <linux/init.h>
8#include <linux/types.h>
9#include <linux/module.h>
10#include <linux/kallsyms.h> 5#include <linux/kallsyms.h>
6#include <linux/linkage.h>
11#include <linux/bitops.h> 7#include <linux/bitops.h>
8#include <linux/module.h>
9#include <linux/ktime.h>
12#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/fs.h>
14
15#include <asm/ftrace.h>
13 16
14#ifdef CONFIG_FUNCTION_TRACER 17#ifdef CONFIG_FUNCTION_TRACER
15 18
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write,
95 loff_t *ppos); 98 loff_t *ppos);
96#endif 99#endif
97 100
101struct ftrace_func_command {
102 struct list_head list;
103 char *name;
104 int (*func)(char *func, char *cmd,
105 char *params, int enable);
106};
107
98#ifdef CONFIG_DYNAMIC_FTRACE 108#ifdef CONFIG_DYNAMIC_FTRACE
99/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ 109
100#include <asm/ftrace.h> 110int ftrace_arch_code_modify_prepare(void);
111int ftrace_arch_code_modify_post_process(void);
112
113struct seq_file;
114
115struct ftrace_probe_ops {
116 void (*func)(unsigned long ip,
117 unsigned long parent_ip,
118 void **data);
119 int (*callback)(unsigned long ip, void **data);
120 void (*free)(void **data);
121 int (*print)(struct seq_file *m,
122 unsigned long ip,
123 struct ftrace_probe_ops *ops,
124 void *data);
125};
126
127extern int
128register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
129 void *data);
130extern void
131unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
132 void *data);
133extern void
134unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
135extern void unregister_ftrace_function_probe_all(char *glob);
101 136
102enum { 137enum {
103 FTRACE_FL_FREE = (1 << 0), 138 FTRACE_FL_FREE = (1 << 0),
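
register_ftrace_command() above is the hook behind the "<function>:<command>[:<params>]" strings accepted by set_ftrace_filter. A hedged sketch of registering one such command; the command name and its handler are hypothetical:

#include <linux/ftrace.h>
#include <linux/init.h>

static int demo_cmd_func(char *func, char *cmd, char *params, int enable)
{
	/* act on the matched function pattern here; 'enable' distinguishes
	 * writes to set_ftrace_filter from writes to set_ftrace_notrace */
	return 0;
}

static struct ftrace_func_command demo_cmd = {
	.name = "democmd",
	.func = demo_cmd_func,
};

static int __init demo_ftrace_cmd_init(void)
{
	return register_ftrace_command(&demo_cmd);
}

static void __exit demo_ftrace_cmd_exit(void)
{
	unregister_ftrace_command(&demo_cmd);
}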
@@ -110,15 +145,23 @@ enum {
110}; 145};
111 146
112struct dyn_ftrace { 147struct dyn_ftrace {
113 struct list_head list; 148 union {
114 unsigned long ip; /* address of mcount call-site */ 149 unsigned long ip; /* address of mcount call-site */
115 unsigned long flags; 150 struct dyn_ftrace *freelist;
116 struct dyn_arch_ftrace arch; 151 };
152 union {
153 unsigned long flags;
154 struct dyn_ftrace *newlist;
155 };
156 struct dyn_arch_ftrace arch;
117}; 157};
118 158
119int ftrace_force_update(void); 159int ftrace_force_update(void);
120void ftrace_set_filter(unsigned char *buf, int len, int reset); 160void ftrace_set_filter(unsigned char *buf, int len, int reset);
121 161
162int register_ftrace_command(struct ftrace_func_command *cmd);
163int unregister_ftrace_command(struct ftrace_func_command *cmd);
164
122/* defined in arch */ 165/* defined in arch */
123extern int ftrace_ip_converted(unsigned long ip); 166extern int ftrace_ip_converted(unsigned long ip);
124extern int ftrace_dyn_arch_init(void *data); 167extern int ftrace_dyn_arch_init(void *data);
@@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
126extern void ftrace_caller(void); 169extern void ftrace_caller(void);
127extern void ftrace_call(void); 170extern void ftrace_call(void);
128extern void mcount_call(void); 171extern void mcount_call(void);
172
173#ifndef FTRACE_ADDR
174#define FTRACE_ADDR ((unsigned long)ftrace_caller)
175#endif
129#ifdef CONFIG_FUNCTION_GRAPH_TRACER 176#ifdef CONFIG_FUNCTION_GRAPH_TRACER
130extern void ftrace_graph_caller(void); 177extern void ftrace_graph_caller(void);
131extern int ftrace_enable_ftrace_graph_caller(void); 178extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
136#endif 183#endif
137 184
138/** 185/**
139 * ftrace_make_nop - convert code into top 186 * ftrace_make_nop - convert code into nop
140 * @mod: module structure if called by module load initialization 187 * @mod: module structure if called by module load initialization
141 * @rec: the mcount call site record 188 * @rec: the mcount call site record
142 * @addr: the address that the call site should be calling 189 * @addr: the address that the call site should be calling
@@ -181,7 +228,6 @@ extern int ftrace_make_nop(struct module *mod,
181 */ 228 */
182extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); 229extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
183 230
184
185/* May be defined in arch */ 231/* May be defined in arch */
186extern int ftrace_arch_read_dyn_info(char *buf, int size); 232extern int ftrace_arch_read_dyn_info(char *buf, int size);
187 233
@@ -198,6 +244,14 @@ extern void ftrace_enable_daemon(void);
198# define ftrace_disable_daemon() do { } while (0) 244# define ftrace_disable_daemon() do { } while (0)
199# define ftrace_enable_daemon() do { } while (0) 245# define ftrace_enable_daemon() do { } while (0)
200static inline void ftrace_release(void *start, unsigned long size) { } 246static inline void ftrace_release(void *start, unsigned long size) { }
247static inline int register_ftrace_command(struct ftrace_func_command *cmd)
248{
249 return -EINVAL;
250}
251static inline int unregister_ftrace_command(char *cmd_name)
252{
253 return -EINVAL;
254}
201#endif /* CONFIG_DYNAMIC_FTRACE */ 255#endif /* CONFIG_DYNAMIC_FTRACE */
202 256
203/* totally disable ftrace - can not re-enable after this */ 257/* totally disable ftrace - can not re-enable after this */
@@ -233,24 +287,25 @@ static inline void __ftrace_enabled_restore(int enabled)
233#endif 287#endif
234} 288}
235 289
236#ifdef CONFIG_FRAME_POINTER 290#ifndef HAVE_ARCH_CALLER_ADDR
237/* TODO: need to fix this for ARM */ 291# ifdef CONFIG_FRAME_POINTER
238# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) 292# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
239# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) 293# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
240# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) 294# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
241# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) 295# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
242# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) 296# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
243# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) 297# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
244# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) 298# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
245#else 299# else
246# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) 300# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
247# define CALLER_ADDR1 0UL 301# define CALLER_ADDR1 0UL
248# define CALLER_ADDR2 0UL 302# define CALLER_ADDR2 0UL
249# define CALLER_ADDR3 0UL 303# define CALLER_ADDR3 0UL
250# define CALLER_ADDR4 0UL 304# define CALLER_ADDR4 0UL
251# define CALLER_ADDR5 0UL 305# define CALLER_ADDR5 0UL
252# define CALLER_ADDR6 0UL 306# define CALLER_ADDR6 0UL
253#endif 307# endif
308#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
254 309
255#ifdef CONFIG_IRQSOFF_TRACER 310#ifdef CONFIG_IRQSOFF_TRACER
256 extern void time_hardirqs_on(unsigned long a0, unsigned long a1); 311 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
@@ -268,54 +323,6 @@ static inline void __ftrace_enabled_restore(int enabled)
268# define trace_preempt_off(a0, a1) do { } while (0) 323# define trace_preempt_off(a0, a1) do { } while (0)
269#endif 324#endif
270 325
271#ifdef CONFIG_TRACING
272extern int ftrace_dump_on_oops;
273
274extern void tracing_start(void);
275extern void tracing_stop(void);
276extern void ftrace_off_permanent(void);
277
278extern void
279ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
280
281/**
282 * ftrace_printk - printf formatting in the ftrace buffer
283 * @fmt: the printf format for printing
284 *
285 * Note: __ftrace_printk is an internal function for ftrace_printk and
286 * the @ip is passed in via the ftrace_printk macro.
287 *
288 * This function allows a kernel developer to debug fast path sections
289 * that printk is not appropriate for. By scattering in various
290 * printk like tracing in the code, a developer can quickly see
291 * where problems are occurring.
292 *
293 * This is intended as a debugging tool for the developer only.
294 * Please refrain from leaving ftrace_printks scattered around in
295 * your code.
296 */
297# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
298extern int
299__ftrace_printk(unsigned long ip, const char *fmt, ...)
300 __attribute__ ((format (printf, 2, 3)));
301extern void ftrace_dump(void);
302#else
303static inline void
304ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
305static inline int
306ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
307
308static inline void tracing_start(void) { }
309static inline void tracing_stop(void) { }
310static inline void ftrace_off_permanent(void) { }
311static inline int
312ftrace_printk(const char *fmt, ...)
313{
314 return 0;
315}
316static inline void ftrace_dump(void) { }
317#endif
318
319#ifdef CONFIG_FTRACE_MCOUNT_RECORD 326#ifdef CONFIG_FTRACE_MCOUNT_RECORD
320extern void ftrace_init(void); 327extern void ftrace_init(void);
321extern void ftrace_init_module(struct module *mod, 328extern void ftrace_init_module(struct module *mod,
@@ -327,36 +334,6 @@ ftrace_init_module(struct module *mod,
327 unsigned long *start, unsigned long *end) { } 334 unsigned long *start, unsigned long *end) { }
328#endif 335#endif
329 336
330enum {
331 POWER_NONE = 0,
332 POWER_CSTATE = 1,
333 POWER_PSTATE = 2,
334};
335
336struct power_trace {
337#ifdef CONFIG_POWER_TRACER
338 ktime_t stamp;
339 ktime_t end;
340 int type;
341 int state;
342#endif
343};
344
345#ifdef CONFIG_POWER_TRACER
346extern void trace_power_start(struct power_trace *it, unsigned int type,
347 unsigned int state);
348extern void trace_power_mark(struct power_trace *it, unsigned int type,
349 unsigned int state);
350extern void trace_power_end(struct power_trace *it);
351#else
352static inline void trace_power_start(struct power_trace *it, unsigned int type,
353 unsigned int state) { }
354static inline void trace_power_mark(struct power_trace *it, unsigned int type,
355 unsigned int state) { }
356static inline void trace_power_end(struct power_trace *it) { }
357#endif
358
359
360/* 337/*
361 * Structure that defines an entry function trace. 338 * Structure that defines an entry function trace.
362 */ 339 */
@@ -398,8 +375,7 @@ struct ftrace_ret_stack {
398extern void return_to_handler(void); 375extern void return_to_handler(void);
399 376
400extern int 377extern int
401ftrace_push_return_trace(unsigned long ret, unsigned long long time, 378ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
402 unsigned long func, int *depth);
403extern void 379extern void
404ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); 380ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
405 381
@@ -514,6 +490,50 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
514 return tsk->trace & TSK_TRACE_FL_GRAPH; 490 return tsk->trace & TSK_TRACE_FL_GRAPH;
515} 491}
516 492
493extern int ftrace_dump_on_oops;
494
517#endif /* CONFIG_TRACING */ 495#endif /* CONFIG_TRACING */
518 496
497
498#ifdef CONFIG_HW_BRANCH_TRACER
499
500void trace_hw_branch(u64 from, u64 to);
501void trace_hw_branch_oops(void);
502
503#else /* CONFIG_HW_BRANCH_TRACER */
504
505static inline void trace_hw_branch(u64 from, u64 to) {}
506static inline void trace_hw_branch_oops(void) {}
507
508#endif /* CONFIG_HW_BRANCH_TRACER */
509
510/*
511 * A syscall entry in the ftrace syscalls array.
512 *
513 * @name: name of the syscall
514 * @nb_args: number of parameters it takes
515 * @types: list of types as strings
516 * @args: list of args as strings (args[i] matches types[i])
517 */
518struct syscall_metadata {
519 const char *name;
520 int nb_args;
521 const char **types;
522 const char **args;
523};
524
525#ifdef CONFIG_FTRACE_SYSCALLS
526extern void arch_init_ftrace_syscalls(void);
527extern struct syscall_metadata *syscall_nr_to_meta(int nr);
528extern void start_ftrace_syscalls(void);
529extern void stop_ftrace_syscalls(void);
530extern void ftrace_syscall_enter(struct pt_regs *regs);
531extern void ftrace_syscall_exit(struct pt_regs *regs);
532#else
533static inline void start_ftrace_syscalls(void) { }
534static inline void stop_ftrace_syscalls(void) { }
535static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
536static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
537#endif
538
519#endif /* _LINUX_FTRACE_H */ 539#endif /* _LINUX_FTRACE_H */
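A minimal usage sketch of the ftrace_probe_ops interface added above (illustrative only, not part of the patch; the "vfs_*" glob and the my_* names are hypothetical):

#include <linux/init.h>
#include <linux/ftrace.h>

static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* runs each time a traced function matching the glob is hit */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe_func,
};

static int __init my_probe_init(void)
{
	/* attach the probe to every function whose name starts with "vfs_" */
	int n = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);

	return n < 0 ? n : 0;
}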
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 366a054d0b05..dca7bf8cffe2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
2#define _LINUX_FTRACE_IRQ_H 2#define _LINUX_FTRACE_IRQ_H
3 3
4 4
5#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) 5#ifdef CONFIG_FTRACE_NMI_ENTER
6extern void ftrace_nmi_enter(void); 6extern void ftrace_nmi_enter(void);
7extern void ftrace_nmi_exit(void); 7extern void ftrace_nmi_exit(void);
8#else 8#else
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index dd20cd78faa8..0bbc15f54536 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -4,6 +4,7 @@
4#include <linux/mmzone.h> 4#include <linux/mmzone.h>
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6#include <linux/linkage.h> 6#include <linux/linkage.h>
7#include <linux/topology.h>
7 8
8struct vm_area_struct; 9struct vm_area_struct;
9 10
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dda..faa1cf848bcd 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,55 +15,61 @@
15 * - bits 0-7 are the preemption count (max preemption depth: 256) 15 * - bits 0-7 are the preemption count (max preemption depth: 256)
16 * - bits 8-15 are the softirq count (max # of softirqs: 256) 16 * - bits 8-15 are the softirq count (max # of softirqs: 256)
17 * 17 *
18 * The hardirq count can be overridden per architecture, the default is: 18 * The hardirq count can in theory reach the same as NR_IRQS.
19 * In reality, the number of nested IRQS is limited to the stack
20 * size as well. For archs with over 1000 IRQS it is not practical
21 * to expect that they will all nest. We give a max of 10 bits for
22 * hardirq nesting. An arch may choose to give less than 10 bits.
23 * m68k expects it to be 8.
19 * 24 *
20 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096) 25 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
21 * - ( bit 28 is the PREEMPT_ACTIVE flag. ) 26 * - bit 26 is the NMI_MASK
27 * - bit 28 is the PREEMPT_ACTIVE flag
22 * 28 *
23 * PREEMPT_MASK: 0x000000ff 29 * PREEMPT_MASK: 0x000000ff
24 * SOFTIRQ_MASK: 0x0000ff00 30 * SOFTIRQ_MASK: 0x0000ff00
25 * HARDIRQ_MASK: 0x0fff0000 31 * HARDIRQ_MASK: 0x03ff0000
32 * NMI_MASK: 0x04000000
26 */ 33 */
27#define PREEMPT_BITS 8 34#define PREEMPT_BITS 8
28#define SOFTIRQ_BITS 8 35#define SOFTIRQ_BITS 8
36#define NMI_BITS 1
29 37
30#ifndef HARDIRQ_BITS 38#define MAX_HARDIRQ_BITS 10
31#define HARDIRQ_BITS 12
32 39
33#ifndef MAX_HARDIRQS_PER_CPU 40#ifndef HARDIRQ_BITS
34#define MAX_HARDIRQS_PER_CPU NR_IRQS 41# define HARDIRQ_BITS MAX_HARDIRQ_BITS
35#endif 42#endif
36 43
37/* 44#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
38 * The hardirq mask has to be large enough to have space for potentially 45#error HARDIRQ_BITS too high!
39 * all IRQ sources in the system nesting on a single CPU.
40 */
41#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
42# error HARDIRQ_BITS is too low!
43#endif
44#endif 46#endif
45 47
46#define PREEMPT_SHIFT 0 48#define PREEMPT_SHIFT 0
47#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) 49#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
48#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) 50#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
51#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
49 52
50#define __IRQ_MASK(x) ((1UL << (x))-1) 53#define __IRQ_MASK(x) ((1UL << (x))-1)
51 54
52#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) 55#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
53#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) 56#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
54#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) 57#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
58#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
55 59
56#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) 60#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
57#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) 61#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
58#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) 62#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
63#define NMI_OFFSET (1UL << NMI_SHIFT)
59 64
60#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) 65#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
61#error PREEMPT_ACTIVE is too low! 66#error PREEMPT_ACTIVE is too low!
62#endif 67#endif
63 68
64#define hardirq_count() (preempt_count() & HARDIRQ_MASK) 69#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
65#define softirq_count() (preempt_count() & SOFTIRQ_MASK) 70#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
66#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) 71#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
72 | NMI_MASK))
67 73
68/* 74/*
69 * Are we doing bottom half or hardware interrupt processing? 75 * Are we doing bottom half or hardware interrupt processing?
@@ -73,6 +79,11 @@
73#define in_softirq() (softirq_count()) 79#define in_softirq() (softirq_count())
74#define in_interrupt() (irq_count()) 80#define in_interrupt() (irq_count())
75 81
82/*
83 * Are we in NMI context?
84 */
85#define in_nmi() (preempt_count() & NMI_MASK)
86
76#if defined(CONFIG_PREEMPT) 87#if defined(CONFIG_PREEMPT)
77# define PREEMPT_INATOMIC_BASE kernel_locked() 88# define PREEMPT_INATOMIC_BASE kernel_locked()
78# define PREEMPT_CHECK_OFFSET 1 89# define PREEMPT_CHECK_OFFSET 1
@@ -164,20 +175,24 @@ extern void irq_enter(void);
164 */ 175 */
165extern void irq_exit(void); 176extern void irq_exit(void);
166 177
167#define nmi_enter() \ 178#define nmi_enter() \
168 do { \ 179 do { \
169 ftrace_nmi_enter(); \ 180 ftrace_nmi_enter(); \
170 lockdep_off(); \ 181 BUG_ON(in_nmi()); \
171 rcu_nmi_enter(); \ 182 add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
172 __irq_enter(); \ 183 lockdep_off(); \
184 rcu_nmi_enter(); \
185 trace_hardirq_enter(); \
173 } while (0) 186 } while (0)
174 187
175#define nmi_exit() \ 188#define nmi_exit() \
176 do { \ 189 do { \
177 __irq_exit(); \ 190 trace_hardirq_exit(); \
178 rcu_nmi_exit(); \ 191 rcu_nmi_exit(); \
179 lockdep_on(); \ 192 lockdep_on(); \
180 ftrace_nmi_exit(); \ 193 BUG_ON(!in_nmi()); \
194 sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
195 ftrace_nmi_exit(); \
181 } while (0) 196 } while (0)
182 197
183#endif /* LINUX_HARDIRQ_H */ 198#endif /* LINUX_HARDIRQ_H */
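With the default HARDIRQ_BITS of 10, the new preempt_count() layout works out as below (a worked sketch, not part of the patch):

#include <linux/types.h>
#include <linux/hardirq.h>

/*
 *  PREEMPT_MASK  0x000000ff   bits  0-7
 *  SOFTIRQ_MASK  0x0000ff00   bits  8-15
 *  HARDIRQ_MASK  0x03ff0000   bits 16-25
 *  NMI_MASK      0x04000000   bit  26
 *
 * nmi_enter()'s add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET) sets
 * bit 26 plus one hardirq level, so in_nmi() simply tests bit 26:
 */
static bool may_sleep_here(void)
{
	return !in_nmi() && !in_interrupt();
}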
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index ed21bd3dbd25..29ee2873f4a8 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -1,68 +1,6 @@
1#ifndef _LINUX_HDREG_H 1#ifndef _LINUX_HDREG_H
2#define _LINUX_HDREG_H 2#define _LINUX_HDREG_H
3 3
4#ifdef __KERNEL__
5#include <linux/ata.h>
6
7/*
8 * This file contains some defines for the AT-hd-controller.
9 * Various sources.
10 */
11
12/* ide.c has its own port definitions in "ide.h" */
13
14#define HD_IRQ 14
15
16/* Hd controller regs. Ref: IBM AT Bios-listing */
17#define HD_DATA 0x1f0 /* _CTL when writing */
18#define HD_ERROR 0x1f1 /* see err-bits */
19#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */
20#define HD_SECTOR 0x1f3 /* starting sector */
21#define HD_LCYL 0x1f4 /* starting cylinder */
22#define HD_HCYL 0x1f5 /* high byte of starting cyl */
23#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */
24#define HD_STATUS 0x1f7 /* see status-bits */
25#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */
26#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */
27#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */
28
29#define HD_CMD 0x3f6 /* used for resets */
30#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */
31
32/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */
33
34/* Bits of HD_STATUS */
35#define ERR_STAT 0x01
36#define INDEX_STAT 0x02
37#define ECC_STAT 0x04 /* Corrected error */
38#define DRQ_STAT 0x08
39#define SEEK_STAT 0x10
40#define SRV_STAT 0x10
41#define WRERR_STAT 0x20
42#define READY_STAT 0x40
43#define BUSY_STAT 0x80
44
45/* Bits for HD_ERROR */
46#define MARK_ERR 0x01 /* Bad address mark */
47#define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */
48#define TRK0_ERR 0x02 /* couldn't find track 0 */
49#define EOM_ERR 0x02 /* End Of Media (ATAPI) */
50#define ABRT_ERR 0x04 /* Command aborted */
51#define MCR_ERR 0x08 /* media change request */
52#define ID_ERR 0x10 /* ID field not found */
53#define MC_ERR 0x20 /* media changed */
54#define ECC_ERR 0x40 /* Uncorrectable ECC error */
55#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
56#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
57#define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */
58
59/* Bits of HD_NSECTOR */
60#define CD 0x01
61#define IO 0x02
62#define REL 0x04
63#define TAG_MASK 0xf8
64#endif /* __KERNEL__ */
65
66#include <linux/types.h> 4#include <linux/types.h>
67 5
68/* 6/*
@@ -191,6 +129,7 @@ typedef struct hd_drive_hob_hdr {
191#define TASKFILE_INVALID 0x7fff 129#define TASKFILE_INVALID 0x7fff
192#endif 130#endif
193 131
132#ifndef __KERNEL__
194/* ATA/ATAPI Commands pre T13 Spec */ 133/* ATA/ATAPI Commands pre T13 Spec */
195#define WIN_NOP 0x00 134#define WIN_NOP 0x00
196/* 135/*
@@ -379,6 +318,7 @@ typedef struct hd_drive_hob_hdr {
379#define SECURITY_ERASE_UNIT 0xBD 318#define SECURITY_ERASE_UNIT 0xBD
380#define SECURITY_FREEZE_LOCK 0xBE 319#define SECURITY_FREEZE_LOCK 0xBE
381#define SECURITY_DISABLE_PASSWORD 0xBF 320#define SECURITY_DISABLE_PASSWORD 0xBF
321#endif /* __KERNEL__ */
382 322
383struct hd_geometry { 323struct hd_geometry {
384 unsigned char heads; 324 unsigned char heads;
@@ -448,6 +388,7 @@ enum {
448 388
449#define __NEW_HD_DRIVE_ID 389#define __NEW_HD_DRIVE_ID
450 390
391#ifndef __KERNEL__
451/* 392/*
452 * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec. 393 * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec.
453 * 394 *
@@ -699,6 +640,7 @@ struct hd_driveid {
699 * 7:0 Signature 640 * 7:0 Signature
700 */ 641 */
701}; 642};
643#endif /* __KERNEL__ */
702 644
703/* 645/*
704 * IDE "nice" flags. These are used on a per drive basis to determine 646 * IDE "nice" flags. These are used on a per drive basis to determine
diff --git a/include/linux/hid.h b/include/linux/hid.h
index fa8ee9cef7be..a72876e43589 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -270,6 +270,7 @@ struct hid_item {
270 270
271#define HID_QUIRK_INVERT 0x00000001 271#define HID_QUIRK_INVERT 0x00000001
272#define HID_QUIRK_NOTOUCH 0x00000002 272#define HID_QUIRK_NOTOUCH 0x00000002
273#define HID_QUIRK_IGNORE 0x00000004
273#define HID_QUIRK_NOGET 0x00000008 274#define HID_QUIRK_NOGET 0x00000008
274#define HID_QUIRK_BADPAD 0x00000020 275#define HID_QUIRK_BADPAD 0x00000020
275#define HID_QUIRK_MULTI_INPUT 0x00000040 276#define HID_QUIRK_MULTI_INPUT 0x00000040
@@ -603,12 +604,17 @@ struct hid_ll_driver {
603 int (*open)(struct hid_device *hdev); 604 int (*open)(struct hid_device *hdev);
604 void (*close)(struct hid_device *hdev); 605 void (*close)(struct hid_device *hdev);
605 606
607 int (*power)(struct hid_device *hdev, int level);
608
606 int (*hidinput_input_event) (struct input_dev *idev, unsigned int type, 609 int (*hidinput_input_event) (struct input_dev *idev, unsigned int type,
607 unsigned int code, int value); 610 unsigned int code, int value);
608 611
609 int (*parse)(struct hid_device *hdev); 612 int (*parse)(struct hid_device *hdev);
610}; 613};
611 614
615#define PM_HINT_FULLON 1<<5
616#define PM_HINT_NORMAL 1<<1
617
612/* Applications from HID Usage Tables 4/8/99 Version 1.1 */ 618/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
613/* We ignore a few input applications that are not widely used */ 619/* We ignore a few input applications that are not widely used */
614#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) 620#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
@@ -641,6 +647,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
641void hid_output_report(struct hid_report *report, __u8 *data); 647void hid_output_report(struct hid_report *report, __u8 *data);
642struct hid_device *hid_allocate_device(void); 648struct hid_device *hid_allocate_device(void);
643int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); 649int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
650int hid_check_keys_pressed(struct hid_device *hid);
644int hid_connect(struct hid_device *hid, unsigned int connect_mask); 651int hid_connect(struct hid_device *hid, unsigned int connect_mask);
645 652
646/** 653/**
@@ -791,21 +798,5 @@ dbg_hid(const char *fmt, ...)
791 __FILE__ , ## arg) 798 __FILE__ , ## arg)
792#endif /* HID_FF */ 799#endif /* HID_FF */
793 800
794#ifdef __KERNEL__
795#ifdef CONFIG_HID_COMPAT
796#define HID_COMPAT_LOAD_DRIVER(name) \
797/* prototype to avoid sparse warning */ \
798extern void hid_compat_##name(void); \
799void hid_compat_##name(void) { } \
800EXPORT_SYMBOL(hid_compat_##name)
801#else
802#define HID_COMPAT_LOAD_DRIVER(name)
803#endif /* HID_COMPAT */
804#define HID_COMPAT_CALL_DRIVER(name) do { \
805 extern void hid_compat_##name(void); \
806 hid_compat_##name(); \
807} while (0)
808#endif /* __KERNEL__ */
809
810#endif 801#endif
811 802
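A sketch of how a low-level transport driver might wire up the new power() hook and PM_HINT values (illustrative only; the my_* names and callback bodies are made up):

#include <linux/hid.h>

static int my_ll_power(struct hid_device *hdev, int level)
{
	switch (level) {
	case PM_HINT_FULLON:
		/* bring the link to full power before heavy I/O */
		break;
	case PM_HINT_NORMAL:
		/* allow runtime power saving again */
		break;
	}
	return 0;
}

static struct hid_ll_driver my_ll_driver = {
	/* .start/.stop/.open/.close/.parse as before */
	.power = my_ll_power,
};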
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 13875ce9112a..1fcb7126a01f 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -19,8 +19,21 @@ static inline void flush_kernel_dcache_page(struct page *page)
19} 19}
20#endif 20#endif
21 21
22#ifdef CONFIG_HIGHMEM 22#include <asm/kmap_types.h>
23
24#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
25
26void debug_kmap_atomic(enum km_type type);
23 27
28#else
29
30static inline void debug_kmap_atomic(enum km_type type)
31{
32}
33
34#endif
35
36#ifdef CONFIG_HIGHMEM
24#include <asm/highmem.h> 37#include <asm/highmem.h>
25 38
26/* declarations for linux/mm/highmem.c */ 39/* declarations for linux/mm/highmem.c */
@@ -44,8 +57,6 @@ static inline void *kmap(struct page *page)
44 57
45#define kunmap(page) do { (void) (page); } while (0) 58#define kunmap(page) do { (void) (page); } while (0)
46 59
47#include <asm/kmap_types.h>
48
49static inline void *kmap_atomic(struct page *page, enum km_type idx) 60static inline void *kmap_atomic(struct page *page, enum km_type idx)
50{ 61{
51 pagefault_disable(); 62 pagefault_disable();
diff --git a/include/linux/i2c-algo-sgi.h b/include/linux/i2c-algo-sgi.h
deleted file mode 100644
index 3b7715024e69..000000000000
--- a/include/linux/i2c-algo-sgi.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License version 2 as published by the Free Software Foundation.
4 *
5 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
6 */
7
8#ifndef I2C_ALGO_SGI_H
9#define I2C_ALGO_SGI_H 1
10
11#include <linux/i2c.h>
12
13struct i2c_algo_sgi_data {
14 void *data; /* private data for lowlevel routines */
15 unsigned (*getctrl)(void *data);
16 void (*setctrl)(void *data, unsigned val);
17 unsigned (*rdata)(void *data);
18 void (*wdata)(void *data, unsigned val);
19
20 int xfer_timeout;
21 int ack_timeout;
22};
23
24int i2c_sgi_add_bus(struct i2c_adapter *);
25
26#endif /* I2C_ALGO_SGI_H */
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index f27604af8378..ee9fbc172405 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -33,47 +33,10 @@
33 33
34#define I2C_DRIVERID_MSP3400 1 34#define I2C_DRIVERID_MSP3400 1
35#define I2C_DRIVERID_TUNER 2 35#define I2C_DRIVERID_TUNER 2
36#define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */
37#define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */
38#define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */
39#define I2C_DRIVERID_SAA7111A 8 /* video input processor */
40#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
41#define I2C_DRIVERID_SAA7110 22 /* video decoder */
42#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */
43#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ 36#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */
44#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ 37#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */
45#define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */
46#define I2C_DRIVERID_BT819 40 /* video decoder */
47#define I2C_DRIVERID_BT856 41 /* video encoder */
48#define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */
49#define I2C_DRIVERID_ADV7175 48 /* ADV 7175/7176 video encoder */
50#define I2C_DRIVERID_SAA7114 49 /* video decoder */
51#define I2C_DRIVERID_ADV7170 54 /* video encoder */
52#define I2C_DRIVERID_SAA7191 57 /* video decoder */
53#define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */
54#define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. */
55#define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */
56#define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */
57#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */
58#define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */
59#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */
60#define I2C_DRIVERID_SAA7127 72 /* saa7127 video encoder */
61#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ 38#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
62#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */ 39#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */
63#define I2C_DRIVERID_TVP5150 76 /* TVP5150 video decoder */
64#define I2C_DRIVERID_WM8739 77 /* wm8739 audio processor */
65#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */
66#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */
67#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */
68#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */
69#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */
70#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */
71#define I2C_DRIVERID_VP27SMPX 93 /* Panasonic VP27s tuner internal MPX */
72#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */
73#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */
74#define I2C_DRIVERID_AU8522 97 /* Auvitek au8522 */
75
76#define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */
77 40
78/* 41/*
79 * ---- Adapter types ---------------------------------------------------- 42 * ---- Adapter types ----------------------------------------------------
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index c86c3b07604c..00ee11eb9092 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -353,8 +353,8 @@ struct i2c_adapter {
353 void *algo_data; 353 void *algo_data;
354 354
355 /* --- administration stuff. */ 355 /* --- administration stuff. */
356 int (*client_register)(struct i2c_client *); 356 int (*client_register)(struct i2c_client *) __deprecated;
357 int (*client_unregister)(struct i2c_client *); 357 int (*client_unregister)(struct i2c_client *) __deprecated;
358 358
359 /* data fields that are valid for all devices */ 359 /* data fields that are valid for all devices */
360 u8 level; /* nesting level for lockdep */ 360 u8 level; /* nesting level for lockdep */
diff --git a/include/linux/i2c/at24.h b/include/linux/i2c/at24.h
index f6edd522a929..8ace93024d60 100644
--- a/include/linux/i2c/at24.h
+++ b/include/linux/i2c/at24.h
@@ -2,6 +2,7 @@
2#define _LINUX_AT24_H 2#define _LINUX_AT24_H
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/memory.h>
5 6
6/* 7/*
7 * As seen through Linux I2C, differences between the most common types of I2C 8 * As seen through Linux I2C, differences between the most common types of I2C
@@ -23,6 +24,9 @@ struct at24_platform_data {
23#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ 24#define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */
24#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ 25#define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */
25#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ 26#define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */
27
28 void (*setup)(struct memory_accessor *, void *context);
29 void *context;
26}; 30};
27 31
28#endif /* _LINUX_AT24_H */ 32#endif /* _LINUX_AT24_H */
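A sketch of board code using the new setup() hook to pull data out of the EEPROM once the at24 driver has bound (illustrative only; the sizes and the MAC-address use are made up):

#include <linux/kernel.h>
#include <linux/i2c/at24.h>

static void board_eeprom_setup(struct memory_accessor *ma, void *context)
{
	char mac[6];

	/* the accessor gives read()/write() access to the EEPROM contents */
	if (ma->read(ma, mac, 0, 6) == 6)
		pr_info("EEPROM MAC: %pM\n", mac);
}

static struct at24_platform_data board_eeprom = {
	.byte_len	= 256,
	.page_size	= 16,
	.setup		= board_eeprom_setup,
};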
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h
index 8137f660a5cc..0dc80ef24975 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl4030.h
@@ -218,6 +218,53 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
218 218
219/*----------------------------------------------------------------------*/ 219/*----------------------------------------------------------------------*/
220 220
221/* Power bus message definitions */
222
223#define DEV_GRP_NULL 0x0
224#define DEV_GRP_P1 0x1
225#define DEV_GRP_P2 0x2
226#define DEV_GRP_P3 0x4
227
228#define RES_GRP_RES 0x0
229#define RES_GRP_PP 0x1
230#define RES_GRP_RC 0x2
231#define RES_GRP_PP_RC 0x3
232#define RES_GRP_PR 0x4
233#define RES_GRP_PP_PR 0x5
234#define RES_GRP_RC_PR 0x6
235#define RES_GRP_ALL 0x7
236
237#define RES_TYPE2_R0 0x0
238
239#define RES_TYPE_ALL 0x7
240
241#define RES_STATE_WRST 0xF
242#define RES_STATE_ACTIVE 0xE
243#define RES_STATE_SLEEP 0x8
244#define RES_STATE_OFF 0x0
245
246/*
247 * Power Bus Message Format ... these can be sent individually by Linux,
248 * but are usually part of downloaded scripts that are run when various
249 * power events are triggered.
250 *
251 * Broadcast Message (16 Bits):
252 * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4]
253 * RES_STATE[3:0]
254 *
255 * Singular Message (16 Bits):
256 * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0]
257 */
258
259#define MSG_BROADCAST(devgrp, grp, type, type2, state) \
260 ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \
261 | (type) << 4 | (state))
262
263#define MSG_SINGULAR(devgrp, id, state) \
264 ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
265
266/*----------------------------------------------------------------------*/
267
221struct twl4030_bci_platform_data { 268struct twl4030_bci_platform_data {
222 int *battery_tmp_tbl; 269 int *battery_tmp_tbl;
223 unsigned int tblsize; 270 unsigned int tblsize;
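As a worked example of the message format above (not part of the patch), a broadcast putting every resource of processor group P1 to sleep packs as:

MSG_BROADCAST(DEV_GRP_P1, RES_GRP_ALL, RES_TYPE_ALL, RES_TYPE2_R0, RES_STATE_SLEEP)
	= (0x1 << 13) | (1 << 12) | (0x7 << 9) | (0x0 << 7) | (0x7 << 4) | 0x8
	= 0x3e78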
diff --git a/include/linux/ide.h b/include/linux/ide.h
index d5d832271f44..a5d26f66ef78 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -265,7 +265,7 @@ enum {
265 IDE_TFLAG_WRITE = (1 << 12), 265 IDE_TFLAG_WRITE = (1 << 12),
266 IDE_TFLAG_CUSTOM_HANDLER = (1 << 13), 266 IDE_TFLAG_CUSTOM_HANDLER = (1 << 13),
267 IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 14), 267 IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 14),
268 IDE_TFLAG_IN_HOB_FEATURE = (1 << 15), 268 IDE_TFLAG_IN_HOB_ERROR = (1 << 15),
269 IDE_TFLAG_IN_HOB_NSECT = (1 << 16), 269 IDE_TFLAG_IN_HOB_NSECT = (1 << 16),
270 IDE_TFLAG_IN_HOB_LBAL = (1 << 17), 270 IDE_TFLAG_IN_HOB_LBAL = (1 << 17),
271 IDE_TFLAG_IN_HOB_LBAM = (1 << 18), 271 IDE_TFLAG_IN_HOB_LBAM = (1 << 18),
@@ -273,10 +273,10 @@ enum {
273 IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL | 273 IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL |
274 IDE_TFLAG_IN_HOB_LBAM | 274 IDE_TFLAG_IN_HOB_LBAM |
275 IDE_TFLAG_IN_HOB_LBAH, 275 IDE_TFLAG_IN_HOB_LBAH,
276 IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | 276 IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_ERROR |
277 IDE_TFLAG_IN_HOB_NSECT | 277 IDE_TFLAG_IN_HOB_NSECT |
278 IDE_TFLAG_IN_HOB_LBA, 278 IDE_TFLAG_IN_HOB_LBA,
279 IDE_TFLAG_IN_FEATURE = (1 << 20), 279 IDE_TFLAG_IN_ERROR = (1 << 20),
280 IDE_TFLAG_IN_NSECT = (1 << 21), 280 IDE_TFLAG_IN_NSECT = (1 << 21),
281 IDE_TFLAG_IN_LBAL = (1 << 22), 281 IDE_TFLAG_IN_LBAL = (1 << 22),
282 IDE_TFLAG_IN_LBAM = (1 << 23), 282 IDE_TFLAG_IN_LBAM = (1 << 23),
@@ -310,8 +310,12 @@ enum {
310 310
311struct ide_taskfile { 311struct ide_taskfile {
312 u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */ 312 u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */
313 /* 1-5: additional data to support LBA48 */
314 union {
315 u8 hob_error; /* read: error */
316 u8 hob_feature; /* write: feature */
317 };
313 318
314 u8 hob_feature; /* 1-5: additional data to support LBA48 */
315 u8 hob_nsect; 319 u8 hob_nsect;
316 u8 hob_lbal; 320 u8 hob_lbal;
317 u8 hob_lbam; 321 u8 hob_lbam;
@@ -352,6 +356,8 @@ struct ide_cmd {
352 356
353 unsigned int nbytes; 357 unsigned int nbytes;
354 unsigned int nleft; 358 unsigned int nleft;
359 unsigned int last_xfer_len;
360
355 struct scatterlist *cursg; 361 struct scatterlist *cursg;
356 unsigned int cursg_ofs; 362 unsigned int cursg_ofs;
357 363
@@ -375,7 +381,7 @@ enum {
375 * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes. 381 * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes.
376 * This is used for several packet commands (not for READ/WRITE commands). 382 * This is used for several packet commands (not for READ/WRITE commands).
377 */ 383 */
378#define IDE_PC_BUFFER_SIZE 256 384#define IDE_PC_BUFFER_SIZE 64
379#define ATAPI_WAIT_PC (60 * HZ) 385#define ATAPI_WAIT_PC (60 * HZ)
380 386
381struct ide_atapi_pc { 387struct ide_atapi_pc {
@@ -413,9 +419,6 @@ struct ide_atapi_pc {
413 struct idetape_bh *bh; 419 struct idetape_bh *bh;
414 char *b_data; 420 char *b_data;
415 421
416 struct scatterlist *sg;
417 unsigned int sg_cnt;
418
419 unsigned long timeout; 422 unsigned long timeout;
420}; 423};
421 424
@@ -456,11 +459,6 @@ enum {
456 IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), 459 IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3),
457 /* TOC track numbers are in BCD. */ 460 /* TOC track numbers are in BCD. */
458 IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), 461 IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
459 /*
460 * Drive does not provide data in multiples of SECTOR_SIZE
461 * when more than one interrupt is needed.
462 */
463 IDE_AFLAG_LIMIT_NFRAMES = (1 << 5),
464 /* Saved TOC information is current. */ 462 /* Saved TOC information is current. */
465 IDE_AFLAG_TOC_VALID = (1 << 6), 463 IDE_AFLAG_TOC_VALID = (1 << 6),
466 /* We think that the drive door is locked. */ 464 /* We think that the drive door is locked. */
@@ -605,7 +603,7 @@ struct ide_drive_s {
605 603
606 unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */ 604 unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
607 unsigned int cyl; /* "real" number of cyls */ 605 unsigned int cyl; /* "real" number of cyls */
608 unsigned int drive_data; /* used by set_pio_mode/selectproc */ 606 unsigned int drive_data; /* used by set_pio_mode/dev_select() */
609 unsigned int failures; /* current failure count */ 607 unsigned int failures; /* current failure count */
610 unsigned int max_failures; /* maximum allowed failure count */ 608 unsigned int max_failures; /* maximum allowed failure count */
611 u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */ 609 u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */
@@ -661,9 +659,9 @@ struct ide_tp_ops {
661 void (*exec_command)(struct hwif_s *, u8); 659 void (*exec_command)(struct hwif_s *, u8);
662 u8 (*read_status)(struct hwif_s *); 660 u8 (*read_status)(struct hwif_s *);
663 u8 (*read_altstatus)(struct hwif_s *); 661 u8 (*read_altstatus)(struct hwif_s *);
662 void (*write_devctl)(struct hwif_s *, u8);
664 663
665 void (*set_irq)(struct hwif_s *, int); 664 void (*dev_select)(ide_drive_t *);
666
667 void (*tf_load)(ide_drive_t *, struct ide_cmd *); 665 void (*tf_load)(ide_drive_t *, struct ide_cmd *);
668 void (*tf_read)(ide_drive_t *, struct ide_cmd *); 666 void (*tf_read)(ide_drive_t *, struct ide_cmd *);
669 667
@@ -681,7 +679,6 @@ extern const struct ide_tp_ops default_tp_ops;
681 * @init_dev: host specific initialization of a device 679 * @init_dev: host specific initialization of a device
682 * @set_pio_mode: routine to program host for PIO mode 680 * @set_pio_mode: routine to program host for PIO mode
683 * @set_dma_mode: routine to program host for DMA mode 681 * @set_dma_mode: routine to program host for DMA mode
684 * @selectproc: tweaks hardware to select drive
685 * @reset_poll: chipset polling based on hba specifics 682 * @reset_poll: chipset polling based on hba specifics
686 * @pre_reset: chipset specific changes to default for device-hba resets 683 * @pre_reset: chipset specific changes to default for device-hba resets
687 * @resetproc: routine to reset controller after a disk reset 684 * @resetproc: routine to reset controller after a disk reset
@@ -698,7 +695,6 @@ struct ide_port_ops {
698 void (*init_dev)(ide_drive_t *); 695 void (*init_dev)(ide_drive_t *);
699 void (*set_pio_mode)(ide_drive_t *, const u8); 696 void (*set_pio_mode)(ide_drive_t *, const u8);
700 void (*set_dma_mode)(ide_drive_t *, const u8); 697 void (*set_dma_mode)(ide_drive_t *, const u8);
701 void (*selectproc)(ide_drive_t *);
702 int (*reset_poll)(ide_drive_t *); 698 int (*reset_poll)(ide_drive_t *);
703 void (*pre_reset)(ide_drive_t *); 699 void (*pre_reset)(ide_drive_t *);
704 void (*resetproc)(ide_drive_t *); 700 void (*resetproc)(ide_drive_t *);
@@ -719,8 +715,10 @@ struct ide_dma_ops {
719 int (*dma_end)(struct ide_drive_s *); 715 int (*dma_end)(struct ide_drive_s *);
720 int (*dma_test_irq)(struct ide_drive_s *); 716 int (*dma_test_irq)(struct ide_drive_s *);
721 void (*dma_lost_irq)(struct ide_drive_s *); 717 void (*dma_lost_irq)(struct ide_drive_s *);
718 /* below ones are optional */
719 int (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
722 int (*dma_timer_expiry)(struct ide_drive_s *); 720 int (*dma_timer_expiry)(struct ide_drive_s *);
723 void (*dma_timeout)(struct ide_drive_s *); 721 void (*dma_clear)(struct ide_drive_s *);
724 /* 722 /*
725 * The following method is optional and only required to be 723 * The following method is optional and only required to be
726 * implemented for the SFF-8038i compatible controllers. 724 * implemented for the SFF-8038i compatible controllers.
@@ -1169,18 +1167,15 @@ void ide_tf_dump(const char *, struct ide_taskfile *);
1169void ide_exec_command(ide_hwif_t *, u8); 1167void ide_exec_command(ide_hwif_t *, u8);
1170u8 ide_read_status(ide_hwif_t *); 1168u8 ide_read_status(ide_hwif_t *);
1171u8 ide_read_altstatus(ide_hwif_t *); 1169u8 ide_read_altstatus(ide_hwif_t *);
1170void ide_write_devctl(ide_hwif_t *, u8);
1172 1171
1173void ide_set_irq(ide_hwif_t *, int); 1172void ide_dev_select(ide_drive_t *);
1174
1175void ide_tf_load(ide_drive_t *, struct ide_cmd *); 1173void ide_tf_load(ide_drive_t *, struct ide_cmd *);
1176void ide_tf_read(ide_drive_t *, struct ide_cmd *); 1174void ide_tf_read(ide_drive_t *, struct ide_cmd *);
1177 1175
1178void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); 1176void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
1179void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); 1177void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
1180 1178
1181int ide_io_buffers(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int);
1182
1183extern void SELECT_DRIVE(ide_drive_t *);
1184void SELECT_MASK(ide_drive_t *, int); 1179void SELECT_MASK(ide_drive_t *, int);
1185 1180
1186u8 ide_read_error(ide_drive_t *); 1181u8 ide_read_error(ide_drive_t *);
@@ -1226,6 +1221,8 @@ ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);
1226 1221
1227ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *); 1222ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);
1228 1223
1224void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);
1225
1229void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8); 1226void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);
1230 1227
1231int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16); 1228int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
@@ -1443,8 +1440,8 @@ ide_startstop_t ide_dma_intr(ide_drive_t *);
1443int ide_allocate_dma_engine(ide_hwif_t *); 1440int ide_allocate_dma_engine(ide_hwif_t *);
1444void ide_release_dma_engine(ide_hwif_t *); 1441void ide_release_dma_engine(ide_hwif_t *);
1445 1442
1446int ide_build_sglist(ide_drive_t *, struct ide_cmd *); 1443int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
1447void ide_destroy_dmatable(ide_drive_t *); 1444void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);
1448 1445
1449#ifdef CONFIG_BLK_DEV_IDEDMA_SFF 1446#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
1450int config_drive_for_dma(ide_drive_t *); 1447int config_drive_for_dma(ide_drive_t *);
@@ -1462,7 +1459,6 @@ static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
1462#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ 1459#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
1463 1460
1464void ide_dma_lost_irq(ide_drive_t *); 1461void ide_dma_lost_irq(ide_drive_t *);
1465void ide_dma_timeout(ide_drive_t *);
1466ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int); 1462ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);
1467 1463
1468#else 1464#else
@@ -1478,8 +1474,10 @@ static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
1478static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; } 1474static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
1479static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; } 1475static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
1480static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; } 1476static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
1481static inline int ide_build_sglist(ide_drive_t *drive, 1477static inline int ide_dma_prepare(ide_drive_t *drive,
1482 struct ide_cmd *cmd) { return 0; } 1478 struct ide_cmd *cmd) { return 1; }
1479static inline void ide_dma_unmap_sg(ide_drive_t *drive,
1480 struct ide_cmd *cmd) { ; }
1483#endif /* CONFIG_BLK_DEV_IDEDMA */ 1481#endif /* CONFIG_BLK_DEV_IDEDMA */
1484 1482
1485#ifdef CONFIG_BLK_DEV_IDEACPI 1483#ifdef CONFIG_BLK_DEV_IDEACPI
diff --git a/include/linux/idr.h b/include/linux/idr.h
index dd846df8cd32..e968db71e33a 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -106,6 +106,7 @@ int idr_get_new(struct idr *idp, void *ptr, int *id);
106int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); 106int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
107int idr_for_each(struct idr *idp, 107int idr_for_each(struct idr *idp,
108 int (*fn)(int id, void *p, void *data), void *data); 108 int (*fn)(int id, void *p, void *data), void *data);
109void *idr_get_next(struct idr *idp, int *nextid);
109void *idr_replace(struct idr *idp, void *ptr, int id); 110void *idr_replace(struct idr *idp, void *ptr, int id);
110void idr_remove(struct idr *idp, int id); 111void idr_remove(struct idr *idp, int id);
111void idr_remove_all(struct idr *idp); 112void idr_remove_all(struct idr *idp);
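A minimal sketch of iterating an idr with the new idr_get_next() (illustrative only, not part of the patch):

#include <linux/kernel.h>
#include <linux/idr.h>

static void dump_idr(struct idr *idp)
{
	void *entry;
	int id = 0;

	/* idr_get_next() returns the first entry at or above *id */
	while ((entry = idr_get_next(idp, &id)) != NULL) {
		pr_info("id %d -> %p\n", id, entry);
		id++;	/* move past the entry just returned */
	}
}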
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 1d6c71d96ede..77214ead1a36 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
123#define ecap_eim_support(e) ((e >> 4) & 0x1) 123#define ecap_eim_support(e) ((e >> 4) & 0x1)
124#define ecap_ir_support(e) ((e >> 3) & 0x1) 124#define ecap_ir_support(e) ((e >> 3) & 0x1)
125#define ecap_max_handle_mask(e) ((e >> 20) & 0xf) 125#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
126 126#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
127 127
128/* IOTLB_REG */ 128/* IOTLB_REG */
129#define DMA_TLB_FLUSH_GRANU_OFFSET 60 129#define DMA_TLB_FLUSH_GRANU_OFFSET 60
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 4528bf70866a..8a9613d0c674 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -278,6 +278,11 @@ enum
278 NR_SOFTIRQS 278 NR_SOFTIRQS
279}; 279};
280 280
281/* map softirq index to softirq name. update 'softirq_to_name' in
282 * kernel/softirq.c when adding a new softirq.
283 */
284extern char *softirq_to_name[NR_SOFTIRQS];
285
281/* softirq mask and active fields moved to irq_cpustat_t in 286/* softirq mask and active fields moved to irq_cpustat_t in
282 * asm/hardirq.h to get better cache usage. KAO 287 * asm/hardirq.h to get better cache usage. KAO
283 */ 288 */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8a7bfb1b6ca0..3af4ffd591b9 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,6 +21,7 @@
21 21
22#define IOMMU_READ (1) 22#define IOMMU_READ (1)
23#define IOMMU_WRITE (2) 23#define IOMMU_WRITE (2)
24#define IOMMU_CACHE (4) /* DMA cache coherency */
24 25
25struct device; 26struct device;
26 27
@@ -28,6 +29,8 @@ struct iommu_domain {
28 void *priv; 29 void *priv;
29}; 30};
30 31
32#define IOMMU_CAP_CACHE_COHERENCY 0x1
33
31struct iommu_ops { 34struct iommu_ops {
32 int (*domain_init)(struct iommu_domain *domain); 35 int (*domain_init)(struct iommu_domain *domain);
33 void (*domain_destroy)(struct iommu_domain *domain); 36 void (*domain_destroy)(struct iommu_domain *domain);
@@ -39,6 +42,8 @@ struct iommu_ops {
39 size_t size); 42 size_t size);
40 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, 43 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
41 unsigned long iova); 44 unsigned long iova);
45 int (*domain_has_cap)(struct iommu_domain *domain,
46 unsigned long cap);
42}; 47};
43 48
44#ifdef CONFIG_IOMMU_API 49#ifdef CONFIG_IOMMU_API
@@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
57 size_t size); 62 size_t size);
58extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, 63extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
59 unsigned long iova); 64 unsigned long iova);
65extern int iommu_domain_has_cap(struct iommu_domain *domain,
66 unsigned long cap);
60 67
61#else /* CONFIG_IOMMU_API */ 68#else /* CONFIG_IOMMU_API */
62 69
@@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
107 return 0; 114 return 0;
108} 115}
109 116
117static inline int domain_has_cap(struct iommu_domain *domain,
118 unsigned long cap)
119{
120 return 0;
121}
122
110#endif /* CONFIG_IOMMU_API */ 123#endif /* CONFIG_IOMMU_API */
111 124
112#endif /* __LINUX_IOMMU_H */ 125#endif /* __LINUX_IOMMU_H */
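A sketch of a caller (e.g. device assignment code) using the new capability query to decide whether to request snooped mappings (illustrative only; the helper name is made up):

#include <linux/types.h>
#include <linux/iommu.h>

static int map_coherently(struct iommu_domain *domain, unsigned long iova,
			  phys_addr_t paddr, size_t size)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	/* only ask for snooped mappings when the hardware supports it */
	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
		prot |= IOMMU_CACHE;

	return iommu_map_range(domain, iova, paddr, size, prot);
}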
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 64246dce5663..53ae4399da2d 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -35,7 +35,7 @@
35#define journal_oom_retry 1 35#define journal_oom_retry 1
36 36
37/* 37/*
38 * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds 38 * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
39 * certain classes of error which can occur due to failed IOs. Under 39 * certain classes of error which can occur due to failed IOs. Under
40 * normal use we want ext3 to continue after such errors, because 40 * normal use we want ext3 to continue after such errors, because
41 * hardware _can_ fail, but for debugging purposes when running tests on 41 * hardware _can_ fail, but for debugging purposes when running tests on
@@ -552,6 +552,11 @@ struct transaction_s
552 */ 552 */
553 int t_handle_count; 553 int t_handle_count;
554 554
555 /*
556 * This transaction is being forced and some process is
557 * waiting for it to finish.
558 */
559 int t_synchronous_commit:1;
555}; 560};
556 561
557/** 562/**
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 4d248b3f1323..8815a3456b3b 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -649,6 +649,12 @@ struct transaction_s
649 int t_handle_count; 649 int t_handle_count;
650 650
651 /* 651 /*
652 * This transaction is being forced and some process is
653 * waiting for it to finish.
654 */
655 int t_synchronous_commit:1;
656
657 /*
652 * For use by the filesystem to store fs-specific data 658 * For use by the filesystem to store fs-specific data
653 * structures associated with the transaction 659 * structures associated with the transaction
654 */ 660 */
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index f3fe34391d8e..792274269f2b 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -13,10 +13,17 @@
13#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 13#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
14 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) 14 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
15 15
16struct module;
17
16#ifdef CONFIG_KALLSYMS 18#ifdef CONFIG_KALLSYMS
17/* Lookup the address for a symbol. Returns 0 if not found. */ 19/* Lookup the address for a symbol. Returns 0 if not found. */
18unsigned long kallsyms_lookup_name(const char *name); 20unsigned long kallsyms_lookup_name(const char *name);
19 21
22/* Call a function on each kallsyms symbol in the core kernel */
23int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
24 unsigned long),
25 void *data);
26
20extern int kallsyms_lookup_size_offset(unsigned long addr, 27extern int kallsyms_lookup_size_offset(unsigned long addr,
21 unsigned long *symbolsize, 28 unsigned long *symbolsize,
22 unsigned long *offset); 29 unsigned long *offset);
@@ -43,6 +50,14 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
43 return 0; 50 return 0;
44} 51}
45 52
53static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
54 struct module *,
55 unsigned long),
56 void *data)
57{
58 return 0;
59}
60
46static inline int kallsyms_lookup_size_offset(unsigned long addr, 61static inline int kallsyms_lookup_size_offset(unsigned long addr,
47 unsigned long *symbolsize, 62 unsigned long *symbolsize,
48 unsigned long *offset) 63 unsigned long *offset)
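A sketch of the new kallsyms_on_each_symbol() iterator (illustrative only, not part of the patch; the prefix and function names are made up):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/kallsyms.h>

static int report_prefixed(void *data, const char *name, struct module *mod,
			   unsigned long addr)
{
	const char *prefix = data;

	if (strncmp(name, prefix, strlen(prefix)) == 0)
		pr_info("%s at %#lx\n", name, addr);

	return 0;	/* returning non-zero would stop the walk */
}

static void report_vfs_symbols(void)
{
	kallsyms_on_each_symbol(report_prefixed, "vfs_");
}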
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index f81d80f47dcb..d9e75ec7def5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -242,6 +242,20 @@ extern struct ratelimit_state printk_ratelimit_state;
242extern int printk_ratelimit(void); 242extern int printk_ratelimit(void);
243extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, 243extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
244 unsigned int interval_msec); 244 unsigned int interval_msec);
245
246/*
247 * Print a one-time message (analogous to WARN_ONCE() et al):
248 */
249#define printk_once(x...) ({ \
250 static int __print_once = 1; \
251 \
252 if (__print_once) { \
253 __print_once = 0; \
254 printk(x); \
255 } \
256})
257
258void log_buf_kexec_setup(void);
245#else 259#else
246static inline int vprintk(const char *s, va_list args) 260static inline int vprintk(const char *s, va_list args)
247 __attribute__ ((format (printf, 1, 0))); 261 __attribute__ ((format (printf, 1, 0)));
@@ -253,6 +267,13 @@ static inline int printk_ratelimit(void) { return 0; }
253static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ 267static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
254 unsigned int interval_msec) \ 268 unsigned int interval_msec) \
255 { return false; } 269 { return false; }
270
271/* No effect, but we still get type checking even in the !PRINTK case: */
272#define printk_once(x...) printk(x)
273
274static inline void log_buf_kexec_setup(void)
275{
276}
256#endif 277#endif
257 278
258extern int printk_needs_cpu(int cpu); 279extern int printk_needs_cpu(int cpu);
@@ -353,6 +374,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
353 printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) 374 printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
354#define pr_info(fmt, ...) \ 375#define pr_info(fmt, ...) \
355 printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) 376 printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
377#define pr_cont(fmt, ...) \
378 printk(KERN_CONT fmt, ##__VA_ARGS__)
356 379
357/* If you are writing a driver, please use dev_dbg instead */ 380/* If you are writing a driver, please use dev_dbg instead */
358#if defined(DEBUG) 381#if defined(DEBUG)
@@ -369,6 +392,139 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
369#endif 392#endif
370 393
371/* 394/*
395 * General tracing related utility functions - trace_printk(),
396 * tracing_on/tracing_off and tracing_start()/tracing_stop
397 *
398 * Use tracing_on/tracing_off when you want to quickly turn on or off
399 * tracing. It simply enables or disables the recording of the trace events.
400 * This also corresponds to the user space debugfs/tracing/tracing_on
401 * file, which gives a means for the kernel and userspace to interact.
402 * Place a tracing_off() in the kernel where you want tracing to end.
403 * From user space, examine the trace, and then echo 1 > tracing_on
404 * to continue tracing.
405 *
406 * tracing_stop/tracing_start has slightly more overhead. It is used
407 * by things like suspend to ram where disabling the recording of the
408 * trace is not enough, but tracing must actually stop because things
409 * like calling smp_processor_id() may crash the system.
410 *
411 * Most likely, you want to use tracing_on/tracing_off.
412 */
413#ifdef CONFIG_RING_BUFFER
414void tracing_on(void);
415void tracing_off(void);
416/* trace_off_permanent stops recording with no way to bring it back */
417void tracing_off_permanent(void);
418int tracing_is_on(void);
419#else
420static inline void tracing_on(void) { }
421static inline void tracing_off(void) { }
422static inline void tracing_off_permanent(void) { }
423static inline int tracing_is_on(void) { return 0; }
424#endif
425#ifdef CONFIG_TRACING
426extern void tracing_start(void);
427extern void tracing_stop(void);
428extern void ftrace_off_permanent(void);
429
430extern void
431ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
432
433static inline void __attribute__ ((format (printf, 1, 2)))
434____trace_printk_check_format(const char *fmt, ...)
435{
436}
437#define __trace_printk_check_format(fmt, args...) \
438do { \
439 if (0) \
440 ____trace_printk_check_format(fmt, ##args); \
441} while (0)
442
443/**
444 * trace_printk - printf formatting in the ftrace buffer
445 * @fmt: the printf format for printing
446 *
447 * Note: __trace_printk is an internal function for trace_printk and
448 * the @ip is passed in via the trace_printk macro.
449 *
450 * This function allows a kernel developer to debug fast path sections
451 * that printk is not appropriate for. By scattering in various
452 * printk like tracing in the code, a developer can quickly see
453 * where problems are occurring.
454 *
455 * This is intended as a debugging tool for the developer only.
456 * Please refrain from leaving trace_printks scattered around in
457 * your code.
458 */
459
460#define trace_printk(fmt, args...) \
461do { \
462 __trace_printk_check_format(fmt, ##args); \
463 if (__builtin_constant_p(fmt)) { \
464 static const char *trace_printk_fmt \
465 __attribute__((section("__trace_printk_fmt"))) = \
466 __builtin_constant_p(fmt) ? fmt : NULL; \
467 \
468 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
469 } else \
470 __trace_printk(_THIS_IP_, fmt, ##args); \
471} while (0)
472
473extern int
474__trace_bprintk(unsigned long ip, const char *fmt, ...)
475 __attribute__ ((format (printf, 2, 3)));
476
477extern int
478__trace_printk(unsigned long ip, const char *fmt, ...)
479 __attribute__ ((format (printf, 2, 3)));
480
481/*
482 * The double __builtin_constant_p is because gcc will give us an error
483 * if we try to allocate the static variable to fmt if it is not a
484 * constant. Even with the outer if statement.
485 */
486#define ftrace_vprintk(fmt, vargs) \
487do { \
488 if (__builtin_constant_p(fmt)) { \
489 static const char *trace_printk_fmt \
490 __attribute__((section("__trace_printk_fmt"))) = \
491 __builtin_constant_p(fmt) ? fmt : NULL; \
492 \
493 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
494 } else \
495 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
496} while (0)
497
498extern int
499__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
500
501extern int
502__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
503
504extern void ftrace_dump(void);
505#else
506static inline void
507ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
508static inline int
509trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
510
511static inline void tracing_start(void) { }
512static inline void tracing_stop(void) { }
513static inline void ftrace_off_permanent(void) { }
514static inline int
515trace_printk(const char *fmt, ...)
516{
517 return 0;
518}
519static inline int
520ftrace_vprintk(const char *fmt, va_list ap)
521{
522 return 0;
523}
524static inline void ftrace_dump(void) { }
525#endif /* CONFIG_TRACING */
526
527/*
372 * Display an IP address in readable format. 528 * Display an IP address in readable format.
373 */ 529 */
374 530
diff --git a/include/linux/key.h b/include/linux/key.h
index 21d32a142c00..e544f466d69a 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -20,6 +20,7 @@
20#include <linux/rbtree.h> 20#include <linux/rbtree.h>
21#include <linux/rcupdate.h> 21#include <linux/rcupdate.h>
22#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/rwsem.h>
23#include <asm/atomic.h> 24#include <asm/atomic.h>
24 25
25#ifdef __KERNEL__ 26#ifdef __KERNEL__
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 92213a9194e1..d5fa565086d1 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -29,10 +29,15 @@
29#ifdef CONFIG_MODULES 29#ifdef CONFIG_MODULES
30/* modprobe exit status on success, -ve on error. Return value 30/* modprobe exit status on success, -ve on error. Return value
31 * usually useless though. */ 31 * usually useless though. */
32extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2))); 32extern int __request_module(bool wait, const char *name, ...) \
33#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) 33 __attribute__((format(printf, 2, 3)));
34#define request_module(mod...) __request_module(true, mod)
35#define request_module_nowait(mod...) __request_module(false, mod)
36#define try_then_request_module(x, mod...) \
37 ((x) ?: (__request_module(false, mod), (x)))
34#else 38#else
35static inline int request_module(const char * name, ...) { return -ENOSYS; } 39static inline int request_module(const char *name, ...) { return -ENOSYS; }
40static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; }
36#define try_then_request_module(x, mod...) (x) 41#define try_then_request_module(x, mod...) (x)
37#endif 42#endif
38 43
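A small sketch of how a caller might use the split interface introduced here (the module name is made up); request_module() still waits for modprobe to finish, while request_module_nowait() only kicks it off:

static void my_load_handler(int id)
{
	/* fire-and-forget: do not wait for modprobe to complete */
	if (request_module_nowait("my-handler-%d", id) < 0)
		printk(KERN_WARNING "my-handler-%d could not be requested\n", id);
}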
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h
new file mode 100644
index 000000000000..42f854a1a199
--- /dev/null
+++ b/include/linux/leds-bd2802.h
@@ -0,0 +1,26 @@
1/*
2 * leds-bd2802.h - RGB LED Driver
3 *
4 * Copyright (C) 2009 Samsung Electronics
5 * Kim Kyuwon <q1.kim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf
12 *
13 */
14#ifndef _LEDS_BD2802_H_
15#define _LEDS_BD2802_H_
16
17struct bd2802_led_platform_data{
18 int reset_gpio;
19 u8 rgb_time;
20};
21
22#define RGB_TIME(slopedown, slopeup, waveform) \
23 ((slopedown) << 6 | (slopeup) << 4 | (waveform))
24
25#endif /* _LEDS_BD2802_H_ */
26
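Board code would typically instantiate this platform data along the following lines (a sketch; the GPIO number and RGB_TIME() arguments are invented for illustration):

static struct bd2802_led_platform_data my_bd2802_pdata = {
	.reset_gpio	= 130,			/* board-specific reset line */
	.rgb_time	= RGB_TIME(3, 0, 1),	/* slope down, slope up, waveform */
};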
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 24489da701e3..376fe07732ea 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -30,6 +30,7 @@ enum led_brightness {
30struct led_classdev { 30struct led_classdev {
31 const char *name; 31 const char *name;
32 int brightness; 32 int brightness;
33 int max_brightness;
33 int flags; 34 int flags;
34 35
35 /* Lower 16 bits reflect status */ 36 /* Lower 16 bits reflect status */
@@ -140,7 +141,8 @@ struct gpio_led {
140 const char *name; 141 const char *name;
141 const char *default_trigger; 142 const char *default_trigger;
142 unsigned gpio; 143 unsigned gpio;
143 u8 active_low; 144 u8 active_low : 1;
145 u8 retain_state_suspended : 1;
144}; 146};
145 147
146struct gpio_led_platform_data { 148struct gpio_led_platform_data {
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
new file mode 100644
index 000000000000..33a071167489
--- /dev/null
+++ b/include/linux/leds_pwm.h
@@ -0,0 +1,21 @@
1/*
2 * PWM LED driver data - see drivers/leds/leds-pwm.c
3 */
4#ifndef __LINUX_LEDS_PWM_H
5#define __LINUX_LEDS_PWM_H
6
7struct led_pwm {
8 const char *name;
9 const char *default_trigger;
10 unsigned pwm_id;
11 u8 active_low;
12 unsigned max_brightness;
13 unsigned pwm_period_ns;
14};
15
16struct led_pwm_platform_data {
17 int num_leds;
18 struct led_pwm *leds;
19};
20
21#endif
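A hypothetical board-file sketch feeding this structure to the leds-pwm driver (names, PWM id and period are made up):

static struct led_pwm my_pwm_leds[] = {
	{
		.name		 = "lcd-backlight",
		.default_trigger = "backlight",
		.pwm_id		 = 0,
		.max_brightness	 = 255,
		.pwm_period_ns	 = 7812500,	/* 128 Hz */
	},
};

static struct led_pwm_platform_data my_pwm_led_pdata = {
	.num_leds	= ARRAY_SIZE(my_pwm_leds),
	.leds		= my_pwm_leds,
};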
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 76262d83656b..b450a2628855 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -379,7 +379,7 @@ enum {
379 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ 379 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
380 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands 380 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
381 not multiple of 16 bytes */ 381 not multiple of 16 bytes */
382 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */ 382 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
383 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ 383 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
384 384
385 /* DMA mask for user DMA control: User visible values; DO NOT 385 /* DMA mask for user DMA control: User visible values; DO NOT
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 7dc5b6cb44cd..d39ed1cc5fbf 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -25,13 +25,13 @@ struct svc_rqst;
25#define NLM_MAXCOOKIELEN 32 25#define NLM_MAXCOOKIELEN 32
26#define NLM_MAXSTRLEN 1024 26#define NLM_MAXSTRLEN 1024
27 27
28#define nlm_granted __constant_htonl(NLM_LCK_GRANTED) 28#define nlm_granted cpu_to_be32(NLM_LCK_GRANTED)
29#define nlm_lck_denied __constant_htonl(NLM_LCK_DENIED) 29#define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED)
30#define nlm_lck_denied_nolocks __constant_htonl(NLM_LCK_DENIED_NOLOCKS) 30#define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS)
31#define nlm_lck_blocked __constant_htonl(NLM_LCK_BLOCKED) 31#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED)
32#define nlm_lck_denied_grace_period __constant_htonl(NLM_LCK_DENIED_GRACE_PERIOD) 32#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD)
33 33
34#define nlm_drop_reply __constant_htonl(30000) 34#define nlm_drop_reply cpu_to_be32(30000)
35 35
36/* Lock info passed via NLM */ 36/* Lock info passed via NLM */
37struct nlm_lock { 37struct nlm_lock {
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index 12bfe09de2b1..7353821341ed 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -15,11 +15,11 @@
15#include <linux/lockd/xdr.h> 15#include <linux/lockd/xdr.h>
16 16
17/* error codes new to NLMv4 */ 17/* error codes new to NLMv4 */
18#define nlm4_deadlock __constant_htonl(NLM_DEADLCK) 18#define nlm4_deadlock cpu_to_be32(NLM_DEADLCK)
19#define nlm4_rofs __constant_htonl(NLM_ROFS) 19#define nlm4_rofs cpu_to_be32(NLM_ROFS)
20#define nlm4_stale_fh __constant_htonl(NLM_STALE_FH) 20#define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH)
21#define nlm4_fbig __constant_htonl(NLM_FBIG) 21#define nlm4_fbig cpu_to_be32(NLM_FBIG)
22#define nlm4_failed __constant_htonl(NLM_FAILED) 22#define nlm4_failed cpu_to_be32(NLM_FAILED)
23 23
24 24
25 25
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 5a58ea3e91e9..da5a5a1f4cd2 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -364,6 +364,23 @@ do { \
364 364
365#endif /* CONFIG_LOCK_STAT */ 365#endif /* CONFIG_LOCK_STAT */
366 366
367#ifdef CONFIG_LOCKDEP
368
369/*
370 * On lockdep we dont want the hand-coded irq-enable of
371 * _raw_*_lock_flags() code, because lockdep assumes
372 * that interrupts are not re-enabled during lock-acquire:
373 */
374#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
375 LOCK_CONTENDED((_lock), (try), (lock))
376
377#else /* CONFIG_LOCKDEP */
378
379#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
380 lockfl((_lock), (flags))
381
382#endif /* CONFIG_LOCKDEP */
383
367#ifdef CONFIG_GENERIC_HARDIRQS 384#ifdef CONFIG_GENERIC_HARDIRQS
368extern void early_init_irq_lock_class(void); 385extern void early_init_irq_lock_class(void);
369#else 386#else
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 6ffd6db5bb0d..40725447f5e0 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -160,5 +160,6 @@ int loop_unregister_transfer(int number);
160#define LOOP_SET_STATUS64 0x4C04 160#define LOOP_SET_STATUS64 0x4C04
161#define LOOP_GET_STATUS64 0x4C05 161#define LOOP_GET_STATUS64 0x4C05
162#define LOOP_CHANGE_FD 0x4C06 162#define LOOP_CHANGE_FD 0x4C06
163#define LOOP_SET_CAPACITY 0x4C07
163 164
164#endif 165#endif
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 326f45c86530..18146c980b68 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -88,9 +88,6 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
88/* 88/*
89 * For memory reclaim. 89 * For memory reclaim.
90 */ 90 */
91extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
92extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);
93
94extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem); 91extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
95extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, 92extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
96 int priority); 93 int priority);
@@ -104,6 +101,8 @@ struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
104 struct zone *zone); 101 struct zone *zone);
105struct zone_reclaim_stat* 102struct zone_reclaim_stat*
106mem_cgroup_get_reclaim_stat_from_page(struct page *page); 103mem_cgroup_get_reclaim_stat_from_page(struct page *page);
104extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
105 struct task_struct *p);
107 106
108#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 107#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
109extern int do_swap_account; 108extern int do_swap_account;
@@ -209,16 +208,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
209{ 208{
210} 209}
211 210
212static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
213{
214 return 0;
215}
216
217static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
218{
219 return 0;
220}
221
222static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) 211static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
223{ 212{
224 return 0; 213 return 0;
@@ -270,6 +259,11 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
270 return NULL; 259 return NULL;
271} 260}
272 261
262static inline void
263mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
264{
265}
266
273#endif /* CONFIG_CGROUP_MEM_CONT */ 267#endif /* CONFIG_CGROUP_MEM_CONT */
274 268
275#endif /* _LINUX_MEMCONTROL_H */ 269#endif /* _LINUX_MEMCONTROL_H */
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 3fdc10806d31..37fa19b34ef5 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -99,4 +99,21 @@ enum mem_add_context { BOOT, HOTPLUG };
99#define hotplug_memory_notifier(fn, pri) do { } while (0) 99#define hotplug_memory_notifier(fn, pri) do { } while (0)
100#endif 100#endif
101 101
102/*
103 * 'struct memory_accessor' is a generic interface to provide
104 * in-kernel access to persistent memory such as i2c or SPI EEPROMs
105 */
106struct memory_accessor {
107 ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset,
108 size_t count);
109 ssize_t (*write)(struct memory_accessor *, const char *buf,
110 off_t offset, size_t count);
111};
112
113/*
114 * Kernel text modification mutex, used for code patching. Users of this lock
115 * can sleep.
116 */
117extern struct mutex text_mutex;
118
102#endif /* _LINUX_MEMORY_H_ */ 119#endif /* _LINUX_MEMORY_H_ */
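The accessor is meant to be handed to board code by an EEPROM driver (for instance through a setup() callback in its platform data), so the consumer can read or write the device without knowing whether it sits on I2C or SPI. A hypothetical consumer, assuming such a callback:

static void my_eeprom_ready(struct memory_accessor *ma, void *context)
{
	char mac[6];

	/* pull a MAC address out of the first six bytes of the EEPROM */
	if (ma->read(ma, mac, 0, sizeof(mac)) == sizeof(mac))
		my_board_set_mac(mac);		/* made-up board helper */
}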
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
new file mode 100644
index 000000000000..be469a357cbb
--- /dev/null
+++ b/include/linux/mfd/ds1wm.h
@@ -0,0 +1,6 @@
1/* MFD cell driver data for the DS1WM driver */
2
3struct ds1wm_driver_data {
4 int active_high;
5 int clock_rate;
6};
diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h
index b4294f12c4f8..3d3ed67bd969 100644
--- a/include/linux/mfd/htc-pasic3.h
+++ b/include/linux/mfd/htc-pasic3.h
@@ -48,7 +48,6 @@ struct pasic3_leds_machinfo {
48 48
49struct pasic3_platform_data { 49struct pasic3_platform_data {
50 struct pasic3_leds_machinfo *led_pdata; 50 struct pasic3_leds_machinfo *led_pdata;
51 unsigned int bus_shift;
52 unsigned int clock_rate; 51 unsigned int clock_rate;
53}; 52};
54 53
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 980669d50dca..42cca672f340 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -640,9 +640,11 @@ struct wm8350 {
640 * 640 *
641 * @init: Function called during driver initialisation. Should be 641 * @init: Function called during driver initialisation. Should be
642 * used by the platform to configure GPIO functions and similar. 642 * used by the platform to configure GPIO functions and similar.
643 * @irq_high: Set if WM8350 IRQ is active high.
643 */ 644 */
644struct wm8350_platform_data { 645struct wm8350_platform_data {
645 int (*init)(struct wm8350 *wm8350); 646 int (*init)(struct wm8350 *wm8350);
647 int irq_high;
646}; 648};
647 649
648 650
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b1ea37fc7a24..bff1f0d475c7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -135,6 +135,7 @@ extern pgprot_t protection_map[16];
135 135
136#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ 136#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
137#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ 137#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
138#define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */
138 139
139/* 140/*
140 * This interface is used by x86 PAT code to identify a pfn mapping that is 141 * This interface is used by x86 PAT code to identify a pfn mapping that is
@@ -187,7 +188,7 @@ struct vm_operations_struct {
187 188
188 /* notification that a previously read-only page is about to become 189 /* notification that a previously read-only page is about to become
189 * writable, if an error is returned it will cause a SIGBUS */ 190 * writable, if an error is returned it will cause a SIGBUS */
190 int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); 191 int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
191 192
192 /* called by access_process_vm when get_user_pages() fails, typically 193 /* called by access_process_vm when get_user_pages() fails, typically
193 * for use by special VMAs that can switch between memory and hardware 194 * for use by special VMAs that can switch between memory and hardware
@@ -834,6 +835,7 @@ int __set_page_dirty_nobuffers(struct page *page);
834int __set_page_dirty_no_writeback(struct page *page); 835int __set_page_dirty_no_writeback(struct page *page);
835int redirty_page_for_writepage(struct writeback_control *wbc, 836int redirty_page_for_writepage(struct writeback_control *wbc,
836 struct page *page); 837 struct page *page);
838void account_page_dirtied(struct page *page, struct address_space *mapping);
837int set_page_dirty(struct page *page); 839int set_page_dirty(struct page *page);
838int set_page_dirty_lock(struct page *page); 840int set_page_dirty_lock(struct page *page);
839int clear_page_dirty_for_io(struct page *page); 841int clear_page_dirty_for_io(struct page *page);
@@ -1077,7 +1079,7 @@ static inline void setup_per_cpu_pageset(void) {}
1077#endif 1079#endif
1078 1080
1079/* nommu.c */ 1081/* nommu.c */
1080extern atomic_t mmap_pages_allocated; 1082extern atomic_long_t mmap_pages_allocated;
1081 1083
1082/* prio_tree.c */ 1084/* prio_tree.c */
1083void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); 1085void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d84feb7bdbf0..0e80e26ecf21 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -11,6 +11,7 @@
11#include <linux/rwsem.h> 11#include <linux/rwsem.h>
12#include <linux/completion.h> 12#include <linux/completion.h>
13#include <linux/cpumask.h> 13#include <linux/cpumask.h>
14#include <linux/page-debug-flags.h>
14#include <asm/page.h> 15#include <asm/page.h>
15#include <asm/mmu.h> 16#include <asm/mmu.h>
16 17
@@ -94,6 +95,9 @@ struct page {
94 void *virtual; /* Kernel virtual address (NULL if 95 void *virtual; /* Kernel virtual address (NULL if
95 not kmapped, ie. highmem) */ 96 not kmapped, ie. highmem) */
96#endif /* WANT_PAGE_VIRTUAL */ 97#endif /* WANT_PAGE_VIRTUAL */
98#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
99 unsigned long debug_flags; /* Use atomic bitops on this */
100#endif
97}; 101};
98 102
99/* 103/*
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4e457256bd33..3e7615e9087e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -192,5 +192,10 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
192 wake_up_process(host->sdio_irq_thread); 192 wake_up_process(host->sdio_irq_thread);
193} 193}
194 194
195struct regulator;
196
197int mmc_regulator_get_ocrmask(struct regulator *supply);
198int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit);
199
195#endif 200#endif
196 201
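A rough sketch of how an MMC host driver could use these helpers, assuming it already obtained a vmmc regulator with regulator_get() and keeps it in its private structure:

static void my_host_init_ocr(struct mmc_host *mmc, struct regulator *vcc)
{
	int ocr = mmc_regulator_get_ocrmask(vcc);

	if (ocr > 0)
		mmc->ocr_avail = ocr;		/* advertise what vcc can supply */
}

static void my_host_set_power(struct regulator *vcc, unsigned short vdd_bit)
{
	mmc_regulator_set_ocr(vcc, vdd_bit);	/* a vdd_bit of 0 turns the supply off */
}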
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1aca6cebbb78..186ec6ab334d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -764,12 +764,6 @@ extern int numa_zonelist_order_handler(struct ctl_table *, int,
764extern char numa_zonelist_order[]; 764extern char numa_zonelist_order[];
765#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ 765#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
766 766
767#include <linux/topology.h>
768/* Returns the number of the current Node. */
769#ifndef numa_node_id
770#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
771#endif
772
773#ifndef CONFIG_NEED_MULTIPLE_NODES 767#ifndef CONFIG_NEED_MULTIPLE_NODES
774 768
775extern struct pglist_data contig_page_data; 769extern struct pglist_data contig_page_data;
@@ -806,6 +800,14 @@ extern struct zone *next_zone(struct zone *zone);
806 zone; \ 800 zone; \
807 zone = next_zone(zone)) 801 zone = next_zone(zone))
808 802
803#define for_each_populated_zone(zone) \
804 for (zone = (first_online_pgdat())->node_zones; \
805 zone; \
806 zone = next_zone(zone)) \
807 if (!populated_zone(zone)) \
808 ; /* do nothing */ \
809 else
810
809static inline struct zone *zonelist_zone(struct zoneref *zoneref) 811static inline struct zone *zonelist_zone(struct zoneref *zoneref)
810{ 812{
811 return zoneref->zone; 813 return zoneref->zone;
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 830bbcd449d6..3a059298cc19 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -22,6 +22,8 @@ struct proc_mounts {
22 int event; 22 int event;
23}; 23};
24 24
25struct fs_struct;
26
25extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, 27extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
26 struct fs_struct *); 28 struct fs_struct *);
27extern void __put_mnt_ns(struct mnt_namespace *ns); 29extern void __put_mnt_ns(struct mnt_namespace *ns);
diff --git a/include/linux/module.h b/include/linux/module.h
index 145a75528cc1..627ac082e2a6 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -248,6 +248,10 @@ struct module
248 const unsigned long *crcs; 248 const unsigned long *crcs;
249 unsigned int num_syms; 249 unsigned int num_syms;
250 250
251 /* Kernel parameters. */
252 struct kernel_param *kp;
253 unsigned int num_kp;
254
251 /* GPL-only exported symbols. */ 255 /* GPL-only exported symbols. */
252 unsigned int num_gpl_syms; 256 unsigned int num_gpl_syms;
253 const struct kernel_symbol *gpl_syms; 257 const struct kernel_symbol *gpl_syms;
@@ -329,6 +333,11 @@ struct module
329 unsigned int num_tracepoints; 333 unsigned int num_tracepoints;
330#endif 334#endif
331 335
336#ifdef CONFIG_TRACING
337 const char **trace_bprintk_fmt_start;
338 unsigned int num_trace_bprintk_fmt;
339#endif
340
332#ifdef CONFIG_MODULE_UNLOAD 341#ifdef CONFIG_MODULE_UNLOAD
333 /* What modules depend on me? */ 342 /* What modules depend on me? */
334 struct list_head modules_which_use_me; 343 struct list_head modules_which_use_me;
@@ -350,6 +359,8 @@ struct module
350#define MODULE_ARCH_INIT {} 359#define MODULE_ARCH_INIT {}
351#endif 360#endif
352 361
362extern struct mutex module_mutex;
363
353/* FIXME: It'd be nice to isolate modules during init, too, so they 364/* FIXME: It'd be nice to isolate modules during init, too, so they
354 aren't used before they (may) fail. But presently too much code 365 aren't used before they (may) fail. But presently too much code
355 (IDE & SCSI) require entry into the module during init.*/ 366 (IDE & SCSI) require entry into the module during init.*/
@@ -358,10 +369,10 @@ static inline int module_is_live(struct module *mod)
358 return mod->state != MODULE_STATE_GOING; 369 return mod->state != MODULE_STATE_GOING;
359} 370}
360 371
361/* Is this address in a module? (second is with no locks, for oops) */
362struct module *module_text_address(unsigned long addr);
363struct module *__module_text_address(unsigned long addr); 372struct module *__module_text_address(unsigned long addr);
364int is_module_address(unsigned long addr); 373struct module *__module_address(unsigned long addr);
374bool is_module_address(unsigned long addr);
375bool is_module_text_address(unsigned long addr);
365 376
366static inline int within_module_core(unsigned long addr, struct module *mod) 377static inline int within_module_core(unsigned long addr, struct module *mod)
367{ 378{
@@ -375,6 +386,31 @@ static inline int within_module_init(unsigned long addr, struct module *mod)
375 addr < (unsigned long)mod->module_init + mod->init_size; 386 addr < (unsigned long)mod->module_init + mod->init_size;
376} 387}
377 388
389/* Search for module by name: must hold module_mutex. */
390struct module *find_module(const char *name);
391
392struct symsearch {
393 const struct kernel_symbol *start, *stop;
394 const unsigned long *crcs;
395 enum {
396 NOT_GPL_ONLY,
397 GPL_ONLY,
398 WILL_BE_GPL_ONLY,
399 } licence;
400 bool unused;
401};
402
403/* Search for an exported symbol by name. */
404const struct kernel_symbol *find_symbol(const char *name,
405 struct module **owner,
406 const unsigned long **crc,
407 bool gplok,
408 bool warn);
409
410/* Walk the exported symbol table */
411bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
412 unsigned int symnum, void *data), void *data);
413
378/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if 414/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
379 symnum out of range. */ 415 symnum out of range. */
380int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 416int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
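The new walker can scan every exported symbol table in the kernel and its modules; a hypothetical callback that looks for a single name (roughly the pattern find_symbol() is built on):

struct my_lookup {
	const char *name;
	const struct kernel_symbol *sym;
};

static bool my_symbol_match(const struct symsearch *arr, struct module *owner,
			    unsigned int symnum, void *data)
{
	struct my_lookup *l = data;

	if (strcmp(arr->start[symnum].name, l->name) != 0)
		return false;
	l->sym = &arr->start[symnum];
	return true;			/* a true return stops the walk */
}

Calling each_symbol(my_symbol_match, &l) then reports whether the name was found, with l.sym pointing at the matching entry.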
@@ -383,6 +419,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
383/* Look for this name: can be of form module:name. */ 419/* Look for this name: can be of form module:name. */
384unsigned long module_kallsyms_lookup_name(const char *name); 420unsigned long module_kallsyms_lookup_name(const char *name);
385 421
422int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
423 struct module *, unsigned long),
424 void *data);
425
386extern void __module_put_and_exit(struct module *mod, long code) 426extern void __module_put_and_exit(struct module *mod, long code)
387 __attribute__((noreturn)); 427 __attribute__((noreturn));
388#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); 428#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code);
@@ -444,6 +484,7 @@ static inline void __module_get(struct module *module)
444#define symbol_put_addr(p) do { } while(0) 484#define symbol_put_addr(p) do { } while(0)
445 485
446#endif /* CONFIG_MODULE_UNLOAD */ 486#endif /* CONFIG_MODULE_UNLOAD */
487int use_module(struct module *a, struct module *b);
447 488
448/* This is a #define so the string doesn't get put in every .o file */ 489/* This is a #define so the string doesn't get put in every .o file */
449#define module_name(mod) \ 490#define module_name(mod) \
@@ -490,21 +531,24 @@ search_module_extables(unsigned long addr)
490 return NULL; 531 return NULL;
491} 532}
492 533
493/* Is this address in a module? */ 534static inline struct module *__module_address(unsigned long addr)
494static inline struct module *module_text_address(unsigned long addr)
495{ 535{
496 return NULL; 536 return NULL;
497} 537}
498 538
499/* Is this address in a module? (don't take a lock, we're oopsing) */
500static inline struct module *__module_text_address(unsigned long addr) 539static inline struct module *__module_text_address(unsigned long addr)
501{ 540{
502 return NULL; 541 return NULL;
503} 542}
504 543
505static inline int is_module_address(unsigned long addr) 544static inline bool is_module_address(unsigned long addr)
506{ 545{
507 return 0; 546 return false;
547}
548
549static inline bool is_module_text_address(unsigned long addr)
550{
551 return false;
508} 552}
509 553
510/* Get/put a kernel symbol (calls should be symmetric) */ 554/* Get/put a kernel symbol (calls should be symmetric) */
@@ -559,6 +603,14 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name)
559 return 0; 603 return 0;
560} 604}
561 605
606static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
607 struct module *,
608 unsigned long),
609 void *data)
610{
611 return 0;
612}
613
562static inline int register_module_notifier(struct notifier_block * nb) 614static inline int register_module_notifier(struct notifier_block * nb)
563{ 615{
564 /* no events will happen anyway, so this can always succeed */ 616 /* no events will happen anyway, so this can always succeed */
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index e4af3399ef48..a4f0b931846c 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -138,6 +138,16 @@ extern int parse_args(const char *name,
138 unsigned num, 138 unsigned num,
139 int (*unknown)(char *param, char *val)); 139 int (*unknown)(char *param, char *val));
140 140
141/* Called by module remove. */
142#ifdef CONFIG_SYSFS
143extern void destroy_params(const struct kernel_param *params, unsigned num);
144#else
145static inline void destroy_params(const struct kernel_param *params,
146 unsigned num)
147{
148}
149#endif /* !CONFIG_SYSFS */
150
141/* All the helper functions */ 151/* All the helper functions */
142/* The macros to do compile-time type checking stolen from Jakub 152/* The macros to do compile-time type checking stolen from Jakub
143 Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ 153 Jelinek, who IIRC came up with this idea for the 2.4 module init code. */
diff --git a/include/linux/mpage.h b/include/linux/mpage.h
index 5c42821da2d1..068a0c9946af 100644
--- a/include/linux/mpage.h
+++ b/include/linux/mpage.h
@@ -11,21 +11,11 @@
11 */ 11 */
12#ifdef CONFIG_BLOCK 12#ifdef CONFIG_BLOCK
13 13
14struct mpage_data {
15 struct bio *bio;
16 sector_t last_block_in_bio;
17 get_block_t *get_block;
18 unsigned use_writepage;
19};
20
21struct writeback_control; 14struct writeback_control;
22 15
23struct bio *mpage_bio_submit(int rw, struct bio *bio);
24int mpage_readpages(struct address_space *mapping, struct list_head *pages, 16int mpage_readpages(struct address_space *mapping, struct list_head *pages,
25 unsigned nr_pages, get_block_t get_block); 17 unsigned nr_pages, get_block_t get_block);
26int mpage_readpage(struct page *page, get_block_t get_block); 18int mpage_readpage(struct page *page, get_block_t get_block);
27int __mpage_writepage(struct page *page, struct writeback_control *wbc,
28 void *data);
29int mpage_writepages(struct address_space *mapping, 19int mpage_writepages(struct address_space *mapping,
30 struct writeback_control *wbc, get_block_t get_block); 20 struct writeback_control *wbc, get_block_t get_block);
31int mpage_writepage(struct page *page, get_block_t *get_block, 21int mpage_writepage(struct page *page, get_block_t *get_block,
diff --git a/include/linux/msi.h b/include/linux/msi.h
index d2b8a1e8ca11..6991ab5b24d1 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -20,20 +20,23 @@ extern void write_msi_msg(unsigned int irq, struct msi_msg *msg);
20 20
21struct msi_desc { 21struct msi_desc {
22 struct { 22 struct {
23 __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */ 23 __u8 is_msix : 1;
24 __u8 multiple: 3; /* log2 number of messages */
24 __u8 maskbit : 1; /* mask-pending bit supported ? */ 25 __u8 maskbit : 1; /* mask-pending bit supported ? */
25 __u8 masked : 1;
26 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ 26 __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */
27 __u8 pos; /* Location of the msi capability */ 27 __u8 pos; /* Location of the msi capability */
28 __u32 maskbits_mask; /* mask bits mask */
29 __u16 entry_nr; /* specific enabled entry */ 28 __u16 entry_nr; /* specific enabled entry */
30 unsigned default_irq; /* default pre-assigned irq */ 29 unsigned default_irq; /* default pre-assigned irq */
31 }msi_attrib; 30 } msi_attrib;
32 31
32 u32 masked; /* mask bits */
33 unsigned int irq; 33 unsigned int irq;
34 struct list_head list; 34 struct list_head list;
35 35
36 void __iomem *mask_base; 36 union {
37 void __iomem *mask_base;
38 u8 mask_pos;
39 };
37 struct pci_dev *dev; 40 struct pci_dev *dev;
38 41
39 /* Last set MSI message */ 42 /* Last set MSI message */
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 54af92c1c70b..214d499718f7 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -109,7 +109,6 @@
109 NFSERR_FILE_OPEN = 10046, /* v4 */ 109 NFSERR_FILE_OPEN = 10046, /* v4 */
110 NFSERR_ADMIN_REVOKED = 10047, /* v4 */ 110 NFSERR_ADMIN_REVOKED = 10047, /* v4 */
111 NFSERR_CB_PATH_DOWN = 10048, /* v4 */ 111 NFSERR_CB_PATH_DOWN = 10048, /* v4 */
112 NFSERR_REPLAY_ME = 10049 /* v4 */
113}; 112};
114 113
115/* NFSv2 file types - beware, these are not the same in NFSv3 */ 114/* NFSv2 file types - beware, these are not the same in NFSv3 */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index b912311a56b1..e3f0cbcbd0db 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -21,6 +21,7 @@
21#define NFS4_FHSIZE 128 21#define NFS4_FHSIZE 128
22#define NFS4_MAXPATHLEN PATH_MAX 22#define NFS4_MAXPATHLEN PATH_MAX
23#define NFS4_MAXNAMLEN NAME_MAX 23#define NFS4_MAXNAMLEN NAME_MAX
24#define NFS4_MAX_SESSIONID_LEN 16
24 25
25#define NFS4_ACCESS_READ 0x0001 26#define NFS4_ACCESS_READ 0x0001
26#define NFS4_ACCESS_LOOKUP 0x0002 27#define NFS4_ACCESS_LOOKUP 0x0002
@@ -38,6 +39,7 @@
38#define NFS4_OPEN_RESULT_CONFIRM 0x0002 39#define NFS4_OPEN_RESULT_CONFIRM 0x0002
39#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 40#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
40 41
42#define NFS4_SHARE_ACCESS_MASK 0x000F
41#define NFS4_SHARE_ACCESS_READ 0x0001 43#define NFS4_SHARE_ACCESS_READ 0x0001
42#define NFS4_SHARE_ACCESS_WRITE 0x0002 44#define NFS4_SHARE_ACCESS_WRITE 0x0002
43#define NFS4_SHARE_ACCESS_BOTH 0x0003 45#define NFS4_SHARE_ACCESS_BOTH 0x0003
@@ -45,6 +47,19 @@
45#define NFS4_SHARE_DENY_WRITE 0x0002 47#define NFS4_SHARE_DENY_WRITE 0x0002
46#define NFS4_SHARE_DENY_BOTH 0x0003 48#define NFS4_SHARE_DENY_BOTH 0x0003
47 49
50/* nfs41 */
51#define NFS4_SHARE_WANT_MASK 0xFF00
52#define NFS4_SHARE_WANT_NO_PREFERENCE 0x0000
53#define NFS4_SHARE_WANT_READ_DELEG 0x0100
54#define NFS4_SHARE_WANT_WRITE_DELEG 0x0200
55#define NFS4_SHARE_WANT_ANY_DELEG 0x0300
56#define NFS4_SHARE_WANT_NO_DELEG 0x0400
57#define NFS4_SHARE_WANT_CANCEL 0x0500
58
59#define NFS4_SHARE_WHEN_MASK 0xF0000
60#define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000
61#define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000
62
48#define NFS4_SET_TO_SERVER_TIME 0 63#define NFS4_SET_TO_SERVER_TIME 0
49#define NFS4_SET_TO_CLIENT_TIME 1 64#define NFS4_SET_TO_CLIENT_TIME 1
50 65
@@ -88,6 +103,31 @@
88#define NFS4_ACE_GENERIC_EXECUTE 0x001200A0 103#define NFS4_ACE_GENERIC_EXECUTE 0x001200A0
89#define NFS4_ACE_MASK_ALL 0x001F01FF 104#define NFS4_ACE_MASK_ALL 0x001F01FF
90 105
106#define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001
107#define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002
108#define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000
109#define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000
110#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000
111#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
112#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
113/*
114 * Since the validity of these bits depends on whether
115 * they're set in the argument or response, have separate
116 * invalid flag masks for arg (_A) and resp (_R).
117 */
118#define EXCHGID4_FLAG_MASK_A 0x40070003
119#define EXCHGID4_FLAG_MASK_R 0x80070003
120
121#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
122#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
123#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED 0x00000004
124#define SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED 0x00000008
125#define SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED 0x00000010
126#define SEQ4_STATUS_ADMIN_STATE_REVOKED 0x00000020
127#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
128#define SEQ4_STATUS_LEASE_MOVED 0x00000080
129#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
130
91#define NFS4_MAX_UINT64 (~(u64)0) 131#define NFS4_MAX_UINT64 (~(u64)0)
92 132
93enum nfs4_acl_whotype { 133enum nfs4_acl_whotype {
@@ -154,6 +194,28 @@ enum nfs_opnum4 {
154 OP_VERIFY = 37, 194 OP_VERIFY = 37,
155 OP_WRITE = 38, 195 OP_WRITE = 38,
156 OP_RELEASE_LOCKOWNER = 39, 196 OP_RELEASE_LOCKOWNER = 39,
197
198 /* nfs41 */
199 OP_BACKCHANNEL_CTL = 40,
200 OP_BIND_CONN_TO_SESSION = 41,
201 OP_EXCHANGE_ID = 42,
202 OP_CREATE_SESSION = 43,
203 OP_DESTROY_SESSION = 44,
204 OP_FREE_STATEID = 45,
205 OP_GET_DIR_DELEGATION = 46,
206 OP_GETDEVICEINFO = 47,
207 OP_GETDEVICELIST = 48,
208 OP_LAYOUTCOMMIT = 49,
209 OP_LAYOUTGET = 50,
210 OP_LAYOUTRETURN = 51,
211 OP_SECINFO_NO_NAME = 52,
212 OP_SEQUENCE = 53,
213 OP_SET_SSV = 54,
214 OP_TEST_STATEID = 55,
215 OP_WANT_DELEGATION = 56,
216 OP_DESTROY_CLIENTID = 57,
217 OP_RECLAIM_COMPLETE = 58,
218
157 OP_ILLEGAL = 10044, 219 OP_ILLEGAL = 10044,
158}; 220};
159 221
@@ -230,7 +292,48 @@ enum nfsstat4 {
230 NFS4ERR_DEADLOCK = 10045, 292 NFS4ERR_DEADLOCK = 10045,
231 NFS4ERR_FILE_OPEN = 10046, 293 NFS4ERR_FILE_OPEN = 10046,
232 NFS4ERR_ADMIN_REVOKED = 10047, 294 NFS4ERR_ADMIN_REVOKED = 10047,
233 NFS4ERR_CB_PATH_DOWN = 10048 295 NFS4ERR_CB_PATH_DOWN = 10048,
296
297 /* nfs41 */
298 NFS4ERR_BADIOMODE = 10049,
299 NFS4ERR_BADLAYOUT = 10050,
300 NFS4ERR_BAD_SESSION_DIGEST = 10051,
301 NFS4ERR_BADSESSION = 10052,
302 NFS4ERR_BADSLOT = 10053,
303 NFS4ERR_COMPLETE_ALREADY = 10054,
304 NFS4ERR_CONN_NOT_BOUND_TO_SESSION = 10055,
305 NFS4ERR_DELEG_ALREADY_WANTED = 10056,
306 NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */
307 NFS4ERR_LAYOUTTRYLATER = 10058,
308 NFS4ERR_LAYOUTUNAVAILABLE = 10059,
309 NFS4ERR_NOMATCHING_LAYOUT = 10060,
310 NFS4ERR_RECALLCONFLICT = 10061,
311 NFS4ERR_UNKNOWN_LAYOUTTYPE = 10062,
312 NFS4ERR_SEQ_MISORDERED = 10063, /* unexpected seq.id in req */
313 NFS4ERR_SEQUENCE_POS = 10064, /* [CB_]SEQ. op not 1st op */
314 NFS4ERR_REQ_TOO_BIG = 10065, /* request too big */
315 NFS4ERR_REP_TOO_BIG = 10066, /* reply too big */
316 NFS4ERR_REP_TOO_BIG_TO_CACHE = 10067, /* rep. not all cached */
317 NFS4ERR_RETRY_UNCACHED_REP = 10068, /* retry & rep. uncached */
318 NFS4ERR_UNSAFE_COMPOUND = 10069, /* retry/recovery too hard */
319 NFS4ERR_TOO_MANY_OPS = 10070, /* too many ops in [CB_]COMP */
320 NFS4ERR_OP_NOT_IN_SESSION = 10071, /* op needs [CB_]SEQ. op */
321 NFS4ERR_HASH_ALG_UNSUPP = 10072, /* hash alg. not supp. */
322 /* Error 10073 is unused. */
323 NFS4ERR_CLIENTID_BUSY = 10074, /* clientid has state */
324 NFS4ERR_PNFS_IO_HOLE = 10075, /* IO to _SPARSE file hole */
325 NFS4ERR_SEQ_FALSE_RETRY = 10076, /* retry not original */
326 NFS4ERR_BAD_HIGH_SLOT = 10077, /* sequence arg bad */
327 NFS4ERR_DEADSESSION = 10078, /* persistent session dead */
328 NFS4ERR_ENCR_ALG_UNSUPP = 10079, /* SSV alg mismatch */
329 NFS4ERR_PNFS_NO_LAYOUT = 10080, /* direct I/O with no layout */
330 NFS4ERR_NOT_ONLY_OP = 10081, /* bad compound */
331 NFS4ERR_WRONG_CRED = 10082, /* permissions:state change */
332 NFS4ERR_WRONG_TYPE = 10083, /* current operation mismatch */
333 NFS4ERR_DIRDELEG_UNAVAIL = 10084, /* no directory delegation */
334 NFS4ERR_REJECT_DELEG = 10085, /* on callback */
335 NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */
336 NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */
234}; 337};
235 338
236/* 339/*
@@ -265,7 +368,13 @@ enum opentype4 {
265enum createmode4 { 368enum createmode4 {
266 NFS4_CREATE_UNCHECKED = 0, 369 NFS4_CREATE_UNCHECKED = 0,
267 NFS4_CREATE_GUARDED = 1, 370 NFS4_CREATE_GUARDED = 1,
268 NFS4_CREATE_EXCLUSIVE = 2 371 NFS4_CREATE_EXCLUSIVE = 2,
372 /*
373 * New to NFSv4.1. If session is persistent,
374 * GUARDED4 MUST be used. Otherwise, use
375 * EXCLUSIVE4_1 instead of EXCLUSIVE4.
376 */
377 NFS4_CREATE_EXCLUSIVE4_1 = 3
269}; 378};
270 379
271enum limit_by4 { 380enum limit_by4 {
@@ -301,6 +410,8 @@ enum lock_type4 {
301#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) 410#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
302#define FATTR4_WORD0_LEASE_TIME (1UL << 10) 411#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
303#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) 412#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
413/* Mandatory in NFSv4.1 */
414#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
304 415
305/* Recommended Attributes */ 416/* Recommended Attributes */
306#define FATTR4_WORD0_ACL (1UL << 12) 417#define FATTR4_WORD0_ACL (1UL << 12)
@@ -391,6 +502,29 @@ enum {
391 NFSPROC4_CLNT_GETACL, 502 NFSPROC4_CLNT_GETACL,
392 NFSPROC4_CLNT_SETACL, 503 NFSPROC4_CLNT_SETACL,
393 NFSPROC4_CLNT_FS_LOCATIONS, 504 NFSPROC4_CLNT_FS_LOCATIONS,
505
506 /* nfs41 */
507 NFSPROC4_CLNT_EXCHANGE_ID,
508 NFSPROC4_CLNT_CREATE_SESSION,
509 NFSPROC4_CLNT_DESTROY_SESSION,
510 NFSPROC4_CLNT_SEQUENCE,
511 NFSPROC4_CLNT_GET_LEASE_TIME,
512};
513
514/* nfs41 types */
515struct nfs4_sessionid {
516 unsigned char data[NFS4_MAX_SESSIONID_LEN];
517};
518
519/* Create Session Flags */
520#define SESSION4_PERSIST 0x001
521#define SESSION4_BACK_CHAN 0x002
522#define SESSION4_RDMA 0x004
523
524enum state_protect_how4 {
525 SP4_NONE = 0,
526 SP4_MACH_CRED = 1,
527 SP4_SSV = 2
394}; 528};
395 529
396#endif 530#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 8cc8807f77d6..fdffb413b192 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -166,8 +166,7 @@ struct nfs_inode {
166 */ 166 */
167 struct radix_tree_root nfs_page_tree; 167 struct radix_tree_root nfs_page_tree;
168 168
169 unsigned long ncommit, 169 unsigned long npages;
170 npages;
171 170
172 /* Open contexts for shared mmap writes */ 171 /* Open contexts for shared mmap writes */
173 struct list_head open_files; 172 struct list_head open_files;
@@ -186,6 +185,9 @@ struct nfs_inode {
186 fmode_t delegation_state; 185 fmode_t delegation_state;
187 struct rw_semaphore rwsem; 186 struct rw_semaphore rwsem;
188#endif /* CONFIG_NFS_V4*/ 187#endif /* CONFIG_NFS_V4*/
188#ifdef CONFIG_NFS_FSCACHE
189 struct fscache_cookie *fscache;
190#endif
189 struct inode vfs_inode; 191 struct inode vfs_inode;
190}; 192};
191 193
@@ -207,6 +209,9 @@ struct nfs_inode {
207#define NFS_INO_STALE (1) /* possible stale inode */ 209#define NFS_INO_STALE (1) /* possible stale inode */
208#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ 210#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
209#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */ 211#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */
212#define NFS_INO_FLUSHING (4) /* inode is flushing out data */
213#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
214#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
210 215
211static inline struct nfs_inode *NFS_I(const struct inode *inode) 216static inline struct nfs_inode *NFS_I(const struct inode *inode)
212{ 217{
@@ -260,6 +265,11 @@ static inline int NFS_STALE(const struct inode *inode)
260 return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); 265 return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
261} 266}
262 267
268static inline int NFS_FSCACHE(const struct inode *inode)
269{
270 return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
271}
272
263static inline __u64 NFS_FILEID(const struct inode *inode) 273static inline __u64 NFS_FILEID(const struct inode *inode)
264{ 274{
265 return NFS_I(inode)->fileid; 275 return NFS_I(inode)->fileid;
@@ -506,6 +516,8 @@ extern int nfs_readpages(struct file *, struct address_space *,
506 struct list_head *, unsigned); 516 struct list_head *, unsigned);
507extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); 517extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
508extern void nfs_readdata_release(void *data); 518extern void nfs_readdata_release(void *data);
519extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
520 struct page *);
509 521
510/* 522/*
511 * Allocate nfs_read_data structures 523 * Allocate nfs_read_data structures
@@ -583,6 +595,7 @@ extern void * nfs_root_data(void);
583#define NFSDBG_CALLBACK 0x0100 595#define NFSDBG_CALLBACK 0x0100
584#define NFSDBG_CLIENT 0x0200 596#define NFSDBG_CLIENT 0x0200
585#define NFSDBG_MOUNT 0x0400 597#define NFSDBG_MOUNT 0x0400
598#define NFSDBG_FSCACHE 0x0800
586#define NFSDBG_ALL 0xFFFF 599#define NFSDBG_ALL 0xFFFF
587 600
588#ifdef __KERNEL__ 601#ifdef __KERNEL__
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 9bb81aec91cf..6ad75948cbf7 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -64,6 +64,10 @@ struct nfs_client {
64 char cl_ipaddr[48]; 64 char cl_ipaddr[48];
65 unsigned char cl_id_uniquifier; 65 unsigned char cl_id_uniquifier;
66#endif 66#endif
67
68#ifdef CONFIG_NFS_FSCACHE
69 struct fscache_cookie *fscache; /* client index cache cookie */
70#endif
67}; 71};
68 72
69/* 73/*
@@ -96,16 +100,28 @@ struct nfs_server {
96 unsigned int acdirmin; 100 unsigned int acdirmin;
97 unsigned int acdirmax; 101 unsigned int acdirmax;
98 unsigned int namelen; 102 unsigned int namelen;
103 unsigned int options; /* extra options enabled by mount */
104#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
99 105
100 struct nfs_fsid fsid; 106 struct nfs_fsid fsid;
101 __u64 maxfilesize; /* maximum file size */ 107 __u64 maxfilesize; /* maximum file size */
102 unsigned long mount_time; /* when this fs was mounted */ 108 unsigned long mount_time; /* when this fs was mounted */
103 dev_t s_dev; /* superblock dev numbers */ 109 dev_t s_dev; /* superblock dev numbers */
104 110
111#ifdef CONFIG_NFS_FSCACHE
112 struct nfs_fscache_key *fscache_key; /* unique key for superblock */
113 struct fscache_cookie *fscache; /* superblock cookie */
114#endif
115
105#ifdef CONFIG_NFS_V4 116#ifdef CONFIG_NFS_V4
106 u32 attr_bitmask[2];/* V4 bitmask representing the set 117 u32 attr_bitmask[2];/* V4 bitmask representing the set
107 of attributes supported on this 118 of attributes supported on this
108 filesystem */ 119 filesystem */
120 u32 cache_consistency_bitmask[2];
121 /* V4 bitmask representing the subset
122 of change attribute, size, ctime
123 and mtime attributes supported by
124 the server */
109 u32 acl_bitmask; /* V4 bitmask representing the ACEs 125 u32 acl_bitmask; /* V4 bitmask representing the ACEs
110 that are supported on this 126 that are supported on this
111 filesystem */ 127 filesystem */
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
index 1cb9a3fed2b3..68b10f5f8907 100644
--- a/include/linux/nfs_iostat.h
+++ b/include/linux/nfs_iostat.h
@@ -116,4 +116,16 @@ enum nfs_stat_eventcounters {
116 __NFSIOS_COUNTSMAX, 116 __NFSIOS_COUNTSMAX,
117}; 117};
118 118
119/*
120 * NFS local caching servicing counters
121 */
122enum nfs_stat_fscachecounters {
123 NFSIOS_FSCACHE_PAGES_READ_OK,
124 NFSIOS_FSCACHE_PAGES_READ_FAIL,
125 NFSIOS_FSCACHE_PAGES_WRITTEN_OK,
126 NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL,
127 NFSIOS_FSCACHE_PAGES_UNCACHED,
128 __NFSIOS_FSCACHEMAX,
129};
130
119#endif /* _LINUX_NFS_IOSTAT */ 131#endif /* _LINUX_NFS_IOSTAT */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 43a713fce11c..b89c34e40bc2 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -27,12 +27,8 @@ static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid
27} 27}
28 28
29struct nfs_fattr { 29struct nfs_fattr {
30 unsigned short valid; /* which fields are valid */ 30 unsigned int valid; /* which fields are valid */
31 __u64 pre_size; /* pre_op_attr.size */ 31 umode_t mode;
32 struct timespec pre_mtime; /* pre_op_attr.mtime */
33 struct timespec pre_ctime; /* pre_op_attr.ctime */
34 enum nfs_ftype type; /* always use NFSv2 types */
35 __u32 mode;
36 __u32 nlink; 32 __u32 nlink;
37 __u32 uid; 33 __u32 uid;
38 __u32 gid; 34 __u32 gid;
@@ -52,19 +48,55 @@ struct nfs_fattr {
52 struct timespec atime; 48 struct timespec atime;
53 struct timespec mtime; 49 struct timespec mtime;
54 struct timespec ctime; 50 struct timespec ctime;
55 __u32 bitmap[2]; /* NFSv4 returned attribute bitmap */
56 __u64 change_attr; /* NFSv4 change attribute */ 51 __u64 change_attr; /* NFSv4 change attribute */
57 __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ 52 __u64 pre_change_attr;/* pre-op NFSv4 change attribute */
53 __u64 pre_size; /* pre_op_attr.size */
54 struct timespec pre_mtime; /* pre_op_attr.mtime */
55 struct timespec pre_ctime; /* pre_op_attr.ctime */
58 unsigned long time_start; 56 unsigned long time_start;
59 unsigned long gencount; 57 unsigned long gencount;
60}; 58};
61 59
62#define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */ 60#define NFS_ATTR_FATTR_TYPE (1U << 0)
63#define NFS_ATTR_FATTR 0x0002 /* post-op attributes */ 61#define NFS_ATTR_FATTR_MODE (1U << 1)
64#define NFS_ATTR_FATTR_V3 0x0004 /* NFSv3 attributes */ 62#define NFS_ATTR_FATTR_NLINK (1U << 2)
65#define NFS_ATTR_FATTR_V4 0x0008 /* NFSv4 change attribute */ 63#define NFS_ATTR_FATTR_OWNER (1U << 3)
66#define NFS_ATTR_WCC_V4 0x0010 /* pre-op change attribute */ 64#define NFS_ATTR_FATTR_GROUP (1U << 4)
67#define NFS_ATTR_FATTR_V4_REFERRAL 0x0020 /* NFSv4 referral */ 65#define NFS_ATTR_FATTR_RDEV (1U << 5)
66#define NFS_ATTR_FATTR_SIZE (1U << 6)
67#define NFS_ATTR_FATTR_PRESIZE (1U << 7)
68#define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8)
69#define NFS_ATTR_FATTR_SPACE_USED (1U << 9)
70#define NFS_ATTR_FATTR_FSID (1U << 10)
71#define NFS_ATTR_FATTR_FILEID (1U << 11)
72#define NFS_ATTR_FATTR_ATIME (1U << 12)
73#define NFS_ATTR_FATTR_MTIME (1U << 13)
74#define NFS_ATTR_FATTR_CTIME (1U << 14)
75#define NFS_ATTR_FATTR_PREMTIME (1U << 15)
76#define NFS_ATTR_FATTR_PRECTIME (1U << 16)
77#define NFS_ATTR_FATTR_CHANGE (1U << 17)
78#define NFS_ATTR_FATTR_PRECHANGE (1U << 18)
79#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 19) /* NFSv4 referral */
80
81#define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \
82 | NFS_ATTR_FATTR_MODE \
83 | NFS_ATTR_FATTR_NLINK \
84 | NFS_ATTR_FATTR_OWNER \
85 | NFS_ATTR_FATTR_GROUP \
86 | NFS_ATTR_FATTR_RDEV \
87 | NFS_ATTR_FATTR_SIZE \
88 | NFS_ATTR_FATTR_FSID \
89 | NFS_ATTR_FATTR_FILEID \
90 | NFS_ATTR_FATTR_ATIME \
91 | NFS_ATTR_FATTR_MTIME \
92 | NFS_ATTR_FATTR_CTIME)
93#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \
94 | NFS_ATTR_FATTR_BLOCKS_USED)
95#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \
96 | NFS_ATTR_FATTR_SPACE_USED)
97#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
98 | NFS_ATTR_FATTR_SPACE_USED \
99 | NFS_ATTR_FATTR_CHANGE)
68 100
69/* 101/*
70 * Info on the file system 102 * Info on the file system
@@ -836,6 +868,7 @@ struct nfs_rpc_ops {
836 int (*lock)(struct file *, int, struct file_lock *); 868 int (*lock)(struct file *, int, struct file_lock *);
837 int (*lock_check_bounds)(const struct file_lock *); 869 int (*lock_check_bounds)(const struct file_lock *);
838 void (*clear_acl_cache)(struct inode *); 870 void (*clear_acl_cache)(struct inode *);
871 void (*close_context)(struct nfs_open_context *ctx, int);
839}; 872};
840 873
841/* 874/*
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h
index 04b355c801d8..5bccaab81056 100644
--- a/include/linux/nfsd/cache.h
+++ b/include/linux/nfsd/cache.h
@@ -76,4 +76,12 @@ void nfsd_reply_cache_shutdown(void);
76int nfsd_cache_lookup(struct svc_rqst *, int); 76int nfsd_cache_lookup(struct svc_rqst *, int);
77void nfsd_cache_update(struct svc_rqst *, int, __be32 *); 77void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
78 78
79#ifdef CONFIG_NFSD_V4
80void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp);
81#else /* CONFIG_NFSD_V4 */
82static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
83{
84}
85#endif /* CONFIG_NFSD_V4 */
86
79#endif /* NFSCACHE_H */ 87#endif /* NFSCACHE_H */
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index e19f45991b2e..2b49d676d0c9 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -23,7 +23,7 @@
23/* 23/*
24 * nfsd version 24 * nfsd version
25 */ 25 */
26#define NFSD_SUPPORTED_MINOR_VERSION 0 26#define NFSD_SUPPORTED_MINOR_VERSION 1
27 27
28/* 28/*
29 * Flags for nfsd_permission 29 * Flags for nfsd_permission
@@ -53,6 +53,7 @@ typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int);
53extern struct svc_program nfsd_program; 53extern struct svc_program nfsd_program;
54extern struct svc_version nfsd_version2, nfsd_version3, 54extern struct svc_version nfsd_version2, nfsd_version3,
55 nfsd_version4; 55 nfsd_version4;
56extern u32 nfsd_supported_minorversion;
56extern struct mutex nfsd_mutex; 57extern struct mutex nfsd_mutex;
57extern struct svc_serv *nfsd_serv; 58extern struct svc_serv *nfsd_serv;
58 59
@@ -105,7 +106,7 @@ void nfsd_close(struct file *);
105__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *, 106__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *,
106 loff_t, struct kvec *, int, unsigned long *); 107 loff_t, struct kvec *, int, unsigned long *);
107__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, 108__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *,
108 loff_t, struct kvec *,int, unsigned long, int *); 109 loff_t, struct kvec *,int, unsigned long *, int *);
109__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, 110__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
110 char *, int *); 111 char *, int *);
111__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, 112__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
@@ -149,6 +150,7 @@ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
149 150
150enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; 151enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
151int nfsd_vers(int vers, enum vers_op change); 152int nfsd_vers(int vers, enum vers_op change);
153int nfsd_minorversion(u32 minorversion, enum vers_op change);
152void nfsd_reset_versions(void); 154void nfsd_reset_versions(void);
153int nfsd_create_serv(void); 155int nfsd_create_serv(void);
154 156
@@ -186,78 +188,119 @@ void nfsd_lockd_shutdown(void);
186/* 188/*
187 * These macros provide pre-xdr'ed values for faster operation. 189 * These macros provide pre-xdr'ed values for faster operation.
188 */ 190 */
189#define nfs_ok __constant_htonl(NFS_OK) 191#define nfs_ok cpu_to_be32(NFS_OK)
190#define nfserr_perm __constant_htonl(NFSERR_PERM) 192#define nfserr_perm cpu_to_be32(NFSERR_PERM)
191#define nfserr_noent __constant_htonl(NFSERR_NOENT) 193#define nfserr_noent cpu_to_be32(NFSERR_NOENT)
192#define nfserr_io __constant_htonl(NFSERR_IO) 194#define nfserr_io cpu_to_be32(NFSERR_IO)
193#define nfserr_nxio __constant_htonl(NFSERR_NXIO) 195#define nfserr_nxio cpu_to_be32(NFSERR_NXIO)
194#define nfserr_eagain __constant_htonl(NFSERR_EAGAIN) 196#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN)
195#define nfserr_acces __constant_htonl(NFSERR_ACCES) 197#define nfserr_acces cpu_to_be32(NFSERR_ACCES)
196#define nfserr_exist __constant_htonl(NFSERR_EXIST) 198#define nfserr_exist cpu_to_be32(NFSERR_EXIST)
197#define nfserr_xdev __constant_htonl(NFSERR_XDEV) 199#define nfserr_xdev cpu_to_be32(NFSERR_XDEV)
198#define nfserr_nodev __constant_htonl(NFSERR_NODEV) 200#define nfserr_nodev cpu_to_be32(NFSERR_NODEV)
199#define nfserr_notdir __constant_htonl(NFSERR_NOTDIR) 201#define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR)
200#define nfserr_isdir __constant_htonl(NFSERR_ISDIR) 202#define nfserr_isdir cpu_to_be32(NFSERR_ISDIR)
201#define nfserr_inval __constant_htonl(NFSERR_INVAL) 203#define nfserr_inval cpu_to_be32(NFSERR_INVAL)
202#define nfserr_fbig __constant_htonl(NFSERR_FBIG) 204#define nfserr_fbig cpu_to_be32(NFSERR_FBIG)
203#define nfserr_nospc __constant_htonl(NFSERR_NOSPC) 205#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC)
204#define nfserr_rofs __constant_htonl(NFSERR_ROFS) 206#define nfserr_rofs cpu_to_be32(NFSERR_ROFS)
205#define nfserr_mlink __constant_htonl(NFSERR_MLINK) 207#define nfserr_mlink cpu_to_be32(NFSERR_MLINK)
206#define nfserr_opnotsupp __constant_htonl(NFSERR_OPNOTSUPP) 208#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP)
207#define nfserr_nametoolong __constant_htonl(NFSERR_NAMETOOLONG) 209#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG)
208#define nfserr_notempty __constant_htonl(NFSERR_NOTEMPTY) 210#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY)
209#define nfserr_dquot __constant_htonl(NFSERR_DQUOT) 211#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT)
210#define nfserr_stale __constant_htonl(NFSERR_STALE) 212#define nfserr_stale cpu_to_be32(NFSERR_STALE)
211#define nfserr_remote __constant_htonl(NFSERR_REMOTE) 213#define nfserr_remote cpu_to_be32(NFSERR_REMOTE)
212#define nfserr_wflush __constant_htonl(NFSERR_WFLUSH) 214#define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH)
213#define nfserr_badhandle __constant_htonl(NFSERR_BADHANDLE) 215#define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE)
214#define nfserr_notsync __constant_htonl(NFSERR_NOT_SYNC) 216#define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC)
215#define nfserr_badcookie __constant_htonl(NFSERR_BAD_COOKIE) 217#define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE)
216#define nfserr_notsupp __constant_htonl(NFSERR_NOTSUPP) 218#define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP)
217#define nfserr_toosmall __constant_htonl(NFSERR_TOOSMALL) 219#define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL)
218#define nfserr_serverfault __constant_htonl(NFSERR_SERVERFAULT) 220#define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT)
219#define nfserr_badtype __constant_htonl(NFSERR_BADTYPE) 221#define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE)
220#define nfserr_jukebox __constant_htonl(NFSERR_JUKEBOX) 222#define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX)
221#define nfserr_denied __constant_htonl(NFSERR_DENIED) 223#define nfserr_denied cpu_to_be32(NFSERR_DENIED)
222#define nfserr_deadlock __constant_htonl(NFSERR_DEADLOCK) 224#define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK)
223#define nfserr_expired __constant_htonl(NFSERR_EXPIRED) 225#define nfserr_expired cpu_to_be32(NFSERR_EXPIRED)
224#define nfserr_bad_cookie __constant_htonl(NFSERR_BAD_COOKIE) 226#define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE)
225#define nfserr_same __constant_htonl(NFSERR_SAME) 227#define nfserr_same cpu_to_be32(NFSERR_SAME)
226#define nfserr_clid_inuse __constant_htonl(NFSERR_CLID_INUSE) 228#define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE)
227#define nfserr_stale_clientid __constant_htonl(NFSERR_STALE_CLIENTID) 229#define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID)
228#define nfserr_resource __constant_htonl(NFSERR_RESOURCE) 230#define nfserr_resource cpu_to_be32(NFSERR_RESOURCE)
229#define nfserr_moved __constant_htonl(NFSERR_MOVED) 231#define nfserr_moved cpu_to_be32(NFSERR_MOVED)
230#define nfserr_nofilehandle __constant_htonl(NFSERR_NOFILEHANDLE) 232#define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE)
231#define nfserr_minor_vers_mismatch __constant_htonl(NFSERR_MINOR_VERS_MISMATCH) 233#define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH)
232#define nfserr_share_denied __constant_htonl(NFSERR_SHARE_DENIED) 234#define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED)
233#define nfserr_stale_stateid __constant_htonl(NFSERR_STALE_STATEID) 235#define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID)
234#define nfserr_old_stateid __constant_htonl(NFSERR_OLD_STATEID) 236#define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID)
235#define nfserr_bad_stateid __constant_htonl(NFSERR_BAD_STATEID) 237#define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID)
236#define nfserr_bad_seqid __constant_htonl(NFSERR_BAD_SEQID) 238#define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID)
237#define nfserr_symlink __constant_htonl(NFSERR_SYMLINK) 239#define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK)
238#define nfserr_not_same __constant_htonl(NFSERR_NOT_SAME) 240#define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME)
239#define nfserr_restorefh __constant_htonl(NFSERR_RESTOREFH) 241#define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH)
240#define nfserr_attrnotsupp __constant_htonl(NFSERR_ATTRNOTSUPP) 242#define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP)
241#define nfserr_bad_xdr __constant_htonl(NFSERR_BAD_XDR) 243#define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR)
242#define nfserr_openmode __constant_htonl(NFSERR_OPENMODE) 244#define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE)
243#define nfserr_locks_held __constant_htonl(NFSERR_LOCKS_HELD) 245#define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD)
244#define nfserr_op_illegal __constant_htonl(NFSERR_OP_ILLEGAL) 246#define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL)
245#define nfserr_grace __constant_htonl(NFSERR_GRACE) 247#define nfserr_grace cpu_to_be32(NFSERR_GRACE)
246#define nfserr_no_grace __constant_htonl(NFSERR_NO_GRACE) 248#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE)
247#define nfserr_reclaim_bad __constant_htonl(NFSERR_RECLAIM_BAD) 249#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD)
248#define nfserr_badname __constant_htonl(NFSERR_BADNAME) 250#define nfserr_badname cpu_to_be32(NFSERR_BADNAME)
249#define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN) 251#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN)
250#define nfserr_locked __constant_htonl(NFSERR_LOCKED) 252#define nfserr_locked cpu_to_be32(NFSERR_LOCKED)
251#define nfserr_wrongsec __constant_htonl(NFSERR_WRONGSEC) 253#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC)
252#define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME) 254#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE)
255#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT)
256#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST)
257#define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION)
258#define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT)
259#define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY)
260#define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
261#define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED)
262#define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY)
263#define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER)
264#define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE)
265#define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT)
266#define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT)
267#define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE)
268#define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED)
269#define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS)
270#define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG)
271#define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG)
272#define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE)
273#define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP)
274#define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND)
275#define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS)
276#define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION)
277#define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP)
278#define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY)
279#define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE)
280#define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY)
281#define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT)
282#define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION)
283#define nfserr_encr_alg_unsupp cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP)
284#define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT)
285#define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP)
286#define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED)
287#define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE)
288#define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL)
289#define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG)
290#define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT)
291#define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED)
253 292
254/* error codes for internal use */ 293/* error codes for internal use */
255/* if a request fails due to kmalloc failure, it gets dropped. 294/* if a request fails due to kmalloc failure, it gets dropped.
256 * Client should resend eventually 295 * Client should resend eventually
257 */ 296 */
258#define nfserr_dropit __constant_htonl(30000) 297#define nfserr_dropit cpu_to_be32(30000)
259/* end-of-file indicator in readdir */ 298/* end-of-file indicator in readdir */
260#define nfserr_eof __constant_htonl(30001) 299#define nfserr_eof cpu_to_be32(30001)
300/* replay detected */
301#define nfserr_replay_me cpu_to_be32(11001)
302/* nfs41 replay detected */
303#define nfserr_replay_cache cpu_to_be32(11002)
261 304
262/* Check for dir entries '.' and '..' */ 305/* Check for dir entries '.' and '..' */
263#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.')) 306#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))
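The conversion above from __constant_htonl() to cpu_to_be32() keeps every nfserr_* value a compile-time big-endian constant of type __be32, which is what the XDR layer and sparse expect. A minimal, hedged sketch of how such constants are typically used when translating a host errno into a wire status (the helper name is invented; nfserr_noent, nfserr_acces and nfserr_io are defined earlier in this header):

    static __be32 example_errno_to_nfserr(int err)
    {
            switch (err) {
            case -ENOENT:
                    return nfserr_noent;    /* already big-endian, no runtime swap */
            case -EACCES:
                    return nfserr_acces;
            default:
                    return nfserr_io;
            }
    }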
@@ -300,7 +343,7 @@ extern struct timeval nfssvc_boot;
300 * TIME_BACKUP (unlikely to be supported any time soon) 343 * TIME_BACKUP (unlikely to be supported any time soon)
301 * TIME_CREATE (unlikely to be supported any time soon) 344 * TIME_CREATE (unlikely to be supported any time soon)
302 */ 345 */
303#define NFSD_SUPPORTED_ATTRS_WORD0 \ 346#define NFSD4_SUPPORTED_ATTRS_WORD0 \
304(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \ 347(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \
305 | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \ 348 | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \
306 | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \ 349 | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \
@@ -312,7 +355,7 @@ extern struct timeval nfssvc_boot;
312 | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ 355 | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \
313 | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) 356 | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL)
314 357
315#define NFSD_SUPPORTED_ATTRS_WORD1 \ 358#define NFSD4_SUPPORTED_ATTRS_WORD1 \
316(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \ 359(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \
317 | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \ 360 | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \
318 | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \ 361 | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \
@@ -320,6 +363,35 @@ extern struct timeval nfssvc_boot;
320 | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \ 363 | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \
321 | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID) 364 | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID)
322 365
366#define NFSD4_SUPPORTED_ATTRS_WORD2 0
367
368#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \
369 NFSD4_SUPPORTED_ATTRS_WORD0
370
371#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \
372 NFSD4_SUPPORTED_ATTRS_WORD1
373
374#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \
375 (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
376
377static inline u32 nfsd_suppattrs0(u32 minorversion)
378{
379 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
380 : NFSD4_SUPPORTED_ATTRS_WORD0;
381}
382
383static inline u32 nfsd_suppattrs1(u32 minorversion)
384{
385 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1
386 : NFSD4_SUPPORTED_ATTRS_WORD1;
387}
388
389static inline u32 nfsd_suppattrs2(u32 minorversion)
390{
391 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2
392 : NFSD4_SUPPORTED_ATTRS_WORD2;
393}
394
323/* These will return ERR_INVAL if specified in GETATTR or READDIR. */ 395/* These will return ERR_INVAL if specified in GETATTR or READDIR. */
324#define NFSD_WRITEONLY_ATTRS_WORD1 \ 396#define NFSD_WRITEONLY_ATTRS_WORD1 \
325(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) 397(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
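The nfsd_suppattrs{0,1,2}() helpers above centralize the choice between the v4.0 and v4.1 supported-attribute words. A hedged sketch of the masking pattern they enable when validating a client-supplied bitmap (the function name is illustrative):

    static void example_mask_supported(u32 bmval[3], u32 minorversion)
    {
            bmval[0] &= nfsd_suppattrs0(minorversion);
            bmval[1] &= nfsd_suppattrs1(minorversion);
            bmval[2] &= nfsd_suppattrs2(minorversion);
    }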
@@ -330,6 +402,19 @@ extern struct timeval nfssvc_boot;
330#define NFSD_WRITEABLE_ATTRS_WORD1 \ 402#define NFSD_WRITEABLE_ATTRS_WORD1 \
331(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ 403(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \
332 | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) 404 | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
405#define NFSD_WRITEABLE_ATTRS_WORD2 0
406
407#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \
408 NFSD_WRITEABLE_ATTRS_WORD0
409/*
410 * we currently store the exclusive create verifier in the v_{a,m}time
411 * attributes so the client can't set these at create time using EXCLUSIVE4_1
412 */
413#define NFSD_SUPPATTR_EXCLCREAT_WORD1 \
414 (NFSD_WRITEABLE_ATTRS_WORD1 & \
415 ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET))
416#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \
417 NFSD_WRITEABLE_ATTRS_WORD2
333 418
334#endif /* CONFIG_NFSD_V4 */ 419#endif /* CONFIG_NFSD_V4 */
335 420
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index fa317f6c154b..afa19016c4a8 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -269,6 +269,13 @@ fh_copy(struct svc_fh *dst, struct svc_fh *src)
269 return dst; 269 return dst;
270} 270}
271 271
272static inline void
273fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
274{
275 dst->fh_size = src->fh_size;
276 memcpy(&dst->fh_base, &src->fh_base, src->fh_size);
277}
278
272static __inline__ struct svc_fh * 279static __inline__ struct svc_fh *
273fh_init(struct svc_fh *fhp, int maxsize) 280fh_init(struct svc_fh *fhp, int maxsize)
274{ 281{
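fh_copy_shallow() duplicates only the raw handle bytes rather than a whole svc_fh, which is what the knfsd_fh conversions in state.h below rely on. A hedged usage fragment ('dp' and 'fhp' are illustrative names):

    /* remember the file handle for a later delegation recall */
    fh_copy_shallow(&dp->dl_fh, &fhp->fh_handle);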
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 128298c0362d..4d61c873feed 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -66,8 +66,7 @@ struct nfs4_cb_recall {
66 u32 cbr_ident; 66 u32 cbr_ident;
67 int cbr_trunc; 67 int cbr_trunc;
68 stateid_t cbr_stateid; 68 stateid_t cbr_stateid;
69 u32 cbr_fhlen; 69 struct knfsd_fh cbr_fh;
70 char cbr_fhval[NFS4_FHSIZE];
71 struct nfs4_delegation *cbr_dp; 70 struct nfs4_delegation *cbr_dp;
72}; 71};
73 72
@@ -86,8 +85,7 @@ struct nfs4_delegation {
86}; 85};
87 86
88#define dl_stateid dl_recall.cbr_stateid 87#define dl_stateid dl_recall.cbr_stateid
89#define dl_fhlen dl_recall.cbr_fhlen 88#define dl_fh dl_recall.cbr_fh
90#define dl_fhval dl_recall.cbr_fhval
91 89
92/* client delegation callback info */ 90/* client delegation callback info */
93struct nfs4_callback { 91struct nfs4_callback {
@@ -101,6 +99,64 @@ struct nfs4_callback {
101 struct rpc_clnt * cb_client; 99 struct rpc_clnt * cb_client;
102}; 100};
103 101
102/* Maximum number of slots per session. 128 is useful for long haul TCP */
103#define NFSD_MAX_SLOTS_PER_SESSION 128
104/* Maximum number of pages per slot cache entry */
105#define NFSD_PAGES_PER_SLOT 1
106/* Maximum number of operations per session compound */
107#define NFSD_MAX_OPS_PER_COMPOUND 16
108
109struct nfsd4_cache_entry {
110 __be32 ce_status;
111 struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */
112 struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1];
113 int ce_cachethis;
114 short ce_resused;
115 int ce_opcnt;
116 int ce_rpchdrlen;
117};
118
119struct nfsd4_slot {
120 bool sl_inuse;
121 u32 sl_seqid;
122 struct nfsd4_cache_entry sl_cache_entry;
123};
124
125struct nfsd4_session {
126 struct kref se_ref;
127 struct list_head se_hash; /* hash by sessionid */
128 struct list_head se_perclnt;
129 u32 se_flags;
130 struct nfs4_client *se_client; /* for expire_client */
131 struct nfs4_sessionid se_sessionid;
132 u32 se_fmaxreq_sz;
133 u32 se_fmaxresp_sz;
134 u32 se_fmaxresp_cached;
135 u32 se_fmaxops;
136 u32 se_fnumslots;
137 struct nfsd4_slot se_slots[]; /* forward channel slots */
138};
139
140static inline void
141nfsd4_put_session(struct nfsd4_session *ses)
142{
143 extern void free_session(struct kref *kref);
144 kref_put(&ses->se_ref, free_session);
145}
146
147static inline void
148nfsd4_get_session(struct nfsd4_session *ses)
149{
150 kref_get(&ses->se_ref);
151}
152
153/* formatted contents of nfs4_sessionid */
154struct nfsd4_sessionid {
155 clientid_t clientid;
156 u32 sequence;
157 u32 reserved;
158};
159
104#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */ 160#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */
105 161
106/* 162/*
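struct nfsd4_session ends in a flexible array of slots, so whoever creates it must size the allocation from the negotiated slot count and manage the lifetime through the kref helpers above. A rough sketch under those assumptions ('numslots' is illustrative; the real setup also fills channel attributes and hashes the session):

    struct nfsd4_session *ses;

    ses = kzalloc(sizeof(*ses) + numslots * sizeof(struct nfsd4_slot), GFP_KERNEL);
    if (ses) {
            kref_init(&ses->se_ref);
            ses->se_fnumslots = numslots;
    }
    /* later, each user brackets access with: */
    nfsd4_get_session(ses);
    nfsd4_put_session(ses);         /* the final put ends up in free_session() */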
@@ -132,6 +188,12 @@ struct nfs4_client {
132 struct nfs4_callback cl_callback; /* callback info */ 188 struct nfs4_callback cl_callback; /* callback info */
133 atomic_t cl_count; /* ref count */ 189 atomic_t cl_count; /* ref count */
134 u32 cl_firststate; /* recovery dir creation */ 190 u32 cl_firststate; /* recovery dir creation */
191
192 /* for nfs41 */
193 struct list_head cl_sessions;
194 struct nfsd4_slot cl_slot; /* create_session slot */
195 u32 cl_exchange_flags;
196 struct nfs4_sessionid cl_sessionid;
135}; 197};
136 198
137/* struct nfs4_client_reset 199/* struct nfs4_client_reset
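The new nfs41 members hang session state off each client. A minimal, hedged initialization sketch for when a client is created (the function name is invented):

    static void example_init_nfs41_client(struct nfs4_client *clp, u32 exchange_flags)
    {
            INIT_LIST_HEAD(&clp->cl_sessions);
            clp->cl_exchange_flags = exchange_flags;
    }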
@@ -168,8 +230,7 @@ struct nfs4_replay {
168 unsigned int rp_buflen; 230 unsigned int rp_buflen;
169 char *rp_buf; 231 char *rp_buf;
170 unsigned int rp_allocated; 232 unsigned int rp_allocated;
171 int rp_openfh_len; 233 struct knfsd_fh rp_openfh;
172 char rp_openfh[NFS4_FHSIZE];
173 char rp_ibuf[NFSD4_REPLAY_ISIZE]; 234 char rp_ibuf[NFSD4_REPLAY_ISIZE];
174}; 235};
175 236
@@ -217,7 +278,7 @@ struct nfs4_stateowner {
217* share_acces, share_deny on the file. 278* share_acces, share_deny on the file.
218*/ 279*/
219struct nfs4_file { 280struct nfs4_file {
220 struct kref fi_ref; 281 atomic_t fi_ref;
221 struct list_head fi_hash; /* hash by "struct inode *" */ 282 struct list_head fi_hash; /* hash by "struct inode *" */
222 struct list_head fi_stateids; 283 struct list_head fi_stateids;
223 struct list_head fi_delegations; 284 struct list_head fi_delegations;
@@ -259,14 +320,13 @@ struct nfs4_stateid {
259}; 320};
260 321
261/* flags for preprocess_seqid_op() */ 322/* flags for preprocess_seqid_op() */
262#define CHECK_FH 0x00000001 323#define HAS_SESSION 0x00000001
263#define CONFIRM 0x00000002 324#define CONFIRM 0x00000002
264#define OPEN_STATE 0x00000004 325#define OPEN_STATE 0x00000004
265#define LOCK_STATE 0x00000008 326#define LOCK_STATE 0x00000008
266#define RD_STATE 0x00000010 327#define RD_STATE 0x00000010
267#define WR_STATE 0x00000020 328#define WR_STATE 0x00000020
268#define CLOSE_STATE 0x00000040 329#define CLOSE_STATE 0x00000040
269#define DELEG_RET 0x00000080
270 330
271#define seqid_mutating_err(err) \ 331#define seqid_mutating_err(err) \
272 (((err) != nfserr_stale_clientid) && \ 332 (((err) != nfserr_stale_clientid) && \
@@ -274,7 +334,9 @@ struct nfs4_stateid {
274 ((err) != nfserr_stale_stateid) && \ 334 ((err) != nfserr_stale_stateid) && \
275 ((err) != nfserr_bad_stateid)) 335 ((err) != nfserr_bad_stateid))
276 336
277extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh, 337struct nfsd4_compound_state;
338
339extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
278 stateid_t *stateid, int flags, struct file **filp); 340 stateid_t *stateid, int flags, struct file **filp);
279extern void nfs4_lock_state(void); 341extern void nfs4_lock_state(void);
280extern void nfs4_unlock_state(void); 342extern void nfs4_unlock_state(void);
@@ -290,7 +352,7 @@ extern void nfsd4_init_recdir(char *recdir_name);
290extern int nfsd4_recdir_load(void); 352extern int nfsd4_recdir_load(void);
291extern void nfsd4_shutdown_recdir(void); 353extern void nfsd4_shutdown_recdir(void);
292extern int nfs4_client_to_reclaim(const char *name); 354extern int nfs4_client_to_reclaim(const char *name);
293extern int nfs4_has_reclaimed_state(const char *name); 355extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
294extern void nfsd4_recdir_purge_old(void); 356extern void nfsd4_recdir_purge_old(void);
295extern int nfsd4_create_clid_dir(struct nfs4_client *clp); 357extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
296extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); 358extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
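nfs4_preprocess_stateid_op() now receives the whole compound state instead of a bare svc_fh, so it can honour the HAS_SESSION flag above. A hedged call-site fragment (the READ op structure and variable names are assumptions):

    status = nfs4_preprocess_stateid_op(cstate, &read->rd_stateid,
                                        RD_STATE, &filp);
    if (status)
            return status;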
diff --git a/include/linux/nfsd/stats.h b/include/linux/nfsd/stats.h
index 7678cfbe9960..2693ef647df6 100644
--- a/include/linux/nfsd/stats.h
+++ b/include/linux/nfsd/stats.h
@@ -11,6 +11,11 @@
11 11
12#include <linux/nfs4.h> 12#include <linux/nfs4.h>
13 13
14/* thread usage wraps very million seconds (approx one fortnight) */
15#define NFSD_USAGE_WRAP (HZ*1000000)
16
17#ifdef __KERNEL__
18
14struct nfsd_stats { 19struct nfsd_stats {
15 unsigned int rchits; /* repcache hits */ 20 unsigned int rchits; /* repcache hits */
16 unsigned int rcmisses; /* repcache hits */ 21 unsigned int rcmisses; /* repcache hits */
@@ -35,10 +40,6 @@ struct nfsd_stats {
35 40
36}; 41};
37 42
38/* thread usage wraps very million seconds (approx one fortnight) */
39#define NFSD_USAGE_WRAP (HZ*1000000)
40
41#ifdef __KERNEL__
42 43
43extern struct nfsd_stats nfsdstats; 44extern struct nfsd_stats nfsdstats;
44extern struct svc_stat nfsd_svcstats; 45extern struct svc_stat nfsd_svcstats;
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 27bd3e38ec5a..f80d6013fdc3 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -45,10 +45,22 @@
45#define XDR_LEN(n) (((n) + 3) & ~3) 45#define XDR_LEN(n) (((n) + 3) & ~3)
46 46
47struct nfsd4_compound_state { 47struct nfsd4_compound_state {
48 struct svc_fh current_fh; 48 struct svc_fh current_fh;
49 struct svc_fh save_fh; 49 struct svc_fh save_fh;
50 struct nfs4_stateowner *replay_owner; 50 struct nfs4_stateowner *replay_owner;
51}; 51 /* For sessions DRC */
52 struct nfsd4_session *session;
53 struct nfsd4_slot *slot;
54 __be32 *statp;
55 size_t iovlen;
56 u32 minorversion;
57 u32 status;
58};
59
60static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs)
61{
62 return cs->slot != NULL;
63}
52 64
53struct nfsd4_change_info { 65struct nfsd4_change_info {
54 u32 atomic; 66 u32 atomic;
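Recording minorversion, session and slot in the compound state lets individual op handlers vary behaviour by protocol revision. A hedged fragment, e.g. for an op that only exists in NFSv4.0 (such as OPEN_CONFIRM):

    if (cstate->minorversion)
            return nfserr_notsupp;          /* illustrative error choice */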
@@ -90,7 +102,7 @@ struct nfsd4_create {
90 u32 specdata2; 102 u32 specdata2;
91 } dev; /* NF4BLK, NF4CHR */ 103 } dev; /* NF4BLK, NF4CHR */
92 } u; 104 } u;
93 u32 cr_bmval[2]; /* request */ 105 u32 cr_bmval[3]; /* request */
94 struct iattr cr_iattr; /* request */ 106 struct iattr cr_iattr; /* request */
95 struct nfsd4_change_info cr_cinfo; /* response */ 107 struct nfsd4_change_info cr_cinfo; /* response */
96 struct nfs4_acl *cr_acl; 108 struct nfs4_acl *cr_acl;
@@ -105,7 +117,7 @@ struct nfsd4_delegreturn {
105}; 117};
106 118
107struct nfsd4_getattr { 119struct nfsd4_getattr {
108 u32 ga_bmval[2]; /* request */ 120 u32 ga_bmval[3]; /* request */
109 struct svc_fh *ga_fhp; /* response */ 121 struct svc_fh *ga_fhp; /* response */
110}; 122};
111 123
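All attribute bitmaps grow from two to three words because NFSv4.1 defines WORD2 attributes such as FATTR4_WORD2_SUPPATTR_EXCLCREAT. A hedged sketch of how an XDR decoder might fill a three-word bitmap from a variable-length count (the helper is invented):

    static void example_decode_bmval(__be32 *p, u32 count, u32 bmval[3])
    {
            bmval[0] = count > 0 ? be32_to_cpu(*p++) : 0;
            bmval[1] = count > 1 ? be32_to_cpu(*p++) : 0;
            bmval[2] = count > 2 ? be32_to_cpu(*p++) : 0;
    }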
@@ -206,11 +218,9 @@ struct nfsd4_open {
206 stateid_t op_delegate_stateid; /* request - response */ 218 stateid_t op_delegate_stateid; /* request - response */
207 u32 op_create; /* request */ 219 u32 op_create; /* request */
208 u32 op_createmode; /* request */ 220 u32 op_createmode; /* request */
209 u32 op_bmval[2]; /* request */ 221 u32 op_bmval[3]; /* request */
210 union { /* request */ 222 struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
211 struct iattr iattr; /* UNCHECKED4,GUARDED4 */ 223 nfs4_verifier verf; /* EXCLUSIVE4 */
212 nfs4_verifier verf; /* EXCLUSIVE4 */
213 } u;
214 clientid_t op_clientid; /* request */ 224 clientid_t op_clientid; /* request */
215 struct xdr_netobj op_owner; /* request */ 225 struct xdr_netobj op_owner; /* request */
216 u32 op_seqid; /* request */ 226 u32 op_seqid; /* request */
@@ -224,8 +234,8 @@ struct nfsd4_open {
224 struct nfs4_stateowner *op_stateowner; /* used during processing */ 234 struct nfs4_stateowner *op_stateowner; /* used during processing */
225 struct nfs4_acl *op_acl; 235 struct nfs4_acl *op_acl;
226}; 236};
227#define op_iattr u.iattr 237#define op_iattr iattr
228#define op_verf u.verf 238#define op_verf verf
229 239
230struct nfsd4_open_confirm { 240struct nfsd4_open_confirm {
231 stateid_t oc_req_stateid /* request */; 241 stateid_t oc_req_stateid /* request */;
@@ -259,7 +269,7 @@ struct nfsd4_readdir {
259 nfs4_verifier rd_verf; /* request */ 269 nfs4_verifier rd_verf; /* request */
260 u32 rd_dircount; /* request */ 270 u32 rd_dircount; /* request */
261 u32 rd_maxcount; /* request */ 271 u32 rd_maxcount; /* request */
262 u32 rd_bmval[2]; /* request */ 272 u32 rd_bmval[3]; /* request */
263 struct svc_rqst *rd_rqstp; /* response */ 273 struct svc_rqst *rd_rqstp; /* response */
264 struct svc_fh * rd_fhp; /* response */ 274 struct svc_fh * rd_fhp; /* response */
265 275
@@ -301,7 +311,7 @@ struct nfsd4_secinfo {
301 311
302struct nfsd4_setattr { 312struct nfsd4_setattr {
303 stateid_t sa_stateid; /* request */ 313 stateid_t sa_stateid; /* request */
304 u32 sa_bmval[2]; /* request */ 314 u32 sa_bmval[3]; /* request */
305 struct iattr sa_iattr; /* request */ 315 struct iattr sa_iattr; /* request */
306 struct nfs4_acl *sa_acl; 316 struct nfs4_acl *sa_acl;
307}; 317};
@@ -327,7 +337,7 @@ struct nfsd4_setclientid_confirm {
327 337
328/* also used for NVERIFY */ 338/* also used for NVERIFY */
329struct nfsd4_verify { 339struct nfsd4_verify {
330 u32 ve_bmval[2]; /* request */ 340 u32 ve_bmval[3]; /* request */
331 u32 ve_attrlen; /* request */ 341 u32 ve_attrlen; /* request */
332 char * ve_attrval; /* request */ 342 char * ve_attrval; /* request */
333}; 343};
@@ -344,6 +354,54 @@ struct nfsd4_write {
344 nfs4_verifier wr_verifier; /* response */ 354 nfs4_verifier wr_verifier; /* response */
345}; 355};
346 356
357struct nfsd4_exchange_id {
358 nfs4_verifier verifier;
359 struct xdr_netobj clname;
360 u32 flags;
361 clientid_t clientid;
362 u32 seqid;
363 int spa_how;
364};
365
366struct nfsd4_channel_attrs {
367 u32 headerpadsz;
368 u32 maxreq_sz;
369 u32 maxresp_sz;
370 u32 maxresp_cached;
371 u32 maxops;
372 u32 maxreqs;
373 u32 nr_rdma_attrs;
374 u32 rdma_attrs;
375};
376
377struct nfsd4_create_session {
378 clientid_t clientid;
379 struct nfs4_sessionid sessionid;
380 u32 seqid;
381 u32 flags;
382 struct nfsd4_channel_attrs fore_channel;
383 struct nfsd4_channel_attrs back_channel;
384 u32 callback_prog;
385 u32 uid;
386 u32 gid;
387};
388
389struct nfsd4_sequence {
390 struct nfs4_sessionid sessionid; /* request/response */
391 u32 seqid; /* request/response */
392 u32 slotid; /* request/response */
393 u32 maxslots; /* request/response */
394 u32 cachethis; /* request */
395#if 0
396 u32 target_maxslots; /* response */
397 u32 status_flags; /* response */
398#endif /* not yet */
399};
400
401struct nfsd4_destroy_session {
402 struct nfs4_sessionid sessionid;
403};
404
347struct nfsd4_op { 405struct nfsd4_op {
348 int opnum; 406 int opnum;
349 __be32 status; 407 __be32 status;
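A SEQUENCE handler ties these structures together: slotid indexes the session's slot table and seqid separates replays from misordered requests. A rough sketch using only fields declared above (the exact error choices in the real code may differ):

    if (seq->slotid >= session->se_fnumslots)
            return nfserr_badslot;
    slot = &session->se_slots[seq->slotid];
    if (seq->seqid == slot->sl_seqid)
            return nfserr_replay_cache;     /* replay the cached reply */
    if (seq->seqid != slot->sl_seqid + 1)
            return nfserr_seq_misordered;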
@@ -378,6 +436,12 @@ struct nfsd4_op {
378 struct nfsd4_verify verify; 436 struct nfsd4_verify verify;
379 struct nfsd4_write write; 437 struct nfsd4_write write;
380 struct nfsd4_release_lockowner release_lockowner; 438 struct nfsd4_release_lockowner release_lockowner;
439
440 /* NFSv4.1 */
441 struct nfsd4_exchange_id exchange_id;
442 struct nfsd4_create_session create_session;
443 struct nfsd4_destroy_session destroy_session;
444 struct nfsd4_sequence sequence;
381 } u; 445 } u;
382 struct nfs4_replay * replay; 446 struct nfs4_replay * replay;
383}; 447};
@@ -416,9 +480,22 @@ struct nfsd4_compoundres {
416 u32 taglen; 480 u32 taglen;
417 char * tag; 481 char * tag;
418 u32 opcnt; 482 u32 opcnt;
419 __be32 * tagp; /* where to encode tag and opcount */ 483 __be32 * tagp; /* tag, opcount encode location */
484 struct nfsd4_compound_state cstate;
420}; 485};
421 486
487static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
488{
489 struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
490 return args->opcnt == 1;
491}
492
493static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
494{
495 return !resp->cstate.slot->sl_cache_entry.ce_cachethis ||
496 nfsd4_is_solo_sequence(resp);
497}
498
422#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) 499#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
423 500
424static inline void 501static inline void
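nfsd4_not_cached() combines the client's cachethis request with the solo-SEQUENCE special case. A hedged fragment showing how the reply path might pair it with nfsd4_store_cache_entry(), declared just below:

    if (!nfsd4_not_cached(resp))
            nfsd4_store_cache_entry(resp);  /* keep the encoded reply in the slot */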
@@ -448,7 +525,23 @@ extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
448extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 525extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
449 struct nfsd4_compound_state *, 526 struct nfsd4_compound_state *,
450 struct nfsd4_setclientid_confirm *setclientid_confirm); 527 struct nfsd4_setclientid_confirm *setclientid_confirm);
451extern __be32 nfsd4_process_open1(struct nfsd4_open *open); 528extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp);
529extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
530 struct nfsd4_sequence *seq);
531extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
532 struct nfsd4_compound_state *,
533struct nfsd4_exchange_id *);
534 extern __be32 nfsd4_create_session(struct svc_rqst *,
535 struct nfsd4_compound_state *,
536 struct nfsd4_create_session *);
537extern __be32 nfsd4_sequence(struct svc_rqst *,
538 struct nfsd4_compound_state *,
539 struct nfsd4_sequence *);
540extern __be32 nfsd4_destroy_session(struct svc_rqst *,
541 struct nfsd4_compound_state *,
542 struct nfsd4_destroy_session *);
543extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
544 struct nfsd4_open *open);
452extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, 545extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
453 struct svc_fh *current_fh, struct nfsd4_open *open); 546 struct svc_fh *current_fh, struct nfsd4_open *open);
454extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, 547extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index afad7dec1b36..7b370c7cfeff 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -8,6 +8,7 @@ struct mnt_namespace;
8struct uts_namespace; 8struct uts_namespace;
9struct ipc_namespace; 9struct ipc_namespace;
10struct pid_namespace; 10struct pid_namespace;
11struct fs_struct;
11 12
12/* 13/*
13 * A structure to contain pointers to all per-process 14 * A structure to contain pointers to all per-process
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h
new file mode 100644
index 000000000000..b0638fd91e92
--- /dev/null
+++ b/include/linux/page-debug-flags.h
@@ -0,0 +1,30 @@
1#ifndef LINUX_PAGE_DEBUG_FLAGS_H
2#define LINUX_PAGE_DEBUG_FLAGS_H
3
4/*
5 * page->debug_flags bits:
6 *
7 * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to
8 * implement generic debug pagealloc feature. The pages are filled with
9 * poison patterns and set this flag after free_pages(). The poisoned
10 * pages are verified whether the patterns are not corrupted and clear
11 * the flag before alloc_pages().
12 */
13
14enum page_debug_flags {
15 PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */
16};
17
18/*
19 * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably
20 * gets turned off when no debug features are enabling it!
21 */
22
23#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
24#if !defined(CONFIG_PAGE_POISONING) \
25/* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */
26#error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features!
27#endif
28#endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */
29
30#endif /* LINUX_PAGE_DEBUG_FLAGS_H */
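A hedged sketch of how a debug-pagealloc implementation might drive PAGE_DEBUG_FLAG_POISON; it assumes page->debug_flags is an unsigned long present when CONFIG_WANT_PAGE_DEBUG_FLAGS is set, and the helper names are invented:

    static inline void example_set_page_poison(struct page *page)
    {
            __set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
    }

    static inline int example_page_poison(struct page *page)
    {
            return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
    }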
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 219a523ecdb0..62214c7d2d93 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -82,6 +82,7 @@ enum pageflags {
82 PG_arch_1, 82 PG_arch_1,
83 PG_reserved, 83 PG_reserved,
84 PG_private, /* If pagecache, has fs-private data */ 84 PG_private, /* If pagecache, has fs-private data */
85 PG_private_2, /* If pagecache, has fs aux data */
85 PG_writeback, /* Page is under writeback */ 86 PG_writeback, /* Page is under writeback */
86#ifdef CONFIG_PAGEFLAGS_EXTENDED 87#ifdef CONFIG_PAGEFLAGS_EXTENDED
87 PG_head, /* A head page */ 88 PG_head, /* A head page */
@@ -96,6 +97,8 @@ enum pageflags {
96 PG_swapbacked, /* Page is backed by RAM/swap */ 97 PG_swapbacked, /* Page is backed by RAM/swap */
97#ifdef CONFIG_UNEVICTABLE_LRU 98#ifdef CONFIG_UNEVICTABLE_LRU
98 PG_unevictable, /* Page is "unevictable" */ 99 PG_unevictable, /* Page is "unevictable" */
100#endif
101#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
99 PG_mlocked, /* Page is vma mlocked */ 102 PG_mlocked, /* Page is vma mlocked */
100#endif 103#endif
101#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 104#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -106,6 +109,12 @@ enum pageflags {
106 /* Filesystems */ 109 /* Filesystems */
107 PG_checked = PG_owner_priv_1, 110 PG_checked = PG_owner_priv_1,
108 111
112 /* Two page bits are conscripted by FS-Cache to maintain local caching
113 * state. These bits are set on pages belonging to the netfs's inodes
114 * when those inodes are being locally cached.
115 */
116 PG_fscache = PG_private_2, /* page backed by cache */
117
109 /* XEN */ 118 /* XEN */
110 PG_pinned = PG_owner_priv_1, 119 PG_pinned = PG_owner_priv_1,
111 PG_savepinned = PG_dirty, 120 PG_savepinned = PG_dirty,
@@ -180,7 +189,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
180 189
181struct page; /* forward declaration */ 190struct page; /* forward declaration */
182 191
183TESTPAGEFLAG(Locked, locked) 192TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked)
184PAGEFLAG(Error, error) 193PAGEFLAG(Error, error)
185PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) 194PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
186PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) 195PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
@@ -192,8 +201,6 @@ PAGEFLAG(Checked, checked) /* Used by some filesystems */
192PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ 201PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
193PAGEFLAG(SavePinned, savepinned); /* Xen */ 202PAGEFLAG(SavePinned, savepinned); /* Xen */
194PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) 203PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
195PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
196 __SETPAGEFLAG(Private, private)
197PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) 204PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
198 205
199__PAGEFLAG(SlobPage, slob_page) 206__PAGEFLAG(SlobPage, slob_page)
@@ -203,6 +210,16 @@ __PAGEFLAG(SlubFrozen, slub_frozen)
203__PAGEFLAG(SlubDebug, slub_debug) 210__PAGEFLAG(SlubDebug, slub_debug)
204 211
205/* 212/*
213 * Private page markings that may be used by the filesystem that owns the page
214 * for its own purposes.
215 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
216 */
217PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
218 __CLEARPAGEFLAG(Private, private)
219PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
220PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
221
222/*
206 * Only test-and-set exist for PG_writeback. The unconditional operators are 223 * Only test-and-set exist for PG_writeback. The unconditional operators are
207 * risky: they bypass page accounting. 224 * risky: they bypass page accounting.
208 */ 225 */
@@ -234,20 +251,20 @@ PAGEFLAG_FALSE(SwapCache)
234#ifdef CONFIG_UNEVICTABLE_LRU 251#ifdef CONFIG_UNEVICTABLE_LRU
235PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) 252PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
236 TESTCLEARFLAG(Unevictable, unevictable) 253 TESTCLEARFLAG(Unevictable, unevictable)
254#else
255PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
256 SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
257 __CLEARPAGEFLAG_NOOP(Unevictable)
258#endif
237 259
260#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
238#define MLOCK_PAGES 1 261#define MLOCK_PAGES 1
239PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) 262PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
240 TESTSCFLAG(Mlocked, mlocked) 263 TESTSCFLAG(Mlocked, mlocked)
241
242#else 264#else
243
244#define MLOCK_PAGES 0 265#define MLOCK_PAGES 0
245PAGEFLAG_FALSE(Mlocked) 266PAGEFLAG_FALSE(Mlocked)
246 SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) 267 SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked)
247
248PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable)
249 SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable)
250 __CLEARPAGEFLAG_NOOP(Unevictable)
251#endif 268#endif
252 269
253#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR 270#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
@@ -367,9 +384,13 @@ static inline void __ClearPageTail(struct page *page)
367 384
368#ifdef CONFIG_UNEVICTABLE_LRU 385#ifdef CONFIG_UNEVICTABLE_LRU
369#define __PG_UNEVICTABLE (1 << PG_unevictable) 386#define __PG_UNEVICTABLE (1 << PG_unevictable)
370#define __PG_MLOCKED (1 << PG_mlocked)
371#else 387#else
372#define __PG_UNEVICTABLE 0 388#define __PG_UNEVICTABLE 0
389#endif
390
391#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
392#define __PG_MLOCKED (1 << PG_mlocked)
393#else
373#define __PG_MLOCKED 0 394#define __PG_MLOCKED 0
374#endif 395#endif
375 396
@@ -378,9 +399,10 @@ static inline void __ClearPageTail(struct page *page)
378 * these flags set. It they are, there is a problem. 399 * these flags set. It they are, there is a problem.
379 */ 400 */
380#define PAGE_FLAGS_CHECK_AT_FREE \ 401#define PAGE_FLAGS_CHECK_AT_FREE \
381 (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ 402 (1 << PG_lru | 1 << PG_locked | \
382 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ 403 1 << PG_private | 1 << PG_private_2 | \
383 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ 404 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
405 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
384 __PG_UNEVICTABLE | __PG_MLOCKED) 406 __PG_UNEVICTABLE | __PG_MLOCKED)
385 407
386/* 408/*
@@ -391,4 +413,16 @@ static inline void __ClearPageTail(struct page *page)
391#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) 413#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
392 414
393#endif /* !__GENERATING_BOUNDS_H */ 415#endif /* !__GENERATING_BOUNDS_H */
416
417/**
418 * page_has_private - Determine if page has private stuff
419 * @page: The page to be checked
420 *
421 * Determine if a page has private stuff, indicating that release routines
422 * should be invoked upon it.
423 */
424#define page_has_private(page) \
425 ((page)->flags & ((1 << PG_private) | \
426 (1 << PG_private_2)))
427
394#endif /* PAGE_FLAGS_H */ 428#endif /* PAGE_FLAGS_H */
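page_has_private() answers for both PG_private and the new PG_private_2 (PG_fscache), so release paths can ask a single question before dropping a page. A hedged fragment of the pattern:

    if (page_has_private(page) &&
        !try_to_release_page(page, GFP_KERNEL))
            return 0;       /* the filesystem or cache still holds private state */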
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 602cc1fdee90..7339c7bf7331 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -91,24 +91,23 @@ static inline void page_cgroup_init(void)
91 91
92#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 92#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
93#include <linux/swap.h> 93#include <linux/swap.h>
94extern struct mem_cgroup * 94extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
95swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem); 95extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
96extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent);
97extern int swap_cgroup_swapon(int type, unsigned long max_pages); 96extern int swap_cgroup_swapon(int type, unsigned long max_pages);
98extern void swap_cgroup_swapoff(int type); 97extern void swap_cgroup_swapoff(int type);
99#else 98#else
100#include <linux/swap.h> 99#include <linux/swap.h>
101 100
102static inline 101static inline
103struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem) 102unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
104{ 103{
105 return NULL; 104 return 0;
106} 105}
107 106
108static inline 107static inline
109struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent) 108unsigned short lookup_swap_cgroup(swp_entry_t ent)
110{ 109{
111 return NULL; 110 return 0;
112} 111}
113 112
114static inline int 113static inline int
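swap_cgroup_record() now stores a compact css id per swap entry instead of a mem_cgroup pointer. A hedged fragment of the record/lookup pairing ('ent' and 'new_id' are illustrative):

    unsigned short old_id = swap_cgroup_record(ent, new_id);   /* 0 if unowned before */
    unsigned short cur_id = lookup_swap_cgroup(ent);           /* reads back new_id */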
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 01ca0856caff..34da5230faab 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -18,9 +18,14 @@
18 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page 18 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
19 * allocation mode flags. 19 * allocation mode flags.
20 */ 20 */
21#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ 21enum mapping_flags {
22#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ 22 AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */
23#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ 23 AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
24 AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
25#ifdef CONFIG_UNEVICTABLE_LRU
26 AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
27#endif
28};
24 29
25static inline void mapping_set_error(struct address_space *mapping, int error) 30static inline void mapping_set_error(struct address_space *mapping, int error)
26{ 31{
@@ -33,7 +38,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error)
33} 38}
34 39
35#ifdef CONFIG_UNEVICTABLE_LRU 40#ifdef CONFIG_UNEVICTABLE_LRU
36#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */
37 41
38static inline void mapping_set_unevictable(struct address_space *mapping) 42static inline void mapping_set_unevictable(struct address_space *mapping)
39{ 43{
@@ -380,6 +384,11 @@ static inline void wait_on_page_writeback(struct page *page)
380extern void end_page_writeback(struct page *page); 384extern void end_page_writeback(struct page *page);
381 385
382/* 386/*
387 * Add an arbitrary waiter to a page's wait queue
388 */
389extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
390
391/*
383 * Fault a userspace page into pagetables. Return non-zero on a fault. 392 * Fault a userspace page into pagetables. Return non-zero on a fault.
384 * 393 *
385 * This assumes that two userspace pages are always sufficient. That's 394 * This assumes that two userspace pages are always sufficient. That's
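add_page_wait_queue() lets a caller attach its own waiter to a page's wait queue, as a caching layer might to learn when a page is unlocked. A hedged fragment; the wake function and cookie are assumptions:

    wait_queue_t waiter;

    init_waitqueue_func_entry(&waiter, example_wake_fn);
    waiter.private = example_cookie;
    add_page_wait_queue(page, &waiter);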
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 7b2886fa7fdc..bab82f4c571c 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -24,7 +24,6 @@ void __pagevec_release(struct pagevec *pvec);
24void __pagevec_free(struct pagevec *pvec); 24void __pagevec_free(struct pagevec *pvec);
25void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru); 25void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru);
26void pagevec_strip(struct pagevec *pvec); 26void pagevec_strip(struct pagevec *pvec);
27void pagevec_swap_free(struct pagevec *pvec);
28unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, 27unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
29 pgoff_t start, unsigned nr_pages); 28 pgoff_t start, unsigned nr_pages);
30unsigned pagevec_lookup_tag(struct pagevec *pvec, 29unsigned pagevec_lookup_tag(struct pagevec *pvec,
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 042c166f65d5..092e82e0048c 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -10,72 +10,25 @@
10 10
11#include <linux/acpi.h> 11#include <linux/acpi.h>
12 12
13#define OSC_QUERY_TYPE 0
14#define OSC_SUPPORT_TYPE 1
15#define OSC_CONTROL_TYPE 2
16#define OSC_SUPPORT_MASKS 0x1f
17
18/*
19 * _OSC DW0 Definition
20 */
21#define OSC_QUERY_ENABLE 1
22#define OSC_REQUEST_ERROR 2
23#define OSC_INVALID_UUID_ERROR 4
24#define OSC_INVALID_REVISION_ERROR 8
25#define OSC_CAPABILITIES_MASK_ERROR 16
26
27/*
28 * _OSC DW1 Definition (OS Support Fields)
29 */
30#define OSC_EXT_PCI_CONFIG_SUPPORT 1
31#define OSC_ACTIVE_STATE_PWR_SUPPORT 2
32#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4
33#define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8
34#define OSC_MSI_SUPPORT 16
35
36/*
37 * _OSC DW1 Definition (OS Control Fields)
38 */
39#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1
40#define OSC_SHPC_NATIVE_HP_CONTROL 2
41#define OSC_PCI_EXPRESS_PME_CONTROL 4
42#define OSC_PCI_EXPRESS_AER_CONTROL 8
43#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16
44
45#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
46 OSC_SHPC_NATIVE_HP_CONTROL | \
47 OSC_PCI_EXPRESS_PME_CONTROL | \
48 OSC_PCI_EXPRESS_AER_CONTROL | \
49 OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
50
51#ifdef CONFIG_ACPI 13#ifdef CONFIG_ACPI
52extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
53int pci_acpi_osc_support(acpi_handle handle, u32 flags);
54static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 14static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
55{ 15{
56 /* Find root host bridge */ 16 struct pci_bus *pbus = pdev->bus;
57 while (pdev->bus->self) 17 /* Find a PCI root bus */
58 pdev = pdev->bus->self; 18 while (pbus->parent)
59 19 pbus = pbus->parent;
60 return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus), 20 return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
61 pdev->bus->number); 21 pbus->number);
62} 22}
63 23
64static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) 24static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
65{ 25{
66 int seg = pci_domain_nr(pbus), busnr = pbus->number; 26 if (pbus->parent)
67 struct pci_dev *bridge = pbus->self; 27 return DEVICE_ACPI_HANDLE(&(pbus->self->dev));
68 if (bridge) 28 return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus),
69 return DEVICE_ACPI_HANDLE(&(bridge->dev)); 29 pbus->number);
70 return acpi_get_pci_rootbridge_handle(seg, busnr);
71} 30}
72#else 31#else
73#if !defined(AE_ERROR)
74typedef u32 acpi_status;
75#define AE_ERROR (acpi_status) (0x0001)
76#endif
77static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
78{return AE_ERROR;}
79static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) 32static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev)
80{ return NULL; } 33{ return NULL; }
81#endif 34#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index df3644132617..a7fe4bbd7ff1 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -52,6 +52,7 @@
52#include <asm/atomic.h> 52#include <asm/atomic.h>
53#include <linux/device.h> 53#include <linux/device.h>
54#include <linux/io.h> 54#include <linux/io.h>
55#include <linux/irqreturn.h>
55 56
56/* Include the ID list */ 57/* Include the ID list */
57#include <linux/pci_ids.h> 58#include <linux/pci_ids.h>
@@ -93,6 +94,12 @@ enum {
93 /* #6: expansion ROM resource */ 94 /* #6: expansion ROM resource */
94 PCI_ROM_RESOURCE, 95 PCI_ROM_RESOURCE,
95 96
97 /* device specific resources */
98#ifdef CONFIG_PCI_IOV
99 PCI_IOV_RESOURCES,
100 PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1,
101#endif
102
96 /* resources assigned to buses behind the bridge */ 103 /* resources assigned to buses behind the bridge */
97#define PCI_BRIDGE_RESOURCE_NUM 4 104#define PCI_BRIDGE_RESOURCE_NUM 4
98 105
@@ -180,6 +187,7 @@ struct pci_cap_saved_state {
180 187
181struct pcie_link_state; 188struct pcie_link_state;
182struct pci_vpd; 189struct pci_vpd;
190struct pci_sriov;
183 191
184/* 192/*
185 * The pci_dev structure is used to describe PCI devices. 193 * The pci_dev structure is used to describe PCI devices.
@@ -257,6 +265,8 @@ struct pci_dev {
257 unsigned int is_managed:1; 265 unsigned int is_managed:1;
258 unsigned int is_pcie:1; 266 unsigned int is_pcie:1;
259 unsigned int state_saved:1; 267 unsigned int state_saved:1;
268 unsigned int is_physfn:1;
269 unsigned int is_virtfn:1;
260 pci_dev_flags_t dev_flags; 270 pci_dev_flags_t dev_flags;
261 atomic_t enable_cnt; /* pci_enable_device has been called */ 271 atomic_t enable_cnt; /* pci_enable_device has been called */
262 272
@@ -270,6 +280,12 @@ struct pci_dev {
270 struct list_head msi_list; 280 struct list_head msi_list;
271#endif 281#endif
272 struct pci_vpd *vpd; 282 struct pci_vpd *vpd;
283#ifdef CONFIG_PCI_IOV
284 union {
285 struct pci_sriov *sriov; /* SR-IOV capability related */
286 struct pci_dev *physfn; /* the PF this VF is associated with */
287 };
288#endif
273}; 289};
274 290
275extern struct pci_dev *alloc_pci_dev(void); 291extern struct pci_dev *alloc_pci_dev(void);
@@ -341,6 +357,15 @@ struct pci_bus {
341#define pci_bus_b(n) list_entry(n, struct pci_bus, node) 357#define pci_bus_b(n) list_entry(n, struct pci_bus, node)
342#define to_pci_bus(n) container_of(n, struct pci_bus, dev) 358#define to_pci_bus(n) container_of(n, struct pci_bus, dev)
343 359
360/*
361 * Returns true if the pci bus is root (behind host-pci bridge),
362 * false otherwise
363 */
364static inline bool pci_is_root_bus(struct pci_bus *pbus)
365{
366 return !(pbus->parent);
367}
368
344#ifdef CONFIG_PCI_MSI 369#ifdef CONFIG_PCI_MSI
345static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) 370static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev)
346{ 371{
@@ -528,7 +553,7 @@ void pcibios_update_irq(struct pci_dev *, int irq);
528/* Generic PCI functions used internally */ 553/* Generic PCI functions used internally */
529 554
530extern struct pci_bus *pci_find_bus(int domain, int busnr); 555extern struct pci_bus *pci_find_bus(int domain, int busnr);
531void pci_bus_add_devices(struct pci_bus *bus); 556void pci_bus_add_devices(const struct pci_bus *bus);
532struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, 557struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
533 struct pci_ops *ops, void *sysdata); 558 struct pci_ops *ops, void *sysdata);
534static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, 559static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
@@ -702,6 +727,9 @@ int pci_back_from_sleep(struct pci_dev *dev);
702 727
703/* Functions for PCI Hotplug drivers to use */ 728/* Functions for PCI Hotplug drivers to use */
704int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); 729int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap);
730#ifdef CONFIG_HOTPLUG
731unsigned int pci_rescan_bus(struct pci_bus *bus);
732#endif
705 733
706/* Vital product data routines */ 734/* Vital product data routines */
707ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); 735ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf);
@@ -709,7 +737,7 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void
709int pci_vpd_truncate(struct pci_dev *dev, size_t size); 737int pci_vpd_truncate(struct pci_dev *dev, size_t size);
710 738
711/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ 739/* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */
712void pci_bus_assign_resources(struct pci_bus *bus); 740void pci_bus_assign_resources(const struct pci_bus *bus);
713void pci_bus_size_bridges(struct pci_bus *bus); 741void pci_bus_size_bridges(struct pci_bus *bus);
714int pci_claim_resource(struct pci_dev *, int); 742int pci_claim_resource(struct pci_dev *, int);
715void pci_assign_unassigned_resources(void); 743void pci_assign_unassigned_resources(void);
@@ -790,7 +818,7 @@ struct msix_entry {
790 818
791 819
792#ifndef CONFIG_PCI_MSI 820#ifndef CONFIG_PCI_MSI
793static inline int pci_enable_msi(struct pci_dev *dev) 821static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
794{ 822{
795 return -1; 823 return -1;
796} 824}
@@ -800,6 +828,10 @@ static inline void pci_msi_shutdown(struct pci_dev *dev)
800static inline void pci_disable_msi(struct pci_dev *dev) 828static inline void pci_disable_msi(struct pci_dev *dev)
801{ } 829{ }
802 830
831static inline int pci_msix_table_size(struct pci_dev *dev)
832{
833 return 0;
834}
803static inline int pci_enable_msix(struct pci_dev *dev, 835static inline int pci_enable_msix(struct pci_dev *dev,
804 struct msix_entry *entries, int nvec) 836 struct msix_entry *entries, int nvec)
805{ 837{
@@ -821,9 +853,10 @@ static inline int pci_msi_enabled(void)
821 return 0; 853 return 0;
822} 854}
823#else 855#else
824extern int pci_enable_msi(struct pci_dev *dev); 856extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec);
825extern void pci_msi_shutdown(struct pci_dev *dev); 857extern void pci_msi_shutdown(struct pci_dev *dev);
826extern void pci_disable_msi(struct pci_dev *dev); 858extern void pci_disable_msi(struct pci_dev *dev);
859extern int pci_msix_table_size(struct pci_dev *dev);
827extern int pci_enable_msix(struct pci_dev *dev, 860extern int pci_enable_msix(struct pci_dev *dev,
828 struct msix_entry *entries, int nvec); 861 struct msix_entry *entries, int nvec);
829extern void pci_msix_shutdown(struct pci_dev *dev); 862extern void pci_msix_shutdown(struct pci_dev *dev);
@@ -842,6 +875,8 @@ static inline int pcie_aspm_enabled(void)
842extern int pcie_aspm_enabled(void); 875extern int pcie_aspm_enabled(void);
843#endif 876#endif
844 877
878#define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1)
879
845#ifdef CONFIG_HT_IRQ 880#ifdef CONFIG_HT_IRQ
846/* The functions a driver should call */ 881/* The functions a driver should call */
847int ht_create_irq(struct pci_dev *dev, int idx); 882int ht_create_irq(struct pci_dev *dev, int idx);
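pci_enable_msi_block() generalizes MSI setup to a block of vectors, with a positive return value reporting how many vectors could have been allocated instead. A hedged fallback sketch (the vector count is illustrative):

    rc = pci_enable_msi_block(pdev, 4);
    if (rc > 0)                             /* only 'rc' vectors are possible */
            rc = pci_enable_msi_block(pdev, rc);
    if (rc)
            rc = pci_enable_msi(pdev);      /* single vector, via the macro above */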
@@ -1195,5 +1230,23 @@ int pci_ext_cfg_avail(struct pci_dev *dev);
1195 1230
1196void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); 1231void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar);
1197 1232
1233#ifdef CONFIG_PCI_IOV
1234extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn);
1235extern void pci_disable_sriov(struct pci_dev *dev);
1236extern irqreturn_t pci_sriov_migration(struct pci_dev *dev);
1237#else
1238static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
1239{
1240 return -ENODEV;
1241}
1242static inline void pci_disable_sriov(struct pci_dev *dev)
1243{
1244}
1245static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev)
1246{
1247 return IRQ_NONE;
1248}
1249#endif
1250
1198#endif /* __KERNEL__ */ 1251#endif /* __KERNEL__ */
1199#endif /* LINUX_PCI_H */ 1252#endif /* LINUX_PCI_H */
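A hedged sketch of how a physical-function driver would use the new SR-IOV entry points around probe and remove ('num_vfs' and the error handling are illustrative):

    err = pci_enable_sriov(pdev, num_vfs);
    if (err)
            dev_warn(&pdev->dev, "SR-IOV enable failed: %d\n", err);

    /* on teardown */
    pci_disable_sriov(pdev);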
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index e5816dd33371..170f8b1f22db 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -526,6 +526,7 @@
526#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 526#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
527#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 527#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
528#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 528#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
529#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
529#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 530#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
530#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 531#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
531#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a 532#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a
@@ -2396,6 +2397,7 @@
2396#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c 2397#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c
2397#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 2398#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0
2398#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 2399#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1
2400#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2
2399#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 2401#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3
2400#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 2402#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5
2401#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 2403#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index 027815b4635e..e4d08c1b2e0b 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -235,7 +235,7 @@
235#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ 235#define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */
236#define PCI_PM_CTRL 4 /* PM control and status register */ 236#define PCI_PM_CTRL 4 /* PM control and status register */
237#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ 237#define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */
238#define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ 238#define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */
239#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */ 239#define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */
240#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */ 240#define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */
241#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */ 241#define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */
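The No_Soft_Reset bit is bit 3 of PMCSR, hence the mask correction from 0x0004 to 0x0008. A hedged fragment of the decision it gates ('pm' is the power-management capability offset found via pci_find_capability()):

    pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
    if (!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
            pci_restore_state(dev);         /* configuration was lost across D3hot->D0 */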
@@ -375,6 +375,7 @@
375#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ 375#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
376#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ 376#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
377#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */ 377#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
378#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
378#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ 379#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
379#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ 380#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
380#define PCI_EXP_DEVCAP 4 /* Device capabilities */ 381#define PCI_EXP_DEVCAP 4 /* Device capabilities */
@@ -487,6 +488,8 @@
487#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */ 488#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
488#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ 489#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
489#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */ 490#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */
491#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
492#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
490 493
491/* Extended Capabilities (PCI-X 2.0 and Express) */ 494/* Extended Capabilities (PCI-X 2.0 and Express) */
492#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff) 495#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -498,6 +501,7 @@
498#define PCI_EXT_CAP_ID_DSN 3 501#define PCI_EXT_CAP_ID_DSN 3
499#define PCI_EXT_CAP_ID_PWR 4 502#define PCI_EXT_CAP_ID_PWR 4
500#define PCI_EXT_CAP_ID_ARI 14 503#define PCI_EXT_CAP_ID_ARI 14
504#define PCI_EXT_CAP_ID_SRIOV 16
501 505
502/* Advanced Error Reporting */ 506/* Advanced Error Reporting */
503#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ 507#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
@@ -615,4 +619,35 @@
615#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */ 619#define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */
616#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */ 620#define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */
617 621
622/* Single Root I/O Virtualization */
623#define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */
624#define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */
625#define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */
626#define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */
627#define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */
628#define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */
629#define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */
630#define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */
631#define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */
632#define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */
633#define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */
634#define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */
635#define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */
636#define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */
637#define PCI_SRIOV_FUNC_LINK 0x12 /* Function Dependency Link */
638#define PCI_SRIOV_VF_OFFSET 0x14 /* First VF Offset */
639#define PCI_SRIOV_VF_STRIDE 0x16 /* Following VF Stride */
640#define PCI_SRIOV_VF_DID 0x1a /* VF Device ID */
641#define PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */
642#define PCI_SRIOV_SYS_PGSIZE 0x20 /* System Page Size */
643#define PCI_SRIOV_BAR 0x24 /* VF BAR0 */
644#define PCI_SRIOV_NUM_BARS 6 /* Number of VF BARs */
645#define PCI_SRIOV_VFM 0x3c /* VF Migration State Array Offset*/
646#define PCI_SRIOV_VFM_BIR(x) ((x) & 7) /* State BIR */
647#define PCI_SRIOV_VFM_OFFSET(x) ((x) & ~7) /* State Offset */
648#define PCI_SRIOV_VFM_UA 0x0 /* Inactive.Unavailable */
649#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
650#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
651#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
652
618#endif /* LINUX_PCI_REGS_H */ 653#endif /* LINUX_PCI_REGS_H */
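As a hypothetical illustration (not part of this patch), a driver could use the SR-IOV offsets added above by locating the extended capability and reading the TotalVFs field:

#include <linux/pci.h>

/* Sketch only: returns TotalVFs, or 0 if the device has no SR-IOV capability. */
static u16 example_sriov_total_vfs(struct pci_dev *dev)
{
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	u16 total = 0;

	if (pos)
		pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	return total;
}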
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index 6cd91e3f9820..b4c79545330b 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -16,29 +16,30 @@
16#define PCIE_ANY_PORT 7 16#define PCIE_ANY_PORT 7
17 17
18/* Service Type */ 18/* Service Type */
19#define PCIE_PORT_SERVICE_PME 1 /* Power Management Event */ 19#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
20#define PCIE_PORT_SERVICE_AER 2 /* Advanced Error Reporting */ 20#define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT)
21#define PCIE_PORT_SERVICE_HP 4 /* Native Hotplug */ 21#define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */
22#define PCIE_PORT_SERVICE_VC 8 /* Virtual Channel */ 22#define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT)
23#define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */
24#define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT)
25#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
26#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
23 27
24/* Root/Upstream/Downstream Port's Interrupt Mode */ 28/* Root/Upstream/Downstream Port's Interrupt Mode */
29#define PCIE_PORT_NO_IRQ (-1)
25#define PCIE_PORT_INTx_MODE 0 30#define PCIE_PORT_INTx_MODE 0
26#define PCIE_PORT_MSI_MODE 1 31#define PCIE_PORT_MSI_MODE 1
27#define PCIE_PORT_MSIX_MODE 2 32#define PCIE_PORT_MSIX_MODE 2
28 33
29struct pcie_port_service_id { 34struct pcie_port_data {
30 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ 35 int port_type; /* Type of the port */
31 __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ 36 int port_irq_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
32 __u32 class, class_mask; /* (class,subclass,prog-if) triplet */
33 __u32 port_type, service_type; /* Port Entity */
34 kernel_ulong_t driver_data;
35}; 37};
36 38
37struct pcie_device { 39struct pcie_device {
38 int irq; /* Service IRQ/MSI/MSI-X Vector */ 40 int irq; /* Service IRQ/MSI/MSI-X Vector */
39 int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */ 41 struct pci_dev *port; /* Root/Upstream/Downstream Port */
40 struct pcie_port_service_id id; /* Service ID */ 42 u32 service; /* Port service this device represents */
41 struct pci_dev *port; /* Root/Upstream/Downstream Port */
42 void *priv_data; /* Service Private Data */ 43 void *priv_data; /* Service Private Data */
43 struct device device; /* Generic Device Interface */ 44 struct device device; /* Generic Device Interface */
44}; 45};
@@ -56,10 +57,9 @@ static inline void* get_service_data(struct pcie_device *dev)
56 57
57struct pcie_port_service_driver { 58struct pcie_port_service_driver {
58 const char *name; 59 const char *name;
59 int (*probe) (struct pcie_device *dev, 60 int (*probe) (struct pcie_device *dev);
60 const struct pcie_port_service_id *id);
61 void (*remove) (struct pcie_device *dev); 61 void (*remove) (struct pcie_device *dev);
62 int (*suspend) (struct pcie_device *dev, pm_message_t state); 62 int (*suspend) (struct pcie_device *dev);
63 int (*resume) (struct pcie_device *dev); 63 int (*resume) (struct pcie_device *dev);
64 64
65 /* Service Error Recovery Handler */ 65 /* Service Error Recovery Handler */
@@ -68,7 +68,9 @@ struct pcie_port_service_driver {
68 /* Link Reset Capability - AER service driver specific */ 68 /* Link Reset Capability - AER service driver specific */
69 pci_ers_result_t (*reset_link) (struct pci_dev *dev); 69 pci_ers_result_t (*reset_link) (struct pci_dev *dev);
70 70
71 const struct pcie_port_service_id *id_table; 71 int port_type; /* Type of the port this driver can handle */
72 u32 service; /* Port service this device represents */
73
72 struct device_driver driver; 74 struct device_driver driver;
73}; 75};
74#define to_service_driver(d) \ 76#define to_service_driver(d) \
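For illustration, a service driver written against the reworked interface above declares its port type and service bit directly and uses the single-argument probe; the names below are hypothetical and not part of this patch:

static int example_probe(struct pcie_device *dev)
{
	dev_info(&dev->device, "bound to service %#x\n", dev->service);
	return 0;
}

static void example_remove(struct pcie_device *dev)
{
}

static struct pcie_port_service_driver example_service_driver = {
	.name		= "example_service",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_AER,
	.probe		= example_probe,
	.remove		= example_remove,
};

A real driver would then pass this structure to pcie_port_service_register() from its module init.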
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 9f31683728fd..6729f7dcd60e 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -17,6 +17,9 @@
17 */ 17 */
18#define TIMER_ENTRY_STATIC ((void *) 0x74737461) 18#define TIMER_ENTRY_STATIC ((void *) 0x74737461)
19 19
20/********** mm/debug-pagealloc.c **********/
21#define PAGE_POISON 0xaa
22
20/********** mm/slab.c **********/ 23/********** mm/slab.c **********/
21/* 24/*
22 * Magic nums for obj red zoning. 25 * Magic nums for obj red zoning.
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 8ff25e0e7f7a..594c494ac3f0 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -73,6 +73,8 @@ enum power_supply_property {
73 POWER_SUPPLY_PROP_VOLTAGE_AVG, 73 POWER_SUPPLY_PROP_VOLTAGE_AVG,
74 POWER_SUPPLY_PROP_CURRENT_NOW, 74 POWER_SUPPLY_PROP_CURRENT_NOW,
75 POWER_SUPPLY_PROP_CURRENT_AVG, 75 POWER_SUPPLY_PROP_CURRENT_AVG,
76 POWER_SUPPLY_PROP_POWER_NOW,
77 POWER_SUPPLY_PROP_POWER_AVG,
76 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 78 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
77 POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN, 79 POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN,
78 POWER_SUPPLY_PROP_CHARGE_FULL, 80 POWER_SUPPLY_PROP_CHARGE_FULL,
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 98b93ca4db06..67c15653fc23 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -94,6 +94,7 @@ extern void ptrace_notify(int exit_code);
94extern void __ptrace_link(struct task_struct *child, 94extern void __ptrace_link(struct task_struct *child,
95 struct task_struct *new_parent); 95 struct task_struct *new_parent);
96extern void __ptrace_unlink(struct task_struct *child); 96extern void __ptrace_unlink(struct task_struct *child);
97extern void exit_ptrace(struct task_struct *tracer);
97extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); 98extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
98#define PTRACE_MODE_READ 1 99#define PTRACE_MODE_READ 1
99#define PTRACE_MODE_ATTACH 2 100#define PTRACE_MODE_ATTACH 2
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 3945f803d514..7c775751392c 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -28,4 +28,4 @@ int pwm_enable(struct pwm_device *pwm);
28 */ 28 */
29void pwm_disable(struct pwm_device *pwm); 29void pwm_disable(struct pwm_device *pwm);
30 30
31#endif /* __ASM_ARCH_PWM_H */ 31#endif /* __LINUX_PWM_H */
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
deleted file mode 100644
index e98900671ca9..000000000000
--- a/include/linux/raid/bitmap.h
+++ /dev/null
@@ -1,288 +0,0 @@
1/*
2 * bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
3 *
4 * additions: Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
5 */
6#ifndef BITMAP_H
7#define BITMAP_H 1
8
9#define BITMAP_MAJOR_LO 3
10/* version 4 insists the bitmap is in little-endian order
11 * with version 3, it is host-endian which is non-portable
12 */
13#define BITMAP_MAJOR_HI 4
14#define BITMAP_MAJOR_HOSTENDIAN 3
15
16#define BITMAP_MINOR 39
17
18/*
19 * in-memory bitmap:
20 *
21 * Use 16 bit block counters to track pending writes to each "chunk".
22 * The 2 high order bits are special-purpose, the first is a flag indicating
23 * whether a resync is needed. The second is a flag indicating whether a
24 * resync is active.
25 * This means that the counter is actually 14 bits:
26 *
27 * +--------+--------+------------------------------------------------+
28 * | resync | resync | counter |
29 * | needed | active | |
30 * | (0-1) | (0-1) | (0-16383) |
31 * +--------+--------+------------------------------------------------+
32 *
33 * The "resync needed" bit is set when:
34 * a '1' bit is read from storage at startup.
35 * a write request fails on some drives
36 * a resync is aborted on a chunk with 'resync active' set
37 * It is cleared (and resync-active set) when a resync starts across all drives
38 * of the chunk.
39 *
40 *
41 * The "resync active" bit is set when:
42 * a resync is started on all drives, and resync_needed is set.
43 * resync_needed will be cleared (as long as resync_active wasn't already set).
44 * It is cleared when a resync completes.
45 *
46 * The counter counts pending write requests, plus the on-disk bit.
47 * When the counter is '1' and the resync bits are clear, the on-disk
48 * bit can be cleared as well, thus setting the counter to 0.
49 * When we set a bit, or in the counter (to start a write), if the field is
50 * 0, we first set the disk bit and set the counter to 1.
51 *
52 * If the counter is 0, the on-disk bit is clear and the stripe is clean
53 * Anything that dirties the stripe pushes the counter to 2 (at least)
54 * and sets the on-disk bit (lazily).
55 * If a periodic sweep finds the counter at 2, it is decremented to 1.
56 * If the sweep finds the counter at 1, the on-disk bit is cleared and the
57 * counter goes to zero.
58 *
59 * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block
60 * counters as a fallback when "page" memory cannot be allocated:
61 *
62 * Normal case (page memory allocated):
63 *
64 * page pointer (32-bit)
65 *
66 * [ ] ------+
67 * |
68 * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters)
69 * c1 c2 c2048
70 *
71 * Hijacked case (page memory allocation failed):
72 *
73 * hijacked page pointer (32-bit)
74 *
75 * [ ][ ] (no page memory allocated)
76 * counter #1 (16-bit) counter #2 (16-bit)
77 *
78 */
79
80#ifdef __KERNEL__
81
82#define PAGE_BITS (PAGE_SIZE << 3)
83#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
84
85typedef __u16 bitmap_counter_t;
86#define COUNTER_BITS 16
87#define COUNTER_BIT_SHIFT 4
88#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
89#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
90
91#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
92#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))
93#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)
94#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
95#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
96#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
97
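A small worked example of the counter layout described above (illustrative only, not from the original header):

	bitmap_counter_t c = NEEDED_MASK | 3;	/* "resync needed" flag set, 3 pending writes */
	/* NEEDED(c) != 0, RESYNC(c) == 0, COUNTER(c) == 3 */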
98/* how many counters per page? */
99#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS)
100/* same, except a shift value for more efficient bitops */
101#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT)
102/* same, except a mask value for more efficient bitops */
103#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1)
104
105#define BITMAP_BLOCK_SIZE 512
106#define BITMAP_BLOCK_SHIFT 9
107
108/* how many blocks per chunk? (this is variable) */
109#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT)
110#define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT)
111#define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1)
112
113/* when hijacked, the counters and bits represent even larger "chunks" */
114/* there will be 1024 chunks represented by each counter in the page pointers */
115#define PAGEPTR_BLOCK_RATIO(bitmap) \
116 (CHUNK_BLOCK_RATIO(bitmap) << PAGE_COUNTER_SHIFT >> 1)
117#define PAGEPTR_BLOCK_SHIFT(bitmap) \
118 (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1)
119#define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1)
120
121/*
122 * on-disk bitmap:
123 *
124 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
125 * file a page at a time. There's a superblock at the start of the file.
126 */
127
128/* map chunks (bits) to file pages - offset by the size of the superblock */
129#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3))
130
131#endif
132
133/*
134 * bitmap structures:
135 */
136
137#define BITMAP_MAGIC 0x6d746962
138
139/* use these for bitmap->flags and bitmap->sb->state bit-fields */
140enum bitmap_state {
141 BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */
142 BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */
143 BITMAP_HOSTENDIAN = 0x8000,
144};
145
146/* the superblock at the front of the bitmap file -- little endian */
147typedef struct bitmap_super_s {
148 __le32 magic; /* 0 BITMAP_MAGIC */
149 __le32 version; /* 4 the bitmap major for now, could change... */
150 __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */
151 __le64 events; /* 24 event counter for the bitmap (1)*/
152 __le64 events_cleared;/*32 event counter when last bit cleared (2) */
153 __le64 sync_size; /* 40 the size of the md device's sync range(3) */
154 __le32 state; /* 48 bitmap state information */
155 __le32 chunksize; /* 52 the bitmap chunk size in bytes */
156 __le32 daemon_sleep; /* 56 seconds between disk flushes */
157 __le32 write_behind; /* 60 number of outstanding write-behind writes */
158
159 __u8 pad[256 - 64]; /* set to zero */
160} bitmap_super_t;
161
162/* notes:
163 * (1) This event counter is updated before the eventcounter in the md superblock
164 * When a bitmap is loaded, it is only accepted if this event counter is equal
165 * to, or one greater than, the event counter in the superblock.
166 * (2) This event counter is updated when the other one is *if*and*only*if* the
167 * array is not degraded. As bits are not cleared when the array is degraded,
168 * this represents the last time that any bits were cleared.
169 * If a device is being added that has an event count with this value or
170 * higher, it is accepted as conforming to the bitmap.
171 * (3) This is the number of sectors represented by the bitmap, and is the range that
172 * resync happens across. For raid1 and raid5/6 it is the size of individual
173 * devices. For raid10 it is the size of the array.
174 */
175
176#ifdef __KERNEL__
177
178/* the in-memory bitmap is represented by bitmap_pages */
179struct bitmap_page {
180 /*
181 * map points to the actual memory page
182 */
183 char *map;
184 /*
185 * in emergencies (when map cannot be alloced), hijack the map
186 * pointer and use it as two counters itself
187 */
188 unsigned int hijacked:1;
189 /*
190 * count of dirty bits on the page
191 */
192 unsigned int count:31;
193};
194
195/* keep track of bitmap file pages that have pending writes on them */
196struct page_list {
197 struct list_head list;
198 struct page *page;
199};
200
201/* the main bitmap structure - one per mddev */
202struct bitmap {
203 struct bitmap_page *bp;
204 unsigned long pages; /* total number of pages in the bitmap */
205 unsigned long missing_pages; /* number of pages not yet allocated */
206
207 mddev_t *mddev; /* the md device that the bitmap is for */
208
209 int counter_bits; /* how many bits per block counter */
210
211 /* bitmap chunksize -- how much data does each bit represent? */
212 unsigned long chunksize;
213 unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
214 unsigned long chunks; /* total number of data chunks for the array */
215
216 /* We hold a count on the chunk currently being synced, and drop
217 * it when the last block is started. If the resync is aborted
218 * midway, we need to be able to drop that count, so we remember
219 * the counted chunk..
220 */
221 unsigned long syncchunk;
222
223 __u64 events_cleared;
224 int need_sync;
225
226 /* bitmap spinlock */
227 spinlock_t lock;
228
229 long offset; /* offset from superblock if file is NULL */
230 struct file *file; /* backing disk file */
231 struct page *sb_page; /* cached copy of the bitmap file superblock */
232 struct page **filemap; /* list of cache pages for the file */
233 unsigned long *filemap_attr; /* attributes associated w/ filemap pages */
234 unsigned long file_pages; /* number of pages in the file */
235 int last_page_size; /* bytes in the last page */
236
237 unsigned long flags;
238
239 int allclean;
240
241 unsigned long max_write_behind; /* write-behind mode */
242 atomic_t behind_writes;
243
244 /*
245 * the bitmap daemon - periodically wakes up and sweeps the bitmap
246 * file, cleaning up bits and flushing out pages to disk as necessary
247 */
248 unsigned long daemon_lastrun; /* jiffies of last run */
249 unsigned long daemon_sleep; /* how many seconds between updates? */
250 unsigned long last_end_sync; /* when we lasted called end_sync to
251 * update bitmap with resync progress */
252
253 atomic_t pending_writes; /* pending writes to the bitmap file */
254 wait_queue_head_t write_wait;
255 wait_queue_head_t overflow_wait;
256
257};
258
259/* the bitmap API */
260
261/* these are used only by md/bitmap */
262int bitmap_create(mddev_t *mddev);
263void bitmap_flush(mddev_t *mddev);
264void bitmap_destroy(mddev_t *mddev);
265
266void bitmap_print_sb(struct bitmap *bitmap);
267void bitmap_update_sb(struct bitmap *bitmap);
268
269int bitmap_setallbits(struct bitmap *bitmap);
270void bitmap_write_all(struct bitmap *bitmap);
271
272void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
273
274/* these are exported */
275int bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
276 unsigned long sectors, int behind);
277void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
278 unsigned long sectors, int success, int behind);
279int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded);
280void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
281void bitmap_close_sync(struct bitmap *bitmap);
282void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
283
284void bitmap_unplug(struct bitmap *bitmap);
285void bitmap_daemon_work(struct bitmap *bitmap);
286#endif
287
288#endif
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h
deleted file mode 100644
index f38b9c586afb..000000000000
--- a/include/linux/raid/linear.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _LINEAR_H
2#define _LINEAR_H
3
4#include <linux/raid/md.h>
5
6struct dev_info {
7 mdk_rdev_t *rdev;
8 sector_t num_sectors;
9 sector_t start_sector;
10};
11
12typedef struct dev_info dev_info_t;
13
14struct linear_private_data
15{
16 struct linear_private_data *prev; /* earlier version */
17 dev_info_t **hash_table;
18 sector_t spacing;
19 sector_t array_sectors;
20 int sector_shift; /* shift before dividing
21 * by spacing
22 */
23 dev_info_t disks[0];
24};
25
26
27typedef struct linear_private_data linear_conf_t;
28
29#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private)
30
31#endif
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
deleted file mode 100644
index 82bea14cae1a..000000000000
--- a/include/linux/raid/md.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 md.h : Multiple Devices driver for Linux
3 Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
4 Copyright (C) 1994-96 Marc ZYNGIER
5 <zyngier@ufr-info-p7.ibp.fr> or
6 <maz@gloups.fdn.fr>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 You should have received a copy of the GNU General Public License
14 (for example /usr/src/linux/COPYING); if not, write to the Free
15 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
16*/
17
18#ifndef _MD_H
19#define _MD_H
20
21#include <linux/blkdev.h>
22#include <linux/seq_file.h>
23
24/*
25 * 'md_p.h' holds the 'physical' layout of RAID devices
26 * 'md_u.h' holds the user <=> kernel API
27 *
28 * 'md_k.h' holds kernel internal definitions
29 */
30
31#include <linux/raid/md_p.h>
32#include <linux/raid/md_u.h>
33#include <linux/raid/md_k.h>
34
35#ifdef CONFIG_MD
36
37/*
38 * Different major versions are not compatible.
39 * Different minor versions are only downward compatible.
40 * Different patchlevel versions are downward and upward compatible.
41 */
42#define MD_MAJOR_VERSION 0
43#define MD_MINOR_VERSION 90
44/*
45 * MD_PATCHLEVEL_VERSION indicates kernel functionality.
46 * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
47 * and major_version/minor_version accordingly
48 * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
49 * in the super status byte
50 * >=3 means that bitmap superblock version 4 is supported, which uses
51 * little-endian representation rather than host-endian
52 */
53#define MD_PATCHLEVEL_VERSION 3
54
55extern int mdp_major;
56
57extern int register_md_personality(struct mdk_personality *p);
58extern int unregister_md_personality(struct mdk_personality *p);
59extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
60 mddev_t *mddev, const char *name);
61extern void md_unregister_thread(mdk_thread_t *thread);
62extern void md_wakeup_thread(mdk_thread_t *thread);
63extern void md_check_recovery(mddev_t *mddev);
64extern void md_write_start(mddev_t *mddev, struct bio *bi);
65extern void md_write_end(mddev_t *mddev);
66extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
67extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
68
69extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
70 sector_t sector, int size, struct page *page);
71extern void md_super_wait(mddev_t *mddev);
72extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
73 struct page *page, int rw);
74extern void md_do_sync(mddev_t *mddev);
75extern void md_new_event(mddev_t *mddev);
76extern int md_allow_write(mddev_t *mddev);
77extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
78
79#endif /* CONFIG_MD */
80#endif
81
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
deleted file mode 100644
index 9743e4dbc918..000000000000
--- a/include/linux/raid/md_k.h
+++ /dev/null
@@ -1,402 +0,0 @@
1/*
2 md_k.h : kernel internal structure of the Linux MD driver
3 Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2, or (at your option)
8 any later version.
9
10 You should have received a copy of the GNU General Public License
11 (for example /usr/src/linux/COPYING); if not, write to the Free
12 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
13*/
14
15#ifndef _MD_K_H
16#define _MD_K_H
17
18/* and dm-bio-list.h is not under include/linux because.... ??? */
19#include "../../../drivers/md/dm-bio-list.h"
20
21#ifdef CONFIG_BLOCK
22
23#define LEVEL_MULTIPATH (-4)
24#define LEVEL_LINEAR (-1)
25#define LEVEL_FAULTY (-5)
26
27/* we need a value for 'no level specified' and 0
28 * means 'raid0', so we need something else. This is
29 * for internal use only
30 */
31#define LEVEL_NONE (-1000000)
32
33#define MaxSector (~(sector_t)0)
34
35typedef struct mddev_s mddev_t;
36typedef struct mdk_rdev_s mdk_rdev_t;
37
38/*
39 * options passed in raidrun:
40 */
41
42/* Currently this must fit in an 'int' */
43#define MAX_CHUNK_SIZE (1<<30)
44
45/*
46 * MD's 'extended' device
47 */
48struct mdk_rdev_s
49{
50 struct list_head same_set; /* RAID devices within the same set */
51
52 sector_t size; /* Device size (in blocks) */
53 mddev_t *mddev; /* RAID array if running */
54 long last_events; /* IO event timestamp */
55
56 struct block_device *bdev; /* block device handle */
57
58 struct page *sb_page;
59 int sb_loaded;
60 __u64 sb_events;
61 sector_t data_offset; /* start of data in array */
62 sector_t sb_start; /* offset of the super block (in 512byte sectors) */
63 int sb_size; /* bytes in the superblock */
64 int preferred_minor; /* autorun support */
65
66 struct kobject kobj;
67
68 /* A device can be in one of three states based on two flags:
69 * Not working: faulty==1 in_sync==0
70 * Fully working: faulty==0 in_sync==1
71 * Working, but not
72 * in sync with array
73 * faulty==0 in_sync==0
74 *
75 * It can never have faulty==1, in_sync==1
76 * This reduces the burden of testing multiple flags in many cases
77 */
78
79 unsigned long flags;
80#define Faulty 1 /* device is known to have a fault */
81#define In_sync 2 /* device is in_sync with rest of array */
82#define WriteMostly 4 /* Avoid reading if at all possible */
83#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */
84#define AllReserved 6 /* If whole device is reserved for
85 * one array */
86#define AutoDetected 7 /* added by auto-detect */
87#define Blocked 8 /* An error occurred on an externally
88 * managed array, don't allow writes
89 * until it is cleared */
90#define StateChanged 9 /* Faulty or Blocked has changed during
91 * interrupt, so it needs to be
92 * notified by the thread */
93 wait_queue_head_t blocked_wait;
94
95 int desc_nr; /* descriptor index in the superblock */
96 int raid_disk; /* role of device in array */
97 int saved_raid_disk; /* role that device used to have in the
98 * array and could again if we did a partial
99 * resync from the bitmap
100 */
101 sector_t recovery_offset;/* If this device has been partially
102 * recovered, this is where we were
103 * up to.
104 */
105
106 atomic_t nr_pending; /* number of pending requests.
107 * only maintained for arrays that
108 * support hot removal
109 */
110 atomic_t read_errors; /* number of consecutive read errors that
111 * we have tried to ignore.
112 */
113 atomic_t corrected_errors; /* number of corrected read errors,
114 * for reporting to userspace and storing
115 * in superblock.
116 */
117 struct work_struct del_work; /* used for delayed sysfs removal */
118
119 struct sysfs_dirent *sysfs_state; /* handle for 'state'
120 * sysfs entry */
121};
122
123struct mddev_s
124{
125 void *private;
126 struct mdk_personality *pers;
127 dev_t unit;
128 int md_minor;
129 struct list_head disks;
130 unsigned long flags;
131#define MD_CHANGE_DEVS 0 /* Some device status has changed */
132#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
133#define MD_CHANGE_PENDING 2 /* superblock update in progress */
134
135 int ro;
136
137 struct gendisk *gendisk;
138
139 struct kobject kobj;
140 int hold_active;
141#define UNTIL_IOCTL 1
142#define UNTIL_STOP 2
143
144 /* Superblock information */
145 int major_version,
146 minor_version,
147 patch_version;
148 int persistent;
149 int external; /* metadata is
150 * managed externally */
151 char metadata_type[17]; /* externally set*/
152 int chunk_size;
153 time_t ctime, utime;
154 int level, layout;
155 char clevel[16];
156 int raid_disks;
157 int max_disks;
158 sector_t size; /* used size of component devices */
159 sector_t array_sectors; /* exported array size */
160 __u64 events;
161
162 char uuid[16];
163
164 /* If the array is being reshaped, we need to record the
165 * new shape and an indication of where we are up to.
166 * This is written to the superblock.
167 * If reshape_position is MaxSector, then no reshape is happening (yet).
168 */
169 sector_t reshape_position;
170 int delta_disks, new_level, new_layout, new_chunk;
171
172 struct mdk_thread_s *thread; /* management thread */
173 struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
174 sector_t curr_resync; /* last block scheduled */
175 unsigned long resync_mark; /* a recent timestamp */
176 sector_t resync_mark_cnt;/* blocks written at resync_mark */
177 sector_t curr_mark_cnt; /* blocks scheduled now */
178
179 sector_t resync_max_sectors; /* may be set by personality */
180
181 sector_t resync_mismatches; /* count of sectors where
182 * parity/replica mismatch found
183 */
184
185 /* allow user-space to request suspension of IO to regions of the array */
186 sector_t suspend_lo;
187 sector_t suspend_hi;
188 /* if zero, use the system-wide default */
189 int sync_speed_min;
190 int sync_speed_max;
191
192 /* resync even though the same disks are shared among md-devices */
193 int parallel_resync;
194
195 int ok_start_degraded;
196 /* recovery/resync flags
197 * NEEDED: we might need to start a resync/recover
198 * RUNNING: a thread is running, or about to be started
199 * SYNC: actually doing a resync, not a recovery
200 * RECOVER: doing recovery, or need to try it.
201 * INTR: resync needs to be aborted for some reason
202 * DONE: thread is done and is waiting to be reaped
203 * REQUEST: user-space has requested a sync (used with SYNC)
204 * CHECK: user-space request for check-only, no repair
205 * RESHAPE: A reshape is happening
206 *
207 * If neither SYNC nor RESHAPE is set, then it is a recovery.
208 */
209#define MD_RECOVERY_RUNNING 0
210#define MD_RECOVERY_SYNC 1
211#define MD_RECOVERY_RECOVER 2
212#define MD_RECOVERY_INTR 3
213#define MD_RECOVERY_DONE 4
214#define MD_RECOVERY_NEEDED 5
215#define MD_RECOVERY_REQUESTED 6
216#define MD_RECOVERY_CHECK 7
217#define MD_RECOVERY_RESHAPE 8
218#define MD_RECOVERY_FROZEN 9
219
220 unsigned long recovery;
221 int recovery_disabled; /* if we detect that recovery
222 * will always fail, set this
223 * so we don't loop trying */
224
225 int in_sync; /* know to not need resync */
226 struct mutex reconfig_mutex;
227 atomic_t active; /* general refcount */
228 atomic_t openers; /* number of active opens */
229
230 int changed; /* true if we might need to reread partition info */
231 int degraded; /* whether md should consider
232 * adding a spare
233 */
234 int barriers_work; /* initialised to true, cleared as soon
235 * as a barrier request to slave
236 * fails. Only supported
237 */
238 struct bio *biolist; /* bios that need to be retried
239 * because BIO_RW_BARRIER is not supported
240 */
241
242 atomic_t recovery_active; /* blocks scheduled, but not written */
243 wait_queue_head_t recovery_wait;
244 sector_t recovery_cp;
245 sector_t resync_min; /* user requested sync
246 * starts here */
247 sector_t resync_max; /* resync should pause
248 * when it gets here */
249
250 struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
251 * file in sysfs.
252 */
253 struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */
254
255 struct work_struct del_work; /* used for delayed sysfs removal */
256
257 spinlock_t write_lock;
258 wait_queue_head_t sb_wait; /* for waiting on superblock updates */
259 atomic_t pending_writes; /* number of active superblock writes */
260
261 unsigned int safemode; /* if set, update "clean" superblock
262 * when no writes pending.
263 */
264 unsigned int safemode_delay;
265 struct timer_list safemode_timer;
266 atomic_t writes_pending;
267 struct request_queue *queue; /* for plugging ... */
268
269 atomic_t write_behind; /* outstanding async IO */
270 unsigned int max_write_behind; /* 0 = sync */
271
272 struct bitmap *bitmap; /* the bitmap for the device */
273 struct file *bitmap_file; /* the bitmap file */
274 long bitmap_offset; /* offset from superblock of
275 * start of bitmap. May be
276 * negative, but not '0'
277 */
278 long default_bitmap_offset; /* this is the offset to use when
279 * hot-adding a bitmap. It should
280 * eventually be settable by sysfs.
281 */
282
283 struct list_head all_mddevs;
284};
285
286
287static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
288{
289 int faulty = test_bit(Faulty, &rdev->flags);
290 if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
291 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
292}
293
294static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
295{
296 atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
297}
298
299struct mdk_personality
300{
301 char *name;
302 int level;
303 struct list_head list;
304 struct module *owner;
305 int (*make_request)(struct request_queue *q, struct bio *bio);
306 int (*run)(mddev_t *mddev);
307 int (*stop)(mddev_t *mddev);
308 void (*status)(struct seq_file *seq, mddev_t *mddev);
309 /* error_handler must set ->faulty and clear ->in_sync
310 * if appropriate, and should abort recovery if needed
311 */
312 void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
313 int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
314 int (*hot_remove_disk) (mddev_t *mddev, int number);
315 int (*spare_active) (mddev_t *mddev);
316 sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
317 int (*resize) (mddev_t *mddev, sector_t sectors);
318 int (*check_reshape) (mddev_t *mddev);
319 int (*start_reshape) (mddev_t *mddev);
320 int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
321 /* quiesce moves between quiescence states
322 * 0 - fully active
323 * 1 - no new requests allowed
324 * others - reserved
325 */
326 void (*quiesce) (mddev_t *mddev, int state);
327};
328
329
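To make the role of this ops table concrete, here is a hypothetical skeleton of a personality (stub callbacks and a made-up level number; none of this is in the original header). A real module would pass it to register_md_personality() from its init function:

static int example_make_request(struct request_queue *q, struct bio *bio)
{
	bio_endio(bio, 0);	/* complete immediately; a real personality queues I/O */
	return 0;
}
static int example_run(mddev_t *mddev)  { return 0; }
static int example_stop(mddev_t *mddev) { return 0; }

static struct mdk_personality example_personality = {
	.name		= "example",
	.level		= -9,		/* hypothetical, unused level number */
	.owner		= THIS_MODULE,
	.make_request	= example_make_request,
	.run		= example_run,
	.stop		= example_stop,
};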
330struct md_sysfs_entry {
331 struct attribute attr;
332 ssize_t (*show)(mddev_t *, char *);
333 ssize_t (*store)(mddev_t *, const char *, size_t);
334};
335
336
337static inline char * mdname (mddev_t * mddev)
338{
339 return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
340}
341
342/*
343 * iterates through some rdev ringlist. It's safe to remove the
344 * current 'rdev'. Don't touch 'tmp' though.
345 */
346#define rdev_for_each_list(rdev, tmp, head) \
347 list_for_each_entry_safe(rdev, tmp, head, same_set)
348
349/*
350 * iterates through the 'same array disks' ringlist
351 */
352#define rdev_for_each(rdev, tmp, mddev) \
353 list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
354
355#define rdev_for_each_rcu(rdev, mddev) \
356 list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
357
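A hypothetical use of the iterators above (not from the original header); the extra tmp is required because the underlying list_for_each_entry_safe() tolerates removal of the current entry:

	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev)
		printk(KERN_DEBUG "%s: member disk in role %d\n",
		       mdname(mddev), rdev->raid_disk);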
358typedef struct mdk_thread_s {
359 void (*run) (mddev_t *mddev);
360 mddev_t *mddev;
361 wait_queue_head_t wqueue;
362 unsigned long flags;
363 struct task_struct *tsk;
364 unsigned long timeout;
365} mdk_thread_t;
366
367#define THREAD_WAKEUP 0
368
369#define __wait_event_lock_irq(wq, condition, lock, cmd) \
370do { \
371 wait_queue_t __wait; \
372 init_waitqueue_entry(&__wait, current); \
373 \
374 add_wait_queue(&wq, &__wait); \
375 for (;;) { \
376 set_current_state(TASK_UNINTERRUPTIBLE); \
377 if (condition) \
378 break; \
379 spin_unlock_irq(&lock); \
380 cmd; \
381 schedule(); \
382 spin_lock_irq(&lock); \
383 } \
384 current->state = TASK_RUNNING; \
385 remove_wait_queue(&wq, &__wait); \
386} while (0)
387
388#define wait_event_lock_irq(wq, condition, lock, cmd) \
389do { \
390 if (condition) \
391 break; \
392 __wait_event_lock_irq(wq, condition, lock, cmd); \
393} while (0)
394
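Illustrative usage of wait_event_lock_irq(), modelled loosely on how md waits for a pending superblock update (a sketch, not copied from the kernel): the caller holds the spinlock, and the macro drops it, runs cmd, and sleeps until the condition holds:

	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !test_bit(MD_CHANGE_PENDING, &mddev->flags),
			    mddev->write_lock,
			    md_wakeup_thread(mddev->thread));
	spin_unlock_irq(&mddev->write_lock);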
395static inline void safe_put_page(struct page *p)
396{
397 if (p) put_page(p);
398}
399
400#endif /* CONFIG_BLOCK */
401#endif
402
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
index 7192035fc4b0..fb1abb3367e9 100644
--- a/include/linux/raid/md_u.h
+++ b/include/linux/raid/md_u.h
@@ -15,6 +15,24 @@
15#ifndef _MD_U_H 15#ifndef _MD_U_H
16#define _MD_U_H 16#define _MD_U_H
17 17
18/*
19 * Different major versions are not compatible.
20 * Different minor versions are only downward compatible.
21 * Different patchlevel versions are downward and upward compatible.
22 */
23#define MD_MAJOR_VERSION 0
24#define MD_MINOR_VERSION 90
25/*
26 * MD_PATCHLEVEL_VERSION indicates kernel functionality.
27 * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
28 * and major_version/minor_version accordingly
29 * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
30 * in the super status byte
31 * >=3 means that bitmap superblock version 4 is supported, which uses
32 * little-endian representation rather than host-endian
33 */
34#define MD_PATCHLEVEL_VERSION 3
35
18/* ioctls */ 36/* ioctls */
19 37
20/* status */ 38/* status */
@@ -46,6 +64,12 @@
46#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) 64#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
47#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) 65#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
48 66
67/* 63 partitions with the alternate major number (mdp) */
68#define MdpMinorShift 6
69#ifdef __KERNEL__
70extern int mdp_major;
71#endif
72
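A sketch (not part of this header) of how MdpMinorShift splits the minor number of a partitionable-md (mdp) device node, given some dev_t dev:

	int unit      = MINOR(dev) >> MdpMinorShift;		  /* which mdp array */
	int partition = MINOR(dev) & ((1 << MdpMinorShift) - 1); /* partition within it */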
49typedef struct mdu_version_s { 73typedef struct mdu_version_s {
50 int major; 74 int major;
51 int minor; 75 int minor;
@@ -85,6 +109,17 @@ typedef struct mdu_array_info_s {
85 109
86} mdu_array_info_t; 110} mdu_array_info_t;
87 111
112/* non-obvious values for 'level' */
113#define LEVEL_MULTIPATH (-4)
114#define LEVEL_LINEAR (-1)
115#define LEVEL_FAULTY (-5)
116
117/* we need a value for 'no level specified' and 0
118 * means 'raid0', so we need something else. This is
119 * for internal use only
120 */
121#define LEVEL_NONE (-1000000)
122
88typedef struct mdu_disk_info_s { 123typedef struct mdu_disk_info_s {
89 /* 124 /*
90 * configuration/status of one particular disk 125 * configuration/status of one particular disk
diff --git a/include/linux/raid/multipath.h b/include/linux/raid/multipath.h
deleted file mode 100644
index 6f53fc177a47..000000000000
--- a/include/linux/raid/multipath.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef _MULTIPATH_H
2#define _MULTIPATH_H
3
4#include <linux/raid/md.h>
5
6struct multipath_info {
7 mdk_rdev_t *rdev;
8};
9
10struct multipath_private_data {
11 mddev_t *mddev;
12 struct multipath_info *multipaths;
13 int raid_disks;
14 int working_disks;
15 spinlock_t device_lock;
16 struct list_head retry_list;
17
18 mempool_t *pool;
19};
20
21typedef struct multipath_private_data multipath_conf_t;
22
23/*
24 * this is the only point in the RAID code where we violate
25 * C type safety. mddev->private is an 'opaque' pointer.
26 */
27#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private)
28
29/*
30 * this is our 'private' 'collective' MULTIPATH buffer head.
31 * it contains information about what kind of IO operations were started
32 * for this MULTIPATH operation, and about their status:
33 */
34
35struct multipath_bh {
36 mddev_t *mddev;
37 struct bio *master_bio;
38 struct bio bio;
39 int path;
40 struct list_head retry_list;
41};
42#endif
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
new file mode 100644
index 000000000000..d92480f8285c
--- /dev/null
+++ b/include/linux/raid/pq.h
@@ -0,0 +1,132 @@
1/* -*- linux-c -*- ------------------------------------------------------- *
2 *
3 * Copyright 2003 H. Peter Anvin - All Rights Reserved
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
8 * Boston MA 02111-1307, USA; either version 2 of the License, or
9 * (at your option) any later version; incorporated herein by reference.
10 *
11 * ----------------------------------------------------------------------- */
12
13#ifndef LINUX_RAID_RAID6_H
14#define LINUX_RAID_RAID6_H
15
16#ifdef __KERNEL__
17
18/* Set to 1 to use kernel-wide empty_zero_page */
19#define RAID6_USE_EMPTY_ZERO_PAGE 0
20#include <linux/blkdev.h>
21
22/* We need a pre-zeroed page... if we don't want to use the kernel-provided
23 one define it here */
24#if RAID6_USE_EMPTY_ZERO_PAGE
25# define raid6_empty_zero_page empty_zero_page
26#else
27extern const char raid6_empty_zero_page[PAGE_SIZE];
28#endif
29
30#else /* ! __KERNEL__ */
31/* Used for testing in user space */
32
33#include <errno.h>
34#include <inttypes.h>
35#include <limits.h>
36#include <stddef.h>
37#include <sys/mman.h>
38#include <sys/types.h>
39
40/* Not standard, but glibc defines it */
41#define BITS_PER_LONG __WORDSIZE
42
43typedef uint8_t u8;
44typedef uint16_t u16;
45typedef uint32_t u32;
46typedef uint64_t u64;
47
48#ifndef PAGE_SIZE
49# define PAGE_SIZE 4096
50#endif
51extern const char raid6_empty_zero_page[PAGE_SIZE];
52
53#define __init
54#define __exit
55#define __attribute_const__ __attribute__((const))
56#define noinline __attribute__((noinline))
57
58#define preempt_enable()
59#define preempt_disable()
60#define cpu_has_feature(x) 1
61#define enable_kernel_altivec()
62#define disable_kernel_altivec()
63
64#define EXPORT_SYMBOL(sym)
65#define MODULE_LICENSE(licence)
66#define subsys_initcall(x)
67#define module_exit(x)
68#endif /* __KERNEL__ */
69
70/* Routine choices */
71struct raid6_calls {
72 void (*gen_syndrome)(int, size_t, void **);
73 int (*valid)(void); /* Returns 1 if this routine set is usable */
74 const char *name; /* Name of this routine set */
75 int prefer; /* Has special performance attribute */
76};
77
78/* Selected algorithm */
79extern struct raid6_calls raid6_call;
80
81/* Algorithm list */
82extern const struct raid6_calls * const raid6_algos[];
83int raid6_select_algo(void);
84
85/* Return values from chk_syndrome */
86#define RAID6_OK 0
87#define RAID6_P_BAD 1
88#define RAID6_Q_BAD 2
89#define RAID6_PQ_BAD 3
90
91/* Galois field tables */
92extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
93extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
94extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
95extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
96
97/* Recovery routines */
98void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
99 void **ptrs);
100void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs);
101void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
102 void **ptrs);
103
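Illustrative only: the exported tables implement GF(2^8) arithmetic, so multiplying two field elements or taking an inverse is a single lookup (the recovery routines above rely on this internally):

	u8 prod = raid6_gfmul[0x1d][0x02];	/* 0x1d times 0x02 in GF(2^8); 0x3a, since doubling 0x1d does not overflow */
	u8 inv  = raid6_gfinv[0x02];		/* multiplicative inverse of 0x02 */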
104/* Some definitions to allow code to be compiled for testing in userspace */
105#ifndef __KERNEL__
106
107# define jiffies raid6_jiffies()
108# define printk printf
109# define GFP_KERNEL 0
110# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
111 PROT_READ|PROT_WRITE, \
112 MAP_PRIVATE|MAP_ANONYMOUS,\
113 0, 0))
114# define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE)
115
116static inline void cpu_relax(void)
117{
118 /* Nothing */
119}
120
121#undef HZ
122#define HZ 1000
123static inline uint32_t raid6_jiffies(void)
124{
125 struct timeval tv;
126 gettimeofday(&tv, NULL);
127 return tv.tv_sec*1000 + tv.tv_usec/1000;
128}
129
130#endif /* ! __KERNEL__ */
131
132#endif /* LINUX_RAID_RAID6_H */
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h
deleted file mode 100644
index fd42aa87c391..000000000000
--- a/include/linux/raid/raid0.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _RAID0_H
2#define _RAID0_H
3
4#include <linux/raid/md.h>
5
6struct strip_zone
7{
8 sector_t zone_start; /* Zone offset in md_dev (in sectors) */
9 sector_t dev_start; /* Zone offset in real dev (in sectors) */
10 sector_t sectors; /* Zone size in sectors */
11 int nb_dev; /* # of devices attached to the zone */
12 mdk_rdev_t **dev; /* Devices attached to the zone */
13};
14
15struct raid0_private_data
16{
17 struct strip_zone **hash_table; /* Table of indexes into strip_zone */
18 struct strip_zone *strip_zone;
19 mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
20 int nr_strip_zones;
21
22 sector_t spacing;
23 int sector_shift; /* shift this before divide by spacing */
24};
25
26typedef struct raid0_private_data raid0_conf_t;
27
28#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
29
30#endif
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
deleted file mode 100644
index 0a9ba7c3302e..000000000000
--- a/include/linux/raid/raid1.h
+++ /dev/null
@@ -1,134 +0,0 @@
1#ifndef _RAID1_H
2#define _RAID1_H
3
4#include <linux/raid/md.h>
5
6typedef struct mirror_info mirror_info_t;
7
8struct mirror_info {
9 mdk_rdev_t *rdev;
10 sector_t head_position;
11};
12
13/*
14 * memory pools need a pointer to the mddev, so they can force an unplug
15 * when memory is tight, and a count of the number of drives that the
16 * pool was allocated for, so they know how much to allocate and free.
17 * mddev->raid_disks cannot be used, as it can change while a pool is active
18 * These two datums are stored in a kmalloced struct.
19 */
20
21struct pool_info {
22 mddev_t *mddev;
23 int raid_disks;
24};
25
26
27typedef struct r1bio_s r1bio_t;
28
29struct r1_private_data_s {
30 mddev_t *mddev;
31 mirror_info_t *mirrors;
32 int raid_disks;
33 int last_used;
34 sector_t next_seq_sect;
35 spinlock_t device_lock;
36
37 struct list_head retry_list;
38 /* queue pending writes and submit them on unplug */
39 struct bio_list pending_bio_list;
40 /* queue of writes that have been unplugged */
41 struct bio_list flushing_bio_list;
42
43 /* for use when syncing mirrors: */
44
45 spinlock_t resync_lock;
46 int nr_pending;
47 int nr_waiting;
48 int nr_queued;
49 int barrier;
50 sector_t next_resync;
51 int fullsync; /* set to 1 if a full sync is needed,
52 * (fresh device added).
53 * Cleared when a sync completes.
54 */
55
56 wait_queue_head_t wait_barrier;
57
58 struct pool_info *poolinfo;
59
60 struct page *tmppage;
61
62 mempool_t *r1bio_pool;
63 mempool_t *r1buf_pool;
64};
65
66typedef struct r1_private_data_s conf_t;
67
68/*
69 * this is the only point in the RAID code where we violate
70 * C type safety. mddev->private is an 'opaque' pointer.
71 */
72#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
73
74/*
75 * this is our 'private' RAID1 bio.
76 *
77 * it contains information about what kind of IO operations were started
78 * for this RAID1 operation, and about their status:
79 */
80
81struct r1bio_s {
82 atomic_t remaining; /* 'have we finished' count,
83 * used from IRQ handlers
84 */
85 atomic_t behind_remaining; /* number of write-behind ios remaining
86 * in this BehindIO request
87 */
88 sector_t sector;
89 int sectors;
90 unsigned long state;
91 mddev_t *mddev;
92 /*
93 * original bio going to /dev/mdx
94 */
95 struct bio *master_bio;
96 /*
97 * if the IO is in READ direction, then this is where we read
98 */
99 int read_disk;
100
101 struct list_head retry_list;
102 struct bitmap_update *bitmap_update;
103 /*
104 * if the IO is in WRITE direction, then multiple bios are used.
105 * We choose the number when they are allocated.
106 */
107 struct bio *bios[0];
108 /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously allocated */
109};
110
111/* when we get a read error on a read-only array, we redirect to another
112 * device without failing the first device, or trying to over-write to
113 * correct the read error. To keep track of bad blocks on a per-bio
114 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
115 */
116#define IO_BLOCKED ((struct bio*)1)
117
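An illustrative check (not from this header) of how the sentinel is used: a slot in the bios array holding IO_BLOCKED marks a device that must not be retried for this r1bio:

	if (r1_bio->bios[disk] == IO_BLOCKED)
		continue;	/* this device already failed for this request; skip it */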
118/* bits for r1bio.state */
119#define R1BIO_Uptodate 0
120#define R1BIO_IsSync 1
121#define R1BIO_Degraded 2
122#define R1BIO_BehindIO 3
123#define R1BIO_Barrier 4
124#define R1BIO_BarrierRetry 5
125/* For write-behind requests, we call bi_end_io when
126 * the last non-write-behind device completes, providing
127 * any write was successful. Otherwise we call when
128 * any write-behind write succeeds, otherwise we call
129 * with failure when last write completes (and all failed).
130 * Record that bi_end_io was called with this flag...
131 */
132#define R1BIO_Returned 6
133
134#endif
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
deleted file mode 100644
index e9091cfeb286..000000000000
--- a/include/linux/raid/raid10.h
+++ /dev/null
@@ -1,123 +0,0 @@
1#ifndef _RAID10_H
2#define _RAID10_H
3
4#include <linux/raid/md.h>
5
6typedef struct mirror_info mirror_info_t;
7
8struct mirror_info {
9 mdk_rdev_t *rdev;
10 sector_t head_position;
11};
12
13typedef struct r10bio_s r10bio_t;
14
15struct r10_private_data_s {
16 mddev_t *mddev;
17 mirror_info_t *mirrors;
18 int raid_disks;
19 spinlock_t device_lock;
20
21 /* geometry */
22 int near_copies; /* number of copies laid out raid0 style */
23 int far_copies; /* number of copies laid out
24 * at large strides across drives
25 */
26 int far_offset; /* far_copies are offset by 1 stripe
27 * instead of many
28 */
29 int copies; /* near_copies * far_copies.
30 * must be <= raid_disks
31 */
32 sector_t stride; /* distance between far copies.
33 * This is size / far_copies unless
34 * far_offset, in which case it is
35 * 1 stripe.
36 */
37
38 int chunk_shift; /* shift from chunks to sectors */
39 sector_t chunk_mask;
40
41 struct list_head retry_list;
42 /* queue pending writes and submit them on unplug */
43 struct bio_list pending_bio_list;
44
45
46 spinlock_t resync_lock;
47 int nr_pending;
48 int nr_waiting;
49 int nr_queued;
50 int barrier;
51 sector_t next_resync;
52 int fullsync; /* set to 1 if a full sync is needed,
53 * (fresh device added).
54 * Cleared when a sync completes.
55 */
56
57 wait_queue_head_t wait_barrier;
58
59 mempool_t *r10bio_pool;
60 mempool_t *r10buf_pool;
61 struct page *tmppage;
62};
63
64typedef struct r10_private_data_s conf_t;
65
66/*
67 * this is the only point in the RAID code where we violate
68 * C type safety. mddev->private is an 'opaque' pointer.
69 */
70#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
71
72/*
73 * this is our 'private' RAID10 bio.
74 *
75 * it contains information about what kind of IO operations were started
76 * for this RAID10 operation, and about their status:
77 */
78
79struct r10bio_s {
80 atomic_t remaining; /* 'have we finished' count,
81 * used from IRQ handlers
82 */
83 sector_t sector; /* virtual sector number */
84 int sectors;
85 unsigned long state;
86 mddev_t *mddev;
87 /*
88 * original bio going to /dev/mdx
89 */
90 struct bio *master_bio;
91 /*
92 * if the IO is in READ direction, then this is where we read
93 */
94 int read_slot;
95
96 struct list_head retry_list;
97 /*
98 * if the IO is in WRITE direction, then multiple bios are used,
99 * one for each copy.
100 * When resyncing we also use one for each copy.
101 * When reconstructing, we use 2 bios, one for read, one for write.
102 * We choose the number when they are allocated.
103 */
104 struct {
105 struct bio *bio;
106 sector_t addr;
107 int devnum;
108 } devs[0];
109};
110
111/* when we get a read error on a read-only array, we redirect to another
112 * device without failing the first device, or trying to over-write to
113 * correct the read error. To keep track of bad blocks on a per-bio
114 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
115 */
116#define IO_BLOCKED ((struct bio*)1)
117
118/* bits for r10bio.state */
119#define R10BIO_Uptodate 0
120#define R10BIO_IsSync 1
121#define R10BIO_IsRecover 2
122#define R10BIO_Degraded 3
123#endif
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
deleted file mode 100644
index 3b2672792457..000000000000
--- a/include/linux/raid/raid5.h
+++ /dev/null
@@ -1,402 +0,0 @@
1#ifndef _RAID5_H
2#define _RAID5_H
3
4#include <linux/raid/md.h>
5#include <linux/raid/xor.h>
6
7/*
8 *
9 * Each stripe contains one buffer per disc. Each buffer can be in
10 * one of a number of states stored in "flags". Changes between
11 * these states happen *almost* exclusively under a per-stripe
12 * spinlock. Some very specific changes can happen in bi_end_io, and
13 * these are not protected by the spin lock.
14 *
15 * The flag bits that are used to represent these states are:
16 * R5_UPTODATE and R5_LOCKED
17 *
18 * State Empty == !UPTODATE, !LOCK
19 * We have no data, and there is no active request
20 * State Want == !UPTODATE, LOCK
21 * A read request is being submitted for this block
22 * State Dirty == UPTODATE, LOCK
23 * Some new data is in this buffer, and it is being written out
24 * State Clean == UPTODATE, !LOCK
25 * We have valid data which is the same as on disc
26 *
27 * The possible state transitions are:
28 *
29 * Empty -> Want - on read or write to get old data for parity calc
30 * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE)
31 * Empty -> Clean - on compute_block when computing a block for failed drive
32 * Want -> Empty - on failed read
33 * Want -> Clean - on successful completion of read request
34 * Dirty -> Clean - on successful completion of write request
35 * Dirty -> Clean - on failed write
36 * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
37 *
38 * The Want->Empty, Want->Clean, Dirty->Clean, transitions
39 * all happen in b_end_io at interrupt time.
40 * Each sets the Uptodate bit before releasing the Lock bit.
41 * This leaves one multi-stage transition:
42 * Want->Dirty->Clean
43 * This is safe because thinking that a Clean buffer is actually dirty
44 * will at worst delay some action, and the stripe will be scheduled
45 * for attention after the transition is complete.
46 *
47 * There is one possibility that is not covered by these states. That
48 * is if one drive has failed and there is a spare being rebuilt. We
49 * can't distinguish between a clean block that has been generated
50 * from parity calculations, and a clean block that has been
51 * successfully written to the spare ( or to parity when resyncing).
52 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
53 * is set whenever a write is scheduled to the spare, or to the parity
54 * disc if there is no spare. A sync request clears this bit, and
55 * when we find it set with no buffers locked, we know the sync is
56 * complete.
57 *
58 * Buffers for the md device that arrive via make_request are attached
59 * to the appropriate stripe in one of two lists linked on b_reqnext.
60 * One list (bh_read) for read requests, one (bh_write) for write.
61 * There should never be more than one buffer on the two lists
62 * together, but we are not guaranteed of that so we allow for more.
63 *
64 * If a buffer is on the read list when the associated cache buffer is
65 * Uptodate, the data is copied into the read buffer and its b_end_io
66 * routine is called. This may happen in the end_request routine only
67 * if the buffer has just successfully been read. end_request should
68 * remove the buffers from the list and then set the Uptodate bit on
69 * the buffer. Other threads may do this only if they first check
70 * that the Uptodate bit is set. Once they have checked that they may
71 * take buffers off the read queue.
72 *
73 * When a buffer on the write list is committed for write it is copied
74 * into the cache buffer, which is then marked dirty, and moved onto a
75 * third list, the written list (bh_written). Once both the parity
76 * block and the cached buffer are successfully written, any buffer on
77 * a written list can be returned with b_end_io.
78 *
79 * The write list and read list both act as fifos. The read list is
80 * protected by the device_lock. The write and written lists are
81 * protected by the stripe lock. The device_lock, which can be
82 * claimed while the stripe lock is held, is only for list
83 * manipulations and will only be held for a very short time. It can
84 * be claimed from interrupts.
85 *
86 *
87 * Stripes in the stripe cache can be on one of two lists (or on
88 * neither). The "inactive_list" contains stripes which are not
89 * currently being used for any request. They can freely be reused
90 * for another stripe. The "handle_list" contains stripes that need
91 * to be handled in some way. Both of these are fifo queues. Each
92 * stripe is also (potentially) linked to a hash bucket in the hash
93 * table so that it can be found by sector number. Stripes that are
94 * not hashed must be on the inactive_list, and will normally be at
95 * the front. All stripes start life this way.
96 *
97 * The inactive_list, handle_list and hash bucket lists are all protected by the
98 * device_lock.
99 * - stripes on the inactive_list never have their stripe_lock held.
100 * - stripes have a reference counter. If count==0, they are on a list.
101 * - If a stripe might need handling, STRIPE_HANDLE is set.
102 * - When refcount reaches zero, then if STRIPE_HANDLE it is put on
103 * handle_list else inactive_list
104 *
105 * This, combined with the fact that STRIPE_HANDLE is only ever
106 * cleared while a stripe has a non-zero count, means that if the
107 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
108 * handle_list, and if the refcount is 0 and STRIPE_HANDLE is not set, then
109 * the stripe is on inactive_list.
110 *
111 * The possible transitions are:
112 * activate an unhashed/inactive stripe (get_active_stripe())
113 * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
114 * activate a hashed, possibly active stripe (get_active_stripe())
115 * lockdev check-hash if(!cnt++)unlink-stripe unlockdev
116 * attach a request to an active stripe (add_stripe_bh())
117 * lockdev attach-buffer unlockdev
118 * handle a stripe (handle_stripe())
119 * lockstripe clrSTRIPE_HANDLE ...
120 * (lockdev check-buffers unlockdev) ..
121 * change-state ..
122 * record io/ops needed unlockstripe schedule io/ops
123 * release an active stripe (release_stripe())
124 * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
125 *
126 * The refcount counts each thread that has activated the stripe,
127 * plus raid5d if it is handling it, plus one for each active request
128 * on a cached buffer, and plus one if the stripe is undergoing stripe
129 * operations.
130 *
131 * Stripe operations are performed outside the stripe lock.
132 * The stripe operations are:
133 * -copying data between the stripe cache and user application buffers
134 * -computing blocks to save a disk access, or to recover a missing block
135 * -updating the parity on a write operation (reconstruct write and
136 * read-modify-write)
137 * -checking parity correctness
138 * -running i/o to disk
139 * These operations are carried out by raid5_run_ops which uses the async_tx
140 * api to (optionally) offload operations to dedicated hardware engines.
141 * When requesting an operation, handle_stripe sets the pending bit for the
142 * operation and increments the count. raid5_run_ops is then run whenever
143 * the count is non-zero.
144 * There are some critical dependencies between the operations that prevent some
145 * from being requested while another is in flight.
146 * 1/ Parity check operations destroy the in cache version of the parity block,
147 * so we prevent parity dependent operations like writes and compute_blocks
148 * from starting while a check is in progress. Some dma engines can perform
149 * the check without damaging the parity block; in these cases the parity
150 * block is re-marked up to date (assuming the check was successful) and is
151 * not re-read from disk.
152 * 2/ When a write operation is requested, we immediately lock the affected
153 * blocks, and mark them as not up to date. This causes new read requests
154 * to be held off, as well as parity checks and compute block operations.
155 * 3/ Once a compute block operation has been requested, handle_stripe treats
156 * that block as if it is up to date. raid5_run_ops guarantees that any
157 * operation that is dependent on the compute block result is initiated after
158 * the compute block completes.
159 */
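
To make the list and refcount rules above concrete, the release transition sketched as "lockdev if (!--cnt) { ... } unlockdev" could look roughly like the following. This is a hedged illustration only, not the kernel's actual release_stripe(); it ignores bitmap batching, wake-ups and preread accounting, and uses only fields defined later in this header (raid5_conf_t is the typedef introduced below).

/*
 * Hedged sketch of the release transition described above: drop a
 * reference under device_lock and, if it was the last one, park the
 * stripe on handle_list or inactive_list depending on STRIPE_HANDLE.
 */
static void example_release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	if (atomic_dec_and_test(&sh->count)) {
		if (test_bit(STRIPE_HANDLE, &sh->state))
			list_add_tail(&sh->lru, &conf->handle_list);
		else
			list_add_tail(&sh->lru, &conf->inactive_list);
	}
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
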
160
161/*
162 * Operations state - intermediate states that are visible outside of sh->lock
163 * In general _idle indicates nothing is running, _run indicates a data
164 * processing operation is active, and _result means the data processing result
165 * is stable and can be acted upon. Simple operations like biofill and
166 * compute, which only have an _idle and a _run state, are indicated with
167 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN).
168 */
169/**
170 * enum check_states - handles syncing / repairing a stripe
171 * @check_state_idle - check operations are quiesced
172 * @check_state_run - check operation is running
173 * @check_state_check_result - set outside lock when check result is valid
174 * @check_state_compute_run - check failed and we are repairing
175 * @check_state_compute_result - set outside lock when compute result is valid
176 */
177enum check_states {
178 check_state_idle = 0,
179 check_state_run, /* parity check */
180 check_state_check_result,
181 check_state_compute_run, /* parity repair */
182 check_state_compute_result,
183};
184
185/**
186 * enum reconstruct_states - handles writing or expanding a stripe
187 */
188enum reconstruct_states {
189 reconstruct_state_idle = 0,
190 reconstruct_state_prexor_drain_run, /* prexor-write */
191 reconstruct_state_drain_run, /* write */
192 reconstruct_state_run, /* expand */
193 reconstruct_state_prexor_drain_result,
194 reconstruct_state_drain_result,
195 reconstruct_state_result,
196};
197
198struct stripe_head {
199 struct hlist_node hash;
200 struct list_head lru; /* inactive_list or handle_list */
201 struct raid5_private_data *raid_conf;
202 sector_t sector; /* sector of this row */
203 int pd_idx; /* parity disk index */
204 unsigned long state; /* state flags */
205 atomic_t count; /* nr of active thread/requests */
206 spinlock_t lock;
207 int bm_seq; /* sequence number for bitmap flushes */
208 int disks; /* disks in stripe */
209 enum check_states check_state;
210 enum reconstruct_states reconstruct_state;
211 /* stripe_operations
212 * @target - STRIPE_OP_COMPUTE_BLK target
213 */
214 struct stripe_operations {
215 int target;
216 u32 zero_sum_result;
217 } ops;
218 struct r5dev {
219 struct bio req;
220 struct bio_vec vec;
221 struct page *page;
222 struct bio *toread, *read, *towrite, *written;
223 sector_t sector; /* sector of this page */
224 unsigned long flags;
225 } dev[1]; /* allocated with extra space depending on RAID geometry */
226};
227
228/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
229 * for handle_stripe. It is only valid under spin_lock(sh->lock);
230 */
231struct stripe_head_state {
232 int syncing, expanding, expanded;
233 int locked, uptodate, to_read, to_write, failed, written;
234 int to_fill, compute, req_compute, non_overwrite;
235 int failed_num;
236 unsigned long ops_request;
237};
238
239/* r6_state - extra state data only relevant to r6 */
240struct r6_state {
241 int p_failed, q_failed, qd_idx, failed_num[2];
242};
243
244/* Flags */
245#define R5_UPTODATE 0 /* page contains current data */
246#define R5_LOCKED 1 /* IO has been submitted on "req" */
247#define R5_OVERWRITE 2 /* towrite covers whole page */
248/* and some that are internal to handle_stripe */
249#define R5_Insync 3 /* rdev && rdev->in_sync at start */
250#define R5_Wantread 4 /* want to schedule a read */
251#define R5_Wantwrite 5 /* want to schedule a write */
252#define R5_Overlap 7 /* There is a pending overlapping request on this block */
253#define R5_ReadError 8 /* seen a read error here recently */
254#define R5_ReWrite 9 /* have tried to over-write the readerror */
255
256#define R5_Expanded 10 /* This block now has post-expand data */
257#define R5_Wantcompute 11 /* compute_block in progress treat as
258 * uptodate
259 */
260#define R5_Wantfill 12 /* dev->toread contains a bio that needs
261 * filling
262 */
263#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
264/*
265 * Write method
266 */
267#define RECONSTRUCT_WRITE 1
268#define READ_MODIFY_WRITE 2
269/* not a write method, but a compute_parity mode */
270#define CHECK_PARITY 3
271
272/*
273 * Stripe state
274 */
275#define STRIPE_HANDLE 2
276#define STRIPE_SYNCING 3
277#define STRIPE_INSYNC 4
278#define STRIPE_PREREAD_ACTIVE 5
279#define STRIPE_DELAYED 6
280#define STRIPE_DEGRADED 7
281#define STRIPE_BIT_DELAY 8
282#define STRIPE_EXPANDING 9
283#define STRIPE_EXPAND_SOURCE 10
284#define STRIPE_EXPAND_READY 11
285#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
286#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
287#define STRIPE_BIOFILL_RUN 14
288#define STRIPE_COMPUTE_RUN 15
289/*
290 * Operation request flags
291 */
292#define STRIPE_OP_BIOFILL 0
293#define STRIPE_OP_COMPUTE_BLK 1
294#define STRIPE_OP_PREXOR 2
295#define STRIPE_OP_BIODRAIN 3
296#define STRIPE_OP_POSTXOR 4
297#define STRIPE_OP_CHECK 5
298
299/*
300 * Plugging:
301 *
302 * To improve write throughput, we need to delay the handling of some
303 * stripes until there has been a chance that several write requests
304 * for the one stripe have all been collected.
305 * In particular, any write request that would require pre-reading
306 * is put on a "delayed" queue until there are no stripes currently
307 * in a pre-read phase. Further, if the "delayed" queue is empty when
308 * a stripe is put on it then we "plug" the queue and do not process it
309 * until an unplug call is made (i.e. the unplug_io_fn() is called).
310 *
311 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
312 * it to the count of prereading stripes.
313 * When a write is initiated, or the stripe refcnt == 0 (just in case), we
314 * clear the PREREAD_ACTIVE flag and decrement the count.
315 * Whenever the 'handle' queue is empty and the device is not plugged, we
316 * move any stripes from delayed to handle and clear the DELAYED flag and set
317 * PREREAD_ACTIVE.
318 * In handle_stripe, if we find pre-reading is necessary, we do it if
319 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
320 * HANDLE gets cleared if handle_stripe leaves nothing locked.
321 */
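
As a rough illustration of the unplug step described above (not the actual raid5 code, which also handles bitmap batching and the hold list), moving delayed stripes back to the handle list might look like this, with device_lock already held by the caller:

/* hedged sketch: promote delayed stripes once the device is unplugged */
static void example_activate_delayed(raid5_conf_t *conf)
{
	while (!list_empty(&conf->delayed_list)) {
		struct stripe_head *sh;

		sh = list_entry(conf->delayed_list.next,
				struct stripe_head, lru);
		list_del_init(&sh->lru);
		clear_bit(STRIPE_DELAYED, &sh->state);
		if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
			atomic_inc(&conf->preread_active_stripes);
		list_add_tail(&sh->lru, &conf->handle_list);
	}
}
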
322
323
324struct disk_info {
325 mdk_rdev_t *rdev;
326};
327
328struct raid5_private_data {
329 struct hlist_head *stripe_hashtbl;
330 mddev_t *mddev;
331 struct disk_info *spare;
332 int chunk_size, level, algorithm;
333 int max_degraded;
334 int raid_disks;
335 int max_nr_stripes;
336
337 /* used during an expand */
338 sector_t expand_progress; /* MaxSector when no expand happening */
339 sector_t expand_lo; /* from here up to expand_progress is out-of-bounds
340 * as we haven't flushed the metadata yet
341 */
342 int previous_raid_disks;
343
344 struct list_head handle_list; /* stripes needing handling */
345 struct list_head hold_list; /* preread ready stripes */
346 struct list_head delayed_list; /* stripes that have plugged requests */
347 struct list_head bitmap_list; /* stripes delayed awaiting a bitmap update */
348 struct bio *retry_read_aligned; /* currently retrying aligned bios */
349 struct bio *retry_read_aligned_list; /* aligned bios retry list */
350 atomic_t preread_active_stripes; /* stripes with scheduled io */
351 atomic_t active_aligned_reads;
352 atomic_t pending_full_writes; /* full write backlog */
353 int bypass_count; /* bypassed prereads */
354 int bypass_threshold; /* preread nice */
355 struct list_head *last_hold; /* detect hold_list promotions */
356
357 atomic_t reshape_stripes; /* stripes with pending writes for reshape */
358 /* unfortunately we need two cache names as we temporarily have
359 * two caches.
360 */
361 int active_name;
362 char cache_name[2][20];
363 struct kmem_cache *slab_cache; /* for allocating stripes */
364
365 int seq_flush, seq_write;
366 int quiesce;
367
368 int fullsync; /* set to 1 if a full sync is needed
369 * (fresh device added).
370 * Cleared when a sync completes.
371 */
372
373 struct page *spare_page; /* Used when checking P/Q in raid6 */
374
375 /*
376 * Free stripes pool
377 */
378 atomic_t active_stripes;
379 struct list_head inactive_list;
380 wait_queue_head_t wait_for_stripe;
381 wait_queue_head_t wait_for_overlap;
382 int inactive_blocked; /* release of inactive stripes blocked,
383 * waiting for 25% to be free
384 */
385 int pool_size; /* number of disks in stripeheads in pool */
386 spinlock_t device_lock;
387 struct disk_info *disks;
388};
389
390typedef struct raid5_private_data raid5_conf_t;
391
392#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
393
394/*
395 * Our supported algorithms
396 */
397#define ALGORITHM_LEFT_ASYMMETRIC 0
398#define ALGORITHM_RIGHT_ASYMMETRIC 1
399#define ALGORITHM_LEFT_SYMMETRIC 2
400#define ALGORITHM_RIGHT_SYMMETRIC 3
401
402#endif
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 3e120587eada..5a210959e3f8 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -1,8 +1,6 @@
1#ifndef _XOR_H 1#ifndef _XOR_H
2#define _XOR_H 2#define _XOR_H
3 3
4#include <linux/raid/md.h>
5
6#define MAX_XOR_BLOCKS 4 4#define MAX_XOR_BLOCKS 4
7 5
8extern void xor_blocks(unsigned int count, unsigned int bytes, 6extern void xor_blocks(unsigned int count, unsigned int bytes,
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 80044a4f3ab9..bfd92e1e5d2c 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -36,7 +36,6 @@
36#include <linux/cache.h> 36#include <linux/cache.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/threads.h> 38#include <linux/threads.h>
39#include <linux/percpu.h>
40#include <linux/cpumask.h> 39#include <linux/cpumask.h>
41#include <linux/seqlock.h> 40#include <linux/seqlock.h>
42 41
@@ -108,25 +107,14 @@ struct rcu_data {
108 struct rcu_head barrier; 107 struct rcu_head barrier;
109}; 108};
110 109
111DECLARE_PER_CPU(struct rcu_data, rcu_data);
112DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
113
114/* 110/*
115 * Increment the quiescent state counter. 111 * Increment the quiescent state counter.
116 * The counter is a bit degenerated: We do not need to know 112 * The counter is a bit degenerated: We do not need to know
117 * how many quiescent states passed, just if there was at least 113 * how many quiescent states passed, just if there was at least
118 * one since the start of the grace period. Thus just a flag. 114 * one since the start of the grace period. Thus just a flag.
119 */ 115 */
120static inline void rcu_qsctr_inc(int cpu) 116extern void rcu_qsctr_inc(int cpu);
121{ 117extern void rcu_bh_qsctr_inc(int cpu);
122 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
123 rdp->passed_quiesc = 1;
124}
125static inline void rcu_bh_qsctr_inc(int cpu)
126{
127 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
128 rdp->passed_quiesc = 1;
129}
130 118
131extern int rcu_pending(int cpu); 119extern int rcu_pending(int cpu);
132extern int rcu_needs_cpu(int cpu); 120extern int rcu_needs_cpu(int cpu);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 528343e6da51..15fbb3ca634d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -36,7 +36,6 @@
36#include <linux/cache.h> 36#include <linux/cache.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/threads.h> 38#include <linux/threads.h>
39#include <linux/percpu.h>
40#include <linux/cpumask.h> 39#include <linux/cpumask.h>
41#include <linux/seqlock.h> 40#include <linux/seqlock.h>
42#include <linux/lockdep.h> 41#include <linux/lockdep.h>
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 74304b4538d8..fce522782ffa 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -36,34 +36,19 @@
36#include <linux/cache.h> 36#include <linux/cache.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/threads.h> 38#include <linux/threads.h>
39#include <linux/percpu.h> 39#include <linux/smp.h>
40#include <linux/cpumask.h> 40#include <linux/cpumask.h>
41#include <linux/seqlock.h> 41#include <linux/seqlock.h>
42 42
43struct rcu_dyntick_sched { 43extern void rcu_qsctr_inc(int cpu);
44 int dynticks; 44static inline void rcu_bh_qsctr_inc(int cpu) { }
45 int dynticks_snap;
46 int sched_qs;
47 int sched_qs_snap;
48 int sched_dynticks_snap;
49};
50
51DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
52
53static inline void rcu_qsctr_inc(int cpu)
54{
55 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
56
57 rdssp->sched_qs++;
58}
59#define rcu_bh_qsctr_inc(cpu)
60 45
61/* 46/*
62 * Someone might want to pass call_rcu_bh as a function pointer. 47 * Someone might want to pass call_rcu_bh as a function pointer.
63 * So this needs to just be a rename and not a macro function. 48 * So this needs to just be a rename and not a macro function.
64 * (no parentheses) 49 * (no parentheses)
65 */ 50 */
66#define call_rcu_bh call_rcu 51#define call_rcu_bh call_rcu
67 52
68/** 53/**
69 * call_rcu_sched - Queue RCU callback for invocation after sched grace period. 54 * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
@@ -117,30 +102,12 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
117struct softirq_action; 102struct softirq_action;
118 103
119#ifdef CONFIG_NO_HZ 104#ifdef CONFIG_NO_HZ
120 105extern void rcu_enter_nohz(void);
121static inline void rcu_enter_nohz(void) 106extern void rcu_exit_nohz(void);
122{ 107#else
123 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); 108# define rcu_enter_nohz() do { } while (0)
124 109# define rcu_exit_nohz() do { } while (0)
125 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ 110#endif
126 __get_cpu_var(rcu_dyntick_sched).dynticks++;
127 WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
128}
129
130static inline void rcu_exit_nohz(void)
131{
132 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
133
134 __get_cpu_var(rcu_dyntick_sched).dynticks++;
135 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
136 WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
137 &rs);
138}
139
140#else /* CONFIG_NO_HZ */
141#define rcu_enter_nohz() do { } while (0)
142#define rcu_exit_nohz() do { } while (0)
143#endif /* CONFIG_NO_HZ */
144 111
145/* 112/*
146 * A context switch is a grace period for rcupreempt synchronize_rcu() 113 * A context switch is a grace period for rcupreempt synchronize_rcu()
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index a722fb67bb2d..0cdda00f2b2a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -33,7 +33,6 @@
33#include <linux/cache.h> 33#include <linux/cache.h>
34#include <linux/spinlock.h> 34#include <linux/spinlock.h>
35#include <linux/threads.h> 35#include <linux/threads.h>
36#include <linux/percpu.h>
37#include <linux/cpumask.h> 36#include <linux/cpumask.h>
38#include <linux/seqlock.h> 37#include <linux/seqlock.h>
39 38
@@ -236,30 +235,8 @@ struct rcu_state {
236#endif /* #ifdef CONFIG_NO_HZ */ 235#endif /* #ifdef CONFIG_NO_HZ */
237}; 236};
238 237
239extern struct rcu_state rcu_state; 238extern void rcu_qsctr_inc(int cpu);
240DECLARE_PER_CPU(struct rcu_data, rcu_data); 239extern void rcu_bh_qsctr_inc(int cpu);
241
242extern struct rcu_state rcu_bh_state;
243DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
244
245/*
246 * Increment the quiescent state counter.
247 * The counter is a bit degenerated: We do not need to know
248 * how many quiescent states passed, just if there was at least
249 * one since the start of the grace period. Thus just a flag.
250 */
251static inline void rcu_qsctr_inc(int cpu)
252{
253 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
254 rdp->passed_quiesc = 1;
255 rdp->passed_quiesc_completed = rdp->completed;
256}
257static inline void rcu_bh_qsctr_inc(int cpu)
258{
259 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
260 rdp->passed_quiesc = 1;
261 rdp->passed_quiesc_completed = rdp->completed;
262}
263 240
264extern int rcu_pending(int cpu); 241extern int rcu_pending(int cpu);
265extern int rcu_needs_cpu(int cpu); 242extern int rcu_needs_cpu(int cpu);
diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h
index e84b0a9feda5..a6d014005d49 100644
--- a/include/linux/regulator/bq24022.h
+++ b/include/linux/regulator/bq24022.h
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13struct regulator_init_data;
14
13/** 15/**
14 * bq24022_mach_info - platform data for bq24022 16 * bq24022_mach_info - platform data for bq24022
15 * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging 17 * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging
@@ -18,4 +20,5 @@
18struct bq24022_mach_info { 20struct bq24022_mach_info {
19 int gpio_nce; 21 int gpio_nce;
20 int gpio_iset2; 22 int gpio_iset2;
23 struct regulator_init_data *init_data;
21}; 24};
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 801bf77ff4e2..277f4b964df5 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -88,6 +88,7 @@
88 * FAIL Regulator output has failed. 88 * FAIL Regulator output has failed.
89 * OVER_TEMP Regulator over temp. 89 * OVER_TEMP Regulator over temp.
90 * FORCE_DISABLE Regulator shut down by software. 90 * FORCE_DISABLE Regulator shut down by software.
91 * VOLTAGE_CHANGE Regulator voltage changed.
91 * 92 *
92 * NOTE: These events can be OR'ed together when passed into handler. 93 * NOTE: These events can be OR'ed together when passed into handler.
93 */ 94 */
@@ -98,6 +99,7 @@
98#define REGULATOR_EVENT_FAIL 0x08 99#define REGULATOR_EVENT_FAIL 0x08
99#define REGULATOR_EVENT_OVER_TEMP 0x10 100#define REGULATOR_EVENT_OVER_TEMP 0x10
100#define REGULATOR_EVENT_FORCE_DISABLE 0x20 101#define REGULATOR_EVENT_FORCE_DISABLE 0x20
102#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
101 103
102struct regulator; 104struct regulator;
103 105
@@ -140,6 +142,8 @@ int regulator_bulk_disable(int num_consumers,
140void regulator_bulk_free(int num_consumers, 142void regulator_bulk_free(int num_consumers,
141 struct regulator_bulk_data *consumers); 143 struct regulator_bulk_data *consumers);
142 144
145int regulator_count_voltages(struct regulator *regulator);
146int regulator_list_voltage(struct regulator *regulator, unsigned selector);
143int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); 147int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV);
144int regulator_get_voltage(struct regulator *regulator); 148int regulator_get_voltage(struct regulator *regulator);
145int regulator_set_current_limit(struct regulator *regulator, 149int regulator_set_current_limit(struct regulator *regulator,
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 2dae05705f13..4848d8dacd90 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -21,25 +21,38 @@
21struct regulator_dev; 21struct regulator_dev;
22struct regulator_init_data; 22struct regulator_init_data;
23 23
24enum regulator_status {
25 REGULATOR_STATUS_OFF,
26 REGULATOR_STATUS_ON,
27 REGULATOR_STATUS_ERROR,
28 /* fast/normal/idle/standby are flavors of "on" */
29 REGULATOR_STATUS_FAST,
30 REGULATOR_STATUS_NORMAL,
31 REGULATOR_STATUS_IDLE,
32 REGULATOR_STATUS_STANDBY,
33};
34
24/** 35/**
25 * struct regulator_ops - regulator operations. 36 * struct regulator_ops - regulator operations.
26 * 37 *
27 * This struct describes regulator operations which can be implemented by 38 * @enable: Configure the regulator as enabled.
28 * regulator chip drivers. 39 * @disable: Configure the regulator as disabled.
29 *
30 * @enable: Enable the regulator.
31 * @disable: Disable the regulator.
32 * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. 40 * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise.
33 * 41 *
34 * @set_voltage: Set the voltage for the regulator within the range specified. 42 * @set_voltage: Set the voltage for the regulator within the range specified.
35 * The driver should select the voltage closest to min_uV. 43 * The driver should select the voltage closest to min_uV.
36 * @get_voltage: Return the currently configured voltage for the regulator. 44 * @get_voltage: Return the currently configured voltage for the regulator.
45 * @list_voltage: Return one of the supported voltages, in microvolts; zero
46 * if the selector indicates a voltage that is unusable on this system;
47 * or negative errno. Selectors range from zero to one less than
48 * regulator_desc.n_voltages. Voltages may be reported in any order.
37 * 49 *
38 * @set_current_limit: Configure a limit for a current-limited regulator. 50 * @set_current_limit: Configure a limit for a current-limited regulator.
39 * @get_current_limit: Get the limit for a current-limited regulator. 51 * @get_current_limit: Get the configured limit for a current-limited regulator.
40 * 52 *
41 * @set_mode: Set the operating mode for the regulator. 53 * @get_mode: Get the configured operating mode for the regulator.
42 * @get_mode: Get the current operating mode for the regulator. 54 * @get_status: Return actual (not as-configured) status of regulator, as a
55 * REGULATOR_STATUS value (or negative errno)
43 * @get_optimum_mode: Get the most efficient operating mode for the regulator 56 * @get_optimum_mode: Get the most efficient operating mode for the regulator
44 * when running with the specified parameters. 57 * when running with the specified parameters.
45 * 58 *
@@ -51,9 +64,15 @@ struct regulator_init_data;
51 * suspended. 64 * suspended.
52 * @set_suspend_mode: Set the operating mode for the regulator when the 65 * @set_suspend_mode: Set the operating mode for the regulator when the
53 * system is suspended. 66 * system is suspended.
67 *
68 * This struct describes regulator operations which can be implemented by
69 * regulator chip drivers.
54 */ 70 */
55struct regulator_ops { 71struct regulator_ops {
56 72
73 /* enumerate supported voltages */
74 int (*list_voltage) (struct regulator_dev *, unsigned selector);
75
57 /* get/set regulator voltage */ 76 /* get/set regulator voltage */
58 int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); 77 int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV);
59 int (*get_voltage) (struct regulator_dev *); 78 int (*get_voltage) (struct regulator_dev *);
@@ -72,6 +91,13 @@ struct regulator_ops {
72 int (*set_mode) (struct regulator_dev *, unsigned int mode); 91 int (*set_mode) (struct regulator_dev *, unsigned int mode);
73 unsigned int (*get_mode) (struct regulator_dev *); 92 unsigned int (*get_mode) (struct regulator_dev *);
74 93
94 /* report regulator status ... most other accessors report
95 * control inputs, this reports results of combining inputs
96 * from Linux (and other sources) with the actual load.
97 * returns REGULATOR_STATUS_* or negative errno.
98 */
99 int (*get_status)(struct regulator_dev *);
100
75 /* get most efficient regulator operating mode for load */ 101 /* get most efficient regulator operating mode for load */
76 unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, 102 unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV,
77 int output_uV, int load_uA); 103 int output_uV, int load_uA);
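
To illustrate the new @list_voltage and @get_status hooks documented in this hunk, a hypothetical driver with a small fixed voltage table might implement them roughly as below. The register-access helpers are invented for the example; only the return-value conventions come from the documentation above.

static const int example_voltages[] = { 1200000, 1800000, 3300000 };

/* sketch: selectors 0..n_voltages-1 map to microvolt values */
static int example_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
	if (selector >= ARRAY_SIZE(example_voltages))
		return -EINVAL;
	return example_voltages[selector];
}

/* sketch: report the actual state, not just the configured one */
static int example_get_status(struct regulator_dev *rdev)
{
	if (example_read_error_flag(rdev))	/* hypothetical hardware read */
		return REGULATOR_STATUS_ERROR;
	return example_read_enable_bit(rdev) ?	/* hypothetical hardware read */
		REGULATOR_STATUS_ON : REGULATOR_STATUS_OFF;
}
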
@@ -106,6 +132,7 @@ enum regulator_type {
106 * 132 *
107 * @name: Identifying name for the regulator. 133 * @name: Identifying name for the regulator.
108 * @id: Numerical identifier for the regulator. 134 * @id: Numerical identifier for the regulator.
135 * @n_voltages: Number of selectors available for ops.list_voltage().
109 * @ops: Regulator operations table. 136 * @ops: Regulator operations table.
110 * @irq: Interrupt number for the regulator. 137 * @irq: Interrupt number for the regulator.
111 * @type: Indicates if the regulator is a voltage or current regulator. 138 * @type: Indicates if the regulator is a voltage or current regulator.
@@ -114,14 +141,48 @@ enum regulator_type {
114struct regulator_desc { 141struct regulator_desc {
115 const char *name; 142 const char *name;
116 int id; 143 int id;
144 unsigned n_voltages;
117 struct regulator_ops *ops; 145 struct regulator_ops *ops;
118 int irq; 146 int irq;
119 enum regulator_type type; 147 enum regulator_type type;
120 struct module *owner; 148 struct module *owner;
121}; 149};
122 150
151/*
152 * struct regulator_dev
153 *
154 * Voltage / Current regulator class device. One for each
155 * regulator.
156 *
157 * This should *not* be used directly by anything except the regulator
158 * core and notification injection (which should take the mutex and do
159 * no other direct access).
160 */
161struct regulator_dev {
162 struct regulator_desc *desc;
163 int use_count;
164
165 /* lists we belong to */
166 struct list_head list; /* list of all regulators */
167 struct list_head slist; /* list of supplied regulators */
168
169 /* lists we own */
170 struct list_head consumer_list; /* consumers we supply */
171 struct list_head supply_list; /* regulators we supply */
172
173 struct blocking_notifier_head notifier;
174 struct mutex mutex; /* consumer lock */
175 struct module *owner;
176 struct device dev;
177 struct regulation_constraints *constraints;
178 struct regulator_dev *supply; /* for tree */
179
180 void *reg_data; /* regulator_dev data */
181};
182
123struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, 183struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
124 struct device *dev, void *driver_data); 184 struct device *dev, struct regulator_init_data *init_data,
185 void *driver_data);
125void regulator_unregister(struct regulator_dev *rdev); 186void regulator_unregister(struct regulator_dev *rdev);
126 187
127int regulator_notifier_call_chain(struct regulator_dev *rdev, 188int regulator_notifier_call_chain(struct regulator_dev *rdev,
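
The registration prototype above now takes the board's regulator_init_data directly. Continuing the hypothetical driver sketched earlier, the probe path might register roughly like this; example_ops, pdata and priv are invented names and the descriptor fields are only those visible in this header.

static struct regulator_desc example_desc = {
	.name		= "example-ldo",
	.id		= 0,
	.n_voltages	= 3,		/* matches example_voltages above */
	.ops		= &example_ops,
	.type		= REGULATOR_VOLTAGE,
	.owner		= THIS_MODULE,
};

	/* in probe(): init_data is supplied by platform/board code */
	rdev = regulator_register(&example_desc, &pdev->dev,
				  pdata->init_data, priv);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
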
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 1387a5d2190e..91b4da31f1b5 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -14,9 +14,12 @@
14#ifndef __REGULATOR_FIXED_H 14#ifndef __REGULATOR_FIXED_H
15#define __REGULATOR_FIXED_H 15#define __REGULATOR_FIXED_H
16 16
17struct regulator_init_data;
18
17struct fixed_voltage_config { 19struct fixed_voltage_config {
18 const char *supply_name; 20 const char *supply_name;
19 int microvolts; 21 int microvolts;
22 struct regulator_init_data *init_data;
20}; 23};
21 24
22#endif 25#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 3794773b23d2..bac64fa390f2 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -73,7 +73,9 @@ struct regulator_state {
73 * 73 *
74 * @always_on: Set if the regulator should never be disabled. 74 * @always_on: Set if the regulator should never be disabled.
75 * @boot_on: Set if the regulator is enabled when the system is initially 75 * @boot_on: Set if the regulator is enabled when the system is initially
76 * started. 76 * started. If the regulator is not enabled by the hardware or
77 * bootloader then it will be enabled when the constraints are
78 * applied.
77 * @apply_uV: Apply the voltage constraint when initialising. 79 * @apply_uV: Apply the voltage constraint when initialising.
78 * 80 *
79 * @input_uV: Input voltage for regulator when supplied by another regulator. 81 * @input_uV: Input voltage for regulator when supplied by another regulator.
@@ -83,6 +85,7 @@ struct regulator_state {
83 * @state_standby: State for regulator when system is suspended in standby 85 * @state_standby: State for regulator when system is suspended in standby
84 * mode. 86 * mode.
85 * @initial_state: Suspend state to set by default. 87 * @initial_state: Suspend state to set by default.
88 * @initial_mode: Mode to set at startup.
86 */ 89 */
87struct regulation_constraints { 90struct regulation_constraints {
88 91
@@ -111,6 +114,9 @@ struct regulation_constraints {
111 struct regulator_state state_standby; 114 struct regulator_state state_standby;
112 suspend_state_t initial_state; /* suspend state to set at init */ 115 suspend_state_t initial_state; /* suspend state to set at init */
113 116
117 /* mode to set on startup */
118 unsigned int initial_mode;
119
114 /* constraint flags */ 120 /* constraint flags */
115 unsigned always_on:1; /* regulator never off when system is on */ 121 unsigned always_on:1; /* regulator never off when system is on */
116 unsigned boot_on:1; /* bootloader/firmware enabled regulator */ 122 unsigned boot_on:1; /* bootloader/firmware enabled regulator */
@@ -160,4 +166,6 @@ struct regulator_init_data {
160 166
161int regulator_suspend_prepare(suspend_state_t state); 167int regulator_suspend_prepare(suspend_state_t state);
162 168
169void regulator_has_full_constraints(void);
170
163#endif 171#endif
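
A board file might express the constraints documented above roughly as follows; this is a hedged sketch and the exact field set (beyond boot_on, apply_uV and initial_mode shown in this diff) is assumed from this header and consumer.h.

static struct regulator_init_data example_ldo_data = {
	.constraints = {
		.min_uV		= 1800000,
		.max_uV		= 1800000,
		.apply_uV	= 1,	/* force the 1.8V constraint at init */
		.boot_on	= 1,	/* left on by the bootloader */
		.initial_mode	= REGULATOR_MODE_NORMAL,
	},
};

If the board describes every supply this way, it can additionally call the new regulator_has_full_constraints() from its machine init code so the core knows that no further constraints will arrive.
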
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b3b359660082..e1b7b2173885 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -8,7 +8,7 @@ struct ring_buffer;
8struct ring_buffer_iter; 8struct ring_buffer_iter;
9 9
10/* 10/*
11 * Don't reference this struct directly, use functions below. 11 * Don't refer to this struct directly, use functions below.
12 */ 12 */
13struct ring_buffer_event { 13struct ring_buffer_event {
14 u32 type:2, len:3, time_delta:27; 14 u32 type:2, len:3, time_delta:27;
@@ -18,10 +18,13 @@ struct ring_buffer_event {
18/** 18/**
19 * enum ring_buffer_type - internal ring buffer types 19 * enum ring_buffer_type - internal ring buffer types
20 * 20 *
21 * @RINGBUF_TYPE_PADDING: Left over page padding 21 * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event
22 * array is ignored 22 * If time_delta is 0:
23 * size is variable depending on how much 23 * array is ignored
24 * size is variable depending on how much
24 * padding is needed 25 * padding is needed
26 * If time_delta is non zero:
27 * everything else same as RINGBUF_TYPE_DATA
25 * 28 *
26 * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta 29 * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
27 * array[0] = time delta (28 .. 59) 30 * array[0] = time delta (28 .. 59)
@@ -65,6 +68,8 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
65 return event->time_delta; 68 return event->time_delta;
66} 69}
67 70
71void ring_buffer_event_discard(struct ring_buffer_event *event);
72
68/* 73/*
69 * size is in bytes for each per CPU buffer. 74 * size is in bytes for each per CPU buffer.
70 */ 75 */
@@ -74,13 +79,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
74 79
75int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); 80int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
76 81
77struct ring_buffer_event * 82struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
78ring_buffer_lock_reserve(struct ring_buffer *buffer, 83 unsigned long length);
79 unsigned long length,
80 unsigned long *flags);
81int ring_buffer_unlock_commit(struct ring_buffer *buffer, 84int ring_buffer_unlock_commit(struct ring_buffer *buffer,
82 struct ring_buffer_event *event, 85 struct ring_buffer_event *event);
83 unsigned long flags);
84int ring_buffer_write(struct ring_buffer *buffer, 86int ring_buffer_write(struct ring_buffer *buffer,
85 unsigned long length, void *data); 87 unsigned long length, void *data);
86 88
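
With the flags argument gone from the prototypes above, a writer now reserves, fills and commits an event roughly as follows. ring_buffer_event_data() and the my_entry payload type are assumptions made for the example.

	struct ring_buffer_event *event;
	struct my_entry *payload;

	event = ring_buffer_lock_reserve(buffer, sizeof(*payload));
	if (!event)
		return;			/* buffer full or writing disabled */
	payload = ring_buffer_event_data(event);
	payload->value = 42;		/* fill in the reserved slot */
	ring_buffer_unlock_commit(buffer, event);
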
@@ -121,17 +123,19 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
121unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); 123unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
122unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); 124unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
123 125
124u64 ring_buffer_time_stamp(int cpu); 126u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
125void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); 127void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
128 int cpu, u64 *ts);
129void ring_buffer_set_clock(struct ring_buffer *buffer,
130 u64 (*clock)(void));
131
132size_t ring_buffer_page_len(void *page);
126 133
127void tracing_on(void);
128void tracing_off(void);
129void tracing_off_permanent(void);
130 134
131void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); 135void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
132void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); 136void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
133int ring_buffer_read_page(struct ring_buffer *buffer, 137int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
134 void **data_page, int cpu, int full); 138 size_t len, int cpu, int full);
135 139
136enum ring_buffer_flags { 140enum ring_buffer_flags {
137 RB_FL_OVERWRITE = 1 << 0, 141 RB_FL_OVERWRITE = 1 << 0,
diff --git a/include/linux/rtc-v3020.h b/include/linux/rtc-v3020.h
index bf74e63c98fe..8ba646e610d9 100644
--- a/include/linux/rtc-v3020.h
+++ b/include/linux/rtc-v3020.h
@@ -14,6 +14,12 @@
14 * is used depends on the board. */ 14 * is used depends on the board. */
15struct v3020_platform_data { 15struct v3020_platform_data {
16 int leftshift; /* (1<<(leftshift)) & readl() */ 16 int leftshift; /* (1<<(leftshift)) & readl() */
17
18 int use_gpio:1;
19 unsigned int gpio_cs;
20 unsigned int gpio_wr;
21 unsigned int gpio_rd;
22 unsigned int gpio_io;
17}; 23};
18 24
19#define V3020_STATUS_0 0x00 25#define V3020_STATUS_0 0x00
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 4046b75563c1..60f88a7fb13d 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -99,6 +99,7 @@ struct rtc_pll_info {
99 99
100#ifdef __KERNEL__ 100#ifdef __KERNEL__
101 101
102#include <linux/types.h>
102#include <linux/interrupt.h> 103#include <linux/interrupt.h>
103 104
104extern int rtc_month_days(unsigned int month, unsigned int year); 105extern int rtc_month_days(unsigned int month, unsigned int year);
@@ -232,6 +233,11 @@ int rtc_register(rtc_task_t *task);
232int rtc_unregister(rtc_task_t *task); 233int rtc_unregister(rtc_task_t *task);
233int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); 234int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg);
234 235
236static inline bool is_leap_year(unsigned int year)
237{
238 return (!(year % 4) && (year % 100)) || !(year % 400);
239}
240
235#endif /* __KERNEL__ */ 241#endif /* __KERNEL__ */
236 242
237#endif /* _LINUX_RTC_H_ */ 243#endif /* _LINUX_RTC_H_ */
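
The new is_leap_year() helper encodes the Gregorian rule (divisible by 4, except centuries unless divisible by 400); a trivial, hedged example of using it is computing month lengths:

static inline int example_days_in_february(unsigned int year)
{
	return is_leap_year(year) ? 29 : 28;	/* example use of the new helper */
}
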
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 29df6374d2de..b94f3541f67b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -68,7 +68,7 @@ struct sched_param {
68#include <linux/smp.h> 68#include <linux/smp.h>
69#include <linux/sem.h> 69#include <linux/sem.h>
70#include <linux/signal.h> 70#include <linux/signal.h>
71#include <linux/fs_struct.h> 71#include <linux/path.h>
72#include <linux/compiler.h> 72#include <linux/compiler.h>
73#include <linux/completion.h> 73#include <linux/completion.h>
74#include <linux/pid.h> 74#include <linux/pid.h>
@@ -97,6 +97,7 @@ struct futex_pi_state;
97struct robust_list_head; 97struct robust_list_head;
98struct bio; 98struct bio;
99struct bts_tracer; 99struct bts_tracer;
100struct fs_struct;
100 101
101/* 102/*
102 * List of flags we want to share for kernel threads, 103 * List of flags we want to share for kernel threads,
@@ -137,6 +138,8 @@ extern unsigned long nr_uninterruptible(void);
137extern unsigned long nr_active(void); 138extern unsigned long nr_active(void);
138extern unsigned long nr_iowait(void); 139extern unsigned long nr_iowait(void);
139 140
141extern unsigned long get_parent_ip(unsigned long addr);
142
140struct seq_file; 143struct seq_file;
141struct cfs_rq; 144struct cfs_rq;
142struct task_group; 145struct task_group;
@@ -391,8 +394,15 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
391 (mm)->hiwater_vm = (mm)->total_vm; \ 394 (mm)->hiwater_vm = (mm)->total_vm; \
392} while (0) 395} while (0)
393 396
394#define get_mm_hiwater_rss(mm) max((mm)->hiwater_rss, get_mm_rss(mm)) 397static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
395#define get_mm_hiwater_vm(mm) max((mm)->hiwater_vm, (mm)->total_vm) 398{
399 return max(mm->hiwater_rss, get_mm_rss(mm));
400}
401
402static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
403{
404 return max(mm->hiwater_vm, mm->total_vm);
405}
396 406
397extern void set_dumpable(struct mm_struct *mm, int value); 407extern void set_dumpable(struct mm_struct *mm, int value);
398extern int get_dumpable(struct mm_struct *mm); 408extern int get_dumpable(struct mm_struct *mm);
@@ -540,25 +550,8 @@ struct signal_struct {
540 550
541 struct list_head cpu_timers[3]; 551 struct list_head cpu_timers[3];
542 552
543 /* job control IDs */
544
545 /*
546 * pgrp and session fields are deprecated.
547 * use the task_session_Xnr and task_pgrp_Xnr routines below
548 */
549
550 union {
551 pid_t pgrp __deprecated;
552 pid_t __pgrp;
553 };
554
555 struct pid *tty_old_pgrp; 553 struct pid *tty_old_pgrp;
556 554
557 union {
558 pid_t session __deprecated;
559 pid_t __session;
560 };
561
562 /* boolean value for session group leader */ 555 /* boolean value for session group leader */
563 int leader; 556 int leader;
564 557
@@ -1414,6 +1407,8 @@ struct task_struct {
1414 int curr_ret_stack; 1407 int curr_ret_stack;
1415 /* Stack of return addresses for return function tracing */ 1408 /* Stack of return addresses for return function tracing */
1416 struct ftrace_ret_stack *ret_stack; 1409 struct ftrace_ret_stack *ret_stack;
1410 /* time stamp for last schedule */
1411 unsigned long long ftrace_timestamp;
1417 /* 1412 /*
1418 * Number of functions that haven't been traced 1413 * Number of functions that haven't been traced
1419 * because of depth overrun. 1414 * because of depth overrun.
@@ -1462,16 +1457,6 @@ static inline int rt_task(struct task_struct *p)
1462 return rt_prio(p->prio); 1457 return rt_prio(p->prio);
1463} 1458}
1464 1459
1465static inline void set_task_session(struct task_struct *tsk, pid_t session)
1466{
1467 tsk->signal->__session = session;
1468}
1469
1470static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
1471{
1472 tsk->signal->__pgrp = pgrp;
1473}
1474
1475static inline struct pid *task_pid(struct task_struct *task) 1460static inline struct pid *task_pid(struct task_struct *task)
1476{ 1461{
1477 return task->pids[PIDTYPE_PID].pid; 1462 return task->pids[PIDTYPE_PID].pid;
@@ -1482,6 +1467,11 @@ static inline struct pid *task_tgid(struct task_struct *task)
1482 return task->group_leader->pids[PIDTYPE_PID].pid; 1467 return task->group_leader->pids[PIDTYPE_PID].pid;
1483} 1468}
1484 1469
1470/*
1471 * Without tasklist or rcu lock it is not safe to dereference
1472 * the result of task_pgrp/task_session even if task == current,
1473 * we can race with another thread doing sys_setsid/sys_setpgid.
1474 */
1485static inline struct pid *task_pgrp(struct task_struct *task) 1475static inline struct pid *task_pgrp(struct task_struct *task)
1486{ 1476{
1487 return task->group_leader->pids[PIDTYPE_PGID].pid; 1477 return task->group_leader->pids[PIDTYPE_PGID].pid;
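
Per the new comment above, a caller that holds only a task reference would bracket the pgrp/session lookup with RCU; a minimal sketch:

	pid_t pgrp_nr;

	rcu_read_lock();
	pgrp_nr = pid_vnr(task_pgrp(tsk));	/* struct pid stays valid under RCU */
	rcu_read_unlock();
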
@@ -1507,17 +1497,23 @@ struct pid_namespace;
1507 * 1497 *
1508 * see also pid_nr() etc in include/linux/pid.h 1498 * see also pid_nr() etc in include/linux/pid.h
1509 */ 1499 */
1500pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1501 struct pid_namespace *ns);
1510 1502
1511static inline pid_t task_pid_nr(struct task_struct *tsk) 1503static inline pid_t task_pid_nr(struct task_struct *tsk)
1512{ 1504{
1513 return tsk->pid; 1505 return tsk->pid;
1514} 1506}
1515 1507
1516pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); 1508static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1509 struct pid_namespace *ns)
1510{
1511 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1512}
1517 1513
1518static inline pid_t task_pid_vnr(struct task_struct *tsk) 1514static inline pid_t task_pid_vnr(struct task_struct *tsk)
1519{ 1515{
1520 return pid_vnr(task_pid(tsk)); 1516 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1521} 1517}
1522 1518
1523 1519
@@ -1534,31 +1530,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1534} 1530}
1535 1531
1536 1532
1537static inline pid_t task_pgrp_nr(struct task_struct *tsk) 1533static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1534 struct pid_namespace *ns)
1538{ 1535{
1539 return tsk->signal->__pgrp; 1536 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1540} 1537}
1541 1538
1542pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1543
1544static inline pid_t task_pgrp_vnr(struct task_struct *tsk) 1539static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1545{ 1540{
1546 return pid_vnr(task_pgrp(tsk)); 1541 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1547} 1542}
1548 1543
1549 1544
1550static inline pid_t task_session_nr(struct task_struct *tsk) 1545static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1546 struct pid_namespace *ns)
1551{ 1547{
1552 return tsk->signal->__session; 1548 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1553} 1549}
1554 1550
1555pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1556
1557static inline pid_t task_session_vnr(struct task_struct *tsk) 1551static inline pid_t task_session_vnr(struct task_struct *tsk)
1558{ 1552{
1559 return pid_vnr(task_session(tsk)); 1553 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1560} 1554}
1561 1555
1556/* obsolete, do not use */
1557static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1558{
1559 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1560}
1562 1561
1563/** 1562/**
1564 * pid_alive - check that a task structure is not stale 1563 * pid_alive - check that a task structure is not stale
@@ -1968,7 +1967,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *);
1968/* Allocate a new mm structure and copy contents from tsk->mm */ 1967/* Allocate a new mm structure and copy contents from tsk->mm */
1969extern struct mm_struct *dup_mm(struct task_struct *tsk); 1968extern struct mm_struct *dup_mm(struct task_struct *tsk);
1970 1969
1971extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); 1970extern int copy_thread(unsigned long, unsigned long, unsigned long,
1971 struct task_struct *, struct pt_regs *);
1972extern void flush_thread(void); 1972extern void flush_thread(void);
1973extern void exit_thread(void); 1973extern void exit_thread(void);
1974 1974
@@ -2053,6 +2053,11 @@ static inline int thread_group_empty(struct task_struct *p)
2053#define delay_group_leader(p) \ 2053#define delay_group_leader(p) \
2054 (thread_group_leader(p) && !thread_group_empty(p)) 2054 (thread_group_leader(p) && !thread_group_empty(p))
2055 2055
2056static inline int task_detached(struct task_struct *p)
2057{
2058 return p->exit_signal == -1;
2059}
2060
2056/* 2061/*
2057 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring 2062 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2058 * subscriptions and synchronises with wait4(). Also used in procfs. Also 2063 * subscriptions and synchronises with wait4(). Also used in procfs. Also
diff --git a/include/linux/security.h b/include/linux/security.h
index 54ed15799a83..d5fd6163606f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/key.h> 33#include <linux/key.h>
34#include <linux/xfrm.h> 34#include <linux/xfrm.h>
35#include <linux/gfp.h>
35#include <net/flow.h> 36#include <net/flow.h>
36 37
37/* Maximum number of letters for an LSM name string */ 38/* Maximum number of letters for an LSM name string */
@@ -2953,5 +2954,28 @@ static inline void securityfs_remove(struct dentry *dentry)
2953 2954
2954#endif 2955#endif
2955 2956
2957#ifdef CONFIG_SECURITY
2958
2959static inline char *alloc_secdata(void)
2960{
2961 return (char *)get_zeroed_page(GFP_KERNEL);
2962}
2963
2964static inline void free_secdata(void *secdata)
2965{
2966 free_page((unsigned long)secdata);
2967}
2968
2969#else
2970
2971static inline char *alloc_secdata(void)
2972{
2973 return (char *)1;
2974}
2975
2976static inline void free_secdata(void *secdata)
2977{ }
2978#endif /* CONFIG_SECURITY */
2979
2956#endif /* ! __LINUX_SECURITY_H */ 2980#endif /* ! __LINUX_SECURITY_H */
2957 2981
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 6ca6a7b66d75..5ac9b0bcaf9a 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ 14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ 15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <trace/kmemtrace.h>
17 18
18/* Size description struct for general caches. */ 19/* Size description struct for general caches. */
19struct cache_sizes { 20struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
28void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 29void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
29void *__kmalloc(size_t size, gfp_t flags); 30void *__kmalloc(size_t size, gfp_t flags);
30 31
31static inline void *kmalloc(size_t size, gfp_t flags) 32#ifdef CONFIG_KMEMTRACE
33extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
34extern size_t slab_buffer_size(struct kmem_cache *cachep);
35#else
36static __always_inline void *
37kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
32{ 38{
39 return kmem_cache_alloc(cachep, flags);
40}
41static inline size_t slab_buffer_size(struct kmem_cache *cachep)
42{
43 return 0;
44}
45#endif
46
47static __always_inline void *kmalloc(size_t size, gfp_t flags)
48{
49 struct kmem_cache *cachep;
50 void *ret;
51
33 if (__builtin_constant_p(size)) { 52 if (__builtin_constant_p(size)) {
34 int i = 0; 53 int i = 0;
35 54
@@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
47found: 66found:
48#ifdef CONFIG_ZONE_DMA 67#ifdef CONFIG_ZONE_DMA
49 if (flags & GFP_DMA) 68 if (flags & GFP_DMA)
50 return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, 69 cachep = malloc_sizes[i].cs_dmacachep;
51 flags); 70 else
52#endif 71#endif
53 return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); 72 cachep = malloc_sizes[i].cs_cachep;
73
74 ret = kmem_cache_alloc_notrace(cachep, flags);
75
76 trace_kmalloc(_THIS_IP_, ret,
77 size, slab_buffer_size(cachep), flags);
78
79 return ret;
54 } 80 }
55 return __kmalloc(size, flags); 81 return __kmalloc(size, flags);
56} 82}
@@ -59,8 +85,25 @@ found:
59extern void *__kmalloc_node(size_t size, gfp_t flags, int node); 85extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
60extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 86extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
61 87
62static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 88#ifdef CONFIG_KMEMTRACE
89extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
90 gfp_t flags,
91 int nodeid);
92#else
93static __always_inline void *
94kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
95 gfp_t flags,
96 int nodeid)
97{
98 return kmem_cache_alloc_node(cachep, flags, nodeid);
99}
100#endif
101
102static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63{ 103{
104 struct kmem_cache *cachep;
105 void *ret;
106
64 if (__builtin_constant_p(size)) { 107 if (__builtin_constant_p(size)) {
65 int i = 0; 108 int i = 0;
66 109
@@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
78found: 121found:
79#ifdef CONFIG_ZONE_DMA 122#ifdef CONFIG_ZONE_DMA
80 if (flags & GFP_DMA) 123 if (flags & GFP_DMA)
81 return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, 124 cachep = malloc_sizes[i].cs_dmacachep;
82 flags, node); 125 else
83#endif 126#endif
84 return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, 127 cachep = malloc_sizes[i].cs_cachep;
85 flags, node); 128
129 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
130
131 trace_kmalloc_node(_THIS_IP_, ret,
132 size, slab_buffer_size(cachep),
133 flags, node);
134
135 return ret;
86 } 136 }
87 return __kmalloc_node(size, flags, node); 137 return __kmalloc_node(size, flags, node);
88} 138}
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab9..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
3 3
4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
5 5
6static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 6static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
7 gfp_t flags)
7{ 8{
8 return kmem_cache_alloc_node(cachep, flags, -1); 9 return kmem_cache_alloc_node(cachep, flags, -1);
9} 10}
10 11
11void *__kmalloc_node(size_t size, gfp_t flags, int node); 12void *__kmalloc_node(size_t size, gfp_t flags, int node);
12 13
13static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 14static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
14{ 15{
15 return __kmalloc_node(size, flags, node); 16 return __kmalloc_node(size, flags, node);
16} 17}
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
23 * kmalloc is the normal method of allocating memory 24 * kmalloc is the normal method of allocating memory
24 * in the kernel. 25 * in the kernel.
25 */ 26 */
26static inline void *kmalloc(size_t size, gfp_t flags) 27static __always_inline void *kmalloc(size_t size, gfp_t flags)
27{ 28{
28 return __kmalloc_node(size, flags, -1); 29 return __kmalloc_node(size, flags, -1);
29} 30}
30 31
31static inline void *__kmalloc(size_t size, gfp_t flags) 32static __always_inline void *__kmalloc(size_t size, gfp_t flags)
32{ 33{
33 return kmalloc(size, flags); 34 return kmalloc(size, flags);
34} 35}
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
new file mode 100644
index 000000000000..85958277f83d
--- /dev/null
+++ b/include/linux/slow-work.h
@@ -0,0 +1,95 @@
1/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 *
11 * See Documentation/slow-work.txt
12 */
13
14#ifndef _LINUX_SLOW_WORK_H
15#define _LINUX_SLOW_WORK_H
16
17#ifdef CONFIG_SLOW_WORK
18
19#include <linux/sysctl.h>
20
21struct slow_work;
22
23/*
24 * The operations used to support slow work items
25 */
26struct slow_work_ops {
27 /* get a ref on a work item
28 * - return 0 if successful, -ve if not
29 */
30 int (*get_ref)(struct slow_work *work);
31
32 /* discard a ref to a work item */
33 void (*put_ref)(struct slow_work *work);
34
35 /* execute a work item */
36 void (*execute)(struct slow_work *work);
37};
38
39/*
40 * A slow work item
41 * - A reference is held on the parent object by the thread pool when it is
42 * queued
43 */
44struct slow_work {
45 unsigned long flags;
46#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
47#define SLOW_WORK_EXECUTING 1 /* item currently executing */
48#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
49#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
50 const struct slow_work_ops *ops; /* operations table for this item */
51 struct list_head link; /* link in queue */
52};
53
54/**
55 * slow_work_init - Initialise a slow work item
56 * @work: The work item to initialise
57 * @ops: The operations to use to handle the slow work item
58 *
59 * Initialise a slow work item.
60 */
61static inline void slow_work_init(struct slow_work *work,
62 const struct slow_work_ops *ops)
63{
64 work->flags = 0;
65 work->ops = ops;
66 INIT_LIST_HEAD(&work->link);
67}
68
69/**
 70 * vslow_work_init - Initialise a very slow work item
71 * @work: The work item to initialise
72 * @ops: The operations to use to handle the slow work item
73 *
74 * Initialise a very slow work item. This item will be restricted such that
75 * only a certain number of the pool threads will be able to execute items of
76 * this type.
77 */
78static inline void vslow_work_init(struct slow_work *work,
79 const struct slow_work_ops *ops)
80{
81 work->flags = 1 << SLOW_WORK_VERY_SLOW;
82 work->ops = ops;
83 INIT_LIST_HEAD(&work->link);
84}
85
86extern int slow_work_enqueue(struct slow_work *work);
87extern int slow_work_register_user(void);
88extern void slow_work_unregister_user(void);
89
90#ifdef CONFIG_SYSCTL
91extern ctl_table slow_work_sysctls[];
92#endif
93
94#endif /* CONFIG_SLOW_WORK */
95#endif /* _LINUX_SLOW_WORK_H */
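
The header above is the entire client-facing API of the new slow-work facility: a user calls slow_work_register_user() once, embeds a struct slow_work in the object that owns the operation, initialises it with slow_work_init() (or vslow_work_init() for the throttled "very slow" class) and queues it with slow_work_enqueue(); get_ref/put_ref pin the owning object while the item is queued or executing. A minimal client sketch under those assumptions; my_object, my_object_put() and my_object_do_lookup() are hypothetical and not part of this patch:

/* Sketch only: assumes slow_work_register_user() was called at module init. */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slow-work.h>

struct my_object {                      /* hypothetical refcounted object */
        struct kref             ref;
        struct slow_work        work;   /* embedded slow work item */
};

static int my_get_ref(struct slow_work *work)
{
        kref_get(&container_of(work, struct my_object, work)->ref);
        return 0;                       /* 0 on success, -ve on failure */
}

static void my_put_ref(struct slow_work *work)
{
        my_object_put(container_of(work, struct my_object, work));
}

static void my_execute(struct slow_work *work)
{
        struct my_object *obj = container_of(work, struct my_object, work);

        my_object_do_lookup(obj);       /* the actual slow operation */
}

static const struct slow_work_ops my_slow_work_ops = {
        .get_ref        = my_get_ref,
        .put_ref        = my_put_ref,
        .execute        = my_execute,
};

static int my_queue(struct my_object *obj)
{
        slow_work_init(&obj->work, &my_slow_work_ops);
        return slow_work_enqueue(&obj->work);
}
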
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e37b6aa8a9fb..5046f90c1171 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/kobject.h> 12#include <linux/kobject.h>
13#include <trace/kmemtrace.h>
13 14
14enum stat_item { 15enum stat_item {
15 ALLOC_FASTPATH, /* Allocation from cpu slab */ 16 ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -217,13 +218,30 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
217void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 218void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
218void *__kmalloc(size_t size, gfp_t flags); 219void *__kmalloc(size_t size, gfp_t flags);
219 220
221#ifdef CONFIG_KMEMTRACE
222extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
223#else
224static __always_inline void *
225kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
226{
227 return kmem_cache_alloc(s, gfpflags);
228}
229#endif
230
220static __always_inline void *kmalloc_large(size_t size, gfp_t flags) 231static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
221{ 232{
222 return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); 233 unsigned int order = get_order(size);
234 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
235
236 trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
237
238 return ret;
223} 239}
224 240
225static __always_inline void *kmalloc(size_t size, gfp_t flags) 241static __always_inline void *kmalloc(size_t size, gfp_t flags)
226{ 242{
243 void *ret;
244
227 if (__builtin_constant_p(size)) { 245 if (__builtin_constant_p(size)) {
228 if (size > SLUB_MAX_SIZE) 246 if (size > SLUB_MAX_SIZE)
229 return kmalloc_large(size, flags); 247 return kmalloc_large(size, flags);
@@ -234,7 +252,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
234 if (!s) 252 if (!s)
235 return ZERO_SIZE_PTR; 253 return ZERO_SIZE_PTR;
236 254
237 return kmem_cache_alloc(s, flags); 255 ret = kmem_cache_alloc_notrace(s, flags);
256
257 trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
258
259 return ret;
238 } 260 }
239 } 261 }
240 return __kmalloc(size, flags); 262 return __kmalloc(size, flags);
@@ -244,8 +266,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
244void *__kmalloc_node(size_t size, gfp_t flags, int node); 266void *__kmalloc_node(size_t size, gfp_t flags, int node);
245void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 267void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
246 268
269#ifdef CONFIG_KMEMTRACE
270extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
271 gfp_t gfpflags,
272 int node);
273#else
274static __always_inline void *
275kmem_cache_alloc_node_notrace(struct kmem_cache *s,
276 gfp_t gfpflags,
277 int node)
278{
279 return kmem_cache_alloc_node(s, gfpflags, node);
280}
281#endif
282
247static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) 283static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
248{ 284{
285 void *ret;
286
249 if (__builtin_constant_p(size) && 287 if (__builtin_constant_p(size) &&
250 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { 288 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
251 struct kmem_cache *s = kmalloc_slab(size); 289 struct kmem_cache *s = kmalloc_slab(size);
@@ -253,7 +291,12 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
253 if (!s) 291 if (!s)
254 return ZERO_SIZE_PTR; 292 return ZERO_SIZE_PTR;
255 293
256 return kmem_cache_alloc_node(s, flags, node); 294 ret = kmem_cache_alloc_node_notrace(s, flags, node);
295
296 trace_kmalloc_node(_THIS_IP_, ret,
297 size, s->size, flags, node);
298
299 return ret;
257 } 300 }
258 return __kmalloc_node(size, flags, node); 301 return __kmalloc_node(size, flags, node);
259} 302}
diff --git a/include/linux/smp.h b/include/linux/smp.h
index bbacb7baa446..a69db820eed6 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -38,7 +38,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
38/* 38/*
39 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. 39 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
40 * (defined in asm header): 40 * (defined in asm header):
41 */ 41 */
42 42
43/* 43/*
44 * stops all CPUs but the current one: 44 * stops all CPUs but the current one:
@@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
82 return 0; 82 return 0;
83} 83}
84 84
85void __smp_call_function_single(int cpuid, struct call_single_data *data); 85void __smp_call_function_single(int cpuid, struct call_single_data *data,
86 int wait);
86 87
87/* 88/*
88 * Generic and arch helpers 89 * Generic and arch helpers
@@ -121,6 +122,8 @@ extern unsigned int setup_max_cpus;
121 122
122#else /* !SMP */ 123#else /* !SMP */
123 124
125static inline void smp_send_stop(void) { }
126
124/* 127/*
125 * These macros fold the SMP functionality into a single CPU system 128 * These macros fold the SMP functionality into a single CPU system
126 */ 129 */
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index f41ffd7c2dd9..34c4475ac4a2 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -103,6 +103,14 @@
103#define SONYPI_EVENT_WIRELESS_OFF 61 103#define SONYPI_EVENT_WIRELESS_OFF 61
104#define SONYPI_EVENT_ZOOM_IN_PRESSED 62 104#define SONYPI_EVENT_ZOOM_IN_PRESSED 62
105#define SONYPI_EVENT_ZOOM_OUT_PRESSED 63 105#define SONYPI_EVENT_ZOOM_OUT_PRESSED 63
106#define SONYPI_EVENT_CD_EJECT_PRESSED 64
107#define SONYPI_EVENT_MODEKEY_PRESSED 65
108#define SONYPI_EVENT_PKEY_P4 66
109#define SONYPI_EVENT_PKEY_P5 67
110#define SONYPI_EVENT_SETTINGKEY_PRESSED 68
111#define SONYPI_EVENT_VOLUME_INC_PRESSED 69
112#define SONYPI_EVENT_VOLUME_DEC_PRESSED 70
113#define SONYPI_EVENT_BRIGHTNESS_PRESSED 71
106 114
107/* get/set brightness */ 115/* get/set brightness */
108#define SONYPI_IOCGBRT _IOR('v', 0, __u8) 116#define SONYPI_IOCGBRT _IOR('v', 0, __u8)
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h
index 1085212c446e..306e7b1c69ed 100644
--- a/include/linux/spi/eeprom.h
+++ b/include/linux/spi/eeprom.h
@@ -1,6 +1,8 @@
1#ifndef __LINUX_SPI_EEPROM_H 1#ifndef __LINUX_SPI_EEPROM_H
2#define __LINUX_SPI_EEPROM_H 2#define __LINUX_SPI_EEPROM_H
3 3
4#include <linux/memory.h>
5
4/* 6/*
5 * Put one of these structures in platform_data for SPI EEPROMS handled 7 * Put one of these structures in platform_data for SPI EEPROMS handled
6 * by the "at25" driver. On SPI, most EEPROMS understand the same core 8 * by the "at25" driver. On SPI, most EEPROMS understand the same core
@@ -17,6 +19,10 @@ struct spi_eeprom {
17#define EE_ADDR2 0x0002 /* 16 bit addrs */ 19#define EE_ADDR2 0x0002 /* 16 bit addrs */
18#define EE_ADDR3 0x0004 /* 24 bit addrs */ 20#define EE_ADDR3 0x0004 /* 24 bit addrs */
19#define EE_READONLY 0x0008 /* disallow writes */ 21#define EE_READONLY 0x0008 /* disallow writes */
22
23 /* for exporting this chip's data to other kernel code */
24 void (*setup)(struct memory_accessor *mem, void *context);
25 void *context;
20}; 26};
21 27
22#endif /* __LINUX_SPI_EEPROM_H */ 28#endif /* __LINUX_SPI_EEPROM_H */
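
The new setup callback lets board code get hold of the at25 driver's memory_accessor once the chip has probed, so other kernel code can read or write the EEPROM without going through sysfs. A hedged sketch of the platform data a board file might supply; only the spi_eeprom fields come from this header, and board_set_mac_address() is a hypothetical board helper:

#include <linux/memory.h>
#include <linux/spi/eeprom.h>

/* Hypothetical hook: pull a MAC address out of the EEPROM as soon as
 * the at25 driver hands us an accessor for it. */
static void board_eeprom_setup(struct memory_accessor *mem, void *context)
{
        char mac[6];

        if (mem->read(mem, mac, 0, sizeof(mac)) == sizeof(mac))
                board_set_mac_address(mac, context);    /* hypothetical */
}

static struct spi_eeprom board_eeprom = {
        .byte_len       = 32768,
        .name           = "at25256",
        .flags          = EE_ADDR2,
        .setup          = board_eeprom_setup,
        .context        = NULL,
};
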
diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h
index 0f01a0f1f40c..ca6782ee4b9f 100644
--- a/include/linux/spi/spi_gpio.h
+++ b/include/linux/spi/spi_gpio.h
@@ -25,10 +25,16 @@
25 * ... 25 * ...
26 * }; 26 * };
27 * 27 *
28 * If chipselect is not used (there's only one device on the bus), assign
29 * SPI_GPIO_NO_CHIPSELECT to the controller_data:
30 * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT;
31 *
28 * If the bitbanged bus is later switched to a "native" controller, 32 * If the bitbanged bus is later switched to a "native" controller,
29 * that platform_device and controller_data should be removed. 33 * that platform_device and controller_data should be removed.
30 */ 34 */
31 35
36#define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l)
37
32/** 38/**
33 * struct spi_gpio_platform_data - parameter for bitbanged SPI master 39 * struct spi_gpio_platform_data - parameter for bitbanged SPI master
34 * @sck: number of the GPIO used for clock output 40 * @sck: number of the GPIO used for clock output
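
Tying the comment additions together, a board with a single device on the bitbanged bus would pass the new constant through controller_data in its spi_board_info; the device name, bus number and speed below are purely illustrative:

#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>

/* One device on the bus, so no chipselect GPIO is wired up at all. */
static struct spi_board_info board_spi_devices[] = {
        {
                .modalias        = "some-spi-sensor",   /* illustrative */
                .max_speed_hz    = 1000000,
                .bus_num         = 1,
                .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT,
        },
};
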
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index a0c66a2e00ad..252b245cfcf4 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -153,9 +153,11 @@ do { \
153 extern int _raw_spin_trylock(spinlock_t *lock); 153 extern int _raw_spin_trylock(spinlock_t *lock);
154 extern void _raw_spin_unlock(spinlock_t *lock); 154 extern void _raw_spin_unlock(spinlock_t *lock);
155 extern void _raw_read_lock(rwlock_t *lock); 155 extern void _raw_read_lock(rwlock_t *lock);
156#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
156 extern int _raw_read_trylock(rwlock_t *lock); 157 extern int _raw_read_trylock(rwlock_t *lock);
157 extern void _raw_read_unlock(rwlock_t *lock); 158 extern void _raw_read_unlock(rwlock_t *lock);
158 extern void _raw_write_lock(rwlock_t *lock); 159 extern void _raw_write_lock(rwlock_t *lock);
160#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
159 extern int _raw_write_trylock(rwlock_t *lock); 161 extern int _raw_write_trylock(rwlock_t *lock);
160 extern void _raw_write_unlock(rwlock_t *lock); 162 extern void _raw_write_unlock(rwlock_t *lock);
161#else 163#else
@@ -165,9 +167,13 @@ do { \
165# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) 167# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
166# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) 168# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
167# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) 169# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
170# define _raw_read_lock_flags(lock, flags) \
171 __raw_read_lock_flags(&(lock)->raw_lock, *(flags))
168# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) 172# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
169# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) 173# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
170# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) 174# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
175# define _raw_write_lock_flags(lock, flags) \
176 __raw_write_lock_flags(&(lock)->raw_lock, *(flags))
171# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) 177# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
172# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) 178# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
173#endif 179#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index d18fc198aa2f..489019ef1694 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -10,8 +10,10 @@
10#include <linux/compiler.h> /* for inline */ 10#include <linux/compiler.h> /* for inline */
11#include <linux/types.h> /* for size_t */ 11#include <linux/types.h> /* for size_t */
12#include <linux/stddef.h> /* for NULL */ 12#include <linux/stddef.h> /* for NULL */
13#include <stdarg.h>
13 14
14extern char *strndup_user(const char __user *, long); 15extern char *strndup_user(const char __user *, long);
16extern void *memdup_user(const void __user *, size_t);
15 17
16/* 18/*
17 * Include machine specific inline routines 19 * Include machine specific inline routines
@@ -111,8 +113,23 @@ extern void argv_free(char **argv);
111 113
112extern bool sysfs_streq(const char *s1, const char *s2); 114extern bool sysfs_streq(const char *s1, const char *s2);
113 115
116#ifdef CONFIG_BINARY_PRINTF
117int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
118int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
119int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
120#endif
121
114extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, 122extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
115 const void *from, size_t available); 123 const void *from, size_t available);
116 124
125/**
126 * strstarts - does @str start with @prefix?
127 * @str: string to examine
128 * @prefix: prefix to look for.
129 */
130static inline bool strstarts(const char *str, const char *prefix)
131{
132 return strncmp(str, prefix, strlen(prefix)) == 0;
133}
117#endif 134#endif
118#endif /* _LINUX_STRING_H_ */ 135#endif /* _LINUX_STRING_H_ */
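
Both string.h additions are small conveniences: memdup_user() allocates a kernel buffer and copies user data into it (returning an ERR_PTR on failure, like the existing strndup_user()), and strstarts() is a readable prefix test. A brief sketch of a typical call site, assuming a hypothetical command handler; do_start() and do_stop() are made up:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>

static int handle_command(const char __user *ubuf, size_t len)
{
        char *cmd;
        int ret;

        /* strndup_user() NUL-terminates; memdup_user() (added above) is
         * the analogous helper for binary buffers. */
        cmd = strndup_user(ubuf, len);
        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        if (strstarts(cmd, "start"))
                ret = do_start();               /* hypothetical */
        else if (strstarts(cmd, "stop"))
                ret = do_stop();                /* hypothetical */
        else
                ret = -EINVAL;

        kfree(cmd);
        return ret;
}
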
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 3435d24bfe55..2a30775959e9 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -24,6 +24,15 @@
24 */ 24 */
25typedef int (*svc_thread_fn)(void *); 25typedef int (*svc_thread_fn)(void *);
26 26
27/* statistics for svc_pool structures */
28struct svc_pool_stats {
29 unsigned long packets;
30 unsigned long sockets_queued;
31 unsigned long threads_woken;
32 unsigned long overloads_avoided;
33 unsigned long threads_timedout;
34};
35
27/* 36/*
28 * 37 *
29 * RPC service thread pool. 38 * RPC service thread pool.
@@ -41,6 +50,8 @@ struct svc_pool {
41 struct list_head sp_sockets; /* pending sockets */ 50 struct list_head sp_sockets; /* pending sockets */
42 unsigned int sp_nrthreads; /* # of threads in pool */ 51 unsigned int sp_nrthreads; /* # of threads in pool */
43 struct list_head sp_all_threads; /* all server threads */ 52 struct list_head sp_all_threads; /* all server threads */
53 int sp_nwaking; /* number of threads woken but not yet active */
54 struct svc_pool_stats sp_stats; /* statistics on pool operation */
44} ____cacheline_aligned_in_smp; 55} ____cacheline_aligned_in_smp;
45 56
46/* 57/*
@@ -69,7 +80,6 @@ struct svc_serv {
69 struct list_head sv_tempsocks; /* all temporary sockets */ 80 struct list_head sv_tempsocks; /* all temporary sockets */
70 int sv_tmpcnt; /* count of temporary sockets */ 81 int sv_tmpcnt; /* count of temporary sockets */
71 struct timer_list sv_temptimer; /* timer for aging temporary sockets */ 82 struct timer_list sv_temptimer; /* timer for aging temporary sockets */
72 sa_family_t sv_family; /* listener's address family */
73 83
74 char * sv_name; /* service name */ 84 char * sv_name; /* service name */
75 85
@@ -84,6 +94,8 @@ struct svc_serv {
84 struct module * sv_module; /* optional module to count when 94 struct module * sv_module; /* optional module to count when
85 * adding threads */ 95 * adding threads */
86 svc_thread_fn sv_function; /* main function for threads */ 96 svc_thread_fn sv_function; /* main function for threads */
97 unsigned int sv_drc_max_pages; /* Total pages for DRC */
98 unsigned int sv_drc_pages_used;/* DRC pages used */
87}; 99};
88 100
89/* 101/*
@@ -219,6 +231,7 @@ struct svc_rqst {
219 struct svc_cred rq_cred; /* auth info */ 231 struct svc_cred rq_cred; /* auth info */
220 void * rq_xprt_ctxt; /* transport specific context ptr */ 232 void * rq_xprt_ctxt; /* transport specific context ptr */
221 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ 233 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
234 int rq_usedeferral; /* use deferral */
222 235
223 size_t rq_xprt_hlen; /* xprt header len */ 236 size_t rq_xprt_hlen; /* xprt header len */
224 struct xdr_buf rq_arg; 237 struct xdr_buf rq_arg;
@@ -264,6 +277,7 @@ struct svc_rqst {
264 * cache pages */ 277 * cache pages */
265 wait_queue_head_t rq_wait; /* synchronization */ 278 wait_queue_head_t rq_wait; /* synchronization */
266 struct task_struct *rq_task; /* service thread */ 279 struct task_struct *rq_task; /* service thread */
280 int rq_waking; /* 1 if thread is being woken */
267}; 281};
268 282
269/* 283/*
@@ -385,19 +399,20 @@ struct svc_procedure {
385/* 399/*
386 * Function prototypes. 400 * Function prototypes.
387 */ 401 */
388struct svc_serv *svc_create(struct svc_program *, unsigned int, sa_family_t, 402struct svc_serv *svc_create(struct svc_program *, unsigned int,
389 void (*shutdown)(struct svc_serv *)); 403 void (*shutdown)(struct svc_serv *));
390struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, 404struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
391 struct svc_pool *pool); 405 struct svc_pool *pool);
392void svc_exit_thread(struct svc_rqst *); 406void svc_exit_thread(struct svc_rqst *);
393struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, 407struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
394 sa_family_t, void (*shutdown)(struct svc_serv *), 408 void (*shutdown)(struct svc_serv *),
395 svc_thread_fn, struct module *); 409 svc_thread_fn, struct module *);
396int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); 410int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
411int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
397void svc_destroy(struct svc_serv *); 412void svc_destroy(struct svc_serv *);
398int svc_process(struct svc_rqst *); 413int svc_process(struct svc_rqst *);
399int svc_register(const struct svc_serv *, const unsigned short, 414int svc_register(const struct svc_serv *, const int,
400 const unsigned short); 415 const unsigned short, const unsigned short);
401 416
402void svc_wake_up(struct svc_serv *); 417void svc_wake_up(struct svc_serv *);
403void svc_reserve(struct svc_rqst *rqstp, int space); 418void svc_reserve(struct svc_rqst *rqstp, int space);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index 0127daca4354..0d9cb6ef28b0 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -71,7 +71,8 @@ int svc_reg_xprt_class(struct svc_xprt_class *);
71void svc_unreg_xprt_class(struct svc_xprt_class *); 71void svc_unreg_xprt_class(struct svc_xprt_class *);
72void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *, 72void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *,
73 struct svc_serv *); 73 struct svc_serv *);
74int svc_create_xprt(struct svc_serv *, char *, unsigned short, int); 74int svc_create_xprt(struct svc_serv *, const char *, const int,
75 const unsigned short, int);
75void svc_xprt_enqueue(struct svc_xprt *xprt); 76void svc_xprt_enqueue(struct svc_xprt *xprt);
76void svc_xprt_received(struct svc_xprt *); 77void svc_xprt_received(struct svc_xprt *);
77void svc_xprt_put(struct svc_xprt *xprt); 78void svc_xprt_put(struct svc_xprt *xprt);
@@ -80,7 +81,8 @@ void svc_close_xprt(struct svc_xprt *xprt);
80void svc_delete_xprt(struct svc_xprt *xprt); 81void svc_delete_xprt(struct svc_xprt *xprt);
81int svc_port_is_privileged(struct sockaddr *sin); 82int svc_port_is_privileged(struct sockaddr *sin);
82int svc_print_xprts(char *buf, int maxlen); 83int svc_print_xprts(char *buf, int maxlen);
83struct svc_xprt *svc_find_xprt(struct svc_serv *, char *, int, int); 84struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name,
85 const sa_family_t af, const unsigned short port);
84int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen); 86int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen);
85 87
86static inline void svc_xprt_get(struct svc_xprt *xprt) 88static inline void svc_xprt_get(struct svc_xprt *xprt)
@@ -88,29 +90,32 @@ static inline void svc_xprt_get(struct svc_xprt *xprt)
88 kref_get(&xprt->xpt_ref); 90 kref_get(&xprt->xpt_ref);
89} 91}
90static inline void svc_xprt_set_local(struct svc_xprt *xprt, 92static inline void svc_xprt_set_local(struct svc_xprt *xprt,
91 struct sockaddr *sa, int salen) 93 const struct sockaddr *sa,
94 const size_t salen)
92{ 95{
93 memcpy(&xprt->xpt_local, sa, salen); 96 memcpy(&xprt->xpt_local, sa, salen);
94 xprt->xpt_locallen = salen; 97 xprt->xpt_locallen = salen;
95} 98}
96static inline void svc_xprt_set_remote(struct svc_xprt *xprt, 99static inline void svc_xprt_set_remote(struct svc_xprt *xprt,
97 struct sockaddr *sa, int salen) 100 const struct sockaddr *sa,
101 const size_t salen)
98{ 102{
99 memcpy(&xprt->xpt_remote, sa, salen); 103 memcpy(&xprt->xpt_remote, sa, salen);
100 xprt->xpt_remotelen = salen; 104 xprt->xpt_remotelen = salen;
101} 105}
102static inline unsigned short svc_addr_port(struct sockaddr *sa) 106static inline unsigned short svc_addr_port(const struct sockaddr *sa)
103{ 107{
104 unsigned short ret = 0; 108 const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;
109 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa;
110
105 switch (sa->sa_family) { 111 switch (sa->sa_family) {
106 case AF_INET: 112 case AF_INET:
107 ret = ntohs(((struct sockaddr_in *)sa)->sin_port); 113 return ntohs(sin->sin_port);
108 break;
109 case AF_INET6: 114 case AF_INET6:
110 ret = ntohs(((struct sockaddr_in6 *)sa)->sin6_port); 115 return ntohs(sin6->sin6_port);
111 break;
112 } 116 }
113 return ret; 117
118 return 0;
114} 119}
115 120
116static inline size_t svc_addr_len(struct sockaddr *sa) 121static inline size_t svc_addr_len(struct sockaddr *sa)
@@ -124,36 +129,39 @@ static inline size_t svc_addr_len(struct sockaddr *sa)
124 return -EAFNOSUPPORT; 129 return -EAFNOSUPPORT;
125} 130}
126 131
127static inline unsigned short svc_xprt_local_port(struct svc_xprt *xprt) 132static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt)
128{ 133{
129 return svc_addr_port((struct sockaddr *)&xprt->xpt_local); 134 return svc_addr_port((const struct sockaddr *)&xprt->xpt_local);
130} 135}
131 136
132static inline unsigned short svc_xprt_remote_port(struct svc_xprt *xprt) 137static inline unsigned short svc_xprt_remote_port(const struct svc_xprt *xprt)
133{ 138{
134 return svc_addr_port((struct sockaddr *)&xprt->xpt_remote); 139 return svc_addr_port((const struct sockaddr *)&xprt->xpt_remote);
135} 140}
136 141
137static inline char *__svc_print_addr(struct sockaddr *addr, 142static inline char *__svc_print_addr(const struct sockaddr *addr,
138 char *buf, size_t len) 143 char *buf, const size_t len)
139{ 144{
145 const struct sockaddr_in *sin = (const struct sockaddr_in *)addr;
146 const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr;
147
140 switch (addr->sa_family) { 148 switch (addr->sa_family) {
141 case AF_INET: 149 case AF_INET:
142 snprintf(buf, len, "%pI4, port=%u", 150 snprintf(buf, len, "%pI4, port=%u", &sin->sin_addr,
143 &((struct sockaddr_in *)addr)->sin_addr, 151 ntohs(sin->sin_port));
144 ntohs(((struct sockaddr_in *) addr)->sin_port));
145 break; 152 break;
146 153
147 case AF_INET6: 154 case AF_INET6:
148 snprintf(buf, len, "%pI6, port=%u", 155 snprintf(buf, len, "%pI6, port=%u",
149 &((struct sockaddr_in6 *)addr)->sin6_addr, 156 &sin6->sin6_addr,
150 ntohs(((struct sockaddr_in6 *) addr)->sin6_port)); 157 ntohs(sin6->sin6_port));
151 break; 158 break;
152 159
153 default: 160 default:
154 snprintf(buf, len, "unknown address type: %d", addr->sa_family); 161 snprintf(buf, len, "unknown address type: %d", addr->sa_family);
155 break; 162 break;
156 } 163 }
164
157 return buf; 165 return buf;
158} 166}
159#endif /* SUNRPC_SVC_XPRT_H */ 167#endif /* SUNRPC_SVC_XPRT_H */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 49e1eb454465..d8910b68e1bd 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -69,27 +69,27 @@ struct xdr_buf {
69 * pre-xdr'ed macros. 69 * pre-xdr'ed macros.
70 */ 70 */
71 71
72#define xdr_zero __constant_htonl(0) 72#define xdr_zero cpu_to_be32(0)
73#define xdr_one __constant_htonl(1) 73#define xdr_one cpu_to_be32(1)
74#define xdr_two __constant_htonl(2) 74#define xdr_two cpu_to_be32(2)
75 75
76#define rpc_success __constant_htonl(RPC_SUCCESS) 76#define rpc_success cpu_to_be32(RPC_SUCCESS)
77#define rpc_prog_unavail __constant_htonl(RPC_PROG_UNAVAIL) 77#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL)
78#define rpc_prog_mismatch __constant_htonl(RPC_PROG_MISMATCH) 78#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH)
79#define rpc_proc_unavail __constant_htonl(RPC_PROC_UNAVAIL) 79#define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL)
80#define rpc_garbage_args __constant_htonl(RPC_GARBAGE_ARGS) 80#define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS)
81#define rpc_system_err __constant_htonl(RPC_SYSTEM_ERR) 81#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR)
82#define rpc_drop_reply __constant_htonl(RPC_DROP_REPLY) 82#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY)
83 83
84#define rpc_auth_ok __constant_htonl(RPC_AUTH_OK) 84#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK)
85#define rpc_autherr_badcred __constant_htonl(RPC_AUTH_BADCRED) 85#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED)
86#define rpc_autherr_rejectedcred __constant_htonl(RPC_AUTH_REJECTEDCRED) 86#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED)
87#define rpc_autherr_badverf __constant_htonl(RPC_AUTH_BADVERF) 87#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF)
88#define rpc_autherr_rejectedverf __constant_htonl(RPC_AUTH_REJECTEDVERF) 88#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF)
89#define rpc_autherr_tooweak __constant_htonl(RPC_AUTH_TOOWEAK) 89#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK)
90#define rpcsec_gsserr_credproblem __constant_htonl(RPCSEC_GSS_CREDPROBLEM) 90#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM)
91#define rpcsec_gsserr_ctxproblem __constant_htonl(RPCSEC_GSS_CTXPROBLEM) 91#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM)
92#define rpc_autherr_oldseqnum __constant_htonl(101) 92#define rpc_autherr_oldseqnum cpu_to_be32(101)
93 93
94/* 94/*
95 * Miscellaneous XDR helper functions 95 * Miscellaneous XDR helper functions
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 11fc71d50c1e..1758d9f5b5c3 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -235,6 +235,7 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *
235 */ 235 */
236int xprt_register_transport(struct xprt_class *type); 236int xprt_register_transport(struct xprt_class *type);
237int xprt_unregister_transport(struct xprt_class *type); 237int xprt_unregister_transport(struct xprt_class *type);
238int xprt_load_transport(const char *);
238void xprt_set_retrans_timeout_def(struct rpc_task *task); 239void xprt_set_retrans_timeout_def(struct rpc_task *task);
239void xprt_set_retrans_timeout_rtt(struct rpc_task *task); 240void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
240void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); 241void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
@@ -259,6 +260,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
259#define XPRT_BOUND (4) 260#define XPRT_BOUND (4)
260#define XPRT_BINDING (5) 261#define XPRT_BINDING (5)
261#define XPRT_CLOSING (6) 262#define XPRT_CLOSING (6)
263#define XPRT_CONNECTION_ABORT (7)
262 264
263static inline void xprt_set_connected(struct rpc_xprt *xprt) 265static inline void xprt_set_connected(struct rpc_xprt *xprt)
264{ 266{
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index c7d9bb1832ba..3e3a4364cbff 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -1,9 +1,6 @@
1#ifndef _LINUX_SUSPEND_H 1#ifndef _LINUX_SUSPEND_H
2#define _LINUX_SUSPEND_H 2#define _LINUX_SUSPEND_H
3 3
4#if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
5#include <asm/suspend.h>
6#endif
7#include <linux/swap.h> 4#include <linux/swap.h>
8#include <linux/notifier.h> 5#include <linux/notifier.h>
9#include <linux/init.h> 6#include <linux/init.h>
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d30215578877..62d81435347a 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -212,7 +212,7 @@ static inline void lru_cache_add_active_file(struct page *page)
212 212
213/* linux/mm/vmscan.c */ 213/* linux/mm/vmscan.c */
214extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 214extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
215 gfp_t gfp_mask); 215 gfp_t gfp_mask, nodemask_t *mask);
216extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 216extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
217 gfp_t gfp_mask, bool noswap, 217 gfp_t gfp_mask, bool noswap,
218 unsigned int swappiness); 218 unsigned int swappiness);
@@ -382,6 +382,11 @@ static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
382 return NULL; 382 return NULL;
383} 383}
384 384
385static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
386{
387 return 0;
388}
389
385static inline struct page *lookup_swap_cache(swp_entry_t swp) 390static inline struct page *lookup_swap_cache(swp_entry_t swp)
386{ 391{
387 return NULL; 392 return NULL;
diff --git a/include/linux/synclink.h b/include/linux/synclink.h
index 99b8bdb17b2b..0ff2779c44d0 100644
--- a/include/linux/synclink.h
+++ b/include/linux/synclink.h
@@ -125,6 +125,7 @@
125#define MGSL_MODE_MONOSYNC 3 125#define MGSL_MODE_MONOSYNC 3
126#define MGSL_MODE_BISYNC 4 126#define MGSL_MODE_BISYNC 4
127#define MGSL_MODE_RAW 6 127#define MGSL_MODE_RAW 6
128#define MGSL_MODE_BASE_CLOCK 7
128 129
129#define MGSL_BUS_TYPE_ISA 1 130#define MGSL_BUS_TYPE_ISA 1
130#define MGSL_BUS_TYPE_EISA 2 131#define MGSL_BUS_TYPE_EISA 2
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index f9f900cfd066..6470f74074af 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -65,6 +65,7 @@ struct old_linux_dirent;
65#include <asm/signal.h> 65#include <asm/signal.h>
66#include <linux/quota.h> 66#include <linux/quota.h>
67#include <linux/key.h> 67#include <linux/key.h>
68#include <linux/ftrace.h>
68 69
69#define __SC_DECL1(t1, a1) t1 a1 70#define __SC_DECL1(t1, a1) t1 a1
70#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) 71#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__)
@@ -95,7 +96,46 @@ struct old_linux_dirent;
95#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) 96#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
96#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) 97#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
97 98
99#ifdef CONFIG_FTRACE_SYSCALLS
100#define __SC_STR_ADECL1(t, a) #a
101#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
102#define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__)
103#define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__)
104#define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__)
105#define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__)
106
107#define __SC_STR_TDECL1(t, a) #t
108#define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__)
109#define __SC_STR_TDECL3(t, a, ...) #t, __SC_STR_TDECL2(__VA_ARGS__)
110#define __SC_STR_TDECL4(t, a, ...) #t, __SC_STR_TDECL3(__VA_ARGS__)
111#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
112#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
113
114#define SYSCALL_METADATA(sname, nb) \
115 static const struct syscall_metadata __used \
116 __attribute__((__aligned__(4))) \
117 __attribute__((section("__syscalls_metadata"))) \
118 __syscall_meta_##sname = { \
119 .name = "sys"#sname, \
120 .nb_args = nb, \
121 .types = types_##sname, \
122 .args = args_##sname, \
123 }
124
125#define SYSCALL_DEFINE0(sname) \
126 static const struct syscall_metadata __used \
127 __attribute__((__aligned__(4))) \
128 __attribute__((section("__syscalls_metadata"))) \
129 __syscall_meta_##sname = { \
130 .name = "sys_"#sname, \
131 .nb_args = 0, \
132 }; \
133 asmlinkage long sys_##sname(void)
134
135#else
98#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) 136#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
137#endif
138
99#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) 139#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
100#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) 140#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
101#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) 141#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
@@ -117,10 +157,26 @@ struct old_linux_dirent;
117#endif 157#endif
118#endif 158#endif
119 159
160#ifdef CONFIG_FTRACE_SYSCALLS
161#define SYSCALL_DEFINEx(x, sname, ...) \
162 static const char *types_##sname[] = { \
163 __SC_STR_TDECL##x(__VA_ARGS__) \
164 }; \
165 static const char *args_##sname[] = { \
166 __SC_STR_ADECL##x(__VA_ARGS__) \
167 }; \
168 SYSCALL_METADATA(sname, x); \
169 __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
170#else
171#define SYSCALL_DEFINEx(x, sname, ...) \
172 __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
173#endif
174
120#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS 175#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
121 176
122#define SYSCALL_DEFINE(name) static inline long SYSC_##name 177#define SYSCALL_DEFINE(name) static inline long SYSC_##name
123#define SYSCALL_DEFINEx(x, name, ...) \ 178
179#define __SYSCALL_DEFINEx(x, name, ...) \
124 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ 180 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \
125 static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ 181 static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \
126 asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ 182 asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \
@@ -134,7 +190,7 @@ struct old_linux_dirent;
134#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ 190#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
135 191
136#define SYSCALL_DEFINE(name) asmlinkage long sys_##name 192#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
137#define SYSCALL_DEFINEx(x, name, ...) \ 193#define __SYSCALL_DEFINEx(x, name, ...) \
138 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) 194 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))
139 195
140#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ 196#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
@@ -461,6 +517,10 @@ asmlinkage long sys_pread64(unsigned int fd, char __user *buf,
461 size_t count, loff_t pos); 517 size_t count, loff_t pos);
462asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, 518asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
463 size_t count, loff_t pos); 519 size_t count, loff_t pos);
520asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
521 unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
522asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
523 unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
464asmlinkage long sys_getcwd(char __user *buf, unsigned long size); 524asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
465asmlinkage long sys_mkdir(const char __user *pathname, int mode); 525asmlinkage long sys_mkdir(const char __user *pathname, int mode);
466asmlinkage long sys_chdir(const char __user *filename); 526asmlinkage long sys_chdir(const char __user *filename);
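
With CONFIG_FTRACE_SYSCALLS enabled, the reworked SYSCALL_DEFINEx() above also emits per-syscall metadata (the types_* and args_* string arrays plus a struct syscall_metadata placed in the __syscalls_metadata section) that the syscall tracer can read; syscall authors keep writing definitions exactly as before. As a hedged illustration, the new sys_preadv prototype would be backed by a definition of roughly this shape, where the body is only a placeholder and do_demo_readv() is hypothetical:

#include <linux/fs.h>
#include <linux/syscalls.h>
#include <linux/uio.h>

SYSCALL_DEFINE5(preadv, unsigned long, fd, const struct iovec __user *, vec,
                unsigned long, vlen, unsigned long, pos_l, unsigned long, pos_h)
{
        loff_t pos = ((loff_t)pos_h << 32) | pos_l;

        /* Body elided: look the file up and hand off to vfs_readv(). */
        return do_demo_readv(fd, vec, vlen, pos);       /* hypothetical */
}
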
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 917707e6151d..1de8b9eb841b 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -27,27 +27,46 @@
27 27
28#include <linux/idr.h> 28#include <linux/idr.h>
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/workqueue.h>
30 31
31struct thermal_zone_device; 32struct thermal_zone_device;
32struct thermal_cooling_device; 33struct thermal_cooling_device;
33 34
35enum thermal_device_mode {
36 THERMAL_DEVICE_DISABLED = 0,
37 THERMAL_DEVICE_ENABLED,
38};
39
40enum thermal_trip_type {
41 THERMAL_TRIP_ACTIVE = 0,
42 THERMAL_TRIP_PASSIVE,
43 THERMAL_TRIP_HOT,
44 THERMAL_TRIP_CRITICAL,
45};
46
34struct thermal_zone_device_ops { 47struct thermal_zone_device_ops {
35 int (*bind) (struct thermal_zone_device *, 48 int (*bind) (struct thermal_zone_device *,
36 struct thermal_cooling_device *); 49 struct thermal_cooling_device *);
37 int (*unbind) (struct thermal_zone_device *, 50 int (*unbind) (struct thermal_zone_device *,
38 struct thermal_cooling_device *); 51 struct thermal_cooling_device *);
39 int (*get_temp) (struct thermal_zone_device *, char *); 52 int (*get_temp) (struct thermal_zone_device *, unsigned long *);
40 int (*get_mode) (struct thermal_zone_device *, char *); 53 int (*get_mode) (struct thermal_zone_device *,
41 int (*set_mode) (struct thermal_zone_device *, const char *); 54 enum thermal_device_mode *);
42 int (*get_trip_type) (struct thermal_zone_device *, int, char *); 55 int (*set_mode) (struct thermal_zone_device *,
43 int (*get_trip_temp) (struct thermal_zone_device *, int, char *); 56 enum thermal_device_mode);
57 int (*get_trip_type) (struct thermal_zone_device *, int,
58 enum thermal_trip_type *);
59 int (*get_trip_temp) (struct thermal_zone_device *, int,
60 unsigned long *);
44 int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); 61 int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
62 int (*notify) (struct thermal_zone_device *, int,
63 enum thermal_trip_type);
45}; 64};
46 65
47struct thermal_cooling_device_ops { 66struct thermal_cooling_device_ops {
48 int (*get_max_state) (struct thermal_cooling_device *, char *); 67 int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
49 int (*get_cur_state) (struct thermal_cooling_device *, char *); 68 int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
50 int (*set_cur_state) (struct thermal_cooling_device *, unsigned int); 69 int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
51}; 70};
52 71
53#define THERMAL_TRIPS_NONE -1 72#define THERMAL_TRIPS_NONE -1
@@ -88,11 +107,19 @@ struct thermal_zone_device {
88 struct device device; 107 struct device device;
89 void *devdata; 108 void *devdata;
90 int trips; 109 int trips;
110 int tc1;
111 int tc2;
112 int passive_delay;
113 int polling_delay;
114 int last_temperature;
115 bool passive;
116 unsigned int forced_passive;
91 struct thermal_zone_device_ops *ops; 117 struct thermal_zone_device_ops *ops;
92 struct list_head cooling_devices; 118 struct list_head cooling_devices;
93 struct idr idr; 119 struct idr idr;
94 struct mutex lock; /* protect cooling devices list */ 120 struct mutex lock; /* protect cooling devices list */
95 struct list_head node; 121 struct list_head node;
122 struct delayed_work poll_queue;
96#if defined(CONFIG_THERMAL_HWMON) 123#if defined(CONFIG_THERMAL_HWMON)
97 struct list_head hwmon_node; 124 struct list_head hwmon_node;
98 struct thermal_hwmon_device *hwmon; 125 struct thermal_hwmon_device *hwmon;
@@ -104,13 +131,16 @@ struct thermal_zone_device {
104struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, 131struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
105 struct 132 struct
106 thermal_zone_device_ops 133 thermal_zone_device_ops
107 *); 134 *, int tc1, int tc2,
135 int passive_freq,
136 int polling_freq);
108void thermal_zone_device_unregister(struct thermal_zone_device *); 137void thermal_zone_device_unregister(struct thermal_zone_device *);
109 138
110int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, 139int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
111 struct thermal_cooling_device *); 140 struct thermal_cooling_device *);
112int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, 141int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
113 struct thermal_cooling_device *); 142 struct thermal_cooling_device *);
143void thermal_zone_device_update(struct thermal_zone_device *);
114struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, 144struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
115 struct 145 struct
116 thermal_cooling_device_ops 146 thermal_cooling_device_ops
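
The thermal API now traffics in typed values (temperatures as unsigned long millidegrees Celsius, modes and trip types as enums) rather than strings, and thermal_zone_device_register() gains the tc1/tc2 coefficients and passive/polling delays that drive the new built-in polling and passive-cooling logic. A hedged registration sketch for a zone with a single critical trip point; demo_read_sensor_millicelsius() and the 90000 value are illustrative only:

#include <linux/err.h>
#include <linux/thermal.h>

static int demo_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
{
        *temp = demo_read_sensor_millicelsius();        /* hypothetical */
        return 0;
}

static int demo_get_trip_type(struct thermal_zone_device *tz, int trip,
                              enum thermal_trip_type *type)
{
        *type = THERMAL_TRIP_CRITICAL;
        return 0;
}

static int demo_get_trip_temp(struct thermal_zone_device *tz, int trip,
                              unsigned long *temp)
{
        *temp = 90000;                  /* 90 degrees C, illustrative */
        return 0;
}

static struct thermal_zone_device_ops demo_ops = {
        .get_temp       = demo_get_temp,
        .get_trip_type  = demo_get_trip_type,
        .get_trip_temp  = demo_get_trip_temp,
};

static struct thermal_zone_device *demo_tz;

static int demo_thermal_register(void)
{
        /* 1 trip, no passive cooling (tc1 = tc2 = passive_delay = 0),
         * poll every 1000 ms so the core calls thermal_zone_device_update(). */
        demo_tz = thermal_zone_device_register("demo", 1, NULL, &demo_ops,
                                               0, 0, 0, 1000);
        return IS_ERR(demo_tz) ? PTR_ERR(demo_tz) : 0;
}
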
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h
index dd253177f65f..3e08a1c86830 100644
--- a/include/linux/timeriomem-rng.h
+++ b/include/linux/timeriomem-rng.h
@@ -14,7 +14,7 @@ struct timeriomem_rng_data {
14 struct completion completion; 14 struct completion completion;
15 unsigned int present:1; 15 unsigned int present:1;
16 16
17 u32 __iomem *address; 17 void __iomem *address;
18 18
19 /* measures in usecs */ 19 /* measures in usecs */
20 unsigned int period; 20 unsigned int period;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a16b9e06f2e5..7402c1a27c4f 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -38,11 +38,7 @@
38#endif 38#endif
39 39
40#ifndef nr_cpus_node 40#ifndef nr_cpus_node
41#define nr_cpus_node(node) \ 41#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
42 ({ \
43 node_to_cpumask_ptr(__tmp__, node); \
44 cpus_weight(*__tmp__); \
45 })
46#endif 42#endif
47 43
48#define for_each_node_with_cpus(node) \ 44#define for_each_node_with_cpus(node) \
@@ -200,4 +196,9 @@ int arch_update_cpu_topology(void);
200#define topology_core_cpumask(cpu) cpumask_of(cpu) 196#define topology_core_cpumask(cpu) cpumask_of(cpu)
201#endif 197#endif
202 198
199/* Returns the number of the current Node. */
200#ifndef numa_node_id
201#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
202#endif
203
203#endif /* _LINUX_TOPOLOGY_H */ 204#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
new file mode 100644
index 000000000000..7a8130384087
--- /dev/null
+++ b/include/linux/trace_clock.h
@@ -0,0 +1,19 @@
1#ifndef _LINUX_TRACE_CLOCK_H
2#define _LINUX_TRACE_CLOCK_H
3
4/*
5 * 3 trace clock variants, with differing scalability/precision
6 * tradeoffs:
7 *
8 * - local: CPU-local trace clock
9 * - medium: scalable global clock with some jitter
10 * - global: globally monotonic, serialized clock
11 */
12#include <linux/compiler.h>
13#include <linux/types.h>
14
15extern u64 notrace trace_clock_local(void);
16extern u64 notrace trace_clock(void);
17extern u64 notrace trace_clock_global(void);
18
19#endif /* _LINUX_TRACE_CLOCK_H */
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 6186a789d6c7..c7aa154f4bfc 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -388,17 +388,14 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
388 * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal 388 * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
389 * @task: task receiving the signal 389 * @task: task receiving the signal
390 * @sig: signal number being sent 390 * @sig: signal number being sent
391 * @handler: %SIG_IGN or %SIG_DFL
392 * 391 *
393 * Return zero iff tracing doesn't care to examine this ignored signal, 392 * Return zero iff tracing doesn't care to examine this ignored signal,
394 * so it can short-circuit normal delivery and never even get queued. 393 * so it can short-circuit normal delivery and never even get queued.
395 * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN.
396 * 394 *
397 * Called with @task->sighand->siglock held. 395 * Called with @task->sighand->siglock held.
398 */ 396 */
399static inline int tracehook_consider_ignored_signal(struct task_struct *task, 397static inline int tracehook_consider_ignored_signal(struct task_struct *task,
400 int sig, 398 int sig)
401 void __user *handler)
402{ 399{
403 return (task_ptrace(task) & PT_PTRACED) != 0; 400 return (task_ptrace(task) & PT_PTRACED) != 0;
404} 401}
@@ -407,19 +404,17 @@ static inline int tracehook_consider_ignored_signal(struct task_struct *task,
407 * tracehook_consider_fatal_signal - suppress special handling of fatal signal 404 * tracehook_consider_fatal_signal - suppress special handling of fatal signal
408 * @task: task receiving the signal 405 * @task: task receiving the signal
409 * @sig: signal number being sent 406 * @sig: signal number being sent
410 * @handler: %SIG_DFL or %SIG_IGN
411 * 407 *
412 * Return nonzero to prevent special handling of this termination signal. 408 * Return nonzero to prevent special handling of this termination signal.
413 * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored, 409 * Normally handler for signal is %SIG_DFL. It can be %SIG_IGN if @sig is
414 * in which case force_sig() is about to reset it to %SIG_DFL. 410 * ignored, in which case force_sig() is about to reset it to %SIG_DFL.
415 * When this returns zero, this signal might cause a quick termination 411 * When this returns zero, this signal might cause a quick termination
416 * that does not give the debugger a chance to intercept the signal. 412 * that does not give the debugger a chance to intercept the signal.
417 * 413 *
418 * Called with or without @task->sighand->siglock held. 414 * Called with or without @task->sighand->siglock held.
419 */ 415 */
420static inline int tracehook_consider_fatal_signal(struct task_struct *task, 416static inline int tracehook_consider_fatal_signal(struct task_struct *task,
421 int sig, 417 int sig)
422 void __user *handler)
423{ 418{
424 return (task_ptrace(task) & PT_PTRACED) != 0; 419 return (task_ptrace(task) & PT_PTRACED) != 0;
425} 420}
@@ -507,7 +502,7 @@ static inline int tracehook_notify_jctl(int notify, int why)
507static inline int tracehook_notify_death(struct task_struct *task, 502static inline int tracehook_notify_death(struct task_struct *task,
508 void **death_cookie, int group_dead) 503 void **death_cookie, int group_dead)
509{ 504{
510 if (task->exit_signal == -1) 505 if (task_detached(task))
511 return task->ptrace ? SIGCHLD : DEATH_REAP; 506 return task->ptrace ? SIGCHLD : DEATH_REAP;
512 507
513 /* 508 /*
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 757005458366..d35a7ee7611f 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -31,8 +31,8 @@ struct tracepoint {
31 * Keep in sync with vmlinux.lds.h. 31 * Keep in sync with vmlinux.lds.h.
32 */ 32 */
33 33
34#define TPPROTO(args...) args 34#define TP_PROTO(args...) args
35#define TPARGS(args...) args 35#define TP_ARGS(args...) args
36 36
37#ifdef CONFIG_TRACEPOINTS 37#ifdef CONFIG_TRACEPOINTS
38 38
@@ -65,7 +65,7 @@ struct tracepoint {
65 { \ 65 { \
66 if (unlikely(__tracepoint_##name.state)) \ 66 if (unlikely(__tracepoint_##name.state)) \
67 __DO_TRACE(&__tracepoint_##name, \ 67 __DO_TRACE(&__tracepoint_##name, \
68 TPPROTO(proto), TPARGS(args)); \ 68 TP_PROTO(proto), TP_ARGS(args)); \
69 } \ 69 } \
70 static inline int register_trace_##name(void (*probe)(proto)) \ 70 static inline int register_trace_##name(void (*probe)(proto)) \
71 { \ 71 { \
@@ -153,4 +153,114 @@ static inline void tracepoint_synchronize_unregister(void)
153 synchronize_sched(); 153 synchronize_sched();
154} 154}
155 155
156#define PARAMS(args...) args
157#define TRACE_FORMAT(name, proto, args, fmt) \
158 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
159
160
161/*
162 * For use with the TRACE_EVENT macro:
163 *
164 * We define a tracepoint, its arguments, its printk format
 165 * and its 'fast binary record' layout.
166 *
 167 * Firstly, name your tracepoint via TRACE_EVENT(name, ...): the
168 * 'subsystem_event' notation is fine.
169 *
170 * Think about this whole construct as the
171 * 'trace_sched_switch() function' from now on.
172 *
173 *
174 * TRACE_EVENT(sched_switch,
175 *
176 * *
177 * * A function has a regular function arguments
178 * * prototype, declare it via TP_PROTO():
179 * *
180 *
181 * TP_PROTO(struct rq *rq, struct task_struct *prev,
182 * struct task_struct *next),
183 *
184 * *
185 * * Define the call signature of the 'function'.
186 * * (Design sidenote: we use this instead of a
187 * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
188 * *
189 *
190 * TP_ARGS(rq, prev, next),
191 *
192 * *
193 * * Fast binary tracing: define the trace record via
194 * * TP_STRUCT__entry(). You can think about it like a
195 * * regular C structure local variable definition.
196 * *
197 * * This is how the trace record is structured and will
198 * * be saved into the ring buffer. These are the fields
199 * * that will be exposed to user-space in
200 * * /debug/tracing/events/<*>/format.
201 * *
202 * * The declared 'local variable' is called '__entry'
203 * *
 204 * * __field(pid_t, prev_pid) is equivalent to a standard declaration:
205 * *
206 * * pid_t prev_pid;
207 * *
208 * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to:
209 * *
210 * * char prev_comm[TASK_COMM_LEN];
211 * *
212 *
213 * TP_STRUCT__entry(
214 * __array( char, prev_comm, TASK_COMM_LEN )
215 * __field( pid_t, prev_pid )
216 * __field( int, prev_prio )
217 * __array( char, next_comm, TASK_COMM_LEN )
218 * __field( pid_t, next_pid )
219 * __field( int, next_prio )
220 * ),
221 *
222 * *
223 * * Assign the entry into the trace record, by embedding
224 * * a full C statement block into TP_fast_assign(). You
225 * * can refer to the trace record as '__entry' -
226 * * otherwise you can put arbitrary C code in here.
227 * *
228 * * Note: this C code will execute every time a trace event
229 * * happens, on an active tracepoint.
230 * *
231 *
232 * TP_fast_assign(
233 * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
234 * __entry->prev_pid = prev->pid;
235 * __entry->prev_prio = prev->prio;
236 * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
237 * __entry->next_pid = next->pid;
238 * __entry->next_prio = next->prio;
239 * )
240 *
241 * *
242 * * Formatted output of a trace record via TP_printk().
243 * * This is how the tracepoint will appear under ftrace
244 * * plugins that make use of this tracepoint.
245 * *
 246 * * (raw-binary tracing won't actually perform this step.)
247 * *
248 *
249 * TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
250 * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
251 * __entry->next_comm, __entry->next_pid, __entry->next_prio),
252 *
253 * );
254 *
255 * This macro construct is thus used for the regular printk format
256 * tracing setup, it is used to construct a function pointer based
257 * tracepoint callback (this is used by programmatic plugins and
 258 * can also be used by generic instrumentation like SystemTap), and
259 * it is also used to expose a structured trace record in
260 * /debug/tracing/events/.
261 */
262
263#define TRACE_EVENT(name, proto, args, struct, assign, print) \
264 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
265
156#endif 266#endif
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 08e088334dba..8615d661ab60 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -252,8 +252,6 @@ struct tty_operations {
252 void (*set_ldisc)(struct tty_struct *tty); 252 void (*set_ldisc)(struct tty_struct *tty);
253 void (*wait_until_sent)(struct tty_struct *tty, int timeout); 253 void (*wait_until_sent)(struct tty_struct *tty, int timeout);
254 void (*send_xchar)(struct tty_struct *tty, char ch); 254 void (*send_xchar)(struct tty_struct *tty, char ch);
255 int (*read_proc)(char *page, char **start, off_t off,
256 int count, int *eof, void *data);
257 int (*tiocmget)(struct tty_struct *tty, struct file *file); 255 int (*tiocmget)(struct tty_struct *tty, struct file *file);
258 int (*tiocmset)(struct tty_struct *tty, struct file *file, 256 int (*tiocmset)(struct tty_struct *tty, struct file *file,
259 unsigned int set, unsigned int clear); 257 unsigned int set, unsigned int clear);
@@ -264,6 +262,7 @@ struct tty_operations {
264 int (*poll_get_char)(struct tty_driver *driver, int line); 262 int (*poll_get_char)(struct tty_driver *driver, int line);
265 void (*poll_put_char)(struct tty_driver *driver, int line, char ch); 263 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
266#endif 264#endif
265 const struct file_operations *proc_fops;
267}; 266};
268 267
269struct tty_driver { 268struct tty_driver {
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 5f401b644ed5..429c631d2aad 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -80,8 +80,7 @@ struct wusb_ckhdid {
80 u8 data[16]; 80 u8 data[16];
81} __attribute__((packed)); 81} __attribute__((packed));
82 82
83const static 83static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
84struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
85 84
86#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) 85#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
87 86
diff --git a/include/linux/wait.h b/include/linux/wait.h
index a210ede73b56..5d631c17eaee 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -135,8 +135,11 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
135void __wake_up_common(wait_queue_head_t *q, unsigned int mode, 135void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
136 int nr_exclusive, int sync, void *key); 136 int nr_exclusive, int sync, void *key);
137void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 137void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
138extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode); 138void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
139extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); 139void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
140 void *key);
141void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
142void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
140void __wake_up_bit(wait_queue_head_t *, void *, int); 143void __wake_up_bit(wait_queue_head_t *, void *, int);
141int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); 144int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
142int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); 145int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
@@ -155,21 +158,17 @@ wait_queue_head_t *bit_waitqueue(void *, int);
155#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) 158#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
156#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) 159#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
157 160
158#ifdef CONFIG_DEBUG_LOCK_ALLOC
159/* 161/*
160 * macro to avoid include hell 162 * Wakeup macros to be used to report events to the targets.
161 */ 163 */
162#define wake_up_nested(x, s) \ 164#define wake_up_poll(x, m) \
163do { \ 165 __wake_up(x, TASK_NORMAL, 1, (void *) (m))
164 unsigned long flags; \ 166#define wake_up_locked_poll(x, m) \
165 \ 167 __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
166 spin_lock_irqsave_nested(&(x)->lock, flags, (s)); \ 168#define wake_up_interruptible_poll(x, m) \
167 wake_up_locked(x); \ 169 __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
168 spin_unlock_irqrestore(&(x)->lock, flags); \ 170#define wake_up_interruptible_sync_poll(x, m) \
169} while (0) 171 __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
170#else
171#define wake_up_nested(x, s) wake_up(x)
172#endif
173 172
174#define __wait_event(wq, condition) \ 173#define __wait_event(wq, condition) \
175do { \ 174do { \
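
[Note: a minimal sketch, assuming a hypothetical device with its own wait queue, of how the new keyed wakeup macros carry a poll mask so keyed waiters (for example epoll entries) can filter unrelated wakeups. Not taken from this patch.]

#include <linux/wait.h>
#include <linux/poll.h>

static DECLARE_WAIT_QUEUE_HEAD(mydev_waitq);

static void mydev_data_arrived(void)
{
	/* The poll events are passed as the wakeup key via __wake_up()'s
	 * key argument, so callers registered with a key can ignore
	 * wakeups that do not match the events they care about. */
	wake_up_interruptible_poll(&mydev_waitq, POLLIN | POLLRDNORM);
}
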
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 3cd51e579ab1..13e1adf55c4c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -41,6 +41,11 @@ struct delayed_work {
41 struct timer_list timer; 41 struct timer_list timer;
42}; 42};
43 43
44static inline struct delayed_work *to_delayed_work(struct work_struct *work)
45{
46 return container_of(work, struct delayed_work, work);
47}
48
44struct execute_work { 49struct execute_work {
45 struct work_struct work; 50 struct work_struct work;
46}; 51};
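
[Note: a minimal sketch of the intended use of the new to_delayed_work() helper inside a work handler; the mydev structure and its members are illustrative assumptions, not part of this patch.]

#include <linux/workqueue.h>

struct mydev {
	struct delayed_work poll_work;
	int value;
};

static void mydev_poll(struct work_struct *work)
{
	/* Recover the delayed_work, then the containing device. */
	struct delayed_work *dwork = to_delayed_work(work);
	struct mydev *dev = container_of(dwork, struct mydev, poll_work);

	dev->value++;				/* do the periodic work */
	schedule_delayed_work(&dev->poll_work, HZ);	/* re-arm in 1 second */
}
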
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 7300ecdc480c..93445477f86a 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -109,8 +109,8 @@ extern int dirty_background_ratio;
109extern unsigned long dirty_background_bytes; 109extern unsigned long dirty_background_bytes;
110extern int vm_dirty_ratio; 110extern int vm_dirty_ratio;
111extern unsigned long vm_dirty_bytes; 111extern unsigned long vm_dirty_bytes;
112extern int dirty_writeback_interval; 112extern unsigned int dirty_writeback_interval;
113extern int dirty_expire_interval; 113extern unsigned int dirty_expire_interval;
114extern int vm_highmem_is_dirtyable; 114extern int vm_highmem_is_dirtyable;
115extern int block_dump; 115extern int block_dump;
116extern int laptop_mode; 116extern int laptop_mode;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e54c76d75495..1b94b9bfe2dc 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -616,21 +616,6 @@ static inline int tcp_skb_mss(const struct sk_buff *skb)
616 return skb_shinfo(skb)->gso_size; 616 return skb_shinfo(skb)->gso_size;
617} 617}
618 618
619static inline void tcp_dec_pcount_approx_int(__u32 *count, const int decr)
620{
621 if (*count) {
622 *count -= decr;
623 if ((int)*count < 0)
624 *count = 0;
625 }
626}
627
628static inline void tcp_dec_pcount_approx(__u32 *count,
629 const struct sk_buff *skb)
630{
631 tcp_dec_pcount_approx_int(count, tcp_skb_pcount(skb));
632}
633
634/* Events passed to congestion control interface */ 619/* Events passed to congestion control interface */
635enum tcp_ca_event { 620enum tcp_ca_event {
636 CA_EVENT_TX_START, /* first transmit when no packets in flight */ 621 CA_EVENT_TX_START, /* first transmit when no packets in flight */
diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h
new file mode 100644
index 000000000000..0627a9ae6347
--- /dev/null
+++ b/include/scsi/fc/fc_fip.h
@@ -0,0 +1,237 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17#ifndef _FC_FIP_H_
18#define _FC_FIP_H_
19
20/*
21 * This version is based on:
22 * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf
23 */
24
25/*
26 * The FIP ethertype eventually goes in net/if_ether.h.
27 */
28#ifndef ETH_P_FIP
29#define ETH_P_FIP 0x8914 /* FIP Ethertype */
30#endif
31
32#define FIP_DEF_PRI 128 /* default selection priority */
33#define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */
34#define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */
35#define FIP_VN_KA_PERIOD 90000 /* required VN_port keep-alive period (mS) */
36#define FIP_FCF_FUZZ 100 /* random time added by FCF (mS) */
37
38/*
39 * Multicast MAC addresses. T11-adopted.
40 */
41#define FIP_ALL_FCOE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 0 })
42#define FIP_ALL_ENODE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 1 })
43#define FIP_ALL_FCF_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
44
45#define FIP_VER 1 /* version for fip_header */
46
47struct fip_header {
48 __u8 fip_ver; /* upper 4 bits are the version */
49 __u8 fip_resv1; /* reserved */
50 __be16 fip_op; /* operation code */
51 __u8 fip_resv2; /* reserved */
52 __u8 fip_subcode; /* lower 4 bits are sub-code */
53 __be16 fip_dl_len; /* length of descriptors in words */
54 __be16 fip_flags; /* header flags */
55} __attribute__((packed));
56
57#define FIP_VER_SHIFT 4
58#define FIP_VER_ENCAPS(v) ((v) << FIP_VER_SHIFT)
59#define FIP_VER_DECAPS(v) ((v) >> FIP_VER_SHIFT)
60#define FIP_BPW 4 /* bytes per word for lengths */
61
62/*
63 * fip_op.
64 */
65enum fip_opcode {
66 FIP_OP_DISC = 1, /* discovery, advertisement, etc. */
67 FIP_OP_LS = 2, /* Link Service request or reply */
68 FIP_OP_CTRL = 3, /* Keep Alive / Link Reset */
69 FIP_OP_VLAN = 4, /* VLAN discovery */
70 FIP_OP_VENDOR_MIN = 0xfff8, /* min vendor-specific opcode */
71 FIP_OP_VENDOR_MAX = 0xfffe, /* max vendor-specific opcode */
72};
73
74/*
75 * Subcodes for FIP_OP_DISC.
76 */
77enum fip_disc_subcode {
78 FIP_SC_SOL = 1, /* solicitation */
79 FIP_SC_ADV = 2, /* advertisement */
80};
81
82/*
83 * Subcodes for FIP_OP_LS.
84 */
85enum fip_trans_subcode {
86 FIP_SC_REQ = 1, /* request */
87 FIP_SC_REP = 2, /* reply */
88};
89
90/*
 91 * Subcodes for FIP_OP_CTRL.
92 */
93enum fip_reset_subcode {
94 FIP_SC_KEEP_ALIVE = 1, /* keep-alive from VN_Port */
95 FIP_SC_CLR_VLINK = 2, /* clear virtual link from VF_Port */
96};
97
98/*
99 * Subcodes for FIP_OP_VLAN.
100 */
101enum fip_vlan_subcode {
102 FIP_SC_VL_REQ = 1, /* request */
103 FIP_SC_VL_REP = 2, /* reply */
104};
105
106/*
107 * flags in header fip_flags.
108 */
109enum fip_flag {
110 FIP_FL_FPMA = 0x8000, /* supports FPMA fabric-provided MACs */
111 FIP_FL_SPMA = 0x4000, /* supports SPMA server-provided MACs */
112 FIP_FL_AVAIL = 0x0004, /* available for FLOGI/ELP */
113 FIP_FL_SOL = 0x0002, /* this is a solicited message */
114 FIP_FL_FPORT = 0x0001, /* sent from an F port */
115};
116
117/*
118 * Common descriptor header format.
119 */
120struct fip_desc {
121 __u8 fip_dtype; /* type - see below */
122 __u8 fip_dlen; /* length - in 32-bit words */
123};
124
125enum fip_desc_type {
126 FIP_DT_PRI = 1, /* priority for forwarder selection */
127 FIP_DT_MAC = 2, /* MAC address */
128 FIP_DT_MAP_OUI = 3, /* FC-MAP OUI */
129 FIP_DT_NAME = 4, /* switch name or node name */
130 FIP_DT_FAB = 5, /* fabric descriptor */
131 FIP_DT_FCOE_SIZE = 6, /* max FCoE frame size */
132 FIP_DT_FLOGI = 7, /* FLOGI request or response */
133 FIP_DT_FDISC = 8, /* FDISC request or response */
134 FIP_DT_LOGO = 9, /* LOGO request or response */
135 FIP_DT_ELP = 10, /* ELP request or response */
136 FIP_DT_VN_ID = 11, /* VN_Node Identifier */
137 FIP_DT_FKA = 12, /* advertisement keep-alive period */
138 FIP_DT_VENDOR = 13, /* vendor ID */
139 FIP_DT_VLAN = 14, /* vlan number */
140 FIP_DT_LIMIT, /* max defined desc_type + 1 */
141 FIP_DT_VENDOR_BASE = 128, /* first vendor-specific desc_type */
142};
143
144/*
145 * FIP_DT_PRI - priority descriptor.
146 */
147struct fip_pri_desc {
148 struct fip_desc fd_desc;
149 __u8 fd_resvd;
150 __u8 fd_pri; /* FCF priority: higher is better */
151} __attribute__((packed));
152
153/*
154 * FIP_DT_MAC - MAC address descriptor.
155 */
156struct fip_mac_desc {
157 struct fip_desc fd_desc;
158 __u8 fd_mac[ETH_ALEN];
159} __attribute__((packed));
160
161/*
 162 * FIP_DT_MAP_OUI - descriptor.
163 */
164struct fip_map_desc {
165 struct fip_desc fd_desc;
166 __u8 fd_resvd[3];
167 __u8 fd_map[3];
168} __attribute__((packed));
169
170/*
171 * FIP_DT_NAME descriptor.
172 */
173struct fip_wwn_desc {
174 struct fip_desc fd_desc;
175 __u8 fd_resvd[2];
176 __be64 fd_wwn; /* 64-bit WWN, unaligned */
177} __attribute__((packed));
178
179/*
180 * FIP_DT_FAB descriptor.
181 */
182struct fip_fab_desc {
183 struct fip_desc fd_desc;
184 __be16 fd_vfid; /* virtual fabric ID */
185 __u8 fd_resvd;
186 __u8 fd_map[3]; /* FC-MAP value */
187 __be64 fd_wwn; /* fabric name, unaligned */
188} __attribute__((packed));
189
190/*
191 * FIP_DT_FCOE_SIZE descriptor.
192 */
193struct fip_size_desc {
194 struct fip_desc fd_desc;
195 __be16 fd_size;
196} __attribute__((packed));
197
198/*
199 * Descriptor that encapsulates an ELS or ILS frame.
200 * The encapsulated frame immediately follows this header, without
201 * SOF, EOF, or CRC.
202 */
203struct fip_encaps {
204 struct fip_desc fd_desc;
205 __u8 fd_resvd[2];
206} __attribute__((packed));
207
208/*
209 * FIP_DT_VN_ID - VN_Node Identifier descriptor.
210 */
211struct fip_vn_desc {
212 struct fip_desc fd_desc;
213 __u8 fd_mac[ETH_ALEN];
214 __u8 fd_resvd;
215 __u8 fd_fc_id[3];
216 __be64 fd_wwpn; /* port name, unaligned */
217} __attribute__((packed));
218
219/*
220 * FIP_DT_FKA - Advertisement keep-alive period.
221 */
222struct fip_fka_desc {
223 struct fip_desc fd_desc;
224 __u8 fd_resvd[2];
225 __be32 fd_fka_period; /* adv./keep-alive period in mS */
226} __attribute__((packed));
227
228/*
229 * FIP_DT_VENDOR descriptor.
230 */
231struct fip_vendor_desc {
232 struct fip_desc fd_desc;
233 __u8 fd_resvd[2];
234 __u8 fd_vendor_id[8];
235} __attribute__((packed));
236
237#endif /* _FC_FIP_H_ */
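
[Note: a minimal sketch, assuming a hypothetical sender, of how the header layout and version/length macros above fit together when building a FIP discovery solicitation. The descriptor payload and flag choice are illustrative; a real sender appends descriptors after the header and sizes fip_dl_len to match.]

#include <scsi/fc/fc_fip.h>

static void fip_fill_sol_header(struct fip_header *fh, size_t desc_bytes)
{
	fh->fip_ver = FIP_VER_ENCAPS(FIP_VER);	/* version lives in the upper 4 bits */
	fh->fip_op = htons(FIP_OP_DISC);
	fh->fip_subcode = FIP_SC_SOL;
	fh->fip_dl_len = htons(desc_bytes / FIP_BPW);	/* descriptor length in 32-bit words */
	fh->fip_flags = htons(FIP_FL_FPMA);	/* e.g. advertise FPMA support */
}
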
diff --git a/include/scsi/fc_transport_fcoe.h b/include/scsi/fc_transport_fcoe.h
deleted file mode 100644
index 8dca2af14ffc..000000000000
--- a/include/scsi/fc_transport_fcoe.h
+++ /dev/null
@@ -1,54 +0,0 @@
1#ifndef FC_TRANSPORT_FCOE_H
2#define FC_TRANSPORT_FCOE_H
3
4#include <linux/device.h>
5#include <linux/netdevice.h>
6#include <scsi/scsi_host.h>
7#include <scsi/libfc.h>
8
9/**
10 * struct fcoe_transport - FCoE transport struct for generic transport
11 * for Ethernet devices as well as pure HBAs
12 *
 13 * @name: name for this transport
14 * @bus: physical bus type (pci_bus_type)
15 * @driver: physical bus driver for network device
16 * @create: entry create function
17 * @destroy: exit destroy function
18 * @list: list of transports
19 */
20struct fcoe_transport {
21 char *name;
22 unsigned short vendor;
23 unsigned short device;
24 struct bus_type *bus;
25 struct device_driver *driver;
26 int (*create)(struct net_device *device);
27 int (*destroy)(struct net_device *device);
28 bool (*match)(struct net_device *device);
29 struct list_head list;
30 struct list_head devlist;
31 struct mutex devlock;
32};
33
34/**
35 * MODULE_ALIAS_FCOE_PCI
36 *
37 * some care must be taken with this, vendor and device MUST be a hex value
38 * preceded with 0x and with letters in lower case (0x12ab, not 0x12AB or 12AB)
39 */
40#define MODULE_ALIAS_FCOE_PCI(vendor, device) \
41 MODULE_ALIAS("fcoe-pci-" __stringify(vendor) "-" __stringify(device))
42
43/* exported funcs */
44int fcoe_transport_attach(struct net_device *netdev);
45int fcoe_transport_release(struct net_device *netdev);
46int fcoe_transport_register(struct fcoe_transport *t);
47int fcoe_transport_unregister(struct fcoe_transport *t);
48int fcoe_load_transport_driver(struct net_device *netdev);
49int __init fcoe_transport_init(void);
50int __exit fcoe_transport_exit(void);
51
 52/* fcoe_sw is the default transport */
53extern struct fcoe_transport fcoe_sw_transport;
54#endif /* FC_TRANSPORT_FCOE_H */
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index a70eafaad084..0303a6a098cc 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -22,6 +22,7 @@
22 22
23#include <linux/timer.h> 23#include <linux/timer.h>
24#include <linux/if.h> 24#include <linux/if.h>
25#include <linux/percpu.h>
25 26
26#include <scsi/scsi_transport.h> 27#include <scsi/scsi_transport.h>
27#include <scsi/scsi_transport_fc.h> 28#include <scsi/scsi_transport_fc.h>
@@ -661,7 +662,8 @@ struct fc_lport {
661 unsigned long boot_time; 662 unsigned long boot_time;
662 663
663 struct fc_host_statistics host_stats; 664 struct fc_host_statistics host_stats;
664 struct fcoe_dev_stats *dev_stats[NR_CPUS]; 665 struct fcoe_dev_stats *dev_stats;
666
665 u64 wwpn; 667 u64 wwpn;
666 u64 wwnn; 668 u64 wwnn;
667 u8 retry_count; 669 u8 retry_count;
@@ -694,11 +696,6 @@ struct fc_lport {
694/* 696/*
695 * FC_LPORT HELPER FUNCTIONS 697 * FC_LPORT HELPER FUNCTIONS
696 *****************************/ 698 *****************************/
697static inline void *lport_priv(const struct fc_lport *lp)
698{
699 return (void *)(lp + 1);
700}
701
702static inline int fc_lport_test_ready(struct fc_lport *lp) 699static inline int fc_lport_test_ready(struct fc_lport *lp)
703{ 700{
704 return lp->state == LPORT_ST_READY; 701 return lp->state == LPORT_ST_READY;
@@ -722,6 +719,42 @@ static inline void fc_lport_state_enter(struct fc_lport *lp,
722 lp->state = state; 719 lp->state = state;
723} 720}
724 721
722static inline int fc_lport_init_stats(struct fc_lport *lp)
723{
724 /* allocate per cpu stats block */
725 lp->dev_stats = alloc_percpu(struct fcoe_dev_stats);
726 if (!lp->dev_stats)
727 return -ENOMEM;
728 return 0;
729}
730
731static inline void fc_lport_free_stats(struct fc_lport *lp)
732{
733 free_percpu(lp->dev_stats);
734}
735
736static inline struct fcoe_dev_stats *fc_lport_get_stats(struct fc_lport *lp)
737{
738 return per_cpu_ptr(lp->dev_stats, smp_processor_id());
739}
740
741static inline void *lport_priv(const struct fc_lport *lp)
742{
743 return (void *)(lp + 1);
744}
745
746/**
747 * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport
 748 * @sht: ptr to the scsi host template
749 * @priv_size: size of private data after fc_lport
750 *
751 * Returns: ptr to Scsi_Host
752 */
753static inline struct Scsi_Host *
754libfc_host_alloc(struct scsi_host_template *sht, int priv_size)
755{
756 return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
757}
725 758
726/* 759/*
727 * LOCAL PORT LAYER 760 * LOCAL PORT LAYER
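
[Note: a minimal sketch of how an FCoE LLD might combine the new per-CPU statistics helpers with libfc_host_alloc(). Error handling is abbreviated, and the TxFrames field is assumed from struct fcoe_dev_stats, which is not shown in this diff.]

#include <scsi/libfc.h>
#include <scsi/scsi_host.h>

static struct fc_lport *mydrv_lport_create(struct scsi_host_template *sht,
					   int priv_size)
{
	struct Scsi_Host *shost;
	struct fc_lport *lp;

	shost = libfc_host_alloc(sht, priv_size);	/* fc_lport + LLD private data */
	if (!shost)
		return NULL;
	lp = shost_priv(shost);

	if (fc_lport_init_stats(lp)) {	/* allocates the per-CPU stats block */
		scsi_host_put(shost);
		return NULL;
	}
	return lp;
}

static void mydrv_count_tx_frame(struct fc_lport *lp)
{
	/* fc_lport_get_stats() uses smp_processor_id(), so the caller is
	 * expected to be pinned to a CPU or have preemption disabled. */
	fc_lport_get_stats(lp)->TxFrames++;
}
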
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index c41f7d0c6efc..666cc131732e 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. 2 * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007-2008 Intel Corporation. All rights reserved.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -20,134 +21,144 @@
20#ifndef _LIBFCOE_H 21#ifndef _LIBFCOE_H
21#define _LIBFCOE_H 22#define _LIBFCOE_H
22 23
24#include <linux/etherdevice.h>
25#include <linux/if_ether.h>
23#include <linux/netdevice.h> 26#include <linux/netdevice.h>
24#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/workqueue.h>
25#include <scsi/fc/fc_fcoe.h> 29#include <scsi/fc/fc_fcoe.h>
26#include <scsi/libfc.h> 30#include <scsi/libfc.h>
27 31
28/* 32/*
29 * this percpu struct for fcoe 33 * FIP tunable parameters.
30 */ 34 */
31struct fcoe_percpu_s { 35#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */
32 int cpu; 36#define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */
33 struct task_struct *thread; 37#define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */
34 struct sk_buff_head fcoe_rx_list; 38
35 struct page *crc_eof_page; 39/**
36 int crc_eof_offset; 40 * enum fip_state - internal state of FCoE controller.
41 * @FIP_ST_DISABLED: controller has been disabled or not yet enabled.
42 * @FIP_ST_LINK_WAIT: the physical link is down or unusable.
43 * @FIP_ST_AUTO: determining whether to use FIP or non-FIP mode.
44 * @FIP_ST_NON_FIP: non-FIP mode selected.
45 * @FIP_ST_ENABLED: FIP mode selected.
46 */
47enum fip_state {
48 FIP_ST_DISABLED,
49 FIP_ST_LINK_WAIT,
50 FIP_ST_AUTO,
51 FIP_ST_NON_FIP,
52 FIP_ST_ENABLED,
37}; 53};
38 54
39/* 55/**
40 * the fcoe sw transport private data 56 * struct fcoe_ctlr - FCoE Controller and FIP state.
57 * @state: internal FIP state for network link and FIP or non-FIP mode.
58 * @lp: &fc_lport: libfc local port.
59 * @sel_fcf: currently selected FCF, or NULL.
60 * @fcfs: list of discovered FCFs.
61 * @fcf_count: number of discovered FCF entries.
62 * @sol_time: time when a multicast solicitation was last sent.
63 * @sel_time: time after which to select an FCF.
64 * @port_ka_time: time of next port keep-alive.
65 * @ctlr_ka_time: time of next controller keep-alive.
66 * @timer: timer struct used for all delayed events.
67 * @link_work: &work_struct for doing FCF selection.
68 * @recv_work: &work_struct for receiving FIP frames.
69 * @fip_recv_list: list of received FIP frames.
70 * @user_mfs: configured maximum FC frame size, including FC header.
71 * @flogi_oxid: exchange ID of most recent fabric login.
72 * @flogi_count: number of FLOGI attempts in AUTO mode.
73 * @link: current link status for libfc.
74 * @last_link: last link state reported to libfc.
75 * @map_dest: use the FC_MAP mode for destination MAC addresses.
76 * @dest_addr: MAC address of the selected FC forwarder.
77 * @ctl_src_addr: the native MAC address of our local port.
78 * @data_src_addr: the assigned MAC address for the local port after FLOGI.
79 * @send: LLD-supplied function to handle sending of FIP Ethernet frames.
80 * @update_mac: LLD-supplied function to handle changes to MAC addresses.
81 * @lock: lock protecting this structure.
82 *
83 * This structure is used by all FCoE drivers. It contains information
84 * needed by all FCoE low-level drivers (LLDs) as well as internal state
 85 * for FIP, and fields shared with the LLDs.
41 */ 86 */
42struct fcoe_softc { 87struct fcoe_ctlr {
43 struct list_head list; 88 enum fip_state state;
44 struct fc_lport *lp; 89 struct fc_lport *lp;
45 struct net_device *real_dev; 90 struct fcoe_fcf *sel_fcf;
46 struct net_device *phys_dev; /* device with ethtool_ops */ 91 struct list_head fcfs;
47 struct packet_type fcoe_packet_type; 92 u16 fcf_count;
48 struct sk_buff_head fcoe_pending_queue; 93 unsigned long sol_time;
49 u8 fcoe_pending_queue_active; 94 unsigned long sel_time;
50 95 unsigned long port_ka_time;
96 unsigned long ctlr_ka_time;
97 struct timer_list timer;
98 struct work_struct link_work;
99 struct work_struct recv_work;
100 struct sk_buff_head fip_recv_list;
101 u16 user_mfs;
102 u16 flogi_oxid;
103 u8 flogi_count;
104 u8 link;
105 u8 last_link;
106 u8 map_dest;
51 u8 dest_addr[ETH_ALEN]; 107 u8 dest_addr[ETH_ALEN];
52 u8 ctl_src_addr[ETH_ALEN]; 108 u8 ctl_src_addr[ETH_ALEN];
53 u8 data_src_addr[ETH_ALEN]; 109 u8 data_src_addr[ETH_ALEN];
54 /*
55 * fcoe protocol address learning related stuff
56 */
57 u16 flogi_oxid;
58 u8 flogi_progress;
59 u8 address_mode;
60};
61
62static inline struct net_device *fcoe_netdev(
63 const struct fc_lport *lp)
64{
65 return ((struct fcoe_softc *)lport_priv(lp))->real_dev;
66}
67
68static inline struct fcoe_hdr *skb_fcoe_header(const struct sk_buff *skb)
69{
70 return (struct fcoe_hdr *)skb_network_header(skb);
71}
72
73static inline int skb_fcoe_offset(const struct sk_buff *skb)
74{
75 return skb_network_offset(skb);
76}
77
78static inline struct fc_frame_header *skb_fc_header(const struct sk_buff *skb)
79{
80 return (struct fc_frame_header *)skb_transport_header(skb);
81}
82
83static inline int skb_fc_offset(const struct sk_buff *skb)
84{
85 return skb_transport_offset(skb);
86}
87 110
88static inline void skb_reset_fc_header(struct sk_buff *skb) 111 void (*send)(struct fcoe_ctlr *, struct sk_buff *);
89{ 112 void (*update_mac)(struct fcoe_ctlr *, u8 *old, u8 *new);
90 skb_reset_network_header(skb); 113 spinlock_t lock;
91 skb_set_transport_header(skb, skb_network_offset(skb) + 114};
92 sizeof(struct fcoe_hdr));
93}
94
95static inline bool skb_fc_is_data(const struct sk_buff *skb)
96{
97 return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_SOL_DATA;
98}
99
100static inline bool skb_fc_is_cmd(const struct sk_buff *skb)
101{
102 return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD;
103}
104 115
105static inline bool skb_fc_has_exthdr(const struct sk_buff *skb) 116/*
106{ 117 * struct fcoe_fcf - Fibre-Channel Forwarder.
107 return (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_VFTH) || 118 * @list: list linkage.
108 (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_IFRH) || 119 * @time: system time (jiffies) when an advertisement was last received.
109 (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_ENCH); 120 * @switch_name: WWN of switch from advertisement.
110} 121 * @fabric_name: WWN of fabric from advertisement.
122 * @fc_map: FC_MAP value from advertisement.
123 * @fcf_mac: Ethernet address of the FCF.
124 * @vfid: virtual fabric ID.
 125 * @pri: selection priority, smaller values are better.
126 * @flags: flags received from advertisement.
127 * @fka_period: keep-alive period, in jiffies.
128 *
129 * A Fibre-Channel Forwarder (FCF) is the entity on the Ethernet that
130 * passes FCoE frames on to an FC fabric. This structure represents
131 * one FCF from which advertisements have been received.
132 *
133 * When looking up an FCF, @switch_name, @fabric_name, @fc_map, @vfid, and
134 * @fcf_mac together form the lookup key.
135 */
136struct fcoe_fcf {
137 struct list_head list;
138 unsigned long time;
111 139
112static inline bool skb_fc_is_roff(const struct sk_buff *skb) 140 u64 switch_name;
113{ 141 u64 fabric_name;
114 return skb_fc_header(skb)->fh_f_ctl[2] & FC_FC_REL_OFF; 142 u32 fc_map;
115} 143 u16 vfid;
144 u8 fcf_mac[ETH_ALEN];
116 145
117static inline u16 skb_fc_oxid(const struct sk_buff *skb) 146 u8 pri;
118{ 147 u16 flags;
119 return be16_to_cpu(skb_fc_header(skb)->fh_ox_id); 148 u32 fka_period;
120} 149};
121 150
122static inline u16 skb_fc_rxid(const struct sk_buff *skb) 151/* FIP API functions */
123{ 152void fcoe_ctlr_init(struct fcoe_ctlr *);
124 return be16_to_cpu(skb_fc_header(skb)->fh_rx_id); 153void fcoe_ctlr_destroy(struct fcoe_ctlr *);
125} 154void fcoe_ctlr_link_up(struct fcoe_ctlr *);
155int fcoe_ctlr_link_down(struct fcoe_ctlr *);
156int fcoe_ctlr_els_send(struct fcoe_ctlr *, struct sk_buff *);
157void fcoe_ctlr_recv(struct fcoe_ctlr *, struct sk_buff *);
158int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_frame *fp, u8 *sa);
126 159
127/* libfcoe funcs */ 160/* libfcoe funcs */
128int fcoe_reset(struct Scsi_Host *shost); 161u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
129u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
130 unsigned int scheme, unsigned int port);
131
132u32 fcoe_fc_crc(struct fc_frame *fp);
133int fcoe_xmit(struct fc_lport *, struct fc_frame *);
134int fcoe_rcv(struct sk_buff *, struct net_device *,
135 struct packet_type *, struct net_device *);
136
137int fcoe_percpu_receive_thread(void *arg);
138void fcoe_clean_pending_queue(struct fc_lport *lp);
139void fcoe_percpu_clean(struct fc_lport *lp);
140void fcoe_watchdog(ulong vp);
141int fcoe_link_ok(struct fc_lport *lp);
142
143struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
144int fcoe_hostlist_add(const struct fc_lport *);
145int fcoe_hostlist_remove(const struct fc_lport *);
146
147struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *, int);
148int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *); 162int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *);
149 163
150/* fcoe sw hba */
151int __init fcoe_sw_init(void);
152int __exit fcoe_sw_exit(void);
153#endif /* _LIBFCOE_H */ 164#endif /* _LIBFCOE_H */
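
[Note: a minimal sketch, assuming a hypothetical LLD port structure, of the shape of the FIP controller API declared above: the LLD embeds a struct fcoe_ctlr, supplies the send/update_mac callbacks, and feeds received ETH_P_FIP frames to fcoe_ctlr_recv(). Not taken from this patch.]

#include <scsi/libfcoe.h>

struct mydrv_port {
	struct fcoe_ctlr ctlr;
	/* ... netdev, fc_lport, etc. ... */
};

static void mydrv_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	dev_queue_xmit(skb);	/* hand the FIP frame to the netdev TX path */
}

static void mydrv_update_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
{
	/* reprogram unicast filters from old to new as needed */
}

static void mydrv_port_init(struct mydrv_port *port, struct fc_lport *lp)
{
	fcoe_ctlr_init(&port->ctlr);
	port->ctlr.lp = lp;
	port->ctlr.send = mydrv_fip_send;
	port->ctlr.update_mac = mydrv_update_mac;
	/* Frames received with ethertype ETH_P_FIP would then be passed to
	 * fcoe_ctlr_recv(&port->ctlr, skb) from the LLD's receive handler. */
}
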
diff --git a/include/trace/block.h b/include/trace/block.h
index 25c6a1fd5b77..25b7068b819e 100644
--- a/include/trace/block.h
+++ b/include/trace/block.h
@@ -5,72 +5,72 @@
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(block_rq_abort, 7DECLARE_TRACE(block_rq_abort,
8 TPPROTO(struct request_queue *q, struct request *rq), 8 TP_PROTO(struct request_queue *q, struct request *rq),
9 TPARGS(q, rq)); 9 TP_ARGS(q, rq));
10 10
11DECLARE_TRACE(block_rq_insert, 11DECLARE_TRACE(block_rq_insert,
12 TPPROTO(struct request_queue *q, struct request *rq), 12 TP_PROTO(struct request_queue *q, struct request *rq),
13 TPARGS(q, rq)); 13 TP_ARGS(q, rq));
14 14
15DECLARE_TRACE(block_rq_issue, 15DECLARE_TRACE(block_rq_issue,
16 TPPROTO(struct request_queue *q, struct request *rq), 16 TP_PROTO(struct request_queue *q, struct request *rq),
17 TPARGS(q, rq)); 17 TP_ARGS(q, rq));
18 18
19DECLARE_TRACE(block_rq_requeue, 19DECLARE_TRACE(block_rq_requeue,
20 TPPROTO(struct request_queue *q, struct request *rq), 20 TP_PROTO(struct request_queue *q, struct request *rq),
21 TPARGS(q, rq)); 21 TP_ARGS(q, rq));
22 22
23DECLARE_TRACE(block_rq_complete, 23DECLARE_TRACE(block_rq_complete,
24 TPPROTO(struct request_queue *q, struct request *rq), 24 TP_PROTO(struct request_queue *q, struct request *rq),
25 TPARGS(q, rq)); 25 TP_ARGS(q, rq));
26 26
27DECLARE_TRACE(block_bio_bounce, 27DECLARE_TRACE(block_bio_bounce,
28 TPPROTO(struct request_queue *q, struct bio *bio), 28 TP_PROTO(struct request_queue *q, struct bio *bio),
29 TPARGS(q, bio)); 29 TP_ARGS(q, bio));
30 30
31DECLARE_TRACE(block_bio_complete, 31DECLARE_TRACE(block_bio_complete,
32 TPPROTO(struct request_queue *q, struct bio *bio), 32 TP_PROTO(struct request_queue *q, struct bio *bio),
33 TPARGS(q, bio)); 33 TP_ARGS(q, bio));
34 34
35DECLARE_TRACE(block_bio_backmerge, 35DECLARE_TRACE(block_bio_backmerge,
36 TPPROTO(struct request_queue *q, struct bio *bio), 36 TP_PROTO(struct request_queue *q, struct bio *bio),
37 TPARGS(q, bio)); 37 TP_ARGS(q, bio));
38 38
39DECLARE_TRACE(block_bio_frontmerge, 39DECLARE_TRACE(block_bio_frontmerge,
40 TPPROTO(struct request_queue *q, struct bio *bio), 40 TP_PROTO(struct request_queue *q, struct bio *bio),
41 TPARGS(q, bio)); 41 TP_ARGS(q, bio));
42 42
43DECLARE_TRACE(block_bio_queue, 43DECLARE_TRACE(block_bio_queue,
44 TPPROTO(struct request_queue *q, struct bio *bio), 44 TP_PROTO(struct request_queue *q, struct bio *bio),
45 TPARGS(q, bio)); 45 TP_ARGS(q, bio));
46 46
47DECLARE_TRACE(block_getrq, 47DECLARE_TRACE(block_getrq,
48 TPPROTO(struct request_queue *q, struct bio *bio, int rw), 48 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
49 TPARGS(q, bio, rw)); 49 TP_ARGS(q, bio, rw));
50 50
51DECLARE_TRACE(block_sleeprq, 51DECLARE_TRACE(block_sleeprq,
52 TPPROTO(struct request_queue *q, struct bio *bio, int rw), 52 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
53 TPARGS(q, bio, rw)); 53 TP_ARGS(q, bio, rw));
54 54
55DECLARE_TRACE(block_plug, 55DECLARE_TRACE(block_plug,
56 TPPROTO(struct request_queue *q), 56 TP_PROTO(struct request_queue *q),
57 TPARGS(q)); 57 TP_ARGS(q));
58 58
59DECLARE_TRACE(block_unplug_timer, 59DECLARE_TRACE(block_unplug_timer,
60 TPPROTO(struct request_queue *q), 60 TP_PROTO(struct request_queue *q),
61 TPARGS(q)); 61 TP_ARGS(q));
62 62
63DECLARE_TRACE(block_unplug_io, 63DECLARE_TRACE(block_unplug_io,
64 TPPROTO(struct request_queue *q), 64 TP_PROTO(struct request_queue *q),
65 TPARGS(q)); 65 TP_ARGS(q));
66 66
67DECLARE_TRACE(block_split, 67DECLARE_TRACE(block_split,
68 TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), 68 TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
69 TPARGS(q, bio, pdu)); 69 TP_ARGS(q, bio, pdu));
70 70
71DECLARE_TRACE(block_remap, 71DECLARE_TRACE(block_remap,
72 TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev, 72 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
73 sector_t from, sector_t to), 73 sector_t from, sector_t to),
74 TPARGS(q, bio, dev, from, to)); 74 TP_ARGS(q, bio, dev, from, to));
75 75
76#endif 76#endif
diff --git a/include/trace/irq.h b/include/trace/irq.h
new file mode 100644
index 000000000000..ff5d4495dc37
--- /dev/null
+++ b/include/trace/irq.h
@@ -0,0 +1,9 @@
1#ifndef _TRACE_IRQ_H
2#define _TRACE_IRQ_H
3
4#include <linux/interrupt.h>
5#include <linux/tracepoint.h>
6
7#include <trace/irq_event_types.h>
8
9#endif
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h
new file mode 100644
index 000000000000..85964ebd47ec
--- /dev/null
+++ b/include/trace/irq_event_types.h
@@ -0,0 +1,55 @@
1
2/* use <trace/irq.h> instead */
3#ifndef TRACE_FORMAT
4# error Do not include this file directly.
5# error Unless you know what you are doing.
6#endif
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM irq
10
11/*
12 * Tracepoint for entry of interrupt handler:
13 */
14TRACE_FORMAT(irq_handler_entry,
15 TP_PROTO(int irq, struct irqaction *action),
16 TP_ARGS(irq, action),
17 TP_FMT("irq=%d handler=%s", irq, action->name)
18 );
19
20/*
21 * Tracepoint for return of an interrupt handler:
22 */
23TRACE_EVENT(irq_handler_exit,
24
25 TP_PROTO(int irq, struct irqaction *action, int ret),
26
27 TP_ARGS(irq, action, ret),
28
29 TP_STRUCT__entry(
30 __field( int, irq )
31 __field( int, ret )
32 ),
33
34 TP_fast_assign(
35 __entry->irq = irq;
36 __entry->ret = ret;
37 ),
38
39 TP_printk("irq=%d return=%s",
40 __entry->irq, __entry->ret ? "handled" : "unhandled")
41);
42
43TRACE_FORMAT(softirq_entry,
44 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
45 TP_ARGS(h, vec),
46 TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
47 );
48
49TRACE_FORMAT(softirq_exit,
50 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
51 TP_ARGS(h, vec),
52 TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
53 );
54
55#undef TRACE_SYSTEM
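
[Note: a minimal sketch of the call sites these definitions generate. Each TRACE_FORMAT/TRACE_EVENT produces a trace_<name>() helper; the wrapper below is illustrative of where the core IRQ code would place the entry/exit hooks, not a function added by this patch.]

#include <linux/interrupt.h>
#include <trace/irq.h>

static irqreturn_t mydrv_run_handler(int irq, struct irqaction *action)
{
	irqreturn_t ret;

	trace_irq_handler_entry(irq, action);		/* fires irq_handler_entry */
	ret = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, ret);	/* records handled/unhandled */
	return ret;
}
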
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
new file mode 100644
index 000000000000..28ee69f9cd46
--- /dev/null
+++ b/include/trace/kmemtrace.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2008 Eduard - Gabriel Munteanu
3 *
4 * This file is released under GPL version 2.
5 */
6
7#ifndef _LINUX_KMEMTRACE_H
8#define _LINUX_KMEMTRACE_H
9
10#ifdef __KERNEL__
11
12#include <linux/tracepoint.h>
13#include <linux/types.h>
14
15#ifdef CONFIG_KMEMTRACE
16extern void kmemtrace_init(void);
17#else
18static inline void kmemtrace_init(void)
19{
20}
21#endif
22
23DECLARE_TRACE(kmalloc,
24 TP_PROTO(unsigned long call_site,
25 const void *ptr,
26 size_t bytes_req,
27 size_t bytes_alloc,
28 gfp_t gfp_flags),
29 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
30DECLARE_TRACE(kmem_cache_alloc,
31 TP_PROTO(unsigned long call_site,
32 const void *ptr,
33 size_t bytes_req,
34 size_t bytes_alloc,
35 gfp_t gfp_flags),
36 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
37DECLARE_TRACE(kmalloc_node,
38 TP_PROTO(unsigned long call_site,
39 const void *ptr,
40 size_t bytes_req,
41 size_t bytes_alloc,
42 gfp_t gfp_flags,
43 int node),
44 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
45DECLARE_TRACE(kmem_cache_alloc_node,
46 TP_PROTO(unsigned long call_site,
47 const void *ptr,
48 size_t bytes_req,
49 size_t bytes_alloc,
50 gfp_t gfp_flags,
51 int node),
52 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
53DECLARE_TRACE(kfree,
54 TP_PROTO(unsigned long call_site, const void *ptr),
55 TP_ARGS(call_site, ptr));
56DECLARE_TRACE(kmem_cache_free,
57 TP_PROTO(unsigned long call_site, const void *ptr),
58 TP_ARGS(call_site, ptr));
59
60#endif /* __KERNEL__ */
61
62#endif /* _LINUX_KMEMTRACE_H */
63
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h
new file mode 100644
index 000000000000..5ca67df87f2a
--- /dev/null
+++ b/include/trace/lockdep.h
@@ -0,0 +1,9 @@
1#ifndef _TRACE_LOCKDEP_H
2#define _TRACE_LOCKDEP_H
3
4#include <linux/lockdep.h>
5#include <linux/tracepoint.h>
6
7#include <trace/lockdep_event_types.h>
8
9#endif
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h
new file mode 100644
index 000000000000..adccfcd2ec8f
--- /dev/null
+++ b/include/trace/lockdep_event_types.h
@@ -0,0 +1,44 @@
1
2#ifndef TRACE_FORMAT
3# error Do not include this file directly.
4# error Unless you know what you are doing.
5#endif
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM lock
9
10#ifdef CONFIG_LOCKDEP
11
12TRACE_FORMAT(lock_acquire,
13 TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
14 int trylock, int read, int check,
15 struct lockdep_map *next_lock, unsigned long ip),
16 TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
17 TP_FMT("%s%s%s", trylock ? "try " : "",
18 read ? "read " : "", lock->name)
19 );
20
21TRACE_FORMAT(lock_release,
22 TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
23 TP_ARGS(lock, nested, ip),
24 TP_FMT("%s", lock->name)
25 );
26
27#ifdef CONFIG_LOCK_STAT
28
29TRACE_FORMAT(lock_contended,
30 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
31 TP_ARGS(lock, ip),
32 TP_FMT("%s", lock->name)
33 );
34
35TRACE_FORMAT(lock_acquired,
36 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
37 TP_ARGS(lock, ip),
38 TP_FMT("%s", lock->name)
39 );
40
41#endif
42#endif
43
44#undef TRACE_SYSTEM
diff --git a/include/trace/power.h b/include/trace/power.h
new file mode 100644
index 000000000000..ef204666e983
--- /dev/null
+++ b/include/trace/power.h
@@ -0,0 +1,32 @@
1#ifndef _TRACE_POWER_H
2#define _TRACE_POWER_H
3
4#include <linux/ktime.h>
5#include <linux/tracepoint.h>
6
7enum {
8 POWER_NONE = 0,
9 POWER_CSTATE = 1,
10 POWER_PSTATE = 2,
11};
12
13struct power_trace {
14 ktime_t stamp;
15 ktime_t end;
16 int type;
17 int state;
18};
19
20DECLARE_TRACE(power_start,
21 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
22 TP_ARGS(it, type, state));
23
24DECLARE_TRACE(power_mark,
25 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
26 TP_ARGS(it, type, state));
27
28DECLARE_TRACE(power_end,
29 TP_PROTO(struct power_trace *it),
30 TP_ARGS(it));
31
32#endif /* _TRACE_POWER_H */
diff --git a/include/trace/sched.h b/include/trace/sched.h
index 0d81098ee9fc..4e372a1a29bf 100644
--- a/include/trace/sched.h
+++ b/include/trace/sched.h
@@ -4,53 +4,6 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(sched_kthread_stop, 7#include <trace/sched_event_types.h>
8 TPPROTO(struct task_struct *t),
9 TPARGS(t));
10
11DECLARE_TRACE(sched_kthread_stop_ret,
12 TPPROTO(int ret),
13 TPARGS(ret));
14
15DECLARE_TRACE(sched_wait_task,
16 TPPROTO(struct rq *rq, struct task_struct *p),
17 TPARGS(rq, p));
18
19DECLARE_TRACE(sched_wakeup,
20 TPPROTO(struct rq *rq, struct task_struct *p, int success),
21 TPARGS(rq, p, success));
22
23DECLARE_TRACE(sched_wakeup_new,
24 TPPROTO(struct rq *rq, struct task_struct *p, int success),
25 TPARGS(rq, p, success));
26
27DECLARE_TRACE(sched_switch,
28 TPPROTO(struct rq *rq, struct task_struct *prev,
29 struct task_struct *next),
30 TPARGS(rq, prev, next));
31
32DECLARE_TRACE(sched_migrate_task,
33 TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
34 TPARGS(p, orig_cpu, dest_cpu));
35
36DECLARE_TRACE(sched_process_free,
37 TPPROTO(struct task_struct *p),
38 TPARGS(p));
39
40DECLARE_TRACE(sched_process_exit,
41 TPPROTO(struct task_struct *p),
42 TPARGS(p));
43
44DECLARE_TRACE(sched_process_wait,
45 TPPROTO(struct pid *pid),
46 TPARGS(pid));
47
48DECLARE_TRACE(sched_process_fork,
49 TPPROTO(struct task_struct *parent, struct task_struct *child),
50 TPARGS(parent, child));
51
52DECLARE_TRACE(sched_signal_send,
53 TPPROTO(int sig, struct task_struct *p),
54 TPARGS(sig, p));
55 8
56#endif 9#endif
diff --git a/include/trace/sched_event_types.h b/include/trace/sched_event_types.h
new file mode 100644
index 000000000000..63547dc1125f
--- /dev/null
+++ b/include/trace/sched_event_types.h
@@ -0,0 +1,337 @@
1
2/* use <trace/sched.h> instead */
3#ifndef TRACE_EVENT
4# error Do not include this file directly.
5# error Unless you know what you are doing.
6#endif
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM sched
10
11/*
12 * Tracepoint for calling kthread_stop, performed to end a kthread:
13 */
14TRACE_EVENT(sched_kthread_stop,
15
16 TP_PROTO(struct task_struct *t),
17
18 TP_ARGS(t),
19
20 TP_STRUCT__entry(
21 __array( char, comm, TASK_COMM_LEN )
22 __field( pid_t, pid )
23 ),
24
25 TP_fast_assign(
26 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
27 __entry->pid = t->pid;
28 ),
29
30 TP_printk("task %s:%d", __entry->comm, __entry->pid)
31);
32
33/*
34 * Tracepoint for the return value of the kthread stopping:
35 */
36TRACE_EVENT(sched_kthread_stop_ret,
37
38 TP_PROTO(int ret),
39
40 TP_ARGS(ret),
41
42 TP_STRUCT__entry(
43 __field( int, ret )
44 ),
45
46 TP_fast_assign(
47 __entry->ret = ret;
48 ),
49
50 TP_printk("ret %d", __entry->ret)
51);
52
53/*
54 * Tracepoint for waiting on task to unschedule:
55 *
56 * (NOTE: the 'rq' argument is not used by generic trace events,
57 * but used by the latency tracer plugin. )
58 */
59TRACE_EVENT(sched_wait_task,
60
61 TP_PROTO(struct rq *rq, struct task_struct *p),
62
63 TP_ARGS(rq, p),
64
65 TP_STRUCT__entry(
66 __array( char, comm, TASK_COMM_LEN )
67 __field( pid_t, pid )
68 __field( int, prio )
69 ),
70
71 TP_fast_assign(
72 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
73 __entry->pid = p->pid;
74 __entry->prio = p->prio;
75 ),
76
77 TP_printk("task %s:%d [%d]",
78 __entry->comm, __entry->pid, __entry->prio)
79);
80
81/*
82 * Tracepoint for waking up a task:
83 *
84 * (NOTE: the 'rq' argument is not used by generic trace events,
85 * but used by the latency tracer plugin. )
86 */
87TRACE_EVENT(sched_wakeup,
88
89 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
90
91 TP_ARGS(rq, p, success),
92
93 TP_STRUCT__entry(
94 __array( char, comm, TASK_COMM_LEN )
95 __field( pid_t, pid )
96 __field( int, prio )
97 __field( int, success )
98 ),
99
100 TP_fast_assign(
101 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
102 __entry->pid = p->pid;
103 __entry->prio = p->prio;
104 __entry->success = success;
105 ),
106
107 TP_printk("task %s:%d [%d] success=%d",
108 __entry->comm, __entry->pid, __entry->prio,
109 __entry->success)
110);
111
112/*
113 * Tracepoint for waking up a new task:
114 *
115 * (NOTE: the 'rq' argument is not used by generic trace events,
116 * but used by the latency tracer plugin. )
117 */
118TRACE_EVENT(sched_wakeup_new,
119
120 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
121
122 TP_ARGS(rq, p, success),
123
124 TP_STRUCT__entry(
125 __array( char, comm, TASK_COMM_LEN )
126 __field( pid_t, pid )
127 __field( int, prio )
128 __field( int, success )
129 ),
130
131 TP_fast_assign(
132 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
133 __entry->pid = p->pid;
134 __entry->prio = p->prio;
135 __entry->success = success;
136 ),
137
138 TP_printk("task %s:%d [%d] success=%d",
139 __entry->comm, __entry->pid, __entry->prio,
140 __entry->success)
141);
142
143/*
144 * Tracepoint for task switches, performed by the scheduler:
145 *
146 * (NOTE: the 'rq' argument is not used by generic trace events,
147 * but used by the latency tracer plugin. )
148 */
149TRACE_EVENT(sched_switch,
150
151 TP_PROTO(struct rq *rq, struct task_struct *prev,
152 struct task_struct *next),
153
154 TP_ARGS(rq, prev, next),
155
156 TP_STRUCT__entry(
157 __array( char, prev_comm, TASK_COMM_LEN )
158 __field( pid_t, prev_pid )
159 __field( int, prev_prio )
160 __array( char, next_comm, TASK_COMM_LEN )
161 __field( pid_t, next_pid )
162 __field( int, next_prio )
163 ),
164
165 TP_fast_assign(
166 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
167 __entry->prev_pid = prev->pid;
168 __entry->prev_prio = prev->prio;
169 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
170 __entry->next_pid = next->pid;
171 __entry->next_prio = next->prio;
172 ),
173
174 TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
175 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
176 __entry->next_comm, __entry->next_pid, __entry->next_prio)
177);
178
179/*
180 * Tracepoint for a task being migrated:
181 */
182TRACE_EVENT(sched_migrate_task,
183
184 TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
185
186 TP_ARGS(p, orig_cpu, dest_cpu),
187
188 TP_STRUCT__entry(
189 __array( char, comm, TASK_COMM_LEN )
190 __field( pid_t, pid )
191 __field( int, prio )
192 __field( int, orig_cpu )
193 __field( int, dest_cpu )
194 ),
195
196 TP_fast_assign(
197 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
198 __entry->pid = p->pid;
199 __entry->prio = p->prio;
200 __entry->orig_cpu = orig_cpu;
201 __entry->dest_cpu = dest_cpu;
202 ),
203
204 TP_printk("task %s:%d [%d] from: %d to: %d",
205 __entry->comm, __entry->pid, __entry->prio,
206 __entry->orig_cpu, __entry->dest_cpu)
207);
208
209/*
210 * Tracepoint for freeing a task:
211 */
212TRACE_EVENT(sched_process_free,
213
214 TP_PROTO(struct task_struct *p),
215
216 TP_ARGS(p),
217
218 TP_STRUCT__entry(
219 __array( char, comm, TASK_COMM_LEN )
220 __field( pid_t, pid )
221 __field( int, prio )
222 ),
223
224 TP_fast_assign(
225 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
226 __entry->pid = p->pid;
227 __entry->prio = p->prio;
228 ),
229
230 TP_printk("task %s:%d [%d]",
231 __entry->comm, __entry->pid, __entry->prio)
232);
233
234/*
235 * Tracepoint for a task exiting:
236 */
237TRACE_EVENT(sched_process_exit,
238
239 TP_PROTO(struct task_struct *p),
240
241 TP_ARGS(p),
242
243 TP_STRUCT__entry(
244 __array( char, comm, TASK_COMM_LEN )
245 __field( pid_t, pid )
246 __field( int, prio )
247 ),
248
249 TP_fast_assign(
250 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
251 __entry->pid = p->pid;
252 __entry->prio = p->prio;
253 ),
254
255 TP_printk("task %s:%d [%d]",
256 __entry->comm, __entry->pid, __entry->prio)
257);
258
259/*
260 * Tracepoint for a waiting task:
261 */
262TRACE_EVENT(sched_process_wait,
263
264 TP_PROTO(struct pid *pid),
265
266 TP_ARGS(pid),
267
268 TP_STRUCT__entry(
269 __array( char, comm, TASK_COMM_LEN )
270 __field( pid_t, pid )
271 __field( int, prio )
272 ),
273
274 TP_fast_assign(
275 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
276 __entry->pid = pid_nr(pid);
277 __entry->prio = current->prio;
278 ),
279
280 TP_printk("task %s:%d [%d]",
281 __entry->comm, __entry->pid, __entry->prio)
282);
283
284/*
285 * Tracepoint for do_fork:
286 */
287TRACE_EVENT(sched_process_fork,
288
289 TP_PROTO(struct task_struct *parent, struct task_struct *child),
290
291 TP_ARGS(parent, child),
292
293 TP_STRUCT__entry(
294 __array( char, parent_comm, TASK_COMM_LEN )
295 __field( pid_t, parent_pid )
296 __array( char, child_comm, TASK_COMM_LEN )
297 __field( pid_t, child_pid )
298 ),
299
300 TP_fast_assign(
301 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
302 __entry->parent_pid = parent->pid;
303 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
304 __entry->child_pid = child->pid;
305 ),
306
307 TP_printk("parent %s:%d child %s:%d",
308 __entry->parent_comm, __entry->parent_pid,
309 __entry->child_comm, __entry->child_pid)
310);
311
312/*
313 * Tracepoint for sending a signal:
314 */
315TRACE_EVENT(sched_signal_send,
316
317 TP_PROTO(int sig, struct task_struct *p),
318
319 TP_ARGS(sig, p),
320
321 TP_STRUCT__entry(
322 __field( int, sig )
323 __array( char, comm, TASK_COMM_LEN )
324 __field( pid_t, pid )
325 ),
326
327 TP_fast_assign(
328 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
329 __entry->pid = p->pid;
330 __entry->sig = sig;
331 ),
332
333 TP_printk("sig: %d task %s:%d",
334 __entry->sig, __entry->comm, __entry->pid)
335);
336
337#undef TRACE_SYSTEM
diff --git a/include/trace/skb.h b/include/trace/skb.h
index a96610f92f69..b66206d9be72 100644
--- a/include/trace/skb.h
+++ b/include/trace/skb.h
@@ -5,7 +5,7 @@
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(kfree_skb, 7DECLARE_TRACE(kfree_skb,
8 TPPROTO(struct sk_buff *skb, void *location), 8 TP_PROTO(struct sk_buff *skb, void *location),
9 TPARGS(skb, location)); 9 TP_ARGS(skb, location));
10 10
11#endif 11#endif
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h
new file mode 100644
index 000000000000..df56f5694be6
--- /dev/null
+++ b/include/trace/trace_event_types.h
@@ -0,0 +1,5 @@
1/* trace/<type>_event_types.h here */
2
3#include <trace/sched_event_types.h>
4#include <trace/irq_event_types.h>
5#include <trace/lockdep_event_types.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
new file mode 100644
index 000000000000..fd13750ca4ba
--- /dev/null
+++ b/include/trace/trace_events.h
@@ -0,0 +1,5 @@
1/* trace/<type>.h here */
2
3#include <trace/sched.h>
4#include <trace/irq.h>
5#include <trace/lockdep.h>
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h
new file mode 100644
index 000000000000..7626523deeba
--- /dev/null
+++ b/include/trace/workqueue.h
@@ -0,0 +1,25 @@
1#ifndef __TRACE_WORKQUEUE_H
2#define __TRACE_WORKQUEUE_H
3
4#include <linux/tracepoint.h>
5#include <linux/workqueue.h>
6#include <linux/sched.h>
7
8DECLARE_TRACE(workqueue_insertion,
9 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
10 TP_ARGS(wq_thread, work));
11
12DECLARE_TRACE(workqueue_execution,
13 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
14 TP_ARGS(wq_thread, work));
15
16/* Trace the creation of one workqueue thread on a cpu */
17DECLARE_TRACE(workqueue_creation,
18 TP_PROTO(struct task_struct *wq_thread, int cpu),
19 TP_ARGS(wq_thread, cpu));
20
21DECLARE_TRACE(workqueue_destruction,
22 TP_PROTO(struct task_struct *wq_thread),
23 TP_ARGS(wq_thread));
24
25#endif /* __TRACE_WORKQUEUE_H */
diff --git a/include/video/aty128.h b/include/video/aty128.h
index 51ac69f05bdc..f0851e3bb7cc 100644
--- a/include/video/aty128.h
+++ b/include/video/aty128.h
@@ -415,7 +415,7 @@
415#define PWR_MGT_SLOWDOWN_MCLK 0x00002000 415#define PWR_MGT_SLOWDOWN_MCLK 0x00002000
416 416
417#define PMI_PMSCR_REG 0x60 417#define PMI_PMSCR_REG 0x60
418 418
419/* used by ATI bug fix for hardware ROM */ 419/* used by ATI bug fix for hardware ROM */
420#define RAGE128_MPP_TB_CONFIG 0x01c0 420#define RAGE128_MPP_TB_CONFIG 0x01c0
421 421
diff --git a/include/video/cirrus.h b/include/video/cirrus.h
index b2776b6c8679..9a5e9ee30782 100644
--- a/include/video/cirrus.h
+++ b/include/video/cirrus.h
@@ -32,7 +32,6 @@
32#define CL_VSSM2 0x3c3 /* Motherboard Sleep */ 32#define CL_VSSM2 0x3c3 /* Motherboard Sleep */
33 33
34/*** VGA Sequencer Registers ***/ 34/*** VGA Sequencer Registers ***/
35#define CL_SEQR0 0x0 /* Reset */
36/* the following are from the "extension registers" group */ 35/* the following are from the "extension registers" group */
37#define CL_SEQR6 0x6 /* Unlock ALL Extensions */ 36#define CL_SEQR6 0x6 /* Unlock ALL Extensions */
38#define CL_SEQR7 0x7 /* Extended Sequencer Mode */ 37#define CL_SEQR7 0x7 /* Extended Sequencer Mode */
@@ -71,6 +70,7 @@
71#define CL_CRT1B 0x1b /* Extended Display Controls */ 70#define CL_CRT1B 0x1b /* Extended Display Controls */
72#define CL_CRT1C 0x1c /* Sync adjust and genlock register */ 71#define CL_CRT1C 0x1c /* Sync adjust and genlock register */
73#define CL_CRT1D 0x1d /* Overlay Extended Control register */ 72#define CL_CRT1D 0x1d /* Overlay Extended Control register */
73#define CL_CRT1E 0x1e /* Another overflow register */
74#define CL_CRT25 0x25 /* Part Status Register */ 74#define CL_CRT25 0x25 /* Part Status Register */
75#define CL_CRT27 0x27 /* ID Register */ 75#define CL_CRT27 0x27 /* ID Register */
76#define CL_CRT51 0x51 /* P4 disable "flicker fixer" */ 76#define CL_CRT51 0x51 /* P4 disable "flicker fixer" */
diff --git a/include/video/newport.h b/include/video/newport.h
index 1f5ebeaa818f..001b935e71c4 100644
--- a/include/video/newport.h
+++ b/include/video/newport.h
@@ -453,7 +453,7 @@ static __inline__ int newport_wait(struct newport_regs *regs)
453{ 453{
454 int t = BUSY_TIMEOUT; 454 int t = BUSY_TIMEOUT;
455 455
456 while (t--) 456 while (--t)
457 if (!(regs->cset.status & NPORT_STAT_GBUSY)) 457 if (!(regs->cset.status & NPORT_STAT_GBUSY))
458 break; 458 break;
459 return !t; 459 return !t;
@@ -463,7 +463,7 @@ static __inline__ int newport_bfwait(struct newport_regs *regs)
463{ 463{
464 int t = BUSY_TIMEOUT; 464 int t = BUSY_TIMEOUT;
465 465
466 while (t--) 466 while (--t)
467 if(!(regs->cset.status & NPORT_STAT_BBUSY)) 467 if(!(regs->cset.status & NPORT_STAT_BBUSY))
468 break; 468 break;
469 return !t; 469 return !t;
diff --git a/include/video/radeon.h b/include/video/radeon.h
index e072b16b39ab..56b188abfb54 100644
--- a/include/video/radeon.h
+++ b/include/video/radeon.h
@@ -5,12 +5,12 @@
5#define RADEON_REGSIZE 0x4000 5#define RADEON_REGSIZE 0x4000
6 6
7 7
8#define MM_INDEX 0x0000 8#define MM_INDEX 0x0000
9#define MM_DATA 0x0004 9#define MM_DATA 0x0004
10#define BUS_CNTL 0x0030 10#define BUS_CNTL 0x0030
11#define HI_STAT 0x004C 11#define HI_STAT 0x004C
12#define BUS_CNTL1 0x0034 12#define BUS_CNTL1 0x0034
13#define I2C_CNTL_1 0x0094 13#define I2C_CNTL_1 0x0094
14#define CNFG_CNTL 0x00E0 14#define CNFG_CNTL 0x00E0
15#define CNFG_MEMSIZE 0x00F8 15#define CNFG_MEMSIZE 0x00F8
16#define CNFG_APER_0_BASE 0x0100 16#define CNFG_APER_0_BASE 0x0100
@@ -18,8 +18,8 @@
18#define CNFG_APER_SIZE 0x0108 18#define CNFG_APER_SIZE 0x0108
19#define CNFG_REG_1_BASE 0x010C 19#define CNFG_REG_1_BASE 0x010C
20#define CNFG_REG_APER_SIZE 0x0110 20#define CNFG_REG_APER_SIZE 0x0110
21#define PAD_AGPINPUT_DELAY 0x0164 21#define PAD_AGPINPUT_DELAY 0x0164
22#define PAD_CTLR_STRENGTH 0x0168 22#define PAD_CTLR_STRENGTH 0x0168
23#define PAD_CTLR_UPDATE 0x016C 23#define PAD_CTLR_UPDATE 0x016C
24#define PAD_CTLR_MISC 0x0aa0 24#define PAD_CTLR_MISC 0x0aa0
25#define AGP_CNTL 0x0174 25#define AGP_CNTL 0x0174
@@ -27,171 +27,171 @@
27#define CAP0_TRIG_CNTL 0x0950 27#define CAP0_TRIG_CNTL 0x0950
28#define CAP1_TRIG_CNTL 0x09c0 28#define CAP1_TRIG_CNTL 0x09c0
29#define VIPH_CONTROL 0x0C40 29#define VIPH_CONTROL 0x0C40
30#define VENDOR_ID 0x0F00 30#define VENDOR_ID 0x0F00
31#define DEVICE_ID 0x0F02 31#define DEVICE_ID 0x0F02
32#define COMMAND 0x0F04 32#define COMMAND 0x0F04
33#define STATUS 0x0F06 33#define STATUS 0x0F06
34#define REVISION_ID 0x0F08 34#define REVISION_ID 0x0F08
35#define REGPROG_INF 0x0F09 35#define REGPROG_INF 0x0F09
36#define SUB_CLASS 0x0F0A 36#define SUB_CLASS 0x0F0A
37#define BASE_CODE 0x0F0B 37#define BASE_CODE 0x0F0B
38#define CACHE_LINE 0x0F0C 38#define CACHE_LINE 0x0F0C
39#define LATENCY 0x0F0D 39#define LATENCY 0x0F0D
40#define HEADER 0x0F0E 40#define HEADER 0x0F0E
41#define BIST 0x0F0F 41#define BIST 0x0F0F
42#define REG_MEM_BASE 0x0F10 42#define REG_MEM_BASE 0x0F10
43#define REG_IO_BASE 0x0F14 43#define REG_IO_BASE 0x0F14
44#define REG_REG_BASE 0x0F18 44#define REG_REG_BASE 0x0F18
45#define ADAPTER_ID 0x0F2C 45#define ADAPTER_ID 0x0F2C
46#define BIOS_ROM 0x0F30 46#define BIOS_ROM 0x0F30
47#define CAPABILITIES_PTR 0x0F34 47#define CAPABILITIES_PTR 0x0F34
48#define INTERRUPT_LINE 0x0F3C 48#define INTERRUPT_LINE 0x0F3C
49#define INTERRUPT_PIN 0x0F3D 49#define INTERRUPT_PIN 0x0F3D
50#define MIN_GRANT 0x0F3E 50#define MIN_GRANT 0x0F3E
51#define MAX_LATENCY 0x0F3F 51#define MAX_LATENCY 0x0F3F
52#define ADAPTER_ID_W 0x0F4C 52#define ADAPTER_ID_W 0x0F4C
53#define PMI_CAP_ID 0x0F50 53#define PMI_CAP_ID 0x0F50
54#define PMI_NXT_CAP_PTR 0x0F51 54#define PMI_NXT_CAP_PTR 0x0F51
55#define PMI_PMC_REG 0x0F52 55#define PMI_PMC_REG 0x0F52
56#define PM_STATUS 0x0F54 56#define PM_STATUS 0x0F54
57#define PMI_DATA 0x0F57 57#define PMI_DATA 0x0F57
58#define AGP_CAP_ID 0x0F58 58#define AGP_CAP_ID 0x0F58
59#define AGP_STATUS 0x0F5C 59#define AGP_STATUS 0x0F5C
60#define AGP_COMMAND 0x0F60 60#define AGP_COMMAND 0x0F60
61#define AIC_CTRL 0x01D0 61#define AIC_CTRL 0x01D0
62#define AIC_STAT 0x01D4 62#define AIC_STAT 0x01D4
63#define AIC_PT_BASE 0x01D8 63#define AIC_PT_BASE 0x01D8
64#define AIC_LO_ADDR 0x01DC 64#define AIC_LO_ADDR 0x01DC
65#define AIC_HI_ADDR 0x01E0 65#define AIC_HI_ADDR 0x01E0
66#define AIC_TLB_ADDR 0x01E4 66#define AIC_TLB_ADDR 0x01E4
67#define AIC_TLB_DATA 0x01E8 67#define AIC_TLB_DATA 0x01E8
68#define DAC_CNTL 0x0058 68#define DAC_CNTL 0x0058
69#define DAC_CNTL2 0x007c 69#define DAC_CNTL2 0x007c
70#define CRTC_GEN_CNTL 0x0050 70#define CRTC_GEN_CNTL 0x0050
71#define MEM_CNTL 0x0140 71#define MEM_CNTL 0x0140
72#define MC_CNTL 0x0140 72#define MC_CNTL 0x0140
73 #define EXT_MEM_CNTL 0x0144
74 #define MC_TIMING_CNTL 0x0144
75 #define MC_AGP_LOCATION 0x014C
76 #define MEM_IO_CNTL_A0 0x0178
77 #define MEM_REFRESH_CNTL 0x0178
78 #define MEM_INIT_LATENCY_TIMER 0x0154
79 #define MC_INIT_GFX_LAT_TIMER 0x0154
80 #define MEM_SDRAM_MODE_REG 0x0158
81 #define AGP_BASE 0x0170
82 #define MEM_IO_CNTL_A1 0x017C
83 #define MC_READ_CNTL_AB 0x017C
84 #define MEM_IO_CNTL_B0 0x0180
85 #define MC_INIT_MISC_LAT_TIMER 0x0180
86 #define MEM_IO_CNTL_B1 0x0184
87 #define MC_IOPAD_CNTL 0x0184
88 #define MC_DEBUG 0x0188
89 #define MC_STATUS 0x0150
90 #define MEM_IO_OE_CNTL 0x018C
91 #define MC_CHIP_IO_OE_CNTL_AB 0x018C
92 #define MC_FB_LOCATION 0x0148
93 #define HOST_PATH_CNTL 0x0130
94 #define MEM_VGA_WP_SEL 0x0038
95 #define MEM_VGA_RP_SEL 0x003C
96 #define HDP_DEBUG 0x0138
97 #define SW_SEMAPHORE 0x013C
98 #define CRTC2_GEN_CNTL 0x03f8
99 #define CRTC2_DISPLAY_BASE_ADDR 0x033c
100 #define SURFACE_CNTL 0x0B00
101 #define SURFACE0_LOWER_BOUND 0x0B04
102 #define SURFACE1_LOWER_BOUND 0x0B14
103 #define SURFACE2_LOWER_BOUND 0x0B24
104 #define SURFACE3_LOWER_BOUND 0x0B34
105 #define SURFACE4_LOWER_BOUND 0x0B44
106 #define SURFACE5_LOWER_BOUND 0x0B54
107 #define SURFACE6_LOWER_BOUND 0x0B64
108 #define SURFACE7_LOWER_BOUND 0x0B74
109 #define SURFACE0_UPPER_BOUND 0x0B08
110 #define SURFACE1_UPPER_BOUND 0x0B18
111 #define SURFACE2_UPPER_BOUND 0x0B28
112 #define SURFACE3_UPPER_BOUND 0x0B38
113 #define SURFACE4_UPPER_BOUND 0x0B48
114 #define SURFACE5_UPPER_BOUND 0x0B58
115 #define SURFACE6_UPPER_BOUND 0x0B68
116 #define SURFACE7_UPPER_BOUND 0x0B78
117 #define SURFACE0_INFO 0x0B0C
118 #define SURFACE1_INFO 0x0B1C
119 #define SURFACE2_INFO 0x0B2C
120 #define SURFACE3_INFO 0x0B3C
121 #define SURFACE4_INFO 0x0B4C
122 #define SURFACE5_INFO 0x0B5C
123 #define SURFACE6_INFO 0x0B6C
124 #define SURFACE7_INFO 0x0B7C
125 #define SURFACE_ACCESS_FLAGS 0x0BF8
126 #define SURFACE_ACCESS_CLR 0x0BFC
127 #define GEN_INT_CNTL 0x0040
128 #define GEN_INT_STATUS 0x0044
129 #define CRTC_EXT_CNTL 0x0054
130 #define RB3D_CNTL 0x1C3C
131 #define WAIT_UNTIL 0x1720
132 #define ISYNC_CNTL 0x1724
133 #define RBBM_GUICNTL 0x172C
134 #define RBBM_STATUS 0x0E40
135 #define RBBM_STATUS_alt_1 0x1740
136 #define RBBM_CNTL 0x00EC
137 #define RBBM_CNTL_alt_1 0x0E44
138 #define RBBM_SOFT_RESET 0x00F0
139 #define RBBM_SOFT_RESET_alt_1 0x0E48
140 #define NQWAIT_UNTIL 0x0E50
141 #define RBBM_DEBUG 0x0E6C
142 #define RBBM_CMDFIFO_ADDR 0x0E70
143 #define RBBM_CMDFIFO_DATAL 0x0E74
144 #define RBBM_CMDFIFO_DATAH 0x0E78
145 #define RBBM_CMDFIFO_STAT 0x0E7C
146 #define CRTC_STATUS 0x005C
147 #define GPIO_VGA_DDC 0x0060
148 #define GPIO_DVI_DDC 0x0064
149 #define GPIO_MONID 0x0068
150 #define GPIO_CRT2_DDC 0x006c
151 #define PALETTE_INDEX 0x00B0
152 #define PALETTE_DATA 0x00B4
153 #define PALETTE_30_DATA 0x00B8
154 #define CRTC_H_TOTAL_DISP 0x0200
155 #define CRTC_H_SYNC_STRT_WID 0x0204
156 #define CRTC_V_TOTAL_DISP 0x0208
157 #define CRTC_V_SYNC_STRT_WID 0x020C
158 #define CRTC_VLINE_CRNT_VLINE 0x0210
159 #define CRTC_CRNT_FRAME 0x0214
160 #define CRTC_GUI_TRIG_VLINE 0x0218
161 #define CRTC_DEBUG 0x021C
162 #define CRTC_OFFSET_RIGHT 0x0220
163 #define CRTC_OFFSET 0x0224
164 #define CRTC_OFFSET_CNTL 0x0228
165 #define CRTC_PITCH 0x022C
166 #define OVR_CLR 0x0230
167 #define OVR_WID_LEFT_RIGHT 0x0234
168 #define OVR_WID_TOP_BOTTOM 0x0238
169 #define DISPLAY_BASE_ADDR 0x023C
170 #define SNAPSHOT_VH_COUNTS 0x0240
171 #define SNAPSHOT_F_COUNT 0x0244
172 #define N_VIF_COUNT 0x0248
173 #define SNAPSHOT_VIF_COUNT 0x024C
174 #define FP_CRTC_H_TOTAL_DISP 0x0250
175 #define FP_CRTC_V_TOTAL_DISP 0x0254
176 #define CRT_CRTC_H_SYNC_STRT_WID 0x0258
177 #define CRT_CRTC_V_SYNC_STRT_WID 0x025C
178 #define CUR_OFFSET 0x0260
179 #define CUR_HORZ_VERT_POSN 0x0264
180 #define CUR_HORZ_VERT_OFF 0x0268
181 #define CUR_CLR0 0x026C
182 #define CUR_CLR1 0x0270
183 #define FP_HORZ_VERT_ACTIVE 0x0278
184 #define CRTC_MORE_CNTL 0x027C
185 #define CRTC_H_CUTOFF_ACTIVE_EN (1<<4)
186 #define CRTC_V_CUTOFF_ACTIVE_EN (1<<5)
187 #define DAC_EXT_CNTL 0x0280
188 #define FP_GEN_CNTL 0x0284
189 #define FP_HORZ_STRETCH 0x028C
190 #define FP_VERT_STRETCH 0x0290
191 #define FP_H_SYNC_STRT_WID 0x02C4
192 #define FP_V_SYNC_STRT_WID 0x02C8
193 #define AUX_WINDOW_HORZ_CNTL 0x02D8
194 #define AUX_WINDOW_VERT_CNTL 0x02DC
195 //#define DDA_CONFIG 0x02e0
196 //#define DDA_ON_OFF 0x02e4
197 #define DVI_I2C_CNTL_1 0x02e4
@@ -199,192 +199,192 @@
199 #define GRPH2_BUFFER_CNTL 0x03F0
200 #define VGA_BUFFER_CNTL 0x02F4
201 #define OV0_Y_X_START 0x0400
202 #define OV0_Y_X_END 0x0404
203 #define OV0_PIPELINE_CNTL 0x0408
204 #define OV0_REG_LOAD_CNTL 0x0410
205 #define OV0_SCALE_CNTL 0x0420
206 #define OV0_V_INC 0x0424
207 #define OV0_P1_V_ACCUM_INIT 0x0428
208 #define OV0_P23_V_ACCUM_INIT 0x042C
209 #define OV0_P1_BLANK_LINES_AT_TOP 0x0430
210 #define OV0_P23_BLANK_LINES_AT_TOP 0x0434
211 #define OV0_BASE_ADDR 0x043C
212 #define OV0_VID_BUF0_BASE_ADRS 0x0440
213 #define OV0_VID_BUF1_BASE_ADRS 0x0444
214 #define OV0_VID_BUF2_BASE_ADRS 0x0448
215 #define OV0_VID_BUF3_BASE_ADRS 0x044C
216 #define OV0_VID_BUF4_BASE_ADRS 0x0450
217 #define OV0_VID_BUF5_BASE_ADRS 0x0454
218 #define OV0_VID_BUF_PITCH0_VALUE 0x0460
219 #define OV0_VID_BUF_PITCH1_VALUE 0x0464
220 #define OV0_AUTO_FLIP_CNTRL 0x0470
221 #define OV0_DEINTERLACE_PATTERN 0x0474
222 #define OV0_SUBMIT_HISTORY 0x0478
223 #define OV0_H_INC 0x0480
224 #define OV0_STEP_BY 0x0484
225 #define OV0_P1_H_ACCUM_INIT 0x0488
226 #define OV0_P23_H_ACCUM_INIT 0x048C
227 #define OV0_P1_X_START_END 0x0494
228 #define OV0_P2_X_START_END 0x0498
229 #define OV0_P3_X_START_END 0x049C
230 #define OV0_FILTER_CNTL 0x04A0
231 #define OV0_FOUR_TAP_COEF_0 0x04B0
232 #define OV0_FOUR_TAP_COEF_1 0x04B4
233 #define OV0_FOUR_TAP_COEF_2 0x04B8
234 #define OV0_FOUR_TAP_COEF_3 0x04BC
235 #define OV0_FOUR_TAP_COEF_4 0x04C0
236 #define OV0_FLAG_CNTRL 0x04DC
237 #define OV0_SLICE_CNTL 0x04E0
238 #define OV0_VID_KEY_CLR_LOW 0x04E4
239 #define OV0_VID_KEY_CLR_HIGH 0x04E8
240 #define OV0_GRPH_KEY_CLR_LOW 0x04EC
241 #define OV0_GRPH_KEY_CLR_HIGH 0x04F0
242 #define OV0_KEY_CNTL 0x04F4
243 #define OV0_TEST 0x04F8
244 #define SUBPIC_CNTL 0x0540
245 #define SUBPIC_DEFCOLCON 0x0544
246 #define SUBPIC_Y_X_START 0x054C
247 #define SUBPIC_Y_X_END 0x0550
248 #define SUBPIC_V_INC 0x0554
249 #define SUBPIC_H_INC 0x0558
250 #define SUBPIC_BUF0_OFFSET 0x055C
251 #define SUBPIC_BUF1_OFFSET 0x0560
252 #define SUBPIC_LC0_OFFSET 0x0564
253 #define SUBPIC_LC1_OFFSET 0x0568
254 #define SUBPIC_PITCH 0x056C
255 #define SUBPIC_BTN_HLI_COLCON 0x0570
256 #define SUBPIC_BTN_HLI_Y_X_START 0x0574
257 #define SUBPIC_BTN_HLI_Y_X_END 0x0578
258 #define SUBPIC_PALETTE_INDEX 0x057C
259 #define SUBPIC_PALETTE_DATA 0x0580
260 #define SUBPIC_H_ACCUM_INIT 0x0584
261 #define SUBPIC_V_ACCUM_INIT 0x0588
262 #define DISP_MISC_CNTL 0x0D00
263 #define DAC_MACRO_CNTL 0x0D04
264 #define DISP_PWR_MAN 0x0D08
265 #define DISP_TEST_DEBUG_CNTL 0x0D10
266 #define DISP_HW_DEBUG 0x0D14
267 #define DAC_CRC_SIG1 0x0D18
268 #define DAC_CRC_SIG2 0x0D1C
269 #define OV0_LIN_TRANS_A 0x0D20
270 #define OV0_LIN_TRANS_B 0x0D24
271 #define OV0_LIN_TRANS_C 0x0D28
272 #define OV0_LIN_TRANS_D 0x0D2C
273 #define OV0_LIN_TRANS_E 0x0D30
274 #define OV0_LIN_TRANS_F 0x0D34
275 #define OV0_GAMMA_0_F 0x0D40
276 #define OV0_GAMMA_10_1F 0x0D44
277 #define OV0_GAMMA_20_3F 0x0D48
278 #define OV0_GAMMA_40_7F 0x0D4C
279 #define OV0_GAMMA_380_3BF 0x0D50
280 #define OV0_GAMMA_3C0_3FF 0x0D54
281 #define DISP_MERGE_CNTL 0x0D60
282 #define DISP_OUTPUT_CNTL 0x0D64
283 #define DISP_LIN_TRANS_GRPH_A 0x0D80
284 #define DISP_LIN_TRANS_GRPH_B 0x0D84
285 #define DISP_LIN_TRANS_GRPH_C 0x0D88
286 #define DISP_LIN_TRANS_GRPH_D 0x0D8C
287 #define DISP_LIN_TRANS_GRPH_E 0x0D90
288 #define DISP_LIN_TRANS_GRPH_F 0x0D94
289 #define DISP_LIN_TRANS_VID_A 0x0D98
290 #define DISP_LIN_TRANS_VID_B 0x0D9C
291 #define DISP_LIN_TRANS_VID_C 0x0DA0
292 #define DISP_LIN_TRANS_VID_D 0x0DA4
293 #define DISP_LIN_TRANS_VID_E 0x0DA8
294 #define DISP_LIN_TRANS_VID_F 0x0DAC
295 #define RMX_HORZ_FILTER_0TAP_COEF 0x0DB0
296 #define RMX_HORZ_FILTER_1TAP_COEF 0x0DB4
297 #define RMX_HORZ_FILTER_2TAP_COEF 0x0DB8
298 #define RMX_HORZ_PHASE 0x0DBC
299 #define DAC_EMBEDDED_SYNC_CNTL 0x0DC0
300 #define DAC_BROAD_PULSE 0x0DC4
301 #define DAC_SKEW_CLKS 0x0DC8
302 #define DAC_INCR 0x0DCC
303 #define DAC_NEG_SYNC_LEVEL 0x0DD0
304 #define DAC_POS_SYNC_LEVEL 0x0DD4
305 #define DAC_BLANK_LEVEL 0x0DD8
306 #define CLOCK_CNTL_INDEX 0x0008
307 #define CLOCK_CNTL_DATA 0x000C
308 #define CP_RB_CNTL 0x0704
309 #define CP_RB_BASE 0x0700
310 #define CP_RB_RPTR_ADDR 0x070C
311 #define CP_RB_RPTR 0x0710
312 #define CP_RB_WPTR 0x0714
313 #define CP_RB_WPTR_DELAY 0x0718
314 #define CP_IB_BASE 0x0738
315 #define CP_IB_BUFSZ 0x073C
316 #define SCRATCH_REG0 0x15E0
317 #define GUI_SCRATCH_REG0 0x15E0
318 #define SCRATCH_REG1 0x15E4
319 #define GUI_SCRATCH_REG1 0x15E4
320 #define SCRATCH_REG2 0x15E8
321 #define GUI_SCRATCH_REG2 0x15E8
322 #define SCRATCH_REG3 0x15EC
323 #define GUI_SCRATCH_REG3 0x15EC
324 #define SCRATCH_REG4 0x15F0
325 #define GUI_SCRATCH_REG4 0x15F0
326 #define SCRATCH_REG5 0x15F4
327 #define GUI_SCRATCH_REG5 0x15F4
328 #define SCRATCH_UMSK 0x0770
329 #define SCRATCH_ADDR 0x0774
330 #define DP_BRUSH_FRGD_CLR 0x147C
331 #define DP_BRUSH_BKGD_CLR 0x1478
332 #define DST_LINE_START 0x1600
333 #define DST_LINE_END 0x1604
334 #define SRC_OFFSET 0x15AC
335 #define SRC_PITCH 0x15B0
336 #define SRC_TILE 0x1704
337 #define SRC_PITCH_OFFSET 0x1428
338 #define SRC_X 0x1414
339 #define SRC_Y 0x1418
340 #define SRC_X_Y 0x1590
341 #define SRC_Y_X 0x1434
342 #define DST_Y_X 0x1438
343 #define DST_WIDTH_HEIGHT 0x1598
344 #define DST_HEIGHT_WIDTH 0x143c
345 #define DST_OFFSET 0x1404
346 #define SRC_CLUT_ADDRESS 0x1780
347 #define SRC_CLUT_DATA 0x1784
348 #define SRC_CLUT_DATA_RD 0x1788
349 #define HOST_DATA0 0x17C0
350 #define HOST_DATA1 0x17C4
351 #define HOST_DATA2 0x17C8
352 #define HOST_DATA3 0x17CC
353 #define HOST_DATA4 0x17D0
354 #define HOST_DATA5 0x17D4
355 #define HOST_DATA6 0x17D8
356 #define HOST_DATA7 0x17DC
357 #define HOST_DATA_LAST 0x17E0
358 #define DP_SRC_ENDIAN 0x15D4
359 #define DP_SRC_FRGD_CLR 0x15D8
360 #define DP_SRC_BKGD_CLR 0x15DC
361 #define SC_LEFT 0x1640
362 #define SC_RIGHT 0x1644
363 #define SC_TOP 0x1648
364 #define SC_BOTTOM 0x164C
365 #define SRC_SC_RIGHT 0x1654
366 #define SRC_SC_BOTTOM 0x165C
367 #define DP_CNTL 0x16C0
368 #define DP_CNTL_XDIR_YDIR_YMAJOR 0x16D0
369 #define DP_DATATYPE 0x16C4
370 #define DP_MIX 0x16C8
371 #define DP_WRITE_MSK 0x16CC
372 #define DP_XOP 0x17F8
373 #define CLR_CMP_CLR_SRC 0x15C4
374 #define CLR_CMP_CLR_DST 0x15C8
375 #define CLR_CMP_CNTL 0x15C0
376 #define CLR_CMP_MSK 0x15CC
377 #define DSTCACHE_MODE 0x1710
378 #define DSTCACHE_CTLSTAT 0x1714
379 #define DEFAULT_PITCH_OFFSET 0x16E0
380 #define DEFAULT_SC_BOTTOM_RIGHT 0x16E8
381 #define DEFAULT_SC_TOP_LEFT 0x16EC
382 #define SRC_PITCH_OFFSET 0x1428
383 #define DST_PITCH_OFFSET 0x142C
384 #define DP_GUI_MASTER_CNTL 0x146C
385 #define SC_TOP_LEFT 0x16EC
386 #define SC_BOTTOM_RIGHT 0x16F0
387 #define SRC_SC_BOTTOM_RIGHT 0x16F4
388 #define RB2D_DSTCACHE_MODE 0x3428
389 #define RB2D_DSTCACHE_CTLSTAT_broken 0x342C /* do not use */
390 #define LVDS_GEN_CNTL 0x02d0
@@ -686,7 +686,7 @@
686 #define VERT_FP_LOOP_STRETCH (0x7 << 28)
687 #define VERT_STRETCH_RESERVED 0xf1000000
688
689 /* DAC_CNTL bit constants */
690 #define DAC_8BIT_EN 0x00000100
691 #define DAC_4BPP_PIX_ORDER 0x00000200
692 #define DAC_CRC_EN 0x00080000
@@ -700,7 +700,7 @@
700 #define DAC_CMP_EN (1 << 3)
701 #define DAC_CMP_OUTPUT (1 << 7)
702
703 /* DAC_CNTL2 bit constants */
704 #define DAC2_EXPAND_MODE (1 << 14)
705 #define DAC2_CMP_EN (1 << 7)
706 #define DAC2_PALETTE_ACCESS_CNTL (1 << 5)
diff --git a/include/video/s1d13xxxfb.h b/include/video/s1d13xxxfb.h
index fe41b8407946..c3b2a2aa7140 100644
--- a/include/video/s1d13xxxfb.h
+++ b/include/video/s1d13xxxfb.h
@@ -14,13 +14,16 @@
 #define S1D13XXXFB_H
 
 #define S1D_PALETTE_SIZE	256
-#define S1D13506_CHIP_REV	4	/* expected chip revision number for s1d13506 */
-#define S1D13806_CHIP_REV	7	/* expected chip revision number for s1d13806 */
-#define S1D_FBID		"S1D13806"
-#define S1D_DEVICENAME		"s1d13806fb"
+#define S1D_FBID		"S1D13xxx"
+#define S1D_DEVICENAME		"s1d13xxxfb"
+
+/* S1DREG_REV_CODE register = prod_id (6 bits) + revision (2 bits) */
+#define S1D13505_PROD_ID	0x3	/* 000011 */
+#define S1D13506_PROD_ID	0x4	/* 000100 */
+#define S1D13806_PROD_ID	0x7	/* 000111 */
 
 /* register definitions (tested on s1d13896) */
-#define S1DREG_REV_CODE		0x0000	/* Revision Code Register */
+#define S1DREG_REV_CODE		0x0000	/* Prod + Rev Code Register */
 #define S1DREG_MISC		0x0001	/* Miscellaneous Register */
 #define S1DREG_GPIO_CNF0	0x0004	/* General IO Pins Configuration Register 0 */
 #define S1DREG_GPIO_CNF1	0x0005	/* General IO Pins Configuration Register 1 */
@@ -141,10 +144,11 @@ struct s1d13xxxfb_regval {
 	u8	value;
 };
 
-
 struct s1d13xxxfb_par {
 	void __iomem	*regs;
 	unsigned char	display;
+	unsigned char	prod_id;
+	unsigned char	revision;
 
 	unsigned int	pseudo_palette[16];
 #ifdef CONFIG_PM
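
The new S1DREG_REV_CODE comment above packs the chip identity into one byte: a 6-bit product ID in the upper bits and a 2-bit silicon revision in the lower bits. A minimal sketch of how a probe path could decode that byte into the new prod_id/revision fields follows; the helper name and the readb() access are illustrative assumptions, not code taken from the driver.

/* Sketch only -- assumes <linux/io.h> and <linux/errno.h> are included. */
static int s1d13xxx_identify(struct s1d13xxxfb_par *par)
{
	u8 rev_code = readb(par->regs + S1DREG_REV_CODE);

	par->prod_id  = rev_code >> 2;	/* upper 6 bits: product ID */
	par->revision = rev_code & 0x3;	/* lower 2 bits: silicon revision */

	switch (par->prod_id) {
	case S1D13505_PROD_ID:
	case S1D13506_PROD_ID:
	case S1D13806_PROD_ID:
		return 0;		/* recognised controller */
	default:
		return -ENODEV;		/* unknown product ID */
	}
}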