Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acexcep.h | 6
-rw-r--r--  include/acpi/acpi_bus.h | 19
-rw-r--r--  include/acpi/acpi_drivers.h | 33
-rw-r--r--  include/acpi/acpiosxf.h | 4
-rw-r--r--  include/acpi/acpixf.h | 18
-rw-r--r--  include/acpi/actbl.h | 74
-rw-r--r--  include/acpi/actbl1.h | 6
-rw-r--r--  include/acpi/actypes.h | 18
-rw-r--r--  include/acpi/processor.h | 4
-rw-r--r--  include/acpi/video.h | 11
-rw-r--r--  include/asm-frv/ftrace.h | 1
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 29
-rw-r--r--  include/asm-m32r/ftrace.h | 1
-rw-r--r--  include/asm-mn10300/ftrace.h | 1
-rw-r--r--  include/linux/acpi.h | 1
-rw-r--r--  include/linux/async_tx.h | 9
-rw-r--r--  include/linux/ata.h | 41
-rw-r--r--  include/linux/backing-dev.h | 12
-rw-r--r--  include/linux/bio.h | 19
-rw-r--r--  include/linux/blkdev.h | 57
-rw-r--r--  include/linux/blktrace_api.h | 5
-rw-r--r--  include/linux/cgroup.h | 5
-rw-r--r--  include/linux/compat.h | 4
-rw-r--r--  include/linux/compiler.h | 13
-rw-r--r--  include/linux/connector.h | 4
-rw-r--r--  include/linux/debugfs.h | 8
-rw-r--r--  include/linux/device-mapper.h | 4
-rw-r--r--  include/linux/dm-dirty-log.h | 13
-rw-r--r--  include/linux/dma-mapping.h | 2
-rw-r--r--  include/linux/dma_remapping.h | 1
-rw-r--r--  include/linux/dmaengine.h | 30
-rw-r--r--  include/linux/dmar.h | 11
-rw-r--r--  include/linux/ds1wm.h | 12
-rw-r--r--  include/linux/dst.h | 587
-rw-r--r--  include/linux/dw_dmac.h | 19
-rw-r--r--  include/linux/elevator.h | 1
-rw-r--r--  include/linux/ext3_fs.h | 1
-rw-r--r--  include/linux/fdtable.h | 4
-rw-r--r--  include/linux/fs.h | 46
-rw-r--r--  include/linux/fscache-cache.h | 505
-rw-r--r--  include/linux/fscache.h | 618
-rw-r--r--  include/linux/fsl_devices.h | 22
-rw-r--r--  include/linux/ftrace.h | 252
-rw-r--r--  include/linux/ftrace_irq.h | 2
-rw-r--r--  include/linux/gfp.h | 1
-rw-r--r--  include/linux/hardirq.h | 75
-rw-r--r--  include/linux/hdreg.h | 66
-rw-r--r--  include/linux/hid.h | 23
-rw-r--r--  include/linux/highmem.h | 29
-rw-r--r--  include/linux/hrtimer.h | 5
-rw-r--r--  include/linux/i2c-algo-sgi.h | 26
-rw-r--r--  include/linux/i2c-id.h | 38
-rw-r--r--  include/linux/i2c.h | 4
-rw-r--r--  include/linux/i2c/s6000.h | 10
-rw-r--r--  include/linux/i2c/twl4030.h | 47
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/intel-iommu.h | 15
-rw-r--r--  include/linux/interrupt.h | 81
-rw-r--r--  include/linux/iommu.h | 13
-rw-r--r--  include/linux/ipc_namespace.h | 65
-rw-r--r--  include/linux/irq.h | 5
-rw-r--r--  include/linux/irqflags.h | 8
-rw-r--r--  include/linux/irqreturn.h | 2
-rw-r--r--  include/linux/jbd.h | 7
-rw-r--r--  include/linux/kallsyms.h | 15
-rw-r--r--  include/linux/kernel.h | 150
-rw-r--r--  include/linux/key.h | 1
-rw-r--r--  include/linux/kmod.h | 11
-rw-r--r--  include/linux/kprobes.h | 52
-rw-r--r--  include/linux/leds-bd2802.h | 26
-rw-r--r--  include/linux/leds.h | 4
-rw-r--r--  include/linux/leds_pwm.h | 21
-rw-r--r--  include/linux/libata.h | 2
-rw-r--r--  include/linux/lockd/xdr.h | 12
-rw-r--r--  include/linux/lockd/xdr4.h | 10
-rw-r--r--  include/linux/memory.h | 6
-rw-r--r--  include/linux/mfd/ds1wm.h | 6
-rw-r--r--  include/linux/mfd/htc-pasic3.h | 1
-rw-r--r--  include/linux/mfd/pcf50633/core.h | 2
-rw-r--r--  include/linux/mfd/pcf50633/mbc.h | 1
-rw-r--r--  include/linux/mfd/wm8350/core.h | 2
-rw-r--r--  include/linux/mg_disk.h | 206
-rw-r--r--  include/linux/mmc/host.h | 5
-rw-r--r--  include/linux/mmzone.h | 6
-rw-r--r--  include/linux/module.h | 68
-rw-r--r--  include/linux/moduleparam.h | 10
-rw-r--r--  include/linux/mtd/mtd.h | 21
-rw-r--r--  include/linux/mtd/nand.h | 4
-rw-r--r--  include/linux/mtd/partitions.h | 12
-rw-r--r--  include/linux/nfs.h | 1
-rw-r--r--  include/linux/nfs4.h | 138
-rw-r--r--  include/linux/nfs_fs.h | 13
-rw-r--r--  include/linux/nfs_fs_sb.h | 11
-rw-r--r--  include/linux/nfs_iostat.h | 12
-rw-r--r--  include/linux/nfsd/cache.h | 8
-rw-r--r--  include/linux/nfsd/nfsd.h | 225
-rw-r--r--  include/linux/nfsd/nfsfh.h | 7
-rw-r--r--  include/linux/nfsd/state.h | 84
-rw-r--r--  include/linux/nfsd/stats.h | 9
-rw-r--r--  include/linux/nfsd/xdr4.h | 129
-rw-r--r--  include/linux/nilfs2_fs.h | 801
-rw-r--r--  include/linux/page-flags.h | 40
-rw-r--r--  include/linux/pagemap.h | 5
-rw-r--r--  include/linux/parport_pc.h | 11
-rw-r--r--  include/linux/pci.h | 5
-rw-r--r--  include/linux/pci_ids.h | 26
-rw-r--r--  include/linux/pda_power.h | 2
-rw-r--r--  include/linux/power_supply.h | 2
-rw-r--r--  include/linux/pwm.h | 2
-rw-r--r--  include/linux/raid/bitmap.h | 288
-rw-r--r--  include/linux/raid/linear.h | 31
-rw-r--r--  include/linux/raid/md.h | 81
-rw-r--r--  include/linux/raid/md_k.h | 402
-rw-r--r--  include/linux/raid/md_u.h | 35
-rw-r--r--  include/linux/raid/multipath.h | 42
-rw-r--r--  include/linux/raid/pq.h | 132
-rw-r--r--  include/linux/raid/raid0.h | 30
-rw-r--r--  include/linux/raid/raid1.h | 134
-rw-r--r--  include/linux/raid/raid10.h | 123
-rw-r--r--  include/linux/raid/raid5.h | 402
-rw-r--r--  include/linux/raid/xor.h | 2
-rw-r--r--  include/linux/rcuclassic.h | 16
-rw-r--r--  include/linux/rcupdate.h | 1
-rw-r--r--  include/linux/rcupreempt.h | 53
-rw-r--r--  include/linux/rcutree.h | 27
-rw-r--r--  include/linux/regulator/bq24022.h | 3
-rw-r--r--  include/linux/regulator/consumer.h | 6
-rw-r--r--  include/linux/regulator/driver.h | 81
-rw-r--r--  include/linux/regulator/fixed.h | 3
-rw-r--r--  include/linux/regulator/machine.h | 12
-rw-r--r--  include/linux/ring_buffer.h | 38
-rw-r--r--  include/linux/sched.h | 27
-rw-r--r--  include/linux/security.h | 24
-rw-r--r--  include/linux/serial_core.h | 5
-rw-r--r--  include/linux/serial_max3100.h | 52
-rw-r--r--  include/linux/slab_def.h | 68
-rw-r--r--  include/linux/slob_def.h | 9
-rw-r--r--  include/linux/slow-work.h | 95
-rw-r--r--  include/linux/slub_def.h | 49
-rw-r--r--  include/linux/smp.h | 7
-rw-r--r--  include/linux/sonypi.h | 8
-rw-r--r--  include/linux/spi/spi.h | 6
-rw-r--r--  include/linux/string.h | 16
-rw-r--r--  include/linux/sunrpc/svc.h | 16
-rw-r--r--  include/linux/sunrpc/xdr.h | 42
-rw-r--r--  include/linux/syscalls.h | 64
-rw-r--r--  include/linux/thermal.h | 48
-rw-r--r--  include/linux/timeriomem-rng.h | 2
-rw-r--r--  include/linux/topology.h | 11
-rw-r--r--  include/linux/trace_clock.h | 19
-rw-r--r--  include/linux/tracepoint.h | 116
-rw-r--r--  include/linux/tty_driver.h | 3
-rw-r--r--  include/linux/usb/wusb.h | 3
-rw-r--r--  include/linux/videodev2.h | 5
-rw-r--r--  include/linux/writeback.h | 2
-rw-r--r--  include/media/msp3400.h | 4
-rw-r--r--  include/media/ov772x.h | 35
-rw-r--r--  include/media/saa7146.h | 2
-rw-r--r--  include/media/tvaudio.h | 19
-rw-r--r--  include/media/v4l2-common.h | 141
-rw-r--r--  include/media/v4l2-i2c-drv-legacy.h | 152
-rw-r--r--  include/media/v4l2-i2c-drv.h | 6
-rw-r--r--  include/media/v4l2-subdev.h | 115
-rw-r--r--  include/net/netfilter/nf_conntrack_expect.h | 5
-rw-r--r--  include/scsi/fc/fc_fip.h | 237
-rw-r--r--  include/scsi/fc_transport_fcoe.h | 54
-rw-r--r--  include/scsi/libfc.h | 45
-rw-r--r--  include/scsi/libfcoe.h | 227
-rw-r--r--  include/trace/block.h | 70
-rw-r--r--  include/trace/irq.h | 9
-rw-r--r--  include/trace/irq_event_types.h | 55
-rw-r--r--  include/trace/kmemtrace.h | 63
-rw-r--r--  include/trace/lockdep.h | 9
-rw-r--r--  include/trace/lockdep_event_types.h | 44
-rw-r--r--  include/trace/power.h | 32
-rw-r--r--  include/trace/sched.h | 49
-rw-r--r--  include/trace/sched_event_types.h | 337
-rw-r--r--  include/trace/skb.h | 4
-rw-r--r--  include/trace/trace_event_types.h | 5
-rw-r--r--  include/trace/trace_events.h | 5
-rw-r--r--  include/trace/workqueue.h | 25
-rw-r--r--  include/video/tdfx.h | 26
182 files changed, 6669 insertions, 2985 deletions
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index eda04546cdf6..473d584b1d31 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -103,8 +103,9 @@
 #define AE_BAD_OCTAL_CONSTANT (acpi_status) (0x0006 | AE_CODE_PROGRAMMER)
 #define AE_BAD_DECIMAL_CONSTANT (acpi_status) (0x0007 | AE_CODE_PROGRAMMER)
 #define AE_MISSING_ARGUMENTS (acpi_status) (0x0008 | AE_CODE_PROGRAMMER)
+#define AE_BAD_ADDRESS (acpi_status) (0x0009 | AE_CODE_PROGRAMMER)

-#define AE_CODE_PGM_MAX 0x0008
+#define AE_CODE_PGM_MAX 0x0009

 /*
  * Acpi table exceptions
@@ -224,7 +225,8 @@ char const *acpi_gbl_exception_names_pgm[] = {
         "AE_BAD_HEX_CONSTANT",
         "AE_BAD_OCTAL_CONSTANT",
         "AE_BAD_DECIMAL_CONSTANT",
-        "AE_MISSING_ARGUMENTS"
+        "AE_MISSING_ARGUMENTS",
+        "AE_BAD_ADDRESS"
 };

 char const *acpi_gbl_exception_names_tbl[] = {
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index e9f6574930ef..c34b11022908 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -88,44 +88,30 @@ struct acpi_device;

 typedef int (*acpi_op_add) (struct acpi_device * device);
 typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
-typedef int (*acpi_op_lock) (struct acpi_device * device, int type);
 typedef int (*acpi_op_start) (struct acpi_device * device);
 typedef int (*acpi_op_stop) (struct acpi_device * device, int type);
 typedef int (*acpi_op_suspend) (struct acpi_device * device,
                                 pm_message_t state);
 typedef int (*acpi_op_resume) (struct acpi_device * device);
-typedef int (*acpi_op_scan) (struct acpi_device * device);
 typedef int (*acpi_op_bind) (struct acpi_device * device);
 typedef int (*acpi_op_unbind) (struct acpi_device * device);
-typedef int (*acpi_op_shutdown) (struct acpi_device * device);
+typedef void (*acpi_op_notify) (struct acpi_device * device, u32 event);

 struct acpi_bus_ops {
         u32 acpi_op_add:1;
-        u32 acpi_op_remove:1;
-        u32 acpi_op_lock:1;
         u32 acpi_op_start:1;
-        u32 acpi_op_stop:1;
-        u32 acpi_op_suspend:1;
-        u32 acpi_op_resume:1;
-        u32 acpi_op_scan:1;
-        u32 acpi_op_bind:1;
-        u32 acpi_op_unbind:1;
-        u32 acpi_op_shutdown:1;
-        u32 reserved:21;
 };

 struct acpi_device_ops {
         acpi_op_add add;
         acpi_op_remove remove;
-        acpi_op_lock lock;
         acpi_op_start start;
         acpi_op_stop stop;
         acpi_op_suspend suspend;
         acpi_op_resume resume;
-        acpi_op_scan scan;
         acpi_op_bind bind;
         acpi_op_unbind unbind;
-        acpi_op_shutdown shutdown;
+        acpi_op_notify notify;
 };

 struct acpi_driver {
@@ -284,7 +270,6 @@ struct acpi_device {
         struct list_head children;
         struct list_head node;
         struct list_head wakeup_list;
-        struct list_head g_list;
         struct acpi_device_status status;
         struct acpi_device_flags flags;
         struct acpi_device_pnp pnp;
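The per-driver lock/scan/shutdown hooks above are replaced by a single .notify callback. A minimal sketch of how a driver might wire it up; the driver name and the logging body are illustrative, only the acpi_device_ops layout comes from this header:

    #include <acpi/acpi_bus.h>

    /* Sketch only: a driver that just logs notifications via the new hook. */
    static void example_acpi_notify(struct acpi_device *device, u32 event)
    {
        /* 'event' is the raw Notify() value; fixed-hardware button events
         * are delivered as ACPI_FIXED_HARDWARE_EVENT (see acpi_drivers.h). */
        dev_info(&device->dev, "ACPI notify event 0x%x\n", event);
    }

    static struct acpi_driver example_acpi_driver = {
        .name  = "example",
        .class = "example",
        .ops   = {
            .notify = example_acpi_notify,
        },
    };
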
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index 5fc1bb0f4a90..0352c8f0b05b 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -67,6 +67,16 @@
 #define ACPI_BAY_HID "LNXIOBAY"
 #define ACPI_DOCK_HID "LNXDOCK"

+/*
+ * For fixed hardware buttons, we fabricate acpi_devices with HID
+ * ACPI_BUTTON_HID_POWERF or ACPI_BUTTON_HID_SLEEPF. Fixed hardware
+ * signals only an event; it doesn't supply a notification value.
+ * To allow drivers to treat notifications from fixed hardware the
+ * same as those from real devices, we turn the events into this
+ * notification value.
+ */
+#define ACPI_FIXED_HARDWARE_EVENT 0x100
+
 /* --------------------------------------------------------------------------
                                   PCI
    -------------------------------------------------------------------------- */
@@ -99,24 +109,6 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_device *device, int domain,
                                    int bus);

 /* --------------------------------------------------------------------------
-                                  Power Resource
-   -------------------------------------------------------------------------- */
-
-int acpi_device_sleep_wake(struct acpi_device *dev,
-                           int enable, int sleep_state, int dev_state);
-int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state);
-int acpi_disable_wakeup_device_power(struct acpi_device *dev);
-int acpi_power_get_inferred_state(struct acpi_device *device);
-int acpi_power_transition(struct acpi_device *device, int state);
-extern int acpi_power_nocheck;
-
-/* --------------------------------------------------------------------------
-                                  Embedded Controller
-   -------------------------------------------------------------------------- */
-int acpi_ec_ecdt_probe(void);
-int acpi_boot_ec_enable(void);
-
-/* --------------------------------------------------------------------------
                                   Processor
    -------------------------------------------------------------------------- */

@@ -165,9 +157,4 @@ static inline void unregister_hotplug_dock_device(acpi_handle handle)
 }
 #endif

-/*--------------------------------------------------------------------------
-                                  Suspend/Resume
-  -------------------------------------------------------------------------- */
-extern int acpi_sleep_init(void);
-
 #endif /*__ACPI_DRIVERS_H__*/
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index ab0b85cf21f3..3e798593b17b 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -242,10 +242,6 @@ acpi_os_derive_pci_id(acpi_handle rhandle,
 acpi_status acpi_os_validate_interface(char *interface);
 acpi_status acpi_osi_invalidate(char* interface);

-acpi_status
-acpi_os_validate_address(u8 space_id, acpi_physical_address address,
-                         acpi_size length, char *name);
-
 u64 acpi_os_get_timer(void);

 acpi_status acpi_os_signal(u32 function, void *info);
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index cc40102fe2f3..4db89e98535d 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -47,7 +47,7 @@

 /* Current ACPICA subsystem version in YYYYMMDD format */

-#define ACPI_CA_VERSION 0x20081204
+#define ACPI_CA_VERSION 0x20090320

 #include "actypes.h"
 #include "actbl.h"
@@ -191,14 +191,12 @@ acpi_evaluate_object(acpi_handle object,
                      struct acpi_object_list *parameter_objects,
                      struct acpi_buffer *return_object_buffer);

-#ifdef ACPI_FUTURE_USAGE
 acpi_status
 acpi_evaluate_object_typed(acpi_handle object,
                            acpi_string pathname,
                            struct acpi_object_list *external_params,
                            struct acpi_buffer *return_buffer,
                            acpi_object_type return_type);
-#endif

 acpi_status
 acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer);
@@ -349,17 +347,15 @@ acpi_resource_to_address64(struct acpi_resource *resource,
  */
 acpi_status acpi_reset(void);

-acpi_status acpi_get_register(u32 register_id, u32 * return_value);
-
-acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value);
+acpi_status acpi_read_bit_register(u32 register_id, u32 *return_value);

-acpi_status acpi_set_register(u32 register_id, u32 value);
+acpi_status acpi_write_bit_register(u32 register_id, u32 value);

-acpi_status
-acpi_set_firmware_waking_vector(u32 physical_address);
+acpi_status acpi_set_firmware_waking_vector(u32 physical_address);

-acpi_status
-acpi_set_firmware_waking_vector64(u64 physical_address);
+#if ACPI_MACHINE_WIDTH == 64
+acpi_status acpi_set_firmware_waking_vector64(u64 physical_address);
+#endif

 acpi_status acpi_read(u32 *value, struct acpi_generic_address *reg);

diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index bf8d4cfd8cf5..222733d01f36 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -214,11 +214,11 @@ struct acpi_table_fadt {
         u16 flush_size; /* Processor's memory cache line width, in bytes */
         u16 flush_stride; /* Number of flush strides that need to be read */
         u8 duty_offset; /* Processor duty cycle index in processor's P_CNT reg */
-        u8 duty_width; /* Processor duty cycle value bit width in P_CNT register. */
+        u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */
         u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */
         u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */
         u8 century; /* Index to century in RTC CMOS RAM */
-        u16 boot_flags; /* IA-PC Boot Architecture Flags. See Table 5-10 for description */
+        u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */
         u8 reserved; /* Reserved, must be zero */
         u32 flags; /* Miscellaneous flag bits (see below for individual flags) */
         struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */
@@ -236,32 +236,41 @@ struct acpi_table_fadt {
         struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */
 };

+/* FADT Boot Architecture Flags (boot_flags) */
+
+#define ACPI_FADT_LEGACY_DEVICES (1) /* 00: [V2] System has LPC or ISA bus devices */
+#define ACPI_FADT_8042 (1<<1) /* 01: [V3] System has an 8042 controller on port 60/64 */
+#define ACPI_FADT_NO_VGA (1<<2) /* 02: [V4] It is not safe to probe for VGA hardware */
+#define ACPI_FADT_NO_MSI (1<<3) /* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */
+#define ACPI_FADT_NO_ASPM (1<<4) /* 04: [V4] PCIe ASPM control must not be enabled */
+
+#define FADT2_REVISION_ID 3
+
 /* FADT flags */

-#define ACPI_FADT_WBINVD (1) /* 00: The wbinvd instruction works properly */
-#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: The wbinvd flushes but does not invalidate */
-#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: All processors support C1 state */
-#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: C2 state works on MP system */
-#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: Power button is handled as a generic feature */
-#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: Sleep button is handled as a generic feature, or not present */
-#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: RTC wakeup stat not in fixed register space */
-#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: RTC wakeup possible from S4 */
-#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: tmr_val is 32 bits 0=24-bits */
-#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: Docking supported */
-#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: System reset via the FADT RESET_REG supported */
-#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: No internal expansion capabilities and case is sealed */
-#define ACPI_FADT_HEADLESS (1<<12) /* 12: No local video capabilities or local input devices */
-#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: Must execute native instruction after writing SLP_TYPx register */
-#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
-#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: OSPM should use platform-provided timer (ACPI 3.0) */
-#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
-#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: System is compatible with remote power on (ACPI 3.0) */
-#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: All local APICs must use cluster model (ACPI 3.0) */
-#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: All local x_aPICs must use physical dest mode (ACPI 3.0) */
+#define ACPI_FADT_WBINVD (1) /* 00: [V1] The wbinvd instruction works properly */
+#define ACPI_FADT_WBINVD_FLUSH (1<<1) /* 01: [V1] wbinvd flushes but does not invalidate caches */
+#define ACPI_FADT_C1_SUPPORTED (1<<2) /* 02: [V1] All processors support C1 state */
+#define ACPI_FADT_C2_MP_SUPPORTED (1<<3) /* 03: [V1] C2 state works on MP system */
+#define ACPI_FADT_POWER_BUTTON (1<<4) /* 04: [V1] Power button is handled as a control method device */
+#define ACPI_FADT_SLEEP_BUTTON (1<<5) /* 05: [V1] Sleep button is handled as a control method device */
+#define ACPI_FADT_FIXED_RTC (1<<6) /* 06: [V1] RTC wakeup status not in fixed register space */
+#define ACPI_FADT_S4_RTC_WAKE (1<<7) /* 07: [V1] RTC alarm can wake system from S4 */
+#define ACPI_FADT_32BIT_TIMER (1<<8) /* 08: [V1] ACPI timer width is 32-bit (0=24-bit) */
+#define ACPI_FADT_DOCKING_SUPPORTED (1<<9) /* 09: [V1] Docking supported */
+#define ACPI_FADT_RESET_REGISTER (1<<10) /* 10: [V2] System reset via the FADT RESET_REG supported */
+#define ACPI_FADT_SEALED_CASE (1<<11) /* 11: [V3] No internal expansion capabilities and case is sealed */
+#define ACPI_FADT_HEADLESS (1<<12) /* 12: [V3] No local video capabilities or local input devices */
+#define ACPI_FADT_SLEEP_TYPE (1<<13) /* 13: [V3] Must execute native instruction after writing SLP_TYPx register */
+#define ACPI_FADT_PCI_EXPRESS_WAKE (1<<14) /* 14: [V4] System supports PCIEXP_WAKE (STS/EN) bits (ACPI 3.0) */
+#define ACPI_FADT_PLATFORM_CLOCK (1<<15) /* 15: [V4] OSPM should use platform-provided timer (ACPI 3.0) */
+#define ACPI_FADT_S4_RTC_VALID (1<<16) /* 16: [V4] Contents of RTC_STS valid after S4 wake (ACPI 3.0) */
+#define ACPI_FADT_REMOTE_POWER_ON (1<<17) /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */
+#define ACPI_FADT_APIC_CLUSTER (1<<18) /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
+#define ACPI_FADT_APIC_PHYSICAL (1<<19) /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */
+
+/* FADT Prefered Power Management Profiles */

-/*
- * FADT Prefered Power Management Profiles
- */
 enum acpi_prefered_pm_profiles {
         PM_UNSPECIFIED = 0,
         PM_DESKTOP = 1,
@@ -272,16 +281,6 @@ enum acpi_prefered_pm_profiles {
         PM_APPLIANCE_PC = 6
 };

-/* FADT Boot Arch Flags */
-
-#define BAF_LEGACY_DEVICES 0x0001
-#define BAF_8042_KEYBOARD_CONTROLLER 0x0002
-#define BAF_MSI_NOT_SUPPORTED 0x0008
-#define BAF_PCIE_ASPM_CONTROL 0x0010
-
-#define FADT2_REVISION_ID 3
-#define FADT2_MINUS_REVISION_ID 2
-
 /* Reset to default packing */

 #pragma pack()
@@ -310,8 +309,9 @@ struct acpi_table_desc {
 #define ACPI_TABLE_ORIGIN_UNKNOWN (0)
 #define ACPI_TABLE_ORIGIN_MAPPED (1)
 #define ACPI_TABLE_ORIGIN_ALLOCATED (2)
-#define ACPI_TABLE_ORIGIN_MASK (3)
-#define ACPI_TABLE_IS_LOADED (4)
+#define ACPI_TABLE_ORIGIN_OVERRIDE (4)
+#define ACPI_TABLE_ORIGIN_MASK (7)
+#define ACPI_TABLE_IS_LOADED (8)

 /*
  * Get the remaining ACPI tables
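With the boot_flags definitions now living beside struct acpi_table_fadt, platform code can test them directly on the parsed FADT. A short sketch, assuming ACPI table parsing has completed; acpi_gbl_FADT is ACPICA's global FADT copy and the function name is illustrative:

    #include <linux/kernel.h>
    #include <linux/acpi.h>

    static void example_check_boot_flags(void)
    {
        if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI)
            pr_info("FADT: MSI must not be enabled on this platform\n");

        if (!(acpi_gbl_FADT.boot_flags & ACPI_FADT_8042))
            pr_info("FADT: no 8042 keyboard controller present\n");
    }
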
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 18963b968114..59ade0752473 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -1016,9 +1016,9 @@ struct acpi_madt_interrupt_source {
 struct acpi_madt_local_x2apic {
         struct acpi_subtable_header header;
         u16 reserved; /* Reserved - must be zero */
-        u32 local_apic_id; /* Processor X2_APIC ID */
+        u32 local_apic_id; /* Processor x2APIC ID */
         u32 lapic_flags;
-        u32 uid; /* Extended X2_APIC processor ID */
+        u32 uid; /* ACPI processor UID */
 };

 /* 10: Local X2APIC NMI (07/2008) */
@@ -1026,7 +1026,7 @@ struct acpi_madt_local_x2apic {
 struct acpi_madt_local_x2apic_nmi {
         struct acpi_subtable_header header;
         u16 inti_flags;
-        u32 uid; /* Processor X2_APIC ID */
+        u32 uid; /* ACPI processor UID */
         u8 lint; /* LINTn to which NMI is connected */
         u8 reserved[3];
 };
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index a20aab510173..f555d927f7c0 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -777,17 +777,25 @@ typedef u8 acpi_adr_space_type;
 #define ACPI_BITREG_SCI_ENABLE 0x0E
 #define ACPI_BITREG_BUS_MASTER_RLD 0x0F
 #define ACPI_BITREG_GLOBAL_LOCK_RELEASE 0x10
-#define ACPI_BITREG_SLEEP_TYPE_A 0x11
-#define ACPI_BITREG_SLEEP_TYPE_B 0x12
-#define ACPI_BITREG_SLEEP_ENABLE 0x13
+#define ACPI_BITREG_SLEEP_TYPE 0x11
+#define ACPI_BITREG_SLEEP_ENABLE 0x12

 /* PM2 Control register */

-#define ACPI_BITREG_ARB_DISABLE 0x14
+#define ACPI_BITREG_ARB_DISABLE 0x13

-#define ACPI_BITREG_MAX 0x14
+#define ACPI_BITREG_MAX 0x13
 #define ACPI_NUM_BITREG ACPI_BITREG_MAX + 1

+/* Status register values. A 1 clears a status bit. 0 = no effect */
+
+#define ACPI_CLEAR_STATUS 1
+
+/* Enable and Control register values */
+
+#define ACPI_ENABLE_EVENT 1
+#define ACPI_DISABLE_EVENT 0
+
 /*
  * External ACPI object definition
  */
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 0574add2a1e3..b09c4fde9725 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -322,7 +322,7 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
 int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
 int acpi_processor_get_throttling_info(struct acpi_processor *pr);
 extern int acpi_processor_set_throttling(struct acpi_processor *pr, int state);
-extern struct file_operations acpi_processor_throttling_fops;
+extern const struct file_operations acpi_processor_throttling_fops;
 extern void acpi_processor_throttling_init(void);
 /* in processor_idle.c */
 int acpi_processor_power_init(struct acpi_processor *pr,
@@ -336,7 +336,7 @@ extern struct cpuidle_driver acpi_idle_driver;

 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
-extern struct file_operations acpi_processor_limit_fops;
+extern const struct file_operations acpi_processor_limit_fops;
 extern struct thermal_cooling_device_ops processor_cooling_ops;
 #ifdef CONFIG_CPU_FREQ
 void acpi_thermal_cpufreq_init(void);
diff --git a/include/acpi/video.h b/include/acpi/video.h
new file mode 100644
index 000000000000..f0275bb79ce4
--- /dev/null
+++ b/include/acpi/video.h
@@ -0,0 +1,11 @@
+#ifndef __ACPI_VIDEO_H
+#define __ACPI_VIDEO_H
+
+#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
+extern int acpi_video_register(void);
+#else
+static inline int acpi_video_register(void) { return 0; }
+#endif
+
+#endif
+
diff --git a/include/asm-frv/ftrace.h b/include/asm-frv/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-frv/ftrace.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index a654d724d3b0..7fa660fd449c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -61,6 +61,30 @@
 #define BRANCH_PROFILE()
 #endif

+#ifdef CONFIG_EVENT_TRACER
+#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .;     \
+                        *(_ftrace_events)                              \
+                        VMLINUX_SYMBOL(__stop_ftrace_events) = .;
+#else
+#define FTRACE_EVENTS()
+#endif
+
+#ifdef CONFIG_TRACING
+#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
+                        *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+                        VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
+#else
+#define TRACE_PRINTKS()
+#endif
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .;        \
+                         *(__syscalls_metadata)                                \
+                         VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
+#else
+#define TRACE_SYSCALLS()
+#endif
+
 /* .data section */
 #define DATA_DATA \
         *(.data) \
@@ -86,7 +110,10 @@
         *(__verbose) \
         VMLINUX_SYMBOL(__stop___verbose) = .; \
         LIKELY_PROFILE() \
-        BRANCH_PROFILE()
+        BRANCH_PROFILE() \
+        TRACE_PRINTKS() \
+        FTRACE_EVENTS() \
+        TRACE_SYSCALLS()

 #define RO_DATA(align) \
         . = ALIGN((align)); \
diff --git a/include/asm-m32r/ftrace.h b/include/asm-m32r/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-m32r/ftrace.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/include/asm-mn10300/ftrace.h b/include/asm-mn10300/ftrace.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/include/asm-mn10300/ftrace.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index d047f846c3ed..6586cbd0d4af 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -97,6 +97,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
 /* the following four functions are architecture-dependent */
 void acpi_numa_slit_init (struct acpi_table_slit *slit);
 void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
+void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
 void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
 void acpi_numa_arch_fixup(void);

diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 45f6297821bd..5fc2ef8d97fa 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -21,6 +21,15 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>

+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
 /**
  * dma_chan_ref - object used to manage dma channels received from the
  * dmaengine core.
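__async_inline expands to nothing when CONFIG_HAS_DMA is set and to __always_inline otherwise, so the DMA-offload branch of an async helper can be discarded entirely on architectures without DMA mapping. A hedged sketch of the intended pattern; the helper below is hypothetical, not part of the async_tx API:

    #include <linux/string.h>
    #include <linux/async_tx.h>

    /* With CONFIG_HAS_DMA this is an ordinary function that may dispatch to a
     * DMA channel; without it, forced inlining plus constant folding lets the
     * compiler drop the DMA branch entirely. */
    static __async_inline void example_async_fill(void *dst, int value, size_t len)
    {
    #ifdef CONFIG_HAS_DMA
        /* ...look up a channel and submit a hardware operation here... */
    #endif
        memset(dst, value, len);    /* synchronous fallback */
    }
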
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6617c9f8f2ca..cb79b7a208e1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -29,6 +29,8 @@
 #ifndef __LINUX_ATA_H__
 #define __LINUX_ATA_H__

+#include <linux/kernel.h>
+#include <linux/string.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>

@@ -91,6 +93,7 @@ enum {
         ATA_ID_CFA_POWER = 160,
         ATA_ID_CFA_KEY_MGMT = 162,
         ATA_ID_CFA_MODES = 163,
+        ATA_ID_DATA_SET_MGMT = 169,
         ATA_ID_ROT_SPEED = 217,
         ATA_ID_PIO4 = (1 << 1),

@@ -248,6 +251,7 @@ enum {
         ATA_CMD_SMART = 0xB0,
         ATA_CMD_MEDIA_LOCK = 0xDE,
         ATA_CMD_MEDIA_UNLOCK = 0xDF,
+        ATA_CMD_DSM = 0x06,
         /* marked obsolete in the ATA/ATAPI-7 spec */
         ATA_CMD_RESTORE = 0x10,

@@ -321,6 +325,9 @@ enum {
         ATA_SMART_READ_VALUES = 0xD0,
         ATA_SMART_READ_THRESHOLDS = 0xD1,

+        /* feature values for Data Set Management */
+        ATA_DSM_TRIM = 0x01,
+
         /* password used in LBA Mid / LBA High for executing SMART commands */
         ATA_SMART_LBAM_PASS = 0x4F,
         ATA_SMART_LBAH_PASS = 0xC2,
@@ -723,6 +730,14 @@ static inline int ata_id_has_unload(const u16 *id)
         return 0;
 }

+static inline int ata_id_has_trim(const u16 *id)
+{
+        if (ata_id_major_version(id) >= 7 &&
+            (id[ATA_ID_DATA_SET_MGMT] & 1))
+                return 1;
+        return 0;
+}
+
 static inline int ata_id_current_chs_valid(const u16 *id)
 {
         /* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -863,6 +878,32 @@ static inline void ata_id_to_hd_driveid(u16 *id)
 #endif
 }

+/*
+ * Write up to 'max' LBA Range Entries to the buffer that will cover the
+ * extent from sector to sector + count.  This is used for TRIM and for
+ * ADD LBA(S) TO NV CACHE PINNED SET.
+ */
+static inline unsigned ata_set_lba_range_entries(void *_buffer, unsigned max,
+                                                 u64 sector, unsigned long count)
+{
+        __le64 *buffer = _buffer;
+        unsigned i = 0;
+
+        while (i < max) {
+                u64 entry = sector |
+                        ((u64)(count > 0xffff ? 0xffff : count) << 48);
+                buffer[i++] = __cpu_to_le64(entry);
+                if (count <= 0xffff)
+                        break;
+                count -= 0xffff;
+                sector += 0xffff;
+        }
+
+        max = ALIGN(i * 8, 512);
+        memset(buffer + i, 0, max - i * 8);
+        return max;
+}
+
 static inline int is_multi_taskfile(struct ata_taskfile *tf)
 {
         return (tf->command == ATA_CMD_READ_MULTI) ||
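Together, ATA_ID_DATA_SET_MGMT, ATA_CMD_DSM, ATA_DSM_TRIM and ata_set_lba_range_entries() give a driver what it needs to build a TRIM payload. A sketch assuming buf points at a zeroed 512-byte sector buffer; the wrapper itself is illustrative, not part of the header:

    #include <linux/ata.h>
    #include <linux/errno.h>

    static int example_fill_trim_payload(const u16 *id, void *buf,
                                         u64 lba, unsigned long nsectors)
    {
        unsigned used;

        if (!ata_id_has_trim(id))
            return -EOPNOTSUPP;

        /* A 512-byte payload holds up to 64 eight-byte LBA range entries;
         * the helper packs the range and zero-fills the rest of the block. */
        used = ata_set_lba_range_entries(buf, 64, lba, nsectors);

        /* The caller would now issue ATA_CMD_DSM with feature ATA_DSM_TRIM
         * and used / 512 sectors of payload. */
        return used;
    }
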
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bee52abb8a4d..0ec2c594868e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,8 +24,8 @@ struct dentry;
  */
 enum bdi_state {
         BDI_pdflush, /* A pdflush thread is working this device */
-        BDI_write_congested, /* The write queue is getting full */
-        BDI_read_congested, /* The read queue is getting full */
+        BDI_async_congested, /* The async (write) queue is getting full */
+        BDI_sync_congested, /* The sync queue is getting full */
         BDI_unused, /* Available bits start here */
 };

@@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)

 static inline int bdi_read_congested(struct backing_dev_info *bdi)
 {
-        return bdi_congested(bdi, 1 << BDI_read_congested);
+        return bdi_congested(bdi, 1 << BDI_sync_congested);
 }

 static inline int bdi_write_congested(struct backing_dev_info *bdi)
 {
-        return bdi_congested(bdi, 1 << BDI_write_congested);
+        return bdi_congested(bdi, 1 << BDI_async_congested);
 }

 static inline int bdi_rw_congested(struct backing_dev_info *bdi)
 {
-        return bdi_congested(bdi, (1 << BDI_read_congested)|
-                        (1 << BDI_write_congested));
+        return bdi_congested(bdi, (1 << BDI_sync_congested) |
+                        (1 << BDI_async_congested));
 }

 void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b05b1d4d17d2..b900d2c67d29 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -145,20 +145,21 @@ struct bio {
  * bit 2 -- barrier
  *      Insert a serialization point in the IO queue, forcing previously
  *      submitted IO to be completed before this one is issued.
- * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
- *      Note that this does NOT indicate that the IO itself is sync, just
- *      that the block layer will not postpone issue of this IO by plugging.
- * bit 4 -- metadata request
+ * bit 3 -- synchronous I/O hint.
+ * bit 4 -- Unplug the device immediately after submitting this bio.
+ * bit 5 -- metadata request
  *      Used for tracing to differentiate metadata and data IO. May also
  *      get some preferential treatment in the IO scheduler
- * bit 5 -- discard sectors
+ * bit 6 -- discard sectors
  *      Informs the lower level device that this range of sectors is no longer
  *      used by the file system and may thus be freed by the device. Used
  *      for flash based storage.
- * bit 6 -- fail fast device errors
- * bit 7 -- fail fast transport errors
- * bit 8 -- fail fast driver errors
+ * bit 7 -- fail fast device errors
+ * bit 8 -- fail fast transport errors
+ * bit 9 -- fail fast driver errors
  *      Don't want driver retries for any fast fail whatever the reason.
+ * bit 10 -- Tell the IO scheduler not to wait for more requests after this
+ *      one has been submitted, even if it is a SYNC request.
  */
 #define BIO_RW 0 /* Must match RW in req flags (blkdev.h) */
 #define BIO_RW_AHEAD 1 /* Must match FAILFAST in req flags */
@@ -170,6 +171,7 @@ struct bio {
 #define BIO_RW_FAILFAST_DEV 7
 #define BIO_RW_FAILFAST_TRANSPORT 8
 #define BIO_RW_FAILFAST_DRIVER 9
+#define BIO_RW_NOIDLE 10

 #define bio_rw_flagged(bio, flag) ((bio)->bi_rw & (1 << (flag)))

@@ -188,6 +190,7 @@ struct bio {
 #define bio_rw_ahead(bio) bio_rw_flagged(bio, BIO_RW_AHEAD)
 #define bio_rw_meta(bio) bio_rw_flagged(bio, BIO_RW_META)
 #define bio_discard(bio) bio_rw_flagged(bio, BIO_RW_DISCARD)
+#define bio_noidle(bio) bio_rw_flagged(bio, BIO_RW_NOIDLE)

 /*
  * upper 16 bits of bi_rw define the io priority of this bio
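For reference, a small sketch of how a lower layer might test the renumbered bits through the accessor macros shown above; the classifier function is illustrative only:

    #include <linux/bio.h>

    static const char *example_classify_bio(struct bio *bio)
    {
        if (bio_discard(bio))
            return "discard";   /* sector range may be unmapped */
        if (bio_rw_meta(bio))
            return "metadata";  /* may get preferential treatment */
        if (bio_noidle(bio))
            return "noidle";    /* scheduler should not wait for more IO */
        return "regular";
    }
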
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847..ba54c834a590 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);

 struct request_list {
+        /*
+         * count[], starved[], and wait[] are indexed by
+         * BLK_RW_SYNC/BLK_RW_ASYNC
+         */
         int count[2];
         int starved[2];
         int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
         REQ_TYPE_ATA_PC,
 };

+enum {
+        BLK_RW_ASYNC = 0,
+        BLK_RW_SYNC = 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -103,12 +112,12 @@ enum rq_flag_bits {
         __REQ_QUIET, /* don't worry about errors */
         __REQ_PREEMPT, /* set for "ide_preempt" requests */
         __REQ_ORDERED_COLOR, /* is before or after barrier */
-        __REQ_RW_SYNC, /* request is sync (O_DIRECT) */
+        __REQ_RW_SYNC, /* request is sync (sync write or read) */
         __REQ_ALLOCED, /* request came from our alloc pool */
         __REQ_RW_META, /* metadata io request */
         __REQ_COPY_USER, /* contains copies of user pages */
         __REQ_INTEGRITY, /* integrity metadata has been remapped */
-        __REQ_UNPLUG, /* unplug queue on submission */
+        __REQ_NOIDLE, /* Don't anticipate more IO after this one */
         __REQ_NR_BITS, /* stops here */
 };

@@ -135,7 +144,7 @@ enum rq_flag_bits {
 #define REQ_RW_META (1 << __REQ_RW_META)
 #define REQ_COPY_USER (1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
-#define REQ_UNPLUG (1 << __REQ_UNPLUG)
+#define REQ_NOIDLE (1 << __REQ_NOIDLE)

 #define BLK_MAX_CDB 16

@@ -438,8 +447,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
-#define QUEUE_FLAG_READFULL 3 /* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
 #define QUEUE_FLAG_DEAD 5 /* queue being torn down */
 #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */
@@ -611,32 +620,42 @@ enum {
 #define rq_data_dir(rq) ((rq)->cmd_flags & 1)

 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync, if either a read or a sync write
  */
-#define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+        return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+        return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META)
+#define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE)

-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-        if (rw == READ)
-                return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-        return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+        if (sync)
+                return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+        return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }

-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-        if (rw == READ)
-                queue_flag_set(QUEUE_FLAG_READFULL, q);
+        if (sync)
+                queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
         else
-                queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+                queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }

-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-        if (rw == READ)
-                queue_flag_clear(QUEUE_FLAG_READFULL, q);
+        if (sync)
+                queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
         else
-                queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+                queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }

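BLK_RW_SYNC/BLK_RW_ASYNC are the new index space for the request_list arrays, and rq_is_sync() selects the slot for a given request. A sketch of that relationship; the helper is illustrative, and q->rq is the queue's embedded struct request_list:

    #include <linux/blkdev.h>

    static int example_requests_allocated(struct request_queue *q,
                                          struct request *rq)
    {
        int idx = rq_is_sync(rq) ? BLK_RW_SYNC : BLK_RW_ASYNC;

        /* count[]/starved[]/wait[] are now indexed by sync vs async
         * rather than READ vs WRITE. */
        return q->rq.count[idx];
    }
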
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 6e915878e88c..d960889e92ef 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {

 #ifdef __KERNEL__
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
+
+#include <linux/sysfs.h>
+
 struct blk_trace {
         int trace_state;
         struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 extern int blk_trace_startstop(struct request_queue *q, int start);
 extern int blk_trace_remove(struct request_queue *q);

+extern struct attribute_group blk_trace_attr_group;
+
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
 #define blk_trace_shutdown(q) do { } while (0)
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 4316a546beb5..665fa70e4094 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -365,7 +365,10 @@ int cgroup_task_count(const struct cgroup *cgrp);
 /* Return true if cgrp is a descendant of the task's cgroup */
 int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);

-/* Control Group subsystem type. See Documentation/cgroups.txt for details */
+/*
+ * Control Group subsystem type.
+ * See Documentation/cgroups/cgroups.txt for details
+ */

 struct cgroup_subsys {
         struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 9723edd6455c..f2ded21f9a3c 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -193,10 +193,10 @@ asmlinkage ssize_t compat_sys_writev(unsigned long fd,
                 const struct compat_iovec __user *vec, unsigned long vlen);
 asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
                 const struct compat_iovec __user *vec,
-                unsigned long vlen, u32 pos_high, u32 pos_low);
+                unsigned long vlen, u32 pos_low, u32 pos_high);
 asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
                 const struct compat_iovec __user *vec,
-                unsigned long vlen, u32 pos_high, u32 pos_low);
+                unsigned long vlen, u32 pos_low, u32 pos_high);

 int compat_do_execve(char * filename, compat_uptr_t __user *argv,
         compat_uptr_t __user *envp, struct pt_regs * regs);
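The swap matters because the compat wrappers rebuild the 64-bit file offset from the two 32-bit halves before calling the native preadv/pwritev path; with the halves reversed, 32-bit callers passed an offset shifted by 32 bits. The recombination is simply (sketch, helper name illustrative):

    #include <linux/types.h>

    static inline loff_t example_compat_pos(u32 pos_low, u32 pos_high)
    {
        /* low word first, matching the corrected argument order above */
        return ((loff_t)pos_high << 32) | pos_low;
    }
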
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d95da1020f1c..37bcb50a4d7c 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -68,6 +68,7 @@ struct ftrace_branch_data {
                         unsigned long miss;
                         unsigned long hit;
                 };
+                unsigned long miss_hit[2];
         };
 };

@@ -75,7 +76,8 @@ struct ftrace_branch_data {
  * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
  * to disable branch tracing on a per file basis.
  */
-#if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
 void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

 #define likely_notrace(x) __builtin_expect(!!(x), 1)
@@ -113,7 +115,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
  * "Define 'is'", Bill Clinton
  * "Define 'if'", Steven Rostedt
  */
-#define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \
+#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
+#define __trace_if(cond) \
+        if (__builtin_constant_p((cond)) ? !!(cond) : \
         ({ \
                 int ______r; \
                 static struct ftrace_branch_data \
@@ -125,10 +129,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
                         .line = __LINE__, \
                 }; \
                 ______r = !!(cond); \
-                if (______r) \
-                        ______f.hit++; \
-                else \
-                        ______f.miss++; \
+                ______f.miss_hit[______r]++; \
                 ______r; \
         }))
 #endif /* CONFIG_PROFILE_ALL_BRANCHES */
diff --git a/include/linux/connector.h b/include/linux/connector.h
index fc65d219d88c..b9966e64604e 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -39,8 +39,10 @@
 #define CN_IDX_V86D 0x4
 #define CN_VAL_V86D_UVESAFB 0x1
 #define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */
+#define CN_DST_IDX 0x6
+#define CN_DST_VAL 0x1

-#define CN_NETLINK_USERS 6
+#define CN_NETLINK_USERS 7

 /*
  * Maximum connector's message size.
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index af0e01d4c663..eb5c2ba2f81a 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode,
 struct dentry *debugfs_create_blob(const char *name, mode_t mode,
                                    struct dentry *parent,
                                    struct debugfs_blob_wrapper *blob);
+
+bool debugfs_initialized(void);
+
 #else

 #include <linux/err.h>
@@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode,
         return ERR_PTR(-ENODEV);
 }

+static inline bool debugfs_initialized(void)
+{
+        return false;
+}
+
 #endif

 #endif
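debugfs_initialized() lets callers skip debugfs setup cheaply, and the inline stub keeps the same check compiling to false when debugfs is disabled. A sketch of the intended call pattern (directory name illustrative):

    #include <linux/debugfs.h>

    static struct dentry *example_debugfs_dir;

    static void example_debugfs_setup(void)
    {
        if (!debugfs_initialized())
            return;

        example_debugfs_dir = debugfs_create_dir("example", NULL);
    }
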
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 8209e08969f9..ded2d7c42668 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -116,7 +116,6 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d);
 /*
  * Target features
  */
-#define DM_TARGET_SUPPORTS_BARRIERS 0x00000001

 struct target_type {
         uint64_t features;
@@ -139,6 +138,9 @@ struct target_type {
         dm_ioctl_fn ioctl;
         dm_merge_fn merge;
         dm_busy_fn busy;
+
+        /* For internal device-mapper use. */
+        struct list_head list;
 };

 struct io_restrictions {
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 600c5fb2daad..5e8b11d88f6f 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -28,6 +28,9 @@ struct dm_dirty_log_type {
         const char *name;
         struct module *module;

+        /* For internal device-mapper use */
+        struct list_head list;
+
         int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti,
                    unsigned argc, char **argv);
         void (*dtr)(struct dm_dirty_log *log);
@@ -113,6 +116,16 @@ struct dm_dirty_log_type {
113 */ 116 */
114 int (*status)(struct dm_dirty_log *log, status_type_t status_type, 117 int (*status)(struct dm_dirty_log *log, status_type_t status_type,
115 char *result, unsigned maxlen); 118 char *result, unsigned maxlen);
119
120 /*
121 * is_remote_recovering is necessary for cluster mirroring. It provides
122 * a way to detect recovery on another node, so we aren't writing
123 * concurrently. This function is likely to block (when a cluster log
124 * is used).
125 *
126 * Returns: 0, 1
127 */
128 int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region);
116}; 129};
117 130
118int dm_dirty_log_type_register(struct dm_dirty_log_type *type); 131int dm_dirty_log_type_register(struct dm_dirty_log_type *type);
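is_remote_recovering() only matters for cluster-aware logs; a single-node log can implement it trivially. A sketch under that assumption (the function name is hypothetical):

	#include <linux/dm-dirty-log.h>

	/* single-node log: no other node can be recovering this region */
	static int demo_is_remote_recovering(struct dm_dirty_log *log,
					     region_t region)
	{
		return 0;
	}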
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index d7d090d21031..8083b6a36a38 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -115,7 +115,7 @@ static inline u64 dma_get_mask(struct device *dev)
115{ 115{
116 if (dev && dev->dma_mask && *dev->dma_mask) 116 if (dev && dev->dma_mask && *dev->dma_mask)
117 return *dev->dma_mask; 117 return *dev->dma_mask;
118 return DMA_32BIT_MASK; 118 return DMA_BIT_MASK(32);
119} 119}
120 120
121extern u64 dma_get_required_mask(struct device *dev); 121extern u64 dma_get_required_mask(struct device *dev);
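DMA_BIT_MASK(32) evaluates to the same 0xffffffff value as the old DMA_32BIT_MASK constant; the macro form just makes the width explicit and works for any bit count. Typical caller-side usage, as a sketch (helper name made up):

	#include <linux/dma-mapping.h>

	static int demo_setup_dma(struct device *dev)
	{
		/* fall back to 32-bit addressing if a 64-bit mask is rejected */
		if (dma_set_mask(dev, DMA_BIT_MASK(64)))
			return dma_set_mask(dev, DMA_BIT_MASK(32));
		return 0;
	}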
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index af1dab41674b..1a455f1f86d7 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -11,6 +11,7 @@
11 11
12#define DMA_PTE_READ (1) 12#define DMA_PTE_READ (1)
13#define DMA_PTE_WRITE (2) 13#define DMA_PTE_WRITE (2)
14#define DMA_PTE_SNP (1 << 11)
14 15
15struct intel_iommu; 16struct intel_iommu;
16struct dmar_domain; 17struct dmar_domain;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 1956c8d46d32..2e2aa3df170c 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -23,9 +23,6 @@
23 23
24#include <linux/device.h> 24#include <linux/device.h>
25#include <linux/uio.h> 25#include <linux/uio.h>
26#include <linux/kref.h>
27#include <linux/completion.h>
28#include <linux/rcupdate.h>
29#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
30 27
31/** 28/**
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor {
205/** 202/**
206 * struct dma_device - info on the entity supplying DMA services 203 * struct dma_device - info on the entity supplying DMA services
207 * @chancnt: how many DMA channels are supported 204 * @chancnt: how many DMA channels are supported
205 * @privatecnt: how many DMA channels are requested by dma_request_channel
208 * @channels: the list of struct dma_chan 206 * @channels: the list of struct dma_chan
209 * @global_node: list_head for global dma_device_list 207 * @global_node: list_head for global dma_device_list
210 * @cap_mask: one or more dma_capability flags 208 * @cap_mask: one or more dma_capability flags
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor {
227struct dma_device { 225struct dma_device {
228 226
229 unsigned int chancnt; 227 unsigned int chancnt;
228 unsigned int privatecnt;
230 struct list_head channels; 229 struct list_head channels;
231 struct list_head global_node; 230 struct list_head global_node;
232 dma_cap_mask_t cap_mask; 231 dma_cap_mask_t cap_mask;
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void)
291} 290}
292#endif 291#endif
293 292
293#ifdef CONFIG_ASYNC_TX_DMA
294#define async_dmaengine_get() dmaengine_get()
295#define async_dmaengine_put() dmaengine_put()
296#define async_dma_find_channel(type) dma_find_channel(type)
297#else
298static inline void async_dmaengine_get(void)
299{
300}
301static inline void async_dmaengine_put(void)
302{
303}
304static inline struct dma_chan *
305async_dma_find_channel(enum dma_transaction_type type)
306{
307 return NULL;
308}
309#endif
310
294dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, 311dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
295 void *dest, void *src, size_t len); 312 void *dest, void *src, size_t len);
296dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, 313dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
337 set_bit(tx_type, dstp->bits); 354 set_bit(tx_type, dstp->bits);
338} 355}
339 356
357#define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask))
358static inline void
359__dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
360{
361 clear_bit(tx_type, dstp->bits);
362}
363
340#define dma_cap_zero(mask) __dma_cap_zero(&(mask)) 364#define dma_cap_zero(mask) __dma_cap_zero(&(mask))
341static inline void __dma_cap_zero(dma_cap_mask_t *dstp) 365static inline void __dma_cap_zero(dma_cap_mask_t *dstp)
342{ 366{
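dma_cap_clear() completes the capability-mask helpers (zero/set/clear), and the async_dmaengine_*() wrappers let the async_tx layer compile its channel lookups away when CONFIG_ASYNC_TX_DMA is off. A short sketch of the mask helpers together; the actual channel request is omitted:

	#include <linux/dmaengine.h>

	static void demo_build_mask(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);			/* start empty */
		dma_cap_set(DMA_MEMCPY, mask);		/* ask for memcpy channels */
		dma_cap_clear(DMA_MEMCPY, mask);	/* ...and drop it again */
	}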
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 2f3427468956..e397dc342cda 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -34,6 +34,7 @@ struct dmar_drhd_unit {
34 u64 reg_base_addr; /* register base address*/ 34 u64 reg_base_addr; /* register base address*/
35 struct pci_dev **devices; /* target device array */ 35 struct pci_dev **devices; /* target device array */
36 int devices_cnt; /* target device count */ 36 int devices_cnt; /* target device count */
37 u16 segment; /* PCI domain */
37 u8 ignored:1; /* ignore drhd */ 38 u8 ignored:1; /* ignore drhd */
38 u8 include_all:1; 39 u8 include_all:1;
39 struct intel_iommu *iommu; 40 struct intel_iommu *iommu;
@@ -44,6 +45,14 @@ extern struct list_head dmar_drhd_units;
44#define for_each_drhd_unit(drhd) \ 45#define for_each_drhd_unit(drhd) \
45 list_for_each_entry(drhd, &dmar_drhd_units, list) 46 list_for_each_entry(drhd, &dmar_drhd_units, list)
46 47
48#define for_each_active_iommu(i, drhd) \
49 list_for_each_entry(drhd, &dmar_drhd_units, list) \
50 if (i=drhd->iommu, drhd->ignored) {} else
51
52#define for_each_iommu(i, drhd) \
53 list_for_each_entry(drhd, &dmar_drhd_units, list) \
54 if (i=drhd->iommu, 0) {} else
55
47extern int dmar_table_init(void); 56extern int dmar_table_init(void);
48extern int dmar_dev_scope_init(void); 57extern int dmar_dev_scope_init(void);
49 58
@@ -100,6 +109,8 @@ struct irte {
100#ifdef CONFIG_INTR_REMAP 109#ifdef CONFIG_INTR_REMAP
101extern int intr_remapping_enabled; 110extern int intr_remapping_enabled;
102extern int enable_intr_remapping(int); 111extern int enable_intr_remapping(int);
112extern void disable_intr_remapping(void);
113extern int reenable_intr_remapping(int);
103 114
104extern int get_irte(int irq, struct irte *entry); 115extern int get_irte(int irq, struct irte *entry);
105extern int modify_irte(int irq, struct irte *irte_modified); 116extern int modify_irte(int irq, struct irte *irte_modified);
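The new iterators wrap the existing DRHD list walk: for_each_iommu() visits every unit, while for_each_active_iommu() additionally skips units marked ignored, binding the iommu pointer as a side effect of the comma expression. Usage sketch:

	#include <linux/dmar.h>

	static void demo_walk_iommus(void)
	{
		struct dmar_drhd_unit *drhd;
		struct intel_iommu *iommu;

		for_each_active_iommu(iommu, drhd) {
			/* iommu == drhd->iommu here; ignored DRHDs were skipped */
		}
	}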
diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h
deleted file mode 100644
index d3c65e48a2e7..000000000000
--- a/include/linux/ds1wm.h
+++ /dev/null
@@ -1,12 +0,0 @@
1/* platform data for the DS1WM driver */
2
3struct ds1wm_platform_data {
4 int bus_shift; /* number of shifts needed to calculate the
5 * offset between DS1WM registers;
6 * e.g. on h5xxx and h2200 this is 2
7 * (registers aligned to 4-byte boundaries),
8 * while on hx4700 this is 1 */
9 int active_high;
10 void (*enable)(struct platform_device *pdev);
11 void (*disable)(struct platform_device *pdev);
12};
diff --git a/include/linux/dst.h b/include/linux/dst.h
new file mode 100644
index 000000000000..e26fed84b1aa
--- /dev/null
+++ b/include/linux/dst.h
@@ -0,0 +1,587 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __DST_H
17#define __DST_H
18
19#include <linux/types.h>
20#include <linux/connector.h>
21
22#define DST_NAMELEN 32
23#define DST_NAME "dst"
24
25enum {
26 /* Remove node with given id from storage */
27 DST_DEL_NODE = 0,
28 /* Add remote node with given id to the storage */
29 DST_ADD_REMOTE,
30 /* Add local node with given id to the storage to be exported and used by remote peers */
31 DST_ADD_EXPORT,
32 /* Crypto initialization command (hash/cipher used to protect the connection) */
33 DST_CRYPTO,
34 /* Security attributes for given connection (permissions for example) */
35 DST_SECURITY,
36 /* Register given node in the block layer subsystem */
37 DST_START,
38 DST_CMD_MAX
39};
40
41struct dst_ctl
42{
43 /* Storage name */
44 char name[DST_NAMELEN];
45 /* Command flags */
46 __u32 flags;
47 /* Command itself (see above) */
48 __u32 cmd;
49 /* Maximum number of pages per single request in this device */
50 __u32 max_pages;
51 /* Stale/error transaction scanning timeout in milliseconds */
52 __u32 trans_scan_timeout;
53 /* Maximum number of retry sends before completing transaction as broken */
54 __u32 trans_max_retries;
55 /* Storage size */
56 __u64 size;
57};
58
59/* Reply command carries completion status */
60struct dst_ctl_ack
61{
62 struct cn_msg msg;
63 int error;
64 int unused[3];
65};
66
67/*
 68 * Unfortunately the socket address structure is not exported to userspace
69 * and is redefined there.
70 */
71#define SADDR_MAX_DATA 128
72
73struct saddr {
74 /* address family, AF_xxx */
75 unsigned short sa_family;
76 /* 14 bytes of protocol address */
77 char sa_data[SADDR_MAX_DATA];
78 /* Number of bytes used in sa_data */
79 unsigned short sa_data_len;
80};
81
82/* Address structure */
83struct dst_network_ctl
84{
85 /* Socket type: datagram, stream...*/
86 unsigned int type;
87 /* Let me guess, is it a Jupiter diameter? */
88 unsigned int proto;
89 /* Peer's address */
90 struct saddr addr;
91};
92
93struct dst_crypto_ctl
94{
95 /* Cipher and hash names */
96 char cipher_algo[DST_NAMELEN];
97 char hash_algo[DST_NAMELEN];
98
99 /* Key sizes. Can be zero for digest for example */
100 unsigned int cipher_keysize, hash_keysize;
101 /* Alignment. Calculated by the DST itself. */
102 unsigned int crypto_attached_size;
103 /* Number of threads to perform crypto operations */
104 int thread_num;
105};
106
107/* Export security attributes have these bits checked when a client connects */
108#define DST_PERM_READ (1<<0)
109#define DST_PERM_WRITE (1<<1)
110
111/*
112 * Right now it is a simple model, where each remote address
113 * is assigned a set of permissions it is allowed to perform.
114 * In the real world a block device does not know anything but
115 * reading and writing, so this should be more than enough.
116 */
117struct dst_secure_user
118{
119 unsigned int permissions;
120 struct saddr addr;
121};
122
123/*
124 * Export control command: device to export and network address to accept
125 * clients to work with given device
126 */
127struct dst_export_ctl
128{
129 char device[DST_NAMELEN];
130 struct dst_network_ctl ctl;
131};
132
133enum {
134 DST_CFG = 1, /* Request remote configuration */
135 DST_IO, /* IO command */
136 DST_IO_RESPONSE, /* IO response */
137 DST_PING, /* Keepalive message */
138 DST_NCMD_MAX,
139};
140
141struct dst_cmd
142{
143 /* Network command itself, see above */
144 __u32 cmd;
145 /*
146 * Size of the attached data
147 * (in most cases, for READ command it means how many bytes were requested)
148 */
149 __u32 size;
150 /* Crypto size: number of attached bytes with digest/hmac */
151 __u32 csize;
152 /* Here we can carry secret data */
153 __u32 reserved;
154 /* Read/write bits, see how they are encoded in bio structure */
155 __u64 rw;
156 /* BIO flags */
157 __u64 flags;
158 /* Unique command id (like transaction ID) */
159 __u64 id;
160 /* Sector to start IO from */
161 __u64 sector;
162 /* Hash data is placed after this header */
163 __u8 hash[0];
164};
165
166/*
167 * Convert command to/from network byte order.
168 * We do not use hton*() functions, since there is
169 * no 64-bit implementation.
170 */
171static inline void dst_convert_cmd(struct dst_cmd *c)
172{
173 c->cmd = __cpu_to_be32(c->cmd);
174 c->csize = __cpu_to_be32(c->csize);
175 c->size = __cpu_to_be32(c->size);
176 c->sector = __cpu_to_be64(c->sector);
177 c->id = __cpu_to_be64(c->id);
178 c->flags = __cpu_to_be64(c->flags);
179 c->rw = __cpu_to_be64(c->rw);
180}
181
182/* Transaction id */
183typedef __u64 dst_gen_t;
184
185#ifdef __KERNEL__
186
187#include <linux/blkdev.h>
188#include <linux/bio.h>
189#include <linux/device.h>
190#include <linux/mempool.h>
191#include <linux/net.h>
192#include <linux/poll.h>
193#include <linux/rbtree.h>
194
195#ifdef CONFIG_DST_DEBUG
196#define dprintk(f, a...) printk(KERN_NOTICE f, ##a)
197#else
198static inline void __attribute__ ((format (printf, 1, 2)))
199 dprintk(const char *fmt, ...) {}
200#endif
201
202struct dst_node;
203
204struct dst_trans
205{
206 /* DST node we are working with */
207 struct dst_node *n;
208
209 /* Entry inside transaction tree */
210 struct rb_node trans_entry;
211
212 /* Merlin kills this transaction when this memory cell equals zero */
213 atomic_t refcnt;
214
215 /* How this transaction should be processed by crypto engine */
216 short enc;
217 /* How many times this transaction was resent */
218 short retries;
219 /* Completion status */
220 int error;
221
222 /* When did we send it to the remote peer */
223 long send_time;
224
225 /* My name is...
226 * Well, computers do not speak, they have a unique id instead */
227 dst_gen_t gen;
228
229 /* Block IO we are working with */
230 struct bio *bio;
231
232 /* Network command for above block IO request */
233 struct dst_cmd cmd;
234};
235
236struct dst_crypto_engine
237{
238 /* What should we do with all block requests */
239 struct crypto_hash *hash;
240 struct crypto_ablkcipher *cipher;
241
242 /* Pool of pages used to encrypt data into before sending */
243 int page_num;
244 struct page **pages;
245
246 /* What to do with current request */
247 int enc;
248 /* Who we are and where do we go */
249 struct scatterlist *src, *dst;
250
251 /* Maximum timeout waiting for encryption to be completed */
252 long timeout;
253 /* IV is a 64-bit sequential counter */
254 u64 iv;
255
256 /* Secret data */
257 void *private;
258
259 /* Cached temporary data lives here */
260 int size;
261 void *data;
262};
263
264struct dst_state
265{
266 /* The main state protection */
267 struct mutex state_lock;
268
269 /* Polling machinery for sockets */
270 wait_queue_t wait;
271 wait_queue_head_t *whead;
272 /* Most of events are being waited here */
273 wait_queue_head_t thread_wait;
274
275 /* Who owns this? */
276 struct dst_node *node;
277
278 /* Network address for this state */
279 struct dst_network_ctl ctl;
280
281 /* Permissions to work with: read-only or rw connection */
282 u32 permissions;
283
284 /* Called when we need to clean private data */
285 void (* cleanup)(struct dst_state *st);
286
287 /* Used by the server: BIO completion queues BIOs here */
288 struct list_head request_list;
289 spinlock_t request_lock;
290
291 /* Guess what? No, it is not number of planets */
292 atomic_t refcnt;
293
294 /* This flag is set when the connection should be dropped */
295 int need_exit;
296
297 /*
298 * Socket to work with. Second pointer is used for
299 * lockless check if socket was changed before performing
300 * next action (like working with cached polling result)
301 */
302 struct socket *socket, *read_socket;
303
304 /* Cached preallocated data */
305 void *data;
306 unsigned int size;
307
308 /* Currently processed command */
309 struct dst_cmd cmd;
310};
311
312struct dst_info
313{
314 /* Device size */
315 u64 size;
316
317 /* Local device name for export devices */
318 char local[DST_NAMELEN];
319
320 /* Network setup */
321 struct dst_network_ctl net;
322
323 /* Sysfs bits use this */
324 struct device device;
325};
326
327struct dst_node
328{
329 struct list_head node_entry;
330
331 /* Hi, my name is stored here */
332 char name[DST_NAMELEN];
333 /* My cache name is stored here */
334 char cache_name[DST_NAMELEN];
335
336 /* Block device attached to given node.
337 * Only valid for exporting nodes */
338 struct block_device *bdev;
339 /* Network state machine for given peer */
340 struct dst_state *state;
341
342 /* Block IO machinery */
343 struct request_queue *queue;
344 struct gendisk *disk;
345
346 /* Number of threads in processing pool */
347 int thread_num;
348 /* Maximum number of pages in single IO */
349 int max_pages;
350
351 /* I'm that big in bytes */
352 loff_t size;
353
354 /* Exported to userspace node information */
355 struct dst_info *info;
356
357 /*
358 * Security attribute list.
359 * Used only by exporting node currently.
360 */
361 struct list_head security_list;
362 struct mutex security_lock;
363
364 /*
365 * When this underflows below zero, university collapses.
366 * But this will not happen, since the node will be freed
367 * when the reference counter reaches zero.
368 */
369 atomic_t refcnt;
370
371 /* How precisely should I be started? */
372 int (*start)(struct dst_node *);
373
374 /* Crypto capabilities */
375 struct dst_crypto_ctl crypto;
376 u8 *hash_key;
377 u8 *cipher_key;
378
379 /* Pool of processing thread */
380 struct thread_pool *pool;
381
382 /* Transaction IDs live here */
383 atomic_long_t gen;
384
385 /*
386 * How frequently and how many times transaction
387 * tree should be scanned to drop stale objects.
388 */
389 long trans_scan_timeout;
390 int trans_max_retries;
391
392 /* Small gnomes live here */
393 struct rb_root trans_root;
394 struct mutex trans_lock;
395
396 /*
397 * Transaction cache/memory pool.
398 * It is big enough to contain not only transaction
399 * itself, but additional crypto data (digest/hmac).
400 */
401 struct kmem_cache *trans_cache;
402 mempool_t *trans_pool;
403
404 /* This entity scans transaction tree */
405 struct delayed_work trans_work;
406
407 wait_queue_head_t wait;
408};
409
410/* Kernel representation of the security attribute */
411struct dst_secure
412{
413 struct list_head sec_entry;
414 struct dst_secure_user sec;
415};
416
417int dst_process_bio(struct dst_node *n, struct bio *bio);
418
419int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r);
420int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le);
421
422static inline struct dst_state *dst_state_get(struct dst_state *st)
423{
424 BUG_ON(atomic_read(&st->refcnt) == 0);
425 atomic_inc(&st->refcnt);
426 return st;
427}
428
429void dst_state_put(struct dst_state *st);
430
431struct dst_state *dst_state_alloc(struct dst_node *n);
432int dst_state_socket_create(struct dst_state *st);
433void dst_state_socket_release(struct dst_state *st);
434
435void dst_state_exit_connected(struct dst_state *st);
436
437int dst_state_schedule_receiver(struct dst_state *st);
438
439void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str);
440
441static inline void dst_state_lock(struct dst_state *st)
442{
443 mutex_lock(&st->state_lock);
444}
445
446static inline void dst_state_unlock(struct dst_state *st)
447{
448 mutex_unlock(&st->state_lock);
449}
450
451void dst_poll_exit(struct dst_state *st);
452int dst_poll_init(struct dst_state *st);
453
454static inline unsigned int dst_state_poll(struct dst_state *st)
455{
456 unsigned int revents = POLLHUP | POLLERR;
457
458 dst_state_lock(st);
459 if (st->socket)
460 revents = st->socket->ops->poll(NULL, st->socket, NULL);
461 dst_state_unlock(st);
462
463 return revents;
464}
465
466static inline int dst_thread_setup(void *private, void *data)
467{
468 return 0;
469}
470
471void dst_node_put(struct dst_node *n);
472
473static inline struct dst_node *dst_node_get(struct dst_node *n)
474{
475 atomic_inc(&n->refcnt);
476 return n;
477}
478
479int dst_data_recv(struct dst_state *st, void *data, unsigned int size);
480int dst_recv_cdata(struct dst_state *st, void *cdata);
481int dst_data_send_header(struct socket *sock,
482 void *data, unsigned int size, int more);
483
484int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio);
485
486int dst_process_io(struct dst_state *st);
487int dst_export_crypto(struct dst_node *n, struct bio *bio);
488int dst_export_send_bio(struct bio *bio);
489int dst_start_export(struct dst_node *n);
490
491int __init dst_export_init(void);
492void dst_export_exit(void);
493
494/* Private structure for export block IO requests */
495struct dst_export_priv
496{
497 struct list_head request_entry;
498 struct dst_state *state;
499 struct bio *bio;
500 struct dst_cmd cmd;
501};
502
503static inline void dst_trans_get(struct dst_trans *t)
504{
505 atomic_inc(&t->refcnt);
506}
507
508struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen);
509int dst_trans_remove(struct dst_trans *t);
510int dst_trans_remove_nolock(struct dst_trans *t);
511void dst_trans_put(struct dst_trans *t);
512
513/*
514 * Convert bio into network command.
515 */
516static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd,
517 u32 command, u64 id)
518{
519 cmd->cmd = command;
520 cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS;
521 cmd->rw = bio->bi_rw;
522 cmd->size = bio->bi_size;
523 cmd->csize = 0;
524 cmd->id = id;
525 cmd->sector = bio->bi_sector;
526};
527
528int dst_trans_send(struct dst_trans *t);
529int dst_trans_crypto(struct dst_trans *t);
530
531int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl);
532void dst_node_crypto_exit(struct dst_node *n);
533
534static inline int dst_need_crypto(struct dst_node *n)
535{
536 struct dst_crypto_ctl *c = &n->crypto;
537 /*
538 * Boolean OR would be appropriate here, but the bitwise form
539 * produces better code (no branch), so it is used instead.
540 */
541 return (c->hash_algo[0] | c->cipher_algo[0]);
542}
543
544int dst_node_trans_init(struct dst_node *n, unsigned int size);
545void dst_node_trans_exit(struct dst_node *n);
546
547/*
548 * Pool of threads.
549 * Ready list contains threads currently free to be used,
550 * active one contains threads with some work scheduled for them.
551 * Caller can wait in given queue when thread is ready.
552 */
553struct thread_pool
554{
555 int thread_num;
556 struct mutex thread_lock;
557 struct list_head ready_list, active_list;
558
559 wait_queue_head_t wait;
560};
561
562void thread_pool_del_worker(struct thread_pool *p);
563void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id);
564int thread_pool_add_worker(struct thread_pool *p,
565 char *name,
566 unsigned int id,
567 void *(* init)(void *data),
568 void (* cleanup)(void *data),
569 void *data);
570
571void thread_pool_destroy(struct thread_pool *p);
572struct thread_pool *thread_pool_create(int num, char *name,
573 void *(* init)(void *data),
574 void (* cleanup)(void *data),
575 void *data);
576
577int thread_pool_schedule(struct thread_pool *p,
578 int (* setup)(void *stored_private, void *setup_data),
579 int (* action)(void *stored_private, void *setup_data),
580 void *setup_data, long timeout);
581int thread_pool_schedule_private(struct thread_pool *p,
582 int (* setup)(void *private, void *data),
583 int (* action)(void *private, void *data),
584 void *data, long timeout, void *id);
585
586#endif /* __KERNEL__ */
587#endif /* __DST_H */
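Most of the kernel-side API above is reference counted: states and nodes are pinned with dst_state_get()/dst_node_get() and dropped with the matching *_put(), while socket accesses go through the state lock. A small sketch of that pattern using only helpers declared in this header (the polling helper itself is illustrative):

	#include <linux/dst.h>

	static void demo_poll_state(struct dst_state *st)
	{
		unsigned int revents;

		dst_state_get(st);		/* pin while we look at the socket */
		revents = dst_state_poll(st);	/* takes/drops st->state_lock itself */
		if (revents & (POLLHUP | POLLERR))
			st->need_exit = 1;	/* sketch only: real code signals the receiver */
		dst_state_put(st);
	}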
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index d797dde247f7..c8aad713a046 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -74,4 +74,23 @@ struct dw_dma_slave {
74#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ 74#define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */
75#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ 75#define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */
76 76
77/* DMA API extensions */
78struct dw_cyclic_desc {
79 struct dw_desc **desc;
80 unsigned long periods;
81 void (*period_callback)(void *param);
82 void *period_callback_param;
83};
84
85struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
86 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
87 enum dma_data_direction direction);
88void dw_dma_cyclic_free(struct dma_chan *chan);
89int dw_dma_cyclic_start(struct dma_chan *chan);
90void dw_dma_cyclic_stop(struct dma_chan *chan);
91
92dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
93
94dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
95
77#endif /* DW_DMAC_H */ 96#endif /* DW_DMAC_H */
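The cyclic extension prepares a ring of period-sized transfers on an already-requested dw_dmac channel. In the sketch below dw_dma_cyclic_prep() is assumed to return an ERR_PTR-style pointer on failure, as is conventional for prep functions; the helper name and parameters are illustrative:

	#include <linux/dw_dmac.h>
	#include <linux/dma-mapping.h>
	#include <linux/err.h>

	static int demo_start_ring(struct dma_chan *chan, dma_addr_t buf,
				   size_t buf_len, size_t period_len)
	{
		struct dw_cyclic_desc *cdesc;

		cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
					   DMA_TO_DEVICE);
		if (IS_ERR(cdesc))
			return PTR_ERR(cdesc);

		return dw_dma_cyclic_start(chan);	/* stop/free on teardown */
	}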
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 7a204256b155..c59b769f62b0 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -116,6 +116,7 @@ extern void elv_abort_queue(struct request_queue *);
116extern void elv_completed_request(struct request_queue *, struct request *); 116extern void elv_completed_request(struct request_queue *, struct request *);
117extern int elv_set_request(struct request_queue *, struct request *, gfp_t); 117extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
118extern void elv_put_request(struct request_queue *, struct request *); 118extern void elv_put_request(struct request_queue *, struct request *);
119extern void elv_drain_elevator(struct request_queue *);
119 120
120/* 121/*
121 * io scheduler registration 122 * io scheduler registration
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index e263acaa405b..634a5e5aba3e 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -208,6 +208,7 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
208#define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */ 208#define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */
209#define EXT3_STATE_NEW 0x00000002 /* inode is newly created */ 209#define EXT3_STATE_NEW 0x00000002 /* inode is newly created */
210#define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */ 210#define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */
211#define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008
211 212
212/* Used to pass group descriptor data when online resize is done */ 213/* Used to pass group descriptor data when online resize is done */
213struct ext3_new_group_input { 214struct ext3_new_group_input {
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 09d6c5bbdddd..a2ec74bc4812 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -5,12 +5,14 @@
5#ifndef __LINUX_FDTABLE_H 5#ifndef __LINUX_FDTABLE_H
6#define __LINUX_FDTABLE_H 6#define __LINUX_FDTABLE_H
7 7
8#include <asm/atomic.h>
9#include <linux/posix_types.h> 8#include <linux/posix_types.h>
10#include <linux/compiler.h> 9#include <linux/compiler.h>
11#include <linux/spinlock.h> 10#include <linux/spinlock.h>
12#include <linux/rcupdate.h> 11#include <linux/rcupdate.h>
13#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/init.h>
14
15#include <asm/atomic.h>
14 16
15/* 17/*
16 * The default fd array needs to be at least BITS_PER_LONG, 18 * The default fd array needs to be at least BITS_PER_LONG,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a09e17c8f5fd..562d2855cf30 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -95,8 +95,12 @@ struct inodes_stat_t {
95#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ 95#define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */
96#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) 96#define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
97#define READ_META (READ | (1 << BIO_RW_META)) 97#define READ_META (READ | (1 << BIO_RW_META))
98#define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) 98#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
99#define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) 99#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
100#define WRITE_ODIRECT (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
101#define SWRITE_SYNC_PLUG \
102 (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
103#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
100#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) 104#define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER))
101#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) 105#define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
102#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) 106#define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
@@ -1695,6 +1699,9 @@ struct file_system_type {
1695 struct lock_class_key i_alloc_sem_key; 1699 struct lock_class_key i_alloc_sem_key;
1696}; 1700};
1697 1701
1702extern int get_sb_ns(struct file_system_type *fs_type, int flags, void *data,
1703 int (*fill_super)(struct super_block *, void *, int),
1704 struct vfsmount *mnt);
1698extern int get_sb_bdev(struct file_system_type *fs_type, 1705extern int get_sb_bdev(struct file_system_type *fs_type,
1699 int flags, const char *dev_name, void *data, 1706 int flags, const char *dev_name, void *data,
1700 int (*fill_super)(struct super_block *, void *, int), 1707 int (*fill_super)(struct super_block *, void *, int),
@@ -2337,19 +2344,7 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf,
2337 size_t size, loff_t *pos); 2344 size_t size, loff_t *pos);
2338int simple_transaction_release(struct inode *inode, struct file *file); 2345int simple_transaction_release(struct inode *inode, struct file *file);
2339 2346
2340static inline void simple_transaction_set(struct file *file, size_t n) 2347void simple_transaction_set(struct file *file, size_t n);
2341{
2342 struct simple_transaction_argresp *ar = file->private_data;
2343
2344 BUG_ON(n > SIMPLE_TRANSACTION_LIMIT);
2345
2346 /*
2347 * The barrier ensures that ar->size will really remain zero until
2348 * ar->data is ready for reading.
2349 */
2350 smp_mb();
2351 ar->size = n;
2352}
2353 2348
2354/* 2349/*
2355 * simple attribute files 2350 * simple attribute files
@@ -2396,27 +2391,6 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
2396ssize_t simple_attr_write(struct file *file, const char __user *buf, 2391ssize_t simple_attr_write(struct file *file, const char __user *buf,
2397 size_t len, loff_t *ppos); 2392 size_t len, loff_t *ppos);
2398 2393
2399
2400#ifdef CONFIG_SECURITY
2401static inline char *alloc_secdata(void)
2402{
2403 return (char *)get_zeroed_page(GFP_KERNEL);
2404}
2405
2406static inline void free_secdata(void *secdata)
2407{
2408 free_page((unsigned long)secdata);
2409}
2410#else
2411static inline char *alloc_secdata(void)
2412{
2413 return (char *)1;
2414}
2415
2416static inline void free_secdata(void *secdata)
2417{ }
2418#endif /* CONFIG_SECURITY */
2419
2420struct ctl_table; 2394struct ctl_table;
2421int proc_nr_files(struct ctl_table *table, int write, struct file *filp, 2395int proc_nr_files(struct ctl_table *table, int write, struct file *filp,
2422 void __user *buffer, size_t *lenp, loff_t *ppos); 2396 void __user *buffer, size_t *lenp, loff_t *ppos);
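The rw-flag rework above splits the old WRITE_SYNC into WRITE_SYNC_PLUG (sync + noidle, queue left plugged) and WRITE_SYNC (the same plus BIO_RW_UNPLUG), adds WRITE_ODIRECT, and gives SWRITE the same treatment. A caller that wants its request dispatched immediately keeps passing WRITE_SYNC, as in this minimal sketch (the wrapper name is made up):

	#include <linux/fs.h>
	#include <linux/bio.h>

	/* sketch: issue a synchronous write and kick the queue right away */
	static void demo_submit_sync_write(struct bio *bio)
	{
		submit_bio(WRITE_SYNC, bio);
	}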
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
new file mode 100644
index 000000000000..84d3532dd3ea
--- /dev/null
+++ b/include/linux/fscache-cache.h
@@ -0,0 +1,505 @@
1/* General filesystem caching backing cache interface
2 *
3 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * NOTE!!! See:
12 *
13 * Documentation/filesystems/caching/backend-api.txt
14 *
15 * for a description of the cache backend interface declared here.
16 */
17
18#ifndef _LINUX_FSCACHE_CACHE_H
19#define _LINUX_FSCACHE_CACHE_H
20
21#include <linux/fscache.h>
22#include <linux/sched.h>
23#include <linux/slow-work.h>
24
25#define NR_MAXCACHES BITS_PER_LONG
26
27struct fscache_cache;
28struct fscache_cache_ops;
29struct fscache_object;
30struct fscache_operation;
31
32/*
33 * cache tag definition
34 */
35struct fscache_cache_tag {
36 struct list_head link;
37 struct fscache_cache *cache; /* cache referred to by this tag */
38 unsigned long flags;
39#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
40 atomic_t usage;
41 char name[0]; /* tag name */
42};
43
44/*
45 * cache definition
46 */
47struct fscache_cache {
48 const struct fscache_cache_ops *ops;
49 struct fscache_cache_tag *tag; /* tag representing this cache */
50 struct kobject *kobj; /* system representation of this cache */
51 struct list_head link; /* link in list of caches */
52 size_t max_index_size; /* maximum size of index data */
53 char identifier[36]; /* cache label */
54
55 /* node management */
56 struct work_struct op_gc; /* operation garbage collector */
57 struct list_head object_list; /* list of data/index objects */
58 struct list_head op_gc_list; /* list of ops to be deleted */
59 spinlock_t object_list_lock;
60 spinlock_t op_gc_list_lock;
61 atomic_t object_count; /* no. of live objects in this cache */
62 struct fscache_object *fsdef; /* object for the fsdef index */
63 unsigned long flags;
64#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
65#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
66};
67
68extern wait_queue_head_t fscache_cache_cleared_wq;
69
70/*
71 * operation to be applied to a cache object
72 * - retrieval initiation operations are done in the context of the process
73 * that issued them, and not in an async thread pool
74 */
75typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
76typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
77
78struct fscache_operation {
79 union {
80 struct work_struct fast_work; /* record for fast ops */
81 struct slow_work slow_work; /* record for (very) slow ops */
82 };
83 struct list_head pend_link; /* link in object->pending_ops */
84 struct fscache_object *object; /* object to be operated upon */
85
86 unsigned long flags;
87#define FSCACHE_OP_TYPE 0x000f /* operation type */
88#define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */
89#define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */
 90#define FSCACHE_OP_MYTHREAD	0x0003	/* - processing is done by issuing thread, not pool */
91#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
92#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
93#define FSCACHE_OP_DEAD 6 /* op is now dead */
94
95 atomic_t usage;
96 unsigned debug_id; /* debugging ID */
97
98 /* operation processor callback
99 * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
100 * the op in a non-pool thread */
101 fscache_operation_processor_t processor;
102
103 /* operation releaser */
104 fscache_operation_release_t release;
105};
106
107extern atomic_t fscache_op_debug_id;
108extern const struct slow_work_ops fscache_op_slow_work_ops;
109
110extern void fscache_enqueue_operation(struct fscache_operation *);
111extern void fscache_put_operation(struct fscache_operation *);
112
113/**
114 * fscache_operation_init - Do basic initialisation of an operation
115 * @op: The operation to initialise
116 * @release: The release function to assign
117 *
118 * Do basic initialisation of an operation. The caller must still set flags,
119 * object, either fast_work or slow_work if necessary, and processor if needed.
120 */
121static inline void fscache_operation_init(struct fscache_operation *op,
122 fscache_operation_release_t release)
123{
124 atomic_set(&op->usage, 1);
125 op->debug_id = atomic_inc_return(&fscache_op_debug_id);
126 op->release = release;
127 INIT_LIST_HEAD(&op->pend_link);
128}
129
130/**
131 * fscache_operation_init_slow - Do additional initialisation of a slow op
132 * @op: The operation to initialise
133 * @processor: The processor function to assign
134 *
135 * Do additional initialisation of an operation as required for slow work.
136 */
137static inline
138void fscache_operation_init_slow(struct fscache_operation *op,
139 fscache_operation_processor_t processor)
140{
141 op->processor = processor;
142 slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
143}
144
145/*
146 * data read operation
147 */
148struct fscache_retrieval {
149 struct fscache_operation op;
150 struct address_space *mapping; /* netfs pages */
151 fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
152 void *context; /* netfs read context (pinned) */
153 struct list_head to_do; /* list of things to be done by the backend */
154 unsigned long start_time; /* time at which retrieval started */
155};
156
157typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
158 struct page *page,
159 gfp_t gfp);
160
161typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
162 struct list_head *pages,
163 unsigned *nr_pages,
164 gfp_t gfp);
165
166/**
167 * fscache_get_retrieval - Get an extra reference on a retrieval operation
168 * @op: The retrieval operation to get a reference on
169 *
170 * Get an extra reference on a retrieval operation.
171 */
172static inline
173struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
174{
175 atomic_inc(&op->op.usage);
176 return op;
177}
178
179/**
180 * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
181 * @op: The retrieval operation affected
182 *
183 * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
184 */
185static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
186{
187 fscache_enqueue_operation(&op->op);
188}
189
190/**
191 * fscache_put_retrieval - Drop a reference to a retrieval operation
192 * @op: The retrieval operation affected
193 *
194 * Drop a reference to a retrieval operation.
195 */
196static inline void fscache_put_retrieval(struct fscache_retrieval *op)
197{
198 fscache_put_operation(&op->op);
199}
200
201/*
202 * cached page storage work item
203 * - used to do three things:
204 * - batch writes to the cache
205 * - do cache writes asynchronously
206 * - defer writes until cache object lookup completion
207 */
208struct fscache_storage {
209 struct fscache_operation op;
210 pgoff_t store_limit; /* don't write more than this */
211};
212
213/*
214 * cache operations
215 */
216struct fscache_cache_ops {
217 /* name of cache provider */
218 const char *name;
219
220 /* allocate an object record for a cookie */
221 struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
222 struct fscache_cookie *cookie);
223
224 /* look up the object for a cookie */
225 void (*lookup_object)(struct fscache_object *object);
226
227 /* finished looking up */
228 void (*lookup_complete)(struct fscache_object *object);
229
230 /* increment the usage count on this object (may fail if unmounting) */
231 struct fscache_object *(*grab_object)(struct fscache_object *object);
232
233 /* pin an object in the cache */
234 int (*pin_object)(struct fscache_object *object);
235
236 /* unpin an object in the cache */
237 void (*unpin_object)(struct fscache_object *object);
238
239	/* store the updated auxiliary data on an object */
240 void (*update_object)(struct fscache_object *object);
241
242 /* discard the resources pinned by an object and effect retirement if
243 * necessary */
244 void (*drop_object)(struct fscache_object *object);
245
246 /* dispose of a reference to an object */
247 void (*put_object)(struct fscache_object *object);
248
249 /* sync a cache */
250 void (*sync_cache)(struct fscache_cache *cache);
251
252 /* notification that the attributes of a non-index object (such as
253 * i_size) have changed */
254 int (*attr_changed)(struct fscache_object *object);
255
256 /* reserve space for an object's data and associated metadata */
257 int (*reserve_space)(struct fscache_object *object, loff_t i_size);
258
259 /* request a backing block for a page be read or allocated in the
260 * cache */
261 fscache_page_retrieval_func_t read_or_alloc_page;
262
263 /* request backing blocks for a list of pages be read or allocated in
264 * the cache */
265 fscache_pages_retrieval_func_t read_or_alloc_pages;
266
267 /* request a backing block for a page be allocated in the cache so that
268 * it can be written directly */
269 fscache_page_retrieval_func_t allocate_page;
270
271 /* request backing blocks for pages be allocated in the cache so that
272 * they can be written directly */
273 fscache_pages_retrieval_func_t allocate_pages;
274
275 /* write a page to its backing block in the cache */
276 int (*write_page)(struct fscache_storage *op, struct page *page);
277
278 /* detach backing block from a page (optional)
279 * - must release the cookie lock before returning
280 * - may sleep
281 */
282 void (*uncache_page)(struct fscache_object *object,
283 struct page *page);
284
285 /* dissociate a cache from all the pages it was backing */
286 void (*dissociate_pages)(struct fscache_cache *cache);
287};
288
289/*
290 * data file or index object cookie
291 * - a file will only appear in one cache
292 * - a request to cache a file may or may not be honoured, subject to
293 * constraints such as disk space
294 * - indices are created on disk just-in-time
295 */
296struct fscache_cookie {
297 atomic_t usage; /* number of users of this cookie */
298 atomic_t n_children; /* number of children of this cookie */
299 spinlock_t lock;
300 struct hlist_head backing_objects; /* object(s) backing this file/index */
301 const struct fscache_cookie_def *def; /* definition */
302 struct fscache_cookie *parent; /* parent of this entry */
303 void *netfs_data; /* back pointer to netfs */
304 struct radix_tree_root stores; /* pages to be stored on this cookie */
305#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
306
307 unsigned long flags;
308#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
309#define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */
310#define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */
311#define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */
312#define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */
313#define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */
314};
315
316extern struct fscache_cookie fscache_fsdef_index;
317
318/*
319 * on-disk cache file or index handle
320 */
321struct fscache_object {
322 enum fscache_object_state {
323 FSCACHE_OBJECT_INIT, /* object in initial unbound state */
324 FSCACHE_OBJECT_LOOKING_UP, /* looking up object */
325 FSCACHE_OBJECT_CREATING, /* creating object */
326
327 /* active states */
328 FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */
329 FSCACHE_OBJECT_ACTIVE, /* object is usable */
330 FSCACHE_OBJECT_UPDATING, /* object is updating */
331
332 /* terminal states */
333 FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */
334 FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */
335 FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */
336 FSCACHE_OBJECT_RELEASING, /* releasing object */
337 FSCACHE_OBJECT_RECYCLING, /* retiring object */
338 FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */
339 FSCACHE_OBJECT_DEAD, /* object is now dead */
340 } state;
341
342 int debug_id; /* debugging ID */
343 int n_children; /* number of child objects */
344 int n_ops; /* number of ops outstanding on object */
345 int n_obj_ops; /* number of object ops outstanding on object */
346 int n_in_progress; /* number of ops in progress */
347 int n_exclusive; /* number of exclusive ops queued */
348 spinlock_t lock; /* state and operations lock */
349
350 unsigned long lookup_jif; /* time at which lookup started */
351 unsigned long event_mask; /* events this object is interested in */
352 unsigned long events; /* events to be processed by this object
353 * (order is important - using fls) */
354#define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */
355#define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */
356#define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */
357#define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */
358#define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */
359#define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */
360#define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */
361
362 unsigned long flags;
363#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
364#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
365#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
366
367 struct list_head cache_link; /* link in cache->object_list */
368 struct hlist_node cookie_link; /* link in cookie->backing_objects */
369 struct fscache_cache *cache; /* cache that supplied this object */
370 struct fscache_cookie *cookie; /* netfs's file/index object */
371 struct fscache_object *parent; /* parent object */
372 struct slow_work work; /* attention scheduling record */
373 struct list_head dependents; /* FIFO of dependent objects */
374 struct list_head dep_link; /* link in parent's dependents list */
375 struct list_head pending_ops; /* unstarted operations on this object */
376 pgoff_t store_limit; /* current storage limit */
377};
378
379extern const char *fscache_object_states[];
380
381#define fscache_object_is_active(obj) \
382 (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \
383 (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \
384 (obj)->state < FSCACHE_OBJECT_DYING)
385
386extern const struct slow_work_ops fscache_object_slow_work_ops;
387
388/**
389 * fscache_object_init - Initialise a cache object description
390 * @object: Object description
391 *
392 * Initialise a cache object description to its basic values.
393 *
394 * See Documentation/filesystems/caching/backend-api.txt for a complete
395 * description.
396 */
397static inline
398void fscache_object_init(struct fscache_object *object,
399 struct fscache_cookie *cookie,
400 struct fscache_cache *cache)
401{
402 atomic_inc(&cache->object_count);
403
404 object->state = FSCACHE_OBJECT_INIT;
405 spin_lock_init(&object->lock);
406 INIT_LIST_HEAD(&object->cache_link);
407 INIT_HLIST_NODE(&object->cookie_link);
408 vslow_work_init(&object->work, &fscache_object_slow_work_ops);
409 INIT_LIST_HEAD(&object->dependents);
410 INIT_LIST_HEAD(&object->dep_link);
411 INIT_LIST_HEAD(&object->pending_ops);
412 object->n_children = 0;
413 object->n_ops = object->n_in_progress = object->n_exclusive = 0;
414 object->events = object->event_mask = 0;
415 object->flags = 0;
416 object->store_limit = 0;
417 object->cache = cache;
418 object->cookie = cookie;
419 object->parent = NULL;
420}
421
422extern void fscache_object_lookup_negative(struct fscache_object *object);
423extern void fscache_obtained_object(struct fscache_object *object);
424
425/**
426 * fscache_object_destroyed - Note destruction of an object in a cache
427 * @cache: The cache from which the object came
428 *
429 * Note the destruction and deallocation of an object record in a cache.
430 */
431static inline void fscache_object_destroyed(struct fscache_cache *cache)
432{
433 if (atomic_dec_and_test(&cache->object_count))
434 wake_up_all(&fscache_cache_cleared_wq);
435}
436
437/**
438 * fscache_object_lookup_error - Note an object encountered an error
439 * @object: The object on which the error was encountered
440 *
441 * Note that an object encountered a fatal error (usually an I/O error) and
442 * that it should be withdrawn as soon as possible.
443 */
444static inline void fscache_object_lookup_error(struct fscache_object *object)
445{
446 set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
447}
448
449/**
450 * fscache_set_store_limit - Set the maximum size to be stored in an object
451 * @object: The object to set the maximum on
452 * @i_size: The limit to set in bytes
453 *
454 * Set the maximum size an object is permitted to reach, implying the highest
455 * byte that may be written. Intended to be called by the attr_changed() op.
456 *
457 * See Documentation/filesystems/caching/backend-api.txt for a complete
458 * description.
459 */
460static inline
461void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
462{
463 object->store_limit = i_size >> PAGE_SHIFT;
464 if (i_size & ~PAGE_MASK)
465 object->store_limit++;
466}
467
468/**
469 * fscache_end_io - End a retrieval operation on a page
470 * @op: The FS-Cache operation covering the retrieval
471 * @page: The page that was to be fetched
472 * @error: The error code (0 if successful)
473 *
474 * Note the end of an operation to retrieve a page, as covered by a particular
475 * operation record.
476 */
477static inline void fscache_end_io(struct fscache_retrieval *op,
478 struct page *page, int error)
479{
480 op->end_io_func(page, op->context, error);
481}
482
483/*
484 * out-of-line cache backend functions
485 */
486extern void fscache_init_cache(struct fscache_cache *cache,
487 const struct fscache_cache_ops *ops,
488 const char *idfmt,
489 ...) __attribute__ ((format (printf, 3, 4)));
490
491extern int fscache_add_cache(struct fscache_cache *cache,
492 struct fscache_object *fsdef,
493 const char *tagname);
494extern void fscache_withdraw_cache(struct fscache_cache *cache);
495
496extern void fscache_io_error(struct fscache_cache *cache);
497
498extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
499 struct pagevec *pagevec);
500
501extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
502 const void *data,
503 uint16_t datalen);
504
505#endif /* _LINUX_FSCACHE_CACHE_H */
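A cache backend built on this header embeds a struct fscache_cache, initialises it with fscache_init_cache(), and publishes it with fscache_add_cache() once its fsdef index object exists. A hedged outline using only the functions declared above; the backend names are placeholders and the empty ops table stands in for a real method table:

	#include <linux/fscache-cache.h>

	static const struct fscache_cache_ops demo_cache_ops;	/* methods omitted */
	static struct fscache_cache demo_cache;

	static int demo_register_cache(struct fscache_object *fsdef_object)
	{
		/* identifier format string is backend-specific */
		fscache_init_cache(&demo_cache, &demo_cache_ops, "demo-%u", 0);

		return fscache_add_cache(&demo_cache, fsdef_object, "demo");
	}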
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
new file mode 100644
index 000000000000..6d8ee466e0a0
--- /dev/null
+++ b/include/linux/fscache.h
@@ -0,0 +1,618 @@
1/* General filesystem caching interface
2 *
3 * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * NOTE!!! See:
12 *
13 * Documentation/filesystems/caching/netfs-api.txt
14 *
15 * for a description of the network filesystem interface declared here.
16 */
17
18#ifndef _LINUX_FSCACHE_H
19#define _LINUX_FSCACHE_H
20
21#include <linux/fs.h>
22#include <linux/list.h>
23#include <linux/pagemap.h>
24#include <linux/pagevec.h>
25
26#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
27#define fscache_available() (1)
28#define fscache_cookie_valid(cookie) (cookie)
29#else
30#define fscache_available() (0)
31#define fscache_cookie_valid(cookie) (0)
32#endif
33
34
35/*
36 * overload PG_private_2 to give us PG_fscache - this is used to indicate that
37 * a page is currently backed by a local disk cache
38 */
39#define PageFsCache(page) PagePrivate2((page))
40#define SetPageFsCache(page) SetPagePrivate2((page))
41#define ClearPageFsCache(page) ClearPagePrivate2((page))
42#define TestSetPageFsCache(page) TestSetPagePrivate2((page))
43#define TestClearPageFsCache(page) TestClearPagePrivate2((page))
44
45/* pattern used to fill dead space in an index entry */
46#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
47
48struct pagevec;
49struct fscache_cache_tag;
50struct fscache_cookie;
51struct fscache_netfs;
52
53typedef void (*fscache_rw_complete_t)(struct page *page,
54 void *context,
55 int error);
56
57/* result of index entry consultation */
58enum fscache_checkaux {
59 FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
60 FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
61 FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
62};
63
64/*
65 * fscache cookie definition
66 */
67struct fscache_cookie_def {
68 /* name of cookie type */
69 char name[16];
70
71 /* cookie type */
72 uint8_t type;
73#define FSCACHE_COOKIE_TYPE_INDEX 0
74#define FSCACHE_COOKIE_TYPE_DATAFILE 1
75
76 /* select the cache into which to insert an entry in this index
77 * - optional
78 * - should return a cache identifier or NULL to cause the cache to be
79 * inherited from the parent if possible or the first cache picked
80 * for a non-index file if not
81 */
82 struct fscache_cache_tag *(*select_cache)(
83 const void *parent_netfs_data,
84 const void *cookie_netfs_data);
85
86 /* get an index key
87 * - should store the key data in the buffer
 88	 * - should return the amount of data stored
89 * - not permitted to return an error
90 * - the netfs data from the cookie being used as the source is
91 * presented
92 */
93 uint16_t (*get_key)(const void *cookie_netfs_data,
94 void *buffer,
95 uint16_t bufmax);
96
97 /* get certain file attributes from the netfs data
98 * - this function can be absent for an index
99 * - not permitted to return an error
100 * - the netfs data from the cookie being used as the source is
101 * presented
102 */
103 void (*get_attr)(const void *cookie_netfs_data, uint64_t *size);
104
105	/* get the auxiliary data from netfs data
106	 * - this function can be absent if the index carries no state data
107	 * - should store the auxiliary data in the buffer
108	 * - should return the amount of data stored
109 * - not permitted to return an error
110 * - the netfs data from the cookie being used as the source is
111 * presented
112 */
113 uint16_t (*get_aux)(const void *cookie_netfs_data,
114 void *buffer,
115 uint16_t bufmax);
116
117 /* consult the netfs about the state of an object
118 * - this function can be absent if the index carries no state data
119 * - the netfs data from the cookie being used as the target is
120	 * presented, as is the auxiliary data
121 */
122 enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
123 const void *data,
124 uint16_t datalen);
125
126 /* get an extra reference on a read context
127 * - this function can be absent if the completion function doesn't
128 * require a context
129 */
130 void (*get_context)(void *cookie_netfs_data, void *context);
131
132 /* release an extra reference on a read context
133 * - this function can be absent if the completion function doesn't
134 * require a context
135 */
136 void (*put_context)(void *cookie_netfs_data, void *context);
137
138 /* indicate pages that now have cache metadata retained
139 * - this function should mark the specified pages as now being cached
140 * - the pages will have been marked with PG_fscache before this is
141 * called, so this is optional
142 */
143 void (*mark_pages_cached)(void *cookie_netfs_data,
144 struct address_space *mapping,
145 struct pagevec *cached_pvec);
146
147 /* indicate the cookie is no longer cached
148 * - this function is called when the backing store currently caching
149 * a cookie is removed
150 * - the netfs should use this to clean up any markers indicating
151 * cached pages
152 * - this is mandatory for any object that may have data
153 */
154 void (*now_uncached)(void *cookie_netfs_data);
155};
156
157/*
158 * fscache cached network filesystem type
159 * - name, version and ops must be filled in before registration
160 * - all other fields will be set during registration
161 */
162struct fscache_netfs {
163 uint32_t version; /* indexing version */
164 const char *name; /* filesystem name */
165 struct fscache_cookie *primary_index;
166 struct list_head link; /* internal link */
167};
168
169/*
170 * slow-path functions for when there is actually caching available, and the
171 * netfs does actually have a valid token
172 * - these are not to be called directly
173 * - these are undefined symbols when FS-Cache is not configured and the
174 * optimiser takes care of not using them
175 */
176extern int __fscache_register_netfs(struct fscache_netfs *);
177extern void __fscache_unregister_netfs(struct fscache_netfs *);
178extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
179extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
180
181extern struct fscache_cookie *__fscache_acquire_cookie(
182 struct fscache_cookie *,
183 const struct fscache_cookie_def *,
184 void *);
185extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
186extern void __fscache_update_cookie(struct fscache_cookie *);
187extern int __fscache_attr_changed(struct fscache_cookie *);
188extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
189 struct page *,
190 fscache_rw_complete_t,
191 void *,
192 gfp_t);
193extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
194 struct address_space *,
195 struct list_head *,
196 unsigned *,
197 fscache_rw_complete_t,
198 void *,
199 gfp_t);
200extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
201extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
202extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
203extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
204extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
205
206/**
207 * fscache_register_netfs - Register a filesystem as desiring caching services
208 * @netfs: The description of the filesystem
209 *
210 * Register a filesystem as desiring caching services if they're available.
211 *
212 * See Documentation/filesystems/caching/netfs-api.txt for a complete
213 * description.
214 */
215static inline
216int fscache_register_netfs(struct fscache_netfs *netfs)
217{
218 if (fscache_available())
219 return __fscache_register_netfs(netfs);
220 else
221 return 0;
222}
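For instance, continuing the hypothetical examplefs sketch above, a netfs would typically pair this call with fscache_unregister_netfs() (documented next) in its module init and exit paths:

#include <linux/module.h>
#include <linux/init.h>

static int __init examplefs_init_cache(void)
{
	/* harmlessly returns 0 when FS-Cache is not configured in */
	return fscache_register_netfs(&examplefs_cache_netfs);
}

static void __exit examplefs_exit_cache(void)
{
	fscache_unregister_netfs(&examplefs_cache_netfs);
}

module_init(examplefs_init_cache);
module_exit(examplefs_exit_cache);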
223
224/**
225 * fscache_unregister_netfs - Indicate that a filesystem no longer desires
226 * caching services
227 * @netfs: The description of the filesystem
228 *
229 * Indicate that a filesystem no longer desires caching services for the
230 * moment.
231 *
232 * See Documentation/filesystems/caching/netfs-api.txt for a complete
233 * description.
234 */
235static inline
236void fscache_unregister_netfs(struct fscache_netfs *netfs)
237{
238 if (fscache_available())
239 __fscache_unregister_netfs(netfs);
240}
241
242/**
243 * fscache_lookup_cache_tag - Look up a cache tag
244 * @name: The name of the tag to search for
245 *
246 * Acquire a specific cache referral tag that can be used to select a specific
247 * cache in which to cache an index.
248 *
249 * See Documentation/filesystems/caching/netfs-api.txt for a complete
250 * description.
251 */
252static inline
253struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
254{
255 if (fscache_available())
256 return __fscache_lookup_cache_tag(name);
257 else
258 return NULL;
259}
260
261/**
262 * fscache_release_cache_tag - Release a cache tag
263 * @tag: The tag to release
264 *
265 * Release a reference to a cache referral tag previously looked up.
266 *
267 * See Documentation/filesystems/caching/netfs-api.txt for a complete
268 * description.
269 */
270static inline
271void fscache_release_cache_tag(struct fscache_cache_tag *tag)
272{
273 if (fscache_available())
274 __fscache_release_cache_tag(tag);
275}
276
277/**
278 * fscache_acquire_cookie - Acquire a cookie to represent a cache object
279 * @parent: The cookie that's to be the parent of this one
280 * @def: A description of the cache object, including callback operations
281 * @netfs_data: An arbitrary piece of data to be kept in the cookie to
282 * represent the cache object to the netfs
283 *
284 * This function is used to inform FS-Cache about part of an index hierarchy
285 * that can be used to locate files. This is done by requesting a cookie for
286 * each index in the path to the file.
287 *
288 * See Documentation/filesystems/caching/netfs-api.txt for a complete
289 * description.
290 */
291static inline
292struct fscache_cookie *fscache_acquire_cookie(
293 struct fscache_cookie *parent,
294 const struct fscache_cookie_def *def,
295 void *netfs_data)
296{
297 if (fscache_cookie_valid(parent))
298 return __fscache_acquire_cookie(parent, def, netfs_data);
299 else
300 return NULL;
301}
302
303/**
304 * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
305 * it
306 * @cookie: The cookie being returned
307 * @retire: True if the cache object the cookie represents is to be discarded
308 *
309 * This function returns a cookie to the cache, forcibly discarding the
310 * associated cache object if retire is set to true.
311 *
312 * See Documentation/filesystems/caching/netfs-api.txt for a complete
313 * description.
314 */
315static inline
316void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
317{
318 if (fscache_cookie_valid(cookie))
319 __fscache_relinquish_cookie(cookie, retire);
320}
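Continuing the hypothetical examplefs sketch, a per-file data cookie is usually acquired beneath the netfs's primary index when caching is enabled for an inode, and relinquished (optionally retiring the cache object) when the inode is evicted:

static void examplefs_enable_caching(struct examplefs_inode *ei)
{
	ei->fscache = fscache_acquire_cookie(
			examplefs_cache_netfs.primary_index,
			&examplefs_inode_cookie_def,
			ei);	/* becomes cookie_netfs_data in the callbacks */
}

static void examplefs_disable_caching(struct examplefs_inode *ei, int retire)
{
	/* retire != 0 discards the backing cache object as well */
	fscache_relinquish_cookie(ei->fscache, retire);
	ei->fscache = NULL;
}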
321
322/**
323 * fscache_update_cookie - Request that a cache object be updated
324 * @cookie: The cookie representing the cache object
325 *
326 * Request an update of the index data for the cache object associated with the
327 * cookie.
328 *
329 * See Documentation/filesystems/caching/netfs-api.txt for a complete
330 * description.
331 */
332static inline
333void fscache_update_cookie(struct fscache_cookie *cookie)
334{
335 if (fscache_cookie_valid(cookie))
336 __fscache_update_cookie(cookie);
337}
338
339/**
340 * fscache_pin_cookie - Pin a data-storage cache object in its cache
341 * @cookie: The cookie representing the cache object
342 *
343 * Permit data-storage cache objects to be pinned in the cache.
344 *
345 * See Documentation/filesystems/caching/netfs-api.txt for a complete
346 * description.
347 */
348static inline
349int fscache_pin_cookie(struct fscache_cookie *cookie)
350{
351 return -ENOBUFS;
352}
353
354/**
355 * fscache_unpin_cookie - Unpin a data-storage cache object in its cache
356 * @cookie: The cookie representing the cache object
357 *
358 * Permit data-storage cache objects to be unpinned from the cache.
359 *
360 * See Documentation/filesystems/caching/netfs-api.txt for a complete
361 * description.
362 */
363static inline
364void fscache_unpin_cookie(struct fscache_cookie *cookie)
365{
366}
367
368/**
369 * fscache_attr_changed - Notify cache that an object's attributes changed
370 * @cookie: The cookie representing the cache object
371 *
372 * Send a notification to the cache indicating that an object's attributes have
373 * changed. This includes the data size. These attributes will be obtained
374 * through the get_attr() cookie definition op.
375 *
376 * See Documentation/filesystems/caching/netfs-api.txt for a complete
377 * description.
378 */
379static inline
380int fscache_attr_changed(struct fscache_cookie *cookie)
381{
382 if (fscache_cookie_valid(cookie))
383 return __fscache_attr_changed(cookie);
384 else
385 return -ENOBUFS;
386}
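A netfs would typically call this after a truncate or on learning of a server-side size change, for example (hypothetical examplefs helper):

static void examplefs_set_remote_size(struct examplefs_inode *ei, u64 new_size)
{
	ei->remote_size = new_size;		/* picked up via get_attr() */
	fscache_attr_changed(ei->fscache);	/* -ENOBUFS means no cache object */
}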
387
388/**
389 * fscache_reserve_space - Reserve data space for a cached object
390 * @cookie: The cookie representing the cache object
391 * @size: The amount of space to be reserved
392 *
393 * Reserve an amount of space in the cache for the cache object attached to a
394 * cookie so that a write to that object within the space can always be
395 * honoured.
396 *
397 * See Documentation/filesystems/caching/netfs-api.txt for a complete
398 * description.
399 */
400static inline
401int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
402{
403 return -ENOBUFS;
404}
405
406/**
407 * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
408 * in which to store it
409 * @cookie: The cookie representing the cache object
410 * @page: The netfs page to fill if possible
411 * @end_io_func: The callback to invoke when and if the page is filled
412 * @context: An arbitrary piece of data to pass on to end_io_func()
413 * @gfp: The conditions under which memory allocation should be made
414 *
415 * Read a page from the cache, or if that's not possible make a potential
416 * one-block reservation in the cache into which the page may be stored once
417 * fetched from the server.
418 *
419 * If the page is not backed by the cache object, or if there's some reason
420 * it can't be, -ENOBUFS will be returned and nothing more will be done for
421 * that page.
422 *
423 * Else, if that page is backed by the cache, a read will be initiated directly
424 * to the netfs's page and 0 will be returned by this function. The
425 * end_io_func() callback will be invoked when the operation terminates on a
426 * completion or failure. Note that the callback may be invoked before the
427 * return.
428 *
429 * Else, if the page is unbacked, -ENODATA is returned and a block may have
430 * been allocated in the cache.
431 *
432 * See Documentation/filesystems/caching/netfs-api.txt for a complete
433 * description.
434 */
435static inline
436int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
437 struct page *page,
438 fscache_rw_complete_t end_io_func,
439 void *context,
440 gfp_t gfp)
441{
442 if (fscache_cookie_valid(cookie))
443 return __fscache_read_or_alloc_page(cookie, page, end_io_func,
444 context, gfp);
445 else
446 return -ENOBUFS;
447}
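Putting the three return values together, a readpage() path might look roughly like the sketch below (hypothetical examplefs code; the callback signature follows the fscache_rw_complete_t typedef declared earlier in this header, assumed here to take the page, the context and an error code):

#include <linux/pagemap.h>
#include <linux/gfp.h>

static void examplefs_readpage_done(struct page *page, void *context, int error)
{
	if (error)
		SetPageError(page);
	else
		SetPageUptodate(page);
	unlock_page(page);
}

static int examplefs_readpage_from_cache(struct examplefs_inode *ei,
					 struct page *page)
{
	int ret;

	ret = fscache_read_or_alloc_page(ei->fscache, page,
					 examplefs_readpage_done,
					 NULL, GFP_KERNEL);
	switch (ret) {
	case 0:		/* read submitted; the callback completes the page */
		return 0;
	case -ENODATA:	/* cache block reserved but empty: read from the server */
	case -ENOBUFS:	/* page not cacheable right now: read from the server */
	default:
		return ret;
	}
}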
448
449/**
450 * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
451 * blocks in which to store them
452 * @cookie: The cookie representing the cache object
453 * @mapping: The netfs inode mapping to which the pages will be attached
454 * @pages: A list of potential netfs pages to be filled
455 * @end_io_func: The callback to invoke when and if each page is filled
456 * @context: An arbitrary piece of data to pass on to end_io_func()
457 * @gfp: The conditions under which memory allocation should be made
458 *
459 * Read a set of pages from the cache, or if that's not possible, attempt to
460 * make a potential one-block reservation for each page in the cache into which
461 * that page may be stored once fetched from the server.
462 *
463 * If some pages are not backed by the cache object, or if there's some
464 * reason they can't be, -ENOBUFS will be returned and nothing more will be
465 * done for those pages.
466 *
467 * Else, if some of the pages are backed by the cache, a read will be initiated
468 * directly to the netfs's page and 0 will be returned by this function. The
469 * end_io_func() callback will be invoked when the operation terminates on a
470 * completion or failure. Note that the callback may be invoked before the
471 * return.
472 *
473 * Else, if a page is unbacked, -ENODATA is returned and a block may have
474 * been allocated in the cache.
475 *
476 * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
477 * regard to different pages, the return values are prioritised in that order.
478 * Any pages submitted for reading are removed from the pages list.
479 *
480 * See Documentation/filesystems/caching/netfs-api.txt for a complete
481 * description.
482 */
483static inline
484int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
485 struct address_space *mapping,
486 struct list_head *pages,
487 unsigned *nr_pages,
488 fscache_rw_complete_t end_io_func,
489 void *context,
490 gfp_t gfp)
491{
492 if (fscache_cookie_valid(cookie))
493 return __fscache_read_or_alloc_pages(cookie, mapping, pages,
494 nr_pages, end_io_func,
495 context, gfp);
496 else
497 return -ENOBUFS;
498}
499
500/**
501 * fscache_alloc_page - Allocate a block in which to store a page
502 * @cookie: The cookie representing the cache object
503 * @page: The netfs page for which to allocate a cache block
504 * @gfp: The conditions under which memory allocation should be made
505 *
506 * Request allocation of a block in the cache in which to store a netfs page
507 * without retrieving any contents from the cache.
508 *
509 * If the page is not backed by a file then -ENOBUFS will be returned and
510 * nothing more will be done, and no reservation will be made.
511 *
512 * Else, a block will be allocated if one wasn't already, and 0 will be
513 * returned.
514 *
515 * See Documentation/filesystems/caching/netfs-api.txt for a complete
516 * description.
517 */
518static inline
519int fscache_alloc_page(struct fscache_cookie *cookie,
520 struct page *page,
521 gfp_t gfp)
522{
523 if (fscache_cookie_valid(cookie))
524 return __fscache_alloc_page(cookie, page, gfp);
525 else
526 return -ENOBUFS;
527}
528
529/**
530 * fscache_write_page - Request storage of a page in the cache
531 * @cookie: The cookie representing the cache object
532 * @page: The netfs page to store
533 * @gfp: The conditions under which memory allocation should be made
534 *
535 * Request the contents of the netfs page be written into the cache. This
536 * request may be ignored if no cache block is currently allocated, in which
537 * case it will return -ENOBUFS.
538 *
539 * If a cache block was already allocated, a write will be initiated and 0 will
540 * be returned. The PG_fscache_write page bit is set immediately and will then
541 * be cleared at the completion of the write to indicate the success or failure
542 * of the operation. Note that the completion may happen before the return.
543 *
544 * See Documentation/filesystems/caching/netfs-api.txt for a complete
545 * description.
546 */
547static inline
548int fscache_write_page(struct fscache_cookie *cookie,
549 struct page *page,
550 gfp_t gfp)
551{
552 if (fscache_cookie_valid(cookie))
553 return __fscache_write_page(cookie, page, gfp);
554 else
555 return -ENOBUFS;
556}
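Once a page has been fetched from the server, the examplefs sketch might push it into the cache like this (hypothetical; on failure the page marking is withdrawn with the uncache helper documented just below):

static void examplefs_write_to_cache(struct examplefs_inode *ei,
				     struct page *page)
{
	int ret;

	ret = fscache_write_page(ei->fscache, page, GFP_KERNEL);
	if (ret == -ENOBUFS)
		return;			/* no cache block allocated: nothing to do */
	if (ret < 0)			/* write could not be started */
		fscache_uncache_page(ei->fscache, page);
}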
557
558/**
559 * fscache_uncache_page - Indicate that caching is no longer required on a page
560 * @cookie: The cookie representing the cache object
561 * @page: The netfs page that was being cached.
562 *
563 * Tell the cache that we no longer want a page to be cached and that it should
564 * remove any knowledge of the netfs page it may have.
565 *
566 * Note that this cannot cancel any outstanding I/O operations between this
567 * page and the cache.
568 *
569 * See Documentation/filesystems/caching/netfs-api.txt for a complete
570 * description.
571 */
572static inline
573void fscache_uncache_page(struct fscache_cookie *cookie,
574 struct page *page)
575{
576 if (fscache_cookie_valid(cookie))
577 __fscache_uncache_page(cookie, page);
578}
579
580/**
581 * fscache_check_page_write - Ask if a page is being written to the cache
582 * @cookie: The cookie representing the cache object
583 * @page: The netfs page that is being cached.
584 *
585 * Ask the cache if a page is being written to the cache.
586 *
587 * See Documentation/filesystems/caching/netfs-api.txt for a complete
588 * description.
589 */
590static inline
591bool fscache_check_page_write(struct fscache_cookie *cookie,
592 struct page *page)
593{
594 if (fscache_cookie_valid(cookie))
595 return __fscache_check_page_write(cookie, page);
596 return false;
597}
598
599/**
600 * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
601 * @cookie: The cookie representing the cache object
602 * @page: The netfs page that is being cached.
603 *
604 * Ask the cache to wake us up when a page is no longer being written to the
605 * cache.
606 *
607 * See Documentation/filesystems/caching/netfs-api.txt for a complete
608 * description.
609 */
610static inline
611void fscache_wait_on_page_write(struct fscache_cookie *cookie,
612 struct page *page)
613{
614 if (fscache_cookie_valid(cookie))
615 __fscache_wait_on_page_write(cookie, page);
616}
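Together, these two helpers are what a netfs releasepage() path needs so that the VM never frees a page the cache is still writing out (hypothetical examplefs sketch; __GFP_WAIT gates whether it is safe to sleep here):

static int examplefs_release_page(struct examplefs_inode *ei,
				  struct page *page, gfp_t gfp)
{
	if (fscache_check_page_write(ei->fscache, page)) {
		if (!(gfp & __GFP_WAIT))
			return 0;	/* cannot sleep: refuse to release the page */
		fscache_wait_on_page_write(ei->fscache, page);
	}
	fscache_uncache_page(ei->fscache, page);
	return 1;			/* page may now be released */
}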
617
618#endif /* _LINUX_FSCACHE_H */
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 7ef1caf50269..f2a78b5e8b55 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -18,7 +18,6 @@
18#define _FSL_DEVICE_H_ 18#define _FSL_DEVICE_H_
19 19
20#include <linux/types.h> 20#include <linux/types.h>
21#include <linux/phy.h>
22 21
23/* 22/*
24 * Some conventions on how we handle peripherals on Freescale chips 23 * Some conventions on how we handle peripherals on Freescale chips
@@ -44,27 +43,6 @@
44 * 43 *
45 */ 44 */
46 45
47struct gianfar_platform_data {
48 /* device specific information */
49 u32 device_flags;
50 char bus_id[BUS_ID_SIZE];
51 phy_interface_t interface;
52};
53
54struct gianfar_mdio_data {
55 /* board specific information */
56 int irq[32];
57};
58
59/* Flags in gianfar_platform_data */
60#define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */
61#define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */
62
63struct fsl_i2c_platform_data {
64 /* device specific information */
65 u32 device_flags;
66};
67
68/* Flags related to I2C device features */ 46/* Flags related to I2C device features */
69#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001 47#define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001
70#define FSL_I2C_DEV_CLOCK_5200 0x00000002 48#define FSL_I2C_DEV_CLOCK_5200 0x00000002
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index a7f8134c594e..da5405dce347 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1,15 +1,18 @@
1#ifndef _LINUX_FTRACE_H 1#ifndef _LINUX_FTRACE_H
2#define _LINUX_FTRACE_H 2#define _LINUX_FTRACE_H
3 3
4#include <linux/linkage.h> 4#include <linux/trace_clock.h>
5#include <linux/fs.h>
6#include <linux/ktime.h>
7#include <linux/init.h>
8#include <linux/types.h>
9#include <linux/module.h>
10#include <linux/kallsyms.h> 5#include <linux/kallsyms.h>
6#include <linux/linkage.h>
11#include <linux/bitops.h> 7#include <linux/bitops.h>
8#include <linux/module.h>
9#include <linux/ktime.h>
12#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/types.h>
12#include <linux/init.h>
13#include <linux/fs.h>
14
15#include <asm/ftrace.h>
13 16
14#ifdef CONFIG_FUNCTION_TRACER 17#ifdef CONFIG_FUNCTION_TRACER
15 18
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write,
95 loff_t *ppos); 98 loff_t *ppos);
96#endif 99#endif
97 100
101struct ftrace_func_command {
102 struct list_head list;
103 char *name;
104 int (*func)(char *func, char *cmd,
105 char *params, int enable);
106};
107
98#ifdef CONFIG_DYNAMIC_FTRACE 108#ifdef CONFIG_DYNAMIC_FTRACE
99/* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ 109
100#include <asm/ftrace.h> 110int ftrace_arch_code_modify_prepare(void);
111int ftrace_arch_code_modify_post_process(void);
112
113struct seq_file;
114
115struct ftrace_probe_ops {
116 void (*func)(unsigned long ip,
117 unsigned long parent_ip,
118 void **data);
119 int (*callback)(unsigned long ip, void **data);
120 void (*free)(void **data);
121 int (*print)(struct seq_file *m,
122 unsigned long ip,
123 struct ftrace_probe_ops *ops,
124 void *data);
125};
126
127extern int
128register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
129 void *data);
130extern void
131unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
132 void *data);
133extern void
134unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
135extern void unregister_ftrace_function_probe_all(char *glob);
101 136
102enum { 137enum {
103 FTRACE_FL_FREE = (1 << 0), 138 FTRACE_FL_FREE = (1 << 0),
@@ -110,15 +145,23 @@ enum {
110}; 145};
111 146
112struct dyn_ftrace { 147struct dyn_ftrace {
113 struct list_head list; 148 union {
114 unsigned long ip; /* address of mcount call-site */ 149 unsigned long ip; /* address of mcount call-site */
115 unsigned long flags; 150 struct dyn_ftrace *freelist;
116 struct dyn_arch_ftrace arch; 151 };
152 union {
153 unsigned long flags;
154 struct dyn_ftrace *newlist;
155 };
156 struct dyn_arch_ftrace arch;
117}; 157};
118 158
119int ftrace_force_update(void); 159int ftrace_force_update(void);
120void ftrace_set_filter(unsigned char *buf, int len, int reset); 160void ftrace_set_filter(unsigned char *buf, int len, int reset);
121 161
162int register_ftrace_command(struct ftrace_func_command *cmd);
163int unregister_ftrace_command(struct ftrace_func_command *cmd);
164
122/* defined in arch */ 165/* defined in arch */
123extern int ftrace_ip_converted(unsigned long ip); 166extern int ftrace_ip_converted(unsigned long ip);
124extern int ftrace_dyn_arch_init(void *data); 167extern int ftrace_dyn_arch_init(void *data);
@@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func);
126extern void ftrace_caller(void); 169extern void ftrace_caller(void);
127extern void ftrace_call(void); 170extern void ftrace_call(void);
128extern void mcount_call(void); 171extern void mcount_call(void);
172
173#ifndef FTRACE_ADDR
174#define FTRACE_ADDR ((unsigned long)ftrace_caller)
175#endif
129#ifdef CONFIG_FUNCTION_GRAPH_TRACER 176#ifdef CONFIG_FUNCTION_GRAPH_TRACER
130extern void ftrace_graph_caller(void); 177extern void ftrace_graph_caller(void);
131extern int ftrace_enable_ftrace_graph_caller(void); 178extern int ftrace_enable_ftrace_graph_caller(void);
@@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
136#endif 183#endif
137 184
138/** 185/**
139 * ftrace_make_nop - convert code into top 186 * ftrace_make_nop - convert code into nop
140 * @mod: module structure if called by module load initialization 187 * @mod: module structure if called by module load initialization
141 * @rec: the mcount call site record 188 * @rec: the mcount call site record
142 * @addr: the address that the call site should be calling 189 * @addr: the address that the call site should be calling
@@ -181,7 +228,6 @@ extern int ftrace_make_nop(struct module *mod,
181 */ 228 */
182extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); 229extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
183 230
184
185/* May be defined in arch */ 231/* May be defined in arch */
186extern int ftrace_arch_read_dyn_info(char *buf, int size); 232extern int ftrace_arch_read_dyn_info(char *buf, int size);
187 233
@@ -198,6 +244,14 @@ extern void ftrace_enable_daemon(void);
198# define ftrace_disable_daemon() do { } while (0) 244# define ftrace_disable_daemon() do { } while (0)
199# define ftrace_enable_daemon() do { } while (0) 245# define ftrace_enable_daemon() do { } while (0)
200static inline void ftrace_release(void *start, unsigned long size) { } 246static inline void ftrace_release(void *start, unsigned long size) { }
247static inline int register_ftrace_command(struct ftrace_func_command *cmd)
248{
249 return -EINVAL;
250}
251static inline int unregister_ftrace_command(char *cmd_name)
252{
253 return -EINVAL;
254}
201#endif /* CONFIG_DYNAMIC_FTRACE */ 255#endif /* CONFIG_DYNAMIC_FTRACE */
202 256
203/* totally disable ftrace - can not re-enable after this */ 257/* totally disable ftrace - can not re-enable after this */
@@ -233,24 +287,25 @@ static inline void __ftrace_enabled_restore(int enabled)
233#endif 287#endif
234} 288}
235 289
236#ifdef CONFIG_FRAME_POINTER 290#ifndef HAVE_ARCH_CALLER_ADDR
237/* TODO: need to fix this for ARM */ 291# ifdef CONFIG_FRAME_POINTER
238# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) 292# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
239# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) 293# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
240# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) 294# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
241# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) 295# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
242# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) 296# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
243# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) 297# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
244# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) 298# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
245#else 299# else
246# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) 300# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
247# define CALLER_ADDR1 0UL 301# define CALLER_ADDR1 0UL
248# define CALLER_ADDR2 0UL 302# define CALLER_ADDR2 0UL
249# define CALLER_ADDR3 0UL 303# define CALLER_ADDR3 0UL
250# define CALLER_ADDR4 0UL 304# define CALLER_ADDR4 0UL
251# define CALLER_ADDR5 0UL 305# define CALLER_ADDR5 0UL
252# define CALLER_ADDR6 0UL 306# define CALLER_ADDR6 0UL
253#endif 307# endif
308#endif /* ifndef HAVE_ARCH_CALLER_ADDR */
254 309
255#ifdef CONFIG_IRQSOFF_TRACER 310#ifdef CONFIG_IRQSOFF_TRACER
256 extern void time_hardirqs_on(unsigned long a0, unsigned long a1); 311 extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
@@ -268,54 +323,6 @@ static inline void __ftrace_enabled_restore(int enabled)
268# define trace_preempt_off(a0, a1) do { } while (0) 323# define trace_preempt_off(a0, a1) do { } while (0)
269#endif 324#endif
270 325
271#ifdef CONFIG_TRACING
272extern int ftrace_dump_on_oops;
273
274extern void tracing_start(void);
275extern void tracing_stop(void);
276extern void ftrace_off_permanent(void);
277
278extern void
279ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
280
281/**
282 * ftrace_printk - printf formatting in the ftrace buffer
283 * @fmt: the printf format for printing
284 *
285 * Note: __ftrace_printk is an internal function for ftrace_printk and
286 * the @ip is passed in via the ftrace_printk macro.
287 *
288 * This function allows a kernel developer to debug fast path sections
289 * that printk is not appropriate for. By scattering in various
290 * printk like tracing in the code, a developer can quickly see
291 * where problems are occurring.
292 *
293 * This is intended as a debugging tool for the developer only.
294 * Please refrain from leaving ftrace_printks scattered around in
295 * your code.
296 */
297# define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt)
298extern int
299__ftrace_printk(unsigned long ip, const char *fmt, ...)
300 __attribute__ ((format (printf, 2, 3)));
301extern void ftrace_dump(void);
302#else
303static inline void
304ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
305static inline int
306ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
307
308static inline void tracing_start(void) { }
309static inline void tracing_stop(void) { }
310static inline void ftrace_off_permanent(void) { }
311static inline int
312ftrace_printk(const char *fmt, ...)
313{
314 return 0;
315}
316static inline void ftrace_dump(void) { }
317#endif
318
319#ifdef CONFIG_FTRACE_MCOUNT_RECORD 326#ifdef CONFIG_FTRACE_MCOUNT_RECORD
320extern void ftrace_init(void); 327extern void ftrace_init(void);
321extern void ftrace_init_module(struct module *mod, 328extern void ftrace_init_module(struct module *mod,
@@ -327,36 +334,6 @@ ftrace_init_module(struct module *mod,
327 unsigned long *start, unsigned long *end) { } 334 unsigned long *start, unsigned long *end) { }
328#endif 335#endif
329 336
330enum {
331 POWER_NONE = 0,
332 POWER_CSTATE = 1,
333 POWER_PSTATE = 2,
334};
335
336struct power_trace {
337#ifdef CONFIG_POWER_TRACER
338 ktime_t stamp;
339 ktime_t end;
340 int type;
341 int state;
342#endif
343};
344
345#ifdef CONFIG_POWER_TRACER
346extern void trace_power_start(struct power_trace *it, unsigned int type,
347 unsigned int state);
348extern void trace_power_mark(struct power_trace *it, unsigned int type,
349 unsigned int state);
350extern void trace_power_end(struct power_trace *it);
351#else
352static inline void trace_power_start(struct power_trace *it, unsigned int type,
353 unsigned int state) { }
354static inline void trace_power_mark(struct power_trace *it, unsigned int type,
355 unsigned int state) { }
356static inline void trace_power_end(struct power_trace *it) { }
357#endif
358
359
360/* 337/*
361 * Structure that defines an entry function trace. 338 * Structure that defines an entry function trace.
362 */ 339 */
@@ -379,6 +356,9 @@ struct ftrace_graph_ret {
379 356
380#ifdef CONFIG_FUNCTION_GRAPH_TRACER 357#ifdef CONFIG_FUNCTION_GRAPH_TRACER
381 358
359/* for init task */
360#define INIT_FTRACE_GRAPH .ret_stack = NULL
361
382/* 362/*
383 * Stack of return addresses for functions 363 * Stack of return addresses for functions
384 * of a thread. 364 * of a thread.
@@ -398,8 +378,7 @@ struct ftrace_ret_stack {
398extern void return_to_handler(void); 378extern void return_to_handler(void);
399 379
400extern int 380extern int
401ftrace_push_return_trace(unsigned long ret, unsigned long long time, 381ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
402 unsigned long func, int *depth);
403extern void 382extern void
404ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); 383ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
405 384
@@ -454,10 +433,11 @@ static inline void unpause_graph_tracing(void)
454{ 433{
455 atomic_dec(&current->tracing_graph_pause); 434 atomic_dec(&current->tracing_graph_pause);
456} 435}
457#else 436#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
458 437
459#define __notrace_funcgraph 438#define __notrace_funcgraph
460#define __irq_entry 439#define __irq_entry
440#define INIT_FTRACE_GRAPH
461 441
462static inline void ftrace_graph_init_task(struct task_struct *t) { } 442static inline void ftrace_graph_init_task(struct task_struct *t) { }
463static inline void ftrace_graph_exit_task(struct task_struct *t) { } 443static inline void ftrace_graph_exit_task(struct task_struct *t) { }
@@ -469,7 +449,7 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
469 449
470static inline void pause_graph_tracing(void) { } 450static inline void pause_graph_tracing(void) { }
471static inline void unpause_graph_tracing(void) { } 451static inline void unpause_graph_tracing(void) { }
472#endif 452#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
473 453
474#ifdef CONFIG_TRACING 454#ifdef CONFIG_TRACING
475#include <linux/sched.h> 455#include <linux/sched.h>
@@ -514,6 +494,50 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
514 return tsk->trace & TSK_TRACE_FL_GRAPH; 494 return tsk->trace & TSK_TRACE_FL_GRAPH;
515} 495}
516 496
497extern int ftrace_dump_on_oops;
498
517#endif /* CONFIG_TRACING */ 499#endif /* CONFIG_TRACING */
518 500
501
502#ifdef CONFIG_HW_BRANCH_TRACER
503
504void trace_hw_branch(u64 from, u64 to);
505void trace_hw_branch_oops(void);
506
507#else /* CONFIG_HW_BRANCH_TRACER */
508
509static inline void trace_hw_branch(u64 from, u64 to) {}
510static inline void trace_hw_branch_oops(void) {}
511
512#endif /* CONFIG_HW_BRANCH_TRACER */
513
514/*
515 * A syscall entry in the ftrace syscalls array.
516 *
517 * @name: name of the syscall
518 * @nb_args: number of parameters it takes
519 * @types: list of types as strings
520 * @args: list of args as strings (args[i] matches types[i])
521 */
522struct syscall_metadata {
523 const char *name;
524 int nb_args;
525 const char **types;
526 const char **args;
527};
528
529#ifdef CONFIG_FTRACE_SYSCALLS
530extern void arch_init_ftrace_syscalls(void);
531extern struct syscall_metadata *syscall_nr_to_meta(int nr);
532extern void start_ftrace_syscalls(void);
533extern void stop_ftrace_syscalls(void);
534extern void ftrace_syscall_enter(struct pt_regs *regs);
535extern void ftrace_syscall_exit(struct pt_regs *regs);
536#else
537static inline void start_ftrace_syscalls(void) { }
538static inline void stop_ftrace_syscalls(void) { }
539static inline void ftrace_syscall_enter(struct pt_regs *regs) { }
540static inline void ftrace_syscall_exit(struct pt_regs *regs) { }
541#endif
542
519#endif /* _LINUX_FTRACE_H */ 543#endif /* _LINUX_FTRACE_H */
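A minimal, hypothetical user of the ftrace_func_command hook added above might look like this (the command name and body are invented; only the structure layout and register_ftrace_command() come from the header, and the stub returns -EINVAL when CONFIG_DYNAMIC_FTRACE is off):

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int example_ftrace_cmd(char *func, char *cmd, char *params, int enable)
{
	pr_info("ftrace command '%s' on '%s' (params=%s, enable=%d)\n",
		cmd, func, params ? params : "", enable);
	return 0;
}

static struct ftrace_func_command example_cmd = {
	.name	= "example",
	.func	= example_ftrace_cmd,
};

static int __init example_cmd_init(void)
{
	return register_ftrace_command(&example_cmd);
}
__initcall(example_cmd_init);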
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h
index 366a054d0b05..dca7bf8cffe2 100644
--- a/include/linux/ftrace_irq.h
+++ b/include/linux/ftrace_irq.h
@@ -2,7 +2,7 @@
2#define _LINUX_FTRACE_IRQ_H 2#define _LINUX_FTRACE_IRQ_H
3 3
4 4
5#if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) 5#ifdef CONFIG_FTRACE_NMI_ENTER
6extern void ftrace_nmi_enter(void); 6extern void ftrace_nmi_enter(void);
7extern void ftrace_nmi_exit(void); 7extern void ftrace_nmi_exit(void);
8#else 8#else
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index dd20cd78faa8..0bbc15f54536 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -4,6 +4,7 @@
4#include <linux/mmzone.h> 4#include <linux/mmzone.h>
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6#include <linux/linkage.h> 6#include <linux/linkage.h>
7#include <linux/topology.h>
7 8
8struct vm_area_struct; 9struct vm_area_struct;
9 10
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index f83288347dda..45257475623c 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -15,55 +15,61 @@
15 * - bits 0-7 are the preemption count (max preemption depth: 256) 15 * - bits 0-7 are the preemption count (max preemption depth: 256)
16 * - bits 8-15 are the softirq count (max # of softirqs: 256) 16 * - bits 8-15 are the softirq count (max # of softirqs: 256)
17 * 17 *
18 * The hardirq count can be overridden per architecture, the default is: 18 * The hardirq count can in theory reach the same as NR_IRQS.
19 * In reality, the number of nested IRQS is limited to the stack
20 * size as well. For archs with over 1000 IRQS it is not practical
21 * to expect that they will all nest. We give a max of 10 bits for
22 * hardirq nesting. An arch may choose to give less than 10 bits.
23 * m68k expects it to be 8.
19 * 24 *
20 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096) 25 * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
21 * - ( bit 28 is the PREEMPT_ACTIVE flag. ) 26 * - bit 26 is the NMI_MASK
27 * - bit 28 is the PREEMPT_ACTIVE flag
22 * 28 *
23 * PREEMPT_MASK: 0x000000ff 29 * PREEMPT_MASK: 0x000000ff
24 * SOFTIRQ_MASK: 0x0000ff00 30 * SOFTIRQ_MASK: 0x0000ff00
25 * HARDIRQ_MASK: 0x0fff0000 31 * HARDIRQ_MASK: 0x03ff0000
32 * NMI_MASK: 0x04000000
26 */ 33 */
27#define PREEMPT_BITS 8 34#define PREEMPT_BITS 8
28#define SOFTIRQ_BITS 8 35#define SOFTIRQ_BITS 8
36#define NMI_BITS 1
29 37
30#ifndef HARDIRQ_BITS 38#define MAX_HARDIRQ_BITS 10
31#define HARDIRQ_BITS 12
32 39
33#ifndef MAX_HARDIRQS_PER_CPU 40#ifndef HARDIRQ_BITS
34#define MAX_HARDIRQS_PER_CPU NR_IRQS 41# define HARDIRQ_BITS MAX_HARDIRQ_BITS
35#endif 42#endif
36 43
37/* 44#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
38 * The hardirq mask has to be large enough to have space for potentially 45#error HARDIRQ_BITS too high!
39 * all IRQ sources in the system nesting on a single CPU.
40 */
41#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
42# error HARDIRQ_BITS is too low!
43#endif
44#endif 46#endif
45 47
46#define PREEMPT_SHIFT 0 48#define PREEMPT_SHIFT 0
47#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) 49#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
48#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) 50#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
51#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
49 52
50#define __IRQ_MASK(x) ((1UL << (x))-1) 53#define __IRQ_MASK(x) ((1UL << (x))-1)
51 54
52#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) 55#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
53#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) 56#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
54#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) 57#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
58#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
55 59
56#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) 60#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
57#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) 61#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
58#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) 62#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
63#define NMI_OFFSET (1UL << NMI_SHIFT)
59 64
60#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) 65#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
61#error PREEMPT_ACTIVE is too low! 66#error PREEMPT_ACTIVE is too low!
62#endif 67#endif
63 68
64#define hardirq_count() (preempt_count() & HARDIRQ_MASK) 69#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
65#define softirq_count() (preempt_count() & SOFTIRQ_MASK) 70#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
66#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) 71#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
72 | NMI_MASK))
67 73
68/* 74/*
69 * Are we doing bottom half or hardware interrupt processing? 75 * Are we doing bottom half or hardware interrupt processing?
@@ -73,6 +79,11 @@
73#define in_softirq() (softirq_count()) 79#define in_softirq() (softirq_count())
74#define in_interrupt() (irq_count()) 80#define in_interrupt() (irq_count())
75 81
82/*
83 * Are we in NMI context?
84 */
85#define in_nmi() (preempt_count() & NMI_MASK)
86
76#if defined(CONFIG_PREEMPT) 87#if defined(CONFIG_PREEMPT)
77# define PREEMPT_INATOMIC_BASE kernel_locked() 88# define PREEMPT_INATOMIC_BASE kernel_locked()
78# define PREEMPT_CHECK_OFFSET 1 89# define PREEMPT_CHECK_OFFSET 1
@@ -105,7 +116,7 @@
105# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET 116# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
106#endif 117#endif
107 118
108#ifdef CONFIG_SMP 119#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
109extern void synchronize_irq(unsigned int irq); 120extern void synchronize_irq(unsigned int irq);
110#else 121#else
111# define synchronize_irq(irq) barrier() 122# define synchronize_irq(irq) barrier()
@@ -164,20 +175,24 @@ extern void irq_enter(void);
164 */ 175 */
165extern void irq_exit(void); 176extern void irq_exit(void);
166 177
167#define nmi_enter() \ 178#define nmi_enter() \
168 do { \ 179 do { \
169 ftrace_nmi_enter(); \ 180 ftrace_nmi_enter(); \
170 lockdep_off(); \ 181 BUG_ON(in_nmi()); \
171 rcu_nmi_enter(); \ 182 add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
172 __irq_enter(); \ 183 lockdep_off(); \
184 rcu_nmi_enter(); \
185 trace_hardirq_enter(); \
173 } while (0) 186 } while (0)
174 187
175#define nmi_exit() \ 188#define nmi_exit() \
176 do { \ 189 do { \
177 __irq_exit(); \ 190 trace_hardirq_exit(); \
178 rcu_nmi_exit(); \ 191 rcu_nmi_exit(); \
179 lockdep_on(); \ 192 lockdep_on(); \
180 ftrace_nmi_exit(); \ 193 BUG_ON(!in_nmi()); \
194 sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \
195 ftrace_nmi_exit(); \
181 } while (0) 196 } while (0)
182 197
183#endif /* LINUX_HARDIRQ_H */ 198#endif /* LINUX_HARDIRQ_H */
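The practical consequence of the new NMI bit is that code can now test in_nmi() to avoid operations that are unsafe in that context; a hypothetical sketch:

#include <linux/hardirq.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static void example_record_event(void)
{
	if (in_nmi())
		return;		/* NMI context: even spinlocks may deadlock here */
	if (in_interrupt())
		return;		/* hard/soft IRQ context: cannot take a sleeping lock */

	mutex_lock(&example_lock);
	/* update bookkeeping protected by example_lock */
	mutex_unlock(&example_lock);
}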
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index ed21bd3dbd25..29ee2873f4a8 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -1,68 +1,6 @@
1#ifndef _LINUX_HDREG_H 1#ifndef _LINUX_HDREG_H
2#define _LINUX_HDREG_H 2#define _LINUX_HDREG_H
3 3
4#ifdef __KERNEL__
5#include <linux/ata.h>
6
7/*
8 * This file contains some defines for the AT-hd-controller.
9 * Various sources.
10 */
11
12/* ide.c has its own port definitions in "ide.h" */
13
14#define HD_IRQ 14
15
16/* Hd controller regs. Ref: IBM AT Bios-listing */
17#define HD_DATA 0x1f0 /* _CTL when writing */
18#define HD_ERROR 0x1f1 /* see err-bits */
19#define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */
20#define HD_SECTOR 0x1f3 /* starting sector */
21#define HD_LCYL 0x1f4 /* starting cylinder */
22#define HD_HCYL 0x1f5 /* high byte of starting cyl */
23#define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */
24#define HD_STATUS 0x1f7 /* see status-bits */
25#define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */
26#define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */
27#define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */
28
29#define HD_CMD 0x3f6 /* used for resets */
30#define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */
31
32/* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */
33
34/* Bits of HD_STATUS */
35#define ERR_STAT 0x01
36#define INDEX_STAT 0x02
37#define ECC_STAT 0x04 /* Corrected error */
38#define DRQ_STAT 0x08
39#define SEEK_STAT 0x10
40#define SRV_STAT 0x10
41#define WRERR_STAT 0x20
42#define READY_STAT 0x40
43#define BUSY_STAT 0x80
44
45/* Bits for HD_ERROR */
46#define MARK_ERR 0x01 /* Bad address mark */
47#define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */
48#define TRK0_ERR 0x02 /* couldn't find track 0 */
49#define EOM_ERR 0x02 /* End Of Media (ATAPI) */
50#define ABRT_ERR 0x04 /* Command aborted */
51#define MCR_ERR 0x08 /* media change request */
52#define ID_ERR 0x10 /* ID field not found */
53#define MC_ERR 0x20 /* media changed */
54#define ECC_ERR 0x40 /* Uncorrectable ECC error */
55#define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */
56#define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */
57#define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */
58
59/* Bits of HD_NSECTOR */
60#define CD 0x01
61#define IO 0x02
62#define REL 0x04
63#define TAG_MASK 0xf8
64#endif /* __KERNEL__ */
65
66#include <linux/types.h> 4#include <linux/types.h>
67 5
68/* 6/*
@@ -191,6 +129,7 @@ typedef struct hd_drive_hob_hdr {
191#define TASKFILE_INVALID 0x7fff 129#define TASKFILE_INVALID 0x7fff
192#endif 130#endif
193 131
132#ifndef __KERNEL__
194/* ATA/ATAPI Commands pre T13 Spec */ 133/* ATA/ATAPI Commands pre T13 Spec */
195#define WIN_NOP 0x00 134#define WIN_NOP 0x00
196/* 135/*
@@ -379,6 +318,7 @@ typedef struct hd_drive_hob_hdr {
379#define SECURITY_ERASE_UNIT 0xBD 318#define SECURITY_ERASE_UNIT 0xBD
380#define SECURITY_FREEZE_LOCK 0xBE 319#define SECURITY_FREEZE_LOCK 0xBE
381#define SECURITY_DISABLE_PASSWORD 0xBF 320#define SECURITY_DISABLE_PASSWORD 0xBF
321#endif /* __KERNEL__ */
382 322
383struct hd_geometry { 323struct hd_geometry {
384 unsigned char heads; 324 unsigned char heads;
@@ -448,6 +388,7 @@ enum {
448 388
449#define __NEW_HD_DRIVE_ID 389#define __NEW_HD_DRIVE_ID
450 390
391#ifndef __KERNEL__
451/* 392/*
452 * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec. 393 * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec.
453 * 394 *
@@ -699,6 +640,7 @@ struct hd_driveid {
699 * 7:0 Signature 640 * 7:0 Signature
700 */ 641 */
701}; 642};
643#endif /* __KERNEL__ */
702 644
703/* 645/*
704 * IDE "nice" flags. These are used on a per drive basis to determine 646 * IDE "nice" flags. These are used on a per drive basis to determine
diff --git a/include/linux/hid.h b/include/linux/hid.h
index fa8ee9cef7be..a72876e43589 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -270,6 +270,7 @@ struct hid_item {
270 270
271#define HID_QUIRK_INVERT 0x00000001 271#define HID_QUIRK_INVERT 0x00000001
272#define HID_QUIRK_NOTOUCH 0x00000002 272#define HID_QUIRK_NOTOUCH 0x00000002
273#define HID_QUIRK_IGNORE 0x00000004
273#define HID_QUIRK_NOGET 0x00000008 274#define HID_QUIRK_NOGET 0x00000008
274#define HID_QUIRK_BADPAD 0x00000020 275#define HID_QUIRK_BADPAD 0x00000020
275#define HID_QUIRK_MULTI_INPUT 0x00000040 276#define HID_QUIRK_MULTI_INPUT 0x00000040
@@ -603,12 +604,17 @@ struct hid_ll_driver {
603 int (*open)(struct hid_device *hdev); 604 int (*open)(struct hid_device *hdev);
604 void (*close)(struct hid_device *hdev); 605 void (*close)(struct hid_device *hdev);
605 606
607 int (*power)(struct hid_device *hdev, int level);
608
606 int (*hidinput_input_event) (struct input_dev *idev, unsigned int type, 609 int (*hidinput_input_event) (struct input_dev *idev, unsigned int type,
607 unsigned int code, int value); 610 unsigned int code, int value);
608 611
609 int (*parse)(struct hid_device *hdev); 612 int (*parse)(struct hid_device *hdev);
610}; 613};
611 614
615#define PM_HINT_FULLON 1<<5
616#define PM_HINT_NORMAL 1<<1
617
612/* Applications from HID Usage Tables 4/8/99 Version 1.1 */ 618/* Applications from HID Usage Tables 4/8/99 Version 1.1 */
613/* We ignore a few input applications that are not widely used */ 619/* We ignore a few input applications that are not widely used */
614#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) 620#define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002))
@@ -641,6 +647,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
641void hid_output_report(struct hid_report *report, __u8 *data); 647void hid_output_report(struct hid_report *report, __u8 *data);
642struct hid_device *hid_allocate_device(void); 648struct hid_device *hid_allocate_device(void);
643int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); 649int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
650int hid_check_keys_pressed(struct hid_device *hid);
644int hid_connect(struct hid_device *hid, unsigned int connect_mask); 651int hid_connect(struct hid_device *hid, unsigned int connect_mask);
645 652
646/** 653/**
@@ -791,21 +798,5 @@ dbg_hid(const char *fmt, ...)
791 __FILE__ , ## arg) 798 __FILE__ , ## arg)
792#endif /* HID_FF */ 799#endif /* HID_FF */
793 800
794#ifdef __KERNEL__
795#ifdef CONFIG_HID_COMPAT
796#define HID_COMPAT_LOAD_DRIVER(name) \
797/* prototype to avoid sparse warning */ \
798extern void hid_compat_##name(void); \
799void hid_compat_##name(void) { } \
800EXPORT_SYMBOL(hid_compat_##name)
801#else
802#define HID_COMPAT_LOAD_DRIVER(name)
803#endif /* HID_COMPAT */
804#define HID_COMPAT_CALL_DRIVER(name) do { \
805 extern void hid_compat_##name(void); \
806 hid_compat_##name(); \
807} while (0)
808#endif /* __KERNEL__ */
809
810#endif 801#endif
811 802
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 7ff5c55f9b55..1fcb7126a01f 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -19,8 +19,21 @@ static inline void flush_kernel_dcache_page(struct page *page)
19} 19}
20#endif 20#endif
21 21
22#ifdef CONFIG_HIGHMEM 22#include <asm/kmap_types.h>
23
24#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
25
26void debug_kmap_atomic(enum km_type type);
27
28#else
23 29
30static inline void debug_kmap_atomic(enum km_type type)
31{
32}
33
34#endif
35
36#ifdef CONFIG_HIGHMEM
24#include <asm/highmem.h> 37#include <asm/highmem.h>
25 38
26/* declarations for linux/mm/highmem.c */ 39/* declarations for linux/mm/highmem.c */
@@ -44,8 +57,6 @@ static inline void *kmap(struct page *page)
44 57
45#define kunmap(page) do { (void) (page); } while (0) 58#define kunmap(page) do { (void) (page); } while (0)
46 59
47#include <asm/kmap_types.h>
48
49static inline void *kmap_atomic(struct page *page, enum km_type idx) 60static inline void *kmap_atomic(struct page *page, enum km_type idx)
50{ 61{
51 pagefault_disable(); 62 pagefault_disable();
@@ -187,16 +198,4 @@ static inline void copy_highpage(struct page *to, struct page *from)
187 kunmap_atomic(vto, KM_USER1); 198 kunmap_atomic(vto, KM_USER1);
188} 199}
189 200
190#if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT)
191
192void debug_kmap_atomic(enum km_type type);
193
194#else
195
196static inline void debug_kmap_atomic(enum km_type type)
197{
198}
199
200#endif
201
202#endif /* _LINUX_HIGHMEM_H */ 201#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index bd37078c2d7d..0d2f7c8a33d6 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -336,6 +336,11 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
336 const enum hrtimer_mode mode); 336 const enum hrtimer_mode mode);
337extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, 337extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
338 unsigned long range_ns, const enum hrtimer_mode mode); 338 unsigned long range_ns, const enum hrtimer_mode mode);
339extern int
340__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
341 unsigned long delta_ns,
342 const enum hrtimer_mode mode, int wakeup);
343
339extern int hrtimer_cancel(struct hrtimer *timer); 344extern int hrtimer_cancel(struct hrtimer *timer);
340extern int hrtimer_try_to_cancel(struct hrtimer *timer); 345extern int hrtimer_try_to_cancel(struct hrtimer *timer);
341 346
diff --git a/include/linux/i2c-algo-sgi.h b/include/linux/i2c-algo-sgi.h
deleted file mode 100644
index 3b7715024e69..000000000000
--- a/include/linux/i2c-algo-sgi.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License version 2 as published by the Free Software Foundation.
4 *
5 * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
6 */
7
8#ifndef I2C_ALGO_SGI_H
9#define I2C_ALGO_SGI_H 1
10
11#include <linux/i2c.h>
12
13struct i2c_algo_sgi_data {
14 void *data; /* private data for lowlevel routines */
15 unsigned (*getctrl)(void *data);
16 void (*setctrl)(void *data, unsigned val);
17 unsigned (*rdata)(void *data);
18 void (*wdata)(void *data, unsigned val);
19
20 int xfer_timeout;
21 int ack_timeout;
22};
23
24int i2c_sgi_add_bus(struct i2c_adapter *);
25
26#endif /* I2C_ALGO_SGI_H */
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h
index f27604af8378..c9087de5c6c6 100644
--- a/include/linux/i2c-id.h
+++ b/include/linux/i2c-id.h
@@ -33,47 +33,10 @@
33 33
34#define I2C_DRIVERID_MSP3400 1 34#define I2C_DRIVERID_MSP3400 1
35#define I2C_DRIVERID_TUNER 2 35#define I2C_DRIVERID_TUNER 2
36#define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */
37#define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */
38#define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */
39#define I2C_DRIVERID_SAA7111A 8 /* video input processor */
40#define I2C_DRIVERID_SAA7185B 13 /* video encoder */
41#define I2C_DRIVERID_SAA7110 22 /* video decoder */
42#define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */
43#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ 36#define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */
44#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ 37#define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */
45#define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */
46#define I2C_DRIVERID_BT819 40 /* video decoder */
47#define I2C_DRIVERID_BT856 41 /* video encoder */
48#define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */
49#define I2C_DRIVERID_ADV7175 48 /* ADV 7175/7176 video encoder */
50#define I2C_DRIVERID_SAA7114 49 /* video decoder */
51#define I2C_DRIVERID_ADV7170 54 /* video encoder */
52#define I2C_DRIVERID_SAA7191 57 /* video decoder */
53#define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */
54#define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. */
55#define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */
56#define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */
57#define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */
58#define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */
59#define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */
60#define I2C_DRIVERID_SAA7127 72 /* saa7127 video encoder */
61#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ 38#define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */
62#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */ 39#define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */
63#define I2C_DRIVERID_TVP5150 76 /* TVP5150 video decoder */
64#define I2C_DRIVERID_WM8739 77 /* wm8739 audio processor */
65#define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */
66#define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */
67#define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */
68#define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */
69#define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */
70#define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */
71#define I2C_DRIVERID_VP27SMPX 93 /* Panasonic VP27s tuner internal MPX */
72#define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */
73#define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */
74#define I2C_DRIVERID_AU8522 97 /* Auvitek au8522 */
75
76#define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */
77 40
78/* 41/*
79 * ---- Adapter types ---------------------------------------------------- 42 * ---- Adapter types ----------------------------------------------------
@@ -88,6 +51,7 @@
88#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */ 51#define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */
89#define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ 52#define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */
90#define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ 53#define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */
54#define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */
91#define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */ 55#define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */
92 56
93/* --- SGI adapters */ 57/* --- SGI adapters */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index c86c3b07604c..00ee11eb9092 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -353,8 +353,8 @@ struct i2c_adapter {
353 void *algo_data; 353 void *algo_data;
354 354
355 /* --- administration stuff. */ 355 /* --- administration stuff. */
356 int (*client_register)(struct i2c_client *); 356 int (*client_register)(struct i2c_client *) __deprecated;
357 int (*client_unregister)(struct i2c_client *); 357 int (*client_unregister)(struct i2c_client *) __deprecated;
358 358
359 /* data fields that are valid for all devices */ 359 /* data fields that are valid for all devices */
360 u8 level; /* nesting level for lockdep */ 360 u8 level; /* nesting level for lockdep */
diff --git a/include/linux/i2c/s6000.h b/include/linux/i2c/s6000.h
new file mode 100644
index 000000000000..d9b34bfdae76
--- /dev/null
+++ b/include/linux/i2c/s6000.h
@@ -0,0 +1,10 @@
1#ifndef __LINUX_I2C_S6000_H
2#define __LINUX_I2C_S6000_H
3
4struct s6_i2c_platform_data {
5 const char *clock; /* the clock to use */
6 int bus_num; /* the bus number to register */
7};
8
9#endif
10
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h
index 8137f660a5cc..0dc80ef24975 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl4030.h
@@ -218,6 +218,53 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
218 218
219/*----------------------------------------------------------------------*/ 219/*----------------------------------------------------------------------*/
220 220
221/* Power bus message definitions */
222
223#define DEV_GRP_NULL 0x0
224#define DEV_GRP_P1 0x1
225#define DEV_GRP_P2 0x2
226#define DEV_GRP_P3 0x4
227
228#define RES_GRP_RES 0x0
229#define RES_GRP_PP 0x1
230#define RES_GRP_RC 0x2
231#define RES_GRP_PP_RC 0x3
232#define RES_GRP_PR 0x4
233#define RES_GRP_PP_PR 0x5
234#define RES_GRP_RC_PR 0x6
235#define RES_GRP_ALL 0x7
236
237#define RES_TYPE2_R0 0x0
238
239#define RES_TYPE_ALL 0x7
240
241#define RES_STATE_WRST 0xF
242#define RES_STATE_ACTIVE 0xE
243#define RES_STATE_SLEEP 0x8
244#define RES_STATE_OFF 0x0
245
246/*
247 * Power Bus Message Format ... these can be sent individually by Linux,
248 * but are usually part of downloaded scripts that are run when various
249 * power events are triggered.
250 *
251 * Broadcast Message (16 Bits):
252 * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4]
253 * RES_STATE[3:0]
254 *
255 * Singular Message (16 Bits):
256 * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0]
257 */
258
259#define MSG_BROADCAST(devgrp, grp, type, type2, state) \
260 ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \
261 | (type) << 4 | (state))
262
263#define MSG_SINGULAR(devgrp, id, state) \
264 ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
265
266/*----------------------------------------------------------------------*/
267
221struct twl4030_bci_platform_data { 268struct twl4030_bci_platform_data {
222 int *battery_tmp_tbl; 269 int *battery_tmp_tbl;
223 unsigned int tblsize; 270 unsigned int tblsize;
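As a worked example of the power-bus message encoding described above (the resource ID is made up; only the macros and bit layout come from the header), a singular message putting resource 9 of processor group P1 into the SLEEP state would be built as:

#include <linux/i2c/twl4030.h>
#include <linux/types.h>

static u16 example_sleep_message(void)
{
	/* DEV_GRP_P1 (0x1) << 13 | 0 << 12 | 9 << 4 | RES_STATE_SLEEP (0x8)
	 * = 0x2000 | 0x0090 | 0x0008 = 0x2098 */
	return MSG_SINGULAR(DEV_GRP_P1, 9, RES_STATE_SLEEP);
}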
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index af1de95e711e..dcfb93337e9a 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -5,6 +5,7 @@
5#include <linux/irqflags.h> 5#include <linux/irqflags.h>
6#include <linux/utsname.h> 6#include <linux/utsname.h>
7#include <linux/lockdep.h> 7#include <linux/lockdep.h>
8#include <linux/ftrace.h>
8#include <linux/ipc.h> 9#include <linux/ipc.h>
9#include <linux/pid_namespace.h> 10#include <linux/pid_namespace.h>
10#include <linux/user_namespace.h> 11#include <linux/user_namespace.h>
@@ -185,6 +186,7 @@ extern struct cred init_cred;
185 INIT_IDS \ 186 INIT_IDS \
186 INIT_TRACE_IRQFLAGS \ 187 INIT_TRACE_IRQFLAGS \
187 INIT_LOCKDEP \ 188 INIT_LOCKDEP \
189 INIT_FTRACE_GRAPH \
188} 190}
189 191
190 192
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 1d6c71d96ede..aa8c53171233 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
123#define ecap_eim_support(e) ((e >> 4) & 0x1) 123#define ecap_eim_support(e) ((e >> 4) & 0x1)
124#define ecap_ir_support(e) ((e >> 3) & 0x1) 124#define ecap_ir_support(e) ((e >> 3) & 0x1)
125#define ecap_max_handle_mask(e) ((e >> 20) & 0xf) 125#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
126 126#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
127 127
128/* IOTLB_REG */ 128/* IOTLB_REG */
129#define DMA_TLB_FLUSH_GRANU_OFFSET 60 129#define DMA_TLB_FLUSH_GRANU_OFFSET 60
@@ -164,6 +164,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
164#define DMA_GCMD_QIE (((u32)1) << 26) 164#define DMA_GCMD_QIE (((u32)1) << 26)
165#define DMA_GCMD_SIRTP (((u32)1) << 24) 165#define DMA_GCMD_SIRTP (((u32)1) << 24)
166#define DMA_GCMD_IRE (((u32) 1) << 25) 166#define DMA_GCMD_IRE (((u32) 1) << 25)
167#define DMA_GCMD_CFI (((u32) 1) << 23)
167 168
168/* GSTS_REG */ 169/* GSTS_REG */
169#define DMA_GSTS_TES (((u32)1) << 31) 170#define DMA_GSTS_TES (((u32)1) << 31)
@@ -174,6 +175,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
174#define DMA_GSTS_QIES (((u32)1) << 26) 175#define DMA_GSTS_QIES (((u32)1) << 26)
175#define DMA_GSTS_IRTPS (((u32)1) << 24) 176#define DMA_GSTS_IRTPS (((u32)1) << 24)
176#define DMA_GSTS_IRES (((u32)1) << 25) 177#define DMA_GSTS_IRES (((u32)1) << 25)
178#define DMA_GSTS_CFIS (((u32)1) << 23)
177 179
178/* CCMD_REG */ 180/* CCMD_REG */
179#define DMA_CCMD_ICC (((u64)1) << 63) 181#define DMA_CCMD_ICC (((u64)1) << 63)
@@ -284,6 +286,14 @@ struct iommu_flush {
284 unsigned int size_order, u64 type, int non_present_entry_flush); 286 unsigned int size_order, u64 type, int non_present_entry_flush);
285}; 287};
286 288
289enum {
290 SR_DMAR_FECTL_REG,
291 SR_DMAR_FEDATA_REG,
292 SR_DMAR_FEADDR_REG,
293 SR_DMAR_FEUADDR_REG,
294 MAX_SR_DMAR_REGS
295};
296
287struct intel_iommu { 297struct intel_iommu {
288 void __iomem *reg; /* Pointer to hardware regs, virtual addr */ 298 void __iomem *reg; /* Pointer to hardware regs, virtual addr */
289 u64 cap; 299 u64 cap;
@@ -304,6 +314,8 @@ struct intel_iommu {
304 struct iommu_flush flush; 314 struct iommu_flush flush;
305#endif 315#endif
306 struct q_inval *qi; /* Queued invalidation info */ 316 struct q_inval *qi; /* Queued invalidation info */
317 u32 *iommu_state; /* Store iommu states between suspend and resume.*/
318
307#ifdef CONFIG_INTR_REMAP 319#ifdef CONFIG_INTR_REMAP
308 struct ir_table *ir_table; /* Interrupt remapping info */ 320 struct ir_table *ir_table; /* Interrupt remapping info */
309#endif 321#endif
@@ -322,6 +334,7 @@ extern int alloc_iommu(struct dmar_drhd_unit *drhd);
322extern void free_iommu(struct intel_iommu *iommu); 334extern void free_iommu(struct intel_iommu *iommu);
323extern int dmar_enable_qi(struct intel_iommu *iommu); 335extern int dmar_enable_qi(struct intel_iommu *iommu);
324extern void dmar_disable_qi(struct intel_iommu *iommu); 336extern void dmar_disable_qi(struct intel_iommu *iommu);
337extern int dmar_reenable_qi(struct intel_iommu *iommu);
325extern void qi_global_iec(struct intel_iommu *iommu); 338extern void qi_global_iec(struct intel_iommu *iommu);
326 339
327extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, 340extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c68bffd182bb..91bb76f44f14 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -59,6 +59,18 @@
59#define IRQF_NOBALANCING 0x00000800 59#define IRQF_NOBALANCING 0x00000800
60#define IRQF_IRQPOLL 0x00001000 60#define IRQF_IRQPOLL 0x00001000
61 61
62/*
63 * Bits used by threaded handlers:
64 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
65 * IRQTF_DIED - handler thread died
66 * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
67 */
68enum {
69 IRQTF_RUNTHREAD,
70 IRQTF_DIED,
71 IRQTF_WARNED,
72};
73
62typedef irqreturn_t (*irq_handler_t)(int, void *); 74typedef irqreturn_t (*irq_handler_t)(int, void *);
63 75
64/** 76/**
@@ -71,6 +83,9 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
71 * @next: pointer to the next irqaction for shared interrupts 83 * @next: pointer to the next irqaction for shared interrupts
72 * @irq: interrupt number 84 * @irq: interrupt number
73 * @dir: pointer to the proc/irq/NN/name entry 85 * @dir: pointer to the proc/irq/NN/name entry
 86 * @thread_fn: interrupt handler function for threaded interrupts
87 * @thread: thread pointer for threaded interrupts
88 * @thread_flags: flags related to @thread
74 */ 89 */
75struct irqaction { 90struct irqaction {
76 irq_handler_t handler; 91 irq_handler_t handler;
@@ -81,18 +96,68 @@ struct irqaction {
81 struct irqaction *next; 96 struct irqaction *next;
82 int irq; 97 int irq;
83 struct proc_dir_entry *dir; 98 struct proc_dir_entry *dir;
99 irq_handler_t thread_fn;
100 struct task_struct *thread;
101 unsigned long thread_flags;
84}; 102};
85 103
86extern irqreturn_t no_action(int cpl, void *dev_id); 104extern irqreturn_t no_action(int cpl, void *dev_id);
87extern int __must_check request_irq(unsigned int, irq_handler_t handler, 105
88 unsigned long, const char *, void *); 106#ifdef CONFIG_GENERIC_HARDIRQS
107extern int __must_check
108request_threaded_irq(unsigned int irq, irq_handler_t handler,
109 irq_handler_t thread_fn,
110 unsigned long flags, const char *name, void *dev);
111
112static inline int __must_check
113request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
114 const char *name, void *dev)
115{
116 return request_threaded_irq(irq, handler, NULL, flags, name, dev);
117}
118
119extern void exit_irq_thread(void);
120#else
121
122extern int __must_check
123request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
124 const char *name, void *dev);
125
126/*
127 * Special function to avoid ifdeffery in kernel/irq/devres.c which
128 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
129 * m68k). I really love these $@%#!* obvious Makefile references:
130 * ../../../kernel/irq/devres.o
131 */
132static inline int __must_check
133request_threaded_irq(unsigned int irq, irq_handler_t handler,
134 irq_handler_t thread_fn,
135 unsigned long flags, const char *name, void *dev)
136{
137 return request_irq(irq, handler, flags, name, dev);
138}
139
140static inline void exit_irq_thread(void) { }
141#endif
142
89extern void free_irq(unsigned int, void *); 143extern void free_irq(unsigned int, void *);
90 144
91struct device; 145struct device;
92 146
93extern int __must_check devm_request_irq(struct device *dev, unsigned int irq, 147extern int __must_check
94 irq_handler_t handler, unsigned long irqflags, 148devm_request_threaded_irq(struct device *dev, unsigned int irq,
95 const char *devname, void *dev_id); 149 irq_handler_t handler, irq_handler_t thread_fn,
150 unsigned long irqflags, const char *devname,
151 void *dev_id);
152
153static inline int __must_check
154devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
155 unsigned long irqflags, const char *devname, void *dev_id)
156{
157 return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
158 devname, dev_id);
159}
160
96extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); 161extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
97 162
98/* 163/*
@@ -278,6 +343,11 @@ enum
278 NR_SOFTIRQS 343 NR_SOFTIRQS
279}; 344};
280 345
346/* map softirq index to softirq name. update 'softirq_to_name' in
347 * kernel/softirq.c when adding a new softirq.
348 */
349extern char *softirq_to_name[NR_SOFTIRQS];
350
281/* softirq mask and active fields moved to irq_cpustat_t in 351/* softirq mask and active fields moved to irq_cpustat_t in
282 * asm/hardirq.h to get better cache usage. KAO 352 * asm/hardirq.h to get better cache usage. KAO
283 */ 353 */
@@ -294,6 +364,7 @@ extern void softirq_init(void);
294#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) 364#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
295extern void raise_softirq_irqoff(unsigned int nr); 365extern void raise_softirq_irqoff(unsigned int nr);
296extern void raise_softirq(unsigned int nr); 366extern void raise_softirq(unsigned int nr);
367extern void wakeup_softirqd(void);
297 368
298/* This is the worklist that queues up per-cpu softirq work. 369/* This is the worklist that queues up per-cpu softirq work.
299 * 370 *
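
request_threaded_irq() splits servicing between a minimal hard-IRQ handler and a sleepable handler thread that is woken when the hard handler returns IRQ_WAKE_THREAD. A minimal sketch of the driver-side pattern; struct my_dev and its setup function are invented for illustration.

#include <linux/interrupt.h>

struct my_dev {
        int irq;
};

static irqreturn_t my_quick_check(int irq, void *dev_id)
{
        /* hard-IRQ context: keep it short, defer the real work */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
        struct my_dev *dev = dev_id;

        /* process context: may sleep, take mutexes, talk over I2C, ... */
        (void)dev;
        return IRQ_HANDLED;
}

static int my_dev_setup_irq(struct my_dev *dev)
{
        return request_threaded_irq(dev->irq, my_quick_check, my_thread_fn,
                                    0, "my_dev", dev);
}
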
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8a7bfb1b6ca0..3af4ffd591b9 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -21,6 +21,7 @@
21 21
22#define IOMMU_READ (1) 22#define IOMMU_READ (1)
23#define IOMMU_WRITE (2) 23#define IOMMU_WRITE (2)
24#define IOMMU_CACHE (4) /* DMA cache coherency */
24 25
25struct device; 26struct device;
26 27
@@ -28,6 +29,8 @@ struct iommu_domain {
28 void *priv; 29 void *priv;
29}; 30};
30 31
32#define IOMMU_CAP_CACHE_COHERENCY 0x1
33
31struct iommu_ops { 34struct iommu_ops {
32 int (*domain_init)(struct iommu_domain *domain); 35 int (*domain_init)(struct iommu_domain *domain);
33 void (*domain_destroy)(struct iommu_domain *domain); 36 void (*domain_destroy)(struct iommu_domain *domain);
@@ -39,6 +42,8 @@ struct iommu_ops {
39 size_t size); 42 size_t size);
40 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, 43 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
41 unsigned long iova); 44 unsigned long iova);
45 int (*domain_has_cap)(struct iommu_domain *domain,
46 unsigned long cap);
42}; 47};
43 48
44#ifdef CONFIG_IOMMU_API 49#ifdef CONFIG_IOMMU_API
@@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
57 size_t size); 62 size_t size);
58extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, 63extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
59 unsigned long iova); 64 unsigned long iova);
65extern int iommu_domain_has_cap(struct iommu_domain *domain,
66 unsigned long cap);
60 67
61#else /* CONFIG_IOMMU_API */ 68#else /* CONFIG_IOMMU_API */
62 69
@@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
107 return 0; 114 return 0;
108} 115}
109 116
 117static inline int iommu_domain_has_cap(struct iommu_domain *domain,
118 unsigned long cap)
119{
120 return 0;
121}
122
110#endif /* CONFIG_IOMMU_API */ 123#endif /* CONFIG_IOMMU_API */
111 124
112#endif /* __LINUX_IOMMU_H */ 125#endif /* __LINUX_IOMMU_H */
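
iommu_domain_has_cap() lets a user such as device-assignment code ask whether the hardware supports snoop control before mapping with IOMMU_CACHE. A hedged sketch of the intended call pattern, assuming the iommu_map_range() prototype (domain, iova, paddr, size, prot) declared elsewhere in this header.

#include <linux/iommu.h>

static int map_coherently(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size)
{
        int prot = IOMMU_READ | IOMMU_WRITE;

        if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
                prot |= IOMMU_CACHE;    /* hardware snoops, allow cached mappings */

        return iommu_map_range(domain, iova, paddr, size, prot);
}
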
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index ea330f9e7100..3bf40e246a80 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -25,7 +25,7 @@ struct ipc_ids {
25}; 25};
26 26
27struct ipc_namespace { 27struct ipc_namespace {
28 struct kref kref; 28 atomic_t count;
29 struct ipc_ids ids[3]; 29 struct ipc_ids ids[3];
30 30
31 int sem_ctls[4]; 31 int sem_ctls[4];
@@ -44,25 +44,57 @@ struct ipc_namespace {
44 int shm_tot; 44 int shm_tot;
45 45
46 struct notifier_block ipcns_nb; 46 struct notifier_block ipcns_nb;
47
48 /* The kern_mount of the mqueuefs sb. We take a ref on it */
49 struct vfsmount *mq_mnt;
50
51 /* # queues in this ns, protected by mq_lock */
52 unsigned int mq_queues_count;
53
54 /* next fields are set through sysctl */
55 unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */
56 unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */
57 unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */
58
47}; 59};
48 60
49extern struct ipc_namespace init_ipc_ns; 61extern struct ipc_namespace init_ipc_ns;
50extern atomic_t nr_ipc_ns; 62extern atomic_t nr_ipc_ns;
51 63
52#ifdef CONFIG_SYSVIPC 64extern spinlock_t mq_lock;
65#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
53#define INIT_IPC_NS(ns) .ns = &init_ipc_ns, 66#define INIT_IPC_NS(ns) .ns = &init_ipc_ns,
67#else
68#define INIT_IPC_NS(ns)
69#endif
54 70
71#ifdef CONFIG_SYSVIPC
55extern int register_ipcns_notifier(struct ipc_namespace *); 72extern int register_ipcns_notifier(struct ipc_namespace *);
56extern int cond_register_ipcns_notifier(struct ipc_namespace *); 73extern int cond_register_ipcns_notifier(struct ipc_namespace *);
57extern void unregister_ipcns_notifier(struct ipc_namespace *); 74extern void unregister_ipcns_notifier(struct ipc_namespace *);
58extern int ipcns_notify(unsigned long); 75extern int ipcns_notify(unsigned long);
59
60#else /* CONFIG_SYSVIPC */ 76#else /* CONFIG_SYSVIPC */
61#define INIT_IPC_NS(ns) 77static inline int register_ipcns_notifier(struct ipc_namespace *ns)
78{ return 0; }
79static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns)
80{ return 0; }
81static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { }
82static inline int ipcns_notify(unsigned long l) { return 0; }
62#endif /* CONFIG_SYSVIPC */ 83#endif /* CONFIG_SYSVIPC */
63 84
64#if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS) 85#ifdef CONFIG_POSIX_MQUEUE
65extern void free_ipc_ns(struct kref *kref); 86extern int mq_init_ns(struct ipc_namespace *ns);
87/* default values */
88#define DFLT_QUEUESMAX 256 /* max number of message queues */
89#define DFLT_MSGMAX 10 /* max number of messages in each queue */
90#define HARD_MSGMAX (131072/sizeof(void *))
91#define DFLT_MSGSIZEMAX 8192 /* max message size */
92#else
93static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
94#endif
95
96#if defined(CONFIG_IPC_NS)
97extern void free_ipc_ns(struct ipc_namespace *ns);
66extern struct ipc_namespace *copy_ipcs(unsigned long flags, 98extern struct ipc_namespace *copy_ipcs(unsigned long flags,
67 struct ipc_namespace *ns); 99 struct ipc_namespace *ns);
68extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, 100extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
@@ -72,14 +104,11 @@ extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
72static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) 104static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns)
73{ 105{
74 if (ns) 106 if (ns)
75 kref_get(&ns->kref); 107 atomic_inc(&ns->count);
76 return ns; 108 return ns;
77} 109}
78 110
79static inline void put_ipc_ns(struct ipc_namespace *ns) 111extern void put_ipc_ns(struct ipc_namespace *ns);
80{
81 kref_put(&ns->kref, free_ipc_ns);
82}
83#else 112#else
84static inline struct ipc_namespace *copy_ipcs(unsigned long flags, 113static inline struct ipc_namespace *copy_ipcs(unsigned long flags,
85 struct ipc_namespace *ns) 114 struct ipc_namespace *ns)
@@ -99,4 +128,18 @@ static inline void put_ipc_ns(struct ipc_namespace *ns)
99{ 128{
100} 129}
101#endif 130#endif
131
132#ifdef CONFIG_POSIX_MQUEUE_SYSCTL
133
134struct ctl_table_header;
135extern struct ctl_table_header *mq_register_sysctl_table(void);
136
137#else /* CONFIG_POSIX_MQUEUE_SYSCTL */
138
139static inline struct ctl_table_header *mq_register_sysctl_table(void)
140{
141 return NULL;
142}
143
144#endif /* CONFIG_POSIX_MQUEUE_SYSCTL */
102#endif 145#endif
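
With the kref replaced by a bare atomic_t, get_ipc_ns() bumps ns->count directly and the now out-of-line put_ipc_ns() drops it, potentially freeing the namespace together with its mqueue mount. A minimal usage sketch:

#include <linux/ipc_namespace.h>

static void example_hold_ns(struct ipc_namespace *ns)
{
        ns = get_ipc_ns(ns);    /* atomic_inc(&ns->count) */

        /* ... use the namespace from a context that may outlive the caller ... */

        put_ipc_ns(ns);         /* may free the namespace and its mq_mnt */
}
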
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 99d147efe399..b7cbeed972e4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -22,6 +22,7 @@
22#include <linux/irqnr.h> 22#include <linux/irqnr.h>
23#include <linux/errno.h> 23#include <linux/errno.h>
24#include <linux/topology.h> 24#include <linux/topology.h>
25#include <linux/wait.h>
25 26
26#include <asm/irq.h> 27#include <asm/irq.h>
27#include <asm/ptrace.h> 28#include <asm/ptrace.h>
@@ -158,6 +159,8 @@ struct irq_2_iommu;
158 * @affinity: IRQ affinity on SMP 159 * @affinity: IRQ affinity on SMP
159 * @cpu: cpu index useful for balancing 160 * @cpu: cpu index useful for balancing
160 * @pending_mask: pending rebalanced interrupts 161 * @pending_mask: pending rebalanced interrupts
162 * @threads_active: number of irqaction threads currently running
163 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
161 * @dir: /proc/irq/ procfs entry 164 * @dir: /proc/irq/ procfs entry
162 * @name: flow handler name for /proc/interrupts output 165 * @name: flow handler name for /proc/interrupts output
163 */ 166 */
@@ -189,6 +192,8 @@ struct irq_desc {
189 cpumask_var_t pending_mask; 192 cpumask_var_t pending_mask;
190#endif 193#endif
191#endif 194#endif
195 atomic_t threads_active;
196 wait_queue_head_t wait_for_threads;
192#ifdef CONFIG_PROC_FS 197#ifdef CONFIG_PROC_FS
193 struct proc_dir_entry *dir; 198 struct proc_dir_entry *dir;
194#endif 199#endif
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 74bde13224c9..b02a3f1d46a0 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -24,8 +24,8 @@
24# define trace_softirqs_enabled(p) ((p)->softirqs_enabled) 24# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
25# define trace_hardirq_enter() do { current->hardirq_context++; } while (0) 25# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
26# define trace_hardirq_exit() do { current->hardirq_context--; } while (0) 26# define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
27# define trace_softirq_enter() do { current->softirq_context++; } while (0) 27# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
28# define trace_softirq_exit() do { current->softirq_context--; } while (0) 28# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
29# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, 29# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
30#else 30#else
31# define trace_hardirqs_on() do { } while (0) 31# define trace_hardirqs_on() do { } while (0)
@@ -38,8 +38,8 @@
38# define trace_softirqs_enabled(p) 0 38# define trace_softirqs_enabled(p) 0
39# define trace_hardirq_enter() do { } while (0) 39# define trace_hardirq_enter() do { } while (0)
40# define trace_hardirq_exit() do { } while (0) 40# define trace_hardirq_exit() do { } while (0)
41# define trace_softirq_enter() do { } while (0) 41# define lockdep_softirq_enter() do { } while (0)
42# define trace_softirq_exit() do { } while (0) 42# define lockdep_softirq_exit() do { } while (0)
43# define INIT_TRACE_IRQFLAGS 43# define INIT_TRACE_IRQFLAGS
44#endif 44#endif
45 45
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index c5584ca5b8c9..819acaaac3f5 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -5,10 +5,12 @@
5 * enum irqreturn 5 * enum irqreturn
6 * @IRQ_NONE interrupt was not from this device 6 * @IRQ_NONE interrupt was not from this device
7 * @IRQ_HANDLED interrupt was handled by this device 7 * @IRQ_HANDLED interrupt was handled by this device
8 * @IRQ_WAKE_THREAD handler requests to wake the handler thread
8 */ 9 */
9enum irqreturn { 10enum irqreturn {
10 IRQ_NONE, 11 IRQ_NONE,
11 IRQ_HANDLED, 12 IRQ_HANDLED,
13 IRQ_WAKE_THREAD,
12}; 14};
13 15
14typedef enum irqreturn irqreturn_t; 16typedef enum irqreturn irqreturn_t;
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 64246dce5663..53ae4399da2d 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -35,7 +35,7 @@
35#define journal_oom_retry 1 35#define journal_oom_retry 1
36 36
37/* 37/*
38 * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds 38 * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds
39 * certain classes of error which can occur due to failed IOs. Under 39 * certain classes of error which can occur due to failed IOs. Under
40 * normal use we want ext3 to continue after such errors, because 40 * normal use we want ext3 to continue after such errors, because
41 * hardware _can_ fail, but for debugging purposes when running tests on 41 * hardware _can_ fail, but for debugging purposes when running tests on
@@ -552,6 +552,11 @@ struct transaction_s
552 */ 552 */
553 int t_handle_count; 553 int t_handle_count;
554 554
555 /*
556 * This transaction is being forced and some process is
557 * waiting for it to finish.
558 */
559 int t_synchronous_commit:1;
555}; 560};
556 561
557/** 562/**
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index f3fe34391d8e..792274269f2b 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -13,10 +13,17 @@
13#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ 13#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
14 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) 14 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
15 15
16struct module;
17
16#ifdef CONFIG_KALLSYMS 18#ifdef CONFIG_KALLSYMS
17/* Lookup the address for a symbol. Returns 0 if not found. */ 19/* Lookup the address for a symbol. Returns 0 if not found. */
18unsigned long kallsyms_lookup_name(const char *name); 20unsigned long kallsyms_lookup_name(const char *name);
19 21
22/* Call a function on each kallsyms symbol in the core kernel */
23int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
24 unsigned long),
25 void *data);
26
20extern int kallsyms_lookup_size_offset(unsigned long addr, 27extern int kallsyms_lookup_size_offset(unsigned long addr,
21 unsigned long *symbolsize, 28 unsigned long *symbolsize,
22 unsigned long *offset); 29 unsigned long *offset);
@@ -43,6 +50,14 @@ static inline unsigned long kallsyms_lookup_name(const char *name)
43 return 0; 50 return 0;
44} 51}
45 52
53static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
54 struct module *,
55 unsigned long),
56 void *data)
57{
58 return 0;
59}
60
46static inline int kallsyms_lookup_size_offset(unsigned long addr, 61static inline int kallsyms_lookup_size_offset(unsigned long addr,
47 unsigned long *symbolsize, 62 unsigned long *symbolsize,
48 unsigned long *offset) 63 unsigned long *offset)
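
kallsyms_on_each_symbol() walks every symbol in the core kernel and hands each one to a callback; a non-zero return stops the walk. A small sketch that counts symbols with a given prefix (the prefix itself is arbitrary):

#include <linux/kallsyms.h>
#include <linux/string.h>

static int count_prefixed(void *data, const char *name, struct module *mod,
                          unsigned long addr)
{
        unsigned long *count = data;

        if (strncmp(name, "sys_", 4) == 0)
                (*count)++;
        return 0;       /* keep iterating */
}

static unsigned long count_sys_symbols(void)
{
        unsigned long count = 0;

        kallsyms_on_each_symbol(count_prefixed, &count);
        return count;
}
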
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 556d781e69fe..d9e75ec7def5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -242,6 +242,19 @@ extern struct ratelimit_state printk_ratelimit_state;
242extern int printk_ratelimit(void); 242extern int printk_ratelimit(void);
243extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, 243extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
244 unsigned int interval_msec); 244 unsigned int interval_msec);
245
246/*
247 * Print a one-time message (analogous to WARN_ONCE() et al):
248 */
249#define printk_once(x...) ({ \
250 static int __print_once = 1; \
251 \
252 if (__print_once) { \
253 __print_once = 0; \
254 printk(x); \
255 } \
256})
257
245void log_buf_kexec_setup(void); 258void log_buf_kexec_setup(void);
246#else 259#else
247static inline int vprintk(const char *s, va_list args) 260static inline int vprintk(const char *s, va_list args)
@@ -254,6 +267,10 @@ static inline int printk_ratelimit(void) { return 0; }
254static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ 267static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \
255 unsigned int interval_msec) \ 268 unsigned int interval_msec) \
256 { return false; } 269 { return false; }
270
271/* No effect, but we still get type checking even in the !PRINTK case: */
272#define printk_once(x...) printk(x)
273
257static inline void log_buf_kexec_setup(void) 274static inline void log_buf_kexec_setup(void)
258{ 275{
259} 276}
@@ -375,6 +392,139 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
375#endif 392#endif
376 393
377/* 394/*
395 * General tracing related utility functions - trace_printk(),
396 * tracing_on/tracing_off and tracing_start()/tracing_stop
397 *
398 * Use tracing_on/tracing_off when you want to quickly turn on or off
399 * tracing. It simply enables or disables the recording of the trace events.
400 * This also corresponds to the user space debugfs/tracing/tracing_on
401 * file, which gives a means for the kernel and userspace to interact.
402 * Place a tracing_off() in the kernel where you want tracing to end.
403 * From user space, examine the trace, and then echo 1 > tracing_on
404 * to continue tracing.
405 *
406 * tracing_stop/tracing_start has slightly more overhead. It is used
407 * by things like suspend to ram where disabling the recording of the
408 * trace is not enough, but tracing must actually stop because things
409 * like calling smp_processor_id() may crash the system.
410 *
411 * Most likely, you want to use tracing_on/tracing_off.
412 */
413#ifdef CONFIG_RING_BUFFER
414void tracing_on(void);
415void tracing_off(void);
416/* trace_off_permanent stops recording with no way to bring it back */
417void tracing_off_permanent(void);
418int tracing_is_on(void);
419#else
420static inline void tracing_on(void) { }
421static inline void tracing_off(void) { }
422static inline void tracing_off_permanent(void) { }
423static inline int tracing_is_on(void) { return 0; }
424#endif
425#ifdef CONFIG_TRACING
426extern void tracing_start(void);
427extern void tracing_stop(void);
428extern void ftrace_off_permanent(void);
429
430extern void
431ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
432
433static inline void __attribute__ ((format (printf, 1, 2)))
434____trace_printk_check_format(const char *fmt, ...)
435{
436}
437#define __trace_printk_check_format(fmt, args...) \
438do { \
439 if (0) \
440 ____trace_printk_check_format(fmt, ##args); \
441} while (0)
442
443/**
444 * trace_printk - printf formatting in the ftrace buffer
445 * @fmt: the printf format for printing
446 *
447 * Note: __trace_printk is an internal function for trace_printk and
448 * the @ip is passed in via the trace_printk macro.
449 *
450 * This function allows a kernel developer to debug fast path sections
451 * that printk is not appropriate for. By scattering in various
452 * printk like tracing in the code, a developer can quickly see
453 * where problems are occurring.
454 *
455 * This is intended as a debugging tool for the developer only.
456 * Please refrain from leaving trace_printks scattered around in
457 * your code.
458 */
459
460#define trace_printk(fmt, args...) \
461do { \
462 __trace_printk_check_format(fmt, ##args); \
463 if (__builtin_constant_p(fmt)) { \
464 static const char *trace_printk_fmt \
465 __attribute__((section("__trace_printk_fmt"))) = \
466 __builtin_constant_p(fmt) ? fmt : NULL; \
467 \
468 __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \
469 } else \
470 __trace_printk(_THIS_IP_, fmt, ##args); \
471} while (0)
472
473extern int
474__trace_bprintk(unsigned long ip, const char *fmt, ...)
475 __attribute__ ((format (printf, 2, 3)));
476
477extern int
478__trace_printk(unsigned long ip, const char *fmt, ...)
479 __attribute__ ((format (printf, 2, 3)));
480
481/*
482 * The double __builtin_constant_p is because gcc will give us an error
483 * if we try to allocate the static variable to fmt if it is not a
484 * constant. Even with the outer if statement.
485 */
486#define ftrace_vprintk(fmt, vargs) \
487do { \
488 if (__builtin_constant_p(fmt)) { \
489 static const char *trace_printk_fmt \
490 __attribute__((section("__trace_printk_fmt"))) = \
491 __builtin_constant_p(fmt) ? fmt : NULL; \
492 \
493 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
494 } else \
495 __ftrace_vprintk(_THIS_IP_, fmt, vargs); \
496} while (0)
497
498extern int
499__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap);
500
501extern int
502__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap);
503
504extern void ftrace_dump(void);
505#else
506static inline void
507ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
508static inline int
509trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
510
511static inline void tracing_start(void) { }
512static inline void tracing_stop(void) { }
513static inline void ftrace_off_permanent(void) { }
514static inline int
515trace_printk(const char *fmt, ...)
516{
517 return 0;
518}
519static inline int
520ftrace_vprintk(const char *fmt, va_list ap)
521{
522 return 0;
523}
524static inline void ftrace_dump(void) { }
525#endif /* CONFIG_TRACING */
526
527/*
378 * Display an IP address in readable format. 528 * Display an IP address in readable format.
379 */ 529 */
380 530
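
printk_once() and trace_printk() target two different problems: the former suppresses repeats of a message that is only interesting the first time, the latter records into the ftrace ring buffer cheaply enough for fast paths. A combined sketch; do_fast_path() is an invented function.

#include <linux/kernel.h>

static void do_fast_path(int cpu, unsigned long item)
{
        /* emitted at most once for the lifetime of the kernel */
        printk_once(KERN_INFO "fast path entered for the first time on cpu %d\n",
                    cpu);

        /* lands in the ftrace ring buffer, not the printk log */
        trace_printk("processing item %lu on cpu %d\n", item, cpu);
}
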
diff --git a/include/linux/key.h b/include/linux/key.h
index 21d32a142c00..e544f466d69a 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -20,6 +20,7 @@
20#include <linux/rbtree.h> 20#include <linux/rbtree.h>
21#include <linux/rcupdate.h> 21#include <linux/rcupdate.h>
22#include <linux/sysctl.h> 22#include <linux/sysctl.h>
23#include <linux/rwsem.h>
23#include <asm/atomic.h> 24#include <asm/atomic.h>
24 25
25#ifdef __KERNEL__ 26#ifdef __KERNEL__
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 92213a9194e1..384ca8bbf1ac 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -29,10 +29,15 @@
29#ifdef CONFIG_MODULES 29#ifdef CONFIG_MODULES
30/* modprobe exit status on success, -ve on error. Return value 30/* modprobe exit status on success, -ve on error. Return value
31 * usually useless though. */ 31 * usually useless though. */
32extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2))); 32extern int __request_module(bool wait, const char *name, ...) \
33#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) 33 __attribute__((format(printf, 2, 3)));
34#define request_module(mod...) __request_module(true, mod)
35#define request_module_nowait(mod...) __request_module(false, mod)
36#define try_then_request_module(x, mod...) \
37 ((x) ?: (__request_module(true, mod), (x)))
34#else 38#else
35static inline int request_module(const char * name, ...) { return -ENOSYS; } 39static inline int request_module(const char *name, ...) { return -ENOSYS; }
40static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; }
36#define try_then_request_module(x, mod...) (x) 41#define try_then_request_module(x, mod...) (x)
37#endif 42#endif
38 43
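
The reworked interface keeps request_module() synchronous (it waits for modprobe to exit) and adds request_module_nowait() for contexts that only want to kick off the load. A short sketch; the "foo-proto-%d" alias is made up.

#include <linux/types.h>
#include <linux/kmod.h>

static int load_protocol(int proto, bool can_wait)
{
        if (can_wait)
                return request_module("foo-proto-%d", proto);

        /* fire and forget: do not assume the module is loaded on return */
        return request_module_nowait("foo-proto-%d", proto);
}
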
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 2ec6cc14a114..bcd9c07848be 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -94,12 +94,16 @@ struct kprobe {
94 /* Called after addr is executed, unless... */ 94 /* Called after addr is executed, unless... */
95 kprobe_post_handler_t post_handler; 95 kprobe_post_handler_t post_handler;
96 96
97 /* ... called if executing addr causes a fault (eg. page fault). 97 /*
98 * Return 1 if it handled fault, otherwise kernel will see it. */ 98 * ... called if executing addr causes a fault (eg. page fault).
99 * Return 1 if it handled fault, otherwise kernel will see it.
100 */
99 kprobe_fault_handler_t fault_handler; 101 kprobe_fault_handler_t fault_handler;
100 102
101 /* ... called if breakpoint trap occurs in probe handler. 103 /*
102 * Return 1 if it handled break, otherwise kernel will see it. */ 104 * ... called if breakpoint trap occurs in probe handler.
105 * Return 1 if it handled break, otherwise kernel will see it.
106 */
103 kprobe_break_handler_t break_handler; 107 kprobe_break_handler_t break_handler;
104 108
105 /* Saved opcode (which has been replaced with breakpoint) */ 109 /* Saved opcode (which has been replaced with breakpoint) */
@@ -108,18 +112,28 @@ struct kprobe {
108 /* copy of the original instruction */ 112 /* copy of the original instruction */
109 struct arch_specific_insn ainsn; 113 struct arch_specific_insn ainsn;
110 114
111 /* Indicates various status flags. Protected by kprobe_mutex. */ 115 /*
116 * Indicates various status flags.
117 * Protected by kprobe_mutex after this kprobe is registered.
118 */
112 u32 flags; 119 u32 flags;
113}; 120};
114 121
115/* Kprobe status flags */ 122/* Kprobe status flags */
116#define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ 123#define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */
124#define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */
117 125
126/* Has this kprobe gone ? */
118static inline int kprobe_gone(struct kprobe *p) 127static inline int kprobe_gone(struct kprobe *p)
119{ 128{
120 return p->flags & KPROBE_FLAG_GONE; 129 return p->flags & KPROBE_FLAG_GONE;
121} 130}
122 131
132/* Is this kprobe disabled ? */
133static inline int kprobe_disabled(struct kprobe *p)
134{
135 return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE);
136}
123/* 137/*
124 * Special probe type that uses setjmp-longjmp type tricks to resume 138 * Special probe type that uses setjmp-longjmp type tricks to resume
125 * execution at a specified entry with a matching prototype corresponding 139 * execution at a specified entry with a matching prototype corresponding
@@ -279,6 +293,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num);
279void kprobe_flush_task(struct task_struct *tk); 293void kprobe_flush_task(struct task_struct *tk);
280void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); 294void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
281 295
296int disable_kprobe(struct kprobe *kp);
297int enable_kprobe(struct kprobe *kp);
298
282#else /* !CONFIG_KPROBES: */ 299#else /* !CONFIG_KPROBES: */
283 300
284static inline int kprobes_built_in(void) 301static inline int kprobes_built_in(void)
@@ -345,5 +362,30 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
345static inline void kprobe_flush_task(struct task_struct *tk) 362static inline void kprobe_flush_task(struct task_struct *tk)
346{ 363{
347} 364}
365static inline int disable_kprobe(struct kprobe *kp)
366{
367 return -ENOSYS;
368}
369static inline int enable_kprobe(struct kprobe *kp)
370{
371 return -ENOSYS;
372}
348#endif /* CONFIG_KPROBES */ 373#endif /* CONFIG_KPROBES */
374static inline int disable_kretprobe(struct kretprobe *rp)
375{
376 return disable_kprobe(&rp->kp);
377}
378static inline int enable_kretprobe(struct kretprobe *rp)
379{
380 return enable_kprobe(&rp->kp);
381}
382static inline int disable_jprobe(struct jprobe *jp)
383{
384 return disable_kprobe(&jp->kp);
385}
386static inline int enable_jprobe(struct jprobe *jp)
387{
388 return enable_kprobe(&jp->kp);
389}
390
349#endif /* _LINUX_KPROBES_H */ 391#endif /* _LINUX_KPROBES_H */
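
disable_kprobe()/enable_kprobe() let a registered probe be muted and re-armed without tearing it down, and the kretprobe/jprobe wrappers forward to the same calls. A minimal sketch; the probed symbol and the empty pre-handler are illustrative.

#include <linux/kprobes.h>

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        return 0;
}

static struct kprobe example_kp = {
        .symbol_name = "do_fork",       /* illustrative probe point */
        .pre_handler = example_pre_handler,
};

static int example_toggle_probe(void)
{
        int ret = register_kprobe(&example_kp);

        if (ret)
                return ret;

        disable_kprobe(&example_kp);    /* probe stays registered, stops firing */
        enable_kprobe(&example_kp);     /* re-arm without re-registering */

        unregister_kprobe(&example_kp);
        return 0;
}
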
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h
new file mode 100644
index 000000000000..42f854a1a199
--- /dev/null
+++ b/include/linux/leds-bd2802.h
@@ -0,0 +1,26 @@
1/*
2 * leds-bd2802.h - RGB LED Driver
3 *
4 * Copyright (C) 2009 Samsung Electronics
5 * Kim Kyuwon <q1.kim@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf
12 *
13 */
14#ifndef _LEDS_BD2802_H_
15#define _LEDS_BD2802_H_
16
17struct bd2802_led_platform_data{
18 int reset_gpio;
19 u8 rgb_time;
20};
21
22#define RGB_TIME(slopedown, slopeup, waveform) \
23 ((slopedown) << 6 | (slopeup) << 4 | (waveform))
24
25#endif /* _LEDS_BD2802_H_ */
26
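
Board code hands the BD2802 driver its reset GPIO and the packed slope/waveform timing through this platform data; RGB_TIME() does the packing. A sketch with illustrative GPIO and timing values:

#include <linux/types.h>
#include <linux/leds-bd2802.h>

static struct bd2802_led_platform_data board_bd2802_pdata = {
        .reset_gpio = 130,                      /* illustrative GPIO number */
        .rgb_time   = RGB_TIME(2, 1, 3),        /* slope-down, slope-up, waveform */
};
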
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 24489da701e3..376fe07732ea 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -30,6 +30,7 @@ enum led_brightness {
30struct led_classdev { 30struct led_classdev {
31 const char *name; 31 const char *name;
32 int brightness; 32 int brightness;
33 int max_brightness;
33 int flags; 34 int flags;
34 35
35 /* Lower 16 bits reflect status */ 36 /* Lower 16 bits reflect status */
@@ -140,7 +141,8 @@ struct gpio_led {
140 const char *name; 141 const char *name;
141 const char *default_trigger; 142 const char *default_trigger;
142 unsigned gpio; 143 unsigned gpio;
143 u8 active_low; 144 u8 active_low : 1;
145 u8 retain_state_suspended : 1;
144}; 146};
145 147
146struct gpio_led_platform_data { 148struct gpio_led_platform_data {
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h
new file mode 100644
index 000000000000..33a071167489
--- /dev/null
+++ b/include/linux/leds_pwm.h
@@ -0,0 +1,21 @@
1/*
2 * PWM LED driver data - see drivers/leds/leds-pwm.c
3 */
4#ifndef __LINUX_LEDS_PWM_H
5#define __LINUX_LEDS_PWM_H
6
7struct led_pwm {
8 const char *name;
9 const char *default_trigger;
10 unsigned pwm_id;
11 u8 active_low;
12 unsigned max_brightness;
13 unsigned pwm_period_ns;
14};
15
16struct led_pwm_platform_data {
17 int num_leds;
18 struct led_pwm *leds;
19};
20
21#endif
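
Each led_pwm entry describes one PWM-backed LED; the platform data simply points the leds-pwm driver at the array. A sketch with illustrative names, PWM id and period:

#include <linux/kernel.h>
#include <linux/leds_pwm.h>

static struct led_pwm board_pwm_leds[] = {
        {
                .name            = "board:green:status",
                .default_trigger = "heartbeat",
                .pwm_id          = 0,
                .active_low      = 0,
                .max_brightness  = 255,
                .pwm_period_ns   = 1000000,     /* 1 ms period, i.e. 1 kHz */
        },
};

static struct led_pwm_platform_data board_pwm_led_pdata = {
        .num_leds = ARRAY_SIZE(board_pwm_leds),
        .leds     = board_pwm_leds,
};
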
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 76262d83656b..b450a2628855 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -379,7 +379,7 @@ enum {
379 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ 379 ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */
380 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands 380 ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands
381 not multiple of 16 bytes */ 381 not multiple of 16 bytes */
382 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */ 382 ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */
383 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ 383 ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */
384 384
385 /* DMA mask for user DMA control: User visible values; DO NOT 385 /* DMA mask for user DMA control: User visible values; DO NOT
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 7dc5b6cb44cd..d39ed1cc5fbf 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -25,13 +25,13 @@ struct svc_rqst;
25#define NLM_MAXCOOKIELEN 32 25#define NLM_MAXCOOKIELEN 32
26#define NLM_MAXSTRLEN 1024 26#define NLM_MAXSTRLEN 1024
27 27
28#define nlm_granted __constant_htonl(NLM_LCK_GRANTED) 28#define nlm_granted cpu_to_be32(NLM_LCK_GRANTED)
29#define nlm_lck_denied __constant_htonl(NLM_LCK_DENIED) 29#define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED)
30#define nlm_lck_denied_nolocks __constant_htonl(NLM_LCK_DENIED_NOLOCKS) 30#define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS)
31#define nlm_lck_blocked __constant_htonl(NLM_LCK_BLOCKED) 31#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED)
32#define nlm_lck_denied_grace_period __constant_htonl(NLM_LCK_DENIED_GRACE_PERIOD) 32#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD)
33 33
34#define nlm_drop_reply __constant_htonl(30000) 34#define nlm_drop_reply cpu_to_be32(30000)
35 35
36/* Lock info passed via NLM */ 36/* Lock info passed via NLM */
37struct nlm_lock { 37struct nlm_lock {
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index 12bfe09de2b1..7353821341ed 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -15,11 +15,11 @@
15#include <linux/lockd/xdr.h> 15#include <linux/lockd/xdr.h>
16 16
17/* error codes new to NLMv4 */ 17/* error codes new to NLMv4 */
18#define nlm4_deadlock __constant_htonl(NLM_DEADLCK) 18#define nlm4_deadlock cpu_to_be32(NLM_DEADLCK)
19#define nlm4_rofs __constant_htonl(NLM_ROFS) 19#define nlm4_rofs cpu_to_be32(NLM_ROFS)
20#define nlm4_stale_fh __constant_htonl(NLM_STALE_FH) 20#define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH)
21#define nlm4_fbig __constant_htonl(NLM_FBIG) 21#define nlm4_fbig cpu_to_be32(NLM_FBIG)
22#define nlm4_failed __constant_htonl(NLM_FAILED) 22#define nlm4_failed cpu_to_be32(NLM_FAILED)
23 23
24 24
25 25
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 42767d1a62e7..37fa19b34ef5 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -110,4 +110,10 @@ struct memory_accessor {
110 off_t offset, size_t count); 110 off_t offset, size_t count);
111}; 111};
112 112
113/*
114 * Kernel text modification mutex, used for code patching. Users of this lock
115 * can sleep.
116 */
117extern struct mutex text_mutex;
118
113#endif /* _LINUX_MEMORY_H_ */ 119#endif /* _LINUX_MEMORY_H_ */
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h
new file mode 100644
index 000000000000..be469a357cbb
--- /dev/null
+++ b/include/linux/mfd/ds1wm.h
@@ -0,0 +1,6 @@
1/* MFD cell driver data for the DS1WM driver */
2
3struct ds1wm_driver_data {
4 int active_high;
5 int clock_rate;
6};
diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h
index b4294f12c4f8..3d3ed67bd969 100644
--- a/include/linux/mfd/htc-pasic3.h
+++ b/include/linux/mfd/htc-pasic3.h
@@ -48,7 +48,6 @@ struct pasic3_leds_machinfo {
48 48
49struct pasic3_platform_data { 49struct pasic3_platform_data {
50 struct pasic3_leds_machinfo *led_pdata; 50 struct pasic3_leds_machinfo *led_pdata;
51 unsigned int bus_shift;
52 unsigned int clock_rate; 51 unsigned int clock_rate;
53}; 52};
54 53
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index 4455b212d75a..c8f51c3c0a72 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -29,6 +29,8 @@ struct pcf50633_platform_data {
29 char **batteries; 29 char **batteries;
30 int num_batteries; 30 int num_batteries;
31 31
32 int charging_restart_interval;
33
32 /* Callbacks */ 34 /* Callbacks */
33 void (*probe_done)(struct pcf50633 *); 35 void (*probe_done)(struct pcf50633 *);
34 void (*mbc_event_callback)(struct pcf50633 *, int); 36 void (*mbc_event_callback)(struct pcf50633 *, int);
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
index 6e17619b773a..4119579acf2c 100644
--- a/include/linux/mfd/pcf50633/mbc.h
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -128,7 +128,6 @@ enum pcf50633_reg_mbcs3 {
128int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); 128int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
129 129
130int pcf50633_mbc_get_status(struct pcf50633 *); 130int pcf50633_mbc_get_status(struct pcf50633 *);
131void pcf50633_mbc_set_status(struct pcf50633 *, int what, int status);
132 131
133#endif 132#endif
134 133
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 980669d50dca..42cca672f340 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -640,9 +640,11 @@ struct wm8350 {
640 * 640 *
641 * @init: Function called during driver initialisation. Should be 641 * @init: Function called during driver initialisation. Should be
642 * used by the platform to configure GPIO functions and similar. 642 * used by the platform to configure GPIO functions and similar.
643 * @irq_high: Set if WM8350 IRQ is active high.
643 */ 644 */
644struct wm8350_platform_data { 645struct wm8350_platform_data {
645 int (*init)(struct wm8350 *wm8350); 646 int (*init)(struct wm8350 *wm8350);
647 int irq_high;
646}; 648};
647 649
648 650
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
new file mode 100644
index 000000000000..1f76b1ebf627
--- /dev/null
+++ b/include/linux/mg_disk.h
@@ -0,0 +1,206 @@
1/*
 2 * include/linux/mg_disk.h
3 *
4 * Support for the mGine m[g]flash IO mode.
5 * Based on legacy hd.c
6 *
7 * (c) 2008 mGine Co.,LTD
8 * (c) 2008 unsik Kim <donari75@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef __MG_DISK_H__
16#define __MG_DISK_H__
17
18#include <linux/blkdev.h>
19#include <linux/ata.h>
20
21/* name for block device */
22#define MG_DISK_NAME "mgd"
23/* name for platform device */
24#define MG_DEV_NAME "mg_disk"
25
26#define MG_DISK_MAJ 0
27#define MG_DISK_MAX_PART 16
28#define MG_SECTOR_SIZE 512
29#define MG_MAX_SECTS 256
30
31/* Register offsets */
32#define MG_BUFF_OFFSET 0x8000
33#define MG_STORAGE_BUFFER_SIZE 0x200
34#define MG_REG_OFFSET 0xC000
35#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
36#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
37#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
38#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
39#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
40#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
41#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
42#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
43#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
44#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
45#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
46
47/* "Drive Select/Head Register" bit values */
48#define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */
49#define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON)
50#define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON)
51#define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON)
52
53
54/* "Device Control Register" bit values */
55#define MG_REG_CTRL_INTR_ENABLE 0x0
56#define MG_REG_CTRL_INTR_DISABLE (0x1<<1)
57#define MG_REG_CTRL_RESET (0x1<<2)
58#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0
59#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4)
60#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0
61#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5)
62#define MG_REG_CTRL_DPD_DISABLE 0x0
63#define MG_REG_CTRL_DPD_ENABLE (0x1<<6)
64
65/* Status register bit */
66/* error bit in status register */
67#define MG_REG_STATUS_BIT_ERROR 0x01
68/* corrected error in status register */
69#define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04
70/* data request bit in status register */
71#define MG_REG_STATUS_BIT_DATA_REQ 0x08
72/* DSC - Drive Seek Complete */
73#define MG_REG_STATUS_BIT_SEEK_DONE 0x10
74/* DWF - Drive Write Fault */
75#define MG_REG_STATUS_BIT_WRITE_FAULT 0x20
76#define MG_REG_STATUS_BIT_READY 0x40
77#define MG_REG_STATUS_BIT_BUSY 0x80
78
79/* handy status */
80#define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
81#define MG_READY_OK(s) (((s) & (MG_STAT_READY | \
82 (MG_REG_STATUS_BIT_BUSY | \
83 MG_REG_STATUS_BIT_WRITE_FAULT | \
84 MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
85
86/* Error register */
87#define MG_REG_ERR_AMNF 0x01
88#define MG_REG_ERR_ABRT 0x04
89#define MG_REG_ERR_IDNF 0x10
90#define MG_REG_ERR_UNC 0x40
91#define MG_REG_ERR_BBK 0x80
92
93/* error code for others */
94#define MG_ERR_NONE 0
95#define MG_ERR_TIMEOUT 0x100
96#define MG_ERR_INIT_STAT 0x101
97#define MG_ERR_TRANSLATION 0x102
98#define MG_ERR_CTRL_RST 0x103
99#define MG_ERR_INV_STAT 0x104
100#define MG_ERR_RSTOUT 0x105
101
102#define MG_MAX_ERRORS 6 /* Max read/write errors */
103
104/* command */
105#define MG_CMD_RD 0x20
106#define MG_CMD_WR 0x30
107#define MG_CMD_SLEEP 0x99
108#define MG_CMD_WAKEUP 0xC3
109#define MG_CMD_ID 0xEC
110#define MG_CMD_WR_CONF 0x3C
111#define MG_CMD_RD_CONF 0x40
112
113/* operation mode */
114#define MG_OP_CASCADE (1 << 0)
115#define MG_OP_CASCADE_SYNC_RD (1 << 1)
116#define MG_OP_CASCADE_SYNC_WR (1 << 2)
117#define MG_OP_INTERLEAVE (1 << 3)
118
119/* synchronous */
120#define MG_BURST_LAT_4 (3 << 4)
121#define MG_BURST_LAT_5 (4 << 4)
122#define MG_BURST_LAT_6 (5 << 4)
123#define MG_BURST_LAT_7 (6 << 4)
124#define MG_BURST_LAT_8 (7 << 4)
125#define MG_BURST_LEN_4 (1 << 1)
126#define MG_BURST_LEN_8 (2 << 1)
127#define MG_BURST_LEN_16 (3 << 1)
128#define MG_BURST_LEN_32 (4 << 1)
129#define MG_BURST_LEN_CONT (0 << 1)
130
131/* timeout value (unit: ms) */
132#define MG_TMAX_CONF_TO_CMD 1
133#define MG_TMAX_WAIT_RD_DRQ 10
134#define MG_TMAX_WAIT_WR_DRQ 500
135#define MG_TMAX_RST_TO_BUSY 10
136#define MG_TMAX_HDRST_TO_RDY 500
137#define MG_TMAX_SWRST_TO_RDY 500
138#define MG_TMAX_RSTOUT 3000
139
140/* device attribution */
141/* use mflash as boot device */
142#define MG_BOOT_DEV (1 << 0)
143/* use mflash as storage device */
144#define MG_STORAGE_DEV (1 << 1)
145/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
146#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
147
148#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
149
150/* names of GPIO resource */
151#define MG_RST_PIN "mg_rst"
152/* except MG_BOOT_DEV, reset-out pin should be assigned */
153#define MG_RSTOUT_PIN "mg_rstout"
154
155/* private driver data */
156struct mg_drv_data {
157 /* disk resource */
158 u32 use_polling;
159
160 /* device attribution */
161 u32 dev_attr;
162
163 /* internally used */
164 struct mg_host *host;
165};
166
167/* main structure for mflash driver */
168struct mg_host {
169 struct device *dev;
170
171 struct request_queue *breq;
172 spinlock_t lock;
173 struct gendisk *gd;
174
175 struct timer_list timer;
176 void (*mg_do_intr) (struct mg_host *);
177
178 u16 id[ATA_ID_WORDS];
179
180 u16 cyls;
181 u16 heads;
182 u16 sectors;
183 u32 n_sectors;
184 u32 nres_sectors;
185
186 void __iomem *dev_base;
187 unsigned int irq;
188 unsigned int rst;
189 unsigned int rstout;
190
191 u32 major;
192 u32 error;
193};
194
195/*
196 * Debugging macro and defines
197 */
198#undef DO_MG_DEBUG
199#ifdef DO_MG_DEBUG
200# define MG_DBG(fmt, args...) \
201 printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
 202#else /* !DO_MG_DEBUG */
 203# define MG_DBG(fmt, args...) do { } while (0)
 204#endif /* !DO_MG_DEBUG */
205
206#endif
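
A board using an mflash part as plain storage wires the driver up through mg_drv_data and a platform device named MG_DEV_NAME; the reset GPIO and IRQ come in as platform resources, omitted here. A sketch with illustrative values:

#include <linux/platform_device.h>
#include <linux/mg_disk.h>

static struct mg_drv_data board_mg_data = {
        .use_polling = 0,               /* use the interrupt line */
        .dev_attr    = MG_STORAGE_DEV,  /* storage device, full reset sequence */
};

static struct platform_device board_mg_device = {
        .name = MG_DEV_NAME,
        .id   = -1,
        .dev  = {
                .platform_data = &board_mg_data,
        },
};
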
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 4e457256bd33..3e7615e9087e 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -192,5 +192,10 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host)
192 wake_up_process(host->sdio_irq_thread); 192 wake_up_process(host->sdio_irq_thread);
193} 193}
194 194
195struct regulator;
196
197int mmc_regulator_get_ocrmask(struct regulator *supply);
198int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit);
199
195#endif 200#endif
196 201
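
The two regulator helpers let an MMC host driver derive its OCR mask from a vmmc supply instead of hard-coding voltages. A sketch of the probe-time usage; obtaining the regulator with regulator_get() is assumed to have happened already.

#include <linux/mmc/host.h>
#include <linux/regulator/consumer.h>

static int example_init_ocr(struct mmc_host *mmc, struct regulator *vmmc)
{
        int mask = mmc_regulator_get_ocrmask(vmmc);

        if (mask < 0)
                return mask;    /* could not read the supply's voltage range */

        mmc->ocr_avail = mask;  /* advertise only what the regulator can deliver */
        return 0;
}
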
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 26ef24076b76..186ec6ab334d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -764,12 +764,6 @@ extern int numa_zonelist_order_handler(struct ctl_table *, int,
764extern char numa_zonelist_order[]; 764extern char numa_zonelist_order[];
765#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ 765#define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */
766 766
767#include <linux/topology.h>
768/* Returns the number of the current Node. */
769#ifndef numa_node_id
770#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
771#endif
772
773#ifndef CONFIG_NEED_MULTIPLE_NODES 767#ifndef CONFIG_NEED_MULTIPLE_NODES
774 768
775extern struct pglist_data contig_page_data; 769extern struct pglist_data contig_page_data;
diff --git a/include/linux/module.h b/include/linux/module.h
index 145a75528cc1..627ac082e2a6 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -248,6 +248,10 @@ struct module
248 const unsigned long *crcs; 248 const unsigned long *crcs;
249 unsigned int num_syms; 249 unsigned int num_syms;
250 250
251 /* Kernel parameters. */
252 struct kernel_param *kp;
253 unsigned int num_kp;
254
251 /* GPL-only exported symbols. */ 255 /* GPL-only exported symbols. */
252 unsigned int num_gpl_syms; 256 unsigned int num_gpl_syms;
253 const struct kernel_symbol *gpl_syms; 257 const struct kernel_symbol *gpl_syms;
@@ -329,6 +333,11 @@ struct module
329 unsigned int num_tracepoints; 333 unsigned int num_tracepoints;
330#endif 334#endif
331 335
336#ifdef CONFIG_TRACING
337 const char **trace_bprintk_fmt_start;
338 unsigned int num_trace_bprintk_fmt;
339#endif
340
332#ifdef CONFIG_MODULE_UNLOAD 341#ifdef CONFIG_MODULE_UNLOAD
333 /* What modules depend on me? */ 342 /* What modules depend on me? */
334 struct list_head modules_which_use_me; 343 struct list_head modules_which_use_me;
@@ -350,6 +359,8 @@ struct module
350#define MODULE_ARCH_INIT {} 359#define MODULE_ARCH_INIT {}
351#endif 360#endif
352 361
362extern struct mutex module_mutex;
363
353/* FIXME: It'd be nice to isolate modules during init, too, so they 364/* FIXME: It'd be nice to isolate modules during init, too, so they
354 aren't used before they (may) fail. But presently too much code 365 aren't used before they (may) fail. But presently too much code
355 (IDE & SCSI) require entry into the module during init.*/ 366 (IDE & SCSI) require entry into the module during init.*/
@@ -358,10 +369,10 @@ static inline int module_is_live(struct module *mod)
358 return mod->state != MODULE_STATE_GOING; 369 return mod->state != MODULE_STATE_GOING;
359} 370}
360 371
361/* Is this address in a module? (second is with no locks, for oops) */
362struct module *module_text_address(unsigned long addr);
363struct module *__module_text_address(unsigned long addr); 372struct module *__module_text_address(unsigned long addr);
364int is_module_address(unsigned long addr); 373struct module *__module_address(unsigned long addr);
374bool is_module_address(unsigned long addr);
375bool is_module_text_address(unsigned long addr);
365 376
366static inline int within_module_core(unsigned long addr, struct module *mod) 377static inline int within_module_core(unsigned long addr, struct module *mod)
367{ 378{
@@ -375,6 +386,31 @@ static inline int within_module_init(unsigned long addr, struct module *mod)
375 addr < (unsigned long)mod->module_init + mod->init_size; 386 addr < (unsigned long)mod->module_init + mod->init_size;
376} 387}
377 388
389/* Search for module by name: must hold module_mutex. */
390struct module *find_module(const char *name);
391
392struct symsearch {
393 const struct kernel_symbol *start, *stop;
394 const unsigned long *crcs;
395 enum {
396 NOT_GPL_ONLY,
397 GPL_ONLY,
398 WILL_BE_GPL_ONLY,
399 } licence;
400 bool unused;
401};
402
403/* Search for an exported symbol by name. */
404const struct kernel_symbol *find_symbol(const char *name,
405 struct module **owner,
406 const unsigned long **crc,
407 bool gplok,
408 bool warn);
409
410/* Walk the exported symbol table */
411bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
412 unsigned int symnum, void *data), void *data);
413
378/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if 414/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
379 symnum out of range. */ 415 symnum out of range. */
380int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, 416int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
@@ -383,6 +419,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
383/* Look for this name: can be of form module:name. */ 419/* Look for this name: can be of form module:name. */
384unsigned long module_kallsyms_lookup_name(const char *name); 420unsigned long module_kallsyms_lookup_name(const char *name);
385 421
422int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
423 struct module *, unsigned long),
424 void *data);
425
386extern void __module_put_and_exit(struct module *mod, long code) 426extern void __module_put_and_exit(struct module *mod, long code)
387 __attribute__((noreturn)); 427 __attribute__((noreturn));
388#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); 428#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code);
@@ -444,6 +484,7 @@ static inline void __module_get(struct module *module)
444#define symbol_put_addr(p) do { } while(0) 484#define symbol_put_addr(p) do { } while(0)
445 485
446#endif /* CONFIG_MODULE_UNLOAD */ 486#endif /* CONFIG_MODULE_UNLOAD */
487int use_module(struct module *a, struct module *b);
447 488
448/* This is a #define so the string doesn't get put in every .o file */ 489/* This is a #define so the string doesn't get put in every .o file */
449#define module_name(mod) \ 490#define module_name(mod) \
@@ -490,21 +531,24 @@ search_module_extables(unsigned long addr)
490 return NULL; 531 return NULL;
491} 532}
492 533
493/* Is this address in a module? */ 534static inline struct module *__module_address(unsigned long addr)
494static inline struct module *module_text_address(unsigned long addr)
495{ 535{
496 return NULL; 536 return NULL;
497} 537}
498 538
499/* Is this address in a module? (don't take a lock, we're oopsing) */
500static inline struct module *__module_text_address(unsigned long addr) 539static inline struct module *__module_text_address(unsigned long addr)
501{ 540{
502 return NULL; 541 return NULL;
503} 542}
504 543
505static inline int is_module_address(unsigned long addr) 544static inline bool is_module_address(unsigned long addr)
506{ 545{
507 return 0; 546 return false;
547}
548
549static inline bool is_module_text_address(unsigned long addr)
550{
551 return false;
508} 552}
509 553
510/* Get/put a kernel symbol (calls should be symmetric) */ 554/* Get/put a kernel symbol (calls should be symmetric) */
@@ -559,6 +603,14 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name)
559 return 0; 603 return 0;
560} 604}
561 605
606static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
607 struct module *,
608 unsigned long),
609 void *data)
610{
611 return 0;
612}
613
562static inline int register_module_notifier(struct notifier_block * nb) 614static inline int register_module_notifier(struct notifier_block * nb)
563{ 615{
564 /* no events will happen anyway, so this can always succeed */ 616 /* no events will happen anyway, so this can always succeed */
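
The module_kallsyms_on_each_symbol() hook added above walks the kallsyms table of every loaded module and stops as soon as the callback returns non-zero (with CONFIG_KALLSYMS; the stub further down always returns 0). A minimal sketch of a caller; match_symbol() and find_symbol_addr() are hypothetical names, not part of this patch:

/* Illustrative sketch only, not part of the diff above. */
#include <linux/module.h>
#include <linux/string.h>

struct sym_query {
	const char *name;	/* symbol being looked for */
	unsigned long addr;	/* filled in when found */
};

/* Callback for module_kallsyms_on_each_symbol(); a non-zero return
 * stops the walk and is propagated back to the caller. */
static int match_symbol(void *data, const char *name,
			struct module *mod, unsigned long addr)
{
	struct sym_query *q = data;

	if (strcmp(name, q->name))
		return 0;		/* keep walking */
	q->addr = addr;
	return 1;			/* found, stop the walk */
}

static unsigned long find_symbol_addr(const char *name)
{
	struct sym_query q = { .name = name, .addr = 0 };

	module_kallsyms_on_each_symbol(match_symbol, &q);
	return q.addr;
}
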
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index e4af3399ef48..a4f0b931846c 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -138,6 +138,16 @@ extern int parse_args(const char *name,
138 unsigned num, 138 unsigned num,
139 int (*unknown)(char *param, char *val)); 139 int (*unknown)(char *param, char *val));
140 140
141/* Called by module remove. */
142#ifdef CONFIG_SYSFS
143extern void destroy_params(const struct kernel_param *params, unsigned num);
144#else
145static inline void destroy_params(const struct kernel_param *params,
146 unsigned num)
147{
148}
149#endif /* !CONFIG_SYSFS */
150
141/* All the helper functions */ 151/* All the helper functions */
142/* The macros to do compile-time type checking stolen from Jakub 152/* The macros to do compile-time type checking stolen from Jakub
143 Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ 153 Jelinek, who IIRC came up with this idea for the 2.4 module init code. */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 3aa5d77c2cdb..5675b63a0631 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/uio.h> 12#include <linux/uio.h>
13#include <linux/notifier.h> 13#include <linux/notifier.h>
14#include <linux/device.h>
14 15
15#include <linux/mtd/compatmac.h> 16#include <linux/mtd/compatmac.h>
16#include <mtd/mtd-abi.h> 17#include <mtd/mtd-abi.h>
@@ -162,6 +163,20 @@ struct mtd_info {
162 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ 163 /* We probably shouldn't allow XIP if the unpoint isn't a NULL */
163 void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len); 164 void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len);
164 165
166 /* Allow NOMMU mmap() to directly map the device (if not NULL)
167 * - return the address to which the offset maps
168 * - return -ENOSYS to indicate refusal to do the mapping
169 */
170 unsigned long (*get_unmapped_area) (struct mtd_info *mtd,
171 unsigned long len,
172 unsigned long offset,
173 unsigned long flags);
174
175 /* Backing device capabilities for this device
176 * - provides mmap capabilities
177 */
178 struct backing_dev_info *backing_dev_info;
179
165 180
166 int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); 181 int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
167 int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); 182 int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
@@ -223,6 +238,7 @@ struct mtd_info {
223 void *priv; 238 void *priv;
224 239
225 struct module *owner; 240 struct module *owner;
241 struct device dev;
226 int usecount; 242 int usecount;
227 243
228 /* If the driver is something smart, like UBI, it may need to maintain 244 /* If the driver is something smart, like UBI, it may need to maintain
@@ -233,6 +249,11 @@ struct mtd_info {
233 void (*put_device) (struct mtd_info *mtd); 249 void (*put_device) (struct mtd_info *mtd);
234}; 250};
235 251
252static inline struct mtd_info *dev_to_mtd(struct device *dev)
253{
254 return dev ? container_of(dev, struct mtd_info, dev) : NULL;
255}
256
236static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) 257static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd)
237{ 258{
238 if (mtd->erasesize_shift) 259 if (mtd->erasesize_shift)
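
With struct mtd_info now embedding a struct device and gaining the dev_to_mtd() accessor, driver-model callbacks can map from the device back to its MTD. A hedged sketch; mtd_name_show() is a made-up attribute used purely for illustration:

/* Illustrative sketch only, not part of the diff above. */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mtd/mtd.h>

static ssize_t mtd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_to_mtd(dev);

	/* dev_to_mtd() tolerates a NULL device and returns NULL for it */
	return sprintf(buf, "%s\n", mtd ? mtd->name : "(none)");
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
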
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index db5b63da2a7e..7efb9be34662 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -43,8 +43,8 @@ extern void nand_wait_ready(struct mtd_info *mtd);
43 * is supported now. If you add a chip with bigger oobsize/page 43 * is supported now. If you add a chip with bigger oobsize/page
44 * adjust this accordingly. 44 * adjust this accordingly.
45 */ 45 */
46#define NAND_MAX_OOBSIZE 64 46#define NAND_MAX_OOBSIZE 128
47#define NAND_MAX_PAGESIZE 2048 47#define NAND_MAX_PAGESIZE 4096
48 48
49/* 49/*
50 * Constants for hardware specific CLE/ALE/NCE function 50 * Constants for hardware specific CLE/ALE/NCE function
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index a45dd831b3f8..7535a74083b9 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -76,4 +76,16 @@ int __devinit of_mtd_parse_partitions(struct device *dev,
76 struct device_node *node, 76 struct device_node *node,
77 struct mtd_partition **pparts); 77 struct mtd_partition **pparts);
78 78
79#ifdef CONFIG_MTD_PARTITIONS
80static inline int mtd_has_partitions(void) { return 1; }
81#else
82static inline int mtd_has_partitions(void) { return 0; }
83#endif
84
85#ifdef CONFIG_MTD_CMDLINE_PARTS
86static inline int mtd_has_cmdlinepart(void) { return 1; }
87#else
88static inline int mtd_has_cmdlinepart(void) { return 0; }
89#endif
90
79#endif 91#endif
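
The mtd_has_partitions() and mtd_has_cmdlinepart() helpers above let a map driver decide at run time instead of wrapping calls in #ifdef CONFIG_MTD_PARTITIONS; the dead branch is discarded at compile time. A rough sketch of the usual registration pattern, not lifted from any particular driver:

/* Illustrative sketch only, not part of the diff above. */
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static int example_register_mtd(struct mtd_info *mtd)
{
	struct mtd_partition *parts = NULL;
	int nr_parts = 0;

	if (mtd_has_cmdlinepart()) {
		static const char *probes[] = { "cmdlinepart", NULL };

		nr_parts = parse_mtd_partitions(mtd, probes, &parts, 0);
	}

	if (mtd_has_partitions() && nr_parts > 0)
		return add_mtd_partitions(mtd, parts, nr_parts);

	return add_mtd_device(mtd);
}
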
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 54af92c1c70b..214d499718f7 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -109,7 +109,6 @@
109 NFSERR_FILE_OPEN = 10046, /* v4 */ 109 NFSERR_FILE_OPEN = 10046, /* v4 */
110 NFSERR_ADMIN_REVOKED = 10047, /* v4 */ 110 NFSERR_ADMIN_REVOKED = 10047, /* v4 */
111 NFSERR_CB_PATH_DOWN = 10048, /* v4 */ 111 NFSERR_CB_PATH_DOWN = 10048, /* v4 */
112 NFSERR_REPLAY_ME = 10049 /* v4 */
113}; 112};
114 113
115/* NFSv2 file types - beware, these are not the same in NFSv3 */ 114/* NFSv2 file types - beware, these are not the same in NFSv3 */
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index b912311a56b1..e3f0cbcbd0db 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -21,6 +21,7 @@
21#define NFS4_FHSIZE 128 21#define NFS4_FHSIZE 128
22#define NFS4_MAXPATHLEN PATH_MAX 22#define NFS4_MAXPATHLEN PATH_MAX
23#define NFS4_MAXNAMLEN NAME_MAX 23#define NFS4_MAXNAMLEN NAME_MAX
24#define NFS4_MAX_SESSIONID_LEN 16
24 25
25#define NFS4_ACCESS_READ 0x0001 26#define NFS4_ACCESS_READ 0x0001
26#define NFS4_ACCESS_LOOKUP 0x0002 27#define NFS4_ACCESS_LOOKUP 0x0002
@@ -38,6 +39,7 @@
38#define NFS4_OPEN_RESULT_CONFIRM 0x0002 39#define NFS4_OPEN_RESULT_CONFIRM 0x0002
39#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 40#define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004
40 41
42#define NFS4_SHARE_ACCESS_MASK 0x000F
41#define NFS4_SHARE_ACCESS_READ 0x0001 43#define NFS4_SHARE_ACCESS_READ 0x0001
42#define NFS4_SHARE_ACCESS_WRITE 0x0002 44#define NFS4_SHARE_ACCESS_WRITE 0x0002
43#define NFS4_SHARE_ACCESS_BOTH 0x0003 45#define NFS4_SHARE_ACCESS_BOTH 0x0003
@@ -45,6 +47,19 @@
45#define NFS4_SHARE_DENY_WRITE 0x0002 47#define NFS4_SHARE_DENY_WRITE 0x0002
46#define NFS4_SHARE_DENY_BOTH 0x0003 48#define NFS4_SHARE_DENY_BOTH 0x0003
47 49
50/* nfs41 */
51#define NFS4_SHARE_WANT_MASK 0xFF00
52#define NFS4_SHARE_WANT_NO_PREFERENCE 0x0000
53#define NFS4_SHARE_WANT_READ_DELEG 0x0100
54#define NFS4_SHARE_WANT_WRITE_DELEG 0x0200
55#define NFS4_SHARE_WANT_ANY_DELEG 0x0300
56#define NFS4_SHARE_WANT_NO_DELEG 0x0400
57#define NFS4_SHARE_WANT_CANCEL 0x0500
58
59#define NFS4_SHARE_WHEN_MASK 0xF0000
60#define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000
61#define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000
62
48#define NFS4_SET_TO_SERVER_TIME 0 63#define NFS4_SET_TO_SERVER_TIME 0
49#define NFS4_SET_TO_CLIENT_TIME 1 64#define NFS4_SET_TO_CLIENT_TIME 1
50 65
@@ -88,6 +103,31 @@
88#define NFS4_ACE_GENERIC_EXECUTE 0x001200A0 103#define NFS4_ACE_GENERIC_EXECUTE 0x001200A0
89#define NFS4_ACE_MASK_ALL 0x001F01FF 104#define NFS4_ACE_MASK_ALL 0x001F01FF
90 105
106#define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001
107#define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002
108#define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000
109#define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000
110#define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000
111#define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000
112#define EXCHGID4_FLAG_CONFIRMED_R 0x80000000
113/*
114 * Since the validity of these bits depends on whether
115 * they're set in the argument or response, have separate
116 * invalid flag masks for arg (_A) and resp (_R).
117 */
118#define EXCHGID4_FLAG_MASK_A 0x40070003
119#define EXCHGID4_FLAG_MASK_R 0x80070003
120
121#define SEQ4_STATUS_CB_PATH_DOWN 0x00000001
122#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002
123#define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED 0x00000004
124#define SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED 0x00000008
125#define SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED 0x00000010
126#define SEQ4_STATUS_ADMIN_STATE_REVOKED 0x00000020
127#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
128#define SEQ4_STATUS_LEASE_MOVED 0x00000080
129#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
130
91#define NFS4_MAX_UINT64 (~(u64)0) 131#define NFS4_MAX_UINT64 (~(u64)0)
92 132
93enum nfs4_acl_whotype { 133enum nfs4_acl_whotype {
@@ -154,6 +194,28 @@ enum nfs_opnum4 {
154 OP_VERIFY = 37, 194 OP_VERIFY = 37,
155 OP_WRITE = 38, 195 OP_WRITE = 38,
156 OP_RELEASE_LOCKOWNER = 39, 196 OP_RELEASE_LOCKOWNER = 39,
197
198 /* nfs41 */
199 OP_BACKCHANNEL_CTL = 40,
200 OP_BIND_CONN_TO_SESSION = 41,
201 OP_EXCHANGE_ID = 42,
202 OP_CREATE_SESSION = 43,
203 OP_DESTROY_SESSION = 44,
204 OP_FREE_STATEID = 45,
205 OP_GET_DIR_DELEGATION = 46,
206 OP_GETDEVICEINFO = 47,
207 OP_GETDEVICELIST = 48,
208 OP_LAYOUTCOMMIT = 49,
209 OP_LAYOUTGET = 50,
210 OP_LAYOUTRETURN = 51,
211 OP_SECINFO_NO_NAME = 52,
212 OP_SEQUENCE = 53,
213 OP_SET_SSV = 54,
214 OP_TEST_STATEID = 55,
215 OP_WANT_DELEGATION = 56,
216 OP_DESTROY_CLIENTID = 57,
217 OP_RECLAIM_COMPLETE = 58,
218
157 OP_ILLEGAL = 10044, 219 OP_ILLEGAL = 10044,
158}; 220};
159 221
@@ -230,7 +292,48 @@ enum nfsstat4 {
230 NFS4ERR_DEADLOCK = 10045, 292 NFS4ERR_DEADLOCK = 10045,
231 NFS4ERR_FILE_OPEN = 10046, 293 NFS4ERR_FILE_OPEN = 10046,
232 NFS4ERR_ADMIN_REVOKED = 10047, 294 NFS4ERR_ADMIN_REVOKED = 10047,
233 NFS4ERR_CB_PATH_DOWN = 10048 295 NFS4ERR_CB_PATH_DOWN = 10048,
296
297 /* nfs41 */
298 NFS4ERR_BADIOMODE = 10049,
299 NFS4ERR_BADLAYOUT = 10050,
300 NFS4ERR_BAD_SESSION_DIGEST = 10051,
301 NFS4ERR_BADSESSION = 10052,
302 NFS4ERR_BADSLOT = 10053,
303 NFS4ERR_COMPLETE_ALREADY = 10054,
304 NFS4ERR_CONN_NOT_BOUND_TO_SESSION = 10055,
305 NFS4ERR_DELEG_ALREADY_WANTED = 10056,
306 NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */
307 NFS4ERR_LAYOUTTRYLATER = 10058,
308 NFS4ERR_LAYOUTUNAVAILABLE = 10059,
309 NFS4ERR_NOMATCHING_LAYOUT = 10060,
310 NFS4ERR_RECALLCONFLICT = 10061,
311 NFS4ERR_UNKNOWN_LAYOUTTYPE = 10062,
312 NFS4ERR_SEQ_MISORDERED = 10063, /* unexpected seq.id in req */
313 NFS4ERR_SEQUENCE_POS = 10064, /* [CB_]SEQ. op not 1st op */
314 NFS4ERR_REQ_TOO_BIG = 10065, /* request too big */
315 NFS4ERR_REP_TOO_BIG = 10066, /* reply too big */
316 NFS4ERR_REP_TOO_BIG_TO_CACHE = 10067, /* rep. not all cached */
317 NFS4ERR_RETRY_UNCACHED_REP = 10068, /* retry & rep. uncached */
318 NFS4ERR_UNSAFE_COMPOUND = 10069, /* retry/recovery too hard */
319 NFS4ERR_TOO_MANY_OPS = 10070, /* too many ops in [CB_]COMP */
320 NFS4ERR_OP_NOT_IN_SESSION = 10071, /* op needs [CB_]SEQ. op */
321 NFS4ERR_HASH_ALG_UNSUPP = 10072, /* hash alg. not supp. */
322 /* Error 10073 is unused. */
323 NFS4ERR_CLIENTID_BUSY = 10074, /* clientid has state */
324 NFS4ERR_PNFS_IO_HOLE = 10075, /* IO to _SPARSE file hole */
 325 NFS4ERR_SEQ_FALSE_RETRY = 10076, /* retry not original */
326 NFS4ERR_BAD_HIGH_SLOT = 10077, /* sequence arg bad */
327 NFS4ERR_DEADSESSION = 10078, /* persistent session dead */
328 NFS4ERR_ENCR_ALG_UNSUPP = 10079, /* SSV alg mismatch */
329 NFS4ERR_PNFS_NO_LAYOUT = 10080, /* direct I/O with no layout */
330 NFS4ERR_NOT_ONLY_OP = 10081, /* bad compound */
331 NFS4ERR_WRONG_CRED = 10082, /* permissions:state change */
332 NFS4ERR_WRONG_TYPE = 10083, /* current operation mismatch */
333 NFS4ERR_DIRDELEG_UNAVAIL = 10084, /* no directory delegation */
334 NFS4ERR_REJECT_DELEG = 10085, /* on callback */
335 NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */
336 NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */
234}; 337};
235 338
236/* 339/*
@@ -265,7 +368,13 @@ enum opentype4 {
265enum createmode4 { 368enum createmode4 {
266 NFS4_CREATE_UNCHECKED = 0, 369 NFS4_CREATE_UNCHECKED = 0,
267 NFS4_CREATE_GUARDED = 1, 370 NFS4_CREATE_GUARDED = 1,
268 NFS4_CREATE_EXCLUSIVE = 2 371 NFS4_CREATE_EXCLUSIVE = 2,
372 /*
373 * New to NFSv4.1. If session is persistent,
374 * GUARDED4 MUST be used. Otherwise, use
375 * EXCLUSIVE4_1 instead of EXCLUSIVE4.
376 */
377 NFS4_CREATE_EXCLUSIVE4_1 = 3
269}; 378};
270 379
271enum limit_by4 { 380enum limit_by4 {
@@ -301,6 +410,8 @@ enum lock_type4 {
301#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) 410#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
302#define FATTR4_WORD0_LEASE_TIME (1UL << 10) 411#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
303#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) 412#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
413/* Mandatory in NFSv4.1 */
414#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
304 415
305/* Recommended Attributes */ 416/* Recommended Attributes */
306#define FATTR4_WORD0_ACL (1UL << 12) 417#define FATTR4_WORD0_ACL (1UL << 12)
@@ -391,6 +502,29 @@ enum {
391 NFSPROC4_CLNT_GETACL, 502 NFSPROC4_CLNT_GETACL,
392 NFSPROC4_CLNT_SETACL, 503 NFSPROC4_CLNT_SETACL,
393 NFSPROC4_CLNT_FS_LOCATIONS, 504 NFSPROC4_CLNT_FS_LOCATIONS,
505
506 /* nfs41 */
507 NFSPROC4_CLNT_EXCHANGE_ID,
508 NFSPROC4_CLNT_CREATE_SESSION,
509 NFSPROC4_CLNT_DESTROY_SESSION,
510 NFSPROC4_CLNT_SEQUENCE,
511 NFSPROC4_CLNT_GET_LEASE_TIME,
512};
513
514/* nfs41 types */
515struct nfs4_sessionid {
516 unsigned char data[NFS4_MAX_SESSIONID_LEN];
517};
518
519/* Create Session Flags */
520#define SESSION4_PERSIST 0x001
521#define SESSION4_BACK_CHAN 0x002
522#define SESSION4_RDMA 0x004
523
524enum state_protect_how4 {
525 SP4_NONE = 0,
526 SP4_MACH_CRED = 1,
527 SP4_SSV = 2
394}; 528};
395 529
396#endif 530#endif
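
EXCHGID4_FLAG_MASK_A and EXCHGID4_FLAG_MASK_R above collect the bits that may legitimately appear in an EXCHANGE_ID argument and result respectively. A small sketch of the kind of validity check they enable; the helper names are illustrative only:

/* Illustrative sketch only, not part of the diff above. */
#include <linux/types.h>
#include <linux/nfs4.h>

static inline bool exchgid_arg_flags_ok(u32 flags)
{
	/* any bit outside the argument mask makes the request invalid */
	return (flags & ~EXCHGID4_FLAG_MASK_A) == 0;
}

static inline bool exchgid_resp_flags_ok(u32 flags)
{
	return (flags & ~EXCHGID4_FLAG_MASK_R) == 0;
}
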
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index bde2557c2a9c..fdffb413b192 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -185,6 +185,9 @@ struct nfs_inode {
185 fmode_t delegation_state; 185 fmode_t delegation_state;
186 struct rw_semaphore rwsem; 186 struct rw_semaphore rwsem;
187#endif /* CONFIG_NFS_V4*/ 187#endif /* CONFIG_NFS_V4*/
188#ifdef CONFIG_NFS_FSCACHE
189 struct fscache_cookie *fscache;
190#endif
188 struct inode vfs_inode; 191 struct inode vfs_inode;
189}; 192};
190 193
@@ -207,6 +210,8 @@ struct nfs_inode {
207#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ 210#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
208#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */ 211#define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */
209#define NFS_INO_FLUSHING (4) /* inode is flushing out data */ 212#define NFS_INO_FLUSHING (4) /* inode is flushing out data */
213#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
214#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
210 215
211static inline struct nfs_inode *NFS_I(const struct inode *inode) 216static inline struct nfs_inode *NFS_I(const struct inode *inode)
212{ 217{
@@ -260,6 +265,11 @@ static inline int NFS_STALE(const struct inode *inode)
260 return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); 265 return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
261} 266}
262 267
268static inline int NFS_FSCACHE(const struct inode *inode)
269{
270 return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
271}
272
263static inline __u64 NFS_FILEID(const struct inode *inode) 273static inline __u64 NFS_FILEID(const struct inode *inode)
264{ 274{
265 return NFS_I(inode)->fileid; 275 return NFS_I(inode)->fileid;
@@ -506,6 +516,8 @@ extern int nfs_readpages(struct file *, struct address_space *,
506 struct list_head *, unsigned); 516 struct list_head *, unsigned);
507extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); 517extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *);
508extern void nfs_readdata_release(void *data); 518extern void nfs_readdata_release(void *data);
519extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
520 struct page *);
509 521
510/* 522/*
511 * Allocate nfs_read_data structures 523 * Allocate nfs_read_data structures
@@ -583,6 +595,7 @@ extern void * nfs_root_data(void);
583#define NFSDBG_CALLBACK 0x0100 595#define NFSDBG_CALLBACK 0x0100
584#define NFSDBG_CLIENT 0x0200 596#define NFSDBG_CLIENT 0x0200
585#define NFSDBG_MOUNT 0x0400 597#define NFSDBG_MOUNT 0x0400
598#define NFSDBG_FSCACHE 0x0800
586#define NFSDBG_ALL 0xFFFF 599#define NFSDBG_ALL 0xFFFF
587 600
588#ifdef __KERNEL__ 601#ifdef __KERNEL__
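
NFS_FSCACHE() above simply tests the new NFS_INO_FSCACHE bit, so a read path can fall back to the ordinary asynchronous read when local caching is off, with no #ifdef. A hedged sketch; example_cached_readpage() is a hypothetical stand-in for the FS-Cache read path, and only nfs_readpage_async() comes from this header:

/* Illustrative sketch only, not part of the diff above. */
#include <linux/nfs_fs.h>

int example_cached_readpage(struct inode *inode, struct page *page);	/* hypothetical */

static int example_readpage(struct nfs_open_context *ctx,
			    struct inode *inode, struct page *page)
{
	if (NFS_FSCACHE(inode))
		return example_cached_readpage(inode, page);

	return nfs_readpage_async(ctx, inode, page);
}
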
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 29b1e40dce99..6ad75948cbf7 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -64,6 +64,10 @@ struct nfs_client {
64 char cl_ipaddr[48]; 64 char cl_ipaddr[48];
65 unsigned char cl_id_uniquifier; 65 unsigned char cl_id_uniquifier;
66#endif 66#endif
67
68#ifdef CONFIG_NFS_FSCACHE
69 struct fscache_cookie *fscache; /* client index cache cookie */
70#endif
67}; 71};
68 72
69/* 73/*
@@ -96,12 +100,19 @@ struct nfs_server {
96 unsigned int acdirmin; 100 unsigned int acdirmin;
97 unsigned int acdirmax; 101 unsigned int acdirmax;
98 unsigned int namelen; 102 unsigned int namelen;
103 unsigned int options; /* extra options enabled by mount */
104#define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */
99 105
100 struct nfs_fsid fsid; 106 struct nfs_fsid fsid;
101 __u64 maxfilesize; /* maximum file size */ 107 __u64 maxfilesize; /* maximum file size */
102 unsigned long mount_time; /* when this fs was mounted */ 108 unsigned long mount_time; /* when this fs was mounted */
103 dev_t s_dev; /* superblock dev numbers */ 109 dev_t s_dev; /* superblock dev numbers */
104 110
111#ifdef CONFIG_NFS_FSCACHE
112 struct nfs_fscache_key *fscache_key; /* unique key for superblock */
113 struct fscache_cookie *fscache; /* superblock cookie */
114#endif
115
105#ifdef CONFIG_NFS_V4 116#ifdef CONFIG_NFS_V4
106 u32 attr_bitmask[2];/* V4 bitmask representing the set 117 u32 attr_bitmask[2];/* V4 bitmask representing the set
107 of attributes supported on this 118 of attributes supported on this
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h
index 1cb9a3fed2b3..68b10f5f8907 100644
--- a/include/linux/nfs_iostat.h
+++ b/include/linux/nfs_iostat.h
@@ -116,4 +116,16 @@ enum nfs_stat_eventcounters {
116 __NFSIOS_COUNTSMAX, 116 __NFSIOS_COUNTSMAX,
117}; 117};
118 118
119/*
120 * NFS local caching servicing counters
121 */
122enum nfs_stat_fscachecounters {
123 NFSIOS_FSCACHE_PAGES_READ_OK,
124 NFSIOS_FSCACHE_PAGES_READ_FAIL,
125 NFSIOS_FSCACHE_PAGES_WRITTEN_OK,
126 NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL,
127 NFSIOS_FSCACHE_PAGES_UNCACHED,
128 __NFSIOS_FSCACHEMAX,
129};
130
119#endif /* _LINUX_NFS_IOSTAT */ 131#endif /* _LINUX_NFS_IOSTAT */
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h
index 04b355c801d8..5bccaab81056 100644
--- a/include/linux/nfsd/cache.h
+++ b/include/linux/nfsd/cache.h
@@ -76,4 +76,12 @@ void nfsd_reply_cache_shutdown(void);
76int nfsd_cache_lookup(struct svc_rqst *, int); 76int nfsd_cache_lookup(struct svc_rqst *, int);
77void nfsd_cache_update(struct svc_rqst *, int, __be32 *); 77void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
78 78
79#ifdef CONFIG_NFSD_V4
80void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp);
81#else /* CONFIG_NFSD_V4 */
82static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
83{
84}
85#endif /* CONFIG_NFSD_V4 */
86
79#endif /* NFSCACHE_H */ 87#endif /* NFSCACHE_H */
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
index e19f45991b2e..2b49d676d0c9 100644
--- a/include/linux/nfsd/nfsd.h
+++ b/include/linux/nfsd/nfsd.h
@@ -23,7 +23,7 @@
23/* 23/*
24 * nfsd version 24 * nfsd version
25 */ 25 */
26#define NFSD_SUPPORTED_MINOR_VERSION 0 26#define NFSD_SUPPORTED_MINOR_VERSION 1
27 27
28/* 28/*
29 * Flags for nfsd_permission 29 * Flags for nfsd_permission
@@ -53,6 +53,7 @@ typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int);
53extern struct svc_program nfsd_program; 53extern struct svc_program nfsd_program;
54extern struct svc_version nfsd_version2, nfsd_version3, 54extern struct svc_version nfsd_version2, nfsd_version3,
55 nfsd_version4; 55 nfsd_version4;
56extern u32 nfsd_supported_minorversion;
56extern struct mutex nfsd_mutex; 57extern struct mutex nfsd_mutex;
57extern struct svc_serv *nfsd_serv; 58extern struct svc_serv *nfsd_serv;
58 59
@@ -105,7 +106,7 @@ void nfsd_close(struct file *);
105__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *, 106__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *,
106 loff_t, struct kvec *, int, unsigned long *); 107 loff_t, struct kvec *, int, unsigned long *);
107__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, 108__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *,
108 loff_t, struct kvec *,int, unsigned long, int *); 109 loff_t, struct kvec *,int, unsigned long *, int *);
109__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, 110__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
110 char *, int *); 111 char *, int *);
111__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, 112__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
@@ -149,6 +150,7 @@ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
149 150
150enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; 151enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
151int nfsd_vers(int vers, enum vers_op change); 152int nfsd_vers(int vers, enum vers_op change);
153int nfsd_minorversion(u32 minorversion, enum vers_op change);
152void nfsd_reset_versions(void); 154void nfsd_reset_versions(void);
153int nfsd_create_serv(void); 155int nfsd_create_serv(void);
154 156
@@ -186,78 +188,119 @@ void nfsd_lockd_shutdown(void);
186/* 188/*
187 * These macros provide pre-xdr'ed values for faster operation. 189 * These macros provide pre-xdr'ed values for faster operation.
188 */ 190 */
189#define nfs_ok __constant_htonl(NFS_OK) 191#define nfs_ok cpu_to_be32(NFS_OK)
190#define nfserr_perm __constant_htonl(NFSERR_PERM) 192#define nfserr_perm cpu_to_be32(NFSERR_PERM)
191#define nfserr_noent __constant_htonl(NFSERR_NOENT) 193#define nfserr_noent cpu_to_be32(NFSERR_NOENT)
192#define nfserr_io __constant_htonl(NFSERR_IO) 194#define nfserr_io cpu_to_be32(NFSERR_IO)
193#define nfserr_nxio __constant_htonl(NFSERR_NXIO) 195#define nfserr_nxio cpu_to_be32(NFSERR_NXIO)
194#define nfserr_eagain __constant_htonl(NFSERR_EAGAIN) 196#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN)
195#define nfserr_acces __constant_htonl(NFSERR_ACCES) 197#define nfserr_acces cpu_to_be32(NFSERR_ACCES)
196#define nfserr_exist __constant_htonl(NFSERR_EXIST) 198#define nfserr_exist cpu_to_be32(NFSERR_EXIST)
197#define nfserr_xdev __constant_htonl(NFSERR_XDEV) 199#define nfserr_xdev cpu_to_be32(NFSERR_XDEV)
198#define nfserr_nodev __constant_htonl(NFSERR_NODEV) 200#define nfserr_nodev cpu_to_be32(NFSERR_NODEV)
199#define nfserr_notdir __constant_htonl(NFSERR_NOTDIR) 201#define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR)
200#define nfserr_isdir __constant_htonl(NFSERR_ISDIR) 202#define nfserr_isdir cpu_to_be32(NFSERR_ISDIR)
201#define nfserr_inval __constant_htonl(NFSERR_INVAL) 203#define nfserr_inval cpu_to_be32(NFSERR_INVAL)
202#define nfserr_fbig __constant_htonl(NFSERR_FBIG) 204#define nfserr_fbig cpu_to_be32(NFSERR_FBIG)
203#define nfserr_nospc __constant_htonl(NFSERR_NOSPC) 205#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC)
204#define nfserr_rofs __constant_htonl(NFSERR_ROFS) 206#define nfserr_rofs cpu_to_be32(NFSERR_ROFS)
205#define nfserr_mlink __constant_htonl(NFSERR_MLINK) 207#define nfserr_mlink cpu_to_be32(NFSERR_MLINK)
206#define nfserr_opnotsupp __constant_htonl(NFSERR_OPNOTSUPP) 208#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP)
207#define nfserr_nametoolong __constant_htonl(NFSERR_NAMETOOLONG) 209#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG)
208#define nfserr_notempty __constant_htonl(NFSERR_NOTEMPTY) 210#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY)
209#define nfserr_dquot __constant_htonl(NFSERR_DQUOT) 211#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT)
210#define nfserr_stale __constant_htonl(NFSERR_STALE) 212#define nfserr_stale cpu_to_be32(NFSERR_STALE)
211#define nfserr_remote __constant_htonl(NFSERR_REMOTE) 213#define nfserr_remote cpu_to_be32(NFSERR_REMOTE)
212#define nfserr_wflush __constant_htonl(NFSERR_WFLUSH) 214#define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH)
213#define nfserr_badhandle __constant_htonl(NFSERR_BADHANDLE) 215#define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE)
214#define nfserr_notsync __constant_htonl(NFSERR_NOT_SYNC) 216#define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC)
215#define nfserr_badcookie __constant_htonl(NFSERR_BAD_COOKIE) 217#define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE)
216#define nfserr_notsupp __constant_htonl(NFSERR_NOTSUPP) 218#define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP)
217#define nfserr_toosmall __constant_htonl(NFSERR_TOOSMALL) 219#define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL)
218#define nfserr_serverfault __constant_htonl(NFSERR_SERVERFAULT) 220#define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT)
219#define nfserr_badtype __constant_htonl(NFSERR_BADTYPE) 221#define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE)
220#define nfserr_jukebox __constant_htonl(NFSERR_JUKEBOX) 222#define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX)
221#define nfserr_denied __constant_htonl(NFSERR_DENIED) 223#define nfserr_denied cpu_to_be32(NFSERR_DENIED)
222#define nfserr_deadlock __constant_htonl(NFSERR_DEADLOCK) 224#define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK)
223#define nfserr_expired __constant_htonl(NFSERR_EXPIRED) 225#define nfserr_expired cpu_to_be32(NFSERR_EXPIRED)
224#define nfserr_bad_cookie __constant_htonl(NFSERR_BAD_COOKIE) 226#define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE)
225#define nfserr_same __constant_htonl(NFSERR_SAME) 227#define nfserr_same cpu_to_be32(NFSERR_SAME)
226#define nfserr_clid_inuse __constant_htonl(NFSERR_CLID_INUSE) 228#define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE)
227#define nfserr_stale_clientid __constant_htonl(NFSERR_STALE_CLIENTID) 229#define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID)
228#define nfserr_resource __constant_htonl(NFSERR_RESOURCE) 230#define nfserr_resource cpu_to_be32(NFSERR_RESOURCE)
229#define nfserr_moved __constant_htonl(NFSERR_MOVED) 231#define nfserr_moved cpu_to_be32(NFSERR_MOVED)
230#define nfserr_nofilehandle __constant_htonl(NFSERR_NOFILEHANDLE) 232#define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE)
231#define nfserr_minor_vers_mismatch __constant_htonl(NFSERR_MINOR_VERS_MISMATCH) 233#define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH)
232#define nfserr_share_denied __constant_htonl(NFSERR_SHARE_DENIED) 234#define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED)
233#define nfserr_stale_stateid __constant_htonl(NFSERR_STALE_STATEID) 235#define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID)
234#define nfserr_old_stateid __constant_htonl(NFSERR_OLD_STATEID) 236#define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID)
235#define nfserr_bad_stateid __constant_htonl(NFSERR_BAD_STATEID) 237#define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID)
236#define nfserr_bad_seqid __constant_htonl(NFSERR_BAD_SEQID) 238#define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID)
237#define nfserr_symlink __constant_htonl(NFSERR_SYMLINK) 239#define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK)
238#define nfserr_not_same __constant_htonl(NFSERR_NOT_SAME) 240#define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME)
239#define nfserr_restorefh __constant_htonl(NFSERR_RESTOREFH) 241#define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH)
240#define nfserr_attrnotsupp __constant_htonl(NFSERR_ATTRNOTSUPP) 242#define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP)
241#define nfserr_bad_xdr __constant_htonl(NFSERR_BAD_XDR) 243#define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR)
242#define nfserr_openmode __constant_htonl(NFSERR_OPENMODE) 244#define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE)
243#define nfserr_locks_held __constant_htonl(NFSERR_LOCKS_HELD) 245#define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD)
244#define nfserr_op_illegal __constant_htonl(NFSERR_OP_ILLEGAL) 246#define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL)
245#define nfserr_grace __constant_htonl(NFSERR_GRACE) 247#define nfserr_grace cpu_to_be32(NFSERR_GRACE)
246#define nfserr_no_grace __constant_htonl(NFSERR_NO_GRACE) 248#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE)
247#define nfserr_reclaim_bad __constant_htonl(NFSERR_RECLAIM_BAD) 249#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD)
248#define nfserr_badname __constant_htonl(NFSERR_BADNAME) 250#define nfserr_badname cpu_to_be32(NFSERR_BADNAME)
249#define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN) 251#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN)
250#define nfserr_locked __constant_htonl(NFSERR_LOCKED) 252#define nfserr_locked cpu_to_be32(NFSERR_LOCKED)
251#define nfserr_wrongsec __constant_htonl(NFSERR_WRONGSEC) 253#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC)
252#define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME) 254#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE)
255#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT)
256#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST)
257#define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION)
258#define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT)
259#define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY)
260#define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
261#define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED)
262#define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY)
263#define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER)
264#define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE)
265#define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT)
266#define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT)
267#define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE)
268#define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED)
269#define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS)
270#define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG)
271#define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG)
272#define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE)
273#define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP)
274#define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND)
275#define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS)
276#define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION)
277#define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP)
278#define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY)
279#define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE)
280#define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY)
281#define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT)
282#define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION)
283#define nfserr_encr_alg_unsupp cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP)
284#define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT)
285#define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP)
286#define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED)
287#define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE)
288#define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL)
289#define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG)
290#define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT)
291#define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED)
253 292
254/* error codes for internal use */ 293/* error codes for internal use */
255/* if a request fails due to kmalloc failure, it gets dropped. 294/* if a request fails due to kmalloc failure, it gets dropped.
256 * Client should resend eventually 295 * Client should resend eventually
257 */ 296 */
258#define nfserr_dropit __constant_htonl(30000) 297#define nfserr_dropit cpu_to_be32(30000)
259/* end-of-file indicator in readdir */ 298/* end-of-file indicator in readdir */
260#define nfserr_eof __constant_htonl(30001) 299#define nfserr_eof cpu_to_be32(30001)
300/* replay detected */
301#define nfserr_replay_me cpu_to_be32(11001)
302/* nfs41 replay detected */
303#define nfserr_replay_cache cpu_to_be32(11002)
261 304
262/* Check for dir entries '.' and '..' */ 305/* Check for dir entries '.' and '..' */
263#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.')) 306#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))
@@ -300,7 +343,7 @@ extern struct timeval nfssvc_boot;
300 * TIME_BACKUP (unlikely to be supported any time soon) 343 * TIME_BACKUP (unlikely to be supported any time soon)
301 * TIME_CREATE (unlikely to be supported any time soon) 344 * TIME_CREATE (unlikely to be supported any time soon)
302 */ 345 */
303#define NFSD_SUPPORTED_ATTRS_WORD0 \ 346#define NFSD4_SUPPORTED_ATTRS_WORD0 \
304(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \ 347(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \
305 | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \ 348 | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \
306 | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \ 349 | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \
@@ -312,7 +355,7 @@ extern struct timeval nfssvc_boot;
312 | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ 355 | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \
313 | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) 356 | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL)
314 357
315#define NFSD_SUPPORTED_ATTRS_WORD1 \ 358#define NFSD4_SUPPORTED_ATTRS_WORD1 \
316(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \ 359(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \
317 | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \ 360 | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \
318 | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \ 361 | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \
@@ -320,6 +363,35 @@ extern struct timeval nfssvc_boot;
320 | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \ 363 | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \
321 | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID) 364 | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID)
322 365
366#define NFSD4_SUPPORTED_ATTRS_WORD2 0
367
368#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \
369 NFSD4_SUPPORTED_ATTRS_WORD0
370
371#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \
372 NFSD4_SUPPORTED_ATTRS_WORD1
373
374#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \
375 (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
376
377static inline u32 nfsd_suppattrs0(u32 minorversion)
378{
379 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
380 : NFSD4_SUPPORTED_ATTRS_WORD0;
381}
382
383static inline u32 nfsd_suppattrs1(u32 minorversion)
384{
385 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1
386 : NFSD4_SUPPORTED_ATTRS_WORD1;
387}
388
389static inline u32 nfsd_suppattrs2(u32 minorversion)
390{
391 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2
392 : NFSD4_SUPPORTED_ATTRS_WORD2;
393}
394
323/* These will return ERR_INVAL if specified in GETATTR or READDIR. */ 395/* These will return ERR_INVAL if specified in GETATTR or READDIR. */
324#define NFSD_WRITEONLY_ATTRS_WORD1 \ 396#define NFSD_WRITEONLY_ATTRS_WORD1 \
325(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) 397(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
@@ -330,6 +402,19 @@ extern struct timeval nfssvc_boot;
330#define NFSD_WRITEABLE_ATTRS_WORD1 \ 402#define NFSD_WRITEABLE_ATTRS_WORD1 \
331(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ 403(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \
332 | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) 404 | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
405#define NFSD_WRITEABLE_ATTRS_WORD2 0
406
407#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \
408 NFSD_WRITEABLE_ATTRS_WORD0
409/*
410 * we currently store the exclusive create verifier in the v_{a,m}time
411 * attributes so the client can't set these at create time using EXCLUSIVE4_1
412 */
413#define NFSD_SUPPATTR_EXCLCREAT_WORD1 \
414 (NFSD_WRITEABLE_ATTRS_WORD1 & \
415 ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET))
416#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \
417 NFSD_WRITEABLE_ATTRS_WORD2
333 418
334#endif /* CONFIG_NFSD_V4 */ 419#endif /* CONFIG_NFSD_V4 */
335 420
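
The nfsd_suppattrs{0,1,2}() helpers above pick the v4.0 or v4.1 attribute word for the compound's minorversion, which is how the third bitmap word (FATTR4_WORD2_SUPPATTR_EXCLCREAT) is only advertised to NFSv4.1 clients. A minimal sketch of a caller filling the three-word bitmap, assuming CONFIG_NFSD_V4; the function name is illustrative:

/* Illustrative sketch only, not part of the diff above. */
#include <linux/types.h>

static void example_fill_supported_attrs(u32 minorversion, u32 bmval[3])
{
	bmval[0] = nfsd_suppattrs0(minorversion);
	bmval[1] = nfsd_suppattrs1(minorversion);
	bmval[2] = nfsd_suppattrs2(minorversion);
}
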
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index fa317f6c154b..afa19016c4a8 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -269,6 +269,13 @@ fh_copy(struct svc_fh *dst, struct svc_fh *src)
269 return dst; 269 return dst;
270} 270}
271 271
272static inline void
273fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
274{
275 dst->fh_size = src->fh_size;
276 memcpy(&dst->fh_base, &src->fh_base, src->fh_size);
277}
278
272static __inline__ struct svc_fh * 279static __inline__ struct svc_fh *
273fh_init(struct svc_fh *fhp, int maxsize) 280fh_init(struct svc_fh *fhp, int maxsize)
274{ 281{
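
fh_copy_shallow() above copies only the raw handle bytes, with no dentry or export state attached, so it suits code that caches a file handle, such as the replay record that later in this series turns into a struct knfsd_fh. A hedged sketch; example_cache_openfh() is not a real helper:

/* Illustrative sketch only, not part of the diff above. */
static inline void example_cache_openfh(struct nfs4_replay *rp,
					struct svc_fh *fhp)
{
	fh_copy_shallow(&rp->rp_openfh, &fhp->fh_handle);
}
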
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
index 128298c0362d..4d61c873feed 100644
--- a/include/linux/nfsd/state.h
+++ b/include/linux/nfsd/state.h
@@ -66,8 +66,7 @@ struct nfs4_cb_recall {
66 u32 cbr_ident; 66 u32 cbr_ident;
67 int cbr_trunc; 67 int cbr_trunc;
68 stateid_t cbr_stateid; 68 stateid_t cbr_stateid;
69 u32 cbr_fhlen; 69 struct knfsd_fh cbr_fh;
70 char cbr_fhval[NFS4_FHSIZE];
71 struct nfs4_delegation *cbr_dp; 70 struct nfs4_delegation *cbr_dp;
72}; 71};
73 72
@@ -86,8 +85,7 @@ struct nfs4_delegation {
86}; 85};
87 86
88#define dl_stateid dl_recall.cbr_stateid 87#define dl_stateid dl_recall.cbr_stateid
89#define dl_fhlen dl_recall.cbr_fhlen 88#define dl_fh dl_recall.cbr_fh
90#define dl_fhval dl_recall.cbr_fhval
91 89
92/* client delegation callback info */ 90/* client delegation callback info */
93struct nfs4_callback { 91struct nfs4_callback {
@@ -101,6 +99,64 @@ struct nfs4_callback {
101 struct rpc_clnt * cb_client; 99 struct rpc_clnt * cb_client;
102}; 100};
103 101
102/* Maximum number of slots per session. 128 is useful for long haul TCP */
103#define NFSD_MAX_SLOTS_PER_SESSION 128
104/* Maximum number of pages per slot cache entry */
105#define NFSD_PAGES_PER_SLOT 1
106/* Maximum number of operations per session compound */
107#define NFSD_MAX_OPS_PER_COMPOUND 16
108
109struct nfsd4_cache_entry {
110 __be32 ce_status;
111 struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */
112 struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1];
113 int ce_cachethis;
114 short ce_resused;
115 int ce_opcnt;
116 int ce_rpchdrlen;
117};
118
119struct nfsd4_slot {
120 bool sl_inuse;
121 u32 sl_seqid;
122 struct nfsd4_cache_entry sl_cache_entry;
123};
124
125struct nfsd4_session {
126 struct kref se_ref;
127 struct list_head se_hash; /* hash by sessionid */
128 struct list_head se_perclnt;
129 u32 se_flags;
130 struct nfs4_client *se_client; /* for expire_client */
131 struct nfs4_sessionid se_sessionid;
132 u32 se_fmaxreq_sz;
133 u32 se_fmaxresp_sz;
134 u32 se_fmaxresp_cached;
135 u32 se_fmaxops;
136 u32 se_fnumslots;
137 struct nfsd4_slot se_slots[]; /* forward channel slots */
138};
139
140static inline void
141nfsd4_put_session(struct nfsd4_session *ses)
142{
143 extern void free_session(struct kref *kref);
144 kref_put(&ses->se_ref, free_session);
145}
146
147static inline void
148nfsd4_get_session(struct nfsd4_session *ses)
149{
150 kref_get(&ses->se_ref);
151}
152
153/* formatted contents of nfs4_sessionid */
154struct nfsd4_sessionid {
155 clientid_t clientid;
156 u32 sequence;
157 u32 reserved;
158};
159
104#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */ 160#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */
105 161
106/* 162/*
@@ -132,6 +188,12 @@ struct nfs4_client {
132 struct nfs4_callback cl_callback; /* callback info */ 188 struct nfs4_callback cl_callback; /* callback info */
133 atomic_t cl_count; /* ref count */ 189 atomic_t cl_count; /* ref count */
134 u32 cl_firststate; /* recovery dir creation */ 190 u32 cl_firststate; /* recovery dir creation */
191
192 /* for nfs41 */
193 struct list_head cl_sessions;
194 struct nfsd4_slot cl_slot; /* create_session slot */
195 u32 cl_exchange_flags;
196 struct nfs4_sessionid cl_sessionid;
135}; 197};
136 198
137/* struct nfs4_client_reset 199/* struct nfs4_client_reset
@@ -168,8 +230,7 @@ struct nfs4_replay {
168 unsigned int rp_buflen; 230 unsigned int rp_buflen;
169 char *rp_buf; 231 char *rp_buf;
170 unsigned intrp_allocated; 232 unsigned intrp_allocated;
171 int rp_openfh_len; 233 struct knfsd_fh rp_openfh;
172 char rp_openfh[NFS4_FHSIZE];
173 char rp_ibuf[NFSD4_REPLAY_ISIZE]; 234 char rp_ibuf[NFSD4_REPLAY_ISIZE];
174}; 235};
175 236
@@ -217,7 +278,7 @@ struct nfs4_stateowner {
217* share_access, share_deny on the file. 278* share_access, share_deny on the file.
218*/ 279*/
219struct nfs4_file { 280struct nfs4_file {
220 struct kref fi_ref; 281 atomic_t fi_ref;
221 struct list_head fi_hash; /* hash by "struct inode *" */ 282 struct list_head fi_hash; /* hash by "struct inode *" */
222 struct list_head fi_stateids; 283 struct list_head fi_stateids;
223 struct list_head fi_delegations; 284 struct list_head fi_delegations;
@@ -259,14 +320,13 @@ struct nfs4_stateid {
259}; 320};
260 321
261/* flags for preprocess_seqid_op() */ 322/* flags for preprocess_seqid_op() */
262#define CHECK_FH 0x00000001 323#define HAS_SESSION 0x00000001
263#define CONFIRM 0x00000002 324#define CONFIRM 0x00000002
264#define OPEN_STATE 0x00000004 325#define OPEN_STATE 0x00000004
265#define LOCK_STATE 0x00000008 326#define LOCK_STATE 0x00000008
266#define RD_STATE 0x00000010 327#define RD_STATE 0x00000010
267#define WR_STATE 0x00000020 328#define WR_STATE 0x00000020
268#define CLOSE_STATE 0x00000040 329#define CLOSE_STATE 0x00000040
269#define DELEG_RET 0x00000080
270 330
271#define seqid_mutating_err(err) \ 331#define seqid_mutating_err(err) \
272 (((err) != nfserr_stale_clientid) && \ 332 (((err) != nfserr_stale_clientid) && \
@@ -274,7 +334,9 @@ struct nfs4_stateid {
274 ((err) != nfserr_stale_stateid) && \ 334 ((err) != nfserr_stale_stateid) && \
275 ((err) != nfserr_bad_stateid)) 335 ((err) != nfserr_bad_stateid))
276 336
277extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh, 337struct nfsd4_compound_state;
338
339extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
278 stateid_t *stateid, int flags, struct file **filp); 340 stateid_t *stateid, int flags, struct file **filp);
279extern void nfs4_lock_state(void); 341extern void nfs4_lock_state(void);
280extern void nfs4_unlock_state(void); 342extern void nfs4_unlock_state(void);
@@ -290,7 +352,7 @@ extern void nfsd4_init_recdir(char *recdir_name);
290extern int nfsd4_recdir_load(void); 352extern int nfsd4_recdir_load(void);
291extern void nfsd4_shutdown_recdir(void); 353extern void nfsd4_shutdown_recdir(void);
292extern int nfs4_client_to_reclaim(const char *name); 354extern int nfs4_client_to_reclaim(const char *name);
293extern int nfs4_has_reclaimed_state(const char *name); 355extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
294extern void nfsd4_recdir_purge_old(void); 356extern void nfsd4_recdir_purge_old(void);
295extern int nfsd4_create_clid_dir(struct nfs4_client *clp); 357extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
296extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); 358extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
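
struct nfsd4_session above ends in a flexible array of slots, so one allocation can be sized from the negotiated slot count, with nfsd4_get_session()/nfsd4_put_session() pairing around uses of it. A rough sketch under those assumptions; the allocator below is illustrative, not the server's real one:

/* Illustrative sketch only, not part of the diff above. */
#include <linux/slab.h>
#include <linux/kref.h>

static struct nfsd4_session *example_alloc_session(u32 numslots)
{
	struct nfsd4_session *ses;

	ses = kzalloc(sizeof(*ses) + numslots * sizeof(struct nfsd4_slot),
		      GFP_KERNEL);
	if (!ses)
		return NULL;

	ses->se_fnumslots = numslots;
	kref_init(&ses->se_ref);	/* dropped via nfsd4_put_session() */
	return ses;
}
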
diff --git a/include/linux/nfsd/stats.h b/include/linux/nfsd/stats.h
index 7678cfbe9960..2693ef647df6 100644
--- a/include/linux/nfsd/stats.h
+++ b/include/linux/nfsd/stats.h
@@ -11,6 +11,11 @@
11 11
12#include <linux/nfs4.h> 12#include <linux/nfs4.h>
13 13
 14/* thread usage wraps every million seconds (approx one fortnight) */
15#define NFSD_USAGE_WRAP (HZ*1000000)
16
17#ifdef __KERNEL__
18
14struct nfsd_stats { 19struct nfsd_stats {
15 unsigned int rchits; /* repcache hits */ 20 unsigned int rchits; /* repcache hits */
16 unsigned int rcmisses; /* repcache hits */ 21 unsigned int rcmisses; /* repcache hits */
@@ -35,10 +40,6 @@ struct nfsd_stats {
35 40
36}; 41};
37 42
 38/* thread usage wraps every million seconds (approx one fortnight) */
39#define NFSD_USAGE_WRAP (HZ*1000000)
40
41#ifdef __KERNEL__
42 43
43extern struct nfsd_stats nfsdstats; 44extern struct nfsd_stats nfsdstats;
44extern struct svc_stat nfsd_svcstats; 45extern struct svc_stat nfsd_svcstats;
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
index 27bd3e38ec5a..f80d6013fdc3 100644
--- a/include/linux/nfsd/xdr4.h
+++ b/include/linux/nfsd/xdr4.h
@@ -45,10 +45,22 @@
45#define XDR_LEN(n) (((n) + 3) & ~3) 45#define XDR_LEN(n) (((n) + 3) & ~3)
46 46
47struct nfsd4_compound_state { 47struct nfsd4_compound_state {
48 struct svc_fh current_fh; 48 struct svc_fh current_fh;
49 struct svc_fh save_fh; 49 struct svc_fh save_fh;
50 struct nfs4_stateowner *replay_owner; 50 struct nfs4_stateowner *replay_owner;
51}; 51 /* For sessions DRC */
52 struct nfsd4_session *session;
53 struct nfsd4_slot *slot;
54 __be32 *statp;
55 size_t iovlen;
56 u32 minorversion;
57 u32 status;
58};
59
60static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs)
61{
62 return cs->slot != NULL;
63}
52 64
53struct nfsd4_change_info { 65struct nfsd4_change_info {
54 u32 atomic; 66 u32 atomic;
@@ -90,7 +102,7 @@ struct nfsd4_create {
90 u32 specdata2; 102 u32 specdata2;
91 } dev; /* NF4BLK, NF4CHR */ 103 } dev; /* NF4BLK, NF4CHR */
92 } u; 104 } u;
93 u32 cr_bmval[2]; /* request */ 105 u32 cr_bmval[3]; /* request */
94 struct iattr cr_iattr; /* request */ 106 struct iattr cr_iattr; /* request */
95 struct nfsd4_change_info cr_cinfo; /* response */ 107 struct nfsd4_change_info cr_cinfo; /* response */
96 struct nfs4_acl *cr_acl; 108 struct nfs4_acl *cr_acl;
@@ -105,7 +117,7 @@ struct nfsd4_delegreturn {
105}; 117};
106 118
107struct nfsd4_getattr { 119struct nfsd4_getattr {
108 u32 ga_bmval[2]; /* request */ 120 u32 ga_bmval[3]; /* request */
109 struct svc_fh *ga_fhp; /* response */ 121 struct svc_fh *ga_fhp; /* response */
110}; 122};
111 123
@@ -206,11 +218,9 @@ struct nfsd4_open {
206 stateid_t op_delegate_stateid; /* request - response */ 218 stateid_t op_delegate_stateid; /* request - response */
207 u32 op_create; /* request */ 219 u32 op_create; /* request */
208 u32 op_createmode; /* request */ 220 u32 op_createmode; /* request */
209 u32 op_bmval[2]; /* request */ 221 u32 op_bmval[3]; /* request */
210 union { /* request */ 222 struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
211 struct iattr iattr; /* UNCHECKED4,GUARDED4 */ 223 nfs4_verifier verf; /* EXCLUSIVE4 */
212 nfs4_verifier verf; /* EXCLUSIVE4 */
213 } u;
214 clientid_t op_clientid; /* request */ 224 clientid_t op_clientid; /* request */
215 struct xdr_netobj op_owner; /* request */ 225 struct xdr_netobj op_owner; /* request */
216 u32 op_seqid; /* request */ 226 u32 op_seqid; /* request */
@@ -224,8 +234,8 @@ struct nfsd4_open {
224 struct nfs4_stateowner *op_stateowner; /* used during processing */ 234 struct nfs4_stateowner *op_stateowner; /* used during processing */
225 struct nfs4_acl *op_acl; 235 struct nfs4_acl *op_acl;
226}; 236};
227#define op_iattr u.iattr 237#define op_iattr iattr
228#define op_verf u.verf 238#define op_verf verf
229 239
230struct nfsd4_open_confirm { 240struct nfsd4_open_confirm {
231 stateid_t oc_req_stateid /* request */; 241 stateid_t oc_req_stateid /* request */;
@@ -259,7 +269,7 @@ struct nfsd4_readdir {
259 nfs4_verifier rd_verf; /* request */ 269 nfs4_verifier rd_verf; /* request */
260 u32 rd_dircount; /* request */ 270 u32 rd_dircount; /* request */
261 u32 rd_maxcount; /* request */ 271 u32 rd_maxcount; /* request */
262 u32 rd_bmval[2]; /* request */ 272 u32 rd_bmval[3]; /* request */
263 struct svc_rqst *rd_rqstp; /* response */ 273 struct svc_rqst *rd_rqstp; /* response */
264 struct svc_fh * rd_fhp; /* response */ 274 struct svc_fh * rd_fhp; /* response */
265 275
@@ -301,7 +311,7 @@ struct nfsd4_secinfo {
301 311
302struct nfsd4_setattr { 312struct nfsd4_setattr {
303 stateid_t sa_stateid; /* request */ 313 stateid_t sa_stateid; /* request */
304 u32 sa_bmval[2]; /* request */ 314 u32 sa_bmval[3]; /* request */
305 struct iattr sa_iattr; /* request */ 315 struct iattr sa_iattr; /* request */
306 struct nfs4_acl *sa_acl; 316 struct nfs4_acl *sa_acl;
307}; 317};
@@ -327,7 +337,7 @@ struct nfsd4_setclientid_confirm {
327 337
328/* also used for NVERIFY */ 338/* also used for NVERIFY */
329struct nfsd4_verify { 339struct nfsd4_verify {
330 u32 ve_bmval[2]; /* request */ 340 u32 ve_bmval[3]; /* request */
331 u32 ve_attrlen; /* request */ 341 u32 ve_attrlen; /* request */
332 char * ve_attrval; /* request */ 342 char * ve_attrval; /* request */
333}; 343};
@@ -344,6 +354,54 @@ struct nfsd4_write {
344 nfs4_verifier wr_verifier; /* response */ 354 nfs4_verifier wr_verifier; /* response */
345}; 355};
346 356
357struct nfsd4_exchange_id {
358 nfs4_verifier verifier;
359 struct xdr_netobj clname;
360 u32 flags;
361 clientid_t clientid;
362 u32 seqid;
363 int spa_how;
364};
365
366struct nfsd4_channel_attrs {
367 u32 headerpadsz;
368 u32 maxreq_sz;
369 u32 maxresp_sz;
370 u32 maxresp_cached;
371 u32 maxops;
372 u32 maxreqs;
373 u32 nr_rdma_attrs;
374 u32 rdma_attrs;
375};
376
377struct nfsd4_create_session {
378 clientid_t clientid;
379 struct nfs4_sessionid sessionid;
380 u32 seqid;
381 u32 flags;
382 struct nfsd4_channel_attrs fore_channel;
383 struct nfsd4_channel_attrs back_channel;
384 u32 callback_prog;
385 u32 uid;
386 u32 gid;
387};
388
389struct nfsd4_sequence {
390 struct nfs4_sessionid sessionid; /* request/response */
391 u32 seqid; /* request/response */
392 u32 slotid; /* request/response */
393 u32 maxslots; /* request/response */
394 u32 cachethis; /* request */
395#if 0
396 u32 target_maxslots; /* response */
397 u32 status_flags; /* response */
398#endif /* not yet */
399};
400
401struct nfsd4_destroy_session {
402 struct nfs4_sessionid sessionid;
403};
404
347struct nfsd4_op { 405struct nfsd4_op {
348 int opnum; 406 int opnum;
349 __be32 status; 407 __be32 status;
@@ -378,6 +436,12 @@ struct nfsd4_op {
378 struct nfsd4_verify verify; 436 struct nfsd4_verify verify;
379 struct nfsd4_write write; 437 struct nfsd4_write write;
380 struct nfsd4_release_lockowner release_lockowner; 438 struct nfsd4_release_lockowner release_lockowner;
439
440 /* NFSv4.1 */
441 struct nfsd4_exchange_id exchange_id;
442 struct nfsd4_create_session create_session;
443 struct nfsd4_destroy_session destroy_session;
444 struct nfsd4_sequence sequence;
381 } u; 445 } u;
382 struct nfs4_replay * replay; 446 struct nfs4_replay * replay;
383}; 447};
@@ -416,9 +480,22 @@ struct nfsd4_compoundres {
416 u32 taglen; 480 u32 taglen;
417 char * tag; 481 char * tag;
418 u32 opcnt; 482 u32 opcnt;
419 __be32 * tagp; /* where to encode tag and opcount */ 483 __be32 * tagp; /* tag, opcount encode location */
484 struct nfsd4_compound_state cstate;
420}; 485};
421 486
487static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
488{
489 struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
490 return args->opcnt == 1;
491}
492
493static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
494{
495 return !resp->cstate.slot->sl_cache_entry.ce_cachethis ||
496 nfsd4_is_solo_sequence(resp);
497}
498
422#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) 499#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
423 500
424static inline void 501static inline void
@@ -448,7 +525,23 @@ extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
448extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 525extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
449 struct nfsd4_compound_state *, 526 struct nfsd4_compound_state *,
450 struct nfsd4_setclientid_confirm *setclientid_confirm); 527 struct nfsd4_setclientid_confirm *setclientid_confirm);
451extern __be32 nfsd4_process_open1(struct nfsd4_open *open); 528extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp);
529extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
530 struct nfsd4_sequence *seq);
531extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
532 struct nfsd4_compound_state *,
 533 struct nfsd4_exchange_id *);
 534extern __be32 nfsd4_create_session(struct svc_rqst *,
535 struct nfsd4_compound_state *,
536 struct nfsd4_create_session *);
537extern __be32 nfsd4_sequence(struct svc_rqst *,
538 struct nfsd4_compound_state *,
539 struct nfsd4_sequence *);
540extern __be32 nfsd4_destroy_session(struct svc_rqst *,
541 struct nfsd4_compound_state *,
542 struct nfsd4_destroy_session *);
543extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
544 struct nfsd4_open *open);
452extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, 545extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
453 struct svc_fh *current_fh, struct nfsd4_open *open); 546 struct svc_fh *current_fh, struct nfsd4_open *open);
454extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, 547extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
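
With the per-compound NFSv4.1 state now carried in struct nfsd4_compound_state, an operation handler can key off nfsd4_has_session() rather than threading extra arguments through. A small sketch; the predicate below is illustrative, not part of the patch:

/* Illustrative sketch only, not part of the diff above. */
static bool example_needs_seqid_replay(struct nfsd4_compound_state *cstate)
{
	/* with a v4.1 session the reply cache in the slot handles replays,
	 * so the old per-stateowner seqid replay path is only needed when
	 * no session (and hence no slot) is bound to the compound */
	return !nfsd4_has_session(cstate);
}
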
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
new file mode 100644
index 000000000000..79fec6af3f9f
--- /dev/null
+++ b/include/linux/nilfs2_fs.h
@@ -0,0 +1,801 @@
1/*
2 * nilfs2_fs.h - NILFS2 on-disk structures and common declarations.
3 *
4 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 *
20 * Written by Koji Sato <koji@osrg.net>
21 * Ryusuke Konishi <ryusuke@osrg.net>
22 */
23/*
24 * linux/include/linux/ext2_fs.h
25 *
26 * Copyright (C) 1992, 1993, 1994, 1995
27 * Remy Card (card@masi.ibp.fr)
28 * Laboratoire MASI - Institut Blaise Pascal
29 * Universite Pierre et Marie Curie (Paris VI)
30 *
31 * from
32 *
33 * linux/include/linux/minix_fs.h
34 *
35 * Copyright (C) 1991, 1992 Linus Torvalds
36 */
37
38#ifndef _LINUX_NILFS_FS_H
39#define _LINUX_NILFS_FS_H
40
41#include <linux/types.h>
42#include <linux/ioctl.h>
43
44/*
45 * Inode flags stored in nilfs_inode and on-memory nilfs inode
46 *
47 * We define these flags based on ext2-fs because of the
48 * compatibility reason; to avoid problems in chattr(1)
49 */
50#define NILFS_SECRM_FL 0x00000001 /* Secure deletion */
51#define NILFS_UNRM_FL 0x00000002 /* Undelete */
52#define NILFS_SYNC_FL 0x00000008 /* Synchronous updates */
53#define NILFS_IMMUTABLE_FL 0x00000010 /* Immutable file */
54#define NILFS_APPEND_FL 0x00000020 /* writes to file may only append */
55#define NILFS_NODUMP_FL 0x00000040 /* do not dump file */
56#define NILFS_NOATIME_FL 0x00000080 /* do not update atime */
57/* Reserved for compression usage... */
58#define NILFS_NOTAIL_FL 0x00008000 /* file tail should not be merged */
59#define NILFS_DIRSYNC_FL 0x00010000 /* dirsync behaviour */
60
61#define NILFS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
62#define NILFS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
63
64
65#define NILFS_INODE_BMAP_SIZE 7
66/**
67 * struct nilfs_inode - structure of an inode on disk
68 * @i_blocks: blocks count
69 * @i_size: size in bytes
70 * @i_ctime: creation time (seconds)
71 * @i_mtime: modification time (seconds)
72 * @i_ctime_nsec: creation time (nano seconds)
73 * @i_mtime_nsec: modification time (nano seconds)
74 * @i_uid: user id
75 * @i_gid: group id
76 * @i_mode: file mode
77 * @i_links_count: links count
78 * @i_flags: file flags
79 * @i_bmap: block mapping
80 * @i_xattr: extended attributes
81 * @i_generation: file generation (for NFS)
82 * @i_pad: padding
83 */
84struct nilfs_inode {
85 __le64 i_blocks;
86 __le64 i_size;
87 __le64 i_ctime;
88 __le64 i_mtime;
89 __le32 i_ctime_nsec;
90 __le32 i_mtime_nsec;
91 __le32 i_uid;
92 __le32 i_gid;
93 __le16 i_mode;
94 __le16 i_links_count;
95 __le32 i_flags;
96 __le64 i_bmap[NILFS_INODE_BMAP_SIZE];
97#define i_device_code i_bmap[0]
98 __le64 i_xattr;
99 __le32 i_generation;
100 __le32 i_pad;
101};
102
103/**
104 * struct nilfs_super_root - structure of super root
105 * @sr_sum: check sum
106 * @sr_bytes: byte count of the structure
107 * @sr_flags: flags (reserved)
108 * @sr_nongc_ctime: write time of the last segment not for cleaner operation
109 * @sr_dat: DAT file inode
110 * @sr_cpfile: checkpoint file inode
111 * @sr_sufile: segment usage file inode
112 */
113struct nilfs_super_root {
114 __le32 sr_sum;
115 __le16 sr_bytes;
116 __le16 sr_flags;
117 __le64 sr_nongc_ctime;
118 struct nilfs_inode sr_dat;
119 struct nilfs_inode sr_cpfile;
120 struct nilfs_inode sr_sufile;
121};
122
123#define NILFS_SR_MDT_OFFSET(inode_size, i) \
124 ((unsigned long)&((struct nilfs_super_root *)0)->sr_dat + \
125 (inode_size) * (i))
126#define NILFS_SR_DAT_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 0)
127#define NILFS_SR_CPFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 1)
128#define NILFS_SR_SUFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 2)
129#define NILFS_SR_BYTES (sizeof(struct nilfs_super_root))
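As a worked example of the offset macros above (assuming the structures are packed as laid out, with no compiler padding): the fields before sr_dat occupy 4 + 2 + 2 + 8 = 16 bytes, so with a 128-byte on-disk inode (the size of struct nilfs_inode as defined here) the DAT, checkpoint-file and segment-usage-file inodes sit at byte offsets 16, 144 and 272 of the super root, and NILFS_SR_BYTES covers all three plus the header fields.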
130
131/*
132 * Maximal mount counts
133 */
134#define NILFS_DFL_MAX_MNT_COUNT 50 /* 50 mounts */
135
136/*
137 * File system states (sbp->s_state, nilfs->ns_mount_state)
138 */
139#define NILFS_VALID_FS 0x0001 /* Unmounted cleanly */
140#define NILFS_ERROR_FS 0x0002 /* Errors detected */
141#define NILFS_RESIZE_FS 0x0004 /* Resize required */
142
143/*
144 * Mount flags (sbi->s_mount_opt)
145 */
146#define NILFS_MOUNT_ERROR_MODE 0x0070 /* Error mode mask */
147#define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */
148#define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */
149#define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */
150#define NILFS_MOUNT_SNAPSHOT 0x0080 /* Snapshot flag */
151#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */
152#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order
153 semantics also for data */
154
155
156/**
157 * struct nilfs_super_block - structure of super block on disk
158 */
159struct nilfs_super_block {
160 __le32 s_rev_level; /* Revision level */
161 __le16 s_minor_rev_level; /* minor revision level */
162 __le16 s_magic; /* Magic signature */
163
164 __le16 s_bytes; /* Bytes count of CRC calculation
165 for this structure. s_reserved
166 is excluded. */
167 __le16 s_flags; /* flags */
168 __le32 s_crc_seed; /* Seed value of CRC calculation */
169 __le32 s_sum; /* Check sum of super block */
170
171 __le32 s_log_block_size; /* Block size represented as follows
172 blocksize =
173 1 << (s_log_block_size + 10) */
174 __le64 s_nsegments; /* Number of segments in filesystem */
175 __le64 s_dev_size; /* block device size in bytes */
176 __le64 s_first_data_block; /* 1st seg disk block number */
177 __le32 s_blocks_per_segment; /* number of blocks per full segment */
178 __le32 s_r_segments_percentage; /* Reserved segments percentage */
179
180 __le64 s_last_cno; /* Last checkpoint number */
181 __le64 s_last_pseg; /* disk block addr pseg written last */
182 __le64 s_last_seq; /* seq. number of seg written last */
183 __le64 s_free_blocks_count; /* Free blocks count */
184
185 __le64 s_ctime; /* Creation time (execution time of
186 newfs) */
187 __le64 s_mtime; /* Mount time */
188 __le64 s_wtime; /* Write time */
189 __le16 s_mnt_count; /* Mount count */
190 __le16 s_max_mnt_count; /* Maximal mount count */
191 __le16 s_state; /* File system state */
192 __le16 s_errors; /* Behaviour when detecting errors */
193 __le64 s_lastcheck; /* time of last check */
194
195 __le32 s_checkinterval; /* max. time between checks */
196 __le32 s_creator_os; /* OS */
197 __le16 s_def_resuid; /* Default uid for reserved blocks */
198 __le16 s_def_resgid; /* Default gid for reserved blocks */
199 __le32 s_first_ino; /* First non-reserved inode */
200
201 __le16 s_inode_size; /* Size of an inode */
202 __le16 s_dat_entry_size; /* Size of a dat entry */
203 __le16 s_checkpoint_size; /* Size of a checkpoint */
204 __le16 s_segment_usage_size; /* Size of a segment usage */
205
206 __u8 s_uuid[16]; /* 128-bit uuid for volume */
207 char s_volume_name[16]; /* volume name */
208 char s_last_mounted[64]; /* directory where last mounted */
209
210 __le32 s_c_interval; /* Commit interval of segment */
211 __le32 s_c_block_max; /* Threshold of data amount for
212 the segment construction */
213 __u32 s_reserved[192]; /* padding to the end of the block */
214};
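The s_log_block_size field above encodes the block size logarithmically. A minimal illustrative decoder follows; the helper name is hypothetical and not part of this header, and le32_to_cpu() is the usual kernel endianness helper.

	/* Illustrative only: recover the block size from s_log_block_size,
	 * per the comment above (blocksize = 1 << (s_log_block_size + 10)). */
	static inline unsigned long
	nilfs_example_block_size(const struct nilfs_super_block *sbp)
	{
		return 1UL << (le32_to_cpu(sbp->s_log_block_size) + 10);
	}

For example, s_log_block_size == 2 corresponds to a 4096-byte block.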
215
216/*
217 * Codes for operating systems
218 */
219#define NILFS_OS_LINUX 0
220/* Codes from 1 to 4 are reserved to keep compatibility with ext2 creator-OS */
221
222/*
223 * Revision levels
224 */
225#define NILFS_CURRENT_REV 2 /* current major revision */
226#define NILFS_MINOR_REV 0 /* minor revision */
227
228/*
229 * Bytes count of super_block for CRC-calculation
230 */
231#define NILFS_SB_BYTES \
232 ((long)&((struct nilfs_super_block *)0)->s_reserved)
233
234/*
235 * Special inode number
236 */
237#define NILFS_ROOT_INO 2 /* Root file inode */
238#define NILFS_DAT_INO 3 /* DAT file */
239#define NILFS_CPFILE_INO 4 /* checkpoint file */
240#define NILFS_SUFILE_INO 5 /* segment usage file */
241#define NILFS_IFILE_INO 6 /* ifile */
242#define NILFS_ATIME_INO 7 /* Atime file (reserved) */
243#define NILFS_XATTR_INO 8 /* Xattribute file (reserved) */
244#define NILFS_SKETCH_INO 10 /* Sketch file */
245#define NILFS_USER_INO 11 /* First user's file inode number */
246
247#define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */
248#define NILFS_SUPER_MAGIC 0x3434 /* NILFS filesystem magic number */
249
250#define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in
251 a full segment */
252#define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in
253 a partial segment */
254#define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved
255 segments */
256
257/*
258 * bytes offset of secondary super block
259 */
260#define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12)
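As a worked example of the macro above: for a 1 GiB device (devsize = 1073741824), ((1073741824 >> 12) - 1) << 12 = 1073737728, i.e. the secondary super block is placed at the last 4096-byte boundary strictly before the end of the device.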
261
262/*
263 * Maximal count of links to a file
264 */
265#define NILFS_LINK_MAX 32000
266
267/*
268 * Structure of a directory entry
269 * (Same as ext2)
270 */
271
272#define NILFS_NAME_LEN 255
273
274/*
275 * The new version of the directory entry. Since V0 structures are
276 * stored in intel byte order, and the name_len field could never be
277 * bigger than 255 chars, it's safe to reclaim the extra byte for the
278 * file_type field.
279 */
280struct nilfs_dir_entry {
281 __le64 inode; /* Inode number */
282 __le16 rec_len; /* Directory entry length */
283 __u8 name_len; /* Name length */
284 __u8 file_type;
285 char name[NILFS_NAME_LEN]; /* File name */
286 char pad;
287};
288
289/*
290 * NILFS directory file types. Only the low 3 bits are used. The
291 * other bits are reserved for now.
292 */
293enum {
294 NILFS_FT_UNKNOWN,
295 NILFS_FT_REG_FILE,
296 NILFS_FT_DIR,
297 NILFS_FT_CHRDEV,
298 NILFS_FT_BLKDEV,
299 NILFS_FT_FIFO,
300 NILFS_FT_SOCK,
301 NILFS_FT_SYMLINK,
302 NILFS_FT_MAX
303};
304
305/*
306 * NILFS_DIR_PAD defines the directory entry boundaries
307 *
308 * NOTE: It must be a multiple of 8
309 */
310#define NILFS_DIR_PAD 8
311#define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1)
312#define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \
313 ~NILFS_DIR_ROUND)
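Worked example: the fixed part of struct nilfs_dir_entry before the name is 8 + 2 + 1 + 1 = 12 bytes, so NILFS_DIR_REC_LEN(5) = (5 + 12 + 7) & ~7 = 24 and NILFS_DIR_REC_LEN(1) = 16; record lengths are always rounded up to a multiple of 8.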
314
315
316/**
317 * struct nilfs_finfo - file information
318 * @fi_ino: inode number
319 * @fi_cno: checkpoint number
320 * @fi_nblocks: number of blocks (including intermediate blocks)
321 * @fi_ndatablk: number of file data blocks
322 */
323struct nilfs_finfo {
324 __le64 fi_ino;
325 __le64 fi_cno;
326 __le32 fi_nblocks;
327 __le32 fi_ndatablk;
328 /* array of virtual block numbers */
329};
330
331/**
332 * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned
333 * @bi_vblocknr: virtual block number
334 * @bi_blkoff: block offset
335 */
336struct nilfs_binfo_v {
337 __le64 bi_vblocknr;
338 __le64 bi_blkoff;
339};
340
341/**
342 * struct nilfs_binfo_dat - information for the block which belongs to the DAT file
343 * @bi_blkoff: block offset
344 * @bi_level: level
345 * @bi_pad: padding
346 */
347struct nilfs_binfo_dat {
348 __le64 bi_blkoff;
349 __u8 bi_level;
350 __u8 bi_pad[7];
351};
352
353/**
354 * union nilfs_binfo - block information
355 * @bi_v: nilfs_binfo_v structure
356 * @bi_dat: nilfs_binfo_dat structure
357 */
358union nilfs_binfo {
359 struct nilfs_binfo_v bi_v;
360 struct nilfs_binfo_dat bi_dat;
361};
362
363/**
364 * struct nilfs_segment_summary - segment summary
365 * @ss_datasum: checksum of data
366 * @ss_sumsum: checksum of segment summary
367 * @ss_magic: magic number
368 * @ss_bytes: size of this structure in bytes
369 * @ss_flags: flags
370 * @ss_seq: sequence number
371 * @ss_create: creation timestamp
372 * @ss_next: next segment
373 * @ss_nblocks: number of blocks
374 * @ss_nfinfo: number of finfo structures
375 * @ss_sumbytes: total size of segment summary in bytes
376 * @ss_pad: padding
377 */
378struct nilfs_segment_summary {
379 __le32 ss_datasum;
380 __le32 ss_sumsum;
381 __le32 ss_magic;
382 __le16 ss_bytes;
383 __le16 ss_flags;
384 __le64 ss_seq;
385 __le64 ss_create;
386 __le64 ss_next;
387 __le32 ss_nblocks;
388 __le32 ss_nfinfo;
389 __le32 ss_sumbytes;
390 __le32 ss_pad;
391 /* array of finfo structures */
392};
393
394#define NILFS_SEGSUM_MAGIC 0x1eaffa11 /* segment summary magic number */
395
396/*
397 * Segment summary flags
398 */
399#define NILFS_SS_LOGBGN 0x0001 /* begins a logical segment */
400#define NILFS_SS_LOGEND 0x0002 /* ends a logical segment */
401#define NILFS_SS_SR 0x0004 /* has super root */
402#define NILFS_SS_SYNDT 0x0008 /* includes data only updates */
403#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */
404
405/**
406 * struct nilfs_palloc_group_desc - block group descriptor
407 * @pg_nfrees: number of free entries in block group
408 */
409struct nilfs_palloc_group_desc {
410 __le32 pg_nfrees;
411};
412
413/**
414 * struct nilfs_dat_entry - disk address translation entry
415 * @de_blocknr: block number
416 * @de_start: start checkpoint number
417 * @de_end: end checkpoint number
418 * @de_rsv: reserved for future use
419 */
420struct nilfs_dat_entry {
421 __le64 de_blocknr;
422 __le64 de_start;
423 __le64 de_end;
424 __le64 de_rsv;
425};
426
427/**
428 * struct nilfs_dat_group_desc - block group descriptor
429 * @dg_nfrees: number of free virtual block numbers in block group
430 */
431struct nilfs_dat_group_desc {
432 __le32 dg_nfrees;
433};
434
435
436/**
437 * struct nilfs_snapshot_list - snapshot list
438 * @ssl_next: next checkpoint number on snapshot list
439 * @ssl_prev: previous checkpoint number on snapshot list
440 */
441struct nilfs_snapshot_list {
442 __le64 ssl_next;
443 __le64 ssl_prev;
444};
445
446/**
447 * struct nilfs_checkpoint - checkpoint structure
448 * @cp_flags: flags
449 * @cp_checkpoints_count: checkpoints count in a block
450 * @cp_snapshot_list: snapshot list
451 * @cp_cno: checkpoint number
452 * @cp_create: creation timestamp
453 * @cp_nblk_inc: number of blocks incremented by this checkpoint
454 * @cp_inodes_count: inodes count
455 * @cp_blocks_count: blocks count
456 * @cp_ifile_inode: inode of ifile
457 */
458struct nilfs_checkpoint {
459 __le32 cp_flags;
460 __le32 cp_checkpoints_count;
461 struct nilfs_snapshot_list cp_snapshot_list;
462 __le64 cp_cno;
463 __le64 cp_create;
464 __le64 cp_nblk_inc;
465 __le64 cp_inodes_count;
466 __le64 cp_blocks_count; /* Reserved (might be deleted) */
467
468 /* Do not change the byte offset of ifile inode.
469 To keep the on-disk format compatible,
470 additional fields should be added after cp_ifile_inode. */
471 struct nilfs_inode cp_ifile_inode;
472};
473
474/* checkpoint flags */
475enum {
476 NILFS_CHECKPOINT_SNAPSHOT,
477 NILFS_CHECKPOINT_INVALID,
478 NILFS_CHECKPOINT_SKETCH,
479 NILFS_CHECKPOINT_MINOR,
480};
481
482#define NILFS_CHECKPOINT_FNS(flag, name) \
483static inline void \
484nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \
485{ \
486 cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \
487 (1UL << NILFS_CHECKPOINT_##flag)); \
488} \
489static inline void \
490nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \
491{ \
492 cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \
493 ~(1UL << NILFS_CHECKPOINT_##flag)); \
494} \
495static inline int \
496nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \
497{ \
498 return !!(le32_to_cpu(cp->cp_flags) & \
499 (1UL << NILFS_CHECKPOINT_##flag)); \
500}
501
502NILFS_CHECKPOINT_FNS(SNAPSHOT, snapshot)
503NILFS_CHECKPOINT_FNS(INVALID, invalid)
504NILFS_CHECKPOINT_FNS(MINOR, minor)
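A minimal usage sketch of the accessors generated by NILFS_CHECKPOINT_FNS() above; the wrapper function is illustrative and not part of this header.

	/* Illustrative only: mark a checkpoint as a snapshot and read the
	 * flag back through the generated helpers. */
	static inline int nilfs_example_mark_snapshot(struct nilfs_checkpoint *cp)
	{
		nilfs_checkpoint_set_snapshot(cp);
		return nilfs_checkpoint_snapshot(cp);	/* now returns 1 */
	}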
505
506/**
507 * struct nilfs_cpinfo - checkpoint information
508 * @ci_flags: flags
509 * @ci_pad: padding
510 * @ci_cno: checkpoint number
511 * @ci_create: creation timestamp
512 * @ci_nblk_inc: number of blocks incremented by this checkpoint
513 * @ci_inodes_count: inodes count
514 * @ci_blocks_count: blocks count
515 * @ci_next: next checkpoint number in snapshot list
516 */
517struct nilfs_cpinfo {
518 __u32 ci_flags;
519 __u32 ci_pad;
520 __u64 ci_cno;
521 __u64 ci_create;
522 __u64 ci_nblk_inc;
523 __u64 ci_inodes_count;
524 __u64 ci_blocks_count;
525 __u64 ci_next;
526};
527
528#define NILFS_CPINFO_FNS(flag, name) \
529static inline int \
530nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \
531{ \
532 return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \
533}
534
535NILFS_CPINFO_FNS(SNAPSHOT, snapshot)
536NILFS_CPINFO_FNS(INVALID, invalid)
537NILFS_CPINFO_FNS(MINOR, minor)
538
539
540/**
541 * struct nilfs_cpfile_header - checkpoint file header
542 * @ch_ncheckpoints: number of checkpoints
543 * @ch_nsnapshots: number of snapshots
544 * @ch_snapshot_list: snapshot list
545 */
546struct nilfs_cpfile_header {
547 __le64 ch_ncheckpoints;
548 __le64 ch_nsnapshots;
549 struct nilfs_snapshot_list ch_snapshot_list;
550};
551
552#define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \
553 ((sizeof(struct nilfs_cpfile_header) + \
554 sizeof(struct nilfs_checkpoint) - 1) / \
555 sizeof(struct nilfs_checkpoint))
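The expression above is a ceiling division, giving the index of the first checkpoint slot that lies beyond the header: with the sizes defined here (32-byte header, 192-byte checkpoint including the embedded 128-byte inode) it evaluates to 1, assuming the structures carry no compiler padding.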
556
557/**
558 * struct nilfs_segment_usage - segment usage
559 * @su_lastmod: last modified timestamp
560 * @su_nblocks: number of blocks in segment
561 * @su_flags: flags
562 */
563struct nilfs_segment_usage {
564 __le64 su_lastmod;
565 __le32 su_nblocks;
566 __le32 su_flags;
567};
568
569/* segment usage flag */
570enum {
571 NILFS_SEGMENT_USAGE_ACTIVE,
572 NILFS_SEGMENT_USAGE_DIRTY,
573 NILFS_SEGMENT_USAGE_ERROR,
574
575 /* ... */
576};
577
578#define NILFS_SEGMENT_USAGE_FNS(flag, name) \
579static inline void \
580nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \
581{ \
582 su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \
583 (1UL << NILFS_SEGMENT_USAGE_##flag));\
584} \
585static inline void \
586nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \
587{ \
588 su->su_flags = \
589 cpu_to_le32(le32_to_cpu(su->su_flags) & \
590 ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \
591} \
592static inline int \
593nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \
594{ \
595 return !!(le32_to_cpu(su->su_flags) & \
596 (1UL << NILFS_SEGMENT_USAGE_##flag)); \
597}
598
599NILFS_SEGMENT_USAGE_FNS(ACTIVE, active)
600NILFS_SEGMENT_USAGE_FNS(DIRTY, dirty)
601NILFS_SEGMENT_USAGE_FNS(ERROR, error)
602
603static inline void
604nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su)
605{
606 su->su_lastmod = cpu_to_le64(0);
607 su->su_nblocks = cpu_to_le32(0);
608 su->su_flags = cpu_to_le32(0);
609}
610
611static inline int
612nilfs_segment_usage_clean(const struct nilfs_segment_usage *su)
613{
614 return !le32_to_cpu(su->su_flags);
615}
616
617/**
618 * struct nilfs_sufile_header - segment usage file header
619 * @sh_ncleansegs: number of clean segments
620 * @sh_ndirtysegs: number of dirty segments
621 * @sh_last_alloc: last allocated segment number
622 */
623struct nilfs_sufile_header {
624 __le64 sh_ncleansegs;
625 __le64 sh_ndirtysegs;
626 __le64 sh_last_alloc;
627 /* ... */
628};
629
630#define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \
631 ((sizeof(struct nilfs_sufile_header) + \
632 sizeof(struct nilfs_segment_usage) - 1) / \
633 sizeof(struct nilfs_segment_usage))
634
635/**
636 * struct nilfs_suinfo - segment usage information
637 * @sui_lastmod: timestamp of last modification
638 * @sui_nblocks: number of blocks in the segment
639 * @sui_flags: segment usage flags
640 */
641struct nilfs_suinfo {
642 __u64 sui_lastmod;
643 __u32 sui_nblocks;
644 __u32 sui_flags;
645};
646
647#define NILFS_SUINFO_FNS(flag, name) \
648static inline int \
649nilfs_suinfo_##name(const struct nilfs_suinfo *si) \
650{ \
651 return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag); \
652}
653
654NILFS_SUINFO_FNS(ACTIVE, active)
655NILFS_SUINFO_FNS(DIRTY, dirty)
656NILFS_SUINFO_FNS(ERROR, error)
657
658static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si)
659{
660 return !si->sui_flags;
661}
662
663/* ioctl */
664enum {
665 NILFS_CHECKPOINT,
666 NILFS_SNAPSHOT,
667};
668
669/**
670 * struct nilfs_cpmode - change checkpoint mode structure
671 * @cm_cno: checkpoint number
672 * @cm_mode: mode of checkpoint (NILFS_CHECKPOINT or NILFS_SNAPSHOT)
673 */
674struct nilfs_cpmode {
675 __u64 cm_cno;
676 __u32 cm_mode;
677 __u32 cm_pad;
678};
679
680/**
681 * struct nilfs_argv - argument vector
682 * @v_base: pointer to the data array passed from userland
683 * @v_nmembs: number of members in the array
684 * @v_size: size of each member
685 * @v_flags: flags
686 * @v_index: start index of the target data items
687 */
688struct nilfs_argv {
689 __u64 v_base;
690 __u32 v_nmembs; /* number of members */
691 __u16 v_size; /* size of members */
692 __u16 v_flags;
693 __u64 v_index;
694};
695
696/**
697 * struct nilfs_period - period of checkpoint numbers
698 * @p_start: start checkpoint number (inclusive)
699 * @p_end: end checkpoint number (exclusive)
700 */
701struct nilfs_period {
702 __u64 p_start;
703 __u64 p_end;
704};
705
706/**
707 * struct nilfs_cpstat - checkpoint statistics
708 * @cs_cno: checkpoint number
709 * @cs_ncps: number of checkpoints
710 * @cs_nsss: number of snapshots
711 */
712struct nilfs_cpstat {
713 __u64 cs_cno;
714 __u64 cs_ncps;
715 __u64 cs_nsss;
716};
717
718/**
719 * struct nilfs_sustat - segment usage statistics
720 * @ss_nsegs: number of segments
721 * @ss_ncleansegs: number of clean segments
722 * @ss_ndirtysegs: number of dirty segments
723 * @ss_ctime: creation time of the last segment
724 * @ss_nongc_ctime: creation time of the last segment not for GC
725 * @ss_prot_seq: least sequence number of segments which must not be reclaimed
726 */
727struct nilfs_sustat {
728 __u64 ss_nsegs;
729 __u64 ss_ncleansegs;
730 __u64 ss_ndirtysegs;
731 __u64 ss_ctime;
732 __u64 ss_nongc_ctime;
733 __u64 ss_prot_seq;
734};
735
736/**
737 * struct nilfs_vinfo - virtual block number information
738 * @vi_vblocknr: virtual block number
739 * @vi_start: start checkpoint number (inclusive)
740 * @vi_end: end checkpoint number (exclusive)
741 * @vi_blocknr: disk block number
742 */
743struct nilfs_vinfo {
744 __u64 vi_vblocknr;
745 __u64 vi_start;
746 __u64 vi_end;
747 __u64 vi_blocknr;
748};
749
750/**
751 * struct nilfs_vdesc - descriptor of virtual block number
752 */
753struct nilfs_vdesc {
754 __u64 vd_ino;
755 __u64 vd_cno;
756 __u64 vd_vblocknr;
757 struct nilfs_period vd_period;
758 __u64 vd_blocknr;
759 __u64 vd_offset;
760 __u32 vd_flags;
761 __u32 vd_pad;
762};
763
764/**
765 * struct nilfs_bdesc - descriptor of disk block
766 */
767struct nilfs_bdesc {
768 __u64 bd_ino;
769 __u64 bd_oblocknr;
770 __u64 bd_blocknr;
771 __u64 bd_offset;
772 __u32 bd_level;
773 __u32 bd_pad;
774};
775
776#define NILFS_IOCTL_IDENT 'n'
777
778#define NILFS_IOCTL_CHANGE_CPMODE \
779 _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode)
780#define NILFS_IOCTL_DELETE_CHECKPOINT \
781 _IOW(NILFS_IOCTL_IDENT, 0x81, __u64)
782#define NILFS_IOCTL_GET_CPINFO \
783 _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv)
784#define NILFS_IOCTL_GET_CPSTAT \
785 _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat)
786#define NILFS_IOCTL_GET_SUINFO \
787 _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv)
788#define NILFS_IOCTL_GET_SUSTAT \
789 _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat)
790#define NILFS_IOCTL_GET_VINFO \
791 _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv)
792#define NILFS_IOCTL_GET_BDESCS \
793 _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv)
794#define NILFS_IOCTL_CLEAN_SEGMENTS \
795 _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5])
796#define NILFS_IOCTL_SYNC \
797 _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64)
798#define NILFS_IOCTL_RESIZE \
799 _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64)
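A hedged userspace sketch of driving one of these ioctls; the mount-point path is an assumption and error handling is minimal.

	/* Illustrative only: query checkpoint statistics on a mounted
	 * nilfs2 volume via NILFS_IOCTL_GET_CPSTAT. */
	#include <linux/nilfs2_fs.h>
	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <stdio.h>

	int main(void)
	{
		struct nilfs_cpstat cpstat;
		int fd = open("/mnt/nilfs", O_RDONLY);	/* hypothetical mount point */

		if (fd < 0 || ioctl(fd, NILFS_IOCTL_GET_CPSTAT, &cpstat) < 0) {
			perror("NILFS_IOCTL_GET_CPSTAT");
			return 1;
		}
		printf("checkpoints=%llu snapshots=%llu\n",
		       (unsigned long long)cpstat.cs_ncps,
		       (unsigned long long)cpstat.cs_nsss);
		close(fd);
		return 0;
	}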
800
801#endif /* _LINUX_NILFS_FS_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 61df1779b2a5..62214c7d2d93 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -82,6 +82,7 @@ enum pageflags {
82 PG_arch_1, 82 PG_arch_1,
83 PG_reserved, 83 PG_reserved,
84 PG_private, /* If pagecache, has fs-private data */ 84 PG_private, /* If pagecache, has fs-private data */
85 PG_private_2, /* If pagecache, has fs aux data */
85 PG_writeback, /* Page is under writeback */ 86 PG_writeback, /* Page is under writeback */
86#ifdef CONFIG_PAGEFLAGS_EXTENDED 87#ifdef CONFIG_PAGEFLAGS_EXTENDED
87 PG_head, /* A head page */ 88 PG_head, /* A head page */
@@ -108,6 +109,12 @@ enum pageflags {
108 /* Filesystems */ 109 /* Filesystems */
109 PG_checked = PG_owner_priv_1, 110 PG_checked = PG_owner_priv_1,
110 111
112 /* Two page bits are conscripted by FS-Cache to maintain local caching
113 * state. These bits are set on pages belonging to the netfs's inodes
114 * when those inodes are being locally cached.
115 */
116 PG_fscache = PG_private_2, /* page backed by cache */
117
111 /* XEN */ 118 /* XEN */
112 PG_pinned = PG_owner_priv_1, 119 PG_pinned = PG_owner_priv_1,
113 PG_savepinned = PG_dirty, 120 PG_savepinned = PG_dirty,
@@ -182,7 +189,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
182 189
183struct page; /* forward declaration */ 190struct page; /* forward declaration */
184 191
185TESTPAGEFLAG(Locked, locked) 192TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked)
186PAGEFLAG(Error, error) 193PAGEFLAG(Error, error)
187PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) 194PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
188PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) 195PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
@@ -194,8 +201,6 @@ PAGEFLAG(Checked, checked) /* Used by some filesystems */
194PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ 201PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
195PAGEFLAG(SavePinned, savepinned); /* Xen */ 202PAGEFLAG(SavePinned, savepinned); /* Xen */
196PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) 203PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
197PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
198 __SETPAGEFLAG(Private, private)
199PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) 204PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
200 205
201__PAGEFLAG(SlobPage, slob_page) 206__PAGEFLAG(SlobPage, slob_page)
@@ -205,6 +210,16 @@ __PAGEFLAG(SlubFrozen, slub_frozen)
205__PAGEFLAG(SlubDebug, slub_debug) 210__PAGEFLAG(SlubDebug, slub_debug)
206 211
207/* 212/*
213 * Private page markings that may be used by the filesystem that owns the page
214 * for its own purposes.
215 * - PG_private and PG_private_2 cause releasepage() and co to be invoked
216 */
217PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
218 __CLEARPAGEFLAG(Private, private)
219PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
220PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
221
222/*
208 * Only test-and-set exist for PG_writeback. The unconditional operators are 223 * Only test-and-set exist for PG_writeback. The unconditional operators are
209 * risky: they bypass page accounting. 224 * risky: they bypass page accounting.
210 */ 225 */
@@ -384,9 +399,10 @@ static inline void __ClearPageTail(struct page *page)
384 * these flags set. If they are, there is a problem. 399 * these flags set. If they are, there is a problem.
385 */ 400 */
386#define PAGE_FLAGS_CHECK_AT_FREE \ 401#define PAGE_FLAGS_CHECK_AT_FREE \
387 (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ 402 (1 << PG_lru | 1 << PG_locked | \
388 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ 403 1 << PG_private | 1 << PG_private_2 | \
389 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ 404 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \
405 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
390 __PG_UNEVICTABLE | __PG_MLOCKED) 406 __PG_UNEVICTABLE | __PG_MLOCKED)
391 407
392/* 408/*
@@ -397,4 +413,16 @@ static inline void __ClearPageTail(struct page *page)
397#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) 413#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
398 414
399#endif /* !__GENERATING_BOUNDS_H */ 415#endif /* !__GENERATING_BOUNDS_H */
416
417/**
418 * page_has_private - Determine if page has private stuff
419 * @page: The page to be checked
420 *
421 * Determine if a page has private stuff, indicating that release routines
422 * should be invoked upon it.
423 */
424#define page_has_private(page) \
425 ((page)->flags & ((1 << PG_private) | \
426 (1 << PG_private_2)))
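A small illustrative kernel-context fragment showing the intended use of the helper above; the wrapper name is hypothetical and not part of this header.

	/* Illustrative only: ask the owning filesystem to release its
	 * private state (PG_private or PG_private_2) before the page is
	 * reclaimed; try_to_release_page() is declared in <linux/mm.h>. */
	static int example_release_private_state(struct page *page)
	{
		if (page_has_private(page))
			return try_to_release_page(page, GFP_KERNEL);
		return 1;
	}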
427
400#endif /* PAGE_FLAGS_H */ 428#endif /* PAGE_FLAGS_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 076a7dc67c2b..34da5230faab 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -384,6 +384,11 @@ static inline void wait_on_page_writeback(struct page *page)
384extern void end_page_writeback(struct page *page); 384extern void end_page_writeback(struct page *page);
385 385
386/* 386/*
387 * Add an arbitrary waiter to a page's wait queue
388 */
389extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
390
391/*
387 * Fault a userspace page into pagetables. Return non-zero on a fault. 392 * Fault a userspace page into pagetables. Return non-zero on a fault.
388 * 393 *
389 * This assumes that two userspace pages are always sufficient. That's 394 * This assumes that two userspace pages are always sufficient. That's
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h
index ea8c6d84996d..cc1767f5cca8 100644
--- a/include/linux/parport_pc.h
+++ b/include/linux/parport_pc.h
@@ -228,10 +228,11 @@ extern void parport_pc_release_resources(struct parport *p);
228extern int parport_pc_claim_resources(struct parport *p); 228extern int parport_pc_claim_resources(struct parport *p);
229 229
230/* PCMCIA code will want to get us to look at a port. Provide a mechanism. */ 230/* PCMCIA code will want to get us to look at a port. Provide a mechanism. */
231extern struct parport *parport_pc_probe_port (unsigned long base, 231extern struct parport *parport_pc_probe_port(unsigned long base,
232 unsigned long base_hi, 232 unsigned long base_hi,
233 int irq, int dma, 233 int irq, int dma,
234 struct device *dev); 234 struct device *dev,
235extern void parport_pc_unregister_port (struct parport *p); 235 int irqflags);
236extern void parport_pc_unregister_port(struct parport *p);
236 237
237#endif 238#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a7fe4bbd7ff1..72698d89e767 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -674,6 +674,11 @@ int __must_check pci_reenable_device(struct pci_dev *);
674int __must_check pcim_enable_device(struct pci_dev *pdev); 674int __must_check pcim_enable_device(struct pci_dev *pdev);
675void pcim_pin_device(struct pci_dev *pdev); 675void pcim_pin_device(struct pci_dev *pdev);
676 676
677static inline int pci_is_enabled(struct pci_dev *pdev)
678{
679 return (atomic_read(&pdev->enable_cnt) > 0);
680}
681
677static inline int pci_is_managed(struct pci_dev *pdev) 682static inline int pci_is_managed(struct pci_dev *pdev)
678{ 683{
679 return pdev->is_managed; 684 return pdev->is_managed;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 170f8b1f22db..ee98cd570885 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -944,6 +944,32 @@
944#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 944#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801
945#define PCI_DEVICE_ID_SUN_CASSINI 0xabba 945#define PCI_DEVICE_ID_SUN_CASSINI 0xabba
946 946
947#define PCI_VENDOR_ID_NI 0x1093
948#define PCI_DEVICE_ID_NI_PCI2322 0xd130
949#define PCI_DEVICE_ID_NI_PCI2324 0xd140
950#define PCI_DEVICE_ID_NI_PCI2328 0xd150
951#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190
952#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0
953#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0
954#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0
955#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0
956#define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1
957#define PCI_DEVICE_ID_NI_PCI2322I 0xd250
958#define PCI_DEVICE_ID_NI_PCI2324I 0xd270
959#define PCI_DEVICE_ID_NI_PCI23216 0xd2b0
960#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080
961#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db
962#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd
963#define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df
964#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2
965#define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4
966#define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6
967#define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7
968#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8
969#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea
970#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec
971#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee
972
947#define PCI_VENDOR_ID_CMD 0x1095 973#define PCI_VENDOR_ID_CMD 0x1095
948#define PCI_DEVICE_ID_CMD_643 0x0643 974#define PCI_DEVICE_ID_CMD_643 0x0643
949#define PCI_DEVICE_ID_CMD_646 0x0646 975#define PCI_DEVICE_ID_CMD_646 0x0646
diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h
index cb7d10f30763..d4cf7a2ceb3e 100644
--- a/include/linux/pda_power.h
+++ b/include/linux/pda_power.h
@@ -31,6 +31,8 @@ struct pda_power_pdata {
31 unsigned int wait_for_status; /* msecs, default is 500 */ 31 unsigned int wait_for_status; /* msecs, default is 500 */
32 unsigned int wait_for_charger; /* msecs, default is 500 */ 32 unsigned int wait_for_charger; /* msecs, default is 500 */
33 unsigned int polling_interval; /* msecs, default is 2000 */ 33 unsigned int polling_interval; /* msecs, default is 2000 */
34
35 unsigned long ac_max_uA; /* current to draw when on AC */
34}; 36};
35 37
36#endif /* __PDA_POWER_H__ */ 38#endif /* __PDA_POWER_H__ */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 8ff25e0e7f7a..594c494ac3f0 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -73,6 +73,8 @@ enum power_supply_property {
73 POWER_SUPPLY_PROP_VOLTAGE_AVG, 73 POWER_SUPPLY_PROP_VOLTAGE_AVG,
74 POWER_SUPPLY_PROP_CURRENT_NOW, 74 POWER_SUPPLY_PROP_CURRENT_NOW,
75 POWER_SUPPLY_PROP_CURRENT_AVG, 75 POWER_SUPPLY_PROP_CURRENT_AVG,
76 POWER_SUPPLY_PROP_POWER_NOW,
77 POWER_SUPPLY_PROP_POWER_AVG,
76 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 78 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
77 POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN, 79 POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN,
78 POWER_SUPPLY_PROP_CHARGE_FULL, 80 POWER_SUPPLY_PROP_CHARGE_FULL,
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 3945f803d514..7c775751392c 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -28,4 +28,4 @@ int pwm_enable(struct pwm_device *pwm);
28 */ 28 */
29void pwm_disable(struct pwm_device *pwm); 29void pwm_disable(struct pwm_device *pwm);
30 30
31#endif /* __ASM_ARCH_PWM_H */ 31#endif /* __LINUX_PWM_H */
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h
deleted file mode 100644
index e98900671ca9..000000000000
--- a/include/linux/raid/bitmap.h
+++ /dev/null
@@ -1,288 +0,0 @@
1/*
2 * bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
3 *
4 * additions: Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.
5 */
6#ifndef BITMAP_H
7#define BITMAP_H 1
8
9#define BITMAP_MAJOR_LO 3
10/* version 4 insists the bitmap is in little-endian order;
11 * with version 3, it is host-endian, which is non-portable
12 */
13#define BITMAP_MAJOR_HI 4
14#define BITMAP_MAJOR_HOSTENDIAN 3
15
16#define BITMAP_MINOR 39
17
18/*
19 * in-memory bitmap:
20 *
21 * Use 16 bit block counters to track pending writes to each "chunk".
22 * The 2 high order bits are special-purpose, the first is a flag indicating
23 * whether a resync is needed. The second is a flag indicating whether a
24 * resync is active.
25 * This means that the counter is actually 14 bits:
26 *
27 * +--------+--------+------------------------------------------------+
28 * | resync | resync | counter |
29 * | needed | active | |
30 * | (0-1) | (0-1) | (0-16383) |
31 * +--------+--------+------------------------------------------------+
32 *
33 * The "resync needed" bit is set when:
34 * a '1' bit is read from storage at startup.
35 * a write request fails on some drives
36 * a resync is aborted on a chunk with 'resync active' set
37 * It is cleared (and resync-active set) when a resync starts across all drives
38 * of the chunk.
39 *
40 *
41 * The "resync active" bit is set when:
42 * a resync is started on all drives, and resync_needed is set.
43 * resync_needed will be cleared (as long as resync_active wasn't already set).
44 * It is cleared when a resync completes.
45 *
46 * The counter counts pending write requests, plus the on-disk bit.
47 * When the counter is '1' and the resync bits are clear, the on-disk
48 * bit can be cleared as well, thus setting the counter to 0.
49 * When we set a bit, or OR into the counter (to start a write), if the field is
50 * 0, we first set the disk bit and set the counter to 1.
51 *
52 * If the counter is 0, the on-disk bit is clear and the stripe is clean.
53 * Anything that dirties the stripe pushes the counter to 2 (at least)
54 * and sets the on-disk bit (lazily).
55 * If a periodic sweep finds the counter at 2, it is decremented to 1.
56 * If the sweep finds the counter at 1, the on-disk bit is cleared and the
57 * counter goes to zero.
58 *
59 * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block
60 * counters as a fallback when "page" memory cannot be allocated:
61 *
62 * Normal case (page memory allocated):
63 *
64 * page pointer (32-bit)
65 *
66 * [ ] ------+
67 * |
68 * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters)
69 * c1 c2 c2048
70 *
71 * Hijacked case (page memory allocation failed):
72 *
73 * hijacked page pointer (32-bit)
74 *
75 * [ ][ ] (no page memory allocated)
76 * counter #1 (16-bit) counter #2 (16-bit)
77 *
78 */
79
80#ifdef __KERNEL__
81
82#define PAGE_BITS (PAGE_SIZE << 3)
83#define PAGE_BIT_SHIFT (PAGE_SHIFT + 3)
84
85typedef __u16 bitmap_counter_t;
86#define COUNTER_BITS 16
87#define COUNTER_BIT_SHIFT 4
88#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
89#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
90
91#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
92#define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2)))
93#define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1)
94#define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK)
95#define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK)
96#define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX)
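As a worked example of the bit layout described above: a counter value of 0xC005 has NEEDED(x) and RESYNC(x) both non-zero (bits 15 and 14) and COUNTER(x) == 5, i.e. a resync is both needed and active for that chunk while the 14-bit count (pending writes plus the on-disk bit) is 5.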
97
98/* how many counters per page? */
99#define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS)
100/* same, except a shift value for more efficient bitops */
101#define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT)
102/* same, except a mask value for more efficient bitops */
103#define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1)
104
105#define BITMAP_BLOCK_SIZE 512
106#define BITMAP_BLOCK_SHIFT 9
107
108/* how many blocks per chunk? (this is variable) */
109#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT)
110#define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT)
111#define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1)
112
113/* when hijacked, the counters and bits represent even larger "chunks" */
114/* there will be 1024 chunks represented by each counter in the page pointers */
115#define PAGEPTR_BLOCK_RATIO(bitmap) \
116 (CHUNK_BLOCK_RATIO(bitmap) << PAGE_COUNTER_SHIFT >> 1)
117#define PAGEPTR_BLOCK_SHIFT(bitmap) \
118 (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1)
119#define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1)
120
121/*
122 * on-disk bitmap:
123 *
124 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
125 * file a page at a time. There's a superblock at the start of the file.
126 */
127
128/* map chunks (bits) to file pages - offset by the size of the superblock */
129#define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3))
130
131#endif
132
133/*
134 * bitmap structures:
135 */
136
137#define BITMAP_MAGIC 0x6d746962
138
139/* use these for bitmap->flags and bitmap->sb->state bit-fields */
140enum bitmap_state {
141 BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */
142 BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */
143 BITMAP_HOSTENDIAN = 0x8000,
144};
145
146/* the superblock at the front of the bitmap file -- little endian */
147typedef struct bitmap_super_s {
148 __le32 magic; /* 0 BITMAP_MAGIC */
149 __le32 version; /* 4 the bitmap major for now, could change... */
150 __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */
151 __le64 events; /* 24 event counter for the bitmap (1)*/
152 __le64 events_cleared;/*32 event counter when last bit cleared (2) */
153 __le64 sync_size; /* 40 the size of the md device's sync range(3) */
154 __le32 state; /* 48 bitmap state information */
155 __le32 chunksize; /* 52 the bitmap chunk size in bytes */
156 __le32 daemon_sleep; /* 56 seconds between disk flushes */
157 __le32 write_behind; /* 60 number of outstanding write-behind writes */
158
159 __u8 pad[256 - 64]; /* set to zero */
160} bitmap_super_t;
161
162/* notes:
163 * (1) This event counter is updated before the event counter in the md superblock.
164 * When a bitmap is loaded, it is only accepted if this event counter is equal
165 * to, or one greater than, the event counter in the superblock.
166 * (2) This event counter is updated when the other one is *if*and*only*if* the
167 * array is not degraded. As bits are not cleared when the array is degraded,
168 * this represents the last time that any bits were cleared.
169 * If a device is being added that has an event count with this value or
170 * higher, it is accepted as conforming to the bitmap.
171 * (3) This is the number of sectors represented by the bitmap, and is the range that
172 * resync happens across. For raid1 and raid5/6 it is the size of individual
173 * devices. For raid10 it is the size of the array.
174 */
175
176#ifdef __KERNEL__
177
178/* the in-memory bitmap is represented by bitmap_pages */
179struct bitmap_page {
180 /*
181 * map points to the actual memory page
182 */
183 char *map;
184 /*
185 * in emergencies (when map cannot be allocated), hijack the map
186 * pointer and use it as two counters itself
187 */
188 unsigned int hijacked:1;
189 /*
190 * count of dirty bits on the page
191 */
192 unsigned int count:31;
193};
194
195/* keep track of bitmap file pages that have pending writes on them */
196struct page_list {
197 struct list_head list;
198 struct page *page;
199};
200
201/* the main bitmap structure - one per mddev */
202struct bitmap {
203 struct bitmap_page *bp;
204 unsigned long pages; /* total number of pages in the bitmap */
205 unsigned long missing_pages; /* number of pages not yet allocated */
206
207 mddev_t *mddev; /* the md device that the bitmap is for */
208
209 int counter_bits; /* how many bits per block counter */
210
211 /* bitmap chunksize -- how much data does each bit represent? */
212 unsigned long chunksize;
213 unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
214 unsigned long chunks; /* total number of data chunks for the array */
215
216 /* We hold a count on the chunk currently being synced, and drop
217 * it when the last block is started. If the resync is aborted
218 * midway, we need to be able to drop that count, so we remember
219 * the counted chunk.
220 */
221 unsigned long syncchunk;
222
223 __u64 events_cleared;
224 int need_sync;
225
226 /* bitmap spinlock */
227 spinlock_t lock;
228
229 long offset; /* offset from superblock if file is NULL */
230 struct file *file; /* backing disk file */
231 struct page *sb_page; /* cached copy of the bitmap file superblock */
232 struct page **filemap; /* list of cache pages for the file */
233 unsigned long *filemap_attr; /* attributes associated w/ filemap pages */
234 unsigned long file_pages; /* number of pages in the file */
235 int last_page_size; /* bytes in the last page */
236
237 unsigned long flags;
238
239 int allclean;
240
241 unsigned long max_write_behind; /* write-behind mode */
242 atomic_t behind_writes;
243
244 /*
245 * the bitmap daemon - periodically wakes up and sweeps the bitmap
246 * file, cleaning up bits and flushing out pages to disk as necessary
247 */
248 unsigned long daemon_lastrun; /* jiffies of last run */
249 unsigned long daemon_sleep; /* how many seconds between updates? */
250 unsigned long last_end_sync; /* when we last called end_sync to
251 * update bitmap with resync progress */
252
253 atomic_t pending_writes; /* pending writes to the bitmap file */
254 wait_queue_head_t write_wait;
255 wait_queue_head_t overflow_wait;
256
257};
258
259/* the bitmap API */
260
261/* these are used only by md/bitmap */
262int bitmap_create(mddev_t *mddev);
263void bitmap_flush(mddev_t *mddev);
264void bitmap_destroy(mddev_t *mddev);
265
266void bitmap_print_sb(struct bitmap *bitmap);
267void bitmap_update_sb(struct bitmap *bitmap);
268
269int bitmap_setallbits(struct bitmap *bitmap);
270void bitmap_write_all(struct bitmap *bitmap);
271
272void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e);
273
274/* these are exported */
275int bitmap_startwrite(struct bitmap *bitmap, sector_t offset,
276 unsigned long sectors, int behind);
277void bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
278 unsigned long sectors, int success, int behind);
279int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded);
280void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted);
281void bitmap_close_sync(struct bitmap *bitmap);
282void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector);
283
284void bitmap_unplug(struct bitmap *bitmap);
285void bitmap_daemon_work(struct bitmap *bitmap);
286#endif
287
288#endif
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h
deleted file mode 100644
index f38b9c586afb..000000000000
--- a/include/linux/raid/linear.h
+++ /dev/null
@@ -1,31 +0,0 @@
1#ifndef _LINEAR_H
2#define _LINEAR_H
3
4#include <linux/raid/md.h>
5
6struct dev_info {
7 mdk_rdev_t *rdev;
8 sector_t num_sectors;
9 sector_t start_sector;
10};
11
12typedef struct dev_info dev_info_t;
13
14struct linear_private_data
15{
16 struct linear_private_data *prev; /* earlier version */
17 dev_info_t **hash_table;
18 sector_t spacing;
19 sector_t array_sectors;
20 int sector_shift; /* shift before dividing
21 * by spacing
22 */
23 dev_info_t disks[0];
24};
25
26
27typedef struct linear_private_data linear_conf_t;
28
29#define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private)
30
31#endif
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
deleted file mode 100644
index 82bea14cae1a..000000000000
--- a/include/linux/raid/md.h
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 md.h : Multiple Devices driver for Linux
3 Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
4 Copyright (C) 1994-96 Marc ZYNGIER
5 <zyngier@ufr-info-p7.ibp.fr> or
6 <maz@gloups.fdn.fr>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2, or (at your option)
11 any later version.
12
13 You should have received a copy of the GNU General Public License
14 (for example /usr/src/linux/COPYING); if not, write to the Free
15 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
16*/
17
18#ifndef _MD_H
19#define _MD_H
20
21#include <linux/blkdev.h>
22#include <linux/seq_file.h>
23
24/*
25 * 'md_p.h' holds the 'physical' layout of RAID devices
26 * 'md_u.h' holds the user <=> kernel API
27 *
28 * 'md_k.h' holds kernel internal definitions
29 */
30
31#include <linux/raid/md_p.h>
32#include <linux/raid/md_u.h>
33#include <linux/raid/md_k.h>
34
35#ifdef CONFIG_MD
36
37/*
38 * Different major versions are not compatible.
39 * Different minor versions are only downward compatible.
40 * Different patchlevel versions are downward and upward compatible.
41 */
42#define MD_MAJOR_VERSION 0
43#define MD_MINOR_VERSION 90
44/*
45 * MD_PATCHLEVEL_VERSION indicates kernel functionality.
46 * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
47 * and major_version/minor_version accordingly
48 * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
49 * in the super status byte
50 * >=3 means that bitmap superblock version 4 is supported, which uses
51 * little-endian representation rather than host-endian
52 */
53#define MD_PATCHLEVEL_VERSION 3
54
55extern int mdp_major;
56
57extern int register_md_personality(struct mdk_personality *p);
58extern int unregister_md_personality(struct mdk_personality *p);
59extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
60 mddev_t *mddev, const char *name);
61extern void md_unregister_thread(mdk_thread_t *thread);
62extern void md_wakeup_thread(mdk_thread_t *thread);
63extern void md_check_recovery(mddev_t *mddev);
64extern void md_write_start(mddev_t *mddev, struct bio *bi);
65extern void md_write_end(mddev_t *mddev);
66extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
67extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);
68
69extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
70 sector_t sector, int size, struct page *page);
71extern void md_super_wait(mddev_t *mddev);
72extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
73 struct page *page, int rw);
74extern void md_do_sync(mddev_t *mddev);
75extern void md_new_event(mddev_t *mddev);
76extern int md_allow_write(mddev_t *mddev);
77extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
78
79#endif /* CONFIG_MD */
80#endif
81
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
deleted file mode 100644
index 9743e4dbc918..000000000000
--- a/include/linux/raid/md_k.h
+++ /dev/null
@@ -1,402 +0,0 @@
1/*
2 md_k.h : kernel internal structure of the Linux MD driver
3 Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2, or (at your option)
8 any later version.
9
10 You should have received a copy of the GNU General Public License
11 (for example /usr/src/linux/COPYING); if not, write to the Free
12 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
13*/
14
15#ifndef _MD_K_H
16#define _MD_K_H
17
18/* and dm-bio-list.h is not under include/linux because.... ??? */
19#include "../../../drivers/md/dm-bio-list.h"
20
21#ifdef CONFIG_BLOCK
22
23#define LEVEL_MULTIPATH (-4)
24#define LEVEL_LINEAR (-1)
25#define LEVEL_FAULTY (-5)
26
27/* we need a value for 'no level specified' and 0
28 * means 'raid0', so we need something else. This is
29 * for internal use only
30 */
31#define LEVEL_NONE (-1000000)
32
33#define MaxSector (~(sector_t)0)
34
35typedef struct mddev_s mddev_t;
36typedef struct mdk_rdev_s mdk_rdev_t;
37
38/*
39 * options passed in raidrun:
40 */
41
42/* Currently this must fit in an 'int' */
43#define MAX_CHUNK_SIZE (1<<30)
44
45/*
46 * MD's 'extended' device
47 */
48struct mdk_rdev_s
49{
50 struct list_head same_set; /* RAID devices within the same set */
51
52 sector_t size; /* Device size (in blocks) */
53 mddev_t *mddev; /* RAID array if running */
54 long last_events; /* IO event timestamp */
55
56 struct block_device *bdev; /* block device handle */
57
58 struct page *sb_page;
59 int sb_loaded;
60 __u64 sb_events;
61 sector_t data_offset; /* start of data in array */
62 sector_t sb_start; /* offset of the super block (in 512byte sectors) */
63 int sb_size; /* bytes in the superblock */
64 int preferred_minor; /* autorun support */
65
66 struct kobject kobj;
67
68 /* A device can be in one of three states based on two flags:
69 * Not working: faulty==1 in_sync==0
70 * Fully working: faulty==0 in_sync==1
71 * Working, but not
72 * in sync with array
73 * faulty==0 in_sync==0
74 *
75 * It can never have faulty==1, in_sync==1
76 * This reduces the burden of testing multiple flags in many cases
77 */
78
79 unsigned long flags;
80#define Faulty 1 /* device is known to have a fault */
81#define In_sync 2 /* device is in_sync with rest of array */
82#define WriteMostly 4 /* Avoid reading if at all possible */
83#define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */
84#define AllReserved 6 /* If whole device is reserved for
85 * one array */
86#define AutoDetected 7 /* added by auto-detect */
87#define Blocked 8 /* An error occurred on an externally
88 * managed array, don't allow writes
89 * until it is cleared */
90#define StateChanged 9 /* Faulty or Blocked has changed during
91 * interrupt, so it needs to be
92 * notified by the thread */
93 wait_queue_head_t blocked_wait;
94
95 int desc_nr; /* descriptor index in the superblock */
96 int raid_disk; /* role of device in array */
97 int saved_raid_disk; /* role that device used to have in the
98 * array and could again if we did a partial
99 * resync from the bitmap
100 */
101 sector_t recovery_offset;/* If this device has been partially
102 * recovered, this is where we were
103 * up to.
104 */
105
106 atomic_t nr_pending; /* number of pending requests.
107 * only maintained for arrays that
108 * support hot removal
109 */
110 atomic_t read_errors; /* number of consecutive read errors that
111 * we have tried to ignore.
112 */
113 atomic_t corrected_errors; /* number of corrected read errors,
114 * for reporting to userspace and storing
115 * in superblock.
116 */
117 struct work_struct del_work; /* used for delayed sysfs removal */
118
119 struct sysfs_dirent *sysfs_state; /* handle for 'state'
120 * sysfs entry */
121};
122
123struct mddev_s
124{
125 void *private;
126 struct mdk_personality *pers;
127 dev_t unit;
128 int md_minor;
129 struct list_head disks;
130 unsigned long flags;
131#define MD_CHANGE_DEVS 0 /* Some device status has changed */
132#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
133#define MD_CHANGE_PENDING 2 /* superblock update in progress */
134
135 int ro;
136
137 struct gendisk *gendisk;
138
139 struct kobject kobj;
140 int hold_active;
141#define UNTIL_IOCTL 1
142#define UNTIL_STOP 2
143
144 /* Superblock information */
145 int major_version,
146 minor_version,
147 patch_version;
148 int persistent;
149 int external; /* metadata is
150 * managed externally */
151 char metadata_type[17]; /* externally set*/
152 int chunk_size;
153 time_t ctime, utime;
154 int level, layout;
155 char clevel[16];
156 int raid_disks;
157 int max_disks;
158 sector_t size; /* used size of component devices */
159 sector_t array_sectors; /* exported array size */
160 __u64 events;
161
162 char uuid[16];
163
164 /* If the array is being reshaped, we need to record the
165 * new shape and an indication of where we are up to.
166 * This is written to the superblock.
167 * If reshape_position is MaxSector, then no reshape is happening (yet).
168 */
169 sector_t reshape_position;
170 int delta_disks, new_level, new_layout, new_chunk;
171
172 struct mdk_thread_s *thread; /* management thread */
173 struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */
174 sector_t curr_resync; /* last block scheduled */
175 unsigned long resync_mark; /* a recent timestamp */
176 sector_t resync_mark_cnt;/* blocks written at resync_mark */
177 sector_t curr_mark_cnt; /* blocks scheduled now */
178
179 sector_t resync_max_sectors; /* may be set by personality */
180
181 sector_t resync_mismatches; /* count of sectors where
182 * parity/replica mismatch found
183 */
184
185 /* allow user-space to request suspension of IO to regions of the array */
186 sector_t suspend_lo;
187 sector_t suspend_hi;
188 /* if zero, use the system-wide default */
189 int sync_speed_min;
190 int sync_speed_max;
191
192 /* resync even though the same disks are shared among md-devices */
193 int parallel_resync;
194
195 int ok_start_degraded;
196 /* recovery/resync flags
197 * NEEDED: we might need to start a resync/recover
198 * RUNNING: a thread is running, or about to be started
199 * SYNC: actually doing a resync, not a recovery
200 * RECOVER: doing recovery, or need to try it.
201 * INTR: resync needs to be aborted for some reason
202 * DONE: thread is done and is waiting to be reaped
203 * REQUEST: user-space has requested a sync (used with SYNC)
204 * CHECK: user-space request for check-only, no repair
205 * RESHAPE: A reshape is happening
206 *
207 * If neither SYNC nor RESHAPE is set, then it is a recovery.
208 */
209#define MD_RECOVERY_RUNNING 0
210#define MD_RECOVERY_SYNC 1
211#define MD_RECOVERY_RECOVER 2
212#define MD_RECOVERY_INTR 3
213#define MD_RECOVERY_DONE 4
214#define MD_RECOVERY_NEEDED 5
215#define MD_RECOVERY_REQUESTED 6
216#define MD_RECOVERY_CHECK 7
217#define MD_RECOVERY_RESHAPE 8
218#define MD_RECOVERY_FROZEN 9
219
220 unsigned long recovery;
221 int recovery_disabled; /* if we detect that recovery
222 * will always fail, set this
223 * so we don't loop trying */
224
225 int in_sync; /* know to not need resync */
226 struct mutex reconfig_mutex;
227 atomic_t active; /* general refcount */
228 atomic_t openers; /* number of active opens */
229
230 int changed; /* true if we might need to reread partition info */
231 int degraded; /* whether md should consider
232 * adding a spare
233 */
234 int barriers_work; /* initialised to true, cleared as soon
235 * as a barrier request to slave
236 * fails. Only supported
237 */
238 struct bio *biolist; /* bios that need to be retried
239 * because BIO_RW_BARRIER is not supported
240 */
241
242 atomic_t recovery_active; /* blocks scheduled, but not written */
243 wait_queue_head_t recovery_wait;
244 sector_t recovery_cp;
245 sector_t resync_min; /* user requested sync
246 * starts here */
247 sector_t resync_max; /* resync should pause
248 * when it gets here */
249
250 struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
251 * file in sysfs.
252 */
253 struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */
254
255 struct work_struct del_work; /* used for delayed sysfs removal */
256
257 spinlock_t write_lock;
258 wait_queue_head_t sb_wait; /* for waiting on superblock updates */
259 atomic_t pending_writes; /* number of active superblock writes */
260
261 unsigned int safemode; /* if set, update "clean" superblock
262 * when no writes pending.
263 */
264 unsigned int safemode_delay;
265 struct timer_list safemode_timer;
266 atomic_t writes_pending;
267 struct request_queue *queue; /* for plugging ... */
268
269 atomic_t write_behind; /* outstanding async IO */
270 unsigned int max_write_behind; /* 0 = sync */
271
272 struct bitmap *bitmap; /* the bitmap for the device */
273 struct file *bitmap_file; /* the bitmap file */
274 long bitmap_offset; /* offset from superblock of
275 * start of bitmap. May be
276 * negative, but not '0'
277 */
278 long default_bitmap_offset; /* this is the offset to use when
279 * hot-adding a bitmap. It should
280 * eventually be settable by sysfs.
281 */
282
283 struct list_head all_mddevs;
284};
285
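The recovery flag bits documented in the struct above are driven with the normal atomic bit helpers on mddev->recovery. A minimal sketch of the usual pattern for kicking off a user-requested check; example_request_check() is a hypothetical helper, and md_wakeup_thread() plus the mddev->thread member are assumed from elsewhere in this header:

#include <linux/bitops.h>

/* Hypothetical helper: ask the md thread to run a check-only resync,
 * following the flag semantics documented in the struct above. */
static void example_request_check(mddev_t *mddev)
{
        set_bit(MD_RECOVERY_CHECK, &mddev->recovery);     /* check only, no repair */
        set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); /* user-space asked for it */
        set_bit(MD_RECOVERY_SYNC, &mddev->recovery);      /* a resync, not a recovery */
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        md_wakeup_thread(mddev->thread);                  /* let the array thread pick it up */
}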
286
287static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
288{
289 int faulty = test_bit(Faulty, &rdev->flags);
290 if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
291 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
292}
293
294static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
295{
296 atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
297}
298
299struct mdk_personality
300{
301 char *name;
302 int level;
303 struct list_head list;
304 struct module *owner;
305 int (*make_request)(struct request_queue *q, struct bio *bio);
306 int (*run)(mddev_t *mddev);
307 int (*stop)(mddev_t *mddev);
308 void (*status)(struct seq_file *seq, mddev_t *mddev);
309 /* error_handler must set ->faulty and clear ->in_sync
310 * if appropriate, and should abort recovery if needed
311 */
312 void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
313 int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
314 int (*hot_remove_disk) (mddev_t *mddev, int number);
315 int (*spare_active) (mddev_t *mddev);
316 sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
317 int (*resize) (mddev_t *mddev, sector_t sectors);
318 int (*check_reshape) (mddev_t *mddev);
319 int (*start_reshape) (mddev_t *mddev);
320 int (*reconfig) (mddev_t *mddev, int layout, int chunk_size);
321 /* quiesce moves between quiescence states
322 * 0 - fully active
323 * 1 - no new requests allowed
324 * others - reserved
325 */
326 void (*quiesce) (mddev_t *mddev, int state);
327};
328
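A personality is registered by filling in this operations table and calling register_md_personality(). The skeleton below is only a sketch of the shape of such a driver; the "demo" name, the trivial callbacks and the use of LEVEL_NONE are illustrative, not taken from any real personality (a real one would use its raid level here):

#include <linux/module.h>
#include <linux/bio.h>

static int demo_make_request(struct request_queue *q, struct bio *bio)
{
        /* a real personality would map the bio onto member rdevs here */
        bio_endio(bio, -EOPNOTSUPP);
        return 0;
}

static int demo_run(mddev_t *mddev)  { return 0; }      /* array start-up */
static int demo_stop(mddev_t *mddev) { return 0; }      /* array shut-down */

static struct mdk_personality demo_personality = {
        .name           = "demo",
        .level          = LEVEL_NONE,   /* hypothetical: no real raid level */
        .owner          = THIS_MODULE,
        .make_request   = demo_make_request,
        .run            = demo_run,
        .stop           = demo_stop,
};

static int __init demo_init(void)
{
        return register_md_personality(&demo_personality);
}

static void __exit demo_exit(void)
{
        unregister_md_personality(&demo_personality);
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");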
329
330struct md_sysfs_entry {
331 struct attribute attr;
332 ssize_t (*show)(mddev_t *, char *);
333 ssize_t (*store)(mddev_t *, const char *, size_t);
334};
335
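Individual attributes are usually declared with the generic __ATTR() initializer, since this structure begins with a plain struct attribute. A short sketch; the attribute name and the choice of the degraded field are illustrative only:

static ssize_t demo_show(mddev_t *mddev, char *page)
{
        return sprintf(page, "%d\n", mddev->degraded);
}

static struct md_sysfs_entry demo_entry =
__ATTR(demo, S_IRUGO, demo_show, NULL);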
336
337static inline char * mdname (mddev_t * mddev)
338{
339 return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
340}
341
342/*
343 * iterates through some rdev ringlist. It's safe to remove the
 344 * current 'rdev'. Don't touch 'tmp' though.
345 */
346#define rdev_for_each_list(rdev, tmp, head) \
347 list_for_each_entry_safe(rdev, tmp, head, same_set)
348
349/*
350 * iterates through the 'same array disks' ringlist
351 */
352#define rdev_for_each(rdev, tmp, mddev) \
353 list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)
354
355#define rdev_for_each_rcu(rdev, mddev) \
356 list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
357
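Typical use of these iterators, as seen throughout the md core. The helper name is made up, and the In_sync flag bit is assumed from the rdev flag definitions earlier in this header; the caller is expected to hold mddev->reconfig_mutex for the non-RCU variants:

static int example_count_in_sync(mddev_t *mddev)
{
        mdk_rdev_t *rdev, *tmp;
        int cnt = 0;

        rdev_for_each(rdev, tmp, mddev)
                if (test_bit(In_sync, &rdev->flags))
                        cnt++;
        return cnt;
}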
358typedef struct mdk_thread_s {
359 void (*run) (mddev_t *mddev);
360 mddev_t *mddev;
361 wait_queue_head_t wqueue;
362 unsigned long flags;
363 struct task_struct *tsk;
364 unsigned long timeout;
365} mdk_thread_t;
366
367#define THREAD_WAKEUP 0
368
369#define __wait_event_lock_irq(wq, condition, lock, cmd) \
370do { \
371 wait_queue_t __wait; \
372 init_waitqueue_entry(&__wait, current); \
373 \
374 add_wait_queue(&wq, &__wait); \
375 for (;;) { \
376 set_current_state(TASK_UNINTERRUPTIBLE); \
377 if (condition) \
378 break; \
379 spin_unlock_irq(&lock); \
380 cmd; \
381 schedule(); \
382 spin_lock_irq(&lock); \
383 } \
384 current->state = TASK_RUNNING; \
385 remove_wait_queue(&wq, &__wait); \
386} while (0)
387
388#define wait_event_lock_irq(wq, condition, lock, cmd) \
389do { \
390 if (condition) \
391 break; \
392 __wait_event_lock_irq(wq, condition, lock, cmd); \
393} while (0)
394
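The lock argument is passed by name, not by address, because the macro itself drops and re-takes &lock around the optional 'cmd' and schedule(). A sketch of the usual calling pattern, loosely modelled on how the md core waits for superblock writes to drain; the helper is hypothetical, the 'cmd' is an arbitrary illustration, and write_lock, sb_wait and pending_writes are the mddev fields declared above:

static void example_wait_for_sb_writes(mddev_t *mddev)
{
        spin_lock_irq(&mddev->write_lock);
        wait_event_lock_irq(mddev->sb_wait,
                            atomic_read(&mddev->pending_writes) == 0,
                            mddev->write_lock,
                            md_wakeup_thread(mddev->thread)); /* cmd runs with the lock dropped */
        spin_unlock_irq(&mddev->write_lock);
}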
395static inline void safe_put_page(struct page *p)
396{
397 if (p) put_page(p);
398}
399
400#endif /* CONFIG_BLOCK */
401#endif
402
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h
index 7192035fc4b0..fb1abb3367e9 100644
--- a/include/linux/raid/md_u.h
+++ b/include/linux/raid/md_u.h
@@ -15,6 +15,24 @@
15#ifndef _MD_U_H 15#ifndef _MD_U_H
16#define _MD_U_H 16#define _MD_U_H
17 17
18/*
19 * Different major versions are not compatible.
20 * Different minor versions are only downward compatible.
21 * Different patchlevel versions are downward and upward compatible.
22 */
23#define MD_MAJOR_VERSION 0
24#define MD_MINOR_VERSION 90
25/*
26 * MD_PATCHLEVEL_VERSION indicates kernel functionality.
27 * >=1 means different superblock formats are selectable using SET_ARRAY_INFO
28 * and major_version/minor_version accordingly
29 * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT
30 * in the super status byte
31 * >=3 means that bitmap superblock version 4 is supported, which uses
 32 * little-endian representation rather than host-endian
33 */
34#define MD_PATCHLEVEL_VERSION 3
35
18/* ioctls */ 36/* ioctls */
19 37
20/* status */ 38/* status */
@@ -46,6 +64,12 @@
46#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) 64#define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33)
47#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) 65#define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34)
48 66
67/* 63 partitions with the alternate major number (mdp) */
68#define MdpMinorShift 6
69#ifdef __KERNEL__
70extern int mdp_major;
71#endif
72
49typedef struct mdu_version_s { 73typedef struct mdu_version_s {
50 int major; 74 int major;
51 int minor; 75 int minor;
@@ -85,6 +109,17 @@ typedef struct mdu_array_info_s {
85 109
86} mdu_array_info_t; 110} mdu_array_info_t;
87 111
112/* non-obvious values for 'level' */
113#define LEVEL_MULTIPATH (-4)
114#define LEVEL_LINEAR (-1)
115#define LEVEL_FAULTY (-5)
116
117/* we need a value for 'no level specified' and 0
118 * means 'raid0', so we need something else. This is
119 * for internal use only
120 */
121#define LEVEL_NONE (-1000000)
122
88typedef struct mdu_disk_info_s { 123typedef struct mdu_disk_info_s {
89 /* 124 /*
90 * configuration/status of one particular disk 125 * configuration/status of one particular disk
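For completeness, a userspace sketch of how this header is typically consumed: query a running array through the GET_ARRAY_INFO ioctl declared earlier in this header (not shown in this hunk) and look at the version and level fields. The device path is illustrative, MD_MAJOR is assumed to come from <linux/major.h>, and the header is assumed to be exported to userspace as <linux/raid/md_u.h>:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/major.h>          /* MD_MAJOR, used by the ioctl numbers */
#include <linux/raid/md_u.h>

int main(void)
{
        mdu_array_info_t info;
        int fd = open("/dev/md0", O_RDONLY);   /* illustrative device node */

        if (fd < 0 || ioctl(fd, GET_ARRAY_INFO, &info) < 0) {
                perror("md");
                return 1;
        }
        printf("md %d.%d.%d: level %d, %d raid disks, %d active\n",
               info.major_version, info.minor_version, info.patch_version,
               info.level, info.raid_disks, info.active_disks);
        return 0;
}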
diff --git a/include/linux/raid/multipath.h b/include/linux/raid/multipath.h
deleted file mode 100644
index 6f53fc177a47..000000000000
--- a/include/linux/raid/multipath.h
+++ /dev/null
@@ -1,42 +0,0 @@
1#ifndef _MULTIPATH_H
2#define _MULTIPATH_H
3
4#include <linux/raid/md.h>
5
6struct multipath_info {
7 mdk_rdev_t *rdev;
8};
9
10struct multipath_private_data {
11 mddev_t *mddev;
12 struct multipath_info *multipaths;
13 int raid_disks;
14 int working_disks;
15 spinlock_t device_lock;
16 struct list_head retry_list;
17
18 mempool_t *pool;
19};
20
21typedef struct multipath_private_data multipath_conf_t;
22
23/*
24 * this is the only point in the RAID code where we violate
25 * C type safety. mddev->private is an 'opaque' pointer.
26 */
27#define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private)
28
29/*
30 * this is our 'private' 'collective' MULTIPATH buffer head.
31 * it contains information about what kind of IO operations were started
32 * for this MULTIPATH operation, and about their status:
33 */
34
35struct multipath_bh {
36 mddev_t *mddev;
37 struct bio *master_bio;
38 struct bio bio;
39 int path;
40 struct list_head retry_list;
41};
42#endif
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
new file mode 100644
index 000000000000..d92480f8285c
--- /dev/null
+++ b/include/linux/raid/pq.h
@@ -0,0 +1,132 @@
1/* -*- linux-c -*- ------------------------------------------------------- *
2 *
3 * Copyright 2003 H. Peter Anvin - All Rights Reserved
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
8 * Boston MA 02111-1307, USA; either version 2 of the License, or
9 * (at your option) any later version; incorporated herein by reference.
10 *
11 * ----------------------------------------------------------------------- */
12
13#ifndef LINUX_RAID_RAID6_H
14#define LINUX_RAID_RAID6_H
15
16#ifdef __KERNEL__
17
18/* Set to 1 to use kernel-wide empty_zero_page */
19#define RAID6_USE_EMPTY_ZERO_PAGE 0
20#include <linux/blkdev.h>
21
22/* We need a pre-zeroed page... if we don't want to use the kernel-provided
 23 one, define it here */
24#if RAID6_USE_EMPTY_ZERO_PAGE
25# define raid6_empty_zero_page empty_zero_page
26#else
27extern const char raid6_empty_zero_page[PAGE_SIZE];
28#endif
29
30#else /* ! __KERNEL__ */
31/* Used for testing in user space */
32
33#include <errno.h>
34#include <inttypes.h>
35#include <limits.h>
36#include <stddef.h>
37#include <sys/mman.h>
38#include <sys/types.h>
39
40/* Not standard, but glibc defines it */
41#define BITS_PER_LONG __WORDSIZE
42
43typedef uint8_t u8;
44typedef uint16_t u16;
45typedef uint32_t u32;
46typedef uint64_t u64;
47
48#ifndef PAGE_SIZE
49# define PAGE_SIZE 4096
50#endif
51extern const char raid6_empty_zero_page[PAGE_SIZE];
52
53#define __init
54#define __exit
55#define __attribute_const__ __attribute__((const))
56#define noinline __attribute__((noinline))
57
58#define preempt_enable()
59#define preempt_disable()
60#define cpu_has_feature(x) 1
61#define enable_kernel_altivec()
62#define disable_kernel_altivec()
63
64#define EXPORT_SYMBOL(sym)
65#define MODULE_LICENSE(licence)
66#define subsys_initcall(x)
67#define module_exit(x)
68#endif /* __KERNEL__ */
69
70/* Routine choices */
71struct raid6_calls {
72 void (*gen_syndrome)(int, size_t, void **);
73 int (*valid)(void); /* Returns 1 if this routine set is usable */
74 const char *name; /* Name of this routine set */
75 int prefer; /* Has special performance attribute */
76};
77
78/* Selected algorithm */
79extern struct raid6_calls raid6_call;
80
81/* Algorithm list */
82extern const struct raid6_calls * const raid6_algos[];
83int raid6_select_algo(void);
84
85/* Return values from chk_syndrome */
86#define RAID6_OK 0
87#define RAID6_P_BAD 1
88#define RAID6_Q_BAD 2
89#define RAID6_PQ_BAD 3
90
91/* Galois field tables */
92extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256)));
93extern const u8 raid6_gfexp[256] __attribute__((aligned(256)));
94extern const u8 raid6_gfinv[256] __attribute__((aligned(256)));
95extern const u8 raid6_gfexi[256] __attribute__((aligned(256)));
96
97/* Recovery routines */
98void raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
99 void **ptrs);
100void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs);
101void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
102 void **ptrs);
103
104/* Some definitions to allow code to be compiled for testing in userspace */
105#ifndef __KERNEL__
106
107# define jiffies raid6_jiffies()
108# define printk printf
109# define GFP_KERNEL 0
110# define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
111 PROT_READ|PROT_WRITE, \
112 MAP_PRIVATE|MAP_ANONYMOUS,\
113 0, 0))
114# define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE)
115
116static inline void cpu_relax(void)
117{
118 /* Nothing */
119}
120
121#undef HZ
122#define HZ 1000
123static inline uint32_t raid6_jiffies(void)
124{
125 struct timeval tv;
126 gettimeofday(&tv, NULL);
127 return tv.tv_sec*1000 + tv.tv_usec/1000;
128}
129
130#endif /* ! __KERNEL__ */
131
132#endif /* LINUX_RAID_RAID6_H */
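The syndrome generators all share one calling convention: an array of 'disks' pointers whose first disks-2 entries are the data blocks and whose last two entries are the P and Q destinations. A sketch of driving the selected routine set; the wrapper is hypothetical, and raid6_select_algo() is assumed to have run so that raid6_call is valid:

/* Compute P and Q for one stripe worth of data, 'bytes' per block. */
static void example_gen_pq(int data_disks, size_t bytes,
                           void **data, void *p, void *q)
{
        void *ptrs[data_disks + 2];   /* sketch: a VLA keeps the example short */
        int i;

        for (i = 0; i < data_disks; i++)
                ptrs[i] = data[i];
        ptrs[data_disks]     = p;     /* P parity destination */
        ptrs[data_disks + 1] = q;     /* Q syndrome destination */

        raid6_call.gen_syndrome(data_disks + 2, bytes, ptrs);
}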
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h
deleted file mode 100644
index fd42aa87c391..000000000000
--- a/include/linux/raid/raid0.h
+++ /dev/null
@@ -1,30 +0,0 @@
1#ifndef _RAID0_H
2#define _RAID0_H
3
4#include <linux/raid/md.h>
5
6struct strip_zone
7{
8 sector_t zone_start; /* Zone offset in md_dev (in sectors) */
9 sector_t dev_start; /* Zone offset in real dev (in sectors) */
10 sector_t sectors; /* Zone size in sectors */
11 int nb_dev; /* # of devices attached to the zone */
12 mdk_rdev_t **dev; /* Devices attached to the zone */
13};
14
15struct raid0_private_data
16{
17 struct strip_zone **hash_table; /* Table of indexes into strip_zone */
18 struct strip_zone *strip_zone;
19 mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */
20 int nr_strip_zones;
21
22 sector_t spacing;
23 int sector_shift; /* shift this before divide by spacing */
24};
25
26typedef struct raid0_private_data raid0_conf_t;
27
28#define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private)
29
30#endif
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h
deleted file mode 100644
index 0a9ba7c3302e..000000000000
--- a/include/linux/raid/raid1.h
+++ /dev/null
@@ -1,134 +0,0 @@
1#ifndef _RAID1_H
2#define _RAID1_H
3
4#include <linux/raid/md.h>
5
6typedef struct mirror_info mirror_info_t;
7
8struct mirror_info {
9 mdk_rdev_t *rdev;
10 sector_t head_position;
11};
12
13/*
14 * memory pools need a pointer to the mddev, so they can force an unplug
15 * when memory is tight, and a count of the number of drives that the
16 * pool was allocated for, so they know how much to allocate and free.
 17 * mddev->raid_disks cannot be used, as it can change while a pool is active.
18 * These two datums are stored in a kmalloced struct.
19 */
20
21struct pool_info {
22 mddev_t *mddev;
23 int raid_disks;
24};
25
26
27typedef struct r1bio_s r1bio_t;
28
29struct r1_private_data_s {
30 mddev_t *mddev;
31 mirror_info_t *mirrors;
32 int raid_disks;
33 int last_used;
34 sector_t next_seq_sect;
35 spinlock_t device_lock;
36
37 struct list_head retry_list;
38 /* queue pending writes and submit them on unplug */
39 struct bio_list pending_bio_list;
40 /* queue of writes that have been unplugged */
41 struct bio_list flushing_bio_list;
42
43 /* for use when syncing mirrors: */
44
45 spinlock_t resync_lock;
46 int nr_pending;
47 int nr_waiting;
48 int nr_queued;
49 int barrier;
50 sector_t next_resync;
51 int fullsync; /* set to 1 if a full sync is needed,
52 * (fresh device added).
53 * Cleared when a sync completes.
54 */
55
56 wait_queue_head_t wait_barrier;
57
58 struct pool_info *poolinfo;
59
60 struct page *tmppage;
61
62 mempool_t *r1bio_pool;
63 mempool_t *r1buf_pool;
64};
65
66typedef struct r1_private_data_s conf_t;
67
68/*
69 * this is the only point in the RAID code where we violate
70 * C type safety. mddev->private is an 'opaque' pointer.
71 */
72#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
73
74/*
75 * this is our 'private' RAID1 bio.
76 *
77 * it contains information about what kind of IO operations were started
78 * for this RAID1 operation, and about their status:
79 */
80
81struct r1bio_s {
82 atomic_t remaining; /* 'have we finished' count,
83 * used from IRQ handlers
84 */
85 atomic_t behind_remaining; /* number of write-behind ios remaining
86 * in this BehindIO request
87 */
88 sector_t sector;
89 int sectors;
90 unsigned long state;
91 mddev_t *mddev;
92 /*
93 * original bio going to /dev/mdx
94 */
95 struct bio *master_bio;
96 /*
97 * if the IO is in READ direction, then this is where we read
98 */
99 int read_disk;
100
101 struct list_head retry_list;
102 struct bitmap_update *bitmap_update;
103 /*
104 * if the IO is in WRITE direction, then multiple bios are used.
105 * We choose the number when they are allocated.
106 */
107 struct bio *bios[0];
108 /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/
109};
110
111/* when we get a read error on a read-only array, we redirect to another
112 * device without failing the first device, or trying to over-write to
113 * correct the read error. To keep track of bad blocks on a per-bio
114 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
115 */
116#define IO_BLOCKED ((struct bio*)1)
117
118/* bits for r1bio.state */
119#define R1BIO_Uptodate 0
120#define R1BIO_IsSync 1
121#define R1BIO_Degraded 2
122#define R1BIO_BehindIO 3
123#define R1BIO_Barrier 4
124#define R1BIO_BarrierRetry 5
125/* For write-behind requests, we call bi_end_io when
126 * the last non-write-behind device completes, providing
 127 * any write was successful. Otherwise we call bi_end_io when
 128 * any write-behind write succeeds; if none succeed, we call it
 129 * with failure when the last write completes (and all have failed).
130 * Record that bi_end_io was called with this flag...
131 */
132#define R1BIO_Returned 6
133
134#endif
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h
deleted file mode 100644
index e9091cfeb286..000000000000
--- a/include/linux/raid/raid10.h
+++ /dev/null
@@ -1,123 +0,0 @@
1#ifndef _RAID10_H
2#define _RAID10_H
3
4#include <linux/raid/md.h>
5
6typedef struct mirror_info mirror_info_t;
7
8struct mirror_info {
9 mdk_rdev_t *rdev;
10 sector_t head_position;
11};
12
13typedef struct r10bio_s r10bio_t;
14
15struct r10_private_data_s {
16 mddev_t *mddev;
17 mirror_info_t *mirrors;
18 int raid_disks;
19 spinlock_t device_lock;
20
21 /* geometry */
 22 int near_copies; /* number of copies laid out raid0 style */
 23 int far_copies; /* number of copies laid out
24 * at large strides across drives
25 */
26 int far_offset; /* far_copies are offset by 1 stripe
27 * instead of many
28 */
29 int copies; /* near_copies * far_copies.
30 * must be <= raid_disks
31 */
32 sector_t stride; /* distance between far copies.
33 * This is size / far_copies unless
34 * far_offset, in which case it is
35 * 1 stripe.
36 */
37
38 int chunk_shift; /* shift from chunks to sectors */
39 sector_t chunk_mask;
40
41 struct list_head retry_list;
42 /* queue pending writes and submit them on unplug */
43 struct bio_list pending_bio_list;
44
45
46 spinlock_t resync_lock;
47 int nr_pending;
48 int nr_waiting;
49 int nr_queued;
50 int barrier;
51 sector_t next_resync;
52 int fullsync; /* set to 1 if a full sync is needed,
53 * (fresh device added).
54 * Cleared when a sync completes.
55 */
56
57 wait_queue_head_t wait_barrier;
58
59 mempool_t *r10bio_pool;
60 mempool_t *r10buf_pool;
61 struct page *tmppage;
62};
63
64typedef struct r10_private_data_s conf_t;
65
66/*
67 * this is the only point in the RAID code where we violate
68 * C type safety. mddev->private is an 'opaque' pointer.
69 */
70#define mddev_to_conf(mddev) ((conf_t *) mddev->private)
71
72/*
73 * this is our 'private' RAID10 bio.
74 *
75 * it contains information about what kind of IO operations were started
76 * for this RAID10 operation, and about their status:
77 */
78
79struct r10bio_s {
80 atomic_t remaining; /* 'have we finished' count,
81 * used from IRQ handlers
82 */
83 sector_t sector; /* virtual sector number */
84 int sectors;
85 unsigned long state;
86 mddev_t *mddev;
87 /*
88 * original bio going to /dev/mdx
89 */
90 struct bio *master_bio;
91 /*
92 * if the IO is in READ direction, then this is where we read
93 */
94 int read_slot;
95
96 struct list_head retry_list;
97 /*
98 * if the IO is in WRITE direction, then multiple bios are used,
99 * one for each copy.
100 * When resyncing we also use one for each copy.
101 * When reconstructing, we use 2 bios, one for read, one for write.
102 * We choose the number when they are allocated.
103 */
104 struct {
105 struct bio *bio;
106 sector_t addr;
107 int devnum;
108 } devs[0];
109};
110
111/* when we get a read error on a read-only array, we redirect to another
112 * device without failing the first device, or trying to over-write to
113 * correct the read error. To keep track of bad blocks on a per-bio
114 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
115 */
116#define IO_BLOCKED ((struct bio*)1)
117
118/* bits for r10bio.state */
119#define R10BIO_Uptodate 0
120#define R10BIO_IsSync 1
121#define R10BIO_IsRecover 2
122#define R10BIO_Degraded 3
123#endif
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
deleted file mode 100644
index 3b2672792457..000000000000
--- a/include/linux/raid/raid5.h
+++ /dev/null
@@ -1,402 +0,0 @@
1#ifndef _RAID5_H
2#define _RAID5_H
3
4#include <linux/raid/md.h>
5#include <linux/raid/xor.h>
6
7/*
8 *
9 * Each stripe contains one buffer per disc. Each buffer can be in
10 * one of a number of states stored in "flags". Changes between
11 * these states happen *almost* exclusively under a per-stripe
12 * spinlock. Some very specific changes can happen in bi_end_io, and
13 * these are not protected by the spin lock.
14 *
15 * The flag bits that are used to represent these states are:
16 * R5_UPTODATE and R5_LOCKED
17 *
18 * State Empty == !UPTODATE, !LOCK
19 * We have no data, and there is no active request
20 * State Want == !UPTODATE, LOCK
21 * A read request is being submitted for this block
22 * State Dirty == UPTODATE, LOCK
23 * Some new data is in this buffer, and it is being written out
24 * State Clean == UPTODATE, !LOCK
25 * We have valid data which is the same as on disc
26 *
27 * The possible state transitions are:
28 *
29 * Empty -> Want - on read or write to get old data for parity calc
30 * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE)
31 * Empty -> Clean - on compute_block when computing a block for failed drive
32 * Want -> Empty - on failed read
33 * Want -> Clean - on successful completion of read request
34 * Dirty -> Clean - on successful completion of write request
35 * Dirty -> Clean - on failed write
36 * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW)
37 *
 38 * The Want->Empty, Want->Clean and Dirty->Clean transitions
39 * all happen in b_end_io at interrupt time.
40 * Each sets the Uptodate bit before releasing the Lock bit.
41 * This leaves one multi-stage transition:
42 * Want->Dirty->Clean
43 * This is safe because thinking that a Clean buffer is actually dirty
44 * will at worst delay some action, and the stripe will be scheduled
45 * for attention after the transition is complete.
46 *
47 * There is one possibility that is not covered by these states. That
48 * is if one drive has failed and there is a spare being rebuilt. We
49 * can't distinguish between a clean block that has been generated
50 * from parity calculations, and a clean block that has been
51 * successfully written to the spare ( or to parity when resyncing).
 52 * To distinguish these states we have a stripe bit STRIPE_INSYNC that
53 * is set whenever a write is scheduled to the spare, or to the parity
54 * disc if there is no spare. A sync request clears this bit, and
55 * when we find it set with no buffers locked, we know the sync is
56 * complete.
57 *
58 * Buffers for the md device that arrive via make_request are attached
59 * to the appropriate stripe in one of two lists linked on b_reqnext.
60 * One list (bh_read) for read requests, one (bh_write) for write.
61 * There should never be more than one buffer on the two lists
62 * together, but we are not guaranteed of that so we allow for more.
63 *
64 * If a buffer is on the read list when the associated cache buffer is
 65 * Uptodate, the data is copied into the read buffer and its b_end_io
66 * routine is called. This may happen in the end_request routine only
67 * if the buffer has just successfully been read. end_request should
68 * remove the buffers from the list and then set the Uptodate bit on
69 * the buffer. Other threads may do this only if they first check
70 * that the Uptodate bit is set. Once they have checked that they may
71 * take buffers off the read queue.
72 *
73 * When a buffer on the write list is committed for write it is copied
74 * into the cache buffer, which is then marked dirty, and moved onto a
75 * third list, the written list (bh_written). Once both the parity
76 * block and the cached buffer are successfully written, any buffer on
77 * a written list can be returned with b_end_io.
78 *
79 * The write list and read list both act as fifos. The read list is
80 * protected by the device_lock. The write and written lists are
81 * protected by the stripe lock. The device_lock, which can be
 82 * claimed while the stripe lock is held, is only for list
83 * manipulations and will only be held for a very short time. It can
84 * be claimed from interrupts.
85 *
86 *
87 * Stripes in the stripe cache can be on one of two lists (or on
88 * neither). The "inactive_list" contains stripes which are not
89 * currently being used for any request. They can freely be reused
90 * for another stripe. The "handle_list" contains stripes that need
91 * to be handled in some way. Both of these are fifo queues. Each
92 * stripe is also (potentially) linked to a hash bucket in the hash
93 * table so that it can be found by sector number. Stripes that are
94 * not hashed must be on the inactive_list, and will normally be at
95 * the front. All stripes start life this way.
96 *
97 * The inactive_list, handle_list and hash bucket lists are all protected by the
98 * device_lock.
99 * - stripes on the inactive_list never have their stripe_lock held.
100 * - stripes have a reference counter. If count==0, they are on a list.
101 * - If a stripe might need handling, STRIPE_HANDLE is set.
102 * - When refcount reaches zero, then if STRIPE_HANDLE it is put on
103 * handle_list else inactive_list
104 *
 105 * This, combined with the fact that STRIPE_HANDLE is only ever
 106 * cleared while a stripe has a non-zero count, means that if the
 107 * refcount is 0 and STRIPE_HANDLE is set, then it is on the
 108 * handle_list and if the refcount is 0 and STRIPE_HANDLE is not set, then
 109 * the stripe is on inactive_list.
110 *
111 * The possible transitions are:
112 * activate an unhashed/inactive stripe (get_active_stripe())
113 * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev
114 * activate a hashed, possibly active stripe (get_active_stripe())
115 * lockdev check-hash if(!cnt++)unlink-stripe unlockdev
116 * attach a request to an active stripe (add_stripe_bh())
117 * lockdev attach-buffer unlockdev
118 * handle a stripe (handle_stripe())
119 * lockstripe clrSTRIPE_HANDLE ...
120 * (lockdev check-buffers unlockdev) ..
121 * change-state ..
122 * record io/ops needed unlockstripe schedule io/ops
123 * release an active stripe (release_stripe())
124 * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev
125 *
 126 * The refcount counts each thread that has activated the stripe,
127 * plus raid5d if it is handling it, plus one for each active request
128 * on a cached buffer, and plus one if the stripe is undergoing stripe
129 * operations.
130 *
131 * Stripe operations are performed outside the stripe lock,
132 * the stripe operations are:
133 * -copying data between the stripe cache and user application buffers
134 * -computing blocks to save a disk access, or to recover a missing block
135 * -updating the parity on a write operation (reconstruct write and
136 * read-modify-write)
137 * -checking parity correctness
138 * -running i/o to disk
139 * These operations are carried out by raid5_run_ops which uses the async_tx
140 * api to (optionally) offload operations to dedicated hardware engines.
141 * When requesting an operation handle_stripe sets the pending bit for the
142 * operation and increments the count. raid5_run_ops is then run whenever
143 * the count is non-zero.
144 * There are some critical dependencies between the operations that prevent some
145 * from being requested while another is in flight.
146 * 1/ Parity check operations destroy the in cache version of the parity block,
147 * so we prevent parity dependent operations like writes and compute_blocks
148 * from starting while a check is in progress. Some dma engines can perform
149 * the check without damaging the parity block, in these cases the parity
150 * block is re-marked up to date (assuming the check was successful) and is
151 * not re-read from disk.
152 * 2/ When a write operation is requested we immediately lock the affected
153 * blocks, and mark them as not up to date. This causes new read requests
154 * to be held off, as well as parity checks and compute block operations.
155 * 3/ Once a compute block operation has been requested handle_stripe treats
 156 * that block as if it is up to date. raid5_run_ops guarantees that any
157 * operation that is dependent on the compute block result is initiated after
158 * the compute block completes.
159 */
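The Empty/Want/Dirty/Clean states described in the comment above are not stored anywhere as such; they are simply the four combinations of the R5_UPTODATE and R5_LOCKED bits defined later in this file. A small illustrative helper (not part of the original header) makes the mapping explicit:

static const char *example_r5dev_state(struct r5dev *dev)
{
        int uptodate = test_bit(R5_UPTODATE, &dev->flags);
        int locked   = test_bit(R5_LOCKED, &dev->flags);

        if (!uptodate && !locked)
                return "Empty";  /* no data, no active request */
        if (!uptodate && locked)
                return "Want";   /* read submitted for this block */
        if (uptodate && locked)
                return "Dirty";  /* new data being written out */
        return "Clean";          /* valid data, same as on disc */
}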
160
161/*
162 * Operations state - intermediate states that are visible outside of sh->lock
163 * In general _idle indicates nothing is running, _run indicates a data
164 * processing operation is active, and _result means the data processing result
165 * is stable and can be acted upon. For simple operations like biofill and
166 * compute that only have an _idle and _run state they are indicated with
167 * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN)
168 */
169/**
170 * enum check_states - handles syncing / repairing a stripe
171 * @check_state_idle - check operations are quiesced
172 * @check_state_run - check operation is running
173 * @check_state_result - set outside lock when check result is valid
174 * @check_state_compute_run - check failed and we are repairing
175 * @check_state_compute_result - set outside lock when compute result is valid
176 */
177enum check_states {
178 check_state_idle = 0,
179 check_state_run, /* parity check */
180 check_state_check_result,
181 check_state_compute_run, /* parity repair */
182 check_state_compute_result,
183};
184
185/**
186 * enum reconstruct_states - handles writing or expanding a stripe
187 */
188enum reconstruct_states {
189 reconstruct_state_idle = 0,
190 reconstruct_state_prexor_drain_run, /* prexor-write */
191 reconstruct_state_drain_run, /* write */
192 reconstruct_state_run, /* expand */
193 reconstruct_state_prexor_drain_result,
194 reconstruct_state_drain_result,
195 reconstruct_state_result,
196};
197
198struct stripe_head {
199 struct hlist_node hash;
200 struct list_head lru; /* inactive_list or handle_list */
201 struct raid5_private_data *raid_conf;
202 sector_t sector; /* sector of this row */
203 int pd_idx; /* parity disk index */
204 unsigned long state; /* state flags */
205 atomic_t count; /* nr of active thread/requests */
206 spinlock_t lock;
207 int bm_seq; /* sequence number for bitmap flushes */
208 int disks; /* disks in stripe */
209 enum check_states check_state;
210 enum reconstruct_states reconstruct_state;
211 /* stripe_operations
212 * @target - STRIPE_OP_COMPUTE_BLK target
213 */
214 struct stripe_operations {
215 int target;
216 u32 zero_sum_result;
217 } ops;
218 struct r5dev {
219 struct bio req;
220 struct bio_vec vec;
221 struct page *page;
222 struct bio *toread, *read, *towrite, *written;
223 sector_t sector; /* sector of this page */
224 unsigned long flags;
225 } dev[1]; /* allocated with extra space depending of RAID geometry */
226};
227
228/* stripe_head_state - collects and tracks the dynamic state of a stripe_head
229 * for handle_stripe. It is only valid under spin_lock(sh->lock);
230 */
231struct stripe_head_state {
232 int syncing, expanding, expanded;
233 int locked, uptodate, to_read, to_write, failed, written;
234 int to_fill, compute, req_compute, non_overwrite;
235 int failed_num;
236 unsigned long ops_request;
237};
238
239/* r6_state - extra state data only relevant to r6 */
240struct r6_state {
241 int p_failed, q_failed, qd_idx, failed_num[2];
242};
243
244/* Flags */
245#define R5_UPTODATE 0 /* page contains current data */
246#define R5_LOCKED 1 /* IO has been submitted on "req" */
247#define R5_OVERWRITE 2 /* towrite covers whole page */
248/* and some that are internal to handle_stripe */
249#define R5_Insync 3 /* rdev && rdev->in_sync at start */
250#define R5_Wantread 4 /* want to schedule a read */
251#define R5_Wantwrite 5
252#define R5_Overlap 7 /* There is a pending overlapping request on this block */
253#define R5_ReadError 8 /* seen a read error here recently */
254#define R5_ReWrite 9 /* have tried to over-write the readerror */
255
256#define R5_Expanded 10 /* This block now has post-expand data */
257#define R5_Wantcompute 11 /* compute_block in progress treat as
258 * uptodate
259 */
260#define R5_Wantfill 12 /* dev->toread contains a bio that needs
261 * filling
262 */
263#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
264/*
265 * Write method
266 */
267#define RECONSTRUCT_WRITE 1
268#define READ_MODIFY_WRITE 2
269/* not a write method, but a compute_parity mode */
270#define CHECK_PARITY 3
271
272/*
273 * Stripe state
274 */
275#define STRIPE_HANDLE 2
276#define STRIPE_SYNCING 3
277#define STRIPE_INSYNC 4
278#define STRIPE_PREREAD_ACTIVE 5
279#define STRIPE_DELAYED 6
280#define STRIPE_DEGRADED 7
281#define STRIPE_BIT_DELAY 8
282#define STRIPE_EXPANDING 9
283#define STRIPE_EXPAND_SOURCE 10
284#define STRIPE_EXPAND_READY 11
285#define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */
286#define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */
287#define STRIPE_BIOFILL_RUN 14
288#define STRIPE_COMPUTE_RUN 15
289/*
290 * Operation request flags
291 */
292#define STRIPE_OP_BIOFILL 0
293#define STRIPE_OP_COMPUTE_BLK 1
294#define STRIPE_OP_PREXOR 2
295#define STRIPE_OP_BIODRAIN 3
296#define STRIPE_OP_POSTXOR 4
297#define STRIPE_OP_CHECK 5
298
299/*
300 * Plugging:
301 *
302 * To improve write throughput, we need to delay the handling of some
303 * stripes until there has been a chance that several write requests
304 * for the one stripe have all been collected.
305 * In particular, any write request that would require pre-reading
306 * is put on a "delayed" queue until there are no stripes currently
307 * in a pre-read phase. Further, if the "delayed" queue is empty when
308 * a stripe is put on it then we "plug" the queue and do not process it
309 * until an unplug call is made. (the unplug_io_fn() is called).
310 *
311 * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add
312 * it to the count of prereading stripes.
313 * When write is initiated, or the stripe refcnt == 0 (just in case) we
314 * clear the PREREAD_ACTIVE flag and decrement the count
315 * Whenever the 'handle' queue is empty and the device is not plugged, we
 316 * move any stripes from delayed to handle and clear the DELAYED flag and set
317 * PREREAD_ACTIVE.
318 * In stripe_handle, if we find pre-reading is necessary, we do it if
319 * PREREAD_ACTIVE is set, else we set DELAYED which will send it to the delayed queue.
 320 * HANDLE gets cleared if stripe_handle leaves nothing locked.
321 */
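The delay decision described above reduces to a couple of bit tests on the stripe state. A hedged sketch, loosely modelled on what handle_stripe() does when a write would require pre-reading; the helper itself is invented:

static void example_maybe_delay(struct stripe_head *sh, int needs_preread)
{
        if (needs_preread && !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
                /* park on the delayed queue until pre-reading is allowed */
                set_bit(STRIPE_DELAYED, &sh->state);
                set_bit(STRIPE_HANDLE, &sh->state);
        }
}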
322
323
324struct disk_info {
325 mdk_rdev_t *rdev;
326};
327
328struct raid5_private_data {
329 struct hlist_head *stripe_hashtbl;
330 mddev_t *mddev;
331 struct disk_info *spare;
332 int chunk_size, level, algorithm;
333 int max_degraded;
334 int raid_disks;
335 int max_nr_stripes;
336
337 /* used during an expand */
338 sector_t expand_progress; /* MaxSector when no expand happening */
 339 sector_t expand_lo; /* from here up to expand_progress it is out-of-bounds
340 * as we haven't flushed the metadata yet
341 */
342 int previous_raid_disks;
343
344 struct list_head handle_list; /* stripes needing handling */
345 struct list_head hold_list; /* preread ready stripes */
346 struct list_head delayed_list; /* stripes that have plugged requests */
347 struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */
348 struct bio *retry_read_aligned; /* currently retrying aligned bios */
349 struct bio *retry_read_aligned_list; /* aligned bios retry list */
350 atomic_t preread_active_stripes; /* stripes with scheduled io */
351 atomic_t active_aligned_reads;
352 atomic_t pending_full_writes; /* full write backlog */
353 int bypass_count; /* bypassed prereads */
354 int bypass_threshold; /* preread nice */
355 struct list_head *last_hold; /* detect hold_list promotions */
356
357 atomic_t reshape_stripes; /* stripes with pending writes for reshape */
358 /* unfortunately we need two cache names as we temporarily have
359 * two caches.
360 */
361 int active_name;
362 char cache_name[2][20];
363 struct kmem_cache *slab_cache; /* for allocating stripes */
364
365 int seq_flush, seq_write;
366 int quiesce;
367
368 int fullsync; /* set to 1 if a full sync is needed,
369 * (fresh device added).
370 * Cleared when a sync completes.
371 */
372
373 struct page *spare_page; /* Used when checking P/Q in raid6 */
374
375 /*
376 * Free stripes pool
377 */
378 atomic_t active_stripes;
379 struct list_head inactive_list;
380 wait_queue_head_t wait_for_stripe;
381 wait_queue_head_t wait_for_overlap;
382 int inactive_blocked; /* release of inactive stripes blocked,
383 * waiting for 25% to be free
384 */
385 int pool_size; /* number of disks in stripeheads in pool */
386 spinlock_t device_lock;
387 struct disk_info *disks;
388};
389
390typedef struct raid5_private_data raid5_conf_t;
391
392#define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private)
393
394/*
395 * Our supported algorithms
396 */
397#define ALGORITHM_LEFT_ASYMMETRIC 0
398#define ALGORITHM_RIGHT_ASYMMETRIC 1
399#define ALGORITHM_LEFT_SYMMETRIC 2
400#define ALGORITHM_RIGHT_SYMMETRIC 3
401
402#endif
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h
index 3e120587eada..5a210959e3f8 100644
--- a/include/linux/raid/xor.h
+++ b/include/linux/raid/xor.h
@@ -1,8 +1,6 @@
1#ifndef _XOR_H 1#ifndef _XOR_H
2#define _XOR_H 2#define _XOR_H
3 3
4#include <linux/raid/md.h>
5
6#define MAX_XOR_BLOCKS 4 4#define MAX_XOR_BLOCKS 4
7 5
8extern void xor_blocks(unsigned int count, unsigned int bytes, 6extern void xor_blocks(unsigned int count, unsigned int bytes,
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index 80044a4f3ab9..bfd92e1e5d2c 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -36,7 +36,6 @@
36#include <linux/cache.h> 36#include <linux/cache.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/threads.h> 38#include <linux/threads.h>
39#include <linux/percpu.h>
40#include <linux/cpumask.h> 39#include <linux/cpumask.h>
41#include <linux/seqlock.h> 40#include <linux/seqlock.h>
42 41
@@ -108,25 +107,14 @@ struct rcu_data {
108 struct rcu_head barrier; 107 struct rcu_head barrier;
109}; 108};
110 109
111DECLARE_PER_CPU(struct rcu_data, rcu_data);
112DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
113
114/* 110/*
115 * Increment the quiescent state counter. 111 * Increment the quiescent state counter.
116 * The counter is a bit degenerated: We do not need to know 112 * The counter is a bit degenerated: We do not need to know
117 * how many quiescent states passed, just if there was at least 113 * how many quiescent states passed, just if there was at least
118 * one since the start of the grace period. Thus just a flag. 114 * one since the start of the grace period. Thus just a flag.
119 */ 115 */
120static inline void rcu_qsctr_inc(int cpu) 116extern void rcu_qsctr_inc(int cpu);
121{ 117extern void rcu_bh_qsctr_inc(int cpu);
122 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
123 rdp->passed_quiesc = 1;
124}
125static inline void rcu_bh_qsctr_inc(int cpu)
126{
127 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
128 rdp->passed_quiesc = 1;
129}
130 118
131extern int rcu_pending(int cpu); 119extern int rcu_pending(int cpu);
132extern int rcu_needs_cpu(int cpu); 120extern int rcu_needs_cpu(int cpu);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 528343e6da51..15fbb3ca634d 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -36,7 +36,6 @@
36#include <linux/cache.h> 36#include <linux/cache.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/threads.h> 38#include <linux/threads.h>
39#include <linux/percpu.h>
40#include <linux/cpumask.h> 39#include <linux/cpumask.h>
41#include <linux/seqlock.h> 40#include <linux/seqlock.h>
42#include <linux/lockdep.h> 41#include <linux/lockdep.h>
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 74304b4538d8..fce522782ffa 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -36,34 +36,19 @@
36#include <linux/cache.h> 36#include <linux/cache.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38#include <linux/threads.h> 38#include <linux/threads.h>
39#include <linux/percpu.h> 39#include <linux/smp.h>
40#include <linux/cpumask.h> 40#include <linux/cpumask.h>
41#include <linux/seqlock.h> 41#include <linux/seqlock.h>
42 42
43struct rcu_dyntick_sched { 43extern void rcu_qsctr_inc(int cpu);
44 int dynticks; 44static inline void rcu_bh_qsctr_inc(int cpu) { }
45 int dynticks_snap;
46 int sched_qs;
47 int sched_qs_snap;
48 int sched_dynticks_snap;
49};
50
51DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched);
52
53static inline void rcu_qsctr_inc(int cpu)
54{
55 struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu);
56
57 rdssp->sched_qs++;
58}
59#define rcu_bh_qsctr_inc(cpu)
60 45
61/* 46/*
62 * Someone might want to pass call_rcu_bh as a function pointer. 47 * Someone might want to pass call_rcu_bh as a function pointer.
63 * So this needs to just be a rename and not a macro function. 48 * So this needs to just be a rename and not a macro function.
64 * (no parentheses) 49 * (no parentheses)
65 */ 50 */
66#define call_rcu_bh call_rcu 51#define call_rcu_bh call_rcu
67 52
68/** 53/**
69 * call_rcu_sched - Queue RCU callback for invocation after sched grace period. 54 * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
@@ -117,30 +102,12 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
117struct softirq_action; 102struct softirq_action;
118 103
119#ifdef CONFIG_NO_HZ 104#ifdef CONFIG_NO_HZ
120 105extern void rcu_enter_nohz(void);
121static inline void rcu_enter_nohz(void) 106extern void rcu_exit_nohz(void);
122{ 107#else
123 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); 108# define rcu_enter_nohz() do { } while (0)
124 109# define rcu_exit_nohz() do { } while (0)
125 smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ 110#endif
126 __get_cpu_var(rcu_dyntick_sched).dynticks++;
127 WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs);
128}
129
130static inline void rcu_exit_nohz(void)
131{
132 static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1);
133
134 __get_cpu_var(rcu_dyntick_sched).dynticks++;
135 smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */
136 WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1),
137 &rs);
138}
139
140#else /* CONFIG_NO_HZ */
141#define rcu_enter_nohz() do { } while (0)
142#define rcu_exit_nohz() do { } while (0)
143#endif /* CONFIG_NO_HZ */
144 111
145/* 112/*
146 * A context switch is a grace period for rcupreempt synchronize_rcu() 113 * A context switch is a grace period for rcupreempt synchronize_rcu()
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index a722fb67bb2d..0cdda00f2b2a 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -33,7 +33,6 @@
33#include <linux/cache.h> 33#include <linux/cache.h>
34#include <linux/spinlock.h> 34#include <linux/spinlock.h>
35#include <linux/threads.h> 35#include <linux/threads.h>
36#include <linux/percpu.h>
37#include <linux/cpumask.h> 36#include <linux/cpumask.h>
38#include <linux/seqlock.h> 37#include <linux/seqlock.h>
39 38
@@ -236,30 +235,8 @@ struct rcu_state {
236#endif /* #ifdef CONFIG_NO_HZ */ 235#endif /* #ifdef CONFIG_NO_HZ */
237}; 236};
238 237
239extern struct rcu_state rcu_state; 238extern void rcu_qsctr_inc(int cpu);
240DECLARE_PER_CPU(struct rcu_data, rcu_data); 239extern void rcu_bh_qsctr_inc(int cpu);
241
242extern struct rcu_state rcu_bh_state;
243DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
244
245/*
246 * Increment the quiescent state counter.
247 * The counter is a bit degenerated: We do not need to know
248 * how many quiescent states passed, just if there was at least
249 * one since the start of the grace period. Thus just a flag.
250 */
251static inline void rcu_qsctr_inc(int cpu)
252{
253 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
254 rdp->passed_quiesc = 1;
255 rdp->passed_quiesc_completed = rdp->completed;
256}
257static inline void rcu_bh_qsctr_inc(int cpu)
258{
259 struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
260 rdp->passed_quiesc = 1;
261 rdp->passed_quiesc_completed = rdp->completed;
262}
263 240
264extern int rcu_pending(int cpu); 241extern int rcu_pending(int cpu);
265extern int rcu_needs_cpu(int cpu); 242extern int rcu_needs_cpu(int cpu);
diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h
index e84b0a9feda5..a6d014005d49 100644
--- a/include/linux/regulator/bq24022.h
+++ b/include/linux/regulator/bq24022.h
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13struct regulator_init_data;
14
13/** 15/**
14 * bq24022_mach_info - platform data for bq24022 16 * bq24022_mach_info - platform data for bq24022
15 * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging 17 * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging
@@ -18,4 +20,5 @@
18struct bq24022_mach_info { 20struct bq24022_mach_info {
19 int gpio_nce; 21 int gpio_nce;
20 int gpio_iset2; 22 int gpio_iset2;
23 struct regulator_init_data *init_data;
21}; 24};
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 801bf77ff4e2..277f4b964df5 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -88,6 +88,7 @@
88 * FAIL Regulator output has failed. 88 * FAIL Regulator output has failed.
89 * OVER_TEMP Regulator over temp. 89 * OVER_TEMP Regulator over temp.
90 * FORCE_DISABLE Regulator shut down by software. 90 * FORCE_DISABLE Regulator shut down by software.
91 * VOLTAGE_CHANGE Regulator voltage changed.
91 * 92 *
92 * NOTE: These events can be OR'ed together when passed into handler. 93 * NOTE: These events can be OR'ed together when passed into handler.
93 */ 94 */
@@ -98,6 +99,7 @@
98#define REGULATOR_EVENT_FAIL 0x08 99#define REGULATOR_EVENT_FAIL 0x08
99#define REGULATOR_EVENT_OVER_TEMP 0x10 100#define REGULATOR_EVENT_OVER_TEMP 0x10
100#define REGULATOR_EVENT_FORCE_DISABLE 0x20 101#define REGULATOR_EVENT_FORCE_DISABLE 0x20
102#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
101 103
102struct regulator; 104struct regulator;
103 105
@@ -140,6 +142,8 @@ int regulator_bulk_disable(int num_consumers,
140void regulator_bulk_free(int num_consumers, 142void regulator_bulk_free(int num_consumers,
141 struct regulator_bulk_data *consumers); 143 struct regulator_bulk_data *consumers);
142 144
145int regulator_count_voltages(struct regulator *regulator);
146int regulator_list_voltage(struct regulator *regulator, unsigned selector);
143int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); 147int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV);
144int regulator_get_voltage(struct regulator *regulator); 148int regulator_get_voltage(struct regulator *regulator);
145int regulator_set_current_limit(struct regulator *regulator, 149int regulator_set_current_limit(struct regulator *regulator,
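A sketch of how a consumer might use the two enumeration calls added above to check whether its supply can actually deliver 1.8V before requesting it. The device pointer and the "vcc" supply name are illustrative only:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_pick_1v8(struct device *dev)
{
        struct regulator *reg = regulator_get(dev, "vcc");
        int i, n, ret = -EINVAL;

        if (IS_ERR(reg))
                return PTR_ERR(reg);

        n = regulator_count_voltages(reg);
        for (i = 0; i < n; i++) {
                if (regulator_list_voltage(reg, i) == 1800000) {
                        ret = regulator_set_voltage(reg, 1800000, 1800000);
                        break;
                }
        }
        regulator_put(reg);
        return ret;
}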
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 2dae05705f13..4848d8dacd90 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -21,25 +21,38 @@
21struct regulator_dev; 21struct regulator_dev;
22struct regulator_init_data; 22struct regulator_init_data;
23 23
24enum regulator_status {
25 REGULATOR_STATUS_OFF,
26 REGULATOR_STATUS_ON,
27 REGULATOR_STATUS_ERROR,
28 /* fast/normal/idle/standby are flavors of "on" */
29 REGULATOR_STATUS_FAST,
30 REGULATOR_STATUS_NORMAL,
31 REGULATOR_STATUS_IDLE,
32 REGULATOR_STATUS_STANDBY,
33};
34
24/** 35/**
25 * struct regulator_ops - regulator operations. 36 * struct regulator_ops - regulator operations.
26 * 37 *
27 * This struct describes regulator operations which can be implemented by 38 * @enable: Configure the regulator as enabled.
28 * regulator chip drivers. 39 * @disable: Configure the regulator as disabled.
29 *
30 * @enable: Enable the regulator.
31 * @disable: Disable the regulator.
32 * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. 40 * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise.
33 * 41 *
34 * @set_voltage: Set the voltage for the regulator within the range specified. 42 * @set_voltage: Set the voltage for the regulator within the range specified.
35 * The driver should select the voltage closest to min_uV. 43 * The driver should select the voltage closest to min_uV.
36 * @get_voltage: Return the currently configured voltage for the regulator. 44 * @get_voltage: Return the currently configured voltage for the regulator.
45 * @list_voltage: Return one of the supported voltages, in microvolts; zero
46 * if the selector indicates a voltage that is unusable on this system;
47 * or negative errno. Selectors range from zero to one less than
48 * regulator_desc.n_voltages. Voltages may be reported in any order.
37 * 49 *
38 * @set_current_limit: Configure a limit for a current-limited regulator. 50 * @set_current_limit: Configure a limit for a current-limited regulator.
39 * @get_current_limit: Get the limit for a current-limited regulator. 51 * @get_current_limit: Get the configured limit for a current-limited regulator.
40 * 52 *
41 * @set_mode: Set the operating mode for the regulator. 53 * @get_mode: Get the configured operating mode for the regulator.
42 * @get_mode: Get the current operating mode for the regulator. 54 * @get_status: Return actual (not as-configured) status of regulator, as a
55 * REGULATOR_STATUS value (or negative errno)
43 * @get_optimum_mode: Get the most efficient operating mode for the regulator 56 * @get_optimum_mode: Get the most efficient operating mode for the regulator
44 * when running with the specified parameters. 57 * when running with the specified parameters.
45 * 58 *
@@ -51,9 +64,15 @@ struct regulator_init_data;
51 * suspended. 64 * suspended.
52 * @set_suspend_mode: Set the operating mode for the regulator when the 65 * @set_suspend_mode: Set the operating mode for the regulator when the
53 * system is suspended. 66 * system is suspended.
67 *
68 * This struct describes regulator operations which can be implemented by
69 * regulator chip drivers.
54 */ 70 */
55struct regulator_ops { 71struct regulator_ops {
56 72
73 /* enumerate supported voltages */
74 int (*list_voltage) (struct regulator_dev *, unsigned selector);
75
57 /* get/set regulator voltage */ 76 /* get/set regulator voltage */
58 int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); 77 int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV);
59 int (*get_voltage) (struct regulator_dev *); 78 int (*get_voltage) (struct regulator_dev *);
@@ -72,6 +91,13 @@ struct regulator_ops {
72 int (*set_mode) (struct regulator_dev *, unsigned int mode); 91 int (*set_mode) (struct regulator_dev *, unsigned int mode);
73 unsigned int (*get_mode) (struct regulator_dev *); 92 unsigned int (*get_mode) (struct regulator_dev *);
74 93
94 /* report regulator status ... most other accessors report
95 * control inputs, this reports results of combining inputs
96 * from Linux (and other sources) with the actual load.
97 * returns REGULATOR_STATUS_* or negative errno.
98 */
99 int (*get_status)(struct regulator_dev *);
100
75 /* get most efficient regulator operating mode for load */ 101 /* get most efficient regulator operating mode for load */
76 unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, 102 unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV,
77 int output_uV, int load_uA); 103 int output_uV, int load_uA);
@@ -106,6 +132,7 @@ enum regulator_type {
106 * 132 *
107 * @name: Identifying name for the regulator. 133 * @name: Identifying name for the regulator.
108 * @id: Numerical identifier for the regulator. 134 * @id: Numerical identifier for the regulator.
135 * @n_voltages: Number of selectors available for ops.list_voltage().
109 * @ops: Regulator operations table. 136 * @ops: Regulator operations table.
110 * @irq: Interrupt number for the regulator. 137 * @irq: Interrupt number for the regulator.
111 * @type: Indicates if the regulator is a voltage or current regulator. 138 * @type: Indicates if the regulator is a voltage or current regulator.
@@ -114,14 +141,48 @@ enum regulator_type {
114struct regulator_desc { 141struct regulator_desc {
115 const char *name; 142 const char *name;
116 int id; 143 int id;
144 unsigned n_voltages;
117 struct regulator_ops *ops; 145 struct regulator_ops *ops;
118 int irq; 146 int irq;
119 enum regulator_type type; 147 enum regulator_type type;
120 struct module *owner; 148 struct module *owner;
121}; 149};
122 150
151/*
152 * struct regulator_dev
153 *
154 * Voltage / Current regulator class device. One for each
155 * regulator.
156 *
157 * This should *not* be used directly by anything except the regulator
158 * core and notification injection (which should take the mutex and do
159 * no other direct access).
160 */
161struct regulator_dev {
162 struct regulator_desc *desc;
163 int use_count;
164
165 /* lists we belong to */
166 struct list_head list; /* list of all regulators */
167 struct list_head slist; /* list of supplied regulators */
168
169 /* lists we own */
170 struct list_head consumer_list; /* consumers we supply */
171 struct list_head supply_list; /* regulators we supply */
172
173 struct blocking_notifier_head notifier;
174 struct mutex mutex; /* consumer lock */
175 struct module *owner;
176 struct device dev;
177 struct regulation_constraints *constraints;
178 struct regulator_dev *supply; /* for tree */
179
180 void *reg_data; /* regulator_dev data */
181};
182
123struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, 183struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
124 struct device *dev, void *driver_data); 184 struct device *dev, struct regulator_init_data *init_data,
185 void *driver_data);
125void regulator_unregister(struct regulator_dev *rdev); 186void regulator_unregister(struct regulator_dev *rdev);
126 187
127int regulator_notifier_call_chain(struct regulator_dev *rdev, 188int regulator_notifier_call_chain(struct regulator_dev *rdev,
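Putting the driver-side additions together: a minimal sketch of a voltage regulator driver that fills in n_voltages and list_voltage() and registers with the extended regulator_register() signature introduced here. All names, the two voltages and the use of platform_data as the init data are illustrative only:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>

static const int demo_voltages[] = { 1800000, 3300000 };

static int demo_list_voltage(struct regulator_dev *rdev, unsigned selector)
{
        if (selector >= ARRAY_SIZE(demo_voltages))
                return -EINVAL;
        return demo_voltages[selector];
}

static struct regulator_ops demo_ops = {
        .list_voltage   = demo_list_voltage,
        /* .enable, .disable, .set_voltage, .get_status, ... would go here */
};

static struct regulator_desc demo_desc = {
        .name           = "demo-ldo",
        .id             = 0,
        .n_voltages     = ARRAY_SIZE(demo_voltages),
        .ops            = &demo_ops,
        .type           = REGULATOR_VOLTAGE,
        .owner          = THIS_MODULE,
};

static int __devinit demo_probe(struct platform_device *pdev)
{
        struct regulator_dev *rdev;

        /* init data would normally come from the board's regulator_init_data */
        rdev = regulator_register(&demo_desc, &pdev->dev,
                                  pdev->dev.platform_data, NULL);
        return IS_ERR(rdev) ? PTR_ERR(rdev) : 0;
}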
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index 1387a5d2190e..91b4da31f1b5 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -14,9 +14,12 @@
14#ifndef __REGULATOR_FIXED_H 14#ifndef __REGULATOR_FIXED_H
15#define __REGULATOR_FIXED_H 15#define __REGULATOR_FIXED_H
16 16
17struct regulator_init_data;
18
17struct fixed_voltage_config { 19struct fixed_voltage_config {
18 const char *supply_name; 20 const char *supply_name;
19 int microvolts; 21 int microvolts;
22 struct regulator_init_data *init_data;
20}; 23};
21 24
22#endif 25#endif
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 3794773b23d2..bac64fa390f2 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. 4 * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC.
5 * 5 *
6 * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> 6 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -73,7 +73,9 @@ struct regulator_state {
73 * 73 *
74 * @always_on: Set if the regulator should never be disabled. 74 * @always_on: Set if the regulator should never be disabled.
75 * @boot_on: Set if the regulator is enabled when the system is initially 75 * @boot_on: Set if the regulator is enabled when the system is initially
76 * started. 76 * started. If the regulator is not enabled by the hardware or
77 * bootloader then it will be enabled when the constraints are
78 * applied.
77 * @apply_uV: Apply the voltage constraint when initialising. 79 * @apply_uV: Apply the voltage constraint when initialising.
78 * 80 *
79 * @input_uV: Input voltage for regulator when supplied by another regulator. 81 * @input_uV: Input voltage for regulator when supplied by another regulator.
@@ -83,6 +85,7 @@ struct regulator_state {
83 * @state_standby: State for regulator when system is suspended in standby 85 * @state_standby: State for regulator when system is suspended in standby
84 * mode. 86 * mode.
85 * @initial_state: Suspend state to set by default. 87 * @initial_state: Suspend state to set by default.
88 * @initial_mode: Mode to set at startup.
86 */ 89 */
87struct regulation_constraints { 90struct regulation_constraints {
88 91
@@ -111,6 +114,9 @@ struct regulation_constraints {
111 struct regulator_state state_standby; 114 struct regulator_state state_standby;
112 suspend_state_t initial_state; /* suspend state to set at init */ 115 suspend_state_t initial_state; /* suspend state to set at init */
113 116
117 /* mode to set on startup */
118 unsigned int initial_mode;
119
 114 /* constraint flags */ 120 /* constraint flags */
115 unsigned always_on:1; /* regulator never off when system is on */ 121 unsigned always_on:1; /* regulator never off when system is on */
116 unsigned boot_on:1; /* bootloader/firmware enabled regulator */ 122 unsigned boot_on:1; /* bootloader/firmware enabled regulator */
@@ -160,4 +166,6 @@ struct regulator_init_data {
160 166
161int regulator_suspend_prepare(suspend_state_t state); 167int regulator_suspend_prepare(suspend_state_t state);
162 168
169void regulator_has_full_constraints(void);
170
163#endif 171#endif
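Both the new initial_mode field and regulator_has_full_constraints() are consumed from machine code. A sketch of plausible usage, not part of this diff; the "board_" names are hypothetical and the voltages arbitrary:

#include <linux/init.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/machine.h>

static struct regulator_init_data board_vaux_init_data = {
	.constraints = {
		.min_uV			= 1800000,
		.max_uV			= 1800000,
		.valid_modes_mask	= REGULATOR_MODE_NORMAL |
					  REGULATOR_MODE_STANDBY,
		.valid_ops_mask		= REGULATOR_CHANGE_MODE,
		.boot_on		= 1,
		.initial_mode		= REGULATOR_MODE_STANDBY,
	},
};

static void __init board_regulator_init(void)
{
	/* every supply on the board is described, so the core may
	 * act on anything left unclaimed */
	regulator_has_full_constraints();
}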
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b3b359660082..e1b7b2173885 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -8,7 +8,7 @@ struct ring_buffer;
8struct ring_buffer_iter; 8struct ring_buffer_iter;
9 9
10/* 10/*
11 * Don't reference this struct directly, use functions below. 11 * Don't refer to this struct directly, use functions below.
12 */ 12 */
13struct ring_buffer_event { 13struct ring_buffer_event {
14 u32 type:2, len:3, time_delta:27; 14 u32 type:2, len:3, time_delta:27;
@@ -18,10 +18,13 @@ struct ring_buffer_event {
18/** 18/**
19 * enum ring_buffer_type - internal ring buffer types 19 * enum ring_buffer_type - internal ring buffer types
20 * 20 *
21 * @RINGBUF_TYPE_PADDING: Left over page padding 21 * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event
22 * array is ignored 22 * If time_delta is 0:
23 * size is variable depending on how much 23 * array is ignored
24 * size is variable depending on how much
24 * padding is needed 25 * padding is needed
26 * If time_delta is non zero:
27 * everything else same as RINGBUF_TYPE_DATA
25 * 28 *
26 * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta 29 * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
27 * array[0] = time delta (28 .. 59) 30 * array[0] = time delta (28 .. 59)
@@ -65,6 +68,8 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
65 return event->time_delta; 68 return event->time_delta;
66} 69}
67 70
71void ring_buffer_event_discard(struct ring_buffer_event *event);
72
68/* 73/*
69 * size is in bytes for each per CPU buffer. 74 * size is in bytes for each per CPU buffer.
70 */ 75 */
@@ -74,13 +79,10 @@ void ring_buffer_free(struct ring_buffer *buffer);
74 79
75int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); 80int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
76 81
77struct ring_buffer_event * 82struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
78ring_buffer_lock_reserve(struct ring_buffer *buffer, 83 unsigned long length);
79 unsigned long length,
80 unsigned long *flags);
81int ring_buffer_unlock_commit(struct ring_buffer *buffer, 84int ring_buffer_unlock_commit(struct ring_buffer *buffer,
82 struct ring_buffer_event *event, 85 struct ring_buffer_event *event);
83 unsigned long flags);
84int ring_buffer_write(struct ring_buffer *buffer, 86int ring_buffer_write(struct ring_buffer *buffer,
85 unsigned long length, void *data); 87 unsigned long length, void *data);
86 88
@@ -121,17 +123,19 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
121unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); 123unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
122unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); 124unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
123 125
124u64 ring_buffer_time_stamp(int cpu); 126u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
125void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); 127void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
128 int cpu, u64 *ts);
129void ring_buffer_set_clock(struct ring_buffer *buffer,
130 u64 (*clock)(void));
131
132size_t ring_buffer_page_len(void *page);
126 133
127void tracing_on(void);
128void tracing_off(void);
129void tracing_off_permanent(void);
130 134
131void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); 135void *ring_buffer_alloc_read_page(struct ring_buffer *buffer);
132void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); 136void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
133int ring_buffer_read_page(struct ring_buffer *buffer, 137int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
134 void **data_page, int cpu, int full); 138 size_t len, int cpu, int full);
135 139
136enum ring_buffer_flags { 140enum ring_buffer_flags {
137 RB_FL_OVERWRITE = 1 << 0, 141 RB_FL_OVERWRITE = 1 << 0,
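The reserve/commit pair no longer threads irq flags through the caller; the buffer handles that internally. A sketch of a writer against the new signatures, not part of this diff; write_sample() and the single-u32 payload are hypothetical:

#include <linux/errno.h>
#include <linux/ring_buffer.h>
#include <linux/types.h>

static int write_sample(struct ring_buffer *buffer, u32 value)
{
	struct ring_buffer_event *event;
	u32 *payload;

	event = ring_buffer_lock_reserve(buffer, sizeof(*payload));
	if (!event)
		return -EBUSY;

	payload = ring_buffer_event_data(event);
	*payload = value;

	return ring_buffer_unlock_commit(buffer, event);
}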
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9da5aa0771ef..98e1fe51601d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -138,6 +138,8 @@ extern unsigned long nr_uninterruptible(void);
138extern unsigned long nr_active(void); 138extern unsigned long nr_active(void);
139extern unsigned long nr_iowait(void); 139extern unsigned long nr_iowait(void);
140 140
141extern unsigned long get_parent_ip(unsigned long addr);
142
141struct seq_file; 143struct seq_file;
142struct cfs_rq; 144struct cfs_rq;
143struct task_group; 145struct task_group;
@@ -298,17 +300,11 @@ extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
298 struct file *filp, void __user *buffer, 300 struct file *filp, void __user *buffer,
299 size_t *lenp, loff_t *ppos); 301 size_t *lenp, loff_t *ppos);
300extern unsigned int softlockup_panic; 302extern unsigned int softlockup_panic;
301extern unsigned long sysctl_hung_task_check_count;
302extern unsigned long sysctl_hung_task_timeout_secs;
303extern unsigned long sysctl_hung_task_warnings;
304extern int softlockup_thresh; 303extern int softlockup_thresh;
305#else 304#else
306static inline void softlockup_tick(void) 305static inline void softlockup_tick(void)
307{ 306{
308} 307}
309static inline void spawn_softlockup_task(void)
310{
311}
312static inline void touch_softlockup_watchdog(void) 308static inline void touch_softlockup_watchdog(void)
313{ 309{
314} 310}
@@ -317,6 +313,15 @@ static inline void touch_all_softlockup_watchdogs(void)
317} 313}
318#endif 314#endif
319 315
316#ifdef CONFIG_DETECT_HUNG_TASK
317extern unsigned int sysctl_hung_task_panic;
318extern unsigned long sysctl_hung_task_check_count;
319extern unsigned long sysctl_hung_task_timeout_secs;
320extern unsigned long sysctl_hung_task_warnings;
321extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
322 struct file *filp, void __user *buffer,
323 size_t *lenp, loff_t *ppos);
324#endif
320 325
321/* Attach to any functions which should be ignored in wchan output. */ 326/* Attach to any functions which should be ignored in wchan output. */
322#define __sched __attribute__((__section__(".sched.text"))) 327#define __sched __attribute__((__section__(".sched.text")))
@@ -1253,9 +1258,8 @@ struct task_struct {
1253/* ipc stuff */ 1258/* ipc stuff */
1254 struct sysv_sem sysvsem; 1259 struct sysv_sem sysvsem;
1255#endif 1260#endif
1256#ifdef CONFIG_DETECT_SOFTLOCKUP 1261#ifdef CONFIG_DETECT_HUNG_TASK
1257/* hung task detection */ 1262/* hung task detection */
1258 unsigned long last_switch_timestamp;
1259 unsigned long last_switch_count; 1263 unsigned long last_switch_count;
1260#endif 1264#endif
1261/* CPU-specific state of this task */ 1265/* CPU-specific state of this task */
@@ -1292,6 +1296,11 @@ struct task_struct {
1292/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ 1296/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
1293 spinlock_t alloc_lock; 1297 spinlock_t alloc_lock;
1294 1298
1299#ifdef CONFIG_GENERIC_HARDIRQS
1300 /* IRQ handler threads */
1301 struct irqaction *irqaction;
1302#endif
1303
1295 /* Protection of the PI data structures: */ 1304 /* Protection of the PI data structures: */
1296 spinlock_t pi_lock; 1305 spinlock_t pi_lock;
1297 1306
@@ -1405,6 +1414,8 @@ struct task_struct {
1405 int curr_ret_stack; 1414 int curr_ret_stack;
1406 /* Stack of return addresses for return function tracing */ 1415 /* Stack of return addresses for return function tracing */
1407 struct ftrace_ret_stack *ret_stack; 1416 struct ftrace_ret_stack *ret_stack;
1417 /* time stamp for last schedule */
1418 unsigned long long ftrace_timestamp;
1408 /* 1419 /*
1409 * Number of functions that haven't been traced 1420 * Number of functions that haven't been traced
1410 * because of depth overrun. 1421 * because of depth overrun.
diff --git a/include/linux/security.h b/include/linux/security.h
index 54ed15799a83..d5fd6163606f 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -32,6 +32,7 @@
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/key.h> 33#include <linux/key.h>
34#include <linux/xfrm.h> 34#include <linux/xfrm.h>
35#include <linux/gfp.h>
35#include <net/flow.h> 36#include <net/flow.h>
36 37
37/* Maximum number of letters for an LSM name string */ 38/* Maximum number of letters for an LSM name string */
@@ -2953,5 +2954,28 @@ static inline void securityfs_remove(struct dentry *dentry)
2953 2954
2954#endif 2955#endif
2955 2956
2957#ifdef CONFIG_SECURITY
2958
2959static inline char *alloc_secdata(void)
2960{
2961 return (char *)get_zeroed_page(GFP_KERNEL);
2962}
2963
2964static inline void free_secdata(void *secdata)
2965{
2966 free_page((unsigned long)secdata);
2967}
2968
2969#else
2970
2971static inline char *alloc_secdata(void)
2972{
2973 return (char *)1;
2974}
2975
2976static inline void free_secdata(void *secdata)
2977{ }
2978#endif /* CONFIG_SECURITY */
2979
2956#endif /* ! __LINUX_SECURITY_H */ 2980#endif /* ! __LINUX_SECURITY_H */
2957 2981
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index df9245c7bd3b..57a97e52e58d 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -164,6 +164,9 @@
164/* NWPSERIAL */ 164/* NWPSERIAL */
165#define PORT_NWPSERIAL 85 165#define PORT_NWPSERIAL 85
166 166
167/* MAX3100 */
168#define PORT_MAX3100 86
169
167#ifdef __KERNEL__ 170#ifdef __KERNEL__
168 171
169#include <linux/compiler.h> 172#include <linux/compiler.h>
@@ -277,7 +280,7 @@ struct uart_port {
277 struct uart_icount icount; /* statistics */ 280 struct uart_icount icount; /* statistics */
278 281
279 struct console *cons; /* struct console, if any */ 282 struct console *cons; /* struct console, if any */
280#ifdef CONFIG_SERIAL_CORE_CONSOLE 283#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ)
281 unsigned long sysrq; /* sysrq timeout */ 284 unsigned long sysrq; /* sysrq timeout */
282#endif 285#endif
283 286
diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h
new file mode 100644
index 000000000000..4976befb6aeb
--- /dev/null
+++ b/include/linux/serial_max3100.h
@@ -0,0 +1,52 @@
1/*
2 *
3 * Copyright (C) 2007 Christian Pellegrin
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11
12#ifndef _LINUX_SERIAL_MAX3100_H
13#define _LINUX_SERIAL_MAX3100_H 1
14
15
16/**
17 * struct plat_max3100 - MAX3100 SPI UART platform data
18 * @loopback: force MAX3100 in loopback
 19 * @crystal: 1 for 3.6864 MHz, 0 for 1.8432 MHz
20 * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook
21 * called on suspend and resume to activate it.
 22 * @poll_time: poll time for the CTS signal in ms; 0 disables polling (no
 23 * hardware flow control is then possible, but CPU usage is lower)
24 *
25 * You should use this structure in your machine description to specify
26 * how the MAX3100 is connected. Example:
27 *
28 * static struct plat_max3100 max3100_plat_data = {
29 * .loopback = 0,
30 * .crystal = 0,
31 * .poll_time = 100,
32 * };
33 *
34 * static struct spi_board_info spi_board_info[] = {
35 * {
36 * .modalias = "max3100",
37 * .platform_data = &max3100_plat_data,
38 * .irq = IRQ_EINT12,
39 * .max_speed_hz = 5*1000*1000,
40 * .chip_select = 0,
41 * },
42 * };
43 *
44 **/
45struct plat_max3100 {
46 int loopback;
47 int crystal;
48 void (*max3100_hw_suspend) (int suspend);
49 int poll_time;
50};
51
52#endif
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 6ca6a7b66d75..5ac9b0bcaf9a 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,6 +14,7 @@
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ 14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ 15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h> 16#include <linux/compiler.h>
17#include <trace/kmemtrace.h>
17 18
18/* Size description struct for general caches. */ 19/* Size description struct for general caches. */
19struct cache_sizes { 20struct cache_sizes {
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[];
28void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 29void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
29void *__kmalloc(size_t size, gfp_t flags); 30void *__kmalloc(size_t size, gfp_t flags);
30 31
31static inline void *kmalloc(size_t size, gfp_t flags) 32#ifdef CONFIG_KMEMTRACE
33extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
34extern size_t slab_buffer_size(struct kmem_cache *cachep);
35#else
36static __always_inline void *
37kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
32{ 38{
39 return kmem_cache_alloc(cachep, flags);
40}
41static inline size_t slab_buffer_size(struct kmem_cache *cachep)
42{
43 return 0;
44}
45#endif
46
47static __always_inline void *kmalloc(size_t size, gfp_t flags)
48{
49 struct kmem_cache *cachep;
50 void *ret;
51
33 if (__builtin_constant_p(size)) { 52 if (__builtin_constant_p(size)) {
34 int i = 0; 53 int i = 0;
35 54
@@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags)
47found: 66found:
48#ifdef CONFIG_ZONE_DMA 67#ifdef CONFIG_ZONE_DMA
49 if (flags & GFP_DMA) 68 if (flags & GFP_DMA)
50 return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, 69 cachep = malloc_sizes[i].cs_dmacachep;
51 flags); 70 else
52#endif 71#endif
53 return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); 72 cachep = malloc_sizes[i].cs_cachep;
73
74 ret = kmem_cache_alloc_notrace(cachep, flags);
75
76 trace_kmalloc(_THIS_IP_, ret,
77 size, slab_buffer_size(cachep), flags);
78
79 return ret;
54 } 80 }
55 return __kmalloc(size, flags); 81 return __kmalloc(size, flags);
56} 82}
@@ -59,8 +85,25 @@ found:
59extern void *__kmalloc_node(size_t size, gfp_t flags, int node); 85extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
60extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 86extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
61 87
62static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 88#ifdef CONFIG_KMEMTRACE
89extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
90 gfp_t flags,
91 int nodeid);
92#else
93static __always_inline void *
94kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
95 gfp_t flags,
96 int nodeid)
97{
98 return kmem_cache_alloc_node(cachep, flags, nodeid);
99}
100#endif
101
102static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
63{ 103{
104 struct kmem_cache *cachep;
105 void *ret;
106
64 if (__builtin_constant_p(size)) { 107 if (__builtin_constant_p(size)) {
65 int i = 0; 108 int i = 0;
66 109
@@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
78found: 121found:
79#ifdef CONFIG_ZONE_DMA 122#ifdef CONFIG_ZONE_DMA
80 if (flags & GFP_DMA) 123 if (flags & GFP_DMA)
81 return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, 124 cachep = malloc_sizes[i].cs_dmacachep;
82 flags, node); 125 else
83#endif 126#endif
84 return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, 127 cachep = malloc_sizes[i].cs_cachep;
85 flags, node); 128
129 ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
130
131 trace_kmalloc_node(_THIS_IP_, ret,
132 size, slab_buffer_size(cachep),
133 flags, node);
134
135 return ret;
86 } 136 }
87 return __kmalloc_node(size, flags, node); 137 return __kmalloc_node(size, flags, node);
88} 138}
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab9..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
3 3
4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 4void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
5 5
6static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) 6static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
7 gfp_t flags)
7{ 8{
8 return kmem_cache_alloc_node(cachep, flags, -1); 9 return kmem_cache_alloc_node(cachep, flags, -1);
9} 10}
10 11
11void *__kmalloc_node(size_t size, gfp_t flags, int node); 12void *__kmalloc_node(size_t size, gfp_t flags, int node);
12 13
13static inline void *kmalloc_node(size_t size, gfp_t flags, int node) 14static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
14{ 15{
15 return __kmalloc_node(size, flags, node); 16 return __kmalloc_node(size, flags, node);
16} 17}
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
23 * kmalloc is the normal method of allocating memory 24 * kmalloc is the normal method of allocating memory
24 * in the kernel. 25 * in the kernel.
25 */ 26 */
26static inline void *kmalloc(size_t size, gfp_t flags) 27static __always_inline void *kmalloc(size_t size, gfp_t flags)
27{ 28{
28 return __kmalloc_node(size, flags, -1); 29 return __kmalloc_node(size, flags, -1);
29} 30}
30 31
31static inline void *__kmalloc(size_t size, gfp_t flags) 32static __always_inline void *__kmalloc(size_t size, gfp_t flags)
32{ 33{
33 return kmalloc(size, flags); 34 return kmalloc(size, flags);
34} 35}
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h
new file mode 100644
index 000000000000..85958277f83d
--- /dev/null
+++ b/include/linux/slow-work.h
@@ -0,0 +1,95 @@
1/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 *
11 * See Documentation/slow-work.txt
12 */
13
14#ifndef _LINUX_SLOW_WORK_H
15#define _LINUX_SLOW_WORK_H
16
17#ifdef CONFIG_SLOW_WORK
18
19#include <linux/sysctl.h>
20
21struct slow_work;
22
23/*
24 * The operations used to support slow work items
25 */
26struct slow_work_ops {
27 /* get a ref on a work item
28 * - return 0 if successful, -ve if not
29 */
30 int (*get_ref)(struct slow_work *work);
31
32 /* discard a ref to a work item */
33 void (*put_ref)(struct slow_work *work);
34
35 /* execute a work item */
36 void (*execute)(struct slow_work *work);
37};
38
39/*
40 * A slow work item
41 * - A reference is held on the parent object by the thread pool when it is
42 * queued
43 */
44struct slow_work {
45 unsigned long flags;
46#define SLOW_WORK_PENDING 0 /* item pending (further) execution */
47#define SLOW_WORK_EXECUTING 1 /* item currently executing */
48#define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */
49#define SLOW_WORK_VERY_SLOW 3 /* item is very slow */
50 const struct slow_work_ops *ops; /* operations table for this item */
51 struct list_head link; /* link in queue */
52};
53
54/**
55 * slow_work_init - Initialise a slow work item
56 * @work: The work item to initialise
57 * @ops: The operations to use to handle the slow work item
58 *
59 * Initialise a slow work item.
60 */
61static inline void slow_work_init(struct slow_work *work,
62 const struct slow_work_ops *ops)
63{
64 work->flags = 0;
65 work->ops = ops;
66 INIT_LIST_HEAD(&work->link);
67}
68
69/**
 70 * vslow_work_init - Initialise a very slow work item
71 * @work: The work item to initialise
72 * @ops: The operations to use to handle the slow work item
73 *
74 * Initialise a very slow work item. This item will be restricted such that
75 * only a certain number of the pool threads will be able to execute items of
76 * this type.
77 */
78static inline void vslow_work_init(struct slow_work *work,
79 const struct slow_work_ops *ops)
80{
81 work->flags = 1 << SLOW_WORK_VERY_SLOW;
82 work->ops = ops;
83 INIT_LIST_HEAD(&work->link);
84}
85
86extern int slow_work_enqueue(struct slow_work *work);
87extern int slow_work_register_user(void);
88extern void slow_work_unregister_user(void);
89
90#ifdef CONFIG_SYSCTL
91extern ctl_table slow_work_sysctls[];
92#endif
93
94#endif /* CONFIG_SLOW_WORK */
95#endif /* _LINUX_SLOW_WORK_H */
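The intended pattern, as far as the header shows it, is to embed a struct slow_work in the object owning the deferred operation and let the ops take and drop a reference on that object. A hedged sketch; "my_object" and its callbacks are hypothetical:

#include <linux/kernel.h>
#include <linux/slow-work.h>
#include <asm/atomic.h>

struct my_object {
	atomic_t		usage;
	struct slow_work	work;
};

static int my_get_ref(struct slow_work *work)
{
	atomic_inc(&container_of(work, struct my_object, work)->usage);
	return 0;
}

static void my_put_ref(struct slow_work *work)
{
	atomic_dec(&container_of(work, struct my_object, work)->usage);
}

static void my_execute(struct slow_work *work)
{
	/* the long-running part: filesystem lookup, mkdir, ... */
}

static const struct slow_work_ops my_slow_work_ops = {
	.get_ref	= my_get_ref,
	.put_ref	= my_put_ref,
	.execute	= my_execute,
};

A user would pair slow_work_register_user() in module init with slow_work_unregister_user() on exit, initialise each item with slow_work_init(&obj->work, &my_slow_work_ops), and queue it with slow_work_enqueue(&obj->work).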
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e37b6aa8a9fb..5046f90c1171 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,6 +10,7 @@
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/workqueue.h> 11#include <linux/workqueue.h>
12#include <linux/kobject.h> 12#include <linux/kobject.h>
13#include <trace/kmemtrace.h>
13 14
14enum stat_item { 15enum stat_item {
15 ALLOC_FASTPATH, /* Allocation from cpu slab */ 16 ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -217,13 +218,30 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
217void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 218void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
218void *__kmalloc(size_t size, gfp_t flags); 219void *__kmalloc(size_t size, gfp_t flags);
219 220
221#ifdef CONFIG_KMEMTRACE
222extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
223#else
224static __always_inline void *
225kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
226{
227 return kmem_cache_alloc(s, gfpflags);
228}
229#endif
230
220static __always_inline void *kmalloc_large(size_t size, gfp_t flags) 231static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
221{ 232{
222 return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); 233 unsigned int order = get_order(size);
234 void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
235
236 trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
237
238 return ret;
223} 239}
224 240
225static __always_inline void *kmalloc(size_t size, gfp_t flags) 241static __always_inline void *kmalloc(size_t size, gfp_t flags)
226{ 242{
243 void *ret;
244
227 if (__builtin_constant_p(size)) { 245 if (__builtin_constant_p(size)) {
228 if (size > SLUB_MAX_SIZE) 246 if (size > SLUB_MAX_SIZE)
229 return kmalloc_large(size, flags); 247 return kmalloc_large(size, flags);
@@ -234,7 +252,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
234 if (!s) 252 if (!s)
235 return ZERO_SIZE_PTR; 253 return ZERO_SIZE_PTR;
236 254
237 return kmem_cache_alloc(s, flags); 255 ret = kmem_cache_alloc_notrace(s, flags);
256
257 trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
258
259 return ret;
238 } 260 }
239 } 261 }
240 return __kmalloc(size, flags); 262 return __kmalloc(size, flags);
@@ -244,8 +266,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
244void *__kmalloc_node(size_t size, gfp_t flags, int node); 266void *__kmalloc_node(size_t size, gfp_t flags, int node);
245void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 267void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
246 268
269#ifdef CONFIG_KMEMTRACE
270extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
271 gfp_t gfpflags,
272 int node);
273#else
274static __always_inline void *
275kmem_cache_alloc_node_notrace(struct kmem_cache *s,
276 gfp_t gfpflags,
277 int node)
278{
279 return kmem_cache_alloc_node(s, gfpflags, node);
280}
281#endif
282
247static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) 283static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
248{ 284{
285 void *ret;
286
249 if (__builtin_constant_p(size) && 287 if (__builtin_constant_p(size) &&
250 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { 288 size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
251 struct kmem_cache *s = kmalloc_slab(size); 289 struct kmem_cache *s = kmalloc_slab(size);
@@ -253,7 +291,12 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
253 if (!s) 291 if (!s)
254 return ZERO_SIZE_PTR; 292 return ZERO_SIZE_PTR;
255 293
256 return kmem_cache_alloc_node(s, flags, node); 294 ret = kmem_cache_alloc_node_notrace(s, flags, node);
295
296 trace_kmalloc_node(_THIS_IP_, ret,
297 size, s->size, flags, node);
298
299 return ret;
257 } 300 }
258 return __kmalloc_node(size, flags, node); 301 return __kmalloc_node(size, flags, node);
259} 302}
diff --git a/include/linux/smp.h b/include/linux/smp.h
index bbacb7baa446..a69db820eed6 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -38,7 +38,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
38/* 38/*
39 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. 39 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
40 * (defined in asm header): 40 * (defined in asm header):
41 */ 41 */
42 42
43/* 43/*
44 * stops all CPUs but the current one: 44 * stops all CPUs but the current one:
@@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
82 return 0; 82 return 0;
83} 83}
84 84
85void __smp_call_function_single(int cpuid, struct call_single_data *data); 85void __smp_call_function_single(int cpuid, struct call_single_data *data,
86 int wait);
86 87
87/* 88/*
88 * Generic and arch helpers 89 * Generic and arch helpers
@@ -121,6 +122,8 @@ extern unsigned int setup_max_cpus;
121 122
122#else /* !SMP */ 123#else /* !SMP */
123 124
125static inline void smp_send_stop(void) { }
126
124/* 127/*
125 * These macros fold the SMP functionality into a single CPU system 128 * These macros fold the SMP functionality into a single CPU system
126 */ 129 */
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index f41ffd7c2dd9..34c4475ac4a2 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -103,6 +103,14 @@
103#define SONYPI_EVENT_WIRELESS_OFF 61 103#define SONYPI_EVENT_WIRELESS_OFF 61
104#define SONYPI_EVENT_ZOOM_IN_PRESSED 62 104#define SONYPI_EVENT_ZOOM_IN_PRESSED 62
105#define SONYPI_EVENT_ZOOM_OUT_PRESSED 63 105#define SONYPI_EVENT_ZOOM_OUT_PRESSED 63
106#define SONYPI_EVENT_CD_EJECT_PRESSED 64
107#define SONYPI_EVENT_MODEKEY_PRESSED 65
108#define SONYPI_EVENT_PKEY_P4 66
109#define SONYPI_EVENT_PKEY_P5 67
110#define SONYPI_EVENT_SETTINGKEY_PRESSED 68
111#define SONYPI_EVENT_VOLUME_INC_PRESSED 69
112#define SONYPI_EVENT_VOLUME_DEC_PRESSED 70
113#define SONYPI_EVENT_BRIGHTNESS_PRESSED 71
106 114
107/* get/set brightness */ 115/* get/set brightness */
108#define SONYPI_IOCGBRT _IOR('v', 0, __u8) 116#define SONYPI_IOCGBRT _IOR('v', 0, __u8)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 68bb1c501d0d..2cc43fa380cb 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -204,6 +204,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
204 * SPI slaves, and are numbered from zero to num_chipselects. 204 * SPI slaves, and are numbered from zero to num_chipselects.
205 * each slave has a chipselect signal, but it's common that not 205 * each slave has a chipselect signal, but it's common that not
206 * every chipselect is connected to a slave. 206 * every chipselect is connected to a slave.
207 * @dma_alignment: SPI controller constraint on DMA buffers alignment.
207 * @setup: updates the device mode and clocking records used by a 208 * @setup: updates the device mode and clocking records used by a
208 * device's SPI controller; protocol code may call this. This 209 * device's SPI controller; protocol code may call this. This
209 * must fail if an unrecognized or unsupported mode is requested. 210 * must fail if an unrecognized or unsupported mode is requested.
@@ -239,6 +240,11 @@ struct spi_master {
239 */ 240 */
240 u16 num_chipselect; 241 u16 num_chipselect;
241 242
243 /* some SPI controllers pose alignment requirements on DMAable
244 * buffers; let protocol drivers know about these requirements.
245 */
246 u16 dma_alignment;
247
242 /* setup mode and clock, etc (spi driver may call many times) */ 248 /* setup mode and clock, etc (spi driver may call many times) */
243 int (*setup)(struct spi_device *spi); 249 int (*setup)(struct spi_device *spi);
244 250
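dma_alignment gives protocol drivers a way to check whether a buffer can be handed to the controller as-is. A small hedged helper sketch, not part of this diff; buf_is_dma_aligned() is hypothetical:

#include <linux/kernel.h>
#include <linux/spi/spi.h>

static bool buf_is_dma_aligned(struct spi_device *spi, const void *buf)
{
	u16 align = spi->master->dma_alignment;

	/* 0 means the controller states no alignment requirement */
	return !align || IS_ALIGNED((unsigned long)buf, align);
}

A driver that fails this test would typically fall back to a bounce buffer.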
diff --git a/include/linux/string.h b/include/linux/string.h
index 8852739f36df..489019ef1694 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -10,6 +10,7 @@
10#include <linux/compiler.h> /* for inline */ 10#include <linux/compiler.h> /* for inline */
11#include <linux/types.h> /* for size_t */ 11#include <linux/types.h> /* for size_t */
12#include <linux/stddef.h> /* for NULL */ 12#include <linux/stddef.h> /* for NULL */
13#include <stdarg.h>
13 14
14extern char *strndup_user(const char __user *, long); 15extern char *strndup_user(const char __user *, long);
15extern void *memdup_user(const void __user *, size_t); 16extern void *memdup_user(const void __user *, size_t);
@@ -112,8 +113,23 @@ extern void argv_free(char **argv);
112 113
113extern bool sysfs_streq(const char *s1, const char *s2); 114extern bool sysfs_streq(const char *s1, const char *s2);
114 115
116#ifdef CONFIG_BINARY_PRINTF
117int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
118int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf);
119int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
120#endif
121
115extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, 122extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
116 const void *from, size_t available); 123 const void *from, size_t available);
117 124
125/**
126 * strstarts - does @str start with @prefix?
127 * @str: string to examine
128 * @prefix: prefix to look for.
129 */
130static inline bool strstarts(const char *str, const char *prefix)
131{
132 return strncmp(str, prefix, strlen(prefix)) == 0;
133}
118#endif 134#endif
119#endif /* _LINUX_STRING_H_ */ 135#endif /* _LINUX_STRING_H_ */
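strstarts() simply wraps the common strncmp(str, prefix, strlen(prefix)) == 0 idiom, while the bprintf()/bstr_printf() pair (CONFIG_BINARY_PRINTF only) splits formatting into a binary capture step and a later text-rendering step. A trivial hedged example of the former; is_debug_option() is hypothetical:

#include <linux/string.h>

static bool is_debug_option(const char *opt)
{
	return strstarts(opt, "debug=");
}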
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index d3a4c0231933..2a30775959e9 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -24,6 +24,15 @@
24 */ 24 */
25typedef int (*svc_thread_fn)(void *); 25typedef int (*svc_thread_fn)(void *);
26 26
27/* statistics for svc_pool structures */
28struct svc_pool_stats {
29 unsigned long packets;
30 unsigned long sockets_queued;
31 unsigned long threads_woken;
32 unsigned long overloads_avoided;
33 unsigned long threads_timedout;
34};
35
27/* 36/*
28 * 37 *
29 * RPC service thread pool. 38 * RPC service thread pool.
@@ -41,6 +50,8 @@ struct svc_pool {
41 struct list_head sp_sockets; /* pending sockets */ 50 struct list_head sp_sockets; /* pending sockets */
42 unsigned int sp_nrthreads; /* # of threads in pool */ 51 unsigned int sp_nrthreads; /* # of threads in pool */
43 struct list_head sp_all_threads; /* all server threads */ 52 struct list_head sp_all_threads; /* all server threads */
53 int sp_nwaking; /* number of threads woken but not yet active */
54 struct svc_pool_stats sp_stats; /* statistics on pool operation */
44} ____cacheline_aligned_in_smp; 55} ____cacheline_aligned_in_smp;
45 56
46/* 57/*
@@ -83,6 +94,8 @@ struct svc_serv {
83 struct module * sv_module; /* optional module to count when 94 struct module * sv_module; /* optional module to count when
84 * adding threads */ 95 * adding threads */
85 svc_thread_fn sv_function; /* main function for threads */ 96 svc_thread_fn sv_function; /* main function for threads */
97 unsigned int sv_drc_max_pages; /* Total pages for DRC */
98 unsigned int sv_drc_pages_used;/* DRC pages used */
86}; 99};
87 100
88/* 101/*
@@ -218,6 +231,7 @@ struct svc_rqst {
218 struct svc_cred rq_cred; /* auth info */ 231 struct svc_cred rq_cred; /* auth info */
219 void * rq_xprt_ctxt; /* transport specific context ptr */ 232 void * rq_xprt_ctxt; /* transport specific context ptr */
220 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ 233 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
234 int rq_usedeferral; /* use deferral */
221 235
222 size_t rq_xprt_hlen; /* xprt header len */ 236 size_t rq_xprt_hlen; /* xprt header len */
223 struct xdr_buf rq_arg; 237 struct xdr_buf rq_arg;
@@ -263,6 +277,7 @@ struct svc_rqst {
263 * cache pages */ 277 * cache pages */
264 wait_queue_head_t rq_wait; /* synchronization */ 278 wait_queue_head_t rq_wait; /* synchronization */
265 struct task_struct *rq_task; /* service thread */ 279 struct task_struct *rq_task; /* service thread */
280 int rq_waking; /* 1 if thread is being woken */
266}; 281};
267 282
268/* 283/*
@@ -393,6 +408,7 @@ struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
393 void (*shutdown)(struct svc_serv *), 408 void (*shutdown)(struct svc_serv *),
394 svc_thread_fn, struct module *); 409 svc_thread_fn, struct module *);
395int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); 410int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
411int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
396void svc_destroy(struct svc_serv *); 412void svc_destroy(struct svc_serv *);
397int svc_process(struct svc_rqst *); 413int svc_process(struct svc_rqst *);
398int svc_register(const struct svc_serv *, const int, 414int svc_register(const struct svc_serv *, const int,
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 49e1eb454465..d8910b68e1bd 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -69,27 +69,27 @@ struct xdr_buf {
69 * pre-xdr'ed macros. 69 * pre-xdr'ed macros.
70 */ 70 */
71 71
72#define xdr_zero __constant_htonl(0) 72#define xdr_zero cpu_to_be32(0)
73#define xdr_one __constant_htonl(1) 73#define xdr_one cpu_to_be32(1)
74#define xdr_two __constant_htonl(2) 74#define xdr_two cpu_to_be32(2)
75 75
76#define rpc_success __constant_htonl(RPC_SUCCESS) 76#define rpc_success cpu_to_be32(RPC_SUCCESS)
77#define rpc_prog_unavail __constant_htonl(RPC_PROG_UNAVAIL) 77#define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL)
78#define rpc_prog_mismatch __constant_htonl(RPC_PROG_MISMATCH) 78#define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH)
79#define rpc_proc_unavail __constant_htonl(RPC_PROC_UNAVAIL) 79#define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL)
80#define rpc_garbage_args __constant_htonl(RPC_GARBAGE_ARGS) 80#define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS)
81#define rpc_system_err __constant_htonl(RPC_SYSTEM_ERR) 81#define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR)
82#define rpc_drop_reply __constant_htonl(RPC_DROP_REPLY) 82#define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY)
83 83
84#define rpc_auth_ok __constant_htonl(RPC_AUTH_OK) 84#define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK)
85#define rpc_autherr_badcred __constant_htonl(RPC_AUTH_BADCRED) 85#define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED)
86#define rpc_autherr_rejectedcred __constant_htonl(RPC_AUTH_REJECTEDCRED) 86#define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED)
87#define rpc_autherr_badverf __constant_htonl(RPC_AUTH_BADVERF) 87#define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF)
88#define rpc_autherr_rejectedverf __constant_htonl(RPC_AUTH_REJECTEDVERF) 88#define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF)
89#define rpc_autherr_tooweak __constant_htonl(RPC_AUTH_TOOWEAK) 89#define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK)
90#define rpcsec_gsserr_credproblem __constant_htonl(RPCSEC_GSS_CREDPROBLEM) 90#define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM)
91#define rpcsec_gsserr_ctxproblem __constant_htonl(RPCSEC_GSS_CTXPROBLEM) 91#define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM)
92#define rpc_autherr_oldseqnum __constant_htonl(101) 92#define rpc_autherr_oldseqnum cpu_to_be32(101)
93 93
94/* 94/*
95 * Miscellaneous XDR helper functions 95 * Miscellaneous XDR helper functions
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index b299a82a05e7..6470f74074af 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -65,6 +65,7 @@ struct old_linux_dirent;
65#include <asm/signal.h> 65#include <asm/signal.h>
66#include <linux/quota.h> 66#include <linux/quota.h>
67#include <linux/key.h> 67#include <linux/key.h>
68#include <linux/ftrace.h>
68 69
69#define __SC_DECL1(t1, a1) t1 a1 70#define __SC_DECL1(t1, a1) t1 a1
70#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) 71#define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__)
@@ -95,7 +96,46 @@ struct old_linux_dirent;
95#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) 96#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
96#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) 97#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
97 98
99#ifdef CONFIG_FTRACE_SYSCALLS
100#define __SC_STR_ADECL1(t, a) #a
101#define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__)
102#define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__)
103#define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__)
104#define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__)
105#define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__)
106
107#define __SC_STR_TDECL1(t, a) #t
108#define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__)
109#define __SC_STR_TDECL3(t, a, ...) #t, __SC_STR_TDECL2(__VA_ARGS__)
110#define __SC_STR_TDECL4(t, a, ...) #t, __SC_STR_TDECL3(__VA_ARGS__)
111#define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__)
112#define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__)
113
114#define SYSCALL_METADATA(sname, nb) \
115 static const struct syscall_metadata __used \
116 __attribute__((__aligned__(4))) \
117 __attribute__((section("__syscalls_metadata"))) \
118 __syscall_meta_##sname = { \
119 .name = "sys"#sname, \
120 .nb_args = nb, \
121 .types = types_##sname, \
122 .args = args_##sname, \
123 }
124
125#define SYSCALL_DEFINE0(sname) \
126 static const struct syscall_metadata __used \
127 __attribute__((__aligned__(4))) \
128 __attribute__((section("__syscalls_metadata"))) \
129 __syscall_meta_##sname = { \
130 .name = "sys_"#sname, \
131 .nb_args = 0, \
132 }; \
133 asmlinkage long sys_##sname(void)
134
135#else
98#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) 136#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)
137#endif
138
99#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) 139#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)
100#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) 140#define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__)
101#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) 141#define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__)
@@ -117,10 +157,26 @@ struct old_linux_dirent;
117#endif 157#endif
118#endif 158#endif
119 159
160#ifdef CONFIG_FTRACE_SYSCALLS
161#define SYSCALL_DEFINEx(x, sname, ...) \
162 static const char *types_##sname[] = { \
163 __SC_STR_TDECL##x(__VA_ARGS__) \
164 }; \
165 static const char *args_##sname[] = { \
166 __SC_STR_ADECL##x(__VA_ARGS__) \
167 }; \
168 SYSCALL_METADATA(sname, x); \
169 __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
170#else
171#define SYSCALL_DEFINEx(x, sname, ...) \
172 __SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
173#endif
174
120#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS 175#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
121 176
122#define SYSCALL_DEFINE(name) static inline long SYSC_##name 177#define SYSCALL_DEFINE(name) static inline long SYSC_##name
123#define SYSCALL_DEFINEx(x, name, ...) \ 178
179#define __SYSCALL_DEFINEx(x, name, ...) \
124 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ 180 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \
125 static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ 181 static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \
126 asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ 182 asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \
@@ -134,7 +190,7 @@ struct old_linux_dirent;
134#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ 190#else /* CONFIG_HAVE_SYSCALL_WRAPPERS */
135 191
136#define SYSCALL_DEFINE(name) asmlinkage long sys_##name 192#define SYSCALL_DEFINE(name) asmlinkage long sys_##name
137#define SYSCALL_DEFINEx(x, name, ...) \ 193#define __SYSCALL_DEFINEx(x, name, ...) \
138 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) 194 asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))
139 195
140#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ 196#endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */
@@ -462,9 +518,9 @@ asmlinkage long sys_pread64(unsigned int fd, char __user *buf,
462asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, 518asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf,
463 size_t count, loff_t pos); 519 size_t count, loff_t pos);
464asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, 520asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec,
465 unsigned long vlen, u32 pos_high, u32 pos_low); 521 unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
466asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, 522asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec,
467 unsigned long vlen, u32 pos_high, u32 pos_low); 523 unsigned long vlen, unsigned long pos_l, unsigned long pos_h);
468asmlinkage long sys_getcwd(char __user *buf, unsigned long size); 524asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
469asmlinkage long sys_mkdir(const char __user *pathname, int mode); 525asmlinkage long sys_mkdir(const char __user *pathname, int mode);
470asmlinkage long sys_chdir(const char __user *filename); 526asmlinkage long sys_chdir(const char __user *filename);
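Under CONFIG_FTRACE_SYSCALLS the SYSCALL_DEFINEx() wrapper above additionally emits string arrays describing the parameter types and names plus a struct syscall_metadata entry in the __syscalls_metadata section, before expanding to the usual __SYSCALL_DEFINEx() body. An illustrative definition; "example" is a hypothetical syscall, not one added by this diff:

#include <linux/syscalls.h>

SYSCALL_DEFINE2(example, int, fd, unsigned long, count)
{
	/* with syscall tracing enabled, the types ("int", "unsigned long")
	 * and argument names ("fd", "count") become visible to the tracer
	 * through the generated metadata */
	return 0;
}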
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 917707e6151d..1de8b9eb841b 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -27,27 +27,46 @@
27 27
28#include <linux/idr.h> 28#include <linux/idr.h>
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/workqueue.h>
30 31
31struct thermal_zone_device; 32struct thermal_zone_device;
32struct thermal_cooling_device; 33struct thermal_cooling_device;
33 34
35enum thermal_device_mode {
36 THERMAL_DEVICE_DISABLED = 0,
37 THERMAL_DEVICE_ENABLED,
38};
39
40enum thermal_trip_type {
41 THERMAL_TRIP_ACTIVE = 0,
42 THERMAL_TRIP_PASSIVE,
43 THERMAL_TRIP_HOT,
44 THERMAL_TRIP_CRITICAL,
45};
46
34struct thermal_zone_device_ops { 47struct thermal_zone_device_ops {
35 int (*bind) (struct thermal_zone_device *, 48 int (*bind) (struct thermal_zone_device *,
36 struct thermal_cooling_device *); 49 struct thermal_cooling_device *);
37 int (*unbind) (struct thermal_zone_device *, 50 int (*unbind) (struct thermal_zone_device *,
38 struct thermal_cooling_device *); 51 struct thermal_cooling_device *);
39 int (*get_temp) (struct thermal_zone_device *, char *); 52 int (*get_temp) (struct thermal_zone_device *, unsigned long *);
40 int (*get_mode) (struct thermal_zone_device *, char *); 53 int (*get_mode) (struct thermal_zone_device *,
41 int (*set_mode) (struct thermal_zone_device *, const char *); 54 enum thermal_device_mode *);
42 int (*get_trip_type) (struct thermal_zone_device *, int, char *); 55 int (*set_mode) (struct thermal_zone_device *,
43 int (*get_trip_temp) (struct thermal_zone_device *, int, char *); 56 enum thermal_device_mode);
57 int (*get_trip_type) (struct thermal_zone_device *, int,
58 enum thermal_trip_type *);
59 int (*get_trip_temp) (struct thermal_zone_device *, int,
60 unsigned long *);
44 int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); 61 int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
62 int (*notify) (struct thermal_zone_device *, int,
63 enum thermal_trip_type);
45}; 64};
46 65
47struct thermal_cooling_device_ops { 66struct thermal_cooling_device_ops {
48 int (*get_max_state) (struct thermal_cooling_device *, char *); 67 int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
49 int (*get_cur_state) (struct thermal_cooling_device *, char *); 68 int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
50 int (*set_cur_state) (struct thermal_cooling_device *, unsigned int); 69 int (*set_cur_state) (struct thermal_cooling_device *, unsigned long);
51}; 70};
52 71
53#define THERMAL_TRIPS_NONE -1 72#define THERMAL_TRIPS_NONE -1
@@ -88,11 +107,19 @@ struct thermal_zone_device {
88 struct device device; 107 struct device device;
89 void *devdata; 108 void *devdata;
90 int trips; 109 int trips;
110 int tc1;
111 int tc2;
112 int passive_delay;
113 int polling_delay;
114 int last_temperature;
115 bool passive;
116 unsigned int forced_passive;
91 struct thermal_zone_device_ops *ops; 117 struct thermal_zone_device_ops *ops;
92 struct list_head cooling_devices; 118 struct list_head cooling_devices;
93 struct idr idr; 119 struct idr idr;
94 struct mutex lock; /* protect cooling devices list */ 120 struct mutex lock; /* protect cooling devices list */
95 struct list_head node; 121 struct list_head node;
122 struct delayed_work poll_queue;
96#if defined(CONFIG_THERMAL_HWMON) 123#if defined(CONFIG_THERMAL_HWMON)
97 struct list_head hwmon_node; 124 struct list_head hwmon_node;
98 struct thermal_hwmon_device *hwmon; 125 struct thermal_hwmon_device *hwmon;
@@ -104,13 +131,16 @@ struct thermal_zone_device {
104struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, 131struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
105 struct 132 struct
106 thermal_zone_device_ops 133 thermal_zone_device_ops
107 *); 134 *, int tc1, int tc2,
135 int passive_freq,
136 int polling_freq);
108void thermal_zone_device_unregister(struct thermal_zone_device *); 137void thermal_zone_device_unregister(struct thermal_zone_device *);
109 138
110int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, 139int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
111 struct thermal_cooling_device *); 140 struct thermal_cooling_device *);
112int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, 141int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
113 struct thermal_cooling_device *); 142 struct thermal_cooling_device *);
143void thermal_zone_device_update(struct thermal_zone_device *);
114struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, 144struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
115 struct 145 struct
116 thermal_cooling_device_ops 146 thermal_cooling_device_ops
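thermal_zone_device_register() now takes the passive-cooling coefficients and the two polling intervals directly. A hedged caller sketch; "example", the trip count and the ops are hypothetical, and the delays are assumed to be in milliseconds with 0 meaning no polling:

#include <linux/err.h>
#include <linux/thermal.h>

static struct thermal_zone_device_ops my_tz_ops;	/* callbacks filled in elsewhere */
static struct thermal_zone_device *my_tz;

static int my_thermal_register(void)
{
	my_tz = thermal_zone_device_register("example", 3, NULL, &my_tz_ops,
					     5, 10,	/* tc1, tc2 */
					     1000,	/* passive polling */
					     2000);	/* normal polling */
	return IS_ERR(my_tz) ? PTR_ERR(my_tz) : 0;
}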
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h
index dd253177f65f..3e08a1c86830 100644
--- a/include/linux/timeriomem-rng.h
+++ b/include/linux/timeriomem-rng.h
@@ -14,7 +14,7 @@ struct timeriomem_rng_data {
14 struct completion completion; 14 struct completion completion;
15 unsigned int present:1; 15 unsigned int present:1;
16 16
17 u32 __iomem *address; 17 void __iomem *address;
18 18
19 /* measures in usecs */ 19 /* measures in usecs */
20 unsigned int period; 20 unsigned int period;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a16b9e06f2e5..7402c1a27c4f 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -38,11 +38,7 @@
38#endif 38#endif
39 39
40#ifndef nr_cpus_node 40#ifndef nr_cpus_node
41#define nr_cpus_node(node) \ 41#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
42 ({ \
43 node_to_cpumask_ptr(__tmp__, node); \
44 cpus_weight(*__tmp__); \
45 })
46#endif 42#endif
47 43
48#define for_each_node_with_cpus(node) \ 44#define for_each_node_with_cpus(node) \
@@ -200,4 +196,9 @@ int arch_update_cpu_topology(void);
200#define topology_core_cpumask(cpu) cpumask_of(cpu) 196#define topology_core_cpumask(cpu) cpumask_of(cpu)
201#endif 197#endif
202 198
199/* Returns the number of the current Node. */
200#ifndef numa_node_id
201#define numa_node_id() (cpu_to_node(raw_smp_processor_id()))
202#endif
203
203#endif /* _LINUX_TOPOLOGY_H */ 204#endif /* _LINUX_TOPOLOGY_H */
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h
new file mode 100644
index 000000000000..7a8130384087
--- /dev/null
+++ b/include/linux/trace_clock.h
@@ -0,0 +1,19 @@
1#ifndef _LINUX_TRACE_CLOCK_H
2#define _LINUX_TRACE_CLOCK_H
3
4/*
5 * 3 trace clock variants, with differing scalability/precision
6 * tradeoffs:
7 *
8 * - local: CPU-local trace clock
9 * - medium: scalable global clock with some jitter
10 * - global: globally monotonic, serialized clock
11 */
12#include <linux/compiler.h>
13#include <linux/types.h>
14
15extern u64 notrace trace_clock_local(void);
16extern u64 notrace trace_clock(void);
17extern u64 notrace trace_clock_global(void);
18
19#endif /* _LINUX_TRACE_CLOCK_H */
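These clock variants pair naturally with the ring_buffer_set_clock() hook declared in ring_buffer.h earlier in this diff. An illustrative one-liner; use_global_clock() is hypothetical:

#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>

static void use_global_clock(struct ring_buffer *buffer)
{
	/* trade some overhead for a globally monotonic timestamp */
	ring_buffer_set_clock(buffer, trace_clock_global);
}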
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 757005458366..d35a7ee7611f 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -31,8 +31,8 @@ struct tracepoint {
31 * Keep in sync with vmlinux.lds.h. 31 * Keep in sync with vmlinux.lds.h.
32 */ 32 */
33 33
34#define TPPROTO(args...) args 34#define TP_PROTO(args...) args
35#define TPARGS(args...) args 35#define TP_ARGS(args...) args
36 36
37#ifdef CONFIG_TRACEPOINTS 37#ifdef CONFIG_TRACEPOINTS
38 38
@@ -65,7 +65,7 @@ struct tracepoint {
65 { \ 65 { \
66 if (unlikely(__tracepoint_##name.state)) \ 66 if (unlikely(__tracepoint_##name.state)) \
67 __DO_TRACE(&__tracepoint_##name, \ 67 __DO_TRACE(&__tracepoint_##name, \
68 TPPROTO(proto), TPARGS(args)); \ 68 TP_PROTO(proto), TP_ARGS(args)); \
69 } \ 69 } \
70 static inline int register_trace_##name(void (*probe)(proto)) \ 70 static inline int register_trace_##name(void (*probe)(proto)) \
71 { \ 71 { \
@@ -153,4 +153,114 @@ static inline void tracepoint_synchronize_unregister(void)
153 synchronize_sched(); 153 synchronize_sched();
154} 154}
155 155
156#define PARAMS(args...) args
157#define TRACE_FORMAT(name, proto, args, fmt) \
158 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
159
160
161/*
162 * For use with the TRACE_EVENT macro:
163 *
164 * We define a tracepoint, its arguments, its printk format
 165 * and its 'fast binary record' layout.
166 *
 167 * Firstly, name your tracepoint via TRACE_EVENT(name); the
 168 * 'subsystem_event' notation is fine.
169 *
170 * Think about this whole construct as the
171 * 'trace_sched_switch() function' from now on.
172 *
173 *
174 * TRACE_EVENT(sched_switch,
175 *
176 * *
 177 * * A function has a regular function-argument
 178 * * prototype; declare it via TP_PROTO():
179 * *
180 *
181 * TP_PROTO(struct rq *rq, struct task_struct *prev,
182 * struct task_struct *next),
183 *
184 * *
185 * * Define the call signature of the 'function'.
186 * * (Design sidenote: we use this instead of a
187 * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.)
188 * *
189 *
190 * TP_ARGS(rq, prev, next),
191 *
192 * *
193 * * Fast binary tracing: define the trace record via
194 * * TP_STRUCT__entry(). You can think about it like a
195 * * regular C structure local variable definition.
196 * *
197 * * This is how the trace record is structured and will
198 * * be saved into the ring buffer. These are the fields
199 * * that will be exposed to user-space in
200 * * /debug/tracing/events/<*>/format.
201 * *
202 * * The declared 'local variable' is called '__entry'
203 * *
 204 * * __field(pid_t, prev_pid) is equivalent to a standard declaration:
205 * *
206 * * pid_t prev_pid;
207 * *
208 * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to:
209 * *
210 * * char prev_comm[TASK_COMM_LEN];
211 * *
212 *
213 * TP_STRUCT__entry(
214 * __array( char, prev_comm, TASK_COMM_LEN )
215 * __field( pid_t, prev_pid )
216 * __field( int, prev_prio )
217 * __array( char, next_comm, TASK_COMM_LEN )
218 * __field( pid_t, next_pid )
219 * __field( int, next_prio )
220 * ),
221 *
222 * *
223 * * Assign the entry into the trace record, by embedding
224 * * a full C statement block into TP_fast_assign(). You
225 * * can refer to the trace record as '__entry' -
226 * * otherwise you can put arbitrary C code in here.
227 * *
228 * * Note: this C code will execute every time a trace event
229 * * happens, on an active tracepoint.
230 * *
231 *
232 * TP_fast_assign(
233 * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
234 * __entry->prev_pid = prev->pid;
235 * __entry->prev_prio = prev->prio;
236 * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
237 * __entry->next_pid = next->pid;
238 * __entry->next_prio = next->prio;
239 * )
240 *
241 * *
242 * * Formatted output of a trace record via TP_printk().
243 * * This is how the tracepoint will appear under ftrace
244 * * plugins that make use of this tracepoint.
245 * *
 246 * * (raw-binary tracing won't actually perform this step.)
247 * *
248 *
249 * TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
250 * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
251 * __entry->next_comm, __entry->next_pid, __entry->next_prio),
252 *
253 * );
254 *
255 * This macro construct is thus used for the regular printk format
 256 * tracing setup; it is used to construct a function-pointer-based
 257 * tracepoint callback (this is used by programmatic plugins and
 258 * can also be used by generic instrumentation like SystemTap), and
259 * it is also used to expose a structured trace record in
260 * /debug/tracing/events/.
261 */
262
263#define TRACE_EVENT(name, proto, args, struct, assign, print) \
264 DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
265
156#endif 266#endif
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 8615d661ab60..bcba84ea2d86 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -309,7 +309,8 @@ extern void tty_set_operations(struct tty_driver *driver,
309extern struct tty_driver *tty_find_polling_driver(char *name, int *line); 309extern struct tty_driver *tty_find_polling_driver(char *name, int *line);
310 310
311extern void tty_driver_kref_put(struct tty_driver *driver); 311extern void tty_driver_kref_put(struct tty_driver *driver);
312extern inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) 312
313static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d)
313{ 314{
314 kref_get(&d->kref); 315 kref_get(&d->kref);
315 return d; 316 return d;
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h
index 5f401b644ed5..429c631d2aad 100644
--- a/include/linux/usb/wusb.h
+++ b/include/linux/usb/wusb.h
@@ -80,8 +80,7 @@ struct wusb_ckhdid {
80 u8 data[16]; 80 u8 data[16];
81} __attribute__((packed)); 81} __attribute__((packed));
82 82
83const static 83static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
84struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } };
85 84
86#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) 85#define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1)
87 86
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 139d234923cd..ebb2ea6b4995 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -737,6 +737,11 @@ struct v4l2_input {
737#define V4L2_IN_ST_NO_SIGNAL 0x00000002 737#define V4L2_IN_ST_NO_SIGNAL 0x00000002
738#define V4L2_IN_ST_NO_COLOR 0x00000004 738#define V4L2_IN_ST_NO_COLOR 0x00000004
739 739
740/* field 'status' - sensor orientation */
741/* If sensor is mounted upside down set both bits */
742#define V4L2_IN_ST_HFLIP 0x00000010 /* Frames are flipped horizontally */
743#define V4L2_IN_ST_VFLIP 0x00000020 /* Frames are flipped vertically */
744
740/* field 'status' - analog */ 745/* field 'status' - analog */
741#define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */ 746#define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */
742#define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */ 747#define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */
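A minimal sketch of how a capture driver could report the new orientation bits during input enumeration; only the V4L2_IN_ST_HFLIP/V4L2_IN_ST_VFLIP names come from the hunk above, the helper and its parameters are made up.

	#include <linux/videodev2.h>

	/* Per the comment above: a sensor mounted upside down sets both bits. */
	static void example_set_orientation(struct v4l2_input *inp,
					    bool upside_down)
	{
		if (upside_down)
			inp->status |= V4L2_IN_ST_HFLIP | V4L2_IN_ST_VFLIP;
	}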
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 93445477f86a..9c1ed1fb6ddb 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -168,6 +168,8 @@ void writeback_set_ratelimit(void);
168/* pdflush.c */ 168/* pdflush.c */
169extern int nr_pdflush_threads; /* Global so it can be exported to sysctl 169extern int nr_pdflush_threads; /* Global so it can be exported to sysctl
170 read-only. */ 170 read-only. */
171extern int nr_pdflush_threads_max; /* Global so it can be exported to sysctl */
172extern int nr_pdflush_threads_min; /* Global so it can be exported to sysctl */
171 173
172 174
173#endif /* WRITEBACK_H */ 175#endif /* WRITEBACK_H */
diff --git a/include/media/msp3400.h b/include/media/msp3400.h
index 6ab854931c05..90cf22ada8b4 100644
--- a/include/media/msp3400.h
+++ b/include/media/msp3400.h
@@ -54,13 +54,13 @@
54 ======= 54 =======
55 55
56 So to specify a complete routing scheme for the msp3400 you will have to 56 So to specify a complete routing scheme for the msp3400 you will have to
57 specify in the 'input' field of the v4l2_routing struct: 57 specify in the 'input' arg of the s_routing function:
58 58
59 1) which tuner input to use 59 1) which tuner input to use
60 2) which SCART input to use 60 2) which SCART input to use
61 3) which DSP input to use for each DSP output 61 3) which DSP input to use for each DSP output
62 62
63 And in the 'output' field of the v4l2_routing struct you specify: 63 And in the 'output' arg of the s_routing function you specify:
64 64
65 1) which SCART input to use for each SCART output 65 1) which SCART input to use for each SCART output
66 66
diff --git a/include/media/ov772x.h b/include/media/ov772x.h
index 57db48dd85b8..30d9629198ef 100644
--- a/include/media/ov772x.h
+++ b/include/media/ov772x.h
@@ -17,10 +17,45 @@
17#define OV772X_FLAG_VFLIP 0x00000001 /* Vertical flip image */ 17#define OV772X_FLAG_VFLIP 0x00000001 /* Vertical flip image */
18#define OV772X_FLAG_HFLIP 0x00000002 /* Horizontal flip image */ 18#define OV772X_FLAG_HFLIP 0x00000002 /* Horizontal flip image */
19 19
20/*
21 * for Edge ctrl
22 *
23 * strength also controls Auto or Manual Edge Control Mode
24 * see also OV772X_MANUAL_EDGE_CTRL
25 */
26struct ov772x_edge_ctrl {
27 unsigned char strength;
28 unsigned char threshold;
29 unsigned char upper;
30 unsigned char lower;
31};
32
33#define OV772X_MANUAL_EDGE_CTRL 0x80 /* un-used bit of strength */
34#define EDGE_STRENGTH_MASK 0x1F
35#define EDGE_THRESHOLD_MASK 0x0F
36#define EDGE_UPPER_MASK 0xFF
37#define EDGE_LOWER_MASK 0xFF
38
39#define OV772X_AUTO_EDGECTRL(u, l) \
40{ \
41 .upper = (u & EDGE_UPPER_MASK), \
42 .lower = (l & EDGE_LOWER_MASK), \
43}
44
45#define OV772X_MANUAL_EDGECTRL(s, t) \
46{ \
47 .strength = (s & EDGE_STRENGTH_MASK) | OV772X_MANUAL_EDGE_CTRL,\
48 .threshold = (t & EDGE_THRESHOLD_MASK), \
49}
50
51/*
52 * ov772x camera info
53 */
20struct ov772x_camera_info { 54struct ov772x_camera_info {
21 unsigned long buswidth; 55 unsigned long buswidth;
22 unsigned long flags; 56 unsigned long flags;
23 struct soc_camera_link link; 57 struct soc_camera_link link;
58 struct ov772x_edge_ctrl edgectrl;
24}; 59};
25 60
26#endif /* __OV772X_H__ */ 61#endif /* __OV772X_H__ */
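To show how board code is expected to use the new edgectrl member and the helper macros, here is a sketch; the strength/threshold values and the buswidth are arbitrary examples, not defaults taken from this patch.

	#include <media/ov772x.h>

	/* Manual edge control with illustrative strength/threshold values;
	 * .link would be filled in by the platform's soc_camera wiring. */
	static struct ov772x_camera_info example_ov772x_info = {
		.buswidth = 8,
		.flags    = OV772X_FLAG_VFLIP,
		.edgectrl = OV772X_MANUAL_EDGECTRL(3, 0x08),
	};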
diff --git a/include/media/saa7146.h b/include/media/saa7146.h
index fff4235adae5..7a9f76ecbbbd 100644
--- a/include/media/saa7146.h
+++ b/include/media/saa7146.h
@@ -18,7 +18,7 @@
18#include <linux/vmalloc.h> /* for vmalloc() */ 18#include <linux/vmalloc.h> /* for vmalloc() */
19#include <linux/mm.h> /* for vmalloc_to_page() */ 19#include <linux/mm.h> /* for vmalloc_to_page() */
20 20
21#define SAA7146_VERSION_CODE 0x000500 /* 0.5.0 */ 21#define SAA7146_VERSION_CODE 0x000600 /* 0.6.0 */
22 22
23#define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr))) 23#define saa7146_write(sxy,adr,dat) writel((dat),(sxy->mem+(adr)))
24#define saa7146_read(sxy,adr) readl(sxy->mem+(adr)) 24#define saa7146_read(sxy,adr) readl(sxy->mem+(adr))
diff --git a/include/media/tvaudio.h b/include/media/tvaudio.h
index 6915aafc875a..1ac8184693f8 100644
--- a/include/media/tvaudio.h
+++ b/include/media/tvaudio.h
@@ -21,10 +21,29 @@
21#ifndef _TVAUDIO_H 21#ifndef _TVAUDIO_H
22#define _TVAUDIO_H 22#define _TVAUDIO_H
23 23
24#include <media/i2c-addr.h>
25
24/* The tvaudio module accepts the following inputs: */ 26/* The tvaudio module accepts the following inputs: */
25#define TVAUDIO_INPUT_TUNER 0 27#define TVAUDIO_INPUT_TUNER 0
26#define TVAUDIO_INPUT_RADIO 1 28#define TVAUDIO_INPUT_RADIO 1
27#define TVAUDIO_INPUT_EXTERN 2 29#define TVAUDIO_INPUT_EXTERN 2
28#define TVAUDIO_INPUT_INTERN 3 30#define TVAUDIO_INPUT_INTERN 3
29 31
32static inline const unsigned short *tvaudio_addrs(void)
33{
34 static const unsigned short addrs[] = {
35 I2C_ADDR_TDA8425 >> 1,
36 I2C_ADDR_TEA6300 >> 1,
37 I2C_ADDR_TEA6420 >> 1,
38 I2C_ADDR_TDA9840 >> 1,
39 I2C_ADDR_TDA985x_L >> 1,
40 I2C_ADDR_TDA985x_H >> 1,
41 I2C_ADDR_TDA9874 >> 1,
42 I2C_ADDR_PIC16C54 >> 1,
43 I2C_CLIENT_END
44 };
45
46 return addrs;
47}
48
30#endif 49#endif
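A sketch of how a bridge driver might feed tvaudio_addrs() into the reworked v4l2_i2c_new_probed_subdev() (see the v4l2-common.h hunk further down); the "tvaudio" module and chip names are the usual ones, and the v4l2_dev/adap arguments are assumed to be set up by the caller.

	#include <media/tvaudio.h>
	#include <media/v4l2-common.h>

	/* Probe the known tvaudio chip addresses on the bridge's I2C bus. */
	static struct v4l2_subdev *example_load_tvaudio(struct v4l2_device *v4l2_dev,
							struct i2c_adapter *adap)
	{
		return v4l2_i2c_new_probed_subdev(v4l2_dev, adap,
						  "tvaudio", "tvaudio",
						  tvaudio_addrs());
	}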
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 3a6905615d68..c48c24e4d0fa 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -125,7 +125,7 @@ int v4l2_chip_match_host(const struct v4l2_dbg_match *match);
125 125
126/* ------------------------------------------------------------------------- */ 126/* ------------------------------------------------------------------------- */
127 127
128/* Helper function for I2C legacy drivers */ 128/* I2C Helper functions */
129 129
130struct i2c_driver; 130struct i2c_driver;
131struct i2c_adapter; 131struct i2c_adapter;
@@ -135,21 +135,24 @@ struct v4l2_device;
135struct v4l2_subdev; 135struct v4l2_subdev;
136struct v4l2_subdev_ops; 136struct v4l2_subdev_ops;
137 137
138int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver *driver,
139 const char *name,
140 int (*probe)(struct i2c_client *, const struct i2c_device_id *));
141 138
142/* Load an i2c module and return an initialized v4l2_subdev struct. 139/* Load an i2c module and return an initialized v4l2_subdev struct.
143 Only call request_module if module_name != NULL. 140 Only call request_module if module_name != NULL.
144 The client_type argument is the name of the chip that's on the adapter. */ 141 The client_type argument is the name of the chip that's on the adapter. */
145struct v4l2_subdev *v4l2_i2c_new_subdev(struct i2c_adapter *adapter, 142struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
143 struct i2c_adapter *adapter,
146 const char *module_name, const char *client_type, u8 addr); 144 const char *module_name, const char *client_type, u8 addr);
147/* Probe and load an i2c module and return an initialized v4l2_subdev struct. 145/* Probe and load an i2c module and return an initialized v4l2_subdev struct.
148 Only call request_module if module_name != NULL. 146 Only call request_module if module_name != NULL.
149 The client_type argument is the name of the chip that's on the adapter. */ 147 The client_type argument is the name of the chip that's on the adapter. */
150struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct i2c_adapter *adapter, 148struct v4l2_subdev *v4l2_i2c_new_probed_subdev(struct v4l2_device *v4l2_dev,
149 struct i2c_adapter *adapter,
151 const char *module_name, const char *client_type, 150 const char *module_name, const char *client_type,
152 const unsigned short *addrs); 151 const unsigned short *addrs);
152/* Like v4l2_i2c_new_probed_subdev, except probe for a single address. */
153struct v4l2_subdev *v4l2_i2c_new_probed_subdev_addr(struct v4l2_device *v4l2_dev,
154 struct i2c_adapter *adapter,
155 const char *module_name, const char *client_type, u8 addr);
153/* Initialize an v4l2_subdev with data from an i2c_client struct */ 156/* Initialize an v4l2_subdev with data from an i2c_client struct */
154void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, 157void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
155 const struct v4l2_subdev_ops *ops); 158 const struct v4l2_subdev_ops *ops);
@@ -171,139 +174,23 @@ const unsigned short *v4l2_i2c_tuner_addrs(enum v4l2_i2c_tuner_type type);
171 174
172/* ------------------------------------------------------------------------- */ 175/* ------------------------------------------------------------------------- */
173 176
174/* Internal ioctls */ 177/* Note: these remaining ioctls/structs should be removed as well, but they are
175 178 still used in tuner-simple.c (TUNER_SET_CONFIG), cx18/ivtv (RESET) and
176/* VIDIOC_INT_DECODE_VBI_LINE */ 179 v4l2-int-device.h (v4l2_routing). To remove these ioctls some more cleanup
177struct v4l2_decode_vbi_line { 180 is needed in those modules. */
178 u32 is_second_field; /* Set to 0 for the first (odd) field,
179 set to 1 for the second (even) field. */
180 u8 *p; /* Pointer to the sliced VBI data from the decoder.
181 On exit points to the start of the payload. */
182 u32 line; /* Line number of the sliced VBI data (1-23) */
183 u32 type; /* VBI service type (V4L2_SLICED_*). 0 if no service found */
184};
185 181
182/* s_config */
186struct v4l2_priv_tun_config { 183struct v4l2_priv_tun_config {
187 int tuner; 184 int tuner;
188 void *priv; 185 void *priv;
189}; 186};
190
191/* audio ioctls */
192
193/* v4l device was opened in Radio mode, to be replaced by VIDIOC_INT_S_TUNER_MODE */
194#define AUDC_SET_RADIO _IO('d',88)
195
196/* tuner ioctls */
197
198/* Sets tuner type and its I2C addr */
199#define TUNER_SET_TYPE_ADDR _IOW('d', 90, int)
200
201/* Puts tuner on powersaving state, disabling it, except for i2c. To be replaced
202 by VIDIOC_INT_S_STANDBY. */
203#define TUNER_SET_STANDBY _IOW('d', 91, int)
204
205/* Sets tda9887 specific stuff, like port1, port2 and qss */
206#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config) 187#define TUNER_SET_CONFIG _IOW('d', 92, struct v4l2_priv_tun_config)
207 188
208/* Switch the tuner to a specific tuner mode. Replacement of AUDC_SET_RADIO */
209#define VIDIOC_INT_S_TUNER_MODE _IOW('d', 93, enum v4l2_tuner_type)
210
211/* Generic standby command. Passing -1 (all bits set to 1) will put the whole
212 chip into standby mode, value 0 will make the chip fully active. Specific
213 bits can be used by certain chips to enable/disable specific subsystems.
214 Replacement of TUNER_SET_STANDBY. */
215#define VIDIOC_INT_S_STANDBY _IOW('d', 94, u32)
216
217/* 100, 101 used by VIDIOC_DBG_[SG]_REGISTER */
218
219/* Generic reset command. The argument selects which subsystems to reset.
220 Passing 0 will always reset the whole chip. */
221#define VIDIOC_INT_RESET _IOW ('d', 102, u32) 189#define VIDIOC_INT_RESET _IOW ('d', 102, u32)
222 190
223/* Set the frequency (in Hz) of the audio clock output.
224 Used to slave an audio processor to the video decoder, ensuring that audio
225 and video remain synchronized.
226 Usual values for the frequency are 48000, 44100 or 32000 Hz.
227 If the frequency is not supported, then -EINVAL is returned. */
228#define VIDIOC_INT_AUDIO_CLOCK_FREQ _IOW ('d', 103, u32)
229
230/* Video decoders that support sliced VBI need to implement this ioctl.
231 Field p of the v4l2_sliced_vbi_line struct is set to the start of the VBI
232 data that was generated by the decoder. The driver then parses the sliced
233 VBI data and sets the other fields in the struct accordingly. The pointer p
234 is updated to point to the start of the payload which can be copied
235 verbatim into the data field of the v4l2_sliced_vbi_data struct. If no
236 valid VBI data was found, then the type field is set to 0 on return. */
237#define VIDIOC_INT_DECODE_VBI_LINE _IOWR('d', 104, struct v4l2_decode_vbi_line)
238
239/* Used to generate VBI signals on a video signal. v4l2_sliced_vbi_data is
240 filled with the data packets that should be output. Note that if you set
241 the line field to 0, then that VBI signal is disabled. If no
242 valid VBI data was found, then the type field is set to 0 on return. */
243#define VIDIOC_INT_S_VBI_DATA _IOW ('d', 105, struct v4l2_sliced_vbi_data)
244
245/* Used to obtain the sliced VBI packet from a readback register. Not all
246 video decoders support this. If no data is available because the readback
247 register contains invalid or erroneous data -EIO is returned. Note that
248 you must fill in the 'id' member and the 'field' member (to determine
249 whether CC data from the first or second field should be obtained). */
250#define VIDIOC_INT_G_VBI_DATA _IOWR('d', 106, struct v4l2_sliced_vbi_data)
251
252/* Sets I2S speed in bps. This is used to provide a standard way to select I2S
253 clock used by driving digital audio streams at some board designs.
254 Usual values for the frequency are 1024000 and 2048000.
255 If the frequency is not supported, then -EINVAL is returned. */
256#define VIDIOC_INT_I2S_CLOCK_FREQ _IOW ('d', 108, u32)
257
258/* Routing definition, device dependent. It specifies which inputs (if any)
259 should be routed to which outputs (if any). */
260struct v4l2_routing { 191struct v4l2_routing {
261 u32 input; 192 u32 input;
262 u32 output; 193 u32 output;
263}; 194};
264 195
265/* These internal commands should be used to define the inputs and outputs
266 of an audio/video chip. They will replace the v4l2 API commands
267 VIDIOC_S/G_INPUT, VIDIOC_S/G_OUTPUT, VIDIOC_S/G_AUDIO and VIDIOC_S/G_AUDOUT
268 that are meant to be used by the user.
269 The internal commands should be used to switch inputs/outputs
270 because only the driver knows how to map a 'Television' input to the precise
271 input/output routing of an A/D converter, or a DSP, or a video digitizer.
272 These four commands should only be sent directly to an i2c device, they
273 should not be broadcast as the routing is very device specific. */
274#define VIDIOC_INT_S_AUDIO_ROUTING _IOW ('d', 109, struct v4l2_routing)
275#define VIDIOC_INT_G_AUDIO_ROUTING _IOR ('d', 110, struct v4l2_routing)
276#define VIDIOC_INT_S_VIDEO_ROUTING _IOW ('d', 111, struct v4l2_routing)
277#define VIDIOC_INT_G_VIDEO_ROUTING _IOR ('d', 112, struct v4l2_routing)
278
279struct v4l2_crystal_freq {
280 u32 freq; /* frequency in Hz of the crystal */
281 u32 flags; /* device specific flags */
282};
283
284/* Sets the frequency of the crystal used to generate the clocks.
285 An extra flags field allows device specific configuration regarding
286 clock frequency dividers, etc. If not used, then set flags to 0.
287 If the frequency is not supported, then -EINVAL is returned. */
288#define VIDIOC_INT_S_CRYSTAL_FREQ _IOW('d', 113, struct v4l2_crystal_freq)
289
290/* Initialize the sensor registors to some sort of reasonable
291 default values. */
292#define VIDIOC_INT_INIT _IOW('d', 114, u32)
293
294/* Set v4l2_std_id for video OUTPUT devices. This is ignored by
295 video input devices. */
296#define VIDIOC_INT_S_STD_OUTPUT _IOW('d', 115, v4l2_std_id)
297
298/* Get v4l2_std_id for video OUTPUT devices. This is ignored by
299 video input devices. */
300#define VIDIOC_INT_G_STD_OUTPUT _IOW('d', 116, v4l2_std_id)
301
302/* Set GPIO pins. Very simple right now, might need to be extended with
303 a v4l2_gpio struct if a direction is also needed. */
304#define VIDIOC_INT_S_GPIO _IOW('d', 117, u32)
305
306/* Get input status. Same as the status field in the v4l2_input struct. */
307#define VIDIOC_INT_G_INPUT_STATUS _IOR('d', 118, u32)
308
309#endif /* V4L2_COMMON_H_ */ 196#endif /* V4L2_COMMON_H_ */
diff --git a/include/media/v4l2-i2c-drv-legacy.h b/include/media/v4l2-i2c-drv-legacy.h
deleted file mode 100644
index e65dd9d84e8b..000000000000
--- a/include/media/v4l2-i2c-drv-legacy.h
+++ /dev/null
@@ -1,152 +0,0 @@
1/*
2 * v4l2-i2c-drv-legacy.h - contains I2C handling code that's identical
3 * for all V4L2 I2C drivers. Use this header if the
4 * I2C driver is used by both legacy drivers and
5 * drivers converted to the bus-based I2C API.
6 *
7 * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22 */
23
24/* NOTE: the full version of this header is in the v4l-dvb repository
25 * and allows v4l i2c drivers to be compiled on older kernels as well.
26 * The version of this header as it appears in the kernel is a stripped
27 * version (without all the backwards compatibility stuff) and so it
28 * looks a bit odd.
29 *
30 * If you look at the full version then you will understand the reason
31 * for introducing this header since you really don't want to have all
32 * the tricky backwards compatibility code in each and every i2c driver.
33 */
34
35struct v4l2_i2c_driver_data {
36 const char * const name;
37 int driverid;
38 int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
39 int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
40 int (*remove)(struct i2c_client *client);
41 int (*suspend)(struct i2c_client *client, pm_message_t state);
42 int (*resume)(struct i2c_client *client);
43 int (*legacy_probe)(struct i2c_adapter *adapter);
44 int legacy_class;
45 const struct i2c_device_id *id_table;
46};
47
48static struct v4l2_i2c_driver_data v4l2_i2c_data;
49static const struct i2c_client_address_data addr_data;
50static struct i2c_driver v4l2_i2c_driver_legacy;
51static char v4l2_i2c_drv_name_legacy[32];
52
53static int v4l2_i2c_drv_attach_legacy(struct i2c_adapter *adapter, int address, int kind)
54{
55 return v4l2_i2c_attach(adapter, address, &v4l2_i2c_driver_legacy,
56 v4l2_i2c_drv_name_legacy, v4l2_i2c_data.probe);
57}
58
59static int v4l2_i2c_drv_probe_legacy(struct i2c_adapter *adapter)
60{
61 if (v4l2_i2c_data.legacy_probe) {
62 if (v4l2_i2c_data.legacy_probe(adapter))
63 return i2c_probe(adapter, &addr_data, v4l2_i2c_drv_attach_legacy);
64 return 0;
65 }
66 if (adapter->class & v4l2_i2c_data.legacy_class)
67 return i2c_probe(adapter, &addr_data, v4l2_i2c_drv_attach_legacy);
68 return 0;
69}
70
71static int v4l2_i2c_drv_detach_legacy(struct i2c_client *client)
72{
73 int err;
74
75 if (v4l2_i2c_data.remove)
76 v4l2_i2c_data.remove(client);
77
78 err = i2c_detach_client(client);
79 if (err)
80 return err;
81 kfree(client);
82 return 0;
83}
84
85static int v4l2_i2c_drv_suspend_helper(struct i2c_client *client, pm_message_t state)
86{
87 return v4l2_i2c_data.suspend ? v4l2_i2c_data.suspend(client, state) : 0;
88}
89
90static int v4l2_i2c_drv_resume_helper(struct i2c_client *client)
91{
92 return v4l2_i2c_data.resume ? v4l2_i2c_data.resume(client) : 0;
93}
94
95/* ----------------------------------------------------------------------- */
96
97/* i2c implementation */
98static struct i2c_driver v4l2_i2c_driver_legacy = {
99 .driver = {
100 .owner = THIS_MODULE,
101 },
102 .attach_adapter = v4l2_i2c_drv_probe_legacy,
103 .detach_client = v4l2_i2c_drv_detach_legacy,
104 .suspend = v4l2_i2c_drv_suspend_helper,
105 .resume = v4l2_i2c_drv_resume_helper,
106};
107
108/* ----------------------------------------------------------------------- */
109
110/* i2c implementation */
111static struct i2c_driver v4l2_i2c_driver = {
112 .suspend = v4l2_i2c_drv_suspend_helper,
113 .resume = v4l2_i2c_drv_resume_helper,
114};
115
116static int __init v4l2_i2c_drv_init(void)
117{
118 int err;
119
120 strlcpy(v4l2_i2c_drv_name_legacy, v4l2_i2c_data.name, sizeof(v4l2_i2c_drv_name_legacy));
121 strlcat(v4l2_i2c_drv_name_legacy, "'", sizeof(v4l2_i2c_drv_name_legacy));
122
123 if (v4l2_i2c_data.legacy_class == 0)
124 v4l2_i2c_data.legacy_class = I2C_CLASS_TV_ANALOG;
125
126 v4l2_i2c_driver_legacy.driver.name = v4l2_i2c_drv_name_legacy;
127 v4l2_i2c_driver_legacy.id = v4l2_i2c_data.driverid;
128 v4l2_i2c_driver_legacy.command = v4l2_i2c_data.command;
129 err = i2c_add_driver(&v4l2_i2c_driver_legacy);
130
131 if (err)
132 return err;
133 v4l2_i2c_driver.driver.name = v4l2_i2c_data.name;
134 v4l2_i2c_driver.id = v4l2_i2c_data.driverid;
135 v4l2_i2c_driver.command = v4l2_i2c_data.command;
136 v4l2_i2c_driver.probe = v4l2_i2c_data.probe;
137 v4l2_i2c_driver.remove = v4l2_i2c_data.remove;
138 v4l2_i2c_driver.id_table = v4l2_i2c_data.id_table;
139 err = i2c_add_driver(&v4l2_i2c_driver);
140 if (err)
141 i2c_del_driver(&v4l2_i2c_driver_legacy);
142 return err;
143}
144
145static void __exit v4l2_i2c_drv_cleanup(void)
146{
147 i2c_del_driver(&v4l2_i2c_driver_legacy);
148 i2c_del_driver(&v4l2_i2c_driver);
149}
150
151module_init(v4l2_i2c_drv_init);
152module_exit(v4l2_i2c_drv_cleanup);
diff --git a/include/media/v4l2-i2c-drv.h b/include/media/v4l2-i2c-drv.h
index efdc8bf27f87..10a2882c3cbf 100644
--- a/include/media/v4l2-i2c-drv.h
+++ b/include/media/v4l2-i2c-drv.h
@@ -39,14 +39,11 @@
39 39
40struct v4l2_i2c_driver_data { 40struct v4l2_i2c_driver_data {
41 const char * const name; 41 const char * const name;
42 int driverid;
43 int (*command)(struct i2c_client *client, unsigned int cmd, void *arg); 42 int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
44 int (*probe)(struct i2c_client *client, const struct i2c_device_id *id); 43 int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
45 int (*remove)(struct i2c_client *client); 44 int (*remove)(struct i2c_client *client);
46 int (*suspend)(struct i2c_client *client, pm_message_t state); 45 int (*suspend)(struct i2c_client *client, pm_message_t state);
47 int (*resume)(struct i2c_client *client); 46 int (*resume)(struct i2c_client *client);
48 int (*legacy_probe)(struct i2c_adapter *adapter);
49 int legacy_class;
50 const struct i2c_device_id *id_table; 47 const struct i2c_device_id *id_table;
51}; 48};
52 49
@@ -54,12 +51,11 @@ static struct v4l2_i2c_driver_data v4l2_i2c_data;
54static struct i2c_driver v4l2_i2c_driver; 51static struct i2c_driver v4l2_i2c_driver;
55 52
56 53
57/* Bus-based I2C implementation for kernels >= 2.6.22 */ 54/* Bus-based I2C implementation for kernels >= 2.6.26 */
58 55
59static int __init v4l2_i2c_drv_init(void) 56static int __init v4l2_i2c_drv_init(void)
60{ 57{
61 v4l2_i2c_driver.driver.name = v4l2_i2c_data.name; 58 v4l2_i2c_driver.driver.name = v4l2_i2c_data.name;
62 v4l2_i2c_driver.id = v4l2_i2c_data.driverid;
63 v4l2_i2c_driver.command = v4l2_i2c_data.command; 59 v4l2_i2c_driver.command = v4l2_i2c_data.command;
64 v4l2_i2c_driver.probe = v4l2_i2c_data.probe; 60 v4l2_i2c_driver.probe = v4l2_i2c_data.probe;
65 v4l2_i2c_driver.remove = v4l2_i2c_data.remove; 61 v4l2_i2c_driver.remove = v4l2_i2c_data.remove;
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 1d181b4ccb01..17856081c809 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -27,6 +27,16 @@ struct v4l2_device;
27struct v4l2_subdev; 27struct v4l2_subdev;
28struct tuner_setup; 28struct tuner_setup;
29 29
30/* decode_vbi_line */
31struct v4l2_decode_vbi_line {
32 u32 is_second_field; /* Set to 0 for the first (odd) field,
33 set to 1 for the second (even) field. */
34 u8 *p; /* Pointer to the sliced VBI data from the decoder.
35 On exit points to the start of the payload. */
36 u32 line; /* Line number of the sliced VBI data (1-23) */
37 u32 type; /* VBI service type (V4L2_SLICED_*). 0 if no service found */
38};
39
30/* Sub-devices are devices that are connected somehow to the main bridge 40/* Sub-devices are devices that are connected somehow to the main bridge
31 device. These devices are usually audio/video muxers/encoders/decoders or 41 device. These devices are usually audio/video muxers/encoders/decoders or
32 sensors and webcam controllers. 42 sensors and webcam controllers.
@@ -68,11 +78,26 @@ struct tuner_setup;
68 the use-case it might be better to use subdev-specific ops (currently 78 the use-case it might be better to use subdev-specific ops (currently
69 not yet implemented) since ops provide proper type-checking. 79 not yet implemented) since ops provide proper type-checking.
70 */ 80 */
81
82/* init: initialize the sensor registers to some sort of reasonable default
83 values. Do not use for new drivers and should be removed in existing
84 drivers.
85
86 load_fw: load firmware.
87
88 reset: generic reset command. The argument selects which subsystems to
89 reset. Passing 0 will always reset the whole chip. Do not use for new
90 drivers without discussing this first on the linux-media mailinglist.
91 Normally there should be no reason to reset a device.
92
93 s_gpio: set GPIO pins. Very simple right now, might need to be extended with
94 a direction argument if needed.
95 */
71struct v4l2_subdev_core_ops { 96struct v4l2_subdev_core_ops {
72 int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip); 97 int (*g_chip_ident)(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip);
73 int (*log_status)(struct v4l2_subdev *sd); 98 int (*log_status)(struct v4l2_subdev *sd);
74 int (*init)(struct v4l2_subdev *sd, u32 val); 99 int (*init)(struct v4l2_subdev *sd, u32 val);
75 int (*s_standby)(struct v4l2_subdev *sd, u32 standby); 100 int (*load_fw)(struct v4l2_subdev *sd);
76 int (*reset)(struct v4l2_subdev *sd, u32 val); 101 int (*reset)(struct v4l2_subdev *sd, u32 val);
77 int (*s_gpio)(struct v4l2_subdev *sd, u32 val); 102 int (*s_gpio)(struct v4l2_subdev *sd, u32 val);
78 int (*queryctrl)(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc); 103 int (*queryctrl)(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc);
@@ -82,6 +107,7 @@ struct v4l2_subdev_core_ops {
82 int (*s_ext_ctrls)(struct v4l2_subdev *sd, struct v4l2_ext_controls *ctrls); 107 int (*s_ext_ctrls)(struct v4l2_subdev *sd, struct v4l2_ext_controls *ctrls);
83 int (*try_ext_ctrls)(struct v4l2_subdev *sd, struct v4l2_ext_controls *ctrls); 108 int (*try_ext_ctrls)(struct v4l2_subdev *sd, struct v4l2_ext_controls *ctrls);
84 int (*querymenu)(struct v4l2_subdev *sd, struct v4l2_querymenu *qm); 109 int (*querymenu)(struct v4l2_subdev *sd, struct v4l2_querymenu *qm);
110 int (*s_std)(struct v4l2_subdev *sd, v4l2_std_id norm);
85 long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg); 111 long (*ioctl)(struct v4l2_subdev *sd, unsigned int cmd, void *arg);
86#ifdef CONFIG_VIDEO_ADV_DEBUG 112#ifdef CONFIG_VIDEO_ADV_DEBUG
87 int (*g_register)(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg); 113 int (*g_register)(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg);
@@ -89,6 +115,16 @@ struct v4l2_subdev_core_ops {
89#endif 115#endif
90}; 116};
91 117
118/* s_mode: switch the tuner to a specific tuner mode. Replacement of s_radio.
119
120 s_radio: v4l device was opened in Radio mode, to be replaced by s_mode.
121
122 s_type_addr: sets tuner type and its I2C addr.
123
124 s_config: sets tda9887 specific stuff, like port1, port2 and qss
125
126 s_standby: puts tuner on powersaving state, disabling it, except for i2c.
127 */
92struct v4l2_subdev_tuner_ops { 128struct v4l2_subdev_tuner_ops {
93 int (*s_mode)(struct v4l2_subdev *sd, enum v4l2_tuner_type); 129 int (*s_mode)(struct v4l2_subdev *sd, enum v4l2_tuner_type);
94 int (*s_radio)(struct v4l2_subdev *sd); 130 int (*s_radio)(struct v4l2_subdev *sd);
@@ -96,20 +132,77 @@ struct v4l2_subdev_tuner_ops {
96 int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq); 132 int (*g_frequency)(struct v4l2_subdev *sd, struct v4l2_frequency *freq);
97 int (*g_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt); 133 int (*g_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt);
98 int (*s_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt); 134 int (*s_tuner)(struct v4l2_subdev *sd, struct v4l2_tuner *vt);
99 int (*s_std)(struct v4l2_subdev *sd, v4l2_std_id norm);
100 int (*s_type_addr)(struct v4l2_subdev *sd, struct tuner_setup *type); 135 int (*s_type_addr)(struct v4l2_subdev *sd, struct tuner_setup *type);
101 int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config); 136 int (*s_config)(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *config);
137 int (*s_standby)(struct v4l2_subdev *sd);
102}; 138};
103 139
140/* s_clock_freq: set the frequency (in Hz) of the audio clock output.
141 Used to slave an audio processor to the video decoder, ensuring that
142 audio and video remain synchronized. Usual values for the frequency
143 are 48000, 44100 or 32000 Hz. If the frequency is not supported, then
144 -EINVAL is returned.
145
146 s_i2s_clock_freq: sets I2S speed in bps. This is used to provide a standard
147 way to select the I2S clock used for driving digital audio streams on some
148 board designs. Usual values for the frequency are 1024000 and 2048000.
149 If the frequency is not supported, then -EINVAL is returned.
150
151 s_routing: used to define the input and/or output pins of an audio chip,
152 and any additional configuration data.
153 Never attempt to use user-level input IDs (e.g. Composite, S-Video,
154 Tuner) at this level. An i2c device shouldn't know about whether an
155 input pin is connected to a Composite connector, because on another
156 board or platform it might be connected to something else entirely.
157 The calling driver is responsible for mapping a user-level input to
158 the right pins on the i2c device.
159 */
104struct v4l2_subdev_audio_ops { 160struct v4l2_subdev_audio_ops {
105 int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq); 161 int (*s_clock_freq)(struct v4l2_subdev *sd, u32 freq);
106 int (*s_i2s_clock_freq)(struct v4l2_subdev *sd, u32 freq); 162 int (*s_i2s_clock_freq)(struct v4l2_subdev *sd, u32 freq);
107 int (*s_routing)(struct v4l2_subdev *sd, const struct v4l2_routing *route); 163 int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
108}; 164};
109 165
166/*
167 decode_vbi_line: video decoders that support sliced VBI need to implement
168 this ioctl. Field p of the v4l2_sliced_vbi_line struct is set to the
169 start of the VBI data that was generated by the decoder. The driver
170 then parses the sliced VBI data and sets the other fields in the
171 struct accordingly. The pointer p is updated to point to the start of
172 the payload which can be copied verbatim into the data field of the
173 v4l2_sliced_vbi_data struct. If no valid VBI data was found, then the
174 type field is set to 0 on return.
175
176 s_vbi_data: used to generate VBI signals on a video signal.
177 v4l2_sliced_vbi_data is filled with the data packets that should be
178 output. Note that if you set the line field to 0, then that VBI signal
179 is disabled. If no valid VBI data was found, then the type field is
180 set to 0 on return.
181
182 g_vbi_data: used to obtain the sliced VBI packet from a readback register.
183 Not all video decoders support this. If no data is available because
184 the readback register contains invalid or erroneous data -EIO is
185 returned. Note that you must fill in the 'id' member and the 'field'
186 member (to determine whether CC data from the first or second field
187 should be obtained).
188
189 s_std_output: set v4l2_std_id for video OUTPUT devices. This is ignored by
190 video input devices.
191
192 s_crystal_freq: sets the frequency of the crystal used to generate the
193 clocks in Hz. An extra flags field allows device specific configuration
194 regarding clock frequency dividers, etc. If not used, then set flags
195 to 0. If the frequency is not supported, then -EINVAL is returned.
196
197 g_input_status: get input status. Same as the status field in the v4l2_input
198 struct.
199
200 s_routing: see s_routing in audio_ops, except this version is for video
201 devices.
202 */
110struct v4l2_subdev_video_ops { 203struct v4l2_subdev_video_ops {
111 int (*s_routing)(struct v4l2_subdev *sd, const struct v4l2_routing *route); 204 int (*s_routing)(struct v4l2_subdev *sd, u32 input, u32 output, u32 config);
112 int (*s_crystal_freq)(struct v4l2_subdev *sd, struct v4l2_crystal_freq *freq); 205 int (*s_crystal_freq)(struct v4l2_subdev *sd, u32 freq, u32 flags);
113 int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line); 206 int (*decode_vbi_line)(struct v4l2_subdev *sd, struct v4l2_decode_vbi_line *vbi_line);
114 int (*s_vbi_data)(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *vbi_data); 207 int (*s_vbi_data)(struct v4l2_subdev *sd, const struct v4l2_sliced_vbi_data *vbi_data);
115 int (*g_vbi_data)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *vbi_data); 208 int (*g_vbi_data)(struct v4l2_subdev *sd, struct v4l2_sliced_vbi_data *vbi_data);
@@ -163,18 +256,6 @@ static inline void *v4l2_get_subdevdata(const struct v4l2_subdev *sd)
163 return sd->priv; 256 return sd->priv;
164} 257}
165 258
166/* Convert an ioctl-type command to the proper v4l2_subdev_ops function call.
167 This is used by subdev modules that can be called by both old-style ioctl
168 commands and through the v4l2_subdev_ops.
169
170 The ioctl API of the subdev driver can call this function to call the
171 right ops based on the ioctl cmd and arg.
172
173 Once all subdev drivers have been converted and all drivers no longer
174 use the ioctl interface, then this function can be removed.
175 */
176int v4l2_subdev_command(struct v4l2_subdev *sd, unsigned cmd, void *arg);
177
178static inline void v4l2_subdev_init(struct v4l2_subdev *sd, 259static inline void v4l2_subdev_init(struct v4l2_subdev *sd,
179 const struct v4l2_subdev_ops *ops) 260 const struct v4l2_subdev_ops *ops)
180{ 261{
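For drivers adapting to the new s_routing signature documented above (plain input/output/config words instead of a struct v4l2_routing pointer), a hedged example of the call through v4l2_subdev_call(); the routing values are placeholders that only the bridge driver can map correctly.

	#include <media/v4l2-subdev.h>

	/* Route device-specific input pin 2 to output 0; the final u32 is the
	 * new 'config' argument (0 = no extra configuration). */
	static int example_set_video_route(struct v4l2_subdev *sd)
	{
		return v4l2_subdev_call(sd, video, s_routing, 2, 0, 0);
	}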
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index ab17a159ac66..a9652806d0df 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -99,9 +99,12 @@ void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t,
99 const union nf_inet_addr *, 99 const union nf_inet_addr *,
100 u_int8_t, const __be16 *, const __be16 *); 100 u_int8_t, const __be16 *, const __be16 *);
101void nf_ct_expect_put(struct nf_conntrack_expect *exp); 101void nf_ct_expect_put(struct nf_conntrack_expect *exp);
102int nf_ct_expect_related(struct nf_conntrack_expect *expect);
103int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, 102int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
104 u32 pid, int report); 103 u32 pid, int report);
104static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect)
105{
106 return nf_ct_expect_related_report(expect, 0, 0);
107}
105 108
106#endif /*_NF_CONNTRACK_EXPECT_H*/ 109#endif /*_NF_CONNTRACK_EXPECT_H*/
107 110
diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h
new file mode 100644
index 000000000000..0627a9ae6347
--- /dev/null
+++ b/include/scsi/fc/fc_fip.h
@@ -0,0 +1,237 @@
1/*
2 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 */
17#ifndef _FC_FIP_H_
18#define _FC_FIP_H_
19
20/*
21 * This version is based on:
22 * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf
23 */
24
25/*
26 * The FIP ethertype eventually goes in net/if_ether.h.
27 */
28#ifndef ETH_P_FIP
29#define ETH_P_FIP 0x8914 /* FIP Ethertype */
30#endif
31
32#define FIP_DEF_PRI 128 /* default selection priority */
33#define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */
34#define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */
35#define FIP_VN_KA_PERIOD 90000 /* required VN_port keep-alive period (mS) */
36#define FIP_FCF_FUZZ 100 /* random time added by FCF (mS) */
37
38/*
39 * Multicast MAC addresses. T11-adopted.
40 */
41#define FIP_ALL_FCOE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 0 })
42#define FIP_ALL_ENODE_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 1 })
43#define FIP_ALL_FCF_MACS ((u8[6]) { 1, 0x10, 0x18, 1, 0, 2 })
44
45#define FIP_VER 1 /* version for fip_header */
46
47struct fip_header {
48 __u8 fip_ver; /* upper 4 bits are the version */
49 __u8 fip_resv1; /* reserved */
50 __be16 fip_op; /* operation code */
51 __u8 fip_resv2; /* reserved */
52 __u8 fip_subcode; /* lower 4 bits are sub-code */
53 __be16 fip_dl_len; /* length of descriptors in words */
54 __be16 fip_flags; /* header flags */
55} __attribute__((packed));
56
57#define FIP_VER_SHIFT 4
58#define FIP_VER_ENCAPS(v) ((v) << FIP_VER_SHIFT)
59#define FIP_VER_DECAPS(v) ((v) >> FIP_VER_SHIFT)
60#define FIP_BPW 4 /* bytes per word for lengths */
61
62/*
63 * fip_op.
64 */
65enum fip_opcode {
66 FIP_OP_DISC = 1, /* discovery, advertisement, etc. */
67 FIP_OP_LS = 2, /* Link Service request or reply */
68 FIP_OP_CTRL = 3, /* Keep Alive / Link Reset */
69 FIP_OP_VLAN = 4, /* VLAN discovery */
70 FIP_OP_VENDOR_MIN = 0xfff8, /* min vendor-specific opcode */
71 FIP_OP_VENDOR_MAX = 0xfffe, /* max vendor-specific opcode */
72};
73
74/*
75 * Subcodes for FIP_OP_DISC.
76 */
77enum fip_disc_subcode {
78 FIP_SC_SOL = 1, /* solicitation */
79 FIP_SC_ADV = 2, /* advertisement */
80};
81
82/*
83 * Subcodes for FIP_OP_LS.
84 */
85enum fip_trans_subcode {
86 FIP_SC_REQ = 1, /* request */
87 FIP_SC_REP = 2, /* reply */
88};
89
90/*
91 * Subcodes for FIP_OP_CTRL.
92 */
93enum fip_reset_subcode {
94 FIP_SC_KEEP_ALIVE = 1, /* keep-alive from VN_Port */
95 FIP_SC_CLR_VLINK = 2, /* clear virtual link from VF_Port */
96};
97
98/*
99 * Subcodes for FIP_OP_VLAN.
100 */
101enum fip_vlan_subcode {
102 FIP_SC_VL_REQ = 1, /* request */
103 FIP_SC_VL_REP = 2, /* reply */
104};
105
106/*
107 * flags in header fip_flags.
108 */
109enum fip_flag {
110 FIP_FL_FPMA = 0x8000, /* supports FPMA fabric-provided MACs */
111 FIP_FL_SPMA = 0x4000, /* supports SPMA server-provided MACs */
112 FIP_FL_AVAIL = 0x0004, /* available for FLOGI/ELP */
113 FIP_FL_SOL = 0x0002, /* this is a solicited message */
114 FIP_FL_FPORT = 0x0001, /* sent from an F port */
115};
116
117/*
118 * Common descriptor header format.
119 */
120struct fip_desc {
121 __u8 fip_dtype; /* type - see below */
122 __u8 fip_dlen; /* length - in 32-bit words */
123};
124
125enum fip_desc_type {
126 FIP_DT_PRI = 1, /* priority for forwarder selection */
127 FIP_DT_MAC = 2, /* MAC address */
128 FIP_DT_MAP_OUI = 3, /* FC-MAP OUI */
129 FIP_DT_NAME = 4, /* switch name or node name */
130 FIP_DT_FAB = 5, /* fabric descriptor */
131 FIP_DT_FCOE_SIZE = 6, /* max FCoE frame size */
132 FIP_DT_FLOGI = 7, /* FLOGI request or response */
133 FIP_DT_FDISC = 8, /* FDISC request or response */
134 FIP_DT_LOGO = 9, /* LOGO request or response */
135 FIP_DT_ELP = 10, /* ELP request or response */
136 FIP_DT_VN_ID = 11, /* VN_Node Identifier */
137 FIP_DT_FKA = 12, /* advertisement keep-alive period */
138 FIP_DT_VENDOR = 13, /* vendor ID */
139 FIP_DT_VLAN = 14, /* vlan number */
140 FIP_DT_LIMIT, /* max defined desc_type + 1 */
141 FIP_DT_VENDOR_BASE = 128, /* first vendor-specific desc_type */
142};
143
144/*
145 * FIP_DT_PRI - priority descriptor.
146 */
147struct fip_pri_desc {
148 struct fip_desc fd_desc;
149 __u8 fd_resvd;
150 __u8 fd_pri; /* FCF priority: higher is better */
151} __attribute__((packed));
152
153/*
154 * FIP_DT_MAC - MAC address descriptor.
155 */
156struct fip_mac_desc {
157 struct fip_desc fd_desc;
158 __u8 fd_mac[ETH_ALEN];
159} __attribute__((packed));
160
161/*
162 * FIP_DT_MAP - descriptor.
163 */
164struct fip_map_desc {
165 struct fip_desc fd_desc;
166 __u8 fd_resvd[3];
167 __u8 fd_map[3];
168} __attribute__((packed));
169
170/*
171 * FIP_DT_NAME descriptor.
172 */
173struct fip_wwn_desc {
174 struct fip_desc fd_desc;
175 __u8 fd_resvd[2];
176 __be64 fd_wwn; /* 64-bit WWN, unaligned */
177} __attribute__((packed));
178
179/*
180 * FIP_DT_FAB descriptor.
181 */
182struct fip_fab_desc {
183 struct fip_desc fd_desc;
184 __be16 fd_vfid; /* virtual fabric ID */
185 __u8 fd_resvd;
186 __u8 fd_map[3]; /* FC-MAP value */
187 __be64 fd_wwn; /* fabric name, unaligned */
188} __attribute__((packed));
189
190/*
191 * FIP_DT_FCOE_SIZE descriptor.
192 */
193struct fip_size_desc {
194 struct fip_desc fd_desc;
195 __be16 fd_size;
196} __attribute__((packed));
197
198/*
199 * Descriptor that encapsulates an ELS or ILS frame.
200 * The encapsulated frame immediately follows this header, without
201 * SOF, EOF, or CRC.
202 */
203struct fip_encaps {
204 struct fip_desc fd_desc;
205 __u8 fd_resvd[2];
206} __attribute__((packed));
207
208/*
209 * FIP_DT_VN_ID - VN_Node Identifier descriptor.
210 */
211struct fip_vn_desc {
212 struct fip_desc fd_desc;
213 __u8 fd_mac[ETH_ALEN];
214 __u8 fd_resvd;
215 __u8 fd_fc_id[3];
216 __be64 fd_wwpn; /* port name, unaligned */
217} __attribute__((packed));
218
219/*
220 * FIP_DT_FKA - Advertisement keep-alive period.
221 */
222struct fip_fka_desc {
223 struct fip_desc fd_desc;
224 __u8 fd_resvd[2];
225 __be32 fd_fka_period; /* adv./keep-alive period in mS */
226} __attribute__((packed));
227
228/*
229 * FIP_DT_VENDOR descriptor.
230 */
231struct fip_vendor_desc {
232 struct fip_desc fd_desc;
233 __u8 fd_resvd[2];
234 __u8 fd_vendor_id[8];
235} __attribute__((packed));
236
237#endif /* _FC_FIP_H_ */
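To make the header layout concrete, a short sketch of filling in a FIP header for a discovery solicitation using only the constants defined above; the descriptor-list length is a placeholder supplied by the caller.

	#include <linux/if_ether.h>
	#include <linux/string.h>
	#include <scsi/fc/fc_fip.h>

	/* dl_words: length of the descriptor list in 32-bit words (FIP_BPW). */
	static void example_fill_fip_solicitation(struct fip_header *fh, u16 dl_words)
	{
		memset(fh, 0, sizeof(*fh));	/* clears the reserved fields */
		fh->fip_ver = FIP_VER_ENCAPS(FIP_VER);
		fh->fip_op = htons(FIP_OP_DISC);
		fh->fip_subcode = FIP_SC_SOL;
		fh->fip_dl_len = htons(dl_words);
		fh->fip_flags = htons(FIP_FL_FPMA);	/* example: FPMA-capable */
	}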
diff --git a/include/scsi/fc_transport_fcoe.h b/include/scsi/fc_transport_fcoe.h
deleted file mode 100644
index 8dca2af14ffc..000000000000
--- a/include/scsi/fc_transport_fcoe.h
+++ /dev/null
@@ -1,54 +0,0 @@
1#ifndef FC_TRANSPORT_FCOE_H
2#define FC_TRANSPORT_FCOE_H
3
4#include <linux/device.h>
5#include <linux/netdevice.h>
6#include <scsi/scsi_host.h>
7#include <scsi/libfc.h>
8
9/**
10 * struct fcoe_transport - FCoE transport struct for generic transport
11 * for Ethernet devices as well as pure HBAs
12 *
13 * @name: name for thsi transport
14 * @bus: physical bus type (pci_bus_type)
15 * @driver: physical bus driver for network device
16 * @create: entry create function
17 * @destroy: exit destroy function
18 * @list: list of transports
19 */
20struct fcoe_transport {
21 char *name;
22 unsigned short vendor;
23 unsigned short device;
24 struct bus_type *bus;
25 struct device_driver *driver;
26 int (*create)(struct net_device *device);
27 int (*destroy)(struct net_device *device);
28 bool (*match)(struct net_device *device);
29 struct list_head list;
30 struct list_head devlist;
31 struct mutex devlock;
32};
33
34/**
35 * MODULE_ALIAS_FCOE_PCI
36 *
37 * some care must be taken with this, vendor and device MUST be a hex value
38 * preceded with 0x and with letters in lower case (0x12ab, not 0x12AB or 12AB)
39 */
40#define MODULE_ALIAS_FCOE_PCI(vendor, device) \
41 MODULE_ALIAS("fcoe-pci-" __stringify(vendor) "-" __stringify(device))
42
43/* exported funcs */
44int fcoe_transport_attach(struct net_device *netdev);
45int fcoe_transport_release(struct net_device *netdev);
46int fcoe_transport_register(struct fcoe_transport *t);
47int fcoe_transport_unregister(struct fcoe_transport *t);
48int fcoe_load_transport_driver(struct net_device *netdev);
49int __init fcoe_transport_init(void);
50int __exit fcoe_transport_exit(void);
51
52/* fcow_sw is the default transport */
53extern struct fcoe_transport fcoe_sw_transport;
54#endif /* FC_TRANSPORT_FCOE_H */
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index a70eafaad084..0303a6a098cc 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -22,6 +22,7 @@
22 22
23#include <linux/timer.h> 23#include <linux/timer.h>
24#include <linux/if.h> 24#include <linux/if.h>
25#include <linux/percpu.h>
25 26
26#include <scsi/scsi_transport.h> 27#include <scsi/scsi_transport.h>
27#include <scsi/scsi_transport_fc.h> 28#include <scsi/scsi_transport_fc.h>
@@ -661,7 +662,8 @@ struct fc_lport {
661 unsigned long boot_time; 662 unsigned long boot_time;
662 663
663 struct fc_host_statistics host_stats; 664 struct fc_host_statistics host_stats;
664 struct fcoe_dev_stats *dev_stats[NR_CPUS]; 665 struct fcoe_dev_stats *dev_stats;
666
665 u64 wwpn; 667 u64 wwpn;
666 u64 wwnn; 668 u64 wwnn;
667 u8 retry_count; 669 u8 retry_count;
@@ -694,11 +696,6 @@ struct fc_lport {
694/* 696/*
695 * FC_LPORT HELPER FUNCTIONS 697 * FC_LPORT HELPER FUNCTIONS
696 *****************************/ 698 *****************************/
697static inline void *lport_priv(const struct fc_lport *lp)
698{
699 return (void *)(lp + 1);
700}
701
702static inline int fc_lport_test_ready(struct fc_lport *lp) 699static inline int fc_lport_test_ready(struct fc_lport *lp)
703{ 700{
704 return lp->state == LPORT_ST_READY; 701 return lp->state == LPORT_ST_READY;
@@ -722,6 +719,42 @@ static inline void fc_lport_state_enter(struct fc_lport *lp,
722 lp->state = state; 719 lp->state = state;
723} 720}
724 721
722static inline int fc_lport_init_stats(struct fc_lport *lp)
723{
724 /* allocate per cpu stats block */
725 lp->dev_stats = alloc_percpu(struct fcoe_dev_stats);
726 if (!lp->dev_stats)
727 return -ENOMEM;
728 return 0;
729}
730
731static inline void fc_lport_free_stats(struct fc_lport *lp)
732{
733 free_percpu(lp->dev_stats);
734}
735
736static inline struct fcoe_dev_stats *fc_lport_get_stats(struct fc_lport *lp)
737{
738 return per_cpu_ptr(lp->dev_stats, smp_processor_id());
739}
740
741static inline void *lport_priv(const struct fc_lport *lp)
742{
743 return (void *)(lp + 1);
744}
745
746/**
747 * libfc_host_alloc() - Allocate a Scsi_Host with room for the fc_lport
748 * @sht: ptr to the scsi host templ
749 * @priv_size: size of private data after fc_lport
750 *
751 * Returns: ptr to Scsi_Host
752 */
753static inline struct Scsi_Host *
754libfc_host_alloc(struct scsi_host_template *sht, int priv_size)
755{
756 return scsi_host_alloc(sht, sizeof(struct fc_lport) + priv_size);
757}
725 758
726/* 759/*
727 * LOCAL PORT LAYER 760 * LOCAL PORT LAYER
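A brief sketch of how a low-level driver might combine the new helpers above when creating a local port; the scsi_host_template and the error handling are simplified.

	#include <scsi/scsi_host.h>
	#include <scsi/libfc.h>

	/* Allocate a Scsi_Host with room for the fc_lport plus 'priv_size'
	 * bytes of LLD-private data, then set up the per-CPU statistics. */
	static struct fc_lport *example_lport_create(struct scsi_host_template *sht,
						     int priv_size)
	{
		struct Scsi_Host *shost;
		struct fc_lport *lp;

		shost = libfc_host_alloc(sht, priv_size);
		if (!shost)
			return NULL;

		lp = shost_priv(shost);
		if (fc_lport_init_stats(lp)) {
			scsi_host_put(shost);
			return NULL;
		}
		return lp;
	}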
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index c41f7d0c6efc..666cc131732e 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. 2 * Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2007-2008 Intel Corporation. All rights reserved.
3 * 4 *
4 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License, 6 * under the terms and conditions of the GNU General Public License,
@@ -20,134 +21,144 @@
20#ifndef _LIBFCOE_H 21#ifndef _LIBFCOE_H
21#define _LIBFCOE_H 22#define _LIBFCOE_H
22 23
24#include <linux/etherdevice.h>
25#include <linux/if_ether.h>
23#include <linux/netdevice.h> 26#include <linux/netdevice.h>
24#include <linux/skbuff.h> 27#include <linux/skbuff.h>
28#include <linux/workqueue.h>
25#include <scsi/fc/fc_fcoe.h> 29#include <scsi/fc/fc_fcoe.h>
26#include <scsi/libfc.h> 30#include <scsi/libfc.h>
27 31
28/* 32/*
29 * this percpu struct for fcoe 33 * FIP tunable parameters.
30 */ 34 */
31struct fcoe_percpu_s { 35#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */
32 int cpu; 36#define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */
33 struct task_struct *thread; 37#define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */
34 struct sk_buff_head fcoe_rx_list; 38
35 struct page *crc_eof_page; 39/**
36 int crc_eof_offset; 40 * enum fip_state - internal state of FCoE controller.
41 * @FIP_ST_DISABLED: controller has been disabled or not yet enabled.
42 * @FIP_ST_LINK_WAIT: the physical link is down or unusable.
43 * @FIP_ST_AUTO: determining whether to use FIP or non-FIP mode.
44 * @FIP_ST_NON_FIP: non-FIP mode selected.
45 * @FIP_ST_ENABLED: FIP mode selected.
46 */
47enum fip_state {
48 FIP_ST_DISABLED,
49 FIP_ST_LINK_WAIT,
50 FIP_ST_AUTO,
51 FIP_ST_NON_FIP,
52 FIP_ST_ENABLED,
37}; 53};
38 54
39/* 55/**
40 * the fcoe sw transport private data 56 * struct fcoe_ctlr - FCoE Controller and FIP state.
57 * @state: internal FIP state for network link and FIP or non-FIP mode.
58 * @lp: &fc_lport: libfc local port.
59 * @sel_fcf: currently selected FCF, or NULL.
60 * @fcfs: list of discovered FCFs.
61 * @fcf_count: number of discovered FCF entries.
62 * @sol_time: time when a multicast solicitation was last sent.
63 * @sel_time: time after which to select an FCF.
64 * @port_ka_time: time of next port keep-alive.
65 * @ctlr_ka_time: time of next controller keep-alive.
66 * @timer: timer struct used for all delayed events.
67 * @link_work: &work_struct for doing FCF selection.
68 * @recv_work: &work_struct for receiving FIP frames.
69 * @fip_recv_list: list of received FIP frames.
70 * @user_mfs: configured maximum FC frame size, including FC header.
71 * @flogi_oxid: exchange ID of most recent fabric login.
72 * @flogi_count: number of FLOGI attempts in AUTO mode.
73 * @link: current link status for libfc.
74 * @last_link: last link state reported to libfc.
75 * @map_dest: use the FC_MAP mode for destination MAC addresses.
76 * @dest_addr: MAC address of the selected FC forwarder.
77 * @ctl_src_addr: the native MAC address of our local port.
78 * @data_src_addr: the assigned MAC address for the local port after FLOGI.
79 * @send: LLD-supplied function to handle sending of FIP Ethernet frames.
80 * @update_mac: LLD-supplied function to handle changes to MAC addresses.
81 * @lock: lock protecting this structure.
82 *
83 * This structure is used by all FCoE drivers. It contains information
84 * needed by all FCoE low-level drivers (LLDs) as well as internal state
85 * for FIP, and fields shared with the LLDs.
41 */ 86 */
42struct fcoe_softc { 87struct fcoe_ctlr {
43 struct list_head list; 88 enum fip_state state;
44 struct fc_lport *lp; 89 struct fc_lport *lp;
45 struct net_device *real_dev; 90 struct fcoe_fcf *sel_fcf;
46 struct net_device *phys_dev; /* device with ethtool_ops */ 91 struct list_head fcfs;
47 struct packet_type fcoe_packet_type; 92 u16 fcf_count;
48 struct sk_buff_head fcoe_pending_queue; 93 unsigned long sol_time;
49 u8 fcoe_pending_queue_active; 94 unsigned long sel_time;
50 95 unsigned long port_ka_time;
96 unsigned long ctlr_ka_time;
97 struct timer_list timer;
98 struct work_struct link_work;
99 struct work_struct recv_work;
100 struct sk_buff_head fip_recv_list;
101 u16 user_mfs;
102 u16 flogi_oxid;
103 u8 flogi_count;
104 u8 link;
105 u8 last_link;
106 u8 map_dest;
51 u8 dest_addr[ETH_ALEN]; 107 u8 dest_addr[ETH_ALEN];
52 u8 ctl_src_addr[ETH_ALEN]; 108 u8 ctl_src_addr[ETH_ALEN];
53 u8 data_src_addr[ETH_ALEN]; 109 u8 data_src_addr[ETH_ALEN];
54 /*
55 * fcoe protocol address learning related stuff
56 */
57 u16 flogi_oxid;
58 u8 flogi_progress;
59 u8 address_mode;
60};
61
62static inline struct net_device *fcoe_netdev(
63 const struct fc_lport *lp)
64{
65 return ((struct fcoe_softc *)lport_priv(lp))->real_dev;
66}
67
68static inline struct fcoe_hdr *skb_fcoe_header(const struct sk_buff *skb)
69{
70 return (struct fcoe_hdr *)skb_network_header(skb);
71}
72
73static inline int skb_fcoe_offset(const struct sk_buff *skb)
74{
75 return skb_network_offset(skb);
76}
77
78static inline struct fc_frame_header *skb_fc_header(const struct sk_buff *skb)
79{
80 return (struct fc_frame_header *)skb_transport_header(skb);
81}
82
83static inline int skb_fc_offset(const struct sk_buff *skb)
84{
85 return skb_transport_offset(skb);
86}
87 110
88static inline void skb_reset_fc_header(struct sk_buff *skb) 111 void (*send)(struct fcoe_ctlr *, struct sk_buff *);
89{ 112 void (*update_mac)(struct fcoe_ctlr *, u8 *old, u8 *new);
90 skb_reset_network_header(skb); 113 spinlock_t lock;
91 skb_set_transport_header(skb, skb_network_offset(skb) + 114};
92 sizeof(struct fcoe_hdr));
93}
94
95static inline bool skb_fc_is_data(const struct sk_buff *skb)
96{
97 return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_SOL_DATA;
98}
99
100static inline bool skb_fc_is_cmd(const struct sk_buff *skb)
101{
102 return skb_fc_header(skb)->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD;
103}
104 115
105static inline bool skb_fc_has_exthdr(const struct sk_buff *skb) 116/*
106{ 117 * struct fcoe_fcf - Fibre-Channel Forwarder.
107 return (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_VFTH) || 118 * @list: list linkage.
108 (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_IFRH) || 119 * @time: system time (jiffies) when an advertisement was last received.
109 (skb_fc_header(skb)->fh_r_ctl == FC_RCTL_ENCH); 120 * @switch_name: WWN of switch from advertisement.
110} 121 * @fabric_name: WWN of fabric from advertisement.
122 * @fc_map: FC_MAP value from advertisement.
123 * @fcf_mac: Ethernet address of the FCF.
124 * @vfid: virtual fabric ID.
125 * @pri: selection priority, smaller values are better.
126 * @flags: flags received from advertisement.
127 * @fka_period: keep-alive period, in jiffies.
128 *
129 * A Fibre-Channel Forwarder (FCF) is the entity on the Ethernet that
130 * passes FCoE frames on to an FC fabric. This structure represents
131 * one FCF from which advertisements have been received.
132 *
133 * When looking up an FCF, @switch_name, @fabric_name, @fc_map, @vfid, and
134 * @fcf_mac together form the lookup key.
135 */
136struct fcoe_fcf {
137 struct list_head list;
138 unsigned long time;
111 139
112static inline bool skb_fc_is_roff(const struct sk_buff *skb) 140 u64 switch_name;
113{ 141 u64 fabric_name;
114 return skb_fc_header(skb)->fh_f_ctl[2] & FC_FC_REL_OFF; 142 u32 fc_map;
115} 143 u16 vfid;
144 u8 fcf_mac[ETH_ALEN];
116 145
117static inline u16 skb_fc_oxid(const struct sk_buff *skb) 146 u8 pri;
118{ 147 u16 flags;
119 return be16_to_cpu(skb_fc_header(skb)->fh_ox_id); 148 u32 fka_period;
120} 149};
121 150
122static inline u16 skb_fc_rxid(const struct sk_buff *skb) 151/* FIP API functions */
123{ 152void fcoe_ctlr_init(struct fcoe_ctlr *);
124 return be16_to_cpu(skb_fc_header(skb)->fh_rx_id); 153void fcoe_ctlr_destroy(struct fcoe_ctlr *);
125} 154void fcoe_ctlr_link_up(struct fcoe_ctlr *);
155int fcoe_ctlr_link_down(struct fcoe_ctlr *);
156int fcoe_ctlr_els_send(struct fcoe_ctlr *, struct sk_buff *);
157void fcoe_ctlr_recv(struct fcoe_ctlr *, struct sk_buff *);
158int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *, struct fc_frame *fp, u8 *sa);
126 159
127/* libfcoe funcs */ 160/* libfcoe funcs */
128int fcoe_reset(struct Scsi_Host *shost); 161u64 fcoe_wwn_from_mac(unsigned char mac[], unsigned int, unsigned int);
129u64 fcoe_wwn_from_mac(unsigned char mac[MAX_ADDR_LEN],
130 unsigned int scheme, unsigned int port);
131
132u32 fcoe_fc_crc(struct fc_frame *fp);
133int fcoe_xmit(struct fc_lport *, struct fc_frame *);
134int fcoe_rcv(struct sk_buff *, struct net_device *,
135 struct packet_type *, struct net_device *);
136
137int fcoe_percpu_receive_thread(void *arg);
138void fcoe_clean_pending_queue(struct fc_lport *lp);
139void fcoe_percpu_clean(struct fc_lport *lp);
140void fcoe_watchdog(ulong vp);
141int fcoe_link_ok(struct fc_lport *lp);
142
143struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
144int fcoe_hostlist_add(const struct fc_lport *);
145int fcoe_hostlist_remove(const struct fc_lport *);
146
147struct Scsi_Host *fcoe_host_alloc(struct scsi_host_template *, int);
148int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *); 162int fcoe_libfc_config(struct fc_lport *, struct libfc_function_template *);
149 163
150/* fcoe sw hba */
151int __init fcoe_sw_init(void);
152int __exit fcoe_sw_exit(void);
153#endif /* _LIBFCOE_H */ 164#endif /* _LIBFCOE_H */
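The kerneldoc for struct fcoe_fcf above says that @switch_name, @fabric_name,
@fc_map, @vfid and @fcf_mac together form the lookup key. As a minimal sketch
of what that implies (the helper name fcf_lookup_match is hypothetical, not
part of the header, and assumes a file that already includes the header above
and <linux/string.h>):

/* Compare two FCF entries on the documented lookup-key fields only. */
static bool fcf_lookup_match(const struct fcoe_fcf *a, const struct fcoe_fcf *b)
{
	return a->switch_name == b->switch_name &&
	       a->fabric_name == b->fabric_name &&
	       a->fc_map == b->fc_map &&
	       a->vfid == b->vfid &&
	       memcmp(a->fcf_mac, b->fcf_mac, ETH_ALEN) == 0;
}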
diff --git a/include/trace/block.h b/include/trace/block.h
index 25c6a1fd5b77..25b7068b819e 100644
--- a/include/trace/block.h
+++ b/include/trace/block.h
@@ -5,72 +5,72 @@
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(block_rq_abort, 7DECLARE_TRACE(block_rq_abort,
8 TPPROTO(struct request_queue *q, struct request *rq), 8 TP_PROTO(struct request_queue *q, struct request *rq),
9 TPARGS(q, rq)); 9 TP_ARGS(q, rq));
10 10
11DECLARE_TRACE(block_rq_insert, 11DECLARE_TRACE(block_rq_insert,
12 TPPROTO(struct request_queue *q, struct request *rq), 12 TP_PROTO(struct request_queue *q, struct request *rq),
13 TPARGS(q, rq)); 13 TP_ARGS(q, rq));
14 14
15DECLARE_TRACE(block_rq_issue, 15DECLARE_TRACE(block_rq_issue,
16 TPPROTO(struct request_queue *q, struct request *rq), 16 TP_PROTO(struct request_queue *q, struct request *rq),
17 TPARGS(q, rq)); 17 TP_ARGS(q, rq));
18 18
19DECLARE_TRACE(block_rq_requeue, 19DECLARE_TRACE(block_rq_requeue,
20 TPPROTO(struct request_queue *q, struct request *rq), 20 TP_PROTO(struct request_queue *q, struct request *rq),
21 TPARGS(q, rq)); 21 TP_ARGS(q, rq));
22 22
23DECLARE_TRACE(block_rq_complete, 23DECLARE_TRACE(block_rq_complete,
24 TPPROTO(struct request_queue *q, struct request *rq), 24 TP_PROTO(struct request_queue *q, struct request *rq),
25 TPARGS(q, rq)); 25 TP_ARGS(q, rq));
26 26
27DECLARE_TRACE(block_bio_bounce, 27DECLARE_TRACE(block_bio_bounce,
28 TPPROTO(struct request_queue *q, struct bio *bio), 28 TP_PROTO(struct request_queue *q, struct bio *bio),
29 TPARGS(q, bio)); 29 TP_ARGS(q, bio));
30 30
31DECLARE_TRACE(block_bio_complete, 31DECLARE_TRACE(block_bio_complete,
32 TPPROTO(struct request_queue *q, struct bio *bio), 32 TP_PROTO(struct request_queue *q, struct bio *bio),
33 TPARGS(q, bio)); 33 TP_ARGS(q, bio));
34 34
35DECLARE_TRACE(block_bio_backmerge, 35DECLARE_TRACE(block_bio_backmerge,
36 TPPROTO(struct request_queue *q, struct bio *bio), 36 TP_PROTO(struct request_queue *q, struct bio *bio),
37 TPARGS(q, bio)); 37 TP_ARGS(q, bio));
38 38
39DECLARE_TRACE(block_bio_frontmerge, 39DECLARE_TRACE(block_bio_frontmerge,
40 TPPROTO(struct request_queue *q, struct bio *bio), 40 TP_PROTO(struct request_queue *q, struct bio *bio),
41 TPARGS(q, bio)); 41 TP_ARGS(q, bio));
42 42
43DECLARE_TRACE(block_bio_queue, 43DECLARE_TRACE(block_bio_queue,
44 TPPROTO(struct request_queue *q, struct bio *bio), 44 TP_PROTO(struct request_queue *q, struct bio *bio),
45 TPARGS(q, bio)); 45 TP_ARGS(q, bio));
46 46
47DECLARE_TRACE(block_getrq, 47DECLARE_TRACE(block_getrq,
48 TPPROTO(struct request_queue *q, struct bio *bio, int rw), 48 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
49 TPARGS(q, bio, rw)); 49 TP_ARGS(q, bio, rw));
50 50
51DECLARE_TRACE(block_sleeprq, 51DECLARE_TRACE(block_sleeprq,
52 TPPROTO(struct request_queue *q, struct bio *bio, int rw), 52 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
53 TPARGS(q, bio, rw)); 53 TP_ARGS(q, bio, rw));
54 54
55DECLARE_TRACE(block_plug, 55DECLARE_TRACE(block_plug,
56 TPPROTO(struct request_queue *q), 56 TP_PROTO(struct request_queue *q),
57 TPARGS(q)); 57 TP_ARGS(q));
58 58
59DECLARE_TRACE(block_unplug_timer, 59DECLARE_TRACE(block_unplug_timer,
60 TPPROTO(struct request_queue *q), 60 TP_PROTO(struct request_queue *q),
61 TPARGS(q)); 61 TP_ARGS(q));
62 62
63DECLARE_TRACE(block_unplug_io, 63DECLARE_TRACE(block_unplug_io,
64 TPPROTO(struct request_queue *q), 64 TP_PROTO(struct request_queue *q),
65 TPARGS(q)); 65 TP_ARGS(q));
66 66
67DECLARE_TRACE(block_split, 67DECLARE_TRACE(block_split,
68 TPPROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), 68 TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
69 TPARGS(q, bio, pdu)); 69 TP_ARGS(q, bio, pdu));
70 70
71DECLARE_TRACE(block_remap, 71DECLARE_TRACE(block_remap,
72 TPPROTO(struct request_queue *q, struct bio *bio, dev_t dev, 72 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
73 sector_t from, sector_t to), 73 sector_t from, sector_t to),
74 TPARGS(q, bio, dev, from, to)); 74 TP_ARGS(q, bio, dev, from, to));
75 75
76#endif 76#endif
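The block.h hunk above is purely mechanical: the tracepoint helper macros are
renamed from TPPROTO/TPARGS to TP_PROTO/TP_ARGS and the declarations are
otherwise unchanged. As an illustration of the shape these macros describe
(the name block_example is made up and not part of the header):

/* Illustrative only: a tracepoint declared with the renamed macros. */
DECLARE_TRACE(block_example,
	TP_PROTO(struct request_queue *q, struct request *rq),
	TP_ARGS(q, rq));

DECLARE_TRACE also generates a static inline emit helper, so a call site would
fire this as trace_block_example(q, rq).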
diff --git a/include/trace/irq.h b/include/trace/irq.h
new file mode 100644
index 000000000000..ff5d4495dc37
--- /dev/null
+++ b/include/trace/irq.h
@@ -0,0 +1,9 @@
1#ifndef _TRACE_IRQ_H
2#define _TRACE_IRQ_H
3
4#include <linux/interrupt.h>
5#include <linux/tracepoint.h>
6
7#include <trace/irq_event_types.h>
8
9#endif
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h
new file mode 100644
index 000000000000..85964ebd47ec
--- /dev/null
+++ b/include/trace/irq_event_types.h
@@ -0,0 +1,55 @@
1
2/* use <trace/irq.h> instead */
3#ifndef TRACE_FORMAT
4# error Do not include this file directly.
5# error Unless you know what you are doing.
6#endif
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM irq
10
11/*
12 * Tracepoint for entry of interrupt handler:
13 */
14TRACE_FORMAT(irq_handler_entry,
15 TP_PROTO(int irq, struct irqaction *action),
16 TP_ARGS(irq, action),
17 TP_FMT("irq=%d handler=%s", irq, action->name)
18 );
19
20/*
21 * Tracepoint for return of an interrupt handler:
22 */
23TRACE_EVENT(irq_handler_exit,
24
25 TP_PROTO(int irq, struct irqaction *action, int ret),
26
27 TP_ARGS(irq, action, ret),
28
29 TP_STRUCT__entry(
30 __field( int, irq )
31 __field( int, ret )
32 ),
33
34 TP_fast_assign(
35 __entry->irq = irq;
36 __entry->ret = ret;
37 ),
38
39 TP_printk("irq=%d return=%s",
40 __entry->irq, __entry->ret ? "handled" : "unhandled")
41);
42
43TRACE_FORMAT(softirq_entry,
44 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
45 TP_ARGS(h, vec),
46 TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
47 );
48
49TRACE_FORMAT(softirq_exit,
50 TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
51 TP_ARGS(h, vec),
52 TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
53 );
54
55#undef TRACE_SYSTEM
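irq_event_types.h mixes the two declaration styles: TRACE_FORMAT entries carry
only a printf-style TP_FMT, while TRACE_EVENT (irq_handler_exit) additionally
describes a binary record layout and how to fill and print it. One detail worth
spelling out is the (h - vec) arithmetic in the softirq entries: the probe gets
a pointer into the softirq vector plus its base, so the difference is the
softirq number. A hedged sketch of a probe using that (the probe name is
hypothetical; softirq_to_name is assumed to come from <linux/interrupt.h>, as
the TP_FMT strings above already rely on it):

/* Illustrative probe matching the softirq_entry prototype above. */
static void example_softirq_entry_probe(struct softirq_action *h,
					struct softirq_action *vec)
{
	int nr = h - vec;	/* index into the softirq vector */

	pr_debug("softirq=%d action=%s\n", nr, softirq_to_name[nr]);
}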
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
new file mode 100644
index 000000000000..28ee69f9cd46
--- /dev/null
+++ b/include/trace/kmemtrace.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2008 Eduard - Gabriel Munteanu
3 *
4 * This file is released under GPL version 2.
5 */
6
7#ifndef _LINUX_KMEMTRACE_H
8#define _LINUX_KMEMTRACE_H
9
10#ifdef __KERNEL__
11
12#include <linux/tracepoint.h>
13#include <linux/types.h>
14
15#ifdef CONFIG_KMEMTRACE
16extern void kmemtrace_init(void);
17#else
18static inline void kmemtrace_init(void)
19{
20}
21#endif
22
23DECLARE_TRACE(kmalloc,
24 TP_PROTO(unsigned long call_site,
25 const void *ptr,
26 size_t bytes_req,
27 size_t bytes_alloc,
28 gfp_t gfp_flags),
29 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
30DECLARE_TRACE(kmem_cache_alloc,
31 TP_PROTO(unsigned long call_site,
32 const void *ptr,
33 size_t bytes_req,
34 size_t bytes_alloc,
35 gfp_t gfp_flags),
36 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
37DECLARE_TRACE(kmalloc_node,
38 TP_PROTO(unsigned long call_site,
39 const void *ptr,
40 size_t bytes_req,
41 size_t bytes_alloc,
42 gfp_t gfp_flags,
43 int node),
44 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
45DECLARE_TRACE(kmem_cache_alloc_node,
46 TP_PROTO(unsigned long call_site,
47 const void *ptr,
48 size_t bytes_req,
49 size_t bytes_alloc,
50 gfp_t gfp_flags,
51 int node),
52 TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
53DECLARE_TRACE(kfree,
54 TP_PROTO(unsigned long call_site, const void *ptr),
55 TP_ARGS(call_site, ptr));
56DECLARE_TRACE(kmem_cache_free,
57 TP_PROTO(unsigned long call_site, const void *ptr),
58 TP_ARGS(call_site, ptr));
59
60#endif /* __KERNEL__ */
61
62#endif /* _LINUX_KMEMTRACE_H */
63
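kmemtrace declares per-allocator tracepoints that carry the call site, the
returned pointer, the requested and actually allocated sizes, and the GFP
flags. A minimal sketch of a call site (illustrative only: real allocators
emit these from inside the allocator and pass the true allocated size as
bytes_alloc; here the requested size is reused to keep the sketch
self-contained):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <trace/kmemtrace.h>

/* Wrap kmalloc() and report the allocation through the tracepoint above. */
static void *example_traced_kmalloc(size_t size, gfp_t flags)
{
	void *ptr = kmalloc(size, flags);

	if (ptr)
		trace_kmalloc(_RET_IP_, ptr, size, size, flags);
	return ptr;
}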
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h
new file mode 100644
index 000000000000..5ca67df87f2a
--- /dev/null
+++ b/include/trace/lockdep.h
@@ -0,0 +1,9 @@
1#ifndef _TRACE_LOCKDEP_H
2#define _TRACE_LOCKDEP_H
3
4#include <linux/lockdep.h>
5#include <linux/tracepoint.h>
6
7#include <trace/lockdep_event_types.h>
8
9#endif
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h
new file mode 100644
index 000000000000..adccfcd2ec8f
--- /dev/null
+++ b/include/trace/lockdep_event_types.h
@@ -0,0 +1,44 @@
1
2#ifndef TRACE_FORMAT
3# error Do not include this file directly.
4# error Unless you know what you are doing.
5#endif
6
7#undef TRACE_SYSTEM
8#define TRACE_SYSTEM lock
9
10#ifdef CONFIG_LOCKDEP
11
12TRACE_FORMAT(lock_acquire,
13 TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
14 int trylock, int read, int check,
15 struct lockdep_map *next_lock, unsigned long ip),
16 TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
17 TP_FMT("%s%s%s", trylock ? "try " : "",
18 read ? "read " : "", lock->name)
19 );
20
21TRACE_FORMAT(lock_release,
22 TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
23 TP_ARGS(lock, nested, ip),
24 TP_FMT("%s", lock->name)
25 );
26
27#ifdef CONFIG_LOCK_STAT
28
29TRACE_FORMAT(lock_contended,
30 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
31 TP_ARGS(lock, ip),
32 TP_FMT("%s", lock->name)
33 );
34
35TRACE_FORMAT(lock_acquired,
36 TP_PROTO(struct lockdep_map *lock, unsigned long ip),
37 TP_ARGS(lock, ip),
38 TP_FMT("%s", lock->name)
39 );
40
41#endif
42#endif
43
44#undef TRACE_SYSTEM
diff --git a/include/trace/power.h b/include/trace/power.h
new file mode 100644
index 000000000000..ef204666e983
--- /dev/null
+++ b/include/trace/power.h
@@ -0,0 +1,32 @@
1#ifndef _TRACE_POWER_H
2#define _TRACE_POWER_H
3
4#include <linux/ktime.h>
5#include <linux/tracepoint.h>
6
7enum {
8 POWER_NONE = 0,
9 POWER_CSTATE = 1,
10 POWER_PSTATE = 2,
11};
12
13struct power_trace {
14 ktime_t stamp;
15 ktime_t end;
16 int type;
17 int state;
18};
19
20DECLARE_TRACE(power_start,
21 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
22 TP_ARGS(it, type, state));
23
24DECLARE_TRACE(power_mark,
25 TP_PROTO(struct power_trace *it, unsigned int type, unsigned int state),
26 TP_ARGS(it, type, state));
27
28DECLARE_TRACE(power_end,
29 TP_PROTO(struct power_trace *it),
30 TP_ARGS(it));
31
32#endif /* _TRACE_POWER_H */
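power.h pairs a small per-event container (struct power_trace) with start,
mark and end tracepoints. A hedged sketch of the call pattern this suggests
around entering a low-power state (the function name and the way the state
value is obtained are hypothetical):

/* Illustrative usage: bracket a C-state entry with the power tracepoints. */
static void example_enter_cstate(unsigned int cstate)
{
	struct power_trace it;

	trace_power_start(&it, POWER_CSTATE, cstate);
	/* architecture-specific idle entry would happen here */
	trace_power_end(&it);
}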
diff --git a/include/trace/sched.h b/include/trace/sched.h
index 0d81098ee9fc..4e372a1a29bf 100644
--- a/include/trace/sched.h
+++ b/include/trace/sched.h
@@ -4,53 +4,6 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(sched_kthread_stop, 7#include <trace/sched_event_types.h>
8 TPPROTO(struct task_struct *t),
9 TPARGS(t));
10
11DECLARE_TRACE(sched_kthread_stop_ret,
12 TPPROTO(int ret),
13 TPARGS(ret));
14
15DECLARE_TRACE(sched_wait_task,
16 TPPROTO(struct rq *rq, struct task_struct *p),
17 TPARGS(rq, p));
18
19DECLARE_TRACE(sched_wakeup,
20 TPPROTO(struct rq *rq, struct task_struct *p, int success),
21 TPARGS(rq, p, success));
22
23DECLARE_TRACE(sched_wakeup_new,
24 TPPROTO(struct rq *rq, struct task_struct *p, int success),
25 TPARGS(rq, p, success));
26
27DECLARE_TRACE(sched_switch,
28 TPPROTO(struct rq *rq, struct task_struct *prev,
29 struct task_struct *next),
30 TPARGS(rq, prev, next));
31
32DECLARE_TRACE(sched_migrate_task,
33 TPPROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
34 TPARGS(p, orig_cpu, dest_cpu));
35
36DECLARE_TRACE(sched_process_free,
37 TPPROTO(struct task_struct *p),
38 TPARGS(p));
39
40DECLARE_TRACE(sched_process_exit,
41 TPPROTO(struct task_struct *p),
42 TPARGS(p));
43
44DECLARE_TRACE(sched_process_wait,
45 TPPROTO(struct pid *pid),
46 TPARGS(pid));
47
48DECLARE_TRACE(sched_process_fork,
49 TPPROTO(struct task_struct *parent, struct task_struct *child),
50 TPARGS(parent, child));
51
52DECLARE_TRACE(sched_signal_send,
53 TPPROTO(int sig, struct task_struct *p),
54 TPARGS(sig, p));
55 8
56#endif 9#endif
diff --git a/include/trace/sched_event_types.h b/include/trace/sched_event_types.h
new file mode 100644
index 000000000000..63547dc1125f
--- /dev/null
+++ b/include/trace/sched_event_types.h
@@ -0,0 +1,337 @@
1
2/* use <trace/sched.h> instead */
3#ifndef TRACE_EVENT
4# error Do not include this file directly.
5# error Unless you know what you are doing.
6#endif
7
8#undef TRACE_SYSTEM
9#define TRACE_SYSTEM sched
10
11/*
12 * Tracepoint for calling kthread_stop, performed to end a kthread:
13 */
14TRACE_EVENT(sched_kthread_stop,
15
16 TP_PROTO(struct task_struct *t),
17
18 TP_ARGS(t),
19
20 TP_STRUCT__entry(
21 __array( char, comm, TASK_COMM_LEN )
22 __field( pid_t, pid )
23 ),
24
25 TP_fast_assign(
26 memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
27 __entry->pid = t->pid;
28 ),
29
30 TP_printk("task %s:%d", __entry->comm, __entry->pid)
31);
32
33/*
34 * Tracepoint for the return value of the kthread stopping:
35 */
36TRACE_EVENT(sched_kthread_stop_ret,
37
38 TP_PROTO(int ret),
39
40 TP_ARGS(ret),
41
42 TP_STRUCT__entry(
43 __field( int, ret )
44 ),
45
46 TP_fast_assign(
47 __entry->ret = ret;
48 ),
49
50 TP_printk("ret %d", __entry->ret)
51);
52
53/*
54 * Tracepoint for waiting on task to unschedule:
55 *
56 * (NOTE: the 'rq' argument is not used by generic trace events,
57 * but used by the latency tracer plugin. )
58 */
59TRACE_EVENT(sched_wait_task,
60
61 TP_PROTO(struct rq *rq, struct task_struct *p),
62
63 TP_ARGS(rq, p),
64
65 TP_STRUCT__entry(
66 __array( char, comm, TASK_COMM_LEN )
67 __field( pid_t, pid )
68 __field( int, prio )
69 ),
70
71 TP_fast_assign(
72 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
73 __entry->pid = p->pid;
74 __entry->prio = p->prio;
75 ),
76
77 TP_printk("task %s:%d [%d]",
78 __entry->comm, __entry->pid, __entry->prio)
79);
80
81/*
82 * Tracepoint for waking up a task:
83 *
84 * (NOTE: the 'rq' argument is not used by generic trace events,
85 * but used by the latency tracer plugin. )
86 */
87TRACE_EVENT(sched_wakeup,
88
89 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
90
91 TP_ARGS(rq, p, success),
92
93 TP_STRUCT__entry(
94 __array( char, comm, TASK_COMM_LEN )
95 __field( pid_t, pid )
96 __field( int, prio )
97 __field( int, success )
98 ),
99
100 TP_fast_assign(
101 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
102 __entry->pid = p->pid;
103 __entry->prio = p->prio;
104 __entry->success = success;
105 ),
106
107 TP_printk("task %s:%d [%d] success=%d",
108 __entry->comm, __entry->pid, __entry->prio,
109 __entry->success)
110);
111
112/*
113 * Tracepoint for waking up a new task:
114 *
115 * (NOTE: the 'rq' argument is not used by generic trace events,
116 * but used by the latency tracer plugin. )
117 */
118TRACE_EVENT(sched_wakeup_new,
119
120 TP_PROTO(struct rq *rq, struct task_struct *p, int success),
121
122 TP_ARGS(rq, p, success),
123
124 TP_STRUCT__entry(
125 __array( char, comm, TASK_COMM_LEN )
126 __field( pid_t, pid )
127 __field( int, prio )
128 __field( int, success )
129 ),
130
131 TP_fast_assign(
132 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
133 __entry->pid = p->pid;
134 __entry->prio = p->prio;
135 __entry->success = success;
136 ),
137
138 TP_printk("task %s:%d [%d] success=%d",
139 __entry->comm, __entry->pid, __entry->prio,
140 __entry->success)
141);
142
143/*
144 * Tracepoint for task switches, performed by the scheduler:
145 *
146 * (NOTE: the 'rq' argument is not used by generic trace events,
147 * but used by the latency tracer plugin. )
148 */
149TRACE_EVENT(sched_switch,
150
151 TP_PROTO(struct rq *rq, struct task_struct *prev,
152 struct task_struct *next),
153
154 TP_ARGS(rq, prev, next),
155
156 TP_STRUCT__entry(
157 __array( char, prev_comm, TASK_COMM_LEN )
158 __field( pid_t, prev_pid )
159 __field( int, prev_prio )
160 __array( char, next_comm, TASK_COMM_LEN )
161 __field( pid_t, next_pid )
162 __field( int, next_prio )
163 ),
164
165 TP_fast_assign(
166 memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
167 __entry->prev_pid = prev->pid;
168 __entry->prev_prio = prev->prio;
169 memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
170 __entry->next_pid = next->pid;
171 __entry->next_prio = next->prio;
172 ),
173
174 TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
175 __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
176 __entry->next_comm, __entry->next_pid, __entry->next_prio)
177);
178
179/*
180 * Tracepoint for a task being migrated:
181 */
182TRACE_EVENT(sched_migrate_task,
183
184 TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
185
186 TP_ARGS(p, orig_cpu, dest_cpu),
187
188 TP_STRUCT__entry(
189 __array( char, comm, TASK_COMM_LEN )
190 __field( pid_t, pid )
191 __field( int, prio )
192 __field( int, orig_cpu )
193 __field( int, dest_cpu )
194 ),
195
196 TP_fast_assign(
197 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
198 __entry->pid = p->pid;
199 __entry->prio = p->prio;
200 __entry->orig_cpu = orig_cpu;
201 __entry->dest_cpu = dest_cpu;
202 ),
203
204 TP_printk("task %s:%d [%d] from: %d to: %d",
205 __entry->comm, __entry->pid, __entry->prio,
206 __entry->orig_cpu, __entry->dest_cpu)
207);
208
209/*
210 * Tracepoint for freeing a task:
211 */
212TRACE_EVENT(sched_process_free,
213
214 TP_PROTO(struct task_struct *p),
215
216 TP_ARGS(p),
217
218 TP_STRUCT__entry(
219 __array( char, comm, TASK_COMM_LEN )
220 __field( pid_t, pid )
221 __field( int, prio )
222 ),
223
224 TP_fast_assign(
225 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
226 __entry->pid = p->pid;
227 __entry->prio = p->prio;
228 ),
229
230 TP_printk("task %s:%d [%d]",
231 __entry->comm, __entry->pid, __entry->prio)
232);
233
234/*
235 * Tracepoint for a task exiting:
236 */
237TRACE_EVENT(sched_process_exit,
238
239 TP_PROTO(struct task_struct *p),
240
241 TP_ARGS(p),
242
243 TP_STRUCT__entry(
244 __array( char, comm, TASK_COMM_LEN )
245 __field( pid_t, pid )
246 __field( int, prio )
247 ),
248
249 TP_fast_assign(
250 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
251 __entry->pid = p->pid;
252 __entry->prio = p->prio;
253 ),
254
255 TP_printk("task %s:%d [%d]",
256 __entry->comm, __entry->pid, __entry->prio)
257);
258
259/*
260 * Tracepoint for a waiting task:
261 */
262TRACE_EVENT(sched_process_wait,
263
264 TP_PROTO(struct pid *pid),
265
266 TP_ARGS(pid),
267
268 TP_STRUCT__entry(
269 __array( char, comm, TASK_COMM_LEN )
270 __field( pid_t, pid )
271 __field( int, prio )
272 ),
273
274 TP_fast_assign(
275 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
276 __entry->pid = pid_nr(pid);
277 __entry->prio = current->prio;
278 ),
279
280 TP_printk("task %s:%d [%d]",
281 __entry->comm, __entry->pid, __entry->prio)
282);
283
284/*
285 * Tracepoint for do_fork:
286 */
287TRACE_EVENT(sched_process_fork,
288
289 TP_PROTO(struct task_struct *parent, struct task_struct *child),
290
291 TP_ARGS(parent, child),
292
293 TP_STRUCT__entry(
294 __array( char, parent_comm, TASK_COMM_LEN )
295 __field( pid_t, parent_pid )
296 __array( char, child_comm, TASK_COMM_LEN )
297 __field( pid_t, child_pid )
298 ),
299
300 TP_fast_assign(
301 memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
302 __entry->parent_pid = parent->pid;
303 memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
304 __entry->child_pid = child->pid;
305 ),
306
307 TP_printk("parent %s:%d child %s:%d",
308 __entry->parent_comm, __entry->parent_pid,
309 __entry->child_comm, __entry->child_pid)
310);
311
312/*
313 * Tracepoint for sending a signal:
314 */
315TRACE_EVENT(sched_signal_send,
316
317 TP_PROTO(int sig, struct task_struct *p),
318
319 TP_ARGS(sig, p),
320
321 TP_STRUCT__entry(
322 __field( int, sig )
323 __array( char, comm, TASK_COMM_LEN )
324 __field( pid_t, pid )
325 ),
326
327 TP_fast_assign(
328 memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
329 __entry->pid = p->pid;
330 __entry->sig = sig;
331 ),
332
333 TP_printk("sig: %d task %s:%d",
334 __entry->sig, __entry->comm, __entry->pid)
335);
336
337#undef TRACE_SYSTEM
diff --git a/include/trace/skb.h b/include/trace/skb.h
index a96610f92f69..b66206d9be72 100644
--- a/include/trace/skb.h
+++ b/include/trace/skb.h
@@ -5,7 +5,7 @@
5#include <linux/tracepoint.h> 5#include <linux/tracepoint.h>
6 6
7DECLARE_TRACE(kfree_skb, 7DECLARE_TRACE(kfree_skb,
8 TPPROTO(struct sk_buff *skb, void *location), 8 TP_PROTO(struct sk_buff *skb, void *location),
9 TPARGS(skb, location)); 9 TP_ARGS(skb, location));
10 10
11#endif 11#endif
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h
new file mode 100644
index 000000000000..df56f5694be6
--- /dev/null
+++ b/include/trace/trace_event_types.h
@@ -0,0 +1,5 @@
1/* trace/<type>_event_types.h here */
2
3#include <trace/sched_event_types.h>
4#include <trace/irq_event_types.h>
5#include <trace/lockdep_event_types.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
new file mode 100644
index 000000000000..fd13750ca4ba
--- /dev/null
+++ b/include/trace/trace_events.h
@@ -0,0 +1,5 @@
1/* trace/<type>.h here */
2
3#include <trace/sched.h>
4#include <trace/irq.h>
5#include <trace/lockdep.h>
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h
new file mode 100644
index 000000000000..7626523deeba
--- /dev/null
+++ b/include/trace/workqueue.h
@@ -0,0 +1,25 @@
1#ifndef __TRACE_WORKQUEUE_H
2#define __TRACE_WORKQUEUE_H
3
4#include <linux/tracepoint.h>
5#include <linux/workqueue.h>
6#include <linux/sched.h>
7
8DECLARE_TRACE(workqueue_insertion,
9 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
10 TP_ARGS(wq_thread, work));
11
12DECLARE_TRACE(workqueue_execution,
13 TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
14 TP_ARGS(wq_thread, work));
15
16/* Trace the creation of one workqueue thread on a cpu */
17DECLARE_TRACE(workqueue_creation,
18 TP_PROTO(struct task_struct *wq_thread, int cpu),
19 TP_ARGS(wq_thread, cpu));
20
21DECLARE_TRACE(workqueue_destruction,
22 TP_PROTO(struct task_struct *wq_thread),
23 TP_ARGS(wq_thread));
24
25#endif /* __TRACE_WORKQUEUE_H */
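The workqueue tracepoints identify a workqueue by the task_struct of its
worker thread, so a probe attached to workqueue_execution sees both the thread
and the work item. A hedged sketch of such a probe (the probe name is
hypothetical; registration via the generated register_trace_* helper happens
outside this header, and <trace/workqueue.h> already pulls in sched.h and
workqueue.h for the types used):

/* Illustrative probe matching the workqueue_execution prototype above. */
static void example_wq_execution_probe(struct task_struct *wq_thread,
				       struct work_struct *work)
{
	pr_debug("%s: running work %p\n", wq_thread->comm, work);
}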
diff --git a/include/video/tdfx.h b/include/video/tdfx.h
index 7431d9681e57..befbaf0a92d8 100644
--- a/include/video/tdfx.h
+++ b/include/video/tdfx.h
@@ -1,6 +1,9 @@
1#ifndef _TDFX_H 1#ifndef _TDFX_H
2#define _TDFX_H 2#define _TDFX_H
3 3
4#include <linux/i2c.h>
5#include <linux/i2c-algo-bit.h>
6
4/* membase0 register offsets */ 7/* membase0 register offsets */
5#define STATUS 0x00 8#define STATUS 0x00
6#define PCIINIT0 0x04 9#define PCIINIT0 0x04
@@ -123,6 +126,18 @@
123#define VIDCFG_PIXFMT_SHIFT 18 126#define VIDCFG_PIXFMT_SHIFT 18
124#define DACMODE_2X BIT(0) 127#define DACMODE_2X BIT(0)
125 128
129/* I2C bit locations in the VIDSERPARPORT register */
130#define DDC_ENAB 0x00040000
131#define DDC_SCL_OUT 0x00080000
132#define DDC_SDA_OUT 0x00100000
133#define DDC_SCL_IN 0x00200000
134#define DDC_SDA_IN 0x00400000
135#define I2C_ENAB 0x00800000
136#define I2C_SCL_OUT 0x01000000
137#define I2C_SDA_OUT 0x02000000
138#define I2C_SCL_IN 0x04000000
139#define I2C_SDA_IN 0x08000000
140
126/* VGA rubbish, need to change this for multihead support */ 141/* VGA rubbish, need to change this for multihead support */
127#define MISC_W 0x3c2 142#define MISC_W 0x3c2
128#define MISC_R 0x3cc 143#define MISC_R 0x3cc
@@ -168,12 +183,23 @@ struct banshee_reg {
168 unsigned long miscinit0; 183 unsigned long miscinit0;
169}; 184};
170 185
186struct tdfx_par;
187
188struct tdfxfb_i2c_chan {
189 struct tdfx_par *par;
190 struct i2c_adapter adapter;
191 struct i2c_algo_bit_data algo;
192};
193
171struct tdfx_par { 194struct tdfx_par {
172 u32 max_pixclock; 195 u32 max_pixclock;
173 u32 palette[16]; 196 u32 palette[16];
174 void __iomem *regbase_virt; 197 void __iomem *regbase_virt;
175 unsigned long iobase; 198 unsigned long iobase;
176 int mtrr_handle; 199 int mtrr_handle;
200#ifdef CONFIG_FB_3DFX_I2C
201 struct tdfxfb_i2c_chan chan[2];
202#endif
177}; 203};
178 204
179#endif /* __KERNEL__ */ 205#endif /* __KERNEL__ */
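The tdfx.h changes wire the 3dfx framebuffer up for bit-banged I2C/DDC: new
bit masks for the serial/parallel port register, an i2c-algo-bit channel
structure, and two such channels in tdfx_par when CONFIG_FB_3DFX_I2C is set.
A hedged sketch of one callback such a channel needs (the accessors tdfx_inl()
and tdfx_outl() and the VIDSERPARPORT offset are assumptions taken from the
comment above, not definitions in this header):

/* Illustrative i2c-algo-bit setscl callback driving the DDC clock line. */
static void tdfxfb_ddc_setscl(void *data, int val)
{
	struct tdfxfb_i2c_chan *chan = data;
	struct tdfx_par *par = chan->par;
	u32 reg = tdfx_inl(par, VIDSERPARPORT);	/* assumed register accessor */

	if (val)
		reg |= DDC_SCL_OUT;
	else
		reg &= ~DDC_SCL_OUT;
	tdfx_outl(par, VIDSERPARPORT, reg);	/* assumed register accessor */
}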