author	David S. Miller <davem@davemloft.net>	2010-01-23 01:45:46 -0500
committer	David S. Miller <davem@davemloft.net>	2010-01-23 01:45:46 -0500
commit	6be325719b3e54624397e413efd4b33a997e55a3 (patch)
tree	57f321a56794cab2222e179b16731e0d76a4a68a /include/linux
parent	26d92f9276a56d55511a427fb70bd70886af647a (diff)
parent	92dcffb916d309aa01778bf8963a6932e4014d07 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/acpi.h30
-rw-r--r--include/linux/aio.h4
-rw-r--r--include/linux/atmel-mci.h4
-rw-r--r--include/linux/backlight.h12
-rw-r--r--include/linux/binfmts.h10
-rw-r--r--include/linux/bitmap.h11
-rw-r--r--include/linux/blkdev.h26
-rw-r--r--include/linux/cpu.h15
-rw-r--r--include/linux/cpumask.h2
-rw-r--r--include/linux/cs5535.h172
-rw-r--r--include/linux/ctype.h3
-rw-r--r--include/linux/decompress/mm.h4
-rw-r--r--include/linux/decompress/unlzo.h10
-rw-r--r--include/linux/device-mapper.h8
-rw-r--r--include/linux/device.h12
-rw-r--r--include/linux/dm-dirty-log.h6
-rw-r--r--include/linux/dm-ioctl.h13
-rw-r--r--include/linux/dm-region-hash.h3
-rw-r--r--include/linux/dmaengine.h2
-rw-r--r--include/linux/drbd.h2
-rw-r--r--include/linux/drbd_nl.h1
-rw-r--r--include/linux/dst.h587
-rw-r--r--include/linux/dynamic_debug.h13
-rw-r--r--include/linux/efi.h6
-rw-r--r--include/linux/elf.h2
-rw-r--r--include/linux/enclosure.h2
-rw-r--r--include/linux/err.h5
-rw-r--r--include/linux/exportfs.h2
-rw-r--r--include/linux/ext3_fs_sb.h2
-rw-r--r--include/linux/ext3_jbd.h7
-rw-r--r--include/linux/fiemap.h2
-rw-r--r--include/linux/file.h8
-rw-r--r--include/linux/firewire-cdev.h3
-rw-r--r--include/linux/firewire.h4
-rw-r--r--include/linux/fs.h70
-rw-r--r--include/linux/fs_stack.h6
-rw-r--r--include/linux/fsl_devices.h11
-rw-r--r--include/linux/ftrace_event.h5
-rw-r--r--include/linux/generic_acl.h41
-rw-r--r--include/linux/genhd.h6
-rw-r--r--include/linux/gpio.h6
-rw-r--r--include/linux/highmem.h2
-rw-r--r--include/linux/hrtimer.h58
-rw-r--r--include/linux/hugetlb.h6
-rw-r--r--include/linux/hw_breakpoint.h40
-rw-r--r--include/linux/i2c.h92
-rw-r--r--include/linux/i2c/adp5588.h12
-rw-r--r--include/linux/i2c/tps65010.h19
-rw-r--r--include/linux/i2c/twl.h (renamed from include/linux/i2c/twl4030.h)209
-rw-r--r--include/linux/i8042.h18
-rw-r--r--include/linux/ima.h12
-rw-r--r--include/linux/init.h2
-rw-r--r--include/linux/init_task.h2
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/iocontext.h27
-rw-r--r--include/linux/iommu-helper.h3
-rw-r--r--include/linux/ioport.h4
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/irq.h2
-rw-r--r--include/linux/jbd2.h1
-rw-r--r--include/linux/kallsyms.h12
-rw-r--r--include/linux/kernel-page-flags.h46
-rw-r--r--include/linux/kernel.h62
-rw-r--r--include/linux/kexec.h2
-rw-r--r--include/linux/kfifo.h568
-rw-r--r--include/linux/kgdb.h7
-rw-r--r--include/linux/kmemcheck.h110
-rw-r--r--include/linux/kmemleak.h6
-rw-r--r--include/linux/kmsg_dump.h60
-rw-r--r--include/linux/ksm.h96
-rw-r--r--include/linux/kvm.h1
-rw-r--r--include/linux/leds-lp3944.h3
-rw-r--r--include/linux/leds-pca9532.h2
-rw-r--r--include/linux/leds-regulator.h46
-rw-r--r--include/linux/libata.h3
-rw-r--r--include/linux/lis3lv02d.h15
-rw-r--r--include/linux/list_sort.h11
-rw-r--r--include/linux/lmb.h1
-rw-r--r--include/linux/memcontrol.h30
-rw-r--r--include/linux/memory.h27
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/mempolicy.h3
-rw-r--r--include/linux/mfd/88pm8607.h217
-rw-r--r--include/linux/mfd/ab4500.h262
-rw-r--r--include/linux/mfd/adp5520.h299
-rw-r--r--include/linux/mfd/ezx-pcap.h3
-rw-r--r--include/linux/mfd/mc13783-private.h208
-rw-r--r--include/linux/mfd/mc13783.h120
-rw-r--r--include/linux/mfd/pcf50633/core.h17
-rw-r--r--include/linux/mfd/pcf50633/mbc.h1
-rw-r--r--include/linux/mfd/tmio.h39
-rw-r--r--include/linux/mfd/wm831x/core.h43
-rw-r--r--include/linux/mfd/wm831x/pdata.h18
-rw-r--r--include/linux/mfd/wm8350/core.h14
-rw-r--r--include/linux/mfd/wm8350/gpio.h18
-rw-r--r--include/linux/mfd/wm8350/pmic.h28
-rw-r--r--include/linux/migrate.h8
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mm.h37
-rw-r--r--include/linux/mm_types.h6
-rw-r--r--include/linux/mmdebug.h2
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/mtd/bbm.h35
-rw-r--r--include/linux/mtd/cfi.h9
-rw-r--r--include/linux/mtd/flashchip.h9
-rw-r--r--include/linux/mtd/nand.h97
-rw-r--r--include/linux/mtd/nand_ecc.h10
-rw-r--r--include/linux/mtd/onenand.h23
-rw-r--r--include/linux/mtd/onenand_regs.h2
-rw-r--r--include/linux/namei.h2
-rw-r--r--include/linux/nfs4.h3
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_xdr.h14
-rw-r--r--include/linux/nfsacl.h1
-rw-r--r--include/linux/nfsd/cache.h86
-rw-r--r--include/linux/nfsd/export.h19
-rw-r--r--include/linux/nfsd/nfsd.h424
-rw-r--r--include/linux/nfsd/nfsfh.h206
-rw-r--r--include/linux/nfsd/state.h404
-rw-r--r--include/linux/nfsd/syscall.h8
-rw-r--r--include/linux/nfsd/xdr.h177
-rw-r--r--include/linux/nfsd/xdr3.h346
-rw-r--r--include/linux/nfsd/xdr4.h563
-rw-r--r--include/linux/node.h16
-rw-r--r--include/linux/nodemask.h33
-rw-r--r--include/linux/numa.h2
-rw-r--r--include/linux/oom.h4
-rw-r--r--include/linux/page-flags.h14
-rw-r--r--include/linux/page_cgroup.h7
-rw-r--r--include/linux/pci.h6
-rw-r--r--include/linux/pci_ids.h14
-rw-r--r--include/linux/percpu-defs.h1
-rw-r--r--include/linux/percpu.h434
-rw-r--r--include/linux/perf_counter.h444
-rw-r--r--include/linux/perf_event.h54
-rw-r--r--include/linux/plist.h43
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pnp.h13
-rw-r--r--include/linux/poison.h16
-rw-r--r--include/linux/ptrace.h23
-rw-r--r--include/linux/pwm_backlight.h2
-rw-r--r--include/linux/quota.h5
-rw-r--r--include/linux/raid/pq.h19
-rw-r--r--include/linux/rcutiny.h5
-rw-r--r--include/linux/rcutree.h11
-rw-r--r--include/linux/regulator/consumer.h2
-rw-r--r--include/linux/regulator/machine.h6
-rw-r--r--include/linux/regulator/max8660.h57
-rw-r--r--include/linux/reiserfs_fs.h61
-rw-r--r--include/linux/resource.h8
-rw-r--r--include/linux/rmap.h43
-rw-r--r--include/linux/rtmutex.h6
-rw-r--r--include/linux/rwlock.h125
-rw-r--r--include/linux/rwlock_api_smp.h282
-rw-r--r--include/linux/rwlock_types.h56
-rw-r--r--include/linux/rwsem-spinlock.h6
-rw-r--r--include/linux/sched.h81
-rw-r--r--include/linux/security.h7
-rw-r--r--include/linux/sem.h5
-rw-r--r--include/linux/serio.h19
-rw-r--r--include/linux/shmem_fs.h16
-rw-r--r--include/linux/slab_def.h4
-rw-r--r--include/linux/slub_def.h4
-rw-r--r--include/linux/sm501-regs.h2
-rw-r--r--include/linux/sonypi.h1
-rw-r--r--include/linux/spi/dw_spi.h212
-rw-r--r--include/linux/spi/sh_msiof.h10
-rw-r--r--include/linux/spi/xilinx_spi.h20
-rw-r--r--include/linux/spinlock.h377
-rw-r--r--include/linux/spinlock_api_smp.h360
-rw-r--r--include/linux/spinlock_api_up.h66
-rw-r--r--include/linux/spinlock_types.h92
-rw-r--r--include/linux/spinlock_types_up.h12
-rw-r--r--include/linux/spinlock_up.h42
-rw-r--r--include/linux/string.h15
-rw-r--r--include/linux/sunrpc/debug.h3
-rw-r--r--include/linux/sunrpc/rpc_rdma.h2
-rw-r--r--include/linux/sunrpc/sched.h16
-rw-r--r--include/linux/sunrpc/svc.h7
-rw-r--r--include/linux/swap.h67
-rw-r--r--include/linux/syscalls.h8
-rw-r--r--include/linux/sysfs.h9
-rw-r--r--include/linux/timb_gpio.h37
-rw-r--r--include/linux/topology.h2
-rw-r--r--include/linux/trace_seq.h7
-rw-r--r--include/linux/tracehook.h7
-rw-r--r--include/linux/tty.h6
-rw-r--r--include/linux/uaccess.h4
-rw-r--r--include/linux/usb.h1
-rw-r--r--include/linux/usb/serial.h3
-rw-r--r--include/linux/usbdevice_fs.h26
-rw-r--r--include/linux/vermagic.h2
-rw-r--r--include/linux/videodev2.h123
-rw-r--r--include/linux/vmstat.h12
-rw-r--r--include/linux/vt.h19
-rw-r--r--include/linux/writeback.h4
-rw-r--r--include/linux/xattr.h13
199 files changed, 5075 insertions, 5019 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index f72914db2a11..756f831cbdd5 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -118,6 +118,7 @@ header-y += mtio.h
 header-y += ncp_no.h
 header-y += neighbour.h
 header-y += net_dropmon.h
+header-y += net_tstamp.h
 header-y += netfilter_arp.h
 header-y += netrom.h
 header-y += nfs2.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index dfcd920c3e54..b926afe8c03e 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -80,7 +80,7 @@ char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
 void __acpi_unmap_table(char *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
-int acpi_boot_table_init (void);
+void acpi_boot_table_init (void);
 int acpi_mps_check (void);
 int acpi_numa_init (void);
 
@@ -240,7 +240,7 @@ extern int pnpacpi_disabled;
 #define PXM_INVAL	(-1)
 #define NID_INVAL	(-1)
 
-int acpi_check_resource_conflict(struct resource *res);
+int acpi_check_resource_conflict(const struct resource *res);
 
 int acpi_check_region(resource_size_t start, resource_size_t n,
 		      const char *name);
@@ -251,12 +251,19 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
 void __init acpi_no_s4_hw_signature(void);
 void __init acpi_old_suspend_ordering(void);
 void __init acpi_s4_no_nvs(void);
+void __init acpi_set_sci_en_on_resume(void);
 #endif /* CONFIG_PM_SLEEP */
 
+struct acpi_osc_context {
+	char *uuid_str;			/* uuid string */
+	int rev;
+	struct acpi_buffer cap;		/* arg2/arg3 */
+	struct acpi_buffer ret;		/* free by caller if success */
+};
+
 #define OSC_QUERY_TYPE			0
 #define OSC_SUPPORT_TYPE		1
 #define OSC_CONTROL_TYPE		2
-#define OSC_SUPPORT_MASKS		0x1f
 
 /* _OSC DW0 Definition */
 #define OSC_QUERY_ENABLE		1
@@ -265,12 +272,23 @@ void __init acpi_s4_no_nvs(void);
 #define OSC_INVALID_REVISION_ERROR	8
 #define OSC_CAPABILITIES_MASK_ERROR	16
 
+acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context);
+
+/* platform-wide _OSC bits */
+#define OSC_SB_PAD_SUPPORT		1
+#define OSC_SB_PPC_OST_SUPPORT		2
+#define OSC_SB_PR3_SUPPORT		4
+#define OSC_SB_CPUHP_OST_SUPPORT	8
+#define OSC_SB_APEI_SUPPORT		16
+
+/* PCI defined _OSC bits */
 /* _OSC DW1 Definition (OS Support Fields) */
 #define OSC_EXT_PCI_CONFIG_SUPPORT	1
 #define OSC_ACTIVE_STATE_PWR_SUPPORT	2
 #define OSC_CLOCK_PWR_CAPABILITY_SUPPORT	4
 #define OSC_PCI_SEGMENT_GROUPS_SUPPORT	8
 #define OSC_MSI_SUPPORT			16
+#define OSC_PCI_SUPPORT_MASKS		0x1f
 
 /* _OSC DW1 Definition (OS Control Fields) */
 #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL	1
@@ -279,7 +297,7 @@ void __init acpi_s4_no_nvs(void);
 #define OSC_PCI_EXPRESS_AER_CONTROL	8
 #define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL	16
 
-#define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
+#define OSC_PCI_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \
 				OSC_SHPC_NATIVE_HP_CONTROL | \
 				OSC_PCI_EXPRESS_PME_CONTROL | \
 				OSC_PCI_EXPRESS_AER_CONTROL | \
@@ -303,9 +321,9 @@ static inline int acpi_boot_init(void)
 	return 0;
 }
 
-static inline int acpi_boot_table_init(void)
+static inline void acpi_boot_table_init(void)
 {
-	return 0;
+	return;
 }
 
 static inline int acpi_mps_check(void)
diff --git a/include/linux/aio.h b/include/linux/aio.h
index aea219d7d8d1..811dbb369379 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -102,7 +102,6 @@ struct kiocb {
 	} ki_obj;
 
 	__u64			ki_user_data;	/* user's data for completion */
-	wait_queue_t		ki_wait;
 	loff_t			ki_pos;
 
 	void			*private;
@@ -140,7 +139,6 @@ struct kiocb {
 		(x)->ki_dtor = NULL;		\
 		(x)->ki_obj.tsk = tsk;		\
 		(x)->ki_user_data = 0;		\
-		init_wait((&(x)->ki_wait));	\
 	} while (0)
 
 #define AIO_RING_MAGIC			0xa10a10a1
@@ -223,8 +221,6 @@ struct mm_struct;
 static inline void exit_aio(struct mm_struct *mm) { }
 #endif /* CONFIG_AIO */
 
-#define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
-
 static inline struct kiocb *list_kiocb(struct list_head *h)
 {
 	return list_entry(h, struct kiocb, ki_list);
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 57b1846a3c87..3e09b345f4d6 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,8 +3,6 @@
 
 #define ATMEL_MCI_MAX_NR_SLOTS	2
 
-#include <linux/dw_dmac.h>
-
 /**
  * struct mci_slot_pdata - board-specific per-slot configuration
  * @bus_width: Number of data lines wired up the slot
@@ -34,7 +32,7 @@ struct mci_slot_pdata {
  * @slot: Per-slot configuration data.
  */
 struct mci_platform_data {
-	struct dw_dma_slave	dma_slave;
+	struct mci_dma_data	*dma_slave;
 	struct mci_slot_pdata	slot[ATMEL_MCI_MAX_NR_SLOTS];
 };
 
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 0f5f57858a23..8c4f884db6b4 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -36,18 +36,18 @@ struct backlight_device;
 struct fb_info;
 
 struct backlight_ops {
-	unsigned int options;
+	const unsigned int options;
 
 #define BL_CORE_SUSPENDRESUME	(1 << 0)
 
 	/* Notify the backlight driver some property has changed */
-	int (*update_status)(struct backlight_device *);
+	int (* const update_status)(struct backlight_device *);
 	/* Return the current backlight brightness (accounting for power,
 	   fb_blank etc.) */
-	int (*get_brightness)(struct backlight_device *);
+	int (* const get_brightness)(struct backlight_device *);
 	/* Check if given framebuffer device is the one bound to this backlight;
 	   return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
-	int (*check_fb)(struct fb_info *);
+	int (* const check_fb)(struct fb_info *);
 };
 
 /* This structure defines all the properties of a backlight */
@@ -86,7 +86,7 @@ struct backlight_device {
 	   registered this device has been unloaded, and if class_get_devdata()
 	   points to something in the body of that driver, it is also invalid. */
 	struct mutex ops_lock;
-	struct backlight_ops *ops;
+	const struct backlight_ops *ops;
 
 	/* The framebuffer notifier block */
 	struct notifier_block fb_notif;
@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
 }
 
 extern struct backlight_device *backlight_device_register(const char *name,
-	struct device *dev, void *devdata, struct backlight_ops *ops);
+	struct device *dev, void *devdata, const struct backlight_ops *ops);
 extern void backlight_device_unregister(struct backlight_device *bd);
 extern void backlight_force_update(struct backlight_device *bd,
 				   enum backlight_update_reason reason);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index aece486ac734..cd4349bdc34e 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -68,6 +68,14 @@ struct linux_binprm{
 
 #define BINPRM_MAX_RECURSION 4
 
+/* Function parameter for binfmt->coredump */
+struct coredump_params {
+	long signr;
+	struct pt_regs *regs;
+	struct file *file;
+	unsigned long limit;
+};
+
 /*
  * This structure defines the functions that are used to load the binary formats that
  * linux accepts.
@@ -77,7 +85,7 @@ struct linux_binfmt {
 	struct module *module;
 	int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
 	int (*load_shlib)(struct file *);
-	int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
+	int (*core_dump)(struct coredump_params *cprm);
 	unsigned long min_coredump;	/* minimal dump size */
 	int hasvdso;
 };
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 756d78b8c1c5..daf8c480c786 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -42,6 +42,9 @@
  * bitmap_empty(src, nbits)			Are all bits zero in *src?
  * bitmap_full(src, nbits)			Are all bits set in *src?
  * bitmap_weight(src, nbits)			Hamming Weight: number set bits
+ * bitmap_set(dst, pos, nbits)			Set specified bit area
+ * bitmap_clear(dst, pos, nbits)		Clear specified bit area
+ * bitmap_find_next_zero_area(buf, len, pos, n, mask)	Find bit free area
  * bitmap_shift_right(dst, src, n, nbits)	*dst = *src >> n
  * bitmap_shift_left(dst, src, n, nbits)	*dst = *src << n
  * bitmap_remap(dst, src, old, new, nbits)	*dst = map(old, new)(src)
@@ -108,6 +111,14 @@ extern int __bitmap_subset(const unsigned long *bitmap1,
 			const unsigned long *bitmap2, int bits);
 extern int __bitmap_weight(const unsigned long *bitmap, int bits);
 
+extern void bitmap_set(unsigned long *map, int i, int len);
+extern void bitmap_clear(unsigned long *map, int start, int nr);
+extern unsigned long bitmap_find_next_zero_area(unsigned long *map,
+					 unsigned long size,
+					 unsigned long start,
+					 unsigned int nr,
+					 unsigned long align_mask);
+
 extern int bitmap_scnprintf(char *buf, unsigned int len,
 			const unsigned long *src, int nbits);
 extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 784a919aa0d0..5c8018977efa 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -845,7 +845,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
  * blk_rq_err_bytes()		: bytes left till the next error boundary
  * blk_rq_sectors()		: sectors left in the entire request
  * blk_rq_cur_sectors()	: sectors left in the current segment
- * blk_rq_err_sectors()	: sectors left till the next error boundary
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -874,11 +873,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
-static inline unsigned int blk_rq_err_sectors(const struct request *rq)
-{
-	return blk_rq_err_bytes(rq) >> 9;
-}
-
 /*
  * Request issue related functions.
  */
@@ -944,6 +938,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+			    sector_t offset);
 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
@@ -1116,11 +1112,18 @@ static inline int queue_alignment_offset(struct request_queue *q)
 	return q->limits.alignment_offset;
 }
 
+static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
+{
+	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+
+	offset &= granularity - 1;
+	return (granularity + lim->alignment_offset - offset) & (granularity - 1);
+}
+
 static inline int queue_sector_alignment_offset(struct request_queue *q,
 						sector_t sector)
 {
-	return ((sector << 9) - q->limits.alignment_offset)
-		& (q->limits.io_min - 1);
+	return queue_limit_alignment_offset(&q->limits, sector << 9);
 }
 
 static inline int bdev_alignment_offset(struct block_device *bdev)
@@ -1147,8 +1150,11 @@ static inline int queue_discard_alignment(struct request_queue *q)
 static inline int queue_sector_discard_alignment(struct request_queue *q,
 						 sector_t sector)
 {
-	return ((sector << 9) - q->limits.discard_alignment)
-		& (q->limits.discard_granularity - 1);
+	struct queue_limits *lim = &q->limits;
+	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
+	return (lim->discard_granularity + lim->discard_alignment - alignment)
+		& (lim->discard_granularity - 1);
 }
 
 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 47536197ffdd..e287863ac053 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -43,6 +43,8 @@ extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
 
 #ifdef CONFIG_HOTPLUG_CPU
 extern void unregister_cpu(struct cpu *cpu);
+extern ssize_t arch_cpu_probe(const char *, size_t);
+extern ssize_t arch_cpu_release(const char *, size_t);
 #endif
 struct notifier_block;
 
@@ -115,6 +117,19 @@ extern void put_online_cpus(void);
 #define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
 int cpu_down(unsigned int cpu);
 
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+extern void cpu_hotplug_driver_lock(void);
+extern void cpu_hotplug_driver_unlock(void);
+#else
+static inline void cpu_hotplug_driver_lock(void)
+{
+}
+
+static inline void cpu_hotplug_driver_unlock(void)
+{
+}
+#endif
+
 #else		/* CONFIG_HOTPLUG_CPU */
 
 #define get_online_cpus()	do { } while (0)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 789cf5f920ce..d77b54733c5b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -84,6 +84,7 @@ extern const struct cpumask *const cpu_active_mask;
 #define num_online_cpus()	cpumask_weight(cpu_online_mask)
 #define num_possible_cpus()	cpumask_weight(cpu_possible_mask)
 #define num_present_cpus()	cpumask_weight(cpu_present_mask)
+#define num_active_cpus()	cpumask_weight(cpu_active_mask)
 #define cpu_online(cpu)		cpumask_test_cpu((cpu), cpu_online_mask)
 #define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
 #define cpu_present(cpu)	cpumask_test_cpu((cpu), cpu_present_mask)
@@ -92,6 +93,7 @@ extern const struct cpumask *const cpu_active_mask;
 #define num_online_cpus()	1
 #define num_possible_cpus()	1
 #define num_present_cpus()	1
+#define num_active_cpus()	1
 #define cpu_online(cpu)		((cpu) == 0)
 #define cpu_possible(cpu)	((cpu) == 0)
 #define cpu_present(cpu)	((cpu) == 0)
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
new file mode 100644
index 000000000000..d5a1d4810b80
--- /dev/null
+++ b/include/linux/cs5535.h
@@ -0,0 +1,172 @@
+/*
+ * AMD CS5535/CS5536 definitions
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef _CS5535_H
+#define _CS5535_H
+
+/* MSRs */
+#define MSR_GLIU_P2D_RO0	0x10000029
+
+#define MSR_LX_GLD_MSR_CONFIG	0x48002001
+#define MSR_LX_MSR_PADSEL	0x48002011	/* NOT 0x48000011; the data
+						 * sheet has the wrong value */
+#define MSR_GLCP_SYS_RSTPLL	0x4C000014
+#define MSR_GLCP_DOTPLL		0x4C000015
+
+#define MSR_LBAR_SMB		0x5140000B
+#define MSR_LBAR_GPIO		0x5140000C
+#define MSR_LBAR_MFGPT		0x5140000D
+#define MSR_LBAR_ACPI		0x5140000E
+#define MSR_LBAR_PMS		0x5140000F
+
+#define MSR_DIVIL_SOFT_RESET	0x51400017
+
+#define MSR_PIC_YSEL_LOW	0x51400020
+#define MSR_PIC_YSEL_HIGH	0x51400021
+#define MSR_PIC_ZSEL_LOW	0x51400022
+#define MSR_PIC_ZSEL_HIGH	0x51400023
+#define MSR_PIC_IRQM_LPC	0x51400025
+
+#define MSR_MFGPT_IRQ		0x51400028
+#define MSR_MFGPT_NR		0x51400029
+#define MSR_MFGPT_SETUP		0x5140002B
+
+#define MSR_LX_SPARE_MSR	0x80000011	/* DC-specific */
+
+#define MSR_GX_GLD_MSR_CONFIG	0xC0002001
+#define MSR_GX_MSR_PADSEL	0xC0002011
+
+/* resource sizes */
+#define LBAR_GPIO_SIZE		0xFF
+#define LBAR_MFGPT_SIZE		0x40
+#define LBAR_ACPI_SIZE		0x40
+#define LBAR_PMS_SIZE		0x80
+
+/* VSA2 magic values */
+#define VSA_VRC_INDEX		0xAC1C
+#define VSA_VRC_DATA		0xAC1E
+#define VSA_VR_UNLOCK		0xFC53	/* unlock virtual register */
+#define VSA_VR_SIGNATURE	0x0003
+#define VSA_VR_MEM_SIZE		0x0200
+#define AMD_VSA_SIG		0x4132	/* signature is ascii 'VSA2' */
+#define GSW_VSA_SIG		0x534d	/* General Software signature */
+
+#include <linux/io.h>
+
+static inline int cs5535_has_vsa2(void)
+{
+	static int has_vsa2 = -1;
+
+	if (has_vsa2 == -1) {
+		uint16_t val;
+
+		/*
+		 * The VSA has virtual registers that we can query for a
+		 * signature.
+		 */
+		outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+		outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+		val = inw(VSA_VRC_DATA);
+		has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG);
+	}
+
+	return has_vsa2;
+}
+
+/* GPIOs */
+#define GPIO_OUTPUT_VAL		0x00
+#define GPIO_OUTPUT_ENABLE	0x04
+#define GPIO_OUTPUT_OPEN_DRAIN	0x08
+#define GPIO_OUTPUT_INVERT	0x0C
+#define GPIO_OUTPUT_AUX1	0x10
+#define GPIO_OUTPUT_AUX2	0x14
+#define GPIO_PULL_UP		0x18
+#define GPIO_PULL_DOWN		0x1C
+#define GPIO_INPUT_ENABLE	0x20
+#define GPIO_INPUT_INVERT	0x24
+#define GPIO_INPUT_FILTER	0x28
+#define GPIO_INPUT_EVENT_COUNT	0x2C
+#define GPIO_READ_BACK		0x30
+#define GPIO_INPUT_AUX1		0x34
+#define GPIO_EVENTS_ENABLE	0x38
+#define GPIO_LOCK_ENABLE	0x3C
+#define GPIO_POSITIVE_EDGE_EN	0x40
+#define GPIO_NEGATIVE_EDGE_EN	0x44
+#define GPIO_POSITIVE_EDGE_STS	0x48
+#define GPIO_NEGATIVE_EDGE_STS	0x4C
+
+#define GPIO_MAP_X		0xE0
+#define GPIO_MAP_Y		0xE4
+#define GPIO_MAP_Z		0xE8
+#define GPIO_MAP_W		0xEC
+
+void cs5535_gpio_set(unsigned offset, unsigned int reg);
+void cs5535_gpio_clear(unsigned offset, unsigned int reg);
+int cs5535_gpio_isset(unsigned offset, unsigned int reg);
+
+/* MFGPTs */
+
+#define MFGPT_MAX_TIMERS	8
+#define MFGPT_TIMER_ANY		(-1)
+
+#define MFGPT_DOMAIN_WORKING	1
+#define MFGPT_DOMAIN_STANDBY	2
+#define MFGPT_DOMAIN_ANY	(MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY)
+
+#define MFGPT_CMP1		0
+#define MFGPT_CMP2		1
+
+#define MFGPT_EVENT_IRQ		0
+#define MFGPT_EVENT_NMI		1
+#define MFGPT_EVENT_RESET	3
+
+#define MFGPT_REG_CMP1		0
+#define MFGPT_REG_CMP2		2
+#define MFGPT_REG_COUNTER	4
+#define MFGPT_REG_SETUP		6
+
+#define MFGPT_SETUP_CNTEN	(1 << 15)
+#define MFGPT_SETUP_CMP2	(1 << 14)
+#define MFGPT_SETUP_CMP1	(1 << 13)
+#define MFGPT_SETUP_SETUP	(1 << 12)
+#define MFGPT_SETUP_STOPEN	(1 << 11)
+#define MFGPT_SETUP_EXTEN	(1 << 10)
+#define MFGPT_SETUP_REVEN	(1 << 5)
+#define MFGPT_SETUP_CLKSEL	(1 << 4)
+
+struct cs5535_mfgpt_timer;
+
+extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer,
+		uint16_t reg);
+extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
+		uint16_t value);
+
+extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
+		int event, int enable);
+extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp,
+		int *irq, int enable);
+extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer,
+		int domain);
+extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer);
+
+static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer,
+		int cmp, int *irq)
+{
+	return cs5535_mfgpt_set_irq(timer, cmp, irq, 1);
+}
+
+static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer,
+		int cmp, int *irq)
+{
+	return cs5535_mfgpt_set_irq(timer, cmp, irq, 0);
+}
+
+#endif
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index afa36392297a..a3d6ee0044f9 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -15,7 +15,7 @@
 #define _X	0x40	/* hex digit */
 #define _SP	0x80	/* hard space (0x20) */
 
-extern unsigned char _ctype[];
+extern const unsigned char _ctype[];
 
 #define __ismask(x) (_ctype[(int)(unsigned char)(x)])
 
@@ -27,6 +27,7 @@ extern unsigned char _ctype[];
 #define islower(c)	((__ismask(c)&(_L)) != 0)
 #define isprint(c)	((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
 #define ispunct(c)	((__ismask(c)&(_P)) != 0)
+/* Note: isspace() must return false for %NUL-terminator */
 #define isspace(c)	((__ismask(c)&(_S)) != 0)
 #define isupper(c)	((__ismask(c)&(_U)) != 0)
 #define isxdigit(c)	((__ismask(c)&(_D|_X)) != 0)
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 12ff8c3f1d05..5032b9a31ae7 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -25,7 +25,7 @@ static void *malloc(int size)
 	void *p;
 
 	if (size < 0)
-		error("Malloc error");
+		return NULL;
 	if (!malloc_ptr)
 		malloc_ptr = free_mem_ptr;
 
@@ -35,7 +35,7 @@ static void *malloc(int size)
 	malloc_ptr += size;
 
 	if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
-		error("Out of memory");
+		return NULL;
 
 	malloc_count++;
 	return p;
diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h
new file mode 100644
index 000000000000..987229752519
--- /dev/null
+++ b/include/linux/decompress/unlzo.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_UNLZO_H
+#define DECOMPRESS_UNLZO_H
+
+int unlzo(unsigned char *inbuf, int len,
+	int(*fill)(void*, unsigned int),
+	int(*flush)(void*, unsigned int),
+	unsigned char *output,
+	int *pos,
+	void(*error)(char *x));
+#endif
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index df7607e6dce8..d4c9c0b88adc 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -235,7 +235,7 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
 const char *dm_device_name(struct mapped_device *md);
 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
 struct gendisk *dm_disk(struct mapped_device *md);
-int dm_suspended(struct mapped_device *md);
+int dm_suspended(struct dm_target *ti);
 int dm_noflush_suspending(struct dm_target *ti);
 union map_info *dm_get_mapinfo(struct bio *bio);
 union map_info *dm_get_rq_mapinfo(struct request *rq);
@@ -276,7 +276,7 @@ void dm_table_unplug_all(struct dm_table *t);
 /*
  * Table reference counting.
  */
-struct dm_table *dm_get_table(struct mapped_device *md);
+struct dm_table *dm_get_live_table(struct mapped_device *md);
 void dm_table_get(struct dm_table *t);
 void dm_table_put(struct dm_table *t);
 
@@ -295,8 +295,10 @@ void dm_table_event(struct dm_table *t);
 
 /*
  * The device must be suspended before calling this method.
+ * Returns the previous table, which the caller must destroy.
  */
-int dm_swap_table(struct mapped_device *md, struct dm_table *t);
+struct dm_table *dm_swap_table(struct mapped_device *md,
+			       struct dm_table *t);
 
 /*
  * A wrapper around vmalloc.
diff --git a/include/linux/device.h b/include/linux/device.h
index 2a73d9bcbc9c..a62799f2ab00 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -166,9 +166,9 @@ struct driver_attribute driver_attr_##_name = \
 	__ATTR(_name, _mode, _show, _store)
 
 extern int __must_check driver_create_file(struct device_driver *driver,
-					   struct driver_attribute *attr);
+					   const struct driver_attribute *attr);
 extern void driver_remove_file(struct device_driver *driver,
-			       struct driver_attribute *attr);
+			       const struct driver_attribute *attr);
 
 extern int __must_check driver_add_kobj(struct device_driver *drv,
 					struct kobject *kobj,
@@ -319,13 +319,13 @@ struct device_attribute {
 struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
 
 extern int __must_check device_create_file(struct device *device,
-					   struct device_attribute *entry);
+					   const struct device_attribute *entry);
 extern void device_remove_file(struct device *dev,
-			       struct device_attribute *attr);
+			       const struct device_attribute *attr);
 extern int __must_check device_create_bin_file(struct device *dev,
-					       struct bin_attribute *attr);
+					       const struct bin_attribute *attr);
 extern void device_remove_bin_file(struct device *dev,
-				   struct bin_attribute *attr);
+				   const struct bin_attribute *attr);
 extern int device_schedule_callback_owner(struct device *dev,
 		void (*func)(struct device *dev), struct module *owner);
 
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 5e8b11d88f6f..7084503c3405 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -21,6 +21,7 @@ struct dm_dirty_log_type;
 
 struct dm_dirty_log {
 	struct dm_dirty_log_type *type;
+	int (*flush_callback_fn)(struct dm_target *ti);
 	void *context;
 };
 
@@ -136,8 +137,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
  * type->constructor/destructor() directly.
  */
 struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
 					 struct dm_target *ti,
-					 unsigned argc, char **argv);
+					 int (*flush_callback_fn)(struct dm_target *ti),
+					 unsigned argc, char **argv);
 void dm_dirty_log_destroy(struct dm_dirty_log *log);
 
 #endif	/* __KERNEL__ */
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 2ab84c83c31a..aa95508d2f95 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited.
- * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved.
  *
  * This file is released under the LGPL.
  */
@@ -266,9 +266,9 @@ enum {
 #define DM_DEV_SET_GEOMETRY	_IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
 
 #define DM_VERSION_MAJOR	4
-#define DM_VERSION_MINOR	15
+#define DM_VERSION_MINOR	16
 #define DM_VERSION_PATCHLEVEL	0
-#define DM_VERSION_EXTRA	"-ioctl (2009-04-01)"
+#define DM_VERSION_EXTRA	"-ioctl (2009-11-05)"
 
 /* Status bits */
 #define DM_READONLY_FLAG	(1 << 0) /* In/Out */
@@ -309,4 +309,11 @@ enum {
  */
 #define DM_NOFLUSH_FLAG		(1 << 11) /* In */
 
+/*
+ * If set, any table information returned will relate to the inactive
+ * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG
+ * is set before using the data returned.
+ */
+#define DM_QUERY_INACTIVE_TABLE_FLAG	(1 << 12) /* In */
+
 #endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index a9e652a41373..9e2a7a401df5 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -78,8 +78,7 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region);
 /* Delay bios on regions. */
 void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);
 
-void dm_rh_mark_nosync(struct dm_region_hash *rh,
-		       struct bio *bio, unsigned done, int error);
+void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);
 
 /*
  * Region recovery control.
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 2b9f2ac7ed60..78784982b33e 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -74,7 +74,7 @@ enum dma_transaction_type {
  * control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  *  this transaction
- * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
+ * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  *  acknowledges receipt, i.e. has has a chance to establish any dependency
  *  chains
  * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index e84f4733cb55..78962272338a 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.6"
+#define REL_VERSION "8.3.7"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 91
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index db5721ad50d1..a4d82f895994 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -69,6 +69,7 @@ NL_PACKET(disconnect, 6, )
 
 NL_PACKET(resize, 7,
 	NL_INT64( 29, T_MAY_IGNORE, resize_size)
+	NL_BIT( 68, T_MAY_IGNORE, resize_force)
 )
 
 NL_PACKET(syncer_conf, 8,
diff --git a/include/linux/dst.h b/include/linux/dst.h
deleted file mode 100644
index e26fed84b1aa..000000000000
--- a/include/linux/dst.h
+++ /dev/null
@@ -1,587 +0,0 @@
1/*
2 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef __DST_H
17#define __DST_H
18
19#include <linux/types.h>
20#include <linux/connector.h>
21
22#define DST_NAMELEN 32
23#define DST_NAME "dst"
24
25enum {
26 /* Remove node with given id from storage */
27 DST_DEL_NODE = 0,
28 /* Add remote node with given id to the storage */
29 DST_ADD_REMOTE,
30 /* Add local node with given id to the storage to be exported and used by remote peers */
31 DST_ADD_EXPORT,
32 /* Crypto initialization command (hash/cipher used to protect the connection) */
33 DST_CRYPTO,
34 /* Security attributes for given connection (permissions for example) */
35 DST_SECURITY,
36 /* Register given node in the block layer subsystem */
37 DST_START,
38 DST_CMD_MAX
39};
40
41struct dst_ctl
42{
43 /* Storage name */
44 char name[DST_NAMELEN];
45 /* Command flags */
46 __u32 flags;
47 /* Command itself (see above) */
48 __u32 cmd;
49 /* Maximum number of pages per single request in this device */
50 __u32 max_pages;
51 /* Stale/error transaction scanning timeout in milliseconds */
52 __u32 trans_scan_timeout;
53 /* Maximum number of retry sends before completing transaction as broken */
54 __u32 trans_max_retries;
55 /* Storage size */
56 __u64 size;
57};
58
59/* Reply command carries completion status */
60struct dst_ctl_ack
61{
62 struct cn_msg msg;
63 int error;
64 int unused[3];
65};
66
67/*
68 * Unfortunaltely socket address structure is not exported to userspace
69 * and is redefined there.
70 */
71#define SADDR_MAX_DATA 128
72
73struct saddr {
74 /* address family, AF_xxx */
75 unsigned short sa_family;
76 /* 14 bytes of protocol address */
77 char sa_data[SADDR_MAX_DATA];
78 /* Number of bytes used in sa_data */
79 unsigned short sa_data_len;
80};
81
82/* Address structure */
83struct dst_network_ctl
84{
85 /* Socket type: datagram, stream...*/
86 unsigned int type;
87 /* Let me guess, is it a Jupiter diameter? */
88 unsigned int proto;
89 /* Peer's address */
90 struct saddr addr;
91};
92
93struct dst_crypto_ctl
94{
95 /* Cipher and hash names */
96 char cipher_algo[DST_NAMELEN];
97 char hash_algo[DST_NAMELEN];
98
99 /* Key sizes. Can be zero for digest for example */
100 unsigned int cipher_keysize, hash_keysize;
101 /* Alignment. Calculated by the DST itself. */
102 unsigned int crypto_attached_size;
103 /* Number of threads to perform crypto operations */
104 int thread_num;
105};
106
107/* Export security attributes have this bits checked in when client connects */
108#define DST_PERM_READ (1<<0)
109#define DST_PERM_WRITE (1<<1)
110
111/*
112 * Right now it is simple model, where each remote address
113 * is assigned to set of permissions it is allowed to perform.
114 * In real world block device does not know anything but
115 * reading and writing, so it should be more than enough.
116 */
117struct dst_secure_user
118{
119 unsigned int permissions;
120 struct saddr addr;
121};
122
123/*
124 * Export control command: device to export and network address to accept
125 * clients to work with given device
126 */
127struct dst_export_ctl
128{
129 char device[DST_NAMELEN];
130 struct dst_network_ctl ctl;
131};
132
133enum {
134 DST_CFG = 1, /* Request remote configuration */
135 DST_IO, /* IO command */
136 DST_IO_RESPONSE, /* IO response */
137 DST_PING, /* Keepalive message */
138 DST_NCMD_MAX,
139};
140
141struct dst_cmd
142{
143 /* Network command itself, see above */
144 __u32 cmd;
145 /*
146 * Size of the attached data
147 * (in most cases, for READ command it means how many bytes were requested)
148 */
149 __u32 size;
150 /* Crypto size: number of attached bytes with digest/hmac */
151 __u32 csize;
152 /* Here we can carry secret data */
153 __u32 reserved;
154 /* Read/write bits, see how they are encoded in bio structure */
155 __u64 rw;
156 /* BIO flags */
157 __u64 flags;
158 /* Unique command id (like transaction ID) */
159 __u64 id;
160 /* Sector to start IO from */
161 __u64 sector;
162 /* Hash data is placed after this header */
163 __u8 hash[0];
164};
165
166/*
167 * Convert command to/from network byte order.
168 * We do not use hton*() functions, since there is
169 * no 64-bit implementation.
170 */
171static inline void dst_convert_cmd(struct dst_cmd *c)
172{
173 c->cmd = __cpu_to_be32(c->cmd);
174 c->csize = __cpu_to_be32(c->csize);
175 c->size = __cpu_to_be32(c->size);
176 c->sector = __cpu_to_be64(c->sector);
177 c->id = __cpu_to_be64(c->id);
178 c->flags = __cpu_to_be64(c->flags);
179 c->rw = __cpu_to_be64(c->rw);
180}
181
182/* Transaction id */
183typedef __u64 dst_gen_t;
184
185#ifdef __KERNEL__
186
187#include <linux/blkdev.h>
188#include <linux/bio.h>
189#include <linux/device.h>
190#include <linux/mempool.h>
191#include <linux/net.h>
192#include <linux/poll.h>
193#include <linux/rbtree.h>
194
195#ifdef CONFIG_DST_DEBUG
196#define dprintk(f, a...) printk(KERN_NOTICE f, ##a)
197#else
198static inline void __attribute__ ((format (printf, 1, 2)))
199 dprintk(const char *fmt, ...) {}
200#endif
201
202struct dst_node;
203
204struct dst_trans
205{
206 /* DST node we are working with */
207 struct dst_node *n;
208
209 /* Entry inside transaction tree */
210 struct rb_node trans_entry;
211
212 /* Merlin kills this transaction when this memory cell equals zero */
213 atomic_t refcnt;
214
215 /* How this transaction should be processed by crypto engine */
216 short enc;
217 /* How many times this transaction was resent */
218 short retries;
219 /* Completion status */
220 int error;
221
222 /* When did we send it to the remote peer */
223 long send_time;
224
225 /* My name is...
226 * Well, computers does not speak, they have unique id instead */
227 dst_gen_t gen;
228
229 /* Block IO we are working with */
230 struct bio *bio;
231
232 /* Network command for above block IO request */
233 struct dst_cmd cmd;
234};
235
236struct dst_crypto_engine
237{
238 /* What should we do with all block requests */
239 struct crypto_hash *hash;
240 struct crypto_ablkcipher *cipher;
241
242 /* Pool of pages used to encrypt data into before sending */
243 int page_num;
244 struct page **pages;
245
246 /* What to do with current request */
247 int enc;
248 /* Who we are and where do we go */
249 struct scatterlist *src, *dst;
250
251 /* Maximum timeout waiting for encryption to be completed */
252 long timeout;
253 /* IV is a 64-bit sequential counter */
254 u64 iv;
255
256 /* Secret data */
257 void *private;
258
259 /* Cached temporary data lives here */
260 int size;
261 void *data;
262};
263
264struct dst_state
265{
266 /* The main state protection */
267 struct mutex state_lock;
268
269 /* Polling machinery for sockets */
270 wait_queue_t wait;
271 wait_queue_head_t *whead;
272 /* Most of events are being waited here */
273 wait_queue_head_t thread_wait;
274
275 /* Who owns this? */
276 struct dst_node *node;
277
278 /* Network address for this state */
279 struct dst_network_ctl ctl;
280
281 /* Permissions to work with: read-only or rw connection */
282 u32 permissions;
283
284 /* Called when we need to clean private data */
285 void (* cleanup)(struct dst_state *st);
286
287 /* Used by the server: BIO completion queues BIOs here */
288 struct list_head request_list;
289 spinlock_t request_lock;
290
291 /* Guess what? No, it is not number of planets */
292 atomic_t refcnt;
293
294 /* This flags is set when connection should be dropped */
295 int need_exit;
296
297 /*
298 * Socket to work with. Second pointer is used for
299 * lockless check if socket was changed before performing
300 * next action (like working with cached polling result)
301 */
302 struct socket *socket, *read_socket;
303
304 /* Cached preallocated data */
305 void *data;
306 unsigned int size;
307
308 /* Currently processed command */
309 struct dst_cmd cmd;
310};
311
312struct dst_info
313{
314 /* Device size */
315 u64 size;
316
317 /* Local device name for export devices */
318 char local[DST_NAMELEN];
319
320 /* Network setup */
321 struct dst_network_ctl net;
322
323 /* Sysfs bits use this */
324 struct device device;
325};
326
327struct dst_node
328{
329 struct list_head node_entry;
330
331 /* Hi, my name is stored here */
332 char name[DST_NAMELEN];
333 /* My cache name is stored here */
334 char cache_name[DST_NAMELEN];
335
336 /* Block device attached to given node.
337 * Only valid for exporting nodes */
338 struct block_device *bdev;
339 /* Network state machine for given peer */
340 struct dst_state *state;
341
342 /* Block IO machinery */
343 struct request_queue *queue;
344 struct gendisk *disk;
345
346 /* Number of threads in processing pool */
347 int thread_num;
348 /* Maximum number of pages in single IO */
349 int max_pages;
350
351 /* I'm that big in bytes */
352 loff_t size;
353
354 /* Exported to userspace node information */
355 struct dst_info *info;
356
357 /*
358 * Security attribute list.
359 * Used only by exporting node currently.
360 */
361 struct list_head security_list;
362 struct mutex security_lock;
363
364 /*
365 * When this unerflows below zero, university collapses.
366 * But this will not happen, since node will be freed,
367 * when reference counter reaches zero.
368 */
369 atomic_t refcnt;
370
371 /* How precisely should I be started? */
372 int (*start)(struct dst_node *);
373
374 /* Crypto capabilities */
375 struct dst_crypto_ctl crypto;
376 u8 *hash_key;
377 u8 *cipher_key;
378
379 /* Pool of processing thread */
380 struct thread_pool *pool;
381
382 /* Transaction IDs live here */
383 atomic_long_t gen;
384
385 /*
386 * How frequently and how many times transaction
387 * tree should be scanned to drop stale objects.
388 */
389 long trans_scan_timeout;
390 int trans_max_retries;
391
392 /* Small gnomes live here */
393 struct rb_root trans_root;
394 struct mutex trans_lock;
395
396 /*
397 * Transaction cache/memory pool.
398 * It is big enough to contain not only transaction
399 * itself, but additional crypto data (digest/hmac).
400 */
401 struct kmem_cache *trans_cache;
402 mempool_t *trans_pool;
403
404 /* This entity scans transaction tree */
405 struct delayed_work trans_work;
406
407 wait_queue_head_t wait;
408};
409
410/* Kernel representation of the security attribute */
411struct dst_secure
412{
413 struct list_head sec_entry;
414 struct dst_secure_user sec;
415};
416
417int dst_process_bio(struct dst_node *n, struct bio *bio);
418
419int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r);
420int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le);
421
422static inline struct dst_state *dst_state_get(struct dst_state *st)
423{
424 BUG_ON(atomic_read(&st->refcnt) == 0);
425 atomic_inc(&st->refcnt);
426 return st;
427}
428
429void dst_state_put(struct dst_state *st);
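
A minimal sketch of the intended get/put pairing (not taken from the DST sources; example_borrow_state() is made up):

/* Take a private reference on a state, use it, and drop it again. */
static void example_borrow_state(struct dst_state *st)
{
	struct dst_state *ref = dst_state_get(st);	/* caller already holds a reference */

	/* ... work with ref->socket, ref->cmd and friends ... */

	dst_state_put(ref);				/* release our reference */
}
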
430
431struct dst_state *dst_state_alloc(struct dst_node *n);
432int dst_state_socket_create(struct dst_state *st);
433void dst_state_socket_release(struct dst_state *st);
434
435void dst_state_exit_connected(struct dst_state *st);
436
437int dst_state_schedule_receiver(struct dst_state *st);
438
439void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str);
440
441static inline void dst_state_lock(struct dst_state *st)
442{
443 mutex_lock(&st->state_lock);
444}
445
446static inline void dst_state_unlock(struct dst_state *st)
447{
448 mutex_unlock(&st->state_lock);
449}
450
451void dst_poll_exit(struct dst_state *st);
452int dst_poll_init(struct dst_state *st);
453
454static inline unsigned int dst_state_poll(struct dst_state *st)
455{
456 unsigned int revents = POLLHUP | POLLERR;
457
458 dst_state_lock(st);
459 if (st->socket)
460 revents = st->socket->ops->poll(NULL, st->socket, NULL);
461 dst_state_unlock(st);
462
463 return revents;
464}
465
466static inline int dst_thread_setup(void *private, void *data)
467{
468 return 0;
469}
470
471void dst_node_put(struct dst_node *n);
472
473static inline struct dst_node *dst_node_get(struct dst_node *n)
474{
475 atomic_inc(&n->refcnt);
476 return n;
477}
478
479int dst_data_recv(struct dst_state *st, void *data, unsigned int size);
480int dst_recv_cdata(struct dst_state *st, void *cdata);
481int dst_data_send_header(struct socket *sock,
482 void *data, unsigned int size, int more);
483
484int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio);
485
486int dst_process_io(struct dst_state *st);
487int dst_export_crypto(struct dst_node *n, struct bio *bio);
488int dst_export_send_bio(struct bio *bio);
489int dst_start_export(struct dst_node *n);
490
491int __init dst_export_init(void);
492void dst_export_exit(void);
493
494/* Private structure for export block IO requests */
495struct dst_export_priv
496{
497 struct list_head request_entry;
498 struct dst_state *state;
499 struct bio *bio;
500 struct dst_cmd cmd;
501};
502
503static inline void dst_trans_get(struct dst_trans *t)
504{
505 atomic_inc(&t->refcnt);
506}
507
508struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen);
509int dst_trans_remove(struct dst_trans *t);
510int dst_trans_remove_nolock(struct dst_trans *t);
511void dst_trans_put(struct dst_trans *t);
512
513/*
514 * Convert a bio into a network command.
515 */
516static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd,
517 u32 command, u64 id)
518{
519 cmd->cmd = command;
520 cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS;
521 cmd->rw = bio->bi_rw;
522 cmd->size = bio->bi_size;
523 cmd->csize = 0;
524 cmd->id = id;
525 cmd->sector = bio->bi_sector;
526}
527
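
A rough illustration of how a sender might use this helper together with dst_send_bio() (illustrative only; DST_IO is assumed to be the block-IO command code defined elsewhere in this header, and all transaction bookkeeping is omitted):

static int example_send_bio(struct dst_state *st, struct bio *bio, u64 id)
{
	struct dst_cmd cmd;

	/* Fill the wire header from the bio, then push header plus data. */
	dst_bio_to_cmd(bio, &cmd, DST_IO, id);
	return dst_send_bio(st, &cmd, bio);
}
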
528int dst_trans_send(struct dst_trans *t);
529int dst_trans_crypto(struct dst_trans *t);
530
531int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl);
532void dst_node_crypto_exit(struct dst_node *n);
533
534static inline int dst_need_crypto(struct dst_node *n)
535{
536 struct dst_crypto_ctl *c = &n->crypto;
537 /*
538	 * A logical OR would be appropriate here, but the bitwise OR
539	 * produces better code, so it is used instead.
540 */
541 return (c->hash_algo[0] | c->cipher_algo[0]);
542}
543
544int dst_node_trans_init(struct dst_node *n, unsigned int size);
545void dst_node_trans_exit(struct dst_node *n);
546
547/*
548 * Pool of threads.
549 * The ready list contains threads that are currently free to be used;
550 * the active list contains threads with work scheduled for them.
551 * Callers can wait on the given queue until a thread becomes ready.
552 */
553struct thread_pool
554{
555 int thread_num;
556 struct mutex thread_lock;
557 struct list_head ready_list, active_list;
558
559 wait_queue_head_t wait;
560};
561
562void thread_pool_del_worker(struct thread_pool *p);
563void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id);
564int thread_pool_add_worker(struct thread_pool *p,
565 char *name,
566 unsigned int id,
567 void *(* init)(void *data),
568 void (* cleanup)(void *data),
569 void *data);
570
571void thread_pool_destroy(struct thread_pool *p);
572struct thread_pool *thread_pool_create(int num, char *name,
573 void *(* init)(void *data),
574 void (* cleanup)(void *data),
575 void *data);
576
577int thread_pool_schedule(struct thread_pool *p,
578 int (* setup)(void *stored_private, void *setup_data),
579 int (* action)(void *stored_private, void *setup_data),
580 void *setup_data, long timeout);
581int thread_pool_schedule_private(struct thread_pool *p,
582 int (* setup)(void *private, void *data),
583 int (* action)(void *private, void *data),
584 void *data, long timeout, void *id);
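
A usage sketch of the pool interface above (callback names are invented, error handling is simplified, and the real DST callers differ):

static void *example_worker_init(void *data)
{
	return data;			/* becomes the per-thread private pointer */
}

static void example_worker_cleanup(void *private)
{
}

static int example_action(void *private, void *setup_data)
{
	/* process one unit of work described by setup_data */
	return 0;
}

static int example_run(void *work)
{
	struct thread_pool *p;
	int err;

	p = thread_pool_create(4, "example", example_worker_init,
			example_worker_cleanup, NULL);
	if (!p)
		return -ENOMEM;

	/* dst_thread_setup() above is a no-op setup callback */
	err = thread_pool_schedule(p, dst_thread_setup, example_action,
			work, MAX_SCHEDULE_TIMEOUT);

	thread_pool_destroy(p);
	return err;
}
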
585
586#endif /* __KERNEL__ */
587#endif /* __DST_H */
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index a0d9422a1569..f8c2e1767500 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -57,8 +57,7 @@ extern int ddebug_remove_module(char *mod_name);
57 { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ 57 { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
58 DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ 58 DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
59 if (__dynamic_dbg_enabled(descriptor)) \ 59 if (__dynamic_dbg_enabled(descriptor)) \
60 printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \ 60 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \
61 ##__VA_ARGS__); \
62 } while (0) 61 } while (0)
63 62
64 63
@@ -69,9 +68,7 @@ extern int ddebug_remove_module(char *mod_name);
69 { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ 68 { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \
70 DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ 69 DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \
71 if (__dynamic_dbg_enabled(descriptor)) \ 70 if (__dynamic_dbg_enabled(descriptor)) \
72 dev_printk(KERN_DEBUG, dev, \ 71 dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
73 KBUILD_MODNAME ": " fmt, \
74 ##__VA_ARGS__); \
75 } while (0) 72 } while (0)
76 73
77#else 74#else
@@ -81,8 +78,10 @@ static inline int ddebug_remove_module(char *mod)
81 return 0; 78 return 0;
82} 79}
83 80
84#define dynamic_pr_debug(fmt, ...) do { } while (0) 81#define dynamic_pr_debug(fmt, ...) \
85#define dynamic_dev_dbg(dev, format, ...) do { } while (0) 82 do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
83#define dynamic_dev_dbg(dev, format, ...) \
 84 do { if (0) dev_printk(KERN_DEBUG, dev, format, ##__VA_ARGS__); } while (0)
86#endif 85#endif
87 86
88#endif 87#endif
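
The net effect of this hunk is that dynamic_pr_debug()/dynamic_dev_dbg() no longer hard-code a KBUILD_MODNAME prefix, and the disabled stubs still type-check their arguments via an if (0) branch. A hedged sketch of how a driver supplies the prefix itself now (the pr_fmt definition below is the usual convention, not part of this patch):

/* Must come before any #include that might define a default pr_fmt(). */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void example_debug(int id)
{
	/* With CONFIG_DYNAMIC_DEBUG this expands to dynamic_pr_debug() and
	 * prints "<modname>: probing device <id>" once the callsite is enabled. */
	pr_debug("probing device %d\n", id);
}
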
diff --git a/include/linux/efi.h b/include/linux/efi.h
index ce4581fbc08b..fb737bc19a8c 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -280,11 +280,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right)
280static inline char * 280static inline char *
281efi_guid_unparse(efi_guid_t *guid, char *out) 281efi_guid_unparse(efi_guid_t *guid, char *out)
282{ 282{
283 sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", 283 sprintf(out, "%pUl", guid->b);
284 guid->b[3], guid->b[2], guid->b[1], guid->b[0],
285 guid->b[5], guid->b[4], guid->b[7], guid->b[6],
286 guid->b[8], guid->b[9], guid->b[10], guid->b[11],
287 guid->b[12], guid->b[13], guid->b[14], guid->b[15]);
288 return out; 284 return out;
289} 285}
290 286
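
The rewrite of efi_guid_unparse() leans on the "%pU" vsprintf extension ("l" selects the little-endian byte grouping). A small sketch of the same idiom, assuming that extension is available in this tree:

#include <linux/kernel.h>
#include <linux/efi.h>

static void example_print_guid(const efi_guid_t *guid)
{
	/* Prints the 16 raw bytes as a conventional xxxxxxxx-xxxx-... GUID. */
	printk(KERN_INFO "GUID: %pUl\n", guid->b);
}
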
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 90a4ed0ea0e5..0cc4d55151b7 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -361,7 +361,7 @@ typedef struct elf64_shdr {
361#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ 361#define NT_PPC_VSX 0x102 /* PowerPC VSX registers */
362#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ 362#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
363#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ 363#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
364#define NT_PRXSTATUS 0x300 /* s390 upper register halves */ 364#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
365 365
366 366
367/* Note header in a PT_NOTE section */ 367/* Note header in a PT_NOTE section */
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 90d1c2184112..9a33c5f7e126 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -42,6 +42,8 @@ enum enclosure_status {
42 ENCLOSURE_STATUS_NOT_INSTALLED, 42 ENCLOSURE_STATUS_NOT_INSTALLED,
43 ENCLOSURE_STATUS_UNKNOWN, 43 ENCLOSURE_STATUS_UNKNOWN,
44 ENCLOSURE_STATUS_UNAVAILABLE, 44 ENCLOSURE_STATUS_UNAVAILABLE,
45 /* last element for counting purposes */
46 ENCLOSURE_STATUS_MAX
45}; 47};
46 48
47/* SFF-8485 activity light settings */ 49/* SFF-8485 activity light settings */
diff --git a/include/linux/err.h b/include/linux/err.h
index ec87f3142bf3..1b12642636c7 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr)
34 return IS_ERR_VALUE((unsigned long)ptr); 34 return IS_ERR_VALUE((unsigned long)ptr);
35} 35}
36 36
37static inline long IS_ERR_OR_NULL(const void *ptr)
38{
39 return !ptr || IS_ERR_VALUE((unsigned long)ptr);
40}
41
37/** 42/**
38 * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type 43 * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
39 * @ptr: The pointer to cast. 44 * @ptr: The pointer to cast.
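
IS_ERR_OR_NULL() folds the common "NULL or ERR_PTR()" test into one call. A minimal usage sketch (example_lookup() is a stand-in for any API that can return either NULL or an ERR_PTR-encoded error):

#include <linux/err.h>

extern void *example_lookup(const char *name);

static int example_open(const char *name)
{
	void *handle = example_lookup(name);

	if (IS_ERR_OR_NULL(handle))
		return handle ? PTR_ERR(handle) : -ENODEV;

	/* ... use handle ... */
	return 0;
}
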
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 27e772cefb6a..dc12f416a49f 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -97,7 +97,7 @@ struct fid {
97 * @get_name: find the name for a given inode in a given directory 97 * @get_name: find the name for a given inode in a given directory
98 * @get_parent: find the parent of a given directory 98 * @get_parent: find the parent of a given directory
99 * 99 *
100 * See Documentation/filesystems/Exporting for details on how to use 100 * See Documentation/filesystems/nfs/Exporting for details on how to use
101 * this interface correctly. 101 * this interface correctly.
102 * 102 *
103 * encode_fh: 103 * encode_fh:
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
index f07f34de2f0e..258088ab3c6b 100644
--- a/include/linux/ext3_fs_sb.h
+++ b/include/linux/ext3_fs_sb.h
@@ -72,6 +72,8 @@ struct ext3_sb_info {
72 struct inode * s_journal_inode; 72 struct inode * s_journal_inode;
73 struct journal_s * s_journal; 73 struct journal_s * s_journal;
74 struct list_head s_orphan; 74 struct list_head s_orphan;
75 struct mutex s_orphan_lock;
76 struct mutex s_resize_lock;
75 unsigned long s_commit_interval; 77 unsigned long s_commit_interval;
76 struct block_device *journal_bdev; 78 struct block_device *journal_bdev;
77#ifdef CONFIG_JBD_DEBUG 79#ifdef CONFIG_JBD_DEBUG
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index cf82d519be40..d7b5ddca99c2 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
@@ -44,13 +44,13 @@
44 44
45#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \ 45#define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \
46 EXT3_XATTR_TRANS_BLOCKS - 2 + \ 46 EXT3_XATTR_TRANS_BLOCKS - 2 + \
47 2*EXT3_QUOTA_TRANS_BLOCKS(sb)) 47 EXT3_MAXQUOTAS_TRANS_BLOCKS(sb))
48 48
49/* Delete operations potentially hit one directory's namespace plus an 49/* Delete operations potentially hit one directory's namespace plus an
50 * entire inode, plus arbitrary amounts of bitmap/indirection data. Be 50 * entire inode, plus arbitrary amounts of bitmap/indirection data. Be
51 * generous. We can grow the delete transaction later if necessary. */ 51 * generous. We can grow the delete transaction later if necessary. */
52 52
53#define EXT3_DELETE_TRANS_BLOCKS(sb) (2 * EXT3_DATA_TRANS_BLOCKS(sb) + 64) 53#define EXT3_DELETE_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64)
54 54
55/* Define an arbitrary limit for the amount of data we will anticipate 55/* Define an arbitrary limit for the amount of data we will anticipate
56 * writing to any given transaction. For unbounded transactions such as 56 * writing to any given transaction. For unbounded transactions such as
@@ -86,6 +86,9 @@
86#define EXT3_QUOTA_INIT_BLOCKS(sb) 0 86#define EXT3_QUOTA_INIT_BLOCKS(sb) 0
87#define EXT3_QUOTA_DEL_BLOCKS(sb) 0 87#define EXT3_QUOTA_DEL_BLOCKS(sb) 0
88#endif 88#endif
89#define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb))
90#define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb))
91#define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb))
89 92
90int 93int
91ext3_mark_iloc_dirty(handle_t *handle, 94ext3_mark_iloc_dirty(handle_t *handle,
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h
index 934e22d65801..d830747f5c0b 100644
--- a/include/linux/fiemap.h
+++ b/include/linux/fiemap.h
@@ -62,5 +62,7 @@ struct fiemap {
62#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively 62#define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively
63 * support extents. Result 63 * support extents. Result
64 * merged for efficiency. */ 64 * merged for efficiency. */
65#define FIEMAP_EXTENT_SHARED 0x00002000 /* Space shared with other
66 * files. */
65 67
66#endif /* _LINUX_FIEMAP_H */ 68#endif /* _LINUX_FIEMAP_H */
diff --git a/include/linux/file.h b/include/linux/file.h
index 335a0a5c316e..5555508fd517 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -18,11 +18,9 @@ extern void drop_file_write_access(struct file *file);
18struct file_operations; 18struct file_operations;
19struct vfsmount; 19struct vfsmount;
20struct dentry; 20struct dentry;
21extern int init_file(struct file *, struct vfsmount *mnt, 21struct path;
22 struct dentry *dentry, fmode_t mode, 22extern struct file *alloc_file(struct path *, fmode_t mode,
23 const struct file_operations *fop); 23 const struct file_operations *fop);
24extern struct file *alloc_file(struct vfsmount *, struct dentry *dentry,
25 fmode_t mode, const struct file_operations *fop);
26 24
27static inline void fput_light(struct file *file, int fput_needed) 25static inline void fput_light(struct file *file, int fput_needed)
28{ 26{
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index c6b3ca3af6df..1f716d9f714b 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -340,6 +340,9 @@ struct fw_cdev_send_response {
340 * The @closure field is passed back to userspace in the response event. 340 * The @closure field is passed back to userspace in the response event.
341 * The @handle field is an out parameter, returning a handle to the allocated 341 * The @handle field is an out parameter, returning a handle to the allocated
342 * range to be used for later deallocation of the range. 342 * range to be used for later deallocation of the range.
343 *
344 * The address range is allocated on all local nodes. The address allocation
345 * is exclusive except for the FCP command and response registers.
343 */ 346 */
344struct fw_cdev_allocate { 347struct fw_cdev_allocate {
345 __u64 offset; 348 __u64 offset;
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 9416a461b696..a0e67150a729 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -248,8 +248,8 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
248 void *data, size_t length, 248 void *data, size_t length,
249 void *callback_data); 249 void *callback_data);
250/* 250/*
251 * Important note: The callback must guarantee that either fw_send_response() 251 * Important note: Except for the FCP registers, the callback must guarantee
252 * or kfree() is called on the @request. 252 * that either fw_send_response() or kfree() is called on the @request.
253 */ 253 */
254typedef void (*fw_address_callback_t)(struct fw_card *card, 254typedef void (*fw_address_callback_t)(struct fw_card *card,
255 struct fw_request *request, 255 struct fw_request *request,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a057f48eb156..b1bcb275b596 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -152,6 +152,7 @@ struct inodes_stat_t {
152#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) 152#define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
153#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) 153#define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
154#define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO)) 154#define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO))
155#define WRITE_META (WRITE | (1 << BIO_RW_META))
155#define SWRITE_SYNC_PLUG \ 156#define SWRITE_SYNC_PLUG \
156 (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) 157 (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
157#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) 158#define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
@@ -1094,10 +1095,6 @@ struct file_lock {
1094 1095
1095extern void send_sigio(struct fown_struct *fown, int fd, int band); 1096extern void send_sigio(struct fown_struct *fown, int fd, int band);
1096 1097
1097/* fs/sync.c */
1098extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset,
1099 loff_t endbyte, unsigned int flags);
1100
1101#ifdef CONFIG_FILE_LOCKING 1098#ifdef CONFIG_FILE_LOCKING
1102extern int fcntl_getlk(struct file *, struct flock __user *); 1099extern int fcntl_getlk(struct file *, struct flock __user *);
1103extern int fcntl_setlk(unsigned int, struct file *, unsigned int, 1100extern int fcntl_setlk(unsigned int, struct file *, unsigned int,
@@ -1590,7 +1587,7 @@ struct super_operations {
1590 * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at 1587 * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at
1591 * various stages of removing an inode. 1588 * various stages of removing an inode.
1592 * 1589 *
1593 * Two bits are used for locking and completion notification, I_LOCK and I_SYNC. 1590 * Two bits are used for locking and completion notification, I_NEW and I_SYNC.
1594 * 1591 *
1595 * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on 1592 * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on
1596 * fdatasync(). i_atime is the usual cause. 1593 * fdatasync(). i_atime is the usual cause.
@@ -1599,8 +1596,14 @@ struct super_operations {
1599 * don't have to write inode on fdatasync() when only 1596 * don't have to write inode on fdatasync() when only
1600 * mtime has changed in it. 1597 * mtime has changed in it.
1601 * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. 1598 * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean.
1602 * I_NEW get_new_inode() sets i_state to I_LOCK|I_NEW. Both 1599 * I_NEW Serves as both a mutex and completion notification.
1603 * are cleared by unlock_new_inode(), called from iget(). 1600 * New inodes set I_NEW. If two processes both create
1601 * the same inode, one of them will release its inode and
1602 * wait for I_NEW to be released before returning.
1603 * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
1604 * also cause waiting on I_NEW, without I_NEW actually
1605 * being set. find_inode() uses this to prevent returning
1606 * nearly-dead inodes.
1604 * I_WILL_FREE Must be set when calling write_inode_now() if i_count 1607 * I_WILL_FREE Must be set when calling write_inode_now() if i_count
1605 * is zero. I_FREEING must be set when I_WILL_FREE is 1608 * is zero. I_FREEING must be set when I_WILL_FREE is
1606 * cleared. 1609 * cleared.
@@ -1614,35 +1617,23 @@ struct super_operations {
1614 * prohibited for many purposes. iget() must wait for 1617 * prohibited for many purposes. iget() must wait for
1615 * the inode to be completely released, then create it 1618 * the inode to be completely released, then create it
1616 * anew. Other functions will just ignore such inodes, 1619 * anew. Other functions will just ignore such inodes,
1617 * if appropriate. I_LOCK is used for waiting. 1620 * if appropriate. I_NEW is used for waiting.
1618 * 1621 *
1619 * I_LOCK Serves as both a mutex and completion notification. 1622 * I_SYNC Synchronized write of dirty inode data. The bit is
1620 * New inodes set I_LOCK. If two processes both create 1623 * set during data writeback, and cleared with a wakeup
1621 * the same inode, one of them will release its inode and 1624 * on the bit address once it is done.
1622 * wait for I_LOCK to be released before returning.
1623 * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can
1624 * also cause waiting on I_LOCK, without I_LOCK actually
1625 * being set. find_inode() uses this to prevent returning
1626 * nearly-dead inodes.
1627 * I_SYNC Similar to I_LOCK, but limited in scope to writeback
1628 * of inode dirty data. Having a separate lock for this
1629 * purpose reduces latency and prevents some filesystem-
1630 * specific deadlocks.
1631 * 1625 *
1632 * Q: What is the difference between I_WILL_FREE and I_FREEING? 1626 * Q: What is the difference between I_WILL_FREE and I_FREEING?
1633 * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on
1634 * I_CLEAR? If not, why?
1635 */ 1627 */
1636#define I_DIRTY_SYNC 1 1628#define I_DIRTY_SYNC 1
1637#define I_DIRTY_DATASYNC 2 1629#define I_DIRTY_DATASYNC 2
1638#define I_DIRTY_PAGES 4 1630#define I_DIRTY_PAGES 4
1639#define I_NEW 8 1631#define __I_NEW 3
1632#define I_NEW (1 << __I_NEW)
1640#define I_WILL_FREE 16 1633#define I_WILL_FREE 16
1641#define I_FREEING 32 1634#define I_FREEING 32
1642#define I_CLEAR 64 1635#define I_CLEAR 64
1643#define __I_LOCK 7 1636#define __I_SYNC 7
1644#define I_LOCK (1 << __I_LOCK)
1645#define __I_SYNC 8
1646#define I_SYNC (1 << __I_SYNC) 1637#define I_SYNC (1 << __I_SYNC)
1647 1638
1648#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) 1639#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
@@ -2189,7 +2180,6 @@ static inline void insert_inode_hash(struct inode *inode) {
2189 __insert_inode_hash(inode, inode->i_ino); 2180 __insert_inode_hash(inode, inode->i_ino);
2190} 2181}
2191 2182
2192extern struct file * get_empty_filp(void);
2193extern void file_move(struct file *f, struct list_head *list); 2183extern void file_move(struct file *f, struct list_head *list);
2194extern void file_kill(struct file *f); 2184extern void file_kill(struct file *f);
2195#ifdef CONFIG_BLOCK 2185#ifdef CONFIG_BLOCK
@@ -2264,9 +2254,11 @@ ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
2264 int lock_type); 2254 int lock_type);
2265 2255
2266enum { 2256enum {
2267 DIO_LOCKING = 1, /* need locking between buffered and direct access */ 2257 /* need locking between buffered and direct access */
2268 DIO_NO_LOCKING, /* bdev; no locking at all between buffered/direct */ 2258 DIO_LOCKING = 0x01,
2269 DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */ 2259
2260 /* filesystem does not support filling holes */
2261 DIO_SKIP_HOLES = 0x02,
2270}; 2262};
2271 2263
2272static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, 2264static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
@@ -2275,7 +2267,8 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
2275 dio_iodone_t end_io) 2267 dio_iodone_t end_io)
2276{ 2268{
2277 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 2269 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
2278 nr_segs, get_block, end_io, DIO_LOCKING); 2270 nr_segs, get_block, end_io,
2271 DIO_LOCKING | DIO_SKIP_HOLES);
2279} 2272}
2280 2273
2281static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, 2274static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
@@ -2284,16 +2277,7 @@ static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
2284 dio_iodone_t end_io) 2277 dio_iodone_t end_io)
2285{ 2278{
2286 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, 2279 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
2287 nr_segs, get_block, end_io, DIO_NO_LOCKING); 2280 nr_segs, get_block, end_io, 0);
2288}
2289
2290static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
2291 struct inode *inode, struct block_device *bdev, const struct iovec *iov,
2292 loff_t offset, unsigned long nr_segs, get_block_t get_block,
2293 dio_iodone_t end_io)
2294{
2295 return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
2296 nr_segs, get_block, end_io, DIO_OWN_LOCKING);
2297} 2281}
2298#endif 2282#endif
2299 2283
@@ -2313,6 +2297,7 @@ extern const struct inode_operations page_symlink_inode_operations;
2313extern int generic_readlink(struct dentry *, char __user *, int); 2297extern int generic_readlink(struct dentry *, char __user *, int);
2314extern void generic_fillattr(struct inode *, struct kstat *); 2298extern void generic_fillattr(struct inode *, struct kstat *);
2315extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); 2299extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
2300void __inode_add_bytes(struct inode *inode, loff_t bytes);
2316void inode_add_bytes(struct inode *inode, loff_t bytes); 2301void inode_add_bytes(struct inode *inode, loff_t bytes);
2317void inode_sub_bytes(struct inode *inode, loff_t bytes); 2302void inode_sub_bytes(struct inode *inode, loff_t bytes);
2318loff_t inode_get_bytes(struct inode *inode); 2303loff_t inode_get_bytes(struct inode *inode);
@@ -2478,5 +2463,8 @@ int proc_nr_files(struct ctl_table *table, int write,
2478 2463
2479int __init get_filesystem_list(char *buf); 2464int __init get_filesystem_list(char *buf);
2480 2465
2466#define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE])
2467#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
2468
2481#endif /* __KERNEL__ */ 2469#endif /* __KERNEL__ */
2482#endif /* _LINUX_FS_H */ 2470#endif /* _LINUX_FS_H */
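
ACC_MODE() and OPEN_FMODE() translate open(2) access bits into the MAY_* and FMODE_* encodings. A short sketch of what they evaluate to, simply restating the definitions above:

#include <linux/fs.h>
#include <linux/fcntl.h>

static void example_acc_mode(void)
{
	/* "\004\002\006\006"[x & O_ACCMODE]:
	 *   O_RDONLY -> 004 (MAY_READ)
	 *   O_WRONLY -> 002 (MAY_WRITE)
	 *   O_RDWR   -> 006 (MAY_READ | MAY_WRITE)            */
	int acc = ACC_MODE(O_RDWR);

	/* (flag + 1) & O_ACCMODE maps O_WRONLY to FMODE_WRITE */
	fmode_t f = OPEN_FMODE(O_WRONLY);

	(void)acc;
	(void)f;
}
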
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index bb516ceeefc9..da317c7163ab 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -8,10 +8,8 @@
8#include <linux/fs.h> 8#include <linux/fs.h>
9 9
10/* externs for fs/stack.c */ 10/* externs for fs/stack.c */
11extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src, 11extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src);
12 int (*get_nlinks)(struct inode *)); 12extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
13
14extern void fsstack_copy_inode_size(struct inode *dst, const struct inode *src);
15 13
16/* inlines */ 14/* inlines */
17static inline void fsstack_copy_attr_atime(struct inode *dest, 15static inline void fsstack_copy_attr_atime(struct inode *dest,
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index 43fc95d822d5..28e33fea5107 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -74,7 +74,12 @@ struct spi_device;
74struct fsl_spi_platform_data { 74struct fsl_spi_platform_data {
75 u32 initial_spmode; /* initial SPMODE value */ 75 u32 initial_spmode; /* initial SPMODE value */
76 s16 bus_num; 76 s16 bus_num;
77 bool qe_mode; 77 unsigned int flags;
78#define SPI_QE_CPU_MODE (1 << 0) /* QE CPU ("PIO") mode */
79#define SPI_CPM_MODE (1 << 1) /* CPM/QE ("DMA") mode */
80#define SPI_CPM1 (1 << 2) /* SPI unit is in CPM1 block */
81#define SPI_CPM2 (1 << 3) /* SPI unit is in CPM2 block */
82#define SPI_QE (1 << 4) /* SPI unit is in QE block */
78 /* board specific information */ 83 /* board specific information */
79 u16 max_chipselect; 84 u16 max_chipselect;
80 void (*cs_control)(struct spi_device *spi, bool on); 85 void (*cs_control)(struct spi_device *spi, bool on);
@@ -90,6 +95,10 @@ struct mpc8xx_pcmcia_ops {
90 * lead to a deep sleep (i.e. power removed from the core, 95 * lead to a deep sleep (i.e. power removed from the core,
91 * instead of just the clock). 96 * instead of just the clock).
92 */ 97 */
98#if defined(CONFIG_PPC_83xx) && defined(CONFIG_SUSPEND)
93int fsl_deep_sleep(void); 99int fsl_deep_sleep(void);
100#else
101static inline int fsl_deep_sleep(void) { return 0; }
102#endif
94 103
95#endif /* _FSL_DEVICE_H_ */ 104#endif /* _FSL_DEVICE_H_ */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 47bbdf9c38d0..2233c98d80df 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -57,6 +57,7 @@ struct trace_iterator {
57 /* The below is zeroed out in pipe_read */ 57 /* The below is zeroed out in pipe_read */
58 struct trace_seq seq; 58 struct trace_seq seq;
59 struct trace_entry *ent; 59 struct trace_entry *ent;
60 int leftover;
60 int cpu; 61 int cpu;
61 u64 ts; 62 u64 ts;
62 63
@@ -130,7 +131,7 @@ struct ftrace_event_call {
130 void *mod; 131 void *mod;
131 void *data; 132 void *data;
132 133
133 atomic_t profile_count; 134 int profile_count;
134 int (*profile_enable)(struct ftrace_event_call *); 135 int (*profile_enable)(struct ftrace_event_call *);
135 void (*profile_disable)(struct ftrace_event_call *); 136 void (*profile_disable)(struct ftrace_event_call *);
136}; 137};
@@ -157,7 +158,7 @@ enum {
157 FILTER_PTR_STRING, 158 FILTER_PTR_STRING,
158}; 159};
159 160
160extern int trace_define_common_fields(struct ftrace_event_call *call); 161extern int trace_event_raw_init(struct ftrace_event_call *call);
161extern int trace_define_field(struct ftrace_event_call *call, const char *type, 162extern int trace_define_field(struct ftrace_event_call *call, const char *type,
162 const char *name, int offset, int size, 163 const char *name, int offset, int size,
163 int is_signed, int filter_type); 164 int is_signed, int filter_type);
diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h
index 886f5faa08cb..ca666d18ed67 100644
--- a/include/linux/generic_acl.h
+++ b/include/linux/generic_acl.h
@@ -1,36 +1,15 @@
1/* 1#ifndef LINUX_GENERIC_ACL_H
2 * include/linux/generic_acl.h 2#define LINUX_GENERIC_ACL_H
3 *
4 * (C) 2005 Andreas Gruenbacher <agruen@suse.de>
5 *
6 * This file is released under the GPL.
7 */
8 3
9#ifndef GENERIC_ACL_H 4#include <linux/xattr.h>
10#define GENERIC_ACL_H
11 5
12#include <linux/posix_acl.h> 6struct inode;
13#include <linux/posix_acl_xattr.h>
14 7
15/** 8extern struct xattr_handler generic_acl_access_handler;
16 * struct generic_acl_operations - filesystem operations 9extern struct xattr_handler generic_acl_default_handler;
17 *
18 * Filesystems must make these operations available to the generic
19 * operations.
20 */
21struct generic_acl_operations {
22 struct posix_acl *(*getacl)(struct inode *, int);
23 void (*setacl)(struct inode *, int, struct posix_acl *);
24};
25 10
26size_t generic_acl_list(struct inode *, struct generic_acl_operations *, int, 11int generic_acl_init(struct inode *, struct inode *);
27 char *, size_t); 12int generic_acl_chmod(struct inode *);
28int generic_acl_get(struct inode *, struct generic_acl_operations *, int, 13int generic_check_acl(struct inode *inode, int mask);
29 void *, size_t);
30int generic_acl_set(struct inode *, struct generic_acl_operations *, int,
31 const void *, size_t);
32int generic_acl_init(struct inode *, struct inode *,
33 struct generic_acl_operations *);
34int generic_acl_chmod(struct inode *, struct generic_acl_operations *);
35 14
36#endif 15#endif /* LINUX_GENERIC_ACL_H */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index c6c0c41af35f..9717081c75ad 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -256,9 +256,9 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
256#define part_stat_read(part, field) \ 256#define part_stat_read(part, field) \
257({ \ 257({ \
258 typeof((part)->dkstats->field) res = 0; \ 258 typeof((part)->dkstats->field) res = 0; \
259 int i; \ 259 unsigned int _cpu; \
260 for_each_possible_cpu(i) \ 260 for_each_possible_cpu(_cpu) \
261 res += per_cpu_ptr((part)->dkstats, i)->field; \ 261 res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
262 res; \ 262 res; \
263}) 263})
264 264
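
The loop variable is renamed presumably because part_stat_read() is a statement expression: with the old "int i", a caller whose field expression itself used a variable named i would silently pick up the macro's copy. A sketch of the hazard the rename avoids (hypothetical caller):

#include <linux/genhd.h>

static unsigned long example_total_sectors(struct hd_struct *part)
{
	unsigned long total = 0;
	int i;

	/* With the macro-internal "int i", sectors[i] would have been indexed
	 * by the CPU number instead of this i; "_cpu" avoids the clash. */
	for (i = 0; i < 2; i++)			/* 0 = reads, 1 = writes */
		total += part_stat_read(part, sectors[i]);

	return total;
}
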
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 059bd189d35d..4e949a5b5b85 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -99,6 +99,12 @@ static inline int gpio_export_link(struct device *dev, const char *name,
99 return -EINVAL; 99 return -EINVAL;
100} 100}
101 101
102static inline int gpio_sysfs_set_active_low(unsigned gpio, int value)
103{
104 /* GPIO can never have been requested */
105 WARN_ON(1);
106 return -EINVAL;
107}
102 108
103static inline void gpio_unexport(unsigned gpio) 109static inline void gpio_unexport(unsigned gpio)
104{ 110{
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 211ff4497269..ab2cc20e21a5 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -46,7 +46,7 @@ void kmap_flush_unused(void);
46 46
47static inline unsigned int nr_free_highpages(void) { return 0; } 47static inline unsigned int nr_free_highpages(void) { return 0; }
48 48
49#define totalhigh_pages 0 49#define totalhigh_pages 0UL
50 50
51#ifndef ARCH_HAS_KMAP 51#ifndef ARCH_HAS_KMAP
52static inline void *kmap(struct page *page) 52static inline void *kmap(struct page *page)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 9bace4b9f4fe..5d86fb2309d2 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -162,18 +162,23 @@ struct hrtimer_clock_base {
162 * @expires_next: absolute time of the next event which was scheduled 162 * @expires_next: absolute time of the next event which was scheduled
163 * via clock_set_next_event() 163 * via clock_set_next_event()
164 * @hres_active: State of high resolution mode 164 * @hres_active: State of high resolution mode
165 * @check_clocks: Indicator, when set evaluate time source and clock 165 * @hang_detected: The last hrtimer interrupt detected a hang
166 * event devices whether high resolution mode can be 166 * @nr_events: Total number of hrtimer interrupt events
167 * activated. 167 * @nr_retries: Total number of hrtimer interrupt retries
168 * @nr_events: Total number of timer interrupt events 168 * @nr_hangs: Total number of hrtimer interrupt hangs
169 * @max_hang_time: Maximum time spent in hrtimer_interrupt
169 */ 170 */
170struct hrtimer_cpu_base { 171struct hrtimer_cpu_base {
171 spinlock_t lock; 172 raw_spinlock_t lock;
172 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; 173 struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
173#ifdef CONFIG_HIGH_RES_TIMERS 174#ifdef CONFIG_HIGH_RES_TIMERS
174 ktime_t expires_next; 175 ktime_t expires_next;
175 int hres_active; 176 int hres_active;
177 int hang_detected;
176 unsigned long nr_events; 178 unsigned long nr_events;
179 unsigned long nr_retries;
180 unsigned long nr_hangs;
181 ktime_t max_hang_time;
177#endif 182#endif
178}; 183};
179 184
@@ -435,47 +440,4 @@ extern u64 ktime_divns(const ktime_t kt, s64 div);
435/* Show pending timers: */ 440/* Show pending timers: */
436extern void sysrq_timer_list_show(void); 441extern void sysrq_timer_list_show(void);
437 442
438/*
439 * Timer-statistics info:
440 */
441#ifdef CONFIG_TIMER_STATS
442
443extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
444 void *timerf, char *comm,
445 unsigned int timer_flag);
446
447static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
448{
449 if (likely(!timer_stats_active))
450 return;
451 timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
452 timer->function, timer->start_comm, 0);
453}
454
455extern void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer,
456 void *addr);
457
458static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
459{
460 __timer_stats_hrtimer_set_start_info(timer, __builtin_return_address(0));
461}
462
463static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
464{
465 timer->start_site = NULL;
466}
467#else
468static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
469{
470}
471
472static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
473{
474}
475
476static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
477{
478}
479#endif
480
481#endif 443#endif
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 41a59afc70fa..78b4bc64c006 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -23,6 +23,12 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
23int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 23int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
24int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 24int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
25int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); 25int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
26
27#ifdef CONFIG_NUMA
28int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
29 void __user *, size_t *, loff_t *);
30#endif
31
26int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); 32int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
27int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, 33int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
28 struct page **, struct vm_area_struct **, 34 struct page **, struct vm_area_struct **,
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index a03daed08c59..41235c93e4e9 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -20,19 +20,18 @@ enum {
20 20
21#ifdef CONFIG_HAVE_HW_BREAKPOINT 21#ifdef CONFIG_HAVE_HW_BREAKPOINT
22 22
23/* As it's for in-kernel or ptrace use, we want it to be pinned */
24#define DEFINE_BREAKPOINT_ATTR(name) \
25struct perf_event_attr name = { \
26 .type = PERF_TYPE_BREAKPOINT, \
27 .size = sizeof(name), \
28 .pinned = 1, \
29};
30
31static inline void hw_breakpoint_init(struct perf_event_attr *attr) 23static inline void hw_breakpoint_init(struct perf_event_attr *attr)
32{ 24{
25 memset(attr, 0, sizeof(*attr));
26
33 attr->type = PERF_TYPE_BREAKPOINT; 27 attr->type = PERF_TYPE_BREAKPOINT;
34 attr->size = sizeof(*attr); 28 attr->size = sizeof(*attr);
29 /*
30 * As it's for in-kernel or ptrace use, we want it to be pinned
31 * and to call its callback every hits.
32 */
35 attr->pinned = 1; 33 attr->pinned = 1;
34 attr->sample_period = 1;
36} 35}
37 36
38static inline unsigned long hw_breakpoint_addr(struct perf_event *bp) 37static inline unsigned long hw_breakpoint_addr(struct perf_event *bp)
@@ -52,27 +51,24 @@ static inline int hw_breakpoint_len(struct perf_event *bp)
52 51
53extern struct perf_event * 52extern struct perf_event *
54register_user_hw_breakpoint(struct perf_event_attr *attr, 53register_user_hw_breakpoint(struct perf_event_attr *attr,
55 perf_callback_t triggered, 54 perf_overflow_handler_t triggered,
56 struct task_struct *tsk); 55 struct task_struct *tsk);
57 56
58/* FIXME: only change from the attr, and don't unregister */ 57/* FIXME: only change from the attr, and don't unregister */
59extern struct perf_event * 58extern int
60modify_user_hw_breakpoint(struct perf_event *bp, 59modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr);
61 struct perf_event_attr *attr,
62 perf_callback_t triggered,
63 struct task_struct *tsk);
64 60
65/* 61/*
66 * Kernel breakpoints are not associated with any particular thread. 62 * Kernel breakpoints are not associated with any particular thread.
67 */ 63 */
68extern struct perf_event * 64extern struct perf_event *
69register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, 65register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
70 perf_callback_t triggered, 66 perf_overflow_handler_t triggered,
71 int cpu); 67 int cpu);
72 68
73extern struct perf_event ** 69extern struct perf_event **
74register_wide_hw_breakpoint(struct perf_event_attr *attr, 70register_wide_hw_breakpoint(struct perf_event_attr *attr,
75 perf_callback_t triggered); 71 perf_overflow_handler_t triggered);
76 72
77extern int register_perf_hw_breakpoint(struct perf_event *bp); 73extern int register_perf_hw_breakpoint(struct perf_event *bp);
78extern int __register_perf_hw_breakpoint(struct perf_event *bp); 74extern int __register_perf_hw_breakpoint(struct perf_event *bp);
@@ -93,20 +89,18 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
93 89
94static inline struct perf_event * 90static inline struct perf_event *
95register_user_hw_breakpoint(struct perf_event_attr *attr, 91register_user_hw_breakpoint(struct perf_event_attr *attr,
96 perf_callback_t triggered, 92 perf_overflow_handler_t triggered,
97 struct task_struct *tsk) { return NULL; } 93 struct task_struct *tsk) { return NULL; }
98static inline struct perf_event * 94static inline int
99modify_user_hw_breakpoint(struct perf_event *bp, 95modify_user_hw_breakpoint(struct perf_event *bp,
100 struct perf_event_attr *attr, 96 struct perf_event_attr *attr) { return -ENOSYS; }
101 perf_callback_t triggered,
102 struct task_struct *tsk) { return NULL; }
103static inline struct perf_event * 97static inline struct perf_event *
104register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr, 98register_wide_hw_breakpoint_cpu(struct perf_event_attr *attr,
105 perf_callback_t triggered, 99 perf_overflow_handler_t triggered,
106 int cpu) { return NULL; } 100 int cpu) { return NULL; }
107static inline struct perf_event ** 101static inline struct perf_event **
108register_wide_hw_breakpoint(struct perf_event_attr *attr, 102register_wide_hw_breakpoint(struct perf_event_attr *attr,
109 perf_callback_t triggered) { return NULL; } 103 perf_overflow_handler_t triggered) { return NULL; }
110static inline int 104static inline int
111register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; } 105register_perf_hw_breakpoint(struct perf_event *bp) { return -ENOSYS; }
112static inline int 106static inline int
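
With DEFINE_BREAKPOINT_ATTR() gone, callers build the attribute with hw_breakpoint_init() and pass a perf_overflow_handler_t. A rough sketch of registering a per-task write watchpoint (the handler body and address are invented; the handler prototype follows the perf API of this kernel generation):

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

static void example_bp_handler(struct perf_event *bp, int nmi,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	printk(KERN_INFO "watchpoint hit\n");
}

static struct perf_event *example_set_watchpoint(struct task_struct *tsk,
						 unsigned long addr)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);		/* zeroed, pinned, sample_period = 1 */
	attr.bp_addr = addr;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, example_bp_handler, tsk);
}
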
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 419ab546b266..02fc617782ef 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client,
110 * @driver: Device driver model driver 110 * @driver: Device driver model driver
111 * @id_table: List of I2C devices supported by this driver 111 * @id_table: List of I2C devices supported by this driver
112 * @detect: Callback for device detection 112 * @detect: Callback for device detection
113 * @address_data: The I2C addresses to probe (for detect) 113 * @address_list: The I2C addresses to probe (for detect)
114 * @clients: List of detected clients we created (for i2c-core use only) 114 * @clients: List of detected clients we created (for i2c-core use only)
115 * 115 *
116 * The driver.owner field should be set to the module owner of this driver. 116 * The driver.owner field should be set to the module owner of this driver.
@@ -161,8 +161,8 @@ struct i2c_driver {
161 const struct i2c_device_id *id_table; 161 const struct i2c_device_id *id_table;
162 162
163 /* Device detection callback for automatic device creation */ 163 /* Device detection callback for automatic device creation */
164 int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *); 164 int (*detect)(struct i2c_client *, struct i2c_board_info *);
165 const struct i2c_client_address_data *address_data; 165 const unsigned short *address_list;
166 struct list_head clients; 166 struct list_head clients;
167}; 167};
168#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) 168#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -391,14 +391,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter)
391#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ 391#define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */
392#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ 392#define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */
393 393
394/* i2c_client_address_data is the struct for holding default client
395 * addresses for a driver and for the parameters supplied on the
396 * command line
397 */
398struct i2c_client_address_data {
399 const unsigned short *normal_i2c;
400};
401
402/* Internal numbers to terminate lists */ 394/* Internal numbers to terminate lists */
403#define I2C_CLIENT_END 0xfffeU 395#define I2C_CLIENT_END 0xfffeU
404 396
@@ -576,82 +568,4 @@ union i2c_smbus_data {
576#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */ 568#define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */
577#define I2C_SMBUS_I2C_BLOCK_DATA 8 569#define I2C_SMBUS_I2C_BLOCK_DATA 8
578 570
579
580#ifdef __KERNEL__
581
582/* These defines are used for probing i2c client addresses */
583/* The length of the option lists */
584#define I2C_CLIENT_MAX_OPTS 48
585
586/* Default fill of many variables */
587#define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
588 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
589 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
590 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
591 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
592 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
593 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
594 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
595 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
596 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
597 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
598 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
599 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
600 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
601 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \
602 I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END}
603
604/* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the
605 module header */
606
607#define I2C_CLIENT_MODULE_PARM(var,desc) \
608 static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \
609 static unsigned int var##_num; \
610 module_param_array(var, short, &var##_num, 0); \
611 MODULE_PARM_DESC(var, desc)
612
613#define I2C_CLIENT_INSMOD_COMMON \
614static const struct i2c_client_address_data addr_data = { \
615 .normal_i2c = normal_i2c, \
616}
617
618/* These are the ones you want to use in your own drivers. Pick the one
 619 which matches the number of devices the driver differentiates between. */
620#define I2C_CLIENT_INSMOD \
621I2C_CLIENT_INSMOD_COMMON
622
623#define I2C_CLIENT_INSMOD_1(chip1) \
624enum chips { any_chip, chip1 }; \
625I2C_CLIENT_INSMOD_COMMON
626
627#define I2C_CLIENT_INSMOD_2(chip1, chip2) \
628enum chips { any_chip, chip1, chip2 }; \
629I2C_CLIENT_INSMOD_COMMON
630
631#define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \
632enum chips { any_chip, chip1, chip2, chip3 }; \
633I2C_CLIENT_INSMOD_COMMON
634
635#define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \
636enum chips { any_chip, chip1, chip2, chip3, chip4 }; \
637I2C_CLIENT_INSMOD_COMMON
638
639#define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \
640enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \
641I2C_CLIENT_INSMOD_COMMON
642
643#define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \
644enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \
645I2C_CLIENT_INSMOD_COMMON
646
647#define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \
648enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
649 chip7 }; \
650I2C_CLIENT_INSMOD_COMMON
651
652#define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \
653enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \
654 chip7, chip8 }; \
655I2C_CLIENT_INSMOD_COMMON
656#endif /* __KERNEL__ */
657#endif /* _LINUX_I2C_H */ 571#endif /* _LINUX_I2C_H */
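
The detection interface now takes a bare, I2C_CLIENT_END-terminated address list and a detect() callback without the old "kind" argument. A hedged sketch of the new driver shape (chip name, addresses and class are invented):

#include <linux/i2c.h>
#include <linux/module.h>

static const unsigned short example_addrs[] = { 0x48, 0x49, I2C_CLIENT_END };

static int example_detect(struct i2c_client *client,
			  struct i2c_board_info *info)
{
	/* Probe the hardware here; on success report the device type. */
	strlcpy(info->type, "example-chip", I2C_NAME_SIZE);
	return 0;
}

static struct i2c_driver example_driver = {
	.driver		= { .name = "example" },
	.class		= I2C_CLASS_HWMON,
	.detect		= example_detect,
	.address_list	= example_addrs,
};
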
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index fc5db826b48e..02c9af374741 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -89,4 +89,16 @@ struct adp5588_kpad_platform_data {
89 unsigned short unlock_key2; /* Unlock Key 2 */ 89 unsigned short unlock_key2; /* Unlock Key 2 */
90}; 90};
91 91
92struct adp5588_gpio_platform_data {
93 unsigned gpio_start; /* GPIO Chip base # */
94 unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
95 int (*setup)(struct i2c_client *client,
96 int gpio, unsigned ngpio,
97 void *context);
98 int (*teardown)(struct i2c_client *client,
99 int gpio, unsigned ngpio,
100 void *context);
101 void *context;
102};
103
92#endif 104#endif
diff --git a/include/linux/i2c/tps65010.h b/include/linux/i2c/tps65010.h
index 918c5354d9b8..08aa92278d71 100644
--- a/include/linux/i2c/tps65010.h
+++ b/include/linux/i2c/tps65010.h
@@ -72,6 +72,21 @@
72#define TPS_VDCDC1 0x0c 72#define TPS_VDCDC1 0x0c
73# define TPS_ENABLE_LP (1 << 3) 73# define TPS_ENABLE_LP (1 << 3)
74#define TPS_VDCDC2 0x0d 74#define TPS_VDCDC2 0x0d
75# define TPS_LP_COREOFF (1 << 7)
76# define TPS_VCORE_1_8V (7<<4)
77# define TPS_VCORE_1_5V (6 << 4)
78# define TPS_VCORE_1_4V (5 << 4)
79# define TPS_VCORE_1_3V (4 << 4)
80# define TPS_VCORE_1_2V (3 << 4)
81# define TPS_VCORE_1_1V (2 << 4)
82# define TPS_VCORE_1_0V (1 << 4)
83# define TPS_VCORE_0_85V (0 << 4)
84# define TPS_VCORE_LP_1_2V (3 << 2)
85# define TPS_VCORE_LP_1_1V (2 << 2)
86# define TPS_VCORE_LP_1_0V (1 << 2)
87# define TPS_VCORE_LP_0_85V (0 << 2)
88# define TPS_VIB (1 << 1)
89# define TPS_VCORE_DISCH (1 << 0)
75#define TPS_VREGS1 0x0e 90#define TPS_VREGS1 0x0e
76# define TPS_LDO2_ENABLE (1 << 7) 91# define TPS_LDO2_ENABLE (1 << 7)
77# define TPS_LDO2_OFF (1 << 6) 92# define TPS_LDO2_OFF (1 << 6)
@@ -152,6 +167,10 @@ extern int tps65010_config_vregs1(unsigned value);
152 */ 167 */
153extern int tps65013_set_low_pwr(unsigned mode); 168extern int tps65013_set_low_pwr(unsigned mode);
154 169
170/* tps65010_set_vdcdc2
171 * value to be written to VDCDC2
172 */
173extern int tps65010_config_vdcdc2(unsigned value);
155 174
156struct i2c_client; 175struct i2c_client;
157 176
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl.h
index 5306a759cbde..bf1c5be1f5b6 100644
--- a/include/linux/i2c/twl4030.h
+++ b/include/linux/i2c/twl.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __TWL4030_H_ 25#ifndef __TWL_H_
26#define __TWL4030_H_ 26#define __TWL_H_
27 27
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/input/matrix_keypad.h> 29#include <linux/input/matrix_keypad.h>
@@ -61,28 +61,112 @@
61#define TWL4030_MODULE_PWMA 0x0E 61#define TWL4030_MODULE_PWMA 0x0E
62#define TWL4030_MODULE_PWMB 0x0F 62#define TWL4030_MODULE_PWMB 0x0F
63 63
64#define TWL5031_MODULE_ACCESSORY 0x10
65#define TWL5031_MODULE_INTERRUPTS 0x11
66
64/* Slave 3 (i2c address 0x4b) */ 67/* Slave 3 (i2c address 0x4b) */
65#define TWL4030_MODULE_BACKUP 0x10 68#define TWL4030_MODULE_BACKUP 0x12
66#define TWL4030_MODULE_INT 0x11 69#define TWL4030_MODULE_INT 0x13
67#define TWL4030_MODULE_PM_MASTER 0x12 70#define TWL4030_MODULE_PM_MASTER 0x14
68#define TWL4030_MODULE_PM_RECEIVER 0x13 71#define TWL4030_MODULE_PM_RECEIVER 0x15
69#define TWL4030_MODULE_RTC 0x14 72#define TWL4030_MODULE_RTC 0x16
70#define TWL4030_MODULE_SECURED_REG 0x15 73#define TWL4030_MODULE_SECURED_REG 0x17
74
75#define TWL_MODULE_USB TWL4030_MODULE_USB
76#define TWL_MODULE_AUDIO_VOICE TWL4030_MODULE_AUDIO_VOICE
77#define TWL_MODULE_PIH TWL4030_MODULE_PIH
78#define TWL_MODULE_MADC TWL4030_MODULE_MADC
79#define TWL_MODULE_MAIN_CHARGE TWL4030_MODULE_MAIN_CHARGE
80#define TWL_MODULE_PM_MASTER TWL4030_MODULE_PM_MASTER
81#define TWL_MODULE_PM_RECEIVER TWL4030_MODULE_PM_RECEIVER
82#define TWL_MODULE_RTC TWL4030_MODULE_RTC
83
84#define GPIO_INTR_OFFSET 0
85#define KEYPAD_INTR_OFFSET 1
86#define BCI_INTR_OFFSET 2
87#define MADC_INTR_OFFSET 3
88#define USB_INTR_OFFSET 4
89#define BCI_PRES_INTR_OFFSET 9
90#define USB_PRES_INTR_OFFSET 10
91#define RTC_INTR_OFFSET 11
92
93/*
94 * Offset from TWL6030_IRQ_BASE / pdata->irq_base
95 */
96#define PWR_INTR_OFFSET 0
97#define HOTDIE_INTR_OFFSET 12
98#define SMPSLDO_INTR_OFFSET 13
99#define BATDETECT_INTR_OFFSET 14
100#define SIMDETECT_INTR_OFFSET 15
101#define MMCDETECT_INTR_OFFSET 16
102#define GASGAUGE_INTR_OFFSET 17
103#define USBOTG_INTR_OFFSET 4
104#define CHARGER_INTR_OFFSET 2
105#define RSV_INTR_OFFSET 0
106
107/* INT register offsets */
108#define REG_INT_STS_A 0x00
109#define REG_INT_STS_B 0x01
110#define REG_INT_STS_C 0x02
111
112#define REG_INT_MSK_LINE_A 0x03
113#define REG_INT_MSK_LINE_B 0x04
114#define REG_INT_MSK_LINE_C 0x05
115
116#define REG_INT_MSK_STS_A 0x06
117#define REG_INT_MSK_STS_B 0x07
118#define REG_INT_MSK_STS_C 0x08
119
120/* MASK INT REG GROUP A */
121#define TWL6030_PWR_INT_MASK 0x07
122#define TWL6030_RTC_INT_MASK 0x18
123#define TWL6030_HOTDIE_INT_MASK 0x20
124#define TWL6030_SMPSLDOA_INT_MASK 0xC0
125
126/* MASK INT REG GROUP B */
127#define TWL6030_SMPSLDOB_INT_MASK 0x01
128#define TWL6030_BATDETECT_INT_MASK 0x02
129#define TWL6030_SIMDETECT_INT_MASK 0x04
130#define TWL6030_MMCDETECT_INT_MASK 0x08
131#define TWL6030_GPADC_INT_MASK 0x60
132#define TWL6030_GASGAUGE_INT_MASK 0x80
133
134/* MASK INT REG GROUP C */
135#define TWL6030_USBOTG_INT_MASK 0x0F
136#define TWL6030_CHARGER_CTRL_INT_MASK 0x10
137#define TWL6030_CHARGER_FAULT_INT_MASK 0x60
138
139
140#define TWL4030_CLASS_ID 0x4030
141#define TWL6030_CLASS_ID 0x6030
142unsigned int twl_rev(void);
143#define GET_TWL_REV (twl_rev())
144#define TWL_CLASS_IS(class, id) \
145static inline int twl_class_is_ ##class(void) \
146{ \
147 return ((id) == (GET_TWL_REV)) ? 1 : 0; \
148}
149
150TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
151TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
71 152
72/* 153/*
73 * Read and write single 8-bit registers 154 * Read and write single 8-bit registers
74 */ 155 */
75int twl4030_i2c_write_u8(u8 mod_no, u8 val, u8 reg); 156int twl_i2c_write_u8(u8 mod_no, u8 val, u8 reg);
76int twl4030_i2c_read_u8(u8 mod_no, u8 *val, u8 reg); 157int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
77 158
78/* 159/*
79 * Read and write several 8-bit registers at once. 160 * Read and write several 8-bit registers at once.
80 * 161 *
81 * IMPORTANT: For twl4030_i2c_write(), allocate num_bytes + 1 162 * IMPORTANT: For twl_i2c_write(), allocate num_bytes + 1
82 * for the value, and populate your data starting at offset 1. 163 * for the value, and populate your data starting at offset 1.
83 */ 164 */
84int twl4030_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); 165int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
85int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); 166int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
167
168int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
169int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
86 170
87/*----------------------------------------------------------------------*/ 171/*----------------------------------------------------------------------*/
88 172
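
TWL_CLASS_IS() expands to twl_class_is_4030() and twl_class_is_6030(), letting shared driver code branch on the PMIC class reported by twl_rev(). A small, purely illustrative sketch:

#include <linux/kernel.h>
#include <linux/i2c/twl.h>

static void example_report_class(void)
{
	/* twl_rev() returns TWL4030_CLASS_ID or TWL6030_CLASS_ID */
	if (twl_class_is_4030())
		pr_info("TWL4030-class PMIC detected\n");
	else if (twl_class_is_6030())
		pr_info("TWL6030-class PMIC detected\n");
}
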
@@ -221,6 +305,38 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
221 305
222/*----------------------------------------------------------------------*/ 306/*----------------------------------------------------------------------*/
223 307
308/*
309 * Accessory Interrupts
310 */
311#define TWL5031_ACIIMR_LSB 0x05
312#define TWL5031_ACIIMR_MSB 0x06
313#define TWL5031_ACIIDR_LSB 0x07
314#define TWL5031_ACIIDR_MSB 0x08
315#define TWL5031_ACCISR1 0x0F
316#define TWL5031_ACCIMR1 0x10
317#define TWL5031_ACCISR2 0x11
318#define TWL5031_ACCIMR2 0x12
319#define TWL5031_ACCSIR 0x13
320#define TWL5031_ACCEDR1 0x14
321#define TWL5031_ACCSIHCTRL 0x15
322
323/*----------------------------------------------------------------------*/
324
325/*
326 * Battery Charger Controller
327 */
328
329#define TWL5031_INTERRUPTS_BCIISR1 0x0
330#define TWL5031_INTERRUPTS_BCIIMR1 0x1
331#define TWL5031_INTERRUPTS_BCIISR2 0x2
332#define TWL5031_INTERRUPTS_BCIIMR2 0x3
333#define TWL5031_INTERRUPTS_BCISIR 0x4
334#define TWL5031_INTERRUPTS_BCIEDR1 0x5
335#define TWL5031_INTERRUPTS_BCIEDR2 0x6
336#define TWL5031_INTERRUPTS_BCISIHCTRL 0x7
337
338/*----------------------------------------------------------------------*/
339
224/* Power bus message definitions */ 340/* Power bus message definitions */
225 341
226/* The TWL4030/5030 splits its power-management resources (the various 342/* The TWL4030/5030 splits its power-management resources (the various
@@ -250,6 +366,7 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
250 366
251#define RES_TYPE_ALL 0x7 367#define RES_TYPE_ALL 0x7
252 368
369/* Resource states */
253#define RES_STATE_WRST 0xF 370#define RES_STATE_WRST 0xF
254#define RES_STATE_ACTIVE 0xE 371#define RES_STATE_ACTIVE 0xE
255#define RES_STATE_SLEEP 0x8 372#define RES_STATE_SLEEP 0x8
@@ -310,8 +427,18 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
310#define MSG_SINGULAR(devgrp, id, state) \ 427#define MSG_SINGULAR(devgrp, id, state) \
311 ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) 428 ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state))
312 429
430#define MSG_BROADCAST_ALL(devgrp, state) \
431 ((devgrp) << 5 | (state))
432
433#define MSG_BROADCAST_REF MSG_BROADCAST_ALL
434#define MSG_BROADCAST_PROV MSG_BROADCAST_ALL
435#define MSG_BROADCAST__CLK_RST MSG_BROADCAST_ALL
313/*----------------------------------------------------------------------*/ 436/*----------------------------------------------------------------------*/
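A short sketch of what the MSG_SINGULAR() and MSG_BROADCAST_ALL() helpers above evaluate to; DEV_GRP_P1 and RES_VDD1 are assumed to be the device-group and resource identifiers defined elsewhere in this header, and only the RES_STATE_* values listed above are used.

static void example_compose_power_messages(u16 *singular, u16 *broadcast)
{
	/* ask resource VDD1, as seen by processor group P1, to enter SLEEP */
	*singular = MSG_SINGULAR(DEV_GRP_P1, RES_VDD1, RES_STATE_SLEEP);

	/* broadcast: bring every resource owned by the group back to ACTIVE */
	*broadcast = MSG_BROADCAST_ALL(DEV_GRP_P1, RES_STATE_ACTIVE);
}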
314 437
438struct twl4030_clock_init_data {
439 bool ck32k_lowpwr_enable;
440};
441
315struct twl4030_bci_platform_data { 442struct twl4030_bci_platform_data {
316 int *battery_tmp_tbl; 443 int *battery_tmp_tbl;
317 unsigned int tblsize; 444 unsigned int tblsize;
@@ -391,12 +518,15 @@ struct twl4030_resconfig {
391 u8 devgroup; /* Processor group that Power resource belongs to */ 518 u8 devgroup; /* Processor group that Power resource belongs to */
392 u8 type; /* Power resource addressed, 6 / broadcast message */ 519 u8 type; /* Power resource addressed, 6 / broadcast message */
393 u8 type2; /* Power resource addressed, 3 / broadcast message */ 520 u8 type2; /* Power resource addressed, 3 / broadcast message */
521 u8 remap_off; /* off state remapping */
522 u8 remap_sleep; /* sleep state remapping */
394}; 523};
395 524
396struct twl4030_power_data { 525struct twl4030_power_data {
397 struct twl4030_script **scripts; 526 struct twl4030_script **scripts;
398 unsigned num; 527 unsigned num;
399 struct twl4030_resconfig *resource_config; 528 struct twl4030_resconfig *resource_config;
529#define TWL4030_RESCONFIG_UNDEF ((u8)-1)
400}; 530};
401 531
402extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts); 532extern void twl4030_power_init(struct twl4030_power_data *triton2_scripts);
@@ -421,6 +551,7 @@ struct twl4030_codec_data {
421 551
422struct twl4030_platform_data { 552struct twl4030_platform_data {
423 unsigned irq_base, irq_end; 553 unsigned irq_base, irq_end;
554 struct twl4030_clock_init_data *clock;
424 struct twl4030_bci_platform_data *bci; 555 struct twl4030_bci_platform_data *bci;
425 struct twl4030_gpio_platform_data *gpio; 556 struct twl4030_gpio_platform_data *gpio;
426 struct twl4030_madc_platform_data *madc; 557 struct twl4030_madc_platform_data *madc;
@@ -429,19 +560,31 @@ struct twl4030_platform_data {
429 struct twl4030_power_data *power; 560 struct twl4030_power_data *power;
430 struct twl4030_codec_data *codec; 561 struct twl4030_codec_data *codec;
431 562
432 /* LDO regulators */ 563 /* Common LDO regulators for TWL4030/TWL6030 */
433 struct regulator_init_data *vdac; 564 struct regulator_init_data *vdac;
565 struct regulator_init_data *vaux1;
566 struct regulator_init_data *vaux2;
567 struct regulator_init_data *vaux3;
568 /* TWL4030 LDO regulators */
434 struct regulator_init_data *vpll1; 569 struct regulator_init_data *vpll1;
435 struct regulator_init_data *vpll2; 570 struct regulator_init_data *vpll2;
436 struct regulator_init_data *vmmc1; 571 struct regulator_init_data *vmmc1;
437 struct regulator_init_data *vmmc2; 572 struct regulator_init_data *vmmc2;
438 struct regulator_init_data *vsim; 573 struct regulator_init_data *vsim;
439 struct regulator_init_data *vaux1;
440 struct regulator_init_data *vaux2;
441 struct regulator_init_data *vaux3;
442 struct regulator_init_data *vaux4; 574 struct regulator_init_data *vaux4;
443 575 struct regulator_init_data *vio;
444 /* REVISIT more to come ... _nothing_ should be hard-wired */ 576 struct regulator_init_data *vdd1;
577 struct regulator_init_data *vdd2;
578 struct regulator_init_data *vintana1;
579 struct regulator_init_data *vintana2;
580 struct regulator_init_data *vintdig;
581 /* TWL6030 LDO regulators */
582 struct regulator_init_data *vmmc;
583 struct regulator_init_data *vpp;
584 struct regulator_init_data *vusim;
585 struct regulator_init_data *vana;
586 struct regulator_init_data *vcxio;
587 struct regulator_init_data *vusb;
445}; 588};
446 589
447/*----------------------------------------------------------------------*/ 590/*----------------------------------------------------------------------*/
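A hedged sketch of how board code might fill in the regulator fields of twl4030_platform_data above; the constraint values are invented, the regulator_init_data/regulation_constraints fields are assumed from <linux/regulator/machine.h>, and TWL4030_IRQ_BASE/TWL4030_IRQ_END stand in for board-specific interrupt numbers.

/* needs <linux/i2c/twl.h> and <linux/regulator/machine.h> */
static struct regulator_init_data board_vmmc1 = {
	.constraints = {
		.min_uV			= 1850000,
		.max_uV			= 3150000,
		.valid_modes_mask	= REGULATOR_MODE_NORMAL
					| REGULATOR_MODE_STANDBY,
		.valid_ops_mask		= REGULATOR_CHANGE_VOLTAGE
					| REGULATOR_CHANGE_MODE
					| REGULATOR_CHANGE_STATUS,
	},
};

static struct twl4030_platform_data board_twl_data = {
	.irq_base	= TWL4030_IRQ_BASE,	/* board specific */
	.irq_end	= TWL4030_IRQ_END,
	.vmmc1		= &board_vmmc1,
};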
@@ -473,6 +616,7 @@ int twl4030_sih_setup(int module);
473 * VIO is generally fixed. 616 * VIO is generally fixed.
474 */ 617 */
475 618
619/* TWL4030 SMPS/LDO's */
476/* EXTERNAL dc-to-dc buck converters */ 620/* EXTERNAL dc-to-dc buck converters */
477#define TWL4030_REG_VDD1 0 621#define TWL4030_REG_VDD1 0
478#define TWL4030_REG_VDD2 1 622#define TWL4030_REG_VDD2 1
@@ -499,4 +643,31 @@ int twl4030_sih_setup(int module);
499#define TWL4030_REG_VUSB1V8 18 643#define TWL4030_REG_VUSB1V8 18
500#define TWL4030_REG_VUSB3V1 19 644#define TWL4030_REG_VUSB3V1 19
501 645
646/* TWL6030 SMPS/LDO's */
647/* EXTERNAL dc-to-dc buck converter controllable via SR */
648#define TWL6030_REG_VDD1 30
649#define TWL6030_REG_VDD2 31
650#define TWL6030_REG_VDD3 32
651
652/* Non SR compliant dc-to-dc buck converters */
653#define TWL6030_REG_VMEM 33
654#define TWL6030_REG_V2V1 34
655#define TWL6030_REG_V1V29 35
656#define TWL6030_REG_V1V8 36
657
658/* EXTERNAL LDOs */
659#define TWL6030_REG_VAUX1_6030 37
660#define TWL6030_REG_VAUX2_6030 38
661#define TWL6030_REG_VAUX3_6030 39
662#define TWL6030_REG_VMMC 40
663#define TWL6030_REG_VPP 41
664#define TWL6030_REG_VUSIM 42
665#define TWL6030_REG_VANA 43
666#define TWL6030_REG_VCXIO 44
667#define TWL6030_REG_VDAC 45
668#define TWL6030_REG_VUSB 46
669
670/* INTERNAL LDOs */
671#define TWL6030_REG_VRTC 47
672
502#endif /* End of __TWL4030_H */ 673#endif /* End of __TWL4030_H */
diff --git a/include/linux/i8042.h b/include/linux/i8042.h
index 60c3360ef6ad..9bf6870ee5f4 100644
--- a/include/linux/i8042.h
+++ b/include/linux/i8042.h
@@ -39,6 +39,10 @@ void i8042_lock_chip(void);
39void i8042_unlock_chip(void); 39void i8042_unlock_chip(void);
40int i8042_command(unsigned char *param, int command); 40int i8042_command(unsigned char *param, int command);
41bool i8042_check_port_owner(const struct serio *); 41bool i8042_check_port_owner(const struct serio *);
42int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
43 struct serio *serio));
44int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
45 struct serio *serio));
42 46
43#else 47#else
44 48
@@ -52,7 +56,7 @@ void i8042_unlock_chip(void)
52 56
53int i8042_command(unsigned char *param, int command) 57int i8042_command(unsigned char *param, int command)
54{ 58{
55 return -ENOSYS; 59 return -ENODEV;
56} 60}
57 61
58bool i8042_check_port_owner(const struct serio *serio) 62bool i8042_check_port_owner(const struct serio *serio)
@@ -60,6 +64,18 @@ bool i8042_check_port_owner(const struct serio *serio)
60 return false; 64 return false;
61} 65}
62 66
67int i8042_install_filter(bool (*filter)(unsigned char data, unsigned char str,
68 struct serio *serio))
69{
70 return -ENODEV;
71}
72
73int i8042_remove_filter(bool (*filter)(unsigned char data, unsigned char str,
74 struct serio *serio))
75{
76 return -ENODEV;
77}
78
63#endif 79#endif
64 80
65#endif 81#endif
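A small sketch of how a driver might use the new i8042 filter hooks above; the function name and the scancode it intercepts are invented, and str is the i8042 status byte that accompanies data.

static bool example_i8042_filter(unsigned char data, unsigned char str,
				 struct serio *serio)
{
	/* returning true swallows the byte before serio ever sees it */
	if (data == 0x65)	/* hypothetical vendor hotkey scancode */
		return true;
	return false;
}

static int __init example_filter_init(void)
{
	return i8042_install_filter(example_i8042_filter);
}

static void __exit example_filter_exit(void)
{
	i8042_remove_filter(example_i8042_filter);
}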
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e3f2a4c25f6..99dc6d5cf7e5 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -13,18 +13,14 @@
13#include <linux/fs.h> 13#include <linux/fs.h>
14struct linux_binprm; 14struct linux_binprm;
15 15
16#define IMA_COUNT_UPDATE 1
17#define IMA_COUNT_LEAVE 0
18
19#ifdef CONFIG_IMA 16#ifdef CONFIG_IMA
20extern int ima_bprm_check(struct linux_binprm *bprm); 17extern int ima_bprm_check(struct linux_binprm *bprm);
21extern int ima_inode_alloc(struct inode *inode); 18extern int ima_inode_alloc(struct inode *inode);
22extern void ima_inode_free(struct inode *inode); 19extern void ima_inode_free(struct inode *inode);
23extern int ima_path_check(struct path *path, int mask, int update_counts); 20extern int ima_path_check(struct path *path, int mask);
24extern void ima_file_free(struct file *file); 21extern void ima_file_free(struct file *file);
25extern int ima_file_mmap(struct file *file, unsigned long prot); 22extern int ima_file_mmap(struct file *file, unsigned long prot);
26extern void ima_counts_get(struct file *file); 23extern void ima_counts_get(struct file *file);
27extern void ima_counts_put(struct path *path, int mask);
28 24
29#else 25#else
30static inline int ima_bprm_check(struct linux_binprm *bprm) 26static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -42,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode)
42 return; 38 return;
43} 39}
44 40
45static inline int ima_path_check(struct path *path, int mask, int update_counts) 41static inline int ima_path_check(struct path *path, int mask)
46{ 42{
47 return 0; 43 return 0;
48} 44}
@@ -62,9 +58,5 @@ static inline void ima_counts_get(struct file *file)
62 return; 58 return;
63} 59}
64 60
65static inline void ima_counts_put(struct path *path, int mask)
66{
67 return;
68}
69#endif /* CONFIG_IMA_H */ 61#endif /* CONFIG_IMA_H */
70#endif /* _LINUX_IMA_H */ 62#endif /* _LINUX_IMA_H */
diff --git a/include/linux/init.h b/include/linux/init.h
index ff8bde520d03..ab1d31f9352b 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -149,6 +149,8 @@ void prepare_namespace(void);
149 149
150extern void (*late_time_init)(void); 150extern void (*late_time_init)(void);
151 151
152extern int initcall_debug;
153
152#endif 154#endif
153 155
154#ifndef MODULE 156#ifndef MODULE
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 8d10aa7fd4c9..abec69b63d7e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -165,7 +165,7 @@ extern struct cred init_cred;
165 .journal_info = NULL, \ 165 .journal_info = NULL, \
166 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ 166 .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
167 .fs_excl = ATOMIC_INIT(0), \ 167 .fs_excl = ATOMIC_INIT(0), \
168 .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ 168 .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
169 .timer_slack_ns = 50000, /* 50 usec default slack */ \ 169 .timer_slack_ns = 50000, /* 50 usec default slack */ \
170 .pids = { \ 170 .pids = { \
171 [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ 171 [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 4f0a72a9740c..9310c699a37d 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -332,6 +332,7 @@ struct intel_iommu {
332#ifdef CONFIG_INTR_REMAP 332#ifdef CONFIG_INTR_REMAP
333 struct ir_table *ir_table; /* Interrupt remapping info */ 333 struct ir_table *ir_table; /* Interrupt remapping info */
334#endif 334#endif
335 int node;
335}; 336};
336 337
337static inline void __iommu_flush_cache( 338static inline void __iommu_flush_cache(
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index a63235996309..78ef023227d4 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -4,32 +4,6 @@
4#include <linux/radix-tree.h> 4#include <linux/radix-tree.h>
5#include <linux/rcupdate.h> 5#include <linux/rcupdate.h>
6 6
7/*
8 * This is the per-process anticipatory I/O scheduler state.
9 */
10struct as_io_context {
11 spinlock_t lock;
12
13 void (*dtor)(struct as_io_context *aic); /* destructor */
14 void (*exit)(struct as_io_context *aic); /* called on task exit */
15
16 unsigned long state;
17 atomic_t nr_queued; /* queued reads & sync writes */
18 atomic_t nr_dispatched; /* number of requests gone to the drivers */
19
20 /* IO History tracking */
21 /* Thinktime */
22 unsigned long last_end_request;
23 unsigned long ttime_total;
24 unsigned long ttime_samples;
25 unsigned long ttime_mean;
26 /* Layout pattern */
27 unsigned int seek_samples;
28 sector_t last_request_pos;
29 u64 seek_total;
30 sector_t seek_mean;
31};
32
33struct cfq_queue; 7struct cfq_queue;
34struct cfq_io_context { 8struct cfq_io_context {
35 void *key; 9 void *key;
@@ -78,7 +52,6 @@ struct io_context {
78 unsigned long last_waited; /* Time last woken after wait for request */ 52 unsigned long last_waited; /* Time last woken after wait for request */
79 int nr_batch_requests; /* Number of requests left in the batch */ 53 int nr_batch_requests; /* Number of requests left in the batch */
80 54
81 struct as_io_context *aic;
82 struct radix_tree_root radix_root; 55 struct radix_tree_root radix_root;
83 struct hlist_head cic_list; 56 struct hlist_head cic_list;
84 void *ioc_data; 57 void *ioc_data;
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h
index 3b068e5b5671..64d1b638745d 100644
--- a/include/linux/iommu-helper.h
+++ b/include/linux/iommu-helper.h
@@ -14,14 +14,11 @@ static inline unsigned long iommu_device_max_index(unsigned long size,
14extern int iommu_is_span_boundary(unsigned int index, unsigned int nr, 14extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
15 unsigned long shift, 15 unsigned long shift,
16 unsigned long boundary_size); 16 unsigned long boundary_size);
17extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len);
18extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, 17extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
19 unsigned long start, unsigned int nr, 18 unsigned long start, unsigned int nr,
20 unsigned long shift, 19 unsigned long shift,
21 unsigned long boundary_size, 20 unsigned long boundary_size,
22 unsigned long align_mask); 21 unsigned long align_mask);
23extern void iommu_area_free(unsigned long *map, unsigned long start,
24 unsigned int nr);
25 22
26extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len, 23extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len,
27 unsigned long io_page_size); 24 unsigned long io_page_size);
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 83aa81297ea3..7129504e053d 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -126,11 +126,11 @@ extern int allocate_resource(struct resource *root, struct resource *new,
126int adjust_resource(struct resource *res, resource_size_t start, 126int adjust_resource(struct resource *res, resource_size_t start,
127 resource_size_t size); 127 resource_size_t size);
128resource_size_t resource_alignment(struct resource *res); 128resource_size_t resource_alignment(struct resource *res);
129static inline resource_size_t resource_size(struct resource *res) 129static inline resource_size_t resource_size(const struct resource *res)
130{ 130{
131 return res->end - res->start + 1; 131 return res->end - res->start + 1;
132} 132}
133static inline unsigned long resource_type(struct resource *res) 133static inline unsigned long resource_type(const struct resource *res)
134{ 134{
135 return res->flags & IORESOURCE_TYPE_BITS; 135 return res->flags & IORESOURCE_TYPE_BITS;
136} 136}
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e408722a84c7..07baa38bce37 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -87,7 +87,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
87/* default values */ 87/* default values */
88#define DFLT_QUEUESMAX 256 /* max number of message queues */ 88#define DFLT_QUEUESMAX 256 /* max number of message queues */
89#define DFLT_MSGMAX 10 /* max number of messages in each queue */ 89#define DFLT_MSGMAX 10 /* max number of messages in each queue */
90#define HARD_MSGMAX (131072/sizeof(void *)) 90#define HARD_MSGMAX (32768*sizeof(void *)/4)
91#define DFLT_MSGSIZEMAX 8192 /* max message size */ 91#define DFLT_MSGSIZEMAX 8192 /* max message size */
92#else 92#else
93static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; } 93static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
diff --git a/include/linux/irq.h b/include/linux/irq.h
index a287cfc0b1a6..451481c082b5 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -192,7 +192,7 @@ struct irq_desc {
192 unsigned int irq_count; /* For detecting broken IRQs */ 192 unsigned int irq_count; /* For detecting broken IRQs */
193 unsigned long last_unhandled; /* Aging timer for unhandled count */ 193 unsigned long last_unhandled; /* Aging timer for unhandled count */
194 unsigned int irqs_unhandled; 194 unsigned int irqs_unhandled;
195 spinlock_t lock; 195 raw_spinlock_t lock;
196#ifdef CONFIG_SMP 196#ifdef CONFIG_SMP
197 cpumask_var_t affinity; 197 cpumask_var_t affinity;
198 unsigned int node; 198 unsigned int node;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index f1011f7f3d41..638ce4554c76 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -653,6 +653,7 @@ struct transaction_s
653 * waiting for it to finish. 653 * waiting for it to finish.
654 */ 654 */
655 unsigned int t_synchronous_commit:1; 655 unsigned int t_synchronous_commit:1;
656 unsigned int t_flushed_data_blocks:1;
656 657
657 /* 658 /*
658 * For use by the filesystem to store fs-specific data 659 * For use by the filesystem to store fs-specific data
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 792274269f2b..d8e9b3d1c23c 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -107,18 +107,6 @@ static inline void print_symbol(const char *fmt, unsigned long addr)
107 __builtin_extract_return_addr((void *)addr)); 107 __builtin_extract_return_addr((void *)addr));
108} 108}
109 109
110/*
111 * Pretty-print a function pointer. This function is deprecated.
112 * Please use the "%pF" vsprintf format instead.
113 */
114static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr)
115{
116#if defined(CONFIG_IA64) || defined(CONFIG_PPC64)
117 addr = *(void **)addr;
118#endif
119 print_symbol(fmt, (unsigned long)addr);
120}
121
122static inline void print_ip_sym(unsigned long ip) 110static inline void print_ip_sym(unsigned long ip)
123{ 111{
124 printk("[<%p>] %pS\n", (void *) ip, (void *) ip); 112 printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
new file mode 100644
index 000000000000..bd92a89f4b0a
--- /dev/null
+++ b/include/linux/kernel-page-flags.h
@@ -0,0 +1,46 @@
1#ifndef LINUX_KERNEL_PAGE_FLAGS_H
2#define LINUX_KERNEL_PAGE_FLAGS_H
3
4/*
5 * Stable page flag bits exported to user space
6 */
7
8#define KPF_LOCKED 0
9#define KPF_ERROR 1
10#define KPF_REFERENCED 2
11#define KPF_UPTODATE 3
12#define KPF_DIRTY 4
13#define KPF_LRU 5
14#define KPF_ACTIVE 6
15#define KPF_SLAB 7
16#define KPF_WRITEBACK 8
17#define KPF_RECLAIM 9
18#define KPF_BUDDY 10
19
20/* 11-20: new additions in 2.6.31 */
21#define KPF_MMAP 11
22#define KPF_ANON 12
23#define KPF_SWAPCACHE 13
24#define KPF_SWAPBACKED 14
25#define KPF_COMPOUND_HEAD 15
26#define KPF_COMPOUND_TAIL 16
27#define KPF_HUGE 17
28#define KPF_UNEVICTABLE 18
29#define KPF_HWPOISON 19
30#define KPF_NOPAGE 20
31
32#define KPF_KSM 21
33
34/* kernel hacking assistance
35 * WARNING: subject to change, never rely on them!
36 */
37#define KPF_RESERVED 32
38#define KPF_MLOCKED 33
39#define KPF_MAPPEDTODISK 34
40#define KPF_PRIVATE 35
41#define KPF_PRIVATE_2 36
42#define KPF_OWNER_PRIVATE 37
43#define KPF_ARCH 38
44#define KPF_UNCACHED 39
45
46#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
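These bits are what userspace reads back from /proc/kpageflags (one little-endian u64 per page frame, per Documentation/vm/pagemap.txt); the snippet below is a small userspace sketch written under that assumption, not part of the header itself.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/kernel-page-flags.h>

/* fetch the exported flag word for one page frame number */
static int read_kpageflags(unsigned long pfn, uint64_t *flags)
{
	int fd = open("/proc/kpageflags", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = pread(fd, flags, sizeof(*flags), pfn * sizeof(*flags));
	close(fd);
	return n == (ssize_t)sizeof(*flags) ? 0 : -1;
}

int main(void)
{
	uint64_t flags;

	if (read_kpageflags(4096, &flags) == 0)
		printf("pfn 4096:%s%s\n",
		       flags & (1ULL << KPF_LRU) ? " lru" : "",
		       flags & (1ULL << KPF_SLAB) ? " slab" : "");
	return 0;
}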
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fa4c590cf12..328bca609b9b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -251,10 +251,10 @@ extern int printk_delay_msec;
251 * Print a one-time message (analogous to WARN_ONCE() et al): 251 * Print a one-time message (analogous to WARN_ONCE() et al):
252 */ 252 */
253#define printk_once(x...) ({ \ 253#define printk_once(x...) ({ \
254 static bool __print_once = true; \ 254 static bool __print_once; \
255 \ 255 \
256 if (__print_once) { \ 256 if (!__print_once) { \
257 __print_once = false; \ 257 __print_once = true; \
258 printk(x); \ 258 printk(x); \
259 } \ 259 } \
260}) 260})
@@ -397,15 +397,58 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
397 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) 397 printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
398#elif defined(CONFIG_DYNAMIC_DEBUG) 398#elif defined(CONFIG_DYNAMIC_DEBUG)
399/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ 399/* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */
400#define pr_debug(fmt, ...) do { \ 400#define pr_debug(fmt, ...) \
401 dynamic_pr_debug(fmt, ##__VA_ARGS__); \ 401 dynamic_pr_debug(fmt, ##__VA_ARGS__)
402 } while (0)
403#else 402#else
404#define pr_debug(fmt, ...) \ 403#define pr_debug(fmt, ...) \
405 ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) 404 ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
406#endif 405#endif
407 406
408/* 407/*
408 * ratelimited messages with local ratelimit_state,
409 * no local ratelimit_state used in the !PRINTK case
410 */
411#ifdef CONFIG_PRINTK
412#define printk_ratelimited(fmt, ...) ({ \
413 static struct ratelimit_state _rs = { \
414 .interval = DEFAULT_RATELIMIT_INTERVAL, \
415 .burst = DEFAULT_RATELIMIT_BURST, \
416 }; \
417 \
418 if (!__ratelimit(&_rs)) \
419 printk(fmt, ##__VA_ARGS__); \
420})
421#else
422/* No effect, but we still get type checking even in the !PRINTK case: */
423#define printk_ratelimited printk
424#endif
425
426#define pr_emerg_ratelimited(fmt, ...) \
427 printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
428#define pr_alert_ratelimited(fmt, ...) \
429 printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
430#define pr_crit_ratelimited(fmt, ...) \
431 printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
432#define pr_err_ratelimited(fmt, ...) \
433 printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
434#define pr_warning_ratelimited(fmt, ...) \
435 printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
436#define pr_notice_ratelimited(fmt, ...) \
437 printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
438#define pr_info_ratelimited(fmt, ...) \
439 printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
440/* no pr_cont_ratelimited, don't do that... */
441/* If you are writing a driver, please use dev_dbg instead */
442#if defined(DEBUG)
443#define pr_debug_ratelimited(fmt, ...) \
444 printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
445#else
446#define pr_debug_ratelimited(fmt, ...) \
447 ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \
448 ##__VA_ARGS__); 0; })
449#endif
450
451/*
409 * General tracing related utility functions - trace_printk(), 452 * General tracing related utility functions - trace_printk(),
410 * tracing_on/tracing_off and tracing_start()/tracing_stop 453 * tracing_on/tracing_off and tracing_start()/tracing_stop
411 * 454 *
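A brief usage sketch of the rate-limited helpers added above; the driver name, message and error variable are illustrative. Each call site gets its own ratelimit_state, so every message is throttled independently to DEFAULT_RATELIMIT_BURST per DEFAULT_RATELIMIT_INTERVAL.

static void example_report(int err)
{
	if (err)
		/* noisy error paths stop flooding the log once the burst is spent */
		pr_err_ratelimited("example: transfer failed: %d\n", err);
}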
@@ -492,6 +535,8 @@ extern int
492__trace_printk(unsigned long ip, const char *fmt, ...) 535__trace_printk(unsigned long ip, const char *fmt, ...)
493 __attribute__ ((format (printf, 2, 3))); 536 __attribute__ ((format (printf, 2, 3)));
494 537
538extern void trace_dump_stack(void);
539
495/* 540/*
496 * The double __builtin_constant_p is because gcc will give us an error 541 * The double __builtin_constant_p is because gcc will give us an error
497 * if we try to allocate the static variable to fmt if it is not a 542 * if we try to allocate the static variable to fmt if it is not a
@@ -525,6 +570,7 @@ trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2)));
525static inline void tracing_start(void) { } 570static inline void tracing_start(void) { }
526static inline void tracing_stop(void) { } 571static inline void tracing_stop(void) { }
527static inline void ftrace_off_permanent(void) { } 572static inline void ftrace_off_permanent(void) { }
573static inline void trace_dump_stack(void) { }
528static inline int 574static inline int
529trace_printk(const char *fmt, ...) 575trace_printk(const char *fmt, ...)
530{ 576{
@@ -688,6 +734,10 @@ struct sysinfo {
688/* Force a compilation error if condition is constant and true */ 734/* Force a compilation error if condition is constant and true */
689#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)])) 735#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
690 736
737/* Force a compilation error if a constant expression is not a power of 2 */
738#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
739 BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
740
691/* Force a compilation error if condition is true, but also produce a 741/* Force a compilation error if condition is true, but also produce a
692 result (of value 0 and type size_t), so the expression can be used 742 result (of value 0 and type size_t), so the expression can be used
693 e.g. in a structure initializer (or where-ever else comma expressions 743 e.g. in a structure initializer (or where-ever else comma expressions
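A tiny sketch of the new power-of-two build check above; like BUILD_BUG_ON(), it has to sit inside a function body, and EXAMPLE_RING_SIZE is an invented constant.

#define EXAMPLE_RING_SIZE 64

static inline void example_ring_size_check(void)
{
	/* breaks the build if the size is ever changed to a non power of two */
	BUILD_BUG_ON_NOT_POWER_OF_2(EXAMPLE_RING_SIZE);
}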
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index adc34f2c6eff..c356b6914ffd 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -206,6 +206,8 @@ extern size_t vmcoreinfo_max_size;
206 206
207int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, 207int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
208 unsigned long long *crash_size, unsigned long long *crash_base); 208 unsigned long long *crash_size, unsigned long long *crash_base);
209int crash_shrink_memory(unsigned long new_size);
210size_t crash_get_memory_size(void);
209 211
210#else /* !CONFIG_KEXEC */ 212#else /* !CONFIG_KEXEC */
211struct pt_regs; 213struct pt_regs;
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index ad6bdf5a5970..6f6c5f300af6 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
@@ -1,6 +1,7 @@
1/* 1/*
2 * A simple kernel FIFO implementation. 2 * A generic kernel FIFO implementation.
3 * 3 *
4 * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
4 * Copyright (C) 2004 Stelian Pop <stelian@popies.net> 5 * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -18,6 +19,25 @@
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * 20 *
20 */ 21 */
22
23/*
24 * How to port drivers to the new generic fifo API:
25 *
26 * - Change the declaration of the "struct kfifo *" object into an
27 *   in-place "struct kfifo" object
28 * - Init the in-place object with kfifo_alloc() or kfifo_init()
29 *   Note: the address of the in-place "struct kfifo" object must be
30 *   passed as the first argument to these functions
31 * - Replace uses of __kfifo_put with kfifo_in and uses of __kfifo_get
32 *   with kfifo_out
33 * - Replace uses of kfifo_put with kfifo_in_locked and uses of kfifo_get
34 *   with kfifo_out_locked
35 *   Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc
36 *   must now be passed to kfifo_in_locked and kfifo_out_locked
37 *   as the last parameter
38 * - All formerly named __kfifo_* functions have been renamed to kfifo_*
39 */
40
21#ifndef _LINUX_KFIFO_H 41#ifndef _LINUX_KFIFO_H
22#define _LINUX_KFIFO_H 42#define _LINUX_KFIFO_H
23 43
@@ -29,26 +49,82 @@ struct kfifo {
29 unsigned int size; /* the size of the allocated buffer */ 49 unsigned int size; /* the size of the allocated buffer */
30 unsigned int in; /* data is added at offset (in % size) */ 50 unsigned int in; /* data is added at offset (in % size) */
31 unsigned int out; /* data is extracted from off. (out % size) */ 51 unsigned int out; /* data is extracted from off. (out % size) */
32 spinlock_t *lock; /* protects concurrent modifications */
33}; 52};
34 53
35extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, 54/*
36 gfp_t gfp_mask, spinlock_t *lock); 55 * Macros for declaration and initialization of the kfifo datatype
37extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, 56 */
38 spinlock_t *lock); 57
58/* helper macro */
59#define __kfifo_initializer(s, b) \
60 (struct kfifo) { \
61 .size = s, \
62 .in = 0, \
63 .out = 0, \
64 .buffer = b \
65 }
66
67/**
68 * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer
69 * @name: name of the declared kfifo datatype
70 * @size: size of the fifo buffer. Must be a power of two.
71 *
72 * Note1: the macro can be used inside struct or union declaration
73 * Note2: the macro creates two objects:
74 * A kfifo object with the given name and a buffer for the kfifo
75 * object named name##kfifo_buffer
76 */
77#define DECLARE_KFIFO(name, size) \
78union { \
79 struct kfifo name; \
80 unsigned char name##kfifo_buffer[size + sizeof(struct kfifo)]; \
81}
82
83/**
84 * INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO
85 * @name: name of the declared kfifo datatype
86 */
87#define INIT_KFIFO(name) \
88 name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \
89 sizeof(struct kfifo), name##kfifo_buffer)
90
91/**
92 * DEFINE_KFIFO - macro to define and initialize a kfifo
93 * @name: name of the declared kfifo datatype
94 * @size: size of the fifo buffer. Must be a power of two.
95 *
96 * Note1: the macro can be used for global and local kfifo data type variables
97 * Note2: the macro creates two objects:
98 * A kfifo object with the given name and a buffer for the kfifo
99 * object named name##kfifo_buffer
100 */
101#define DEFINE_KFIFO(name, size) \
102 unsigned char name##kfifo_buffer[size]; \
103 struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer)
104
105#undef __kfifo_initializer
106
107extern void kfifo_init(struct kfifo *fifo, void *buffer,
108 unsigned int size);
109extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
110 gfp_t gfp_mask);
39extern void kfifo_free(struct kfifo *fifo); 111extern void kfifo_free(struct kfifo *fifo);
40extern unsigned int __kfifo_put(struct kfifo *fifo, 112extern unsigned int kfifo_in(struct kfifo *fifo,
41 const unsigned char *buffer, unsigned int len); 113 const void *from, unsigned int len);
42extern unsigned int __kfifo_get(struct kfifo *fifo, 114extern __must_check unsigned int kfifo_out(struct kfifo *fifo,
43 unsigned char *buffer, unsigned int len); 115 void *to, unsigned int len);
116extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo,
117 void *to, unsigned int len, unsigned offset);
44 118
45/** 119/**
46 * __kfifo_reset - removes the entire FIFO contents, no locking version 120 * kfifo_initialized - Check if kfifo is initialized.
47 * @fifo: the fifo to be emptied. 121 * @fifo: fifo to check
122 * Return %true if FIFO is initialized, otherwise %false.
123 * Assumes the fifo was 0 before.
48 */ 124 */
49static inline void __kfifo_reset(struct kfifo *fifo) 125static inline bool kfifo_initialized(struct kfifo *fifo)
50{ 126{
51 fifo->in = fifo->out = 0; 127 return fifo->buffer != 0;
52} 128}
53 129
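A minimal sketch of the in-place kfifo API declared above: DEFINE_KFIFO() creates a fifo plus its buffer statically, and kfifo_in()/kfifo_out() move bytes without any locking (sufficient for a single producer and a single consumer). The names and sizes are illustrative.

DEFINE_KFIFO(example_fifo, 128);	/* size must be a power of two */

static unsigned int example_kfifo_basic(void)
{
	unsigned char in[4] = { 1, 2, 3, 4 };
	unsigned char out[4];

	kfifo_in(&example_fifo, in, sizeof(in));
	return kfifo_out(&example_fifo, out, sizeof(out));	/* bytes copied */
}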
54/** 130/**
@@ -57,96 +133,484 @@ static inline void __kfifo_reset(struct kfifo *fifo)
57 */ 133 */
58static inline void kfifo_reset(struct kfifo *fifo) 134static inline void kfifo_reset(struct kfifo *fifo)
59{ 135{
60 unsigned long flags; 136 fifo->in = fifo->out = 0;
137}
61 138
62 spin_lock_irqsave(fifo->lock, flags); 139/**
140 * kfifo_reset_out - skip FIFO contents
141 * @fifo: the fifo to be emptied.
142 */
143static inline void kfifo_reset_out(struct kfifo *fifo)
144{
145 smp_mb();
146 fifo->out = fifo->in;
147}
63 148
64 __kfifo_reset(fifo); 149/**
150 * kfifo_size - returns the size of the fifo in bytes
151 * @fifo: the fifo to be used.
152 */
153static inline __must_check unsigned int kfifo_size(struct kfifo *fifo)
154{
155 return fifo->size;
156}
65 157
66 spin_unlock_irqrestore(fifo->lock, flags); 158/**
159 * kfifo_len - returns the number of used bytes in the FIFO
160 * @fifo: the fifo to be used.
161 */
162static inline unsigned int kfifo_len(struct kfifo *fifo)
163{
164 register unsigned int out;
165
166 out = fifo->out;
167 smp_rmb();
168 return fifo->in - out;
67} 169}
68 170
69/** 171/**
70 * kfifo_put - puts some data into the FIFO 172 * kfifo_is_empty - returns true if the fifo is empty
71 * @fifo: the fifo to be used. 173 * @fifo: the fifo to be used.
72 * @buffer: the data to be added. 174 */
73 * @len: the length of the data to be added. 175static inline __must_check int kfifo_is_empty(struct kfifo *fifo)
176{
177 return fifo->in == fifo->out;
178}
179
180/**
181 * kfifo_is_full - returns true if the fifo is full
182 * @fifo: the fifo to be used.
183 */
184static inline __must_check int kfifo_is_full(struct kfifo *fifo)
185{
186 return kfifo_len(fifo) == kfifo_size(fifo);
187}
188
189/**
190 * kfifo_avail - returns the number of bytes available in the FIFO
191 * @fifo: the fifo to be used.
192 */
193static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo)
194{
195 return kfifo_size(fifo) - kfifo_len(fifo);
196}
197
198/**
199 * kfifo_in_locked - puts some data into the FIFO using a spinlock for locking
200 * @fifo: the fifo to be used.
201 * @from: the data to be added.
202 * @n: the length of the data to be added.
203 * @lock: pointer to the spinlock to use for locking.
74 * 204 *
75 * This function copies at most @len bytes from the @buffer into 205 * This function copies at most @n bytes from the @from buffer into
76 * the FIFO depending on the free space, and returns the number of 206 * the FIFO depending on the free space, and returns the number of
77 * bytes copied. 207 * bytes copied.
78 */ 208 */
79static inline unsigned int kfifo_put(struct kfifo *fifo, 209static inline unsigned int kfifo_in_locked(struct kfifo *fifo,
80 const unsigned char *buffer, unsigned int len) 210 const void *from, unsigned int n, spinlock_t *lock)
81{ 211{
82 unsigned long flags; 212 unsigned long flags;
83 unsigned int ret; 213 unsigned int ret;
84 214
85 spin_lock_irqsave(fifo->lock, flags); 215 spin_lock_irqsave(lock, flags);
86 216
87 ret = __kfifo_put(fifo, buffer, len); 217 ret = kfifo_in(fifo, from, n);
88 218
89 spin_unlock_irqrestore(fifo->lock, flags); 219 spin_unlock_irqrestore(lock, flags);
90 220
91 return ret; 221 return ret;
92} 222}
93 223
94/** 224/**
95 * kfifo_get - gets some data from the FIFO 225 * kfifo_out_locked - gets some data from the FIFO using a spinlock for locking
96 * @fifo: the fifo to be used. 226 * @fifo: the fifo to be used.
97 * @buffer: where the data must be copied. 227 * @to: where the data must be copied.
98 * @len: the size of the destination buffer. 228 * @n: the size of the destination buffer.
229 * @lock: pointer to the spinlock to use for locking.
99 * 230 *
100 * This function copies at most @len bytes from the FIFO into the 231 * This function copies at most @n bytes from the FIFO into the
101 * @buffer and returns the number of copied bytes. 232 * @to buffer and returns the number of copied bytes.
102 */ 233 */
103static inline unsigned int kfifo_get(struct kfifo *fifo, 234static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo,
104 unsigned char *buffer, unsigned int len) 235 void *to, unsigned int n, spinlock_t *lock)
105{ 236{
106 unsigned long flags; 237 unsigned long flags;
107 unsigned int ret; 238 unsigned int ret;
108 239
109 spin_lock_irqsave(fifo->lock, flags); 240 spin_lock_irqsave(lock, flags);
241
242 ret = kfifo_out(fifo, to, n);
243
244 spin_unlock_irqrestore(lock, flags);
245
246 return ret;
247}
248
249extern void kfifo_skip(struct kfifo *fifo, unsigned int len);
250
251extern __must_check int kfifo_from_user(struct kfifo *fifo,
252 const void __user *from, unsigned int n, unsigned *lenout);
253
254extern __must_check int kfifo_to_user(struct kfifo *fifo,
255 void __user *to, unsigned int n, unsigned *lenout);
256
257/*
258 * __kfifo_add_out internal helper function for updating the out offset
259 */
260static inline void __kfifo_add_out(struct kfifo *fifo,
261 unsigned int off)
262{
263 smp_mb();
264 fifo->out += off;
265}
266
267/*
268 * __kfifo_add_in internal helper function for updating the in offset
269 */
270static inline void __kfifo_add_in(struct kfifo *fifo,
271 unsigned int off)
272{
273 smp_wmb();
274 fifo->in += off;
275}
276
277/*
278 * __kfifo_off internal helper function for calculating the index of a
279 * given offset
280 */
281static inline unsigned int __kfifo_off(struct kfifo *fifo, unsigned int off)
282{
283 return off & (fifo->size - 1);
284}
285
286/*
287 * __kfifo_peek_n internal helper function for determining the length of
288 * the next record in the fifo
289 */
290static inline unsigned int __kfifo_peek_n(struct kfifo *fifo,
291 unsigned int recsize)
292{
293#define __KFIFO_GET(fifo, off, shift) \
294 ((fifo)->buffer[__kfifo_off((fifo), (fifo)->out+(off))] << (shift))
295
296 unsigned int l;
297
298 l = __KFIFO_GET(fifo, 0, 0);
299
300 if (--recsize)
301 l |= __KFIFO_GET(fifo, 1, 8);
302
303 return l;
304#undef __KFIFO_GET
305}
306
307/*
308 * __kfifo_poke_n internal helper function for storing the length of
309 * the next record into the fifo
310 */
311static inline void __kfifo_poke_n(struct kfifo *fifo,
312 unsigned int recsize, unsigned int n)
313{
314#define __KFIFO_PUT(fifo, off, val, shift) \
315 ( \
316 (fifo)->buffer[__kfifo_off((fifo), (fifo)->in+(off))] = \
317 (unsigned char)((val) >> (shift)) \
318 )
319
320 __KFIFO_PUT(fifo, 0, n, 0);
321
322 if (--recsize)
323 __KFIFO_PUT(fifo, 1, n, 8);
324#undef __KFIFO_PUT
325}
326
327/*
328 * __kfifo_in_... internal functions for putting data into the fifo
329 * do not call them directly, use kfifo_in_rec() instead
330 */
331extern unsigned int __kfifo_in_n(struct kfifo *fifo,
332 const void *from, unsigned int n, unsigned int recsize);
110 333
111 ret = __kfifo_get(fifo, buffer, len); 334extern unsigned int __kfifo_in_generic(struct kfifo *fifo,
335 const void *from, unsigned int n, unsigned int recsize);
112 336
113 /* 337static inline unsigned int __kfifo_in_rec(struct kfifo *fifo,
114 * optimization: if the FIFO is empty, set the indices to 0 338 const void *from, unsigned int n, unsigned int recsize)
115 * so we don't wrap the next time 339{
116 */ 340 unsigned int ret;
117 if (fifo->in == fifo->out)
118 fifo->in = fifo->out = 0;
119 341
120 spin_unlock_irqrestore(fifo->lock, flags); 342 ret = __kfifo_in_n(fifo, from, n, recsize);
121 343
344 if (likely(ret == 0)) {
345 if (recsize)
346 __kfifo_poke_n(fifo, recsize, n);
347 __kfifo_add_in(fifo, n + recsize);
348 }
122 return ret; 349 return ret;
123} 350}
124 351
125/** 352/**
126 * __kfifo_len - returns the number of bytes available in the FIFO, no locking version 353 * kfifo_in_rec - puts some record data into the FIFO
127 * @fifo: the fifo to be used. 354 * @fifo: the fifo to be used.
355 * @from: the data to be added.
356 * @n: the length of the data to be added.
357 * @recsize: size of record field
358 *
359 * This function copies @n bytes from the @from into the FIFO and returns
360 * the number of bytes which cannot be copied.
361 * A returned value greater than the @n value means that the record doesn't
362 * fit into the buffer.
363 *
364 * Note that with only one concurrent reader and one concurrent
365 * writer, you don't need extra locking to use these functions.
366 */
367static inline __must_check unsigned int kfifo_in_rec(struct kfifo *fifo,
368 void *from, unsigned int n, unsigned int recsize)
369{
370 if (!__builtin_constant_p(recsize))
371 return __kfifo_in_generic(fifo, from, n, recsize);
372 return __kfifo_in_rec(fifo, from, n, recsize);
373}
374
375/*
376 * __kfifo_out_... internal functions for getting data from the fifo
377 * do not call them directly, use kfifo_out_rec() instead
128 */ 378 */
129static inline unsigned int __kfifo_len(struct kfifo *fifo) 379extern unsigned int __kfifo_out_n(struct kfifo *fifo,
380 void *to, unsigned int reclen, unsigned int recsize);
381
382extern unsigned int __kfifo_out_generic(struct kfifo *fifo,
383 void *to, unsigned int n,
384 unsigned int recsize, unsigned int *total);
385
386static inline unsigned int __kfifo_out_rec(struct kfifo *fifo,
387 void *to, unsigned int n, unsigned int recsize,
388 unsigned int *total)
130{ 389{
131 return fifo->in - fifo->out; 390 unsigned int l;
391
392 if (!recsize) {
393 l = n;
394 if (total)
395 *total = l;
396 } else {
397 l = __kfifo_peek_n(fifo, recsize);
398 if (total)
399 *total = l;
400 if (n < l)
401 return l;
402 }
403
404 return __kfifo_out_n(fifo, to, l, recsize);
132} 405}
133 406
134/** 407/**
135 * kfifo_len - returns the number of bytes available in the FIFO 408 * kfifo_out_rec - gets some record data from the FIFO
136 * @fifo: the fifo to be used. 409 * @fifo: the fifo to be used.
410 * @to: where the data must be copied.
411 * @n: the size of the destination buffer.
412 * @recsize: size of record field
413 * @total: pointer where the total number of copied bytes should be stored
414 *
415 * This function copies at most @n bytes from the FIFO to @to and returns the
416 * number of bytes which cannot be copied.
417 * A returned value greater than the @n value means that the record doesn't
418 * fit into the @to buffer.
419 *
420 * Note that with only one concurrent reader and one concurrent
421 * writer, you don't need extra locking to use these functions.
137 */ 422 */
138static inline unsigned int kfifo_len(struct kfifo *fifo) 423static inline __must_check unsigned int kfifo_out_rec(struct kfifo *fifo,
424 void *to, unsigned int n, unsigned int recsize,
425 unsigned int *total)
426
139{ 427{
140 unsigned long flags; 428 if (!__builtin_constant_p(recsize))
141 unsigned int ret; 429 return __kfifo_out_generic(fifo, to, n, recsize, total);
430 return __kfifo_out_rec(fifo, to, n, recsize, total);
431}
142 432
143 spin_lock_irqsave(fifo->lock, flags); 433/*
434 * __kfifo_from_user_... internal functions for transferring data from user space
435 * into the fifo. do not call them directly, use kfifo_from_user_rec() instead
436 */
437extern unsigned int __kfifo_from_user_n(struct kfifo *fifo,
438 const void __user *from, unsigned int n, unsigned int recsize);
439
440extern unsigned int __kfifo_from_user_generic(struct kfifo *fifo,
441 const void __user *from, unsigned int n, unsigned int recsize);
144 442
145 ret = __kfifo_len(fifo); 443static inline unsigned int __kfifo_from_user_rec(struct kfifo *fifo,
444 const void __user *from, unsigned int n, unsigned int recsize)
445{
446 unsigned int ret;
146 447
147 spin_unlock_irqrestore(fifo->lock, flags); 448 ret = __kfifo_from_user_n(fifo, from, n, recsize);
148 449
450 if (likely(ret == 0)) {
451 if (recsize)
452 __kfifo_poke_n(fifo, recsize, n);
453 __kfifo_add_in(fifo, n + recsize);
454 }
149 return ret; 455 return ret;
150} 456}
151 457
458/**
459 * kfifo_from_user_rec - puts some data from user space into the FIFO
460 * @fifo: the fifo to be used.
461 * @from: pointer to the data to be added.
462 * @n: the length of the data to be added.
463 * @recsize: size of record field
464 *
465 * This function copies @n bytes from the @from into the
466 * FIFO and returns the number of bytes which cannot be copied.
467 *
468 * If the returned value is equal to or less than the @n value, the copy_from_user()
469 * function has failed. Otherwise the record doesn't fit into the buffer.
470 *
471 * Note that with only one concurrent reader and one concurrent
472 * writer, you don't need extra locking to use these functions.
473 */
474static inline __must_check unsigned int kfifo_from_user_rec(struct kfifo *fifo,
475 const void __user *from, unsigned int n, unsigned int recsize)
476{
477 if (!__builtin_constant_p(recsize))
478 return __kfifo_from_user_generic(fifo, from, n, recsize);
479 return __kfifo_from_user_rec(fifo, from, n, recsize);
480}
481
482/*
483 * __kfifo_to_user_... internal functions for transferring fifo data into user space
484 * do not call them directly, use kfifo_to_user_rec() instead
485 */
486extern unsigned int __kfifo_to_user_n(struct kfifo *fifo,
487 void __user *to, unsigned int n, unsigned int reclen,
488 unsigned int recsize);
489
490extern unsigned int __kfifo_to_user_generic(struct kfifo *fifo,
491 void __user *to, unsigned int n, unsigned int recsize,
492 unsigned int *total);
493
494static inline unsigned int __kfifo_to_user_rec(struct kfifo *fifo,
495 void __user *to, unsigned int n,
496 unsigned int recsize, unsigned int *total)
497{
498 unsigned int l;
499
500 if (!recsize) {
501 l = n;
502 if (total)
503 *total = l;
504 } else {
505 l = __kfifo_peek_n(fifo, recsize);
506 if (total)
507 *total = l;
508 if (n < l)
509 return l;
510 }
511
512 return __kfifo_to_user_n(fifo, to, n, l, recsize);
513}
514
515/**
516 * kfifo_to_user_rec - gets data from the FIFO and write it to user space
517 * @fifo: the fifo to be used.
518 * @to: where the data must be copied.
519 * @n: the size of the destination buffer.
520 * @recsize: size of record field
521 * @total: pointer where the total number of copied bytes should be stored
522 *
523 * This function copies at most @n bytes from the FIFO to the @to.
524 * In case of an error, the function returns the number of bytes which cannot
525 * be copied.
526 * If the returned value is equal to or less than the @n value, the copy_to_user()
527 * function has failed. Otherwise the record doesn't fit into the @to buffer.
528 *
529 * Note that with only one concurrent reader and one concurrent
530 * writer, you don't need extra locking to use these functions.
531 */
532static inline __must_check unsigned int kfifo_to_user_rec(struct kfifo *fifo,
533 void __user *to, unsigned int n, unsigned int recsize,
534 unsigned int *total)
535{
536 if (!__builtin_constant_p(recsize))
537 return __kfifo_to_user_generic(fifo, to, n, recsize, total);
538 return __kfifo_to_user_rec(fifo, to, n, recsize, total);
539}
540
541/*
542 * __kfifo_peek_... internal functions for peeking into the next fifo record
543 * do not call them directly, use kfifo_peek_rec() instead
544 */
545extern unsigned int __kfifo_peek_generic(struct kfifo *fifo,
546 unsigned int recsize);
547
548/**
549 * kfifo_peek_rec - gets the size of the next FIFO record data
550 * @fifo: the fifo to be used.
551 * @recsize: size of record field
552 *
553 * This function returns the size of the next FIFO record in number of bytes
554 */
555static inline __must_check unsigned int kfifo_peek_rec(struct kfifo *fifo,
556 unsigned int recsize)
557{
558 if (!__builtin_constant_p(recsize))
559 return __kfifo_peek_generic(fifo, recsize);
560 if (!recsize)
561 return kfifo_len(fifo);
562 return __kfifo_peek_n(fifo, recsize);
563}
564
565/*
566 * __kfifo_skip_... internal functions for skipping the next fifo record
567 * do not call them directly, use kfifo_skip_rec() instead
568 */
569extern void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize);
570
571static inline void __kfifo_skip_rec(struct kfifo *fifo,
572 unsigned int recsize)
573{
574 unsigned int l;
575
576 if (recsize) {
577 l = __kfifo_peek_n(fifo, recsize);
578
579 if (l + recsize <= kfifo_len(fifo)) {
580 __kfifo_add_out(fifo, l + recsize);
581 return;
582 }
583 }
584 kfifo_reset_out(fifo);
585}
586
587/**
588 * kfifo_skip_rec - skip the next fifo out record
589 * @fifo: the fifo to be used.
590 * @recsize: size of record field
591 *
592 * This function skips the next FIFO record
593 */
594static inline void kfifo_skip_rec(struct kfifo *fifo,
595 unsigned int recsize)
596{
597 if (!__builtin_constant_p(recsize))
598 __kfifo_skip_generic(fifo, recsize);
599 else
600 __kfifo_skip_rec(fifo, recsize);
601}
602
603/**
604 * kfifo_avail_rec - returns the number of bytes available in a record FIFO
605 * @fifo: the fifo to be used.
606 * @recsize: size of record field
607 */
608static inline __must_check unsigned int kfifo_avail_rec(struct kfifo *fifo,
609 unsigned int recsize)
610{
611 unsigned int l = kfifo_size(fifo) - kfifo_len(fifo);
612
613 return (l > recsize) ? l - recsize : 0;
614}
615
152#endif 616#endif
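A hedged sketch of the locked and record-oriented helpers above for a fifo shared between contexts; the spinlock, the buffer size and the one-byte record length (recsize = 1, so records of up to 255 bytes) are illustrative choices.

static struct kfifo example_shared_fifo;
static DEFINE_SPINLOCK(example_fifo_lock);

static int example_fifo_setup(void)
{
	/* dynamically allocated buffer; the size must be a power of two */
	return kfifo_alloc(&example_shared_fifo, 256, GFP_KERNEL);
}

static unsigned int example_locked_io(const void *msg, unsigned int len)
{
	unsigned char scratch[64];

	/* the spinlock is now passed per call instead of at init time */
	kfifo_in_locked(&example_shared_fifo, msg, len, &example_fifo_lock);
	return kfifo_out_locked(&example_shared_fifo, scratch,
				sizeof(scratch), &example_fifo_lock);
}

static unsigned int example_record_io(void *rec, unsigned int len)
{
	unsigned int total;

	/* recsize = 1: each record is stored behind a one-byte length header */
	if (kfifo_in_rec(&example_shared_fifo, rec, len, 1))
		return 0;	/* record did not fit */
	return kfifo_out_rec(&example_shared_fifo, rec, len, 1, &total);
}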
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 6adcc297e354..19ec41a183f5 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -29,8 +29,7 @@ struct pt_regs;
29 * 29 *
30 * On some architectures it is required to skip a breakpoint 30 * On some architectures it is required to skip a breakpoint
31 * exception when it occurs after a breakpoint has been removed. 31 * exception when it occurs after a breakpoint has been removed.
32 * This can be implemented in the architecture specific portion of 32 * This can be implemented in the architecture specific portion of kgdb.
33 * for kgdb.
34 */ 33 */
35extern int kgdb_skipexception(int exception, struct pt_regs *regs); 34extern int kgdb_skipexception(int exception, struct pt_regs *regs);
36 35
@@ -65,7 +64,7 @@ struct uart_port;
65/** 64/**
66 * kgdb_breakpoint - compiled in breakpoint 65 * kgdb_breakpoint - compiled in breakpoint
67 * 66 *
68 * This will be impelmented a static inline per architecture. This 67 * This will be implemented as a static inline per architecture. This
69 * function is called by the kgdb core to execute an architecture 68 * function is called by the kgdb core to execute an architecture
70 * specific trap to cause kgdb to enter the exception processing. 69 * specific trap to cause kgdb to enter the exception processing.
71 * 70 *
@@ -190,7 +189,7 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
190 * @flags: Current IRQ state 189 * @flags: Current IRQ state
191 * 190 *
192 * On SMP systems, we need to get the attention of the other CPUs 191 * On SMP systems, we need to get the attention of the other CPUs
193 * and get them be in a known state. This should do what is needed 192 * and get them into a known state. This should do what is needed
194 * to get the other CPUs to call kgdb_wait(). Note that on some arches, 193 * to get the other CPUs to call kgdb_wait(). Note that on some arches,
195 * the NMI approach is not used for rounding up all the CPUs. For example, 194 * the NMI approach is not used for rounding up all the CPUs. For example,
196 * in case of MIPS, smp_call_function() is used to roundup CPUs. In 195 * in case of MIPS, smp_call_function() is used to roundup CPUs. In
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index e880d4cf9e22..08d7dc4ddf40 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -36,6 +36,56 @@ int kmemcheck_hide_addr(unsigned long address);
36 36
37bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size); 37bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
38 38
39/*
40 * Bitfield annotations
41 *
42 * How to use: If you have a struct using bitfields, for example
43 *
44 * struct a {
45 * int x:8, y:8;
46 * };
47 *
48 * then this should be rewritten as
49 *
50 * struct a {
51 * kmemcheck_bitfield_begin(flags);
52 * int x:8, y:8;
53 * kmemcheck_bitfield_end(flags);
54 * };
55 *
56 * Now the "flags_begin" and "flags_end" members may be used to refer to the
57 * beginning and end, respectively, of the bitfield (and things like
58 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
59 * fields should be annotated:
60 *
61 * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
62 * kmemcheck_annotate_bitfield(a, flags);
63 */
64#define kmemcheck_bitfield_begin(name) \
65 int name##_begin[0];
66
67#define kmemcheck_bitfield_end(name) \
68 int name##_end[0];
69
70#define kmemcheck_annotate_bitfield(ptr, name) \
71 do { \
72 int _n; \
73 \
74 if (!ptr) \
75 break; \
76 \
77 _n = (long) &((ptr)->name##_end) \
78 - (long) &((ptr)->name##_begin); \
79 MAYBE_BUILD_BUG_ON(_n < 0); \
80 \
81 kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
82 } while (0)
83
84#define kmemcheck_annotate_variable(var) \
85 do { \
86 kmemcheck_mark_initialized(&(var), sizeof(var)); \
87 } while (0) \
88
39#else 89#else
40#define kmemcheck_enabled 0 90#define kmemcheck_enabled 0
41 91
@@ -106,60 +156,16 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
106 return true; 156 return true;
107} 157}
108 158
109#endif /* CONFIG_KMEMCHECK */ 159#define kmemcheck_bitfield_begin(name)
110 160#define kmemcheck_bitfield_end(name)
111/* 161#define kmemcheck_annotate_bitfield(ptr, name) \
112 * Bitfield annotations 162 do { \
113 * 163 } while (0)
114 * How to use: If you have a struct using bitfields, for example
115 *
116 * struct a {
117 * int x:8, y:8;
118 * };
119 *
120 * then this should be rewritten as
121 *
122 * struct a {
123 * kmemcheck_bitfield_begin(flags);
124 * int x:8, y:8;
125 * kmemcheck_bitfield_end(flags);
126 * };
127 *
128 * Now the "flags_begin" and "flags_end" members may be used to refer to the
129 * beginning and end, respectively, of the bitfield (and things like
130 * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
131 * fields should be annotated:
132 *
133 * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
134 * kmemcheck_annotate_bitfield(a, flags);
135 *
136 * Note: We provide the same definitions for both kmemcheck and non-
137 * kmemcheck kernels. This makes it harder to introduce accidental errors. It
138 * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
139 */
140#define kmemcheck_bitfield_begin(name) \
141 int name##_begin[0];
142
143#define kmemcheck_bitfield_end(name) \
144 int name##_end[0];
145 164
146#define kmemcheck_annotate_bitfield(ptr, name) \ 165#define kmemcheck_annotate_variable(var) \
147 do { \ 166 do { \
148 int _n; \
149 \
150 if (!ptr) \
151 break; \
152 \
153 _n = (long) &((ptr)->name##_end) \
154 - (long) &((ptr)->name##_begin); \
155 MAYBE_BUILD_BUG_ON(_n < 0); \
156 \
157 kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
158 } while (0) 167 } while (0)
159 168
160#define kmemcheck_annotate_variable(var) \ 169#endif /* CONFIG_KMEMCHECK */
161 do { \
162 kmemcheck_mark_initialized(&(var), sizeof(var)); \
163 } while (0) \
164 170
165#endif /* LINUX_KMEMCHECK_H */ 171#endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
index 3c7497d46ee9..99d9a6766f7e 100644
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset,
32 size_t size) __ref; 32 size_t size) __ref;
33extern void kmemleak_not_leak(const void *ptr) __ref; 33extern void kmemleak_not_leak(const void *ptr) __ref;
34extern void kmemleak_ignore(const void *ptr) __ref; 34extern void kmemleak_ignore(const void *ptr) __ref;
35extern void kmemleak_scan_area(const void *ptr, unsigned long offset, 35extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
36 size_t length, gfp_t gfp) __ref;
37extern void kmemleak_no_scan(const void *ptr) __ref; 36extern void kmemleak_no_scan(const void *ptr) __ref;
38 37
39static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, 38static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr)
84static inline void kmemleak_ignore(const void *ptr) 83static inline void kmemleak_ignore(const void *ptr)
85{ 84{
86} 85}
87static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, 86static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
88 size_t length, gfp_t gfp)
89{ 87{
90} 88}
91static inline void kmemleak_erase(void **ptr) 89static inline void kmemleak_erase(void **ptr)
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
new file mode 100644
index 000000000000..e32aa268efac
--- /dev/null
+++ b/include/linux/kmsg_dump.h
@@ -0,0 +1,60 @@
1/*
2 * linux/include/kmsg_dump.h
3 *
4 * Copyright (C) 2009 Net Insight AB
5 *
6 * Author: Simon Kagstrom <simon.kagstrom@netinsight.net>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file COPYING in the main directory of this archive
10 * for more details.
11 */
12#ifndef _LINUX_KMSG_DUMP_H
13#define _LINUX_KMSG_DUMP_H
14
15#include <linux/list.h>
16
17enum kmsg_dump_reason {
18 KMSG_DUMP_OOPS,
19 KMSG_DUMP_PANIC,
20};
21
22/**
23 * struct kmsg_dumper - kernel crash message dumper structure
24 * @dump: The callback which gets called on crashes. The buffer is passed
25 * as two sections, where s1 (length l1) contains the older
26 * messages and s2 (length l2) contains the newer.
27 * @list: Entry in the dumper list (private)
28 * @registered: Flag that specifies if this is already registered
29 */
30struct kmsg_dumper {
31 void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
32 const char *s1, unsigned long l1,
33 const char *s2, unsigned long l2);
34 struct list_head list;
35 int registered;
36};
37
38#ifdef CONFIG_PRINTK
39void kmsg_dump(enum kmsg_dump_reason reason);
40
41int kmsg_dump_register(struct kmsg_dumper *dumper);
42
43int kmsg_dump_unregister(struct kmsg_dumper *dumper);
44#else
45static inline void kmsg_dump(enum kmsg_dump_reason reason)
46{
47}
48
49static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
50{
51 return -EINVAL;
52}
53
54static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper)
55{
56 return -EINVAL;
57}
58#endif
59
60#endif /* _LINUX_KMSG_DUMP_H */
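A minimal sketch of a dumper built on this new interface; example_device_write() is a hypothetical helper, not an in-tree function.

/* Illustrative kmsg dumper: push both log sections to some device. */
#include <linux/kmsg_dump.h>

static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	/* s1 holds the older messages, s2 the newer ones */
	example_device_write(s1, l1);	/* hypothetical helper */
	example_device_write(s2, l2);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_dumper_init(void)
{
	return kmsg_dump_register(&example_dumper);
}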
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index a485c14ecd5d..43bdab769fc3 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -9,8 +9,12 @@
 
 #include <linux/bitops.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/rmap.h>
 #include <linux/sched.h>
-#include <linux/vmstat.h>
+
+struct stable_node;
+struct mem_cgroup;
 
 #ifdef CONFIG_KSM
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
@@ -34,46 +38,110 @@ static inline void ksm_exit(struct mm_struct *mm)
 /*
  * A KSM page is one of those write-protected "shared pages" or "merged pages"
  * which KSM maps into multiple mms, wherever identical anonymous page content
- * is found in VM_MERGEABLE vmas.  It's a PageAnon page, with NULL anon_vma.
+ * is found in VM_MERGEABLE vmas.  It's a PageAnon page, pointing not to any
+ * anon_vma, but to that page's node of the stable tree.
  */
 static inline int PageKsm(struct page *page)
 {
-	return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
+	return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+}
+
+static inline struct stable_node *page_stable_node(struct page *page)
+{
+	return PageKsm(page) ? page_rmapping(page) : NULL;
+}
+
+static inline void set_page_stable_node(struct page *page,
+					struct stable_node *stable_node)
+{
+	page->mapping = (void *)stable_node +
+				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
 }
 
 /*
- * But we have to avoid the checking which page_add_anon_rmap() performs.
+ * When do_swap_page() first faults in from swap what used to be a KSM page,
+ * no problem, it will be assigned to this vma's anon_vma; but thereafter,
+ * it might be faulted into a different anon_vma (or perhaps to a different
+ * offset in the same anon_vma).  do_swap_page() cannot do all the locking
+ * needed to reconstitute a cross-anon_vma KSM page: for now it has to make
+ * a copy, and leave remerging the pages to a later pass of ksmd.
+ *
+ * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE,
+ * but what if the vma was unmerged while the page was swapped out?
  */
-static inline void page_add_ksm_rmap(struct page *page)
+struct page *ksm_does_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address);
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address)
 {
-	if (atomic_inc_and_test(&page->_mapcount)) {
-		page->mapping = (void *) PAGE_MAPPING_ANON;
-		__inc_zone_page_state(page, NR_ANON_PAGES);
-	}
+	struct anon_vma *anon_vma = page_anon_vma(page);
+
+	if (!anon_vma ||
+	    (anon_vma == vma->anon_vma &&
+	     page->index == linear_page_index(vma, address)))
+		return page;
+
+	return ksm_does_need_to_copy(page, vma, address);
 }
+
+int page_referenced_ksm(struct page *page,
+			struct mem_cgroup *memcg, unsigned long *vm_flags);
+int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
+int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
+		struct vm_area_struct *, unsigned long, void *), void *arg);
+void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+
 #else  /* !CONFIG_KSM */
 
+static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+{
+	return 0;
+}
+
+static inline void ksm_exit(struct mm_struct *mm)
+{
+}
+
+static inline int PageKsm(struct page *page)
+{
+	return 0;
+}
+
+#ifdef CONFIG_MMU
 static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags)
 {
 	return 0;
 }
 
-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
+static inline struct page *ksm_might_need_to_copy(struct page *page,
+			struct vm_area_struct *vma, unsigned long address)
+{
+	return page;
+}
+
+static inline int page_referenced_ksm(struct page *page,
+			struct mem_cgroup *memcg, unsigned long *vm_flags)
 {
 	return 0;
 }
 
-static inline void ksm_exit(struct mm_struct *mm)
+static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
 {
+	return 0;
 }
 
-static inline int PageKsm(struct page *page)
+static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*,
+		struct vm_area_struct *, unsigned long, void *), void *arg)
 {
 	return 0;
 }
 
-/* No stub required for page_add_ksm_rmap(page) */
+static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
+{
+}
+#endif /* CONFIG_MMU */
 #endif /* !CONFIG_KSM */
 
-#endif
+#endif /* __LINUX_KSM_H */
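The long comment above explains when a swapped-in KSM page must be copied; the following is only a rough sketch of the calling pattern a do_swap_page()-style path might use, with all locking and rmap work omitted.

/* Sketch only: decide whether a swapped-in page can be reused directly. */
static struct page *example_swapin_page(struct page *page,
					struct vm_area_struct *vma,
					unsigned long address)
{
	/* Returns the page itself when it can be mapped here as-is, or a
	 * fresh copy when the old KSM page cannot be tied to this vma's
	 * anon_vma; ksmd may re-merge the copy on a later pass. */
	return ksm_might_need_to_copy(page, vma, address);
}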
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 2d241da07236..a24de0b1858e 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -496,6 +496,7 @@ struct kvm_ioeventfd {
 #define KVM_CAP_VCPU_EVENTS 41
 #endif
 #define KVM_CAP_S390_PSW 42
+#define KVM_CAP_PPC_SEGSTATE 43
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h
index afc9f9fd70f5..2618aa9063bc 100644
--- a/include/linux/leds-lp3944.h
+++ b/include/linux/leds-lp3944.h
@@ -12,9 +12,6 @@
 #ifndef __LINUX_LEDS_LP3944_H
 #define __LINUX_LEDS_LP3944_H
 
-#include <linux/leds.h>
-#include <linux/workqueue.h>
-
 #define LP3944_LED0 0
 #define LP3944_LED1 1
 #define LP3944_LED2 2
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index 96eea90f01a8..f158eb1149aa 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -32,7 +32,7 @@ struct pca9532_led {
32 struct i2c_client *client; 32 struct i2c_client *client;
33 char *name; 33 char *name;
34 struct led_classdev ldev; 34 struct led_classdev ldev;
35 struct work_struct work; 35 struct work_struct work;
36 enum pca9532_type type; 36 enum pca9532_type type;
37 enum pca9532_state state; 37 enum pca9532_state state;
38}; 38};
diff --git a/include/linux/leds-regulator.h b/include/linux/leds-regulator.h
new file mode 100644
index 000000000000..5a8eb389aab8
--- /dev/null
+++ b/include/linux/leds-regulator.h
@@ -0,0 +1,46 @@
1/*
2 * leds-regulator.h - platform data structure for regulator driven LEDs.
3 *
4 * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef __LINUX_LEDS_REGULATOR_H
13#define __LINUX_LEDS_REGULATOR_H
14
15/*
16 * Use "vled" as supply id when declaring the regulator consumer:
17 *
18 * static struct regulator_consumer_supply pcap_regulator_VVIB_consumers [] = {
19 *	{ .dev_name = "leds-regulator.0", .supply = "vled" },
20 * };
21 *
22 * If you have several regulator driven LEDs, you can append a numerical id to
23 * .dev_name as done above, and use the same id when declaring the platform
24 * device:
25 *
26 * static struct led_regulator_platform_data a780_vibrator_data = {
27 * .name = "a780::vibrator",
28 * };
29 *
30 * static struct platform_device a780_vibrator = {
31 * .name = "leds-regulator",
32 * .id = 0,
33 * .dev = {
34 * .platform_data = &a780_vibrator_data,
35 * },
36 * };
37 */
38
39#include <linux/leds.h>
40
41struct led_regulator_platform_data {
42 char *name; /* LED name as expected by LED class */
43 enum led_brightness brightness; /* initial brightness value */
44};
45
46#endif /* __LINUX_LEDS_REGULATOR_H */
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 6a9c4ddd3d95..73112250862c 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -354,6 +354,9 @@ enum {
 	/* max tries if error condition is still set after ->error_handler */
 	ATA_EH_MAX_TRIES	= 5,
 
+	/* sometimes resuming a link requires several retries */
+	ATA_LINK_RESUME_TRIES	= 5,
+
 	/* how hard are we gonna try to probe/recover devices */
 	ATA_PROBE_MAX_TRIES	= 3,
 	ATA_EH_DEV_TRIES	= 3,
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h
index 3cc2f2c53e4c..f1ca0dcc1628 100644
--- a/include/linux/lis3lv02d.h
+++ b/include/linux/lis3lv02d.h
@@ -43,6 +43,21 @@ struct lis3lv02d_platform_data {
 #define LIS3_WAKEUP_Z_HI	(1 << 5)
 	unsigned char wakeup_flags;
 	unsigned char wakeup_thresh;
+#define LIS3_NO_MAP		0
+#define LIS3_DEV_X		1
+#define LIS3_DEV_Y		2
+#define LIS3_DEV_Z		3
+#define LIS3_INV_DEV_X	       -1
+#define LIS3_INV_DEV_Y	       -2
+#define LIS3_INV_DEV_Z	       -3
+	s8 axis_x;
+	s8 axis_y;
+	s8 axis_z;
+	int (*setup_resources)(void);
+	int (*release_resources)(void);
+	/* Limits for selftest are specified in chip data sheet */
+	s16 st_min_limits[3]; /* min pass limit x, y, z */
+	s16 st_max_limits[3]; /* max pass limit x, y, z */
 };
 
 #endif /* __LIS3LV02D_H_ */
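A hedged example of board code filling in the new platform-data fields; the values are arbitrary and a real board would use its own sensor orientation and the selftest limits from the part's data sheet.

/* Illustrative platform data: remap axes for a rotated, upside-down sensor. */
static struct lis3lv02d_platform_data example_lis3_pdata = {
	.axis_x		= LIS3_DEV_Y,		/* board X is the chip's Y */
	.axis_y		= LIS3_INV_DEV_X,	/* board Y is the chip's X, inverted */
	.axis_z		= LIS3_INV_DEV_Z,	/* chip mounted upside down */
	/* selftest pass window, nominally from the data sheet */
	.st_min_limits	= { 120, 120, 140 },
	.st_max_limits	= { 550, 550, 750 },
};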
diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h
new file mode 100644
index 000000000000..1a2df2efb771
--- /dev/null
+++ b/include/linux/list_sort.h
@@ -0,0 +1,11 @@
1#ifndef _LINUX_LIST_SORT_H
2#define _LINUX_LIST_SORT_H
3
4#include <linux/types.h>
5
6struct list_head;
7
8void list_sort(void *priv, struct list_head *head,
9 int (*cmp)(void *priv, struct list_head *a,
10 struct list_head *b));
11#endif
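A short example of the comparison callback this interface expects; struct item is made up for illustration.

#include <linux/list.h>
#include <linux/list_sort.h>

struct item {
	struct list_head node;
	int key;
};

/* Return <0, 0 or >0; "priv" is passed straight through from list_sort(). */
static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct item *ia = list_entry(a, struct item, node);
	struct item *ib = list_entry(b, struct item, node);

	return ia->key - ib->key;
}

/* usage, with "head" being a list of struct item: list_sort(NULL, &head, item_cmp); */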
diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 2442e3f3d033..ef82b8fcbddb 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -54,6 +54,7 @@ extern u64 __init lmb_phys_mem_size(void);
 extern u64 lmb_end_of_DRAM(void);
 extern void __init lmb_enforce_memory_limit(u64 memory_limit);
 extern int __init lmb_is_reserved(u64 addr);
+extern int lmb_is_region_reserved(u64 base, u64 size);
 extern int lmb_find(struct lmb_property *res);
 
 extern void lmb_dump_all(void);
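A small hedged sketch of how the new query helper pairs with the existing lmb_reserve() call.

/* Sketch: only reserve the region if nothing in it is reserved already. */
static void __init example_claim_region(u64 base, u64 size)
{
	if (!lmb_is_region_reserved(base, size))
		lmb_reserve(base, size);
}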
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bf9213b2db8f..1f9b119f4ace 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -54,6 +54,11 @@ extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
 extern void mem_cgroup_del_lru(struct page *page);
 extern void mem_cgroup_move_lists(struct page *page,
 				  enum lru_list from, enum lru_list to);
+
+/* For coalescing uncharges, to reduce memcg overhead */
+extern void mem_cgroup_uncharge_start(void);
+extern void mem_cgroup_uncharge_end(void);
+
 extern void mem_cgroup_uncharge_page(struct page *page);
 extern void mem_cgroup_uncharge_cache_page(struct page *page);
 extern int mem_cgroup_shmem_charge_fallback(struct page *page,
@@ -68,6 +73,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
+extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
 extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
 
 static inline
@@ -80,6 +86,8 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
 	return cgroup == mem;
 }
 
+extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem);
+
 extern int
 mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
 extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
@@ -117,7 +125,7 @@ static inline bool mem_cgroup_disabled(void)
 }
 
 extern bool mem_cgroup_oom_called(struct task_struct *task);
-void mem_cgroup_update_mapped_file_stat(struct page *page, int val);
+void mem_cgroup_update_file_mapped(struct page *page, int val);
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 						gfp_t gfp_mask, int nid,
 						int zid);
@@ -151,6 +159,14 @@ static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
 {
 }
 
+static inline void mem_cgroup_uncharge_start(void)
+{
+}
+
+static inline void mem_cgroup_uncharge_end(void)
+{
+}
+
 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
 }
@@ -189,6 +205,11 @@ mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
 {
 }
 
+static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
+{
+	return NULL;
+}
+
 static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
 {
 	return 1;
@@ -200,6 +221,11 @@ static inline int task_in_mem_cgroup(struct task_struct *task,
 	return 1;
 }
 
+static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
+{
+	return NULL;
+}
+
 static inline int
 mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
 {
@@ -274,7 +300,7 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void mem_cgroup_update_mapped_file_stat(struct page *page,
+static inline void mem_cgroup_update_file_mapped(struct page *page,
 							int val)
 {
 }
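The new begin/end pair batches per-page uncharges; a minimal sketch of the intended calling pattern, with the page-freeing loop purely illustrative.

/* Sketch: coalesce memcg uncharging across a batch of page releases. */
static void example_release_pages(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);	/* uncharges accumulate... */
	mem_cgroup_uncharge_end();			/* ...and are applied in one batch */
}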
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 37fa19b34ef5..1adfe779eb99 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -50,6 +50,19 @@ struct memory_notify {
 	int status_change_nid;
 };
 
+/*
+ * During pageblock isolation, count the number of pages within the
+ * range [start_pfn, start_pfn + nr_pages) which are owned by code
+ * in the notifier chain.
+ */
+#define MEM_ISOLATE_COUNT	(1<<0)
+
+struct memory_isolate_notify {
+	unsigned long start_pfn;	/* Start of range to check */
+	unsigned int nr_pages;		/* # pages in range to check */
+	unsigned int pages_found;	/* # pages owned found by callbacks */
+};
+
 struct notifier_block;
 struct mem_section;
 
@@ -76,14 +89,28 @@ static inline int memory_notify(unsigned long val, void *v)
 {
 	return 0;
 }
+static inline int register_memory_isolate_notifier(struct notifier_block *nb)
+{
+	return 0;
+}
+static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
+{
+}
+static inline int memory_isolate_notify(unsigned long val, void *v)
+{
+	return 0;
+}
 #else
 extern int register_memory_notifier(struct notifier_block *nb);
 extern void unregister_memory_notifier(struct notifier_block *nb);
+extern int register_memory_isolate_notifier(struct notifier_block *nb);
+extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
 extern int register_new_memory(int, struct mem_section *);
 extern int unregister_memory_section(struct mem_section *);
 extern int memory_dev_init(void);
 extern int remove_memory_block(unsigned long, struct mem_section *, int);
 extern int memory_notify(unsigned long val, void *v);
+extern int memory_isolate_notify(unsigned long val, void *v);
 extern struct memory_block *find_memory_block(struct mem_section *);
 #define CONFIG_MEM_BLOCK_SIZE	(PAGES_PER_SECTION<<PAGE_SHIFT)
 enum mem_add_context { BOOT, HOTPLUG };
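A hedged sketch of a memory-isolate notifier as described by the comment in the first hunk; example_owns_pfn() is a hypothetical ownership test.

/* Illustrative callback: report how many pages in the range we own. */
#include <linux/memory.h>
#include <linux/notifier.h>

static int example_isolate_cb(struct notifier_block *nb,
			      unsigned long action, void *arg)
{
	struct memory_isolate_notify *mn = arg;
	unsigned long pfn;

	if (action != MEM_ISOLATE_COUNT)
		return NOTIFY_OK;

	for (pfn = mn->start_pfn; pfn < mn->start_pfn + mn->nr_pages; pfn++)
		if (example_owns_pfn(pfn))	/* hypothetical */
			mn->pages_found++;

	return NOTIFY_OK;
}

static struct notifier_block example_isolate_nb = {
	.notifier_call = example_isolate_cb,
};

/* registered with register_memory_isolate_notifier(&example_isolate_nb); */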
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index fed969281a41..35b07b773e6c 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -69,7 +69,6 @@ extern void online_page(struct page *page);
 /* VM interface that may be used by firmware interface */
 extern int online_pages(unsigned long, unsigned long);
 extern void __offline_isolated_pages(unsigned long, unsigned long);
-extern int offline_pages(unsigned long, unsigned long, unsigned long);
 
 /* reasonably generic interface to expand the physical pages in a zone */
 extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 085c903fe0f1..1cc966cd3e5f 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p);
 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 				unsigned long addr, gfp_t gfp_flags,
 				struct mempolicy **mpol, nodemask_t **nodemask);
+extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
 extern unsigned slab_node(struct mempolicy *policy);
 
 extern enum zone_type policy_zone;
@@ -328,6 +329,8 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
 	return node_zonelist(0, gfp_flags);
 }
 
+static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; }
+
 static inline int do_migrate_pages(struct mm_struct *mm,
 			const nodemask_t *from_nodes,
 			const nodemask_t *to_nodes, int flags)
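A small hedged example of the new helper; example_walk_nodes() is a hypothetical consumer of the resulting mask.

/* Sketch: restrict an operation to the nodes allowed by the task's mempolicy. */
static void example_apply_policy(void)
{
	nodemask_t mask;

	/* returns true (and fills the mask) only for a node-restricting policy */
	if (init_nodemask_of_mempolicy(&mask))
		example_walk_nodes(&mask);	/* hypothetical */
}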
diff --git a/include/linux/mfd/88pm8607.h b/include/linux/mfd/88pm8607.h
new file mode 100644
index 000000000000..f41b428d2cec
--- /dev/null
+++ b/include/linux/mfd/88pm8607.h
@@ -0,0 +1,217 @@
1/*
2 * Marvell 88PM8607 Interface
3 *
4 * Copyright (C) 2009 Marvell International Ltd.
5 * Haojian Zhuang <haojian.zhuang@marvell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef __LINUX_MFD_88PM8607_H
13#define __LINUX_MFD_88PM8607_H
14
15enum {
16 PM8607_ID_BUCK1 = 0,
17 PM8607_ID_BUCK2,
18 PM8607_ID_BUCK3,
19
20 PM8607_ID_LDO1,
21 PM8607_ID_LDO2,
22 PM8607_ID_LDO3,
23 PM8607_ID_LDO4,
24 PM8607_ID_LDO5,
25 PM8607_ID_LDO6,
26 PM8607_ID_LDO7,
27 PM8607_ID_LDO8,
28 PM8607_ID_LDO9,
29 PM8607_ID_LDO10,
30 PM8607_ID_LDO12,
31 PM8607_ID_LDO14,
32
33 PM8607_ID_RG_MAX,
34};
35
36#define CHIP_ID (0x40)
37#define CHIP_ID_MASK (0xF8)
38
39/* Interrupt Registers */
40#define PM8607_STATUS_1 (0x01)
41#define PM8607_STATUS_2 (0x02)
42#define PM8607_INT_STATUS1 (0x03)
43#define PM8607_INT_STATUS2 (0x04)
44#define PM8607_INT_STATUS3 (0x05)
45#define PM8607_INT_MASK_1 (0x06)
46#define PM8607_INT_MASK_2 (0x07)
47#define PM8607_INT_MASK_3 (0x08)
48
49/* Regulator Control Registers */
50#define PM8607_LDO1 (0x10)
51#define PM8607_LDO2 (0x11)
52#define PM8607_LDO3 (0x12)
53#define PM8607_LDO4 (0x13)
54#define PM8607_LDO5 (0x14)
55#define PM8607_LDO6 (0x15)
56#define PM8607_LDO7 (0x16)
57#define PM8607_LDO8 (0x17)
58#define PM8607_LDO9 (0x18)
59#define PM8607_LDO10 (0x19)
60#define PM8607_LDO12 (0x1A)
61#define PM8607_LDO14 (0x1B)
62#define PM8607_SLEEP_MODE1 (0x1C)
63#define PM8607_SLEEP_MODE2 (0x1D)
64#define PM8607_SLEEP_MODE3 (0x1E)
65#define PM8607_SLEEP_MODE4 (0x1F)
66#define PM8607_GO (0x20)
67#define PM8607_SLEEP_BUCK1 (0x21)
68#define PM8607_SLEEP_BUCK2 (0x22)
69#define PM8607_SLEEP_BUCK3 (0x23)
70#define PM8607_BUCK1 (0x24)
71#define PM8607_BUCK2 (0x25)
72#define PM8607_BUCK3 (0x26)
73#define PM8607_BUCK_CONTROLS (0x27)
74#define PM8607_SUPPLIES_EN11 (0x2B)
75#define PM8607_SUPPLIES_EN12 (0x2C)
76#define PM8607_GROUP1 (0x2D)
77#define PM8607_GROUP2 (0x2E)
78#define PM8607_GROUP3 (0x2F)
79#define PM8607_GROUP4 (0x30)
80#define PM8607_GROUP5 (0x31)
81#define PM8607_GROUP6 (0x32)
82#define PM8607_SUPPLIES_EN21 (0x33)
83#define PM8607_SUPPLIES_EN22 (0x34)
84
85/* RTC Control Registers */
86#define PM8607_RTC1 (0xA0)
87#define PM8607_RTC_COUNTER1 (0xA1)
88#define PM8607_RTC_COUNTER2 (0xA2)
89#define PM8607_RTC_COUNTER3 (0xA3)
90#define PM8607_RTC_COUNTER4 (0xA4)
91#define PM8607_RTC_EXPIRE1 (0xA5)
92#define PM8607_RTC_EXPIRE2 (0xA6)
93#define PM8607_RTC_EXPIRE3 (0xA7)
94#define PM8607_RTC_EXPIRE4 (0xA8)
95#define PM8607_RTC_TRIM1 (0xA9)
96#define PM8607_RTC_TRIM2 (0xAA)
97#define PM8607_RTC_TRIM3 (0xAB)
98#define PM8607_RTC_TRIM4 (0xAC)
99#define PM8607_RTC_MISC1 (0xAD)
100#define PM8607_RTC_MISC2 (0xAE)
101#define PM8607_RTC_MISC3 (0xAF)
102
103/* Misc Registers */
104#define PM8607_CHIP_ID (0x00)
105#define PM8607_LDO1 (0x10)
106#define PM8607_DVC3 (0x26)
107#define PM8607_MISC1 (0x40)
108
109/* bit definitions for PM8607 events */
110#define PM8607_EVENT_ONKEY (1 << 0)
111#define PM8607_EVENT_EXTON (1 << 1)
112#define PM8607_EVENT_CHG (1 << 2)
113#define PM8607_EVENT_BAT (1 << 3)
114#define PM8607_EVENT_RTC (1 << 4)
115#define PM8607_EVENT_CC (1 << 5)
116#define PM8607_EVENT_VBAT (1 << 8)
117#define PM8607_EVENT_VCHG (1 << 9)
118#define PM8607_EVENT_VSYS (1 << 10)
119#define PM8607_EVENT_TINT (1 << 11)
120#define PM8607_EVENT_GPADC0 (1 << 12)
121#define PM8607_EVENT_GPADC1 (1 << 13)
122#define PM8607_EVENT_GPADC2 (1 << 14)
123#define PM8607_EVENT_GPADC3 (1 << 15)
124#define PM8607_EVENT_AUDIO_SHORT (1 << 16)
125#define PM8607_EVENT_PEN (1 << 17)
126#define PM8607_EVENT_HEADSET (1 << 18)
127#define PM8607_EVENT_HOOK (1 << 19)
128#define PM8607_EVENT_MICIN (1 << 20)
129#define PM8607_EVENT_CHG_TIMEOUT (1 << 21)
130#define PM8607_EVENT_CHG_DONE (1 << 22)
131#define PM8607_EVENT_CHG_FAULT (1 << 23)
132
133/* bit definitions of Status Query Interface */
134#define PM8607_STATUS_CC (1 << 3)
135#define PM8607_STATUS_PEN (1 << 4)
136#define PM8607_STATUS_HEADSET (1 << 5)
137#define PM8607_STATUS_HOOK (1 << 6)
138#define PM8607_STATUS_MICIN (1 << 7)
139#define PM8607_STATUS_ONKEY (1 << 8)
140#define PM8607_STATUS_EXTON (1 << 9)
141#define PM8607_STATUS_CHG (1 << 10)
142#define PM8607_STATUS_BAT (1 << 11)
143#define PM8607_STATUS_VBUS (1 << 12)
144#define PM8607_STATUS_OV (1 << 13)
145
146/* bit definitions of BUCK3 */
147#define PM8607_BUCK3_DOUBLE (1 << 6)
148
149/* bit definitions of Misc1 */
150#define PM8607_MISC1_PI2C (1 << 0)
151
152/* Interrupt Number in 88PM8607 */
153enum {
154 PM8607_IRQ_ONKEY = 0,
155 PM8607_IRQ_EXTON,
156 PM8607_IRQ_CHG,
157 PM8607_IRQ_BAT,
158 PM8607_IRQ_RTC,
159 PM8607_IRQ_VBAT = 8,
160 PM8607_IRQ_VCHG,
161 PM8607_IRQ_VSYS,
162 PM8607_IRQ_TINT,
163 PM8607_IRQ_GPADC0,
164 PM8607_IRQ_GPADC1,
165 PM8607_IRQ_GPADC2,
166 PM8607_IRQ_GPADC3,
167 PM8607_IRQ_AUDIO_SHORT = 16,
168 PM8607_IRQ_PEN,
169 PM8607_IRQ_HEADSET,
170 PM8607_IRQ_HOOK,
171 PM8607_IRQ_MICIN,
172 PM8607_IRQ_CHG_FAIL,
173 PM8607_IRQ_CHG_DONE,
174 PM8607_IRQ_CHG_FAULT,
175};
176
177enum {
178 PM8607_CHIP_A0 = 0x40,
179 PM8607_CHIP_A1 = 0x41,
180 PM8607_CHIP_B0 = 0x48,
181};
182
183
184struct pm8607_chip {
185 struct device *dev;
186 struct mutex io_lock;
187 struct i2c_client *client;
188
189 int (*read)(struct pm8607_chip *chip, int reg, int bytes, void *dest);
190 int (*write)(struct pm8607_chip *chip, int reg, int bytes, void *src);
191
192 int buck3_double; /* DVC ramp slope double */
193 unsigned char chip_id;
194
195};
196
197#define PM8607_MAX_REGULATOR 15 /* 3 Bucks, 12 LDOs */
198
199enum {
200 GI2C_PORT = 0,
201 PI2C_PORT,
202};
203
204struct pm8607_platform_data {
205 int i2c_port; /* Controlled by GI2C or PI2C */
206 struct regulator_init_data *regulator[PM8607_MAX_REGULATOR];
207};
208
209extern int pm8607_reg_read(struct pm8607_chip *, int);
210extern int pm8607_reg_write(struct pm8607_chip *, int, unsigned char);
211extern int pm8607_bulk_read(struct pm8607_chip *, int, int,
212 unsigned char *);
213extern int pm8607_bulk_write(struct pm8607_chip *, int, int,
214 unsigned char *);
215extern int pm8607_set_bits(struct pm8607_chip *, int, unsigned char,
216 unsigned char);
217#endif /* __LINUX_MFD_88PM8607_H */
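A hedged sketch of the register accessors declared at the end of the header; the ID check and the BUCK3 tweak are illustrative, not the driver's actual probe logic.

/* Illustrative use of the 88PM8607 register helpers. */
static int example_check_chip(struct pm8607_chip *chip)
{
	int ret;

	ret = pm8607_reg_read(chip, PM8607_CHIP_ID);
	if (ret < 0)
		return ret;
	if ((ret & CHIP_ID_MASK) != CHIP_ID)
		return -ENODEV;

	/* e.g. double the DVC ramp slope on BUCK3 */
	return pm8607_set_bits(chip, PM8607_BUCK3, PM8607_BUCK3_DOUBLE,
			       PM8607_BUCK3_DOUBLE);
}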
diff --git a/include/linux/mfd/ab4500.h b/include/linux/mfd/ab4500.h
new file mode 100644
index 000000000000..a42a7033ae53
--- /dev/null
+++ b/include/linux/mfd/ab4500.h
@@ -0,0 +1,262 @@
1/*
2 * Copyright (C) 2009 ST-Ericsson
3 *
4 * Author: Srinidhi KASAGAR <srinidhi.kasagar@stericsson.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2, as
8 * published by the Free Software Foundation.
9 *
10 * AB4500 device core functions, for client access
11 */
12#ifndef MFD_AB4500_H
13#define MFD_AB4500_H
14
15#include <linux/device.h>
16
17/*
18 * AB4500 bank addresses
19 */
20#define AB4500_SYS_CTRL1_BLOCK 0x1
21#define AB4500_SYS_CTRL2_BLOCK 0x2
22#define AB4500_REGU_CTRL1 0x3
23#define AB4500_REGU_CTRL2 0x4
24#define AB4500_USB 0x5
25#define AB4500_TVOUT 0x6
26#define AB4500_DBI 0x7
27#define AB4500_ECI_AV_ACC 0x8
28#define AB4500_RESERVED 0x9
29#define AB4500_GPADC 0xA
30#define AB4500_CHARGER 0xB
31#define AB4500_GAS_GAUGE 0xC
32#define AB4500_AUDIO 0xD
33#define AB4500_INTERRUPT 0xE
34#define AB4500_RTC 0xF
35#define AB4500_MISC 0x10
36#define AB4500_DEBUG 0x12
37#define AB4500_PROD_TEST 0x13
38#define AB4500_OTP_EMUL 0x15
39
40/*
41 * System control 1 register offsets.
42 * Bank = 0x01
43 */
44#define AB4500_TURNON_STAT_REG 0x0100
45#define AB4500_RESET_STAT_REG 0x0101
46#define AB4500_PONKEY1_PRESS_STAT_REG 0x0102
47
48#define AB4500_FSM_STAT1_REG 0x0140
49#define AB4500_FSM_STAT2_REG 0x0141
50#define AB4500_SYSCLK_REQ_STAT_REG 0x0142
51#define AB4500_USB_STAT1_REG 0x0143
52#define AB4500_USB_STAT2_REG 0x0144
53#define AB4500_STATUS_SPARE1_REG 0x0145
54#define AB4500_STATUS_SPARE2_REG 0x0146
55
56#define AB4500_CTRL1_REG 0x0180
57#define AB4500_CTRL2_REG 0x0181
58
59/*
60 * System control 2 register offsets.
61 * bank = 0x02
62 */
63#define AB4500_CTRL3_REG 0x0200
64#define AB4500_MAIN_WDOG_CTRL_REG 0x0201
65#define AB4500_MAIN_WDOG_TIMER_REG 0x0202
66#define AB4500_LOW_BAT_REG 0x0203
67#define AB4500_BATT_OK_REG 0x0204
68#define AB4500_SYSCLK_TIMER_REG 0x0205
69#define AB4500_SMPSCLK_CTRL_REG 0x0206
70#define AB4500_SMPSCLK_SEL1_REG 0x0207
71#define AB4500_SMPSCLK_SEL2_REG 0x0208
72#define AB4500_SMPSCLK_SEL3_REG 0x0209
73#define AB4500_SYSULPCLK_CONF_REG 0x020A
74#define AB4500_SYSULPCLK_CTRL1_REG 0x020B
75#define AB4500_SYSCLK_CTRL_REG 0x020C
76#define AB4500_SYSCLK_REQ1_VALID_REG 0x020D
77#define AB4500_SYSCLK_REQ_VALID_REG 0x020E
78#define AB4500_SYSCTRL_SPARE_REG 0x020F
79#define AB4500_PAD_CONF_REG 0x0210
80
81/*
82 * Regu control1 register offsets
83 * Bank = 0x03
84 */
85#define AB4500_REGU_SERIAL_CTRL1_REG 0x0300
86#define AB4500_REGU_SERIAL_CTRL2_REG 0x0301
87#define AB4500_REGU_SERIAL_CTRL3_REG 0x0302
88#define AB4500_REGU_REQ_CTRL1_REG 0x0303
89#define AB4500_REGU_REQ_CTRL2_REG 0x0304
90#define AB4500_REGU_REQ_CTRL3_REG 0x0305
91#define AB4500_REGU_REQ_CTRL4_REG 0x0306
92#define AB4500_REGU_MISC1_REG 0x0380
93#define AB4500_REGU_OTGSUPPLY_CTRL_REG 0x0381
94#define AB4500_REGU_VUSB_CTRL_REG 0x0382
95#define AB4500_REGU_VAUDIO_SUPPLY_REG 0x0383
96#define AB4500_REGU_CTRL1_SPARE_REG 0x0384
97
98/*
99 * Regu control2 Vmod register offsets
100 */
101#define AB4500_REGU_VMOD_REGU_REG 0x0440
102#define AB4500_REGU_VMOD_SEL1_REG 0x0441
103#define AB4500_REGU_VMOD_SEL2_REG 0x0442
104#define AB4500_REGU_CTRL_DISCH_REG 0x0443
105#define AB4500_REGU_CTRL_DISCH2_REG 0x0444
106
107/*
108 * USB/ULPI register offsets
109 * Bank : 0x5
110 */
111#define AB4500_USB_LINE_STAT_REG 0x0580
112#define AB4500_USB_LINE_CTRL1_REG 0x0581
113#define AB4500_USB_LINE_CTRL2_REG 0x0582
114#define AB4500_USB_LINE_CTRL3_REG 0x0583
115#define AB4500_USB_LINE_CTRL4_REG 0x0584
116#define AB4500_USB_LINE_CTRL5_REG 0x0585
117#define AB4500_USB_OTG_CTRL_REG 0x0587
118#define AB4500_USB_OTG_STAT_REG 0x0588
119#define AB4500_USB_OTG_STAT_REG 0x0588
120#define AB4500_USB_CTRL_SPARE_REG 0x0589
121#define AB4500_USB_PHY_CTRL_REG 0x058A
122
123/*
124 * TVOUT / CTRL register offsets
125 * Bank : 0x06
126 */
127#define AB4500_TVOUT_CTRL_REG 0x0680
128
129/*
130 * DBI register offsets
131 * Bank : 0x07
132 */
133#define AB4500_DBI_REG1_REG 0x0700
134#define AB4500_DBI_REG2_REG 0x0701
135
136/*
137 * ECI register offsets
138 * Bank : 0x08
139 */
140#define AB4500_ECI_CTRL_REG 0x0800
141#define AB4500_ECI_HOOKLEVEL_REG 0x0801
142#define AB4500_ECI_DATAOUT_REG 0x0802
143#define AB4500_ECI_DATAIN_REG 0x0803
144
145/*
146 * AV Connector register offsets
147 * Bank : 0x08
148 */
149#define AB4500_AV_CONN_REG 0x0840
150
151/*
152 * Accessory detection register offsets
153 * Bank : 0x08
154 */
155#define AB4500_ACC_DET_DB1_REG 0x0880
156#define AB4500_ACC_DET_DB2_REG 0x0881
157
158/*
159 * GPADC register offsets
160 * Bank : 0x0A
161 */
162#define AB4500_GPADC_CTRL1_REG 0x0A00
163#define AB4500_GPADC_CTRL2_REG 0x0A01
164#define AB4500_GPADC_CTRL3_REG 0x0A02
165#define AB4500_GPADC_AUTO_TIMER_REG 0x0A03
166#define AB4500_GPADC_STAT_REG 0x0A04
167#define AB4500_GPADC_MANDATAL_REG 0x0A05
168#define AB4500_GPADC_MANDATAH_REG 0x0A06
169#define AB4500_GPADC_AUTODATAL_REG 0x0A07
170#define AB4500_GPADC_AUTODATAH_REG 0x0A08
171#define AB4500_GPADC_MUX_CTRL_REG 0x0A09
172
173/*
174 * Charger / status register offsets
175 * Bank : 0x0B
176 */
177#define AB4500_CH_STATUS1_REG 0x0B00
178#define AB4500_CH_STATUS2_REG 0x0B01
179#define AB4500_CH_USBCH_STAT1_REG 0x0B02
180#define AB4500_CH_USBCH_STAT2_REG 0x0B03
181#define AB4500_CH_FSM_STAT_REG 0x0B04
182#define AB4500_CH_STAT_REG 0x0B05
183
184/*
185 * Charger / control register offsets
186 * Bank : 0x0B
187 */
188#define AB4500_CH_VOLT_LVL_REG 0x0B40
189
190/*
191 * Charger / main control register offsets
192 * Bank : 0x0B
193 */
194#define AB4500_MCH_CTRL1 0x0B80
195#define AB4500_MCH_CTRL2 0x0B81
196#define AB4500_MCH_IPT_CURLVL_REG 0x0B82
197#define AB4500_CH_WD_REG 0x0B83
198
199/*
200 * Charger / USB control register offsets
201 * Bank : 0x0B
202 */
203#define AB4500_USBCH_CTRL1_REG 0x0BC0
204#define AB4500_USBCH_CTRL2_REG 0x0BC1
205#define AB4500_USBCH_IPT_CRNTLVL_REG 0x0BC2
206
207/*
208 * RTC bank register offsets
209 * Bank : 0xF
210 */
211#define AB4500_RTC_SOFF_STAT_REG 0x0F00
212#define AB4500_RTC_CC_CONF_REG 0x0F01
213#define AB4500_RTC_READ_REQ_REG 0x0F02
214#define AB4500_RTC_WATCH_TSECMID_REG 0x0F03
215#define AB4500_RTC_WATCH_TSECHI_REG 0x0F04
216#define AB4500_RTC_WATCH_TMIN_LOW_REG 0x0F05
217#define AB4500_RTC_WATCH_TMIN_MID_REG 0x0F06
218#define AB4500_RTC_WATCH_TMIN_HI_REG 0x0F07
219#define AB4500_RTC_ALRM_MIN_LOW_REG 0x0F08
220#define AB4500_RTC_ALRM_MIN_MID_REG 0x0F09
221#define AB4500_RTC_ALRM_MIN_HI_REG 0x0F0A
222#define AB4500_RTC_STAT_REG 0x0F0B
223#define AB4500_RTC_BKUP_CHG_REG 0x0F0C
224#define AB4500_RTC_FORCE_BKUP_REG 0x0F0D
225#define AB4500_RTC_CALIB_REG 0x0F0E
226#define AB4500_RTC_SWITCH_STAT_REG 0x0F0F
227
228/*
229 * PWM Out generators
230 * Bank: 0x10
231 */
232#define AB4500_PWM_OUT_CTRL1_REG 0x1060
233#define AB4500_PWM_OUT_CTRL2_REG 0x1061
234#define AB4500_PWM_OUT_CTRL3_REG 0x1062
235#define AB4500_PWM_OUT_CTRL4_REG 0x1063
236#define AB4500_PWM_OUT_CTRL5_REG 0x1064
237#define AB4500_PWM_OUT_CTRL6_REG 0x1065
238#define AB4500_PWM_OUT_CTRL7_REG 0x1066
239
240#define AB4500_I2C_PAD_CTRL_REG 0x1067
241#define AB4500_REV_REG 0x1080
242
243/**
244 * struct ab4500
245 * @spi: spi device structure
246 * @tx_buf: transmit buffer
247 * @rx_buf: receive buffer
248 * @lock: sync primitive
249 */
250struct ab4500 {
251 struct spi_device *spi;
252 unsigned long tx_buf[4];
253 unsigned long rx_buf[4];
254 struct mutex lock;
255};
256
257int ab4500_write(struct ab4500 *ab4500, unsigned char block,
258 unsigned long addr, unsigned char data);
259int ab4500_read(struct ab4500 *ab4500, unsigned char block,
260 unsigned long addr);
261
262#endif /* MFD_AB4500_H */
diff --git a/include/linux/mfd/adp5520.h b/include/linux/mfd/adp5520.h
new file mode 100644
index 000000000000..ac37558a4673
--- /dev/null
+++ b/include/linux/mfd/adp5520.h
@@ -0,0 +1,299 @@
1/*
2 * Definitions and platform data for Analog Devices
3 * ADP5520/ADP5501 MFD PMICs (Backlight, LED, GPIO and Keys)
4 *
5 * Copyright 2009 Analog Devices Inc.
6 *
7 * Licensed under the GPL-2 or later.
8 */
9
10
11#ifndef __LINUX_MFD_ADP5520_H
12#define __LINUX_MFD_ADP5520_H
13
14#define ID_ADP5520 5520
15#define ID_ADP5501 5501
16
17/*
18 * ADP5520/ADP5501 Register Map
19 */
20
21#define ADP5520_MODE_STATUS 0x00
22#define ADP5520_INTERRUPT_ENABLE 0x01
23#define ADP5520_BL_CONTROL 0x02
24#define ADP5520_BL_TIME 0x03
25#define ADP5520_BL_FADE 0x04
26#define ADP5520_DAYLIGHT_MAX 0x05
27#define ADP5520_DAYLIGHT_DIM 0x06
28#define ADP5520_OFFICE_MAX 0x07
29#define ADP5520_OFFICE_DIM 0x08
30#define ADP5520_DARK_MAX 0x09
31#define ADP5520_DARK_DIM 0x0A
32#define ADP5520_BL_VALUE 0x0B
33#define ADP5520_ALS_CMPR_CFG 0x0C
34#define ADP5520_L2_TRIP 0x0D
35#define ADP5520_L2_HYS 0x0E
36#define ADP5520_L3_TRIP 0x0F
37#define ADP5520_L3_HYS 0x10
38#define ADP5520_LED_CONTROL 0x11
39#define ADP5520_LED_TIME 0x12
40#define ADP5520_LED_FADE 0x13
41#define ADP5520_LED1_CURRENT 0x14
42#define ADP5520_LED2_CURRENT 0x15
43#define ADP5520_LED3_CURRENT 0x16
44
45/*
46 * ADP5520 Register Map
47 */
48
49#define ADP5520_GPIO_CFG_1 0x17
50#define ADP5520_GPIO_CFG_2 0x18
51#define ADP5520_GPIO_IN 0x19
52#define ADP5520_GPIO_OUT 0x1A
53#define ADP5520_GPIO_INT_EN 0x1B
54#define ADP5520_GPIO_INT_STAT 0x1C
55#define ADP5520_GPIO_INT_LVL 0x1D
56#define ADP5520_GPIO_DEBOUNCE 0x1E
57#define ADP5520_GPIO_PULLUP 0x1F
58#define ADP5520_KP_INT_STAT_1 0x20
59#define ADP5520_KP_INT_STAT_2 0x21
60#define ADP5520_KR_INT_STAT_1 0x22
61#define ADP5520_KR_INT_STAT_2 0x23
62#define ADP5520_KEY_STAT_1 0x24
63#define ADP5520_KEY_STAT_2 0x25
64
65/*
66 * MODE_STATUS bits
67 */
68
69#define ADP5520_nSTNBY (1 << 7)
70#define ADP5520_BL_EN (1 << 6)
71#define ADP5520_DIM_EN (1 << 5)
72#define ADP5520_OVP_INT (1 << 4)
73#define ADP5520_CMPR_INT (1 << 3)
74#define ADP5520_GPI_INT (1 << 2)
75#define ADP5520_KR_INT (1 << 1)
76#define ADP5520_KP_INT (1 << 0)
77
78/*
79 * INTERRUPT_ENABLE bits
80 */
81
82#define ADP5520_AUTO_LD_EN (1 << 4)
83#define ADP5520_CMPR_IEN (1 << 3)
84#define ADP5520_OVP_IEN (1 << 2)
85#define ADP5520_KR_IEN (1 << 1)
86#define ADP5520_KP_IEN (1 << 0)
87
88/*
89 * BL_CONTROL bits
90 */
91
92#define ADP5520_BL_LVL(x)		((x) << 5)
93#define ADP5520_BL_LAW(x)		((x) << 4)
94#define ADP5520_BL_AUTO_ADJ (1 << 3)
95#define ADP5520_OVP_EN (1 << 2)
96#define ADP5520_FOVR (1 << 1)
97#define ADP5520_KP_BL_EN (1 << 0)
98
99/*
100 * ALS_CMPR_CFG bits
101 */
102
103#define ADP5520_L3_OUT (1 << 3)
104#define ADP5520_L2_OUT (1 << 2)
105#define ADP5520_L3_EN (1 << 1)
106
107#define ADP5020_MAX_BRIGHTNESS 0x7F
108
109#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
110#define BL_CTRL_VAL(law, auto) (((1 & (auto)) << 3) | ((0x3 & (law)) << 4))
111#define ALS_CMPR_CFG_VAL(filt, l3_en) (((0x7 & filt) << 5) | l3_en)
112
113/*
114 * LEDs subdevice bits and masks
115 */
116
117#define ADP5520_01_MAXLEDS 3
118
119#define ADP5520_FLAG_LED_MASK 0x3
120#define ADP5520_FLAG_OFFT_SHIFT 8
121#define ADP5520_FLAG_OFFT_MASK 0x3
122
123#define ADP5520_R3_MODE (1 << 5)
124#define ADP5520_C3_MODE (1 << 4)
125#define ADP5520_LED_LAW (1 << 3)
126#define ADP5520_LED3_EN (1 << 2)
127#define ADP5520_LED2_EN (1 << 1)
128#define ADP5520_LED1_EN (1 << 0)
129
130/*
131 * GPIO subdevice bits and masks
132 */
133
134#define ADP5520_MAXGPIOS 8
135
136#define ADP5520_GPIO_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
137#define ADP5520_GPIO_C2 (1 << 6)
138#define ADP5520_GPIO_C1 (1 << 5)
139#define ADP5520_GPIO_C0 (1 << 4)
140#define ADP5520_GPIO_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
141#define ADP5520_GPIO_R2 (1 << 2)
142#define ADP5520_GPIO_R1 (1 << 1)
143#define ADP5520_GPIO_R0 (1 << 0)
144
145struct adp5520_gpio_platform_data {
146 unsigned gpio_start;
147 u8 gpio_en_mask;
148 u8 gpio_pullup_mask;
149};
150
151/*
152 * Keypad subdevice bits and masks
153 */
154
155#define ADP5520_MAXKEYS 16
156
157#define ADP5520_COL_C3 (1 << 7) /* LED2 or GPIO7 aka C3 */
158#define ADP5520_COL_C2 (1 << 6)
159#define ADP5520_COL_C1 (1 << 5)
160#define ADP5520_COL_C0 (1 << 4)
161#define ADP5520_ROW_R3 (1 << 3) /* LED3 or GPIO3 aka R3 */
162#define ADP5520_ROW_R2 (1 << 2)
163#define ADP5520_ROW_R1 (1 << 1)
164#define ADP5520_ROW_R0 (1 << 0)
165
166#define ADP5520_KEY(row, col) (col + row * 4)
167#define ADP5520_KEYMAPSIZE ADP5520_MAXKEYS
168
169struct adp5520_keys_platform_data {
170	int rows_en_mask;		/* Mask of enabled rows */
171	int cols_en_mask;		/* Mask of enabled columns */
172 const unsigned short *keymap; /* Pointer to keymap */
173 unsigned short keymapsize; /* Keymap size */
174 unsigned repeat:1; /* Enable key repeat */
175};
176
177
178/*
179 * LEDs subdevice platform data
180 */
181
182#define FLAG_ID_ADP5520_LED1_ADP5501_LED0 1 /* ADP5520 PIN ILED */
183#define FLAG_ID_ADP5520_LED2_ADP5501_LED1 2 /* ADP5520 PIN C3 */
184#define FLAG_ID_ADP5520_LED3_ADP5501_LED2 3 /* ADP5520 PIN R3 */
185
186#define ADP5520_LED_DIS_BLINK (0 << ADP5520_FLAG_OFFT_SHIFT)
187#define ADP5520_LED_OFFT_600ms (1 << ADP5520_FLAG_OFFT_SHIFT)
188#define ADP5520_LED_OFFT_800ms (2 << ADP5520_FLAG_OFFT_SHIFT)
189#define ADP5520_LED_OFFT_1200ms (3 << ADP5520_FLAG_OFFT_SHIFT)
190
191#define ADP5520_LED_ONT_200ms 0
192#define ADP5520_LED_ONT_600ms 1
193#define ADP5520_LED_ONT_800ms 2
194#define ADP5520_LED_ONT_1200ms 3
195
196struct adp5520_leds_platform_data {
197 int num_leds;
198 struct led_info *leds;
199 u8 fade_in; /* Backlight Fade-In Timer */
200 u8 fade_out; /* Backlight Fade-Out Timer */
201 u8 led_on_time;
202};
203
204/*
205 * Backlight subdevice platform data
206 */
207
208#define ADP5520_FADE_T_DIS 0 /* Fade Timer Disabled */
209#define ADP5520_FADE_T_300ms 1 /* 0.3 Sec */
210#define ADP5520_FADE_T_600ms 2
211#define ADP5520_FADE_T_900ms 3
212#define ADP5520_FADE_T_1200ms 4
213#define ADP5520_FADE_T_1500ms 5
214#define ADP5520_FADE_T_1800ms 6
215#define ADP5520_FADE_T_2100ms 7
216#define ADP5520_FADE_T_2400ms 8
217#define ADP5520_FADE_T_2700ms 9
218#define ADP5520_FADE_T_3000ms 10
219#define ADP5520_FADE_T_3500ms 11
220#define ADP5520_FADE_T_4000ms 12
221#define ADP5520_FADE_T_4500ms 13
222#define ADP5520_FADE_T_5000ms 14
223#define ADP5520_FADE_T_5500ms 15 /* 5.5 Sec */
224
225#define ADP5520_BL_LAW_LINEAR 0
226#define ADP5520_BL_LAW_SQUARE 1
227#define ADP5520_BL_LAW_CUBIC1 2
228#define ADP5520_BL_LAW_CUBIC2 3
229
230#define ADP5520_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
231#define ADP5520_BL_AMBL_FILT_160ms 1
232#define ADP5520_BL_AMBL_FILT_320ms 2
233#define ADP5520_BL_AMBL_FILT_640ms 3
234#define ADP5520_BL_AMBL_FILT_1280ms 4
235#define ADP5520_BL_AMBL_FILT_2560ms 5
236#define ADP5520_BL_AMBL_FILT_5120ms 6
237#define ADP5520_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
238
239 /*
240 * Backlight current 0..30 mA
241 */
242#define ADP5520_BL_CUR_mA(I) ((I * 127) / 30)
243
244 /*
245 * L2 comparator current 0..1000uA
246 */
247#define ADP5520_L2_COMP_CURR_uA(I) ((I * 255) / 1000)
248
249 /*
250 * L3 comparator current 0..127uA
251 */
252#define ADP5520_L3_COMP_CURR_uA(I) ((I * 255) / 127)
253
254struct adp5520_backlight_platform_data {
255 u8 fade_in; /* Backlight Fade-In Timer */
256 u8 fade_out; /* Backlight Fade-Out Timer */
257 u8 fade_led_law; /* fade-on/fade-off transfer characteristic */
258
259 u8 en_ambl_sens; /* 1 = enable ambient light sensor */
260 u8 abml_filt; /* Light sensor filter time */
261 u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
262 u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
263 u8 l2_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
264 u8 l2_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
265 u8 l3_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
266 u8 l3_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
267 u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
268 u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1000 uA */
269 u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
270 u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 127 uA */
271};
272
273/*
274 * MFD chip platform data
275 */
276
277struct adp5520_platform_data {
278 struct adp5520_keys_platform_data *keys;
279 struct adp5520_gpio_platform_data *gpio;
280 struct adp5520_leds_platform_data *leds;
281 struct adp5520_backlight_platform_data *backlight;
282};
283
284/*
285 * MFD chip functions
286 */
287
288extern int adp5520_read(struct device *dev, int reg, uint8_t *val);
289extern int adp5520_write(struct device *dev, int reg, u8 val);
290extern int adp5520_clr_bits(struct device *dev, int reg, uint8_t bit_mask);
291extern int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask);
292
293extern int adp5520_register_notifier(struct device *dev,
294 struct notifier_block *nb, unsigned int events);
295
296extern int adp5520_unregister_notifier(struct device *dev,
297 struct notifier_block *nb, unsigned int events);
298
299#endif /* __LINUX_MFD_ADP5520_H */
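A hedged sketch of the exported helpers and the notifier hook; the event mask and the empty handler are illustrative only.

/* Illustrative client code for the ADP5520 MFD core helpers. */
static int example_keypress_event(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	/* invoked by the MFD core when a key-press interrupt is reported */
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_keypress_event,
};

static int example_setup(struct device *mfd_dev)
{
	int ret;

	/* take the chip out of standby and enable the backlight */
	ret = adp5520_set_bits(mfd_dev, ADP5520_MODE_STATUS,
			       ADP5520_nSTNBY | ADP5520_BL_EN);
	if (ret)
		return ret;

	/* ask the core to forward key-press events (assumed to be keyed
	 * by the interrupt-enable bits) to this notifier */
	return adp5520_register_notifier(mfd_dev, &example_nb, ADP5520_KP_IEN);
}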
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 3402042ddc31..40c372165f3e 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -231,9 +231,6 @@ void pcap_set_ts_bits(struct pcap_chip *, u32);
 #define PCAP_LED_4MA		1
 #define PCAP_LED_5MA		2
 #define PCAP_LED_9MA		3
-#define PCAP_LED_GPIO_VAL_MASK	0x00ffffff
-#define PCAP_LED_GPIO_EN	0x01000000
-#define PCAP_LED_GPIO_INVERT	0x02000000
 #define PCAP_LED_T_MASK		0xf
 #define PCAP_LED_C_MASK		0x3
 #define PCAP_BL_MASK		0x1f
diff --git a/include/linux/mfd/mc13783-private.h b/include/linux/mfd/mc13783-private.h
index 47e698cb0f16..95cf9360553f 100644
--- a/include/linux/mfd/mc13783-private.h
+++ b/include/linux/mfd/mc13783-private.h
@@ -24,52 +24,23 @@
24 24
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/mfd/mc13783.h> 26#include <linux/mfd/mc13783.h>
27#include <linux/workqueue.h>
28#include <linux/mutex.h> 27#include <linux/mutex.h>
29 28#include <linux/interrupt.h>
30struct mc13783_irq {
31 void (*handler)(int, void *);
32 void *data;
33};
34
35#define MC13783_NUM_IRQ 2
36#define MC13783_IRQ_TS 0
37#define MC13783_IRQ_REGULATOR 1
38
39#define MC13783_ADC_MODE_TS 1
40#define MC13783_ADC_MODE_SINGLE_CHAN 2
41#define MC13783_ADC_MODE_MULT_CHAN 3
42 29
43struct mc13783 { 30struct mc13783 {
44 int revision; 31 struct spi_device *spidev;
45 struct device *dev; 32 struct mutex lock;
46 struct spi_device *spi_device;
47
48 int (*read_dev)(void *data, char reg, int count, u32 *dst);
49 int (*write_dev)(void *data, char reg, int count, const u32 *src);
50
51 struct mutex io_lock;
52 void *io_data;
53 int irq; 33 int irq;
54 unsigned int flags; 34 int flags;
55 35
56 struct mc13783_irq irq_handler[MC13783_NUM_IRQ]; 36 irq_handler_t irqhandler[MC13783_NUM_IRQ];
57 struct work_struct work; 37 void *irqdata[MC13783_NUM_IRQ];
58 struct completion adc_done;
59 unsigned int ts_active;
60 struct mutex adc_conv_lock;
61 38
39 /* XXX these should go as platformdata to the regulator subdevice */
62 struct mc13783_regulator_init_data *regulators; 40 struct mc13783_regulator_init_data *regulators;
63 int num_regulators; 41 int num_regulators;
64}; 42};
65 43
66int mc13783_reg_read(struct mc13783 *, int reg_num, u32 *);
67int mc13783_reg_write(struct mc13783 *, int, u32);
68int mc13783_set_bits(struct mc13783 *, int, u32, u32);
69int mc13783_free_irq(struct mc13783 *mc13783, int irq);
70int mc13783_register_irq(struct mc13783 *mc13783, int irq,
71 void (*handler) (int, void *), void *data);
72
73#define MC13783_REG_INTERRUPT_STATUS_0 0 44#define MC13783_REG_INTERRUPT_STATUS_0 0
74#define MC13783_REG_INTERRUPT_MASK_0 1 45#define MC13783_REG_INTERRUPT_MASK_0 1
75#define MC13783_REG_INTERRUPT_SENSE_0 2 46#define MC13783_REG_INTERRUPT_SENSE_0 2
@@ -136,55 +107,6 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
136#define MC13783_REG_TEST_3 63 107#define MC13783_REG_TEST_3 63
137#define MC13783_REG_NB 64 108#define MC13783_REG_NB 64
138 109
139
140/*
141 * Interrupt Status
142 */
143#define MC13783_INT_STAT_ADCDONEI (1 << 0)
144#define MC13783_INT_STAT_ADCBISDONEI (1 << 1)
145#define MC13783_INT_STAT_TSI (1 << 2)
146#define MC13783_INT_STAT_WHIGHI (1 << 3)
147#define MC13783_INT_STAT_WLOWI (1 << 4)
148#define MC13783_INT_STAT_CHGDETI (1 << 6)
149#define MC13783_INT_STAT_CHGOVI (1 << 7)
150#define MC13783_INT_STAT_CHGREVI (1 << 8)
151#define MC13783_INT_STAT_CHGSHORTI (1 << 9)
152#define MC13783_INT_STAT_CCCVI (1 << 10)
153#define MC13783_INT_STAT_CHGCURRI (1 << 11)
154#define MC13783_INT_STAT_BPONI (1 << 12)
155#define MC13783_INT_STAT_LOBATLI (1 << 13)
156#define MC13783_INT_STAT_LOBATHI (1 << 14)
157#define MC13783_INT_STAT_UDPI (1 << 15)
158#define MC13783_INT_STAT_USBI (1 << 16)
159#define MC13783_INT_STAT_IDI (1 << 19)
160#define MC13783_INT_STAT_Unused (1 << 20)
161#define MC13783_INT_STAT_SE1I (1 << 21)
162#define MC13783_INT_STAT_CKDETI (1 << 22)
163#define MC13783_INT_STAT_UDMI (1 << 23)
164
165/*
166 * Interrupt Mask
167 */
168#define MC13783_INT_MASK_ADCDONEM (1 << 0)
169#define MC13783_INT_MASK_ADCBISDONEM (1 << 1)
170#define MC13783_INT_MASK_TSM (1 << 2)
171#define MC13783_INT_MASK_WHIGHM (1 << 3)
172#define MC13783_INT_MASK_WLOWM (1 << 4)
173#define MC13783_INT_MASK_CHGDETM (1 << 6)
174#define MC13783_INT_MASK_CHGOVM (1 << 7)
175#define MC13783_INT_MASK_CHGREVM (1 << 8)
176#define MC13783_INT_MASK_CHGSHORTM (1 << 9)
177#define MC13783_INT_MASK_CCCVM (1 << 10)
178#define MC13783_INT_MASK_CHGCURRM (1 << 11)
179#define MC13783_INT_MASK_BPONM (1 << 12)
180#define MC13783_INT_MASK_LOBATLM (1 << 13)
181#define MC13783_INT_MASK_LOBATHM (1 << 14)
182#define MC13783_INT_MASK_UDPM (1 << 15)
183#define MC13783_INT_MASK_USBM (1 << 16)
184#define MC13783_INT_MASK_IDM (1 << 19)
185#define MC13783_INT_MASK_SE1M (1 << 21)
186#define MC13783_INT_MASK_CKDETM (1 << 22)
187
188/* 110/*
189 * Reg Regulator Mode 0 111 * Reg Regulator Mode 0
190 */ 112 */
@@ -284,113 +206,15 @@ int mc13783_register_irq(struct mc13783 *mc13783, int irq,
284#define MC13783_SWCTRL_SW3_STBY (1 << 21) 206#define MC13783_SWCTRL_SW3_STBY (1 << 21)
285#define MC13783_SWCTRL_SW3_MODE (1 << 22) 207#define MC13783_SWCTRL_SW3_MODE (1 << 22)
286 208
287/* 209static inline int mc13783_set_bits(struct mc13783 *mc13783, unsigned int offset,
288 * ADC/Touch 210 u32 mask, u32 val)
289 */ 211{
290#define MC13783_ADC0_LICELLCON (1 << 0) 212 int ret;
291#define MC13783_ADC0_CHRGICON (1 << 1) 213 mc13783_lock(mc13783);
292#define MC13783_ADC0_BATICON (1 << 2) 214 ret = mc13783_reg_rmw(mc13783, offset, mask, val);
293#define MC13783_ADC0_RTHEN (1 << 3) 215 mc13783_unlock(mc13783);
294#define MC13783_ADC0_DTHEN (1 << 4)
295#define MC13783_ADC0_UIDEN (1 << 5)
296#define MC13783_ADC0_ADOUTEN (1 << 6)
297#define MC13783_ADC0_ADOUTPER (1 << 7)
298#define MC13783_ADC0_ADREFEN (1 << 10)
299#define MC13783_ADC0_ADREFMODE (1 << 11)
300#define MC13783_ADC0_TSMOD0 (1 << 12)
301#define MC13783_ADC0_TSMOD1 (1 << 13)
302#define MC13783_ADC0_TSMOD2 (1 << 14)
303#define MC13783_ADC0_CHRGRAWDIV (1 << 15)
304#define MC13783_ADC0_ADINC1 (1 << 16)
305#define MC13783_ADC0_ADINC2 (1 << 17)
306#define MC13783_ADC0_WCOMP (1 << 18)
307#define MC13783_ADC0_ADCBIS0 (1 << 23)
308
309#define MC13783_ADC1_ADEN (1 << 0)
310#define MC13783_ADC1_RAND (1 << 1)
311#define MC13783_ADC1_ADSEL (1 << 3)
312#define MC13783_ADC1_TRIGMASK (1 << 4)
313#define MC13783_ADC1_ADA10 (1 << 5)
314#define MC13783_ADC1_ADA11 (1 << 6)
315#define MC13783_ADC1_ADA12 (1 << 7)
316#define MC13783_ADC1_ADA20 (1 << 8)
317#define MC13783_ADC1_ADA21 (1 << 9)
318#define MC13783_ADC1_ADA22 (1 << 10)
319#define MC13783_ADC1_ATO0 (1 << 11)
320#define MC13783_ADC1_ATO1 (1 << 12)
321#define MC13783_ADC1_ATO2 (1 << 13)
322#define MC13783_ADC1_ATO3 (1 << 14)
323#define MC13783_ADC1_ATO4 (1 << 15)
324#define MC13783_ADC1_ATO5 (1 << 16)
325#define MC13783_ADC1_ATO6 (1 << 17)
326#define MC13783_ADC1_ATO7 (1 << 18)
327#define MC13783_ADC1_ATOX (1 << 19)
328#define MC13783_ADC1_ASC (1 << 20)
329#define MC13783_ADC1_ADTRIGIGN (1 << 21)
330#define MC13783_ADC1_ADONESHOT (1 << 22)
331#define MC13783_ADC1_ADCBIS1 (1 << 23)
332
333#define MC13783_ADC1_CHAN0_SHIFT 5
334#define MC13783_ADC1_CHAN1_SHIFT 8
335
336#define MC13783_ADC2_ADD10 (1 << 2)
337#define MC13783_ADC2_ADD11 (1 << 3)
338#define MC13783_ADC2_ADD12 (1 << 4)
339#define MC13783_ADC2_ADD13 (1 << 5)
340#define MC13783_ADC2_ADD14 (1 << 6)
341#define MC13783_ADC2_ADD15 (1 << 7)
342#define MC13783_ADC2_ADD16 (1 << 8)
343#define MC13783_ADC2_ADD17 (1 << 9)
344#define MC13783_ADC2_ADD18 (1 << 10)
345#define MC13783_ADC2_ADD19 (1 << 11)
346#define MC13783_ADC2_ADD20 (1 << 14)
347#define MC13783_ADC2_ADD21 (1 << 15)
348#define MC13783_ADC2_ADD22 (1 << 16)
349#define MC13783_ADC2_ADD23 (1 << 17)
350#define MC13783_ADC2_ADD24 (1 << 18)
351#define MC13783_ADC2_ADD25 (1 << 19)
352#define MC13783_ADC2_ADD26 (1 << 20)
353#define MC13783_ADC2_ADD27 (1 << 21)
354#define MC13783_ADC2_ADD28 (1 << 22)
355#define MC13783_ADC2_ADD29 (1 << 23)
356 216
357#define MC13783_ADC3_WHIGH0 (1 << 0) 217 return ret;
358#define MC13783_ADC3_WHIGH1 (1 << 1) 218}
359#define MC13783_ADC3_WHIGH2 (1 << 2)
360#define MC13783_ADC3_WHIGH3 (1 << 3)
361#define MC13783_ADC3_WHIGH4 (1 << 4)
362#define MC13783_ADC3_WHIGH5 (1 << 5)
363#define MC13783_ADC3_ICID0 (1 << 6)
364#define MC13783_ADC3_ICID1 (1 << 7)
365#define MC13783_ADC3_ICID2 (1 << 8)
366#define MC13783_ADC3_WLOW0 (1 << 9)
367#define MC13783_ADC3_WLOW1 (1 << 10)
368#define MC13783_ADC3_WLOW2 (1 << 11)
369#define MC13783_ADC3_WLOW3 (1 << 12)
370#define MC13783_ADC3_WLOW4 (1 << 13)
371#define MC13783_ADC3_WLOW5 (1 << 14)
372#define MC13783_ADC3_ADCBIS2 (1 << 23)
373
374#define MC13783_ADC4_ADDBIS10 (1 << 2)
375#define MC13783_ADC4_ADDBIS11 (1 << 3)
376#define MC13783_ADC4_ADDBIS12 (1 << 4)
377#define MC13783_ADC4_ADDBIS13 (1 << 5)
378#define MC13783_ADC4_ADDBIS14 (1 << 6)
379#define MC13783_ADC4_ADDBIS15 (1 << 7)
380#define MC13783_ADC4_ADDBIS16 (1 << 8)
381#define MC13783_ADC4_ADDBIS17 (1 << 9)
382#define MC13783_ADC4_ADDBIS18 (1 << 10)
383#define MC13783_ADC4_ADDBIS19 (1 << 11)
384#define MC13783_ADC4_ADDBIS20 (1 << 14)
385#define MC13783_ADC4_ADDBIS21 (1 << 15)
386#define MC13783_ADC4_ADDBIS22 (1 << 16)
387#define MC13783_ADC4_ADDBIS23 (1 << 17)
388#define MC13783_ADC4_ADDBIS24 (1 << 18)
389#define MC13783_ADC4_ADDBIS25 (1 << 19)
390#define MC13783_ADC4_ADDBIS26 (1 << 20)
391#define MC13783_ADC4_ADDBIS27 (1 << 21)
392#define MC13783_ADC4_ADDBIS28 (1 << 22)
393#define MC13783_ADC4_ADDBIS29 (1 << 23)
394 219
395#endif /* __LINUX_MFD_MC13783_PRIV_H */ 220#endif /* __LINUX_MFD_MC13783_PRIV_H */
396
diff --git a/include/linux/mfd/mc13783.h b/include/linux/mfd/mc13783.h
index b3a2a7243573..35680409b8cf 100644
--- a/include/linux/mfd/mc13783.h
+++ b/include/linux/mfd/mc13783.h
@@ -1,28 +1,50 @@
1/* 1/*
2 * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de> 2 * Copyright 2009 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
3 * 4 *
4 * Initial development of this code was funded by 5 * This program is free software; you can redistribute it and/or modify it under
5 * Phytec Messtechnik GmbH, http://www.phytec.de 6 * the terms of the GNU General Public License version 2 as published by the
6 * 7 * Free Software Foundation.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 */ 8 */
9#ifndef __LINUX_MFD_MC13783_H
10#define __LINUX_MFD_MC13783_H
21 11
22#ifndef __INCLUDE_LINUX_MFD_MC13783_H 12#include <linux/interrupt.h>
23#define __INCLUDE_LINUX_MFD_MC13783_H
24 13
25struct mc13783; 14struct mc13783;
15
16void mc13783_lock(struct mc13783 *mc13783);
17void mc13783_unlock(struct mc13783 *mc13783);
18
19int mc13783_reg_read(struct mc13783 *mc13783, unsigned int offset, u32 *val);
20int mc13783_reg_write(struct mc13783 *mc13783, unsigned int offset, u32 val);
21int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
22 u32 mask, u32 val);
23
24int mc13783_irq_request(struct mc13783 *mc13783, int irq,
25 irq_handler_t handler, const char *name, void *dev);
26int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq,
27 irq_handler_t handler, const char *name, void *dev);
28int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev);
29int mc13783_ackirq(struct mc13783 *mc13783, int irq);
30
31int mc13783_mask(struct mc13783 *mc13783, int irq);
32int mc13783_unmask(struct mc13783 *mc13783, int irq);
33
34#define MC13783_ADC0 43
35#define MC13783_ADC0_ADREFEN (1 << 10)
36#define MC13783_ADC0_ADREFMODE (1 << 11)
37#define MC13783_ADC0_TSMOD0 (1 << 12)
38#define MC13783_ADC0_TSMOD1 (1 << 13)
39#define MC13783_ADC0_TSMOD2 (1 << 14)
40#define MC13783_ADC0_ADINC1 (1 << 16)
41#define MC13783_ADC0_ADINC2 (1 << 17)
42
43#define MC13783_ADC0_TSMOD_MASK (MC13783_ADC0_TSMOD0 | \
44 MC13783_ADC0_TSMOD1 | \
45 MC13783_ADC0_TSMOD2)
46
47/* to be cleaned up */
26struct regulator_init_data; 48struct regulator_init_data;
27 49
28struct mc13783_regulator_init_data { 50struct mc13783_regulator_init_data {
@@ -30,23 +52,30 @@ struct mc13783_regulator_init_data {
30 struct regulator_init_data *init_data; 52 struct regulator_init_data *init_data;
31}; 53};
32 54
33struct mc13783_platform_data { 55struct mc13783_regulator_platform_data {
34 struct mc13783_regulator_init_data *regulators;
35 int num_regulators; 56 int num_regulators;
36 unsigned int flags; 57 struct mc13783_regulator_init_data *regulators;
37}; 58};
38 59
39/* mc13783_platform_data flags */ 60struct mc13783_platform_data {
61 int num_regulators;
62 struct mc13783_regulator_init_data *regulators;
63
40#define MC13783_USE_TOUCHSCREEN (1 << 0) 64#define MC13783_USE_TOUCHSCREEN (1 << 0)
41#define MC13783_USE_CODEC (1 << 1) 65#define MC13783_USE_CODEC (1 << 1)
42#define MC13783_USE_ADC (1 << 2) 66#define MC13783_USE_ADC (1 << 2)
43#define MC13783_USE_RTC (1 << 3) 67#define MC13783_USE_RTC (1 << 3)
44#define MC13783_USE_REGULATOR (1 << 4) 68#define MC13783_USE_REGULATOR (1 << 4)
69 unsigned int flags;
70};
71
72#define MC13783_ADC_MODE_TS 1
73#define MC13783_ADC_MODE_SINGLE_CHAN 2
74#define MC13783_ADC_MODE_MULT_CHAN 3
45 75
46int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode, 76int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
47 unsigned int channel, unsigned int *sample); 77 unsigned int channel, unsigned int *sample);
48 78
49void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
50 79
51#define MC13783_SW_SW1A 0 80#define MC13783_SW_SW1A 0
52#define MC13783_SW_SW1B 1 81#define MC13783_SW_SW1B 1
@@ -80,5 +109,46 @@ void mc13783_adc_set_ts_status(struct mc13783 *mc13783, unsigned int status);
80#define MC13783_REGU_V3 29 109#define MC13783_REGU_V3 29
81#define MC13783_REGU_V4 30 110#define MC13783_REGU_V4 30
82 111
83#endif /* __INCLUDE_LINUX_MFD_MC13783_H */ 112#define MC13783_IRQ_ADCDONE 0
113#define MC13783_IRQ_ADCBISDONE 1
114#define MC13783_IRQ_TS 2
115#define MC13783_IRQ_WHIGH 3
116#define MC13783_IRQ_WLOW 4
117#define MC13783_IRQ_CHGDET 6
118#define MC13783_IRQ_CHGOV 7
119#define MC13783_IRQ_CHGREV 8
120#define MC13783_IRQ_CHGSHORT 9
121#define MC13783_IRQ_CCCV 10
122#define MC13783_IRQ_CHGCURR 11
123#define MC13783_IRQ_BPON 12
124#define MC13783_IRQ_LOBATL 13
125#define MC13783_IRQ_LOBATH 14
126#define MC13783_IRQ_UDP 15
127#define MC13783_IRQ_USB 16
128#define MC13783_IRQ_ID 19
129#define MC13783_IRQ_SE1 21
130#define MC13783_IRQ_CKDET 22
131#define MC13783_IRQ_UDM 23
132#define MC13783_IRQ_1HZ 24
133#define MC13783_IRQ_TODA 25
134#define MC13783_IRQ_ONOFD1 27
135#define MC13783_IRQ_ONOFD2 28
136#define MC13783_IRQ_ONOFD3 29
137#define MC13783_IRQ_SYSRST 30
138#define MC13783_IRQ_RTCRST 31
139#define MC13783_IRQ_PC 32
140#define MC13783_IRQ_WARM 33
141#define MC13783_IRQ_MEMHLD 34
142#define MC13783_IRQ_PWRRDY 35
143#define MC13783_IRQ_THWARNL 36
144#define MC13783_IRQ_THWARNH 37
145#define MC13783_IRQ_CLK 38
146#define MC13783_IRQ_SEMAF 39
147#define MC13783_IRQ_MC2B 41
148#define MC13783_IRQ_HSDET 42
149#define MC13783_IRQ_HSL 43
150#define MC13783_IRQ_ALSPTH 44
151#define MC13783_IRQ_AHSSHORT 45
152#define MC13783_NUM_IRQ 46
84 153
154#endif /* __LINUX_MFD_MC13783_H */
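A minimal usage sketch for the reworked mc13783 core API above; the sub-driver functions, the choice of ADC0 bits and the IRQ wiring are illustrative, not part of this patch, and the assumption that mc13783_lock()/mc13783_unlock() bracket register accesses comes from the new declarations rather than from documented rules.

#include <linux/mfd/mc13783.h>

static irqreturn_t my_adcdone_irq(int irq, void *data)
{
        struct mc13783 *mc13783 = data;

        /* acknowledge the ADC-done event at the PMIC */
        mc13783_ackirq(mc13783, MC13783_IRQ_ADCDONE);
        return IRQ_HANDLED;
}

static int my_adc_setup(struct mc13783 *mc13783)
{
        int ret;

        /* enable the ADC reference under the core lock */
        mc13783_lock(mc13783);
        ret = mc13783_reg_rmw(mc13783, MC13783_ADC0,
                              MC13783_ADC0_ADREFEN, MC13783_ADC0_ADREFEN);
        mc13783_unlock(mc13783);
        if (ret)
                return ret;

        return mc13783_irq_request(mc13783, MC13783_IRQ_ADCDONE,
                                   my_adcdone_irq, "my-adc", mc13783);
}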
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
index 9aba7b779fbc..3398bd9aab11 100644
--- a/include/linux/mfd/pcf50633/core.h
+++ b/include/linux/mfd/pcf50633/core.h
@@ -29,7 +29,12 @@ struct pcf50633_platform_data {
29 char **batteries; 29 char **batteries;
30 int num_batteries; 30 int num_batteries;
31 31
32 int charging_restart_interval; 32 /*
33 * Should be set accordingly to the reference resistor used, see
34 * I_{ch(ref)} charger reference current in the pcf50633 User
35 * Manual.
36 */
37 int charger_reference_current_ma;
33 38
34 /* Callbacks */ 39 /* Callbacks */
35 void (*probe_done)(struct pcf50633 *); 40 void (*probe_done)(struct pcf50633 *);
@@ -40,10 +45,6 @@ struct pcf50633_platform_data {
40 u8 resumers[5]; 45 u8 resumers[5];
41}; 46};
42 47
43struct pcf50633_subdev_pdata {
44 struct pcf50633 *pcf;
45};
46
47struct pcf50633_irq { 48struct pcf50633_irq {
48 void (*handler) (int, void *); 49 void (*handler) (int, void *);
49 void *data; 50 void *data;
@@ -217,5 +218,9 @@ enum pcf50633_reg_int5 {
217#define PCF50633_REG_LEDCTL 0x2a 218#define PCF50633_REG_LEDCTL 0x2a
218#define PCF50633_REG_LEDDIM 0x2b 219#define PCF50633_REG_LEDDIM 0x2b
219 220
220#endif 221static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
222{
223 return dev_get_drvdata(dev);
224}
221 225
226#endif
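One way a pcf50633 function driver can reach the core now that pcf50633_subdev_pdata is gone is the new dev_to_pcf50633() helper; fetching it from the parent platform device's drvdata is an assumption about how the core registers its children, and the probe function is made up.

#include <linux/platform_device.h>
#include <linux/mfd/pcf50633/core.h>

static int my_pcf50633_sub_probe(struct platform_device *pdev)
{
        /* the core device owns the drvdata that dev_to_pcf50633() reads */
        struct pcf50633 *pcf = dev_to_pcf50633(pdev->dev.parent);

        if (!pcf)
                return -ENODEV;
        platform_set_drvdata(pdev, pcf);
        return 0;
}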
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h
index 4119579acf2c..df4f5fa88de3 100644
--- a/include/linux/mfd/pcf50633/mbc.h
+++ b/include/linux/mfd/pcf50633/mbc.h
@@ -128,6 +128,7 @@ enum pcf50633_reg_mbcs3 {
128int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); 128int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma);
129 129
130int pcf50633_mbc_get_status(struct pcf50633 *); 130int pcf50633_mbc_get_status(struct pcf50633 *);
131int pcf50633_mbc_get_usb_online_status(struct pcf50633 *);
131 132
132#endif 133#endif
133 134
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 6b9c5d06690c..9cb1834deffa 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -2,6 +2,8 @@
2#define MFD_TMIO_H 2#define MFD_TMIO_H
3 3
4#include <linux/fb.h> 4#include <linux/fb.h>
5#include <linux/io.h>
6#include <linux/platform_device.h>
5 7
6#define tmio_ioread8(addr) readb(addr) 8#define tmio_ioread8(addr) readb(addr)
7#define tmio_ioread16(addr) readw(addr) 9#define tmio_ioread16(addr) readw(addr)
@@ -18,11 +20,48 @@
18 writew((val) >> 16, (addr) + 2); \ 20 writew((val) >> 16, (addr) + 2); \
19 } while (0) 21 } while (0)
20 22
23#define CNF_CMD 0x04
24#define CNF_CTL_BASE 0x10
25#define CNF_INT_PIN 0x3d
26#define CNF_STOP_CLK_CTL 0x40
27#define CNF_GCLK_CTL 0x41
28#define CNF_SD_CLK_MODE 0x42
29#define CNF_PIN_STATUS 0x44
30#define CNF_PWR_CTL_1 0x48
31#define CNF_PWR_CTL_2 0x49
32#define CNF_PWR_CTL_3 0x4a
33#define CNF_CARD_DETECT_MODE 0x4c
34#define CNF_SD_SLOT 0x50
35#define CNF_EXT_GCLK_CTL_1 0xf0
36#define CNF_EXT_GCLK_CTL_2 0xf1
37#define CNF_EXT_GCLK_CTL_3 0xf9
38#define CNF_SD_LED_EN_1 0xfa
39#define CNF_SD_LED_EN_2 0xfe
40
41#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
42
43#define sd_config_write8(base, shift, reg, val) \
44 tmio_iowrite8((val), (base) + ((reg) << (shift)))
45#define sd_config_write16(base, shift, reg, val) \
46 tmio_iowrite16((val), (base) + ((reg) << (shift)))
47#define sd_config_write32(base, shift, reg, val) \
48 do { \
49 tmio_iowrite16((val), (base) + ((reg) << (shift))); \
50 tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \
51 } while (0)
52
53int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
54int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
55void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
56void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
57
21/* 58/*
22 * data for the MMC controller 59 * data for the MMC controller
23 */ 60 */
24struct tmio_mmc_data { 61struct tmio_mmc_data {
25 const unsigned int hclk; 62 const unsigned int hclk;
63 void (*set_pwr)(struct platform_device *host, int state);
64 void (*set_clk_div)(struct platform_device *host, int state);
26}; 65};
27 66
28/* 67/*
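A board-level sketch of the two callbacks added to struct tmio_mmc_data; the GPIO number, clock rate and names are placeholders, only the callback signatures come from this header.

#include <linux/gpio.h>
#include <linux/mfd/tmio.h>

#define MY_SD_PWR_GPIO 42        /* hypothetical power-enable GPIO */

static void my_board_mmc_pwr(struct platform_device *host, int state)
{
        gpio_set_value(MY_SD_PWR_GPIO, state);
}

static struct tmio_mmc_data my_board_mmc_data = {
        .hclk    = 24000000,     /* 24 MHz input clock, board specific */
        .set_pwr = my_board_mmc_pwr,
};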
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 91eb493bf14c..5184b79c700b 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -16,7 +16,6 @@
16#define __MFD_WM831X_CORE_H__ 16#define __MFD_WM831X_CORE_H__
17 17
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/workqueue.h>
20 19
21/* 20/*
22 * Register values. 21 * Register values.
@@ -117,6 +116,7 @@
117#define WM831X_DC3_SLEEP_CONTROL 0x4063 116#define WM831X_DC3_SLEEP_CONTROL 0x4063
118#define WM831X_DC4_CONTROL 0x4064 117#define WM831X_DC4_CONTROL 0x4064
119#define WM831X_DC4_SLEEP_CONTROL 0x4065 118#define WM831X_DC4_SLEEP_CONTROL 0x4065
119#define WM832X_DC4_SLEEP_CONTROL 0x4067
120#define WM831X_EPE1_CONTROL 0x4066 120#define WM831X_EPE1_CONTROL 0x4066
121#define WM831X_EPE2_CONTROL 0x4067 121#define WM831X_EPE2_CONTROL 0x4067
122#define WM831X_LDO1_CONTROL 0x4068 122#define WM831X_LDO1_CONTROL 0x4068
@@ -235,6 +235,8 @@
235 235
236struct regulator_dev; 236struct regulator_dev;
237 237
238#define WM831X_NUM_IRQ_REGS 5
239
238struct wm831x { 240struct wm831x {
239 struct mutex io_lock; 241 struct mutex io_lock;
240 242
@@ -248,10 +250,11 @@ struct wm831x {
248 250
249 int irq; /* Our chip IRQ */ 251 int irq; /* Our chip IRQ */
250 struct mutex irq_lock; 252 struct mutex irq_lock;
251 struct workqueue_struct *irq_wq;
252 struct work_struct irq_work;
253 unsigned int irq_base; 253 unsigned int irq_base;
254 int irq_masks[5]; 254 int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
255 int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
256
257 int num_gpio;
255 258
256 struct mutex auxadc_lock; 259 struct mutex auxadc_lock;
257 260
@@ -278,12 +281,30 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
278int wm831x_irq_init(struct wm831x *wm831x, int irq); 281int wm831x_irq_init(struct wm831x *wm831x, int irq);
279void wm831x_irq_exit(struct wm831x *wm831x); 282void wm831x_irq_exit(struct wm831x *wm831x);
280 283
281int __must_check wm831x_request_irq(struct wm831x *wm831x, 284static inline int __must_check wm831x_request_irq(struct wm831x *wm831x,
282 unsigned int irq, irq_handler_t handler, 285 unsigned int irq,
283 unsigned long flags, const char *name, 286 irq_handler_t handler,
284 void *dev); 287 unsigned long flags,
285void wm831x_free_irq(struct wm831x *wm831x, unsigned int, void *); 288 const char *name,
286void wm831x_disable_irq(struct wm831x *wm831x, int irq); 289 void *dev)
287void wm831x_enable_irq(struct wm831x *wm831x, int irq); 290{
291 return request_threaded_irq(irq, NULL, handler, flags, name, dev);
292}
293
294static inline void wm831x_free_irq(struct wm831x *wm831x,
295 unsigned int irq, void *dev)
296{
297 free_irq(irq, dev);
298}
299
300static inline void wm831x_disable_irq(struct wm831x *wm831x, int irq)
301{
302 disable_irq(irq);
303}
304
305static inline void wm831x_enable_irq(struct wm831x *wm831x, int irq)
306{
307 enable_irq(irq);
308}
288 309
289#endif 310#endif
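Because wm831x_request_irq() is now a thin inline wrapper around request_threaded_irq(), sub-drivers use plain genirq handlers; a short sketch with an illustrative IRQ number, name and trigger flag:

#include <linux/interrupt.h>
#include <linux/mfd/wm831x/core.h>

static irqreturn_t my_wm831x_handler(int irq, void *data)
{
        /* data is whatever was passed to wm831x_request_irq() */
        return IRQ_HANDLED;
}

static int my_wm831x_sub_init(struct wm831x *wm831x, int irq)
{
        return wm831x_request_irq(wm831x, irq, my_wm831x_handler,
                                  IRQF_TRIGGER_RISING, "my-wm831x-event",
                                  wm831x);
}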
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h
index 90d820260aad..fd322aca33ba 100644
--- a/include/linux/mfd/wm831x/pdata.h
+++ b/include/linux/mfd/wm831x/pdata.h
@@ -41,6 +41,23 @@ struct wm831x_battery_pdata {
41 int timeout; /** Charge cycle timeout, in minutes */ 41 int timeout; /** Charge cycle timeout, in minutes */
42}; 42};
43 43
44/**
45 * Configuration for the WM831x DC-DC BuckWise convertors. This
46 * should be passed as driver_data in the regulator_init_data.
47 *
48 * Currently all the configuration is for the fast DVS switching
49 * support of the devices. This allows MFPs on the device to be
50 * configured as an input to switch between two output voltages,
51 * allowing voltage transitions without the expense of an access over
52 * I2C or SPI buses.
53 */
54struct wm831x_buckv_pdata {
55 int dvs_gpio; /** CPU GPIO to use for DVS switching */
56 int dvs_control_src; /** Hardware DVS source to use (1 or 2) */
57 int dvs_init_state; /** DVS state to expect on startup */
58 int dvs_state_gpio; /** CPU GPIO to use for monitoring status */
59};
60
44/* Sources for status LED configuration. Values are register values 61/* Sources for status LED configuration. Values are register values
45 * plus 1 to allow for a zero default for preserve. 62 * plus 1 to allow for a zero default for preserve.
46 */ 63 */
@@ -91,6 +108,7 @@ struct wm831x_pdata {
91 /** Called after subdevices are set up */ 108 /** Called after subdevices are set up */
92 int (*post_init)(struct wm831x *wm831x); 109 int (*post_init)(struct wm831x *wm831x);
93 110
111 int irq_base;
94 int gpio_base; 112 int gpio_base;
95 struct wm831x_backlight_pdata *backlight; 113 struct wm831x_backlight_pdata *backlight;
96 struct wm831x_backup_pdata *backup; 114 struct wm831x_backup_pdata *backup;
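A sketch of how a board file could feed the new wm831x_buckv_pdata through regulator_init_data.driver_data, as the comment in the hunk above describes; the GPIO numbers and the (truncated) init data are board-specific placeholders.

#include <linux/regulator/machine.h>
#include <linux/mfd/wm831x/pdata.h>

static struct wm831x_buckv_pdata my_dc1_dvs = {
        .dvs_gpio        = 101,  /* hypothetical CPU GPIO driving DVS */
        .dvs_control_src = 1,    /* hardware DVS source 1 */
        .dvs_init_state  = 0,
        .dvs_state_gpio  = 102,  /* hypothetical status-readback GPIO */
};

static struct regulator_init_data my_dc1_init_data = {
        /* constraints and consumer supplies omitted */
        .driver_data = &my_dc1_dvs,
};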
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 1d595de6a055..43868899bf49 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -15,7 +15,7 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/workqueue.h> 18#include <linux/interrupt.h>
19 19
20#include <linux/mfd/wm8350/audio.h> 20#include <linux/mfd/wm8350/audio.h>
21#include <linux/mfd/wm8350/gpio.h> 21#include <linux/mfd/wm8350/gpio.h>
@@ -601,7 +601,7 @@ extern const u16 wm8352_mode3_defaults[];
601struct wm8350; 601struct wm8350;
602 602
603struct wm8350_irq { 603struct wm8350_irq {
604 void (*handler) (struct wm8350 *, int, void *); 604 irq_handler_t handler;
605 void *data; 605 void *data;
606}; 606};
607 607
@@ -646,10 +646,12 @@ struct wm8350 {
646 * @init: Function called during driver initialisation. Should be 646 * @init: Function called during driver initialisation. Should be
647 * used by the platform to configure GPIO functions and similar. 647 * used by the platform to configure GPIO functions and similar.
648 * @irq_high: Set if WM8350 IRQ is active high. 648 * @irq_high: Set if WM8350 IRQ is active high.
649 * @irq_base: Base IRQ for genirq (not currently used).
649 */ 650 */
650struct wm8350_platform_data { 651struct wm8350_platform_data {
651 int (*init)(struct wm8350 *wm8350); 652 int (*init)(struct wm8350 *wm8350);
652 int irq_high; 653 int irq_high;
654 int irq_base;
653}; 655};
654 656
655 657
@@ -676,11 +678,13 @@ int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src);
676 * WM8350 internal interrupts 678 * WM8350 internal interrupts
677 */ 679 */
678int wm8350_register_irq(struct wm8350 *wm8350, int irq, 680int wm8350_register_irq(struct wm8350 *wm8350, int irq,
679 void (*handler) (struct wm8350 *, int, void *), 681 irq_handler_t handler, unsigned long flags,
680 void *data); 682 const char *name, void *data);
681int wm8350_free_irq(struct wm8350 *wm8350, int irq); 683int wm8350_free_irq(struct wm8350 *wm8350, int irq);
682int wm8350_mask_irq(struct wm8350 *wm8350, int irq); 684int wm8350_mask_irq(struct wm8350 *wm8350, int irq);
683int wm8350_unmask_irq(struct wm8350 *wm8350, int irq); 685int wm8350_unmask_irq(struct wm8350 *wm8350, int irq);
684 686int wm8350_irq_init(struct wm8350 *wm8350, int irq,
687 struct wm8350_platform_data *pdata);
688int wm8350_irq_exit(struct wm8350 *wm8350);
685 689
686#endif 690#endif
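With the prototype change above, wm8350_register_irq() takes a standard irq_handler_t plus flags and a name, mirroring request_irq(); a minimal sketch (handler and names are illustrative):

#include <linux/interrupt.h>
#include <linux/mfd/wm8350/core.h>

static irqreturn_t my_wm8350_handler(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int my_wm8350_sub_init(struct wm8350 *wm8350, int irq, void *data)
{
        int ret;

        ret = wm8350_register_irq(wm8350, irq, my_wm8350_handler, 0,
                                  "my-wm8350-event", data);
        if (!ret)
                wm8350_unmask_irq(wm8350, irq);
        return ret;
}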
diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h
index ed91e8f5d298..71af3d6ebe9d 100644
--- a/include/linux/mfd/wm8350/gpio.h
+++ b/include/linux/mfd/wm8350/gpio.h
@@ -173,6 +173,24 @@
173#define WM8350_GPIO_DEBOUNCE_ON 1 173#define WM8350_GPIO_DEBOUNCE_ON 1
174 174
175/* 175/*
176 * R30 (0x1E) - GPIO Interrupt Status
177 */
178#define WM8350_GP12_EINT 0x1000
179#define WM8350_GP11_EINT 0x0800
180#define WM8350_GP10_EINT 0x0400
181#define WM8350_GP9_EINT 0x0200
182#define WM8350_GP8_EINT 0x0100
183#define WM8350_GP7_EINT 0x0080
184#define WM8350_GP6_EINT 0x0040
185#define WM8350_GP5_EINT 0x0020
186#define WM8350_GP4_EINT 0x0010
187#define WM8350_GP3_EINT 0x0008
188#define WM8350_GP2_EINT 0x0004
189#define WM8350_GP1_EINT 0x0002
190#define WM8350_GP0_EINT 0x0001
191
192
193/*
176 * R128 (0x80) - GPIO Debounce 194 * R128 (0x80) - GPIO Debounce
177 */ 195 */
178#define WM8350_GP12_DB 0x1000 196#define WM8350_GP12_DB 0x1000
diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h
index be3264e286e0..e786fe9841ef 100644
--- a/include/linux/mfd/wm8350/pmic.h
+++ b/include/linux/mfd/wm8350/pmic.h
@@ -666,20 +666,20 @@
666#define WM8350_ISINK_FLASH_DUR_64MS (1 << 8) 666#define WM8350_ISINK_FLASH_DUR_64MS (1 << 8)
667#define WM8350_ISINK_FLASH_DUR_96MS (2 << 8) 667#define WM8350_ISINK_FLASH_DUR_96MS (2 << 8)
668#define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8) 668#define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8)
669#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 4) 669#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0)
670#define WM8350_ISINK_FLASH_ON_0_25S (1 << 4) 670#define WM8350_ISINK_FLASH_ON_0_25S (1 << 0)
671#define WM8350_ISINK_FLASH_ON_0_50S (2 << 4) 671#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0)
672#define WM8350_ISINK_FLASH_ON_1_00S (3 << 4) 672#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0)
673#define WM8350_ISINK_FLASH_ON_1_95S (1 << 4) 673#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0)
674#define WM8350_ISINK_FLASH_ON_3_91S (2 << 4) 674#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0)
675#define WM8350_ISINK_FLASH_ON_7_80S (3 << 4) 675#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0)
676#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 0) 676#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4)
677#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 0) 677#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4)
678#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 0) 678#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4)
679#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 0) 679#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4)
680#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 0) 680#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4)
681#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 0) 681#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4)
682#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 0) 682#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4)
683 683
684/* 684/*
685 * Regulator Interrupts. 685 * Regulator Interrupts.
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 527602cdea1c..7f085c97c799 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **);
12extern int putback_lru_pages(struct list_head *l); 12extern int putback_lru_pages(struct list_head *l);
13extern int migrate_page(struct address_space *, 13extern int migrate_page(struct address_space *,
14 struct page *, struct page *); 14 struct page *, struct page *);
15extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long); 15extern int migrate_pages(struct list_head *l, new_page_t x,
16 unsigned long private, int offlining);
16 17
17extern int fail_migrate_page(struct address_space *, 18extern int fail_migrate_page(struct address_space *,
18 struct page *, struct page *); 19 struct page *, struct page *);
@@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm,
26 27
27static inline int putback_lru_pages(struct list_head *l) { return 0; } 28static inline int putback_lru_pages(struct list_head *l) { return 0; }
28static inline int migrate_pages(struct list_head *l, new_page_t x, 29static inline int migrate_pages(struct list_head *l, new_page_t x,
29 unsigned long private) { return -ENOSYS; } 30 unsigned long private, int offlining) { return -ENOSYS; }
30
31static inline int migrate_pages_to(struct list_head *pagelist,
32 struct vm_area_struct *vma, int dest) { return 0; }
33 31
34static inline int migrate_prep(void) { return -ENOSYS; } 32static inline int migrate_prep(void) { return -ENOSYS; }
35 33
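The extra 'offlining' argument to migrate_pages() lets callers say whether migration is driven by memory offlining; a hedged sketch of a caller under that assumption, with the page list and new_page_t callback treated as already set up elsewhere:

#include <linux/migrate.h>

static int my_offline_migrate(struct list_head *pagelist, new_page_t get_new)
{
        /* private = 0, offlining = 1: pages go away with their memory */
        int ret = migrate_pages(pagelist, get_new, 0, 1);

        if (ret)
                putback_lru_pages(pagelist);
        return ret;
}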
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index ce7cc6c7bcbb..e92d1bfdb330 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -61,6 +61,7 @@ enum {
61 MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, 61 MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8,
62 MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, 62 MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9,
63 MLX4_DEV_CAP_FLAG_DPDP = 1 << 12, 63 MLX4_DEV_CAP_FLAG_DPDP = 1 << 12,
64 MLX4_DEV_CAP_FLAG_BLH = 1 << 15,
64 MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, 65 MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16,
65 MLX4_DEV_CAP_FLAG_APM = 1 << 17, 66 MLX4_DEV_CAP_FLAG_APM = 1 << 17,
66 MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, 67 MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 24c395694f4d..60c467bfbabd 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -620,13 +620,22 @@ void page_address_init(void);
620/* 620/*
621 * On an anonymous page mapped into a user virtual memory area, 621 * On an anonymous page mapped into a user virtual memory area,
622 * page->mapping points to its anon_vma, not to a struct address_space; 622 * page->mapping points to its anon_vma, not to a struct address_space;
623 * with the PAGE_MAPPING_ANON bit set to distinguish it. 623 * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
624 *
625 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
626 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
627 * and then page->mapping points, not to an anon_vma, but to a private
628 * structure which KSM associates with that merged page. See ksm.h.
629 *
630 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
624 * 631 *
625 * Please note that, confusingly, "page_mapping" refers to the inode 632 * Please note that, confusingly, "page_mapping" refers to the inode
626 * address_space which maps the page from disk; whereas "page_mapped" 633 * address_space which maps the page from disk; whereas "page_mapped"
627 * refers to user virtual address space into which the page is mapped. 634 * refers to user virtual address space into which the page is mapped.
628 */ 635 */
629#define PAGE_MAPPING_ANON 1 636#define PAGE_MAPPING_ANON 1
637#define PAGE_MAPPING_KSM 2
638#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
630 639
631extern struct address_space swapper_space; 640extern struct address_space swapper_space;
632static inline struct address_space *page_mapping(struct page *page) 641static inline struct address_space *page_mapping(struct page *page)
@@ -634,16 +643,19 @@ static inline struct address_space *page_mapping(struct page *page)
634 struct address_space *mapping = page->mapping; 643 struct address_space *mapping = page->mapping;
635 644
636 VM_BUG_ON(PageSlab(page)); 645 VM_BUG_ON(PageSlab(page));
637#ifdef CONFIG_SWAP
638 if (unlikely(PageSwapCache(page))) 646 if (unlikely(PageSwapCache(page)))
639 mapping = &swapper_space; 647 mapping = &swapper_space;
640 else 648 else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
641#endif
642 if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
643 mapping = NULL; 649 mapping = NULL;
644 return mapping; 650 return mapping;
645} 651}
646 652
653/* Neutral page->mapping pointer to address_space or anon_vma or other */
654static inline void *page_rmapping(struct page *page)
655{
656 return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
657}
658
647static inline int PageAnon(struct page *page) 659static inline int PageAnon(struct page *page)
648{ 660{
649 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; 661 return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -758,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb,
758 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry 770 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
759 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry 771 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
760 * @pte_hole: if set, called for each hole at all levels 772 * @pte_hole: if set, called for each hole at all levels
773 * @hugetlb_entry: if set, called for each hugetlb entry
761 * 774 *
762 * (see walk_page_range for more details) 775 * (see walk_page_range for more details)
763 */ 776 */
@@ -767,6 +780,8 @@ struct mm_walk {
767 int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); 780 int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
768 int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); 781 int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
769 int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); 782 int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
783 int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long,
784 struct mm_walk *);
770 struct mm_struct *mm; 785 struct mm_struct *mm;
771 void *private; 786 void *private;
772}; 787};
@@ -1022,6 +1037,9 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
1022extern void remove_active_range(unsigned int nid, unsigned long start_pfn, 1037extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
1023 unsigned long end_pfn); 1038 unsigned long end_pfn);
1024extern void remove_all_active_ranges(void); 1039extern void remove_all_active_ranges(void);
1040void sort_node_map(void);
1041unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1042 unsigned long end_pfn);
1025extern unsigned long absent_pages_in_range(unsigned long start_pfn, 1043extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1026 unsigned long end_pfn); 1044 unsigned long end_pfn);
1027extern void get_pfn_range_for_nid(unsigned int nid, 1045extern void get_pfn_range_for_nid(unsigned int nid,
@@ -1071,6 +1089,7 @@ extern void zone_pcp_update(struct zone *zone);
1071 1089
1072/* nommu.c */ 1090/* nommu.c */
1073extern atomic_long_t mmap_pages_allocated; 1091extern atomic_long_t mmap_pages_allocated;
1092extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1074 1093
1075/* prio_tree.c */ 1094/* prio_tree.c */
1076void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); 1095void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
@@ -1316,11 +1335,17 @@ extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
1316 size_t size); 1335 size_t size);
1317extern void refund_locked_memory(struct mm_struct *mm, size_t size); 1336extern void refund_locked_memory(struct mm_struct *mm, size_t size);
1318 1337
1338enum mf_flags {
1339 MF_COUNT_INCREASED = 1 << 0,
1340};
1319extern void memory_failure(unsigned long pfn, int trapno); 1341extern void memory_failure(unsigned long pfn, int trapno);
1320extern int __memory_failure(unsigned long pfn, int trapno, int ref); 1342extern int __memory_failure(unsigned long pfn, int trapno, int flags);
1343extern int unpoison_memory(unsigned long pfn);
1321extern int sysctl_memory_failure_early_kill; 1344extern int sysctl_memory_failure_early_kill;
1322extern int sysctl_memory_failure_recovery; 1345extern int sysctl_memory_failure_recovery;
1346extern void shake_page(struct page *p, int access);
1323extern atomic_long_t mce_bad_pages; 1347extern atomic_long_t mce_bad_pages;
1348extern int soft_offline_page(struct page *page, int flags);
1324 1349
1325#endif /* __KERNEL__ */ 1350#endif /* __KERNEL__ */
1326#endif /* _LINUX_MM_H */ 1351#endif /* _LINUX_MM_H */
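The new PAGE_MAPPING_KSM bit means page->mapping now encodes three cases (file, anon, anon merged by KSM); a small sketch of the test the comment describes, equivalent in spirit to the PageKsm() helper that ksm.h provides (the function name here is illustrative):

#include <linux/mm.h>

static inline int my_page_is_ksm(struct page *page)
{
        unsigned long mapping = (unsigned long)page->mapping;

        /* KSM pages carry both the ANON and KSM bits in the mapping pointer */
        return (mapping & PAGE_MAPPING_FLAGS) ==
               (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
}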
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 84a524afb3dc..36f96271306c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -122,7 +122,9 @@ struct vm_region {
122 unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ 122 unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */
123 struct file *vm_file; /* the backing file or NULL */ 123 struct file *vm_file; /* the backing file or NULL */
124 124
125 atomic_t vm_usage; /* region usage count */ 125 int vm_usage; /* region usage count (access under nommu_region_sem) */
126 bool vm_icache_flushed : 1; /* true if the icache has been flushed for
127 * this region */
126}; 128};
127 129
128/* 130/*
@@ -203,10 +205,12 @@ struct mm_struct {
203 struct vm_area_struct * mmap; /* list of VMAs */ 205 struct vm_area_struct * mmap; /* list of VMAs */
204 struct rb_root mm_rb; 206 struct rb_root mm_rb;
205 struct vm_area_struct * mmap_cache; /* last find_vma result */ 207 struct vm_area_struct * mmap_cache; /* last find_vma result */
208#ifdef CONFIG_MMU
206 unsigned long (*get_unmapped_area) (struct file *filp, 209 unsigned long (*get_unmapped_area) (struct file *filp,
207 unsigned long addr, unsigned long len, 210 unsigned long addr, unsigned long len,
208 unsigned long pgoff, unsigned long flags); 211 unsigned long pgoff, unsigned long flags);
209 void (*unmap_area) (struct mm_struct *mm, unsigned long addr); 212 void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
213#endif
210 unsigned long mmap_base; /* base of mmap area */ 214 unsigned long mmap_base; /* base of mmap area */
211 unsigned long task_size; /* size of task vm space */ 215 unsigned long task_size; /* size of task vm space */
212 unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ 216 unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 8a5509877192..ee24ef8ab616 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -1,8 +1,6 @@
1#ifndef LINUX_MM_DEBUG_H 1#ifndef LINUX_MM_DEBUG_H
2#define LINUX_MM_DEBUG_H 1 2#define LINUX_MM_DEBUG_H 1
3 3
4#include <linux/autoconf.h>
5
6#ifdef CONFIG_DEBUG_VM 4#ifdef CONFIG_DEBUG_VM
7#define VM_BUG_ON(cond) BUG_ON(cond) 5#define VM_BUG_ON(cond) BUG_ON(cond)
8#else 6#else
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6f7561730d88..30fe668c2542 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -15,7 +15,7 @@
15#include <linux/seqlock.h> 15#include <linux/seqlock.h>
16#include <linux/nodemask.h> 16#include <linux/nodemask.h>
17#include <linux/pageblock-flags.h> 17#include <linux/pageblock-flags.h>
18#include <linux/bounds.h> 18#include <generated/bounds.h>
19#include <asm/atomic.h> 19#include <asm/atomic.h>
20#include <asm/page.h> 20#include <asm/page.h>
21 21
diff --git a/include/linux/module.h b/include/linux/module.h
index 482efc865acf..6cb1a3cab5d3 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -25,8 +25,10 @@
25/* Not Yet Implemented */ 25/* Not Yet Implemented */
26#define MODULE_SUPPORTED_DEVICE(name) 26#define MODULE_SUPPORTED_DEVICE(name)
27 27
28/* some toolchains uses a `_' prefix for all user symbols */ 28/* Some toolchains use a `_' prefix for all user symbols. */
29#ifndef MODULE_SYMBOL_PREFIX 29#ifdef CONFIG_SYMBOL_PREFIX
30#define MODULE_SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX
31#else
30#define MODULE_SYMBOL_PREFIX "" 32#define MODULE_SYMBOL_PREFIX ""
31#endif 33#endif
32 34
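A quick illustration of what MODULE_SYMBOL_PREFIX is for: it is a string pasted in front of symbol names, so with CONFIG_SYMBOL_PREFIX="_" the string below becomes "_init_module". The macro name is made up; the kernel's EXPORT_SYMBOL machinery builds its name strings in the same way.

/* string-literal concatenation: MODULE_SYMBOL_PREFIX "init_module" */
#define MY_SYM_STRING(sym) MODULE_SYMBOL_PREFIX #sym

static const char my_name[] = MY_SYM_STRING(init_module);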
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h
index fff8c53e5434..9c3757c5759d 100644
--- a/include/linux/mtd/bbm.h
+++ b/include/linux/mtd/bbm.h
@@ -19,22 +19,21 @@
19 19
20/** 20/**
21 * struct nand_bbt_descr - bad block table descriptor 21 * struct nand_bbt_descr - bad block table descriptor
22 * @options: options for this descriptor 22 * @options: options for this descriptor
23 * @pages: the page(s) where we find the bbt, used with 23 * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
24 * option BBT_ABSPAGE when bbt is searched, 24 * when bbt is searched, then we store the found bbts pages here.
25 * then we store the found bbts pages here. 25 * Its an array and supports up to 8 chips now
26 * Its an array and supports up to 8 chips now 26 * @offs: offset of the pattern in the oob area of the page
27 * @offs: offset of the pattern in the oob area of the page 27 * @veroffs: offset of the bbt version counter in the oob are of the page
28 * @veroffs: offset of the bbt version counter in the oob area of the page 28 * @version: version read from the bbt page during scan
29 * @version: version read from the bbt page during scan 29 * @len: length of the pattern, if 0 no pattern check is performed
30 * @len: length of the pattern, if 0 no pattern check is performed 30 * @maxblocks: maximum number of blocks to search for a bbt. This number of
31 * @maxblocks: maximum number of blocks to search for a bbt. This 31 * blocks is reserved at the end of the device where the tables are
32 * number of blocks is reserved at the end of the device 32 * written.
33 * where the tables are written. 33 * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
34 * @reserved_block_code: if non-0, this pattern denotes a reserved 34 * bad) block in the stored bbt
35 * (rather than bad) block in the stored bbt 35 * @pattern: pattern to identify bad block table or factory marked good /
36 * @pattern: pattern to identify bad block table or factory marked 36 * bad blocks, can be NULL, if len = 0
37 * good / bad blocks, can be NULL, if len = 0
38 * 37 *
39 * Descriptor for the bad block table marker and the descriptor for the 38 * Descriptor for the bad block table marker and the descriptor for the
40 * pattern which identifies good and bad blocks. The assumption is made 39 * pattern which identifies good and bad blocks. The assumption is made
@@ -90,7 +89,9 @@ struct nand_bbt_descr {
90/* 89/*
91 * Constants for oob configuration 90 * Constants for oob configuration
92 */ 91 */
93#define ONENAND_BADBLOCK_POS 0 92#define NAND_SMALL_BADBLOCK_POS 5
93#define NAND_LARGE_BADBLOCK_POS 0
94#define ONENAND_BADBLOCK_POS 0
94 95
95/* 96/*
96 * Bad block scanning errors 97 * Bad block scanning errors
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index 88d3d8fbf9f2..df89f4275232 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -518,10 +518,11 @@ struct cfi_fixup {
518#define CFI_MFR_ANY 0xffff 518#define CFI_MFR_ANY 0xffff
519#define CFI_ID_ANY 0xffff 519#define CFI_ID_ANY 0xffff
520 520
521#define CFI_MFR_AMD 0x0001 521#define CFI_MFR_AMD 0x0001
522#define CFI_MFR_ATMEL 0x001F 522#define CFI_MFR_INTEL 0x0089
523#define CFI_MFR_SAMSUNG 0x00EC 523#define CFI_MFR_ATMEL 0x001F
524#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ 524#define CFI_MFR_SAMSUNG 0x00EC
525#define CFI_MFR_ST 0x0020 /* STMicroelectronics */
525 526
526void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); 527void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
527 528
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index d4f38c5fd44e..d0bf422ae374 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -38,6 +38,15 @@ typedef enum {
38 FL_XIP_WHILE_ERASING, 38 FL_XIP_WHILE_ERASING,
39 FL_XIP_WHILE_WRITING, 39 FL_XIP_WHILE_WRITING,
40 FL_SHUTDOWN, 40 FL_SHUTDOWN,
41 /* These 2 come from nand_state_t, which has been unified here */
42 FL_READING,
43 FL_CACHEDPRG,
44 /* These 4 come from onenand_state_t, which has been unified here */
45 FL_RESETING,
46 FL_OTPING,
47 FL_PREPARING_ERASE,
48 FL_VERIFYING_ERASE,
49
41 FL_UNKNOWN 50 FL_UNKNOWN
42} flstate_t; 51} flstate_t;
43 52
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 7a232a9bdd62..ccab9dfc5217 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -21,6 +21,8 @@
21#include <linux/wait.h> 21#include <linux/wait.h>
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/mtd/mtd.h> 23#include <linux/mtd/mtd.h>
24#include <linux/mtd/flashchip.h>
25#include <linux/mtd/bbm.h>
24 26
25struct mtd_info; 27struct mtd_info;
26/* Scan and identify a NAND device */ 28/* Scan and identify a NAND device */
@@ -168,7 +170,6 @@ typedef enum {
168/* Chip does not allow subpage writes */ 170/* Chip does not allow subpage writes */
169#define NAND_NO_SUBPAGE_WRITE 0x00000200 171#define NAND_NO_SUBPAGE_WRITE 0x00000200
170 172
171
172/* Options valid for Samsung large page devices */ 173/* Options valid for Samsung large page devices */
173#define NAND_SAMSUNG_LP_OPTIONS \ 174#define NAND_SAMSUNG_LP_OPTIONS \
174 (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK) 175 (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
@@ -194,6 +195,9 @@ typedef enum {
194/* This option is defined if the board driver allocates its own buffers 195/* This option is defined if the board driver allocates its own buffers
195 (e.g. because it needs them DMA-coherent */ 196 (e.g. because it needs them DMA-coherent */
196#define NAND_OWN_BUFFERS 0x00040000 197#define NAND_OWN_BUFFERS 0x00040000
198/* Chip may not exist, so silence any errors in scan */
199#define NAND_SCAN_SILENT_NODEV 0x00080000
200
197/* Options set by nand scan */ 201/* Options set by nand scan */
198/* Nand scan has allocated controller struct */ 202/* Nand scan has allocated controller struct */
199#define NAND_CONTROLLER_ALLOC 0x80000000 203#define NAND_CONTROLLER_ALLOC 0x80000000
@@ -202,20 +206,6 @@ typedef enum {
202#define NAND_CI_CHIPNR_MSK 0x03 206#define NAND_CI_CHIPNR_MSK 0x03
203#define NAND_CI_CELLTYPE_MSK 0x0C 207#define NAND_CI_CELLTYPE_MSK 0x0C
204 208
205/*
206 * nand_state_t - chip states
207 * Enumeration for NAND flash chip state
208 */
209typedef enum {
210 FL_READY,
211 FL_READING,
212 FL_WRITING,
213 FL_ERASING,
214 FL_SYNCING,
215 FL_CACHEDPRG,
216 FL_PM_SUSPENDED,
217} nand_state_t;
218
219/* Keep gcc happy */ 209/* Keep gcc happy */
220struct nand_chip; 210struct nand_chip;
221 211
@@ -402,7 +392,7 @@ struct nand_chip {
402 uint8_t cellinfo; 392 uint8_t cellinfo;
403 int badblockpos; 393 int badblockpos;
404 394
405 nand_state_t state; 395 flstate_t state;
406 396
407 uint8_t *oob_poi; 397 uint8_t *oob_poi;
408 struct nand_hw_control *controller; 398 struct nand_hw_control *controller;
@@ -470,75 +460,6 @@ struct nand_manufacturers {
470extern struct nand_flash_dev nand_flash_ids[]; 460extern struct nand_flash_dev nand_flash_ids[];
471extern struct nand_manufacturers nand_manuf_ids[]; 461extern struct nand_manufacturers nand_manuf_ids[];
472 462
473/**
474 * struct nand_bbt_descr - bad block table descriptor
475 * @options: options for this descriptor
476 * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE
477 * when bbt is searched, then we store the found bbts pages here.
478 * Its an array and supports up to 8 chips now
479 * @offs: offset of the pattern in the oob area of the page
480 * @veroffs: offset of the bbt version counter in the oob are of the page
481 * @version: version read from the bbt page during scan
482 * @len: length of the pattern, if 0 no pattern check is performed
483 * @maxblocks: maximum number of blocks to search for a bbt. This number of
484 * blocks is reserved at the end of the device where the tables are
485 * written.
486 * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than
487 * bad) block in the stored bbt
488 * @pattern: pattern to identify bad block table or factory marked good /
489 * bad blocks, can be NULL, if len = 0
490 *
491 * Descriptor for the bad block table marker and the descriptor for the
492 * pattern which identifies good and bad blocks. The assumption is made
493 * that the pattern and the version count are always located in the oob area
494 * of the first block.
495 */
496struct nand_bbt_descr {
497 int options;
498 int pages[NAND_MAX_CHIPS];
499 int offs;
500 int veroffs;
501 uint8_t version[NAND_MAX_CHIPS];
502 int len;
503 int maxblocks;
504 int reserved_block_code;
505 uint8_t *pattern;
506};
507
508/* Options for the bad block table descriptors */
509
510/* The number of bits used per block in the bbt on the device */
511#define NAND_BBT_NRBITS_MSK 0x0000000F
512#define NAND_BBT_1BIT 0x00000001
513#define NAND_BBT_2BIT 0x00000002
514#define NAND_BBT_4BIT 0x00000004
515#define NAND_BBT_8BIT 0x00000008
516/* The bad block table is in the last good block of the device */
517#define NAND_BBT_LASTBLOCK 0x00000010
518/* The bbt is at the given page, else we must scan for the bbt */
519#define NAND_BBT_ABSPAGE 0x00000020
520/* The bbt is at the given page, else we must scan for the bbt */
521#define NAND_BBT_SEARCH 0x00000040
522/* bbt is stored per chip on multichip devices */
523#define NAND_BBT_PERCHIP 0x00000080
524/* bbt has a version counter at offset veroffs */
525#define NAND_BBT_VERSION 0x00000100
526/* Create a bbt if none axists */
527#define NAND_BBT_CREATE 0x00000200
528/* Search good / bad pattern through all pages of a block */
529#define NAND_BBT_SCANALLPAGES 0x00000400
530/* Scan block empty during good / bad block scan */
531#define NAND_BBT_SCANEMPTY 0x00000800
532/* Write bbt if neccecary */
533#define NAND_BBT_WRITE 0x00001000
534/* Read and write back block contents when writing bbt */
535#define NAND_BBT_SAVECONTENT 0x00002000
536/* Search good / bad pattern on the first and the second page */
537#define NAND_BBT_SCAN2NDPAGE 0x00004000
538
539/* The maximum number of blocks to scan for a bbt */
540#define NAND_BBT_SCAN_MAXBLOCKS 4
541
542extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd); 463extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
543extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs); 464extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
544extern int nand_default_bbt(struct mtd_info *mtd); 465extern int nand_default_bbt(struct mtd_info *mtd);
@@ -548,12 +469,6 @@ extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
548extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, 469extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
549 size_t * retlen, uint8_t * buf); 470 size_t * retlen, uint8_t * buf);
550 471
551/*
552* Constants for oob configuration
553*/
554#define NAND_SMALL_BADBLOCK_POS 5
555#define NAND_LARGE_BADBLOCK_POS 0
556
557/** 472/**
558 * struct platform_nand_chip - chip level device structure 473 * struct platform_nand_chip - chip level device structure
559 * @nr_chips: max. number of chips to scan for 474 * @nr_chips: max. number of chips to scan for
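The new NAND_SCAN_SILENT_NODEV option above suppresses error messages when the probed chip turns out not to exist; a sketch of a driver probing an optional device (function name illustrative):

#include <linux/mtd/nand.h>

static int my_probe_optional_nand(struct mtd_info *mtd, struct nand_chip *chip)
{
        /* silence "no device found" style errors if nothing answers */
        chip->options |= NAND_SCAN_SILENT_NODEV;

        return nand_scan(mtd, 1);       /* non-zero: no usable chip */
}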
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 052ea8ca2434..41bc013571d0 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -16,7 +16,13 @@
16struct mtd_info; 16struct mtd_info;
17 17
18/* 18/*
19 * Calculate 3 byte ECC code for 256 byte block 19 * Calculate 3 byte ECC code for eccsize byte block
20 */
21void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
22 u_char *ecc_code);
23
24/*
25 * Calculate 3 byte ECC code for 256/512 byte block
20 */ 26 */
21int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); 27int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code);
22 28
@@ -27,7 +33,7 @@ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
27 unsigned int eccsize); 33 unsigned int eccsize);
28 34
29/* 35/*
30 * Detect and correct a 1 bit error for 256 byte block 36 * Detect and correct a 1 bit error for 256/512 byte block
31 */ 37 */
32int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); 38int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
33 39
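Giving __nand_calculate_ecc()/__nand_correct_data() an explicit eccsize lets software ECC work on 256- or 512-byte blocks; a minimal sketch using 512-byte blocks (buffer management is the caller's problem and omitted here):

#include <linux/types.h>
#include <linux/mtd/nand_ecc.h>

static int my_verify_block(u_char *data, u_char *stored_ecc)
{
        u_char calc[3];

        __nand_calculate_ecc(data, 512, calc);
        /* returns the number of corrected bitflips, or negative on failure */
        return __nand_correct_data(data, stored_ecc, calc, 512);
}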
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h
index 4e49f3350678..5509eb06b326 100644
--- a/include/linux/mtd/onenand.h
+++ b/include/linux/mtd/onenand.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/include/linux/mtd/onenand.h 2 * linux/include/linux/mtd/onenand.h
3 * 3 *
4 * Copyright (C) 2005-2007 Samsung Electronics 4 * Copyright © 2005-2009 Samsung Electronics
5 * Kyungmin Park <kyungmin.park@samsung.com> 5 * Kyungmin Park <kyungmin.park@samsung.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
@@ -14,6 +14,7 @@
14 14
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16#include <linux/completion.h> 16#include <linux/completion.h>
17#include <linux/mtd/flashchip.h>
17#include <linux/mtd/onenand_regs.h> 18#include <linux/mtd/onenand_regs.h>
18#include <linux/mtd/bbm.h> 19#include <linux/mtd/bbm.h>
19 20
@@ -25,22 +26,6 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips);
25/* Free resources held by the OneNAND device */ 26/* Free resources held by the OneNAND device */
26extern void onenand_release(struct mtd_info *mtd); 27extern void onenand_release(struct mtd_info *mtd);
27 28
28/*
29 * onenand_state_t - chip states
30 * Enumeration for OneNAND flash chip state
31 */
32typedef enum {
33 FL_READY,
34 FL_READING,
35 FL_WRITING,
36 FL_ERASING,
37 FL_SYNCING,
38 FL_LOCKING,
39 FL_RESETING,
40 FL_OTPING,
41 FL_PM_SUSPENDED,
42} onenand_state_t;
43
44/** 29/**
45 * struct onenand_bufferram - OneNAND BufferRAM Data 30 * struct onenand_bufferram - OneNAND BufferRAM Data
46 * @blockpage: block & page address in BufferRAM 31 * @blockpage: block & page address in BufferRAM
@@ -137,7 +122,7 @@ struct onenand_chip {
137 122
138 spinlock_t chip_lock; 123 spinlock_t chip_lock;
139 wait_queue_head_t wq; 124 wait_queue_head_t wq;
140 onenand_state_t state; 125 flstate_t state;
141 unsigned char *page_buf; 126 unsigned char *page_buf;
142 unsigned char *oob_buf; 127 unsigned char *oob_buf;
143 128
@@ -152,6 +137,8 @@ struct onenand_chip {
152/* 137/*
153 * Helper macros 138 * Helper macros
154 */ 139 */
140#define ONENAND_PAGES_PER_BLOCK (1<<6)
141
155#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index) 142#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index)
156#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1) 143#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1)
157#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1) 144#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1)
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h
index acadbf53a69f..cd6f3b431195 100644
--- a/include/linux/mtd/onenand_regs.h
+++ b/include/linux/mtd/onenand_regs.h
@@ -131,6 +131,8 @@
131#define ONENAND_CMD_LOCK_TIGHT (0x2C) 131#define ONENAND_CMD_LOCK_TIGHT (0x2C)
132#define ONENAND_CMD_UNLOCK_ALL (0x27) 132#define ONENAND_CMD_UNLOCK_ALL (0x27)
133#define ONENAND_CMD_ERASE (0x94) 133#define ONENAND_CMD_ERASE (0x94)
134#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95)
135#define ONENAND_CMD_ERASE_VERIFY (0x71)
134#define ONENAND_CMD_RESET (0xF0) 136#define ONENAND_CMD_RESET (0xF0)
135#define ONENAND_CMD_OTP_ACCESS (0x65) 137#define ONENAND_CMD_OTP_ACCESS (0x65)
136#define ONENAND_CMD_READID (0x90) 138#define ONENAND_CMD_READID (0x90)
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 028946750289..05b441d93642 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -72,8 +72,6 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
72 72
73extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, 73extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry,
74 int (*open)(struct inode *, struct file *)); 74 int (*open)(struct inode *, struct file *));
75extern struct file *nameidata_to_filp(struct nameidata *nd, int flags);
76extern void release_open_intent(struct nameidata *);
77 75
78extern struct dentry *lookup_one_len(const char *, struct dentry *, int); 76extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
79 77
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index c4c060208109..9b8299af3741 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -128,6 +128,8 @@
128#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040 128#define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040
129#define SEQ4_STATUS_LEASE_MOVED 0x00000080 129#define SEQ4_STATUS_LEASE_MOVED 0x00000080
130#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100 130#define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100
131#define SEQ4_STATUS_CB_PATH_DOWN_SESSION 0x00000200
132#define SEQ4_STATUS_BACKCHANNEL_FAULT 0x00000400
131 133
132#define NFS4_MAX_UINT64 (~(u64)0) 134#define NFS4_MAX_UINT64 (~(u64)0)
133 135
@@ -528,6 +530,7 @@ enum {
528 NFSPROC4_CLNT_DESTROY_SESSION, 530 NFSPROC4_CLNT_DESTROY_SESSION,
529 NFSPROC4_CLNT_SEQUENCE, 531 NFSPROC4_CLNT_SEQUENCE,
530 NFSPROC4_CLNT_GET_LEASE_TIME, 532 NFSPROC4_CLNT_GET_LEASE_TIME,
533 NFSPROC4_CLNT_RECLAIM_COMPLETE,
531}; 534};
532 535
533/* nfs41 types */ 536/* nfs41 types */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 320569eabe3b..34fc6be5bfcf 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -209,6 +209,7 @@ struct nfs4_session {
209 unsigned long session_state; 209 unsigned long session_state;
210 u32 hash_alg; 210 u32 hash_alg;
211 u32 ssv_len; 211 u32 ssv_len;
212 struct completion complete;
212 213
213 /* The fore and back channel */ 214 /* The fore and back channel */
214 struct nfs4_channel_attrs fc_attrs; 215 struct nfs4_channel_attrs fc_attrs;
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 62f63fb0c4c8..89b28812ec24 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -2,6 +2,7 @@
2#define _LINUX_NFS_XDR_H 2#define _LINUX_NFS_XDR_H
3 3
4#include <linux/nfsacl.h> 4#include <linux/nfsacl.h>
5#include <linux/nfs3.h>
5 6
6/* 7/*
7 * To change the maximum rsize and wsize supported by the NFS client, adjust 8 * To change the maximum rsize and wsize supported by the NFS client, adjust
@@ -170,8 +171,9 @@ struct nfs4_sequence_args {
170struct nfs4_sequence_res { 171struct nfs4_sequence_res {
171 struct nfs4_session *sr_session; 172 struct nfs4_session *sr_session;
172 u8 sr_slotid; /* slot used to send request */ 173 u8 sr_slotid; /* slot used to send request */
173 unsigned long sr_renewal_time;
174 int sr_status; /* sequence operation status */ 174 int sr_status; /* sequence operation status */
175 unsigned long sr_renewal_time;
176 u32 sr_status_flags;
175}; 177};
176 178
177struct nfs4_get_lease_time_args { 179struct nfs4_get_lease_time_args {
@@ -938,6 +940,16 @@ struct nfs41_create_session_args {
938struct nfs41_create_session_res { 940struct nfs41_create_session_res {
939 struct nfs_client *client; 941 struct nfs_client *client;
940}; 942};
943
944struct nfs41_reclaim_complete_args {
945 /* In the future extend to include curr_fh for use with migration */
946 unsigned char one_fs:1;
947 struct nfs4_sequence_args seq_args;
948};
949
950struct nfs41_reclaim_complete_res {
951 struct nfs4_sequence_res seq_res;
952};
941#endif /* CONFIG_NFS_V4_1 */ 953#endif /* CONFIG_NFS_V4_1 */
942 954
943struct nfs_page; 955struct nfs_page;
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h
index 43011b69297c..f321b578edeb 100644
--- a/include/linux/nfsacl.h
+++ b/include/linux/nfsacl.h
@@ -29,6 +29,7 @@
29#ifdef __KERNEL__ 29#ifdef __KERNEL__
30 30
31#include <linux/posix_acl.h> 31#include <linux/posix_acl.h>
32#include <linux/sunrpc/xdr.h>
32 33
33/* Maximum number of ACL entries over NFS */ 34/* Maximum number of ACL entries over NFS */
34#define NFS_ACL_MAX_ENTRIES 1024 35#define NFS_ACL_MAX_ENTRIES 1024
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h
deleted file mode 100644
index 3a3f58934f5e..000000000000
--- a/include/linux/nfsd/cache.h
+++ /dev/null
@@ -1,86 +0,0 @@
1/*
2 * include/linux/nfsd/cache.h
3 *
4 * Request reply cache. This was heavily inspired by the
5 * implementation in 4.3BSD/4.4BSD.
6 *
7 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
8 */
9
10#ifndef NFSCACHE_H
11#define NFSCACHE_H
12
13#include <linux/in.h>
14#include <linux/uio.h>
15
16/*
17 * Representation of a reply cache entry.
18 */
19struct svc_cacherep {
20 struct hlist_node c_hash;
21 struct list_head c_lru;
22
23 unsigned char c_state, /* unused, inprog, done */
24 c_type, /* status, buffer */
25 c_secure : 1; /* req came from port < 1024 */
26 struct sockaddr_in c_addr;
27 __be32 c_xid;
28 u32 c_prot;
29 u32 c_proc;
30 u32 c_vers;
31 unsigned long c_timestamp;
32 union {
33 struct kvec u_vec;
34 __be32 u_status;
35 } c_u;
36};
37
38#define c_replvec c_u.u_vec
39#define c_replstat c_u.u_status
40
41/* cache entry states */
42enum {
43 RC_UNUSED,
44 RC_INPROG,
45 RC_DONE
46};
47
48/* return values */
49enum {
50 RC_DROPIT,
51 RC_REPLY,
52 RC_DOIT,
53 RC_INTR
54};
55
56/*
57 * Cache types.
58 * We may want to add more types one day, e.g. for diropres and
59 * attrstat replies. Using cache entries with fixed length instead
60 * of buffer pointers may be more efficient.
61 */
62enum {
63 RC_NOCACHE,
64 RC_REPLSTAT,
65 RC_REPLBUFF,
66};
67
68/*
69 * If requests are retransmitted within this interval, they're dropped.
70 */
71#define RC_DELAY (HZ/5)
72
73int nfsd_reply_cache_init(void);
74void nfsd_reply_cache_shutdown(void);
75int nfsd_cache_lookup(struct svc_rqst *, int);
76void nfsd_cache_update(struct svc_rqst *, int, __be32 *);
77
78#ifdef CONFIG_NFSD_V4
79void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp);
80#else /* CONFIG_NFSD_V4 */
81static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp)
82{
83}
84#endif /* CONFIG_NFSD_V4 */
85
86#endif /* NFSCACHE_H */
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index a6d9ef2bb34a..8ae78a61eea4 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -12,7 +12,7 @@
12 12
13# include <linux/types.h> 13# include <linux/types.h>
14#ifdef __KERNEL__ 14#ifdef __KERNEL__
15# include <linux/in.h> 15# include <linux/nfsd/nfsfh.h>
16#endif 16#endif
17 17
18/* 18/*
@@ -39,11 +39,23 @@
39#define NFSEXP_FSID 0x2000 39#define NFSEXP_FSID 0x2000
40#define NFSEXP_CROSSMOUNT 0x4000 40#define NFSEXP_CROSSMOUNT 0x4000
41#define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ 41#define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */
42#define NFSEXP_ALLFLAGS 0xFE3F 42/*
43 * The NFSEXP_V4ROOT flag causes the kernel to give access only to NFSv4
44 * clients, and only to the single directory that is the root of the
45 * export; further lookup and readdir operations are treated as if every
46 * subdirectory was a mountpoint, and ignored if they are not themselves
47 * exported. This is used by nfsd and mountd to construct the NFSv4
48 * pseudofilesystem, which provides access only to paths leading to each
49 * exported filesystem.
50 */
51#define NFSEXP_V4ROOT 0x10000
52/* All flags that we claim to support. (Note we don't support NOACL.) */
53#define NFSEXP_ALLFLAGS 0x17E3F
43 54
44/* The flags that may vary depending on security flavor: */ 55/* The flags that may vary depending on security flavor: */
45#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ 56#define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \
46 | NFSEXP_ALLSQUASH) 57 | NFSEXP_ALLSQUASH \
58 | NFSEXP_INSECURE_PORT)
47 59
48#ifdef __KERNEL__ 60#ifdef __KERNEL__
49 61
@@ -108,7 +120,6 @@ struct svc_expkey {
108 struct path ek_path; 120 struct path ek_path;
109}; 121};
110 122
111#define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT))
112#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) 123#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
113#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) 124#define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE)
114#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) 125#define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES)
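For context, NFSEXP_V4ROOT is tested against the export's flag word just like the helpers above; a trivial sketch (helper name illustrative, assuming svc_export keeps its ex_flags field):

static inline int my_exp_is_v4root(struct svc_export *exp)
{
        return exp->ex_flags & NFSEXP_V4ROOT;
}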
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h
deleted file mode 100644
index 510ffdd5020e..000000000000
--- a/include/linux/nfsd/nfsd.h
+++ /dev/null
@@ -1,424 +0,0 @@
1/*
2 * linux/include/linux/nfsd/nfsd.h
3 *
4 * Hodge-podge collection of knfsd-related stuff.
5 * I will sort this out later.
6 *
7 * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de>
8 */
9
10#ifndef LINUX_NFSD_NFSD_H
11#define LINUX_NFSD_NFSD_H
12
13#include <linux/types.h>
14#include <linux/unistd.h>
15#include <linux/fs.h>
16#include <linux/posix_acl.h>
17#include <linux/mount.h>
18
19#include <linux/nfsd/debug.h>
20#include <linux/nfsd/nfsfh.h>
21#include <linux/nfsd/export.h>
22#include <linux/nfsd/stats.h>
23/*
24 * nfsd version
25 */
26#define NFSD_SUPPORTED_MINOR_VERSION 1
27
28/*
29 * Flags for nfsd_permission
30 */
31#define NFSD_MAY_NOP 0
32#define NFSD_MAY_EXEC 1 /* == MAY_EXEC */
33#define NFSD_MAY_WRITE 2 /* == MAY_WRITE */
34#define NFSD_MAY_READ 4 /* == MAY_READ */
35#define NFSD_MAY_SATTR 8
36#define NFSD_MAY_TRUNC 16
37#define NFSD_MAY_LOCK 32
38#define NFSD_MAY_OWNER_OVERRIDE 64
39#define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
40#define NFSD_MAY_BYPASS_GSS_ON_ROOT 256
41
42#define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE)
43#define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC)
44
45/*
46 * Callback function for readdir
47 */
48struct readdir_cd {
49 __be32 err; /* 0, nfserr, or nfserr_eof */
50};
51typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int);
52
53extern struct svc_program nfsd_program;
54extern struct svc_version nfsd_version2, nfsd_version3,
55 nfsd_version4;
56extern u32 nfsd_supported_minorversion;
57extern struct mutex nfsd_mutex;
58extern struct svc_serv *nfsd_serv;
59extern spinlock_t nfsd_drc_lock;
60extern unsigned int nfsd_drc_max_mem;
61extern unsigned int nfsd_drc_mem_used;
62
63extern const struct seq_operations nfs_exports_op;
64
65/*
66 * Function prototypes.
67 */
68int nfsd_svc(unsigned short port, int nrservs);
69int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp);
70
71int nfsd_nrthreads(void);
72int nfsd_nrpools(void);
73int nfsd_get_nrthreads(int n, int *);
74int nfsd_set_nrthreads(int n, int *);
75
76/* nfsd/vfs.c */
77int fh_lock_parent(struct svc_fh *, struct dentry *);
78int nfsd_racache_init(int);
79void nfsd_racache_shutdown(void);
80int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
81 struct svc_export **expp);
82__be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *,
83 const char *, unsigned int, struct svc_fh *);
84__be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *,
85 const char *, unsigned int,
86 struct svc_export **, struct dentry **);
87__be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *,
88 struct iattr *, int, time_t);
89#ifdef CONFIG_NFSD_V4
90__be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
91 struct nfs4_acl *);
92int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
93#endif /* CONFIG_NFSD_V4 */
94__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
95 char *name, int len, struct iattr *attrs,
96 int type, dev_t rdev, struct svc_fh *res);
97#ifdef CONFIG_NFSD_V3
98__be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *);
99__be32 nfsd_create_v3(struct svc_rqst *, struct svc_fh *,
100 char *name, int len, struct iattr *attrs,
101 struct svc_fh *res, int createmode,
102 u32 *verifier, int *truncp, int *created);
103__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
104 loff_t, unsigned long);
105#endif /* CONFIG_NFSD_V3 */
106__be32 nfsd_open(struct svc_rqst *, struct svc_fh *, int,
107 int, struct file **);
108void nfsd_close(struct file *);
109__be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *,
110 loff_t, struct kvec *, int, unsigned long *);
111__be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *,
112 loff_t, struct kvec *,int, unsigned long *, int *);
113__be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *,
114 char *, int *);
115__be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *,
116 char *name, int len, char *path, int plen,
117 struct svc_fh *res, struct iattr *);
118__be32 nfsd_link(struct svc_rqst *, struct svc_fh *,
119 char *, int, struct svc_fh *);
120__be32 nfsd_rename(struct svc_rqst *,
121 struct svc_fh *, char *, int,
122 struct svc_fh *, char *, int);
123__be32 nfsd_remove(struct svc_rqst *,
124 struct svc_fh *, char *, int);
125__be32 nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type,
126 char *name, int len);
127int nfsd_truncate(struct svc_rqst *, struct svc_fh *,
128 unsigned long size);
129__be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *,
130 loff_t *, struct readdir_cd *, filldir_t);
131__be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *,
132 struct kstatfs *, int access);
133
134int nfsd_notify_change(struct inode *, struct iattr *);
135__be32 nfsd_permission(struct svc_rqst *, struct svc_export *,
136 struct dentry *, int);
137int nfsd_sync_dir(struct dentry *dp);
138
139#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
140#ifdef CONFIG_NFSD_V2_ACL
141extern struct svc_version nfsd_acl_version2;
142#else
143#define nfsd_acl_version2 NULL
144#endif
145#ifdef CONFIG_NFSD_V3_ACL
146extern struct svc_version nfsd_acl_version3;
147#else
148#define nfsd_acl_version3 NULL
149#endif
150struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int);
151int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *);
152#endif
153
154enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL };
155int nfsd_vers(int vers, enum vers_op change);
156int nfsd_minorversion(u32 minorversion, enum vers_op change);
157void nfsd_reset_versions(void);
158int nfsd_create_serv(void);
159
160extern int nfsd_max_blksize;
161
162/*
163 * NFSv4 State
164 */
165#ifdef CONFIG_NFSD_V4
166extern unsigned int max_delegations;
167int nfs4_state_init(void);
168void nfsd4_free_slabs(void);
169int nfs4_state_start(void);
170void nfs4_state_shutdown(void);
171time_t nfs4_lease_time(void);
172void nfs4_reset_lease(time_t leasetime);
173int nfs4_reset_recoverydir(char *recdir);
174#else
175static inline int nfs4_state_init(void) { return 0; }
176static inline void nfsd4_free_slabs(void) { }
177static inline int nfs4_state_start(void) { return 0; }
178static inline void nfs4_state_shutdown(void) { }
179static inline time_t nfs4_lease_time(void) { return 0; }
180static inline void nfs4_reset_lease(time_t leasetime) { }
181static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
182#endif
183
184/*
185 * lockd binding
186 */
187void nfsd_lockd_init(void);
188void nfsd_lockd_shutdown(void);
189
190
191/*
192 * These macros provide pre-xdr'ed values for faster operation.
193 */
194#define nfs_ok cpu_to_be32(NFS_OK)
195#define nfserr_perm cpu_to_be32(NFSERR_PERM)
196#define nfserr_noent cpu_to_be32(NFSERR_NOENT)
197#define nfserr_io cpu_to_be32(NFSERR_IO)
198#define nfserr_nxio cpu_to_be32(NFSERR_NXIO)
199#define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN)
200#define nfserr_acces cpu_to_be32(NFSERR_ACCES)
201#define nfserr_exist cpu_to_be32(NFSERR_EXIST)
202#define nfserr_xdev cpu_to_be32(NFSERR_XDEV)
203#define nfserr_nodev cpu_to_be32(NFSERR_NODEV)
204#define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR)
205#define nfserr_isdir cpu_to_be32(NFSERR_ISDIR)
206#define nfserr_inval cpu_to_be32(NFSERR_INVAL)
207#define nfserr_fbig cpu_to_be32(NFSERR_FBIG)
208#define nfserr_nospc cpu_to_be32(NFSERR_NOSPC)
209#define nfserr_rofs cpu_to_be32(NFSERR_ROFS)
210#define nfserr_mlink cpu_to_be32(NFSERR_MLINK)
211#define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP)
212#define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG)
213#define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY)
214#define nfserr_dquot cpu_to_be32(NFSERR_DQUOT)
215#define nfserr_stale cpu_to_be32(NFSERR_STALE)
216#define nfserr_remote cpu_to_be32(NFSERR_REMOTE)
217#define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH)
218#define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE)
219#define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC)
220#define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE)
221#define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP)
222#define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL)
223#define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT)
224#define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE)
225#define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX)
226#define nfserr_denied cpu_to_be32(NFSERR_DENIED)
227#define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK)
228#define nfserr_expired cpu_to_be32(NFSERR_EXPIRED)
229#define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE)
230#define nfserr_same cpu_to_be32(NFSERR_SAME)
231#define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE)
232#define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID)
233#define nfserr_resource cpu_to_be32(NFSERR_RESOURCE)
234#define nfserr_moved cpu_to_be32(NFSERR_MOVED)
235#define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE)
236#define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH)
237#define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED)
238#define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID)
239#define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID)
240#define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID)
241#define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID)
242#define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK)
243#define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME)
244#define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH)
245#define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP)
246#define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR)
247#define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE)
248#define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD)
249#define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL)
250#define nfserr_grace cpu_to_be32(NFSERR_GRACE)
251#define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE)
252#define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD)
253#define nfserr_badname cpu_to_be32(NFSERR_BADNAME)
254#define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN)
255#define nfserr_locked cpu_to_be32(NFSERR_LOCKED)
256#define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC)
257#define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE)
258#define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT)
259#define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST)
260#define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION)
261#define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT)
262#define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY)
263#define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
264#define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED)
265#define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY)
266#define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER)
267#define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE)
268#define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT)
269#define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT)
270#define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE)
271#define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED)
272#define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS)
273#define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG)
274#define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG)
275#define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE)
276#define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP)
277#define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND)
278#define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS)
279#define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION)
280#define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP)
281#define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY)
282#define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE)
283#define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY)
284#define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT)
285#define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION)
286#define nfserr_encr_alg_unsupp cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP)
287#define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT)
288#define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP)
289#define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED)
290#define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE)
291#define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL)
292#define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG)
293#define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT)
294#define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED)
295
296/* error codes for internal use */
297/* if a request fails due to kmalloc failure, it gets dropped.
298 * Client should resend eventually
299 */
300#define nfserr_dropit cpu_to_be32(30000)
301/* end-of-file indicator in readdir */
302#define nfserr_eof cpu_to_be32(30001)
303/* replay detected */
304#define nfserr_replay_me cpu_to_be32(11001)
305/* nfs41 replay detected */
306#define nfserr_replay_cache cpu_to_be32(11002)
307
308/* Check for dir entries '.' and '..' */
309#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))
310
311/*
312 * Time of server startup
313 */
314extern struct timeval nfssvc_boot;
315
316#ifdef CONFIG_NFSD_V4
317
318/* before processing a COMPOUND operation, we have to check that there
319 * is enough space in the buffer for XDR encode to succeed. otherwise,
320 * we might process an operation with side effects, and be unable to
321 * tell the client that the operation succeeded.
322 *
323 * COMPOUND_SLACK_SPACE - this is the minimum bytes of buffer space
324 * needed to encode an "ordinary" _successful_ operation. (GETATTR,
325 * READ, READDIR, and READLINK have their own buffer checks.) if we
326 * fall below this level, we fail the next operation with NFS4ERR_RESOURCE.
327 *
328 * COMPOUND_ERR_SLACK_SPACE - this is the minimum bytes of buffer space
329 * needed to encode an operation which has failed with NFS4ERR_RESOURCE.
330 * care is taken to ensure that we never fall below this level for any
331 * reason.
332 */
333#define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */
334#define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */
335
336#define NFSD_LEASE_TIME (nfs4_lease_time())
337#define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */
338
339/*
340 * The following attributes are currently not supported by the NFSv4 server:
341 * ARCHIVE (deprecated anyway)
342 * HIDDEN (unlikely to be supported any time soon)
343 * MIMETYPE (unlikely to be supported any time soon)
344 * QUOTA_* (will be supported in a forthcoming patch)
345 * SYSTEM (unlikely to be supported any time soon)
346 * TIME_BACKUP (unlikely to be supported any time soon)
347 * TIME_CREATE (unlikely to be supported any time soon)
348 */
349#define NFSD4_SUPPORTED_ATTRS_WORD0 \
350(FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \
351 | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \
352 | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \
353 | FATTR4_WORD0_UNIQUE_HANDLES | FATTR4_WORD0_LEASE_TIME | FATTR4_WORD0_RDATTR_ERROR \
354 | FATTR4_WORD0_ACLSUPPORT | FATTR4_WORD0_CANSETTIME | FATTR4_WORD0_CASE_INSENSITIVE \
355 | FATTR4_WORD0_CASE_PRESERVING | FATTR4_WORD0_CHOWN_RESTRICTED \
356 | FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FILEID | FATTR4_WORD0_FILES_AVAIL \
357 | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_HOMOGENEOUS \
358 | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \
359 | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL)
360
361#define NFSD4_SUPPORTED_ATTRS_WORD1 \
362(FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \
363 | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \
364 | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \
365 | FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_ACCESS_SET \
366 | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \
367 | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID)
368
369#define NFSD4_SUPPORTED_ATTRS_WORD2 0
370
371#define NFSD4_1_SUPPORTED_ATTRS_WORD0 \
372 NFSD4_SUPPORTED_ATTRS_WORD0
373
374#define NFSD4_1_SUPPORTED_ATTRS_WORD1 \
375 NFSD4_SUPPORTED_ATTRS_WORD1
376
377#define NFSD4_1_SUPPORTED_ATTRS_WORD2 \
378 (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
379
380static inline u32 nfsd_suppattrs0(u32 minorversion)
381{
382 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
383 : NFSD4_SUPPORTED_ATTRS_WORD0;
384}
385
386static inline u32 nfsd_suppattrs1(u32 minorversion)
387{
388 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1
389 : NFSD4_SUPPORTED_ATTRS_WORD1;
390}
391
392static inline u32 nfsd_suppattrs2(u32 minorversion)
393{
394 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2
395 : NFSD4_SUPPORTED_ATTRS_WORD2;
396}
397
398/* These will return ERR_INVAL if specified in GETATTR or READDIR. */
399#define NFSD_WRITEONLY_ATTRS_WORD1 \
400(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
401
402/* These are the only attrs allowed in CREATE/OPEN/SETATTR. */
403#define NFSD_WRITEABLE_ATTRS_WORD0 \
404(FATTR4_WORD0_SIZE | FATTR4_WORD0_ACL )
405#define NFSD_WRITEABLE_ATTRS_WORD1 \
406(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \
407 | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
408#define NFSD_WRITEABLE_ATTRS_WORD2 0
409
410#define NFSD_SUPPATTR_EXCLCREAT_WORD0 \
411 NFSD_WRITEABLE_ATTRS_WORD0
412/*
413 * we currently store the exclusive create verifier in the v_{a,m}time
414 * attributes so the client can't set these at create time using EXCLUSIVE4_1
415 */
416#define NFSD_SUPPATTR_EXCLCREAT_WORD1 \
417 (NFSD_WRITEABLE_ATTRS_WORD1 & \
418 ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET))
419#define NFSD_SUPPATTR_EXCLCREAT_WORD2 \
420 NFSD_WRITEABLE_ATTRS_WORD2
421
422#endif /* CONFIG_NFSD_V4 */
423
424#endif /* LINUX_NFSD_NFSD_H */
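
Among the definitions removed with this header is the isdotent() macro, which the readdir paths use to recognise the "." and ".." entries. A quick standalone exercise of the macro, copied verbatim; the check() helper is added here purely for illustration:

#include <stdio.h>

/* Copied as-is from the removed header. */
#define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.'))

static void check(const char *name, int len)
{
	printf("%-4s (len %d): %s\n", name, len,
	       isdotent(name, len) ? "dot entry, skipped" : "regular entry");
}

int main(void)
{
	check(".", 1);     /* current directory            */
	check("..", 2);    /* parent directory             */
	check(".x", 2);    /* hidden file, not a dot entry */
	check("...", 3);   /* three dots, regular name     */
	return 0;
}
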
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h
index 8f641c908450..65e333afaee4 100644
--- a/include/linux/nfsd/nfsfh.h
+++ b/include/linux/nfsd/nfsfh.h
@@ -16,11 +16,9 @@
16 16
17# include <linux/types.h> 17# include <linux/types.h>
18#ifdef __KERNEL__ 18#ifdef __KERNEL__
19# include <linux/string.h> 19# include <linux/sunrpc/svc.h>
20# include <linux/fs.h>
21#endif 20#endif
22#include <linux/nfsd/const.h> 21#include <linux/nfsd/const.h>
23#include <linux/nfsd/debug.h>
24 22
25/* 23/*
26 * This is the old "dentry style" Linux NFSv2 file handle. 24 * This is the old "dentry style" Linux NFSv2 file handle.
@@ -164,208 +162,6 @@ typedef struct svc_fh {
164 162
165} svc_fh; 163} svc_fh;
166 164
167enum nfsd_fsid {
168 FSID_DEV = 0,
169 FSID_NUM,
170 FSID_MAJOR_MINOR,
171 FSID_ENCODE_DEV,
172 FSID_UUID4_INUM,
173 FSID_UUID8,
174 FSID_UUID16,
175 FSID_UUID16_INUM,
176};
177
178enum fsid_source {
179 FSIDSOURCE_DEV,
180 FSIDSOURCE_FSID,
181 FSIDSOURCE_UUID,
182};
183extern enum fsid_source fsid_source(struct svc_fh *fhp);
184
185
186/* This might look a little large to "inline" but in all calls except
187 * one, 'vers' is constant so most of the function disappears.
188 */
189static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino,
190 u32 fsid, unsigned char *uuid)
191{
192 u32 *up;
193 switch(vers) {
194 case FSID_DEV:
195 fsidv[0] = htonl((MAJOR(dev)<<16) |
196 MINOR(dev));
197 fsidv[1] = ino_t_to_u32(ino);
198 break;
199 case FSID_NUM:
200 fsidv[0] = fsid;
201 break;
202 case FSID_MAJOR_MINOR:
203 fsidv[0] = htonl(MAJOR(dev));
204 fsidv[1] = htonl(MINOR(dev));
205 fsidv[2] = ino_t_to_u32(ino);
206 break;
207
208 case FSID_ENCODE_DEV:
209 fsidv[0] = new_encode_dev(dev);
210 fsidv[1] = ino_t_to_u32(ino);
211 break;
212
213 case FSID_UUID4_INUM:
214 /* 4 byte fsid and inode number */
215 up = (u32*)uuid;
216 fsidv[0] = ino_t_to_u32(ino);
217 fsidv[1] = up[0] ^ up[1] ^ up[2] ^ up[3];
218 break;
219
220 case FSID_UUID8:
221 /* 8 byte fsid */
222 up = (u32*)uuid;
223 fsidv[0] = up[0] ^ up[2];
224 fsidv[1] = up[1] ^ up[3];
225 break;
226
227 case FSID_UUID16:
228 /* 16 byte fsid - NFSv3+ only */
229 memcpy(fsidv, uuid, 16);
230 break;
231
232 case FSID_UUID16_INUM:
233 /* 8 byte inode and 16 byte fsid */
234 *(u64*)fsidv = (u64)ino;
235 memcpy(fsidv+2, uuid, 16);
236 break;
237 default: BUG();
238 }
239}
240
241static inline int key_len(int type)
242{
243 switch(type) {
244 case FSID_DEV: return 8;
245 case FSID_NUM: return 4;
246 case FSID_MAJOR_MINOR: return 12;
247 case FSID_ENCODE_DEV: return 8;
248 case FSID_UUID4_INUM: return 8;
249 case FSID_UUID8: return 8;
250 case FSID_UUID16: return 16;
251 case FSID_UUID16_INUM: return 24;
252 default: return 0;
253 }
254}
255
256/*
257 * Shorthand for dprintk()'s
258 */
259extern char * SVCFH_fmt(struct svc_fh *fhp);
260
261/*
262 * Function prototypes
263 */
264__be32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int);
265__be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *);
266__be32 fh_update(struct svc_fh *);
267void fh_put(struct svc_fh *);
268
269static __inline__ struct svc_fh *
270fh_copy(struct svc_fh *dst, struct svc_fh *src)
271{
272 WARN_ON(src->fh_dentry || src->fh_locked);
273
274 *dst = *src;
275 return dst;
276}
277
278static inline void
279fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src)
280{
281 dst->fh_size = src->fh_size;
282 memcpy(&dst->fh_base, &src->fh_base, src->fh_size);
283}
284
285static __inline__ struct svc_fh *
286fh_init(struct svc_fh *fhp, int maxsize)
287{
288 memset(fhp, 0, sizeof(*fhp));
289 fhp->fh_maxsize = maxsize;
290 return fhp;
291}
292
293#ifdef CONFIG_NFSD_V3
294/*
295 * Fill in the pre_op attr for the wcc data
296 */
297static inline void
298fill_pre_wcc(struct svc_fh *fhp)
299{
300 struct inode *inode;
301
302 inode = fhp->fh_dentry->d_inode;
303 if (!fhp->fh_pre_saved) {
304 fhp->fh_pre_mtime = inode->i_mtime;
305 fhp->fh_pre_ctime = inode->i_ctime;
306 fhp->fh_pre_size = inode->i_size;
307 fhp->fh_pre_change = inode->i_version;
308 fhp->fh_pre_saved = 1;
309 }
310}
311
312extern void fill_post_wcc(struct svc_fh *);
313#else
314#define fill_pre_wcc(ignored)
315#define fill_post_wcc(notused)
316#endif /* CONFIG_NFSD_V3 */
317
318
319/*
320 * Lock a file handle/inode
321 * NOTE: both fh_lock and fh_unlock are done "by hand" in
322 * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once,
323 * so any changes here should be reflected there.
324 */
325
326static inline void
327fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
328{
329 struct dentry *dentry = fhp->fh_dentry;
330 struct inode *inode;
331
332 dfprintk(FILEOP, "nfsd: fh_lock(%s) locked = %d\n",
333 SVCFH_fmt(fhp), fhp->fh_locked);
334
335 BUG_ON(!dentry);
336
337 if (fhp->fh_locked) {
338 printk(KERN_WARNING "fh_lock: %s/%s already locked!\n",
339 dentry->d_parent->d_name.name, dentry->d_name.name);
340 return;
341 }
342
343 inode = dentry->d_inode;
344 mutex_lock_nested(&inode->i_mutex, subclass);
345 fill_pre_wcc(fhp);
346 fhp->fh_locked = 1;
347}
348
349static inline void
350fh_lock(struct svc_fh *fhp)
351{
352 fh_lock_nested(fhp, I_MUTEX_NORMAL);
353}
354
355/*
356 * Unlock a file handle/inode
357 */
358static inline void
359fh_unlock(struct svc_fh *fhp)
360{
361 BUG_ON(!fhp->fh_dentry);
362
363 if (fhp->fh_locked) {
364 fill_post_wcc(fhp);
365 mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex);
366 fhp->fh_locked = 0;
367 }
368}
369#endif /* __KERNEL__ */ 165#endif /* __KERNEL__ */
370 166
371 167
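
The mk_fsid()/key_len() pair dropped from nfsfh.h encodes a filehandle's fsid in one of eight layouts, and key_len() must report exactly the number of bytes mk_fsid() fills for each layout. The standalone sketch below copies key_len() and checks it against the 32-bit word counts read off the mk_fsid() switch above; the words[] table is derived here and is not part of the original header:

#include <assert.h>
#include <stdio.h>

enum nfsd_fsid {
	FSID_DEV = 0, FSID_NUM, FSID_MAJOR_MINOR, FSID_ENCODE_DEV,
	FSID_UUID4_INUM, FSID_UUID8, FSID_UUID16, FSID_UUID16_INUM,
};

/* Copied from the removed key_len() helper. */
static int key_len(int type)
{
	switch (type) {
	case FSID_DEV:          return 8;
	case FSID_NUM:          return 4;
	case FSID_MAJOR_MINOR:  return 12;
	case FSID_ENCODE_DEV:   return 8;
	case FSID_UUID4_INUM:   return 8;
	case FSID_UUID8:        return 8;
	case FSID_UUID16:       return 16;
	case FSID_UUID16_INUM:  return 24;
	default:                return 0;
	}
}

int main(void)
{
	/* 32-bit words written by each mk_fsid() case, read from the code above. */
	static const int words[] = { 2, 1, 3, 2, 2, 2, 4, 6 };

	for (int t = FSID_DEV; t <= FSID_UUID16_INUM; t++) {
		assert(key_len(t) == words[t] * 4);
		printf("fsid type %d -> %d bytes\n", t, key_len(t));
	}
	return 0;
}
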
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h
deleted file mode 100644
index b38d11324189..000000000000
--- a/include/linux/nfsd/state.h
+++ /dev/null
@@ -1,404 +0,0 @@
1/*
2 * linux/include/nfsd/state.h
3 *
4 * Copyright (c) 2001 The Regents of the University of Michigan.
5 * All rights reserved.
6 *
7 * Kendrick Smith <kmsmith@umich.edu>
8 * Andy Adamson <andros@umich.edu>
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 *
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 * 3. Neither the name of the University nor the names of its
20 * contributors may be used to endorse or promote products derived
21 * from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
31 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
32 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
33 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 *
35 */
36
37#ifndef _NFSD4_STATE_H
38#define _NFSD4_STATE_H
39
40#include <linux/list.h>
41#include <linux/kref.h>
42#include <linux/sunrpc/clnt.h>
43
44typedef struct {
45 u32 cl_boot;
46 u32 cl_id;
47} clientid_t;
48
49typedef struct {
50 u32 so_boot;
51 u32 so_stateownerid;
52 u32 so_fileid;
53} stateid_opaque_t;
54
55typedef struct {
56 u32 si_generation;
57 stateid_opaque_t si_opaque;
58} stateid_t;
59#define si_boot si_opaque.so_boot
60#define si_stateownerid si_opaque.so_stateownerid
61#define si_fileid si_opaque.so_fileid
62
63struct nfsd4_cb_sequence {
64 /* args/res */
65 u32 cbs_minorversion;
66 struct nfs4_client *cbs_clp;
67};
68
69struct nfs4_delegation {
70 struct list_head dl_perfile;
71 struct list_head dl_perclnt;
72 struct list_head dl_recall_lru; /* delegation recalled */
73 atomic_t dl_count; /* ref count */
74 struct nfs4_client *dl_client;
75 struct nfs4_file *dl_file;
76 struct file_lock *dl_flock;
77 struct file *dl_vfs_file;
78 u32 dl_type;
79 time_t dl_time;
80/* For recall: */
81 u32 dl_ident;
82 stateid_t dl_stateid;
83 struct knfsd_fh dl_fh;
84 int dl_retries;
85};
86
87/* client delegation callback info */
88struct nfs4_cb_conn {
89 /* SETCLIENTID info */
90 struct sockaddr_storage cb_addr;
91 size_t cb_addrlen;
92 u32 cb_prog;
93 u32 cb_minorversion;
94 u32 cb_ident; /* minorversion 0 only */
95 /* RPC client info */
96 atomic_t cb_set; /* successful CB_NULL call */
97 struct rpc_clnt * cb_client;
98};
99
100/* Maximum number of slots per session. 160 is useful for long haul TCP */
101#define NFSD_MAX_SLOTS_PER_SESSION 160
102/* Maximum number of operations per session compound */
103#define NFSD_MAX_OPS_PER_COMPOUND 16
104/* Maximum size of the per-slot reply cache within a session */
105#define NFSD_SLOT_CACHE_SIZE 1024
106/* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */
107#define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32
108#define NFSD_MAX_MEM_PER_SESSION \
109 (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE)
110
111struct nfsd4_slot {
112 bool sl_inuse;
113 bool sl_cachethis;
114 u16 sl_opcnt;
115 u32 sl_seqid;
116 __be32 sl_status;
117 u32 sl_datalen;
118 char sl_data[];
119};
120
121struct nfsd4_channel_attrs {
122 u32 headerpadsz;
123 u32 maxreq_sz;
124 u32 maxresp_sz;
125 u32 maxresp_cached;
126 u32 maxops;
127 u32 maxreqs;
128 u32 nr_rdma_attrs;
129 u32 rdma_attrs;
130};
131
132struct nfsd4_create_session {
133 clientid_t clientid;
134 struct nfs4_sessionid sessionid;
135 u32 seqid;
136 u32 flags;
137 struct nfsd4_channel_attrs fore_channel;
138 struct nfsd4_channel_attrs back_channel;
139 u32 callback_prog;
140 u32 uid;
141 u32 gid;
142};
143
144/* The single slot clientid cache structure */
145struct nfsd4_clid_slot {
146 u32 sl_seqid;
147 __be32 sl_status;
148 struct nfsd4_create_session sl_cr_ses;
149};
150
151struct nfsd4_session {
152 struct kref se_ref;
153 struct list_head se_hash; /* hash by sessionid */
154 struct list_head se_perclnt;
155 u32 se_flags;
156 struct nfs4_client *se_client; /* for expire_client */
157 struct nfs4_sessionid se_sessionid;
158 struct nfsd4_channel_attrs se_fchannel;
159 struct nfsd4_channel_attrs se_bchannel;
160 struct nfsd4_slot *se_slots[]; /* forward channel slots */
161};
162
163static inline void
164nfsd4_put_session(struct nfsd4_session *ses)
165{
166 extern void free_session(struct kref *kref);
167 kref_put(&ses->se_ref, free_session);
168}
169
170static inline void
171nfsd4_get_session(struct nfsd4_session *ses)
172{
173 kref_get(&ses->se_ref);
174}
175
176/* formatted contents of nfs4_sessionid */
177struct nfsd4_sessionid {
178 clientid_t clientid;
179 u32 sequence;
180 u32 reserved;
181};
182
183#define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */
184
185/*
186 * struct nfs4_client - one per client. Clientids live here.
187 * o Each nfs4_client is hashed by clientid.
188 *
189 * o Each nfs4_client is also hashed by name
190 * (the opaque quantity initially sent by the client to identify itself).
191 *
192 * o cl_perclient list is used to ensure no dangling stateowner references
193 * when we expire the nfs4_client
194 */
195struct nfs4_client {
196 struct list_head cl_idhash; /* hash by cl_clientid.id */
197 struct list_head cl_strhash; /* hash by cl_name */
198 struct list_head cl_openowners;
199 struct list_head cl_delegations;
200 struct list_head cl_lru; /* tail queue */
201 struct xdr_netobj cl_name; /* id generated by client */
202 char cl_recdir[HEXDIR_LEN]; /* recovery dir */
203 nfs4_verifier cl_verifier; /* generated by client */
204 time_t cl_time; /* time of last lease renewal */
205 struct sockaddr_storage cl_addr; /* client ipaddress */
206 u32 cl_flavor; /* setclientid pseudoflavor */
207 char *cl_principal; /* setclientid principal name */
208 struct svc_cred cl_cred; /* setclientid principal */
209 clientid_t cl_clientid; /* generated by server */
210 nfs4_verifier cl_confirm; /* generated by server */
211 struct nfs4_cb_conn cl_cb_conn; /* callback info */
212 atomic_t cl_count; /* ref count */
213 u32 cl_firststate; /* recovery dir creation */
214
215 /* for nfs41 */
216 struct list_head cl_sessions;
217 struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */
218 u32 cl_exchange_flags;
219 struct nfs4_sessionid cl_sessionid;
220
221 /* for nfs41 callbacks */
222 /* We currently support a single back channel with a single slot */
223 unsigned long cl_cb_slot_busy;
224 u32 cl_cb_seq_nr;
225 struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */
226 struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */
227 /* wait here for slots */
228};
229
230/* struct nfs4_client_reclaim
231 * one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl
232 * upon lease reset, or from upcall to state_daemon (to read in state
233 * from non-volatile storage) upon reboot.
234 */
235struct nfs4_client_reclaim {
236 struct list_head cr_strhash; /* hash by cr_name */
237 char cr_recdir[HEXDIR_LEN]; /* recover dir */
238};
239
240static inline void
241update_stateid(stateid_t *stateid)
242{
243 stateid->si_generation++;
244}
245
246/* A reasonable value for REPLAY_ISIZE was estimated as follows:
247 * The OPEN response, typically the largest, requires
248 * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) +
249 * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) +
250 * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes
251 */
252
253#define NFSD4_REPLAY_ISIZE 112
254
255/*
256 * Replay buffer, where the result of the last seqid-mutating operation
257 * is cached.
258 */
259struct nfs4_replay {
260 __be32 rp_status;
261 unsigned int rp_buflen;
262 char *rp_buf;
263 unsigned int rp_allocated;
264 struct knfsd_fh rp_openfh;
265 char rp_ibuf[NFSD4_REPLAY_ISIZE];
266};
267
268/*
269* nfs4_stateowner can either be an open_owner, or a lock_owner
270*
271* so_idhash: stateid_hashtbl[] for open owner, lockstateid_hashtbl[]
272* for lock_owner
273* so_strhash: ownerstr_hashtbl[] for open_owner, lock_ownerstr_hashtbl[]
274* for lock_owner
275* so_perclient: nfs4_client->cl_perclient entry - used when nfs4_client
276* struct is reaped.
277* so_perfilestate: heads the list of nfs4_stateid (either open or lock)
278* and is used to ensure no dangling nfs4_stateid references when we
279* release a stateowner.
280* so_perlockowner: (open) nfs4_stateid->st_perlockowner entry - used when
281* close is called to reap associated byte-range locks
282* so_close_lru: (open) stateowner is placed on this list instead of being
283* reaped (when so_perfilestate is empty) to hold the last close replay.
284* reaped by laundromat thread after lease period.
285*/
286struct nfs4_stateowner {
287 struct kref so_ref;
288 struct list_head so_idhash; /* hash by so_id */
289 struct list_head so_strhash; /* hash by op_name */
290 struct list_head so_perclient;
291 struct list_head so_stateids;
292 struct list_head so_perstateid; /* for lockowners only */
293 struct list_head so_close_lru; /* tail queue */
294 time_t so_time; /* time of placement on so_close_lru */
295 int so_is_open_owner; /* 1=openowner,0=lockowner */
296 u32 so_id;
297 struct nfs4_client * so_client;
298 /* after increment in ENCODE_SEQID_OP_TAIL, represents the next
299 * sequence id expected from the client: */
300 u32 so_seqid;
301 struct xdr_netobj so_owner; /* open owner name */
302 int so_confirmed; /* successful OPEN_CONFIRM? */
303 struct nfs4_replay so_replay;
304};
305
306/*
307* nfs4_file: a file opened by some number of (open) nfs4_stateowners.
308* o fi_perfile list is used to search for conflicting
309* share_access, share_deny on the file.
310*/
311struct nfs4_file {
312 atomic_t fi_ref;
313 struct list_head fi_hash; /* hash by "struct inode *" */
314 struct list_head fi_stateids;
315 struct list_head fi_delegations;
316 struct inode *fi_inode;
317 u32 fi_id; /* used with stateowner->so_id
318 * for stateid_hashtbl hash */
319 bool fi_had_conflict;
320};
321
322/*
323* nfs4_stateid can either be an open stateid or (eventually) a lock stateid
324*
325* (open)nfs4_stateid: one per (open)nfs4_stateowner, nfs4_file
326*
327* st_hash: stateid_hashtbl[] entry or lockstateid_hashtbl entry
328* st_perfile: file_hashtbl[] entry.
329* st_perfile_state: nfs4_stateowner->so_perfilestate
330* st_perlockowner: (open stateid) list of lock nfs4_stateowners
331* st_access_bmap: used only for open stateid
332* st_deny_bmap: used only for open stateid
333* st_openstp: open stateid lock stateid was derived from
334*
335* XXX: open stateids and lock stateids have diverged sufficiently that
336* we should consider defining separate structs for the two cases.
337*/
338
339struct nfs4_stateid {
340 struct list_head st_hash;
341 struct list_head st_perfile;
342 struct list_head st_perstateowner;
343 struct list_head st_lockowners;
344 struct nfs4_stateowner * st_stateowner;
345 struct nfs4_file * st_file;
346 stateid_t st_stateid;
347 struct file * st_vfs_file;
348 unsigned long st_access_bmap;
349 unsigned long st_deny_bmap;
350 struct nfs4_stateid * st_openstp;
351};
352
353/* flags for preprocess_seqid_op() */
354#define HAS_SESSION 0x00000001
355#define CONFIRM 0x00000002
356#define OPEN_STATE 0x00000004
357#define LOCK_STATE 0x00000008
358#define RD_STATE 0x00000010
359#define WR_STATE 0x00000020
360#define CLOSE_STATE 0x00000040
361
362#define seqid_mutating_err(err) \
363 (((err) != nfserr_stale_clientid) && \
364 ((err) != nfserr_bad_seqid) && \
365 ((err) != nfserr_stale_stateid) && \
366 ((err) != nfserr_bad_stateid))
367
368struct nfsd4_compound_state;
369
370extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
371 stateid_t *stateid, int flags, struct file **filp);
372extern void nfs4_lock_state(void);
373extern void nfs4_unlock_state(void);
374extern int nfs4_in_grace(void);
375extern __be32 nfs4_check_open_reclaim(clientid_t *clid);
376extern void put_nfs4_client(struct nfs4_client *clp);
377extern void nfs4_free_stateowner(struct kref *kref);
378extern int set_callback_cred(void);
379extern void nfsd4_probe_callback(struct nfs4_client *clp);
380extern void nfsd4_cb_recall(struct nfs4_delegation *dp);
381extern void nfs4_put_delegation(struct nfs4_delegation *dp);
382extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname);
383extern void nfsd4_init_recdir(char *recdir_name);
384extern int nfsd4_recdir_load(void);
385extern void nfsd4_shutdown_recdir(void);
386extern int nfs4_client_to_reclaim(const char *name);
387extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id);
388extern void nfsd4_recdir_purge_old(void);
389extern int nfsd4_create_clid_dir(struct nfs4_client *clp);
390extern void nfsd4_remove_clid_dir(struct nfs4_client *clp);
391
392static inline void
393nfs4_put_stateowner(struct nfs4_stateowner *so)
394{
395 kref_put(&so->so_ref, nfs4_free_stateowner);
396}
397
398static inline void
399nfs4_get_stateowner(struct nfs4_stateowner *so)
400{
401 kref_get(&so->so_ref);
402}
403
404#endif /* NFSD4_STATE_H */
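
Two of the constants removed with state.h are derived values, and the comment above NFSD4_REPLAY_ISIZE even spells out the arithmetic. A standalone check that the listed OPEN-reply components really sum to 112 bytes and that the per-session reply cache bound works out to 32 KB (the local variable names are illustrative only):

#include <assert.h>
#include <stdio.h>

#define NFSD_SLOT_CACHE_SIZE                1024
#define NFSD_CACHE_SIZE_SLOTS_PER_SESSION   32
#define NFSD_MAX_MEM_PER_SESSION \
	(NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE)

#define NFSD4_REPLAY_ISIZE 112

int main(void)
{
	/* OPEN reply components listed in the NFSD4_REPLAY_ISIZE comment. */
	int status = 4, stateid = 8, changeinfo = 20, rflags = 4, verifier = 8;
	int dtype = 4, dstateid = 8, drecall = 4, dlimit = 20, dace = 32;

	int open_reply = status + stateid + changeinfo + rflags + verifier +
			 dtype + dstateid + drecall + dlimit + dace;

	assert(open_reply == NFSD4_REPLAY_ISIZE);
	assert(NFSD_MAX_MEM_PER_SESSION == 32 * 1024);
	printf("estimated OPEN reply: %d bytes; per-session cache cap: %d bytes\n",
	       open_reply, NFSD_MAX_MEM_PER_SESSION);
	return 0;
}
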
diff --git a/include/linux/nfsd/syscall.h b/include/linux/nfsd/syscall.h
index 7a3b565b898f..812bc1e160dc 100644
--- a/include/linux/nfsd/syscall.h
+++ b/include/linux/nfsd/syscall.h
@@ -9,14 +9,8 @@
9#ifndef NFSD_SYSCALL_H 9#ifndef NFSD_SYSCALL_H
10#define NFSD_SYSCALL_H 10#define NFSD_SYSCALL_H
11 11
12# include <linux/types.h> 12#include <linux/types.h>
13#ifdef __KERNEL__
14# include <linux/in.h>
15#endif
16#include <linux/posix_types.h>
17#include <linux/nfsd/const.h>
18#include <linux/nfsd/export.h> 13#include <linux/nfsd/export.h>
19#include <linux/nfsd/nfsfh.h>
20 14
21/* 15/*
22 * Version of the syscall interface 16 * Version of the syscall interface
diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h
deleted file mode 100644
index a0132ef58f21..000000000000
--- a/include/linux/nfsd/xdr.h
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * linux/include/linux/nfsd/xdr.h
3 *
4 * XDR types for nfsd. This is mainly a typing exercise.
5 */
6
7#ifndef LINUX_NFSD_H
8#define LINUX_NFSD_H
9
10#include <linux/fs.h>
11#include <linux/vfs.h>
12#include <linux/nfs.h>
13
14struct nfsd_fhandle {
15 struct svc_fh fh;
16};
17
18struct nfsd_sattrargs {
19 struct svc_fh fh;
20 struct iattr attrs;
21};
22
23struct nfsd_diropargs {
24 struct svc_fh fh;
25 char * name;
26 unsigned int len;
27};
28
29struct nfsd_readargs {
30 struct svc_fh fh;
31 __u32 offset;
32 __u32 count;
33 int vlen;
34};
35
36struct nfsd_writeargs {
37 svc_fh fh;
38 __u32 offset;
39 int len;
40 int vlen;
41};
42
43struct nfsd_createargs {
44 struct svc_fh fh;
45 char * name;
46 unsigned int len;
47 struct iattr attrs;
48};
49
50struct nfsd_renameargs {
51 struct svc_fh ffh;
52 char * fname;
53 unsigned int flen;
54 struct svc_fh tfh;
55 char * tname;
56 unsigned int tlen;
57};
58
59struct nfsd_readlinkargs {
60 struct svc_fh fh;
61 char * buffer;
62};
63
64struct nfsd_linkargs {
65 struct svc_fh ffh;
66 struct svc_fh tfh;
67 char * tname;
68 unsigned int tlen;
69};
70
71struct nfsd_symlinkargs {
72 struct svc_fh ffh;
73 char * fname;
74 unsigned int flen;
75 char * tname;
76 unsigned int tlen;
77 struct iattr attrs;
78};
79
80struct nfsd_readdirargs {
81 struct svc_fh fh;
82 __u32 cookie;
83 __u32 count;
84 __be32 * buffer;
85};
86
87struct nfsd_attrstat {
88 struct svc_fh fh;
89 struct kstat stat;
90};
91
92struct nfsd_diropres {
93 struct svc_fh fh;
94 struct kstat stat;
95};
96
97struct nfsd_readlinkres {
98 int len;
99};
100
101struct nfsd_readres {
102 struct svc_fh fh;
103 unsigned long count;
104 struct kstat stat;
105};
106
107struct nfsd_readdirres {
108 int count;
109
110 struct readdir_cd common;
111 __be32 * buffer;
112 int buflen;
113 __be32 * offset;
114};
115
116struct nfsd_statfsres {
117 struct kstatfs stats;
118};
119
120/*
121 * Storage requirements for XDR arguments and results.
122 */
123union nfsd_xdrstore {
124 struct nfsd_sattrargs sattr;
125 struct nfsd_diropargs dirop;
126 struct nfsd_readargs read;
127 struct nfsd_writeargs write;
128 struct nfsd_createargs create;
129 struct nfsd_renameargs rename;
130 struct nfsd_linkargs link;
131 struct nfsd_symlinkargs symlink;
132 struct nfsd_readdirargs readdir;
133};
134
135#define NFS2_SVC_XDRSIZE sizeof(union nfsd_xdrstore)
136
137
138int nfssvc_decode_void(struct svc_rqst *, __be32 *, void *);
139int nfssvc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
140int nfssvc_decode_sattrargs(struct svc_rqst *, __be32 *,
141 struct nfsd_sattrargs *);
142int nfssvc_decode_diropargs(struct svc_rqst *, __be32 *,
143 struct nfsd_diropargs *);
144int nfssvc_decode_readargs(struct svc_rqst *, __be32 *,
145 struct nfsd_readargs *);
146int nfssvc_decode_writeargs(struct svc_rqst *, __be32 *,
147 struct nfsd_writeargs *);
148int nfssvc_decode_createargs(struct svc_rqst *, __be32 *,
149 struct nfsd_createargs *);
150int nfssvc_decode_renameargs(struct svc_rqst *, __be32 *,
151 struct nfsd_renameargs *);
152int nfssvc_decode_readlinkargs(struct svc_rqst *, __be32 *,
153 struct nfsd_readlinkargs *);
154int nfssvc_decode_linkargs(struct svc_rqst *, __be32 *,
155 struct nfsd_linkargs *);
156int nfssvc_decode_symlinkargs(struct svc_rqst *, __be32 *,
157 struct nfsd_symlinkargs *);
158int nfssvc_decode_readdirargs(struct svc_rqst *, __be32 *,
159 struct nfsd_readdirargs *);
160int nfssvc_encode_void(struct svc_rqst *, __be32 *, void *);
161int nfssvc_encode_attrstat(struct svc_rqst *, __be32 *, struct nfsd_attrstat *);
162int nfssvc_encode_diropres(struct svc_rqst *, __be32 *, struct nfsd_diropres *);
163int nfssvc_encode_readlinkres(struct svc_rqst *, __be32 *, struct nfsd_readlinkres *);
164int nfssvc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd_readres *);
165int nfssvc_encode_statfsres(struct svc_rqst *, __be32 *, struct nfsd_statfsres *);
166int nfssvc_encode_readdirres(struct svc_rqst *, __be32 *, struct nfsd_readdirres *);
167
168int nfssvc_encode_entry(void *, const char *name,
169 int namlen, loff_t offset, u64 ino, unsigned int);
170
171int nfssvc_release_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
172
173/* Helper functions for NFSv2 ACL code */
174__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp);
175__be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp);
176
177#endif /* LINUX_NFSD_H */
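
NFS2_SVC_XDRSIZE shows a common sizing idiom: a union of every per-procedure argument struct, whose sizeof gives the worst-case scratch space the RPC layer reserves for decoding one request. The toy structs below reproduce only the idiom; the real nfsd argument structs depend on kernel types such as svc_fh and cannot be built in userspace:

#include <stdio.h>

/* Toy stand-ins for per-procedure argument structs. */
struct toy_readargs  { unsigned int offset, count; int vlen; };
struct toy_writeargs { unsigned int offset; int len, vlen; char pad[64]; };
struct toy_diropargs { char *name; unsigned int len; };

/* Same idiom as union nfsd_xdrstore / NFS2_SVC_XDRSIZE. */
union toy_xdrstore {
	struct toy_readargs  read;
	struct toy_writeargs write;
	struct toy_diropargs dirop;
};
#define TOY_SVC_XDRSIZE sizeof(union toy_xdrstore)

int main(void)
{
	/* The union is as large as its largest member, so one buffer of
	 * TOY_SVC_XDRSIZE bytes can hold the decoded arguments of any call. */
	printf("read=%zu write=%zu dirop=%zu -> reserve %zu bytes\n",
	       sizeof(struct toy_readargs), sizeof(struct toy_writeargs),
	       sizeof(struct toy_diropargs), TOY_SVC_XDRSIZE);
	return 0;
}
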
diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h
deleted file mode 100644
index 421eddd65a25..000000000000
--- a/include/linux/nfsd/xdr3.h
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * linux/include/linux/nfsd/xdr3.h
3 *
4 * XDR types for NFSv3 in nfsd.
5 *
6 * Copyright (C) 1996-1998, Olaf Kirch <okir@monad.swb.de>
7 */
8
9#ifndef _LINUX_NFSD_XDR3_H
10#define _LINUX_NFSD_XDR3_H
11
12#include <linux/nfsd/xdr.h>
13
14struct nfsd3_sattrargs {
15 struct svc_fh fh;
16 struct iattr attrs;
17 int check_guard;
18 time_t guardtime;
19};
20
21struct nfsd3_diropargs {
22 struct svc_fh fh;
23 char * name;
24 unsigned int len;
25};
26
27struct nfsd3_accessargs {
28 struct svc_fh fh;
29 unsigned int access;
30};
31
32struct nfsd3_readargs {
33 struct svc_fh fh;
34 __u64 offset;
35 __u32 count;
36 int vlen;
37};
38
39struct nfsd3_writeargs {
40 svc_fh fh;
41 __u64 offset;
42 __u32 count;
43 int stable;
44 __u32 len;
45 int vlen;
46};
47
48struct nfsd3_createargs {
49 struct svc_fh fh;
50 char * name;
51 unsigned int len;
52 int createmode;
53 struct iattr attrs;
54 __be32 * verf;
55};
56
57struct nfsd3_mknodargs {
58 struct svc_fh fh;
59 char * name;
60 unsigned int len;
61 __u32 ftype;
62 __u32 major, minor;
63 struct iattr attrs;
64};
65
66struct nfsd3_renameargs {
67 struct svc_fh ffh;
68 char * fname;
69 unsigned int flen;
70 struct svc_fh tfh;
71 char * tname;
72 unsigned int tlen;
73};
74
75struct nfsd3_readlinkargs {
76 struct svc_fh fh;
77 char * buffer;
78};
79
80struct nfsd3_linkargs {
81 struct svc_fh ffh;
82 struct svc_fh tfh;
83 char * tname;
84 unsigned int tlen;
85};
86
87struct nfsd3_symlinkargs {
88 struct svc_fh ffh;
89 char * fname;
90 unsigned int flen;
91 char * tname;
92 unsigned int tlen;
93 struct iattr attrs;
94};
95
96struct nfsd3_readdirargs {
97 struct svc_fh fh;
98 __u64 cookie;
99 __u32 dircount;
100 __u32 count;
101 __be32 * verf;
102 __be32 * buffer;
103};
104
105struct nfsd3_commitargs {
106 struct svc_fh fh;
107 __u64 offset;
108 __u32 count;
109};
110
111struct nfsd3_getaclargs {
112 struct svc_fh fh;
113 int mask;
114};
115
116struct posix_acl;
117struct nfsd3_setaclargs {
118 struct svc_fh fh;
119 int mask;
120 struct posix_acl *acl_access;
121 struct posix_acl *acl_default;
122};
123
124struct nfsd3_attrstat {
125 __be32 status;
126 struct svc_fh fh;
127 struct kstat stat;
128};
129
130/* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */
131struct nfsd3_diropres {
132 __be32 status;
133 struct svc_fh dirfh;
134 struct svc_fh fh;
135};
136
137struct nfsd3_accessres {
138 __be32 status;
139 struct svc_fh fh;
140 __u32 access;
141};
142
143struct nfsd3_readlinkres {
144 __be32 status;
145 struct svc_fh fh;
146 __u32 len;
147};
148
149struct nfsd3_readres {
150 __be32 status;
151 struct svc_fh fh;
152 unsigned long count;
153 int eof;
154};
155
156struct nfsd3_writeres {
157 __be32 status;
158 struct svc_fh fh;
159 unsigned long count;
160 int committed;
161};
162
163struct nfsd3_renameres {
164 __be32 status;
165 struct svc_fh ffh;
166 struct svc_fh tfh;
167};
168
169struct nfsd3_linkres {
170 __be32 status;
171 struct svc_fh tfh;
172 struct svc_fh fh;
173};
174
175struct nfsd3_readdirres {
176 __be32 status;
177 struct svc_fh fh;
178 int count;
179 __be32 verf[2];
180
181 struct readdir_cd common;
182 __be32 * buffer;
183 int buflen;
184 __be32 * offset;
185 __be32 * offset1;
186 struct svc_rqst * rqstp;
187
188};
189
190struct nfsd3_fsstatres {
191 __be32 status;
192 struct kstatfs stats;
193 __u32 invarsec;
194};
195
196struct nfsd3_fsinfores {
197 __be32 status;
198 __u32 f_rtmax;
199 __u32 f_rtpref;
200 __u32 f_rtmult;
201 __u32 f_wtmax;
202 __u32 f_wtpref;
203 __u32 f_wtmult;
204 __u32 f_dtpref;
205 __u64 f_maxfilesize;
206 __u32 f_properties;
207};
208
209struct nfsd3_pathconfres {
210 __be32 status;
211 __u32 p_link_max;
212 __u32 p_name_max;
213 __u32 p_no_trunc;
214 __u32 p_chown_restricted;
215 __u32 p_case_insensitive;
216 __u32 p_case_preserving;
217};
218
219struct nfsd3_commitres {
220 __be32 status;
221 struct svc_fh fh;
222};
223
224struct nfsd3_getaclres {
225 __be32 status;
226 struct svc_fh fh;
227 int mask;
228 struct posix_acl *acl_access;
229 struct posix_acl *acl_default;
230};
231
232/* dummy type for release */
233struct nfsd3_fhandle_pair {
234 __u32 dummy;
235 struct svc_fh fh1;
236 struct svc_fh fh2;
237};
238
239/*
240 * Storage requirements for XDR arguments and results.
241 */
242union nfsd3_xdrstore {
243 struct nfsd3_sattrargs sattrargs;
244 struct nfsd3_diropargs diropargs;
245 struct nfsd3_readargs readargs;
246 struct nfsd3_writeargs writeargs;
247 struct nfsd3_createargs createargs;
248 struct nfsd3_renameargs renameargs;
249 struct nfsd3_linkargs linkargs;
250 struct nfsd3_symlinkargs symlinkargs;
251 struct nfsd3_readdirargs readdirargs;
252 struct nfsd3_diropres diropres;
253 struct nfsd3_accessres accessres;
254 struct nfsd3_readlinkres readlinkres;
255 struct nfsd3_readres readres;
256 struct nfsd3_writeres writeres;
257 struct nfsd3_renameres renameres;
258 struct nfsd3_linkres linkres;
259 struct nfsd3_readdirres readdirres;
260 struct nfsd3_fsstatres fsstatres;
261 struct nfsd3_fsinfores fsinfores;
262 struct nfsd3_pathconfres pathconfres;
263 struct nfsd3_commitres commitres;
264 struct nfsd3_getaclres getaclres;
265};
266
267#define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore)
268
269int nfs3svc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *);
270int nfs3svc_decode_sattrargs(struct svc_rqst *, __be32 *,
271 struct nfsd3_sattrargs *);
272int nfs3svc_decode_diropargs(struct svc_rqst *, __be32 *,
273 struct nfsd3_diropargs *);
274int nfs3svc_decode_accessargs(struct svc_rqst *, __be32 *,
275 struct nfsd3_accessargs *);
276int nfs3svc_decode_readargs(struct svc_rqst *, __be32 *,
277 struct nfsd3_readargs *);
278int nfs3svc_decode_writeargs(struct svc_rqst *, __be32 *,
279 struct nfsd3_writeargs *);
280int nfs3svc_decode_createargs(struct svc_rqst *, __be32 *,
281 struct nfsd3_createargs *);
282int nfs3svc_decode_mkdirargs(struct svc_rqst *, __be32 *,
283 struct nfsd3_createargs *);
284int nfs3svc_decode_mknodargs(struct svc_rqst *, __be32 *,
285 struct nfsd3_mknodargs *);
286int nfs3svc_decode_renameargs(struct svc_rqst *, __be32 *,
287 struct nfsd3_renameargs *);
288int nfs3svc_decode_readlinkargs(struct svc_rqst *, __be32 *,
289 struct nfsd3_readlinkargs *);
290int nfs3svc_decode_linkargs(struct svc_rqst *, __be32 *,
291 struct nfsd3_linkargs *);
292int nfs3svc_decode_symlinkargs(struct svc_rqst *, __be32 *,
293 struct nfsd3_symlinkargs *);
294int nfs3svc_decode_readdirargs(struct svc_rqst *, __be32 *,
295 struct nfsd3_readdirargs *);
296int nfs3svc_decode_readdirplusargs(struct svc_rqst *, __be32 *,
297 struct nfsd3_readdirargs *);
298int nfs3svc_decode_commitargs(struct svc_rqst *, __be32 *,
299 struct nfsd3_commitargs *);
300int nfs3svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
301int nfs3svc_encode_attrstat(struct svc_rqst *, __be32 *,
302 struct nfsd3_attrstat *);
303int nfs3svc_encode_wccstat(struct svc_rqst *, __be32 *,
304 struct nfsd3_attrstat *);
305int nfs3svc_encode_diropres(struct svc_rqst *, __be32 *,
306 struct nfsd3_diropres *);
307int nfs3svc_encode_accessres(struct svc_rqst *, __be32 *,
308 struct nfsd3_accessres *);
309int nfs3svc_encode_readlinkres(struct svc_rqst *, __be32 *,
310 struct nfsd3_readlinkres *);
311int nfs3svc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd3_readres *);
312int nfs3svc_encode_writeres(struct svc_rqst *, __be32 *, struct nfsd3_writeres *);
313int nfs3svc_encode_createres(struct svc_rqst *, __be32 *,
314 struct nfsd3_diropres *);
315int nfs3svc_encode_renameres(struct svc_rqst *, __be32 *,
316 struct nfsd3_renameres *);
317int nfs3svc_encode_linkres(struct svc_rqst *, __be32 *,
318 struct nfsd3_linkres *);
319int nfs3svc_encode_readdirres(struct svc_rqst *, __be32 *,
320 struct nfsd3_readdirres *);
321int nfs3svc_encode_fsstatres(struct svc_rqst *, __be32 *,
322 struct nfsd3_fsstatres *);
323int nfs3svc_encode_fsinfores(struct svc_rqst *, __be32 *,
324 struct nfsd3_fsinfores *);
325int nfs3svc_encode_pathconfres(struct svc_rqst *, __be32 *,
326 struct nfsd3_pathconfres *);
327int nfs3svc_encode_commitres(struct svc_rqst *, __be32 *,
328 struct nfsd3_commitres *);
329
330int nfs3svc_release_fhandle(struct svc_rqst *, __be32 *,
331 struct nfsd3_attrstat *);
332int nfs3svc_release_fhandle2(struct svc_rqst *, __be32 *,
333 struct nfsd3_fhandle_pair *);
334int nfs3svc_encode_entry(void *, const char *name,
335 int namlen, loff_t offset, u64 ino,
336 unsigned int);
337int nfs3svc_encode_entry_plus(void *, const char *name,
338 int namlen, loff_t offset, u64 ino,
339 unsigned int);
340/* Helper functions for NFSv3 ACL code */
341__be32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p,
342 struct svc_fh *fhp);
343__be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp);
344
345
346#endif /* _LINUX_NFSD_XDR3_H */
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h
deleted file mode 100644
index 73164c2b3d29..000000000000
--- a/include/linux/nfsd/xdr4.h
+++ /dev/null
@@ -1,563 +0,0 @@
1/*
2 * include/linux/nfsd/xdr4.h
3 *
4 * Server-side types for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 *
37 */
38
39#ifndef _LINUX_NFSD_XDR4_H
40#define _LINUX_NFSD_XDR4_H
41
42#include <linux/nfs4.h>
43
44#define NFSD4_MAX_TAGLEN 128
45#define XDR_LEN(n) (((n) + 3) & ~3)
46
47struct nfsd4_compound_state {
48 struct svc_fh current_fh;
49 struct svc_fh save_fh;
50 struct nfs4_stateowner *replay_owner;
51 /* For sessions DRC */
52 struct nfsd4_session *session;
53 struct nfsd4_slot *slot;
54 __be32 *datap;
55 size_t iovlen;
56 u32 minorversion;
57 u32 status;
58};
59
60static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs)
61{
62 return cs->slot != NULL;
63}
64
65struct nfsd4_change_info {
66 u32 atomic;
67 bool change_supported;
68 u32 before_ctime_sec;
69 u32 before_ctime_nsec;
70 u64 before_change;
71 u32 after_ctime_sec;
72 u32 after_ctime_nsec;
73 u64 after_change;
74};
75
76struct nfsd4_access {
77 u32 ac_req_access; /* request */
78 u32 ac_supported; /* response */
79 u32 ac_resp_access; /* response */
80};
81
82struct nfsd4_close {
83 u32 cl_seqid; /* request */
84 stateid_t cl_stateid; /* request+response */
85 struct nfs4_stateowner * cl_stateowner; /* response */
86};
87
88struct nfsd4_commit {
89 u64 co_offset; /* request */
90 u32 co_count; /* request */
91 nfs4_verifier co_verf; /* response */
92};
93
94struct nfsd4_create {
95 u32 cr_namelen; /* request */
96 char * cr_name; /* request */
97 u32 cr_type; /* request */
98 union { /* request */
99 struct {
100 u32 namelen;
101 char *name;
102 } link; /* NF4LNK */
103 struct {
104 u32 specdata1;
105 u32 specdata2;
106 } dev; /* NF4BLK, NF4CHR */
107 } u;
108 u32 cr_bmval[3]; /* request */
109 struct iattr cr_iattr; /* request */
110 struct nfsd4_change_info cr_cinfo; /* response */
111 struct nfs4_acl *cr_acl;
112};
113#define cr_linklen u.link.namelen
114#define cr_linkname u.link.name
115#define cr_specdata1 u.dev.specdata1
116#define cr_specdata2 u.dev.specdata2
117
118struct nfsd4_delegreturn {
119 stateid_t dr_stateid;
120};
121
122struct nfsd4_getattr {
123 u32 ga_bmval[3]; /* request */
124 struct svc_fh *ga_fhp; /* response */
125};
126
127struct nfsd4_link {
128 u32 li_namelen; /* request */
129 char * li_name; /* request */
130 struct nfsd4_change_info li_cinfo; /* response */
131};
132
133struct nfsd4_lock_denied {
134 clientid_t ld_clientid;
135 struct nfs4_stateowner *ld_sop;
136 u64 ld_start;
137 u64 ld_length;
138 u32 ld_type;
139};
140
141struct nfsd4_lock {
142 /* request */
143 u32 lk_type;
144 u32 lk_reclaim; /* boolean */
145 u64 lk_offset;
146 u64 lk_length;
147 u32 lk_is_new;
148 union {
149 struct {
150 u32 open_seqid;
151 stateid_t open_stateid;
152 u32 lock_seqid;
153 clientid_t clientid;
154 struct xdr_netobj owner;
155 } new;
156 struct {
157 stateid_t lock_stateid;
158 u32 lock_seqid;
159 } old;
160 } v;
161
162 /* response */
163 union {
164 struct {
165 stateid_t stateid;
166 } ok;
167 struct nfsd4_lock_denied denied;
168 } u;
169 /* The lk_replay_owner is the open owner in the open_to_lock_owner
170 * case and the lock owner otherwise: */
171 struct nfs4_stateowner *lk_replay_owner;
172};
173#define lk_new_open_seqid v.new.open_seqid
174#define lk_new_open_stateid v.new.open_stateid
175#define lk_new_lock_seqid v.new.lock_seqid
176#define lk_new_clientid v.new.clientid
177#define lk_new_owner v.new.owner
178#define lk_old_lock_stateid v.old.lock_stateid
179#define lk_old_lock_seqid v.old.lock_seqid
180
181#define lk_rflags u.ok.rflags
182#define lk_resp_stateid u.ok.stateid
183#define lk_denied u.denied
184
185
186struct nfsd4_lockt {
187 u32 lt_type;
188 clientid_t lt_clientid;
189 struct xdr_netobj lt_owner;
190 u64 lt_offset;
191 u64 lt_length;
192 struct nfs4_stateowner * lt_stateowner;
193 struct nfsd4_lock_denied lt_denied;
194};
195
196
197struct nfsd4_locku {
198 u32 lu_type;
199 u32 lu_seqid;
200 stateid_t lu_stateid;
201 u64 lu_offset;
202 u64 lu_length;
203 struct nfs4_stateowner *lu_stateowner;
204};
205
206
207struct nfsd4_lookup {
208 u32 lo_len; /* request */
209 char * lo_name; /* request */
210};
211
212struct nfsd4_putfh {
213 u32 pf_fhlen; /* request */
214 char *pf_fhval; /* request */
215};
216
217struct nfsd4_open {
218 u32 op_claim_type; /* request */
219 struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */
220 u32 op_delegate_type; /* request - CLAIM_PREV only */
221 stateid_t op_delegate_stateid; /* request - response */
222 u32 op_create; /* request */
223 u32 op_createmode; /* request */
224 u32 op_bmval[3]; /* request */
225 struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
226 nfs4_verifier verf; /* EXCLUSIVE4 */
227 clientid_t op_clientid; /* request */
228 struct xdr_netobj op_owner; /* request */
229 u32 op_seqid; /* request */
230 u32 op_share_access; /* request */
231 u32 op_share_deny; /* request */
232 stateid_t op_stateid; /* response */
233 u32 op_recall; /* recall */
234 struct nfsd4_change_info op_cinfo; /* response */
235 u32 op_rflags; /* response */
236 int op_truncate; /* used during processing */
237 struct nfs4_stateowner *op_stateowner; /* used during processing */
238 struct nfs4_acl *op_acl;
239};
240#define op_iattr iattr
241#define op_verf verf
242
243struct nfsd4_open_confirm {
244 stateid_t oc_req_stateid /* request */;
245 u32 oc_seqid /* request */;
246 stateid_t oc_resp_stateid /* response */;
247 struct nfs4_stateowner * oc_stateowner; /* response */
248};
249
250struct nfsd4_open_downgrade {
251 stateid_t od_stateid;
252 u32 od_seqid;
253 u32 od_share_access;
254 u32 od_share_deny;
255 struct nfs4_stateowner *od_stateowner;
256};
257
258
259struct nfsd4_read {
260 stateid_t rd_stateid; /* request */
261 u64 rd_offset; /* request */
262 u32 rd_length; /* request */
263 int rd_vlen;
264 struct file *rd_filp;
265
266 struct svc_rqst *rd_rqstp; /* response */
267 struct svc_fh * rd_fhp; /* response */
268};
269
270struct nfsd4_readdir {
271 u64 rd_cookie; /* request */
272 nfs4_verifier rd_verf; /* request */
273 u32 rd_dircount; /* request */
274 u32 rd_maxcount; /* request */
275 u32 rd_bmval[3]; /* request */
276 struct svc_rqst *rd_rqstp; /* response */
277 struct svc_fh * rd_fhp; /* response */
278
279 struct readdir_cd common;
280 __be32 * buffer;
281 int buflen;
282 __be32 * offset;
283};
284
285struct nfsd4_release_lockowner {
286 clientid_t rl_clientid;
287 struct xdr_netobj rl_owner;
288};
289struct nfsd4_readlink {
290 struct svc_rqst *rl_rqstp; /* request */
291 struct svc_fh * rl_fhp; /* request */
292};
293
294struct nfsd4_remove {
295 u32 rm_namelen; /* request */
296 char * rm_name; /* request */
297 struct nfsd4_change_info rm_cinfo; /* response */
298};
299
300struct nfsd4_rename {
301 u32 rn_snamelen; /* request */
302 char * rn_sname; /* request */
303 u32 rn_tnamelen; /* request */
304 char * rn_tname; /* request */
305 struct nfsd4_change_info rn_sinfo; /* response */
306 struct nfsd4_change_info rn_tinfo; /* response */
307};
308
309struct nfsd4_secinfo {
310 u32 si_namelen; /* request */
311 char *si_name; /* request */
312 struct svc_export *si_exp; /* response */
313};
314
315struct nfsd4_setattr {
316 stateid_t sa_stateid; /* request */
317 u32 sa_bmval[3]; /* request */
318 struct iattr sa_iattr; /* request */
319 struct nfs4_acl *sa_acl;
320};
321
322struct nfsd4_setclientid {
323 nfs4_verifier se_verf; /* request */
324 u32 se_namelen; /* request */
325 char * se_name; /* request */
326 u32 se_callback_prog; /* request */
327 u32 se_callback_netid_len; /* request */
328 char * se_callback_netid_val; /* request */
329 u32 se_callback_addr_len; /* request */
330 char * se_callback_addr_val; /* request */
331 u32 se_callback_ident; /* request */
332 clientid_t se_clientid; /* response */
333 nfs4_verifier se_confirm; /* response */
334};
335
336struct nfsd4_setclientid_confirm {
337 clientid_t sc_clientid;
338 nfs4_verifier sc_confirm;
339};
340
341/* also used for NVERIFY */
342struct nfsd4_verify {
343 u32 ve_bmval[3]; /* request */
344 u32 ve_attrlen; /* request */
345 char * ve_attrval; /* request */
346};
347
348struct nfsd4_write {
349 stateid_t wr_stateid; /* request */
350 u64 wr_offset; /* request */
351 u32 wr_stable_how; /* request */
352 u32 wr_buflen; /* request */
353 int wr_vlen;
354
355 u32 wr_bytes_written; /* response */
356 u32 wr_how_written; /* response */
357 nfs4_verifier wr_verifier; /* response */
358};
359
360struct nfsd4_exchange_id {
361 nfs4_verifier verifier;
362 struct xdr_netobj clname;
363 u32 flags;
364 clientid_t clientid;
365 u32 seqid;
366 int spa_how;
367};
368
369struct nfsd4_sequence {
370 struct nfs4_sessionid sessionid; /* request/response */
371 u32 seqid; /* request/response */
372 u32 slotid; /* request/response */
373 u32 maxslots; /* request/response */
374 u32 cachethis; /* request */
375#if 0
376 u32 target_maxslots; /* response */
377 u32 status_flags; /* response */
378#endif /* not yet */
379};
380
381struct nfsd4_destroy_session {
382 struct nfs4_sessionid sessionid;
383};
384
385struct nfsd4_op {
386 int opnum;
387 __be32 status;
388 union {
389 struct nfsd4_access access;
390 struct nfsd4_close close;
391 struct nfsd4_commit commit;
392 struct nfsd4_create create;
393 struct nfsd4_delegreturn delegreturn;
394 struct nfsd4_getattr getattr;
395 struct svc_fh * getfh;
396 struct nfsd4_link link;
397 struct nfsd4_lock lock;
398 struct nfsd4_lockt lockt;
399 struct nfsd4_locku locku;
400 struct nfsd4_lookup lookup;
401 struct nfsd4_verify nverify;
402 struct nfsd4_open open;
403 struct nfsd4_open_confirm open_confirm;
404 struct nfsd4_open_downgrade open_downgrade;
405 struct nfsd4_putfh putfh;
406 struct nfsd4_read read;
407 struct nfsd4_readdir readdir;
408 struct nfsd4_readlink readlink;
409 struct nfsd4_remove remove;
410 struct nfsd4_rename rename;
411 clientid_t renew;
412 struct nfsd4_secinfo secinfo;
413 struct nfsd4_setattr setattr;
414 struct nfsd4_setclientid setclientid;
415 struct nfsd4_setclientid_confirm setclientid_confirm;
416 struct nfsd4_verify verify;
417 struct nfsd4_write write;
418 struct nfsd4_release_lockowner release_lockowner;
419
420 /* NFSv4.1 */
421 struct nfsd4_exchange_id exchange_id;
422 struct nfsd4_create_session create_session;
423 struct nfsd4_destroy_session destroy_session;
424 struct nfsd4_sequence sequence;
425 } u;
426 struct nfs4_replay * replay;
427};
428
429struct nfsd4_compoundargs {
430 /* scratch variables for XDR decode */
431 __be32 * p;
432 __be32 * end;
433 struct page ** pagelist;
434 int pagelen;
435 __be32 tmp[8];
436 __be32 * tmpp;
437 struct tmpbuf {
438 struct tmpbuf *next;
439 void (*release)(const void *);
440 void *buf;
441 } *to_free;
442
443 struct svc_rqst *rqstp;
444
445 u32 taglen;
446 char * tag;
447 u32 minorversion;
448 u32 opcnt;
449 struct nfsd4_op *ops;
450 struct nfsd4_op iops[8];
451};
452
453struct nfsd4_compoundres {
454 /* scratch variables for XDR encode */
455 __be32 * p;
456 __be32 * end;
457 struct xdr_buf * xbuf;
458 struct svc_rqst * rqstp;
459
460 u32 taglen;
461 char * tag;
462 u32 opcnt;
463 __be32 * tagp; /* tag, opcount encode location */
464 struct nfsd4_compound_state cstate;
465};
466
467static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp)
468{
469 struct nfsd4_compoundargs *args = resp->rqstp->rq_argp;
470 return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE;
471}
472
473static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp)
474{
475 return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp);
476}
477
478#define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs)
479
480static inline void
481set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
482{
483 BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved);
484 cinfo->atomic = 1;
485 cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode);
486 if (cinfo->change_supported) {
487 cinfo->before_change = fhp->fh_pre_change;
488 cinfo->after_change = fhp->fh_post_change;
489 } else {
490 cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec;
491 cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec;
492 cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec;
493 cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec;
494 }
495}
496
497int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
498int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
499 struct nfsd4_compoundargs *);
500int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
501 struct nfsd4_compoundres *);
502void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
503void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op);
504__be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
505 struct dentry *dentry, __be32 *buffer, int *countp,
506 u32 *bmval, struct svc_rqst *, int ignore_crossmnt);
507extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp,
508 struct nfsd4_compound_state *,
509 struct nfsd4_setclientid *setclid);
510extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
511 struct nfsd4_compound_state *,
512 struct nfsd4_setclientid_confirm *setclientid_confirm);
513extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp);
514extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
515 struct nfsd4_sequence *seq);
516extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp,
517 struct nfsd4_compound_state *,
518 struct nfsd4_exchange_id *);
519extern __be32 nfsd4_create_session(struct svc_rqst *,
520 struct nfsd4_compound_state *,
521 struct nfsd4_create_session *);
522extern __be32 nfsd4_sequence(struct svc_rqst *,
523 struct nfsd4_compound_state *,
524 struct nfsd4_sequence *);
525extern __be32 nfsd4_destroy_session(struct svc_rqst *,
526 struct nfsd4_compound_state *,
527 struct nfsd4_destroy_session *);
528extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *,
529 struct nfsd4_open *open);
530extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp,
531 struct svc_fh *current_fh, struct nfsd4_open *open);
532extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp,
533 struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc);
534extern __be32 nfsd4_close(struct svc_rqst *rqstp,
535 struct nfsd4_compound_state *,
536 struct nfsd4_close *close);
537extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp,
538 struct nfsd4_compound_state *,
539 struct nfsd4_open_downgrade *od);
540extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *,
541 struct nfsd4_lock *lock);
542extern __be32 nfsd4_lockt(struct svc_rqst *rqstp,
543 struct nfsd4_compound_state *,
544 struct nfsd4_lockt *lockt);
545extern __be32 nfsd4_locku(struct svc_rqst *rqstp,
546 struct nfsd4_compound_state *,
547 struct nfsd4_locku *locku);
548extern __be32
549nfsd4_release_lockowner(struct svc_rqst *rqstp,
550 struct nfsd4_compound_state *,
551 struct nfsd4_release_lockowner *rlockowner);
552extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *);
553extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp,
554 struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr);
555extern __be32 nfsd4_renew(struct svc_rqst *rqstp,
556 struct nfsd4_compound_state *, clientid_t *clid);
557#endif
558
559/*
560 * Local variables:
561 * c-basic-offset: 8
562 * End:
563 */
diff --git a/include/linux/node.h b/include/linux/node.h
index 681a697b9a86..06292dac3eab 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -21,13 +21,19 @@
21 21
22#include <linux/sysdev.h> 22#include <linux/sysdev.h>
23#include <linux/cpumask.h> 23#include <linux/cpumask.h>
24#include <linux/workqueue.h>
24 25
25struct node { 26struct node {
26 struct sys_device sysdev; 27 struct sys_device sysdev;
28
29#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
30 struct work_struct node_work;
31#endif
27}; 32};
28 33
29struct memory_block; 34struct memory_block;
30extern struct node node_devices[]; 35extern struct node node_devices[];
36typedef void (*node_registration_func_t)(struct node *);
31 37
32extern int register_node(struct node *, int, struct node *); 38extern int register_node(struct node *, int, struct node *);
33extern void unregister_node(struct node *node); 39extern void unregister_node(struct node *node);
@@ -39,6 +45,11 @@ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
39extern int register_mem_sect_under_node(struct memory_block *mem_blk, 45extern int register_mem_sect_under_node(struct memory_block *mem_blk,
40 int nid); 46 int nid);
41extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk); 47extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk);
48
49#ifdef CONFIG_HUGETLBFS
50extern void register_hugetlbfs_with_node(node_registration_func_t doregister,
51 node_registration_func_t unregister);
52#endif
42#else 53#else
43static inline int register_one_node(int nid) 54static inline int register_one_node(int nid)
44{ 55{
@@ -65,6 +76,11 @@ static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
65{ 76{
66 return 0; 77 return 0;
67} 78}
79
80static inline void register_hugetlbfs_with_node(node_registration_func_t reg,
81 node_registration_func_t unreg)
82{
83}
68#endif 84#endif
69 85
70#define to_node(sys_device) container_of(sys_device, struct node, sysdev) 86#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
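The register_hugetlbfs_with_node() hook added above lets hugetlbfs hand the node driver a pair of callbacks that run as node devices are registered and unregistered. A minimal caller sketch, with hypothetical callback names (illustration only, not part of the patch):

#include <linux/init.h>
#include <linux/node.h>

/* Hypothetical per-node hooks; real ones would add/remove sysfs attributes. */
static void demo_node_add(struct node *node)
{
	/* create per-node hugepage attributes under node->sysdev */
}

static void demo_node_remove(struct node *node)
{
	/* remove them again before the node goes away */
}

static int __init demo_node_hooks_init(void)
{
	register_hugetlbfs_with_node(demo_node_add, demo_node_remove);
	return 0;
}
subsys_initcall(demo_node_hooks_init);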
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index b359c4a9ec9e..454997cccbd8 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -245,14 +245,19 @@ static inline int __next_node(int n, const nodemask_t *srcp)
245 return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); 245 return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1));
246} 246}
247 247
248static inline void init_nodemask_of_node(nodemask_t *mask, int node)
249{
250 nodes_clear(*mask);
251 node_set(node, *mask);
252}
253
248#define nodemask_of_node(node) \ 254#define nodemask_of_node(node) \
249({ \ 255({ \
250 typeof(_unused_nodemask_arg_) m; \ 256 typeof(_unused_nodemask_arg_) m; \
251 if (sizeof(m) == sizeof(unsigned long)) { \ 257 if (sizeof(m) == sizeof(unsigned long)) { \
252 m.bits[0] = 1UL<<(node); \ 258 m.bits[0] = 1UL << (node); \
253 } else { \ 259 } else { \
254 nodes_clear(m); \ 260 init_nodemask_of_node(&m, (node)); \
255 node_set((node), m); \
256 } \ 261 } \
257 m; \ 262 m; \
258}) 263})
@@ -480,15 +485,17 @@ static inline int num_node_state(enum node_states state)
480#define for_each_online_node(node) for_each_node_state(node, N_ONLINE) 485#define for_each_online_node(node) for_each_node_state(node, N_ONLINE)
481 486
482/* 487/*
483 * For nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h) 488 * For nodemask scratch area.
489 * NODEMASK_ALLOC(type, name) allocates an object with a specified type and
490 * name.
484 */ 491 */
485 492#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */
486#if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */ 493#define NODEMASK_ALLOC(type, name, gfp_flags) \
487#define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL) 494 type *name = kmalloc(sizeof(*name), gfp_flags)
488#define NODEMASK_FREE(m) kfree(m) 495#define NODEMASK_FREE(m) kfree(m)
489#else 496#else
490#define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m 497#define NODEMASK_ALLOC(type, name, gfp_flags) type _name, *name = &_name
491#define NODEMASK_FREE(m) 498#define NODEMASK_FREE(m) do {} while (0)
492#endif 499#endif
493 500
494/* An example structure for using NODEMASK_ALLOC, used in mempolicy. */ 501
@@ -497,8 +504,10 @@ struct nodemask_scratch {
497 nodemask_t mask2; 504 nodemask_t mask2;
498}; 505};
499 506
500#define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x) 507#define NODEMASK_SCRATCH(x) \
501#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) 508 NODEMASK_ALLOC(struct nodemask_scratch, x, \
509 GFP_KERNEL | __GFP_NORETRY)
510#define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x)
502 511
503 512
504#endif /* __LINUX_NODEMASK_H */ 513#endif /* __LINUX_NODEMASK_H */
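With the rework above, NODEMASK_ALLOC() takes the full type, a variable name and gfp flags, and falls back to an on-stack object when nodemask_t is small; NODEMASK_SCRATCH()/NODEMASK_SCRATCH_FREE() wrap it for struct nodemask_scratch. A rough usage sketch (the function and its purpose are made up for illustration):

#include <linux/nodemask.h>
#include <linux/topology.h>

static int demo_walk_local_node(void)
{
	NODEMASK_SCRATCH(nms);	/* kmalloc'd or on-stack depending on NODES_SHIFT */
	int node;

	if (!nms)
		return -ENOMEM;	/* only possible in the kmalloc case */
	init_nodemask_of_node(&nms->mask1, numa_node_id());
	for_each_node_mask(node, nms->mask1)
		;	/* operate on each node in the scratch mask */
	NODEMASK_SCRATCH_FREE(nms);
	return 0;
}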
diff --git a/include/linux/numa.h b/include/linux/numa.h
index a31a7301b159..3aaa31603a86 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -10,4 +10,6 @@
10 10
11#define MAX_NUMNODES (1 << NODES_SHIFT) 11#define MAX_NUMNODES (1 << NODES_SHIFT)
12 12
13#define NUMA_NO_NODE (-1)
14
13#endif /* _LINUX_NUMA_H */ 15#endif /* _LINUX_NUMA_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6aac5fe4f6f1..537662315627 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -10,6 +10,7 @@
10#ifdef __KERNEL__ 10#ifdef __KERNEL__
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/nodemask.h>
13 14
14struct zonelist; 15struct zonelist;
15struct notifier_block; 16struct notifier_block;
@@ -26,7 +27,8 @@ enum oom_constraint {
26extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags); 27extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags);
27extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); 28extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
28 29
29extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order); 30extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
31 int order, nodemask_t *mask);
30extern int register_oom_notifier(struct notifier_block *nb); 32extern int register_oom_notifier(struct notifier_block *nb);
31extern int unregister_oom_notifier(struct notifier_block *nb); 33extern int unregister_oom_notifier(struct notifier_block *nb);
32 34
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 6b202b173955..5b59f35dcb8f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -8,7 +8,7 @@
8#include <linux/types.h> 8#include <linux/types.h>
9#ifndef __GENERATING_BOUNDS_H 9#ifndef __GENERATING_BOUNDS_H
10#include <linux/mm_types.h> 10#include <linux/mm_types.h>
11#include <linux/bounds.h> 11#include <generated/bounds.h>
12#endif /* !__GENERATING_BOUNDS_H */ 12#endif /* !__GENERATING_BOUNDS_H */
13 13
14/* 14/*
@@ -99,7 +99,7 @@ enum pageflags {
99 PG_buddy, /* Page is free, on buddy lists */ 99 PG_buddy, /* Page is free, on buddy lists */
100 PG_swapbacked, /* Page is backed by RAM/swap */ 100 PG_swapbacked, /* Page is backed by RAM/swap */
101 PG_unevictable, /* Page is "unevictable" */ 101 PG_unevictable, /* Page is "unevictable" */
102#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 102#ifdef CONFIG_MMU
103 PG_mlocked, /* Page is vma mlocked */ 103 PG_mlocked, /* Page is vma mlocked */
104#endif 104#endif
105#ifdef CONFIG_ARCH_USES_PG_UNCACHED 105#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache)
259PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) 259PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
260 TESTCLEARFLAG(Unevictable, unevictable) 260 TESTCLEARFLAG(Unevictable, unevictable)
261 261
262#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 262#ifdef CONFIG_MMU
263#define MLOCK_PAGES 1
264PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) 263PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
265 TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) 264 TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
266#else 265#else
267#define MLOCK_PAGES 0
268PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) 266PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
269 TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) 267 TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
270#endif 268#endif
@@ -277,13 +275,15 @@ PAGEFLAG_FALSE(Uncached)
277 275
278#ifdef CONFIG_MEMORY_FAILURE 276#ifdef CONFIG_MEMORY_FAILURE
279PAGEFLAG(HWPoison, hwpoison) 277PAGEFLAG(HWPoison, hwpoison)
280TESTSETFLAG(HWPoison, hwpoison) 278TESTSCFLAG(HWPoison, hwpoison)
281#define __PG_HWPOISON (1UL << PG_hwpoison) 279#define __PG_HWPOISON (1UL << PG_hwpoison)
282#else 280#else
283PAGEFLAG_FALSE(HWPoison) 281PAGEFLAG_FALSE(HWPoison)
284#define __PG_HWPOISON 0 282#define __PG_HWPOISON 0
285#endif 283#endif
286 284
285u64 stable_page_flags(struct page *page);
286
287static inline int PageUptodate(struct page *page) 287static inline int PageUptodate(struct page *page)
288{ 288{
289 int ret = test_bit(PG_uptodate, &(page)->flags); 289 int ret = test_bit(PG_uptodate, &(page)->flags);
@@ -393,7 +393,7 @@ static inline void __ClearPageTail(struct page *page)
393 393
394#endif /* !PAGEFLAGS_EXTENDED */ 394#endif /* !PAGEFLAGS_EXTENDED */
395 395
396#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT 396#ifdef CONFIG_MMU
397#define __PG_MLOCKED (1 << PG_mlocked) 397#define __PG_MLOCKED (1 << PG_mlocked)
398#else 398#else
399#define __PG_MLOCKED 0 399#define __PG_MLOCKED 0
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 4b938d4f3ac2..b0e4eb126236 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -57,6 +57,8 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
57static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ 57static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
58 { return test_and_clear_bit(PCG_##lname, &pc->flags); } 58 { return test_and_clear_bit(PCG_##lname, &pc->flags); }
59 59
60TESTPCGFLAG(Locked, LOCK)
61
60/* Cache flag is set only once (at allocation) */ 62/* Cache flag is set only once (at allocation) */
61TESTPCGFLAG(Cache, CACHE) 63TESTPCGFLAG(Cache, CACHE)
62CLEARPCGFLAG(Cache, CACHE) 64CLEARPCGFLAG(Cache, CACHE)
@@ -86,11 +88,6 @@ static inline void lock_page_cgroup(struct page_cgroup *pc)
86 bit_spin_lock(PCG_LOCK, &pc->flags); 88 bit_spin_lock(PCG_LOCK, &pc->flags);
87} 89}
88 90
89static inline int trylock_page_cgroup(struct page_cgroup *pc)
90{
91 return bit_spin_trylock(PCG_LOCK, &pc->flags);
92}
93
94static inline void unlock_page_cgroup(struct page_cgroup *pc) 91static inline void unlock_page_cgroup(struct page_cgroup *pc)
95{ 92{
96 bit_spin_unlock(PCG_LOCK, &pc->flags); 93 bit_spin_unlock(PCG_LOCK, &pc->flags);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 04771b9c3316..174e5392e51e 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -243,6 +243,7 @@ struct pci_dev {
243 unsigned int d2_support:1; /* Low power state D2 is supported */ 243 unsigned int d2_support:1; /* Low power state D2 is supported */
244 unsigned int no_d1d2:1; /* Only allow D0 and D3 */ 244 unsigned int no_d1d2:1; /* Only allow D0 and D3 */
245 unsigned int wakeup_prepared:1; 245 unsigned int wakeup_prepared:1;
246 unsigned int d3_delay; /* D3->D0 transition time in ms */
246 247
247#ifdef CONFIG_PCIEASPM 248#ifdef CONFIG_PCIEASPM
248 struct pcie_link_state *link_state; /* ASPM link state. */ 249 struct pcie_link_state *link_state; /* ASPM link state. */
@@ -566,6 +567,9 @@ void pcibios_align_resource(void *, struct resource *, resource_size_t,
566 resource_size_t); 567 resource_size_t);
567void pcibios_update_irq(struct pci_dev *, int irq); 568void pcibios_update_irq(struct pci_dev *, int irq);
568 569
570/* Weak but can be overridden by arch */
571void pci_fixup_cardbus(struct pci_bus *);
572
569/* Generic PCI functions used internally */ 573/* Generic PCI functions used internally */
570 574
571extern struct pci_bus *pci_find_bus(int domain, int busnr); 575extern struct pci_bus *pci_find_bus(int domain, int busnr);
@@ -1255,7 +1259,7 @@ extern int pci_pci_problems;
1255 1259
1256extern unsigned long pci_cardbus_io_size; 1260extern unsigned long pci_cardbus_io_size;
1257extern unsigned long pci_cardbus_mem_size; 1261extern unsigned long pci_cardbus_mem_size;
1258extern u8 pci_dfl_cache_line_size; 1262extern u8 __devinitdata pci_dfl_cache_line_size;
1259extern u8 pci_cache_line_size; 1263extern u8 pci_cache_line_size;
1260 1264
1261extern unsigned long pci_hotplug_io_size; 1265extern unsigned long pci_hotplug_io_size;
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index eae1f864c934..cca8a044e2b6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2295,6 +2295,20 @@
2295#define PCI_DEVICE_ID_MPC8536 0x0051 2295#define PCI_DEVICE_ID_MPC8536 0x0051
2296#define PCI_DEVICE_ID_P2020E 0x0070 2296#define PCI_DEVICE_ID_P2020E 0x0070
2297#define PCI_DEVICE_ID_P2020 0x0071 2297#define PCI_DEVICE_ID_P2020 0x0071
2298#define PCI_DEVICE_ID_P2010E 0x0078
2299#define PCI_DEVICE_ID_P2010 0x0079
2300#define PCI_DEVICE_ID_P1020E 0x0100
2301#define PCI_DEVICE_ID_P1020 0x0101
2302#define PCI_DEVICE_ID_P1011E 0x0108
2303#define PCI_DEVICE_ID_P1011 0x0109
2304#define PCI_DEVICE_ID_P1022E 0x0110
2305#define PCI_DEVICE_ID_P1022 0x0111
2306#define PCI_DEVICE_ID_P1013E 0x0118
2307#define PCI_DEVICE_ID_P1013 0x0119
2308#define PCI_DEVICE_ID_P4080E 0x0400
2309#define PCI_DEVICE_ID_P4080 0x0401
2310#define PCI_DEVICE_ID_P4040E 0x0408
2311#define PCI_DEVICE_ID_P4040 0x0409
2298#define PCI_DEVICE_ID_MPC8641 0x7010 2312#define PCI_DEVICE_ID_MPC8641 0x7010
2299#define PCI_DEVICE_ID_MPC8641D 0x7011 2313#define PCI_DEVICE_ID_MPC8641D 0x7011
2300#define PCI_DEVICE_ID_MPC8610 0x7018 2314#define PCI_DEVICE_ID_MPC8610 0x7018
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 9bd03193ecd4..5a5d6ce4bd55 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -60,6 +60,7 @@
60 60
61#define DEFINE_PER_CPU_SECTION(type, name, sec) \ 61#define DEFINE_PER_CPU_SECTION(type, name, sec) \
62 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \ 62 __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
63 extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
63 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ 64 __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
64 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \ 65 __PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
65 __typeof__(type) per_cpu__##name 66 __typeof__(type) per_cpu__##name
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999c..cf5efbcf716c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,8 +34,6 @@
34 34
35#ifdef CONFIG_SMP 35#ifdef CONFIG_SMP
36 36
37#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
38
39/* minimum unit size, also is the maximum supported allocation size */ 37/* minimum unit size, also is the maximum supported allocation size */
40#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) 38#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10)
41 39
@@ -130,30 +128,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
130#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) 128#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
131 129
132extern void *__alloc_reserved_percpu(size_t size, size_t align); 130extern void *__alloc_reserved_percpu(size_t size, size_t align);
133
134#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
135
136struct percpu_data {
137 void *ptrs[1];
138};
139
140/* pointer disguising messes up the kmemleak objects tracking */
141#ifndef CONFIG_DEBUG_KMEMLEAK
142#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
143#else
144#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
145#endif
146
147#define per_cpu_ptr(ptr, cpu) \
148({ \
149 struct percpu_data *__p = __percpu_disguise(ptr); \
150 (__typeof__(ptr))__p->ptrs[(cpu)]; \
151})
152
153#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
154
155extern void *__alloc_percpu(size_t size, size_t align); 131extern void *__alloc_percpu(size_t size, size_t align);
156extern void free_percpu(void *__pdata); 132extern void free_percpu(void *__pdata);
133extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
157 134
158#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA 135#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
159extern void __init setup_per_cpu_areas(void); 136extern void __init setup_per_cpu_areas(void);
@@ -179,6 +156,11 @@ static inline void free_percpu(void *p)
179 kfree(p); 156 kfree(p);
180} 157}
181 158
159static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
160{
161 return __pa(addr);
162}
163
182static inline void __init setup_per_cpu_areas(void) { } 164static inline void __init setup_per_cpu_areas(void) { }
183 165
184static inline void *pcpu_lpage_remapped(void *kaddr) 166static inline void *pcpu_lpage_remapped(void *kaddr)
@@ -188,8 +170,8 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
188 170
189#endif /* CONFIG_SMP */ 171#endif /* CONFIG_SMP */
190 172
191#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ 173#define alloc_percpu(type) \
192 __alignof__(type)) 174 (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
193 175
194/* 176/*
195 * Optional methods for optimized non-lvalue per-cpu variable access. 177 * Optional methods for optimized non-lvalue per-cpu variable access.
@@ -243,4 +225,404 @@ do { \
243# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) 225# define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=)
244#endif 226#endif
245 227
228/*
229 * Branching function to split up a function into a set of functions that
230 * are called for different scalar sizes of the objects handled.
231 */
232
233extern void __bad_size_call_parameter(void);
234
235#define __pcpu_size_call_return(stem, variable) \
236({ typeof(variable) pscr_ret__; \
237 switch(sizeof(variable)) { \
238 case 1: pscr_ret__ = stem##1(variable);break; \
239 case 2: pscr_ret__ = stem##2(variable);break; \
240 case 4: pscr_ret__ = stem##4(variable);break; \
241 case 8: pscr_ret__ = stem##8(variable);break; \
242 default: \
243 __bad_size_call_parameter();break; \
244 } \
245 pscr_ret__; \
246})
247
248#define __pcpu_size_call(stem, variable, ...) \
249do { \
250 switch(sizeof(variable)) { \
251 case 1: stem##1(variable, __VA_ARGS__);break; \
252 case 2: stem##2(variable, __VA_ARGS__);break; \
253 case 4: stem##4(variable, __VA_ARGS__);break; \
254 case 8: stem##8(variable, __VA_ARGS__);break; \
255 default: \
256 __bad_size_call_parameter();break; \
257 } \
258} while (0)
259
260/*
261 * Optimized manipulation for memory allocated through the per cpu
262 * allocator or for addresses of per cpu variables (can be determined
263 * using per_cpu_var(xx)).
264 *
265 * These operations guarantee exclusivity of access for other operations
266 * on the *same* processor. The assumption is that per cpu data is only
267 * accessed by a single processor instance (the current one).
268 *
269 * The first group is used for accesses that must be done in a
270 * preemption safe way since we know that the context is not preempt
271 * safe. Interrupts may occur. If the interrupt modifies the variable
272 * too then RMW actions will not be reliable.
273 *
274 * The arch code can provide optimized functions in two ways:
275 *
276 * 1. Override the function completely. F.e. define this_cpu_add().
277 * The arch must then ensure that the various scalar formats passed
278 * are handled correctly.
279 *
280 * 2. Provide functions for certain scalar sizes. F.e. provide
281 * this_cpu_add_2() to provide per cpu atomic operations for 2 byte
282 * sized RMW actions. If arch code does not provide operations for
283 * a scalar size then the fallback in the generic code will be
284 * used.
285 */
286
287#define _this_cpu_generic_read(pcp) \
288({ typeof(pcp) ret__; \
289 preempt_disable(); \
290 ret__ = *this_cpu_ptr(&(pcp)); \
291 preempt_enable(); \
292 ret__; \
293})
294
295#ifndef this_cpu_read
296# ifndef this_cpu_read_1
297# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
298# endif
299# ifndef this_cpu_read_2
300# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
301# endif
302# ifndef this_cpu_read_4
303# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
304# endif
305# ifndef this_cpu_read_8
306# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
307# endif
308# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp))
309#endif
310
311#define _this_cpu_generic_to_op(pcp, val, op) \
312do { \
313 preempt_disable(); \
314 *__this_cpu_ptr(&pcp) op val; \
315 preempt_enable(); \
316} while (0)
317
318#ifndef this_cpu_write
319# ifndef this_cpu_write_1
320# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
321# endif
322# ifndef this_cpu_write_2
323# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
324# endif
325# ifndef this_cpu_write_4
326# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
327# endif
328# ifndef this_cpu_write_8
329# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =)
330# endif
331# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val))
332#endif
333
334#ifndef this_cpu_add
335# ifndef this_cpu_add_1
336# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
337# endif
338# ifndef this_cpu_add_2
339# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
340# endif
341# ifndef this_cpu_add_4
342# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
343# endif
344# ifndef this_cpu_add_8
345# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=)
346# endif
347# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val))
348#endif
349
350#ifndef this_cpu_sub
351# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(val))
352#endif
353
354#ifndef this_cpu_inc
355# define this_cpu_inc(pcp) this_cpu_add((pcp), 1)
356#endif
357
358#ifndef this_cpu_dec
359# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1)
360#endif
361
362#ifndef this_cpu_and
363# ifndef this_cpu_and_1
364# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
365# endif
366# ifndef this_cpu_and_2
367# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
368# endif
369# ifndef this_cpu_and_4
370# define this_cpu_and_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
371# endif
372# ifndef this_cpu_and_8
373# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=)
374# endif
375# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val))
376#endif
377
378#ifndef this_cpu_or
379# ifndef this_cpu_or_1
380# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
381# endif
382# ifndef this_cpu_or_2
383# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
384# endif
385# ifndef this_cpu_or_4
386# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
387# endif
388# ifndef this_cpu_or_8
389# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=)
390# endif
391# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val))
392#endif
393
394#ifndef this_cpu_xor
395# ifndef this_cpu_xor_1
396# define this_cpu_xor_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
397# endif
398# ifndef this_cpu_xor_2
399# define this_cpu_xor_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
400# endif
401# ifndef this_cpu_xor_4
402# define this_cpu_xor_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
403# endif
404# ifndef this_cpu_xor_8
405# define this_cpu_xor_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), ^=)
406# endif
407# define this_cpu_xor(pcp, val) __pcpu_size_call(this_cpu_xor_, (pcp), (val))
408#endif
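As a usage sketch of the preemption-safe group above (illustration only; demo_hits is a made-up counter, and per_cpu_var() is the accessor the comment block refers to):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);

static void demo_note_hit(void)
{
	/* the read-modify-write runs with preemption disabled inside the op */
	this_cpu_inc(per_cpu_var(demo_hits));
}

static unsigned long demo_read_local(void)
{
	/* sizeof() picks the 4- or 8-byte variant via __pcpu_size_call_return() */
	return this_cpu_read(per_cpu_var(demo_hits));
}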
409
410/*
411 * Generic percpu operations that do not require preemption handling.
412 * Either we do not care about races or the caller has the
413 * responsibility of handling preemption issues. Arch code can still
414 * override these instructions since the arch per cpu code may be more
415 * efficient and may actually get race freeness for free (that is the
416 * case for x86 for example).
417 *
418 * If there is no other protection through preempt disable and/or
419 * disabling interrupts then one of these RMW operations can show unexpected
420 * behavior because the execution thread was rescheduled on another processor
421 * or an interrupt occurred and the same percpu variable was modified from
422 * the interrupt context.
423 */
424#ifndef __this_cpu_read
425# ifndef __this_cpu_read_1
426# define __this_cpu_read_1(pcp) (*__this_cpu_ptr(&(pcp)))
427# endif
428# ifndef __this_cpu_read_2
429# define __this_cpu_read_2(pcp) (*__this_cpu_ptr(&(pcp)))
430# endif
431# ifndef __this_cpu_read_4
432# define __this_cpu_read_4(pcp) (*__this_cpu_ptr(&(pcp)))
433# endif
434# ifndef __this_cpu_read_8
435# define __this_cpu_read_8(pcp) (*__this_cpu_ptr(&(pcp)))
436# endif
437# define __this_cpu_read(pcp) __pcpu_size_call_return(__this_cpu_read_, (pcp))
438#endif
439
440#define __this_cpu_generic_to_op(pcp, val, op) \
441do { \
442 *__this_cpu_ptr(&(pcp)) op val; \
443} while (0)
444
445#ifndef __this_cpu_write
446# ifndef __this_cpu_write_1
447# define __this_cpu_write_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
448# endif
449# ifndef __this_cpu_write_2
450# define __this_cpu_write_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
451# endif
452# ifndef __this_cpu_write_4
453# define __this_cpu_write_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
454# endif
455# ifndef __this_cpu_write_8
456# define __this_cpu_write_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), =)
457# endif
458# define __this_cpu_write(pcp, val) __pcpu_size_call(__this_cpu_write_, (pcp), (val))
459#endif
460
461#ifndef __this_cpu_add
462# ifndef __this_cpu_add_1
463# define __this_cpu_add_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
464# endif
465# ifndef __this_cpu_add_2
466# define __this_cpu_add_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
467# endif
468# ifndef __this_cpu_add_4
469# define __this_cpu_add_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
470# endif
471# ifndef __this_cpu_add_8
472# define __this_cpu_add_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), +=)
473# endif
474# define __this_cpu_add(pcp, val) __pcpu_size_call(__this_cpu_add_, (pcp), (val))
475#endif
476
477#ifndef __this_cpu_sub
478# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(val))
479#endif
480
481#ifndef __this_cpu_inc
482# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1)
483#endif
484
485#ifndef __this_cpu_dec
486# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1)
487#endif
488
489#ifndef __this_cpu_and
490# ifndef __this_cpu_and_1
491# define __this_cpu_and_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
492# endif
493# ifndef __this_cpu_and_2
494# define __this_cpu_and_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
495# endif
496# ifndef __this_cpu_and_4
497# define __this_cpu_and_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
498# endif
499# ifndef __this_cpu_and_8
500# define __this_cpu_and_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), &=)
501# endif
502# define __this_cpu_and(pcp, val) __pcpu_size_call(__this_cpu_and_, (pcp), (val))
503#endif
504
505#ifndef __this_cpu_or
506# ifndef __this_cpu_or_1
507# define __this_cpu_or_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
508# endif
509# ifndef __this_cpu_or_2
510# define __this_cpu_or_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
511# endif
512# ifndef __this_cpu_or_4
513# define __this_cpu_or_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
514# endif
515# ifndef __this_cpu_or_8
516# define __this_cpu_or_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), |=)
517# endif
518# define __this_cpu_or(pcp, val) __pcpu_size_call(__this_cpu_or_, (pcp), (val))
519#endif
520
521#ifndef __this_cpu_xor
522# ifndef __this_cpu_xor_1
523# define __this_cpu_xor_1(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
524# endif
525# ifndef __this_cpu_xor_2
526# define __this_cpu_xor_2(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
527# endif
528# ifndef __this_cpu_xor_4
529# define __this_cpu_xor_4(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
530# endif
531# ifndef __this_cpu_xor_8
532# define __this_cpu_xor_8(pcp, val) __this_cpu_generic_to_op((pcp), (val), ^=)
533# endif
534# define __this_cpu_xor(pcp, val) __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
535#endif
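A companion sketch for the cheaper __this_cpu_* forms above, where the caller already provides the protection (again illustrative, reusing the hypothetical demo_hits counter):

static void demo_note_hits(unsigned int n)
{
	preempt_disable();
	/* no implicit preempt_disable inside these ops */
	__this_cpu_add(per_cpu_var(demo_hits), n);
	__this_cpu_inc(per_cpu_var(demo_hits));
	preempt_enable();
}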
536
537/*
538 * IRQ safe versions of the per cpu RMW operations. Note that these operations
 539 * are *not* safe against modification of the same variable from another
 540 * processor (which one gets when using regular atomic operations).
 541 * They are guaranteed to be atomic vs. local interrupts and
542 * preemption only.
543 */
544#define irqsafe_cpu_generic_to_op(pcp, val, op) \
545do { \
546 unsigned long flags; \
547 local_irq_save(flags); \
548 *__this_cpu_ptr(&(pcp)) op val; \
549 local_irq_restore(flags); \
550} while (0)
551
552#ifndef irqsafe_cpu_add
553# ifndef irqsafe_cpu_add_1
554# define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
555# endif
556# ifndef irqsafe_cpu_add_2
557# define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
558# endif
559# ifndef irqsafe_cpu_add_4
560# define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
561# endif
562# ifndef irqsafe_cpu_add_8
563# define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
564# endif
565# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
566#endif
567
568#ifndef irqsafe_cpu_sub
569# define irqsafe_cpu_sub(pcp, val) irqsafe_cpu_add((pcp), -(val))
570#endif
571
572#ifndef irqsafe_cpu_inc
573# define irqsafe_cpu_inc(pcp) irqsafe_cpu_add((pcp), 1)
574#endif
575
576#ifndef irqsafe_cpu_dec
577# define irqsafe_cpu_dec(pcp) irqsafe_cpu_sub((pcp), 1)
578#endif
579
580#ifndef irqsafe_cpu_and
581# ifndef irqsafe_cpu_and_1
582# define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
583# endif
584# ifndef irqsafe_cpu_and_2
585# define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
586# endif
587# ifndef irqsafe_cpu_and_4
588# define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
589# endif
590# ifndef irqsafe_cpu_and_8
591# define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
592# endif
593# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
594#endif
595
596#ifndef irqsafe_cpu_or
597# ifndef irqsafe_cpu_or_1
598# define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
599# endif
600# ifndef irqsafe_cpu_or_2
601# define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
602# endif
603# ifndef irqsafe_cpu_or_4
604# define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
605# endif
606# ifndef irqsafe_cpu_or_8
607# define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
608# endif
609# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
610#endif
611
612#ifndef irqsafe_cpu_xor
613# ifndef irqsafe_cpu_xor_1
614# define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
615# endif
616# ifndef irqsafe_cpu_xor_2
617# define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
618# endif
619# ifndef irqsafe_cpu_xor_4
620# define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
621# endif
622# ifndef irqsafe_cpu_xor_8
623# define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
624# endif
625# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
626#endif
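And a sketch for the irqsafe_cpu_* group above, for a counter that is also bumped from interrupt context (illustrative only, same hypothetical counter):

static void demo_note_irq_event(void)
{
	/* local_irq_save/restore inside the op protects against the local IRQ path */
	irqsafe_cpu_inc(per_cpu_var(demo_hits));
}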
627
246#endif /* __LINUX_PERCPU_H */ 628#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
deleted file mode 100644
index e3fb25606706..000000000000
--- a/include/linux/perf_counter.h
+++ /dev/null
@@ -1,444 +0,0 @@
1/*
2 * NOTE: this file will be removed in a future kernel release, it is
3 * provided as a courtesy copy of user-space code that relies on the
4 * old (pre-rename) symbols and constants.
5 *
6 * Performance events:
7 *
8 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
9 * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
10 * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
11 *
12 * Data type definitions, declarations, prototypes.
13 *
14 * Started by: Thomas Gleixner and Ingo Molnar
15 *
16 * For licencing details see kernel-base/COPYING
17 */
18#ifndef _LINUX_PERF_COUNTER_H
19#define _LINUX_PERF_COUNTER_H
20
21#include <linux/types.h>
22#include <linux/ioctl.h>
23#include <asm/byteorder.h>
24
25/*
26 * User-space ABI bits:
27 */
28
29/*
30 * attr.type
31 */
32enum perf_type_id {
33 PERF_TYPE_HARDWARE = 0,
34 PERF_TYPE_SOFTWARE = 1,
35 PERF_TYPE_TRACEPOINT = 2,
36 PERF_TYPE_HW_CACHE = 3,
37 PERF_TYPE_RAW = 4,
38
39 PERF_TYPE_MAX, /* non-ABI */
40};
41
42/*
43 * Generalized performance counter event types, used by the
44 * attr.event_id parameter of the sys_perf_counter_open()
45 * syscall:
46 */
47enum perf_hw_id {
48 /*
49 * Common hardware events, generalized by the kernel:
50 */
51 PERF_COUNT_HW_CPU_CYCLES = 0,
52 PERF_COUNT_HW_INSTRUCTIONS = 1,
53 PERF_COUNT_HW_CACHE_REFERENCES = 2,
54 PERF_COUNT_HW_CACHE_MISSES = 3,
55 PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
56 PERF_COUNT_HW_BRANCH_MISSES = 5,
57 PERF_COUNT_HW_BUS_CYCLES = 6,
58
59 PERF_COUNT_HW_MAX, /* non-ABI */
60};
61
62/*
63 * Generalized hardware cache counters:
64 *
65 * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
66 * { read, write, prefetch } x
67 * { accesses, misses }
68 */
69enum perf_hw_cache_id {
70 PERF_COUNT_HW_CACHE_L1D = 0,
71 PERF_COUNT_HW_CACHE_L1I = 1,
72 PERF_COUNT_HW_CACHE_LL = 2,
73 PERF_COUNT_HW_CACHE_DTLB = 3,
74 PERF_COUNT_HW_CACHE_ITLB = 4,
75 PERF_COUNT_HW_CACHE_BPU = 5,
76
77 PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
78};
79
80enum perf_hw_cache_op_id {
81 PERF_COUNT_HW_CACHE_OP_READ = 0,
82 PERF_COUNT_HW_CACHE_OP_WRITE = 1,
83 PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
84
85 PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
86};
87
88enum perf_hw_cache_op_result_id {
89 PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
90 PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
91
92 PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
93};
94
95/*
96 * Special "software" counters provided by the kernel, even if the hardware
97 * does not support performance counters. These counters measure various
98 * physical and sw events of the kernel (and allow the profiling of them as
99 * well):
100 */
101enum perf_sw_ids {
102 PERF_COUNT_SW_CPU_CLOCK = 0,
103 PERF_COUNT_SW_TASK_CLOCK = 1,
104 PERF_COUNT_SW_PAGE_FAULTS = 2,
105 PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
106 PERF_COUNT_SW_CPU_MIGRATIONS = 4,
107 PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
108 PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
109 PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
110 PERF_COUNT_SW_EMULATION_FAULTS = 8,
111
112 PERF_COUNT_SW_MAX, /* non-ABI */
113};
114
115/*
116 * Bits that can be set in attr.sample_type to request information
117 * in the overflow packets.
118 */
119enum perf_counter_sample_format {
120 PERF_SAMPLE_IP = 1U << 0,
121 PERF_SAMPLE_TID = 1U << 1,
122 PERF_SAMPLE_TIME = 1U << 2,
123 PERF_SAMPLE_ADDR = 1U << 3,
124 PERF_SAMPLE_READ = 1U << 4,
125 PERF_SAMPLE_CALLCHAIN = 1U << 5,
126 PERF_SAMPLE_ID = 1U << 6,
127 PERF_SAMPLE_CPU = 1U << 7,
128 PERF_SAMPLE_PERIOD = 1U << 8,
129 PERF_SAMPLE_STREAM_ID = 1U << 9,
130 PERF_SAMPLE_RAW = 1U << 10,
131
132 PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */
133};
134
135/*
136 * The format of the data returned by read() on a perf counter fd,
137 * as specified by attr.read_format:
138 *
139 * struct read_format {
140 * { u64 value;
141 * { u64 time_enabled; } && PERF_FORMAT_ENABLED
142 * { u64 time_running; } && PERF_FORMAT_RUNNING
143 * { u64 id; } && PERF_FORMAT_ID
144 * } && !PERF_FORMAT_GROUP
145 *
146 * { u64 nr;
147 * { u64 time_enabled; } && PERF_FORMAT_ENABLED
148 * { u64 time_running; } && PERF_FORMAT_RUNNING
149 * { u64 value;
150 * { u64 id; } && PERF_FORMAT_ID
151 * } cntr[nr];
152 * } && PERF_FORMAT_GROUP
153 * };
154 */
155enum perf_counter_read_format {
156 PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
157 PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
158 PERF_FORMAT_ID = 1U << 2,
159 PERF_FORMAT_GROUP = 1U << 3,
160
161 PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
162};
163
164#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
165
166/*
167 * Hardware event to monitor via a performance monitoring counter:
168 */
169struct perf_counter_attr {
170
171 /*
172 * Major type: hardware/software/tracepoint/etc.
173 */
174 __u32 type;
175
176 /*
177 * Size of the attr structure, for fwd/bwd compat.
178 */
179 __u32 size;
180
181 /*
182 * Type specific configuration information.
183 */
184 __u64 config;
185
186 union {
187 __u64 sample_period;
188 __u64 sample_freq;
189 };
190
191 __u64 sample_type;
192 __u64 read_format;
193
194 __u64 disabled : 1, /* off by default */
195 inherit : 1, /* children inherit it */
196 pinned : 1, /* must always be on PMU */
197 exclusive : 1, /* only group on PMU */
198 exclude_user : 1, /* don't count user */
199 exclude_kernel : 1, /* ditto kernel */
200 exclude_hv : 1, /* ditto hypervisor */
201 exclude_idle : 1, /* don't count when idle */
202 mmap : 1, /* include mmap data */
203 comm : 1, /* include comm data */
204 freq : 1, /* use freq, not period */
205 inherit_stat : 1, /* per task counts */
206 enable_on_exec : 1, /* next exec enables */
207 task : 1, /* trace fork/exit */
208 watermark : 1, /* wakeup_watermark */
209
210 __reserved_1 : 49;
211
212 union {
213 __u32 wakeup_events; /* wakeup every n events */
214 __u32 wakeup_watermark; /* bytes before wakeup */
215 };
216 __u32 __reserved_2;
217
218 __u64 __reserved_3;
219};
220
221/*
222 * Ioctls that can be done on a perf counter fd:
223 */
224#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0)
225#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1)
226#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
227#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
228#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
229#define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5)
230#define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *)
231
232enum perf_counter_ioc_flags {
233 PERF_IOC_FLAG_GROUP = 1U << 0,
234};
235
236/*
237 * Structure of the page that can be mapped via mmap
238 */
239struct perf_counter_mmap_page {
240 __u32 version; /* version number of this structure */
241 __u32 compat_version; /* lowest version this is compat with */
242
243 /*
244 * Bits needed to read the hw counters in user-space.
245 *
246 * u32 seq;
247 * s64 count;
248 *
249 * do {
250 * seq = pc->lock;
251 *
252 * barrier()
253 * if (pc->index) {
254 * count = pmc_read(pc->index - 1);
255 * count += pc->offset;
256 * } else
257 * goto regular_read;
258 *
259 * barrier();
260 * } while (pc->lock != seq);
261 *
262 * NOTE: for obvious reason this only works on self-monitoring
263 * processes.
264 */
265 __u32 lock; /* seqlock for synchronization */
266 __u32 index; /* hardware counter identifier */
267 __s64 offset; /* add to hardware counter value */
268 __u64 time_enabled; /* time counter active */
269 __u64 time_running; /* time counter on cpu */
270
271 /*
272 * Hole for extension of the self monitor capabilities
273 */
274
275 __u64 __reserved[123]; /* align to 1k */
276
277 /*
278 * Control data for the mmap() data buffer.
279 *
280 * User-space reading the @data_head value should issue an rmb(), on
281 * SMP capable platforms, after reading this value -- see
282 * perf_counter_wakeup().
283 *
284 * When the mapping is PROT_WRITE the @data_tail value should be
285 * written by userspace to reflect the last read data. In this case
286 * the kernel will not over-write unread data.
287 */
288 __u64 data_head; /* head in the data section */
289 __u64 data_tail; /* user-space written tail */
290};
291
292#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
293#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
294#define PERF_EVENT_MISC_KERNEL (1 << 0)
295#define PERF_EVENT_MISC_USER (2 << 0)
296#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
297
298struct perf_event_header {
299 __u32 type;
300 __u16 misc;
301 __u16 size;
302};
303
304enum perf_event_type {
305
306 /*
307 * The MMAP events record the PROT_EXEC mappings so that we can
308 * correlate userspace IPs to code. They have the following structure:
309 *
310 * struct {
311 * struct perf_event_header header;
312 *
313 * u32 pid, tid;
314 * u64 addr;
315 * u64 len;
316 * u64 pgoff;
317 * char filename[];
318 * };
319 */
320 PERF_EVENT_MMAP = 1,
321
322 /*
323 * struct {
324 * struct perf_event_header header;
325 * u64 id;
326 * u64 lost;
327 * };
328 */
329 PERF_EVENT_LOST = 2,
330
331 /*
332 * struct {
333 * struct perf_event_header header;
334 *
335 * u32 pid, tid;
336 * char comm[];
337 * };
338 */
339 PERF_EVENT_COMM = 3,
340
341 /*
342 * struct {
343 * struct perf_event_header header;
344 * u32 pid, ppid;
345 * u32 tid, ptid;
346 * u64 time;
347 * };
348 */
349 PERF_EVENT_EXIT = 4,
350
351 /*
352 * struct {
353 * struct perf_event_header header;
354 * u64 time;
355 * u64 id;
356 * u64 stream_id;
357 * };
358 */
359 PERF_EVENT_THROTTLE = 5,
360 PERF_EVENT_UNTHROTTLE = 6,
361
362 /*
363 * struct {
364 * struct perf_event_header header;
365 * u32 pid, ppid;
366 * u32 tid, ptid;
367 * u64 time;
368 * };
369 */
370 PERF_EVENT_FORK = 7,
371
372 /*
373 * struct {
374 * struct perf_event_header header;
375 * u32 pid, tid;
376 *
377 * struct read_format values;
378 * };
379 */
380 PERF_EVENT_READ = 8,
381
382 /*
383 * struct {
384 * struct perf_event_header header;
385 *
386 * { u64 ip; } && PERF_SAMPLE_IP
387 * { u32 pid, tid; } && PERF_SAMPLE_TID
388 * { u64 time; } && PERF_SAMPLE_TIME
389 * { u64 addr; } && PERF_SAMPLE_ADDR
390 * { u64 id; } && PERF_SAMPLE_ID
391 * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
392 * { u32 cpu, res; } && PERF_SAMPLE_CPU
393 * { u64 period; } && PERF_SAMPLE_PERIOD
394 *
395 * { struct read_format values; } && PERF_SAMPLE_READ
396 *
397 * { u64 nr,
398 * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
399 *
400 * #
401 * # The RAW record below is opaque data wrt the ABI
402 * #
403 * # That is, the ABI doesn't make any promises wrt to
404 * # the stability of its content, it may vary depending
405 * # on event, hardware, kernel version and phase of
406 * # the moon.
407 * #
408 * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
409 * #
410 *
411 * { u32 size;
412 * char data[size];}&& PERF_SAMPLE_RAW
413 * };
414 */
415 PERF_EVENT_SAMPLE = 9,
416
417 PERF_EVENT_MAX, /* non-ABI */
418};
419
420enum perf_callchain_context {
421 PERF_CONTEXT_HV = (__u64)-32,
422 PERF_CONTEXT_KERNEL = (__u64)-128,
423 PERF_CONTEXT_USER = (__u64)-512,
424
425 PERF_CONTEXT_GUEST = (__u64)-2048,
426 PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
427 PERF_CONTEXT_GUEST_USER = (__u64)-2560,
428
429 PERF_CONTEXT_MAX = (__u64)-4095,
430};
431
432#define PERF_FLAG_FD_NO_GROUP (1U << 0)
433#define PERF_FLAG_FD_OUTPUT (1U << 1)
434
435/*
436 * In case some app still references the old symbols:
437 */
438
439#define __NR_perf_counter_open __NR_perf_event_open
440
441#define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE
442#define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE
443
444#endif /* _LINUX_PERF_COUNTER_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 43adbd7f0010..8fa71874113f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -18,10 +18,6 @@
18#include <linux/ioctl.h> 18#include <linux/ioctl.h>
19#include <asm/byteorder.h> 19#include <asm/byteorder.h>
20 20
21#ifdef CONFIG_HAVE_HW_BREAKPOINT
22#include <asm/hw_breakpoint.h>
23#endif
24
25/* 21/*
26 * User-space ABI bits: 22 * User-space ABI bits:
27 */ 23 */
@@ -215,17 +211,11 @@ struct perf_event_attr {
215 __u32 wakeup_watermark; /* bytes before wakeup */ 211 __u32 wakeup_watermark; /* bytes before wakeup */
216 }; 212 };
217 213
218 union {
219 struct { /* Hardware breakpoint info */
220 __u64 bp_addr;
221 __u32 bp_type;
222 __u32 bp_len;
223 };
224 };
225
226 __u32 __reserved_2; 214 __u32 __reserved_2;
227 215
228 __u64 __reserved_3; 216 __u64 bp_addr;
217 __u32 bp_type;
218 __u32 bp_len;
229}; 219};
230 220
231/* 221/*
@@ -451,6 +441,10 @@ enum perf_callchain_context {
451# include <asm/perf_event.h> 441# include <asm/perf_event.h>
452#endif 442#endif
453 443
444#ifdef CONFIG_HAVE_HW_BREAKPOINT
445#include <asm/hw_breakpoint.h>
446#endif
447
454#include <linux/list.h> 448#include <linux/list.h>
455#include <linux/mutex.h> 449#include <linux/mutex.h>
456#include <linux/rculist.h> 450#include <linux/rculist.h>
@@ -565,10 +559,12 @@ struct perf_pending_entry {
565 void (*func)(struct perf_pending_entry *); 559 void (*func)(struct perf_pending_entry *);
566}; 560};
567 561
568typedef void (*perf_callback_t)(struct perf_event *, void *);
569
570struct perf_sample_data; 562struct perf_sample_data;
571 563
564typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
565 struct perf_sample_data *,
566 struct pt_regs *regs);
567
572/** 568/**
573 * struct perf_event - performance event kernel representation: 569 * struct perf_event - performance event kernel representation:
574 */ 570 */
@@ -660,18 +656,12 @@ struct perf_event {
660 struct pid_namespace *ns; 656 struct pid_namespace *ns;
661 u64 id; 657 u64 id;
662 658
663 void (*overflow_handler)(struct perf_event *event, 659 perf_overflow_handler_t overflow_handler;
664 int nmi, struct perf_sample_data *data,
665 struct pt_regs *regs);
666 660
667#ifdef CONFIG_EVENT_PROFILE 661#ifdef CONFIG_EVENT_PROFILE
668 struct event_filter *filter; 662 struct event_filter *filter;
669#endif 663#endif
670 664
671 perf_callback_t callback;
672
673 perf_callback_t event_callback;
674
675#endif /* CONFIG_PERF_EVENTS */ 665#endif /* CONFIG_PERF_EVENTS */
676}; 666};
677 667
@@ -685,7 +675,7 @@ struct perf_event_context {
685 * Protect the states of the events in the list, 675 * Protect the states of the events in the list,
686 * nr_active, and the list: 676 * nr_active, and the list:
687 */ 677 */
688 spinlock_t lock; 678 raw_spinlock_t lock;
689 /* 679 /*
690 * Protect the list of events. Locking either mutex or lock 680 * Protect the list of events. Locking either mutex or lock
691 * is sufficient to ensure the list doesn't change; to change 681 * is sufficient to ensure the list doesn't change; to change
@@ -781,7 +771,7 @@ extern struct perf_event *
781perf_event_create_kernel_counter(struct perf_event_attr *attr, 771perf_event_create_kernel_counter(struct perf_event_attr *attr,
782 int cpu, 772 int cpu,
783 pid_t pid, 773 pid_t pid,
784 perf_callback_t callback); 774 perf_overflow_handler_t callback);
785extern u64 perf_event_read_value(struct perf_event *event, 775extern u64 perf_event_read_value(struct perf_event *event,
786 u64 *enabled, u64 *running); 776 u64 *enabled, u64 *running);
787 777
@@ -824,9 +814,14 @@ extern int perf_event_overflow(struct perf_event *event, int nmi,
824 */ 814 */
825static inline int is_software_event(struct perf_event *event) 815static inline int is_software_event(struct perf_event *event)
826{ 816{
827 return (event->attr.type != PERF_TYPE_RAW) && 817 switch (event->attr.type) {
828 (event->attr.type != PERF_TYPE_HARDWARE) && 818 case PERF_TYPE_SOFTWARE:
829 (event->attr.type != PERF_TYPE_HW_CACHE); 819 case PERF_TYPE_TRACEPOINT:
820 /* for now the breakpoint stuff also works as software event */
821 case PERF_TYPE_BREAKPOINT:
822 return 1;
823 }
824 return 0;
830} 825}
831 826
832extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; 827extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
@@ -876,6 +871,8 @@ extern void perf_output_copy(struct perf_output_handle *handle,
876 const void *buf, unsigned int len); 871 const void *buf, unsigned int len);
877extern int perf_swevent_get_recursion_context(void); 872extern int perf_swevent_get_recursion_context(void);
878extern void perf_swevent_put_recursion_context(int rctx); 873extern void perf_swevent_put_recursion_context(int rctx);
874extern void perf_event_enable(struct perf_event *event);
875extern void perf_event_disable(struct perf_event *event);
879#else 876#else
880static inline void 877static inline void
881perf_event_task_sched_in(struct task_struct *task, int cpu) { } 878perf_event_task_sched_in(struct task_struct *task, int cpu) { }
@@ -906,7 +903,8 @@ static inline void perf_event_fork(struct task_struct *tsk) { }
906static inline void perf_event_init(void) { } 903static inline void perf_event_init(void) { }
907static inline int perf_swevent_get_recursion_context(void) { return -1; } 904static inline int perf_swevent_get_recursion_context(void) { return -1; }
908static inline void perf_swevent_put_recursion_context(int rctx) { } 905static inline void perf_swevent_put_recursion_context(int rctx) { }
909 906static inline void perf_event_enable(struct perf_event *event) { }
907static inline void perf_event_disable(struct perf_event *event) { }
910#endif 908#endif
911 909
912#define perf_output_put(handle, x) \ 910#define perf_output_put(handle, x) \
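
To make the two changes in this file concrete (bp_addr/bp_type/bp_len now live directly in perf_event_attr, and in-kernel counters take a perf_overflow_handler_t), here is a minimal, hypothetical sketch of arming a kernel-side write watchpoint. Every foo_-prefixed identifier is illustrative; the HW_BREAKPOINT_* constants come from the hw_breakpoint headers.

        static void foo_bp_handler(struct perf_event *bp, int nmi,
                                   struct perf_sample_data *data,
                                   struct pt_regs *regs)
        {
                /* called when foo_watched is written */
        }

        static int foo_watched;
        static struct perf_event *foo_bp;

        static int foo_arm_watchpoint(void)
        {
                struct perf_event_attr attr = {
                        .type           = PERF_TYPE_BREAKPOINT,
                        .size           = sizeof(attr),
                        .bp_addr        = (unsigned long)&foo_watched,
                        .bp_type        = HW_BREAKPOINT_W,
                        .bp_len         = HW_BREAKPOINT_LEN_4,
                        .sample_period  = 1,
                };

                foo_bp = perf_event_create_kernel_counter(&attr, 0 /* cpu */, -1,
                                                          foo_bp_handler);
                return IS_ERR(foo_bp) ? PTR_ERR(foo_bp) : 0;
        }

Note that is_software_event() above now also returns 1 for PERF_TYPE_BREAKPOINT, matching the comment that breakpoints are treated as software events for now.
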
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 45926d77d6ac..8227f717c70f 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -81,7 +81,8 @@ struct plist_head {
81 struct list_head prio_list; 81 struct list_head prio_list;
82 struct list_head node_list; 82 struct list_head node_list;
83#ifdef CONFIG_DEBUG_PI_LIST 83#ifdef CONFIG_DEBUG_PI_LIST
84 spinlock_t *lock; 84 raw_spinlock_t *rawlock;
85 spinlock_t *spinlock;
85#endif 86#endif
86}; 87};
87 88
@@ -91,9 +92,11 @@ struct plist_node {
91}; 92};
92 93
93#ifdef CONFIG_DEBUG_PI_LIST 94#ifdef CONFIG_DEBUG_PI_LIST
94# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock 95# define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock
96# define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock
95#else 97#else
96# define PLIST_HEAD_LOCK_INIT(_lock) 98# define PLIST_HEAD_LOCK_INIT(_lock)
99# define PLIST_HEAD_LOCK_INIT_RAW(_lock)
97#endif 100#endif
98 101
99#define _PLIST_HEAD_INIT(head) \ 102#define _PLIST_HEAD_INIT(head) \
@@ -107,11 +110,22 @@ struct plist_node {
107 */ 110 */
108#define PLIST_HEAD_INIT(head, _lock) \ 111#define PLIST_HEAD_INIT(head, _lock) \
109{ \ 112{ \
110 _PLIST_HEAD_INIT(head), \ 113 _PLIST_HEAD_INIT(head), \
111 PLIST_HEAD_LOCK_INIT(&(_lock)) \ 114 PLIST_HEAD_LOCK_INIT(&(_lock)) \
112} 115}
113 116
114/** 117/**
118 * PLIST_HEAD_INIT_RAW - static struct plist_head initializer
119 * @head: struct plist_head variable name
120 * @_lock: lock to initialize for this list
121 */
122#define PLIST_HEAD_INIT_RAW(head, _lock) \
123{ \
124 _PLIST_HEAD_INIT(head), \
125 PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \
126}
127
128/**
115 * PLIST_NODE_INIT - static struct plist_node initializer 129 * PLIST_NODE_INIT - static struct plist_node initializer
116 * @node: struct plist_node variable name 130 * @node: struct plist_node variable name
117 * @__prio: initial node priority 131 * @__prio: initial node priority
@@ -119,13 +133,13 @@ struct plist_node {
119#define PLIST_NODE_INIT(node, __prio) \ 133#define PLIST_NODE_INIT(node, __prio) \
120{ \ 134{ \
121 .prio = (__prio), \ 135 .prio = (__prio), \
122 .plist = { _PLIST_HEAD_INIT((node).plist) }, \ 136 .plist = { _PLIST_HEAD_INIT((node).plist) }, \
123} 137}
124 138
125/** 139/**
126 * plist_head_init - dynamic struct plist_head initializer 140 * plist_head_init - dynamic struct plist_head initializer
127 * @head: &struct plist_head pointer 141 * @head: &struct plist_head pointer
128 * @lock: list spinlock, remembered for debugging 142 * @lock: spinlock protecting the list (debugging)
129 */ 143 */
130static inline void 144static inline void
131plist_head_init(struct plist_head *head, spinlock_t *lock) 145plist_head_init(struct plist_head *head, spinlock_t *lock)
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock)
133 INIT_LIST_HEAD(&head->prio_list); 147 INIT_LIST_HEAD(&head->prio_list);
134 INIT_LIST_HEAD(&head->node_list); 148 INIT_LIST_HEAD(&head->node_list);
135#ifdef CONFIG_DEBUG_PI_LIST 149#ifdef CONFIG_DEBUG_PI_LIST
136 head->lock = lock; 150 head->spinlock = lock;
151 head->rawlock = NULL;
152#endif
153}
154
155/**
156 * plist_head_init_raw - dynamic struct plist_head initializer
157 * @head: &struct plist_head pointer
158 * @lock: raw_spinlock protecting the list (debugging)
159 */
160static inline void
161plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock)
162{
163 INIT_LIST_HEAD(&head->prio_list);
164 INIT_LIST_HEAD(&head->node_list);
165#ifdef CONFIG_DEBUG_PI_LIST
166 head->rawlock = lock;
167 head->spinlock = NULL;
137#endif 168#endif
138} 169}
139 170
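
For illustration, a hypothetical user of the new raw variants pairs a raw_spinlock_t with the list either statically or at run time (the foo_ names are made up for the example):

        static DEFINE_RAW_SPINLOCK(foo_lock);
        static struct plist_head foo_head = PLIST_HEAD_INIT_RAW(foo_head, foo_lock);

        /* the dynamic equivalent, e.g. from an init function: */
                plist_head_init_raw(&foo_head, &foo_lock);

The spinlock/rawlock split only matters under CONFIG_DEBUG_PI_LIST, where the debug code needs to know which lock type protects the list.
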
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 0d65934246af..198b8f9fe05e 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -219,7 +219,7 @@ struct dev_pm_ops {
219 * to RAM and hibernation. 219 * to RAM and hibernation.
220 */ 220 */
221#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ 221#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
222struct dev_pm_ops name = { \ 222const struct dev_pm_ops name = { \
223 .suspend = suspend_fn, \ 223 .suspend = suspend_fn, \
224 .resume = resume_fn, \ 224 .resume = resume_fn, \
225 .freeze = suspend_fn, \ 225 .freeze = suspend_fn, \
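
With the const added above, drivers keep using the macro exactly as before; a hypothetical platform driver (all foo_ names illustrative) simply ends up with a read-only ops table:

        static int foo_suspend(struct device *dev) { return 0; }
        static int foo_resume(struct device *dev)  { return 0; }

        static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

        static struct platform_driver foo_driver = {
                .driver = {
                        .name   = "foo",
                        .owner  = THIS_MODULE,
                        .pm     = &foo_pm_ops,
                },
        };
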
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index fddfafaed024..7c4193eb0072 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -334,6 +334,19 @@ extern struct pnp_protocol pnpbios_protocol;
334#define pnp_device_is_pnpbios(dev) 0 334#define pnp_device_is_pnpbios(dev) 0
335#endif 335#endif
336 336
337#ifdef CONFIG_PNPACPI
338extern struct pnp_protocol pnpacpi_protocol;
339
340static inline struct acpi_device *pnp_acpi_device(struct pnp_dev *dev)
341{
342 if (dev->protocol == &pnpacpi_protocol)
343 return dev->data;
344 return NULL;
345}
346#else
347#define pnp_acpi_device(dev) 0
348#endif
349
337/* status */ 350/* status */
338#define PNP_READY 0x0000 351#define PNP_READY 0x0000
339#define PNP_ATTACHED 0x0001 352#define PNP_ATTACHED 0x0001
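
As an illustration of the new helper, a driver can ask whether its PnP device is backed by ACPI (pnp_dev here is an assumed struct pnp_dev pointer):

        struct acpi_device *adev = pnp_acpi_device(pnp_dev);

        if (adev)
                dev_info(&pnp_dev->dev, "ACPI companion: %s\n",
                         dev_name(&adev->dev));

In the !CONFIG_PNPACPI case the macro evaluates to 0, so the branch is never taken.
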
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 7fc194aef8c2..2110a81c5e2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -2,13 +2,25 @@
2#define _LINUX_POISON_H 2#define _LINUX_POISON_H
3 3
4/********** include/linux/list.h **********/ 4/********** include/linux/list.h **********/
5
6/*
7 * Architectures might want to move the poison pointer offset
8 * into some well-recognized area such as 0xdead000000000000,
9 * that is also not mappable by user-space exploits:
10 */
11#ifdef CONFIG_ILLEGAL_POINTER_VALUE
12# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
13#else
14# define POISON_POINTER_DELTA 0
15#endif
16
5/* 17/*
6 * These are non-NULL pointers that will result in page faults 18 * These are non-NULL pointers that will result in page faults
7 * under normal circumstances, used to verify that nobody uses 19 * under normal circumstances, used to verify that nobody uses
8 * non-initialized list entries. 20 * non-initialized list entries.
9 */ 21 */
10#define LIST_POISON1 ((void *) 0x00100100) 22#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
11#define LIST_POISON2 ((void *) 0x00200200) 23#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
12 24
13/********** include/linux/timer.h **********/ 25/********** include/linux/timer.h **********/
14/* 26/*
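
To make the new offset concrete: if an architecture selected, say, CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 (a hypothetical but representative 64-bit choice), the constants above become LIST_POISON1 = 0xdead000000100100 and LIST_POISON2 = 0xdead000000200200, still easy to recognize in an oops yet sitting in a range user space cannot map. With the option unset, POISON_POINTER_DELTA is 0 and the historical values are unchanged.
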
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 7456d7d87a19..56f2d63a5cbb 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -105,12 +105,7 @@ static inline int ptrace_reparented(struct task_struct *child)
105{ 105{
106 return child->real_parent != child->parent; 106 return child->real_parent != child->parent;
107} 107}
108static inline void ptrace_link(struct task_struct *child, 108
109 struct task_struct *new_parent)
110{
111 if (unlikely(child->ptrace))
112 __ptrace_link(child, new_parent);
113}
114static inline void ptrace_unlink(struct task_struct *child) 109static inline void ptrace_unlink(struct task_struct *child)
115{ 110{
116 if (unlikely(child->ptrace)) 111 if (unlikely(child->ptrace))
@@ -169,9 +164,9 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
169 INIT_LIST_HEAD(&child->ptraced); 164 INIT_LIST_HEAD(&child->ptraced);
170 child->parent = child->real_parent; 165 child->parent = child->real_parent;
171 child->ptrace = 0; 166 child->ptrace = 0;
172 if (unlikely(ptrace)) { 167 if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
173 child->ptrace = current->ptrace; 168 child->ptrace = current->ptrace;
174 ptrace_link(child, current->parent); 169 __ptrace_link(child, current->parent);
175 } 170 }
176} 171}
177 172
@@ -278,6 +273,18 @@ static inline void user_enable_block_step(struct task_struct *task)
278} 273}
279#endif /* arch_has_block_step */ 274#endif /* arch_has_block_step */
280 275
276#ifdef ARCH_HAS_USER_SINGLE_STEP_INFO
277extern void user_single_step_siginfo(struct task_struct *tsk,
278 struct pt_regs *regs, siginfo_t *info);
279#else
280static inline void user_single_step_siginfo(struct task_struct *tsk,
281 struct pt_regs *regs, siginfo_t *info)
282{
283 memset(info, 0, sizeof(*info));
284 info->si_signo = SIGTRAP;
285}
286#endif
287
281#ifndef arch_ptrace_stop_needed 288#ifndef arch_ptrace_stop_needed
282/** 289/**
283 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called 290 * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 7a9754c96775..01b3d759f1fc 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -10,7 +10,7 @@ struct platform_pwm_backlight_data {
10 unsigned int dft_brightness; 10 unsigned int dft_brightness;
11 unsigned int pwm_period_ns; 11 unsigned int pwm_period_ns;
12 int (*init)(struct device *dev); 12 int (*init)(struct device *dev);
13 int (*notify)(int brightness); 13 int (*notify)(struct device *dev, int brightness);
14 void (*exit)(struct device *dev); 14 void (*exit)(struct device *dev);
15}; 15};
16 16
diff --git a/include/linux/quota.h b/include/linux/quota.h
index e70e62194243..a6861f117480 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -315,8 +315,9 @@ struct dquot_operations {
315 int (*claim_space) (struct inode *, qsize_t); 315 int (*claim_space) (struct inode *, qsize_t);
316 /* release rsved quota for delayed alloc */ 316 /* release rsved quota for delayed alloc */
317 void (*release_rsv) (struct inode *, qsize_t); 317 void (*release_rsv) (struct inode *, qsize_t);
318 /* get reserved quota for delayed alloc */ 318 /* get reserved quota for delayed alloc, value returned is managed by
319 qsize_t (*get_reserved_space) (struct inode *); 319 * quota code only */
320 qsize_t *(*get_reserved_space) (struct inode *);
320}; 321};
321 322
322/* Operations handling requests from userspace */ 323/* Operations handling requests from userspace */
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index d92480f8285c..1cbbd2c11aa9 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -78,6 +78,25 @@ struct raid6_calls {
78/* Selected algorithm */ 78/* Selected algorithm */
79extern struct raid6_calls raid6_call; 79extern struct raid6_calls raid6_call;
80 80
81/* Various routine sets */
82extern const struct raid6_calls raid6_intx1;
83extern const struct raid6_calls raid6_intx2;
84extern const struct raid6_calls raid6_intx4;
85extern const struct raid6_calls raid6_intx8;
86extern const struct raid6_calls raid6_intx16;
87extern const struct raid6_calls raid6_intx32;
88extern const struct raid6_calls raid6_mmxx1;
89extern const struct raid6_calls raid6_mmxx2;
90extern const struct raid6_calls raid6_sse1x1;
91extern const struct raid6_calls raid6_sse1x2;
92extern const struct raid6_calls raid6_sse2x1;
93extern const struct raid6_calls raid6_sse2x2;
94extern const struct raid6_calls raid6_sse2x4;
95extern const struct raid6_calls raid6_altivec1;
96extern const struct raid6_calls raid6_altivec2;
97extern const struct raid6_calls raid6_altivec4;
98extern const struct raid6_calls raid6_altivec8;
99
81/* Algorithm list */ 100/* Algorithm list */
82extern const struct raid6_calls * const raid6_algos[]; 101extern const struct raid6_calls * const raid6_algos[];
83int raid6_select_algo(void); 102int raid6_select_algo(void);
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index c4ba9a78721e..96cc307ed9f4 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -101,4 +101,9 @@ static inline void exit_rcu(void)
101{ 101{
102} 102}
103 103
104static inline int rcu_preempt_depth(void)
105{
106 return 0;
107}
108
104#endif /* __LINUX_RCUTINY_H */ 109#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index c93eee5911b0..8044b1b94333 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -45,6 +45,12 @@ extern void __rcu_read_unlock(void);
45extern void synchronize_rcu(void); 45extern void synchronize_rcu(void);
46extern void exit_rcu(void); 46extern void exit_rcu(void);
47 47
48/*
49 * Defined as macro as it is a very low level header
50 * included from areas that don't even know about current
51 */
52#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
53
48#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ 54#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
49 55
50static inline void __rcu_read_lock(void) 56static inline void __rcu_read_lock(void)
@@ -63,6 +69,11 @@ static inline void exit_rcu(void)
63{ 69{
64} 70}
65 71
72static inline int rcu_preempt_depth(void)
73{
74 return 0;
75}
76
66#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ 77#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
67 78
68static inline void __rcu_read_lock_bh(void) 79static inline void __rcu_read_lock_bh(void)
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 490c5b37b6d7..030d92255c7a 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -35,6 +35,8 @@
35#ifndef __LINUX_REGULATOR_CONSUMER_H_ 35#ifndef __LINUX_REGULATOR_CONSUMER_H_
36#define __LINUX_REGULATOR_CONSUMER_H_ 36#define __LINUX_REGULATOR_CONSUMER_H_
37 37
38#include <linux/device.h>
39
38/* 40/*
39 * Regulator operating modes. 41 * Regulator operating modes.
40 * 42 *
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 87f5f176d4ef..234a8476cba8 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -43,16 +43,20 @@ struct regulator;
43/** 43/**
44 * struct regulator_state - regulator state during low power system states 44 * struct regulator_state - regulator state during low power system states
45 * 45 *
46 * This describes a regulators state during a system wide low power state. 46 * This describes a regulators state during a system wide low power
47 * state. One of enabled or disabled must be set for the
48 * configuration to be applied.
47 * 49 *
48 * @uV: Operating voltage during suspend. 50 * @uV: Operating voltage during suspend.
49 * @mode: Operating mode during suspend. 51 * @mode: Operating mode during suspend.
50 * @enabled: Enabled during suspend. 52 * @enabled: Enabled during suspend.
53 * @disabled: Disabled during suspend.
51 */ 54 */
52struct regulator_state { 55struct regulator_state {
53 int uV; /* suspend voltage */ 56 int uV; /* suspend voltage */
54 unsigned int mode; /* suspend regulator operating mode */ 57 unsigned int mode; /* suspend regulator operating mode */
55 int enabled; /* is regulator enabled in this suspend state */ 58 int enabled; /* is regulator enabled in this suspend state */
 59 int disabled; /* is the regulator disabled in this suspend state */
56}; 60};
57 61
58/** 62/**
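
For illustration, a board file following the clarified rule (one of enabled or disabled must be set for the state to be applied) might constrain a supply as in this hypothetical sketch; all names and values are illustrative:

        static struct regulator_init_data foo_ldo_data = {
                .constraints = {
                        .valid_ops_mask = REGULATOR_CHANGE_STATUS,
                        .state_mem      = {
                                .disabled = 1,  /* force off in suspend-to-RAM */
                        },
                },
        };
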
diff --git a/include/linux/regulator/max8660.h b/include/linux/regulator/max8660.h
new file mode 100644
index 000000000000..9936763621c7
--- /dev/null
+++ b/include/linux/regulator/max8660.h
@@ -0,0 +1,57 @@
1/*
2 * max8660.h -- Voltage regulation for the Maxim 8660/8661
3 *
4 * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#ifndef __LINUX_REGULATOR_MAX8660_H
21#define __LINUX_REGULATOR_MAX8660_H
22
23#include <linux/regulator/machine.h>
24
25enum {
26 MAX8660_V3,
27 MAX8660_V4,
28 MAX8660_V5,
29 MAX8660_V6,
30 MAX8660_V7,
31 MAX8660_V_END,
32};
33
34/**
35 * max8660_subdev_data - regulator subdev data
36 * @id: regulator id
37 * @name: regulator name
38 * @platform_data: regulator init data
39 */
40struct max8660_subdev_data {
41 int id;
42 char *name;
43 struct regulator_init_data *platform_data;
44};
45
46/**
47 * max8660_platform_data - platform data for max8660
48 * @num_subdevs: number of regulators used
49 * @subdevs: pointer to regulators used
50 * @en34_is_high: if EN34 is driven high, regulators cannot be en-/disabled.
51 */
52struct max8660_platform_data {
53 int num_subdevs;
54 struct max8660_subdev_data *subdevs;
55 unsigned en34_is_high:1;
56};
57#endif
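
A hypothetical board registration built on this new header could look as follows (regulator constraints elided; every foo_ identifier is illustrative):

        static struct regulator_init_data foo_v3_data = {
                /* constraints for the V3 output ... */
        };

        static struct max8660_subdev_data foo_max8660_subdevs[] = {
                {
                        .id             = MAX8660_V3,
                        .name           = "vcc_core",
                        .platform_data  = &foo_v3_data,
                },
        };

        static struct max8660_platform_data foo_max8660_pdata = {
                .num_subdevs    = ARRAY_SIZE(foo_max8660_subdevs),
                .subdevs        = foo_max8660_subdevs,
                .en34_is_high   = 1,    /* EN34 wired high: outputs cannot be gated */
        };

The pdata would then be attached to the MAX8660's I2C board info in the usual way.
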
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index a05b4a20768d..1ba3cf6edfbb 100644
--- a/include/linux/reiserfs_fs.h
+++ b/include/linux/reiserfs_fs.h
@@ -62,6 +62,12 @@ void reiserfs_write_unlock(struct super_block *s);
62int reiserfs_write_lock_once(struct super_block *s); 62int reiserfs_write_lock_once(struct super_block *s);
63void reiserfs_write_unlock_once(struct super_block *s, int lock_depth); 63void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
64 64
65#ifdef CONFIG_REISERFS_CHECK
66void reiserfs_lock_check_recursive(struct super_block *s);
67#else
68static inline void reiserfs_lock_check_recursive(struct super_block *s) { }
69#endif
70
65/* 71/*
66 * Several mutexes depend on the write lock. 72 * Several mutexes depend on the write lock.
67 * However sometimes we want to relax the write lock while we hold 73 * However sometimes we want to relax the write lock while we hold
@@ -92,11 +98,31 @@ void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
92static inline void reiserfs_mutex_lock_safe(struct mutex *m, 98static inline void reiserfs_mutex_lock_safe(struct mutex *m,
93 struct super_block *s) 99 struct super_block *s)
94{ 100{
101 reiserfs_lock_check_recursive(s);
95 reiserfs_write_unlock(s); 102 reiserfs_write_unlock(s);
96 mutex_lock(m); 103 mutex_lock(m);
97 reiserfs_write_lock(s); 104 reiserfs_write_lock(s);
98} 105}
99 106
107static inline void
108reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass,
109 struct super_block *s)
110{
111 reiserfs_lock_check_recursive(s);
112 reiserfs_write_unlock(s);
113 mutex_lock_nested(m, subclass);
114 reiserfs_write_lock(s);
115}
116
117static inline void
118reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
119{
120 reiserfs_lock_check_recursive(s);
121 reiserfs_write_unlock(s);
122 down_read(sem);
123 reiserfs_write_lock(s);
124}
125
100/* 126/*
101 * When we schedule, we usually want to also release the write lock, 127 * When we schedule, we usually want to also release the write lock,
102 * according to the previous bkl based locking scheme of reiserfs. 128 * according to the previous bkl based locking scheme of reiserfs.
@@ -2051,25 +2077,12 @@ void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
2051int search_by_entry_key(struct super_block *sb, const struct cpu_key *key, 2077int search_by_entry_key(struct super_block *sb, const struct cpu_key *key,
2052 struct treepath *path, struct reiserfs_dir_entry *de); 2078 struct treepath *path, struct reiserfs_dir_entry *de);
2053struct dentry *reiserfs_get_parent(struct dentry *); 2079struct dentry *reiserfs_get_parent(struct dentry *);
2054/* procfs.c */
2055
2056#if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO )
2057#define REISERFS_PROC_INFO
2058#else
2059#undef REISERFS_PROC_INFO
2060#endif
2061 2080
2081#ifdef CONFIG_REISERFS_PROC_INFO
2062int reiserfs_proc_info_init(struct super_block *sb); 2082int reiserfs_proc_info_init(struct super_block *sb);
2063int reiserfs_proc_info_done(struct super_block *sb); 2083int reiserfs_proc_info_done(struct super_block *sb);
2064struct proc_dir_entry *reiserfs_proc_register_global(char *name,
2065 read_proc_t * func);
2066void reiserfs_proc_unregister_global(const char *name);
2067int reiserfs_proc_info_global_init(void); 2084int reiserfs_proc_info_global_init(void);
2068int reiserfs_proc_info_global_done(void); 2085int reiserfs_proc_info_global_done(void);
2069int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
2070 int count, int *eof, void *data);
2071
2072#if defined( REISERFS_PROC_INFO )
2073 2086
2074#define PROC_EXP( e ) e 2087#define PROC_EXP( e ) e
2075 2088
@@ -2084,6 +2097,26 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
2084 PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \ 2097 PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \
2085 PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) ) 2098 PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) )
2086#else 2099#else
2100static inline int reiserfs_proc_info_init(struct super_block *sb)
2101{
2102 return 0;
2103}
2104
2105static inline int reiserfs_proc_info_done(struct super_block *sb)
2106{
2107 return 0;
2108}
2109
2110static inline int reiserfs_proc_info_global_init(void)
2111{
2112 return 0;
2113}
2114
2115static inline int reiserfs_proc_info_global_done(void)
2116{
2117 return 0;
2118}
2119
2087#define PROC_EXP( e ) 2120#define PROC_EXP( e )
2088#define VOID_V ( ( void ) 0 ) 2121#define VOID_V ( ( void ) 0 )
2089#define PROC_INFO_MAX( sb, field, value ) VOID_V 2122#define PROC_INFO_MAX( sb, field, value ) VOID_V
diff --git a/include/linux/resource.h b/include/linux/resource.h
index 40fc7e626082..f1e914eefeab 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -3,8 +3,6 @@
3 3
4#include <linux/time.h> 4#include <linux/time.h>
5 5
6struct task_struct;
7
8/* 6/*
9 * Resource control/accounting header file for linux 7 * Resource control/accounting header file for linux
10 */ 8 */
@@ -70,6 +68,12 @@ struct rlimit {
70 */ 68 */
71#include <asm/resource.h> 69#include <asm/resource.h>
72 70
71#ifdef __KERNEL__
72
73struct task_struct;
74
73int getrusage(struct task_struct *p, int who, struct rusage __user *ru); 75int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
74 76
77#endif /* __KERNEL__ */
78
75#endif 79#endif
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index cb0ba7032609..b019ae64e2ab 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -26,6 +26,9 @@
26 */ 26 */
27struct anon_vma { 27struct anon_vma {
28 spinlock_t lock; /* Serialize access to vma list */ 28 spinlock_t lock; /* Serialize access to vma list */
29#ifdef CONFIG_KSM
30 atomic_t ksm_refcount;
31#endif
29 /* 32 /*
30 * NOTE: the LSB of the head.next is set by 33 * NOTE: the LSB of the head.next is set by
31 * mm_take_all_locks() _after_ taking the above lock. So the 34 * mm_take_all_locks() _after_ taking the above lock. So the
@@ -38,6 +41,34 @@ struct anon_vma {
38}; 41};
39 42
40#ifdef CONFIG_MMU 43#ifdef CONFIG_MMU
44#ifdef CONFIG_KSM
45static inline void ksm_refcount_init(struct anon_vma *anon_vma)
46{
47 atomic_set(&anon_vma->ksm_refcount, 0);
48}
49
50static inline int ksm_refcount(struct anon_vma *anon_vma)
51{
52 return atomic_read(&anon_vma->ksm_refcount);
53}
54#else
55static inline void ksm_refcount_init(struct anon_vma *anon_vma)
56{
57}
58
59static inline int ksm_refcount(struct anon_vma *anon_vma)
60{
61 return 0;
62}
63#endif /* CONFIG_KSM */
64
65static inline struct anon_vma *page_anon_vma(struct page *page)
66{
67 if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) !=
68 PAGE_MAPPING_ANON)
69 return NULL;
70 return page_rmapping(page);
71}
41 72
42static inline void anon_vma_lock(struct vm_area_struct *vma) 73static inline void anon_vma_lock(struct vm_area_struct *vma)
43{ 74{
@@ -62,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *);
62void anon_vma_unlink(struct vm_area_struct *); 93void anon_vma_unlink(struct vm_area_struct *);
63void anon_vma_link(struct vm_area_struct *); 94void anon_vma_link(struct vm_area_struct *);
64void __anon_vma_link(struct vm_area_struct *); 95void __anon_vma_link(struct vm_area_struct *);
96void anon_vma_free(struct anon_vma *);
65 97
66/* 98/*
67 * rmap interfaces called when adding or removing pte of page 99 * rmap interfaces called when adding or removing pte of page
@@ -81,6 +113,9 @@ static inline void page_dup_rmap(struct page *page)
81 */ 113 */
82int page_referenced(struct page *, int is_locked, 114int page_referenced(struct page *, int is_locked,
83 struct mem_cgroup *cnt, unsigned long *vm_flags); 115 struct mem_cgroup *cnt, unsigned long *vm_flags);
116int page_referenced_one(struct page *, struct vm_area_struct *,
117 unsigned long address, unsigned int *mapcount, unsigned long *vm_flags);
118
84enum ttu_flags { 119enum ttu_flags {
85 TTU_UNMAP = 0, /* unmap mode */ 120 TTU_UNMAP = 0, /* unmap mode */
86 TTU_MIGRATION = 1, /* migration mode */ 121 TTU_MIGRATION = 1, /* migration mode */
@@ -94,6 +129,8 @@ enum ttu_flags {
94#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) 129#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
95 130
96int try_to_unmap(struct page *, enum ttu_flags flags); 131int try_to_unmap(struct page *, enum ttu_flags flags);
132int try_to_unmap_one(struct page *, struct vm_area_struct *,
133 unsigned long address, enum ttu_flags flags);
97 134
98/* 135/*
99 * Called from mm/filemap_xip.c to unmap empty zero page 136 * Called from mm/filemap_xip.c to unmap empty zero page
@@ -127,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page);
127void page_unlock_anon_vma(struct anon_vma *anon_vma); 164void page_unlock_anon_vma(struct anon_vma *anon_vma);
128int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); 165int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
129 166
167/*
168 * Called by migrate.c to remove migration ptes, but might be used more later.
169 */
170int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
171 struct vm_area_struct *, unsigned long, void *), void *arg);
172
130#else /* !CONFIG_MMU */ 173#else /* !CONFIG_MMU */
131 174
132#define anon_vma_init() do {} while (0) 175#define anon_vma_init() do {} while (0)
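
For illustration, a caller of the newly exported rmap_walk() supplies a callback with the signature declared above; the name and body below are purely illustrative:

        static int foo_rmap_one(struct page *page, struct vm_area_struct *vma,
                                unsigned long address, void *arg)
        {
                /* inspect or fix up the mapping of @page at @address in @vma */
                return SWAP_AGAIN;      /* keep walking the remaining vmas */
        }

                /* ... */
                rmap_walk(page, foo_rmap_one, NULL);

This is the hook migrate.c uses to remove migration ptes, as the comment notes.
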
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index f19b00b7d530..281d8fd775e8 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -24,7 +24,7 @@
24 * @owner: the mutex owner 24 * @owner: the mutex owner
25 */ 25 */
26struct rt_mutex { 26struct rt_mutex {
27 spinlock_t wait_lock; 27 raw_spinlock_t wait_lock;
28 struct plist_head wait_list; 28 struct plist_head wait_list;
29 struct task_struct *owner; 29 struct task_struct *owner;
30#ifdef CONFIG_DEBUG_RT_MUTEXES 30#ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -63,8 +63,8 @@ struct hrtimer_sleeper;
63#endif 63#endif
64 64
65#define __RT_MUTEX_INITIALIZER(mutexname) \ 65#define __RT_MUTEX_INITIALIZER(mutexname) \
66 { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ 66 { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
67 , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ 67 , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \
68 , .owner = NULL \ 68 , .owner = NULL \
69 __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} 69 __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
70 70
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
new file mode 100644
index 000000000000..71e0b00b6f2c
--- /dev/null
+++ b/include/linux/rwlock.h
@@ -0,0 +1,125 @@
1#ifndef __LINUX_RWLOCK_H
2#define __LINUX_RWLOCK_H
3
4#ifndef __LINUX_SPINLOCK_H
5# error "please don't include this file directly"
6#endif
7
8/*
9 * rwlock related methods
10 *
11 * split out from spinlock.h
12 *
13 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
14 * Released under the General Public License (GPL).
15 */
16
17#ifdef CONFIG_DEBUG_SPINLOCK
18 extern void __rwlock_init(rwlock_t *lock, const char *name,
19 struct lock_class_key *key);
20# define rwlock_init(lock) \
21do { \
22 static struct lock_class_key __key; \
23 \
24 __rwlock_init((lock), #lock, &__key); \
25} while (0)
26#else
27# define rwlock_init(lock) \
28 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
29#endif
30
31#ifdef CONFIG_DEBUG_SPINLOCK
32 extern void do_raw_read_lock(rwlock_t *lock);
33#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
34 extern int do_raw_read_trylock(rwlock_t *lock);
35 extern void do_raw_read_unlock(rwlock_t *lock);
36 extern void do_raw_write_lock(rwlock_t *lock);
37#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
38 extern int do_raw_write_trylock(rwlock_t *lock);
39 extern void do_raw_write_unlock(rwlock_t *lock);
40#else
41# define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock)
42# define do_raw_read_lock_flags(lock, flags) \
43 arch_read_lock_flags(&(lock)->raw_lock, *(flags))
44# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
45# define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock)
46# define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock)
47# define do_raw_write_lock_flags(lock, flags) \
48 arch_write_lock_flags(&(lock)->raw_lock, *(flags))
49# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
50# define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock)
51#endif
52
53#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
54#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
55
56/*
57 * Define the various rw_lock methods. Note we define these
58 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
59 * methods are defined as nops in the case they are not required.
60 */
61#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
62#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
63
64#define write_lock(lock) _raw_write_lock(lock)
65#define read_lock(lock) _raw_read_lock(lock)
66
67#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
68
69#define read_lock_irqsave(lock, flags) \
70 do { \
71 typecheck(unsigned long, flags); \
72 flags = _raw_read_lock_irqsave(lock); \
73 } while (0)
74#define write_lock_irqsave(lock, flags) \
75 do { \
76 typecheck(unsigned long, flags); \
77 flags = _raw_write_lock_irqsave(lock); \
78 } while (0)
79
80#else
81
82#define read_lock_irqsave(lock, flags) \
83 do { \
84 typecheck(unsigned long, flags); \
85 _raw_read_lock_irqsave(lock, flags); \
86 } while (0)
87#define write_lock_irqsave(lock, flags) \
88 do { \
89 typecheck(unsigned long, flags); \
90 _raw_write_lock_irqsave(lock, flags); \
91 } while (0)
92
93#endif
94
95#define read_lock_irq(lock) _raw_read_lock_irq(lock)
96#define read_lock_bh(lock) _raw_read_lock_bh(lock)
97#define write_lock_irq(lock) _raw_write_lock_irq(lock)
98#define write_lock_bh(lock) _raw_write_lock_bh(lock)
99#define read_unlock(lock) _raw_read_unlock(lock)
100#define write_unlock(lock) _raw_write_unlock(lock)
101#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
102#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
103
104#define read_unlock_irqrestore(lock, flags) \
105 do { \
106 typecheck(unsigned long, flags); \
107 _raw_read_unlock_irqrestore(lock, flags); \
108 } while (0)
109#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
110
111#define write_unlock_irqrestore(lock, flags) \
112 do { \
113 typecheck(unsigned long, flags); \
114 _raw_write_unlock_irqrestore(lock, flags); \
115 } while (0)
116#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
117
118#define write_trylock_irqsave(lock, flags) \
119({ \
120 local_irq_save(flags); \
121 write_trylock(lock) ? \
122 1 : ({ local_irq_restore(flags); 0; }); \
123})
124
125#endif /* __LINUX_RWLOCK_H */
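
As a usage illustration of the API collected in this new header (data and function names are hypothetical):

        static DEFINE_RWLOCK(foo_lock);
        static int foo_value;

        static int foo_get(void)
        {
                unsigned long flags;
                int v;

                read_lock_irqsave(&foo_lock, flags);
                v = foo_value;
                read_unlock_irqrestore(&foo_lock, flags);
                return v;
        }

        static void foo_set(int v)
        {
                write_lock_irq(&foo_lock);
                foo_value = v;
                write_unlock_irq(&foo_lock);
        }

The behaviour is unchanged by the split; this header only carries the rwlock methods that previously lived in spinlock.h.
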
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
new file mode 100644
index 000000000000..9c9f0495d37c
--- /dev/null
+++ b/include/linux/rwlock_api_smp.h
@@ -0,0 +1,282 @@
1#ifndef __LINUX_RWLOCK_API_SMP_H
2#define __LINUX_RWLOCK_API_SMP_H
3
4#ifndef __LINUX_SPINLOCK_API_SMP_H
5# error "please don't include this file directly"
6#endif
7
8/*
9 * include/linux/rwlock_api_smp.h
10 *
11 * spinlock API declarations on SMP (and debug)
12 * (implemented in kernel/spinlock.c)
13 *
14 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
15 * Released under the General Public License (GPL).
16 */
17
18void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
19void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
20void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
21void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
22void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
23void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock);
24unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
25 __acquires(lock);
26unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
27 __acquires(lock);
28int __lockfunc _raw_read_trylock(rwlock_t *lock);
29int __lockfunc _raw_write_trylock(rwlock_t *lock);
30void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock);
31void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock);
32void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock);
33void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock);
34void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock);
35void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock);
36void __lockfunc
37_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
38 __releases(lock);
39void __lockfunc
40_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
41 __releases(lock);
42
43#ifdef CONFIG_INLINE_READ_LOCK
44#define _raw_read_lock(lock) __raw_read_lock(lock)
45#endif
46
47#ifdef CONFIG_INLINE_WRITE_LOCK
48#define _raw_write_lock(lock) __raw_write_lock(lock)
49#endif
50
51#ifdef CONFIG_INLINE_READ_LOCK_BH
52#define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock)
53#endif
54
55#ifdef CONFIG_INLINE_WRITE_LOCK_BH
56#define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock)
57#endif
58
59#ifdef CONFIG_INLINE_READ_LOCK_IRQ
60#define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock)
61#endif
62
63#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
64#define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock)
65#endif
66
67#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
68#define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock)
69#endif
70
71#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
72#define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock)
73#endif
74
75#ifdef CONFIG_INLINE_READ_TRYLOCK
76#define _raw_read_trylock(lock) __raw_read_trylock(lock)
77#endif
78
79#ifdef CONFIG_INLINE_WRITE_TRYLOCK
80#define _raw_write_trylock(lock) __raw_write_trylock(lock)
81#endif
82
83#ifdef CONFIG_INLINE_READ_UNLOCK
84#define _raw_read_unlock(lock) __raw_read_unlock(lock)
85#endif
86
87#ifdef CONFIG_INLINE_WRITE_UNLOCK
88#define _raw_write_unlock(lock) __raw_write_unlock(lock)
89#endif
90
91#ifdef CONFIG_INLINE_READ_UNLOCK_BH
92#define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock)
93#endif
94
95#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
96#define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock)
97#endif
98
99#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
100#define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock)
101#endif
102
103#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
104#define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock)
105#endif
106
107#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
108#define _raw_read_unlock_irqrestore(lock, flags) \
109 __raw_read_unlock_irqrestore(lock, flags)
110#endif
111
112#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
113#define _raw_write_unlock_irqrestore(lock, flags) \
114 __raw_write_unlock_irqrestore(lock, flags)
115#endif
116
117static inline int __raw_read_trylock(rwlock_t *lock)
118{
119 preempt_disable();
120 if (do_raw_read_trylock(lock)) {
121 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
122 return 1;
123 }
124 preempt_enable();
125 return 0;
126}
127
128static inline int __raw_write_trylock(rwlock_t *lock)
129{
130 preempt_disable();
131 if (do_raw_write_trylock(lock)) {
132 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
133 return 1;
134 }
135 preempt_enable();
136 return 0;
137}
138
139/*
140 * If lockdep is enabled then we use the non-preemption spin-ops
141 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
142 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
143 */
144#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
145
146static inline void __raw_read_lock(rwlock_t *lock)
147{
148 preempt_disable();
149 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
150 LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
151}
152
153static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock)
154{
155 unsigned long flags;
156
157 local_irq_save(flags);
158 preempt_disable();
159 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
160 LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock,
161 do_raw_read_lock_flags, &flags);
162 return flags;
163}
164
165static inline void __raw_read_lock_irq(rwlock_t *lock)
166{
167 local_irq_disable();
168 preempt_disable();
169 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
170 LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
171}
172
173static inline void __raw_read_lock_bh(rwlock_t *lock)
174{
175 local_bh_disable();
176 preempt_disable();
177 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
178 LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock);
179}
180
181static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock)
182{
183 unsigned long flags;
184
185 local_irq_save(flags);
186 preempt_disable();
187 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
188 LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock,
189 do_raw_write_lock_flags, &flags);
190 return flags;
191}
192
193static inline void __raw_write_lock_irq(rwlock_t *lock)
194{
195 local_irq_disable();
196 preempt_disable();
197 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
198 LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
199}
200
201static inline void __raw_write_lock_bh(rwlock_t *lock)
202{
203 local_bh_disable();
204 preempt_disable();
205 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
206 LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
207}
208
209static inline void __raw_write_lock(rwlock_t *lock)
210{
211 preempt_disable();
212 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
213 LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
214}
215
216#endif /* CONFIG_PREEMPT */
217
218static inline void __raw_write_unlock(rwlock_t *lock)
219{
220 rwlock_release(&lock->dep_map, 1, _RET_IP_);
221 do_raw_write_unlock(lock);
222 preempt_enable();
223}
224
225static inline void __raw_read_unlock(rwlock_t *lock)
226{
227 rwlock_release(&lock->dep_map, 1, _RET_IP_);
228 do_raw_read_unlock(lock);
229 preempt_enable();
230}
231
232static inline void
233__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
234{
235 rwlock_release(&lock->dep_map, 1, _RET_IP_);
236 do_raw_read_unlock(lock);
237 local_irq_restore(flags);
238 preempt_enable();
239}
240
241static inline void __raw_read_unlock_irq(rwlock_t *lock)
242{
243 rwlock_release(&lock->dep_map, 1, _RET_IP_);
244 do_raw_read_unlock(lock);
245 local_irq_enable();
246 preempt_enable();
247}
248
249static inline void __raw_read_unlock_bh(rwlock_t *lock)
250{
251 rwlock_release(&lock->dep_map, 1, _RET_IP_);
252 do_raw_read_unlock(lock);
253 preempt_enable_no_resched();
254 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
255}
256
257static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
258 unsigned long flags)
259{
260 rwlock_release(&lock->dep_map, 1, _RET_IP_);
261 do_raw_write_unlock(lock);
262 local_irq_restore(flags);
263 preempt_enable();
264}
265
266static inline void __raw_write_unlock_irq(rwlock_t *lock)
267{
268 rwlock_release(&lock->dep_map, 1, _RET_IP_);
269 do_raw_write_unlock(lock);
270 local_irq_enable();
271 preempt_enable();
272}
273
274static inline void __raw_write_unlock_bh(rwlock_t *lock)
275{
276 rwlock_release(&lock->dep_map, 1, _RET_IP_);
277 do_raw_write_unlock(lock);
278 preempt_enable_no_resched();
279 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
280}
281
282#endif /* __LINUX_RWLOCK_API_SMP_H */
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
new file mode 100644
index 000000000000..bd31808c7d8e
--- /dev/null
+++ b/include/linux/rwlock_types.h
@@ -0,0 +1,56 @@
1#ifndef __LINUX_RWLOCK_TYPES_H
2#define __LINUX_RWLOCK_TYPES_H
3
4/*
5 * include/linux/rwlock_types.h - generic rwlock type definitions
6 * and initializers
7 *
8 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
9 * Released under the General Public License (GPL).
10 */
11typedef struct {
12 arch_rwlock_t raw_lock;
13#ifdef CONFIG_GENERIC_LOCKBREAK
14 unsigned int break_lock;
15#endif
16#ifdef CONFIG_DEBUG_SPINLOCK
17 unsigned int magic, owner_cpu;
18 void *owner;
19#endif
20#ifdef CONFIG_DEBUG_LOCK_ALLOC
21 struct lockdep_map dep_map;
22#endif
23} rwlock_t;
24
25#define RWLOCK_MAGIC 0xdeaf1eed
26
27#ifdef CONFIG_DEBUG_LOCK_ALLOC
28# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
29#else
30# define RW_DEP_MAP_INIT(lockname)
31#endif
32
33#ifdef CONFIG_DEBUG_SPINLOCK
34#define __RW_LOCK_UNLOCKED(lockname) \
35 (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
36 .magic = RWLOCK_MAGIC, \
37 .owner = SPINLOCK_OWNER_INIT, \
38 .owner_cpu = -1, \
39 RW_DEP_MAP_INIT(lockname) }
40#else
41#define __RW_LOCK_UNLOCKED(lockname) \
42 (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
43 RW_DEP_MAP_INIT(lockname) }
44#endif
45
46/*
 47 * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence
48 * deprecated.
49 *
50 * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate.
51 */
52#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
53
54#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
55
56#endif /* __LINUX_RWLOCK_TYPES_H */
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index 6c3c0f6c261f..bdfcc2527970 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -68,11 +68,7 @@ extern int __down_write_trylock(struct rw_semaphore *sem);
68extern void __up_read(struct rw_semaphore *sem); 68extern void __up_read(struct rw_semaphore *sem);
69extern void __up_write(struct rw_semaphore *sem); 69extern void __up_write(struct rw_semaphore *sem);
70extern void __downgrade_write(struct rw_semaphore *sem); 70extern void __downgrade_write(struct rw_semaphore *sem);
71 71extern int rwsem_is_locked(struct rw_semaphore *sem);
72static inline int rwsem_is_locked(struct rw_semaphore *sem)
73{
74 return (sem->activity != 0);
75}
76 72
77#endif /* __KERNEL__ */ 73#endif /* __KERNEL__ */
78#endif /* _LINUX_RWSEM_SPINLOCK_H */ 74#endif /* _LINUX_RWSEM_SPINLOCK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 89115ec7d43f..6f7bba93929b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -192,6 +192,12 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
192#define TASK_DEAD 64 192#define TASK_DEAD 64
193#define TASK_WAKEKILL 128 193#define TASK_WAKEKILL 128
194#define TASK_WAKING 256 194#define TASK_WAKING 256
195#define TASK_STATE_MAX 512
196
197#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"
198
199extern char ___assert_task_state[1 - 2*!!(
200 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
195 201
196/* Convenience macros for the sake of set_task_state */ 202/* Convenience macros for the sake of set_task_state */
197#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) 203#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
@@ -371,6 +377,8 @@ extern int sysctl_max_map_count;
371 377
372#include <linux/aio.h> 378#include <linux/aio.h>
373 379
380#ifdef CONFIG_MMU
381extern void arch_pick_mmap_layout(struct mm_struct *mm);
374extern unsigned long 382extern unsigned long
375arch_get_unmapped_area(struct file *, unsigned long, unsigned long, 383arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
376 unsigned long, unsigned long); 384 unsigned long, unsigned long);
@@ -380,6 +388,9 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
380 unsigned long flags); 388 unsigned long flags);
381extern void arch_unmap_area(struct mm_struct *, unsigned long); 389extern void arch_unmap_area(struct mm_struct *, unsigned long);
382extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); 390extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
391#else
392static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
393#endif
383 394
384#if USE_SPLIT_PTLOCKS 395#if USE_SPLIT_PTLOCKS
385/* 396/*
@@ -1091,7 +1102,8 @@ struct sched_class {
1091 enum cpu_idle_type idle); 1102 enum cpu_idle_type idle);
1092 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1103 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1093 void (*post_schedule) (struct rq *this_rq); 1104 void (*post_schedule) (struct rq *this_rq);
1094 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); 1105 void (*task_waking) (struct rq *this_rq, struct task_struct *task);
1106 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1095 1107
1096 void (*set_cpus_allowed)(struct task_struct *p, 1108 void (*set_cpus_allowed)(struct task_struct *p,
1097 const struct cpumask *newmask); 1109 const struct cpumask *newmask);
@@ -1102,7 +1114,7 @@ struct sched_class {
1102 1114
1103 void (*set_curr_task) (struct rq *rq); 1115 void (*set_curr_task) (struct rq *rq);
1104 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); 1116 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1105 void (*task_new) (struct rq *rq, struct task_struct *p); 1117 void (*task_fork) (struct task_struct *p);
1106 1118
1107 void (*switched_from) (struct rq *this_rq, struct task_struct *task, 1119 void (*switched_from) (struct rq *this_rq, struct task_struct *task,
1108 int running); 1120 int running);
@@ -1111,10 +1123,11 @@ struct sched_class {
1111 void (*prio_changed) (struct rq *this_rq, struct task_struct *task, 1123 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1112 int oldprio, int running); 1124 int oldprio, int running);
1113 1125
1114 unsigned int (*get_rr_interval) (struct task_struct *task); 1126 unsigned int (*get_rr_interval) (struct rq *rq,
1127 struct task_struct *task);
1115 1128
1116#ifdef CONFIG_FAIR_GROUP_SCHED 1129#ifdef CONFIG_FAIR_GROUP_SCHED
1117 void (*moved_group) (struct task_struct *p); 1130 void (*moved_group) (struct task_struct *p, int on_rq);
1118#endif 1131#endif
1119}; 1132};
1120 1133
@@ -1151,8 +1164,6 @@ struct sched_entity {
1151 u64 start_runtime; 1164 u64 start_runtime;
1152 u64 avg_wakeup; 1165 u64 avg_wakeup;
1153 1166
1154 u64 avg_running;
1155
1156#ifdef CONFIG_SCHEDSTATS 1167#ifdef CONFIG_SCHEDSTATS
1157 u64 wait_start; 1168 u64 wait_start;
1158 u64 wait_max; 1169 u64 wait_max;
@@ -1175,7 +1186,6 @@ struct sched_entity {
1175 u64 nr_failed_migrations_running; 1186 u64 nr_failed_migrations_running;
1176 u64 nr_failed_migrations_hot; 1187 u64 nr_failed_migrations_hot;
1177 u64 nr_forced_migrations; 1188 u64 nr_forced_migrations;
1178 u64 nr_forced2_migrations;
1179 1189
1180 u64 nr_wakeups; 1190 u64 nr_wakeups;
1181 u64 nr_wakeups_sync; 1191 u64 nr_wakeups_sync;
@@ -1411,7 +1421,7 @@ struct task_struct {
1411#endif 1421#endif
1412 1422
1413 /* Protection of the PI data structures: */ 1423 /* Protection of the PI data structures: */
1414 spinlock_t pi_lock; 1424 raw_spinlock_t pi_lock;
1415 1425
1416#ifdef CONFIG_RT_MUTEXES 1426#ifdef CONFIG_RT_MUTEXES
1417 /* PI waiters blocked on a rt_mutex held by this task */ 1427 /* PI waiters blocked on a rt_mutex held by this task */
@@ -1544,10 +1554,18 @@ struct task_struct {
1544 unsigned long trace_recursion; 1554 unsigned long trace_recursion;
1545#endif /* CONFIG_TRACING */ 1555#endif /* CONFIG_TRACING */
1546 unsigned long stack_start; 1556 unsigned long stack_start;
1557#ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */
1558 struct memcg_batch_info {
1559 int do_batch; /* incremented when batch uncharge started */
1560 struct mem_cgroup *memcg; /* target memcg of uncharge */
1561 unsigned long bytes; /* uncharged usage */
1562 unsigned long memsw_bytes; /* uncharged mem+swap usage */
1563 } memcg_batch;
1564#endif
1547}; 1565};
1548 1566
1549/* Future-safe accessor for struct task_struct's cpus_allowed. */ 1567/* Future-safe accessor for struct task_struct's cpus_allowed. */
1550#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) 1568#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1551 1569
1552/* 1570/*
1553 * Priority of a process goes from 0..MAX_PRIO-1, valid RT 1571 * Priority of a process goes from 0..MAX_PRIO-1, valid RT
@@ -1840,7 +1858,8 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1840extern int sched_clock_stable; 1858extern int sched_clock_stable;
1841#endif 1859#endif
1842 1860
1843extern unsigned long long sched_clock(void); 1861/* ftrace calls sched_clock() directly */
1862extern unsigned long long notrace sched_clock(void);
1844 1863
1845extern void sched_clock_init(void); 1864extern void sched_clock_init(void);
1846extern u64 sched_clock_cpu(int cpu); 1865extern u64 sched_clock_cpu(int cpu);
@@ -1903,14 +1922,22 @@ extern unsigned int sysctl_sched_wakeup_granularity;
1903extern unsigned int sysctl_sched_shares_ratelimit; 1922extern unsigned int sysctl_sched_shares_ratelimit;
1904extern unsigned int sysctl_sched_shares_thresh; 1923extern unsigned int sysctl_sched_shares_thresh;
1905extern unsigned int sysctl_sched_child_runs_first; 1924extern unsigned int sysctl_sched_child_runs_first;
1925
1926enum sched_tunable_scaling {
1927 SCHED_TUNABLESCALING_NONE,
1928 SCHED_TUNABLESCALING_LOG,
1929 SCHED_TUNABLESCALING_LINEAR,
1930 SCHED_TUNABLESCALING_END,
1931};
1932extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
1933
1906#ifdef CONFIG_SCHED_DEBUG 1934#ifdef CONFIG_SCHED_DEBUG
1907extern unsigned int sysctl_sched_features;
1908extern unsigned int sysctl_sched_migration_cost; 1935extern unsigned int sysctl_sched_migration_cost;
1909extern unsigned int sysctl_sched_nr_migrate; 1936extern unsigned int sysctl_sched_nr_migrate;
1910extern unsigned int sysctl_sched_time_avg; 1937extern unsigned int sysctl_sched_time_avg;
1911extern unsigned int sysctl_timer_migration; 1938extern unsigned int sysctl_timer_migration;
1912 1939
1913int sched_nr_latency_handler(struct ctl_table *table, int write, 1940int sched_proc_update_handler(struct ctl_table *table, int write,
1914 void __user *buffer, size_t *length, 1941 void __user *buffer, size_t *length,
1915 loff_t *ppos); 1942 loff_t *ppos);
1916#endif 1943#endif
@@ -2066,7 +2093,6 @@ extern int kill_proc_info(int, struct siginfo *, pid_t);
2066extern int do_notify_parent(struct task_struct *, int); 2093extern int do_notify_parent(struct task_struct *, int);
2067extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); 2094extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2068extern void force_sig(int, struct task_struct *); 2095extern void force_sig(int, struct task_struct *);
2069extern void force_sig_specific(int, struct task_struct *);
2070extern int send_sig(int, struct task_struct *, int); 2096extern int send_sig(int, struct task_struct *, int);
2071extern void zap_other_threads(struct task_struct *p); 2097extern void zap_other_threads(struct task_struct *p);
2072extern struct sigqueue *sigqueue_alloc(void); 2098extern struct sigqueue *sigqueue_alloc(void);
@@ -2085,11 +2111,6 @@ static inline int kill_cad_pid(int sig, int priv)
2085#define SEND_SIG_PRIV ((struct siginfo *) 1) 2111#define SEND_SIG_PRIV ((struct siginfo *) 1)
2086#define SEND_SIG_FORCED ((struct siginfo *) 2) 2112#define SEND_SIG_FORCED ((struct siginfo *) 2)
2087 2113
2088static inline int is_si_special(const struct siginfo *info)
2089{
2090 return info <= SEND_SIG_FORCED;
2091}
2092
2093/* 2114/*
2094 * True if we are on the alternate signal stack. 2115 * True if we are on the alternate signal stack.
2095 */ 2116 */
@@ -2475,8 +2496,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2475 2496
2476#endif /* CONFIG_SMP */ 2497#endif /* CONFIG_SMP */
2477 2498
2478extern void arch_pick_mmap_layout(struct mm_struct *mm);
2479
2480#ifdef CONFIG_TRACING 2499#ifdef CONFIG_TRACING
2481extern void 2500extern void
2482__trace_special(void *__tr, void *__data, 2501__trace_special(void *__tr, void *__data,
@@ -2585,7 +2604,27 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
2585} 2604}
2586#endif /* CONFIG_MM_OWNER */ 2605#endif /* CONFIG_MM_OWNER */
2587 2606
2588#define TASK_STATE_TO_CHAR_STR "RSDTtZX" 2607static inline unsigned long task_rlimit(const struct task_struct *tsk,
2608 unsigned int limit)
2609{
2610 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
2611}
2612
2613static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
2614 unsigned int limit)
2615{
2616 return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
2617}
2618
2619static inline unsigned long rlimit(unsigned int limit)
2620{
2621 return task_rlimit(current, limit);
2622}
2623
2624static inline unsigned long rlimit_max(unsigned int limit)
2625{
2626 return task_rlimit_max(current, limit);
2627}
2589 2628
2590#endif /* __KERNEL__ */ 2629#endif /* __KERNEL__ */
2591 2630
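The hunk above replaces open-coded reads of current->signal->rlim[] with the task_rlimit()/task_rlimit_max() accessors and their current-task shorthands rlimit()/rlimit_max(). A minimal sketch of a caller, purely illustrative: only the rlimit helpers and RLIMIT_NOFILE come from the kernel headers, while check_fd_budget() is a made-up function for this example.

/*
 * Hypothetical caller of the new accessors added in the sched.h hunk
 * above; check_fd_budget() itself is invented for illustration.
 */
#include <linux/sched.h>
#include <linux/resource.h>
#include <linux/errno.h>

static int check_fd_budget(unsigned int nr_fds)
{
	/* soft limit of the current task, read via ACCESS_ONCE() */
	if (nr_fds >= rlimit(RLIMIT_NOFILE))
		return -EMFILE;

	/* hard limit of the current task */
	if (nr_fds >= rlimit_max(RLIMIT_NOFILE))
		return -EMFILE;

	return 0;
}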
diff --git a/include/linux/security.h b/include/linux/security.h
index 466cbadbd1ef..2c627d361c02 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -95,8 +95,13 @@ struct seq_file;
95extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); 95extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
96extern int cap_netlink_recv(struct sk_buff *skb, int cap); 96extern int cap_netlink_recv(struct sk_buff *skb, int cap);
97 97
98#ifdef CONFIG_MMU
98extern unsigned long mmap_min_addr; 99extern unsigned long mmap_min_addr;
99extern unsigned long dac_mmap_min_addr; 100extern unsigned long dac_mmap_min_addr;
101#else
102#define dac_mmap_min_addr 0UL
103#endif
104
100/* 105/*
101 * Values used in the task_security_ops calls 106 * Values used in the task_security_ops calls
102 */ 107 */
@@ -121,6 +126,7 @@ struct request_sock;
121#define LSM_UNSAFE_PTRACE 2 126#define LSM_UNSAFE_PTRACE 2
122#define LSM_UNSAFE_PTRACE_CAP 4 127#define LSM_UNSAFE_PTRACE_CAP 4
123 128
129#ifdef CONFIG_MMU
124/* 130/*
125 * If a hint addr is less than mmap_min_addr change hint to be as 131 * If a hint addr is less than mmap_min_addr change hint to be as
126 * low as possible but still greater than mmap_min_addr 132 * low as possible but still greater than mmap_min_addr
@@ -135,6 +141,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
135} 141}
136extern int mmap_min_addr_handler(struct ctl_table *table, int write, 142extern int mmap_min_addr_handler(struct ctl_table *table, int write,
137 void __user *buffer, size_t *lenp, loff_t *ppos); 143 void __user *buffer, size_t *lenp, loff_t *ppos);
144#endif
138 145
139#ifdef CONFIG_SECURITY 146#ifdef CONFIG_SECURITY
140 147
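With the CONFIG_MMU guard introduced above, dac_mmap_min_addr is a real variable on MMU kernels and degrades to the constant 0UL on no-MMU builds, so generic code can test it unconditionally. A hedged sketch of such a caller; addr_low_enough() is invented for this example and is not part of the patch.

/*
 * Illustrative only: compiles the same way on MMU and no-MMU kernels.
 * On !MMU the comparison is against the constant 0UL and is always false.
 */
#include <linux/security.h>

static inline int addr_low_enough(unsigned long addr)
{
	return addr < dac_mmap_min_addr;
}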
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 1b191c176bcd..8a4adbef8a0f 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -86,6 +86,7 @@ struct task_struct;
86struct sem { 86struct sem {
87 int semval; /* current value */ 87 int semval; /* current value */
88 int sempid; /* pid of last operation */ 88 int sempid; /* pid of last operation */
89 struct list_head sem_pending; /* pending single-sop operations */
89}; 90};
90 91
91/* One sem_array data structure for each set of semaphores in the system. */ 92/* One sem_array data structure for each set of semaphores in the system. */
@@ -96,11 +97,13 @@ struct sem_array {
96 struct sem *sem_base; /* ptr to first semaphore in array */ 97 struct sem *sem_base; /* ptr to first semaphore in array */
97 struct list_head sem_pending; /* pending operations to be processed */ 98 struct list_head sem_pending; /* pending operations to be processed */
98 struct list_head list_id; /* undo requests on this array */ 99 struct list_head list_id; /* undo requests on this array */
99 unsigned long sem_nsems; /* no. of semaphores in array */ 100 int sem_nsems; /* no. of semaphores in array */
101 int complex_count; /* pending complex operations */
100}; 102};
101 103
102/* One queue for each sleeping process in the system. */ 104/* One queue for each sleeping process in the system. */
103struct sem_queue { 105struct sem_queue {
106 struct list_head simple_list; /* queue of pending operations */
104 struct list_head list; /* queue of pending operations */ 107 struct list_head list; /* queue of pending operations */
105 struct task_struct *sleeper; /* this process */ 108 struct task_struct *sleeper; /* this process */
106 struct sem_undo *undo; /* undo structure */ 109 struct sem_undo *undo; /* undo structure */
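The sem.h changes give each semaphore its own pending list, add a simple_list hook to sem_queue, and track complex (multi-semaphore) operations with complex_count. The sketch below only illustrates the intended split and is not the real ipc/sem.c code; queue_op() and its parameters are invented for the example.

/*
 * Illustration of the data-structure split: a single-semaphore operation
 * can wait on that semaphore's private list via q->simple_list, while a
 * complex operation stays on the array-wide list and bumps complex_count.
 */
#include <linux/sem.h>
#include <linux/list.h>

static void queue_op(struct sem_array *sma, struct sem_queue *q,
		     struct sembuf *sops, int nsops)
{
	if (nsops == 1) {
		struct sem *curr = &sma->sem_base[sops[0].sem_num];

		list_add_tail(&q->simple_list, &curr->sem_pending);
	} else {
		list_add_tail(&q->list, &sma->sem_pending);
		sma->complex_count++;
	}
}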
diff --git a/include/linux/serio.h b/include/linux/serio.h
index e2f3044d4a4a..813d26c247ec 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -136,25 +136,6 @@ static inline void serio_continue_rx(struct serio *serio)
136 spin_unlock_irq(&serio->lock); 136 spin_unlock_irq(&serio->lock);
137} 137}
138 138
139/*
140 * Use the following functions to pin serio's driver in process context
141 */
142static inline int serio_pin_driver(struct serio *serio)
143{
144 return mutex_lock_interruptible(&serio->drv_mutex);
145}
146
147static inline void serio_pin_driver_uninterruptible(struct serio *serio)
148{
149 mutex_lock(&serio->drv_mutex);
150}
151
152static inline void serio_unpin_driver(struct serio *serio)
153{
154 mutex_unlock(&serio->drv_mutex);
155}
156
157
158#endif 139#endif
159 140
160/* 141/*
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index deee7afd8d66..e164291fb3e7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -41,20 +41,4 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
41extern int init_tmpfs(void); 41extern int init_tmpfs(void);
42extern int shmem_fill_super(struct super_block *sb, void *data, int silent); 42extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
43 43
44#ifdef CONFIG_TMPFS_POSIX_ACL
45int shmem_check_acl(struct inode *, int);
46int shmem_acl_init(struct inode *, struct inode *);
47
48extern struct xattr_handler shmem_xattr_acl_access_handler;
49extern struct xattr_handler shmem_xattr_acl_default_handler;
50
51extern struct generic_acl_operations shmem_acl_ops;
52
53#else
54static inline int shmem_acl_init(struct inode *inode, struct inode *dir)
55{
56 return 0;
57}
58#endif /* CONFIG_TMPFS_POSIX_ACL */
59
60#endif 44#endif
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 850d057500de..ca6b2b317991 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -110,7 +110,7 @@ extern struct cache_sizes malloc_sizes[];
110void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 110void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
111void *__kmalloc(size_t size, gfp_t flags); 111void *__kmalloc(size_t size, gfp_t flags);
112 112
113#ifdef CONFIG_KMEMTRACE 113#ifdef CONFIG_TRACING
114extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags); 114extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
115extern size_t slab_buffer_size(struct kmem_cache *cachep); 115extern size_t slab_buffer_size(struct kmem_cache *cachep);
116#else 116#else
@@ -166,7 +166,7 @@ found:
166extern void *__kmalloc_node(size_t size, gfp_t flags, int node); 166extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
167extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 167extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
168 168
169#ifdef CONFIG_KMEMTRACE 169#ifdef CONFIG_TRACING
170extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, 170extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
171 gfp_t flags, 171 gfp_t flags,
172 int nodeid); 172 int nodeid);
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5ad70a60fd74..1e14beb23f9b 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -217,7 +217,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
217void *kmem_cache_alloc(struct kmem_cache *, gfp_t); 217void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
218void *__kmalloc(size_t size, gfp_t flags); 218void *__kmalloc(size_t size, gfp_t flags);
219 219
220#ifdef CONFIG_KMEMTRACE 220#ifdef CONFIG_TRACING
221extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags); 221extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
222#else 222#else
223static __always_inline void * 223static __always_inline void *
@@ -266,7 +266,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
266void *__kmalloc_node(size_t size, gfp_t flags, int node); 266void *__kmalloc_node(size_t size, gfp_t flags, int node);
267void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); 267void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
268 268
269#ifdef CONFIG_KMEMTRACE 269#ifdef CONFIG_TRACING
270extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, 270extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
271 gfp_t gfpflags, 271 gfp_t gfpflags,
272 int node); 272 int node);
diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h
index d53642d2d899..67ed2c542831 100644
--- a/include/linux/sm501-regs.h
+++ b/include/linux/sm501-regs.h
@@ -31,6 +31,8 @@
31#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11) 31#define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11)
32#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15) 32#define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15)
33 33
34#define SM501_SYSCTRL_2D_ENGINE_STATUS (1<<19)
35
34/* miscellaneous control */ 36/* miscellaneous control */
35 37
36#define SM501_MISC_CONTROL (0x000004) 38#define SM501_MISC_CONTROL (0x000004)
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h
index 34c4475ac4a2..4f95c1aac2fd 100644
--- a/include/linux/sonypi.h
+++ b/include/linux/sonypi.h
@@ -111,6 +111,7 @@
111#define SONYPI_EVENT_VOLUME_INC_PRESSED 69 111#define SONYPI_EVENT_VOLUME_INC_PRESSED 69
112#define SONYPI_EVENT_VOLUME_DEC_PRESSED 70 112#define SONYPI_EVENT_VOLUME_DEC_PRESSED 70
113#define SONYPI_EVENT_BRIGHTNESS_PRESSED 71 113#define SONYPI_EVENT_BRIGHTNESS_PRESSED 71
114#define SONYPI_EVENT_MEDIA_PRESSED 72
114 115
115/* get/set brightness */ 116/* get/set brightness */
116#define SONYPI_IOCGBRT _IOR('v', 0, __u8) 117#define SONYPI_IOCGBRT _IOR('v', 0, __u8)
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h
new file mode 100644
index 000000000000..51b3e771a9a3
--- /dev/null
+++ b/include/linux/spi/dw_spi.h
@@ -0,0 +1,212 @@
1#ifndef DW_SPI_HEADER_H
2#define DW_SPI_HEADER_H
3#include <linux/io.h>
4
5/* Bit fields in CTRLR0 */
6#define SPI_DFS_OFFSET 0
7
8#define SPI_FRF_OFFSET 4
9#define SPI_FRF_SPI 0x0
10#define SPI_FRF_SSP 0x1
11#define SPI_FRF_MICROWIRE 0x2
12#define SPI_FRF_RESV 0x3
13
14#define SPI_MODE_OFFSET 6
15#define SPI_SCPH_OFFSET 6
16#define SPI_SCOL_OFFSET 7
17#define SPI_TMOD_OFFSET 8
18#define SPI_TMOD_TR 0x0 /* xmit & recv */
19#define SPI_TMOD_TO 0x1 /* xmit only */
20#define SPI_TMOD_RO 0x2 /* recv only */
21#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
22
23#define SPI_SLVOE_OFFSET 10
24#define SPI_SRL_OFFSET 11
25#define SPI_CFS_OFFSET 12
26
27/* Bit fields in SR, 7 bits */
28#define SR_MASK 0x7f /* cover 7 bits */
29#define SR_BUSY (1 << 0)
30#define SR_TF_NOT_FULL (1 << 1)
31#define SR_TF_EMPT (1 << 2)
32#define SR_RF_NOT_EMPT (1 << 3)
33#define SR_RF_FULL (1 << 4)
34#define SR_TX_ERR (1 << 5)
35#define SR_DCOL (1 << 6)
36
37/* Bit fields in ISR, IMR, RISR, 7 bits */
38#define SPI_INT_TXEI (1 << 0)
39#define SPI_INT_TXOI (1 << 1)
40#define SPI_INT_RXUI (1 << 2)
41#define SPI_INT_RXOI (1 << 3)
42#define SPI_INT_RXFI (1 << 4)
43#define SPI_INT_MSTI (1 << 5)
44
45/* TX/RX interrupt level threshold, max can be 256 */
46#define SPI_INT_THRESHOLD 32
47
48enum dw_ssi_type {
49 SSI_MOTO_SPI = 0,
50 SSI_TI_SSP,
51 SSI_NS_MICROWIRE,
52};
53
54struct dw_spi_reg {
55 u32 ctrl0;
56 u32 ctrl1;
57 u32 ssienr;
58 u32 mwcr;
59 u32 ser;
60 u32 baudr;
61 u32 txfltr;
62 u32 rxfltr;
63 u32 txflr;
64 u32 rxflr;
65 u32 sr;
66 u32 imr;
67 u32 isr;
68 u32 risr;
69 u32 txoicr;
70 u32 rxoicr;
71 u32 rxuicr;
72 u32 msticr;
73 u32 icr;
74 u32 dmacr;
75 u32 dmatdlr;
76 u32 dmardlr;
77 u32 idr;
78 u32 version;
79 u32 dr; /* Currently operated as 32 bits,
80 though only the low 16 bits matter */
81} __packed;
82
83struct dw_spi {
84 struct spi_master *master;
85 struct spi_device *cur_dev;
86 struct device *parent_dev;
87 enum dw_ssi_type type;
88
89 void __iomem *regs;
90 unsigned long paddr;
91 u32 iolen;
92 int irq;
93 u32 max_freq; /* max bus freq supported */
94
95 u16 bus_num;
96 u16 num_cs; /* supported slave numbers */
97
98 /* Driver message queue */
99 struct workqueue_struct *workqueue;
100 struct work_struct pump_messages;
101 spinlock_t lock;
102 struct list_head queue;
103 int busy;
104 int run;
105
106 /* Message Transfer pump */
107 struct tasklet_struct pump_transfers;
108
109 /* Current message transfer state info */
110 struct spi_message *cur_msg;
111 struct spi_transfer *cur_transfer;
112 struct chip_data *cur_chip;
113 struct chip_data *prev_chip;
114 size_t len;
115 void *tx;
116 void *tx_end;
117 void *rx;
118 void *rx_end;
119 int dma_mapped;
120 dma_addr_t rx_dma;
121 dma_addr_t tx_dma;
122 size_t rx_map_len;
123 size_t tx_map_len;
124 u8 n_bytes; /* current op is 1 or 2 bytes */
125 u8 max_bits_per_word; /* maximum is 16 bits */
126 u32 dma_width;
127 int cs_change;
128 int (*write)(struct dw_spi *dws);
129 int (*read)(struct dw_spi *dws);
130 irqreturn_t (*transfer_handler)(struct dw_spi *dws);
131 void (*cs_control)(u32 command);
132
133 /* Dma info */
134 int dma_inited;
135 struct dma_chan *txchan;
136 struct dma_chan *rxchan;
137 int txdma_done;
138 int rxdma_done;
139 u64 tx_param;
140 u64 rx_param;
141 struct device *dma_dev;
142 dma_addr_t dma_addr;
143
144 /* Bus interface info */
145 void *priv;
146#ifdef CONFIG_DEBUG_FS
147 struct dentry *debugfs;
148#endif
149};
150
151#define dw_readl(dw, name) \
152 __raw_readl(&(((struct dw_spi_reg *)dw->regs)->name))
153#define dw_writel(dw, name, val) \
154 __raw_writel((val), &(((struct dw_spi_reg *)dw->regs)->name))
155#define dw_readw(dw, name) \
156 __raw_readw(&(((struct dw_spi_reg *)dw->regs)->name))
157#define dw_writew(dw, name, val) \
158 __raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name))
159
160static inline void spi_enable_chip(struct dw_spi *dws, int enable)
161{
162 dw_writel(dws, ssienr, (enable ? 1 : 0));
163}
164
165static inline void spi_set_clk(struct dw_spi *dws, u16 div)
166{
167 dw_writel(dws, baudr, div);
168}
169
170static inline void spi_chip_sel(struct dw_spi *dws, u16 cs)
171{
172 if (cs > dws->num_cs)
173 return;
174 dw_writel(dws, ser, 1 << cs);
175}
176
177/* Disable IRQ bits */
178static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
179{
180 u32 new_mask;
181
182 new_mask = dw_readl(dws, imr) & ~mask;
183 dw_writel(dws, imr, new_mask);
184}
185
186/* Enable IRQ bits */
187static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
188{
189 u32 new_mask;
190
191 new_mask = dw_readl(dws, imr) | mask;
192 dw_writel(dws, imr, new_mask);
193}
194
195/*
196 * Each SPI slave device working with the dw_api controller should
197 * have such a structure claiming its working mode (PIO/DMA etc.),
198 * which can be saved in the "controller_data" member of
199 * struct spi_device.
200 */
201struct dw_spi_chip {
202 u8 poll_mode; /* 0 for controller polling mode */
203 u8 type; /* SPI/SSP/Microwire */
204 u8 enable_dma;
205 void (*cs_control)(u32 command);
206};
207
208extern int dw_spi_add_host(struct dw_spi *dws);
209extern void dw_spi_remove_host(struct dw_spi *dws);
210extern int dw_spi_suspend_host(struct dw_spi *dws);
211extern int dw_spi_resume_host(struct dw_spi *dws);
212#endif /* DW_SPI_HEADER_H */
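The new dw_spi.h header expects each slave device to hand its per-chip settings to the controller driver through the standard controller_data pointer of the SPI core. A board-file sketch under that assumption; the device name, bus/chip-select numbers and the cs_control callback are invented for the example, and only struct dw_spi_chip and enum dw_ssi_type come from the header above.

/*
 * Hypothetical board-file fragment, not part of this patch.
 */
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/dw_spi.h>

static void example_cs_control(u32 command)
{
	/* toggle an external GPIO chip select here, if the board needs it */
}

static struct dw_spi_chip example_chip = {
	.type		= SSI_MOTO_SPI,		/* Motorola SPI frame format */
	.enable_dma	= 0,			/* PIO transfers */
	.cs_control	= example_cs_control,
};

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	 = "example-spi-dev",
		.max_speed_hz	 = 1000000,
		.bus_num	 = 0,
		.chip_select	 = 0,
		.controller_data = &example_chip,
	},
};

/* board init code would then call:
 *	spi_register_board_info(example_board_info,
 *				ARRAY_SIZE(example_board_info));
 */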
diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h
new file mode 100644
index 000000000000..2e8db3d2d2e5
--- /dev/null
+++ b/include/linux/spi/sh_msiof.h
@@ -0,0 +1,10 @@
1#ifndef __SPI_SH_MSIOF_H__
2#define __SPI_SH_MSIOF_H__
3
4struct sh_msiof_spi_info {
5 int tx_fifo_override;
6 int rx_fifo_override;
7 u16 num_chipselect;
8};
9
10#endif /* __SPI_SH_MSIOF_H__ */
diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h
new file mode 100644
index 000000000000..6f17278810b0
--- /dev/null
+++ b/include/linux/spi/xilinx_spi.h
@@ -0,0 +1,20 @@
1#ifndef __LINUX_SPI_XILINX_SPI_H
2#define __LINUX_SPI_XILINX_SPI_H
3
4/**
5 * struct xspi_platform_data - Platform data of the Xilinx SPI driver
6 * @num_chipselect: Number of chip selects supported by the IP.
7 * @little_endian: Whether registers should be accessed as little endian.
8 * @bits_per_word: Number of bits per word.
9 * @devices: Devices to add when the driver is probed.
10 * @num_devices: Number of devices in the devices array.
11 */
12struct xspi_platform_data {
13 u16 num_chipselect;
14 bool little_endian;
15 u8 bits_per_word;
16 struct spi_board_info *devices;
17 u8 num_devices;
18};
19
20#endif /* __LINUX_SPI_XILINX_SPI_H */
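xspi_platform_data lets a platform describe the Xilinx SPI core and the slave devices to register at probe time. A hedged sketch of a platform file filling it in; every name and number here is invented for the example, and only the structure and its fields come from the header above.

/*
 * Hypothetical platform data for one Xilinx SPI core with a single slave.
 */
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/xilinx_spi.h>

static struct spi_board_info example_xspi_slaves[] = {
	{
		.modalias	= "spidev",
		.max_speed_hz	= 5000000,
		.chip_select	= 0,
	},
};

static struct xspi_platform_data example_xspi_pdata = {
	.num_chipselect	= 1,
	.little_endian	= false,
	.bits_per_word	= 8,
	.devices	= example_xspi_slaves,
	.num_devices	= ARRAY_SIZE(example_xspi_slaves),
};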
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 71dccfeb0d88..86088213334a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -8,13 +8,13 @@
8 * 8 *
9 * on SMP builds: 9 * on SMP builds:
10 * 10 *
11 * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the 11 * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
12 * initializers 12 * initializers
13 * 13 *
14 * linux/spinlock_types.h: 14 * linux/spinlock_types.h:
15 * defines the generic type and initializers 15 * defines the generic type and initializers
16 * 16 *
17 * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel 17 * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel
18 * implementations, mostly inline assembly code 18 * implementations, mostly inline assembly code
19 * 19 *
20 * (also included on UP-debug builds:) 20 * (also included on UP-debug builds:)
@@ -34,7 +34,7 @@
34 * defines the generic type and initializers 34 * defines the generic type and initializers
35 * 35 *
36 * linux/spinlock_up.h: 36 * linux/spinlock_up.h:
37 * contains the __raw_spin_*()/etc. version of UP 37 * contains the arch_spin_*()/etc. version of UP
38 * builds. (which are NOPs on non-debug, non-preempt 38 * builds. (which are NOPs on non-debug, non-preempt
39 * builds) 39 * builds)
40 * 40 *
@@ -75,12 +75,12 @@
75#define __lockfunc __attribute__((section(".spinlock.text"))) 75#define __lockfunc __attribute__((section(".spinlock.text")))
76 76
77/* 77/*
78 * Pull the raw_spinlock_t and raw_rwlock_t definitions: 78 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
79 */ 79 */
80#include <linux/spinlock_types.h> 80#include <linux/spinlock_types.h>
81 81
82/* 82/*
83 * Pull the __raw*() functions/declarations (UP-nondebug doesnt need them): 83 * Pull the arch_spin*() functions/declarations (UP-nondebug doesnt need them):
84 */ 84 */
85#ifdef CONFIG_SMP 85#ifdef CONFIG_SMP
86# include <asm/spinlock.h> 86# include <asm/spinlock.h>
@@ -89,45 +89,31 @@
89#endif 89#endif
90 90
91#ifdef CONFIG_DEBUG_SPINLOCK 91#ifdef CONFIG_DEBUG_SPINLOCK
92 extern void __spin_lock_init(spinlock_t *lock, const char *name, 92 extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
93 struct lock_class_key *key); 93 struct lock_class_key *key);
94# define spin_lock_init(lock) \ 94# define raw_spin_lock_init(lock) \
95do { \ 95do { \
96 static struct lock_class_key __key; \ 96 static struct lock_class_key __key; \
97 \ 97 \
98 __spin_lock_init((lock), #lock, &__key); \ 98 __raw_spin_lock_init((lock), #lock, &__key); \
99} while (0) 99} while (0)
100 100
101#else 101#else
102# define spin_lock_init(lock) \ 102# define raw_spin_lock_init(lock) \
103 do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) 103 do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
104#endif 104#endif
105 105
106#ifdef CONFIG_DEBUG_SPINLOCK 106#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
107 extern void __rwlock_init(rwlock_t *lock, const char *name,
108 struct lock_class_key *key);
109# define rwlock_init(lock) \
110do { \
111 static struct lock_class_key __key; \
112 \
113 __rwlock_init((lock), #lock, &__key); \
114} while (0)
115#else
116# define rwlock_init(lock) \
117 do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
118#endif
119
120#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
121 107
122#ifdef CONFIG_GENERIC_LOCKBREAK 108#ifdef CONFIG_GENERIC_LOCKBREAK
123#define spin_is_contended(lock) ((lock)->break_lock) 109#define raw_spin_is_contended(lock) ((lock)->break_lock)
124#else 110#else
125 111
126#ifdef __raw_spin_is_contended 112#ifdef arch_spin_is_contended
127#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) 113#define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock)
128#else 114#else
129#define spin_is_contended(lock) (((void)(lock), 0)) 115#define raw_spin_is_contended(lock) (((void)(lock), 0))
130#endif /*__raw_spin_is_contended*/ 116#endif /*arch_spin_is_contended*/
131#endif 117#endif
132 118
133/* The lock does not imply full memory barrier. */ 119/* The lock does not imply full memory barrier. */
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
136#endif 122#endif
137 123
138/** 124/**
139 * spin_unlock_wait - wait until the spinlock gets unlocked 125 * raw_spin_unlock_wait - wait until the spinlock gets unlocked
140 * @lock: the spinlock in question. 126 * @lock: the spinlock in question.
141 */ 127 */
142#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) 128#define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock)
143 129
144#ifdef CONFIG_DEBUG_SPINLOCK 130#ifdef CONFIG_DEBUG_SPINLOCK
145 extern void _raw_spin_lock(spinlock_t *lock); 131 extern void do_raw_spin_lock(raw_spinlock_t *lock);
146#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) 132#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
147 extern int _raw_spin_trylock(spinlock_t *lock); 133 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
148 extern void _raw_spin_unlock(spinlock_t *lock); 134 extern void do_raw_spin_unlock(raw_spinlock_t *lock);
149 extern void _raw_read_lock(rwlock_t *lock);
150#define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock)
151 extern int _raw_read_trylock(rwlock_t *lock);
152 extern void _raw_read_unlock(rwlock_t *lock);
153 extern void _raw_write_lock(rwlock_t *lock);
154#define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock)
155 extern int _raw_write_trylock(rwlock_t *lock);
156 extern void _raw_write_unlock(rwlock_t *lock);
157#else 135#else
158# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) 136static inline void do_raw_spin_lock(raw_spinlock_t *lock)
159# define _raw_spin_lock_flags(lock, flags) \ 137{
160 __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) 138 arch_spin_lock(&lock->raw_lock);
161# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) 139}
162# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) 140
163# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) 141static inline void
164# define _raw_read_lock_flags(lock, flags) \ 142do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
165 __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) 143{
166# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) 144 arch_spin_lock_flags(&lock->raw_lock, *flags);
167# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) 145}
168# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) 146
169# define _raw_write_lock_flags(lock, flags) \ 147static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
170 __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) 148{
171# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) 149 return arch_spin_trylock(&(lock)->raw_lock);
172# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) 150}
151
152static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
153{
154 arch_spin_unlock(&lock->raw_lock);
155}
173#endif 156#endif
174 157
175#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
176#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
177
178/* 158/*
179 * Define the various spin_lock and rw_lock methods. Note we define these 159 * Define the various spin_lock methods. Note we define these
180 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various 160 * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The
181 * methods are defined as nops in the case they are not required. 161 * various methods are defined as nops in the case they are not
162 * required.
182 */ 163 */
183#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) 164#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
184#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
185#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
186 165
187#define spin_lock(lock) _spin_lock(lock) 166#define raw_spin_lock(lock) _raw_spin_lock(lock)
188 167
189#ifdef CONFIG_DEBUG_LOCK_ALLOC 168#ifdef CONFIG_DEBUG_LOCK_ALLOC
190# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) 169# define raw_spin_lock_nested(lock, subclass) \
191# define spin_lock_nest_lock(lock, nest_lock) \ 170 _raw_spin_lock_nested(lock, subclass)
171
172# define raw_spin_lock_nest_lock(lock, nest_lock) \
192 do { \ 173 do { \
193 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ 174 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
194 _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ 175 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
195 } while (0) 176 } while (0)
196#else 177#else
197# define spin_lock_nested(lock, subclass) _spin_lock(lock) 178# define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock)
198# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) 179# define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock)
199#endif 180#endif
200 181
201#define write_lock(lock) _write_lock(lock)
202#define read_lock(lock) _read_lock(lock)
203
204#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) 182#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
205 183
206#define spin_lock_irqsave(lock, flags) \ 184#define raw_spin_lock_irqsave(lock, flags) \
207 do { \ 185 do { \
208 typecheck(unsigned long, flags); \ 186 typecheck(unsigned long, flags); \
209 flags = _spin_lock_irqsave(lock); \ 187 flags = _raw_spin_lock_irqsave(lock); \
210 } while (0)
211#define read_lock_irqsave(lock, flags) \
212 do { \
213 typecheck(unsigned long, flags); \
214 flags = _read_lock_irqsave(lock); \
215 } while (0)
216#define write_lock_irqsave(lock, flags) \
217 do { \
218 typecheck(unsigned long, flags); \
219 flags = _write_lock_irqsave(lock); \
220 } while (0) 188 } while (0)
221 189
222#ifdef CONFIG_DEBUG_LOCK_ALLOC 190#ifdef CONFIG_DEBUG_LOCK_ALLOC
223#define spin_lock_irqsave_nested(lock, flags, subclass) \ 191#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
224 do { \ 192 do { \
225 typecheck(unsigned long, flags); \ 193 typecheck(unsigned long, flags); \
226 flags = _spin_lock_irqsave_nested(lock, subclass); \ 194 flags = _raw_spin_lock_irqsave_nested(lock, subclass); \
227 } while (0) 195 } while (0)
228#else 196#else
229#define spin_lock_irqsave_nested(lock, flags, subclass) \ 197#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
230 do { \ 198 do { \
231 typecheck(unsigned long, flags); \ 199 typecheck(unsigned long, flags); \
232 flags = _spin_lock_irqsave(lock); \ 200 flags = _raw_spin_lock_irqsave(lock); \
233 } while (0) 201 } while (0)
234#endif 202#endif
235 203
236#else 204#else
237 205
238#define spin_lock_irqsave(lock, flags) \ 206#define raw_spin_lock_irqsave(lock, flags) \
239 do { \
240 typecheck(unsigned long, flags); \
241 _spin_lock_irqsave(lock, flags); \
242 } while (0)
243#define read_lock_irqsave(lock, flags) \
244 do { \
245 typecheck(unsigned long, flags); \
246 _read_lock_irqsave(lock, flags); \
247 } while (0)
248#define write_lock_irqsave(lock, flags) \
249 do { \ 207 do { \
250 typecheck(unsigned long, flags); \ 208 typecheck(unsigned long, flags); \
251 _write_lock_irqsave(lock, flags); \ 209 _raw_spin_lock_irqsave(lock, flags); \
252 } while (0) 210 } while (0)
253#define spin_lock_irqsave_nested(lock, flags, subclass) \
254 spin_lock_irqsave(lock, flags)
255 211
256#endif 212#define raw_spin_lock_irqsave_nested(lock, flags, subclass) \
213 raw_spin_lock_irqsave(lock, flags)
257 214
258#define spin_lock_irq(lock) _spin_lock_irq(lock) 215#endif
259#define spin_lock_bh(lock) _spin_lock_bh(lock)
260#define read_lock_irq(lock) _read_lock_irq(lock)
261#define read_lock_bh(lock) _read_lock_bh(lock)
262#define write_lock_irq(lock) _write_lock_irq(lock)
263#define write_lock_bh(lock) _write_lock_bh(lock)
264#define spin_unlock(lock) _spin_unlock(lock)
265#define read_unlock(lock) _read_unlock(lock)
266#define write_unlock(lock) _write_unlock(lock)
267#define spin_unlock_irq(lock) _spin_unlock_irq(lock)
268#define read_unlock_irq(lock) _read_unlock_irq(lock)
269#define write_unlock_irq(lock) _write_unlock_irq(lock)
270
271#define spin_unlock_irqrestore(lock, flags) \
272 do { \
273 typecheck(unsigned long, flags); \
274 _spin_unlock_irqrestore(lock, flags); \
275 } while (0)
276#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
277 216
278#define read_unlock_irqrestore(lock, flags) \ 217#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
279 do { \ 218#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
280 typecheck(unsigned long, flags); \ 219#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
281 _read_unlock_irqrestore(lock, flags); \ 220#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
282 } while (0)
283#define read_unlock_bh(lock) _read_unlock_bh(lock)
284 221
285#define write_unlock_irqrestore(lock, flags) \ 222#define raw_spin_unlock_irqrestore(lock, flags) \
286 do { \ 223 do { \
287 typecheck(unsigned long, flags); \ 224 typecheck(unsigned long, flags); \
288 _write_unlock_irqrestore(lock, flags); \ 225 _raw_spin_unlock_irqrestore(lock, flags); \
289 } while (0) 226 } while (0)
290#define write_unlock_bh(lock) _write_unlock_bh(lock) 227#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
291 228
292#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) 229#define raw_spin_trylock_bh(lock) \
230 __cond_lock(lock, _raw_spin_trylock_bh(lock))
293 231
294#define spin_trylock_irq(lock) \ 232#define raw_spin_trylock_irq(lock) \
295({ \ 233({ \
296 local_irq_disable(); \ 234 local_irq_disable(); \
297 spin_trylock(lock) ? \ 235 raw_spin_trylock(lock) ? \
298 1 : ({ local_irq_enable(); 0; }); \ 236 1 : ({ local_irq_enable(); 0; }); \
299}) 237})
300 238
301#define spin_trylock_irqsave(lock, flags) \ 239#define raw_spin_trylock_irqsave(lock, flags) \
302({ \ 240({ \
303 local_irq_save(flags); \ 241 local_irq_save(flags); \
304 spin_trylock(lock) ? \ 242 raw_spin_trylock(lock) ? \
305 1 : ({ local_irq_restore(flags); 0; }); \ 243 1 : ({ local_irq_restore(flags); 0; }); \
306}) 244})
307 245
308#define write_trylock_irqsave(lock, flags) \ 246/**
309({ \ 247 * raw_spin_can_lock - would raw_spin_trylock() succeed?
310 local_irq_save(flags); \ 248 * @lock: the spinlock in question.
311 write_trylock(lock) ? \ 249 */
312 1 : ({ local_irq_restore(flags); 0; }); \ 250#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
251
252/* Include rwlock functions */
253#include <linux/rwlock.h>
254
255/*
256 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
257 */
258#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
259# include <linux/spinlock_api_smp.h>
260#else
261# include <linux/spinlock_api_up.h>
262#endif
263
264/*
265 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
266 */
267
268static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
269{
270 return &lock->rlock;
271}
272
273#define spin_lock_init(_lock) \
274do { \
275 spinlock_check(_lock); \
276 raw_spin_lock_init(&(_lock)->rlock); \
277} while (0)
278
279static inline void spin_lock(spinlock_t *lock)
280{
281 raw_spin_lock(&lock->rlock);
282}
283
284static inline void spin_lock_bh(spinlock_t *lock)
285{
286 raw_spin_lock_bh(&lock->rlock);
287}
288
289static inline int spin_trylock(spinlock_t *lock)
290{
291 return raw_spin_trylock(&lock->rlock);
292}
293
294#define spin_lock_nested(lock, subclass) \
295do { \
296 raw_spin_lock_nested(spinlock_check(lock), subclass); \
297} while (0)
298
299#define spin_lock_nest_lock(lock, nest_lock) \
300do { \
301 raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
302} while (0)
303
304static inline void spin_lock_irq(spinlock_t *lock)
305{
306 raw_spin_lock_irq(&lock->rlock);
307}
308
309#define spin_lock_irqsave(lock, flags) \
310do { \
311 raw_spin_lock_irqsave(spinlock_check(lock), flags); \
312} while (0)
313
314#define spin_lock_irqsave_nested(lock, flags, subclass) \
315do { \
316 raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
317} while (0)
318
319static inline void spin_unlock(spinlock_t *lock)
320{
321 raw_spin_unlock(&lock->rlock);
322}
323
324static inline void spin_unlock_bh(spinlock_t *lock)
325{
326 raw_spin_unlock_bh(&lock->rlock);
327}
328
329static inline void spin_unlock_irq(spinlock_t *lock)
330{
331 raw_spin_unlock_irq(&lock->rlock);
332}
333
334static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
335{
336 raw_spin_unlock_irqrestore(&lock->rlock, flags);
337}
338
339static inline int spin_trylock_bh(spinlock_t *lock)
340{
341 return raw_spin_trylock_bh(&lock->rlock);
342}
343
344static inline int spin_trylock_irq(spinlock_t *lock)
345{
346 return raw_spin_trylock_irq(&lock->rlock);
347}
348
349#define spin_trylock_irqsave(lock, flags) \
350({ \
351 raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
313}) 352})
314 353
354static inline void spin_unlock_wait(spinlock_t *lock)
355{
356 raw_spin_unlock_wait(&lock->rlock);
357}
358
359static inline int spin_is_locked(spinlock_t *lock)
360{
361 return raw_spin_is_locked(&lock->rlock);
362}
363
364static inline int spin_is_contended(spinlock_t *lock)
365{
366 return raw_spin_is_contended(&lock->rlock);
367}
368
369static inline int spin_can_lock(spinlock_t *lock)
370{
371 return raw_spin_can_lock(&lock->rlock);
372}
373
374static inline void assert_spin_locked(spinlock_t *lock)
375{
376 assert_raw_spin_locked(&lock->rlock);
377}
378
315/* 379/*
316 * Pull the atomic_t declaration: 380 * Pull the atomic_t declaration:
317 * (asm-mips/atomic.h needs above definitions) 381 * (asm-mips/atomic.h needs above definitions)
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
329#define atomic_dec_and_lock(atomic, lock) \ 393#define atomic_dec_and_lock(atomic, lock) \
330 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) 394 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
331 395
332/**
333 * spin_can_lock - would spin_trylock() succeed?
334 * @lock: the spinlock in question.
335 */
336#define spin_can_lock(lock) (!spin_is_locked(lock))
337
338/*
339 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
340 */
341#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
342# include <linux/spinlock_api_smp.h>
343#else
344# include <linux/spinlock_api_up.h>
345#endif
346
347#endif /* __LINUX_SPINLOCK_H */ 396#endif /* __LINUX_SPINLOCK_H */
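The spinlock.h rework splits the old spinlock_t into a raw_spinlock_t core plus a spinlock_t wrapper around ->rlock, with the spin_* API now mapping onto the raw_spin_* variants. A sketch of how the two families sit side by side; the locks and the function are invented for the example, and every locking call used here appears in the diff above.

/*
 * Illustrative only: existing spin_* users keep working unchanged, while
 * the new raw_spin_* API is available where the raw type is required.
 */
#include <linux/spinlock.h>

static spinlock_t example_lock;		/* wrapper type, has ->rlock */
static raw_spinlock_t example_raw_lock;	/* new raw type */

static void example_locking(void)
{
	unsigned long flags;

	spin_lock_init(&example_lock);
	raw_spin_lock_init(&example_raw_lock);

	spin_lock_irqsave(&example_lock, flags);	/* maps to raw variant */
	spin_unlock_irqrestore(&example_lock, flags);

	raw_spin_lock_irqsave(&example_raw_lock, flags);
	raw_spin_unlock_irqrestore(&example_raw_lock, flags);
}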
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index 8264a7f459bc..e253ccd7a604 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -17,165 +17,76 @@
17 17
18int in_lock_functions(unsigned long addr); 18int in_lock_functions(unsigned long addr);
19 19
20#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) 20#define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x))
21 21
22void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); 22void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
23void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) 23void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
24 __acquires(lock); 24 __acquires(lock);
25void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) 25void __lockfunc
26 __acquires(lock); 26_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map)
27void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); 27 __acquires(lock);
28void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); 28void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock);
29void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); 29void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
30void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); 30 __acquires(lock);
31void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); 31
32void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); 32unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
33void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); 33 __acquires(lock);
34void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); 34unsigned long __lockfunc
35unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) 35_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass)
36 __acquires(lock); 36 __acquires(lock);
37unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) 37int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock);
38 __acquires(lock); 38int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock);
39unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) 39void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
40 __acquires(lock); 40void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock);
41unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) 41void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock);
42 __acquires(lock); 42void __lockfunc
43int __lockfunc _spin_trylock(spinlock_t *lock); 43_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
44int __lockfunc _read_trylock(rwlock_t *lock); 44 __releases(lock);
45int __lockfunc _write_trylock(rwlock_t *lock);
46int __lockfunc _spin_trylock_bh(spinlock_t *lock);
47void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock);
48void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock);
49void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock);
50void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock);
51void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock);
52void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock);
53void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock);
54void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock);
55void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock);
56void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
57 __releases(lock);
58void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
59 __releases(lock);
60void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
61 __releases(lock);
62 45
63#ifdef CONFIG_INLINE_SPIN_LOCK 46#ifdef CONFIG_INLINE_SPIN_LOCK
64#define _spin_lock(lock) __spin_lock(lock) 47#define _raw_spin_lock(lock) __raw_spin_lock(lock)
65#endif
66
67#ifdef CONFIG_INLINE_READ_LOCK
68#define _read_lock(lock) __read_lock(lock)
69#endif
70
71#ifdef CONFIG_INLINE_WRITE_LOCK
72#define _write_lock(lock) __write_lock(lock)
73#endif 48#endif
74 49
75#ifdef CONFIG_INLINE_SPIN_LOCK_BH 50#ifdef CONFIG_INLINE_SPIN_LOCK_BH
76#define _spin_lock_bh(lock) __spin_lock_bh(lock) 51#define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock)
77#endif
78
79#ifdef CONFIG_INLINE_READ_LOCK_BH
80#define _read_lock_bh(lock) __read_lock_bh(lock)
81#endif
82
83#ifdef CONFIG_INLINE_WRITE_LOCK_BH
84#define _write_lock_bh(lock) __write_lock_bh(lock)
85#endif 52#endif
86 53
87#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ 54#ifdef CONFIG_INLINE_SPIN_LOCK_IRQ
88#define _spin_lock_irq(lock) __spin_lock_irq(lock) 55#define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock)
89#endif
90
91#ifdef CONFIG_INLINE_READ_LOCK_IRQ
92#define _read_lock_irq(lock) __read_lock_irq(lock)
93#endif
94
95#ifdef CONFIG_INLINE_WRITE_LOCK_IRQ
96#define _write_lock_irq(lock) __write_lock_irq(lock)
97#endif 56#endif
98 57
99#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE 58#ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
100#define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) 59#define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock)
101#endif
102
103#ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE
104#define _read_lock_irqsave(lock) __read_lock_irqsave(lock)
105#endif
106
107#ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
108#define _write_lock_irqsave(lock) __write_lock_irqsave(lock)
109#endif 60#endif
110 61
111#ifdef CONFIG_INLINE_SPIN_TRYLOCK 62#ifdef CONFIG_INLINE_SPIN_TRYLOCK
112#define _spin_trylock(lock) __spin_trylock(lock) 63#define _raw_spin_trylock(lock) __raw_spin_trylock(lock)
113#endif
114
115#ifdef CONFIG_INLINE_READ_TRYLOCK
116#define _read_trylock(lock) __read_trylock(lock)
117#endif
118
119#ifdef CONFIG_INLINE_WRITE_TRYLOCK
120#define _write_trylock(lock) __write_trylock(lock)
121#endif 64#endif
122 65
123#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH 66#ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH
124#define _spin_trylock_bh(lock) __spin_trylock_bh(lock) 67#define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock)
125#endif 68#endif
126 69
127#ifdef CONFIG_INLINE_SPIN_UNLOCK 70#ifdef CONFIG_INLINE_SPIN_UNLOCK
128#define _spin_unlock(lock) __spin_unlock(lock) 71#define _raw_spin_unlock(lock) __raw_spin_unlock(lock)
129#endif
130
131#ifdef CONFIG_INLINE_READ_UNLOCK
132#define _read_unlock(lock) __read_unlock(lock)
133#endif
134
135#ifdef CONFIG_INLINE_WRITE_UNLOCK
136#define _write_unlock(lock) __write_unlock(lock)
137#endif 72#endif
138 73
139#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH 74#ifdef CONFIG_INLINE_SPIN_UNLOCK_BH
140#define _spin_unlock_bh(lock) __spin_unlock_bh(lock) 75#define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock)
141#endif
142
143#ifdef CONFIG_INLINE_READ_UNLOCK_BH
144#define _read_unlock_bh(lock) __read_unlock_bh(lock)
145#endif
146
147#ifdef CONFIG_INLINE_WRITE_UNLOCK_BH
148#define _write_unlock_bh(lock) __write_unlock_bh(lock)
149#endif 76#endif
150 77
151#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ 78#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ
152#define _spin_unlock_irq(lock) __spin_unlock_irq(lock) 79#define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock)
153#endif
154
155#ifdef CONFIG_INLINE_READ_UNLOCK_IRQ
156#define _read_unlock_irq(lock) __read_unlock_irq(lock)
157#endif
158
159#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ
160#define _write_unlock_irq(lock) __write_unlock_irq(lock)
161#endif 80#endif
162 81
163#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE 82#ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
164#define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) 83#define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags)
165#endif
166
167#ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
168#define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags)
169#endif
170
171#ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
172#define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags)
173#endif 84#endif
174 85
175static inline int __spin_trylock(spinlock_t *lock) 86static inline int __raw_spin_trylock(raw_spinlock_t *lock)
176{ 87{
177 preempt_disable(); 88 preempt_disable();
178 if (_raw_spin_trylock(lock)) { 89 if (do_raw_spin_trylock(lock)) {
179 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); 90 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
180 return 1; 91 return 1;
181 } 92 }
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock)
183 return 0; 94 return 0;
184} 95}
185 96
186static inline int __read_trylock(rwlock_t *lock)
187{
188 preempt_disable();
189 if (_raw_read_trylock(lock)) {
190 rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_);
191 return 1;
192 }
193 preempt_enable();
194 return 0;
195}
196
197static inline int __write_trylock(rwlock_t *lock)
198{
199 preempt_disable();
200 if (_raw_write_trylock(lock)) {
201 rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_);
202 return 1;
203 }
204 preempt_enable();
205 return 0;
206}
207
208/* 97/*
209 * If lockdep is enabled then we use the non-preemption spin-ops 98 * If lockdep is enabled then we use the non-preemption spin-ops
210 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are 99 * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock)
212 */ 101 */
213#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) 102#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
214 103
215static inline void __read_lock(rwlock_t *lock) 104static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
216{
217 preempt_disable();
218 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
219 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
220}
221
222static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
223{ 105{
224 unsigned long flags; 106 unsigned long flags;
225 107
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock)
228 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 110 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
229 /* 111 /*
230 * On lockdep we dont want the hand-coded irq-enable of 112 * On lockdep we dont want the hand-coded irq-enable of
231 * _raw_spin_lock_flags() code, because lockdep assumes 113 * do_raw_spin_lock_flags() code, because lockdep assumes
232 * that interrupts are not re-enabled during lock-acquire: 114 * that interrupts are not re-enabled during lock-acquire:
233 */ 115 */
234#ifdef CONFIG_LOCKDEP 116#ifdef CONFIG_LOCKDEP
235 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 117 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
236#else 118#else
237 _raw_spin_lock_flags(lock, &flags); 119 do_raw_spin_lock_flags(lock, &flags);
238#endif 120#endif
239 return flags; 121 return flags;
240} 122}
241 123
242static inline void __spin_lock_irq(spinlock_t *lock) 124static inline void __raw_spin_lock_irq(raw_spinlock_t *lock)
243{ 125{
244 local_irq_disable(); 126 local_irq_disable();
245 preempt_disable(); 127 preempt_disable();
246 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 128 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
247 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 129 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
248} 130}
249 131
250static inline void __spin_lock_bh(spinlock_t *lock) 132static inline void __raw_spin_lock_bh(raw_spinlock_t *lock)
251{ 133{
252 local_bh_disable(); 134 local_bh_disable();
253 preempt_disable(); 135 preempt_disable();
254 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 136 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
255 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 137 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
256}
257
258static inline unsigned long __read_lock_irqsave(rwlock_t *lock)
259{
260 unsigned long flags;
261
262 local_irq_save(flags);
263 preempt_disable();
264 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
265 LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
266 _raw_read_lock_flags, &flags);
267 return flags;
268}
269
270static inline void __read_lock_irq(rwlock_t *lock)
271{
272 local_irq_disable();
273 preempt_disable();
274 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
275 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
276}
277
278static inline void __read_lock_bh(rwlock_t *lock)
279{
280 local_bh_disable();
281 preempt_disable();
282 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
283 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock);
284}
285
286static inline unsigned long __write_lock_irqsave(rwlock_t *lock)
287{
288 unsigned long flags;
289
290 local_irq_save(flags);
291 preempt_disable();
292 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
293 LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
294 _raw_write_lock_flags, &flags);
295 return flags;
296}
297
298static inline void __write_lock_irq(rwlock_t *lock)
299{
300 local_irq_disable();
301 preempt_disable();
302 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
303 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
304} 138}
305 139
306static inline void __write_lock_bh(rwlock_t *lock) 140static inline void __raw_spin_lock(raw_spinlock_t *lock)
307{
308 local_bh_disable();
309 preempt_disable();
310 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
311 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
312}
313
314static inline void __spin_lock(spinlock_t *lock)
315{ 141{
316 preempt_disable(); 142 preempt_disable();
317 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); 143 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
318 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); 144 LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
319}
320
321static inline void __write_lock(rwlock_t *lock)
322{
323 preempt_disable();
324 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
325 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock);
326} 145}
327 146
328#endif /* CONFIG_PREEMPT */ 147#endif /* CONFIG_PREEMPT */
329 148
330static inline void __spin_unlock(spinlock_t *lock) 149static inline void __raw_spin_unlock(raw_spinlock_t *lock)
331{ 150{
332 spin_release(&lock->dep_map, 1, _RET_IP_); 151 spin_release(&lock->dep_map, 1, _RET_IP_);
333 _raw_spin_unlock(lock); 152 do_raw_spin_unlock(lock);
334 preempt_enable();
335}
336
337static inline void __write_unlock(rwlock_t *lock)
338{
339 rwlock_release(&lock->dep_map, 1, _RET_IP_);
340 _raw_write_unlock(lock);
341 preempt_enable();
342}
343
344static inline void __read_unlock(rwlock_t *lock)
345{
346 rwlock_release(&lock->dep_map, 1, _RET_IP_);
347 _raw_read_unlock(lock);
348 preempt_enable(); 153 preempt_enable();
349} 154}
350 155
351static inline void __spin_unlock_irqrestore(spinlock_t *lock, 156static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
352 unsigned long flags) 157 unsigned long flags)
353{ 158{
354 spin_release(&lock->dep_map, 1, _RET_IP_); 159 spin_release(&lock->dep_map, 1, _RET_IP_);
355 _raw_spin_unlock(lock); 160 do_raw_spin_unlock(lock);
356 local_irq_restore(flags); 161 local_irq_restore(flags);
357 preempt_enable(); 162 preempt_enable();
358} 163}
359 164
360static inline void __spin_unlock_irq(spinlock_t *lock) 165static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
361{ 166{
362 spin_release(&lock->dep_map, 1, _RET_IP_); 167 spin_release(&lock->dep_map, 1, _RET_IP_);
363 _raw_spin_unlock(lock); 168 do_raw_spin_unlock(lock);
364 local_irq_enable(); 169 local_irq_enable();
365 preempt_enable(); 170 preempt_enable();
366} 171}
367 172
368static inline void __spin_unlock_bh(spinlock_t *lock) 173static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
369{ 174{
370 spin_release(&lock->dep_map, 1, _RET_IP_); 175 spin_release(&lock->dep_map, 1, _RET_IP_);
371 _raw_spin_unlock(lock); 176 do_raw_spin_unlock(lock);
372 preempt_enable_no_resched();
373 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
374}
375
376static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
377{
378 rwlock_release(&lock->dep_map, 1, _RET_IP_);
379 _raw_read_unlock(lock);
380 local_irq_restore(flags);
381 preempt_enable();
382}
383
384static inline void __read_unlock_irq(rwlock_t *lock)
385{
386 rwlock_release(&lock->dep_map, 1, _RET_IP_);
387 _raw_read_unlock(lock);
388 local_irq_enable();
389 preempt_enable();
390}
391
392static inline void __read_unlock_bh(rwlock_t *lock)
393{
394 rwlock_release(&lock->dep_map, 1, _RET_IP_);
395 _raw_read_unlock(lock);
396 preempt_enable_no_resched(); 177 preempt_enable_no_resched();
397 local_bh_enable_ip((unsigned long)__builtin_return_address(0)); 178 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
398} 179}
399 180
400static inline void __write_unlock_irqrestore(rwlock_t *lock, 181static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock)
401 unsigned long flags)
402{
403 rwlock_release(&lock->dep_map, 1, _RET_IP_);
404 _raw_write_unlock(lock);
405 local_irq_restore(flags);
406 preempt_enable();
407}
408
409static inline void __write_unlock_irq(rwlock_t *lock)
410{
411 rwlock_release(&lock->dep_map, 1, _RET_IP_);
412 _raw_write_unlock(lock);
413 local_irq_enable();
414 preempt_enable();
415}
416
417static inline void __write_unlock_bh(rwlock_t *lock)
418{
419 rwlock_release(&lock->dep_map, 1, _RET_IP_);
420 _raw_write_unlock(lock);
421 preempt_enable_no_resched();
422 local_bh_enable_ip((unsigned long)__builtin_return_address(0));
423}
424
425static inline int __spin_trylock_bh(spinlock_t *lock)
426{ 182{
427 local_bh_disable(); 183 local_bh_disable();
428 preempt_disable(); 184 preempt_disable();
429 if (_raw_spin_trylock(lock)) { 185 if (do_raw_spin_trylock(lock)) {
430 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); 186 spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
431 return 1; 187 return 1;
432 } 188 }
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock)
435 return 0; 191 return 0;
436} 192}
437 193
194#include <linux/rwlock_api_smp.h>
195
438#endif /* __LINUX_SPINLOCK_API_SMP_H */ 196#endif /* __LINUX_SPINLOCK_API_SMP_H */
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index 04e1d3164576..af1f47229e70 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -16,7 +16,7 @@
16 16
17#define in_lock_functions(ADDR) 0 17#define in_lock_functions(ADDR) 0
18 18
19#define assert_spin_locked(lock) do { (void)(lock); } while (0) 19#define assert_raw_spin_locked(lock) do { (void)(lock); } while (0)
20 20
21/* 21/*
22 * In the UP-nondebug case there's no real locking going on, so the 22 * In the UP-nondebug case there's no real locking going on, so the
@@ -40,7 +40,8 @@
40 do { preempt_enable(); __release(lock); (void)(lock); } while (0) 40 do { preempt_enable(); __release(lock); (void)(lock); } while (0)
41 41
42#define __UNLOCK_BH(lock) \ 42#define __UNLOCK_BH(lock) \
43 do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) 43 do { preempt_enable_no_resched(); local_bh_enable(); \
44 __release(lock); (void)(lock); } while (0)
44 45
45#define __UNLOCK_IRQ(lock) \ 46#define __UNLOCK_IRQ(lock) \
46 do { local_irq_enable(); __UNLOCK(lock); } while (0) 47 do { local_irq_enable(); __UNLOCK(lock); } while (0)
@@ -48,34 +49,37 @@
48#define __UNLOCK_IRQRESTORE(lock, flags) \ 49#define __UNLOCK_IRQRESTORE(lock, flags) \
49 do { local_irq_restore(flags); __UNLOCK(lock); } while (0) 50 do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
50 51
51#define _spin_lock(lock) __LOCK(lock) 52#define _raw_spin_lock(lock) __LOCK(lock)
52#define _spin_lock_nested(lock, subclass) __LOCK(lock) 53#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
53#define _read_lock(lock) __LOCK(lock) 54#define _raw_read_lock(lock) __LOCK(lock)
54#define _write_lock(lock) __LOCK(lock) 55#define _raw_write_lock(lock) __LOCK(lock)
55#define _spin_lock_bh(lock) __LOCK_BH(lock) 56#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
56#define _read_lock_bh(lock) __LOCK_BH(lock) 57#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
57#define _write_lock_bh(lock) __LOCK_BH(lock) 58#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
58#define _spin_lock_irq(lock) __LOCK_IRQ(lock) 59#define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock)
59#define _read_lock_irq(lock) __LOCK_IRQ(lock) 60#define _raw_read_lock_irq(lock) __LOCK_IRQ(lock)
60#define _write_lock_irq(lock) __LOCK_IRQ(lock) 61#define _raw_write_lock_irq(lock) __LOCK_IRQ(lock)
61#define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 62#define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
62#define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 63#define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
63#define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) 64#define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags)
64#define _spin_trylock(lock) ({ __LOCK(lock); 1; }) 65#define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; })
65#define _read_trylock(lock) ({ __LOCK(lock); 1; }) 66#define _raw_read_trylock(lock) ({ __LOCK(lock); 1; })
66#define _write_trylock(lock) ({ __LOCK(lock); 1; }) 67#define _raw_write_trylock(lock) ({ __LOCK(lock); 1; })
67#define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) 68#define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; })
68#define _spin_unlock(lock) __UNLOCK(lock) 69#define _raw_spin_unlock(lock) __UNLOCK(lock)
69#define _read_unlock(lock) __UNLOCK(lock) 70#define _raw_read_unlock(lock) __UNLOCK(lock)
70#define _write_unlock(lock) __UNLOCK(lock) 71#define _raw_write_unlock(lock) __UNLOCK(lock)
71#define _spin_unlock_bh(lock) __UNLOCK_BH(lock) 72#define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock)
72#define _write_unlock_bh(lock) __UNLOCK_BH(lock) 73#define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock)
73#define _read_unlock_bh(lock) __UNLOCK_BH(lock) 74#define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock)
74#define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) 75#define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock)
75#define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) 76#define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock)
76#define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) 77#define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock)
77#define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 78#define _raw_spin_unlock_irqrestore(lock, flags) \
78#define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 79 __UNLOCK_IRQRESTORE(lock, flags)
79#define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) 80#define _raw_read_unlock_irqrestore(lock, flags) \
81 __UNLOCK_IRQRESTORE(lock, flags)
82#define _raw_write_unlock_irqrestore(lock, flags) \
83 __UNLOCK_IRQRESTORE(lock, flags)
80 84
81#endif /* __LINUX_SPINLOCK_API_UP_H */ 85#endif /* __LINUX_SPINLOCK_API_UP_H */
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 68d88f71f1a2..851b7783720d 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -17,8 +17,8 @@
17 17
18#include <linux/lockdep.h> 18#include <linux/lockdep.h>
19 19
20typedef struct { 20typedef struct raw_spinlock {
21 raw_spinlock_t raw_lock; 21 arch_spinlock_t raw_lock;
22#ifdef CONFIG_GENERIC_LOCKBREAK 22#ifdef CONFIG_GENERIC_LOCKBREAK
23 unsigned int break_lock; 23 unsigned int break_lock;
24#endif 24#endif
@@ -29,26 +29,10 @@ typedef struct {
29#ifdef CONFIG_DEBUG_LOCK_ALLOC 29#ifdef CONFIG_DEBUG_LOCK_ALLOC
30 struct lockdep_map dep_map; 30 struct lockdep_map dep_map;
31#endif 31#endif
32} spinlock_t; 32} raw_spinlock_t;
33 33
34#define SPINLOCK_MAGIC 0xdead4ead 34#define SPINLOCK_MAGIC 0xdead4ead
35 35
36typedef struct {
37 raw_rwlock_t raw_lock;
38#ifdef CONFIG_GENERIC_LOCKBREAK
39 unsigned int break_lock;
40#endif
41#ifdef CONFIG_DEBUG_SPINLOCK
42 unsigned int magic, owner_cpu;
43 void *owner;
44#endif
45#ifdef CONFIG_DEBUG_LOCK_ALLOC
46 struct lockdep_map dep_map;
47#endif
48} rwlock_t;
49
50#define RWLOCK_MAGIC 0xdeaf1eed
51
52#define SPINLOCK_OWNER_INIT ((void *)-1L) 36#define SPINLOCK_OWNER_INIT ((void *)-1L)
53 37
54#ifdef CONFIG_DEBUG_LOCK_ALLOC 38#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -57,44 +41,56 @@ typedef struct {
57# define SPIN_DEP_MAP_INIT(lockname) 41# define SPIN_DEP_MAP_INIT(lockname)
58#endif 42#endif
59 43
60#ifdef CONFIG_DEBUG_LOCK_ALLOC 44#ifdef CONFIG_DEBUG_SPINLOCK
61# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } 45# define SPIN_DEBUG_INIT(lockname) \
46 .magic = SPINLOCK_MAGIC, \
47 .owner_cpu = -1, \
48 .owner = SPINLOCK_OWNER_INIT,
62#else 49#else
63# define RW_DEP_MAP_INIT(lockname) 50# define SPIN_DEBUG_INIT(lockname)
64#endif 51#endif
65 52
66#ifdef CONFIG_DEBUG_SPINLOCK 53#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
67# define __SPIN_LOCK_UNLOCKED(lockname) \ 54 { \
68 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ 55 .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
69 .magic = SPINLOCK_MAGIC, \ 56 SPIN_DEBUG_INIT(lockname) \
70 .owner = SPINLOCK_OWNER_INIT, \ 57 SPIN_DEP_MAP_INIT(lockname) }
71 .owner_cpu = -1, \ 58
72 SPIN_DEP_MAP_INIT(lockname) } 59#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
73#define __RW_LOCK_UNLOCKED(lockname) \ 60 (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
74 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ 61
75 .magic = RWLOCK_MAGIC, \ 62#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
76 .owner = SPINLOCK_OWNER_INIT, \ 63
77 .owner_cpu = -1, \ 64typedef struct spinlock {
78 RW_DEP_MAP_INIT(lockname) } 65 union {
79#else 66 struct raw_spinlock rlock;
80# define __SPIN_LOCK_UNLOCKED(lockname) \ 67
81 (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ 68#ifdef CONFIG_DEBUG_LOCK_ALLOC
82 SPIN_DEP_MAP_INIT(lockname) } 69# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
83#define __RW_LOCK_UNLOCKED(lockname) \ 70 struct {
84 (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ 71 u8 __padding[LOCK_PADSIZE];
85 RW_DEP_MAP_INIT(lockname) } 72 struct lockdep_map dep_map;
73 };
86#endif 74#endif
75 };
76} spinlock_t;
77
78#define __SPIN_LOCK_INITIALIZER(lockname) \
79 { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
80
81#define __SPIN_LOCK_UNLOCKED(lockname) \
82 (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
87 83
88/* 84/*
89 * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and 85 * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence
90 * are hence deprecated. 86 * deprecated.
91 * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or 87 * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as
92 * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. 88 * appropriate.
93 */ 89 */
94#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) 90#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
95#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
96 91
97#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) 92#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
98#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) 93
94#include <linux/rwlock_types.h>
99 95
100#endif /* __LINUX_SPINLOCK_TYPES_H */ 96#endif /* __LINUX_SPINLOCK_TYPES_H */
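
With this rework spinlock_t becomes a wrapper around the new raw_spinlock_t (which in turn wraps the architecture's arch_spinlock_t), so code that must always busy-wait can say so explicitly while ordinary locks keep the familiar type. A short sketch under that split; the two lock names are illustrative:

#include <linux/spinlock.h>

/* Ordinary lock for driver data; the common case. */
static DEFINE_SPINLOCK(dev_list_lock);

/* Raw lock for low-level code that must always spin (e.g. under -rt). */
static DEFINE_RAW_SPINLOCK(irq_state_lock);

static void touch_both(void)
{
	unsigned long flags;

	spin_lock(&dev_list_lock);
	spin_unlock(&dev_list_lock);

	raw_spin_lock_irqsave(&irq_state_lock, flags);
	raw_spin_unlock_irqrestore(&irq_state_lock, flags);
}
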
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h
index 04135b0e198e..c09b6407ae1b 100644
--- a/include/linux/spinlock_types_up.h
+++ b/include/linux/spinlock_types_up.h
@@ -16,22 +16,22 @@
16 16
17typedef struct { 17typedef struct {
18 volatile unsigned int slock; 18 volatile unsigned int slock;
19} raw_spinlock_t; 19} arch_spinlock_t;
20 20
21#define __RAW_SPIN_LOCK_UNLOCKED { 1 } 21#define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
22 22
23#else 23#else
24 24
25typedef struct { } raw_spinlock_t; 25typedef struct { } arch_spinlock_t;
26 26
27#define __RAW_SPIN_LOCK_UNLOCKED { } 27#define __ARCH_SPIN_LOCK_UNLOCKED { }
28 28
29#endif 29#endif
30 30
31typedef struct { 31typedef struct {
32 /* no debug version on UP */ 32 /* no debug version on UP */
33} raw_rwlock_t; 33} arch_rwlock_t;
34 34
35#define __RAW_RW_LOCK_UNLOCKED { } 35#define __ARCH_RW_LOCK_UNLOCKED { }
36 36
37#endif /* __LINUX_SPINLOCK_TYPES_UP_H */ 37#endif /* __LINUX_SPINLOCK_TYPES_UP_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index d4841ed8215b..b14f6a91e19f 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -18,21 +18,21 @@
18 */ 18 */
19 19
20#ifdef CONFIG_DEBUG_SPINLOCK 20#ifdef CONFIG_DEBUG_SPINLOCK
21#define __raw_spin_is_locked(x) ((x)->slock == 0) 21#define arch_spin_is_locked(x) ((x)->slock == 0)
22 22
23static inline void __raw_spin_lock(raw_spinlock_t *lock) 23static inline void arch_spin_lock(arch_spinlock_t *lock)
24{ 24{
25 lock->slock = 0; 25 lock->slock = 0;
26} 26}
27 27
28static inline void 28static inline void
29__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) 29arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
30{ 30{
31 local_irq_save(flags); 31 local_irq_save(flags);
32 lock->slock = 0; 32 lock->slock = 0;
33} 33}
34 34
35static inline int __raw_spin_trylock(raw_spinlock_t *lock) 35static inline int arch_spin_trylock(arch_spinlock_t *lock)
36{ 36{
37 char oldval = lock->slock; 37 char oldval = lock->slock;
38 38
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
41 return oldval > 0; 41 return oldval > 0;
42} 42}
43 43
44static inline void __raw_spin_unlock(raw_spinlock_t *lock) 44static inline void arch_spin_unlock(arch_spinlock_t *lock)
45{ 45{
46 lock->slock = 1; 46 lock->slock = 1;
47} 47}
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
49/* 49/*
50 * Read-write spinlocks. No debug version. 50 * Read-write spinlocks. No debug version.
51 */ 51 */
52#define __raw_read_lock(lock) do { (void)(lock); } while (0) 52#define arch_read_lock(lock) do { (void)(lock); } while (0)
53#define __raw_write_lock(lock) do { (void)(lock); } while (0) 53#define arch_write_lock(lock) do { (void)(lock); } while (0)
54#define __raw_read_trylock(lock) ({ (void)(lock); 1; }) 54#define arch_read_trylock(lock) ({ (void)(lock); 1; })
55#define __raw_write_trylock(lock) ({ (void)(lock); 1; }) 55#define arch_write_trylock(lock) ({ (void)(lock); 1; })
56#define __raw_read_unlock(lock) do { (void)(lock); } while (0) 56#define arch_read_unlock(lock) do { (void)(lock); } while (0)
57#define __raw_write_unlock(lock) do { (void)(lock); } while (0) 57#define arch_write_unlock(lock) do { (void)(lock); } while (0)
58 58
59#else /* DEBUG_SPINLOCK */ 59#else /* DEBUG_SPINLOCK */
60#define __raw_spin_is_locked(lock) ((void)(lock), 0) 60#define arch_spin_is_locked(lock) ((void)(lock), 0)
61/* for sched.c and kernel_lock.c: */ 61/* for sched.c and kernel_lock.c: */
62# define __raw_spin_lock(lock) do { (void)(lock); } while (0) 62# define arch_spin_lock(lock) do { (void)(lock); } while (0)
63# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) 63# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
64# define __raw_spin_unlock(lock) do { (void)(lock); } while (0) 64# define arch_spin_unlock(lock) do { (void)(lock); } while (0)
65# define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) 65# define arch_spin_trylock(lock) ({ (void)(lock); 1; })
66#endif /* DEBUG_SPINLOCK */ 66#endif /* DEBUG_SPINLOCK */
67 67
68#define __raw_spin_is_contended(lock) (((void)(lock), 0)) 68#define arch_spin_is_contended(lock) (((void)(lock), 0))
69 69
70#define __raw_read_can_lock(lock) (((void)(lock), 1)) 70#define arch_read_can_lock(lock) (((void)(lock), 1))
71#define __raw_write_can_lock(lock) (((void)(lock), 1)) 71#define arch_write_can_lock(lock) (((void)(lock), 1))
72 72
73#define __raw_spin_unlock_wait(lock) \ 73#define arch_spin_unlock_wait(lock) \
74 do { cpu_relax(); } while (__raw_spin_is_locked(lock)) 74 do { cpu_relax(); } while (arch_spin_is_locked(lock))
75 75
76#endif /* __LINUX_SPINLOCK_UP_H */ 76#endif /* __LINUX_SPINLOCK_UP_H */
diff --git a/include/linux/string.h b/include/linux/string.h
index b8508868d5ad..a716ee2a8adb 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -62,9 +62,20 @@ extern char * strnchr(const char *, size_t, int);
62#ifndef __HAVE_ARCH_STRRCHR 62#ifndef __HAVE_ARCH_STRRCHR
63extern char * strrchr(const char *,int); 63extern char * strrchr(const char *,int);
64#endif 64#endif
65extern char * __must_check strstrip(char *); 65extern char * __must_check skip_spaces(const char *);
66
67extern char *strim(char *);
68
69static inline __must_check char *strstrip(char *str)
70{
71 return strim(str);
72}
73
66#ifndef __HAVE_ARCH_STRSTR 74#ifndef __HAVE_ARCH_STRSTR
67extern char * strstr(const char *,const char *); 75extern char * strstr(const char *, const char *);
76#endif
77#ifndef __HAVE_ARCH_STRNSTR
78extern char * strnstr(const char *, const char *, size_t);
68#endif 79#endif
69#ifndef __HAVE_ARCH_STRLEN 80#ifndef __HAVE_ARCH_STRLEN
70extern __kernel_size_t strlen(const char *); 81extern __kernel_size_t strlen(const char *);
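
string.h now declares skip_spaces() and strnstr(), and strstrip() becomes a __must_check wrapper around the new strim(). A small sketch of trimming and searching a writable option string; parse_option() and the "debug" token are illustrative:

#include <linux/kernel.h>
#include <linux/string.h>

static void parse_option(char *buf)
{
	char *opt = strim(buf);		/* strip leading and trailing blanks */

	if (strnstr(opt, "debug", strlen(opt)))
		pr_info("debug requested in \"%s\"\n", opt);
}
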
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h
index 10709cbe96fd..c2786f20016f 100644
--- a/include/linux/sunrpc/debug.h
+++ b/include/linux/sunrpc/debug.h
@@ -28,9 +28,6 @@
28 28
29#ifdef __KERNEL__ 29#ifdef __KERNEL__
30 30
31#include <linux/timer.h>
32#include <linux/workqueue.h>
33
34/* 31/*
35 * Enable RPC debugging/profiling. 32 * Enable RPC debugging/profiling.
36 */ 33 */
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h
index 87b895d5c786..b78f16b1dea3 100644
--- a/include/linux/sunrpc/rpc_rdma.h
+++ b/include/linux/sunrpc/rpc_rdma.h
@@ -40,6 +40,8 @@
40#ifndef _LINUX_SUNRPC_RPC_RDMA_H 40#ifndef _LINUX_SUNRPC_RPC_RDMA_H
41#define _LINUX_SUNRPC_RPC_RDMA_H 41#define _LINUX_SUNRPC_RPC_RDMA_H
42 42
43#include <linux/types.h>
44
43struct rpcrdma_segment { 45struct rpcrdma_segment {
44 __be32 rs_handle; /* Registered memory handle */ 46 __be32 rs_handle; /* Registered memory handle */
45 __be32 rs_length; /* Length of the chunk in bytes */ 47 __be32 rs_length; /* Length of the chunk in bytes */
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 401097781fc0..7bc7fd5291ce 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -130,12 +130,14 @@ struct rpc_task_setup {
130#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */ 130#define RPC_TASK_DYNAMIC 0x0080 /* task was kmalloc'ed */
131#define RPC_TASK_KILLED 0x0100 /* task was killed */ 131#define RPC_TASK_KILLED 0x0100 /* task was killed */
132#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */ 132#define RPC_TASK_SOFT 0x0200 /* Use soft timeouts */
133#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
133 134
134#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) 135#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
135#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) 136#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
136#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS) 137#define RPC_DO_ROOTOVERRIDE(t) ((t)->tk_flags & RPC_TASK_ROOTCREDS)
137#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED) 138#define RPC_ASSASSINATED(t) ((t)->tk_flags & RPC_TASK_KILLED)
138#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT) 139#define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
140#define RPC_IS_SOFTCONN(t) ((t)->tk_flags & RPC_TASK_SOFTCONN)
139 141
140#define RPC_TASK_RUNNING 0 142#define RPC_TASK_RUNNING 0
141#define RPC_TASK_QUEUED 1 143#define RPC_TASK_QUEUED 1
@@ -171,7 +173,8 @@ struct rpc_task_setup {
171#define RPC_PRIORITY_LOW (-1) 173#define RPC_PRIORITY_LOW (-1)
172#define RPC_PRIORITY_NORMAL (0) 174#define RPC_PRIORITY_NORMAL (0)
173#define RPC_PRIORITY_HIGH (1) 175#define RPC_PRIORITY_HIGH (1)
174#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW) 176#define RPC_PRIORITY_PRIVILEGED (2)
177#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW)
175 178
176struct rpc_timer { 179struct rpc_timer {
177 struct timer_list timer; 180 struct timer_list timer;
@@ -227,6 +230,7 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *,
227void rpc_wake_up(struct rpc_wait_queue *); 230void rpc_wake_up(struct rpc_wait_queue *);
228struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); 231struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
229void rpc_wake_up_status(struct rpc_wait_queue *, int); 232void rpc_wake_up_status(struct rpc_wait_queue *, int);
233int rpc_queue_empty(struct rpc_wait_queue *);
230void rpc_delay(struct rpc_task *, unsigned long); 234void rpc_delay(struct rpc_task *, unsigned long);
231void * rpc_malloc(struct rpc_task *, size_t); 235void * rpc_malloc(struct rpc_task *, size_t);
232void rpc_free(void *); 236void rpc_free(void *);
@@ -252,6 +256,16 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task)
252 return __rpc_wait_for_completion_task(task, NULL); 256 return __rpc_wait_for_completion_task(task, NULL);
253} 257}
254 258
259static inline void rpc_task_set_priority(struct rpc_task *task, unsigned char prio)
260{
261 task->tk_priority = prio - RPC_PRIORITY_LOW;
262}
263
264static inline int rpc_task_has_priority(struct rpc_task *task, unsigned char prio)
265{
266 return (task->tk_priority + RPC_PRIORITY_LOW == prio);
267}
268
255#ifdef RPC_DEBUG 269#ifdef RPC_DEBUG
256static inline const char * rpc_qname(struct rpc_wait_queue *q) 270static inline const char * rpc_qname(struct rpc_wait_queue *q)
257{ 271{
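
The new RPC_TASK_SOFTCONN flag makes a call fail early when the transport cannot connect, and RPC_PRIORITY_PRIVILEGED plus the rpc_task_set_priority()/rpc_task_has_priority() helpers are added. A hedged sketch of passing the new flag through rpc_call_sync(); the client and message are assumed to be prepared elsewhere:

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>

/* Illustrative only: @clnt and @msg come from the caller. */
static int probe_server(struct rpc_clnt *clnt, struct rpc_message *msg)
{
	/* Soft timeouts, and give up if the connection cannot be set up. */
	return rpc_call_sync(clnt, msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN);
}
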
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 52e8cb0a7569..5a3085b9b394 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -29,7 +29,6 @@ struct svc_pool_stats {
29 unsigned long packets; 29 unsigned long packets;
30 unsigned long sockets_queued; 30 unsigned long sockets_queued;
31 unsigned long threads_woken; 31 unsigned long threads_woken;
32 unsigned long overloads_avoided;
33 unsigned long threads_timedout; 32 unsigned long threads_timedout;
34}; 33};
35 34
@@ -50,7 +49,6 @@ struct svc_pool {
50 struct list_head sp_sockets; /* pending sockets */ 49 struct list_head sp_sockets; /* pending sockets */
51 unsigned int sp_nrthreads; /* # of threads in pool */ 50 unsigned int sp_nrthreads; /* # of threads in pool */
52 struct list_head sp_all_threads; /* all server threads */ 51 struct list_head sp_all_threads; /* all server threads */
53 int sp_nwaking; /* number of threads woken but not yet active */
54 struct svc_pool_stats sp_stats; /* statistics on pool operation */ 52 struct svc_pool_stats sp_stats; /* statistics on pool operation */
55} ____cacheline_aligned_in_smp; 53} ____cacheline_aligned_in_smp;
56 54
@@ -275,16 +273,11 @@ struct svc_rqst {
275 struct auth_domain * rq_client; /* RPC peer info */ 273 struct auth_domain * rq_client; /* RPC peer info */
276 struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ 274 struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
277 struct svc_cacherep * rq_cacherep; /* cache info */ 275 struct svc_cacherep * rq_cacherep; /* cache info */
278 struct knfsd_fh * rq_reffh; /* Referrence filehandle, used to
279 * determine what device number
280 * to report (real or virtual)
281 */
282 int rq_splice_ok; /* turned off in gss privacy 276 int rq_splice_ok; /* turned off in gss privacy
283 * to prevent encrypting page 277 * to prevent encrypting page
284 * cache pages */ 278 * cache pages */
285 wait_queue_head_t rq_wait; /* synchronization */ 279 wait_queue_head_t rq_wait; /* synchronization */
286 struct task_struct *rq_task; /* service thread */ 280 struct task_struct *rq_task; /* service thread */
287 int rq_waking; /* 1 if thread is being woken */
288}; 281};
289 282
290/* 283/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 4ec90019c1a4..a2602a8207a6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -145,38 +145,43 @@ enum {
145 SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ 145 SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */
146 SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ 146 SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */
147 SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ 147 SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */
148 SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
148 /* add others here before... */ 149 /* add others here before... */
149 SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ 150 SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */
150}; 151};
151 152
152#define SWAP_CLUSTER_MAX 32 153#define SWAP_CLUSTER_MAX 32
153 154
154#define SWAP_MAP_MAX 0x7ffe 155#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
155#define SWAP_MAP_BAD 0x7fff 156#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
156#define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */ 157#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
157#define SWAP_COUNT_MASK (~SWAP_HAS_CACHE) 158#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
159#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
160#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
161
158/* 162/*
159 * The in-memory structure used to track swap areas. 163 * The in-memory structure used to track swap areas.
160 */ 164 */
161struct swap_info_struct { 165struct swap_info_struct {
162 unsigned long flags; 166 unsigned long flags; /* SWP_USED etc: see above */
163 int prio; /* swap priority */ 167 signed short prio; /* swap priority of this type */
164 int next; /* next entry on swap list */ 168 signed char type; /* strange name for an index */
165 struct file *swap_file; 169 signed char next; /* next type on the swap list */
166 struct block_device *bdev; 170 unsigned int max; /* extent of the swap_map */
167 struct list_head extent_list; 171 unsigned char *swap_map; /* vmalloc'ed array of usage counts */
168 struct swap_extent *curr_swap_extent; 172 unsigned int lowest_bit; /* index of first free in swap_map */
169 unsigned short *swap_map; 173 unsigned int highest_bit; /* index of last free in swap_map */
170 unsigned int lowest_bit; 174 unsigned int pages; /* total of usable pages of swap */
171 unsigned int highest_bit; 175 unsigned int inuse_pages; /* number of those currently in use */
176 unsigned int cluster_next; /* likely index for next allocation */
177 unsigned int cluster_nr; /* countdown to next cluster search */
172 unsigned int lowest_alloc; /* while preparing discard cluster */ 178 unsigned int lowest_alloc; /* while preparing discard cluster */
173 unsigned int highest_alloc; /* while preparing discard cluster */ 179 unsigned int highest_alloc; /* while preparing discard cluster */
174 unsigned int cluster_next; 180 struct swap_extent *curr_swap_extent;
175 unsigned int cluster_nr; 181 struct swap_extent first_swap_extent;
176 unsigned int pages; 182 struct block_device *bdev; /* swap device or bdev of swap file */
177 unsigned int max; 183 struct file *swap_file; /* seldom referenced */
178 unsigned int inuse_pages; 184 unsigned int old_block_size; /* seldom referenced */
179 unsigned int old_block_size;
180}; 185};
181 186
182struct swap_list_t { 187struct swap_list_t {
@@ -273,6 +278,7 @@ extern int scan_unevictable_register_node(struct node *node);
273extern void scan_unevictable_unregister_node(struct node *node); 278extern void scan_unevictable_unregister_node(struct node *node);
274 279
275extern int kswapd_run(int nid); 280extern int kswapd_run(int nid);
281extern void kswapd_stop(int nid);
276 282
277#ifdef CONFIG_MMU 283#ifdef CONFIG_MMU
278/* linux/mm/shmem.c */ 284/* linux/mm/shmem.c */
@@ -309,17 +315,18 @@ extern long total_swap_pages;
309extern void si_swapinfo(struct sysinfo *); 315extern void si_swapinfo(struct sysinfo *);
310extern swp_entry_t get_swap_page(void); 316extern swp_entry_t get_swap_page(void);
311extern swp_entry_t get_swap_page_of_type(int); 317extern swp_entry_t get_swap_page_of_type(int);
312extern void swap_duplicate(swp_entry_t);
313extern int swapcache_prepare(swp_entry_t);
314extern int valid_swaphandles(swp_entry_t, unsigned long *); 318extern int valid_swaphandles(swp_entry_t, unsigned long *);
319extern int add_swap_count_continuation(swp_entry_t, gfp_t);
320extern void swap_shmem_alloc(swp_entry_t);
321extern int swap_duplicate(swp_entry_t);
322extern int swapcache_prepare(swp_entry_t);
315extern void swap_free(swp_entry_t); 323extern void swap_free(swp_entry_t);
316extern void swapcache_free(swp_entry_t, struct page *page); 324extern void swapcache_free(swp_entry_t, struct page *page);
317extern int free_swap_and_cache(swp_entry_t); 325extern int free_swap_and_cache(swp_entry_t);
318extern int swap_type_of(dev_t, sector_t, struct block_device **); 326extern int swap_type_of(dev_t, sector_t, struct block_device **);
319extern unsigned int count_swap_pages(int, int); 327extern unsigned int count_swap_pages(int, int);
320extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); 328extern sector_t map_swap_page(struct page *, struct block_device **);
321extern sector_t swapdev_block(int, pgoff_t); 329extern sector_t swapdev_block(int, pgoff_t);
322extern struct swap_info_struct *get_swap_info_struct(unsigned);
323extern int reuse_swap_page(struct page *); 330extern int reuse_swap_page(struct page *);
324extern int try_to_free_swap(struct page *); 331extern int try_to_free_swap(struct page *);
325struct backing_dev_info; 332struct backing_dev_info;
@@ -384,8 +391,18 @@ static inline void show_swap_cache_info(void)
384#define free_swap_and_cache(swp) is_migration_entry(swp) 391#define free_swap_and_cache(swp) is_migration_entry(swp)
385#define swapcache_prepare(swp) is_migration_entry(swp) 392#define swapcache_prepare(swp) is_migration_entry(swp)
386 393
387static inline void swap_duplicate(swp_entry_t swp) 394static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
388{ 395{
396 return 0;
397}
398
399static inline void swap_shmem_alloc(swp_entry_t swp)
400{
401}
402
403static inline int swap_duplicate(swp_entry_t swp)
404{
405 return 0;
389} 406}
390 407
391static inline void swap_free(swp_entry_t swp) 408static inline void swap_free(swp_entry_t swp)
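
The first-level swap_map entry is now a byte packing a 6-bit usage count (up to SWAP_MAP_MAX), the SWAP_HAS_CACHE flag and a COUNT_CONTINUED marker; counts beyond the limit continue in extra pages attached with add_swap_count_continuation(). The helper below is purely illustrative (it is not a kernel API) and only decodes a byte according to the constants above:

#include <linux/kernel.h>
#include <linux/swap.h>

static void describe_swap_count(unsigned char ent)
{
	unsigned int count = ent & ~(SWAP_HAS_CACHE | COUNT_CONTINUED);

	pr_info("count=%u%s%s\n", count,
		(ent & COUNT_CONTINUED) ? " (more in continuation page)" : "",
		(ent & SWAP_HAS_CACHE) ? " +swapcache" : "");
}
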
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 939a61507ac5..207466a49f3d 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -102,12 +102,10 @@ struct perf_event_attr;
102#ifdef CONFIG_EVENT_PROFILE 102#ifdef CONFIG_EVENT_PROFILE
103 103
104#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ 104#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
105 .profile_count = ATOMIC_INIT(-1), \
106 .profile_enable = prof_sysenter_enable, \ 105 .profile_enable = prof_sysenter_enable, \
107 .profile_disable = prof_sysenter_disable, 106 .profile_disable = prof_sysenter_disable,
108 107
109#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ 108#define TRACE_SYS_EXIT_PROFILE_INIT(sname) \
110 .profile_count = ATOMIC_INIT(-1), \
111 .profile_enable = prof_sysexit_enable, \ 109 .profile_enable = prof_sysexit_enable, \
112 .profile_disable = prof_sysexit_disable, 110 .profile_disable = prof_sysexit_disable,
113#else 111#else
@@ -145,7 +143,7 @@ struct perf_event_attr;
145 .name = "sys_enter"#sname, \ 143 .name = "sys_enter"#sname, \
146 .system = "syscalls", \ 144 .system = "syscalls", \
147 .event = &enter_syscall_print_##sname, \ 145 .event = &enter_syscall_print_##sname, \
148 .raw_init = init_syscall_trace, \ 146 .raw_init = trace_event_raw_init, \
149 .show_format = syscall_enter_format, \ 147 .show_format = syscall_enter_format, \
150 .define_fields = syscall_enter_define_fields, \ 148 .define_fields = syscall_enter_define_fields, \
151 .regfunc = reg_event_syscall_enter, \ 149 .regfunc = reg_event_syscall_enter, \
@@ -167,7 +165,7 @@ struct perf_event_attr;
167 .name = "sys_exit"#sname, \ 165 .name = "sys_exit"#sname, \
168 .system = "syscalls", \ 166 .system = "syscalls", \
169 .event = &exit_syscall_print_##sname, \ 167 .event = &exit_syscall_print_##sname, \
170 .raw_init = init_syscall_trace, \ 168 .raw_init = trace_event_raw_init, \
171 .show_format = syscall_exit_format, \ 169 .show_format = syscall_exit_format, \
172 .define_fields = syscall_exit_define_fields, \ 170 .define_fields = syscall_exit_define_fields, \
173 .regfunc = reg_event_syscall_exit, \ 171 .regfunc = reg_event_syscall_exit, \
@@ -197,7 +195,7 @@ struct perf_event_attr;
197 static const struct syscall_metadata __used \ 195 static const struct syscall_metadata __used \
198 __attribute__((__aligned__(4))) \ 196 __attribute__((__aligned__(4))) \
199 __attribute__((section("__syscalls_metadata"))) \ 197 __attribute__((section("__syscalls_metadata"))) \
200 __syscall_meta_##sname = { \ 198 __syscall_meta__##sname = { \
201 .name = "sys_"#sname, \ 199 .name = "sys_"#sname, \
202 .nb_args = 0, \ 200 .nb_args = 0, \
203 .enter_event = &event_enter__##sname, \ 201 .enter_event = &event_enter__##sname, \
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 9d68fed50f11..cfa83083a2d4 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -99,8 +99,9 @@ int __must_check sysfs_chmod_file(struct kobject *kobj, struct attribute *attr,
99void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); 99void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr);
100 100
101int __must_check sysfs_create_bin_file(struct kobject *kobj, 101int __must_check sysfs_create_bin_file(struct kobject *kobj,
102 struct bin_attribute *attr); 102 const struct bin_attribute *attr);
103void sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr); 103void sysfs_remove_bin_file(struct kobject *kobj,
104 const struct bin_attribute *attr);
104 105
105int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target, 106int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target,
106 const char *name); 107 const char *name);
@@ -175,13 +176,13 @@ static inline void sysfs_remove_file(struct kobject *kobj,
175} 176}
176 177
177static inline int sysfs_create_bin_file(struct kobject *kobj, 178static inline int sysfs_create_bin_file(struct kobject *kobj,
178 struct bin_attribute *attr) 179 const struct bin_attribute *attr)
179{ 180{
180 return 0; 181 return 0;
181} 182}
182 183
183static inline void sysfs_remove_bin_file(struct kobject *kobj, 184static inline void sysfs_remove_bin_file(struct kobject *kobj,
184 struct bin_attribute *attr) 185 const struct bin_attribute *attr)
185{ 186{
186} 187}
187 188
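
sysfs_create_bin_file()/sysfs_remove_bin_file() now take a const struct bin_attribute, so binary attributes can be declared const. A minimal sketch, assuming the bin_attribute read prototype of this kernel series (no struct file argument); the "eeprom" name and contents are made up:

#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/sysfs.h>

static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *attr,
			   char *buf, loff_t off, size_t count)
{
	memset(buf, 0, count);		/* real code would copy device data */
	return count;
}

static const struct bin_attribute eeprom_attr = {
	.attr	= { .name = "eeprom", .mode = 0444 },
	.size	= 256,
	.read	= eeprom_read,
};

static int add_eeprom_file(struct kobject *kobj)
{
	return sysfs_create_bin_file(kobj, &eeprom_attr);
}
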
diff --git a/include/linux/timb_gpio.h b/include/linux/timb_gpio.h
new file mode 100644
index 000000000000..ce456eaae861
--- /dev/null
+++ b/include/linux/timb_gpio.h
@@ -0,0 +1,37 @@
1/*
2 * timb_gpio.h timberdale FPGA GPIO driver, platform data definition
3 * Copyright (c) 2009 Intel Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17 */
18
19#ifndef _LINUX_TIMB_GPIO_H
20#define _LINUX_TIMB_GPIO_H
21
22/**
23 * struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver
24 * @gpio_base The number of the first GPIO pin, set to -1 for
25 * dynamic number allocation.
26 * @nr_pins Number of pins that is supported by the hardware (1-32)
27 * @irq_base If IRQ is supported by the hardware, this is the base
28 * number of IRQ:s. One IRQ per pin will be used. Set to
29 * -1 if IRQ:s is not supported.
30 */
31struct timbgpio_platform_data {
32 int gpio_base;
33 int nr_pins;
34 int irq_base;
35};
36
37#endif
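
The new header only describes platform data for the Timberdale GPIO block; a board file hands it to the driver through a platform device. A sketch of that wiring; the "timb-gpio" device name is an assumption about the matching driver, not something this header defines:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/timb_gpio.h>

static struct timbgpio_platform_data board_timbgpio_pdata = {
	.gpio_base	= -1,	/* let gpiolib assign numbers dynamically */
	.nr_pins	= 32,
	.irq_base	= -1,	/* no per-pin interrupts wired up */
};

static struct platform_device board_timbgpio_dev = {
	.name	= "timb-gpio",	/* assumed driver name */
	.id	= -1,
	.dev	= {
		.platform_data = &board_timbgpio_pdata,
	},
};

static int __init board_add_timbgpio(void)
{
	return platform_device_register(&board_timbgpio_dev);
}
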
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 57e63579bfdd..5b81156780b1 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -99,7 +99,7 @@ int arch_update_cpu_topology(void);
99 | 1*SD_WAKE_AFFINE \ 99 | 1*SD_WAKE_AFFINE \
100 | 1*SD_SHARE_CPUPOWER \ 100 | 1*SD_SHARE_CPUPOWER \
101 | 0*SD_POWERSAVINGS_BALANCE \ 101 | 0*SD_POWERSAVINGS_BALANCE \
102 | 0*SD_SHARE_PKG_RESOURCES \ 102 | 1*SD_SHARE_PKG_RESOURCES \
103 | 0*SD_SERIALIZE \ 103 | 0*SD_SERIALIZE \
104 | 0*SD_PREFER_SIBLING \ 104 | 0*SD_PREFER_SIBLING \
105 , \ 105 , \
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 09077f6ed128..5cf397ceb726 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -14,6 +14,7 @@ struct trace_seq {
14 unsigned char buffer[PAGE_SIZE]; 14 unsigned char buffer[PAGE_SIZE];
15 unsigned int len; 15 unsigned int len;
16 unsigned int readpos; 16 unsigned int readpos;
17 int full;
17}; 18};
18 19
19static inline void 20static inline void
@@ -21,6 +22,7 @@ trace_seq_init(struct trace_seq *s)
21{ 22{
22 s->len = 0; 23 s->len = 0;
23 s->readpos = 0; 24 s->readpos = 0;
25 s->full = 0;
24} 26}
25 27
26/* 28/*
@@ -33,7 +35,7 @@ extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
33 __attribute__ ((format (printf, 2, 0))); 35 __attribute__ ((format (printf, 2, 0)));
34extern int 36extern int
35trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); 37trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
36extern void trace_print_seq(struct seq_file *m, struct trace_seq *s); 38extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
37extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 39extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
38 size_t cnt); 40 size_t cnt);
39extern int trace_seq_puts(struct trace_seq *s, const char *str); 41extern int trace_seq_puts(struct trace_seq *s, const char *str);
@@ -55,8 +57,9 @@ trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
55 return 0; 57 return 0;
56} 58}
57 59
58static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s) 60static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
59{ 61{
62 return 0;
60} 63}
61static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 64static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
62 size_t cnt) 65 size_t cnt)
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 1eb44a924e56..10db0102a890 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -134,6 +134,13 @@ static inline __must_check int tracehook_report_syscall_entry(
134 */ 134 */
135static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) 135static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
136{ 136{
137 if (step) {
138 siginfo_t info;
139 user_single_step_siginfo(current, regs, &info);
140 force_sig_info(SIGTRAP, &info, current);
141 return;
142 }
143
137 ptrace_report_syscall(regs); 144 ptrace_report_syscall(regs);
138} 145}
139 146
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 405a9035fe40..6abfcf5b5887 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -350,8 +350,6 @@ extern void tty_write_flush(struct tty_struct *);
350 350
351extern struct ktermios tty_std_termios; 351extern struct ktermios tty_std_termios;
352 352
353extern int kmsg_redirect;
354
355extern void console_init(void); 353extern void console_init(void);
356extern int vcs_init(void); 354extern int vcs_init(void);
357 355
@@ -466,7 +464,7 @@ extern int tty_port_alloc_xmit_buf(struct tty_port *port);
466extern void tty_port_free_xmit_buf(struct tty_port *port); 464extern void tty_port_free_xmit_buf(struct tty_port *port);
467extern void tty_port_put(struct tty_port *port); 465extern void tty_port_put(struct tty_port *port);
468 466
469extern inline struct tty_port *tty_port_get(struct tty_port *port) 467static inline struct tty_port *tty_port_get(struct tty_port *port)
470{ 468{
471 if (port) 469 if (port)
472 kref_get(&port->kref); 470 kref_get(&port->kref);
@@ -488,7 +486,7 @@ extern void tty_port_close(struct tty_port *port,
488 struct tty_struct *tty, struct file *filp); 486 struct tty_struct *tty, struct file *filp);
489extern int tty_port_open(struct tty_port *port, 487extern int tty_port_open(struct tty_port *port,
490 struct tty_struct *tty, struct file *filp); 488 struct tty_struct *tty, struct file *filp);
491extern inline int tty_port_users(struct tty_port *port) 489static inline int tty_port_users(struct tty_port *port)
492{ 490{
493 return port->count + port->blocked_open; 491 return port->count + port->blocked_open;
494} 492}
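
tty_port_get() and tty_port_users() lose their bogus "extern inline" and become static inline. A short sketch of the reference-counting pattern they support; the function names are illustrative:

#include <linux/kernel.h>
#include <linux/tty.h>

/* Pin the port while it is handed to asynchronous work. */
static struct tty_port *grab_port(struct tty_port *port)
{
	return tty_port_get(port);	/* takes a kref */
}

static void drop_port(struct tty_port *port)
{
	if (tty_port_users(port))
		pr_debug("port still open or being opened\n");
	tty_port_put(port);		/* drops the kref */
}
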
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 6b58367d145e..d512d98dfb7d 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -94,6 +94,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
94 * happens, handle that and return -EFAULT. 94 * happens, handle that and return -EFAULT.
95 */ 95 */
96extern long probe_kernel_read(void *dst, void *src, size_t size); 96extern long probe_kernel_read(void *dst, void *src, size_t size);
97extern long __probe_kernel_read(void *dst, void *src, size_t size);
97 98
98/* 99/*
99 * probe_kernel_write(): safely attempt to write to a location 100 * probe_kernel_write(): safely attempt to write to a location
@@ -104,6 +105,7 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
104 * Safely write to address @dst from the buffer at @src. If a kernel fault 105 * Safely write to address @dst from the buffer at @src. If a kernel fault
105 * happens, handle that and return -EFAULT. 106 * happens, handle that and return -EFAULT.
106 */ 107 */
107extern long probe_kernel_write(void *dst, void *src, size_t size); 108extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
109extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
108 110
109#endif /* __LINUX_UACCESS_H__ */ 111#endif /* __LINUX_UACCESS_H__ */
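
uaccess.h gains __probe_kernel_read()/__probe_kernel_write(), and the write side is marked notrace. A minimal sketch of the usual safe-peek pattern with the existing probe_kernel_read(); peek_word() is illustrative:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Copy a value that may live behind an unmapped pointer, without oopsing. */
static int peek_word(void *addr, unsigned long *out)
{
	unsigned long val;

	if (probe_kernel_read(&val, addr, sizeof(val)))
		return -EFAULT;		/* the fault was caught and handled */

	*out = val;
	return 0;
}
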
diff --git a/include/linux/usb.h b/include/linux/usb.h
index e101a2d04d75..d7ace1b80f09 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -192,6 +192,7 @@ struct usb_interface {
192 unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ 192 unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
193 unsigned needs_binding:1; /* needs delayed unbind/rebind */ 193 unsigned needs_binding:1; /* needs delayed unbind/rebind */
194 unsigned reset_running:1; 194 unsigned reset_running:1;
195 unsigned resetting_device:1; /* true: bandwidth alloc after reset */
195 196
196 struct device dev; /* interface specific device info */ 197 struct device dev; /* interface specific device info */
197 struct device *usb_dev; 198 struct device *usb_dev;
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index acf6e457c04b..1819396ed501 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -16,6 +16,7 @@
16#include <linux/kref.h> 16#include <linux/kref.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/sysrq.h> 18#include <linux/sysrq.h>
19#include <linux/kfifo.h>
19 20
20#define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ 21#define SERIAL_TTY_MAJOR 188 /* Nice legal number now */
21#define SERIAL_TTY_MINORS 254 /* loads of devices :) */ 22#define SERIAL_TTY_MINORS 254 /* loads of devices :) */
@@ -94,7 +95,7 @@ struct usb_serial_port {
94 unsigned char *bulk_out_buffer; 95 unsigned char *bulk_out_buffer;
95 int bulk_out_size; 96 int bulk_out_size;
96 struct urb *write_urb; 97 struct urb *write_urb;
97 struct kfifo *write_fifo; 98 struct kfifo write_fifo;
98 int write_urb_busy; 99 int write_urb_busy;
99 __u8 bulk_out_endpointAddress; 100 __u8 bulk_out_endpointAddress;
100 101
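
struct usb_serial_port now embeds a struct kfifo instead of pointing at one, following the kfifo rework in this merge window. A hedged sketch of enqueueing transmit data under the new layout; kfifo_in() and its caller-provided locking are assumptions about that rework, and the allocation (kfifo_alloc() at probe time) is left out:

#include <linux/kfifo.h>
#include <linux/usb/serial.h>

static int queue_tx_bytes(struct usb_serial_port *port,
			  const unsigned char *buf, int count)
{
	/* The fifo is a member now, so take its address; caller locks. */
	return kfifo_in(&port->write_fifo, buf, count);
}
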
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index b2a7d8ba6ee3..15591d2ea400 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -128,6 +128,29 @@ struct usbdevfs_hub_portinfo {
128#ifdef __KERNEL__ 128#ifdef __KERNEL__
129#ifdef CONFIG_COMPAT 129#ifdef CONFIG_COMPAT
130#include <linux/compat.h> 130#include <linux/compat.h>
131
132struct usbdevfs_ctrltransfer32 {
133 u8 bRequestType;
134 u8 bRequest;
135 u16 wValue;
136 u16 wIndex;
137 u16 wLength;
138 u32 timeout; /* in milliseconds */
139 compat_caddr_t data;
140};
141
142struct usbdevfs_bulktransfer32 {
143 compat_uint_t ep;
144 compat_uint_t len;
145 compat_uint_t timeout; /* in milliseconds */
146 compat_caddr_t data;
147};
148
149struct usbdevfs_disconnectsignal32 {
150 compat_int_t signr;
151 compat_caddr_t context;
152};
153
131struct usbdevfs_urb32 { 154struct usbdevfs_urb32 {
132 unsigned char type; 155 unsigned char type;
133 unsigned char endpoint; 156 unsigned char endpoint;
@@ -153,7 +176,9 @@ struct usbdevfs_ioctl32 {
153#endif /* __KERNEL__ */ 176#endif /* __KERNEL__ */
154 177
155#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer) 178#define USBDEVFS_CONTROL _IOWR('U', 0, struct usbdevfs_ctrltransfer)
179#define USBDEVFS_CONTROL32 _IOWR('U', 0, struct usbdevfs_ctrltransfer32)
156#define USBDEVFS_BULK _IOWR('U', 2, struct usbdevfs_bulktransfer) 180#define USBDEVFS_BULK _IOWR('U', 2, struct usbdevfs_bulktransfer)
181#define USBDEVFS_BULK32 _IOWR('U', 2, struct usbdevfs_bulktransfer32)
157#define USBDEVFS_RESETEP _IOR('U', 3, unsigned int) 182#define USBDEVFS_RESETEP _IOR('U', 3, unsigned int)
158#define USBDEVFS_SETINTERFACE _IOR('U', 4, struct usbdevfs_setinterface) 183#define USBDEVFS_SETINTERFACE _IOR('U', 4, struct usbdevfs_setinterface)
159#define USBDEVFS_SETCONFIGURATION _IOR('U', 5, unsigned int) 184#define USBDEVFS_SETCONFIGURATION _IOR('U', 5, unsigned int)
@@ -166,6 +191,7 @@ struct usbdevfs_ioctl32 {
166#define USBDEVFS_REAPURBNDELAY _IOW('U', 13, void *) 191#define USBDEVFS_REAPURBNDELAY _IOW('U', 13, void *)
167#define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, __u32) 192#define USBDEVFS_REAPURBNDELAY32 _IOW('U', 13, __u32)
168#define USBDEVFS_DISCSIGNAL _IOR('U', 14, struct usbdevfs_disconnectsignal) 193#define USBDEVFS_DISCSIGNAL _IOR('U', 14, struct usbdevfs_disconnectsignal)
194#define USBDEVFS_DISCSIGNAL32 _IOR('U', 14, struct usbdevfs_disconnectsignal32)
169#define USBDEVFS_CLAIMINTERFACE _IOR('U', 15, unsigned int) 195#define USBDEVFS_CLAIMINTERFACE _IOR('U', 15, unsigned int)
170#define USBDEVFS_RELEASEINTERFACE _IOR('U', 16, unsigned int) 196#define USBDEVFS_RELEASEINTERFACE _IOR('U', 16, unsigned int)
171#define USBDEVFS_CONNECTINFO _IOW('U', 17, struct usbdevfs_connectinfo) 197#define USBDEVFS_CONNECTINFO _IOW('U', 17, struct usbdevfs_connectinfo)
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
index 79b9837d9ca0..cf97b5b9d1fe 100644
--- a/include/linux/vermagic.h
+++ b/include/linux/vermagic.h
@@ -1,4 +1,4 @@
1#include <linux/utsrelease.h> 1#include <generated/utsrelease.h>
2#include <linux/module.h> 2#include <linux/module.h>
3 3
4/* Simply sanity version stamp for modules. */ 4/* Simply sanity version stamp for modules. */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 32b92298fd79..d4962a782b8a 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -294,6 +294,7 @@ struct v4l2_pix_format {
294 294
295/* Grey formats */ 295/* Grey formats */
296#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ 296#define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */
297#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
297#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ 298#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
298 299
299/* Palette formats */ 300/* Palette formats */
@@ -329,7 +330,11 @@ struct v4l2_pix_format {
329#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ 330#define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */
330#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ 331#define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */
331#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */ 332#define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */
332#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10bit raw bayer */ 333#define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */
334#define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */
335#define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */
336#define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */
337#define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */
333 /* 10bit raw bayer DPCM compressed to 8 bits */ 338 /* 10bit raw bayer DPCM compressed to 8 bits */
334#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0') 339#define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0')
335 /* 340 /*
@@ -732,6 +737,99 @@ struct v4l2_standard {
732}; 737};
733 738
734/* 739/*
740 * V I D E O T I M I N G S D V P R E S E T
741 */
742struct v4l2_dv_preset {
743 __u32 preset;
744 __u32 reserved[4];
745};
746
747/*
748 * D V P R E S E T S E N U M E R A T I O N
749 */
750struct v4l2_dv_enum_preset {
751 __u32 index;
752 __u32 preset;
753 __u8 name[32]; /* Name of the preset timing */
754 __u32 width;
755 __u32 height;
756 __u32 reserved[4];
757};
758
759/*
760 * D V P R E S E T V A L U E S
761 */
762#define V4L2_DV_INVALID 0
763#define V4L2_DV_480P59_94 1 /* BT.1362 */
764#define V4L2_DV_576P50 2 /* BT.1362 */
765#define V4L2_DV_720P24 3 /* SMPTE 296M */
766#define V4L2_DV_720P25 4 /* SMPTE 296M */
767#define V4L2_DV_720P30 5 /* SMPTE 296M */
768#define V4L2_DV_720P50 6 /* SMPTE 296M */
769#define V4L2_DV_720P59_94 7 /* SMPTE 274M */
770#define V4L2_DV_720P60 8 /* SMPTE 274M/296M */
771#define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */
772#define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */
773#define V4L2_DV_1080I25 11 /* BT.1120 */
774#define V4L2_DV_1080I50 12 /* SMPTE 296M */
775#define V4L2_DV_1080I60 13 /* SMPTE 296M */
776#define V4L2_DV_1080P24 14 /* SMPTE 296M */
777#define V4L2_DV_1080P25 15 /* SMPTE 296M */
778#define V4L2_DV_1080P30 16 /* SMPTE 296M */
779#define V4L2_DV_1080P50 17 /* BT.1120 */
780#define V4L2_DV_1080P60 18 /* BT.1120 */
781
782/*
783 * D V B T T I M I N G S
784 */
785
786/* BT.656/BT.1120 timing data */
787struct v4l2_bt_timings {
788 __u32 width; /* width in pixels */
789 __u32 height; /* height in lines */
790 __u32 interlaced; /* Interlaced or progressive */
791 __u32 polarities; /* Positive or negative polarity */
792 __u64 pixelclock; /* Pixel clock in HZ. Ex. 74.25MHz->74250000 */
 793 __u32 hfrontporch; /* Horizontal front porch in pixels */
794 __u32 hsync; /* Horizontal Sync length in pixels */
795 __u32 hbackporch; /* Horizontal back porch in pixels */
796 __u32 vfrontporch; /* Vertical front porch in pixels */
797 __u32 vsync; /* Vertical Sync length in lines */
798 __u32 vbackporch; /* Vertical back porch in lines */
799 __u32 il_vfrontporch; /* Vertical front porch for bottom field of
800 * interlaced field formats
801 */
802 __u32 il_vsync; /* Vertical sync length for bottom field of
803 * interlaced field formats
804 */
805 __u32 il_vbackporch; /* Vertical back porch for bottom field of
806 * interlaced field formats
807 */
808 __u32 reserved[16];
809} __attribute__ ((packed));
810
811/* Interlaced or progressive format */
812#define V4L2_DV_PROGRESSIVE 0
813#define V4L2_DV_INTERLACED 1
814
815/* Polarities. If bit is not set, it is assumed to be negative polarity */
816#define V4L2_DV_VSYNC_POS_POL 0x00000001
817#define V4L2_DV_HSYNC_POS_POL 0x00000002
818
819
820/* DV timings */
821struct v4l2_dv_timings {
822 __u32 type;
823 union {
824 struct v4l2_bt_timings bt;
825 __u32 reserved[32];
826 };
827} __attribute__ ((packed));
828
829/* Values for the type field */
830#define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */
831
832/*
735 * V I D E O I N P U T S 833 * V I D E O I N P U T S
736 */ 834 */
737struct v4l2_input { 835struct v4l2_input {
@@ -742,7 +840,8 @@ struct v4l2_input {
742 __u32 tuner; /* Associated tuner */ 840 __u32 tuner; /* Associated tuner */
743 v4l2_std_id std; 841 v4l2_std_id std;
744 __u32 status; 842 __u32 status;
745 __u32 reserved[4]; 843 __u32 capabilities;
844 __u32 reserved[3];
746}; 845};
747 846
748/* Values for the 'type' field */ 847/* Values for the 'type' field */
@@ -773,6 +872,11 @@ struct v4l2_input {
773#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */ 872#define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */
774#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */ 873#define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */
775 874
875/* capabilities flags */
876#define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
877#define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
878#define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */
879
776/* 880/*
777 * V I D E O O U T P U T S 881 * V I D E O O U T P U T S
778 */ 882 */
@@ -783,13 +887,19 @@ struct v4l2_output {
783 __u32 audioset; /* Associated audios (bitfield) */ 887 __u32 audioset; /* Associated audios (bitfield) */
784 __u32 modulator; /* Associated modulator */ 888 __u32 modulator; /* Associated modulator */
785 v4l2_std_id std; 889 v4l2_std_id std;
786 __u32 reserved[4]; 890 __u32 capabilities;
891 __u32 reserved[3];
787}; 892};
788/* Values for the 'type' field */ 893/* Values for the 'type' field */
789#define V4L2_OUTPUT_TYPE_MODULATOR 1 894#define V4L2_OUTPUT_TYPE_MODULATOR 1
790#define V4L2_OUTPUT_TYPE_ANALOG 2 895#define V4L2_OUTPUT_TYPE_ANALOG 2
791#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3 896#define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3
792 897
898/* capabilities flags */
899#define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */
900#define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */
901#define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */
902
793/* 903/*
794 * C O N T R O L S 904 * C O N T R O L S
795 */ 905 */
@@ -1624,6 +1734,13 @@ struct v4l2_dbg_chip_ident {
1624#endif 1734#endif
1625 1735
1626#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) 1736#define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek)
1737#define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct v4l2_dv_enum_preset)
1738#define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct v4l2_dv_preset)
1739#define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct v4l2_dv_preset)
1740#define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct v4l2_dv_preset)
1741#define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
1742#define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
1743
1627/* Reminder: when adding new ioctls please add support for them to 1744/* Reminder: when adding new ioctls please add support for them to
1628 drivers/media/video/v4l2-compat-ioctl32.c as well! */ 1745 drivers/media/video/v4l2-compat-ioctl32.c as well! */
1629 1746
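
The DV preset ioctls let applications switch a digital video input between fixed timings. A small userspace sketch, assuming a capture node such as /dev/video0 that advertises V4L2_IN_CAP_PRESETS:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_dv_preset preset;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&preset, 0, sizeof(preset));
	preset.preset = V4L2_DV_720P60;

	if (ioctl(fd, VIDIOC_S_DV_PRESET, &preset) < 0)
		perror("VIDIOC_S_DV_PRESET");

	close(fd);
	return 0;
}
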
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 2d0f222388a8..ee03bba9c5df 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -40,6 +40,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
40 PGSCAN_ZONE_RECLAIM_FAILED, 40 PGSCAN_ZONE_RECLAIM_FAILED,
41#endif 41#endif
42 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, 42 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
43 KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
44 KSWAPD_SKIP_CONGESTION_WAIT,
43 PAGEOUTRUN, ALLOCSTALL, PGROTATED, 45 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
44#ifdef CONFIG_HUGETLB_PAGE 46#ifdef CONFIG_HUGETLB_PAGE
45 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, 47 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
@@ -76,24 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
76 78
77static inline void __count_vm_event(enum vm_event_item item) 79static inline void __count_vm_event(enum vm_event_item item)
78{ 80{
79 __get_cpu_var(vm_event_states).event[item]++; 81 __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
80} 82}
81 83
82static inline void count_vm_event(enum vm_event_item item) 84static inline void count_vm_event(enum vm_event_item item)
83{ 85{
84 get_cpu_var(vm_event_states).event[item]++; 86 this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
85 put_cpu();
86} 87}
87 88
88static inline void __count_vm_events(enum vm_event_item item, long delta) 89static inline void __count_vm_events(enum vm_event_item item, long delta)
89{ 90{
90 __get_cpu_var(vm_event_states).event[item] += delta; 91 __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
91} 92}
92 93
93static inline void count_vm_events(enum vm_event_item item, long delta) 94static inline void count_vm_events(enum vm_event_item item, long delta)
94{ 95{
95 get_cpu_var(vm_event_states).event[item] += delta; 96 this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
96 put_cpu();
97} 97}
98 98
99extern void all_vm_events(unsigned long *); 99extern void all_vm_events(unsigned long *);
diff --git a/include/linux/vt.h b/include/linux/vt.h
index 7ffa11f06232..d5dd0bc408fd 100644
--- a/include/linux/vt.h
+++ b/include/linux/vt.h
@@ -84,4 +84,23 @@ struct vt_setactivate {
84 84
85#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ 85#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */
86 86
87#ifdef __KERNEL__
88
89#ifdef CONFIG_VT_CONSOLE
90
91extern int vt_kmsg_redirect(int new);
92
93#else
94
95static inline int vt_kmsg_redirect(int new)
96{
97 return 0;
98}
99
100#endif
101
102#endif /* __KERNEL__ */
103
104#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
105
87#endif /* _LINUX_VT_H */ 106#endif /* _LINUX_VT_H */
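
The global kmsg_redirect variable (dropped from tty.h above) is replaced by vt_kmsg_redirect(): pass a console number to change the target, or -1 (which is what vt_get_kmsg_redirect() does) to only read it back. A minimal in-kernel sketch; the helper name is illustrative:

#include <linux/types.h>
#include <linux/vt.h>

static bool kmsg_is_redirected(void)
{
	/* vt_get_kmsg_redirect() expands to vt_kmsg_redirect(-1): query only. */
	return vt_get_kmsg_redirect() != 0;
}
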
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 705f01fe413a..76e8903cd204 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -70,6 +70,7 @@ struct writeback_control {
70struct bdi_writeback; 70struct bdi_writeback;
71int inode_wait(void *); 71int inode_wait(void *);
72void writeback_inodes_sb(struct super_block *); 72void writeback_inodes_sb(struct super_block *);
73int writeback_inodes_sb_if_idle(struct super_block *);
73void sync_inodes_sb(struct super_block *); 74void sync_inodes_sb(struct super_block *);
74void writeback_inodes_wbc(struct writeback_control *wbc); 75void writeback_inodes_wbc(struct writeback_control *wbc);
75long wb_do_writeback(struct bdi_writeback *wb, int force_wait); 76long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
@@ -79,8 +80,7 @@ void wakeup_flusher_threads(long nr_pages);
79static inline void wait_on_inode(struct inode *inode) 80static inline void wait_on_inode(struct inode *inode)
80{ 81{
81 might_sleep(); 82 might_sleep();
82 wait_on_bit(&inode->i_state, __I_LOCK, inode_wait, 83 wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
83 TASK_UNINTERRUPTIBLE);
84} 84}
85static inline void inode_sync_wait(struct inode *inode) 85static inline void inode_sync_wait(struct inode *inode)
86{ 86{
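
writeback_inodes_sb_if_idle() is a new variant that only kicks writeback for a super block when none is already in flight, which filesystems can use under space pressure. A sketch under the assumption that a non-zero return means writeback was actually queued; nudge_writeback() is illustrative:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/writeback.h>

static void nudge_writeback(struct super_block *sb)
{
	if (writeback_inodes_sb_if_idle(sb))
		pr_debug("queued background writeback\n");
}
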
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 5c84af8c5f6f..fb9b7e6e1e2d 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -38,12 +38,13 @@ struct dentry;
38 38
39struct xattr_handler { 39struct xattr_handler {
40 char *prefix; 40 char *prefix;
41 size_t (*list)(struct inode *inode, char *list, size_t list_size, 41 int flags; /* fs private flags passed back to the handlers */
42 const char *name, size_t name_len); 42 size_t (*list)(struct dentry *dentry, char *list, size_t list_size,
43 int (*get)(struct inode *inode, const char *name, void *buffer, 43 const char *name, size_t name_len, int handler_flags);
44 size_t size); 44 int (*get)(struct dentry *dentry, const char *name, void *buffer,
45 int (*set)(struct inode *inode, const char *name, const void *buffer, 45 size_t size, int handler_flags);
46 size_t size, int flags); 46 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
47 size_t size, int flags, int handler_flags);
47}; 48};
48 49
49ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); 50ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
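
The xattr_handler callbacks now operate on a dentry and receive the handler's own flags word, instead of taking a bare inode. A sketch of a handler filled in with the new signatures; the "user." prefix choice and the do-nothing bodies are illustrative only, and a filesystem would list such handlers in its sb->s_xattr array:

#include <linux/dcache.h>
#include <linux/errno.h>
#include <linux/xattr.h>

static size_t demo_xattr_list(struct dentry *dentry, char *list,
			      size_t list_size, const char *name,
			      size_t name_len, int handler_flags)
{
	return 0;			/* advertise nothing via listxattr */
}

static int demo_xattr_get(struct dentry *dentry, const char *name,
			  void *buffer, size_t size, int handler_flags)
{
	return -ENODATA;		/* attribute never present */
}

static int demo_xattr_set(struct dentry *dentry, const char *name,
			  const void *buffer, size_t size, int flags,
			  int handler_flags)
{
	return -EOPNOTSUPP;		/* refuse writes */
}

static struct xattr_handler demo_xattr_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= 0,			/* passed back as handler_flags */
	.list	= demo_xattr_list,
	.get	= demo_xattr_get,
	.set	= demo_xattr_set,
};
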