Merge branch 'master' into next

author    James Morris <jmorris@namei.org>    2009-05-08 03:56:47 -0400
committer James Morris <jmorris@namei.org>    2009-05-08 03:56:47 -0400
commit    d254117099d711f215e62427f55dfb8ebd5ad011 (patch)
tree      0848ff8dd74314fec14a86497f8d288c86ba7c65 /include/linux
parent    07ff7a0b187f3951788f64ae1f30e8109bc8e9eb (diff)
parent    8c9ed899b44c19e81859fbb0e9d659fe2f8630fc (diff)

Diffstat (limited to 'include/linux'): 243 files changed, 7675 insertions, 3368 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index e9581fd9fb66..ca9b9b9bd331 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -67,6 +67,7 @@ header-y += falloc.h
 header-y += fd.h
 header-y += fdreg.h
 header-y += fib_rules.h
+header-y += fiemap.h
 header-y += firewire-cdev.h
 header-y += firewire-constants.h
 header-y += fuse.h
@@ -158,8 +159,6 @@ header-y += ultrasound.h
 header-y += un.h
 header-y += utime.h
 header-y += veth.h
-header-y += video_decoder.h
-header-y += video_encoder.h
 header-y += videotext.h
 header-y += x25.h
 
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 78199151c00b..88be890ee3c7 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -97,6 +97,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
 /* the following four functions are architecture-dependent */
 void acpi_numa_slit_init (struct acpi_table_slit *slit);
 void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
+void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa);
 void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma);
 void acpi_numa_arch_fixup(void);
 
@@ -110,6 +111,7 @@ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
 int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
 void acpi_irq_stats_init(void);
 extern u32 acpi_irq_handled;
+extern u32 acpi_irq_not_handled;
 
 extern struct acpi_mcfg_allocation *pci_mmcfg_config;
 extern int pci_mmcfg_config_num;
@@ -257,6 +259,40 @@ void __init acpi_no_s4_hw_signature(void);
 void __init acpi_old_suspend_ordering(void);
 void __init acpi_s4_no_nvs(void);
 #endif /* CONFIG_PM_SLEEP */
+
+#define OSC_QUERY_TYPE			0
+#define OSC_SUPPORT_TYPE		1
+#define OSC_CONTROL_TYPE		2
+#define OSC_SUPPORT_MASKS		0x1f
+
+/* _OSC DW0 Definition */
+#define OSC_QUERY_ENABLE		1
+#define OSC_REQUEST_ERROR		2
+#define OSC_INVALID_UUID_ERROR		4
+#define OSC_INVALID_REVISION_ERROR	8
+#define OSC_CAPABILITIES_MASK_ERROR	16
+
+/* _OSC DW1 Definition (OS Support Fields) */
+#define OSC_EXT_PCI_CONFIG_SUPPORT	1
+#define OSC_ACTIVE_STATE_PWR_SUPPORT	2
+#define OSC_CLOCK_PWR_CAPABILITY_SUPPORT	4
+#define OSC_PCI_SEGMENT_GROUPS_SUPPORT	8
+#define OSC_MSI_SUPPORT			16
+
+/* _OSC DW1 Definition (OS Control Fields) */
+#define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL	1
+#define OSC_SHPC_NATIVE_HP_CONTROL	2
+#define OSC_PCI_EXPRESS_PME_CONTROL	4
+#define OSC_PCI_EXPRESS_AER_CONTROL	8
+#define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL	16
+
+#define OSC_CONTROL_MASKS	(OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |	\
+				OSC_SHPC_NATIVE_HP_CONTROL |		\
+				OSC_PCI_EXPRESS_PME_CONTROL |		\
+				OSC_PCI_EXPRESS_AER_CONTROL |		\
+				OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
+
+extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags);
 #else	/* CONFIG_ACPI */
 
 static inline int early_acpi_boot_init(void)
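The _OSC masks and acpi_pci_osc_control_set() added above are what PCI root-bridge code uses to negotiate feature control with firmware. A minimal sketch of such a call follows; the function name and the bridge handle are assumptions for illustration, not part of this merge:

static int example_claim_pcie_services(acpi_handle bridge_handle)
{
	acpi_status status;
	u32 flags = OSC_PCI_EXPRESS_NATIVE_HP_CONTROL |
		    OSC_PCI_EXPRESS_AER_CONTROL;

	/* Ask firmware to hand over native PCIe hotplug and AER control. */
	status = acpi_pci_osc_control_set(bridge_handle, flags);
	if (ACPI_FAILURE(status))
		return -ENODEV;	/* firmware kept control of these features */

	return 0;
}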
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 45f6297821bd..5fc2ef8d97fa 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -21,6 +21,15 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 
+/* on architectures without dma-mapping capabilities we need to ensure
+ * that the asynchronous path compiles away
+ */
+#ifdef CONFIG_HAS_DMA
+#define __async_inline
+#else
+#define __async_inline __always_inline
+#endif
+
 /**
  * dma_chan_ref - object used to manage dma channels received from the
  * dmaengine core.
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 6617c9f8f2ca..cb79b7a208e1 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -29,6 +29,8 @@
 #ifndef __LINUX_ATA_H__
 #define __LINUX_ATA_H__
 
+#include <linux/kernel.h>
+#include <linux/string.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
 
@@ -91,6 +93,7 @@ enum {
 	ATA_ID_CFA_POWER	= 160,
 	ATA_ID_CFA_KEY_MGMT	= 162,
 	ATA_ID_CFA_MODES	= 163,
+	ATA_ID_DATA_SET_MGMT	= 169,
 	ATA_ID_ROT_SPEED	= 217,
 	ATA_ID_PIO4		= (1 << 1),
 
@@ -248,6 +251,7 @@ enum {
 	ATA_CMD_SMART		= 0xB0,
 	ATA_CMD_MEDIA_LOCK	= 0xDE,
 	ATA_CMD_MEDIA_UNLOCK	= 0xDF,
+	ATA_CMD_DSM		= 0x06,
 	/* marked obsolete in the ATA/ATAPI-7 spec */
 	ATA_CMD_RESTORE		= 0x10,
 
@@ -321,6 +325,9 @@ enum {
 	ATA_SMART_READ_VALUES	= 0xD0,
 	ATA_SMART_READ_THRESHOLDS = 0xD1,
 
+	/* feature values for Data Set Management */
+	ATA_DSM_TRIM		= 0x01,
+
 	/* password used in LBA Mid / LBA High for executing SMART commands */
 	ATA_SMART_LBAM_PASS	= 0x4F,
 	ATA_SMART_LBAH_PASS	= 0xC2,
@@ -723,6 +730,14 @@ static inline int ata_id_has_unload(const u16 *id)
 	return 0;
 }
 
+static inline int ata_id_has_trim(const u16 *id)
+{
+	if (ata_id_major_version(id) >= 7 &&
+	    (id[ATA_ID_DATA_SET_MGMT] & 1))
+		return 1;
+	return 0;
+}
+
 static inline int ata_id_current_chs_valid(const u16 *id)
 {
 	/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
@@ -863,6 +878,32 @@ static inline void ata_id_to_hd_driveid(u16 *id)
 #endif
 }
 
+/*
+ * Write up to 'max' LBA Range Entries to the buffer that will cover the
+ * extent from sector to sector + count.  This is used for TRIM and for
+ * ADD LBA(S) TO NV CACHE PINNED SET.
+ */
+static inline unsigned ata_set_lba_range_entries(void *_buffer, unsigned max,
+						u64 sector, unsigned long count)
+{
+	__le64 *buffer = _buffer;
+	unsigned i = 0;
+
+	while (i < max) {
+		u64 entry = sector |
+			((u64)(count > 0xffff ? 0xffff : count) << 48);
+		buffer[i++] = __cpu_to_le64(entry);
+		if (count <= 0xffff)
+			break;
+		count -= 0xffff;
+		sector += 0xffff;
+	}
+
+	max = ALIGN(i * 8, 512);
+	memset(buffer + i, 0, max - i * 8);
+	return max;
+}
+
 static inline int is_multi_taskfile(struct ata_taskfile *tf)
 {
 	return (tf->command == ATA_CMD_READ_MULTI) ||
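As a usage illustration (assumed, not part of this merge), a driver could check ata_id_has_trim() and then fill a single 512-byte sector with LBA range entries for a DATA SET MANAGEMENT / TRIM command using the helper above:

static int example_build_trim_payload(const u16 *id, void *buf,
				      u64 first_lba, unsigned long nr_lbas)
{
	unsigned payload_bytes;

	if (!ata_id_has_trim(id))
		return -EOPNOTSUPP;

	/* A 512-byte sector holds up to 64 eight-byte LBA range entries. */
	payload_bytes = ata_set_lba_range_entries(buf, 64, first_lba, nr_lbas);

	/* The returned size is zero-padded up to a multiple of 512 bytes. */
	return payload_bytes;
}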
diff --git a/include/linux/auto_dev-ioctl.h b/include/linux/auto_dev-ioctl.h
index 91a773993a5c..850f39b33e74 100644
--- a/include/linux/auto_dev-ioctl.h
+++ b/include/linux/auto_dev-ioctl.h
@@ -10,8 +10,13 @@
 #ifndef _LINUX_AUTO_DEV_IOCTL_H
 #define _LINUX_AUTO_DEV_IOCTL_H
 
+#include <linux/auto_fs.h>
+
+#ifdef __KERNEL__
 #include <linux/string.h>
-#include <linux/types.h>
+#else
+#include <string.h>
+#endif /* __KERNEL__ */
 
 #define AUTOFS_DEVICE_NAME		"autofs"
 
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index c21e5972a3e8..63265852b7d1 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -17,11 +17,13 @@
 #ifdef __KERNEL__
 #include <linux/fs.h>
 #include <linux/limits.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#else
 #include <asm/types.h>
+#include <sys/ioctl.h>
 #endif /* __KERNEL__ */
 
-#include <linux/ioctl.h>
-
 /* This file describes autofs v3 */
 #define AUTOFS_PROTO_VERSION	3
 
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bee52abb8a4d..0ec2c594868e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,8 +24,8 @@ struct dentry;
  */
 enum bdi_state {
 	BDI_pdflush,		/* A pdflush thread is working this device */
-	BDI_write_congested,	/* The write queue is getting full */
-	BDI_read_congested,	/* The read queue is getting full */
+	BDI_async_congested,	/* The async (write) queue is getting full */
+	BDI_sync_congested,	/* The sync queue is getting full */
 	BDI_unused,		/* Available bits start here */
 };
 
@@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
 
 static inline int bdi_read_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, 1 << BDI_read_congested);
+	return bdi_congested(bdi, 1 << BDI_sync_congested);
 }
 
 static inline int bdi_write_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, 1 << BDI_write_congested);
+	return bdi_congested(bdi, 1 << BDI_async_congested);
 }
 
 static inline int bdi_rw_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, (1 << BDI_read_congested)|
-				  (1 << BDI_write_congested));
+	return bdi_congested(bdi, (1 << BDI_sync_congested) |
+				  (1 << BDI_async_congested));
 }
 
 void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 77b4a9e46004..61ee18c1bdb4 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -35,8 +35,7 @@ struct linux_binprm{
 #endif
 	struct mm_struct *mm;
 	unsigned long p; /* current top of mem */
-	unsigned int sh_bang:1,
-		misc_bang:1,
+	unsigned int
 		cred_prepared:1,/* true if creds already prepared (multiple
 				 * preps happen for interpreters) */
 		cap_effective:1;/* true if has elevated effective capabilities,
@@ -83,7 +82,19 @@ struct linux_binfmt {
 	int hasvdso;
 };
 
-extern int register_binfmt(struct linux_binfmt *);
+extern int __register_binfmt(struct linux_binfmt *fmt, int insert);
+
+/* Registration of default binfmt handlers */
+static inline int register_binfmt(struct linux_binfmt *fmt)
+{
+	return __register_binfmt(fmt, 0);
+}
+/* Same as above, but adds a new binfmt at the top of the list */
+static inline int insert_binfmt(struct linux_binfmt *fmt)
+{
+	return __register_binfmt(fmt, 1);
+}
+
 extern void unregister_binfmt(struct linux_binfmt *);
 
 extern int prepare_binprm(struct linux_binprm *);
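For illustration only (the names below are hypothetical), a loader that needs to be consulted before the existing handlers would now register itself with insert_binfmt() rather than register_binfmt():

static int example_load_binary(struct linux_binprm *bprm, struct pt_regs *regs);

static struct linux_binfmt example_format = {
	.module		= THIS_MODULE,
	.load_binary	= example_load_binary,	/* hypothetical handler */
};

static int __init example_binfmt_init(void)
{
	/* insert_binfmt() puts the handler at the head of the list */
	return insert_binfmt(&example_format);
}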
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b05b1d4d17d2..7b214fd672a2 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -132,6 +132,7 @@ struct bio {
  * top 4 bits of bio flags indicate the pool this bio came from
  */
 #define BIO_POOL_BITS		(4)
+#define BIO_POOL_NONE		((1UL << BIO_POOL_BITS) - 1)
 #define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
 #define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
 #define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
@@ -145,20 +146,21 @@ struct bio {
  * bit 2 -- barrier
  *	Insert a serialization point in the IO queue, forcing previously
  *	submitted IO to be completed before this one is issued.
- * bit 3 -- synchronous I/O hint: the block layer will unplug immediately
- *	Note that this does NOT indicate that the IO itself is sync, just
- *	that the block layer will not postpone issue of this IO by plugging.
- * bit 4 -- metadata request
+ * bit 3 -- synchronous I/O hint.
+ * bit 4 -- Unplug the device immediately after submitting this bio.
+ * bit 5 -- metadata request
  *	Used for tracing to differentiate metadata and data IO. May also
  *	get some preferential treatment in the IO scheduler
- * bit 5 -- discard sectors
+ * bit 6 -- discard sectors
  *	Informs the lower level device that this range of sectors is no longer
  *	used by the file system and may thus be freed by the device. Used
  *	for flash based storage.
- * bit 6 -- fail fast device errors
- * bit 7 -- fail fast transport errors
- * bit 8 -- fail fast driver errors
+ * bit 7 -- fail fast device errors
+ * bit 8 -- fail fast transport errors
+ * bit 9 -- fail fast driver errors
  *	Don't want driver retries for any fast fail whatever the reason.
+ * bit 10 -- Tell the IO scheduler not to wait for more requests after this
+	one has been submitted, even if it is a SYNC request.
  */
 #define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
 #define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
@@ -170,6 +172,7 @@ struct bio {
 #define BIO_RW_FAILFAST_DEV	7
 #define BIO_RW_FAILFAST_TRANSPORT	8
 #define BIO_RW_FAILFAST_DRIVER	9
+#define BIO_RW_NOIDLE	10
 
 #define bio_rw_flagged(bio, flag)	((bio)->bi_rw & (1 << (flag)))
 
@@ -188,6 +191,7 @@ struct bio {
 #define bio_rw_ahead(bio)	bio_rw_flagged(bio, BIO_RW_AHEAD)
 #define bio_rw_meta(bio)	bio_rw_flagged(bio, BIO_RW_META)
 #define bio_discard(bio)	bio_rw_flagged(bio, BIO_RW_DISCARD)
+#define bio_noidle(bio)		bio_rw_flagged(bio, BIO_RW_NOIDLE)
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
@@ -501,6 +505,115 @@ static inline int bio_has_data(struct bio *bio)
 	return bio && bio->bi_io_vec != NULL;
 }
 
+/*
+ * BIO list managment for use by remapping drivers (e.g. DM or MD).
+ *
+ * A bio_list anchors a singly-linked list of bios chained through the bi_next
+ * member of the bio.  The bio_list also caches the last list member to allow
+ * fast access to the tail.
+ */
+struct bio_list {
+	struct bio *head;
+	struct bio *tail;
+};
+
+static inline int bio_list_empty(const struct bio_list *bl)
+{
+	return bl->head == NULL;
+}
+
+static inline void bio_list_init(struct bio_list *bl)
+{
+	bl->head = bl->tail = NULL;
+}
+
+#define bio_list_for_each(bio, bl) \
+	for (bio = (bl)->head; bio; bio = bio->bi_next)
+
+static inline unsigned bio_list_size(const struct bio_list *bl)
+{
+	unsigned sz = 0;
+	struct bio *bio;
+
+	bio_list_for_each(bio, bl)
+		sz++;
+
+	return sz;
+}
+
+static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
+{
+	bio->bi_next = NULL;
+
+	if (bl->tail)
+		bl->tail->bi_next = bio;
+	else
+		bl->head = bio;
+
+	bl->tail = bio;
+}
+
+static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
+{
+	bio->bi_next = bl->head;
+
+	bl->head = bio;
+
+	if (!bl->tail)
+		bl->tail = bio;
+}
+
+static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
+{
+	if (!bl2->head)
+		return;
+
+	if (bl->tail)
+		bl->tail->bi_next = bl2->head;
+	else
+		bl->head = bl2->head;
+
+	bl->tail = bl2->tail;
+}
+
+static inline void bio_list_merge_head(struct bio_list *bl,
+				       struct bio_list *bl2)
+{
+	if (!bl2->head)
+		return;
+
+	if (bl->head)
+		bl2->tail->bi_next = bl->head;
+	else
+		bl->tail = bl2->tail;
+
+	bl->head = bl2->head;
+}
+
+static inline struct bio *bio_list_pop(struct bio_list *bl)
+{
+	struct bio *bio = bl->head;
+
+	if (bio) {
+		bl->head = bl->head->bi_next;
+		if (!bl->head)
+			bl->tail = NULL;
+
+		bio->bi_next = NULL;
+	}
+
+	return bio;
+}
+
+static inline struct bio *bio_list_get(struct bio_list *bl)
+{
+	struct bio *bio = bl->head;
+
+	bl->head = bl->tail = NULL;
+
+	return bio;
+}
+
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
 #define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
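A sketch of how a remapping driver might use the new bio_list helpers to park bios and resubmit them later in FIFO order (the function names are illustrative):

static void example_defer_bio(struct bio_list *pending, struct bio *bio)
{
	bio_list_add(pending, bio);		/* append at the tail */
}

static void example_resubmit_all(struct bio_list *pending)
{
	struct bio *bio;

	while ((bio = bio_list_pop(pending)) != NULL)
		generic_make_request(bio);	/* resubmit oldest first */
}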
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 61829139795a..c05a29cb9bb2 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -112,6 +112,25 @@ static inline unsigned fls_long(unsigned long l)
 	return fls64(l);
 }
 
+/**
+ * __ffs64 - find first set bit in a 64 bit word
+ * @word: The 64 bit word
+ *
+ * On 64 bit arches this is a synomyn for __ffs
+ * The result is not defined if no bits are set, so check that @word
+ * is non-zero before calling this.
+ */
+static inline unsigned long __ffs64(u64 word)
+{
+#if BITS_PER_LONG == 32
+	if (((u32)word) == 0UL)
+		return __ffs((u32)(word >> 32)) + 32;
+#elif BITS_PER_LONG != 64
+#error BITS_PER_LONG not 32 or 64
+#endif
+	return __ffs((unsigned long)word);
+}
+
 #ifdef __KERNEL__
 #ifdef CONFIG_GENERIC_FIND_FIRST_BIT
 
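A small, assumed usage example of __ffs64(); the zero check matters because the result is undefined for an all-zero word:

static unsigned long example_first_set_bit(u64 mask)
{
	return mask ? __ffs64(mask) : 64;	/* 64 used here as a "no bit set" sentinel */
}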
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847..b4f71f1a4af7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
+	/*
+	 * count[], starved[], and wait[] are indexed by
+	 * BLK_RW_SYNC/BLK_RW_ASYNC
+	 */
 	int count[2];
 	int starved[2];
 	int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
+enum {
+	BLK_RW_ASYNC	= 0,
+	BLK_RW_SYNC	= 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -103,12 +112,13 @@ enum rq_flag_bits {
 	__REQ_QUIET,		/* don't worry about errors */
 	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
-	__REQ_UNPLUG,		/* unplug queue on submission */
+	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
+	__REQ_IO_STAT,		/* account I/O stat */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -135,7 +145,8 @@ enum rq_flag_bits {
 #define REQ_RW_META	(1 << __REQ_RW_META)
 #define REQ_COPY_USER	(1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
-#define REQ_UNPLUG	(1 << __REQ_UNPLUG)
+#define REQ_NOIDLE	(1 << __REQ_NOIDLE)
+#define REQ_IO_STAT	(1 << __REQ_IO_STAT)
 
 #define BLK_MAX_CDB	16
 
@@ -438,8 +449,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
@@ -589,6 +600,8 @@ enum {
 				 blk_failfast_transport(rq) ||	\
 				 blk_failfast_driver(rq))
 #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
+#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)
+#define blk_rq_quiet(rq)	((rq)->cmd_flags & REQ_QUIET)
 
 #define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
 
@@ -611,32 +624,42 @@ enum {
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync, if either a read or a sync write
  */
-#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+	return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
+#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)
 
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+	if (sync)
+		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_set(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_clear(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }
 
 
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 6e915878e88c..d960889e92ef 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -144,6 +144,9 @@ struct blk_user_trace_setup {
 
 #ifdef __KERNEL__
 #if defined(CONFIG_BLK_DEV_IO_TRACE)
+
+#include <linux/sysfs.h>
+
 struct blk_trace {
 	int trace_state;
 	struct rchan *rchan;
@@ -194,6 +197,8 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 extern int blk_trace_startstop(struct request_queue *q, int start);
 extern int blk_trace_remove(struct request_queue *q);
 
+extern struct attribute_group blk_trace_attr_group;
+
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 #define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
 #define blk_trace_shutdown(q)			do { } while (0)
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 455d83219fae..bc3ab7073695 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -146,10 +146,10 @@ extern void *alloc_large_system_hash(const char *tablename,
 
 #define HASH_EARLY	0x00000001	/* Allocating during early boot? */
 
-/* Only NUMA needs hash distribution.
- * IA64 and x86_64 have sufficient vmalloc space.
+/* Only NUMA needs hash distribution. 64bit NUMA architectures have
+ * sufficient vmalloc space.
  */
-#if defined(CONFIG_NUMA) && (defined(CONFIG_IA64) || defined(CONFIG_X86_64))
+#if defined(CONFIG_NUMA) && defined(CONFIG_64BIT)
 #define HASHDIST_DEFAULT 1
 #else
 #define HASHDIST_DEFAULT 0
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index f19fd9045ea0..16ed0284d780 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -155,6 +155,7 @@ void create_empty_buffers(struct page *, unsigned long,
 			unsigned long b_state);
 void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
 void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
+void end_buffer_async_write(struct buffer_head *bh, int uptodate);
 
 /* Things to do with buffers at mapping->private_list */
 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -197,6 +198,8 @@ extern int buffer_heads_over_limit;
 void block_invalidatepage(struct page *page, unsigned long offset);
 int block_write_full_page(struct page *page, get_block_t *get_block,
 				struct writeback_control *wbc);
+int block_write_full_page_endio(struct page *page, get_block_t *get_block,
+			struct writeback_control *wbc, bh_end_io_t *handler);
 int block_read_full_page(struct page*, get_block_t*);
 int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
 				unsigned long from);
@@ -216,7 +219,7 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
 			get_block_t *, loff_t *);
 int generic_cont_expand_simple(struct inode *inode, loff_t size);
 int block_commit_write(struct page *page, unsigned from, unsigned to);
-int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
 				get_block_t get_block);
 void block_sync_page(struct page *);
 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
@@ -332,22 +335,10 @@ extern int __set_page_dirty_buffers(struct page *page);
 
 static inline void buffer_init(void) {}
 static inline int try_to_free_buffers(struct page *page) { return 1; }
-static inline int sync_blockdev(struct block_device *bdev) { return 0; }
 static inline int inode_has_buffers(struct inode *inode) { return 0; }
 static inline void invalidate_inode_buffers(struct inode *inode) {}
 static inline int remove_inode_buffers(struct inode *inode) { return 1; }
 static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bdev(struct block_device *bdev) {}
-
-static inline struct super_block *freeze_bdev(struct block_device *sb)
-{
-	return NULL;
-}
-
-static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
-{
-	return 0;
-}
 
 #endif /* CONFIG_BLOCK */
 #endif /* _LINUX_BUFFER_HEAD_H */
diff --git a/include/linux/capability.h b/include/linux/capability.h
index 4864a43b2b45..c3021105edc0 100644
--- a/include/linux/capability.h
+++ b/include/linux/capability.h
@@ -377,7 +377,21 @@ struct cpu_vfs_cap_data {
 #define CAP_FOR_EACH_U32(__capi)  \
 	for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
 
+/*
+ * CAP_FS_MASK and CAP_NFSD_MASKS:
+ *
+ * The fs mask is all the privileges that fsuid==0 historically meant.
+ * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE.
+ *
+ * It has never meant setting security.* and trusted.* xattrs.
+ *
+ * We could also define fsmask as follows:
+ *   1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions
+ *   2. The security.* and trusted.* xattrs are fs-related MAC permissions
+ */
+
 # define CAP_FS_MASK_B0     (CAP_TO_MASK(CAP_CHOWN)		\
+			    | CAP_TO_MASK(CAP_MKNOD)		\
 			    | CAP_TO_MASK(CAP_DAC_OVERRIDE)	\
 			    | CAP_TO_MASK(CAP_DAC_READ_SEARCH)	\
 			    | CAP_TO_MASK(CAP_FOWNER)		\
@@ -392,11 +406,12 @@ struct cpu_vfs_cap_data {
 # define CAP_EMPTY_SET    ((kernel_cap_t){{ 0, 0 }})
 # define CAP_FULL_SET     ((kernel_cap_t){{ ~0, ~0 }})
 # define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }})
-# define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } })
+# define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0 \
+				    | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
+				    CAP_FS_MASK_B1 } })
 # define CAP_NFSD_SET     ((kernel_cap_t){{ CAP_FS_MASK_B0 \
-			    | CAP_TO_MASK(CAP_SYS_RESOURCE) \
-			    | CAP_TO_MASK(CAP_MKNOD), \
-			    CAP_FS_MASK_B1 } })
+				    | CAP_TO_MASK(CAP_SYS_RESOURCE), \
+				    CAP_FS_MASK_B1 } })
 
 #endif /* _KERNEL_CAPABILITY_U32S != 2 */
 
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 499900d0cee7..665fa70e4094 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -15,6 +15,7 @@
 #include <linux/cgroupstats.h>
 #include <linux/prio_heap.h>
 #include <linux/rwsem.h>
+#include <linux/idr.h>
 
 #ifdef CONFIG_CGROUPS
 
@@ -22,6 +23,7 @@ struct cgroupfs_root;
 struct cgroup_subsys;
 struct inode;
 struct cgroup;
+struct css_id;
 
 extern int cgroup_init_early(void);
 extern int cgroup_init(void);
@@ -47,18 +49,24 @@ enum cgroup_subsys_id {
 
 /* Per-subsystem/per-cgroup state maintained by the system. */
 struct cgroup_subsys_state {
-	/* The cgroup that this subsystem is attached to. Useful
+	/*
+	 * The cgroup that this subsystem is attached to. Useful
 	 * for subsystems that want to know about the cgroup
-	 * hierarchy structure */
+	 * hierarchy structure
+	 */
 	struct cgroup *cgroup;
 
-	/* State maintained by the cgroup system to allow subsystems
+	/*
+	 * State maintained by the cgroup system to allow subsystems
 	 * to be "busy". Should be accessed via css_get(),
-	 * css_tryget() and and css_put(). */
+	 * css_tryget() and and css_put().
+	 */
 
 	atomic_t refcnt;
 
 	unsigned long flags;
+	/* ID for this css, if possible */
+	struct css_id *id;
 };
 
 /* bits in struct cgroup_subsys_state flags field */
@@ -120,19 +128,26 @@ static inline void css_put(struct cgroup_subsys_state *css)
 enum {
 	/* Control Group is dead */
 	CGRP_REMOVED,
-	/* Control Group has previously had a child cgroup or a task,
-	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
+	/*
+	 * Control Group has previously had a child cgroup or a task,
+	 * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set)
+	 */
 	CGRP_RELEASABLE,
 	/* Control Group requires release notifications to userspace */
 	CGRP_NOTIFY_ON_RELEASE,
+	/*
+	 * A thread in rmdir() is wating for this cgroup.
+	 */
+	CGRP_WAIT_ON_RMDIR,
 };
 
 struct cgroup {
 	unsigned long flags;		/* "unsigned long" so bitops work */
 
-	/* count users of this cgroup. >0 means busy, but doesn't
-	 * necessarily indicate the number of tasks in the
-	 * cgroup */
+	/*
+	 * count users of this cgroup. >0 means busy, but doesn't
+	 * necessarily indicate the number of tasks in the cgroup
+	 */
 	atomic_t count;
 
 	/*
@@ -142,7 +157,7 @@ struct cgroup {
 	struct list_head sibling;	/* my parent's children */
 	struct list_head children;	/* my children */
 
-	struct cgroup *parent;	/* my parent */
+	struct cgroup *parent;		/* my parent */
 	struct dentry *dentry;		/* cgroup fs entry, RCU protected */
 
 	/* Private pointers for each registered subsystem */
@@ -177,11 +192,12 @@ struct cgroup {
 	struct rcu_head rcu_head;
 };
 
-/* A css_set is a structure holding pointers to a set of
+/*
+ * A css_set is a structure holding pointers to a set of
  * cgroup_subsys_state objects. This saves space in the task struct
  * object and speeds up fork()/exit(), since a single inc/dec and a
- * list_add()/del() can bump the reference count on the entire
- * cgroup set for a task.
+ * list_add()/del() can bump the reference count on the entire cgroup
+ * set for a task.
  */
 
 struct css_set {
@@ -226,13 +242,8 @@ struct cgroup_map_cb {
 	void *state;
 };
 
-/* struct cftype:
- *
- * The files in the cgroup filesystem mostly have a very simple read/write
- * handling, some common function will take care of it. Nevertheless some cases
- * (read tasks) are special and therefore I define this structure for every
- * kind of file.
- *
+/*
+ * struct cftype: handler definitions for cgroup control files
  *
  * When reading/writing to a file:
  *	- the cgroup to use is file->f_dentry->d_parent->d_fsdata
@@ -241,10 +252,17 @@ struct cgroup_map_cb {
 
 #define MAX_CFTYPE_NAME 64
 struct cftype {
-	/* By convention, the name should begin with the name of the
-	 * subsystem, followed by a period */
+	/*
+	 * By convention, the name should begin with the name of the
+	 * subsystem, followed by a period
+	 */
 	char name[MAX_CFTYPE_NAME];
 	int private;
+	/*
+	 * If not 0, file mode is set to this value, otherwise it will
+	 * be figured out automatically
+	 */
+	mode_t mode;
 
 	/*
 	 * If non-zero, defines the maximum length of string that can
@@ -319,15 +337,20 @@ struct cgroup_scanner {
 	void (*process_task)(struct task_struct *p,
 			struct cgroup_scanner *scan);
 	struct ptr_heap *heap;
+	void *data;
 };
 
-/* Add a new file to the given cgroup directory. Should only be
- * called by subsystems from within a populate() method */
+/*
+ * Add a new file to the given cgroup directory. Should only be
+ * called by subsystems from within a populate() method
+ */
 int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
 		       const struct cftype *cft);
 
-/* Add a set of new files to the given cgroup directory. Should
- * only be called by subsystems from within a populate() method */
+/*
+ * Add a set of new files to the given cgroup directory. Should
+ * only be called by subsystems from within a populate() method
+ */
 int cgroup_add_files(struct cgroup *cgrp,
 			struct cgroup_subsys *subsys,
 			const struct cftype cft[],
@@ -339,15 +362,18 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
 
 int cgroup_task_count(const struct cgroup *cgrp);
 
-/* Return true if the cgroup is a descendant of the current cgroup */
-int cgroup_is_descendant(const struct cgroup *cgrp);
+/* Return true if cgrp is a descendant of the task's cgroup */
+int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task);
 
-/* Control Group subsystem type. See Documentation/cgroups.txt for details */
+/*
+ * Control Group subsystem type.
+ * See Documentation/cgroups/cgroups.txt for details
+ */
 
 struct cgroup_subsys {
 	struct cgroup_subsys_state *(*create)(struct cgroup_subsys *ss,
 						  struct cgroup *cgrp);
-	void (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
+	int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
 	void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
 	int (*can_attach)(struct cgroup_subsys *ss,
 			  struct cgroup *cgrp, struct task_struct *tsk);
@@ -364,6 +390,11 @@ struct cgroup_subsys {
 	int active;
 	int disabled;
 	int early_init;
+	/*
+	 * True if this subsys uses ID. ID is not available before cgroup_init()
+	 * (not available in early_init time.)
+	 */
+	bool use_id;
 #define MAX_CGROUP_TYPE_NAMELEN 32
 	const char *name;
 
@@ -386,6 +417,9 @@ struct cgroup_subsys {
 	 */
 	struct cgroupfs_root *root;
 	struct list_head sibling;
+	/* used when use_id == true */
+	struct idr idr;
+	spinlock_t id_lock;
 };
 
 #define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
@@ -419,7 +453,8 @@ struct cgroup_iter {
 	struct list_head *task;
 };
 
-/* To iterate across the tasks in a cgroup:
+/*
+ * To iterate across the tasks in a cgroup:
  *
  * 1) call cgroup_iter_start to intialize an iterator
  *
@@ -428,9 +463,10 @@ struct cgroup_iter {
  *
  * 3) call cgroup_iter_end() to destroy the iterator.
  *
- * Or, call cgroup_scan_tasks() to iterate through every task in a cpuset.
- * - cgroup_scan_tasks() holds the css_set_lock when calling the test_task()
- * callback, but not while calling the process_task() callback.
+ * Or, call cgroup_scan_tasks() to iterate through every task in a
+ * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
+ * the test_task() callback, but not while calling the process_task()
+ * callback.
  */
 void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
 struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
@@ -439,6 +475,44 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
 int cgroup_scan_tasks(struct cgroup_scanner *scan);
 int cgroup_attach_task(struct cgroup *, struct task_struct *);
 
+/*
+ * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works
+ * if cgroup_subsys.use_id == true. It can be used for looking up and scanning.
+ * CSS ID is assigned at cgroup allocation (create) automatically
+ * and removed when subsys calls free_css_id() function. This is because
+ * the lifetime of cgroup_subsys_state is subsys's matter.
+ *
+ * Looking up and scanning function should be called under rcu_read_lock().
+ * Taking cgroup_mutex()/hierarchy_mutex() is not necessary for following calls.
+ * But the css returned by this routine can be "not populated yet" or "being
+ * destroyed". The caller should check css and cgroup's status.
+ */
+
+/*
+ * Typically Called at ->destroy(), or somewhere the subsys frees
+ * cgroup_subsys_state.
+ */
+void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css);
+
+/* Find a cgroup_subsys_state which has given ID */
+
+struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id);
+
+/*
+ * Get a cgroup whose id is greater than or equal to id under tree of root.
+ * Returning a cgroup_subsys_state or NULL.
+ */
+struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id,
+		struct cgroup_subsys_state *root, int *foundid);
+
+/* Returns true if root is ancestor of cg */
+bool css_is_ancestor(struct cgroup_subsys_state *cg,
+		     const struct cgroup_subsys_state *root);
+
+/* Get id and depth of css */
+unsigned short css_id(struct cgroup_subsys_state *css);
+unsigned short css_depth(struct cgroup_subsys_state *css);
+
 #else /* !CONFIG_CGROUPS */
 
 static inline int cgroup_init_early(void) { return 0; }
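A hypothetical walker built on the new CSS ID interface, following the locking rule stated in the comment block above (lookups run under rcu_read_lock(), and the caller re-checks the state of each returned css):

static void example_walk_css(struct cgroup_subsys *ss,
			     struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *css;
	int nextid = 1, found;

	rcu_read_lock();
	while ((css = css_get_next(ss, nextid, root, &found)) != NULL) {
		/* 'found' holds the ID that was actually located */
		nextid = found + 1;
	}
	rcu_read_unlock();
}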
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 573819ef4cc0..5a40d14daa9f 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
@@ -143,7 +143,9 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, | |||
143 | * 400-499: Perfect | 143 | * 400-499: Perfect |
144 | * The ideal clocksource. A must-use where | 144 | * The ideal clocksource. A must-use where |
145 | * available. | 145 | * available. |
146 | * @read: returns a cycle value | 146 | * @read: returns a cycle value, passes clocksource as argument |
147 | * @enable: optional function to enable the clocksource | ||
148 | * @disable: optional function to disable the clocksource | ||
147 | * @mask: bitmask for two's complement | 149 | * @mask: bitmask for two's complement |
148 | * subtraction of non 64 bit counters | 150 | * subtraction of non 64 bit counters |
149 | * @mult: cycle to nanosecond multiplier (adjusted by NTP) | 151 | * @mult: cycle to nanosecond multiplier (adjusted by NTP) |
@@ -162,7 +164,9 @@ struct clocksource { | |||
162 | char *name; | 164 | char *name; |
163 | struct list_head list; | 165 | struct list_head list; |
164 | int rating; | 166 | int rating; |
165 | cycle_t (*read)(void); | 167 | cycle_t (*read)(struct clocksource *cs); |
168 | int (*enable)(struct clocksource *cs); | ||
169 | void (*disable)(struct clocksource *cs); | ||
166 | cycle_t mask; | 170 | cycle_t mask; |
167 | u32 mult; | 171 | u32 mult; |
168 | u32 mult_orig; | 172 | u32 mult_orig; |
@@ -271,7 +275,34 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) | |||
271 | */ | 275 | */ |
272 | static inline cycle_t clocksource_read(struct clocksource *cs) | 276 | static inline cycle_t clocksource_read(struct clocksource *cs) |
273 | { | 277 | { |
274 | return cs->read(); | 278 | return cs->read(cs); |
279 | } | ||
280 | |||
281 | /** | ||
282 | * clocksource_enable: - enable clocksource | ||
283 | * @cs: pointer to clocksource | ||
284 | * | ||
285 | * Enables the specified clocksource. The clocksource callback | ||
286 | * function should start up the hardware and setup mult and field | ||
287 | * members of struct clocksource to reflect hardware capabilities. | ||
288 | */ | ||
289 | static inline int clocksource_enable(struct clocksource *cs) | ||
290 | { | ||
291 | return cs->enable ? cs->enable(cs) : 0; | ||
292 | } | ||
293 | |||
294 | /** | ||
295 | * clocksource_disable: - disable clocksource | ||
296 | * @cs: pointer to clocksource | ||
297 | * | ||
298 | * Disables the specified clocksource. The clocksource callback | ||
299 | * function should power down the now unused hardware block to | ||
300 | * save power. | ||
301 | */ | ||
302 | static inline void clocksource_disable(struct clocksource *cs) | ||
303 | { | ||
304 | if (cs->disable) | ||
305 | cs->disable(cs); | ||
275 | } | 306 | } |
276 | 307 | ||
277 | /** | 308 | /** |
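With the clocksource changes above, read() now receives the struct clocksource pointer, and the optional enable()/disable() hooks let the timekeeping core power the counter block up and down via clocksource_enable()/clocksource_disable(). Below is a hedged driver-side sketch of the new callback shapes; the my_timer_* names, register offset and clock handle are invented placeholders, not part of the patch.

    #include <linux/clocksource.h>
    #include <linux/clk.h>
    #include <linux/io.h>

    static void __iomem *my_timer_base;     /* placeholder: mapped timer registers */
    static struct clk *my_timer_clk;        /* placeholder: the counter's clock */
    #define MY_TIMER_COUNT 0x10             /* placeholder register offset */

    static cycle_t my_timer_read(struct clocksource *cs)
    {
            return (cycle_t)readl(my_timer_base + MY_TIMER_COUNT);
    }

    static int my_timer_enable(struct clocksource *cs)
    {
            return clk_enable(my_timer_clk);        /* power the counter up */
    }

    static void my_timer_disable(struct clocksource *cs)
    {
            clk_disable(my_timer_clk);              /* and back down when unused */
    }

    static struct clocksource my_clocksource = {
            .name    = "my-timer",
            .rating  = 200,
            .read    = my_timer_read,
            .enable  = my_timer_enable,
            .disable = my_timer_disable,
            .mask    = CLOCKSOURCE_MASK(32),
    };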
diff --git a/include/linux/compat.h b/include/linux/compat.h index b880864672de..f2ded21f9a3c 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
@@ -191,6 +191,12 @@ asmlinkage ssize_t compat_sys_readv(unsigned long fd, | |||
191 | const struct compat_iovec __user *vec, unsigned long vlen); | 191 | const struct compat_iovec __user *vec, unsigned long vlen); |
192 | asmlinkage ssize_t compat_sys_writev(unsigned long fd, | 192 | asmlinkage ssize_t compat_sys_writev(unsigned long fd, |
193 | const struct compat_iovec __user *vec, unsigned long vlen); | 193 | const struct compat_iovec __user *vec, unsigned long vlen); |
194 | asmlinkage ssize_t compat_sys_preadv(unsigned long fd, | ||
195 | const struct compat_iovec __user *vec, | ||
196 | unsigned long vlen, u32 pos_low, u32 pos_high); | ||
197 | asmlinkage ssize_t compat_sys_pwritev(unsigned long fd, | ||
198 | const struct compat_iovec __user *vec, | ||
199 | unsigned long vlen, u32 pos_low, u32 pos_high); | ||
194 | 200 | ||
195 | int compat_do_execve(char * filename, compat_uptr_t __user *argv, | 201 | int compat_do_execve(char * filename, compat_uptr_t __user *argv, |
196 | compat_uptr_t __user *envp, struct pt_regs * regs); | 202 | compat_uptr_t __user *envp, struct pt_regs * regs); |
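compat_sys_preadv()/compat_sys_pwritev() take the 64-bit file offset as two 32-bit halves because a 32-bit process cannot pass a 64-bit scalar in a single argument slot; the kernel side reassembles it as ((loff_t)pos_high << 32) | pos_low. A small, runnable plain-C illustration of that reassembly follows; the helper name is invented.

    #include <stdint.h>
    #include <stdio.h>

    /* How a compat syscall stub rebuilds the 64-bit offset that 32-bit
     * user space had to split into two 32-bit halves. */
    static int64_t combine_pos(uint32_t pos_low, uint32_t pos_high)
    {
            return ((int64_t)pos_high << 32) | pos_low;
    }

    int main(void)
    {
            /* 0x123456789 split into high/low words and put back together */
            printf("0x%llx\n", (unsigned long long)combine_pos(0x23456789u, 0x1u));
            return 0;
    }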
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index d95da1020f1c..37bcb50a4d7c 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -68,6 +68,7 @@ struct ftrace_branch_data { | |||
68 | unsigned long miss; | 68 | unsigned long miss; |
69 | unsigned long hit; | 69 | unsigned long hit; |
70 | }; | 70 | }; |
71 | unsigned long miss_hit[2]; | ||
71 | }; | 72 | }; |
72 | }; | 73 | }; |
73 | 74 | ||
@@ -75,7 +76,8 @@ struct ftrace_branch_data { | |||
75 | * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code | 76 | * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code |
76 | * to disable branch tracing on a per file basis. | 77 | * to disable branch tracing on a per file basis. |
77 | */ | 78 | */ |
78 | #if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING) | 79 | #if defined(CONFIG_TRACE_BRANCH_PROFILING) \ |
80 | && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) | ||
79 | void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | 81 | void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); |
80 | 82 | ||
81 | #define likely_notrace(x) __builtin_expect(!!(x), 1) | 83 | #define likely_notrace(x) __builtin_expect(!!(x), 1) |
@@ -113,7 +115,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
113 | * "Define 'is'", Bill Clinton | 115 | * "Define 'is'", Bill Clinton |
114 | * "Define 'if'", Steven Rostedt | 116 | * "Define 'if'", Steven Rostedt |
115 | */ | 117 | */ |
116 | #define if(cond) if (__builtin_constant_p((cond)) ? !!(cond) : \ | 118 | #define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) |
119 | #define __trace_if(cond) \ | ||
120 | if (__builtin_constant_p((cond)) ? !!(cond) : \ | ||
117 | ({ \ | 121 | ({ \ |
118 | int ______r; \ | 122 | int ______r; \ |
119 | static struct ftrace_branch_data \ | 123 | static struct ftrace_branch_data \ |
@@ -125,10 +129,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
125 | .line = __LINE__, \ | 129 | .line = __LINE__, \ |
126 | }; \ | 130 | }; \ |
127 | ______r = !!(cond); \ | 131 | ______r = !!(cond); \ |
128 | if (______r) \ | 132 | ______f.miss_hit[______r]++; \ |
129 | ______f.hit++; \ | ||
130 | else \ | ||
131 | ______f.miss++; \ | ||
132 | ______r; \ | 133 | ______r; \ |
133 | })) | 134 | })) |
134 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ | 135 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ |
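The reworked branch profiler adds a two-element miss_hit[] array next to the old hit/miss pair, so the instrumented if() can bump the right counter with one unconditional array-indexed increment instead of a conditional one. Here is a runnable plain-C toy of the indexing idea; all names are invented.

    #include <stdio.h>

    /* Toy model of the miss_hit[2] trick: index 0 counts misses, index 1
     * counts hits, so one unconditional increment replaces an if/else. */
    struct branch_data {
            unsigned long miss_hit[2];
    };

    static struct branch_data bd;

    static int traced_if(int cond)
    {
            int r = !!cond;

            bd.miss_hit[r]++;       /* no branch needed to pick the counter */
            return r;
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 10; i++)
                    traced_if(i % 3 == 0);
            printf("miss=%lu hit=%lu\n", bd.miss_hit[0], bd.miss_hit[1]);
            return 0;
    }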
diff --git a/include/linux/connector.h b/include/linux/connector.h index fc65d219d88c..b9966e64604e 100644 --- a/include/linux/connector.h +++ b/include/linux/connector.h | |||
@@ -39,8 +39,10 @@ | |||
39 | #define CN_IDX_V86D 0x4 | 39 | #define CN_IDX_V86D 0x4 |
40 | #define CN_VAL_V86D_UVESAFB 0x1 | 40 | #define CN_VAL_V86D_UVESAFB 0x1 |
41 | #define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ | 41 | #define CN_IDX_BB 0x5 /* BlackBoard, from the TSP GPL sampling framework */ |
42 | #define CN_DST_IDX 0x6 | ||
43 | #define CN_DST_VAL 0x1 | ||
42 | 44 | ||
43 | #define CN_NETLINK_USERS 6 | 45 | #define CN_NETLINK_USERS 7 |
44 | 46 | ||
45 | /* | 47 | /* |
46 | * Maximum connector's message size. | 48 | * Maximum connector's message size. |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index c2747ac2ae43..2643d848df90 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/node.h> | 23 | #include <linux/node.h> |
24 | #include <linux/compiler.h> | 24 | #include <linux/compiler.h> |
25 | #include <linux/cpumask.h> | 25 | #include <linux/cpumask.h> |
26 | #include <linux/mutex.h> | ||
27 | 26 | ||
28 | struct cpu { | 27 | struct cpu { |
29 | int node_id; /* The node which contains the CPU */ | 28 | int node_id; /* The node which contains the CPU */ |
@@ -103,16 +102,6 @@ extern struct sysdev_class cpu_sysdev_class; | |||
103 | #ifdef CONFIG_HOTPLUG_CPU | 102 | #ifdef CONFIG_HOTPLUG_CPU |
104 | /* Stop CPUs going up and down. */ | 103 | /* Stop CPUs going up and down. */ |
105 | 104 | ||
106 | static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex) | ||
107 | { | ||
108 | mutex_lock(cpu_hp_mutex); | ||
109 | } | ||
110 | |||
111 | static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) | ||
112 | { | ||
113 | mutex_unlock(cpu_hp_mutex); | ||
114 | } | ||
115 | |||
116 | extern void get_online_cpus(void); | 105 | extern void get_online_cpus(void); |
117 | extern void put_online_cpus(void); | 106 | extern void put_online_cpus(void); |
118 | #define hotcpu_notifier(fn, pri) { \ | 107 | #define hotcpu_notifier(fn, pri) { \ |
@@ -126,11 +115,6 @@ int cpu_down(unsigned int cpu); | |||
126 | 115 | ||
127 | #else /* CONFIG_HOTPLUG_CPU */ | 116 | #else /* CONFIG_HOTPLUG_CPU */ |
128 | 117 | ||
129 | static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex) | ||
130 | { } | ||
131 | static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) | ||
132 | { } | ||
133 | |||
134 | #define get_online_cpus() do { } while (0) | 118 | #define get_online_cpus() do { } while (0) |
135 | #define put_online_cpus() do { } while (0) | 119 | #define put_online_cpus() do { } while (0) |
136 | #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) | 120 | #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) |
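With the cpuhotplug_mutex_lock()/unlock() wrappers removed from cpu.h, code that needs the set of online CPUs to stay stable brackets its critical section with get_online_cpus()/put_online_cpus() instead (no-ops when CPU hotplug is not configured). A minimal sketch, with the loop body reduced to a trivial printout:

    #include <linux/cpu.h>
    #include <linux/kernel.h>

    /* Sketch: keep CPUs from coming or going while we walk the online set. */
    static void report_online_cpus(void)
    {
            int cpu;

            get_online_cpus();              /* blocks CPU hotplug while held */
            for_each_online_cpu(cpu)
                    pr_info("cpu %d is online\n", cpu);
            put_online_cpus();
    }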
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 90c6074a36ca..05ea1dd7d681 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/cpumask.h> | 12 | #include <linux/cpumask.h> |
13 | #include <linux/nodemask.h> | 13 | #include <linux/nodemask.h> |
14 | #include <linux/cgroup.h> | 14 | #include <linux/cgroup.h> |
15 | #include <linux/mm.h> | ||
15 | 16 | ||
16 | #ifdef CONFIG_CPUSETS | 17 | #ifdef CONFIG_CPUSETS |
17 | 18 | ||
@@ -29,19 +30,29 @@ void cpuset_init_current_mems_allowed(void); | |||
29 | void cpuset_update_task_memory_state(void); | 30 | void cpuset_update_task_memory_state(void); |
30 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); | 31 | int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); |
31 | 32 | ||
32 | extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask); | 33 | extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); |
33 | extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask); | 34 | extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask); |
34 | 35 | ||
35 | static int inline cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | 36 | static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) |
36 | { | 37 | { |
37 | return number_of_cpusets <= 1 || | 38 | return number_of_cpusets <= 1 || |
38 | __cpuset_zone_allowed_softwall(z, gfp_mask); | 39 | __cpuset_node_allowed_softwall(node, gfp_mask); |
39 | } | 40 | } |
40 | 41 | ||
41 | static int inline cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | 42 | static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) |
42 | { | 43 | { |
43 | return number_of_cpusets <= 1 || | 44 | return number_of_cpusets <= 1 || |
44 | __cpuset_zone_allowed_hardwall(z, gfp_mask); | 45 | __cpuset_node_allowed_hardwall(node, gfp_mask); |
46 | } | ||
47 | |||
48 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | ||
49 | { | ||
50 | return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask); | ||
51 | } | ||
52 | |||
53 | static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | ||
54 | { | ||
55 | return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask); | ||
45 | } | 56 | } |
46 | 57 | ||
47 | extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, | 58 | extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, |
@@ -90,12 +101,12 @@ static inline void cpuset_init_smp(void) {} | |||
90 | static inline void cpuset_cpus_allowed(struct task_struct *p, | 101 | static inline void cpuset_cpus_allowed(struct task_struct *p, |
91 | struct cpumask *mask) | 102 | struct cpumask *mask) |
92 | { | 103 | { |
93 | *mask = cpu_possible_map; | 104 | cpumask_copy(mask, cpu_possible_mask); |
94 | } | 105 | } |
95 | static inline void cpuset_cpus_allowed_locked(struct task_struct *p, | 106 | static inline void cpuset_cpus_allowed_locked(struct task_struct *p, |
96 | struct cpumask *mask) | 107 | struct cpumask *mask) |
97 | { | 108 | { |
98 | *mask = cpu_possible_map; | 109 | cpumask_copy(mask, cpu_possible_mask); |
99 | } | 110 | } |
100 | 111 | ||
101 | static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) | 112 | static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) |
@@ -112,6 +123,16 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) | |||
112 | return 1; | 123 | return 1; |
113 | } | 124 | } |
114 | 125 | ||
126 | static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | ||
127 | { | ||
128 | return 1; | ||
129 | } | ||
130 | |||
131 | static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) | ||
132 | { | ||
133 | return 1; | ||
134 | } | ||
135 | |||
115 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | 136 | static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) |
116 | { | 137 | { |
117 | return 1; | 138 | return 1; |
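The cpuset hooks move from zone-granular to node-granular checks; the old cpuset_zone_allowed_*() calls survive only as thin wrappers that translate through zone_to_nid(). Below is a hedged sketch of an allocator-style caller using the node variant directly; it is illustrative only, not code from this patch.

    #include <linux/cpuset.h>
    #include <linux/gfp.h>
    #include <linux/nodemask.h>

    /* Illustrative: try each online node the current task's cpuset allows. */
    static struct page *alloc_from_allowed_node(gfp_t gfp_mask, unsigned int order)
    {
            struct page *page;
            int nid;

            for_each_online_node(nid) {
                    if (!cpuset_node_allowed_softwall(nid, gfp_mask))
                            continue;       /* cpuset forbids this node */
                    page = alloc_pages_node(nid, gfp_mask, order);
                    if (page)
                            return page;
            }
            return NULL;
    }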
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 096476f1fb35..29b3ce3f2a1d 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h | |||
@@ -2,12 +2,20 @@ | |||
2 | #define __LINUX_DEBUG_LOCKING_H | 2 | #define __LINUX_DEBUG_LOCKING_H |
3 | 3 | ||
4 | #include <linux/kernel.h> | 4 | #include <linux/kernel.h> |
5 | #include <asm/atomic.h> | ||
6 | #include <asm/system.h> | ||
5 | 7 | ||
6 | struct task_struct; | 8 | struct task_struct; |
7 | 9 | ||
8 | extern int debug_locks; | 10 | extern int debug_locks; |
9 | extern int debug_locks_silent; | 11 | extern int debug_locks_silent; |
10 | 12 | ||
13 | |||
14 | static inline int __debug_locks_off(void) | ||
15 | { | ||
16 | return xchg(&debug_locks, 0); | ||
17 | } | ||
18 | |||
11 | /* | 19 | /* |
12 | * Generic 'turn off all lock debugging' function: | 20 | * Generic 'turn off all lock debugging' function: |
13 | */ | 21 | */ |
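__debug_locks_off() uses xchg() so that, when several CPUs hit a lock-debugging error at once, exactly one of them observes the 1 -> 0 transition and gets to emit the report. The following runnable plain-C program models the same one-shot pattern with C11 atomics standing in for the kernel's xchg(); the names are invented.

    #include <stdatomic.h>
    #include <stdio.h>

    /* Model of the debug_locks pattern: atomically swap the flag to 0; only
     * the caller that saw it as 1 prints the (one-time) report. */
    static atomic_int debug_flag = ATOMIC_VAR_INIT(1);

    static int debug_flag_off(void)
    {
            return atomic_exchange(&debug_flag, 0);
    }

    int main(void)
    {
            if (debug_flag_off())
                    printf("first caller: emit the lock debugging report\n");
            if (debug_flag_off())
                    printf("not reached: flag already cleared\n");
            return 0;
    }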
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index af0e01d4c663..eb5c2ba2f81a 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h | |||
@@ -71,6 +71,9 @@ struct dentry *debugfs_create_bool(const char *name, mode_t mode, | |||
71 | struct dentry *debugfs_create_blob(const char *name, mode_t mode, | 71 | struct dentry *debugfs_create_blob(const char *name, mode_t mode, |
72 | struct dentry *parent, | 72 | struct dentry *parent, |
73 | struct debugfs_blob_wrapper *blob); | 73 | struct debugfs_blob_wrapper *blob); |
74 | |||
75 | bool debugfs_initialized(void); | ||
76 | |||
74 | #else | 77 | #else |
75 | 78 | ||
76 | #include <linux/err.h> | 79 | #include <linux/err.h> |
@@ -183,6 +186,11 @@ static inline struct dentry *debugfs_create_blob(const char *name, mode_t mode, | |||
183 | return ERR_PTR(-ENODEV); | 186 | return ERR_PTR(-ENODEV); |
184 | } | 187 | } |
185 | 188 | ||
189 | static inline bool debugfs_initialized(void) | ||
190 | { | ||
191 | return false; | ||
192 | } | ||
193 | |||
186 | #endif | 194 | #endif |
187 | 195 | ||
188 | #endif | 196 | #endif |
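debugfs_initialized() lets callers that might run very early (or with CONFIG_DEBUG_FS disabled, where the stub returns false) skip creating debugfs entries they could never use. A hedged driver-side sketch follows; the mydrv names are placeholders.

    #include <linux/debugfs.h>

    static struct dentry *mydrv_dbg;        /* placeholder dentry */

    /* Sketch: only create a debugfs file once the debugfs core is ready. */
    static void mydrv_setup_debugfs(u32 *stat)
    {
            if (!debugfs_initialized())
                    return;                 /* too early, or debugfs disabled */

            mydrv_dbg = debugfs_create_u32("mydrv_stat", 0444, NULL, stat);
    }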
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 8209e08969f9..ded2d7c42668 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -116,7 +116,6 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d); | |||
116 | /* | 116 | /* |
117 | * Target features | 117 | * Target features |
118 | */ | 118 | */ |
119 | #define DM_TARGET_SUPPORTS_BARRIERS 0x00000001 | ||
120 | 119 | ||
121 | struct target_type { | 120 | struct target_type { |
122 | uint64_t features; | 121 | uint64_t features; |
@@ -139,6 +138,9 @@ struct target_type { | |||
139 | dm_ioctl_fn ioctl; | 138 | dm_ioctl_fn ioctl; |
140 | dm_merge_fn merge; | 139 | dm_merge_fn merge; |
141 | dm_busy_fn busy; | 140 | dm_busy_fn busy; |
141 | |||
142 | /* For internal device-mapper use. */ | ||
143 | struct list_head list; | ||
142 | }; | 144 | }; |
143 | 145 | ||
144 | struct io_restrictions { | 146 | struct io_restrictions { |
diff --git a/include/linux/device.h b/include/linux/device.h index 2918c0e8fdfd..6a69caaac18a 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -551,6 +551,7 @@ extern int (*platform_notify_remove)(struct device *dev); | |||
551 | extern struct device *get_device(struct device *dev); | 551 | extern struct device *get_device(struct device *dev); |
552 | extern void put_device(struct device *dev); | 552 | extern void put_device(struct device *dev); |
553 | 553 | ||
554 | extern void wait_for_device_probe(void); | ||
554 | 555 | ||
555 | /* drivers/base/power/shutdown.c */ | 556 | /* drivers/base/power/shutdown.c */ |
556 | extern void device_shutdown(void); | 557 | extern void device_shutdown(void); |
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h index 600c5fb2daad..5e8b11d88f6f 100644 --- a/include/linux/dm-dirty-log.h +++ b/include/linux/dm-dirty-log.h | |||
@@ -28,6 +28,9 @@ struct dm_dirty_log_type { | |||
28 | const char *name; | 28 | const char *name; |
29 | struct module *module; | 29 | struct module *module; |
30 | 30 | ||
31 | /* For internal device-mapper use */ | ||
32 | struct list_head list; | ||
33 | |||
31 | int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti, | 34 | int (*ctr)(struct dm_dirty_log *log, struct dm_target *ti, |
32 | unsigned argc, char **argv); | 35 | unsigned argc, char **argv); |
33 | void (*dtr)(struct dm_dirty_log *log); | 36 | void (*dtr)(struct dm_dirty_log *log); |
@@ -113,6 +116,16 @@ struct dm_dirty_log_type { | |||
113 | */ | 116 | */ |
114 | int (*status)(struct dm_dirty_log *log, status_type_t status_type, | 117 | int (*status)(struct dm_dirty_log *log, status_type_t status_type, |
115 | char *result, unsigned maxlen); | 118 | char *result, unsigned maxlen); |
119 | |||
120 | /* | ||
121 | * is_remote_recovering is necessary for cluster mirroring. It provides | ||
122 | * a way to detect recovery on another node, so we aren't writing | ||
123 | * concurrently. This function is likely to block (when a cluster log | ||
124 | * is used). | ||
125 | * | ||
126 | * Returns: 0, 1 | ||
127 | */ | ||
128 | int (*is_remote_recovering)(struct dm_dirty_log *log, region_t region); | ||
116 | }; | 129 | }; |
117 | 130 | ||
118 | int dm_dirty_log_type_register(struct dm_dirty_log_type *type); | 131 | int dm_dirty_log_type_register(struct dm_dirty_log_type *type); |
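The new is_remote_recovering() method gives a cluster-aware dirty log a way to report that another node is still recovering a region, so the local mirror must not write it yet; as the comment notes, the call may block. Below is an illustrative sketch of how a mirror write path might consult it, assuming the usual struct dm_dirty_log 'type' pointer; this is not code from the patch.

    #include <linux/dm-dirty-log.h>

    /* Illustrative: before queueing a write to 'region', ask the log whether
     * some other cluster node is recovering it; if so, hold the write back. */
    static int region_writable(struct dm_dirty_log *log, region_t region)
    {
            if (log->type->is_remote_recovering &&
                log->type->is_remote_recovering(log, region))
                    return 0;       /* remote recovery in progress */
            return 1;
    }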
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h new file mode 100644 index 000000000000..28d53cb7b5a2 --- /dev/null +++ b/include/linux/dma-debug.h | |||
@@ -0,0 +1,174 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Advanced Micro Devices, Inc. | ||
3 | * | ||
4 | * Author: Joerg Roedel <joerg.roedel@amd.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License version 2 as published | ||
8 | * by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #ifndef __DMA_DEBUG_H | ||
21 | #define __DMA_DEBUG_H | ||
22 | |||
23 | #include <linux/types.h> | ||
24 | |||
25 | struct device; | ||
26 | struct scatterlist; | ||
27 | struct bus_type; | ||
28 | |||
29 | #ifdef CONFIG_DMA_API_DEBUG | ||
30 | |||
31 | extern void dma_debug_add_bus(struct bus_type *bus); | ||
32 | |||
33 | extern void dma_debug_init(u32 num_entries); | ||
34 | |||
35 | extern void debug_dma_map_page(struct device *dev, struct page *page, | ||
36 | size_t offset, size_t size, | ||
37 | int direction, dma_addr_t dma_addr, | ||
38 | bool map_single); | ||
39 | |||
40 | extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
41 | size_t size, int direction, bool map_single); | ||
42 | |||
43 | extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
44 | int nents, int mapped_ents, int direction); | ||
45 | |||
46 | extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, | ||
47 | int nelems, int dir); | ||
48 | |||
49 | extern void debug_dma_alloc_coherent(struct device *dev, size_t size, | ||
50 | dma_addr_t dma_addr, void *virt); | ||
51 | |||
52 | extern void debug_dma_free_coherent(struct device *dev, size_t size, | ||
53 | void *virt, dma_addr_t addr); | ||
54 | |||
55 | extern void debug_dma_sync_single_for_cpu(struct device *dev, | ||
56 | dma_addr_t dma_handle, size_t size, | ||
57 | int direction); | ||
58 | |||
59 | extern void debug_dma_sync_single_for_device(struct device *dev, | ||
60 | dma_addr_t dma_handle, | ||
61 | size_t size, int direction); | ||
62 | |||
63 | extern void debug_dma_sync_single_range_for_cpu(struct device *dev, | ||
64 | dma_addr_t dma_handle, | ||
65 | unsigned long offset, | ||
66 | size_t size, | ||
67 | int direction); | ||
68 | |||
69 | extern void debug_dma_sync_single_range_for_device(struct device *dev, | ||
70 | dma_addr_t dma_handle, | ||
71 | unsigned long offset, | ||
72 | size_t size, int direction); | ||
73 | |||
74 | extern void debug_dma_sync_sg_for_cpu(struct device *dev, | ||
75 | struct scatterlist *sg, | ||
76 | int nelems, int direction); | ||
77 | |||
78 | extern void debug_dma_sync_sg_for_device(struct device *dev, | ||
79 | struct scatterlist *sg, | ||
80 | int nelems, int direction); | ||
81 | |||
82 | extern void debug_dma_dump_mappings(struct device *dev); | ||
83 | |||
84 | #else /* CONFIG_DMA_API_DEBUG */ | ||
85 | |||
86 | static inline void dma_debug_add_bus(struct bus_type *bus) | ||
87 | { | ||
88 | } | ||
89 | |||
90 | static inline void dma_debug_init(u32 num_entries) | ||
91 | { | ||
92 | } | ||
93 | |||
94 | static inline void debug_dma_map_page(struct device *dev, struct page *page, | ||
95 | size_t offset, size_t size, | ||
96 | int direction, dma_addr_t dma_addr, | ||
97 | bool map_single) | ||
98 | { | ||
99 | } | ||
100 | |||
101 | static inline void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, | ||
102 | size_t size, int direction, | ||
103 | bool map_single) | ||
104 | { | ||
105 | } | ||
106 | |||
107 | static inline void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, | ||
108 | int nents, int mapped_ents, int direction) | ||
109 | { | ||
110 | } | ||
111 | |||
112 | static inline void debug_dma_unmap_sg(struct device *dev, | ||
113 | struct scatterlist *sglist, | ||
114 | int nelems, int dir) | ||
115 | { | ||
116 | } | ||
117 | |||
118 | static inline void debug_dma_alloc_coherent(struct device *dev, size_t size, | ||
119 | dma_addr_t dma_addr, void *virt) | ||
120 | { | ||
121 | } | ||
122 | |||
123 | static inline void debug_dma_free_coherent(struct device *dev, size_t size, | ||
124 | void *virt, dma_addr_t addr) | ||
125 | { | ||
126 | } | ||
127 | |||
128 | static inline void debug_dma_sync_single_for_cpu(struct device *dev, | ||
129 | dma_addr_t dma_handle, | ||
130 | size_t size, int direction) | ||
131 | { | ||
132 | } | ||
133 | |||
134 | static inline void debug_dma_sync_single_for_device(struct device *dev, | ||
135 | dma_addr_t dma_handle, | ||
136 | size_t size, int direction) | ||
137 | { | ||
138 | } | ||
139 | |||
140 | static inline void debug_dma_sync_single_range_for_cpu(struct device *dev, | ||
141 | dma_addr_t dma_handle, | ||
142 | unsigned long offset, | ||
143 | size_t size, | ||
144 | int direction) | ||
145 | { | ||
146 | } | ||
147 | |||
148 | static inline void debug_dma_sync_single_range_for_device(struct device *dev, | ||
149 | dma_addr_t dma_handle, | ||
150 | unsigned long offset, | ||
151 | size_t size, | ||
152 | int direction) | ||
153 | { | ||
154 | } | ||
155 | |||
156 | static inline void debug_dma_sync_sg_for_cpu(struct device *dev, | ||
157 | struct scatterlist *sg, | ||
158 | int nelems, int direction) | ||
159 | { | ||
160 | } | ||
161 | |||
162 | static inline void debug_dma_sync_sg_for_device(struct device *dev, | ||
163 | struct scatterlist *sg, | ||
164 | int nelems, int direction) | ||
165 | { | ||
166 | } | ||
167 | |||
168 | static inline void debug_dma_dump_mappings(struct device *dev) | ||
169 | { | ||
170 | } | ||
171 | |||
172 | #endif /* CONFIG_DMA_API_DEBUG */ | ||
173 | |||
174 | #endif /* __DMA_DEBUG_H */ | ||
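The debug_dma_*() hooks above are intended to be called from an architecture's DMA-mapping wrappers so that, with CONFIG_DMA_API_DEBUG enabled, every map/unmap and sync can be cross-checked; with the option off they compile away to the empty stubs. A hedged sketch of a wrapper feeding the debug layer follows; it is not the real arch code, and the 1:1 address computation is a placeholder.

    #include <linux/dma-debug.h>
    #include <asm/io.h>

    static dma_addr_t my_dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size, int dir)
    {
            dma_addr_t addr = page_to_phys(page) + offset;  /* placeholder mapping */

            debug_dma_map_page(dev, page, offset, size, dir, addr, false);
            return addr;
    }

    static void my_dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int dir)
    {
            debug_dma_unmap_page(dev, addr, size, dir, false);
            /* the real unmapping work would happen here */
    }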
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index ba9114ec5d3a..8083b6a36a38 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -3,6 +3,8 @@ | |||
3 | 3 | ||
4 | #include <linux/device.h> | 4 | #include <linux/device.h> |
5 | #include <linux/err.h> | 5 | #include <linux/err.h> |
6 | #include <linux/dma-attrs.h> | ||
7 | #include <linux/scatterlist.h> | ||
6 | 8 | ||
7 | /* These definitions mirror those in pci.h, so they can be used | 9 | /* These definitions mirror those in pci.h, so they can be used |
8 | * interchangeably with their PCI_ counterparts */ | 10 | * interchangeably with their PCI_ counterparts */ |
@@ -13,6 +15,52 @@ enum dma_data_direction { | |||
13 | DMA_NONE = 3, | 15 | DMA_NONE = 3, |
14 | }; | 16 | }; |
15 | 17 | ||
18 | struct dma_map_ops { | ||
19 | void* (*alloc_coherent)(struct device *dev, size_t size, | ||
20 | dma_addr_t *dma_handle, gfp_t gfp); | ||
21 | void (*free_coherent)(struct device *dev, size_t size, | ||
22 | void *vaddr, dma_addr_t dma_handle); | ||
23 | dma_addr_t (*map_page)(struct device *dev, struct page *page, | ||
24 | unsigned long offset, size_t size, | ||
25 | enum dma_data_direction dir, | ||
26 | struct dma_attrs *attrs); | ||
27 | void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, | ||
28 | size_t size, enum dma_data_direction dir, | ||
29 | struct dma_attrs *attrs); | ||
30 | int (*map_sg)(struct device *dev, struct scatterlist *sg, | ||
31 | int nents, enum dma_data_direction dir, | ||
32 | struct dma_attrs *attrs); | ||
33 | void (*unmap_sg)(struct device *dev, | ||
34 | struct scatterlist *sg, int nents, | ||
35 | enum dma_data_direction dir, | ||
36 | struct dma_attrs *attrs); | ||
37 | void (*sync_single_for_cpu)(struct device *dev, | ||
38 | dma_addr_t dma_handle, size_t size, | ||
39 | enum dma_data_direction dir); | ||
40 | void (*sync_single_for_device)(struct device *dev, | ||
41 | dma_addr_t dma_handle, size_t size, | ||
42 | enum dma_data_direction dir); | ||
43 | void (*sync_single_range_for_cpu)(struct device *dev, | ||
44 | dma_addr_t dma_handle, | ||
45 | unsigned long offset, | ||
46 | size_t size, | ||
47 | enum dma_data_direction dir); | ||
48 | void (*sync_single_range_for_device)(struct device *dev, | ||
49 | dma_addr_t dma_handle, | ||
50 | unsigned long offset, | ||
51 | size_t size, | ||
52 | enum dma_data_direction dir); | ||
53 | void (*sync_sg_for_cpu)(struct device *dev, | ||
54 | struct scatterlist *sg, int nents, | ||
55 | enum dma_data_direction dir); | ||
56 | void (*sync_sg_for_device)(struct device *dev, | ||
57 | struct scatterlist *sg, int nents, | ||
58 | enum dma_data_direction dir); | ||
59 | int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); | ||
60 | int (*dma_supported)(struct device *dev, u64 mask); | ||
61 | int is_phys; | ||
62 | }; | ||
63 | |||
16 | #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) | 64 | #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) |
17 | 65 | ||
18 | /* | 66 | /* |
@@ -67,7 +115,7 @@ static inline u64 dma_get_mask(struct device *dev) | |||
67 | { | 115 | { |
68 | if (dev && dev->dma_mask && *dev->dma_mask) | 116 | if (dev && dev->dma_mask && *dev->dma_mask) |
69 | return *dev->dma_mask; | 117 | return *dev->dma_mask; |
70 | return DMA_32BIT_MASK; | 118 | return DMA_BIT_MASK(32); |
71 | } | 119 | } |
72 | 120 | ||
73 | extern u64 dma_get_required_mask(struct device *dev); | 121 | extern u64 dma_get_required_mask(struct device *dev); |
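struct dma_map_ops collects the per-implementation DMA primitives into a single ops table, so an architecture can plug different backends (nommu, swiotlb, an IOMMU) behind one generic API. Here is a much-reduced, hedged sketch of such a table for a direct 1:1 mapping backend; the function names are invented and most hooks are left out.

    #include <linux/dma-mapping.h>
    #include <asm/io.h>

    static dma_addr_t direct_map_page(struct device *dev, struct page *page,
                                      unsigned long offset, size_t size,
                                      enum dma_data_direction dir,
                                      struct dma_attrs *attrs)
    {
            return page_to_phys(page) + offset;     /* bus addr == phys addr */
    }

    static void direct_unmap_page(struct device *dev, dma_addr_t handle,
                                  size_t size, enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
    {
            /* nothing to tear down for a 1:1 mapping */
    }

    static struct dma_map_ops direct_dma_ops = {
            .map_page       = direct_map_page,
            .unmap_page     = direct_unmap_page,
            .is_phys        = 1,
    };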
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h index af1dab41674b..1a455f1f86d7 100644 --- a/include/linux/dma_remapping.h +++ b/include/linux/dma_remapping.h | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #define DMA_PTE_READ (1) | 12 | #define DMA_PTE_READ (1) |
13 | #define DMA_PTE_WRITE (2) | 13 | #define DMA_PTE_WRITE (2) |
14 | #define DMA_PTE_SNP (1 << 11) | ||
14 | 15 | ||
15 | struct intel_iommu; | 16 | struct intel_iommu; |
16 | struct dmar_domain; | 17 | struct dmar_domain; |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 1956c8d46d32..2e2aa3df170c 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -23,9 +23,6 @@ | |||
23 | 23 | ||
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/uio.h> | 25 | #include <linux/uio.h> |
26 | #include <linux/kref.h> | ||
27 | #include <linux/completion.h> | ||
28 | #include <linux/rcupdate.h> | ||
29 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
30 | 27 | ||
31 | /** | 28 | /** |
@@ -205,6 +202,7 @@ struct dma_async_tx_descriptor { | |||
205 | /** | 202 | /** |
206 | * struct dma_device - info on the entity supplying DMA services | 203 | * struct dma_device - info on the entity supplying DMA services |
207 | * @chancnt: how many DMA channels are supported | 204 | * @chancnt: how many DMA channels are supported |
205 | * @privatecnt: how many DMA channels are requested by dma_request_channel | ||
208 | * @channels: the list of struct dma_chan | 206 | * @channels: the list of struct dma_chan |
209 | * @global_node: list_head for global dma_device_list | 207 | * @global_node: list_head for global dma_device_list |
210 | * @cap_mask: one or more dma_capability flags | 208 | * @cap_mask: one or more dma_capability flags |
@@ -227,6 +225,7 @@ struct dma_async_tx_descriptor { | |||
227 | struct dma_device { | 225 | struct dma_device { |
228 | 226 | ||
229 | unsigned int chancnt; | 227 | unsigned int chancnt; |
228 | unsigned int privatecnt; | ||
230 | struct list_head channels; | 229 | struct list_head channels; |
231 | struct list_head global_node; | 230 | struct list_head global_node; |
232 | dma_cap_mask_t cap_mask; | 231 | dma_cap_mask_t cap_mask; |
@@ -291,6 +290,24 @@ static inline void net_dmaengine_put(void) | |||
291 | } | 290 | } |
292 | #endif | 291 | #endif |
293 | 292 | ||
293 | #ifdef CONFIG_ASYNC_TX_DMA | ||
294 | #define async_dmaengine_get() dmaengine_get() | ||
295 | #define async_dmaengine_put() dmaengine_put() | ||
296 | #define async_dma_find_channel(type) dma_find_channel(type) | ||
297 | #else | ||
298 | static inline void async_dmaengine_get(void) | ||
299 | { | ||
300 | } | ||
301 | static inline void async_dmaengine_put(void) | ||
302 | { | ||
303 | } | ||
304 | static inline struct dma_chan * | ||
305 | async_dma_find_channel(enum dma_transaction_type type) | ||
306 | { | ||
307 | return NULL; | ||
308 | } | ||
309 | #endif | ||
310 | |||
294 | dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, | 311 | dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, |
295 | void *dest, void *src, size_t len); | 312 | void *dest, void *src, size_t len); |
296 | dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, | 313 | dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, |
@@ -337,6 +354,13 @@ __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | |||
337 | set_bit(tx_type, dstp->bits); | 354 | set_bit(tx_type, dstp->bits); |
338 | } | 355 | } |
339 | 356 | ||
357 | #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) | ||
358 | static inline void | ||
359 | __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | ||
360 | { | ||
361 | clear_bit(tx_type, dstp->bits); | ||
362 | } | ||
363 | |||
340 | #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) | 364 | #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) |
341 | static inline void __dma_cap_zero(dma_cap_mask_t *dstp) | 365 | static inline void __dma_cap_zero(dma_cap_mask_t *dstp) |
342 | { | 366 | { |
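dma_cap_clear() rounds out the dma_cap_zero()/dma_cap_set() mask helpers, and the async_dmaengine_*() wrappers give the async_tx layer a no-op fallback (async_dma_find_channel() returning NULL) when CONFIG_ASYNC_TX_DMA is not set. A small hedged sketch of building a capability mask and requesting a memcpy channel; the function name is invented.

    #include <linux/dmaengine.h>

    /* Sketch: build a capability mask, drop one capability again with the
     * new dma_cap_clear(), then request a matching channel (NULL means
     * "fall back to the CPU"). */
    static struct dma_chan *grab_memcpy_chan(void)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMCPY, mask);
            dma_cap_set(DMA_XOR, mask);
            dma_cap_clear(DMA_XOR, mask);   /* XOR not needed after all */

            /* NULL filter: any channel with the remaining capabilities will do */
            return dma_request_channel(mask, NULL, NULL);
    }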
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index f28440784cf0..e397dc342cda 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -24,16 +24,17 @@ | |||
24 | #include <linux/acpi.h> | 24 | #include <linux/acpi.h> |
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/msi.h> | 26 | #include <linux/msi.h> |
27 | #include <linux/irqreturn.h> | ||
27 | 28 | ||
28 | #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) | ||
29 | struct intel_iommu; | 29 | struct intel_iommu; |
30 | 30 | #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) | |
31 | struct dmar_drhd_unit { | 31 | struct dmar_drhd_unit { |
32 | struct list_head list; /* list of drhd units */ | 32 | struct list_head list; /* list of drhd units */ |
33 | struct acpi_dmar_header *hdr; /* ACPI header */ | 33 | struct acpi_dmar_header *hdr; /* ACPI header */ |
34 | u64 reg_base_addr; /* register base address*/ | 34 | u64 reg_base_addr; /* register base address*/ |
35 | struct pci_dev **devices; /* target device array */ | 35 | struct pci_dev **devices; /* target device array */ |
36 | int devices_cnt; /* target device count */ | 36 | int devices_cnt; /* target device count */ |
37 | u16 segment; /* PCI domain */ | ||
37 | u8 ignored:1; /* ignore drhd */ | 38 | u8 ignored:1; /* ignore drhd */ |
38 | u8 include_all:1; | 39 | u8 include_all:1; |
39 | struct intel_iommu *iommu; | 40 | struct intel_iommu *iommu; |
@@ -44,12 +45,20 @@ extern struct list_head dmar_drhd_units; | |||
44 | #define for_each_drhd_unit(drhd) \ | 45 | #define for_each_drhd_unit(drhd) \ |
45 | list_for_each_entry(drhd, &dmar_drhd_units, list) | 46 | list_for_each_entry(drhd, &dmar_drhd_units, list) |
46 | 47 | ||
48 | #define for_each_active_iommu(i, drhd) \ | ||
49 | list_for_each_entry(drhd, &dmar_drhd_units, list) \ | ||
50 | if (i=drhd->iommu, drhd->ignored) {} else | ||
51 | |||
52 | #define for_each_iommu(i, drhd) \ | ||
53 | list_for_each_entry(drhd, &dmar_drhd_units, list) \ | ||
54 | if (i=drhd->iommu, 0) {} else | ||
55 | |||
47 | extern int dmar_table_init(void); | 56 | extern int dmar_table_init(void); |
48 | extern int dmar_dev_scope_init(void); | 57 | extern int dmar_dev_scope_init(void); |
49 | 58 | ||
50 | /* Intel IOMMU detection */ | 59 | /* Intel IOMMU detection */ |
51 | extern void detect_intel_iommu(void); | 60 | extern void detect_intel_iommu(void); |
52 | 61 | extern int enable_drhd_fault_handling(void); | |
53 | 62 | ||
54 | extern int parse_ioapics_under_ir(void); | 63 | extern int parse_ioapics_under_ir(void); |
55 | extern int alloc_iommu(struct dmar_drhd_unit *); | 64 | extern int alloc_iommu(struct dmar_drhd_unit *); |
@@ -63,12 +72,12 @@ static inline int dmar_table_init(void) | |||
63 | { | 72 | { |
64 | return -ENODEV; | 73 | return -ENODEV; |
65 | } | 74 | } |
75 | static inline int enable_drhd_fault_handling(void) | ||
76 | { | ||
77 | return -1; | ||
78 | } | ||
66 | #endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */ | 79 | #endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */ |
67 | 80 | ||
68 | #ifdef CONFIG_INTR_REMAP | ||
69 | extern int intr_remapping_enabled; | ||
70 | extern int enable_intr_remapping(int); | ||
71 | |||
72 | struct irte { | 81 | struct irte { |
73 | union { | 82 | union { |
74 | struct { | 83 | struct { |
@@ -97,6 +106,12 @@ struct irte { | |||
97 | __u64 high; | 106 | __u64 high; |
98 | }; | 107 | }; |
99 | }; | 108 | }; |
109 | #ifdef CONFIG_INTR_REMAP | ||
110 | extern int intr_remapping_enabled; | ||
111 | extern int enable_intr_remapping(int); | ||
112 | extern void disable_intr_remapping(void); | ||
113 | extern int reenable_intr_remapping(int); | ||
114 | |||
100 | extern int get_irte(int irq, struct irte *entry); | 115 | extern int get_irte(int irq, struct irte *entry); |
101 | extern int modify_irte(int irq, struct irte *irte_modified); | 116 | extern int modify_irte(int irq, struct irte *irte_modified); |
102 | extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); | 117 | extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); |
@@ -111,14 +126,40 @@ extern int irq_remapped(int irq); | |||
111 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); | 126 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); |
112 | extern struct intel_iommu *map_ioapic_to_ir(int apic); | 127 | extern struct intel_iommu *map_ioapic_to_ir(int apic); |
113 | #else | 128 | #else |
129 | static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) | ||
130 | { | ||
131 | return -1; | ||
132 | } | ||
133 | static inline int modify_irte(int irq, struct irte *irte_modified) | ||
134 | { | ||
135 | return -1; | ||
136 | } | ||
137 | static inline int free_irte(int irq) | ||
138 | { | ||
139 | return -1; | ||
140 | } | ||
141 | static inline int map_irq_to_irte_handle(int irq, u16 *sub_handle) | ||
142 | { | ||
143 | return -1; | ||
144 | } | ||
145 | static inline int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, | ||
146 | u16 sub_handle) | ||
147 | { | ||
148 | return -1; | ||
149 | } | ||
150 | static inline struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) | ||
151 | { | ||
152 | return NULL; | ||
153 | } | ||
154 | static inline struct intel_iommu *map_ioapic_to_ir(int apic) | ||
155 | { | ||
156 | return NULL; | ||
157 | } | ||
114 | #define irq_remapped(irq) (0) | 158 | #define irq_remapped(irq) (0) |
115 | #define enable_intr_remapping(mode) (-1) | 159 | #define enable_intr_remapping(mode) (-1) |
116 | #define intr_remapping_enabled (0) | 160 | #define intr_remapping_enabled (0) |
117 | #endif | 161 | #endif |
118 | 162 | ||
119 | #ifdef CONFIG_DMAR | ||
120 | extern const char *dmar_get_fault_reason(u8 fault_reason); | ||
121 | |||
122 | /* Can't use the common MSI interrupt functions | 163 | /* Can't use the common MSI interrupt functions |
123 | * since DMAR is not a pci device | 164 | * since DMAR is not a pci device |
124 | */ | 165 | */ |
@@ -127,8 +168,10 @@ extern void dmar_msi_mask(unsigned int irq); | |||
127 | extern void dmar_msi_read(int irq, struct msi_msg *msg); | 168 | extern void dmar_msi_read(int irq, struct msi_msg *msg); |
128 | extern void dmar_msi_write(int irq, struct msi_msg *msg); | 169 | extern void dmar_msi_write(int irq, struct msi_msg *msg); |
129 | extern int dmar_set_interrupt(struct intel_iommu *iommu); | 170 | extern int dmar_set_interrupt(struct intel_iommu *iommu); |
171 | extern irqreturn_t dmar_fault(int irq, void *dev_id); | ||
130 | extern int arch_setup_dmar_msi(unsigned int irq); | 172 | extern int arch_setup_dmar_msi(unsigned int irq); |
131 | 173 | ||
174 | #ifdef CONFIG_DMAR | ||
132 | extern int iommu_detected, no_iommu; | 175 | extern int iommu_detected, no_iommu; |
133 | extern struct list_head dmar_rmrr_units; | 176 | extern struct list_head dmar_rmrr_units; |
134 | struct dmar_rmrr_unit { | 177 | struct dmar_rmrr_unit { |
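for_each_iommu() and for_each_active_iommu() iterate the global DRHD list, handing back each unit's struct intel_iommu; the "active" variant skips units marked ignored. Below is a hedged sketch of a typical caller wiring up the fault interrupt on every active unit; it is illustrative only, not the patch's own enable_drhd_fault_handling().

    #include <linux/dmar.h>
    #include <linux/kernel.h>

    /* Illustrative: request a DMAR fault interrupt on each active IOMMU. */
    static void wire_up_fault_irqs(void)
    {
            struct dmar_drhd_unit *drhd;
            struct intel_iommu *iommu;

            for_each_active_iommu(iommu, drhd) {
                    if (dmar_set_interrupt(iommu))
                            printk(KERN_ERR "DMAR: no fault irq for unit at 0x%llx\n",
                                   (unsigned long long)drhd->reg_base_addr);
            }
    }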
diff --git a/include/linux/dmi.h b/include/linux/dmi.h index d741b9ceb0e0..bb5489c82c99 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h | |||
@@ -47,7 +47,8 @@ extern int dmi_get_year(int field); | |||
47 | extern int dmi_name_in_vendors(const char *str); | 47 | extern int dmi_name_in_vendors(const char *str); |
48 | extern int dmi_name_in_serial(const char *str); | 48 | extern int dmi_name_in_serial(const char *str); |
49 | extern int dmi_available; | 49 | extern int dmi_available; |
50 | extern int dmi_walk(void (*decode)(const struct dmi_header *)); | 50 | extern int dmi_walk(void (*decode)(const struct dmi_header *, void *), |
51 | void *private_data); | ||
51 | extern bool dmi_match(enum dmi_field f, const char *str); | 52 | extern bool dmi_match(enum dmi_field f, const char *str); |
52 | 53 | ||
53 | #else | 54 | #else |
@@ -61,8 +62,8 @@ static inline int dmi_get_year(int year) { return 0; } | |||
61 | static inline int dmi_name_in_vendors(const char *s) { return 0; } | 62 | static inline int dmi_name_in_vendors(const char *s) { return 0; } |
62 | static inline int dmi_name_in_serial(const char *s) { return 0; } | 63 | static inline int dmi_name_in_serial(const char *s) { return 0; } |
63 | #define dmi_available 0 | 64 | #define dmi_available 0 |
64 | static inline int dmi_walk(void (*decode)(const struct dmi_header *)) | 65 | static inline int dmi_walk(void (*decode)(const struct dmi_header *, void *), |
65 | { return -1; } | 66 | void *private_data) { return -1; } |
66 | static inline bool dmi_match(enum dmi_field f, const char *str) | 67 | static inline bool dmi_match(enum dmi_field f, const char *str) |
67 | { return false; } | 68 | { return false; } |
68 | static inline const struct dmi_system_id * | 69 | static inline const struct dmi_system_id * |
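dmi_walk() now threads a caller-supplied private_data pointer through to the decode callback, so walkers no longer need file-scope state. A hedged sketch counting memory-device records (DMI type 17) follows; the function names are invented.

    #include <linux/dmi.h>

    /* Sketch: count DMI type-17 (memory device) records, passing the
     * counter through the new private_data argument. */
    static void count_type17(const struct dmi_header *dh, void *priv)
    {
            int *count = priv;

            if (dh->type == 17)
                    (*count)++;
    }

    static int memory_device_count(void)
    {
            int count = 0;

            if (dmi_walk(count_type17, &count) < 0)
                    return -1;      /* DMI tables not available */
            return count;
    }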
diff --git a/include/linux/ds1wm.h b/include/linux/ds1wm.h deleted file mode 100644 index d3c65e48a2e7..000000000000 --- a/include/linux/ds1wm.h +++ /dev/null | |||
@@ -1,12 +0,0 @@ | |||
1 | /* platform data for the DS1WM driver */ | ||
2 | |||
3 | struct ds1wm_platform_data { | ||
4 | int bus_shift; /* number of shifts needed to calculate the | ||
5 | * offset between DS1WM registers; | ||
6 | * e.g. on h5xxx and h2200 this is 2 | ||
7 | * (registers aligned to 4-byte boundaries), | ||
8 | * while on hx4700 this is 1 */ | ||
9 | int active_high; | ||
10 | void (*enable)(struct platform_device *pdev); | ||
11 | void (*disable)(struct platform_device *pdev); | ||
12 | }; | ||
diff --git a/include/linux/dst.h b/include/linux/dst.h new file mode 100644 index 000000000000..e26fed84b1aa --- /dev/null +++ b/include/linux/dst.h | |||
@@ -0,0 +1,587 @@ | |||
1 | /* | ||
2 | * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef __DST_H | ||
17 | #define __DST_H | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | #include <linux/connector.h> | ||
21 | |||
22 | #define DST_NAMELEN 32 | ||
23 | #define DST_NAME "dst" | ||
24 | |||
25 | enum { | ||
26 | /* Remove node with given id from storage */ | ||
27 | DST_DEL_NODE = 0, | ||
28 | /* Add remote node with given id to the storage */ | ||
29 | DST_ADD_REMOTE, | ||
30 | /* Add local node with given id to the storage to be exported and used by remote peers */ | ||
31 | DST_ADD_EXPORT, | ||
32 | /* Crypto initialization command (hash/cipher used to protect the connection) */ | ||
33 | DST_CRYPTO, | ||
34 | /* Security attributes for given connection (permissions for example) */ | ||
35 | DST_SECURITY, | ||
36 | /* Register given node in the block layer subsystem */ | ||
37 | DST_START, | ||
38 | DST_CMD_MAX | ||
39 | }; | ||
40 | |||
41 | struct dst_ctl | ||
42 | { | ||
43 | /* Storage name */ | ||
44 | char name[DST_NAMELEN]; | ||
45 | /* Command flags */ | ||
46 | __u32 flags; | ||
47 | /* Command itself (see above) */ | ||
48 | __u32 cmd; | ||
49 | /* Maximum number of pages per single request in this device */ | ||
50 | __u32 max_pages; | ||
51 | /* Stale/error transaction scanning timeout in milliseconds */ | ||
52 | __u32 trans_scan_timeout; | ||
53 | /* Maximum number of retry sends before completing transaction as broken */ | ||
54 | __u32 trans_max_retries; | ||
55 | /* Storage size */ | ||
56 | __u64 size; | ||
57 | }; | ||
58 | |||
59 | /* Reply command carries completion status */ | ||
60 | struct dst_ctl_ack | ||
61 | { | ||
62 | struct cn_msg msg; | ||
63 | int error; | ||
64 | int unused[3]; | ||
65 | }; | ||
66 | |||
67 | /* | ||
68 | * Unfortunately the socket address structure is not exported to userspace, | ||
69 | * so it is redefined here. | ||
70 | */ | ||
71 | #define SADDR_MAX_DATA 128 | ||
72 | |||
73 | struct saddr { | ||
74 | /* address family, AF_xxx */ | ||
75 | unsigned short sa_family; | ||
76 | /* 14 bytes of protocol address */ | ||
77 | char sa_data[SADDR_MAX_DATA]; | ||
78 | /* Number of bytes used in sa_data */ | ||
79 | unsigned short sa_data_len; | ||
80 | }; | ||
81 | |||
82 | /* Address structure */ | ||
83 | struct dst_network_ctl | ||
84 | { | ||
85 | /* Socket type: datagram, stream...*/ | ||
86 | unsigned int type; | ||
87 | /* Let me guess, is it a Jupiter diameter? */ | ||
88 | unsigned int proto; | ||
89 | /* Peer's address */ | ||
90 | struct saddr addr; | ||
91 | }; | ||
92 | |||
93 | struct dst_crypto_ctl | ||
94 | { | ||
95 | /* Cipher and hash names */ | ||
96 | char cipher_algo[DST_NAMELEN]; | ||
97 | char hash_algo[DST_NAMELEN]; | ||
98 | |||
99 | /* Key sizes. Can be zero for digest for example */ | ||
100 | unsigned int cipher_keysize, hash_keysize; | ||
101 | /* Alignment. Calculated by the DST itself. */ | ||
102 | unsigned int crypto_attached_size; | ||
103 | /* Number of threads to perform crypto operations */ | ||
104 | int thread_num; | ||
105 | }; | ||
106 | |||
107 | /* Export security attributes have this bits checked in when client connects */ | ||
108 | #define DST_PERM_READ (1<<0) | ||
109 | #define DST_PERM_WRITE (1<<1) | ||
110 | |||
111 | /* | ||
112 | * Right now this is a simple model, where each remote address | ||
113 | * is assigned the set of permissions it is allowed to exercise. | ||
114 | * In the real world a block device does not know anything but | ||
115 | * reading and writing, so this should be more than enough. | ||
116 | */ | ||
117 | struct dst_secure_user | ||
118 | { | ||
119 | unsigned int permissions; | ||
120 | struct saddr addr; | ||
121 | }; | ||
122 | |||
123 | /* | ||
124 | * Export control command: device to export and network address to accept | ||
125 | * clients to work with given device | ||
126 | */ | ||
127 | struct dst_export_ctl | ||
128 | { | ||
129 | char device[DST_NAMELEN]; | ||
130 | struct dst_network_ctl ctl; | ||
131 | }; | ||
132 | |||
133 | enum { | ||
134 | DST_CFG = 1, /* Request remote configuration */ | ||
135 | DST_IO, /* IO command */ | ||
136 | DST_IO_RESPONSE, /* IO response */ | ||
137 | DST_PING, /* Keepalive message */ | ||
138 | DST_NCMD_MAX, | ||
139 | }; | ||
140 | |||
141 | struct dst_cmd | ||
142 | { | ||
143 | /* Network command itself, see above */ | ||
144 | __u32 cmd; | ||
145 | /* | ||
146 | * Size of the attached data | ||
147 | * (in most cases, for READ command it means how many bytes were requested) | ||
148 | */ | ||
149 | __u32 size; | ||
150 | /* Crypto size: number of attached bytes with digest/hmac */ | ||
151 | __u32 csize; | ||
152 | /* Here we can carry secret data */ | ||
153 | __u32 reserved; | ||
154 | /* Read/write bits, see how they are encoded in bio structure */ | ||
155 | __u64 rw; | ||
156 | /* BIO flags */ | ||
157 | __u64 flags; | ||
158 | /* Unique command id (like transaction ID) */ | ||
159 | __u64 id; | ||
160 | /* Sector to start IO from */ | ||
161 | __u64 sector; | ||
162 | /* Hash data is placed after this header */ | ||
163 | __u8 hash[0]; | ||
164 | }; | ||
165 | |||
166 | /* | ||
167 | * Convert command to/from network byte order. | ||
168 | * We do not use hton*() functions, since there is | ||
169 | * no 64-bit implementation. | ||
170 | */ | ||
171 | static inline void dst_convert_cmd(struct dst_cmd *c) | ||
172 | { | ||
173 | c->cmd = __cpu_to_be32(c->cmd); | ||
174 | c->csize = __cpu_to_be32(c->csize); | ||
175 | c->size = __cpu_to_be32(c->size); | ||
176 | c->sector = __cpu_to_be64(c->sector); | ||
177 | c->id = __cpu_to_be64(c->id); | ||
178 | c->flags = __cpu_to_be64(c->flags); | ||
179 | c->rw = __cpu_to_be64(c->rw); | ||
180 | } | ||
181 | |||
182 | /* Transaction id */ | ||
183 | typedef __u64 dst_gen_t; | ||
184 | |||
185 | #ifdef __KERNEL__ | ||
186 | |||
187 | #include <linux/blkdev.h> | ||
188 | #include <linux/bio.h> | ||
189 | #include <linux/device.h> | ||
190 | #include <linux/mempool.h> | ||
191 | #include <linux/net.h> | ||
192 | #include <linux/poll.h> | ||
193 | #include <linux/rbtree.h> | ||
194 | |||
195 | #ifdef CONFIG_DST_DEBUG | ||
196 | #define dprintk(f, a...) printk(KERN_NOTICE f, ##a) | ||
197 | #else | ||
198 | static inline void __attribute__ ((format (printf, 1, 2))) | ||
199 | dprintk(const char *fmt, ...) {} | ||
200 | #endif | ||
201 | |||
202 | struct dst_node; | ||
203 | |||
204 | struct dst_trans | ||
205 | { | ||
206 | /* DST node we are working with */ | ||
207 | struct dst_node *n; | ||
208 | |||
209 | /* Entry inside transaction tree */ | ||
210 | struct rb_node trans_entry; | ||
211 | |||
212 | /* Merlin kills this transaction when this memory cell equals zero */ | ||
213 | atomic_t refcnt; | ||
214 | |||
215 | /* How this transaction should be processed by crypto engine */ | ||
216 | short enc; | ||
217 | /* How many times this transaction was resent */ | ||
218 | short retries; | ||
219 | /* Completion status */ | ||
220 | int error; | ||
221 | |||
222 | /* When did we send it to the remote peer */ | ||
223 | long send_time; | ||
224 | |||
225 | /* My name is... | ||
226 | * Well, computers do not speak; they have a unique id instead */ | ||
227 | dst_gen_t gen; | ||
228 | |||
229 | /* Block IO we are working with */ | ||
230 | struct bio *bio; | ||
231 | |||
232 | /* Network command for above block IO request */ | ||
233 | struct dst_cmd cmd; | ||
234 | }; | ||
235 | |||
236 | struct dst_crypto_engine | ||
237 | { | ||
238 | /* What should we do with all block requests */ | ||
239 | struct crypto_hash *hash; | ||
240 | struct crypto_ablkcipher *cipher; | ||
241 | |||
242 | /* Pool of pages used to encrypt data into before sending */ | ||
243 | int page_num; | ||
244 | struct page **pages; | ||
245 | |||
246 | /* What to do with current request */ | ||
247 | int enc; | ||
248 | /* Who we are and where do we go */ | ||
249 | struct scatterlist *src, *dst; | ||
250 | |||
251 | /* Maximum timeout waiting for encryption to be completed */ | ||
252 | long timeout; | ||
253 | /* IV is a 64-bit sequential counter */ | ||
254 | u64 iv; | ||
255 | |||
256 | /* Secret data */ | ||
257 | void *private; | ||
258 | |||
259 | /* Cached temporary data lives here */ | ||
260 | int size; | ||
261 | void *data; | ||
262 | }; | ||
263 | |||
264 | struct dst_state | ||
265 | { | ||
266 | /* The main state protection */ | ||
267 | struct mutex state_lock; | ||
268 | |||
269 | /* Polling machinery for sockets */ | ||
270 | wait_queue_t wait; | ||
271 | wait_queue_head_t *whead; | ||
273 | * Most events are waited for here | ||
273 | wait_queue_head_t thread_wait; | ||
274 | |||
275 | /* Who owns this? */ | ||
276 | struct dst_node *node; | ||
277 | |||
278 | /* Network address for this state */ | ||
279 | struct dst_network_ctl ctl; | ||
280 | |||
281 | /* Permissions to work with: read-only or rw connection */ | ||
282 | u32 permissions; | ||
283 | |||
284 | /* Called when we need to clean private data */ | ||
285 | void (* cleanup)(struct dst_state *st); | ||
286 | |||
287 | /* Used by the server: BIO completion queues BIOs here */ | ||
288 | struct list_head request_list; | ||
289 | spinlock_t request_lock; | ||
290 | |||
291 | /* Guess what? No, it is not number of planets */ | ||
292 | atomic_t refcnt; | ||
293 | |||
294 | /* This flag is set when the connection should be dropped */ | ||
295 | int need_exit; | ||
296 | |||
297 | /* | ||
298 | * Socket to work with. Second pointer is used for | ||
299 | * lockless check if socket was changed before performing | ||
300 | * next action (like working with cached polling result) | ||
301 | */ | ||
302 | struct socket *socket, *read_socket; | ||
303 | |||
304 | /* Cached preallocated data */ | ||
305 | void *data; | ||
306 | unsigned int size; | ||
307 | |||
308 | /* Currently processed command */ | ||
309 | struct dst_cmd cmd; | ||
310 | }; | ||
311 | |||
312 | struct dst_info | ||
313 | { | ||
314 | /* Device size */ | ||
315 | u64 size; | ||
316 | |||
317 | /* Local device name for export devices */ | ||
318 | char local[DST_NAMELEN]; | ||
319 | |||
320 | /* Network setup */ | ||
321 | struct dst_network_ctl net; | ||
322 | |||
323 | /* Sysfs bits use this */ | ||
324 | struct device device; | ||
325 | }; | ||
326 | |||
327 | struct dst_node | ||
328 | { | ||
329 | struct list_head node_entry; | ||
330 | |||
331 | /* Hi, my name is stored here */ | ||
332 | char name[DST_NAMELEN]; | ||
333 | /* My cache name is stored here */ | ||
334 | char cache_name[DST_NAMELEN]; | ||
335 | |||
336 | /* Block device attached to given node. | ||
337 | * Only valid for exporting nodes */ | ||
338 | struct block_device *bdev; | ||
339 | /* Network state machine for given peer */ | ||
340 | struct dst_state *state; | ||
341 | |||
342 | /* Block IO machinery */ | ||
343 | struct request_queue *queue; | ||
344 | struct gendisk *disk; | ||
345 | |||
346 | /* Number of threads in processing pool */ | ||
347 | int thread_num; | ||
348 | /* Maximum number of pages in single IO */ | ||
349 | int max_pages; | ||
350 | |||
351 | /* I'm that big in bytes */ | ||
352 | loff_t size; | ||
353 | |||
354 | /* Exported to userspace node information */ | ||
355 | struct dst_info *info; | ||
356 | |||
357 | /* | ||
358 | * Security attribute list. | ||
359 | * Used only by exporting node currently. | ||
360 | */ | ||
361 | struct list_head security_list; | ||
362 | struct mutex security_lock; | ||
363 | |||
364 | /* | ||
365 | * When this underflows below zero, the universe collapses. | ||
366 | * But that will not happen, since the node is freed | ||
367 | * when the reference counter reaches zero. | ||
368 | */ | ||
369 | atomic_t refcnt; | ||
370 | |||
371 | /* How precisely should I be started? */ | ||
372 | int (*start)(struct dst_node *); | ||
373 | |||
374 | /* Crypto capabilities */ | ||
375 | struct dst_crypto_ctl crypto; | ||
376 | u8 *hash_key; | ||
377 | u8 *cipher_key; | ||
378 | |||
379 | /* Pool of processing threads */ | ||
380 | struct thread_pool *pool; | ||
381 | |||
382 | /* Transaction IDs live here */ | ||
383 | atomic_long_t gen; | ||
384 | |||
385 | /* | ||
386 | * How frequently and how many times the transaction | ||
387 | * tree should be scanned to drop stale objects. | ||
388 | */ | ||
389 | long trans_scan_timeout; | ||
390 | int trans_max_retries; | ||
391 | |||
392 | /* Small gnomes live here */ | ||
393 | struct rb_root trans_root; | ||
394 | struct mutex trans_lock; | ||
395 | |||
396 | /* | ||
397 | * Transaction cache/memory pool. | ||
398 | * It is big enough to contain not only transaction | ||
399 | * itself, but additional crypto data (digest/hmac). | ||
400 | */ | ||
401 | struct kmem_cache *trans_cache; | ||
402 | mempool_t *trans_pool; | ||
403 | |||
404 | /* This entity scans transaction tree */ | ||
405 | struct delayed_work trans_work; | ||
406 | |||
407 | wait_queue_head_t wait; | ||
408 | }; | ||
409 | |||
410 | /* Kernel representation of the security attribute */ | ||
411 | struct dst_secure | ||
412 | { | ||
413 | struct list_head sec_entry; | ||
414 | struct dst_secure_user sec; | ||
415 | }; | ||
416 | |||
417 | int dst_process_bio(struct dst_node *n, struct bio *bio); | ||
418 | |||
419 | int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r); | ||
420 | int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le); | ||
421 | |||
422 | static inline struct dst_state *dst_state_get(struct dst_state *st) | ||
423 | { | ||
424 | BUG_ON(atomic_read(&st->refcnt) == 0); | ||
425 | atomic_inc(&st->refcnt); | ||
426 | return st; | ||
427 | } | ||
428 | |||
429 | void dst_state_put(struct dst_state *st); | ||
430 | |||
431 | struct dst_state *dst_state_alloc(struct dst_node *n); | ||
432 | int dst_state_socket_create(struct dst_state *st); | ||
433 | void dst_state_socket_release(struct dst_state *st); | ||
434 | |||
435 | void dst_state_exit_connected(struct dst_state *st); | ||
436 | |||
437 | int dst_state_schedule_receiver(struct dst_state *st); | ||
438 | |||
439 | void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str); | ||
440 | |||
441 | static inline void dst_state_lock(struct dst_state *st) | ||
442 | { | ||
443 | mutex_lock(&st->state_lock); | ||
444 | } | ||
445 | |||
446 | static inline void dst_state_unlock(struct dst_state *st) | ||
447 | { | ||
448 | mutex_unlock(&st->state_lock); | ||
449 | } | ||
450 | |||
451 | void dst_poll_exit(struct dst_state *st); | ||
452 | int dst_poll_init(struct dst_state *st); | ||
453 | |||
454 | static inline unsigned int dst_state_poll(struct dst_state *st) | ||
455 | { | ||
456 | unsigned int revents = POLLHUP | POLLERR; | ||
457 | |||
458 | dst_state_lock(st); | ||
459 | if (st->socket) | ||
460 | revents = st->socket->ops->poll(NULL, st->socket, NULL); | ||
461 | dst_state_unlock(st); | ||
462 | |||
463 | return revents; | ||
464 | } | ||
465 | |||
466 | static inline int dst_thread_setup(void *private, void *data) | ||
467 | { | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | void dst_node_put(struct dst_node *n); | ||
472 | |||
473 | static inline struct dst_node *dst_node_get(struct dst_node *n) | ||
474 | { | ||
475 | atomic_inc(&n->refcnt); | ||
476 | return n; | ||
477 | } | ||
478 | |||
479 | int dst_data_recv(struct dst_state *st, void *data, unsigned int size); | ||
480 | int dst_recv_cdata(struct dst_state *st, void *cdata); | ||
481 | int dst_data_send_header(struct socket *sock, | ||
482 | void *data, unsigned int size, int more); | ||
483 | |||
484 | int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio); | ||
485 | |||
486 | int dst_process_io(struct dst_state *st); | ||
487 | int dst_export_crypto(struct dst_node *n, struct bio *bio); | ||
488 | int dst_export_send_bio(struct bio *bio); | ||
489 | int dst_start_export(struct dst_node *n); | ||
490 | |||
491 | int __init dst_export_init(void); | ||
492 | void dst_export_exit(void); | ||
493 | |||
494 | /* Private structure for export block IO requests */ | ||
495 | struct dst_export_priv | ||
496 | { | ||
497 | struct list_head request_entry; | ||
498 | struct dst_state *state; | ||
499 | struct bio *bio; | ||
500 | struct dst_cmd cmd; | ||
501 | }; | ||
502 | |||
503 | static inline void dst_trans_get(struct dst_trans *t) | ||
504 | { | ||
505 | atomic_inc(&t->refcnt); | ||
506 | } | ||
507 | |||
508 | struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen); | ||
509 | int dst_trans_remove(struct dst_trans *t); | ||
510 | int dst_trans_remove_nolock(struct dst_trans *t); | ||
511 | void dst_trans_put(struct dst_trans *t); | ||
512 | |||
513 | /* | ||
514 | * Convert bio into network command. | ||
515 | */ | ||
516 | static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd, | ||
517 | u32 command, u64 id) | ||
518 | { | ||
519 | cmd->cmd = command; | ||
520 | cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS; | ||
521 | cmd->rw = bio->bi_rw; | ||
522 | cmd->size = bio->bi_size; | ||
523 | cmd->csize = 0; | ||
524 | cmd->id = id; | ||
525 | cmd->sector = bio->bi_sector; | ||
526 | }; | ||
527 | |||
528 | int dst_trans_send(struct dst_trans *t); | ||
529 | int dst_trans_crypto(struct dst_trans *t); | ||
530 | |||
531 | int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl); | ||
532 | void dst_node_crypto_exit(struct dst_node *n); | ||
533 | |||
534 | static inline int dst_need_crypto(struct dst_node *n) | ||
535 | { | ||
536 | struct dst_crypto_ctl *c = &n->crypto; | ||
537 | /* | ||
538 | * Logical OR (||) would be the natural choice here, but the | ||
539 | * bitwise OR generates better (branch-free) code, so it is used instead. | ||
540 | */ | ||
541 | return (c->hash_algo[0] | c->cipher_algo[0]); | ||
542 | } | ||
543 | |||
544 | int dst_node_trans_init(struct dst_node *n, unsigned int size); | ||
545 | void dst_node_trans_exit(struct dst_node *n); | ||
546 | |||
547 | /* | ||
548 | * Pool of threads. | ||
549 | * The ready list contains threads currently free to be used, | ||
550 | * the active list contains threads with work scheduled for them. | ||
551 | * A caller can wait on the wait queue until a thread becomes ready. | ||
552 | */ | ||
553 | struct thread_pool | ||
554 | { | ||
555 | int thread_num; | ||
556 | struct mutex thread_lock; | ||
557 | struct list_head ready_list, active_list; | ||
558 | |||
559 | wait_queue_head_t wait; | ||
560 | }; | ||
561 | |||
562 | void thread_pool_del_worker(struct thread_pool *p); | ||
563 | void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id); | ||
564 | int thread_pool_add_worker(struct thread_pool *p, | ||
565 | char *name, | ||
566 | unsigned int id, | ||
567 | void *(* init)(void *data), | ||
568 | void (* cleanup)(void *data), | ||
569 | void *data); | ||
570 | |||
571 | void thread_pool_destroy(struct thread_pool *p); | ||
572 | struct thread_pool *thread_pool_create(int num, char *name, | ||
573 | void *(* init)(void *data), | ||
574 | void (* cleanup)(void *data), | ||
575 | void *data); | ||
576 | |||
577 | int thread_pool_schedule(struct thread_pool *p, | ||
578 | int (* setup)(void *stored_private, void *setup_data), | ||
579 | int (* action)(void *stored_private, void *setup_data), | ||
580 | void *setup_data, long timeout); | ||
581 | int thread_pool_schedule_private(struct thread_pool *p, | ||
582 | int (* setup)(void *private, void *data), | ||
583 | int (* action)(void *private, void *data), | ||
584 | void *data, long timeout, void *id); | ||
585 | |||
586 | #endif /* __KERNEL__ */ | ||
587 | #endif /* __DST_H */ | ||
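Editor's note: the thread pool declared at the end of dst.h (thread_pool_create()/thread_pool_schedule()) is generic enough to illustrate on its own. Below is a minimal, hypothetical sketch that uses only the prototypes shown above; my_init/my_cleanup/my_action, the NULL-on-failure check for thread_pool_create(), and the jiffies interpretation of the timeout argument are assumptions, not part of the header.

static void *my_init(void *data)
{
	return data;			/* becomes the per-thread private pointer */
}

static void my_cleanup(void *data)
{
	/* release any per-thread state allocated in my_init() */
}

static int my_action(void *private, void *setup_data)
{
	/* process one scheduled work item; return 0 on success */
	return 0;
}

static int run_one_item(void *work_item)
{
	struct thread_pool *pool;
	int err;

	pool = thread_pool_create(4, "example", my_init, my_cleanup, NULL);
	if (!pool)
		return -ENOMEM;

	/* dst_thread_setup() is the no-op setup callback declared above */
	err = thread_pool_schedule(pool, dst_thread_setup, my_action,
			work_item, MAX_SCHEDULE_TIMEOUT);

	thread_pool_destroy(pool);
	return err;
}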
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h index d797dde247f7..c8aad713a046 100644 --- a/include/linux/dw_dmac.h +++ b/include/linux/dw_dmac.h | |||
@@ -74,4 +74,23 @@ struct dw_dma_slave { | |||
74 | #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ | 74 | #define DWC_CFGL_HS_DST_POL (1 << 18) /* dst handshake active low */ |
75 | #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ | 75 | #define DWC_CFGL_HS_SRC_POL (1 << 19) /* src handshake active low */ |
76 | 76 | ||
77 | /* DMA API extensions */ | ||
78 | struct dw_cyclic_desc { | ||
79 | struct dw_desc **desc; | ||
80 | unsigned long periods; | ||
81 | void (*period_callback)(void *param); | ||
82 | void *period_callback_param; | ||
83 | }; | ||
84 | |||
85 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | ||
86 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | ||
87 | enum dma_data_direction direction); | ||
88 | void dw_dma_cyclic_free(struct dma_chan *chan); | ||
89 | int dw_dma_cyclic_start(struct dma_chan *chan); | ||
90 | void dw_dma_cyclic_stop(struct dma_chan *chan); | ||
91 | |||
92 | dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan); | ||
93 | |||
94 | dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan); | ||
95 | |||
77 | #endif /* DW_DMAC_H */ | 96 | #endif /* DW_DMAC_H */ |
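Editor's note: a hedged sketch of how the cyclic-DMA extensions above might be driven. It assumes "chan" is a channel already obtained from the dmaengine core and backed by dw_dmac, that buf describes a DMA-able ring buffer, and that dw_dma_cyclic_prep() reports errors via ERR_PTR (an assumption based only on its pointer return type).

static void period_done(void *param)
{
	/* invoked once per completed period of the ring buffer */
}

static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len)
{
	struct dw_cyclic_desc *cdesc;

	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
				   DMA_FROM_DEVICE);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->period_callback = period_done;
	cdesc->period_callback_param = NULL;

	/* tear down later with dw_dma_cyclic_stop() and dw_dma_cyclic_free() */
	return dw_dma_cyclic_start(chan);
}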
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index baabf33be244..a0d9422a1569 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h | |||
@@ -70,7 +70,7 @@ extern int ddebug_remove_module(char *mod_name); | |||
70 | DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ | 70 | DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ |
71 | if (__dynamic_dbg_enabled(descriptor)) \ | 71 | if (__dynamic_dbg_enabled(descriptor)) \ |
72 | dev_printk(KERN_DEBUG, dev, \ | 72 | dev_printk(KERN_DEBUG, dev, \ |
73 | KBUILD_MODNAME ": " pr_fmt(fmt),\ | 73 | KBUILD_MODNAME ": " fmt, \ |
74 | ##__VA_ARGS__); \ | 74 | ##__VA_ARGS__); \ |
75 | } while (0) | 75 | } while (0) |
76 | 76 | ||
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 7a204256b155..c59b769f62b0 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -116,6 +116,7 @@ extern void elv_abort_queue(struct request_queue *); | |||
116 | extern void elv_completed_request(struct request_queue *, struct request *); | 116 | extern void elv_completed_request(struct request_queue *, struct request *); |
117 | extern int elv_set_request(struct request_queue *, struct request *, gfp_t); | 117 | extern int elv_set_request(struct request_queue *, struct request *, gfp_t); |
118 | extern void elv_put_request(struct request_queue *, struct request *); | 118 | extern void elv_put_request(struct request_queue *, struct request *); |
119 | extern void elv_drain_elevator(struct request_queue *); | ||
119 | 120 | ||
120 | /* | 121 | /* |
121 | * io scheduler registration | 122 | * io scheduler registration |
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index a667637b54e3..f45a8ae5f828 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h | |||
@@ -13,10 +13,20 @@ | |||
13 | /* For O_CLOEXEC and O_NONBLOCK */ | 13 | /* For O_CLOEXEC and O_NONBLOCK */ |
14 | #include <linux/fcntl.h> | 14 | #include <linux/fcntl.h> |
15 | 15 | ||
16 | /* Flags for eventfd2. */ | 16 | /* |
17 | * CAREFUL: Check include/asm-generic/fcntl.h when defining | ||
18 | * new flags, since they might collide with O_* ones. We want | ||
19 | * to re-use O_* flags that couldn't possibly have a meaning | ||
20 | * from eventfd, in order to leave a free define-space for | ||
21 | * shared O_* flags. | ||
22 | */ | ||
23 | #define EFD_SEMAPHORE (1 << 0) | ||
17 | #define EFD_CLOEXEC O_CLOEXEC | 24 | #define EFD_CLOEXEC O_CLOEXEC |
18 | #define EFD_NONBLOCK O_NONBLOCK | 25 | #define EFD_NONBLOCK O_NONBLOCK |
19 | 26 | ||
27 | #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) | ||
28 | #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) | ||
29 | |||
20 | struct file *eventfd_fget(int fd); | 30 | struct file *eventfd_fget(int fd); |
21 | int eventfd_signal(struct file *file, int n); | 31 | int eventfd_signal(struct file *file, int n); |
22 | 32 | ||
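Editor's note: the EFD_SEMAPHORE semantics are easiest to see from userspace. With the flag set, each successful read() yields the value 1 and decrements the counter by one, instead of returning the whole counter and resetting it to zero. A small illustration, assuming a libc whose <sys/eventfd.h> wrapper already knows about eventfd2() flags (otherwise EFD_SEMAPHORE can be defined as 1 locally):

#include <sys/eventfd.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int efd = eventfd(3, EFD_SEMAPHORE | EFD_NONBLOCK);

	if (efd < 0)
		return 1;

	/* Three reads succeed, each returning val == 1; the fourth fails
	 * with EAGAIN because EFD_NONBLOCK is set. Without EFD_SEMAPHORE
	 * the first read would return 3 and reset the counter to zero. */
	while (read(efd, &val, sizeof(val)) == sizeof(val))
		;

	close(efd);
	return 0;
}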
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index dd495b8c3091..634a5e5aba3e 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h | |||
@@ -208,6 +208,7 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags) | |||
208 | #define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */ | 208 | #define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */ |
209 | #define EXT3_STATE_NEW 0x00000002 /* inode is newly created */ | 209 | #define EXT3_STATE_NEW 0x00000002 /* inode is newly created */ |
210 | #define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */ | 210 | #define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */ |
211 | #define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008 | ||
211 | 212 | ||
212 | /* Used to pass group descriptor data when online resize is done */ | 213 | /* Used to pass group descriptor data when online resize is done */ |
213 | struct ext3_new_group_input { | 214 | struct ext3_new_group_input { |
@@ -893,9 +894,8 @@ extern int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
893 | u64 start, u64 len); | 894 | u64 start, u64 len); |
894 | 895 | ||
895 | /* ioctl.c */ | 896 | /* ioctl.c */ |
896 | extern int ext3_ioctl (struct inode *, struct file *, unsigned int, | 897 | extern long ext3_ioctl(struct file *, unsigned int, unsigned long); |
897 | unsigned long); | 898 | extern long ext3_compat_ioctl(struct file *, unsigned int, unsigned long); |
898 | extern long ext3_compat_ioctl (struct file *, unsigned int, unsigned long); | ||
899 | 899 | ||
900 | /* namei.c */ | 900 | /* namei.c */ |
901 | extern int ext3_orphan_add(handle_t *, struct inode *); | 901 | extern int ext3_orphan_add(handle_t *, struct inode *); |
diff --git a/include/linux/fb.h b/include/linux/fb.h index 31527e17076b..330c4b1bfcaa 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -123,6 +123,7 @@ struct dentry; | |||
123 | #define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ | 123 | #define FB_ACCEL_TRIDENT_3DIMAGE 51 /* Trident 3DImage */ |
124 | #define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ | 124 | #define FB_ACCEL_TRIDENT_BLADE3D 52 /* Trident Blade3D */ |
125 | #define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ | 125 | #define FB_ACCEL_TRIDENT_BLADEXP 53 /* Trident BladeXP */ |
126 | #define FB_ACCEL_CIRRUS_ALPINE 53 /* Cirrus Logic 543x/544x/5480 */ | ||
126 | #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ | 127 | #define FB_ACCEL_NEOMAGIC_NM2070 90 /* NeoMagic NM2070 */ |
127 | #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ | 128 | #define FB_ACCEL_NEOMAGIC_NM2090 91 /* NeoMagic NM2090 */ |
128 | #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ | 129 | #define FB_ACCEL_NEOMAGIC_NM2093 92 /* NeoMagic NM2093 */ |
@@ -172,8 +173,12 @@ struct fb_fix_screeninfo { | |||
172 | /* Interpretation of offset for color fields: All offsets are from the right, | 173 | /* Interpretation of offset for color fields: All offsets are from the right, |
173 | * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you | 174 | * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you |
174 | * can use the offset as right argument to <<). A pixel afterwards is a bit | 175 | * can use the offset as right argument to <<). A pixel afterwards is a bit |
175 | * stream and is written to video memory as that unmodified. This implies | 176 | * stream and is written to video memory as that unmodified. |
176 | * big-endian byte order if bits_per_pixel is greater than 8. | 177 | * |
178 | * For pseudocolor: offset and length should be the same for all color | ||
179 | * components. Offset specifies the position of the least significant bit | ||
180 | * of the palette index in a pixel value. Length indicates the number | ||
181 | * of available palette entries (i.e. # of entries = 1 << length). | ||
177 | */ | 182 | */ |
178 | struct fb_bitfield { | 183 | struct fb_bitfield { |
179 | __u32 offset; /* beginning of bitfield */ | 184 | __u32 offset; /* beginning of bitfield */ |
@@ -960,15 +965,7 @@ extern struct fb_info *registered_fb[FB_MAX]; | |||
960 | extern int num_registered_fb; | 965 | extern int num_registered_fb; |
961 | extern struct class *fb_class; | 966 | extern struct class *fb_class; |
962 | 967 | ||
963 | static inline int lock_fb_info(struct fb_info *info) | 968 | extern int lock_fb_info(struct fb_info *info); |
964 | { | ||
965 | mutex_lock(&info->lock); | ||
966 | if (!info->fbops) { | ||
967 | mutex_unlock(&info->lock); | ||
968 | return 0; | ||
969 | } | ||
970 | return 1; | ||
971 | } | ||
972 | 969 | ||
973 | static inline void unlock_fb_info(struct fb_info *info) | 970 | static inline void unlock_fb_info(struct fb_info *info) |
974 | { | 971 | { |
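Editor's note: the offset/length convention described in the fb_bitfield comment above amounts to a shift-and-or when composing a truecolor pixel. A small sketch, assuming a struct fb_var_screeninfo "var" (whose red/green/blue members are fb_bitfields), 8-bit input components, and field lengths no larger than 8:

static __u32 fb_compose_pixel(const struct fb_var_screeninfo *var,
			      __u8 r, __u8 g, __u8 b)
{
	__u32 pix;

	/* scale each component down to the field width, then shift it
	 * into place; the offset is usable directly as the << argument */
	pix  = ((__u32)r >> (8 - var->red.length))   << var->red.offset;
	pix |= ((__u32)g >> (8 - var->green.length)) << var->green.offset;
	pix |= ((__u32)b >> (8 - var->blue.length))  << var->blue.offset;

	return pix;
}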
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 09d6c5bbdddd..a2ec74bc4812 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h | |||
@@ -5,12 +5,14 @@ | |||
5 | #ifndef __LINUX_FDTABLE_H | 5 | #ifndef __LINUX_FDTABLE_H |
6 | #define __LINUX_FDTABLE_H | 6 | #define __LINUX_FDTABLE_H |
7 | 7 | ||
8 | #include <asm/atomic.h> | ||
9 | #include <linux/posix_types.h> | 8 | #include <linux/posix_types.h> |
10 | #include <linux/compiler.h> | 9 | #include <linux/compiler.h> |
11 | #include <linux/spinlock.h> | 10 | #include <linux/spinlock.h> |
12 | #include <linux/rcupdate.h> | 11 | #include <linux/rcupdate.h> |
13 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/init.h> | ||
14 | |||
15 | #include <asm/atomic.h> | ||
14 | 16 | ||
15 | /* | 17 | /* |
16 | * The default fd array needs to be at least BITS_PER_LONG, | 18 | * The default fd array needs to be at least BITS_PER_LONG, |
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h index 671decbd2aeb..934e22d65801 100644 --- a/include/linux/fiemap.h +++ b/include/linux/fiemap.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #ifndef _LINUX_FIEMAP_H | 11 | #ifndef _LINUX_FIEMAP_H |
12 | #define _LINUX_FIEMAP_H | 12 | #define _LINUX_FIEMAP_H |
13 | 13 | ||
14 | #include <linux/types.h> | ||
15 | |||
14 | struct fiemap_extent { | 16 | struct fiemap_extent { |
15 | __u64 fe_logical; /* logical offset in bytes for the start of | 17 | __u64 fe_logical; /* logical offset in bytes for the start of |
16 | * the extent from the beginning of the file */ | 18 | * the extent from the beginning of the file */ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 42436ae42f70..5bed436f4353 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -87,6 +87,60 @@ struct inodes_stat_t { | |||
87 | */ | 87 | */ |
88 | #define FMODE_NOCMTIME ((__force fmode_t)2048) | 88 | #define FMODE_NOCMTIME ((__force fmode_t)2048) |
89 | 89 | ||
90 | /* | ||
91 | * The below are the various read and write types that we support. Some of | ||
92 | * them include behavioral modifiers that send information down to the | ||
93 | * block layer and IO scheduler. Terminology: | ||
94 | * | ||
95 | * The block layer uses device plugging to defer IO a little bit, in | ||
96 | * the hope that we will see more IO very shortly. This increases | ||
97 | * coalescing of adjacent IO and thus reduces the number of IOs we | ||
98 | * have to send to the device. It also allows for better queuing, | ||
99 | * if the IO isn't mergeable. If the caller is going to be waiting | ||
100 | * for the IO, then he must ensure that the device is unplugged so | ||
101 | * that the IO is dispatched to the driver. | ||
102 | * | ||
103 | * All IO is handled async in Linux. This is fine for background | ||
104 | * writes, but for reads or writes that someone waits for completion | ||
105 | * on, we want to notify the block layer and IO scheduler so that they | ||
106 | * know about it. That allows them to make better scheduling | ||
107 | * decisions. So when the below references 'sync' and 'async', it | ||
108 | * is referencing this priority hint. | ||
109 | * | ||
110 | * With that in mind, the available types are: | ||
111 | * | ||
112 | * READ A normal read operation. Device will be plugged. | ||
113 | * READ_SYNC A synchronous read. Device is not plugged, caller can | ||
114 | * immediately wait on this read without caring about | ||
115 | * unplugging. | ||
116 | * READA Used for read-ahead operations. Lower priority, and the | ||
117 | * block layer could (in theory) choose to ignore this | ||
118 | * request if it runs into resource problems. | ||
119 | * WRITE A normal async write. Device will be plugged. | ||
120 | * SWRITE Like WRITE, but a special case for ll_rw_block() that | ||
121 | * tells it to lock the buffer first. Normally a buffer | ||
122 | * must be locked before doing IO. | ||
123 | * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down | ||
124 | * the hint that someone will be waiting on this IO | ||
125 | * shortly. The device must still be unplugged explicitly; | ||
126 | * WRITE_SYNC_PLUG does not do this as we could be | ||
127 | * submitting more writes before we actually wait on any | ||
128 | * of them. | ||
129 | * WRITE_SYNC Like WRITE_SYNC_PLUG, but also unplugs the device | ||
130 | * immediately after submission. The write equivalent | ||
131 | * of READ_SYNC. | ||
132 | * WRITE_ODIRECT Special case write for O_DIRECT only. | ||
133 | * SWRITE_SYNC | ||
134 | * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer. | ||
135 | * See SWRITE. | ||
136 | * WRITE_BARRIER Like WRITE, but tells the block layer that all | ||
137 | * previously submitted writes must be safely on storage | ||
138 | * before this one is started. Also guarantees that when | ||
139 | * this write is complete, it itself is also safely on | ||
140 | * storage. Prevents reordering of writes on both sides | ||
141 | * of this IO. | ||
142 | * | ||
143 | */ | ||
90 | #define RW_MASK 1 | 144 | #define RW_MASK 1 |
91 | #define RWA_MASK 2 | 145 | #define RWA_MASK 2 |
92 | #define READ 0 | 146 | #define READ 0 |
@@ -95,9 +149,18 @@ struct inodes_stat_t { | |||
95 | #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ | 149 | #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ |
96 | #define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) | 150 | #define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) |
97 | #define READ_META (READ | (1 << BIO_RW_META)) | 151 | #define READ_META (READ | (1 << BIO_RW_META)) |
98 | #define WRITE_SYNC (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) | 152 | #define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) |
99 | #define SWRITE_SYNC (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) | 153 | #define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) |
154 | #define WRITE_ODIRECT (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) | ||
155 | #define SWRITE_SYNC_PLUG \ | ||
156 | (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) | ||
157 | #define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) | ||
100 | #define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) | 158 | #define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) |
159 | |||
160 | /* | ||
161 | * These aren't really reads or writes; they pass down information about | ||
162 | * parts of the device that are now unused by the file system. | ||
163 | */ | ||
101 | #define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) | 164 | #define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD) |
102 | #define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) | 165 | #define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER)) |
103 | 166 | ||
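Editor's note: these composite values are what a caller passes as the rw argument of submit_bio(). A minimal sketch of the two most common cases, assuming bios that are already filled in and omitting completion handling:

static void issue_examples(struct bio *read_bio, struct bio *write_bio)
{
	/* the submitter will wait on this read: mark it sync and have
	 * the block layer unplug the device immediately */
	submit_bio(READ_SYNC, read_bio);

	/* background write: plain WRITE leaves the device plugged so
	 * adjacent IO submitted shortly afterwards can be merged */
	submit_bio(WRITE, write_bio);
}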
@@ -734,9 +797,6 @@ enum inode_i_mutex_lock_class | |||
734 | I_MUTEX_QUOTA | 797 | I_MUTEX_QUOTA |
735 | }; | 798 | }; |
736 | 799 | ||
737 | extern void inode_double_lock(struct inode *inode1, struct inode *inode2); | ||
738 | extern void inode_double_unlock(struct inode *inode1, struct inode *inode2); | ||
739 | |||
740 | /* | 800 | /* |
741 | * NOTE: in a 32bit arch with a preemptable kernel and | 801 | * NOTE: in a 32bit arch with a preemptable kernel and |
742 | * an UP compile the i_size_read/write must be atomic | 802 | * an UP compile the i_size_read/write must be atomic |
@@ -849,7 +909,7 @@ struct file { | |||
849 | #define f_dentry f_path.dentry | 909 | #define f_dentry f_path.dentry |
850 | #define f_vfsmnt f_path.mnt | 910 | #define f_vfsmnt f_path.mnt |
851 | const struct file_operations *f_op; | 911 | const struct file_operations *f_op; |
852 | spinlock_t f_lock; /* f_ep_links, f_flags */ | 912 | spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */ |
853 | atomic_long_t f_count; | 913 | atomic_long_t f_count; |
854 | unsigned int f_flags; | 914 | unsigned int f_flags; |
855 | fmode_t f_mode; | 915 | fmode_t f_mode; |
@@ -1695,6 +1755,9 @@ struct file_system_type { | |||
1695 | struct lock_class_key i_alloc_sem_key; | 1755 | struct lock_class_key i_alloc_sem_key; |
1696 | }; | 1756 | }; |
1697 | 1757 | ||
1758 | extern int get_sb_ns(struct file_system_type *fs_type, int flags, void *data, | ||
1759 | int (*fill_super)(struct super_block *, void *, int), | ||
1760 | struct vfsmount *mnt); | ||
1698 | extern int get_sb_bdev(struct file_system_type *fs_type, | 1761 | extern int get_sb_bdev(struct file_system_type *fs_type, |
1699 | int flags, const char *dev_name, void *data, | 1762 | int flags, const char *dev_name, void *data, |
1700 | int (*fill_super)(struct super_block *, void *, int), | 1763 | int (*fill_super)(struct super_block *, void *, int), |
@@ -1741,6 +1804,8 @@ extern void drop_collected_mounts(struct vfsmount *); | |||
1741 | 1804 | ||
1742 | extern int vfs_statfs(struct dentry *, struct kstatfs *); | 1805 | extern int vfs_statfs(struct dentry *, struct kstatfs *); |
1743 | 1806 | ||
1807 | extern int current_umask(void); | ||
1808 | |||
1744 | /* /sys/fs */ | 1809 | /* /sys/fs */ |
1745 | extern struct kobject *fs_kobj; | 1810 | extern struct kobject *fs_kobj; |
1746 | 1811 | ||
@@ -1878,12 +1943,25 @@ extern struct block_device *open_by_devnum(dev_t, fmode_t); | |||
1878 | extern void invalidate_bdev(struct block_device *); | 1943 | extern void invalidate_bdev(struct block_device *); |
1879 | extern int sync_blockdev(struct block_device *bdev); | 1944 | extern int sync_blockdev(struct block_device *bdev); |
1880 | extern struct super_block *freeze_bdev(struct block_device *); | 1945 | extern struct super_block *freeze_bdev(struct block_device *); |
1946 | extern void emergency_thaw_all(void); | ||
1881 | extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); | 1947 | extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); |
1882 | extern int fsync_bdev(struct block_device *); | 1948 | extern int fsync_bdev(struct block_device *); |
1883 | extern int fsync_super(struct super_block *); | 1949 | extern int fsync_super(struct super_block *); |
1884 | extern int fsync_no_super(struct block_device *); | 1950 | extern int fsync_no_super(struct block_device *); |
1885 | #else | 1951 | #else |
1886 | static inline void bd_forget(struct inode *inode) {} | 1952 | static inline void bd_forget(struct inode *inode) {} |
1953 | static inline int sync_blockdev(struct block_device *bdev) { return 0; } | ||
1954 | static inline void invalidate_bdev(struct block_device *bdev) {} | ||
1955 | |||
1956 | static inline struct super_block *freeze_bdev(struct block_device *sb) | ||
1957 | { | ||
1958 | return NULL; | ||
1959 | } | ||
1960 | |||
1961 | static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) | ||
1962 | { | ||
1963 | return 0; | ||
1964 | } | ||
1887 | #endif | 1965 | #endif |
1888 | extern const struct file_operations def_blk_fops; | 1966 | extern const struct file_operations def_blk_fops; |
1889 | extern const struct file_operations def_chr_fops; | 1967 | extern const struct file_operations def_chr_fops; |
@@ -2128,8 +2206,6 @@ extern ssize_t generic_file_splice_read(struct file *, loff_t *, | |||
2128 | struct pipe_inode_info *, size_t, unsigned int); | 2206 | struct pipe_inode_info *, size_t, unsigned int); |
2129 | extern ssize_t generic_file_splice_write(struct pipe_inode_info *, | 2207 | extern ssize_t generic_file_splice_write(struct pipe_inode_info *, |
2130 | struct file *, loff_t *, size_t, unsigned int); | 2208 | struct file *, loff_t *, size_t, unsigned int); |
2131 | extern ssize_t generic_file_splice_write_nolock(struct pipe_inode_info *, | ||
2132 | struct file *, loff_t *, size_t, unsigned int); | ||
2133 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, | 2209 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, |
2134 | struct file *out, loff_t *, size_t len, unsigned int flags); | 2210 | struct file *out, loff_t *, size_t len, unsigned int flags); |
2135 | extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, | 2211 | extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, |
@@ -2223,9 +2299,8 @@ extern int vfs_readdir(struct file *, filldir_t, void *); | |||
2223 | 2299 | ||
2224 | extern int vfs_stat(char __user *, struct kstat *); | 2300 | extern int vfs_stat(char __user *, struct kstat *); |
2225 | extern int vfs_lstat(char __user *, struct kstat *); | 2301 | extern int vfs_lstat(char __user *, struct kstat *); |
2226 | extern int vfs_stat_fd(int dfd, char __user *, struct kstat *); | ||
2227 | extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *); | ||
2228 | extern int vfs_fstat(unsigned int, struct kstat *); | 2302 | extern int vfs_fstat(unsigned int, struct kstat *); |
2303 | extern int vfs_fstatat(int , char __user *, struct kstat *, int); | ||
2229 | 2304 | ||
2230 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, | 2305 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, |
2231 | unsigned long arg); | 2306 | unsigned long arg); |
@@ -2322,19 +2397,7 @@ ssize_t simple_transaction_read(struct file *file, char __user *buf, | |||
2322 | size_t size, loff_t *pos); | 2397 | size_t size, loff_t *pos); |
2323 | int simple_transaction_release(struct inode *inode, struct file *file); | 2398 | int simple_transaction_release(struct inode *inode, struct file *file); |
2324 | 2399 | ||
2325 | static inline void simple_transaction_set(struct file *file, size_t n) | 2400 | void simple_transaction_set(struct file *file, size_t n); |
2326 | { | ||
2327 | struct simple_transaction_argresp *ar = file->private_data; | ||
2328 | |||
2329 | BUG_ON(n > SIMPLE_TRANSACTION_LIMIT); | ||
2330 | |||
2331 | /* | ||
2332 | * The barrier ensures that ar->size will really remain zero until | ||
2333 | * ar->data is ready for reading. | ||
2334 | */ | ||
2335 | smp_mb(); | ||
2336 | ar->size = n; | ||
2337 | } | ||
2338 | 2401 | ||
2339 | /* | 2402 | /* |
2340 | * simple attribute files | 2403 | * simple attribute files |
@@ -2381,32 +2444,11 @@ ssize_t simple_attr_read(struct file *file, char __user *buf, | |||
2381 | ssize_t simple_attr_write(struct file *file, const char __user *buf, | 2444 | ssize_t simple_attr_write(struct file *file, const char __user *buf, |
2382 | size_t len, loff_t *ppos); | 2445 | size_t len, loff_t *ppos); |
2383 | 2446 | ||
2384 | |||
2385 | #ifdef CONFIG_SECURITY | ||
2386 | static inline char *alloc_secdata(void) | ||
2387 | { | ||
2388 | return (char *)get_zeroed_page(GFP_KERNEL); | ||
2389 | } | ||
2390 | |||
2391 | static inline void free_secdata(void *secdata) | ||
2392 | { | ||
2393 | free_page((unsigned long)secdata); | ||
2394 | } | ||
2395 | #else | ||
2396 | static inline char *alloc_secdata(void) | ||
2397 | { | ||
2398 | return (char *)1; | ||
2399 | } | ||
2400 | |||
2401 | static inline void free_secdata(void *secdata) | ||
2402 | { } | ||
2403 | #endif /* CONFIG_SECURITY */ | ||
2404 | |||
2405 | struct ctl_table; | 2447 | struct ctl_table; |
2406 | int proc_nr_files(struct ctl_table *table, int write, struct file *filp, | 2448 | int proc_nr_files(struct ctl_table *table, int write, struct file *filp, |
2407 | void __user *buffer, size_t *lenp, loff_t *ppos); | 2449 | void __user *buffer, size_t *lenp, loff_t *ppos); |
2408 | 2450 | ||
2409 | int get_filesystem_list(char * buf); | 2451 | int __init get_filesystem_list(char *buf); |
2410 | 2452 | ||
2411 | #endif /* __KERNEL__ */ | 2453 | #endif /* __KERNEL__ */ |
2412 | #endif /* _LINUX_FS_H */ | 2454 | #endif /* _LINUX_FS_H */ |
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 18b467dbe278..78a05bfcd8eb 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h | |||
@@ -4,12 +4,10 @@ | |||
4 | #include <linux/path.h> | 4 | #include <linux/path.h> |
5 | 5 | ||
6 | struct fs_struct { | 6 | struct fs_struct { |
7 | atomic_t count; /* This usage count is used by check_unsafe_exec() for | 7 | int users; |
8 | * security checking purposes - therefore it may not be | ||
9 | * incremented, except by clone(CLONE_FS). | ||
10 | */ | ||
11 | rwlock_t lock; | 8 | rwlock_t lock; |
12 | int umask; | 9 | int umask; |
10 | int in_exec; | ||
13 | struct path root, pwd; | 11 | struct path root, pwd; |
14 | }; | 12 | }; |
15 | 13 | ||
@@ -19,6 +17,8 @@ extern void exit_fs(struct task_struct *); | |||
19 | extern void set_fs_root(struct fs_struct *, struct path *); | 17 | extern void set_fs_root(struct fs_struct *, struct path *); |
20 | extern void set_fs_pwd(struct fs_struct *, struct path *); | 18 | extern void set_fs_pwd(struct fs_struct *, struct path *); |
21 | extern struct fs_struct *copy_fs_struct(struct fs_struct *); | 19 | extern struct fs_struct *copy_fs_struct(struct fs_struct *); |
22 | extern void put_fs_struct(struct fs_struct *); | 20 | extern void free_fs_struct(struct fs_struct *); |
21 | extern void daemonize_fs_struct(void); | ||
22 | extern int unshare_fs_struct(void); | ||
23 | 23 | ||
24 | #endif /* _LINUX_FS_STRUCT_H */ | 24 | #endif /* _LINUX_FS_STRUCT_H */ |
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h new file mode 100644 index 000000000000..84d3532dd3ea --- /dev/null +++ b/include/linux/fscache-cache.h | |||
@@ -0,0 +1,505 @@ | |||
1 | /* General filesystem caching backing cache interface | ||
2 | * | ||
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * NOTE!!! See: | ||
12 | * | ||
13 | * Documentation/filesystems/caching/backend-api.txt | ||
14 | * | ||
15 | * for a description of the cache backend interface declared here. | ||
16 | */ | ||
17 | |||
18 | #ifndef _LINUX_FSCACHE_CACHE_H | ||
19 | #define _LINUX_FSCACHE_CACHE_H | ||
20 | |||
21 | #include <linux/fscache.h> | ||
22 | #include <linux/sched.h> | ||
23 | #include <linux/slow-work.h> | ||
24 | |||
25 | #define NR_MAXCACHES BITS_PER_LONG | ||
26 | |||
27 | struct fscache_cache; | ||
28 | struct fscache_cache_ops; | ||
29 | struct fscache_object; | ||
30 | struct fscache_operation; | ||
31 | |||
32 | /* | ||
33 | * cache tag definition | ||
34 | */ | ||
35 | struct fscache_cache_tag { | ||
36 | struct list_head link; | ||
37 | struct fscache_cache *cache; /* cache referred to by this tag */ | ||
38 | unsigned long flags; | ||
39 | #define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */ | ||
40 | atomic_t usage; | ||
41 | char name[0]; /* tag name */ | ||
42 | }; | ||
43 | |||
44 | /* | ||
45 | * cache definition | ||
46 | */ | ||
47 | struct fscache_cache { | ||
48 | const struct fscache_cache_ops *ops; | ||
49 | struct fscache_cache_tag *tag; /* tag representing this cache */ | ||
50 | struct kobject *kobj; /* system representation of this cache */ | ||
51 | struct list_head link; /* link in list of caches */ | ||
52 | size_t max_index_size; /* maximum size of index data */ | ||
53 | char identifier[36]; /* cache label */ | ||
54 | |||
55 | /* node management */ | ||
56 | struct work_struct op_gc; /* operation garbage collector */ | ||
57 | struct list_head object_list; /* list of data/index objects */ | ||
58 | struct list_head op_gc_list; /* list of ops to be deleted */ | ||
59 | spinlock_t object_list_lock; | ||
60 | spinlock_t op_gc_list_lock; | ||
61 | atomic_t object_count; /* no. of live objects in this cache */ | ||
62 | struct fscache_object *fsdef; /* object for the fsdef index */ | ||
63 | unsigned long flags; | ||
64 | #define FSCACHE_IOERROR 0 /* cache stopped on I/O error */ | ||
65 | #define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */ | ||
66 | }; | ||
67 | |||
68 | extern wait_queue_head_t fscache_cache_cleared_wq; | ||
69 | |||
70 | /* | ||
71 | * operation to be applied to a cache object | ||
72 | * - retrieval initiation operations are done in the context of the process | ||
73 | * that issued them, and not in an async thread pool | ||
74 | */ | ||
75 | typedef void (*fscache_operation_release_t)(struct fscache_operation *op); | ||
76 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); | ||
77 | |||
78 | struct fscache_operation { | ||
79 | union { | ||
80 | struct work_struct fast_work; /* record for fast ops */ | ||
81 | struct slow_work slow_work; /* record for (very) slow ops */ | ||
82 | }; | ||
83 | struct list_head pend_link; /* link in object->pending_ops */ | ||
84 | struct fscache_object *object; /* object to be operated upon */ | ||
85 | |||
86 | unsigned long flags; | ||
87 | #define FSCACHE_OP_TYPE 0x000f /* operation type */ | ||
88 | #define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */ | ||
89 | #define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */ | ||
90 | #define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done by the issuing thread, not pool */ | ||
91 | #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ | ||
92 | #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ | ||
93 | #define FSCACHE_OP_DEAD 6 /* op is now dead */ | ||
94 | |||
95 | atomic_t usage; | ||
96 | unsigned debug_id; /* debugging ID */ | ||
97 | |||
98 | /* operation processor callback | ||
99 | * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform | ||
100 | * the op in a non-pool thread */ | ||
101 | fscache_operation_processor_t processor; | ||
102 | |||
103 | /* operation releaser */ | ||
104 | fscache_operation_release_t release; | ||
105 | }; | ||
106 | |||
107 | extern atomic_t fscache_op_debug_id; | ||
108 | extern const struct slow_work_ops fscache_op_slow_work_ops; | ||
109 | |||
110 | extern void fscache_enqueue_operation(struct fscache_operation *); | ||
111 | extern void fscache_put_operation(struct fscache_operation *); | ||
112 | |||
113 | /** | ||
114 | * fscache_operation_init - Do basic initialisation of an operation | ||
115 | * @op: The operation to initialise | ||
116 | * @release: The release function to assign | ||
117 | * | ||
118 | * Do basic initialisation of an operation. The caller must still set flags, | ||
119 | * object, either fast_work or slow_work if necessary, and processor if needed. | ||
120 | */ | ||
121 | static inline void fscache_operation_init(struct fscache_operation *op, | ||
122 | fscache_operation_release_t release) | ||
123 | { | ||
124 | atomic_set(&op->usage, 1); | ||
125 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); | ||
126 | op->release = release; | ||
127 | INIT_LIST_HEAD(&op->pend_link); | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * fscache_operation_init_slow - Do additional initialisation of a slow op | ||
132 | * @op: The operation to initialise | ||
133 | * @processor: The processor function to assign | ||
134 | * | ||
135 | * Do additional initialisation of an operation as required for slow work. | ||
136 | */ | ||
137 | static inline | ||
138 | void fscache_operation_init_slow(struct fscache_operation *op, | ||
139 | fscache_operation_processor_t processor) | ||
140 | { | ||
141 | op->processor = processor; | ||
142 | slow_work_init(&op->slow_work, &fscache_op_slow_work_ops); | ||
143 | } | ||
144 | |||
145 | /* | ||
146 | * data read operation | ||
147 | */ | ||
148 | struct fscache_retrieval { | ||
149 | struct fscache_operation op; | ||
150 | struct address_space *mapping; /* netfs pages */ | ||
151 | fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ | ||
152 | void *context; /* netfs read context (pinned) */ | ||
153 | struct list_head to_do; /* list of things to be done by the backend */ | ||
154 | unsigned long start_time; /* time at which retrieval started */ | ||
155 | }; | ||
156 | |||
157 | typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op, | ||
158 | struct page *page, | ||
159 | gfp_t gfp); | ||
160 | |||
161 | typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op, | ||
162 | struct list_head *pages, | ||
163 | unsigned *nr_pages, | ||
164 | gfp_t gfp); | ||
165 | |||
166 | /** | ||
167 | * fscache_get_retrieval - Get an extra reference on a retrieval operation | ||
168 | * @op: The retrieval operation to get a reference on | ||
169 | * | ||
170 | * Get an extra reference on a retrieval operation. | ||
171 | */ | ||
172 | static inline | ||
173 | struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op) | ||
174 | { | ||
175 | atomic_inc(&op->op.usage); | ||
176 | return op; | ||
177 | } | ||
178 | |||
179 | /** | ||
180 | * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing | ||
181 | * @op: The retrieval operation affected | ||
182 | * | ||
183 | * Enqueue a retrieval operation for processing by the FS-Cache thread pool. | ||
184 | */ | ||
185 | static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op) | ||
186 | { | ||
187 | fscache_enqueue_operation(&op->op); | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * fscache_put_retrieval - Drop a reference to a retrieval operation | ||
192 | * @op: The retrieval operation affected | ||
193 | * | ||
194 | * Drop a reference to a retrieval operation. | ||
195 | */ | ||
196 | static inline void fscache_put_retrieval(struct fscache_retrieval *op) | ||
197 | { | ||
198 | fscache_put_operation(&op->op); | ||
199 | } | ||
200 | |||
201 | /* | ||
202 | * cached page storage work item | ||
203 | * - used to do three things: | ||
204 | * - batch writes to the cache | ||
205 | * - do cache writes asynchronously | ||
206 | * - defer writes until cache object lookup completion | ||
207 | */ | ||
208 | struct fscache_storage { | ||
209 | struct fscache_operation op; | ||
210 | pgoff_t store_limit; /* don't write more than this */ | ||
211 | }; | ||
212 | |||
213 | /* | ||
214 | * cache operations | ||
215 | */ | ||
216 | struct fscache_cache_ops { | ||
217 | /* name of cache provider */ | ||
218 | const char *name; | ||
219 | |||
220 | /* allocate an object record for a cookie */ | ||
221 | struct fscache_object *(*alloc_object)(struct fscache_cache *cache, | ||
222 | struct fscache_cookie *cookie); | ||
223 | |||
224 | /* look up the object for a cookie */ | ||
225 | void (*lookup_object)(struct fscache_object *object); | ||
226 | |||
227 | /* finished looking up */ | ||
228 | void (*lookup_complete)(struct fscache_object *object); | ||
229 | |||
230 | /* increment the usage count on this object (may fail if unmounting) */ | ||
231 | struct fscache_object *(*grab_object)(struct fscache_object *object); | ||
232 | |||
233 | /* pin an object in the cache */ | ||
234 | int (*pin_object)(struct fscache_object *object); | ||
235 | |||
236 | /* unpin an object in the cache */ | ||
237 | void (*unpin_object)(struct fscache_object *object); | ||
238 | |||
239 | /* store the updated auxiliary data on an object */ | ||
240 | void (*update_object)(struct fscache_object *object); | ||
241 | |||
242 | /* discard the resources pinned by an object and effect retirement if | ||
243 | * necessary */ | ||
244 | void (*drop_object)(struct fscache_object *object); | ||
245 | |||
246 | /* dispose of a reference to an object */ | ||
247 | void (*put_object)(struct fscache_object *object); | ||
248 | |||
249 | /* sync a cache */ | ||
250 | void (*sync_cache)(struct fscache_cache *cache); | ||
251 | |||
252 | /* notification that the attributes of a non-index object (such as | ||
253 | * i_size) have changed */ | ||
254 | int (*attr_changed)(struct fscache_object *object); | ||
255 | |||
256 | /* reserve space for an object's data and associated metadata */ | ||
257 | int (*reserve_space)(struct fscache_object *object, loff_t i_size); | ||
258 | |||
259 | /* request a backing block for a page be read or allocated in the | ||
260 | * cache */ | ||
261 | fscache_page_retrieval_func_t read_or_alloc_page; | ||
262 | |||
263 | /* request backing blocks for a list of pages be read or allocated in | ||
264 | * the cache */ | ||
265 | fscache_pages_retrieval_func_t read_or_alloc_pages; | ||
266 | |||
267 | /* request a backing block for a page be allocated in the cache so that | ||
268 | * it can be written directly */ | ||
269 | fscache_page_retrieval_func_t allocate_page; | ||
270 | |||
271 | /* request backing blocks for pages be allocated in the cache so that | ||
272 | * they can be written directly */ | ||
273 | fscache_pages_retrieval_func_t allocate_pages; | ||
274 | |||
275 | /* write a page to its backing block in the cache */ | ||
276 | int (*write_page)(struct fscache_storage *op, struct page *page); | ||
277 | |||
278 | /* detach backing block from a page (optional) | ||
279 | * - must release the cookie lock before returning | ||
280 | * - may sleep | ||
281 | */ | ||
282 | void (*uncache_page)(struct fscache_object *object, | ||
283 | struct page *page); | ||
284 | |||
285 | /* dissociate a cache from all the pages it was backing */ | ||
286 | void (*dissociate_pages)(struct fscache_cache *cache); | ||
287 | }; | ||
288 | |||
289 | /* | ||
290 | * data file or index object cookie | ||
291 | * - a file will only appear in one cache | ||
292 | * - a request to cache a file may or may not be honoured, subject to | ||
293 | * constraints such as disk space | ||
294 | * - indices are created on disk just-in-time | ||
295 | */ | ||
296 | struct fscache_cookie { | ||
297 | atomic_t usage; /* number of users of this cookie */ | ||
298 | atomic_t n_children; /* number of children of this cookie */ | ||
299 | spinlock_t lock; | ||
300 | struct hlist_head backing_objects; /* object(s) backing this file/index */ | ||
301 | const struct fscache_cookie_def *def; /* definition */ | ||
302 | struct fscache_cookie *parent; /* parent of this entry */ | ||
303 | void *netfs_data; /* back pointer to netfs */ | ||
304 | struct radix_tree_root stores; /* pages to be stored on this cookie */ | ||
305 | #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ | ||
306 | |||
307 | unsigned long flags; | ||
308 | #define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */ | ||
309 | #define FSCACHE_COOKIE_CREATING 1 /* T if non-index object being created still */ | ||
310 | #define FSCACHE_COOKIE_NO_DATA_YET 2 /* T if new object with no cached data yet */ | ||
311 | #define FSCACHE_COOKIE_PENDING_FILL 3 /* T if pending initial fill on object */ | ||
312 | #define FSCACHE_COOKIE_FILLING 4 /* T if filling object incrementally */ | ||
313 | #define FSCACHE_COOKIE_UNAVAILABLE 5 /* T if cookie is unavailable (error, etc) */ | ||
314 | }; | ||
315 | |||
316 | extern struct fscache_cookie fscache_fsdef_index; | ||
317 | |||
318 | /* | ||
319 | * on-disk cache file or index handle | ||
320 | */ | ||
321 | struct fscache_object { | ||
322 | enum fscache_object_state { | ||
323 | FSCACHE_OBJECT_INIT, /* object in initial unbound state */ | ||
324 | FSCACHE_OBJECT_LOOKING_UP, /* looking up object */ | ||
325 | FSCACHE_OBJECT_CREATING, /* creating object */ | ||
326 | |||
327 | /* active states */ | ||
328 | FSCACHE_OBJECT_AVAILABLE, /* cleaning up object after creation */ | ||
329 | FSCACHE_OBJECT_ACTIVE, /* object is usable */ | ||
330 | FSCACHE_OBJECT_UPDATING, /* object is updating */ | ||
331 | |||
332 | /* terminal states */ | ||
333 | FSCACHE_OBJECT_DYING, /* object waiting for accessors to finish */ | ||
334 | FSCACHE_OBJECT_LC_DYING, /* object cleaning up after lookup/create */ | ||
335 | FSCACHE_OBJECT_ABORT_INIT, /* abort the init state */ | ||
336 | FSCACHE_OBJECT_RELEASING, /* releasing object */ | ||
337 | FSCACHE_OBJECT_RECYCLING, /* retiring object */ | ||
338 | FSCACHE_OBJECT_WITHDRAWING, /* withdrawing object */ | ||
339 | FSCACHE_OBJECT_DEAD, /* object is now dead */ | ||
340 | } state; | ||
341 | |||
342 | int debug_id; /* debugging ID */ | ||
343 | int n_children; /* number of child objects */ | ||
344 | int n_ops; /* number of ops outstanding on object */ | ||
345 | int n_obj_ops; /* number of object ops outstanding on object */ | ||
346 | int n_in_progress; /* number of ops in progress */ | ||
347 | int n_exclusive; /* number of exclusive ops queued */ | ||
348 | spinlock_t lock; /* state and operations lock */ | ||
349 | |||
350 | unsigned long lookup_jif; /* time at which lookup started */ | ||
351 | unsigned long event_mask; /* events this object is interested in */ | ||
352 | unsigned long events; /* events to be processed by this object | ||
353 | * (order is important - using fls) */ | ||
354 | #define FSCACHE_OBJECT_EV_REQUEUE 0 /* T if object should be requeued */ | ||
355 | #define FSCACHE_OBJECT_EV_UPDATE 1 /* T if object should be updated */ | ||
356 | #define FSCACHE_OBJECT_EV_CLEARED 2 /* T if accessors all gone */ | ||
357 | #define FSCACHE_OBJECT_EV_ERROR 3 /* T if fatal error occurred during processing */ | ||
358 | #define FSCACHE_OBJECT_EV_RELEASE 4 /* T if netfs requested object release */ | ||
359 | #define FSCACHE_OBJECT_EV_RETIRE 5 /* T if netfs requested object retirement */ | ||
360 | #define FSCACHE_OBJECT_EV_WITHDRAW 6 /* T if cache requested object withdrawal */ | ||
361 | |||
362 | unsigned long flags; | ||
363 | #define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */ | ||
364 | #define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */ | ||
365 | #define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */ | ||
366 | |||
367 | struct list_head cache_link; /* link in cache->object_list */ | ||
368 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ | ||
369 | struct fscache_cache *cache; /* cache that supplied this object */ | ||
370 | struct fscache_cookie *cookie; /* netfs's file/index object */ | ||
371 | struct fscache_object *parent; /* parent object */ | ||
372 | struct slow_work work; /* attention scheduling record */ | ||
373 | struct list_head dependents; /* FIFO of dependent objects */ | ||
374 | struct list_head dep_link; /* link in parent's dependents list */ | ||
375 | struct list_head pending_ops; /* unstarted operations on this object */ | ||
376 | pgoff_t store_limit; /* current storage limit */ | ||
377 | }; | ||
378 | |||
379 | extern const char *fscache_object_states[]; | ||
380 | |||
381 | #define fscache_object_is_active(obj) \ | ||
382 | (!test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ | ||
383 | (obj)->state >= FSCACHE_OBJECT_AVAILABLE && \ | ||
384 | (obj)->state < FSCACHE_OBJECT_DYING) | ||
385 | |||
386 | extern const struct slow_work_ops fscache_object_slow_work_ops; | ||
387 | |||
388 | /** | ||
389 | * fscache_object_init - Initialise a cache object description | ||
390 | * @object: Object description | ||
391 | * | ||
392 | * Initialise a cache object description to its basic values. | ||
393 | * | ||
394 | * See Documentation/filesystems/caching/backend-api.txt for a complete | ||
395 | * description. | ||
396 | */ | ||
397 | static inline | ||
398 | void fscache_object_init(struct fscache_object *object, | ||
399 | struct fscache_cookie *cookie, | ||
400 | struct fscache_cache *cache) | ||
401 | { | ||
402 | atomic_inc(&cache->object_count); | ||
403 | |||
404 | object->state = FSCACHE_OBJECT_INIT; | ||
405 | spin_lock_init(&object->lock); | ||
406 | INIT_LIST_HEAD(&object->cache_link); | ||
407 | INIT_HLIST_NODE(&object->cookie_link); | ||
408 | vslow_work_init(&object->work, &fscache_object_slow_work_ops); | ||
409 | INIT_LIST_HEAD(&object->dependents); | ||
410 | INIT_LIST_HEAD(&object->dep_link); | ||
411 | INIT_LIST_HEAD(&object->pending_ops); | ||
412 | object->n_children = 0; | ||
413 | object->n_ops = object->n_in_progress = object->n_exclusive = 0; | ||
414 | object->events = object->event_mask = 0; | ||
415 | object->flags = 0; | ||
416 | object->store_limit = 0; | ||
417 | object->cache = cache; | ||
418 | object->cookie = cookie; | ||
419 | object->parent = NULL; | ||
420 | } | ||
421 | |||
422 | extern void fscache_object_lookup_negative(struct fscache_object *object); | ||
423 | extern void fscache_obtained_object(struct fscache_object *object); | ||
424 | |||
425 | /** | ||
426 | * fscache_object_destroyed - Note destruction of an object in a cache | ||
427 | * @cache: The cache from which the object came | ||
428 | * | ||
429 | * Note the destruction and deallocation of an object record in a cache. | ||
430 | */ | ||
431 | static inline void fscache_object_destroyed(struct fscache_cache *cache) | ||
432 | { | ||
433 | if (atomic_dec_and_test(&cache->object_count)) | ||
434 | wake_up_all(&fscache_cache_cleared_wq); | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * fscache_object_lookup_error - Note an object encountered an error | ||
439 | * @object: The object on which the error was encountered | ||
440 | * | ||
441 | * Note that an object encountered a fatal error (usually an I/O error) and | ||
442 | * that it should be withdrawn as soon as possible. | ||
443 | */ | ||
444 | static inline void fscache_object_lookup_error(struct fscache_object *object) | ||
445 | { | ||
446 | set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events); | ||
447 | } | ||
448 | |||
449 | /** | ||
450 | * fscache_set_store_limit - Set the maximum size to be stored in an object | ||
451 | * @object: The object to set the maximum on | ||
452 | * @i_size: The limit to set in bytes | ||
453 | * | ||
454 | * Set the maximum size an object is permitted to reach, implying the highest | ||
455 | * byte that may be written. Intended to be called by the attr_changed() op. | ||
456 | * | ||
457 | * See Documentation/filesystems/caching/backend-api.txt for a complete | ||
458 | * description. | ||
459 | */ | ||
460 | static inline | ||
461 | void fscache_set_store_limit(struct fscache_object *object, loff_t i_size) | ||
462 | { | ||
463 | object->store_limit = i_size >> PAGE_SHIFT; | ||
464 | if (i_size & ~PAGE_MASK) | ||
465 | object->store_limit++; | ||
466 | } | ||
467 | |||
468 | /** | ||
469 | * fscache_end_io - End a retrieval operation on a page | ||
470 | * @op: The FS-Cache operation covering the retrieval | ||
471 | * @page: The page that was to be fetched | ||
472 | * @error: The error code (0 if successful) | ||
473 | * | ||
474 | * Note the end of an operation to retrieve a page, as covered by a particular | ||
475 | * operation record. | ||
476 | */ | ||
477 | static inline void fscache_end_io(struct fscache_retrieval *op, | ||
478 | struct page *page, int error) | ||
479 | { | ||
480 | op->end_io_func(page, op->context, error); | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * out-of-line cache backend functions | ||
485 | */ | ||
486 | extern void fscache_init_cache(struct fscache_cache *cache, | ||
487 | const struct fscache_cache_ops *ops, | ||
488 | const char *idfmt, | ||
489 | ...) __attribute__ ((format (printf, 3, 4))); | ||
490 | |||
491 | extern int fscache_add_cache(struct fscache_cache *cache, | ||
492 | struct fscache_object *fsdef, | ||
493 | const char *tagname); | ||
494 | extern void fscache_withdraw_cache(struct fscache_cache *cache); | ||
495 | |||
496 | extern void fscache_io_error(struct fscache_cache *cache); | ||
497 | |||
498 | extern void fscache_mark_pages_cached(struct fscache_retrieval *op, | ||
499 | struct pagevec *pagevec); | ||
500 | |||
501 | extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, | ||
502 | const void *data, | ||
503 | uint16_t datalen); | ||
504 | |||
505 | #endif /* _LINUX_FSCACHE_CACHE_H */ | ||
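Editor's note: to tie the backend pieces together, a cache provider typically initialises a struct fscache_cache, allocates an object for the fsdef index, and registers the pair using the out-of-line functions declared above. In the sketch below, my_cache_ops and my_alloc_fsdef_object() are hypothetical backend-specific stand-ins; see Documentation/filesystems/caching/backend-api.txt for the real contract.

static struct fscache_cache my_cache;

static int my_backend_bind(const char *tagname)
{
	struct fscache_object *fsdef;

	/* the identifier is built printf-style from the format and arguments */
	fscache_init_cache(&my_cache, &my_cache_ops, "mybackend-%s", tagname);

	fsdef = my_alloc_fsdef_object(&my_cache);	/* backend specific */
	if (!fsdef)
		return -ENOMEM;

	return fscache_add_cache(&my_cache, fsdef, tagname);
}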
diff --git a/include/linux/fscache.h b/include/linux/fscache.h new file mode 100644 index 000000000000..6d8ee466e0a0 --- /dev/null +++ b/include/linux/fscache.h | |||
@@ -0,0 +1,618 @@ | |||
1 | /* General filesystem caching interface | ||
2 | * | ||
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | * NOTE!!! See: | ||
12 | * | ||
13 | * Documentation/filesystems/caching/netfs-api.txt | ||
14 | * | ||
15 | * for a description of the network filesystem interface declared here. | ||
16 | */ | ||
17 | |||
18 | #ifndef _LINUX_FSCACHE_H | ||
19 | #define _LINUX_FSCACHE_H | ||
20 | |||
21 | #include <linux/fs.h> | ||
22 | #include <linux/list.h> | ||
23 | #include <linux/pagemap.h> | ||
24 | #include <linux/pagevec.h> | ||
25 | |||
26 | #if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE) | ||
27 | #define fscache_available() (1) | ||
28 | #define fscache_cookie_valid(cookie) (cookie) | ||
29 | #else | ||
30 | #define fscache_available() (0) | ||
31 | #define fscache_cookie_valid(cookie) (0) | ||
32 | #endif | ||
33 | |||
34 | |||
35 | /* | ||
36 | * overload PG_private_2 to give us PG_fscache - this is used to indicate that | ||
37 | * a page is currently backed by a local disk cache | ||
38 | */ | ||
39 | #define PageFsCache(page) PagePrivate2((page)) | ||
40 | #define SetPageFsCache(page) SetPagePrivate2((page)) | ||
41 | #define ClearPageFsCache(page) ClearPagePrivate2((page)) | ||
42 | #define TestSetPageFsCache(page) TestSetPagePrivate2((page)) | ||
43 | #define TestClearPageFsCache(page) TestClearPagePrivate2((page)) | ||
44 | |||
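Editor's note: one way the PG_fscache overload above gets used is in a network filesystem's releasepage() address-space operation, which can refuse to give a page back while the local cache still holds an interest in it. A hypothetical sketch (mynetfs_releasepage is not part of this header):

static int mynetfs_releasepage(struct page *page, gfp_t gfp)
{
	/* the local cache still references this page; keep it */
	if (PageFsCache(page))
		return 0;

	return 1;		/* nothing pins the page; the VM may free it */
}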
45 | /* pattern used to fill dead space in an index entry */ | ||
46 | #define FSCACHE_INDEX_DEADFILL_PATTERN 0x79 | ||
47 | |||
48 | struct pagevec; | ||
49 | struct fscache_cache_tag; | ||
50 | struct fscache_cookie; | ||
51 | struct fscache_netfs; | ||
52 | |||
53 | typedef void (*fscache_rw_complete_t)(struct page *page, | ||
54 | void *context, | ||
55 | int error); | ||
56 | |||
57 | /* result of index entry consultation */ | ||
58 | enum fscache_checkaux { | ||
59 | FSCACHE_CHECKAUX_OKAY, /* entry okay as is */ | ||
60 | FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */ | ||
61 | FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */ | ||
62 | }; | ||
63 | |||
64 | /* | ||
65 | * fscache cookie definition | ||
66 | */ | ||
67 | struct fscache_cookie_def { | ||
68 | /* name of cookie type */ | ||
69 | char name[16]; | ||
70 | |||
71 | /* cookie type */ | ||
72 | uint8_t type; | ||
73 | #define FSCACHE_COOKIE_TYPE_INDEX 0 | ||
74 | #define FSCACHE_COOKIE_TYPE_DATAFILE 1 | ||
75 | |||
76 | /* select the cache into which to insert an entry in this index | ||
77 | * - optional | ||
78 | * - should return a cache identifier or NULL to cause the cache to be | ||
79 | * inherited from the parent if possible or the first cache picked | ||
80 | * for a non-index file if not | ||
81 | */ | ||
82 | struct fscache_cache_tag *(*select_cache)( | ||
83 | const void *parent_netfs_data, | ||
84 | const void *cookie_netfs_data); | ||
85 | |||
86 | /* get an index key | ||
87 | * - should store the key data in the buffer | ||
88 | * - should return the amount of data stored | ||
89 | * - not permitted to return an error | ||
90 | * - the netfs data from the cookie being used as the source is | ||
91 | * presented | ||
92 | */ | ||
93 | uint16_t (*get_key)(const void *cookie_netfs_data, | ||
94 | void *buffer, | ||
95 | uint16_t bufmax); | ||
96 | |||
97 | /* get certain file attributes from the netfs data | ||
98 | * - this function can be absent for an index | ||
99 | * - not permitted to return an error | ||
100 | * - the netfs data from the cookie being used as the source is | ||
101 | * presented | ||
102 | */ | ||
103 | void (*get_attr)(const void *cookie_netfs_data, uint64_t *size); | ||
104 | |||
105 | /* get the auxiliary data from netfs data | ||
106 | * - this function can be absent if the index carries no state data | ||
107 | * - should store the auxiliary data in the buffer | ||
108 | * - should return the amount of data stored | ||
109 | * - not permitted to return an error | ||
110 | * - the netfs data from the cookie being used as the source is | ||
111 | * presented | ||
112 | */ | ||
113 | uint16_t (*get_aux)(const void *cookie_netfs_data, | ||
114 | void *buffer, | ||
115 | uint16_t bufmax); | ||
116 | |||
117 | /* consult the netfs about the state of an object | ||
118 | * - this function can be absent if the index carries no state data | ||
119 | * - the netfs data from the cookie being used as the target is | ||
120 | * presented, as is the auxiliary data | ||
121 | */ | ||
122 | enum fscache_checkaux (*check_aux)(void *cookie_netfs_data, | ||
123 | const void *data, | ||
124 | uint16_t datalen); | ||
125 | |||
126 | /* get an extra reference on a read context | ||
127 | * - this function can be absent if the completion function doesn't | ||
128 | * require a context | ||
129 | */ | ||
130 | void (*get_context)(void *cookie_netfs_data, void *context); | ||
131 | |||
132 | /* release an extra reference on a read context | ||
133 | * - this function can be absent if the completion function doesn't | ||
134 | * require a context | ||
135 | */ | ||
136 | void (*put_context)(void *cookie_netfs_data, void *context); | ||
137 | |||
138 | /* indicate pages that now have cache metadata retained | ||
139 | * - this function should mark the specified pages as now being cached | ||
140 | * - the pages will have been marked with PG_fscache before this is | ||
141 | * called, so this is optional | ||
142 | */ | ||
143 | void (*mark_pages_cached)(void *cookie_netfs_data, | ||
144 | struct address_space *mapping, | ||
145 | struct pagevec *cached_pvec); | ||
146 | |||
147 | /* indicate the cookie is no longer cached | ||
148 | * - this function is called when the backing store currently caching | ||
149 | * a cookie is removed | ||
150 | * - the netfs should use this to clean up any markers indicating | ||
151 | * cached pages | ||
152 | * - this is mandatory for any object that may have data | ||
153 | */ | ||
154 | void (*now_uncached)(void *cookie_netfs_data); | ||
155 | }; | ||
156 | |||
157 | /* | ||
158 | * fscache cached network filesystem type | ||
159 | * - name, version and ops must be filled in before registration | ||
160 | * - all other fields will be set during registration | ||
161 | */ | ||
162 | struct fscache_netfs { | ||
163 | uint32_t version; /* indexing version */ | ||
164 | const char *name; /* filesystem name */ | ||
165 | struct fscache_cookie *primary_index; | ||
166 | struct list_head link; /* internal link */ | ||
167 | }; | ||
168 | |||
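The two structures above are everything a network filesystem has to fill in before it can talk to FS-Cache: one cookie definition per object type, plus a single fscache_netfs per filesystem. A minimal, purely illustrative sketch (the "myfs" names, the stub key and the chosen fields are assumptions, not part of this patch):

	#include <linux/fscache.h>

	/* Illustrative only: describe an index object type to FS-Cache. */
	static uint16_t myfs_index_get_key(const void *cookie_netfs_data,
					   void *buffer, uint16_t bufmax)
	{
		/* copy an opaque key (server address, volume id, ...) into
		 * buffer and return the number of bytes stored; returning an
		 * error is not permitted here */
		return 0;
	}

	static const struct fscache_cookie_def myfs_index_def = {
		.name		= "myfs.index",
		.type		= FSCACHE_COOKIE_TYPE_INDEX,
		.get_key	= myfs_index_get_key,
	};

	static struct fscache_netfs myfs_cache_netfs = {
		.name		= "myfs",
		.version	= 0,
	};

The primary_index and link fields are deliberately left unset; as the comment above says, they are filled in during registration.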
169 | /* | ||
170 | * slow-path functions for when there is actually caching available, and the | ||
171 | * netfs does actually have a valid token | ||
172 | * - these are not to be called directly | ||
173 | * - these are undefined symbols when FS-Cache is not configured and the | ||
174 | * optimiser takes care of not using them | ||
175 | */ | ||
176 | extern int __fscache_register_netfs(struct fscache_netfs *); | ||
177 | extern void __fscache_unregister_netfs(struct fscache_netfs *); | ||
178 | extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *); | ||
179 | extern void __fscache_release_cache_tag(struct fscache_cache_tag *); | ||
180 | |||
181 | extern struct fscache_cookie *__fscache_acquire_cookie( | ||
182 | struct fscache_cookie *, | ||
183 | const struct fscache_cookie_def *, | ||
184 | void *); | ||
185 | extern void __fscache_relinquish_cookie(struct fscache_cookie *, int); | ||
186 | extern void __fscache_update_cookie(struct fscache_cookie *); | ||
187 | extern int __fscache_attr_changed(struct fscache_cookie *); | ||
188 | extern int __fscache_read_or_alloc_page(struct fscache_cookie *, | ||
189 | struct page *, | ||
190 | fscache_rw_complete_t, | ||
191 | void *, | ||
192 | gfp_t); | ||
193 | extern int __fscache_read_or_alloc_pages(struct fscache_cookie *, | ||
194 | struct address_space *, | ||
195 | struct list_head *, | ||
196 | unsigned *, | ||
197 | fscache_rw_complete_t, | ||
198 | void *, | ||
199 | gfp_t); | ||
200 | extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t); | ||
201 | extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t); | ||
202 | extern void __fscache_uncache_page(struct fscache_cookie *, struct page *); | ||
203 | extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *); | ||
204 | extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *); | ||
205 | |||
206 | /** | ||
207 | * fscache_register_netfs - Register a filesystem as desiring caching services | ||
208 | * @netfs: The description of the filesystem | ||
209 | * | ||
210 | * Register a filesystem as desiring caching services if they're available. | ||
211 | * | ||
212 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
213 | * description. | ||
214 | */ | ||
215 | static inline | ||
216 | int fscache_register_netfs(struct fscache_netfs *netfs) | ||
217 | { | ||
218 | if (fscache_available()) | ||
219 | return __fscache_register_netfs(netfs); | ||
220 | else | ||
221 | return 0; | ||
222 | } | ||
223 | |||
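As a sketch of how this wrapper is typically used, assuming the myfs_cache_netfs object from the earlier example: registration happens once at module init and is paired with fscache_unregister_netfs() at exit. If FS-Cache is not compiled in, both calls quietly become no-ops.

	static int __init myfs_init(void)
	{
		int ret;

		ret = fscache_register_netfs(&myfs_cache_netfs);
		if (ret < 0)
			return ret;
		/* ... rest of the filesystem's initialisation ... */
		return 0;
	}

	static void __exit myfs_exit(void)
	{
		fscache_unregister_netfs(&myfs_cache_netfs);
	}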
224 | /** | ||
225 | * fscache_unregister_netfs - Indicate that a filesystem no longer desires | ||
226 | * caching services | ||
227 | * @netfs: The description of the filesystem | ||
228 | * | ||
229 | * Indicate that a filesystem no longer desires caching services for the | ||
230 | * moment. | ||
231 | * | ||
232 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
233 | * description. | ||
234 | */ | ||
235 | static inline | ||
236 | void fscache_unregister_netfs(struct fscache_netfs *netfs) | ||
237 | { | ||
238 | if (fscache_available()) | ||
239 | __fscache_unregister_netfs(netfs); | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * fscache_lookup_cache_tag - Look up a cache tag | ||
244 | * @name: The name of the tag to search for | ||
245 | * | ||
246 | * Acquire a specific cache referral tag that can be used to select a specific | ||
247 | * cache in which to cache an index. | ||
248 | * | ||
249 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
250 | * description. | ||
251 | */ | ||
252 | static inline | ||
253 | struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name) | ||
254 | { | ||
255 | if (fscache_available()) | ||
256 | return __fscache_lookup_cache_tag(name); | ||
257 | else | ||
258 | return NULL; | ||
259 | } | ||
260 | |||
261 | /** | ||
262 | * fscache_release_cache_tag - Release a cache tag | ||
263 | * @tag: The tag to release | ||
264 | * | ||
265 | * Release a reference to a cache referral tag previously looked up. | ||
266 | * | ||
267 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
268 | * description. | ||
269 | */ | ||
270 | static inline | ||
271 | void fscache_release_cache_tag(struct fscache_cache_tag *tag) | ||
272 | { | ||
273 | if (fscache_available()) | ||
274 | __fscache_release_cache_tag(tag); | ||
275 | } | ||
276 | |||
277 | /** | ||
278 | * fscache_acquire_cookie - Acquire a cookie to represent a cache object | ||
279 | * @parent: The cookie that's to be the parent of this one | ||
280 | * @def: A description of the cache object, including callback operations | ||
281 | * @netfs_data: An arbitrary piece of data to be kept in the cookie to | ||
282 | * represent the cache object to the netfs | ||
283 | * | ||
284 | * This function is used to inform FS-Cache about part of an index hierarchy | ||
285 | * that can be used to locate files. This is done by requesting a cookie for | ||
286 | * each index in the path to the file. | ||
287 | * | ||
288 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
289 | * description. | ||
290 | */ | ||
291 | static inline | ||
292 | struct fscache_cookie *fscache_acquire_cookie( | ||
293 | struct fscache_cookie *parent, | ||
294 | const struct fscache_cookie_def *def, | ||
295 | void *netfs_data) | ||
296 | { | ||
297 | if (fscache_cookie_valid(parent)) | ||
298 | return __fscache_acquire_cookie(parent, def, netfs_data); | ||
299 | else | ||
300 | return NULL; | ||
301 | } | ||
302 | |||
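The index hierarchy mentioned above is built by chaining these calls: each cookie names its parent, starting from the netfs's primary index. A hedged sketch (myfs_server_def, myfs_file_def and the server/inode structures are assumptions for illustration):

	/* one index entry per server, under the netfs's primary index */
	server->cache = fscache_acquire_cookie(myfs_cache_netfs.primary_index,
					       &myfs_server_def, server);

	/* one data-file object per inode, under that server's index */
	inode_info->cache = fscache_acquire_cookie(server->cache,
						   &myfs_file_def, inode_info);

	/* NULL simply means "no caching"; a NULL cookie can be passed to every
	 * other wrapper in this header and they will just return -ENOBUFS or
	 * do nothing. */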
303 | /** | ||
304 | * fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding | ||
305 | * it | ||
306 | * @cookie: The cookie being returned | ||
307 | * @retire: True if the cache object the cookie represents is to be discarded | ||
308 | * | ||
309 | * This function returns a cookie to the cache, forcibly discarding the | ||
310 | * associated cache object if retire is set to true. | ||
311 | * | ||
312 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
313 | * description. | ||
314 | */ | ||
315 | static inline | ||
316 | void fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire) | ||
317 | { | ||
318 | if (fscache_cookie_valid(cookie)) | ||
319 | __fscache_relinquish_cookie(cookie, retire); | ||
320 | } | ||
321 | |||
322 | /** | ||
323 | * fscache_update_cookie - Request that a cache object be updated | ||
324 | * @cookie: The cookie representing the cache object | ||
325 | * | ||
326 | * Request an update of the index data for the cache object associated with the | ||
327 | * cookie. | ||
328 | * | ||
329 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
330 | * description. | ||
331 | */ | ||
332 | static inline | ||
333 | void fscache_update_cookie(struct fscache_cookie *cookie) | ||
334 | { | ||
335 | if (fscache_cookie_valid(cookie)) | ||
336 | __fscache_update_cookie(cookie); | ||
337 | } | ||
338 | |||
339 | /** | ||
340 | * fscache_pin_cookie - Pin a data-storage cache object in its cache | ||
341 | * @cookie: The cookie representing the cache object | ||
342 | * | ||
343 | * Permit data-storage cache objects to be pinned in the cache. | ||
344 | * | ||
345 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
346 | * description. | ||
347 | */ | ||
348 | static inline | ||
349 | int fscache_pin_cookie(struct fscache_cookie *cookie) | ||
350 | { | ||
351 | return -ENOBUFS; | ||
352 | } | ||
353 | |||
354 | /** | ||
355 | * fscache_unpin_cookie - Unpin a data-storage cache object in its cache | ||
356 | * @cookie: The cookie representing the cache object | ||
357 | * | ||
358 | * Permit data-storage cache objects to be unpinned from the cache. | ||
359 | * | ||
360 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
361 | * description. | ||
362 | */ | ||
363 | static inline | ||
364 | void fscache_unpin_cookie(struct fscache_cookie *cookie) | ||
365 | { | ||
366 | } | ||
367 | |||
368 | /** | ||
369 | * fscache_attr_changed - Notify cache that an object's attributes changed | ||
370 | * @cookie: The cookie representing the cache object | ||
371 | * | ||
372 | * Send a notification to the cache indicating that an object's attributes have | ||
373 | * changed. This includes the data size. These attributes will be obtained | ||
374 | * through the get_attr() cookie definition op. | ||
375 | * | ||
376 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
377 | * description. | ||
378 | */ | ||
379 | static inline | ||
380 | int fscache_attr_changed(struct fscache_cookie *cookie) | ||
381 | { | ||
382 | if (fscache_cookie_valid(cookie)) | ||
383 | return __fscache_attr_changed(cookie); | ||
384 | else | ||
385 | return -ENOBUFS; | ||
386 | } | ||
387 | |||
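In practice this is called whenever the netfs learns that the file size has changed (a truncate, or a new size reported by the server), since the cache will re-read the size through the get_attr() op. A small sketch, reusing the assumed inode_info layout from the examples above:

	static void myfs_note_size_change(struct myfs_inode_info *inode_info,
					  loff_t new_size)
	{
		inode_info->remote_size = new_size;
		/* the cache calls back into get_attr() to pick up the new size */
		fscache_attr_changed(inode_info->cache);
	}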
388 | /** | ||
389 | * fscache_reserve_space - Reserve data space for a cached object | ||
390 | * @cookie: The cookie representing the cache object | ||
391 | * @i_size: The amount of space to be reserved | ||
392 | * | ||
393 | * Reserve an amount of space in the cache for the cache object attached to a | ||
394 | * cookie so that a write to that object within the space can always be | ||
395 | * honoured. | ||
396 | * | ||
397 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
398 | * description. | ||
399 | */ | ||
400 | static inline | ||
401 | int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size) | ||
402 | { | ||
403 | return -ENOBUFS; | ||
404 | } | ||
405 | |||
406 | /** | ||
407 | * fscache_read_or_alloc_page - Read a page from the cache or allocate a block | ||
408 | * in which to store it | ||
409 | * @cookie: The cookie representing the cache object | ||
410 | * @page: The netfs page to fill if possible | ||
411 | * @end_io_func: The callback to invoke when and if the page is filled | ||
412 | * @context: An arbitrary piece of data to pass on to end_io_func() | ||
413 | * @gfp: The conditions under which memory allocation should be made | ||
414 | * | ||
415 | * Read a page from the cache, or if that's not possible make a potential | ||
416 | * one-block reservation in the cache into which the page may be stored once | ||
417 | * fetched from the server. | ||
418 | * | ||
419 | * If the page is not backed by the cache object, or if there's some reason | ||
420 | * it can't be, -ENOBUFS will be returned and nothing more will be done for | ||
421 | * that page. | ||
422 | * | ||
423 | * Else, if that page is backed by the cache, a read will be initiated directly | ||
424 | * to the netfs's page and 0 will be returned by this function. The | ||
425 | * end_io_func() callback will be invoked when the operation terminates on a | ||
426 | * completion or failure. Note that the callback may be invoked before the | ||
427 | * return. | ||
428 | * | ||
429 | * Else, if the page is unbacked, -ENODATA is returned and a block may have | ||
430 | * been allocated in the cache. | ||
431 | * | ||
432 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
433 | * description. | ||
434 | */ | ||
435 | static inline | ||
436 | int fscache_read_or_alloc_page(struct fscache_cookie *cookie, | ||
437 | struct page *page, | ||
438 | fscache_rw_complete_t end_io_func, | ||
439 | void *context, | ||
440 | gfp_t gfp) | ||
441 | { | ||
442 | if (fscache_cookie_valid(cookie)) | ||
443 | return __fscache_read_or_alloc_page(cookie, page, end_io_func, | ||
444 | context, gfp); | ||
445 | else | ||
446 | return -ENOBUFS; | ||
447 | } | ||
448 | |||
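The three-way return value described above usually turns the netfs's readpage into a switch. A sketch only: MYFS_I(), myfs_readpage_complete() and myfs_readpage_from_server() are assumed helpers, not defined by this header.

	static int myfs_readpage(struct file *file, struct page *page)
	{
		struct myfs_inode_info *inode_info = MYFS_I(page->mapping->host);
		int ret;

		ret = fscache_read_or_alloc_page(inode_info->cache, page,
						 myfs_readpage_complete, NULL,
						 GFP_KERNEL);
		switch (ret) {
		case 0:		/* backed by the cache; data arrives via the callback */
			return 0;
		case -ENODATA:	/* a block may have been allocated, but holds no data yet */
		case -ENOBUFS:	/* not cacheable */
		default:
			return myfs_readpage_from_server(file, page);
		}
	}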
449 | /** | ||
450 | * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate | ||
451 | * blocks in which to store them | ||
452 | * @cookie: The cookie representing the cache object | ||
453 | * @mapping: The netfs inode mapping to which the pages will be attached | ||
454 | * @pages: A list of potential netfs pages to be filled | ||
455 | * @end_io_func: The callback to invoke when and if each page is filled | ||
456 | * @context: An arbitrary piece of data to pass on to end_io_func() | ||
457 | * @gfp: The conditions under which memory allocation should be made | ||
458 | * | ||
459 | * Read a set of pages from the cache, or if that's not possible, attempt to | ||
460 | * make a potential one-block reservation for each page in the cache into which | ||
461 | * that page may be stored once fetched from the server. | ||
462 | * | ||
463 | * If some pages are not backed by the cache object, or if there's some | ||
464 | * reason they can't be, -ENOBUFS will be returned and nothing more will be | ||
465 | * done for those pages. | ||
466 | * | ||
467 | * Else, if some of the pages are backed by the cache, a read will be initiated | ||
468 | * directly to the netfs's page and 0 will be returned by this function. The | ||
469 | * end_io_func() callback will be invoked when the operation terminates on a | ||
470 | * completion or failure. Note that the callback may be invoked before the | ||
471 | * return. | ||
472 | * | ||
473 | * Else, if a page is unbacked, -ENODATA is returned and a block may have | ||
474 | * been allocated in the cache. | ||
475 | * | ||
476 | * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in | ||
477 | * regard to different pages, the return values are prioritised in that order. | ||
478 | * Any pages submitted for reading are removed from the pages list. | ||
479 | * | ||
480 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
481 | * description. | ||
482 | */ | ||
483 | static inline | ||
484 | int fscache_read_or_alloc_pages(struct fscache_cookie *cookie, | ||
485 | struct address_space *mapping, | ||
486 | struct list_head *pages, | ||
487 | unsigned *nr_pages, | ||
488 | fscache_rw_complete_t end_io_func, | ||
489 | void *context, | ||
490 | gfp_t gfp) | ||
491 | { | ||
492 | if (fscache_cookie_valid(cookie)) | ||
493 | return __fscache_read_or_alloc_pages(cookie, mapping, pages, | ||
494 | nr_pages, end_io_func, | ||
495 | context, gfp); | ||
496 | else | ||
497 | return -ENOBUFS; | ||
498 | } | ||
499 | |||
500 | /** | ||
501 | * fscache_alloc_page - Allocate a block in which to store a page | ||
502 | * @cookie: The cookie representing the cache object | ||
503 | * @page: The netfs page to allocate a page for | ||
504 | * @gfp: The conditions under which memory allocation should be made | ||
505 | * | ||
506 | * Request allocation of a block in the cache in which to store a netfs page | ||
507 | * without retrieving any contents from the cache. | ||
508 | * | ||
509 | * If the page is not backed by a file then -ENOBUFS will be returned and | ||
510 | * nothing more will be done, and no reservation will be made. | ||
511 | * | ||
512 | * Else, a block will be allocated if one wasn't already, and 0 will be | ||
513 | * returned. | ||
514 | * | ||
515 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
516 | * description. | ||
517 | */ | ||
518 | static inline | ||
519 | int fscache_alloc_page(struct fscache_cookie *cookie, | ||
520 | struct page *page, | ||
521 | gfp_t gfp) | ||
522 | { | ||
523 | if (fscache_cookie_valid(cookie)) | ||
524 | return __fscache_alloc_page(cookie, page, gfp); | ||
525 | else | ||
526 | return -ENOBUFS; | ||
527 | } | ||
528 | |||
529 | /** | ||
530 | * fscache_write_page - Request storage of a page in the cache | ||
531 | * @cookie: The cookie representing the cache object | ||
532 | * @page: The netfs page to store | ||
533 | * @gfp: The conditions under which memory allocation should be made | ||
534 | * | ||
535 | * Request the contents of the netfs page be written into the cache. This | ||
536 | * request may be ignored if no cache block is currently allocated, in which | ||
537 | * case it will return -ENOBUFS. | ||
538 | * | ||
539 | * If a cache block was already allocated, a write will be initiated and 0 will | ||
540 | * be returned. The PG_fscache_write page bit is set immediately and will then | ||
541 | * be cleared at the completion of the write to indicate the success or failure | ||
542 | * of the operation. Note that the completion may happen before the return. | ||
543 | * | ||
544 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
545 | * description. | ||
546 | */ | ||
547 | static inline | ||
548 | int fscache_write_page(struct fscache_cookie *cookie, | ||
549 | struct page *page, | ||
550 | gfp_t gfp) | ||
551 | { | ||
552 | if (fscache_cookie_valid(cookie)) | ||
553 | return __fscache_write_page(cookie, page, gfp); | ||
554 | else | ||
555 | return -ENOBUFS; | ||
556 | } | ||
557 | |||
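A sketch of how the write side is commonly driven once a page has been filled (the names are assumptions; the uncache-on-failure step follows from the -ENOBUFS behaviour described above):

	static void myfs_write_to_cache(struct myfs_inode_info *inode_info,
					struct page *page)
	{
		if (fscache_write_page(inode_info->cache, page, GFP_KERNEL) != 0) {
			/* no cache block (or an error): stop tracking this page */
			fscache_uncache_page(inode_info->cache, page);
		}
	}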
558 | /** | ||
559 | * fscache_uncache_page - Indicate that caching is no longer required on a page | ||
560 | * @cookie: The cookie representing the cache object | ||
561 | * @page: The netfs page that was being cached. | ||
562 | * | ||
563 | * Tell the cache that we no longer want a page to be cached and that it should | ||
564 | * remove any knowledge of the netfs page it may have. | ||
565 | * | ||
566 | * Note that this cannot cancel any outstanding I/O operations between this | ||
567 | * page and the cache. | ||
568 | * | ||
569 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
570 | * description. | ||
571 | */ | ||
572 | static inline | ||
573 | void fscache_uncache_page(struct fscache_cookie *cookie, | ||
574 | struct page *page) | ||
575 | { | ||
576 | if (fscache_cookie_valid(cookie)) | ||
577 | __fscache_uncache_page(cookie, page); | ||
578 | } | ||
579 | |||
580 | /** | ||
581 | * fscache_check_page_write - Ask if a page is being written to the cache | ||
582 | * @cookie: The cookie representing the cache object | ||
583 | * @page: The netfs page that is being cached. | ||
584 | * | ||
585 | * Ask the cache if a page is being written to the cache. | ||
586 | * | ||
587 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
588 | * description. | ||
589 | */ | ||
590 | static inline | ||
591 | bool fscache_check_page_write(struct fscache_cookie *cookie, | ||
592 | struct page *page) | ||
593 | { | ||
594 | if (fscache_cookie_valid(cookie)) | ||
595 | return __fscache_check_page_write(cookie, page); | ||
596 | return false; | ||
597 | } | ||
598 | |||
599 | /** | ||
600 | * fscache_wait_on_page_write - Wait for a page to complete writing to the cache | ||
601 | * @cookie: The cookie representing the cache object | ||
602 | * @page: The netfs page that is being cached. | ||
603 | * | ||
604 | * Ask the cache to wake us up when a page is no longer being written to the | ||
605 | * cache. | ||
606 | * | ||
607 | * See Documentation/filesystems/caching/netfs-api.txt for a complete | ||
608 | * description. | ||
609 | */ | ||
610 | static inline | ||
611 | void fscache_wait_on_page_write(struct fscache_cookie *cookie, | ||
612 | struct page *page) | ||
613 | { | ||
614 | if (fscache_cookie_valid(cookie)) | ||
615 | __fscache_wait_on_page_write(cookie, page); | ||
616 | } | ||
617 | |||
618 | #endif /* _LINUX_FSCACHE_H */ | ||
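The page-state helpers above (PageFsCache(), fscache_check_page_write(), fscache_wait_on_page_write() and fscache_uncache_page()) are meant to be combined when the VM asks the netfs to release a page. A hedged sketch of that releasepage pattern, with MYFS_I() and the inode layout assumed as before:

	static int myfs_releasepage(struct page *page, gfp_t gfp)
	{
		struct myfs_inode_info *inode_info = MYFS_I(page->mapping->host);

		if (PageFsCache(page)) {
			/* don't let the page go while the cache is writing it out */
			if (fscache_check_page_write(inode_info->cache, page)) {
				if (!(gfp & __GFP_WAIT))
					return 0;
				fscache_wait_on_page_write(inode_info->cache, page);
			}
			fscache_uncache_page(inode_info->cache, page);
			ClearPageFsCache(page);
		}
		return 1;	/* the page may now be released */
	}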
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index d9051d717d27..244677cc082b 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h | |||
@@ -18,7 +18,6 @@ | |||
18 | #define _FSL_DEVICE_H_ | 18 | #define _FSL_DEVICE_H_ |
19 | 19 | ||
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/phy.h> | ||
22 | 21 | ||
23 | /* | 22 | /* |
24 | * Some conventions on how we handle peripherals on Freescale chips | 23 | * Some conventions on how we handle peripherals on Freescale chips |
@@ -44,31 +43,6 @@ | |||
44 | * | 43 | * |
45 | */ | 44 | */ |
46 | 45 | ||
47 | struct gianfar_platform_data { | ||
48 | /* device specific information */ | ||
49 | u32 device_flags; | ||
50 | char bus_id[BUS_ID_SIZE]; | ||
51 | phy_interface_t interface; | ||
52 | }; | ||
53 | |||
54 | struct gianfar_mdio_data { | ||
55 | /* board specific information */ | ||
56 | int irq[32]; | ||
57 | }; | ||
58 | |||
59 | /* Flags in gianfar_platform_data */ | ||
60 | #define FSL_GIANFAR_BRD_HAS_PHY_INTR 0x00000001 /* set or use a timer */ | ||
61 | #define FSL_GIANFAR_BRD_IS_REDUCED 0x00000002 /* Set if RGMII, RMII */ | ||
62 | |||
63 | struct fsl_i2c_platform_data { | ||
64 | /* device specific information */ | ||
65 | u32 device_flags; | ||
66 | }; | ||
67 | |||
68 | /* Flags related to I2C device features */ | ||
69 | #define FSL_I2C_DEV_SEPARATE_DFSRR 0x00000001 | ||
70 | #define FSL_I2C_DEV_CLOCK_5200 0x00000002 | ||
71 | |||
72 | enum fsl_usb2_operating_modes { | 46 | enum fsl_usb2_operating_modes { |
73 | FSL_USB2_MPH_HOST, | 47 | FSL_USB2_MPH_HOST, |
74 | FSL_USB2_DR_HOST, | 48 | FSL_USB2_DR_HOST, |
@@ -95,15 +69,20 @@ struct fsl_usb2_platform_data { | |||
95 | #define FSL_USB2_PORT0_ENABLED 0x00000001 | 69 | #define FSL_USB2_PORT0_ENABLED 0x00000001 |
96 | #define FSL_USB2_PORT1_ENABLED 0x00000002 | 70 | #define FSL_USB2_PORT1_ENABLED 0x00000002 |
97 | 71 | ||
72 | struct spi_device; | ||
73 | |||
98 | struct fsl_spi_platform_data { | 74 | struct fsl_spi_platform_data { |
99 | u32 initial_spmode; /* initial SPMODE value */ | 75 | u32 initial_spmode; /* initial SPMODE value */ |
100 | u16 bus_num; | 76 | s16 bus_num; |
101 | bool qe_mode; | 77 | bool qe_mode; |
102 | /* board specific information */ | 78 | /* board specific information */ |
103 | u16 max_chipselect; | 79 | u16 max_chipselect; |
80 | void (*cs_control)(struct spi_device *spi, bool on); | ||
81 | u32 sysclk; | ||
82 | |||
83 | /* Legacy hooks, used by mpc52xx_psc_spi driver. */ | ||
104 | void (*activate_cs)(u8 cs, u8 polarity); | 84 | void (*activate_cs)(u8 cs, u8 polarity); |
105 | void (*deactivate_cs)(u8 cs, u8 polarity); | 85 | void (*deactivate_cs)(u8 cs, u8 polarity); |
106 | u32 sysclk; | ||
107 | }; | 86 | }; |
108 | 87 | ||
109 | struct mpc8xx_pcmcia_ops { | 88 | struct mpc8xx_pcmcia_ops { |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 677432b9cb7e..8a0c2f221e6b 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -1,15 +1,18 @@ | |||
1 | #ifndef _LINUX_FTRACE_H | 1 | #ifndef _LINUX_FTRACE_H |
2 | #define _LINUX_FTRACE_H | 2 | #define _LINUX_FTRACE_H |
3 | 3 | ||
4 | #include <linux/linkage.h> | 4 | #include <linux/trace_clock.h> |
5 | #include <linux/fs.h> | ||
6 | #include <linux/ktime.h> | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/types.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/kallsyms.h> | 5 | #include <linux/kallsyms.h> |
6 | #include <linux/linkage.h> | ||
11 | #include <linux/bitops.h> | 7 | #include <linux/bitops.h> |
8 | #include <linux/module.h> | ||
9 | #include <linux/ktime.h> | ||
12 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/types.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/fs.h> | ||
14 | |||
15 | #include <asm/ftrace.h> | ||
13 | 16 | ||
14 | #ifdef CONFIG_FUNCTION_TRACER | 17 | #ifdef CONFIG_FUNCTION_TRACER |
15 | 18 | ||
@@ -95,9 +98,41 @@ stack_trace_sysctl(struct ctl_table *table, int write, | |||
95 | loff_t *ppos); | 98 | loff_t *ppos); |
96 | #endif | 99 | #endif |
97 | 100 | ||
101 | struct ftrace_func_command { | ||
102 | struct list_head list; | ||
103 | char *name; | ||
104 | int (*func)(char *func, char *cmd, | ||
105 | char *params, int enable); | ||
106 | }; | ||
107 | |||
98 | #ifdef CONFIG_DYNAMIC_FTRACE | 108 | #ifdef CONFIG_DYNAMIC_FTRACE |
99 | /* asm/ftrace.h must be defined for archs supporting dynamic ftrace */ | 109 | |
100 | #include <asm/ftrace.h> | 110 | int ftrace_arch_code_modify_prepare(void); |
111 | int ftrace_arch_code_modify_post_process(void); | ||
112 | |||
113 | struct seq_file; | ||
114 | |||
115 | struct ftrace_probe_ops { | ||
116 | void (*func)(unsigned long ip, | ||
117 | unsigned long parent_ip, | ||
118 | void **data); | ||
119 | int (*callback)(unsigned long ip, void **data); | ||
120 | void (*free)(void **data); | ||
121 | int (*print)(struct seq_file *m, | ||
122 | unsigned long ip, | ||
123 | struct ftrace_probe_ops *ops, | ||
124 | void *data); | ||
125 | }; | ||
126 | |||
127 | extern int | ||
128 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
129 | void *data); | ||
130 | extern void | ||
131 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
132 | void *data); | ||
133 | extern void | ||
134 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops); | ||
135 | extern void unregister_ftrace_function_probe_all(char *glob); | ||
101 | 136 | ||
102 | enum { | 137 | enum { |
103 | FTRACE_FL_FREE = (1 << 0), | 138 | FTRACE_FL_FREE = (1 << 0), |
@@ -110,15 +145,23 @@ enum { | |||
110 | }; | 145 | }; |
111 | 146 | ||
112 | struct dyn_ftrace { | 147 | struct dyn_ftrace { |
113 | struct list_head list; | 148 | union { |
114 | unsigned long ip; /* address of mcount call-site */ | 149 | unsigned long ip; /* address of mcount call-site */ |
115 | unsigned long flags; | 150 | struct dyn_ftrace *freelist; |
116 | struct dyn_arch_ftrace arch; | 151 | }; |
152 | union { | ||
153 | unsigned long flags; | ||
154 | struct dyn_ftrace *newlist; | ||
155 | }; | ||
156 | struct dyn_arch_ftrace arch; | ||
117 | }; | 157 | }; |
118 | 158 | ||
119 | int ftrace_force_update(void); | 159 | int ftrace_force_update(void); |
120 | void ftrace_set_filter(unsigned char *buf, int len, int reset); | 160 | void ftrace_set_filter(unsigned char *buf, int len, int reset); |
121 | 161 | ||
162 | int register_ftrace_command(struct ftrace_func_command *cmd); | ||
163 | int unregister_ftrace_command(struct ftrace_func_command *cmd); | ||
164 | |||
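A sketch of how a tracer might use the command hooks declared above: the struct ftrace_func_command from earlier in this header is filled in and registered, after which "func:cmd[:params]" strings written to the function filter file are routed to its func() callback. The command name and handler below are made up for illustration.

	static int mycmd_func(char *func, char *cmd, char *params, int enable)
	{
		/* react to "<func>:mycmd[:<params>]" written to set_ftrace_filter */
		return 0;
	}

	static struct ftrace_func_command mycmd = {
		.name	= "mycmd",
		.func	= mycmd_func,
	};

	static int __init mytracer_init(void)
	{
		return register_ftrace_command(&mycmd);
	}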
122 | /* defined in arch */ | 165 | /* defined in arch */ |
123 | extern int ftrace_ip_converted(unsigned long ip); | 166 | extern int ftrace_ip_converted(unsigned long ip); |
124 | extern int ftrace_dyn_arch_init(void *data); | 167 | extern int ftrace_dyn_arch_init(void *data); |
@@ -126,6 +169,10 @@ extern int ftrace_update_ftrace_func(ftrace_func_t func); | |||
126 | extern void ftrace_caller(void); | 169 | extern void ftrace_caller(void); |
127 | extern void ftrace_call(void); | 170 | extern void ftrace_call(void); |
128 | extern void mcount_call(void); | 171 | extern void mcount_call(void); |
172 | |||
173 | #ifndef FTRACE_ADDR | ||
174 | #define FTRACE_ADDR ((unsigned long)ftrace_caller) | ||
175 | #endif | ||
129 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 176 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
130 | extern void ftrace_graph_caller(void); | 177 | extern void ftrace_graph_caller(void); |
131 | extern int ftrace_enable_ftrace_graph_caller(void); | 178 | extern int ftrace_enable_ftrace_graph_caller(void); |
@@ -136,7 +183,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } | |||
136 | #endif | 183 | #endif |
137 | 184 | ||
138 | /** | 185 | /** |
139 | * ftrace_make_nop - convert code into top | 186 | * ftrace_make_nop - convert code into nop |
140 | * @mod: module structure if called by module load initialization | 187 | * @mod: module structure if called by module load initialization |
141 | * @rec: the mcount call site record | 188 | * @rec: the mcount call site record |
142 | * @addr: the address that the call site should be calling | 189 | * @addr: the address that the call site should be calling |
@@ -181,7 +228,6 @@ extern int ftrace_make_nop(struct module *mod, | |||
181 | */ | 228 | */ |
182 | extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); | 229 | extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); |
183 | 230 | ||
184 | |||
185 | /* May be defined in arch */ | 231 | /* May be defined in arch */ |
186 | extern int ftrace_arch_read_dyn_info(char *buf, int size); | 232 | extern int ftrace_arch_read_dyn_info(char *buf, int size); |
187 | 233 | ||
@@ -198,6 +244,14 @@ extern void ftrace_enable_daemon(void); | |||
198 | # define ftrace_disable_daemon() do { } while (0) | 244 | # define ftrace_disable_daemon() do { } while (0) |
199 | # define ftrace_enable_daemon() do { } while (0) | 245 | # define ftrace_enable_daemon() do { } while (0) |
200 | static inline void ftrace_release(void *start, unsigned long size) { } | 246 | static inline void ftrace_release(void *start, unsigned long size) { } |
247 | static inline int register_ftrace_command(struct ftrace_func_command *cmd) | ||
248 | { | ||
249 | return -EINVAL; | ||
250 | } | ||
251 | static inline int unregister_ftrace_command(char *cmd_name) | ||
252 | { | ||
253 | return -EINVAL; | ||
254 | } | ||
201 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 255 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
202 | 256 | ||
203 | /* totally disable ftrace - can not re-enable after this */ | 257 | /* totally disable ftrace - can not re-enable after this */ |
@@ -233,24 +287,25 @@ static inline void __ftrace_enabled_restore(int enabled) | |||
233 | #endif | 287 | #endif |
234 | } | 288 | } |
235 | 289 | ||
236 | #ifdef CONFIG_FRAME_POINTER | 290 | #ifndef HAVE_ARCH_CALLER_ADDR |
237 | /* TODO: need to fix this for ARM */ | 291 | # ifdef CONFIG_FRAME_POINTER |
238 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) | 292 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) |
239 | # define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) | 293 | # define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1)) |
240 | # define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) | 294 | # define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2)) |
241 | # define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) | 295 | # define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3)) |
242 | # define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) | 296 | # define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4)) |
243 | # define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) | 297 | # define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5)) |
244 | # define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) | 298 | # define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6)) |
245 | #else | 299 | # else |
246 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) | 300 | # define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0)) |
247 | # define CALLER_ADDR1 0UL | 301 | # define CALLER_ADDR1 0UL |
248 | # define CALLER_ADDR2 0UL | 302 | # define CALLER_ADDR2 0UL |
249 | # define CALLER_ADDR3 0UL | 303 | # define CALLER_ADDR3 0UL |
250 | # define CALLER_ADDR4 0UL | 304 | # define CALLER_ADDR4 0UL |
251 | # define CALLER_ADDR5 0UL | 305 | # define CALLER_ADDR5 0UL |
252 | # define CALLER_ADDR6 0UL | 306 | # define CALLER_ADDR6 0UL |
253 | #endif | 307 | # endif |
308 | #endif /* ifndef HAVE_ARCH_CALLER_ADDR */ | ||
254 | 309 | ||
255 | #ifdef CONFIG_IRQSOFF_TRACER | 310 | #ifdef CONFIG_IRQSOFF_TRACER |
256 | extern void time_hardirqs_on(unsigned long a0, unsigned long a1); | 311 | extern void time_hardirqs_on(unsigned long a0, unsigned long a1); |
@@ -268,54 +323,6 @@ static inline void __ftrace_enabled_restore(int enabled) | |||
268 | # define trace_preempt_off(a0, a1) do { } while (0) | 323 | # define trace_preempt_off(a0, a1) do { } while (0) |
269 | #endif | 324 | #endif |
270 | 325 | ||
271 | #ifdef CONFIG_TRACING | ||
272 | extern int ftrace_dump_on_oops; | ||
273 | |||
274 | extern void tracing_start(void); | ||
275 | extern void tracing_stop(void); | ||
276 | extern void ftrace_off_permanent(void); | ||
277 | |||
278 | extern void | ||
279 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); | ||
280 | |||
281 | /** | ||
282 | * ftrace_printk - printf formatting in the ftrace buffer | ||
283 | * @fmt: the printf format for printing | ||
284 | * | ||
285 | * Note: __ftrace_printk is an internal function for ftrace_printk and | ||
286 | * the @ip is passed in via the ftrace_printk macro. | ||
287 | * | ||
288 | * This function allows a kernel developer to debug fast path sections | ||
289 | * that printk is not appropriate for. By scattering in various | ||
290 | * printk like tracing in the code, a developer can quickly see | ||
291 | * where problems are occurring. | ||
292 | * | ||
293 | * This is intended as a debugging tool for the developer only. | ||
294 | * Please refrain from leaving ftrace_printks scattered around in | ||
295 | * your code. | ||
296 | */ | ||
297 | # define ftrace_printk(fmt...) __ftrace_printk(_THIS_IP_, fmt) | ||
298 | extern int | ||
299 | __ftrace_printk(unsigned long ip, const char *fmt, ...) | ||
300 | __attribute__ ((format (printf, 2, 3))); | ||
301 | extern void ftrace_dump(void); | ||
302 | #else | ||
303 | static inline void | ||
304 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } | ||
305 | static inline int | ||
306 | ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); | ||
307 | |||
308 | static inline void tracing_start(void) { } | ||
309 | static inline void tracing_stop(void) { } | ||
310 | static inline void ftrace_off_permanent(void) { } | ||
311 | static inline int | ||
312 | ftrace_printk(const char *fmt, ...) | ||
313 | { | ||
314 | return 0; | ||
315 | } | ||
316 | static inline void ftrace_dump(void) { } | ||
317 | #endif | ||
318 | |||
319 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 326 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
320 | extern void ftrace_init(void); | 327 | extern void ftrace_init(void); |
321 | extern void ftrace_init_module(struct module *mod, | 328 | extern void ftrace_init_module(struct module *mod, |
@@ -327,36 +334,6 @@ ftrace_init_module(struct module *mod, | |||
327 | unsigned long *start, unsigned long *end) { } | 334 | unsigned long *start, unsigned long *end) { } |
328 | #endif | 335 | #endif |
329 | 336 | ||
330 | enum { | ||
331 | POWER_NONE = 0, | ||
332 | POWER_CSTATE = 1, | ||
333 | POWER_PSTATE = 2, | ||
334 | }; | ||
335 | |||
336 | struct power_trace { | ||
337 | #ifdef CONFIG_POWER_TRACER | ||
338 | ktime_t stamp; | ||
339 | ktime_t end; | ||
340 | int type; | ||
341 | int state; | ||
342 | #endif | ||
343 | }; | ||
344 | |||
345 | #ifdef CONFIG_POWER_TRACER | ||
346 | extern void trace_power_start(struct power_trace *it, unsigned int type, | ||
347 | unsigned int state); | ||
348 | extern void trace_power_mark(struct power_trace *it, unsigned int type, | ||
349 | unsigned int state); | ||
350 | extern void trace_power_end(struct power_trace *it); | ||
351 | #else | ||
352 | static inline void trace_power_start(struct power_trace *it, unsigned int type, | ||
353 | unsigned int state) { } | ||
354 | static inline void trace_power_mark(struct power_trace *it, unsigned int type, | ||
355 | unsigned int state) { } | ||
356 | static inline void trace_power_end(struct power_trace *it) { } | ||
357 | #endif | ||
358 | |||
359 | |||
360 | /* | 337 | /* |
361 | * Structure that defines an entry function trace. | 338 | * Structure that defines an entry function trace. |
362 | */ | 339 | */ |
@@ -379,6 +356,32 @@ struct ftrace_graph_ret { | |||
379 | 356 | ||
380 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 357 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
381 | 358 | ||
359 | /* for init task */ | ||
360 | #define INIT_FTRACE_GRAPH .ret_stack = NULL, | ||
361 | |||
362 | /* | ||
363 | * Stack of return addresses for functions | ||
364 | * of a thread. | ||
365 | * Used in struct thread_info | ||
366 | */ | ||
367 | struct ftrace_ret_stack { | ||
368 | unsigned long ret; | ||
369 | unsigned long func; | ||
370 | unsigned long long calltime; | ||
371 | }; | ||
372 | |||
373 | /* | ||
374 | * Primary handler of a function return. | ||
375 | * It relays on ftrace_return_to_handler. | ||
376 | * Defined in entry_32/64.S | ||
377 | */ | ||
378 | extern void return_to_handler(void); | ||
379 | |||
380 | extern int | ||
381 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); | ||
382 | extern void | ||
383 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); | ||
384 | |||
382 | /* | 385 | /* |
383 | * Sometimes we don't want to trace a function with the function | 386 | * Sometimes we don't want to trace a function with the function |
384 | * graph tracer but we want them to keep traced by the usual function | 387 | * graph tracer but we want them to keep traced by the usual function |
@@ -430,10 +433,11 @@ static inline void unpause_graph_tracing(void) | |||
430 | { | 433 | { |
431 | atomic_dec(¤t->tracing_graph_pause); | 434 | atomic_dec(¤t->tracing_graph_pause); |
432 | } | 435 | } |
433 | #else | 436 | #else /* !CONFIG_FUNCTION_GRAPH_TRACER */ |
434 | 437 | ||
435 | #define __notrace_funcgraph | 438 | #define __notrace_funcgraph |
436 | #define __irq_entry | 439 | #define __irq_entry |
440 | #define INIT_FTRACE_GRAPH | ||
437 | 441 | ||
438 | static inline void ftrace_graph_init_task(struct task_struct *t) { } | 442 | static inline void ftrace_graph_init_task(struct task_struct *t) { } |
439 | static inline void ftrace_graph_exit_task(struct task_struct *t) { } | 443 | static inline void ftrace_graph_exit_task(struct task_struct *t) { } |
@@ -445,7 +449,7 @@ static inline int task_curr_ret_stack(struct task_struct *tsk) | |||
445 | 449 | ||
446 | static inline void pause_graph_tracing(void) { } | 450 | static inline void pause_graph_tracing(void) { } |
447 | static inline void unpause_graph_tracing(void) { } | 451 | static inline void unpause_graph_tracing(void) { } |
448 | #endif | 452 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
449 | 453 | ||
450 | #ifdef CONFIG_TRACING | 454 | #ifdef CONFIG_TRACING |
451 | #include <linux/sched.h> | 455 | #include <linux/sched.h> |
@@ -490,6 +494,21 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk) | |||
490 | return tsk->trace & TSK_TRACE_FL_GRAPH; | 494 | return tsk->trace & TSK_TRACE_FL_GRAPH; |
491 | } | 495 | } |
492 | 496 | ||
497 | extern int ftrace_dump_on_oops; | ||
498 | |||
493 | #endif /* CONFIG_TRACING */ | 499 | #endif /* CONFIG_TRACING */ |
494 | 500 | ||
501 | |||
502 | #ifdef CONFIG_HW_BRANCH_TRACER | ||
503 | |||
504 | void trace_hw_branch(u64 from, u64 to); | ||
505 | void trace_hw_branch_oops(void); | ||
506 | |||
507 | #else /* CONFIG_HW_BRANCH_TRACER */ | ||
508 | |||
509 | static inline void trace_hw_branch(u64 from, u64 to) {} | ||
510 | static inline void trace_hw_branch_oops(void) {} | ||
511 | |||
512 | #endif /* CONFIG_HW_BRANCH_TRACER */ | ||
513 | |||
495 | #endif /* _LINUX_FTRACE_H */ | 514 | #endif /* _LINUX_FTRACE_H */ |
diff --git a/include/linux/ftrace_irq.h b/include/linux/ftrace_irq.h index 366a054d0b05..dca7bf8cffe2 100644 --- a/include/linux/ftrace_irq.h +++ b/include/linux/ftrace_irq.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #define _LINUX_FTRACE_IRQ_H | 2 | #define _LINUX_FTRACE_IRQ_H |
3 | 3 | ||
4 | 4 | ||
5 | #if defined(CONFIG_DYNAMIC_FTRACE) || defined(CONFIG_FUNCTION_GRAPH_TRACER) | 5 | #ifdef CONFIG_FTRACE_NMI_ENTER |
6 | extern void ftrace_nmi_enter(void); | 6 | extern void ftrace_nmi_enter(void); |
7 | extern void ftrace_nmi_exit(void); | 7 | extern void ftrace_nmi_exit(void); |
8 | #else | 8 | #else |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 634c53028fb8..a1a28caed23d 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -214,6 +214,7 @@ static inline void disk_put_part(struct hd_struct *part) | |||
214 | #define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ | 214 | #define DISK_PITER_REVERSE (1 << 0) /* iterate in the reverse direction */ |
215 | #define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ | 215 | #define DISK_PITER_INCL_EMPTY (1 << 1) /* include 0-sized parts */ |
216 | #define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ | 216 | #define DISK_PITER_INCL_PART0 (1 << 2) /* include partition 0 */ |
217 | #define DISK_PITER_INCL_EMPTY_PART0 (1 << 3) /* include empty partition 0 */ | ||
217 | 218 | ||
218 | struct disk_part_iter { | 219 | struct disk_part_iter { |
219 | struct gendisk *disk; | 220 | struct gendisk *disk; |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index dd20cd78faa8..0bbc15f54536 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/mmzone.h> | 4 | #include <linux/mmzone.h> |
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/linkage.h> | 6 | #include <linux/linkage.h> |
7 | #include <linux/topology.h> | ||
7 | 8 | ||
8 | struct vm_area_struct; | 9 | struct vm_area_struct; |
9 | 10 | ||
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index f83288347dda..45257475623c 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
@@ -15,55 +15,61 @@ | |||
15 | * - bits 0-7 are the preemption count (max preemption depth: 256) | 15 | * - bits 0-7 are the preemption count (max preemption depth: 256) |
16 | * - bits 8-15 are the softirq count (max # of softirqs: 256) | 16 | * - bits 8-15 are the softirq count (max # of softirqs: 256) |
17 | * | 17 | * |
18 | * The hardirq count can be overridden per architecture, the default is: | 18 | * The hardirq count can in theory reach the same as NR_IRQS. |
19 | * In reality, the number of nested IRQS is limited to the stack | ||
20 | * size as well. For archs with over 1000 IRQS it is not practical | ||
21 | * to expect that they will all nest. We give a max of 10 bits for | ||
22 | * hardirq nesting. An arch may choose to give less than 10 bits. | ||
23 | * m68k expects it to be 8. | ||
19 | * | 24 | * |
20 | * - bits 16-27 are the hardirq count (max # of hardirqs: 4096) | 25 | * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) |
21 | * - ( bit 28 is the PREEMPT_ACTIVE flag. ) | 26 | * - bit 26 is the NMI_MASK |
27 | * - bit 28 is the PREEMPT_ACTIVE flag | ||
22 | * | 28 | * |
23 | * PREEMPT_MASK: 0x000000ff | 29 | * PREEMPT_MASK: 0x000000ff |
24 | * SOFTIRQ_MASK: 0x0000ff00 | 30 | * SOFTIRQ_MASK: 0x0000ff00 |
25 | * HARDIRQ_MASK: 0x0fff0000 | 31 | * HARDIRQ_MASK: 0x03ff0000 |
32 | * NMI_MASK: 0x04000000 | ||
26 | */ | 33 | */ |
27 | #define PREEMPT_BITS 8 | 34 | #define PREEMPT_BITS 8 |
28 | #define SOFTIRQ_BITS 8 | 35 | #define SOFTIRQ_BITS 8 |
36 | #define NMI_BITS 1 | ||
29 | 37 | ||
30 | #ifndef HARDIRQ_BITS | 38 | #define MAX_HARDIRQ_BITS 10 |
31 | #define HARDIRQ_BITS 12 | ||
32 | 39 | ||
33 | #ifndef MAX_HARDIRQS_PER_CPU | 40 | #ifndef HARDIRQ_BITS |
34 | #define MAX_HARDIRQS_PER_CPU NR_IRQS | 41 | # define HARDIRQ_BITS MAX_HARDIRQ_BITS |
35 | #endif | 42 | #endif |
36 | 43 | ||
37 | /* | 44 | #if HARDIRQ_BITS > MAX_HARDIRQ_BITS |
38 | * The hardirq mask has to be large enough to have space for potentially | 45 | #error HARDIRQ_BITS too high! |
39 | * all IRQ sources in the system nesting on a single CPU. | ||
40 | */ | ||
41 | #if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU | ||
42 | # error HARDIRQ_BITS is too low! | ||
43 | #endif | ||
44 | #endif | 46 | #endif |
45 | 47 | ||
46 | #define PREEMPT_SHIFT 0 | 48 | #define PREEMPT_SHIFT 0 |
47 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) | 49 | #define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS) |
48 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) | 50 | #define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS) |
51 | #define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS) | ||
49 | 52 | ||
50 | #define __IRQ_MASK(x) ((1UL << (x))-1) | 53 | #define __IRQ_MASK(x) ((1UL << (x))-1) |
51 | 54 | ||
52 | #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) | 55 | #define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT) |
53 | #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | 56 | #define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) |
54 | #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) | 57 | #define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT) |
58 | #define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT) | ||
55 | 59 | ||
56 | #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) | 60 | #define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT) |
57 | #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) | 61 | #define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT) |
58 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) | 62 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) |
63 | #define NMI_OFFSET (1UL << NMI_SHIFT) | ||
59 | 64 | ||
60 | #if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS)) | 65 | #if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS)) |
61 | #error PREEMPT_ACTIVE is too low! | 66 | #error PREEMPT_ACTIVE is too low! |
62 | #endif | 67 | #endif |
63 | 68 | ||
64 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK) | 69 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK) |
65 | #define softirq_count() (preempt_count() & SOFTIRQ_MASK) | 70 | #define softirq_count() (preempt_count() & SOFTIRQ_MASK) |
66 | #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK)) | 71 | #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ |
72 | | NMI_MASK)) | ||
67 | 73 | ||
68 | /* | 74 | /* |
69 | * Are we doing bottom half or hardware interrupt processing? | 75 | * Are we doing bottom half or hardware interrupt processing? |
@@ -73,6 +79,11 @@ | |||
73 | #define in_softirq() (softirq_count()) | 79 | #define in_softirq() (softirq_count()) |
74 | #define in_interrupt() (irq_count()) | 80 | #define in_interrupt() (irq_count()) |
75 | 81 | ||
82 | /* | ||
83 | * Are we in NMI context? | ||
84 | */ | ||
85 | #define in_nmi() (preempt_count() & NMI_MASK) | ||
86 | |||
76 | #if defined(CONFIG_PREEMPT) | 87 | #if defined(CONFIG_PREEMPT) |
77 | # define PREEMPT_INATOMIC_BASE kernel_locked() | 88 | # define PREEMPT_INATOMIC_BASE kernel_locked() |
78 | # define PREEMPT_CHECK_OFFSET 1 | 89 | # define PREEMPT_CHECK_OFFSET 1 |
@@ -105,7 +116,7 @@ | |||
105 | # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET | 116 | # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET |
106 | #endif | 117 | #endif |
107 | 118 | ||
108 | #ifdef CONFIG_SMP | 119 | #if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS) |
109 | extern void synchronize_irq(unsigned int irq); | 120 | extern void synchronize_irq(unsigned int irq); |
110 | #else | 121 | #else |
111 | # define synchronize_irq(irq) barrier() | 122 | # define synchronize_irq(irq) barrier() |
@@ -164,20 +175,24 @@ extern void irq_enter(void); | |||
164 | */ | 175 | */ |
165 | extern void irq_exit(void); | 176 | extern void irq_exit(void); |
166 | 177 | ||
167 | #define nmi_enter() \ | 178 | #define nmi_enter() \ |
168 | do { \ | 179 | do { \ |
169 | ftrace_nmi_enter(); \ | 180 | ftrace_nmi_enter(); \ |
170 | lockdep_off(); \ | 181 | BUG_ON(in_nmi()); \ |
171 | rcu_nmi_enter(); \ | 182 | add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ |
172 | __irq_enter(); \ | 183 | lockdep_off(); \ |
184 | rcu_nmi_enter(); \ | ||
185 | trace_hardirq_enter(); \ | ||
173 | } while (0) | 186 | } while (0) |
174 | 187 | ||
175 | #define nmi_exit() \ | 188 | #define nmi_exit() \ |
176 | do { \ | 189 | do { \ |
177 | __irq_exit(); \ | 190 | trace_hardirq_exit(); \ |
178 | rcu_nmi_exit(); \ | 191 | rcu_nmi_exit(); \ |
179 | lockdep_on(); \ | 192 | lockdep_on(); \ |
180 | ftrace_nmi_exit(); \ | 193 | BUG_ON(!in_nmi()); \ |
194 | sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET); \ | ||
195 | ftrace_nmi_exit(); \ | ||
181 | } while (0) | 196 | } while (0) |
182 | 197 | ||
183 | #endif /* LINUX_HARDIRQ_H */ | 198 | #endif /* LINUX_HARDIRQ_H */ |
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h index ed21bd3dbd25..29ee2873f4a8 100644 --- a/include/linux/hdreg.h +++ b/include/linux/hdreg.h | |||
@@ -1,68 +1,6 @@ | |||
1 | #ifndef _LINUX_HDREG_H | 1 | #ifndef _LINUX_HDREG_H |
2 | #define _LINUX_HDREG_H | 2 | #define _LINUX_HDREG_H |
3 | 3 | ||
4 | #ifdef __KERNEL__ | ||
5 | #include <linux/ata.h> | ||
6 | |||
7 | /* | ||
8 | * This file contains some defines for the AT-hd-controller. | ||
9 | * Various sources. | ||
10 | */ | ||
11 | |||
12 | /* ide.c has its own port definitions in "ide.h" */ | ||
13 | |||
14 | #define HD_IRQ 14 | ||
15 | |||
16 | /* Hd controller regs. Ref: IBM AT Bios-listing */ | ||
17 | #define HD_DATA 0x1f0 /* _CTL when writing */ | ||
18 | #define HD_ERROR 0x1f1 /* see err-bits */ | ||
19 | #define HD_NSECTOR 0x1f2 /* nr of sectors to read/write */ | ||
20 | #define HD_SECTOR 0x1f3 /* starting sector */ | ||
21 | #define HD_LCYL 0x1f4 /* starting cylinder */ | ||
22 | #define HD_HCYL 0x1f5 /* high byte of starting cyl */ | ||
23 | #define HD_CURRENT 0x1f6 /* 101dhhhh , d=drive, hhhh=head */ | ||
24 | #define HD_STATUS 0x1f7 /* see status-bits */ | ||
25 | #define HD_FEATURE HD_ERROR /* same io address, read=error, write=feature */ | ||
26 | #define HD_PRECOMP HD_FEATURE /* obsolete use of this port - predates IDE */ | ||
27 | #define HD_COMMAND HD_STATUS /* same io address, read=status, write=cmd */ | ||
28 | |||
29 | #define HD_CMD 0x3f6 /* used for resets */ | ||
30 | #define HD_ALTSTATUS 0x3f6 /* same as HD_STATUS but doesn't clear irq */ | ||
31 | |||
32 | /* remainder is shared between hd.c, ide.c, ide-cd.c, and the hdparm utility */ | ||
33 | |||
34 | /* Bits of HD_STATUS */ | ||
35 | #define ERR_STAT 0x01 | ||
36 | #define INDEX_STAT 0x02 | ||
37 | #define ECC_STAT 0x04 /* Corrected error */ | ||
38 | #define DRQ_STAT 0x08 | ||
39 | #define SEEK_STAT 0x10 | ||
40 | #define SRV_STAT 0x10 | ||
41 | #define WRERR_STAT 0x20 | ||
42 | #define READY_STAT 0x40 | ||
43 | #define BUSY_STAT 0x80 | ||
44 | |||
45 | /* Bits for HD_ERROR */ | ||
46 | #define MARK_ERR 0x01 /* Bad address mark */ | ||
47 | #define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */ | ||
48 | #define TRK0_ERR 0x02 /* couldn't find track 0 */ | ||
49 | #define EOM_ERR 0x02 /* End Of Media (ATAPI) */ | ||
50 | #define ABRT_ERR 0x04 /* Command aborted */ | ||
51 | #define MCR_ERR 0x08 /* media change request */ | ||
52 | #define ID_ERR 0x10 /* ID field not found */ | ||
53 | #define MC_ERR 0x20 /* media changed */ | ||
54 | #define ECC_ERR 0x40 /* Uncorrectable ECC error */ | ||
55 | #define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ | ||
56 | #define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ | ||
57 | #define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */ | ||
58 | |||
59 | /* Bits of HD_NSECTOR */ | ||
60 | #define CD 0x01 | ||
61 | #define IO 0x02 | ||
62 | #define REL 0x04 | ||
63 | #define TAG_MASK 0xf8 | ||
64 | #endif /* __KERNEL__ */ | ||
65 | |||
66 | #include <linux/types.h> | 4 | #include <linux/types.h> |
67 | 5 | ||
68 | /* | 6 | /* |
@@ -191,6 +129,7 @@ typedef struct hd_drive_hob_hdr { | |||
191 | #define TASKFILE_INVALID 0x7fff | 129 | #define TASKFILE_INVALID 0x7fff |
192 | #endif | 130 | #endif |
193 | 131 | ||
132 | #ifndef __KERNEL__ | ||
194 | /* ATA/ATAPI Commands pre T13 Spec */ | 133 | /* ATA/ATAPI Commands pre T13 Spec */ |
195 | #define WIN_NOP 0x00 | 134 | #define WIN_NOP 0x00 |
196 | /* | 135 | /* |
@@ -379,6 +318,7 @@ typedef struct hd_drive_hob_hdr { | |||
379 | #define SECURITY_ERASE_UNIT 0xBD | 318 | #define SECURITY_ERASE_UNIT 0xBD |
380 | #define SECURITY_FREEZE_LOCK 0xBE | 319 | #define SECURITY_FREEZE_LOCK 0xBE |
381 | #define SECURITY_DISABLE_PASSWORD 0xBF | 320 | #define SECURITY_DISABLE_PASSWORD 0xBF |
321 | #endif /* __KERNEL__ */ | ||
382 | 322 | ||
383 | struct hd_geometry { | 323 | struct hd_geometry { |
384 | unsigned char heads; | 324 | unsigned char heads; |
@@ -448,6 +388,7 @@ enum { | |||
448 | 388 | ||
449 | #define __NEW_HD_DRIVE_ID | 389 | #define __NEW_HD_DRIVE_ID |
450 | 390 | ||
391 | #ifndef __KERNEL__ | ||
451 | /* | 392 | /* |
452 | * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec. | 393 | * Structure returned by HDIO_GET_IDENTITY, as per ANSI NCITS ATA6 rev.1b spec. |
453 | * | 394 | * |
@@ -699,6 +640,7 @@ struct hd_driveid { | |||
699 | * 7:0 Signature | 640 | * 7:0 Signature |
700 | */ | 641 | */ |
701 | }; | 642 | }; |
643 | #endif /* __KERNEL__ */ | ||
702 | 644 | ||
703 | /* | 645 | /* |
704 | * IDE "nice" flags. These are used on a per drive basis to determine | 646 | * IDE "nice" flags. These are used on a per drive basis to determine |
diff --git a/include/linux/hid.h b/include/linux/hid.h index fa8ee9cef7be..a72876e43589 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
@@ -270,6 +270,7 @@ struct hid_item { | |||
270 | 270 | ||
271 | #define HID_QUIRK_INVERT 0x00000001 | 271 | #define HID_QUIRK_INVERT 0x00000001 |
272 | #define HID_QUIRK_NOTOUCH 0x00000002 | 272 | #define HID_QUIRK_NOTOUCH 0x00000002 |
273 | #define HID_QUIRK_IGNORE 0x00000004 | ||
273 | #define HID_QUIRK_NOGET 0x00000008 | 274 | #define HID_QUIRK_NOGET 0x00000008 |
274 | #define HID_QUIRK_BADPAD 0x00000020 | 275 | #define HID_QUIRK_BADPAD 0x00000020 |
275 | #define HID_QUIRK_MULTI_INPUT 0x00000040 | 276 | #define HID_QUIRK_MULTI_INPUT 0x00000040 |
@@ -603,12 +604,17 @@ struct hid_ll_driver { | |||
603 | int (*open)(struct hid_device *hdev); | 604 | int (*open)(struct hid_device *hdev); |
604 | void (*close)(struct hid_device *hdev); | 605 | void (*close)(struct hid_device *hdev); |
605 | 606 | ||
607 | int (*power)(struct hid_device *hdev, int level); | ||
608 | |||
606 | int (*hidinput_input_event) (struct input_dev *idev, unsigned int type, | 609 | int (*hidinput_input_event) (struct input_dev *idev, unsigned int type, |
607 | unsigned int code, int value); | 610 | unsigned int code, int value); |
608 | 611 | ||
609 | int (*parse)(struct hid_device *hdev); | 612 | int (*parse)(struct hid_device *hdev); |
610 | }; | 613 | }; |
611 | 614 | ||
615 | #define PM_HINT_FULLON 1<<5 | ||
616 | #define PM_HINT_NORMAL 1<<1 | ||
617 | |||
612 | /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ | 618 | /* Applications from HID Usage Tables 4/8/99 Version 1.1 */ |
613 | /* We ignore a few input applications that are not widely used */ | 619 | /* We ignore a few input applications that are not widely used */ |
614 | #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) | 620 | #define IS_INPUT_APPLICATION(a) (((a >= 0x00010000) && (a <= 0x00010008)) || (a == 0x00010080) || (a == 0x000c0001) || (a == 0x000d0002)) |
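The new ->power() hook lets HID core ask the transport driver for a power-level change using the PM_HINT_* values introduced above. A minimal sketch of how a low-level driver might wire it up; the example_* names are hypothetical and the mandatory callbacks are omitted for brevity.

#include <linux/hid.h>

static int example_ll_power(struct hid_device *hdev, int level)
{
	/* 'level' is one of the PM_HINT_* values defined above */
	switch (level) {
	case PM_HINT_FULLON:
		/* bring the transport and device to full power */
		break;
	case PM_HINT_NORMAL:
		/* allow the device to return to its normal power state */
		break;
	}
	return 0;
}

static struct hid_ll_driver example_ll_driver = {
	/* .start, .stop, .open, .close, .parse, ... omitted in this sketch */
	.power = example_ll_power,
};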
@@ -641,6 +647,7 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int | |||
641 | void hid_output_report(struct hid_report *report, __u8 *data); | 647 | void hid_output_report(struct hid_report *report, __u8 *data); |
642 | struct hid_device *hid_allocate_device(void); | 648 | struct hid_device *hid_allocate_device(void); |
643 | int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); | 649 | int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); |
650 | int hid_check_keys_pressed(struct hid_device *hid); | ||
644 | int hid_connect(struct hid_device *hid, unsigned int connect_mask); | 651 | int hid_connect(struct hid_device *hid, unsigned int connect_mask); |
645 | 652 | ||
646 | /** | 653 | /** |
@@ -791,21 +798,5 @@ dbg_hid(const char *fmt, ...) | |||
791 | __FILE__ , ## arg) | 798 | __FILE__ , ## arg) |
792 | #endif /* HID_FF */ | 799 | #endif /* HID_FF */ |
793 | 800 | ||
794 | #ifdef __KERNEL__ | ||
795 | #ifdef CONFIG_HID_COMPAT | ||
796 | #define HID_COMPAT_LOAD_DRIVER(name) \ | ||
797 | /* prototype to avoid sparse warning */ \ | ||
798 | extern void hid_compat_##name(void); \ | ||
799 | void hid_compat_##name(void) { } \ | ||
800 | EXPORT_SYMBOL(hid_compat_##name) | ||
801 | #else | ||
802 | #define HID_COMPAT_LOAD_DRIVER(name) | ||
803 | #endif /* HID_COMPAT */ | ||
804 | #define HID_COMPAT_CALL_DRIVER(name) do { \ | ||
805 | extern void hid_compat_##name(void); \ | ||
806 | hid_compat_##name(); \ | ||
807 | } while (0) | ||
808 | #endif /* __KERNEL__ */ | ||
809 | |||
810 | #endif | 801 | #endif |
811 | 802 | ||
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 13875ce9112a..1fcb7126a01f 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -19,8 +19,21 @@ static inline void flush_kernel_dcache_page(struct page *page) | |||
19 | } | 19 | } |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifdef CONFIG_HIGHMEM | 22 | #include <asm/kmap_types.h> |
23 | |||
24 | #if defined(CONFIG_DEBUG_HIGHMEM) && defined(CONFIG_TRACE_IRQFLAGS_SUPPORT) | ||
25 | |||
26 | void debug_kmap_atomic(enum km_type type); | ||
23 | 27 | ||
28 | #else | ||
29 | |||
30 | static inline void debug_kmap_atomic(enum km_type type) | ||
31 | { | ||
32 | } | ||
33 | |||
34 | #endif | ||
35 | |||
36 | #ifdef CONFIG_HIGHMEM | ||
24 | #include <asm/highmem.h> | 37 | #include <asm/highmem.h> |
25 | 38 | ||
26 | /* declarations for linux/mm/highmem.c */ | 39 | /* declarations for linux/mm/highmem.c */ |
@@ -44,8 +57,6 @@ static inline void *kmap(struct page *page) | |||
44 | 57 | ||
45 | #define kunmap(page) do { (void) (page); } while (0) | 58 | #define kunmap(page) do { (void) (page); } while (0) |
46 | 59 | ||
47 | #include <asm/kmap_types.h> | ||
48 | |||
49 | static inline void *kmap_atomic(struct page *page, enum km_type idx) | 60 | static inline void *kmap_atomic(struct page *page, enum km_type idx) |
50 | { | 61 | { |
51 | pagefault_disable(); | 62 | pagefault_disable(); |
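Moving the <asm/kmap_types.h> include up lets debug_kmap_atomic() take an enum km_type whether or not CONFIG_HIGHMEM is set. For context, a minimal sketch of the kmap_atomic() pairing this hook instruments; the helper name is illustrative and the two-argument API is the one shown above.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void zero_page_atomic(struct page *page)
{
	/* CONFIG_DEBUG_HIGHMEM will sanity-check use of the KM_USER0 slot here */
	void *vaddr = kmap_atomic(page, KM_USER0);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);
}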
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index bd37078c2d7d..0d2f7c8a33d6 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -336,6 +336,11 @@ extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, | |||
336 | const enum hrtimer_mode mode); | 336 | const enum hrtimer_mode mode); |
337 | extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | 337 | extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, |
338 | unsigned long range_ns, const enum hrtimer_mode mode); | 338 | unsigned long range_ns, const enum hrtimer_mode mode); |
339 | extern int | ||
340 | __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | ||
341 | unsigned long delta_ns, | ||
342 | const enum hrtimer_mode mode, int wakeup); | ||
343 | |||
339 | extern int hrtimer_cancel(struct hrtimer *timer); | 344 | extern int hrtimer_cancel(struct hrtimer *timer); |
340 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); | 345 | extern int hrtimer_try_to_cancel(struct hrtimer *timer); |
341 | 346 | ||
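__hrtimer_start_range_ns() exposes the existing range-timer start path with an extra 'wakeup' flag so callers in atomic context can skip the remote wakeup. For reference, a minimal sketch of the public hrtimer_start_range_ns() interface it sits behind; the timer and callback names are illustrative.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer example_timer;

static enum hrtimer_restart example_timer_fn(struct hrtimer *t)
{
	return HRTIMER_NORESTART;
}

static void example_arm_timer(void)
{
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	example_timer.function = example_timer_fn;

	/* fire ~10 ms from now, allowing 1 ms of slack for coalescing */
	hrtimer_start_range_ns(&example_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
			       NSEC_PER_MSEC, HRTIMER_MODE_REL);
}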
diff --git a/include/linux/i2c-algo-sgi.h b/include/linux/i2c-algo-sgi.h deleted file mode 100644 index 3b7715024e69..000000000000 --- a/include/linux/i2c-algo-sgi.h +++ /dev/null | |||
@@ -1,26 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License version 2 as published by the Free Software Foundation. | ||
4 | * | ||
5 | * Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org> | ||
6 | */ | ||
7 | |||
8 | #ifndef I2C_ALGO_SGI_H | ||
9 | #define I2C_ALGO_SGI_H 1 | ||
10 | |||
11 | #include <linux/i2c.h> | ||
12 | |||
13 | struct i2c_algo_sgi_data { | ||
14 | void *data; /* private data for lowlevel routines */ | ||
15 | unsigned (*getctrl)(void *data); | ||
16 | void (*setctrl)(void *data, unsigned val); | ||
17 | unsigned (*rdata)(void *data); | ||
18 | void (*wdata)(void *data, unsigned val); | ||
19 | |||
20 | int xfer_timeout; | ||
21 | int ack_timeout; | ||
22 | }; | ||
23 | |||
24 | int i2c_sgi_add_bus(struct i2c_adapter *); | ||
25 | |||
26 | #endif /* I2C_ALGO_SGI_H */ | ||
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 1ffc23bc5d1e..c9087de5c6c6 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h | |||
@@ -33,46 +33,10 @@ | |||
33 | 33 | ||
34 | #define I2C_DRIVERID_MSP3400 1 | 34 | #define I2C_DRIVERID_MSP3400 1 |
35 | #define I2C_DRIVERID_TUNER 2 | 35 | #define I2C_DRIVERID_TUNER 2 |
36 | #define I2C_DRIVERID_TEA6420 5 /* audio matrix switch */ | ||
37 | #define I2C_DRIVERID_TEA6415C 6 /* video matrix switch */ | ||
38 | #define I2C_DRIVERID_TDA9840 7 /* stereo sound processor */ | ||
39 | #define I2C_DRIVERID_SAA7111A 8 /* video input processor */ | ||
40 | #define I2C_DRIVERID_SAA7185B 13 /* video encoder */ | ||
41 | #define I2C_DRIVERID_SAA7110 22 /* video decoder */ | ||
42 | #define I2C_DRIVERID_SAA5249 24 /* SAA5249 and compatibles */ | ||
43 | #define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ | 36 | #define I2C_DRIVERID_TDA7432 27 /* Stereo sound processor */ |
44 | #define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ | 37 | #define I2C_DRIVERID_TVAUDIO 29 /* Generic TV sound driver */ |
45 | #define I2C_DRIVERID_TDA9875 32 /* TV sound decoder chip */ | ||
46 | #define I2C_DRIVERID_BT819 40 /* video decoder */ | ||
47 | #define I2C_DRIVERID_BT856 41 /* video encoder */ | ||
48 | #define I2C_DRIVERID_VPX3220 42 /* video decoder+vbi/vtxt */ | ||
49 | #define I2C_DRIVERID_ADV7175 48 /* ADV 7175/7176 video encoder */ | ||
50 | #define I2C_DRIVERID_SAA7114 49 /* video decoder */ | ||
51 | #define I2C_DRIVERID_ADV7170 54 /* video encoder */ | ||
52 | #define I2C_DRIVERID_SAA7191 57 /* video decoder */ | ||
53 | #define I2C_DRIVERID_INDYCAM 58 /* SGI IndyCam */ | ||
54 | #define I2C_DRIVERID_OVCAMCHIP 61 /* OmniVision CMOS image sens. */ | ||
55 | #define I2C_DRIVERID_SAA6752HS 67 /* MPEG2 encoder */ | ||
56 | #define I2C_DRIVERID_TVEEPROM 68 /* TV EEPROM */ | ||
57 | #define I2C_DRIVERID_WM8775 69 /* wm8775 audio processor */ | ||
58 | #define I2C_DRIVERID_CS53L32A 70 /* cs53l32a audio processor */ | ||
59 | #define I2C_DRIVERID_CX25840 71 /* cx2584x video encoder */ | ||
60 | #define I2C_DRIVERID_SAA7127 72 /* saa7127 video encoder */ | ||
61 | #define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ | 38 | #define I2C_DRIVERID_SAA711X 73 /* saa711x video encoders */ |
62 | #define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */ | 39 | #define I2C_DRIVERID_INFRARED 75 /* I2C InfraRed on Video boards */ |
63 | #define I2C_DRIVERID_TVP5150 76 /* TVP5150 video decoder */ | ||
64 | #define I2C_DRIVERID_WM8739 77 /* wm8739 audio processor */ | ||
65 | #define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */ | ||
66 | #define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */ | ||
67 | #define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */ | ||
68 | #define I2C_DRIVERID_BT866 85 /* Conexant bt866 video encoder */ | ||
69 | #define I2C_DRIVERID_KS0127 86 /* Samsung ks0127 video decoder */ | ||
70 | #define I2C_DRIVERID_TLV320AIC23B 87 /* TI TLV320AIC23B audio codec */ | ||
71 | #define I2C_DRIVERID_VP27SMPX 93 /* Panasonic VP27s tuner internal MPX */ | ||
72 | #define I2C_DRIVERID_M52790 95 /* Mitsubishi M52790SP/FP AV switch */ | ||
73 | #define I2C_DRIVERID_CS5345 96 /* cs5345 audio processor */ | ||
74 | |||
75 | #define I2C_DRIVERID_OV7670 1048 /* Omnivision 7670 camera */ | ||
76 | 40 | ||
77 | /* | 41 | /* |
78 | * ---- Adapter types ---------------------------------------------------- | 42 | * ---- Adapter types ---------------------------------------------------- |
@@ -87,6 +51,8 @@ | |||
87 | #define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */ | 51 | #define I2C_HW_B_CX2341X 0x010020 /* Conexant CX2341X MPEG encoder cards */ |
88 | #define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ | 52 | #define I2C_HW_B_CX23885 0x010022 /* conexant 23885 based tv cards (bus1) */ |
89 | #define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ | 53 | #define I2C_HW_B_AU0828 0x010023 /* auvitek au0828 usb bridge */ |
54 | #define I2C_HW_B_CX231XX 0x010024 /* Conexant CX231XX USB based cards */ | ||
55 | #define I2C_HW_B_HDPVR 0x010025 /* Hauppauge HD PVR */ | ||
90 | 56 | ||
91 | /* --- SGI adapters */ | 57 | /* --- SGI adapters */ |
92 | #define I2C_HW_SGI_VINO 0x160000 | 58 | #define I2C_HW_SGI_VINO 0x160000 |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index c86c3b07604c..ad2580596033 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -274,7 +274,7 @@ struct i2c_board_info { | |||
274 | * are provided using conventional syntax. | 274 | * are provided using conventional syntax. |
275 | */ | 275 | */ |
276 | #define I2C_BOARD_INFO(dev_type, dev_addr) \ | 276 | #define I2C_BOARD_INFO(dev_type, dev_addr) \ |
277 | .type = (dev_type), .addr = (dev_addr) | 277 | .type = dev_type, .addr = (dev_addr) |
278 | 278 | ||
279 | 279 | ||
280 | /* Add-on boards should register/unregister their devices; e.g. a board | 280 | /* Add-on boards should register/unregister their devices; e.g. a board |
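I2C_BOARD_INFO() only fills .type and .addr, so any other fields are given alongside it. A minimal sketch of board code declaring devices this way; the device names, addresses and IRQ are placeholders.

#include <linux/init.h>
#include <linux/i2c.h>

static struct i2c_board_info __initdata example_i2c_devices[] = {
	{
		I2C_BOARD_INFO("ds1307", 0x68),		/* RTC at 0x68 */
		.irq = 42,				/* placeholder IRQ */
	},
	{
		I2C_BOARD_INFO("24c02", 0x50),		/* small EEPROM */
	},
};

/*
 * Typically registered from board setup code:
 *	i2c_register_board_info(0, example_i2c_devices,
 *				ARRAY_SIZE(example_i2c_devices));
 */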
@@ -353,8 +353,8 @@ struct i2c_adapter { | |||
353 | void *algo_data; | 353 | void *algo_data; |
354 | 354 | ||
355 | /* --- administration stuff. */ | 355 | /* --- administration stuff. */ |
356 | int (*client_register)(struct i2c_client *); | 356 | int (*client_register)(struct i2c_client *) __deprecated; |
357 | int (*client_unregister)(struct i2c_client *); | 357 | int (*client_unregister)(struct i2c_client *) __deprecated; |
358 | 358 | ||
359 | /* data fields that are valid for all devices */ | 359 | /* data fields that are valid for all devices */ |
360 | u8 level; /* nesting level for lockdep */ | 360 | u8 level; /* nesting level for lockdep */ |
diff --git a/include/linux/i2c/at24.h b/include/linux/i2c/at24.h index f6edd522a929..8ace93024d60 100644 --- a/include/linux/i2c/at24.h +++ b/include/linux/i2c/at24.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _LINUX_AT24_H | 2 | #define _LINUX_AT24_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/memory.h> | ||
5 | 6 | ||
6 | /* | 7 | /* |
7 | * As seen through Linux I2C, differences between the most common types of I2C | 8 | * As seen through Linux I2C, differences between the most common types of I2C |
@@ -23,6 +24,9 @@ struct at24_platform_data { | |||
23 | #define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ | 24 | #define AT24_FLAG_READONLY 0x40 /* sysfs-entry will be read-only */ |
24 | #define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ | 25 | #define AT24_FLAG_IRUGO 0x20 /* sysfs-entry will be world-readable */ |
25 | #define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ | 26 | #define AT24_FLAG_TAKE8ADDR 0x10 /* take always 8 addresses (24c00) */ |
27 | |||
28 | void (*setup)(struct memory_accessor *, void *context); | ||
29 | void *context; | ||
26 | }; | 30 | }; |
27 | 31 | ||
28 | #endif /* _LINUX_AT24_H */ | 32 | #endif /* _LINUX_AT24_H */ |
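The new setup()/context pair hands board code a memory_accessor once the at24 driver binds, so it can pull data (a MAC address, calibration bytes, ...) out of the EEPROM. A minimal sketch under assumed names; the sizes and offsets are placeholders.

#include <linux/memory.h>
#include <linux/i2c/at24.h>

static void example_eeprom_ready(struct memory_accessor *mem, void *context)
{
	char buf[6];

	/* read 6 bytes from offset 0 while the accessor is valid */
	if (mem->read(mem, buf, 0, sizeof(buf)) != sizeof(buf))
		return;
	/* hand buf[] to board/network setup code here */
}

static struct at24_platform_data example_eeprom_pdata = {
	.byte_len	= 256,			/* e.g. a 24c02 */
	.page_size	= 8,
	.setup		= example_eeprom_ready,
	.context	= NULL,
};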
diff --git a/include/linux/i2c/s6000.h b/include/linux/i2c/s6000.h new file mode 100644 index 000000000000..d9b34bfdae76 --- /dev/null +++ b/include/linux/i2c/s6000.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef __LINUX_I2C_S6000_H | ||
2 | #define __LINUX_I2C_S6000_H | ||
3 | |||
4 | struct s6_i2c_platform_data { | ||
5 | const char *clock; /* the clock to use */ | ||
6 | int bus_num; /* the bus number to register */ | ||
7 | }; | ||
8 | |||
9 | #endif | ||
10 | |||
diff --git a/include/linux/i2c/twl4030.h b/include/linux/i2c/twl4030.h index 8137f660a5cc..0dc80ef24975 100644 --- a/include/linux/i2c/twl4030.h +++ b/include/linux/i2c/twl4030.h | |||
@@ -218,6 +218,53 @@ int twl4030_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); | |||
218 | 218 | ||
219 | /*----------------------------------------------------------------------*/ | 219 | /*----------------------------------------------------------------------*/ |
220 | 220 | ||
221 | /* Power bus message definitions */ | ||
222 | |||
223 | #define DEV_GRP_NULL 0x0 | ||
224 | #define DEV_GRP_P1 0x1 | ||
225 | #define DEV_GRP_P2 0x2 | ||
226 | #define DEV_GRP_P3 0x4 | ||
227 | |||
228 | #define RES_GRP_RES 0x0 | ||
229 | #define RES_GRP_PP 0x1 | ||
230 | #define RES_GRP_RC 0x2 | ||
231 | #define RES_GRP_PP_RC 0x3 | ||
232 | #define RES_GRP_PR 0x4 | ||
233 | #define RES_GRP_PP_PR 0x5 | ||
234 | #define RES_GRP_RC_PR 0x6 | ||
235 | #define RES_GRP_ALL 0x7 | ||
236 | |||
237 | #define RES_TYPE2_R0 0x0 | ||
238 | |||
239 | #define RES_TYPE_ALL 0x7 | ||
240 | |||
241 | #define RES_STATE_WRST 0xF | ||
242 | #define RES_STATE_ACTIVE 0xE | ||
243 | #define RES_STATE_SLEEP 0x8 | ||
244 | #define RES_STATE_OFF 0x0 | ||
245 | |||
246 | /* | ||
247 | * Power Bus Message Format ... these can be sent individually by Linux, | ||
248 | * but are usually part of downloaded scripts that are run when various | ||
249 | * power events are triggered. | ||
250 | * | ||
251 | * Broadcast Message (16 Bits): | ||
252 | * DEV_GRP[15:13] MT[12] RES_GRP[11:9] RES_TYPE2[8:7] RES_TYPE[6:4] | ||
253 | * RES_STATE[3:0] | ||
254 | * | ||
255 | * Singular Message (16 Bits): | ||
256 | * DEV_GRP[15:13] MT[12] RES_ID[11:4] RES_STATE[3:0] | ||
257 | */ | ||
258 | |||
259 | #define MSG_BROADCAST(devgrp, grp, type, type2, state) \ | ||
260 | ( (devgrp) << 13 | 1 << 12 | (grp) << 9 | (type2) << 7 \ | ||
261 | | (type) << 4 | (state)) | ||
262 | |||
263 | #define MSG_SINGULAR(devgrp, id, state) \ | ||
264 | ((devgrp) << 13 | 0 << 12 | (id) << 4 | (state)) | ||
265 | |||
266 | /*----------------------------------------------------------------------*/ | ||
267 | |||
221 | struct twl4030_bci_platform_data { | 268 | struct twl4030_bci_platform_data { |
222 | int *battery_tmp_tbl; | 269 | int *battery_tmp_tbl; |
223 | unsigned int tblsize; | 270 | unsigned int tblsize; |
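The MSG_BROADCAST()/MSG_SINGULAR() helpers only compose the 16-bit power-bus word; actually transmitting it goes through the TWL4030 PM_MASTER registers, which this header does not expose, so only the composition step is sketched here. The resource id is a placeholder.

#include <linux/types.h>
#include <linux/i2c/twl4030.h>

/* build a singular message putting one resource of device group P1 to sleep */
static u16 example_sleep_message(u8 res_id)
{
	return MSG_SINGULAR(DEV_GRP_P1, res_id, RES_STATE_SLEEP);
}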
diff --git a/include/linux/ide.h b/include/linux/ide.h index 854eba8b2ba3..ff65fffb078f 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -40,6 +40,13 @@ | |||
40 | #define ERROR_RESET 3 /* Reset controller every 4th retry */ | 40 | #define ERROR_RESET 3 /* Reset controller every 4th retry */ |
41 | #define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ | 41 | #define ERROR_RECAL 1 /* Recalibrate every 2nd retry */ |
42 | 42 | ||
43 | /* Error codes returned in rq->errors to the higher part of the driver. */ | ||
44 | enum { | ||
45 | IDE_DRV_ERROR_GENERAL = 101, | ||
46 | IDE_DRV_ERROR_FILEMARK = 102, | ||
47 | IDE_DRV_ERROR_EOD = 103, | ||
48 | }; | ||
49 | |||
43 | /* | 50 | /* |
44 | * Definitions for accessing IDE controller registers | 51 | * Definitions for accessing IDE controller registers |
45 | */ | 52 | */ |
@@ -193,26 +200,8 @@ static inline void ide_std_init_ports(hw_regs_t *hw, | |||
193 | hw->io_ports.ctl_addr = ctl_addr; | 200 | hw->io_ports.ctl_addr = ctl_addr; |
194 | } | 201 | } |
195 | 202 | ||
196 | #if defined(CONFIG_ARM) || defined(CONFIG_M68K) || defined(CONFIG_MIPS) || \ | ||
197 | defined(CONFIG_PARISC) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) | ||
198 | #include <asm/ide.h> | ||
199 | #else | ||
200 | #include <asm-generic/ide_iops.h> | ||
201 | #endif | ||
202 | |||
203 | #define MAX_HWIFS 10 | 203 | #define MAX_HWIFS 10 |
204 | 204 | ||
205 | /* Currently only m68k, apus and m8xx need it */ | ||
206 | #ifndef IDE_ARCH_ACK_INTR | ||
207 | # define ide_ack_intr(hwif) (1) | ||
208 | #endif | ||
209 | |||
210 | /* Currently only Atari needs it */ | ||
211 | #ifndef IDE_ARCH_LOCK | ||
212 | # define ide_release_lock() do {} while (0) | ||
213 | # define ide_get_lock(hdlr, data) do {} while (0) | ||
214 | #endif /* IDE_ARCH_LOCK */ | ||
215 | |||
216 | /* | 205 | /* |
217 | * Now for the data we need to maintain per-drive: ide_drive_t | 206 | * Now for the data we need to maintain per-drive: ide_drive_t |
218 | */ | 207 | */ |
@@ -251,108 +240,92 @@ typedef enum { | |||
251 | } ide_startstop_t; | 240 | } ide_startstop_t; |
252 | 241 | ||
253 | enum { | 242 | enum { |
243 | IDE_VALID_ERROR = (1 << 1), | ||
244 | IDE_VALID_FEATURE = IDE_VALID_ERROR, | ||
245 | IDE_VALID_NSECT = (1 << 2), | ||
246 | IDE_VALID_LBAL = (1 << 3), | ||
247 | IDE_VALID_LBAM = (1 << 4), | ||
248 | IDE_VALID_LBAH = (1 << 5), | ||
249 | IDE_VALID_DEVICE = (1 << 6), | ||
250 | IDE_VALID_LBA = IDE_VALID_LBAL | | ||
251 | IDE_VALID_LBAM | | ||
252 | IDE_VALID_LBAH, | ||
253 | IDE_VALID_OUT_TF = IDE_VALID_FEATURE | | ||
254 | IDE_VALID_NSECT | | ||
255 | IDE_VALID_LBA, | ||
256 | IDE_VALID_IN_TF = IDE_VALID_NSECT | | ||
257 | IDE_VALID_LBA, | ||
258 | IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF, | ||
259 | IDE_VALID_IN_HOB = IDE_VALID_ERROR | | ||
260 | IDE_VALID_NSECT | | ||
261 | IDE_VALID_LBA, | ||
262 | }; | ||
263 | |||
264 | enum { | ||
254 | IDE_TFLAG_LBA48 = (1 << 0), | 265 | IDE_TFLAG_LBA48 = (1 << 0), |
255 | IDE_TFLAG_FLAGGED = (1 << 2), | 266 | IDE_TFLAG_WRITE = (1 << 1), |
256 | IDE_TFLAG_OUT_DATA = (1 << 3), | 267 | IDE_TFLAG_CUSTOM_HANDLER = (1 << 2), |
257 | IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4), | 268 | IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3), |
258 | IDE_TFLAG_OUT_HOB_NSECT = (1 << 5), | ||
259 | IDE_TFLAG_OUT_HOB_LBAL = (1 << 6), | ||
260 | IDE_TFLAG_OUT_HOB_LBAM = (1 << 7), | ||
261 | IDE_TFLAG_OUT_HOB_LBAH = (1 << 8), | ||
262 | IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE | | ||
263 | IDE_TFLAG_OUT_HOB_NSECT | | ||
264 | IDE_TFLAG_OUT_HOB_LBAL | | ||
265 | IDE_TFLAG_OUT_HOB_LBAM | | ||
266 | IDE_TFLAG_OUT_HOB_LBAH, | ||
267 | IDE_TFLAG_OUT_FEATURE = (1 << 9), | ||
268 | IDE_TFLAG_OUT_NSECT = (1 << 10), | ||
269 | IDE_TFLAG_OUT_LBAL = (1 << 11), | ||
270 | IDE_TFLAG_OUT_LBAM = (1 << 12), | ||
271 | IDE_TFLAG_OUT_LBAH = (1 << 13), | ||
272 | IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE | | ||
273 | IDE_TFLAG_OUT_NSECT | | ||
274 | IDE_TFLAG_OUT_LBAL | | ||
275 | IDE_TFLAG_OUT_LBAM | | ||
276 | IDE_TFLAG_OUT_LBAH, | ||
277 | IDE_TFLAG_OUT_DEVICE = (1 << 14), | ||
278 | IDE_TFLAG_WRITE = (1 << 15), | ||
279 | IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16), | ||
280 | IDE_TFLAG_IN_DATA = (1 << 17), | ||
281 | IDE_TFLAG_CUSTOM_HANDLER = (1 << 18), | ||
282 | IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19), | ||
283 | IDE_TFLAG_IN_HOB_FEATURE = (1 << 20), | ||
284 | IDE_TFLAG_IN_HOB_NSECT = (1 << 21), | ||
285 | IDE_TFLAG_IN_HOB_LBAL = (1 << 22), | ||
286 | IDE_TFLAG_IN_HOB_LBAM = (1 << 23), | ||
287 | IDE_TFLAG_IN_HOB_LBAH = (1 << 24), | ||
288 | IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL | | ||
289 | IDE_TFLAG_IN_HOB_LBAM | | ||
290 | IDE_TFLAG_IN_HOB_LBAH, | ||
291 | IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | | ||
292 | IDE_TFLAG_IN_HOB_NSECT | | ||
293 | IDE_TFLAG_IN_HOB_LBA, | ||
294 | IDE_TFLAG_IN_FEATURE = (1 << 1), | ||
295 | IDE_TFLAG_IN_NSECT = (1 << 25), | ||
296 | IDE_TFLAG_IN_LBAL = (1 << 26), | ||
297 | IDE_TFLAG_IN_LBAM = (1 << 27), | ||
298 | IDE_TFLAG_IN_LBAH = (1 << 28), | ||
299 | IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL | | ||
300 | IDE_TFLAG_IN_LBAM | | ||
301 | IDE_TFLAG_IN_LBAH, | ||
302 | IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT | | ||
303 | IDE_TFLAG_IN_LBA, | ||
304 | IDE_TFLAG_IN_DEVICE = (1 << 29), | ||
305 | IDE_TFLAG_HOB = IDE_TFLAG_OUT_HOB | | ||
306 | IDE_TFLAG_IN_HOB, | ||
307 | IDE_TFLAG_TF = IDE_TFLAG_OUT_TF | | ||
308 | IDE_TFLAG_IN_TF, | ||
309 | IDE_TFLAG_DEVICE = IDE_TFLAG_OUT_DEVICE | | ||
310 | IDE_TFLAG_IN_DEVICE, | ||
311 | /* force 16-bit I/O operations */ | 269 | /* force 16-bit I/O operations */ |
312 | IDE_TFLAG_IO_16BIT = (1 << 30), | 270 | IDE_TFLAG_IO_16BIT = (1 << 4), |
313 | /* ide_task_t was allocated using kmalloc() */ | 271 | /* struct ide_cmd was allocated using kmalloc() */ |
314 | IDE_TFLAG_DYN = (1 << 31), | 272 | IDE_TFLAG_DYN = (1 << 5), |
273 | IDE_TFLAG_FS = (1 << 6), | ||
274 | IDE_TFLAG_MULTI_PIO = (1 << 7), | ||
315 | }; | 275 | }; |
316 | 276 | ||
317 | struct ide_taskfile { | 277 | enum { |
318 | u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */ | 278 | IDE_FTFLAG_FLAGGED = (1 << 0), |
279 | IDE_FTFLAG_SET_IN_FLAGS = (1 << 1), | ||
280 | IDE_FTFLAG_OUT_DATA = (1 << 2), | ||
281 | IDE_FTFLAG_IN_DATA = (1 << 3), | ||
282 | }; | ||
319 | 283 | ||
320 | u8 hob_feature; /* 1-5: additional data to support LBA48 */ | 284 | struct ide_taskfile { |
321 | u8 hob_nsect; | 285 | u8 data; /* 0: data byte (for TASKFILE ioctl) */ |
322 | u8 hob_lbal; | 286 | union { /* 1: */ |
323 | u8 hob_lbam; | 287 | u8 error; /* read: error */ |
324 | u8 hob_lbah; | 288 | u8 feature; /* write: feature */ |
289 | }; | ||
290 | u8 nsect; /* 2: number of sectors */ | ||
291 | u8 lbal; /* 3: LBA low */ | ||
292 | u8 lbam; /* 4: LBA mid */ | ||
293 | u8 lbah; /* 5: LBA high */ | ||
294 | u8 device; /* 6: device select */ | ||
295 | union { /* 7: */ | ||
296 | u8 status; /* read: status */ | ||
297 | u8 command; /* write: command */ | ||
298 | }; | ||
299 | }; | ||
325 | 300 | ||
326 | u8 data; /* 6: low data byte (for TASKFILE IOCTL) */ | 301 | struct ide_cmd { |
302 | struct ide_taskfile tf; | ||
303 | struct ide_taskfile hob; | ||
304 | struct { | ||
305 | struct { | ||
306 | u8 tf; | ||
307 | u8 hob; | ||
308 | } out, in; | ||
309 | } valid; | ||
327 | 310 | ||
328 | union { /* 7: */ | 311 | u8 tf_flags;
329 | u8 error; /* read: error */ | 312 | u8 ftf_flags; /* for TASKFILE ioctl */ |
330 | u8 feature; /* write: feature */ | 313 | int protocol; |
331 | }; | ||
332 | 314 | ||
333 | u8 nsect; /* 8: number of sectors */ | 315 | int sg_nents; /* number of sg entries */ |
334 | u8 lbal; /* 9: LBA low */ | 316 | int orig_sg_nents; |
335 | u8 lbam; /* 10: LBA mid */ | 317 | int sg_dma_direction; /* DMA transfer direction */ |
336 | u8 lbah; /* 11: LBA high */ | ||
337 | 318 | ||
338 | u8 device; /* 12: device select */ | 319 | unsigned int nbytes; |
320 | unsigned int nleft; | ||
321 | unsigned int last_xfer_len; | ||
339 | 322 | ||
340 | union { /* 13: */ | 323 | struct scatterlist *cursg; |
341 | u8 status; /* read: status */ | 324 | unsigned int cursg_ofs;
342 | u8 command; /* write: command */ | ||
343 | }; | ||
344 | }; | ||
345 | 325 | ||
346 | typedef struct ide_task_s { | ||
347 | union { | ||
348 | struct ide_taskfile tf; | ||
349 | u8 tf_array[14]; | ||
350 | }; | ||
351 | u32 tf_flags; | ||
352 | int data_phase; | ||
353 | struct request *rq; /* copy of request */ | 326 | struct request *rq; /* copy of request */ |
354 | void *special; /* valid_t generally */ | 327 | void *special; /* valid_t generally */ |
355 | } ide_task_t; | 328 | }; |
356 | 329 | ||
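struct ide_cmd replaces ide_task_t, with the per-register IDE_TFLAG_{IN,OUT}_* bits condensed into the byte-sized IDE_VALID_* masks stored under cmd.valid. A minimal sketch of issuing a no-data command with the new layout; the helper name is hypothetical, and the pattern follows what ide_no_data_taskfile() callers do.

#include <linux/ide.h>
#include <linux/string.h>

static int example_idle_immediate(ide_drive_t *drive)
{
	struct ide_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.tf.command	 = ATA_CMD_IDLEIMMEDIATE;
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf	 = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;

	return ide_no_data_taskfile(drive, &cmd);
}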
357 | /* ATAPI packet command flags */ | 330 | /* ATAPI packet command flags */ |
358 | enum { | 331 | enum { |
@@ -364,15 +337,13 @@ enum { | |||
364 | PC_FLAG_DMA_IN_PROGRESS = (1 << 4), | 337 | PC_FLAG_DMA_IN_PROGRESS = (1 << 4), |
365 | PC_FLAG_DMA_ERROR = (1 << 5), | 338 | PC_FLAG_DMA_ERROR = (1 << 5), |
366 | PC_FLAG_WRITING = (1 << 6), | 339 | PC_FLAG_WRITING = (1 << 6), |
367 | /* command timed out */ | ||
368 | PC_FLAG_TIMEDOUT = (1 << 7), | ||
369 | }; | 340 | }; |
370 | 341 | ||
371 | /* | 342 | /* |
372 | * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes. | 343 | * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes. |
373 | * This is used for several packet commands (not for READ/WRITE commands). | 344 | * This is used for several packet commands (not for READ/WRITE commands). |
374 | */ | 345 | */ |
375 | #define IDE_PC_BUFFER_SIZE 256 | 346 | #define IDE_PC_BUFFER_SIZE 64 |
376 | #define ATAPI_WAIT_PC (60 * HZ) | 347 | #define ATAPI_WAIT_PC (60 * HZ) |
377 | 348 | ||
378 | struct ide_atapi_pc { | 349 | struct ide_atapi_pc { |
@@ -410,9 +381,6 @@ struct ide_atapi_pc { | |||
410 | struct idetape_bh *bh; | 381 | struct idetape_bh *bh; |
411 | char *b_data; | 382 | char *b_data; |
412 | 383 | ||
413 | struct scatterlist *sg; | ||
414 | unsigned int sg_cnt; | ||
415 | |||
416 | unsigned long timeout; | 384 | unsigned long timeout; |
417 | }; | 385 | }; |
418 | 386 | ||
@@ -436,7 +404,6 @@ struct ide_disk_ops { | |||
436 | int); | 404 | int); |
437 | ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *, | 405 | ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *, |
438 | sector_t); | 406 | sector_t); |
439 | int (*end_request)(struct ide_drive_s *, int, int); | ||
440 | int (*ioctl)(struct ide_drive_s *, struct block_device *, | 407 | int (*ioctl)(struct ide_drive_s *, struct block_device *, |
441 | fmode_t, unsigned int, unsigned long); | 408 | fmode_t, unsigned int, unsigned long); |
442 | }; | 409 | }; |
@@ -454,11 +421,6 @@ enum { | |||
454 | IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), | 421 | IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3), |
455 | /* TOC track numbers are in BCD. */ | 422 | /* TOC track numbers are in BCD. */ |
456 | IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), | 423 | IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4), |
457 | /* | ||
458 | * Drive does not provide data in multiples of SECTOR_SIZE | ||
459 | * when more than one interrupt is needed. | ||
460 | */ | ||
461 | IDE_AFLAG_LIMIT_NFRAMES = (1 << 5), | ||
462 | /* Saved TOC information is current. */ | 424 | /* Saved TOC information is current. */ |
463 | IDE_AFLAG_TOC_VALID = (1 << 6), | 425 | IDE_AFLAG_TOC_VALID = (1 << 6), |
464 | /* We think that the drive door is locked. */ | 426 | /* We think that the drive door is locked. */ |
@@ -512,8 +474,6 @@ enum { | |||
512 | IDE_DFLAG_NICE1 = (1 << 5), | 474 | IDE_DFLAG_NICE1 = (1 << 5), |
513 | /* device is physically present */ | 475 | /* device is physically present */ |
514 | IDE_DFLAG_PRESENT = (1 << 6), | 476 | IDE_DFLAG_PRESENT = (1 << 6), |
515 | /* device ejected hint */ | ||
516 | IDE_DFLAG_DEAD = (1 << 7), | ||
517 | /* id read from device (synthetic if not set) */ | 477 | /* id read from device (synthetic if not set) */ |
518 | IDE_DFLAG_ID_READ = (1 << 8), | 478 | IDE_DFLAG_ID_READ = (1 << 8), |
519 | IDE_DFLAG_NOPROBE = (1 << 9), | 479 | IDE_DFLAG_NOPROBE = (1 << 9), |
@@ -605,7 +565,7 @@ struct ide_drive_s { | |||
605 | 565 | ||
606 | unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */ | 566 | unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */ |
607 | unsigned int cyl; /* "real" number of cyls */ | 567 | unsigned int cyl; /* "real" number of cyls */ |
608 | unsigned int drive_data; /* used by set_pio_mode/selectproc */ | 568 | unsigned int drive_data; /* used by set_pio_mode/dev_select() */ |
609 | unsigned int failures; /* current failure count */ | 569 | unsigned int failures; /* current failure count */ |
610 | unsigned int max_failures; /* maximum allowed failure count */ | 570 | unsigned int max_failures; /* maximum allowed failure count */ |
611 | u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */ | 571 | u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */ |
@@ -627,8 +587,11 @@ struct ide_drive_s { | |||
627 | /* current packet command */ | 587 | /* current packet command */ |
628 | struct ide_atapi_pc *pc; | 588 | struct ide_atapi_pc *pc; |
629 | 589 | ||
590 | /* last failed packet command */ | ||
591 | struct ide_atapi_pc *failed_pc; | ||
592 | |||
630 | /* callback for packet commands */ | 593 | /* callback for packet commands */ |
631 | void (*pc_callback)(struct ide_drive_s *, int); | 594 | int (*pc_callback)(struct ide_drive_s *, int); |
632 | 595 | ||
633 | void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *); | 596 | void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *); |
634 | int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *, | 597 | int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *, |
@@ -658,16 +621,16 @@ struct ide_tp_ops { | |||
658 | void (*exec_command)(struct hwif_s *, u8); | 621 | void (*exec_command)(struct hwif_s *, u8); |
659 | u8 (*read_status)(struct hwif_s *); | 622 | u8 (*read_status)(struct hwif_s *); |
660 | u8 (*read_altstatus)(struct hwif_s *); | 623 | u8 (*read_altstatus)(struct hwif_s *); |
624 | void (*write_devctl)(struct hwif_s *, u8); | ||
661 | 625 | ||
662 | void (*set_irq)(struct hwif_s *, int); | 626 | void (*dev_select)(ide_drive_t *); |
627 | void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8); | ||
628 | void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8); | ||
663 | 629 | ||
664 | void (*tf_load)(ide_drive_t *, struct ide_task_s *); | 630 | void (*input_data)(ide_drive_t *, struct ide_cmd *, |
665 | void (*tf_read)(ide_drive_t *, struct ide_task_s *); | 631 | void *, unsigned int); |
666 | 632 | void (*output_data)(ide_drive_t *, struct ide_cmd *, | |
667 | void (*input_data)(ide_drive_t *, struct request *, void *, | 633 | void *, unsigned int); |
668 | unsigned int); | ||
669 | void (*output_data)(ide_drive_t *, struct request *, void *, | ||
670 | unsigned int); | ||
671 | }; | 634 | }; |
672 | 635 | ||
673 | extern const struct ide_tp_ops default_tp_ops; | 636 | extern const struct ide_tp_ops default_tp_ops; |
@@ -678,7 +641,6 @@ extern const struct ide_tp_ops default_tp_ops; | |||
678 | * @init_dev: host specific initialization of a device | 641 | * @init_dev: host specific initialization of a device |
679 | * @set_pio_mode: routine to program host for PIO mode | 642 | * @set_pio_mode: routine to program host for PIO mode |
680 | * @set_dma_mode: routine to program host for DMA mode | 643 | * @set_dma_mode: routine to program host for DMA mode |
681 | * @selectproc: tweaks hardware to select drive | ||
682 | * @reset_poll: chipset polling based on hba specifics | 644 | * @reset_poll: chipset polling based on hba specifics |
683 | * @pre_reset: chipset specific changes to default for device-hba resets | 645 | * @pre_reset: chipset specific changes to default for device-hba resets |
684 | * @resetproc: routine to reset controller after a disk reset | 646 | * @resetproc: routine to reset controller after a disk reset |
@@ -695,7 +657,6 @@ struct ide_port_ops { | |||
695 | void (*init_dev)(ide_drive_t *); | 657 | void (*init_dev)(ide_drive_t *); |
696 | void (*set_pio_mode)(ide_drive_t *, const u8); | 658 | void (*set_pio_mode)(ide_drive_t *, const u8); |
697 | void (*set_dma_mode)(ide_drive_t *, const u8); | 659 | void (*set_dma_mode)(ide_drive_t *, const u8); |
698 | void (*selectproc)(ide_drive_t *); | ||
699 | int (*reset_poll)(ide_drive_t *); | 660 | int (*reset_poll)(ide_drive_t *); |
700 | void (*pre_reset)(ide_drive_t *); | 661 | void (*pre_reset)(ide_drive_t *); |
701 | void (*resetproc)(ide_drive_t *); | 662 | void (*resetproc)(ide_drive_t *); |
@@ -711,13 +672,15 @@ struct ide_port_ops { | |||
711 | 672 | ||
712 | struct ide_dma_ops { | 673 | struct ide_dma_ops { |
713 | void (*dma_host_set)(struct ide_drive_s *, int); | 674 | void (*dma_host_set)(struct ide_drive_s *, int); |
714 | int (*dma_setup)(struct ide_drive_s *); | 675 | int (*dma_setup)(struct ide_drive_s *, struct ide_cmd *); |
715 | void (*dma_exec_cmd)(struct ide_drive_s *, u8); | ||
716 | void (*dma_start)(struct ide_drive_s *); | 676 | void (*dma_start)(struct ide_drive_s *); |
717 | int (*dma_end)(struct ide_drive_s *); | 677 | int (*dma_end)(struct ide_drive_s *); |
718 | int (*dma_test_irq)(struct ide_drive_s *); | 678 | int (*dma_test_irq)(struct ide_drive_s *); |
719 | void (*dma_lost_irq)(struct ide_drive_s *); | 679 | void (*dma_lost_irq)(struct ide_drive_s *); |
720 | void (*dma_timeout)(struct ide_drive_s *); | 680 | /* below ones are optional */ |
681 | int (*dma_check)(struct ide_drive_s *, struct ide_cmd *); | ||
682 | int (*dma_timer_expiry)(struct ide_drive_s *); | ||
683 | void (*dma_clear)(struct ide_drive_s *); | ||
721 | /* | 684 | /* |
722 | * The following method is optional and only required to be | 685 | * The following method is optional and only required to be |
723 | * implemented for the SFF-8038i compatible controllers. | 686 | * implemented for the SFF-8038i compatible controllers. |
@@ -780,19 +743,8 @@ typedef struct hwif_s { | |||
780 | /* Scatter-gather list used to build the above */ | 743 | /* Scatter-gather list used to build the above */ |
781 | struct scatterlist *sg_table; | 744 | struct scatterlist *sg_table; |
782 | int sg_max_nents; /* Maximum number of entries in it */ | 745 | int sg_max_nents; /* Maximum number of entries in it */ |
783 | int sg_nents; /* Current number of entries in it */ | ||
784 | int orig_sg_nents; | ||
785 | int sg_dma_direction; /* dma transfer direction */ | ||
786 | |||
787 | /* data phase of the active command (currently only valid for PIO/DMA) */ | ||
788 | int data_phase; | ||
789 | |||
790 | struct ide_task_s task; /* current command */ | ||
791 | 746 | ||
792 | unsigned int nsect; | 747 | struct ide_cmd cmd; /* current command */ |
793 | unsigned int nleft; | ||
794 | struct scatterlist *cursg; | ||
795 | unsigned int cursg_ofs; | ||
796 | 748 | ||
797 | int rqsize; /* max sectors per request */ | 749 | int rqsize; /* max sectors per request */ |
798 | int irq; /* our irq number */ | 750 | int irq; /* our irq number */ |
@@ -850,9 +802,18 @@ struct ide_host { | |||
850 | ide_hwif_t *ports[MAX_HOST_PORTS + 1]; | 802 | ide_hwif_t *ports[MAX_HOST_PORTS + 1]; |
851 | unsigned int n_ports; | 803 | unsigned int n_ports; |
852 | struct device *dev[2]; | 804 | struct device *dev[2]; |
805 | |||
853 | int (*init_chipset)(struct pci_dev *); | 806 | int (*init_chipset)(struct pci_dev *); |
807 | |||
808 | void (*get_lock)(irq_handler_t, void *); | ||
809 | void (*release_lock)(void); | ||
810 | |||
854 | irq_handler_t irq_handler; | 811 | irq_handler_t irq_handler; |
812 | |||
855 | unsigned long host_flags; | 813 | unsigned long host_flags; |
814 | |||
815 | int irq_flags; | ||
816 | |||
856 | void *host_priv; | 817 | void *host_priv; |
857 | ide_hwif_t *cur_port; /* for hosts requiring serialization */ | 818 | ide_hwif_t *cur_port; /* for hosts requiring serialization */ |
858 | 819 | ||
@@ -869,7 +830,7 @@ typedef ide_startstop_t (ide_handler_t)(ide_drive_t *); | |||
869 | typedef int (ide_expiry_t)(ide_drive_t *); | 830 | typedef int (ide_expiry_t)(ide_drive_t *); |
870 | 831 | ||
871 | /* used by ide-cd, ide-floppy, etc. */ | 832 | /* used by ide-cd, ide-floppy, etc. */ |
872 | typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned); | 833 | typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned); |
873 | 834 | ||
874 | extern struct mutex ide_setting_mtx; | 835 | extern struct mutex ide_setting_mtx; |
875 | 836 | ||
@@ -1045,10 +1006,11 @@ enum { | |||
1045 | }; | 1006 | }; |
1046 | 1007 | ||
1047 | /* DRV_NAME has to be defined in the driver before using the macro below */ | 1008 | /* DRV_NAME has to be defined in the driver before using the macro below */ |
1048 | #define __ide_debug_log(lvl, fmt, args...) \ | 1009 | #define __ide_debug_log(lvl, fmt, args...) \ |
1049 | { \ | 1010 | { \ |
1050 | if (unlikely(drive->debug_mask & lvl)) \ | 1011 | if (unlikely(drive->debug_mask & lvl)) \ |
1051 | printk(KERN_INFO DRV_NAME ": " fmt, ## args); \ | 1012 | printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \ |
1013 | __func__, ## args); \ | ||
1052 | } | 1014 | } |
1053 | 1015 | ||
1054 | /* | 1016 | /* |
@@ -1087,7 +1049,7 @@ int generic_ide_resume(struct device *); | |||
1087 | 1049 | ||
1088 | void ide_complete_power_step(ide_drive_t *, struct request *); | 1050 | void ide_complete_power_step(ide_drive_t *, struct request *); |
1089 | ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *); | 1051 | ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *); |
1090 | void ide_complete_pm_request(ide_drive_t *, struct request *); | 1052 | void ide_complete_pm_rq(ide_drive_t *, struct request *); |
1091 | void ide_check_pm_state(ide_drive_t *, struct request *); | 1053 | void ide_check_pm_state(ide_drive_t *, struct request *); |
1092 | 1054 | ||
1093 | /* | 1055 | /* |
@@ -1099,7 +1061,6 @@ void ide_check_pm_state(ide_drive_t *, struct request *); | |||
1099 | struct ide_driver { | 1061 | struct ide_driver { |
1100 | const char *version; | 1062 | const char *version; |
1101 | ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); | 1063 | ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t); |
1102 | int (*end_request)(ide_drive_t *, int, int); | ||
1103 | struct device_driver gen_driver; | 1064 | struct device_driver gen_driver; |
1104 | int (*probe)(ide_drive_t *); | 1065 | int (*probe)(ide_drive_t *); |
1105 | void (*remove)(ide_drive_t *); | 1066 | void (*remove)(ide_drive_t *); |
@@ -1130,19 +1091,15 @@ int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned l | |||
1130 | extern int ide_vlb_clk; | 1091 | extern int ide_vlb_clk; |
1131 | extern int ide_pci_clk; | 1092 | extern int ide_pci_clk; |
1132 | 1093 | ||
1133 | int ide_end_request(ide_drive_t *, int, int); | 1094 | unsigned int ide_rq_bytes(struct request *); |
1134 | int ide_end_dequeued_request(ide_drive_t *, struct request *, int, int); | 1095 | int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int); |
1135 | void ide_kill_rq(ide_drive_t *, struct request *); | 1096 | void ide_kill_rq(ide_drive_t *, struct request *); |
1136 | 1097 | ||
1137 | void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int, | 1098 | void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); |
1138 | ide_expiry_t *); | 1099 | void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int); |
1139 | void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int, | ||
1140 | ide_expiry_t *); | ||
1141 | 1100 | ||
1142 | void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int, | 1101 | void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *, |
1143 | ide_expiry_t *); | 1102 | unsigned int); |
1144 | |||
1145 | void ide_execute_pkt_cmd(ide_drive_t *); | ||
1146 | 1103 | ||
1147 | void ide_pad_transfer(ide_drive_t *, int, int); | 1104 | void ide_pad_transfer(ide_drive_t *, int, int); |
1148 | 1105 | ||
@@ -1164,25 +1121,24 @@ extern ide_startstop_t ide_do_reset (ide_drive_t *); | |||
1164 | extern int ide_devset_execute(ide_drive_t *drive, | 1121 | extern int ide_devset_execute(ide_drive_t *drive, |
1165 | const struct ide_devset *setting, int arg); | 1122 | const struct ide_devset *setting, int arg); |
1166 | 1123 | ||
1167 | extern void ide_end_drive_cmd(ide_drive_t *, u8, u8); | 1124 | void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8); |
1125 | int ide_complete_rq(ide_drive_t *, int, unsigned int); | ||
1168 | 1126 | ||
1169 | void ide_tf_dump(const char *, struct ide_taskfile *); | 1127 | void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd); |
1128 | void ide_tf_dump(const char *, struct ide_cmd *); | ||
1170 | 1129 | ||
1171 | void ide_exec_command(ide_hwif_t *, u8); | 1130 | void ide_exec_command(ide_hwif_t *, u8); |
1172 | u8 ide_read_status(ide_hwif_t *); | 1131 | u8 ide_read_status(ide_hwif_t *); |
1173 | u8 ide_read_altstatus(ide_hwif_t *); | 1132 | u8 ide_read_altstatus(ide_hwif_t *); |
1133 | void ide_write_devctl(ide_hwif_t *, u8); | ||
1174 | 1134 | ||
1175 | void ide_set_irq(ide_hwif_t *, int); | 1135 | void ide_dev_select(ide_drive_t *); |
1176 | 1136 | void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8); | |
1177 | void ide_tf_load(ide_drive_t *, ide_task_t *); | 1137 | void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8); |
1178 | void ide_tf_read(ide_drive_t *, ide_task_t *); | ||
1179 | |||
1180 | void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int); | ||
1181 | void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int); | ||
1182 | 1138 | ||
1183 | int ide_io_buffers(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int); | 1139 | void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); |
1140 | void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int); | ||
1184 | 1141 | ||
1185 | extern void SELECT_DRIVE(ide_drive_t *); | ||
1186 | void SELECT_MASK(ide_drive_t *, int); | 1142 | void SELECT_MASK(ide_drive_t *, int); |
1187 | 1143 | ||
1188 | u8 ide_read_error(ide_drive_t *); | 1144 | u8 ide_read_error(ide_drive_t *); |
@@ -1224,16 +1180,18 @@ int ide_cd_expiry(ide_drive_t *); | |||
1224 | 1180 | ||
1225 | int ide_cd_get_xferlen(struct request *); | 1181 | int ide_cd_get_xferlen(struct request *); |
1226 | 1182 | ||
1227 | ide_startstop_t ide_issue_pc(ide_drive_t *); | 1183 | ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *); |
1228 | 1184 | ||
1229 | ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); | 1185 | ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *); |
1230 | 1186 | ||
1231 | void task_end_request(ide_drive_t *, struct request *, u8); | 1187 | void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int); |
1232 | 1188 | ||
1233 | int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16); | 1189 | void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8); |
1234 | int ide_no_data_taskfile(ide_drive_t *, ide_task_t *); | ||
1235 | 1190 | ||
1236 | int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long); | 1191 | int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16); |
1192 | int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *); | ||
1193 | |||
1194 | int ide_taskfile_ioctl(ide_drive_t *, unsigned long); | ||
1237 | 1195 | ||
1238 | int ide_dev_read_id(ide_drive_t *, u8, u16 *); | 1196 | int ide_dev_read_id(ide_drive_t *, u8, u16 *); |
1239 | 1197 | ||
@@ -1335,6 +1293,10 @@ enum { | |||
1335 | IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19), | 1293 | IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19), |
1336 | /* serialize ports */ | 1294 | /* serialize ports */ |
1337 | IDE_HFLAG_SERIALIZE = (1 << 20), | 1295 | IDE_HFLAG_SERIALIZE = (1 << 20), |
1296 | /* host is DTC2278 */ | ||
1297 | IDE_HFLAG_DTC2278 = (1 << 21), | ||
1298 | /* 4 devices on a single set of I/O ports */ | ||
1299 | IDE_HFLAG_4DRIVES = (1 << 22), | ||
1338 | /* host is TRM290 */ | 1300 | /* host is TRM290 */ |
1339 | IDE_HFLAG_TRM290 = (1 << 23), | 1301 | IDE_HFLAG_TRM290 = (1 << 23), |
1340 | /* use 32-bit I/O ops */ | 1302 | /* use 32-bit I/O ops */ |
@@ -1362,7 +1324,12 @@ enum { | |||
1362 | 1324 | ||
1363 | struct ide_port_info { | 1325 | struct ide_port_info { |
1364 | char *name; | 1326 | char *name; |
1327 | |||
1365 | int (*init_chipset)(struct pci_dev *); | 1328 | int (*init_chipset)(struct pci_dev *); |
1329 | |||
1330 | void (*get_lock)(irq_handler_t, void *); | ||
1331 | void (*release_lock)(void); | ||
1332 | |||
1366 | void (*init_iops)(ide_hwif_t *); | 1333 | void (*init_iops)(ide_hwif_t *); |
1367 | void (*init_hwif)(ide_hwif_t *); | 1334 | void (*init_hwif)(ide_hwif_t *); |
1368 | int (*init_dma)(ide_hwif_t *, | 1335 | int (*init_dma)(ide_hwif_t *, |
@@ -1379,6 +1346,9 @@ struct ide_port_info { | |||
1379 | u16 max_sectors; /* if < than the default one */ | 1346 | u16 max_sectors; /* if < than the default one */ |
1380 | 1347 | ||
1381 | u32 host_flags; | 1348 | u32 host_flags; |
1349 | |||
1350 | int irq_flags; | ||
1351 | |||
1382 | u8 pio_mask; | 1352 | u8 pio_mask; |
1383 | u8 swdma_mask; | 1353 | u8 swdma_mask; |
1384 | u8 mwdma_mask; | 1354 | u8 mwdma_mask; |
@@ -1398,8 +1368,8 @@ int ide_pci_resume(struct pci_dev *); | |||
1398 | #define ide_pci_resume NULL | 1368 | #define ide_pci_resume NULL |
1399 | #endif | 1369 | #endif |
1400 | 1370 | ||
1401 | void ide_map_sg(ide_drive_t *, struct request *); | 1371 | void ide_map_sg(ide_drive_t *, struct ide_cmd *); |
1402 | void ide_init_sg_cmd(ide_drive_t *, struct request *); | 1372 | void ide_init_sg_cmd(struct ide_cmd *, unsigned int); |
1403 | 1373 | ||
1404 | #define BAD_DMA_DRIVE 0 | 1374 | #define BAD_DMA_DRIVE 0 |
1405 | #define GOOD_DMA_DRIVE 1 | 1375 | #define GOOD_DMA_DRIVE 1 |
@@ -1433,18 +1403,18 @@ ide_startstop_t ide_dma_intr(ide_drive_t *); | |||
1433 | int ide_allocate_dma_engine(ide_hwif_t *); | 1403 | int ide_allocate_dma_engine(ide_hwif_t *); |
1434 | void ide_release_dma_engine(ide_hwif_t *); | 1404 | void ide_release_dma_engine(ide_hwif_t *); |
1435 | 1405 | ||
1436 | int ide_build_sglist(ide_drive_t *, struct request *); | 1406 | int ide_dma_prepare(ide_drive_t *, struct ide_cmd *); |
1437 | void ide_destroy_dmatable(ide_drive_t *); | 1407 | void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *); |
1438 | 1408 | ||
1439 | #ifdef CONFIG_BLK_DEV_IDEDMA_SFF | 1409 | #ifdef CONFIG_BLK_DEV_IDEDMA_SFF |
1440 | int config_drive_for_dma(ide_drive_t *); | 1410 | int config_drive_for_dma(ide_drive_t *); |
1441 | extern int ide_build_dmatable(ide_drive_t *, struct request *); | 1411 | int ide_build_dmatable(ide_drive_t *, struct ide_cmd *); |
1442 | void ide_dma_host_set(ide_drive_t *, int); | 1412 | void ide_dma_host_set(ide_drive_t *, int); |
1443 | extern int ide_dma_setup(ide_drive_t *); | 1413 | int ide_dma_setup(ide_drive_t *, struct ide_cmd *); |
1444 | void ide_dma_exec_cmd(ide_drive_t *, u8); | ||
1445 | extern void ide_dma_start(ide_drive_t *); | 1414 | extern void ide_dma_start(ide_drive_t *); |
1446 | int ide_dma_end(ide_drive_t *); | 1415 | int ide_dma_end(ide_drive_t *); |
1447 | int ide_dma_test_irq(ide_drive_t *); | 1416 | int ide_dma_test_irq(ide_drive_t *); |
1417 | int ide_dma_sff_timer_expiry(ide_drive_t *); | ||
1448 | u8 ide_dma_sff_read_status(ide_hwif_t *); | 1418 | u8 ide_dma_sff_read_status(ide_hwif_t *); |
1449 | extern const struct ide_dma_ops sff_dma_ops; | 1419 | extern const struct ide_dma_ops sff_dma_ops; |
1450 | #else | 1420 | #else |
@@ -1452,7 +1422,6 @@ static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; } | |||
1452 | #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ | 1422 | #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */ |
1453 | 1423 | ||
1454 | void ide_dma_lost_irq(ide_drive_t *); | 1424 | void ide_dma_lost_irq(ide_drive_t *); |
1455 | void ide_dma_timeout(ide_drive_t *); | ||
1456 | ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int); | 1425 | ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int); |
1457 | 1426 | ||
1458 | #else | 1427 | #else |
@@ -1465,8 +1434,13 @@ static inline void ide_dma_on(ide_drive_t *drive) { ; } | |||
1465 | static inline void ide_dma_verbose(ide_drive_t *drive) { ; } | 1434 | static inline void ide_dma_verbose(ide_drive_t *drive) { ; } |
1466 | static inline int ide_set_dma(ide_drive_t *drive) { return 1; } | 1435 | static inline int ide_set_dma(ide_drive_t *drive) { return 1; } |
1467 | static inline void ide_check_dma_crc(ide_drive_t *drive) { ; } | 1436 | static inline void ide_check_dma_crc(ide_drive_t *drive) { ; } |
1437 | static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; } | ||
1468 | static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; } | 1438 | static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; } |
1469 | static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; } | 1439 | static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; } |
1440 | static inline int ide_dma_prepare(ide_drive_t *drive, | ||
1441 | struct ide_cmd *cmd) { return 1; } | ||
1442 | static inline void ide_dma_unmap_sg(ide_drive_t *drive, | ||
1443 | struct ide_cmd *cmd) { ; } | ||
1470 | #endif /* CONFIG_BLK_DEV_IDEDMA */ | 1444 | #endif /* CONFIG_BLK_DEV_IDEDMA */ |
1471 | 1445 | ||
1472 | #ifdef CONFIG_BLK_DEV_IDEACPI | 1446 | #ifdef CONFIG_BLK_DEV_IDEACPI |
@@ -1518,7 +1492,7 @@ static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data) | |||
1518 | 1492 | ||
1519 | extern void ide_toggle_bounce(ide_drive_t *drive, int on); | 1493 | extern void ide_toggle_bounce(ide_drive_t *drive, int on); |
1520 | 1494 | ||
1521 | u64 ide_get_lba_addr(struct ide_taskfile *, int); | 1495 | u64 ide_get_lba_addr(struct ide_cmd *, int); |
1522 | u8 ide_dump_status(ide_drive_t *, const char *, u8); | 1496 | u8 ide_dump_status(ide_drive_t *, const char *, u8); |
1523 | 1497 | ||
1524 | struct ide_timing { | 1498 | struct ide_timing { |
diff --git a/include/linux/idr.h b/include/linux/idr.h index dd846df8cd32..e968db71e33a 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h | |||
@@ -106,6 +106,7 @@ int idr_get_new(struct idr *idp, void *ptr, int *id); | |||
106 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); | 106 | int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); |
107 | int idr_for_each(struct idr *idp, | 107 | int idr_for_each(struct idr *idp, |
108 | int (*fn)(int id, void *p, void *data), void *data); | 108 | int (*fn)(int id, void *p, void *data), void *data); |
109 | void *idr_get_next(struct idr *idp, int *nextid); | ||
109 | void *idr_replace(struct idr *idp, void *ptr, int id); | 110 | void *idr_replace(struct idr *idp, void *ptr, int id); |
110 | void idr_remove(struct idr *idp, int id); | 111 | void idr_remove(struct idr *idp, int id); |
111 | void idr_remove_all(struct idr *idp); | 112 | void idr_remove_all(struct idr *idp); |
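idr_get_next() returns the pointer registered at the lowest id >= *nextid and writes that id back, which allows open-coded iteration instead of the idr_for_each() callback style. A minimal sketch with illustrative names.

#include <linux/idr.h>

static void example_idr_walk(struct idr *idp)
{
	void *entry;
	int id = 0;

	while ((entry = idr_get_next(idp, &id)) != NULL) {
		/* 'entry' is the pointer registered under 'id' */
		id++;			/* continue past the entry just returned */
	}
}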
diff --git a/include/linux/init.h b/include/linux/init.h index 68cb0265d009..0e06c176f185 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -2,6 +2,8 @@ | |||
2 | #define _LINUX_INIT_H | 2 | #define _LINUX_INIT_H |
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <linux/section-names.h> | ||
6 | #include <linux/stringify.h> | ||
5 | 7 | ||
6 | /* These macros are used to mark some functions or | 8 | /* These macros are used to mark some functions or |
7 | * initialized data (doesn't apply to uninitialized data) | 9 | * initialized data (doesn't apply to uninitialized data) |
@@ -60,14 +62,6 @@ | |||
60 | #define __refdata __section(.ref.data) | 62 | #define __refdata __section(.ref.data) |
61 | #define __refconst __section(.ref.rodata) | 63 | #define __refconst __section(.ref.rodata) |
62 | 64 | ||
63 | /* backward compatibility note | ||
64 | * A few places hardcode the old section names: | ||
65 | * .text.init.refok | ||
66 | * .data.init.refok | ||
67 | * .exit.text.refok | ||
68 | * They should be converted to use the defines from this file | ||
69 | */ | ||
70 | |||
71 | /* compatibility defines */ | 65 | /* compatibility defines */ |
72 | #define __init_refok __ref | 66 | #define __init_refok __ref |
73 | #define __initdata_refok __refdata | 67 | #define __initdata_refok __refdata |
@@ -107,7 +101,7 @@ | |||
107 | #define __memexitconst __section(.memexit.rodata) | 101 | #define __memexitconst __section(.memexit.rodata) |
108 | 102 | ||
109 | /* For assembly routines */ | 103 | /* For assembly routines */ |
110 | #define __HEAD .section ".head.text","ax" | 104 | #define __HEAD .section __stringify(HEAD_TEXT_SECTION),"ax" |
111 | #define __INIT .section ".init.text","ax" | 105 | #define __INIT .section ".init.text","ax" |
112 | #define __FINIT .previous | 106 | #define __FINIT .previous |
113 | 107 | ||
@@ -247,6 +241,7 @@ struct obs_kernel_param { | |||
247 | 241 | ||
248 | /* Relies on boot_command_line being set */ | 242 | /* Relies on boot_command_line being set */ |
249 | void __init parse_early_param(void); | 243 | void __init parse_early_param(void); |
244 | void __init parse_early_options(char *cmdline); | ||
250 | #endif /* __ASSEMBLY__ */ | 245 | #endif /* __ASSEMBLY__ */ |
251 | 246 | ||
252 | /** | 247 | /** |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index af1de95e711e..d87247d2641f 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/irqflags.h> | 5 | #include <linux/irqflags.h> |
6 | #include <linux/utsname.h> | 6 | #include <linux/utsname.h> |
7 | #include <linux/lockdep.h> | 7 | #include <linux/lockdep.h> |
8 | #include <linux/ftrace.h> | ||
8 | #include <linux/ipc.h> | 9 | #include <linux/ipc.h> |
9 | #include <linux/pid_namespace.h> | 10 | #include <linux/pid_namespace.h> |
10 | #include <linux/user_namespace.h> | 11 | #include <linux/user_namespace.h> |
@@ -14,19 +15,6 @@ | |||
14 | extern struct files_struct init_files; | 15 | extern struct files_struct init_files; |
15 | extern struct fs_struct init_fs; | 16 | extern struct fs_struct init_fs; |
16 | 17 | ||
17 | #define INIT_KIOCTX(name, which_mm) \ | ||
18 | { \ | ||
19 | .users = ATOMIC_INIT(1), \ | ||
20 | .dead = 0, \ | ||
21 | .mm = &which_mm, \ | ||
22 | .user_id = 0, \ | ||
23 | .next = NULL, \ | ||
24 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \ | ||
25 | .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \ | ||
26 | .reqs_active = 0U, \ | ||
27 | .max_reqs = ~0U, \ | ||
28 | } | ||
29 | |||
30 | #define INIT_MM(name) \ | 18 | #define INIT_MM(name) \ |
31 | { \ | 19 | { \ |
32 | .mm_rb = RB_ROOT, \ | 20 | .mm_rb = RB_ROOT, \ |
@@ -185,6 +173,7 @@ extern struct cred init_cred; | |||
185 | INIT_IDS \ | 173 | INIT_IDS \ |
186 | INIT_TRACE_IRQFLAGS \ | 174 | INIT_TRACE_IRQFLAGS \ |
187 | INIT_LOCKDEP \ | 175 | INIT_LOCKDEP \ |
176 | INIT_FTRACE_GRAPH \ | ||
188 | } | 177 | } |
189 | 178 | ||
190 | 179 | ||
diff --git a/include/linux/input.h b/include/linux/input.h index 6b28048fc568..0e6ff5de3588 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
@@ -106,6 +106,7 @@ struct input_absinfo { | |||
106 | 106 | ||
107 | #define SYN_REPORT 0 | 107 | #define SYN_REPORT 0 |
108 | #define SYN_CONFIG 1 | 108 | #define SYN_CONFIG 1 |
109 | #define SYN_MT_REPORT 2 | ||
109 | 110 | ||
110 | /* | 111 | /* |
111 | * Keys and buttons | 112 | * Keys and buttons |
@@ -445,6 +446,7 @@ struct input_absinfo { | |||
445 | #define BTN_STYLUS2 0x14c | 446 | #define BTN_STYLUS2 0x14c |
446 | #define BTN_TOOL_DOUBLETAP 0x14d | 447 | #define BTN_TOOL_DOUBLETAP 0x14d |
447 | #define BTN_TOOL_TRIPLETAP 0x14e | 448 | #define BTN_TOOL_TRIPLETAP 0x14e |
449 | #define BTN_TOOL_QUADTAP 0x14f /* Four fingers on trackpad */ | ||
448 | 450 | ||
449 | #define BTN_WHEEL 0x150 | 451 | #define BTN_WHEEL 0x150 |
450 | #define BTN_GEAR_DOWN 0x150 | 452 | #define BTN_GEAR_DOWN 0x150 |
@@ -644,6 +646,17 @@ struct input_absinfo { | |||
644 | #define ABS_TOOL_WIDTH 0x1c | 646 | #define ABS_TOOL_WIDTH 0x1c |
645 | #define ABS_VOLUME 0x20 | 647 | #define ABS_VOLUME 0x20 |
646 | #define ABS_MISC 0x28 | 648 | #define ABS_MISC 0x28 |
649 | |||
650 | #define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */ | ||
651 | #define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */ | ||
652 | #define ABS_MT_WIDTH_MAJOR 0x32 /* Major axis of approaching ellipse */ | ||
653 | #define ABS_MT_WIDTH_MINOR 0x33 /* Minor axis (omit if circular) */ | ||
654 | #define ABS_MT_ORIENTATION 0x34 /* Ellipse orientation */ | ||
655 | #define ABS_MT_POSITION_X 0x35 /* Center X ellipse position */ | ||
656 | #define ABS_MT_POSITION_Y 0x36 /* Center Y ellipse position */ | ||
657 | #define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */ | ||
658 | #define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */ | ||
659 | |||
647 | #define ABS_MAX 0x3f | 660 | #define ABS_MAX 0x3f |
648 | #define ABS_CNT (ABS_MAX+1) | 661 | #define ABS_CNT (ABS_MAX+1) |
649 | 662 | ||
@@ -743,6 +756,12 @@ struct input_absinfo { | |||
743 | #define BUS_ATARI 0x1B | 756 | #define BUS_ATARI 0x1B |
744 | 757 | ||
745 | /* | 758 | /* |
759 | * MT_TOOL types | ||
760 | */ | ||
761 | #define MT_TOOL_FINGER 0 | ||
762 | #define MT_TOOL_PEN 1 | ||
763 | |||
764 | /* | ||
746 | * Values describing the status of a force-feedback effect | 765 | * Values describing the status of a force-feedback effect |
747 | */ | 766 | */ |
748 | #define FF_STATUS_STOPPED 0x00 | 767 | #define FF_STATUS_STOPPED 0x00 |
@@ -1311,6 +1330,11 @@ static inline void input_sync(struct input_dev *dev) | |||
1311 | input_event(dev, EV_SYN, SYN_REPORT, 0); | 1330 | input_event(dev, EV_SYN, SYN_REPORT, 0); |
1312 | } | 1331 | } |
1313 | 1332 | ||
1333 | static inline void input_mt_sync(struct input_dev *dev) | ||
1334 | { | ||
1335 | input_event(dev, EV_SYN, SYN_MT_REPORT, 0); | ||
1336 | } | ||
1337 | |||
1314 | void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code); | 1338 | void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code); |
1315 | 1339 | ||
1316 | static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat) | 1340 | static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat) |
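Not part of the diff itself: the ABS_MT_* axes and input_mt_sync() added above form the multi-touch event protocol — a driver emits one group of ABS_MT_* values per contact, closes each group with SYN_MT_REPORT via input_mt_sync(), and closes the whole frame with input_sync(). A minimal sketch of that sequence; the function name and the x/y/touch_major parameters are hypothetical:

/* Hypothetical helper: report a single contact in one event frame. */
static void example_report_contact(struct input_dev *dev,
				   int x, int y, int touch_major)
{
	input_report_abs(dev, ABS_MT_POSITION_X, x);
	input_report_abs(dev, ABS_MT_POSITION_Y, y);
	input_report_abs(dev, ABS_MT_TOUCH_MAJOR, touch_major);
	input_mt_sync(dev);	/* SYN_MT_REPORT: end of this contact */
	input_sync(dev);	/* SYN_REPORT: end of the event frame */
}

For several simultaneous contacts the driver would repeat the ABS_MT_* group and input_mt_sync() once per contact before the final input_sync().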
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index d2e3cbfba14f..aa8c53171233 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
123 | #define ecap_eim_support(e) ((e >> 4) & 0x1) | 123 | #define ecap_eim_support(e) ((e >> 4) & 0x1) |
124 | #define ecap_ir_support(e) ((e >> 3) & 0x1) | 124 | #define ecap_ir_support(e) ((e >> 3) & 0x1) |
125 | #define ecap_max_handle_mask(e) ((e >> 20) & 0xf) | 125 | #define ecap_max_handle_mask(e) ((e >> 20) & 0xf) |
126 | 126 | #define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */ | |
127 | 127 | ||
128 | /* IOTLB_REG */ | 128 | /* IOTLB_REG */ |
129 | #define DMA_TLB_FLUSH_GRANU_OFFSET 60 | 129 | #define DMA_TLB_FLUSH_GRANU_OFFSET 60 |
@@ -164,6 +164,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
164 | #define DMA_GCMD_QIE (((u32)1) << 26) | 164 | #define DMA_GCMD_QIE (((u32)1) << 26) |
165 | #define DMA_GCMD_SIRTP (((u32)1) << 24) | 165 | #define DMA_GCMD_SIRTP (((u32)1) << 24) |
166 | #define DMA_GCMD_IRE (((u32) 1) << 25) | 166 | #define DMA_GCMD_IRE (((u32) 1) << 25) |
167 | #define DMA_GCMD_CFI (((u32) 1) << 23) | ||
167 | 168 | ||
168 | /* GSTS_REG */ | 169 | /* GSTS_REG */ |
169 | #define DMA_GSTS_TES (((u32)1) << 31) | 170 | #define DMA_GSTS_TES (((u32)1) << 31) |
@@ -174,6 +175,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
174 | #define DMA_GSTS_QIES (((u32)1) << 26) | 175 | #define DMA_GSTS_QIES (((u32)1) << 26) |
175 | #define DMA_GSTS_IRTPS (((u32)1) << 24) | 176 | #define DMA_GSTS_IRTPS (((u32)1) << 24) |
176 | #define DMA_GSTS_IRES (((u32)1) << 25) | 177 | #define DMA_GSTS_IRES (((u32)1) << 25) |
178 | #define DMA_GSTS_CFIS (((u32)1) << 23) | ||
177 | 179 | ||
178 | /* CCMD_REG */ | 180 | /* CCMD_REG */ |
179 | #define DMA_CCMD_ICC (((u64)1) << 63) | 181 | #define DMA_CCMD_ICC (((u64)1) << 63) |
@@ -284,6 +286,14 @@ struct iommu_flush { | |||
284 | unsigned int size_order, u64 type, int non_present_entry_flush); | 286 | unsigned int size_order, u64 type, int non_present_entry_flush); |
285 | }; | 287 | }; |
286 | 288 | ||
289 | enum { | ||
290 | SR_DMAR_FECTL_REG, | ||
291 | SR_DMAR_FEDATA_REG, | ||
292 | SR_DMAR_FEADDR_REG, | ||
293 | SR_DMAR_FEUADDR_REG, | ||
294 | MAX_SR_DMAR_REGS | ||
295 | }; | ||
296 | |||
287 | struct intel_iommu { | 297 | struct intel_iommu { |
288 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ | 298 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ |
289 | u64 cap; | 299 | u64 cap; |
@@ -292,6 +302,8 @@ struct intel_iommu { | |||
292 | spinlock_t register_lock; /* protect register handling */ | 302 | spinlock_t register_lock; /* protect register handling */ |
293 | int seq_id; /* sequence id of the iommu */ | 303 | int seq_id; /* sequence id of the iommu */ |
294 | int agaw; /* agaw of this iommu */ | 304 | int agaw; /* agaw of this iommu */ |
305 | unsigned int irq; | ||
306 | unsigned char name[13]; /* Device Name */ | ||
295 | 307 | ||
296 | #ifdef CONFIG_DMAR | 308 | #ifdef CONFIG_DMAR |
297 | unsigned long *domain_ids; /* bitmap of domains */ | 309 | unsigned long *domain_ids; /* bitmap of domains */ |
@@ -299,11 +311,11 @@ struct intel_iommu { | |||
299 | spinlock_t lock; /* protect context, domain ids */ | 311 | spinlock_t lock; /* protect context, domain ids */ |
300 | struct root_entry *root_entry; /* virtual address */ | 312 | struct root_entry *root_entry; /* virtual address */ |
301 | 313 | ||
302 | unsigned int irq; | ||
303 | unsigned char name[7]; /* Device Name */ | ||
304 | struct iommu_flush flush; | 314 | struct iommu_flush flush; |
305 | #endif | 315 | #endif |
306 | struct q_inval *qi; /* Queued invalidation info */ | 316 | struct q_inval *qi; /* Queued invalidation info */ |
317 | u32 *iommu_state; /* Store iommu states between suspend and resume.*/ | ||
318 | |||
307 | #ifdef CONFIG_INTR_REMAP | 319 | #ifdef CONFIG_INTR_REMAP |
308 | struct ir_table *ir_table; /* Interrupt remapping info */ | 320 | struct ir_table *ir_table; /* Interrupt remapping info */ |
309 | #endif | 321 | #endif |
@@ -321,6 +333,8 @@ extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); | |||
321 | extern int alloc_iommu(struct dmar_drhd_unit *drhd); | 333 | extern int alloc_iommu(struct dmar_drhd_unit *drhd); |
322 | extern void free_iommu(struct intel_iommu *iommu); | 334 | extern void free_iommu(struct intel_iommu *iommu); |
323 | extern int dmar_enable_qi(struct intel_iommu *iommu); | 335 | extern int dmar_enable_qi(struct intel_iommu *iommu); |
336 | extern void dmar_disable_qi(struct intel_iommu *iommu); | ||
337 | extern int dmar_reenable_qi(struct intel_iommu *iommu); | ||
324 | extern void qi_global_iec(struct intel_iommu *iommu); | 338 | extern void qi_global_iec(struct intel_iommu *iommu); |
325 | 339 | ||
326 | extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, | 340 | extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, |
@@ -331,11 +345,4 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, | |||
331 | 345 | ||
332 | extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); | 346 | extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); |
333 | 347 | ||
334 | extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); | ||
335 | extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t); | ||
336 | extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int); | ||
337 | extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int); | ||
338 | extern int intel_map_sg(struct device *, struct scatterlist *, int, int); | ||
339 | extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int); | ||
340 | |||
341 | #endif | 348 | #endif |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 0c9cb63e6895..91bb76f44f14 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -59,6 +59,18 @@ | |||
59 | #define IRQF_NOBALANCING 0x00000800 | 59 | #define IRQF_NOBALANCING 0x00000800 |
60 | #define IRQF_IRQPOLL 0x00001000 | 60 | #define IRQF_IRQPOLL 0x00001000 |
61 | 61 | ||
62 | /* | ||
63 | * Bits used by threaded handlers: | ||
64 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run | ||
65 | * IRQTF_DIED - handler thread died | ||
66 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed | ||
67 | */ | ||
68 | enum { | ||
69 | IRQTF_RUNTHREAD, | ||
70 | IRQTF_DIED, | ||
71 | IRQTF_WARNED, | ||
72 | }; | ||
73 | |||
62 | typedef irqreturn_t (*irq_handler_t)(int, void *); | 74 | typedef irqreturn_t (*irq_handler_t)(int, void *); |
63 | 75 | ||
64 | /** | 76 | /** |
@@ -71,6 +83,9 @@ typedef irqreturn_t (*irq_handler_t)(int, void *); | |||
71 | * @next: pointer to the next irqaction for shared interrupts | 83 | * @next: pointer to the next irqaction for shared interrupts |
72 | * @irq: interrupt number | 84 | * @irq: interrupt number |
73 | * @dir: pointer to the proc/irq/NN/name entry | 85 | * @dir: pointer to the proc/irq/NN/name entry |
86 | * @thread_fn: interrupt handler function for threaded interrupts | ||
87 | * @thread: thread pointer for threaded interrupts | ||
88 | * @thread_flags: flags related to @thread | ||
74 | */ | 89 | */ |
75 | struct irqaction { | 90 | struct irqaction { |
76 | irq_handler_t handler; | 91 | irq_handler_t handler; |
@@ -81,18 +96,68 @@ struct irqaction { | |||
81 | struct irqaction *next; | 96 | struct irqaction *next; |
82 | int irq; | 97 | int irq; |
83 | struct proc_dir_entry *dir; | 98 | struct proc_dir_entry *dir; |
99 | irq_handler_t thread_fn; | ||
100 | struct task_struct *thread; | ||
101 | unsigned long thread_flags; | ||
84 | }; | 102 | }; |
85 | 103 | ||
86 | extern irqreturn_t no_action(int cpl, void *dev_id); | 104 | extern irqreturn_t no_action(int cpl, void *dev_id); |
87 | extern int __must_check request_irq(unsigned int, irq_handler_t handler, | 105 | |
88 | unsigned long, const char *, void *); | 106 | #ifdef CONFIG_GENERIC_HARDIRQS |
107 | extern int __must_check | ||
108 | request_threaded_irq(unsigned int irq, irq_handler_t handler, | ||
109 | irq_handler_t thread_fn, | ||
110 | unsigned long flags, const char *name, void *dev); | ||
111 | |||
112 | static inline int __must_check | ||
113 | request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, | ||
114 | const char *name, void *dev) | ||
115 | { | ||
116 | return request_threaded_irq(irq, handler, NULL, flags, name, dev); | ||
117 | } | ||
118 | |||
119 | extern void exit_irq_thread(void); | ||
120 | #else | ||
121 | |||
122 | extern int __must_check | ||
123 | request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags, | ||
124 | const char *name, void *dev); | ||
125 | |||
126 | /* | ||
127 | * Special function to avoid ifdeffery in kernel/irq/devres.c which | ||
128 | * gets magically built by GENERIC_HARDIRQS=n architectures (sparc, | ||
129 | * m68k). I really love these $@%#!* obvious Makefile references: | ||
130 | * ../../../kernel/irq/devres.o | ||
131 | */ | ||
132 | static inline int __must_check | ||
133 | request_threaded_irq(unsigned int irq, irq_handler_t handler, | ||
134 | irq_handler_t thread_fn, | ||
135 | unsigned long flags, const char *name, void *dev) | ||
136 | { | ||
137 | return request_irq(irq, handler, flags, name, dev); | ||
138 | } | ||
139 | |||
140 | static inline void exit_irq_thread(void) { } | ||
141 | #endif | ||
142 | |||
89 | extern void free_irq(unsigned int, void *); | 143 | extern void free_irq(unsigned int, void *); |
90 | 144 | ||
91 | struct device; | 145 | struct device; |
92 | 146 | ||
93 | extern int __must_check devm_request_irq(struct device *dev, unsigned int irq, | 147 | extern int __must_check |
94 | irq_handler_t handler, unsigned long irqflags, | 148 | devm_request_threaded_irq(struct device *dev, unsigned int irq, |
95 | const char *devname, void *dev_id); | 149 | irq_handler_t handler, irq_handler_t thread_fn, |
150 | unsigned long irqflags, const char *devname, | ||
151 | void *dev_id); | ||
152 | |||
153 | static inline int __must_check | ||
154 | devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler, | ||
155 | unsigned long irqflags, const char *devname, void *dev_id) | ||
156 | { | ||
157 | return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags, | ||
158 | devname, dev_id); | ||
159 | } | ||
160 | |||
96 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); | 161 | extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); |
97 | 162 | ||
98 | /* | 163 | /* |
@@ -117,6 +182,15 @@ extern void disable_irq_nosync(unsigned int irq); | |||
117 | extern void disable_irq(unsigned int irq); | 182 | extern void disable_irq(unsigned int irq); |
118 | extern void enable_irq(unsigned int irq); | 183 | extern void enable_irq(unsigned int irq); |
119 | 184 | ||
185 | /* The following three functions are for the core kernel use only. */ | ||
186 | extern void suspend_device_irqs(void); | ||
187 | extern void resume_device_irqs(void); | ||
188 | #ifdef CONFIG_PM_SLEEP | ||
189 | extern int check_wakeup_irqs(void); | ||
190 | #else | ||
191 | static inline int check_wakeup_irqs(void) { return 0; } | ||
192 | #endif | ||
193 | |||
120 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) | 194 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) |
121 | 195 | ||
122 | extern cpumask_var_t irq_default_affinity; | 196 | extern cpumask_var_t irq_default_affinity; |
@@ -269,6 +343,11 @@ enum | |||
269 | NR_SOFTIRQS | 343 | NR_SOFTIRQS |
270 | }; | 344 | }; |
271 | 345 | ||
346 | /* map softirq index to softirq name. update 'softirq_to_name' in | ||
347 | * kernel/softirq.c when adding a new softirq. | ||
348 | */ | ||
349 | extern char *softirq_to_name[NR_SOFTIRQS]; | ||
350 | |||
272 | /* softirq mask and active fields moved to irq_cpustat_t in | 351 | /* softirq mask and active fields moved to irq_cpustat_t in |
273 | * asm/hardirq.h to get better cache usage. KAO | 352 | * asm/hardirq.h to get better cache usage. KAO |
274 | */ | 353 | */ |
@@ -285,6 +364,7 @@ extern void softirq_init(void); | |||
285 | #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) | 364 | #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) |
286 | extern void raise_softirq_irqoff(unsigned int nr); | 365 | extern void raise_softirq_irqoff(unsigned int nr); |
287 | extern void raise_softirq(unsigned int nr); | 366 | extern void raise_softirq(unsigned int nr); |
367 | extern void wakeup_softirqd(void); | ||
288 | 368 | ||
289 | /* This is the worklist that queues up per-cpu softirq work. | 369 | /* This is the worklist that queues up per-cpu softirq work. |
290 | * | 370 | * |
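Not part of the diff: a minimal sketch of the split handler model that request_threaded_irq() above introduces. The hard handler runs in interrupt context, does only what must be done immediately and returns IRQ_WAKE_THREAD; the kernel then wakes the per-irqaction thread, which runs thread_fn in process context. struct foo_device and the foo_*() register helpers are assumptions for illustration only.

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!foo_irq_pending(foo))		/* shared line, not ours */
		return IRQ_NONE;

	foo_mask_irq(foo);			/* quiesce the source ... */
	return IRQ_WAKE_THREAD;			/* ... and defer the real work */
}

static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	foo_process_events(foo);		/* may sleep here */
	foo_unmask_irq(foo);
	return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_device *foo)
{
	return request_threaded_irq(foo->irq, foo_hardirq, foo_irq_thread,
				    IRQF_SHARED, "foo", foo);
}

Passing a NULL thread_fn keeps the old behaviour, which is exactly how the request_irq() wrapper above is implemented.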
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 8a7bfb1b6ca0..3af4ffd591b9 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #define IOMMU_READ (1) | 22 | #define IOMMU_READ (1) |
23 | #define IOMMU_WRITE (2) | 23 | #define IOMMU_WRITE (2) |
24 | #define IOMMU_CACHE (4) /* DMA cache coherency */ | ||
24 | 25 | ||
25 | struct device; | 26 | struct device; |
26 | 27 | ||
@@ -28,6 +29,8 @@ struct iommu_domain { | |||
28 | void *priv; | 29 | void *priv; |
29 | }; | 30 | }; |
30 | 31 | ||
32 | #define IOMMU_CAP_CACHE_COHERENCY 0x1 | ||
33 | |||
31 | struct iommu_ops { | 34 | struct iommu_ops { |
32 | int (*domain_init)(struct iommu_domain *domain); | 35 | int (*domain_init)(struct iommu_domain *domain); |
33 | void (*domain_destroy)(struct iommu_domain *domain); | 36 | void (*domain_destroy)(struct iommu_domain *domain); |
@@ -39,6 +42,8 @@ struct iommu_ops { | |||
39 | size_t size); | 42 | size_t size); |
40 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, | 43 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, |
41 | unsigned long iova); | 44 | unsigned long iova); |
45 | int (*domain_has_cap)(struct iommu_domain *domain, | ||
46 | unsigned long cap); | ||
42 | }; | 47 | }; |
43 | 48 | ||
44 | #ifdef CONFIG_IOMMU_API | 49 | #ifdef CONFIG_IOMMU_API |
@@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova, | |||
57 | size_t size); | 62 | size_t size); |
58 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | 63 | extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, |
59 | unsigned long iova); | 64 | unsigned long iova); |
65 | extern int iommu_domain_has_cap(struct iommu_domain *domain, | ||
66 | unsigned long cap); | ||
60 | 67 | ||
61 | #else /* CONFIG_IOMMU_API */ | 68 | #else /* CONFIG_IOMMU_API */ |
62 | 69 | ||
@@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, | |||
107 | return 0; | 114 | return 0; |
108 | } | 115 | } |
109 | 116 | ||
117 | static inline int domain_has_cap(struct iommu_domain *domain, | ||
118 | unsigned long cap) | ||
119 | { | ||
120 | return 0; | ||
121 | } | ||
122 | |||
110 | #endif /* CONFIG_IOMMU_API */ | 123 | #endif /* CONFIG_IOMMU_API */ |
111 | 124 | ||
112 | #endif /* __LINUX_IOMMU_H */ | 125 | #endif /* __LINUX_IOMMU_H */ |
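Not part of the diff: a sketch of how a caller such as device-assignment code might combine the new IOMMU_CACHE protection bit with iommu_domain_has_cap(). It assumes the iommu_map_range() interface already declared in this header; the function and variable names below are made up.

static int example_map(struct iommu_domain *domain, unsigned long iova,
		       phys_addr_t paddr, size_t size)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	/* Only request snooped (coherent) mappings if the HW can do them. */
	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
		prot |= IOMMU_CACHE;

	return iommu_map_range(domain, iova, paddr, size, prot);
}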
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index ea330f9e7100..3bf40e246a80 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h | |||
@@ -25,7 +25,7 @@ struct ipc_ids { | |||
25 | }; | 25 | }; |
26 | 26 | ||
27 | struct ipc_namespace { | 27 | struct ipc_namespace { |
28 | struct kref kref; | 28 | atomic_t count; |
29 | struct ipc_ids ids[3]; | 29 | struct ipc_ids ids[3]; |
30 | 30 | ||
31 | int sem_ctls[4]; | 31 | int sem_ctls[4]; |
@@ -44,25 +44,57 @@ struct ipc_namespace { | |||
44 | int shm_tot; | 44 | int shm_tot; |
45 | 45 | ||
46 | struct notifier_block ipcns_nb; | 46 | struct notifier_block ipcns_nb; |
47 | |||
48 | /* The kern_mount of the mqueuefs sb. We take a ref on it */ | ||
49 | struct vfsmount *mq_mnt; | ||
50 | |||
51 | /* # queues in this ns, protected by mq_lock */ | ||
52 | unsigned int mq_queues_count; | ||
53 | |||
54 | /* next fields are set through sysctl */ | ||
55 | unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */ | ||
56 | unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */ | ||
57 | unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */ | ||
58 | |||
47 | }; | 59 | }; |
48 | 60 | ||
49 | extern struct ipc_namespace init_ipc_ns; | 61 | extern struct ipc_namespace init_ipc_ns; |
50 | extern atomic_t nr_ipc_ns; | 62 | extern atomic_t nr_ipc_ns; |
51 | 63 | ||
52 | #ifdef CONFIG_SYSVIPC | 64 | extern spinlock_t mq_lock; |
65 | #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC) | ||
53 | #define INIT_IPC_NS(ns) .ns = &init_ipc_ns, | 66 | #define INIT_IPC_NS(ns) .ns = &init_ipc_ns, |
67 | #else | ||
68 | #define INIT_IPC_NS(ns) | ||
69 | #endif | ||
54 | 70 | ||
71 | #ifdef CONFIG_SYSVIPC | ||
55 | extern int register_ipcns_notifier(struct ipc_namespace *); | 72 | extern int register_ipcns_notifier(struct ipc_namespace *); |
56 | extern int cond_register_ipcns_notifier(struct ipc_namespace *); | 73 | extern int cond_register_ipcns_notifier(struct ipc_namespace *); |
57 | extern void unregister_ipcns_notifier(struct ipc_namespace *); | 74 | extern void unregister_ipcns_notifier(struct ipc_namespace *); |
58 | extern int ipcns_notify(unsigned long); | 75 | extern int ipcns_notify(unsigned long); |
59 | |||
60 | #else /* CONFIG_SYSVIPC */ | 76 | #else /* CONFIG_SYSVIPC */ |
61 | #define INIT_IPC_NS(ns) | 77 | static inline int register_ipcns_notifier(struct ipc_namespace *ns) |
78 | { return 0; } | ||
79 | static inline int cond_register_ipcns_notifier(struct ipc_namespace *ns) | ||
80 | { return 0; } | ||
81 | static inline void unregister_ipcns_notifier(struct ipc_namespace *ns) { } | ||
82 | static inline int ipcns_notify(unsigned long l) { return 0; } | ||
62 | #endif /* CONFIG_SYSVIPC */ | 83 | #endif /* CONFIG_SYSVIPC */ |
63 | 84 | ||
64 | #if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS) | 85 | #ifdef CONFIG_POSIX_MQUEUE |
65 | extern void free_ipc_ns(struct kref *kref); | 86 | extern int mq_init_ns(struct ipc_namespace *ns); |
87 | /* default values */ | ||
88 | #define DFLT_QUEUESMAX 256 /* max number of message queues */ | ||
89 | #define DFLT_MSGMAX 10 /* max number of messages in each queue */ | ||
90 | #define HARD_MSGMAX (131072/sizeof(void *)) | ||
91 | #define DFLT_MSGSIZEMAX 8192 /* max message size */ | ||
92 | #else | ||
93 | static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; } | ||
94 | #endif | ||
95 | |||
96 | #if defined(CONFIG_IPC_NS) | ||
97 | extern void free_ipc_ns(struct ipc_namespace *ns); | ||
66 | extern struct ipc_namespace *copy_ipcs(unsigned long flags, | 98 | extern struct ipc_namespace *copy_ipcs(unsigned long flags, |
67 | struct ipc_namespace *ns); | 99 | struct ipc_namespace *ns); |
68 | extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, | 100 | extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, |
@@ -72,14 +104,11 @@ extern void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, | |||
72 | static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) | 104 | static inline struct ipc_namespace *get_ipc_ns(struct ipc_namespace *ns) |
73 | { | 105 | { |
74 | if (ns) | 106 | if (ns) |
75 | kref_get(&ns->kref); | 107 | atomic_inc(&ns->count); |
76 | return ns; | 108 | return ns; |
77 | } | 109 | } |
78 | 110 | ||
79 | static inline void put_ipc_ns(struct ipc_namespace *ns) | 111 | extern void put_ipc_ns(struct ipc_namespace *ns); |
80 | { | ||
81 | kref_put(&ns->kref, free_ipc_ns); | ||
82 | } | ||
83 | #else | 112 | #else |
84 | static inline struct ipc_namespace *copy_ipcs(unsigned long flags, | 113 | static inline struct ipc_namespace *copy_ipcs(unsigned long flags, |
85 | struct ipc_namespace *ns) | 114 | struct ipc_namespace *ns) |
@@ -99,4 +128,18 @@ static inline void put_ipc_ns(struct ipc_namespace *ns) | |||
99 | { | 128 | { |
100 | } | 129 | } |
101 | #endif | 130 | #endif |
131 | |||
132 | #ifdef CONFIG_POSIX_MQUEUE_SYSCTL | ||
133 | |||
134 | struct ctl_table_header; | ||
135 | extern struct ctl_table_header *mq_register_sysctl_table(void); | ||
136 | |||
137 | #else /* CONFIG_POSIX_MQUEUE_SYSCTL */ | ||
138 | |||
139 | static inline struct ctl_table_header *mq_register_sysctl_table(void) | ||
140 | { | ||
141 | return NULL; | ||
142 | } | ||
143 | |||
144 | #endif /* CONFIG_POSIX_MQUEUE_SYSCTL */ | ||
102 | #endif | 145 | #endif |
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index 7ebdb4fb4e54..65aae34759de 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h | |||
@@ -198,6 +198,8 @@ struct kernel_ipmi_msg { | |||
198 | response. When you send a | 198 | response. When you send a |
199 | response message, this will | 199 | response message, this will |
200 | be returned. */ | 200 | be returned. */ |
201 | #define IPMI_OEM_RECV_TYPE 5 /* The response for OEM Channels */ | ||
202 | |||
201 | /* Note that async events and received commands do not have a completion | 203 | /* Note that async events and received commands do not have a completion |
202 | code as the first byte of the incoming data, unlike a response. */ | 204 | code as the first byte of the incoming data, unlike a response. */ |
203 | 205 | ||
diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h index b56a158d587a..df97e6e31e87 100644 --- a/include/linux/ipmi_msgdefs.h +++ b/include/linux/ipmi_msgdefs.h | |||
@@ -58,6 +58,12 @@ | |||
58 | #define IPMI_READ_EVENT_MSG_BUFFER_CMD 0x35 | 58 | #define IPMI_READ_EVENT_MSG_BUFFER_CMD 0x35 |
59 | #define IPMI_GET_CHANNEL_INFO_CMD 0x42 | 59 | #define IPMI_GET_CHANNEL_INFO_CMD 0x42 |
60 | 60 | ||
61 | /* Bit for BMC global enables. */ | ||
62 | #define IPMI_BMC_RCV_MSG_INTR 0x01 | ||
63 | #define IPMI_BMC_EVT_MSG_INTR 0x02 | ||
64 | #define IPMI_BMC_EVT_MSG_BUFF 0x04 | ||
65 | #define IPMI_BMC_SYS_LOG 0x08 | ||
66 | |||
61 | #define IPMI_NETFN_STORAGE_REQUEST 0x0a | 67 | #define IPMI_NETFN_STORAGE_REQUEST 0x0a |
62 | #define IPMI_NETFN_STORAGE_RESPONSE 0x0b | 68 | #define IPMI_NETFN_STORAGE_RESPONSE 0x0b |
63 | #define IPMI_ADD_SEL_ENTRY_CMD 0x44 | 69 | #define IPMI_ADD_SEL_ENTRY_CMD 0x44 |
@@ -109,5 +115,7 @@ | |||
109 | #define IPMI_CHANNEL_MEDIUM_USB1 10 | 115 | #define IPMI_CHANNEL_MEDIUM_USB1 10 |
110 | #define IPMI_CHANNEL_MEDIUM_USB2 11 | 116 | #define IPMI_CHANNEL_MEDIUM_USB2 11 |
111 | #define IPMI_CHANNEL_MEDIUM_SYSINTF 12 | 117 | #define IPMI_CHANNEL_MEDIUM_SYSINTF 12 |
118 | #define IPMI_CHANNEL_MEDIUM_OEM_MIN 0x60 | ||
119 | #define IPMI_CHANNEL_MEDIUM_OEM_MAX 0x7f | ||
112 | 120 | ||
113 | #endif /* __LINUX_IPMI_MSGDEFS_H */ | 121 | #endif /* __LINUX_IPMI_MSGDEFS_H */ |
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 62b73668b602..f7c9c75a2775 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
@@ -230,6 +230,6 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) | |||
230 | automatically be destroyed when the interface is destroyed. */ | 230 | automatically be destroyed when the interface is destroyed. */ |
231 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | 231 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, |
232 | read_proc_t *read_proc, | 232 | read_proc_t *read_proc, |
233 | void *data, struct module *owner); | 233 | void *data); |
234 | 234 | ||
235 | #endif /* __LINUX_IPMI_SMI_H */ | 235 | #endif /* __LINUX_IPMI_SMI_H */ |
diff --git a/include/linux/irq.h b/include/linux/irq.h index 873e4ac11b81..b7cbeed972e4 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -17,9 +17,12 @@ | |||
17 | #include <linux/cache.h> | 17 | #include <linux/cache.h> |
18 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
19 | #include <linux/cpumask.h> | 19 | #include <linux/cpumask.h> |
20 | #include <linux/gfp.h> | ||
20 | #include <linux/irqreturn.h> | 21 | #include <linux/irqreturn.h> |
21 | #include <linux/irqnr.h> | 22 | #include <linux/irqnr.h> |
22 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <linux/topology.h> | ||
25 | #include <linux/wait.h> | ||
23 | 26 | ||
24 | #include <asm/irq.h> | 27 | #include <asm/irq.h> |
25 | #include <asm/ptrace.h> | 28 | #include <asm/ptrace.h> |
@@ -65,6 +68,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, | |||
65 | #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ | 68 | #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ |
66 | #define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ | 69 | #define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ |
67 | #define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/ | 70 | #define IRQ_AFFINITY_SET 0x02000000 /* IRQ affinity was set from userspace*/ |
71 | #define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */ | ||
68 | 72 | ||
69 | #ifdef CONFIG_IRQ_PER_CPU | 73 | #ifdef CONFIG_IRQ_PER_CPU |
70 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) | 74 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) |
@@ -155,6 +159,8 @@ struct irq_2_iommu; | |||
155 | * @affinity: IRQ affinity on SMP | 159 | * @affinity: IRQ affinity on SMP |
156 | * @cpu: cpu index useful for balancing | 160 | * @cpu: cpu index useful for balancing |
157 | * @pending_mask: pending rebalanced interrupts | 161 | * @pending_mask: pending rebalanced interrupts |
162 | * @threads_active: number of irqaction threads currently running | ||
163 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | ||
158 | * @dir: /proc/irq/ procfs entry | 164 | * @dir: /proc/irq/ procfs entry |
159 | * @name: flow handler name for /proc/interrupts output | 165 | * @name: flow handler name for /proc/interrupts output |
160 | */ | 166 | */ |
@@ -186,6 +192,8 @@ struct irq_desc { | |||
186 | cpumask_var_t pending_mask; | 192 | cpumask_var_t pending_mask; |
187 | #endif | 193 | #endif |
188 | #endif | 194 | #endif |
195 | atomic_t threads_active; | ||
196 | wait_queue_head_t wait_for_threads; | ||
189 | #ifdef CONFIG_PROC_FS | 197 | #ifdef CONFIG_PROC_FS |
190 | struct proc_dir_entry *dir; | 198 | struct proc_dir_entry *dir; |
191 | #endif | 199 | #endif |
@@ -479,6 +487,16 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc, | |||
479 | #endif | 487 | #endif |
480 | } | 488 | } |
481 | 489 | ||
490 | static inline void free_desc_masks(struct irq_desc *old_desc, | ||
491 | struct irq_desc *new_desc) | ||
492 | { | ||
493 | free_cpumask_var(old_desc->affinity); | ||
494 | |||
495 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
496 | free_cpumask_var(old_desc->pending_mask); | ||
497 | #endif | ||
498 | } | ||
499 | |||
482 | #else /* !CONFIG_SMP */ | 500 | #else /* !CONFIG_SMP */ |
483 | 501 | ||
484 | static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | 502 | static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, |
@@ -492,6 +510,10 @@ static inline void init_copy_desc_masks(struct irq_desc *old_desc, | |||
492 | { | 510 | { |
493 | } | 511 | } |
494 | 512 | ||
513 | static inline void free_desc_masks(struct irq_desc *old_desc, | ||
514 | struct irq_desc *new_desc) | ||
515 | { | ||
516 | } | ||
495 | #endif /* CONFIG_SMP */ | 517 | #endif /* CONFIG_SMP */ |
496 | 518 | ||
497 | #endif /* _LINUX_IRQ_H */ | 519 | #endif /* _LINUX_IRQ_H */ |
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 74bde13224c9..b02a3f1d46a0 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h | |||
@@ -24,8 +24,8 @@ | |||
24 | # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) | 24 | # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) |
25 | # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) | 25 | # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) |
26 | # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) | 26 | # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) |
27 | # define trace_softirq_enter() do { current->softirq_context++; } while (0) | 27 | # define lockdep_softirq_enter() do { current->softirq_context++; } while (0) |
28 | # define trace_softirq_exit() do { current->softirq_context--; } while (0) | 28 | # define lockdep_softirq_exit() do { current->softirq_context--; } while (0) |
29 | # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, | 29 | # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, |
30 | #else | 30 | #else |
31 | # define trace_hardirqs_on() do { } while (0) | 31 | # define trace_hardirqs_on() do { } while (0) |
@@ -38,8 +38,8 @@ | |||
38 | # define trace_softirqs_enabled(p) 0 | 38 | # define trace_softirqs_enabled(p) 0 |
39 | # define trace_hardirq_enter() do { } while (0) | 39 | # define trace_hardirq_enter() do { } while (0) |
40 | # define trace_hardirq_exit() do { } while (0) | 40 | # define trace_hardirq_exit() do { } while (0) |
41 | # define trace_softirq_enter() do { } while (0) | 41 | # define lockdep_softirq_enter() do { } while (0) |
42 | # define trace_softirq_exit() do { } while (0) | 42 | # define lockdep_softirq_exit() do { } while (0) |
43 | # define INIT_TRACE_IRQFLAGS | 43 | # define INIT_TRACE_IRQFLAGS |
44 | #endif | 44 | #endif |
45 | 45 | ||
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h index c5584ca5b8c9..819acaaac3f5 100644 --- a/include/linux/irqreturn.h +++ b/include/linux/irqreturn.h | |||
@@ -5,10 +5,12 @@ | |||
5 | * enum irqreturn | 5 | * enum irqreturn |
6 | * @IRQ_NONE interrupt was not from this device | 6 | * @IRQ_NONE interrupt was not from this device |
7 | * @IRQ_HANDLED interrupt was handled by this device | 7 | * @IRQ_HANDLED interrupt was handled by this device |
8 | * @IRQ_WAKE_THREAD handler requests to wake the handler thread | ||
8 | */ | 9 | */ |
9 | enum irqreturn { | 10 | enum irqreturn { |
10 | IRQ_NONE, | 11 | IRQ_NONE, |
11 | IRQ_HANDLED, | 12 | IRQ_HANDLED, |
13 | IRQ_WAKE_THREAD, | ||
12 | }; | 14 | }; |
13 | 15 | ||
14 | typedef enum irqreturn irqreturn_t; | 16 | typedef enum irqreturn irqreturn_t; |
diff --git a/include/linux/ivtv.h b/include/linux/ivtv.h index f2720280b9ec..062d20f74322 100644 --- a/include/linux/ivtv.h +++ b/include/linux/ivtv.h | |||
@@ -60,10 +60,10 @@ struct ivtv_dma_frame { | |||
60 | 60 | ||
61 | #define IVTV_IOC_DMA_FRAME _IOW ('V', BASE_VIDIOC_PRIVATE+0, struct ivtv_dma_frame) | 61 | #define IVTV_IOC_DMA_FRAME _IOW ('V', BASE_VIDIOC_PRIVATE+0, struct ivtv_dma_frame) |
62 | 62 | ||
63 | /* These are the VBI types as they appear in the embedded VBI private packets. */ | 63 | /* Deprecated defines: applications should use the defines from videodev2.h */ |
64 | #define IVTV_SLICED_TYPE_TELETEXT_B (1) | 64 | #define IVTV_SLICED_TYPE_TELETEXT_B V4L2_MPEG_VBI_IVTV_TELETEXT_B |
65 | #define IVTV_SLICED_TYPE_CAPTION_525 (4) | 65 | #define IVTV_SLICED_TYPE_CAPTION_525 V4L2_MPEG_VBI_IVTV_CAPTION_525 |
66 | #define IVTV_SLICED_TYPE_WSS_625 (5) | 66 | #define IVTV_SLICED_TYPE_WSS_625 V4L2_MPEG_VBI_IVTV_WSS_625 |
67 | #define IVTV_SLICED_TYPE_VPS (7) | 67 | #define IVTV_SLICED_TYPE_VPS V4L2_MPEG_VBI_IVTV_VPS |
68 | 68 | ||
69 | #endif /* _LINUX_IVTV_H */ | 69 | #endif /* _LINUX_IVTV_H */ |
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 64246dce5663..c2049a04fa0b 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
@@ -35,7 +35,7 @@ | |||
35 | #define journal_oom_retry 1 | 35 | #define journal_oom_retry 1 |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Define JBD_PARANIOD_IOFAIL to cause a kernel BUG() if ext3 finds | 38 | * Define JBD_PARANOID_IOFAIL to cause a kernel BUG() if ext3 finds |
39 | * certain classes of error which can occur due to failed IOs. Under | 39 | * certain classes of error which can occur due to failed IOs. Under |
40 | * normal use we want ext3 to continue after such errors, because | 40 | * normal use we want ext3 to continue after such errors, because |
41 | * hardware _can_ fail, but for debugging purposes when running tests on | 41 | * hardware _can_ fail, but for debugging purposes when running tests on |
@@ -552,6 +552,11 @@ struct transaction_s | |||
552 | */ | 552 | */ |
553 | int t_handle_count; | 553 | int t_handle_count; |
554 | 554 | ||
555 | /* | ||
556 | * This transaction is being forced and some process is | ||
557 | * waiting for it to finish. | ||
558 | */ | ||
559 | int t_synchronous_commit:1; | ||
555 | }; | 560 | }; |
556 | 561 | ||
557 | /** | 562 | /** |
@@ -973,7 +978,8 @@ extern void journal_destroy_revoke(journal_t *); | |||
973 | extern int journal_revoke (handle_t *, | 978 | extern int journal_revoke (handle_t *, |
974 | unsigned long, struct buffer_head *); | 979 | unsigned long, struct buffer_head *); |
975 | extern int journal_cancel_revoke(handle_t *, struct journal_head *); | 980 | extern int journal_cancel_revoke(handle_t *, struct journal_head *); |
976 | extern void journal_write_revoke_records(journal_t *, transaction_t *); | 981 | extern void journal_write_revoke_records(journal_t *, |
982 | transaction_t *, int); | ||
977 | 983 | ||
978 | /* Recovery revoke support */ | 984 | /* Recovery revoke support */ |
979 | extern int journal_set_revoke(journal_t *, unsigned long, tid_t); | 985 | extern int journal_set_revoke(journal_t *, unsigned long, tid_t); |
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 4d248b3f1323..cc02393bfce8 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h | |||
@@ -649,6 +649,12 @@ struct transaction_s | |||
649 | int t_handle_count; | 649 | int t_handle_count; |
650 | 650 | ||
651 | /* | 651 | /* |
652 | * This transaction is being forced and some process is | ||
653 | * waiting for it to finish. | ||
654 | */ | ||
655 | int t_synchronous_commit:1; | ||
656 | |||
657 | /* | ||
652 | * For use by the filesystem to store fs-specific data | 658 | * For use by the filesystem to store fs-specific data |
653 | * structures associated with the transaction | 659 | * structures associated with the transaction |
654 | */ | 660 | */ |
@@ -1187,7 +1193,8 @@ extern int jbd2_journal_init_revoke_caches(void); | |||
1187 | extern void jbd2_journal_destroy_revoke(journal_t *); | 1193 | extern void jbd2_journal_destroy_revoke(journal_t *); |
1188 | extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *); | 1194 | extern int jbd2_journal_revoke (handle_t *, unsigned long long, struct buffer_head *); |
1189 | extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *); | 1195 | extern int jbd2_journal_cancel_revoke(handle_t *, struct journal_head *); |
1190 | extern void jbd2_journal_write_revoke_records(journal_t *, transaction_t *); | 1196 | extern void jbd2_journal_write_revoke_records(journal_t *, |
1197 | transaction_t *, int); | ||
1191 | 1198 | ||
1192 | /* Recovery revoke support */ | 1199 | /* Recovery revoke support */ |
1193 | extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t); | 1200 | extern int jbd2_journal_set_revoke(journal_t *, unsigned long long, tid_t); |
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index f3fe34391d8e..792274269f2b 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h | |||
@@ -13,10 +13,17 @@ | |||
13 | #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ | 13 | #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ |
14 | 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) | 14 | 2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1) |
15 | 15 | ||
16 | struct module; | ||
17 | |||
16 | #ifdef CONFIG_KALLSYMS | 18 | #ifdef CONFIG_KALLSYMS |
17 | /* Lookup the address for a symbol. Returns 0 if not found. */ | 19 | /* Lookup the address for a symbol. Returns 0 if not found. */ |
18 | unsigned long kallsyms_lookup_name(const char *name); | 20 | unsigned long kallsyms_lookup_name(const char *name); |
19 | 21 | ||
22 | /* Call a function on each kallsyms symbol in the core kernel */ | ||
23 | int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, | ||
24 | unsigned long), | ||
25 | void *data); | ||
26 | |||
20 | extern int kallsyms_lookup_size_offset(unsigned long addr, | 27 | extern int kallsyms_lookup_size_offset(unsigned long addr, |
21 | unsigned long *symbolsize, | 28 | unsigned long *symbolsize, |
22 | unsigned long *offset); | 29 | unsigned long *offset); |
@@ -43,6 +50,14 @@ static inline unsigned long kallsyms_lookup_name(const char *name) | |||
43 | return 0; | 50 | return 0; |
44 | } | 51 | } |
45 | 52 | ||
53 | static inline int kallsyms_on_each_symbol(int (*fn)(void *, const char *, | ||
54 | struct module *, | ||
55 | unsigned long), | ||
56 | void *data) | ||
57 | { | ||
58 | return 0; | ||
59 | } | ||
60 | |||
46 | static inline int kallsyms_lookup_size_offset(unsigned long addr, | 61 | static inline int kallsyms_lookup_size_offset(unsigned long addr, |
47 | unsigned long *symbolsize, | 62 | unsigned long *symbolsize, |
48 | unsigned long *offset) | 63 | unsigned long *offset) |
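Not part of the diff: a sketch of a kallsyms_on_each_symbol() user. The iterator calls fn for every core-kernel symbol with an opaque data pointer, the symbol name, its module (NULL for built-in symbols) and its address; a non-zero return from the callback is meant to stop the walk early. The prefix-matching example below is hypothetical.

static int example_match_prefix(void *data, const char *name,
				struct module *mod, unsigned long addr)
{
	const char *prefix = data;

	if (!strncmp(name, prefix, strlen(prefix)))
		printk(KERN_INFO "kallsyms: %s at %#lx\n", name, addr);
	return 0;	/* keep iterating */
}

static void example_dump_vmalloc_symbols(void)
{
	kallsyms_on_each_symbol(example_match_prefix, (void *)"vmalloc");
}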
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 914918abfdd1..883cd44ff765 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -242,6 +242,20 @@ extern struct ratelimit_state printk_ratelimit_state; | |||
242 | extern int printk_ratelimit(void); | 242 | extern int printk_ratelimit(void); |
243 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, | 243 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, |
244 | unsigned int interval_msec); | 244 | unsigned int interval_msec); |
245 | |||
246 | /* | ||
247 | * Print a one-time message (analogous to WARN_ONCE() et al): | ||
248 | */ | ||
249 | #define printk_once(x...) ({ \ | ||
250 | static int __print_once = 1; \ | ||
251 | \ | ||
252 | if (__print_once) { \ | ||
253 | __print_once = 0; \ | ||
254 | printk(x); \ | ||
255 | } \ | ||
256 | }) | ||
257 | |||
258 | void log_buf_kexec_setup(void); | ||
245 | #else | 259 | #else |
246 | static inline int vprintk(const char *s, va_list args) | 260 | static inline int vprintk(const char *s, va_list args) |
247 | __attribute__ ((format (printf, 1, 0))); | 261 | __attribute__ ((format (printf, 1, 0))); |
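Not part of the diff: printk_once() above latches a static flag so the message is emitted only on the first call. A hypothetical use in a driver that wants to warn about a legacy path exactly once:

static void example_legacy_path(void)
{
	printk_once(KERN_WARNING
		    "example: legacy interface used, please update your tools\n");
}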
@@ -253,6 +267,13 @@ static inline int printk_ratelimit(void) { return 0; } | |||
253 | static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ | 267 | static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies, \ |
254 | unsigned int interval_msec) \ | 268 | unsigned int interval_msec) \ |
255 | { return false; } | 269 | { return false; } |
270 | |||
271 | /* No effect, but we still get type checking even in the !PRINTK case: */ | ||
272 | #define printk_once(x...) printk(x) | ||
273 | |||
274 | static inline void log_buf_kexec_setup(void) | ||
275 | { | ||
276 | } | ||
256 | #endif | 277 | #endif |
257 | 278 | ||
258 | extern int printk_needs_cpu(int cpu); | 279 | extern int printk_needs_cpu(int cpu); |
@@ -353,6 +374,17 @@ static inline char *pack_hex_byte(char *buf, u8 byte) | |||
353 | printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | 374 | printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) |
354 | #define pr_info(fmt, ...) \ | 375 | #define pr_info(fmt, ...) \ |
355 | printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | 376 | printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
377 | #define pr_cont(fmt, ...) \ | ||
378 | printk(KERN_CONT fmt, ##__VA_ARGS__) | ||
379 | |||
380 | /* pr_devel() should produce zero code unless DEBUG is defined */ | ||
381 | #ifdef DEBUG | ||
382 | #define pr_devel(fmt, ...) \ | ||
383 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
384 | #else | ||
385 | #define pr_devel(fmt, ...) \ | ||
386 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) | ||
387 | #endif | ||
356 | 388 | ||
357 | /* If you are writing a driver, please use dev_dbg instead */ | 389 | /* If you are writing a driver, please use dev_dbg instead */ |
358 | #if defined(DEBUG) | 390 | #if defined(DEBUG) |
@@ -369,6 +401,139 @@ static inline char *pack_hex_byte(char *buf, u8 byte) | |||
369 | #endif | 401 | #endif |
370 | 402 | ||
371 | /* | 403 | /* |
404 | * General tracing related utility functions - trace_printk(), | ||
405 | * tracing_on/tracing_off and tracing_start()/tracing_stop | ||
406 | * | ||
407 | * Use tracing_on/tracing_off when you want to quickly turn on or off | ||
408 | * tracing. It simply enables or disables the recording of the trace events. | ||
409 | * This also corresponds to the user space debugfs/tracing/tracing_on | ||
410 | * file, which gives a means for the kernel and userspace to interact. | ||
411 | * Place a tracing_off() in the kernel where you want tracing to end. | ||
412 | * From user space, examine the trace, and then echo 1 > tracing_on | ||
413 | * to continue tracing. | ||
414 | * | ||
415 | * tracing_stop/tracing_start has slightly more overhead. It is used | ||
416 | * by things like suspend to ram where disabling the recording of the | ||
417 | * trace is not enough, but tracing must actually stop because things | ||
418 | * like calling smp_processor_id() may crash the system. | ||
419 | * | ||
420 | * Most likely, you want to use tracing_on/tracing_off. | ||
421 | */ | ||
422 | #ifdef CONFIG_RING_BUFFER | ||
423 | void tracing_on(void); | ||
424 | void tracing_off(void); | ||
425 | /* tracing_off_permanent stops recording with no way to bring it back */ | ||
426 | void tracing_off_permanent(void); | ||
427 | int tracing_is_on(void); | ||
428 | #else | ||
429 | static inline void tracing_on(void) { } | ||
430 | static inline void tracing_off(void) { } | ||
431 | static inline void tracing_off_permanent(void) { } | ||
432 | static inline int tracing_is_on(void) { return 0; } | ||
433 | #endif | ||
434 | #ifdef CONFIG_TRACING | ||
435 | extern void tracing_start(void); | ||
436 | extern void tracing_stop(void); | ||
437 | extern void ftrace_off_permanent(void); | ||
438 | |||
439 | extern void | ||
440 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); | ||
441 | |||
442 | static inline void __attribute__ ((format (printf, 1, 2))) | ||
443 | ____trace_printk_check_format(const char *fmt, ...) | ||
444 | { | ||
445 | } | ||
446 | #define __trace_printk_check_format(fmt, args...) \ | ||
447 | do { \ | ||
448 | if (0) \ | ||
449 | ____trace_printk_check_format(fmt, ##args); \ | ||
450 | } while (0) | ||
451 | |||
452 | /** | ||
453 | * trace_printk - printf formatting in the ftrace buffer | ||
454 | * @fmt: the printf format for printing | ||
455 | * | ||
456 | * Note: __trace_printk is an internal function for trace_printk and | ||
457 | * the @ip is passed in via the trace_printk macro. | ||
458 | * | ||
459 | * This function allows a kernel developer to debug fast path sections | ||
460 | * that printk is not appropriate for. By scattering in various | ||
461 | * printk like tracing in the code, a developer can quickly see | ||
462 | * where problems are occurring. | ||
463 | * | ||
464 | * This is intended as a debugging tool for the developer only. | ||
465 | * Please refrain from leaving trace_printks scattered around in | ||
466 | * your code. | ||
467 | */ | ||
468 | |||
469 | #define trace_printk(fmt, args...) \ | ||
470 | do { \ | ||
471 | __trace_printk_check_format(fmt, ##args); \ | ||
472 | if (__builtin_constant_p(fmt)) { \ | ||
473 | static const char *trace_printk_fmt \ | ||
474 | __attribute__((section("__trace_printk_fmt"))) = \ | ||
475 | __builtin_constant_p(fmt) ? fmt : NULL; \ | ||
476 | \ | ||
477 | __trace_bprintk(_THIS_IP_, trace_printk_fmt, ##args); \ | ||
478 | } else \ | ||
479 | __trace_printk(_THIS_IP_, fmt, ##args); \ | ||
480 | } while (0) | ||
481 | |||
482 | extern int | ||
483 | __trace_bprintk(unsigned long ip, const char *fmt, ...) | ||
484 | __attribute__ ((format (printf, 2, 3))); | ||
485 | |||
486 | extern int | ||
487 | __trace_printk(unsigned long ip, const char *fmt, ...) | ||
488 | __attribute__ ((format (printf, 2, 3))); | ||
489 | |||
490 | /* | ||
491 | * The double __builtin_constant_p is because gcc will give us an error | ||
492 | * if we try to allocate the static variable to fmt if it is not a | ||
493 | * constant. Even with the outer if statement. | ||
494 | */ | ||
495 | #define ftrace_vprintk(fmt, vargs) \ | ||
496 | do { \ | ||
497 | if (__builtin_constant_p(fmt)) { \ | ||
498 | static const char *trace_printk_fmt \ | ||
499 | __attribute__((section("__trace_printk_fmt"))) = \ | ||
500 | __builtin_constant_p(fmt) ? fmt : NULL; \ | ||
501 | \ | ||
502 | __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \ | ||
503 | } else \ | ||
504 | __ftrace_vprintk(_THIS_IP_, fmt, vargs); \ | ||
505 | } while (0) | ||
506 | |||
507 | extern int | ||
508 | __ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); | ||
509 | |||
510 | extern int | ||
511 | __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); | ||
512 | |||
513 | extern void ftrace_dump(void); | ||
514 | #else | ||
515 | static inline void | ||
516 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } | ||
517 | static inline int | ||
518 | trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); | ||
519 | |||
520 | static inline void tracing_start(void) { } | ||
521 | static inline void tracing_stop(void) { } | ||
522 | static inline void ftrace_off_permanent(void) { } | ||
523 | static inline int | ||
524 | trace_printk(const char *fmt, ...) | ||
525 | { | ||
526 | return 0; | ||
527 | } | ||
528 | static inline int | ||
529 | ftrace_vprintk(const char *fmt, va_list ap) | ||
530 | { | ||
531 | return 0; | ||
532 | } | ||
533 | static inline void ftrace_dump(void) { } | ||
534 | #endif /* CONFIG_TRACING */ | ||
535 | |||
536 | /* | ||
372 | * Display an IP address in readable format. | 537 | * Display an IP address in readable format. |
373 | */ | 538 | */ |
374 | 539 | ||
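Not part of the diff: trace_printk() writes into the ftrace ring buffer rather than the console log, so it is cheap enough for hot paths while debugging. A hypothetical annotation of a receive path:

static void example_rx(int queue, unsigned int len)
{
	trace_printk("rx on queue %d, %u bytes\n", queue, len);
}

The output is read back through debugfs/tracing/trace; as the comment above says, such calls are meant to be removed again rather than left in merged code.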
@@ -379,18 +544,6 @@ static inline char *pack_hex_byte(char *buf, u8 byte) | |||
379 | ((unsigned char *)&addr)[3] | 544 | ((unsigned char *)&addr)[3] |
380 | #define NIPQUAD_FMT "%u.%u.%u.%u" | 545 | #define NIPQUAD_FMT "%u.%u.%u.%u" |
381 | 546 | ||
382 | #if defined(__LITTLE_ENDIAN) | ||
383 | #define HIPQUAD(addr) \ | ||
384 | ((unsigned char *)&addr)[3], \ | ||
385 | ((unsigned char *)&addr)[2], \ | ||
386 | ((unsigned char *)&addr)[1], \ | ||
387 | ((unsigned char *)&addr)[0] | ||
388 | #elif defined(__BIG_ENDIAN) | ||
389 | #define HIPQUAD NIPQUAD | ||
390 | #else | ||
391 | #error "Please fix asm/byteorder.h" | ||
392 | #endif /* __LITTLE_ENDIAN */ | ||
393 | |||
394 | /* | 547 | /* |
395 | * min()/max()/clamp() macros that also do | 548 | * min()/max()/clamp() macros that also do |
396 | * strict type-checking.. See the | 549 | * strict type-checking.. See the |
diff --git a/include/linux/key.h b/include/linux/key.h index 21d32a142c00..e544f466d69a 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/rbtree.h> | 20 | #include <linux/rbtree.h> |
21 | #include <linux/rcupdate.h> | 21 | #include <linux/rcupdate.h> |
22 | #include <linux/sysctl.h> | 22 | #include <linux/sysctl.h> |
23 | #include <linux/rwsem.h> | ||
23 | #include <asm/atomic.h> | 24 | #include <asm/atomic.h> |
24 | 25 | ||
25 | #ifdef __KERNEL__ | 26 | #ifdef __KERNEL__ |
diff --git a/include/linux/kmod.h b/include/linux/kmod.h index 92213a9194e1..384ca8bbf1ac 100644 --- a/include/linux/kmod.h +++ b/include/linux/kmod.h | |||
@@ -29,10 +29,15 @@ | |||
29 | #ifdef CONFIG_MODULES | 29 | #ifdef CONFIG_MODULES |
30 | /* modprobe exit status on success, -ve on error. Return value | 30 | /* modprobe exit status on success, -ve on error. Return value |
31 | * usually useless though. */ | 31 | * usually useless though. */ |
32 | extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2))); | 32 | extern int __request_module(bool wait, const char *name, ...) \ |
33 | #define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x))) | 33 | __attribute__((format(printf, 2, 3))); |
34 | #define request_module(mod...) __request_module(true, mod) | ||
35 | #define request_module_nowait(mod...) __request_module(false, mod) | ||
36 | #define try_then_request_module(x, mod...) \ | ||
37 | ((x) ?: (__request_module(true, mod), (x))) | ||
34 | #else | 38 | #else |
35 | static inline int request_module(const char * name, ...) { return -ENOSYS; } | 39 | static inline int request_module(const char *name, ...) { return -ENOSYS; } |
40 | static inline int request_module_nowait(const char *name, ...) { return -ENOSYS; } | ||
36 | #define try_then_request_module(x, mod...) (x) | 41 | #define try_then_request_module(x, mod...) (x) |
37 | #endif | 42 | #endif |
38 | 43 | ||
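Not part of the diff: a sketch contrasting the synchronous and asynchronous module-request flavours now available. The module name patterns and the error handling below are assumptions.

static int example_attach(int proto)
{
	/* Blocks until modprobe exits; only safe where sleeping is allowed. */
	if (request_module("example-proto-%d", proto) < 0)
		return -ENOPROTOOPT;

	/* Fire-and-forget: kick off the load, do not wait for modprobe. */
	request_module_nowait("example-helper-%d", proto);
	return 0;
}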
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 2ec6cc14a114..bcd9c07848be 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
@@ -94,12 +94,16 @@ struct kprobe { | |||
94 | /* Called after addr is executed, unless... */ | 94 | /* Called after addr is executed, unless... */ |
95 | kprobe_post_handler_t post_handler; | 95 | kprobe_post_handler_t post_handler; |
96 | 96 | ||
97 | /* ... called if executing addr causes a fault (eg. page fault). | 97 | /* |
98 | * Return 1 if it handled fault, otherwise kernel will see it. */ | 98 | * ... called if executing addr causes a fault (eg. page fault). |
99 | * Return 1 if it handled fault, otherwise kernel will see it. | ||
100 | */ | ||
99 | kprobe_fault_handler_t fault_handler; | 101 | kprobe_fault_handler_t fault_handler; |
100 | 102 | ||
101 | /* ... called if breakpoint trap occurs in probe handler. | 103 | /* |
102 | * Return 1 if it handled break, otherwise kernel will see it. */ | 104 | * ... called if breakpoint trap occurs in probe handler. |
105 | * Return 1 if it handled break, otherwise kernel will see it. | ||
106 | */ | ||
103 | kprobe_break_handler_t break_handler; | 107 | kprobe_break_handler_t break_handler; |
104 | 108 | ||
105 | /* Saved opcode (which has been replaced with breakpoint) */ | 109 | /* Saved opcode (which has been replaced with breakpoint) */ |
@@ -108,18 +112,28 @@ struct kprobe { | |||
108 | /* copy of the original instruction */ | 112 | /* copy of the original instruction */ |
109 | struct arch_specific_insn ainsn; | 113 | struct arch_specific_insn ainsn; |
110 | 114 | ||
111 | /* Indicates various status flags. Protected by kprobe_mutex. */ | 115 | /* |
116 | * Indicates various status flags. | ||
117 | * Protected by kprobe_mutex after this kprobe is registered. | ||
118 | */ | ||
112 | u32 flags; | 119 | u32 flags; |
113 | }; | 120 | }; |
114 | 121 | ||
115 | /* Kprobe status flags */ | 122 | /* Kprobe status flags */ |
116 | #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ | 123 | #define KPROBE_FLAG_GONE 1 /* breakpoint has already gone */ |
124 | #define KPROBE_FLAG_DISABLED 2 /* probe is temporarily disabled */ | ||
117 | 125 | ||
126 | /* Has this kprobe gone ? */ | ||
118 | static inline int kprobe_gone(struct kprobe *p) | 127 | static inline int kprobe_gone(struct kprobe *p) |
119 | { | 128 | { |
120 | return p->flags & KPROBE_FLAG_GONE; | 129 | return p->flags & KPROBE_FLAG_GONE; |
121 | } | 130 | } |
122 | 131 | ||
132 | /* Is this kprobe disabled ? */ | ||
133 | static inline int kprobe_disabled(struct kprobe *p) | ||
134 | { | ||
135 | return p->flags & (KPROBE_FLAG_DISABLED | KPROBE_FLAG_GONE); | ||
136 | } | ||
123 | /* | 137 | /* |
124 | * Special probe type that uses setjmp-longjmp type tricks to resume | 138 | * Special probe type that uses setjmp-longjmp type tricks to resume |
125 | * execution at a specified entry with a matching prototype corresponding | 139 | * execution at a specified entry with a matching prototype corresponding |
@@ -279,6 +293,9 @@ void unregister_kretprobes(struct kretprobe **rps, int num); | |||
279 | void kprobe_flush_task(struct task_struct *tk); | 293 | void kprobe_flush_task(struct task_struct *tk); |
280 | void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); | 294 | void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); |
281 | 295 | ||
296 | int disable_kprobe(struct kprobe *kp); | ||
297 | int enable_kprobe(struct kprobe *kp); | ||
298 | |||
282 | #else /* !CONFIG_KPROBES: */ | 299 | #else /* !CONFIG_KPROBES: */ |
283 | 300 | ||
284 | static inline int kprobes_built_in(void) | 301 | static inline int kprobes_built_in(void) |
@@ -345,5 +362,30 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num) | |||
345 | static inline void kprobe_flush_task(struct task_struct *tk) | 362 | static inline void kprobe_flush_task(struct task_struct *tk) |
346 | { | 363 | { |
347 | } | 364 | } |
365 | static inline int disable_kprobe(struct kprobe *kp) | ||
366 | { | ||
367 | return -ENOSYS; | ||
368 | } | ||
369 | static inline int enable_kprobe(struct kprobe *kp) | ||
370 | { | ||
371 | return -ENOSYS; | ||
372 | } | ||
348 | #endif /* CONFIG_KPROBES */ | 373 | #endif /* CONFIG_KPROBES */ |
374 | static inline int disable_kretprobe(struct kretprobe *rp) | ||
375 | { | ||
376 | return disable_kprobe(&rp->kp); | ||
377 | } | ||
378 | static inline int enable_kretprobe(struct kretprobe *rp) | ||
379 | { | ||
380 | return enable_kprobe(&rp->kp); | ||
381 | } | ||
382 | static inline int disable_jprobe(struct jprobe *jp) | ||
383 | { | ||
384 | return disable_kprobe(&jp->kp); | ||
385 | } | ||
386 | static inline int enable_jprobe(struct jprobe *jp) | ||
387 | { | ||
388 | return enable_kprobe(&jp->kp); | ||
389 | } | ||
390 | |||
349 | #endif /* _LINUX_KPROBES_H */ | 391 | #endif /* _LINUX_KPROBES_H */ |
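Not part of the diff: with disable_kprobe()/enable_kprobe() a probe can stay registered but dormant instead of being unregistered and re-registered. A hypothetical module-style sketch; the probed symbol and the handler are illustrative only.

static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "example: pre-handler hit at %p\n", p->addr);
	return 0;
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= example_pre_handler,
};

static int __init example_probe_init(void)
{
	int ret = register_kprobe(&example_kp);

	if (ret < 0)
		return ret;
	disable_kprobe(&example_kp);	/* registered but dormant */
	return 0;
}

static void example_probe_arm(void)
{
	enable_kprobe(&example_kp);	/* re-arm the breakpoint on demand */
}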
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 311a073afe8a..8cc137911b34 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -409,6 +409,8 @@ struct kvm_trace_rec { | |||
409 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT | 409 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT |
410 | #define KVM_CAP_DEVICE_DEASSIGNMENT 27 | 410 | #define KVM_CAP_DEVICE_DEASSIGNMENT 27 |
411 | #endif | 411 | #endif |
412 | /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ | ||
413 | #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 | ||
412 | 414 | ||
413 | #ifdef KVM_CAP_IRQ_ROUTING | 415 | #ifdef KVM_CAP_IRQ_ROUTING |
414 | 416 | ||
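KVM_CAP_JOIN_MEMORY_REGIONS_WORKS only advertises that an existing KVM_SET_USER_MEMORY_REGION bug is fixed, so userspace probes it like any other capability. A hedged userspace sketch; the helper name and error handling are mine, not from the patch:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Returns 1 if the fix is advertised, 0 if not (or if /dev/kvm is unavailable). */
static int kvm_memslot_join_works(void)
{
	int fd = open("/dev/kvm", O_RDWR);
	int ret = 0;

	if (fd >= 0) {
		ret = ioctl(fd, KVM_CHECK_EXTENSION,
			    KVM_CAP_JOIN_MEMORY_REGIONS_WORKS) > 0;
		close(fd);
	}
	return ret;
}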
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h new file mode 100644 index 000000000000..42f854a1a199 --- /dev/null +++ b/include/linux/leds-bd2802.h | |||
@@ -0,0 +1,26 @@ | |||
1 | /* | ||
2 | * leds-bd2802.h - RGB LED Driver | ||
3 | * | ||
4 | * Copyright (C) 2009 Samsung Electronics | ||
5 | * Kim Kyuwon <q1.kim@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * Datasheet: http://www.rohm.com/products/databook/driver/pdf/bd2802gu-e.pdf | ||
12 | * | ||
13 | */ | ||
14 | #ifndef _LEDS_BD2802_H_ | ||
15 | #define _LEDS_BD2802_H_ | ||
16 | |||
17 | struct bd2802_led_platform_data{ | ||
18 | int reset_gpio; | ||
19 | u8 rgb_time; | ||
20 | }; | ||
21 | |||
22 | #define RGB_TIME(slopedown, slopeup, waveform) \ | ||
23 | ((slopedown) << 6 | (slopeup) << 4 | (waveform)) | ||
24 | |||
25 | #endif /* _LEDS_BD2802_H_ */ | ||
26 | |||
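For the new leds-bd2802.h, platform code hands the driver a reset GPIO and a packed RGB timing byte built with RGB_TIME(). A sketch of a board definition; the GPIO number and timing values are illustrative assumptions:

#include <linux/leds-bd2802.h>

static struct bd2802_led_platform_data board_bd2802_pdata = {
	.reset_gpio	= 123,			/* assumed reset line */
	.rgb_time	= RGB_TIME(2, 1, 3),	/* slope-down, slope-up, waveform */
};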
diff --git a/include/linux/leds.h b/include/linux/leds.h index 24489da701e3..376fe07732ea 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
@@ -30,6 +30,7 @@ enum led_brightness { | |||
30 | struct led_classdev { | 30 | struct led_classdev { |
31 | const char *name; | 31 | const char *name; |
32 | int brightness; | 32 | int brightness; |
33 | int max_brightness; | ||
33 | int flags; | 34 | int flags; |
34 | 35 | ||
35 | /* Lower 16 bits reflect status */ | 36 | /* Lower 16 bits reflect status */ |
@@ -140,7 +141,8 @@ struct gpio_led { | |||
140 | const char *name; | 141 | const char *name; |
141 | const char *default_trigger; | 142 | const char *default_trigger; |
142 | unsigned gpio; | 143 | unsigned gpio; |
143 | u8 active_low; | 144 | u8 active_low : 1; |
145 | u8 retain_state_suspended : 1; | ||
144 | }; | 146 | }; |
145 | 147 | ||
146 | struct gpio_led_platform_data { | 148 | struct gpio_led_platform_data { |
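With gpio_led turned into bit-fields, a board LED table can now ask that a LED keep its state across suspend via retain_state_suspended. A sketch under assumed names and GPIO numbers:

#include <linux/leds.h>

static struct gpio_led board_leds[] = {
	{
		.name			= "board:green:status",
		.default_trigger	= "heartbeat",
		.gpio			= 42,	/* assumed GPIO */
		.active_low		= 1,
		.retain_state_suspended	= 1,	/* do not blank on suspend */
	},
};

static struct gpio_led_platform_data board_led_data = {
	.num_leds	= ARRAY_SIZE(board_leds),
	.leds		= board_leds,
};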
diff --git a/include/linux/leds_pwm.h b/include/linux/leds_pwm.h new file mode 100644 index 000000000000..33a071167489 --- /dev/null +++ b/include/linux/leds_pwm.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* | ||
2 | * PWM LED driver data - see drivers/leds/leds-pwm.c | ||
3 | */ | ||
4 | #ifndef __LINUX_LEDS_PWM_H | ||
5 | #define __LINUX_LEDS_PWM_H | ||
6 | |||
7 | struct led_pwm { | ||
8 | const char *name; | ||
9 | const char *default_trigger; | ||
10 | unsigned pwm_id; | ||
11 | u8 active_low; | ||
12 | unsigned max_brightness; | ||
13 | unsigned pwm_period_ns; | ||
14 | }; | ||
15 | |||
16 | struct led_pwm_platform_data { | ||
17 | int num_leds; | ||
18 | struct led_pwm *leds; | ||
19 | }; | ||
20 | |||
21 | #endif | ||
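The new leds_pwm.h describes PWM-driven LEDs for drivers/leds/leds-pwm.c. A hypothetical board entry; the pwm_id, period and names are assumptions:

#include <linux/leds_pwm.h>

static struct led_pwm board_pwm_leds[] = {
	{
		.name		= "board::backlight",
		.pwm_id		= 0,		/* assumed PWM channel */
		.max_brightness	= 255,
		.pwm_period_ns	= 1000000,	/* 1 kHz */
	},
};

static struct led_pwm_platform_data board_pwm_led_data = {
	.num_leds	= ARRAY_SIZE(board_pwm_leds),
	.leds		= board_pwm_leds,
};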
diff --git a/include/linux/libata.h b/include/linux/libata.h index 76262d83656b..3d501db36a26 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -209,6 +209,7 @@ enum { | |||
209 | 209 | ||
210 | /* bits 24:31 of ap->flags are reserved for LLD specific flags */ | 210 | /* bits 24:31 of ap->flags are reserved for LLD specific flags */ |
211 | 211 | ||
212 | |||
212 | /* struct ata_port pflags */ | 213 | /* struct ata_port pflags */ |
213 | ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ | 214 | ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ |
214 | ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ | 215 | ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ |
@@ -225,6 +226,9 @@ enum { | |||
225 | ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ | 226 | ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ |
226 | ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */ | 227 | ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */ |
227 | 228 | ||
229 | ATA_PFLAG_PIO32 = (1 << 20), /* 32bit PIO */ | ||
230 | ATA_PFLAG_PIO32CHANGE = (1 << 21), /* 32bit PIO can be turned on/off */ | ||
231 | |||
228 | /* struct ata_queued_cmd flags */ | 232 | /* struct ata_queued_cmd flags */ |
229 | ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ | 233 | ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ |
230 | ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */ | 234 | ATA_QCFLAG_DMAMAP = (1 << 1), /* SG table is DMA mapped */ |
@@ -379,7 +383,7 @@ enum { | |||
379 | ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ | 383 | ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ |
380 | ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands | 384 | ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands |
381 | not multiple of 16 bytes */ | 385 | not multiple of 16 bytes */ |
382 | ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firwmare update warning */ | 386 | ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ |
383 | ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ | 387 | ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ |
384 | 388 | ||
385 | /* DMA mask for user DMA control: User visible values; DO NOT | 389 | /* DMA mask for user DMA control: User visible values; DO NOT |
@@ -689,7 +693,10 @@ struct ata_port { | |||
689 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ | 693 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ |
690 | struct ata_port_operations *ops; | 694 | struct ata_port_operations *ops; |
691 | spinlock_t *lock; | 695 | spinlock_t *lock; |
696 | /* Flags owned by the EH context. Only EH should touch these once the | ||
697 | port is active */ | ||
692 | unsigned long flags; /* ATA_FLAG_xxx */ | 698 | unsigned long flags; /* ATA_FLAG_xxx */ |
699 | /* Flags that change dynamically, protected by ap->lock */ | ||
693 | unsigned int pflags; /* ATA_PFLAG_xxx */ | 700 | unsigned int pflags; /* ATA_PFLAG_xxx */ |
694 | unsigned int print_id; /* user visible unique port ID */ | 701 | unsigned int print_id; /* user visible unique port ID */ |
695 | unsigned int port_no; /* 0 based port no. inside the host */ | 702 | unsigned int port_no; /* 0 based port no. inside the host */ |
@@ -1595,6 +1602,7 @@ extern void ata_sff_drain_fifo(struct ata_queued_cmd *qc); | |||
1595 | extern void ata_sff_error_handler(struct ata_port *ap); | 1602 | extern void ata_sff_error_handler(struct ata_port *ap); |
1596 | extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc); | 1603 | extern void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc); |
1597 | extern int ata_sff_port_start(struct ata_port *ap); | 1604 | extern int ata_sff_port_start(struct ata_port *ap); |
1605 | extern int ata_sff_port_start32(struct ata_port *ap); | ||
1598 | extern void ata_sff_std_ports(struct ata_ioports *ioaddr); | 1606 | extern void ata_sff_std_ports(struct ata_ioports *ioaddr); |
1599 | extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev, | 1607 | extern unsigned long ata_bmdma_mode_filter(struct ata_device *dev, |
1600 | unsigned long xfer_mask); | 1608 | unsigned long xfer_mask); |
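The new ATA_PFLAG_PIO32/ATA_PFLAG_PIO32CHANGE pflags and ata_sff_port_start32() let an SFF driver opt in to switchable 32-bit PIO. A sketch of how a low-level driver might wire that up; whether a given controller tolerates 32-bit PIO is hardware-specific, and the ops name is invented:

#include <linux/libata.h>

/* Assumed: the controller can do 32-bit PIO and allows toggling it. */
static struct ata_port_operations my_pata_ops = {
	.inherits	= &ata_sff_port_ops,
	.port_start	= ata_sff_port_start32,	/* sets the new PIO32 pflags */
};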
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index 7dc5b6cb44cd..d39ed1cc5fbf 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h | |||
@@ -25,13 +25,13 @@ struct svc_rqst; | |||
25 | #define NLM_MAXCOOKIELEN 32 | 25 | #define NLM_MAXCOOKIELEN 32 |
26 | #define NLM_MAXSTRLEN 1024 | 26 | #define NLM_MAXSTRLEN 1024 |
27 | 27 | ||
28 | #define nlm_granted __constant_htonl(NLM_LCK_GRANTED) | 28 | #define nlm_granted cpu_to_be32(NLM_LCK_GRANTED) |
29 | #define nlm_lck_denied __constant_htonl(NLM_LCK_DENIED) | 29 | #define nlm_lck_denied cpu_to_be32(NLM_LCK_DENIED) |
30 | #define nlm_lck_denied_nolocks __constant_htonl(NLM_LCK_DENIED_NOLOCKS) | 30 | #define nlm_lck_denied_nolocks cpu_to_be32(NLM_LCK_DENIED_NOLOCKS) |
31 | #define nlm_lck_blocked __constant_htonl(NLM_LCK_BLOCKED) | 31 | #define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED) |
32 | #define nlm_lck_denied_grace_period __constant_htonl(NLM_LCK_DENIED_GRACE_PERIOD) | 32 | #define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD) |
33 | 33 | ||
34 | #define nlm_drop_reply __constant_htonl(30000) | 34 | #define nlm_drop_reply cpu_to_be32(30000) |
35 | 35 | ||
36 | /* Lock info passed via NLM */ | 36 | /* Lock info passed via NLM */ |
37 | struct nlm_lock { | 37 | struct nlm_lock { |
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h index 12bfe09de2b1..7353821341ed 100644 --- a/include/linux/lockd/xdr4.h +++ b/include/linux/lockd/xdr4.h | |||
@@ -15,11 +15,11 @@ | |||
15 | #include <linux/lockd/xdr.h> | 15 | #include <linux/lockd/xdr.h> |
16 | 16 | ||
17 | /* error codes new to NLMv4 */ | 17 | /* error codes new to NLMv4 */ |
18 | #define nlm4_deadlock __constant_htonl(NLM_DEADLCK) | 18 | #define nlm4_deadlock cpu_to_be32(NLM_DEADLCK) |
19 | #define nlm4_rofs __constant_htonl(NLM_ROFS) | 19 | #define nlm4_rofs cpu_to_be32(NLM_ROFS) |
20 | #define nlm4_stale_fh __constant_htonl(NLM_STALE_FH) | 20 | #define nlm4_stale_fh cpu_to_be32(NLM_STALE_FH) |
21 | #define nlm4_fbig __constant_htonl(NLM_FBIG) | 21 | #define nlm4_fbig cpu_to_be32(NLM_FBIG) |
22 | #define nlm4_failed __constant_htonl(NLM_FAILED) | 22 | #define nlm4_failed cpu_to_be32(NLM_FAILED) |
23 | 23 | ||
24 | 24 | ||
25 | 25 | ||
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 23bf02fb124f..da5a5a1f4cd2 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -20,43 +20,10 @@ struct lockdep_map; | |||
20 | #include <linux/stacktrace.h> | 20 | #include <linux/stacktrace.h> |
21 | 21 | ||
22 | /* | 22 | /* |
23 | * Lock-class usage-state bits: | 23 | * We'd rather not expose kernel/lockdep_states.h this wide, but we do need |
24 | * the total number of states... :-( | ||
24 | */ | 25 | */ |
25 | enum lock_usage_bit | 26 | #define XXX_LOCK_USAGE_STATES (1+3*4) |
26 | { | ||
27 | LOCK_USED = 0, | ||
28 | LOCK_USED_IN_HARDIRQ, | ||
29 | LOCK_USED_IN_SOFTIRQ, | ||
30 | LOCK_ENABLED_SOFTIRQS, | ||
31 | LOCK_ENABLED_HARDIRQS, | ||
32 | LOCK_USED_IN_HARDIRQ_READ, | ||
33 | LOCK_USED_IN_SOFTIRQ_READ, | ||
34 | LOCK_ENABLED_SOFTIRQS_READ, | ||
35 | LOCK_ENABLED_HARDIRQS_READ, | ||
36 | LOCK_USAGE_STATES | ||
37 | }; | ||
38 | |||
39 | /* | ||
40 | * Usage-state bitmasks: | ||
41 | */ | ||
42 | #define LOCKF_USED (1 << LOCK_USED) | ||
43 | #define LOCKF_USED_IN_HARDIRQ (1 << LOCK_USED_IN_HARDIRQ) | ||
44 | #define LOCKF_USED_IN_SOFTIRQ (1 << LOCK_USED_IN_SOFTIRQ) | ||
45 | #define LOCKF_ENABLED_HARDIRQS (1 << LOCK_ENABLED_HARDIRQS) | ||
46 | #define LOCKF_ENABLED_SOFTIRQS (1 << LOCK_ENABLED_SOFTIRQS) | ||
47 | |||
48 | #define LOCKF_ENABLED_IRQS (LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS) | ||
49 | #define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ) | ||
50 | |||
51 | #define LOCKF_USED_IN_HARDIRQ_READ (1 << LOCK_USED_IN_HARDIRQ_READ) | ||
52 | #define LOCKF_USED_IN_SOFTIRQ_READ (1 << LOCK_USED_IN_SOFTIRQ_READ) | ||
53 | #define LOCKF_ENABLED_HARDIRQS_READ (1 << LOCK_ENABLED_HARDIRQS_READ) | ||
54 | #define LOCKF_ENABLED_SOFTIRQS_READ (1 << LOCK_ENABLED_SOFTIRQS_READ) | ||
55 | |||
56 | #define LOCKF_ENABLED_IRQS_READ \ | ||
57 | (LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ) | ||
58 | #define LOCKF_USED_IN_IRQ_READ \ | ||
59 | (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) | ||
60 | 27 | ||
61 | #define MAX_LOCKDEP_SUBCLASSES 8UL | 28 | #define MAX_LOCKDEP_SUBCLASSES 8UL |
62 | 29 | ||
@@ -97,7 +64,7 @@ struct lock_class { | |||
97 | * IRQ/softirq usage tracking bits: | 64 | * IRQ/softirq usage tracking bits: |
98 | */ | 65 | */ |
99 | unsigned long usage_mask; | 66 | unsigned long usage_mask; |
100 | struct stack_trace usage_traces[LOCK_USAGE_STATES]; | 67 | struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; |
101 | 68 | ||
102 | /* | 69 | /* |
103 | * These fields represent a directed graph of lock dependencies, | 70 | * These fields represent a directed graph of lock dependencies, |
@@ -324,7 +291,11 @@ static inline void lock_set_subclass(struct lockdep_map *lock, | |||
324 | lock_set_class(lock, lock->name, lock->key, subclass, ip); | 291 | lock_set_class(lock, lock->name, lock->key, subclass, ip); |
325 | } | 292 | } |
326 | 293 | ||
327 | # define INIT_LOCKDEP .lockdep_recursion = 0, | 294 | extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask); |
295 | extern void lockdep_clear_current_reclaim_state(void); | ||
296 | extern void lockdep_trace_alloc(gfp_t mask); | ||
297 | |||
298 | # define INIT_LOCKDEP .lockdep_recursion = 0, .lockdep_reclaim_gfp = 0, | ||
328 | 299 | ||
329 | #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) | 300 | #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) |
330 | 301 | ||
@@ -342,6 +313,9 @@ static inline void lockdep_on(void) | |||
342 | # define lock_release(l, n, i) do { } while (0) | 313 | # define lock_release(l, n, i) do { } while (0) |
343 | # define lock_set_class(l, n, k, s, i) do { } while (0) | 314 | # define lock_set_class(l, n, k, s, i) do { } while (0) |
344 | # define lock_set_subclass(l, s, i) do { } while (0) | 315 | # define lock_set_subclass(l, s, i) do { } while (0) |
316 | # define lockdep_set_current_reclaim_state(g) do { } while (0) | ||
317 | # define lockdep_clear_current_reclaim_state() do { } while (0) | ||
318 | # define lockdep_trace_alloc(g) do { } while (0) | ||
345 | # define lockdep_init() do { } while (0) | 319 | # define lockdep_init() do { } while (0) |
346 | # define lockdep_info() do { } while (0) | 320 | # define lockdep_info() do { } while (0) |
347 | # define lockdep_init_map(lock, name, key, sub) \ | 321 | # define lockdep_init_map(lock, name, key, sub) \ |
@@ -390,6 +364,23 @@ do { \ | |||
390 | 364 | ||
391 | #endif /* CONFIG_LOCK_STAT */ | 365 | #endif /* CONFIG_LOCK_STAT */ |
392 | 366 | ||
367 | #ifdef CONFIG_LOCKDEP | ||
368 | |||
369 | /* | ||
370 | * On lockdep we dont want the hand-coded irq-enable of | ||
371 | * _raw_*_lock_flags() code, because lockdep assumes | ||
372 | * that interrupts are not re-enabled during lock-acquire: | ||
373 | */ | ||
374 | #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ | ||
375 | LOCK_CONTENDED((_lock), (try), (lock)) | ||
376 | |||
377 | #else /* CONFIG_LOCKDEP */ | ||
378 | |||
379 | #define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \ | ||
380 | lockfl((_lock), (flags)) | ||
381 | |||
382 | #endif /* CONFIG_LOCKDEP */ | ||
383 | |||
393 | #ifdef CONFIG_GENERIC_HARDIRQS | 384 | #ifdef CONFIG_GENERIC_HARDIRQS |
394 | extern void early_init_irq_lock_class(void); | 385 | extern void early_init_irq_lock_class(void); |
395 | #else | 386 | #else |
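Besides LOCK_CONTENDED_FLAGS, the lockdep changes export reclaim-state tracking so allocations made while reclaiming can be checked for inversions. A rough sketch of how a reclaim path would bracket its work; the function here is hypothetical, the real users live in mm/vmscan.c:

#include <linux/lockdep.h>
#include <linux/gfp.h>

static unsigned long my_direct_reclaim(gfp_t gfp_mask)
{
	unsigned long freed = 0;

	lockdep_set_current_reclaim_state(gfp_mask);
	/* ... shrink caches, possibly taking fs/io locks ... */
	lockdep_clear_current_reclaim_state();

	return freed;
}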
diff --git a/include/linux/loop.h b/include/linux/loop.h index 6ffd6db5bb0d..40725447f5e0 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h | |||
@@ -160,5 +160,6 @@ int loop_unregister_transfer(int number); | |||
160 | #define LOOP_SET_STATUS64 0x4C04 | 160 | #define LOOP_SET_STATUS64 0x4C04 |
161 | #define LOOP_GET_STATUS64 0x4C05 | 161 | #define LOOP_GET_STATUS64 0x4C05 |
162 | #define LOOP_CHANGE_FD 0x4C06 | 162 | #define LOOP_CHANGE_FD 0x4C06 |
163 | #define LOOP_SET_CAPACITY 0x4C07 | ||
163 | 164 | ||
164 | #endif | 165 | #endif |
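LOOP_SET_CAPACITY asks the loop driver to re-read the size of its backing file, so a grown image becomes visible without detaching. A userspace sketch; the helper name and error handling are mine:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

static int refresh_loop_capacity(const char *loopdev)
{
	int fd = open(loopdev, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, LOOP_SET_CAPACITY, 0);	/* argument is ignored */
	close(fd);
	return ret;
}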
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 326f45c86530..25b9ca93d232 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -56,7 +56,7 @@ extern void mem_cgroup_move_lists(struct page *page, | |||
56 | enum lru_list from, enum lru_list to); | 56 | enum lru_list from, enum lru_list to); |
57 | extern void mem_cgroup_uncharge_page(struct page *page); | 57 | extern void mem_cgroup_uncharge_page(struct page *page); |
58 | extern void mem_cgroup_uncharge_cache_page(struct page *page); | 58 | extern void mem_cgroup_uncharge_cache_page(struct page *page); |
59 | extern int mem_cgroup_shrink_usage(struct page *page, | 59 | extern int mem_cgroup_shmem_charge_fallback(struct page *page, |
60 | struct mm_struct *mm, gfp_t gfp_mask); | 60 | struct mm_struct *mm, gfp_t gfp_mask); |
61 | 61 | ||
62 | extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, | 62 | extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, |
@@ -75,7 +75,7 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) | |||
75 | { | 75 | { |
76 | struct mem_cgroup *mem; | 76 | struct mem_cgroup *mem; |
77 | rcu_read_lock(); | 77 | rcu_read_lock(); |
78 | mem = mem_cgroup_from_task((mm)->owner); | 78 | mem = mem_cgroup_from_task(rcu_dereference((mm)->owner)); |
79 | rcu_read_unlock(); | 79 | rcu_read_unlock(); |
80 | return cgroup == mem; | 80 | return cgroup == mem; |
81 | } | 81 | } |
@@ -88,9 +88,6 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem, | |||
88 | /* | 88 | /* |
89 | * For memory reclaim. | 89 | * For memory reclaim. |
90 | */ | 90 | */ |
91 | extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem); | ||
92 | extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem); | ||
93 | |||
94 | extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem); | 91 | extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem); |
95 | extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, | 92 | extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, |
96 | int priority); | 93 | int priority); |
@@ -104,6 +101,8 @@ struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, | |||
104 | struct zone *zone); | 101 | struct zone *zone); |
105 | struct zone_reclaim_stat* | 102 | struct zone_reclaim_stat* |
106 | mem_cgroup_get_reclaim_stat_from_page(struct page *page); | 103 | mem_cgroup_get_reclaim_stat_from_page(struct page *page); |
104 | extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, | ||
105 | struct task_struct *p); | ||
107 | 106 | ||
108 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 107 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
109 | extern int do_swap_account; | 108 | extern int do_swap_account; |
@@ -156,7 +155,7 @@ static inline void mem_cgroup_uncharge_cache_page(struct page *page) | |||
156 | { | 155 | { |
157 | } | 156 | } |
158 | 157 | ||
159 | static inline int mem_cgroup_shrink_usage(struct page *page, | 158 | static inline int mem_cgroup_shmem_charge_fallback(struct page *page, |
160 | struct mm_struct *mm, gfp_t gfp_mask) | 159 | struct mm_struct *mm, gfp_t gfp_mask) |
161 | { | 160 | { |
162 | return 0; | 161 | return 0; |
@@ -209,16 +208,6 @@ static inline void mem_cgroup_end_migration(struct mem_cgroup *mem, | |||
209 | { | 208 | { |
210 | } | 209 | } |
211 | 210 | ||
212 | static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem) | ||
213 | { | ||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | static inline int mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem) | ||
218 | { | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) | 211 | static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem) |
223 | { | 212 | { |
224 | return 0; | 213 | return 0; |
@@ -270,6 +259,11 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page) | |||
270 | return NULL; | 259 | return NULL; |
271 | } | 260 | } |
272 | 261 | ||
262 | static inline void | ||
263 | mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | ||
264 | { | ||
265 | } | ||
266 | |||
273 | #endif /* CONFIG_CGROUP_MEM_CONT */ | 267 | #endif /* CONFIG_CGROUP_MEM_CONT */ |
274 | 268 | ||
275 | #endif /* _LINUX_MEMCONTROL_H */ | 269 | #endif /* _LINUX_MEMCONTROL_H */ |
diff --git a/include/linux/memory.h b/include/linux/memory.h index 3fdc10806d31..37fa19b34ef5 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
@@ -99,4 +99,21 @@ enum mem_add_context { BOOT, HOTPLUG }; | |||
99 | #define hotplug_memory_notifier(fn, pri) do { } while (0) | 99 | #define hotplug_memory_notifier(fn, pri) do { } while (0) |
100 | #endif | 100 | #endif |
101 | 101 | ||
102 | /* | ||
103 | * 'struct memory_accessor' is a generic interface to provide | ||
104 | * in-kernel access to persistent memory such as i2c or SPI EEPROMs | ||
105 | */ | ||
106 | struct memory_accessor { | ||
107 | ssize_t (*read)(struct memory_accessor *, char *buf, off_t offset, | ||
108 | size_t count); | ||
109 | ssize_t (*write)(struct memory_accessor *, const char *buf, | ||
110 | off_t offset, size_t count); | ||
111 | }; | ||
112 | |||
113 | /* | ||
114 | * Kernel text modification mutex, used for code patching. Users of this lock | ||
115 | * can sleep. | ||
116 | */ | ||
117 | extern struct mutex text_mutex; | ||
118 | |||
102 | #endif /* _LINUX_MEMORY_H_ */ | 119 | #endif /* _LINUX_MEMORY_H_ */ |
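struct memory_accessor gives board code a driver-agnostic way to read or write persistent memory (for example an I2C or SPI EEPROM) once the owning driver hands out the accessor. A sketch of a callback that pulls a MAC address out of the first six bytes; the callback signature and hookup (e.g. an EEPROM driver's setup hook) are assumptions, not defined by memory.h itself:

#include <linux/kernel.h>
#include <linux/memory.h>

static void board_eeprom_setup(struct memory_accessor *mem_acc, void *context)
{
	char mac[6];

	if (mem_acc->read(mem_acc, mac, 0, sizeof(mac)) == sizeof(mac))
		printk(KERN_INFO "EEPROM MAC read\n");	/* hand off to the NIC driver */
}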
diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h new file mode 100644 index 000000000000..be469a357cbb --- /dev/null +++ b/include/linux/mfd/ds1wm.h | |||
@@ -0,0 +1,6 @@ | |||
1 | /* MFD cell driver data for the DS1WM driver */ | ||
2 | |||
3 | struct ds1wm_driver_data { | ||
4 | int active_high; | ||
5 | int clock_rate; | ||
6 | }; | ||
diff --git a/include/linux/mfd/htc-pasic3.h b/include/linux/mfd/htc-pasic3.h index b4294f12c4f8..3d3ed67bd969 100644 --- a/include/linux/mfd/htc-pasic3.h +++ b/include/linux/mfd/htc-pasic3.h | |||
@@ -48,7 +48,6 @@ struct pasic3_leds_machinfo { | |||
48 | 48 | ||
49 | struct pasic3_platform_data { | 49 | struct pasic3_platform_data { |
50 | struct pasic3_leds_machinfo *led_pdata; | 50 | struct pasic3_leds_machinfo *led_pdata; |
51 | unsigned int bus_shift; | ||
52 | unsigned int clock_rate; | 51 | unsigned int clock_rate; |
53 | }; | 52 | }; |
54 | 53 | ||
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index 4455b212d75a..c8f51c3c0a72 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h | |||
@@ -29,6 +29,8 @@ struct pcf50633_platform_data { | |||
29 | char **batteries; | 29 | char **batteries; |
30 | int num_batteries; | 30 | int num_batteries; |
31 | 31 | ||
32 | int charging_restart_interval; | ||
33 | |||
32 | /* Callbacks */ | 34 | /* Callbacks */ |
33 | void (*probe_done)(struct pcf50633 *); | 35 | void (*probe_done)(struct pcf50633 *); |
34 | void (*mbc_event_callback)(struct pcf50633 *, int); | 36 | void (*mbc_event_callback)(struct pcf50633 *, int); |
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h index 6e17619b773a..4119579acf2c 100644 --- a/include/linux/mfd/pcf50633/mbc.h +++ b/include/linux/mfd/pcf50633/mbc.h | |||
@@ -128,7 +128,6 @@ enum pcf50633_reg_mbcs3 { | |||
128 | int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); | 128 | int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); |
129 | 129 | ||
130 | int pcf50633_mbc_get_status(struct pcf50633 *); | 130 | int pcf50633_mbc_get_status(struct pcf50633 *); |
131 | void pcf50633_mbc_set_status(struct pcf50633 *, int what, int status); | ||
132 | 131 | ||
133 | #endif | 132 | #endif |
134 | 133 | ||
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index 980669d50dca..42cca672f340 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h | |||
@@ -640,9 +640,11 @@ struct wm8350 { | |||
640 | * | 640 | * |
641 | * @init: Function called during driver initialisation. Should be | 641 | * @init: Function called during driver initialisation. Should be |
642 | * used by the platform to configure GPIO functions and similar. | 642 | * used by the platform to configure GPIO functions and similar. |
643 | * @irq_high: Set if WM8350 IRQ is active high. | ||
643 | */ | 644 | */ |
644 | struct wm8350_platform_data { | 645 | struct wm8350_platform_data { |
645 | int (*init)(struct wm8350 *wm8350); | 646 | int (*init)(struct wm8350 *wm8350); |
647 | int irq_high; | ||
646 | }; | 648 | }; |
647 | 649 | ||
648 | 650 | ||
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h new file mode 100644 index 000000000000..1f76b1ebf627 --- /dev/null +++ b/include/linux/mg_disk.h | |||
@@ -0,0 +1,206 @@ | |||
1 | /* | ||
2 | * include/linux/mg_disk.h | ||
3 | * | ||
4 | * Support for the mGine m[g]flash IO mode. | ||
5 | * Based on legacy hd.c | ||
6 | * | ||
7 | * (c) 2008 mGine Co.,LTD | ||
8 | * (c) 2008 unsik Kim <donari75@gmail.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef __MG_DISK_H__ | ||
16 | #define __MG_DISK_H__ | ||
17 | |||
18 | #include <linux/blkdev.h> | ||
19 | #include <linux/ata.h> | ||
20 | |||
21 | /* name for block device */ | ||
22 | #define MG_DISK_NAME "mgd" | ||
23 | /* name for platform device */ | ||
24 | #define MG_DEV_NAME "mg_disk" | ||
25 | |||
26 | #define MG_DISK_MAJ 0 | ||
27 | #define MG_DISK_MAX_PART 16 | ||
28 | #define MG_SECTOR_SIZE 512 | ||
29 | #define MG_MAX_SECTS 256 | ||
30 | |||
31 | /* Register offsets */ | ||
32 | #define MG_BUFF_OFFSET 0x8000 | ||
33 | #define MG_STORAGE_BUFFER_SIZE 0x200 | ||
34 | #define MG_REG_OFFSET 0xC000 | ||
35 | #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ | ||
36 | #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ | ||
37 | #define MG_REG_SECT_CNT (MG_REG_OFFSET + 4) | ||
38 | #define MG_REG_SECT_NUM (MG_REG_OFFSET + 6) | ||
39 | #define MG_REG_CYL_LOW (MG_REG_OFFSET + 8) | ||
40 | #define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA) | ||
41 | #define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC) | ||
42 | #define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */ | ||
43 | #define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */ | ||
44 | #define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10) | ||
45 | #define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12) | ||
46 | |||
47 | /* "Drive Select/Head Register" bit values */ | ||
48 | #define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */ | ||
49 | #define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON) | ||
50 | #define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON) | ||
51 | #define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON) | ||
52 | |||
53 | |||
54 | /* "Device Control Register" bit values */ | ||
55 | #define MG_REG_CTRL_INTR_ENABLE 0x0 | ||
56 | #define MG_REG_CTRL_INTR_DISABLE (0x1<<1) | ||
57 | #define MG_REG_CTRL_RESET (0x1<<2) | ||
58 | #define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0 | ||
59 | #define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4) | ||
60 | #define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0 | ||
61 | #define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5) | ||
62 | #define MG_REG_CTRL_DPD_DISABLE 0x0 | ||
63 | #define MG_REG_CTRL_DPD_ENABLE (0x1<<6) | ||
64 | |||
65 | /* Status register bit */ | ||
66 | /* error bit in status register */ | ||
67 | #define MG_REG_STATUS_BIT_ERROR 0x01 | ||
68 | /* corrected error in status register */ | ||
69 | #define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04 | ||
70 | /* data request bit in status register */ | ||
71 | #define MG_REG_STATUS_BIT_DATA_REQ 0x08 | ||
72 | /* DSC - Drive Seek Complete */ | ||
73 | #define MG_REG_STATUS_BIT_SEEK_DONE 0x10 | ||
74 | /* DWF - Drive Write Fault */ | ||
75 | #define MG_REG_STATUS_BIT_WRITE_FAULT 0x20 | ||
76 | #define MG_REG_STATUS_BIT_READY 0x40 | ||
77 | #define MG_REG_STATUS_BIT_BUSY 0x80 | ||
78 | |||
79 | /* handy status */ | ||
80 | #define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE) | ||
81 | #define MG_READY_OK(s) (((s) & (MG_STAT_READY | \ | ||
82 | (MG_REG_STATUS_BIT_BUSY | \ | ||
83 | MG_REG_STATUS_BIT_WRITE_FAULT | \ | ||
84 | MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY) | ||
85 | |||
86 | /* Error register */ | ||
87 | #define MG_REG_ERR_AMNF 0x01 | ||
88 | #define MG_REG_ERR_ABRT 0x04 | ||
89 | #define MG_REG_ERR_IDNF 0x10 | ||
90 | #define MG_REG_ERR_UNC 0x40 | ||
91 | #define MG_REG_ERR_BBK 0x80 | ||
92 | |||
93 | /* error code for others */ | ||
94 | #define MG_ERR_NONE 0 | ||
95 | #define MG_ERR_TIMEOUT 0x100 | ||
96 | #define MG_ERR_INIT_STAT 0x101 | ||
97 | #define MG_ERR_TRANSLATION 0x102 | ||
98 | #define MG_ERR_CTRL_RST 0x103 | ||
99 | #define MG_ERR_INV_STAT 0x104 | ||
100 | #define MG_ERR_RSTOUT 0x105 | ||
101 | |||
102 | #define MG_MAX_ERRORS 6 /* Max read/write errors */ | ||
103 | |||
104 | /* command */ | ||
105 | #define MG_CMD_RD 0x20 | ||
106 | #define MG_CMD_WR 0x30 | ||
107 | #define MG_CMD_SLEEP 0x99 | ||
108 | #define MG_CMD_WAKEUP 0xC3 | ||
109 | #define MG_CMD_ID 0xEC | ||
110 | #define MG_CMD_WR_CONF 0x3C | ||
111 | #define MG_CMD_RD_CONF 0x40 | ||
112 | |||
113 | /* operation mode */ | ||
114 | #define MG_OP_CASCADE (1 << 0) | ||
115 | #define MG_OP_CASCADE_SYNC_RD (1 << 1) | ||
116 | #define MG_OP_CASCADE_SYNC_WR (1 << 2) | ||
117 | #define MG_OP_INTERLEAVE (1 << 3) | ||
118 | |||
119 | /* synchronous */ | ||
120 | #define MG_BURST_LAT_4 (3 << 4) | ||
121 | #define MG_BURST_LAT_5 (4 << 4) | ||
122 | #define MG_BURST_LAT_6 (5 << 4) | ||
123 | #define MG_BURST_LAT_7 (6 << 4) | ||
124 | #define MG_BURST_LAT_8 (7 << 4) | ||
125 | #define MG_BURST_LEN_4 (1 << 1) | ||
126 | #define MG_BURST_LEN_8 (2 << 1) | ||
127 | #define MG_BURST_LEN_16 (3 << 1) | ||
128 | #define MG_BURST_LEN_32 (4 << 1) | ||
129 | #define MG_BURST_LEN_CONT (0 << 1) | ||
130 | |||
131 | /* timeout value (unit: ms) */ | ||
132 | #define MG_TMAX_CONF_TO_CMD 1 | ||
133 | #define MG_TMAX_WAIT_RD_DRQ 10 | ||
134 | #define MG_TMAX_WAIT_WR_DRQ 500 | ||
135 | #define MG_TMAX_RST_TO_BUSY 10 | ||
136 | #define MG_TMAX_HDRST_TO_RDY 500 | ||
137 | #define MG_TMAX_SWRST_TO_RDY 500 | ||
138 | #define MG_TMAX_RSTOUT 3000 | ||
139 | |||
140 | /* device attribution */ | ||
141 | /* use mflash as boot device */ | ||
142 | #define MG_BOOT_DEV (1 << 0) | ||
143 | /* use mflash as storage device */ | ||
144 | #define MG_STORAGE_DEV (1 << 1) | ||
145 | /* same as MG_STORAGE_DEV, but bootloader already done reset sequence */ | ||
146 | #define MG_STORAGE_DEV_SKIP_RST (1 << 2) | ||
147 | |||
148 | #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST) | ||
149 | |||
150 | /* names of GPIO resource */ | ||
151 | #define MG_RST_PIN "mg_rst" | ||
152 | /* except MG_BOOT_DEV, reset-out pin should be assigned */ | ||
153 | #define MG_RSTOUT_PIN "mg_rstout" | ||
154 | |||
155 | /* private driver data */ | ||
156 | struct mg_drv_data { | ||
157 | /* disk resource */ | ||
158 | u32 use_polling; | ||
159 | |||
160 | /* device attribution */ | ||
161 | u32 dev_attr; | ||
162 | |||
163 | /* internally used */ | ||
164 | struct mg_host *host; | ||
165 | }; | ||
166 | |||
167 | /* main structure for mflash driver */ | ||
168 | struct mg_host { | ||
169 | struct device *dev; | ||
170 | |||
171 | struct request_queue *breq; | ||
172 | spinlock_t lock; | ||
173 | struct gendisk *gd; | ||
174 | |||
175 | struct timer_list timer; | ||
176 | void (*mg_do_intr) (struct mg_host *); | ||
177 | |||
178 | u16 id[ATA_ID_WORDS]; | ||
179 | |||
180 | u16 cyls; | ||
181 | u16 heads; | ||
182 | u16 sectors; | ||
183 | u32 n_sectors; | ||
184 | u32 nres_sectors; | ||
185 | |||
186 | void __iomem *dev_base; | ||
187 | unsigned int irq; | ||
188 | unsigned int rst; | ||
189 | unsigned int rstout; | ||
190 | |||
191 | u32 major; | ||
192 | u32 error; | ||
193 | }; | ||
194 | |||
195 | /* | ||
196 | * Debugging macro and defines | ||
197 | */ | ||
198 | #undef DO_MG_DEBUG | ||
199 | #ifdef DO_MG_DEBUG | ||
200 | # define MG_DBG(fmt, args...) \ | ||
201 | printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args) | ||
202 | #else /* CONFIG_MG_DEBUG */ | ||
203 | # define MG_DBG(fmt, args...) do { } while (0) | ||
204 | #endif /* CONFIG_MG_DEBUG */ | ||
205 | |||
206 | #endif | ||
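A platform using the new mg_disk.h passes a struct mg_drv_data (plus the "mg_rst"/"mg_rstout" GPIO resources) to the "mg_disk" platform device. A minimal sketch; the polling choice and attribute are illustrative:

#include <linux/mg_disk.h>

static struct mg_drv_data board_mflash_data = {
	.use_polling	= 1,			/* no IRQ wired on this board */
	.dev_attr	= MG_STORAGE_DEV,	/* plain storage, not a boot device */
};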
diff --git a/include/linux/mm.h b/include/linux/mm.h index 065cdf8c09fb..bff1f0d475c7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -104,6 +104,7 @@ extern unsigned int kobjsize(const void *objp); | |||
104 | #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ | 104 | #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ |
105 | #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ | 105 | #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ |
106 | #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ | 106 | #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ |
107 | #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ | ||
107 | 108 | ||
108 | #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ | 109 | #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */ |
109 | #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS | 110 | #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS |
@@ -134,6 +135,7 @@ extern pgprot_t protection_map[16]; | |||
134 | 135 | ||
135 | #define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ | 136 | #define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ |
136 | #define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ | 137 | #define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ |
138 | #define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ | ||
137 | 139 | ||
138 | /* | 140 | /* |
139 | * This interface is used by x86 PAT code to identify a pfn mapping that is | 141 | * This interface is used by x86 PAT code to identify a pfn mapping that is |
@@ -145,7 +147,7 @@ extern pgprot_t protection_map[16]; | |||
145 | */ | 147 | */ |
146 | static inline int is_linear_pfn_mapping(struct vm_area_struct *vma) | 148 | static inline int is_linear_pfn_mapping(struct vm_area_struct *vma) |
147 | { | 149 | { |
148 | return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff); | 150 | return (vma->vm_flags & VM_PFN_AT_MMAP); |
149 | } | 151 | } |
150 | 152 | ||
151 | static inline int is_pfn_mapping(struct vm_area_struct *vma) | 153 | static inline int is_pfn_mapping(struct vm_area_struct *vma) |
@@ -186,7 +188,7 @@ struct vm_operations_struct { | |||
186 | 188 | ||
187 | /* notification that a previously read-only page is about to become | 189 | /* notification that a previously read-only page is about to become |
188 | * writable, if an error is returned it will cause a SIGBUS */ | 190 | * writable, if an error is returned it will cause a SIGBUS */ |
189 | int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page); | 191 | int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); |
190 | 192 | ||
191 | /* called by access_process_vm when get_user_pages() fails, typically | 193 | /* called by access_process_vm when get_user_pages() fails, typically |
192 | * for use by special VMAs that can switch between memory and hardware | 194 | * for use by special VMAs that can switch between memory and hardware |
@@ -833,6 +835,7 @@ int __set_page_dirty_nobuffers(struct page *page); | |||
833 | int __set_page_dirty_no_writeback(struct page *page); | 835 | int __set_page_dirty_no_writeback(struct page *page); |
834 | int redirty_page_for_writepage(struct writeback_control *wbc, | 836 | int redirty_page_for_writepage(struct writeback_control *wbc, |
835 | struct page *page); | 837 | struct page *page); |
838 | void account_page_dirtied(struct page *page, struct address_space *mapping); | ||
836 | int set_page_dirty(struct page *page); | 839 | int set_page_dirty(struct page *page); |
837 | int set_page_dirty_lock(struct page *page); | 840 | int set_page_dirty_lock(struct page *page); |
838 | int clear_page_dirty_for_io(struct page *page); | 841 | int clear_page_dirty_for_io(struct page *page); |
@@ -1076,7 +1079,7 @@ static inline void setup_per_cpu_pageset(void) {} | |||
1076 | #endif | 1079 | #endif |
1077 | 1080 | ||
1078 | /* nommu.c */ | 1081 | /* nommu.c */ |
1079 | extern atomic_t mmap_pages_allocated; | 1082 | extern atomic_long_t mmap_pages_allocated; |
1080 | 1083 | ||
1081 | /* prio_tree.c */ | 1084 | /* prio_tree.c */ |
1082 | void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); | 1085 | void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); |
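The page_mkwrite change means filesystems now receive a struct vm_fault instead of the bare page. A sketch of an updated implementation under an invented name; a real filesystem does block reservation and locking here rather than just dirtying the page:

#include <linux/mm.h>

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;	/* previously passed directly */

	/* ... reserve space, check the page is still mapped, etc. ... */
	set_page_dirty(page);
	return 0;
}

static struct vm_operations_struct myfs_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= myfs_page_mkwrite,
};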
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index d84feb7bdbf0..0e80e26ecf21 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/rwsem.h> | 11 | #include <linux/rwsem.h> |
12 | #include <linux/completion.h> | 12 | #include <linux/completion.h> |
13 | #include <linux/cpumask.h> | 13 | #include <linux/cpumask.h> |
14 | #include <linux/page-debug-flags.h> | ||
14 | #include <asm/page.h> | 15 | #include <asm/page.h> |
15 | #include <asm/mmu.h> | 16 | #include <asm/mmu.h> |
16 | 17 | ||
@@ -94,6 +95,9 @@ struct page { | |||
94 | void *virtual; /* Kernel virtual address (NULL if | 95 | void *virtual; /* Kernel virtual address (NULL if |
95 | not kmapped, ie. highmem) */ | 96 | not kmapped, ie. highmem) */ |
96 | #endif /* WANT_PAGE_VIRTUAL */ | 97 | #endif /* WANT_PAGE_VIRTUAL */ |
98 | #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS | ||
99 | unsigned long debug_flags; /* Use atomic bitops on this */ | ||
100 | #endif | ||
97 | }; | 101 | }; |
98 | 102 | ||
99 | /* | 103 | /* |
diff --git a/include/linux/mman.h b/include/linux/mman.h index 30d1073bac3b..9872d6ca58ae 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h | |||
@@ -12,21 +12,18 @@ | |||
12 | 12 | ||
13 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/percpu_counter.h> | ||
15 | 16 | ||
16 | #include <asm/atomic.h> | 17 | #include <asm/atomic.h> |
17 | 18 | ||
18 | extern int sysctl_overcommit_memory; | 19 | extern int sysctl_overcommit_memory; |
19 | extern int sysctl_overcommit_ratio; | 20 | extern int sysctl_overcommit_ratio; |
20 | extern atomic_long_t vm_committed_space; | 21 | extern struct percpu_counter vm_committed_as; |
21 | 22 | ||
22 | #ifdef CONFIG_SMP | ||
23 | extern void vm_acct_memory(long pages); | ||
24 | #else | ||
25 | static inline void vm_acct_memory(long pages) | 23 | static inline void vm_acct_memory(long pages) |
26 | { | 24 | { |
27 | atomic_long_add(pages, &vm_committed_space); | 25 | percpu_counter_add(&vm_committed_as, pages); |
28 | } | 26 | } |
29 | #endif | ||
30 | 27 | ||
31 | static inline void vm_unacct_memory(long pages) | 28 | static inline void vm_unacct_memory(long pages) |
32 | { | 29 | { |
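With the committed-memory accounting moved from a global atomic to the per-CPU counter vm_committed_as, readers sum the counter instead of loading one word. A sketch of how a reader (such as the Committed_AS line in /proc/meminfo) would fetch it:

#include <linux/mman.h>
#include <linux/percpu_counter.h>

static unsigned long committed_as_pages(void)
{
	/* Approximate by design: per-CPU deltas may not be folded in yet. */
	return percpu_counter_read_positive(&vm_committed_as);
}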
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 4e457256bd33..3e7615e9087e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -192,5 +192,10 @@ static inline void mmc_signal_sdio_irq(struct mmc_host *host) | |||
192 | wake_up_process(host->sdio_irq_thread); | 192 | wake_up_process(host->sdio_irq_thread); |
193 | } | 193 | } |
194 | 194 | ||
195 | struct regulator; | ||
196 | |||
197 | int mmc_regulator_get_ocrmask(struct regulator *supply); | ||
198 | int mmc_regulator_set_ocr(struct regulator *supply, unsigned short vdd_bit); | ||
199 | |||
195 | #endif | 200 | #endif |
196 | 201 | ||
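The new regulator helpers let an MMC host driver derive its OCR mask from a vmmc regulator and program the voltage later from set_ios(). A hedged sketch of the probe-time half; the "vmmc" supply name and the surrounding driver structure are assumptions:

#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/host.h>

static int my_mmc_attach_regulator(struct mmc_host *mmc, struct device *dev)
{
	struct regulator *supply = regulator_get(dev, "vmmc");
	int ocr;

	if (IS_ERR(supply))
		return PTR_ERR(supply);

	ocr = mmc_regulator_get_ocrmask(supply);
	if (ocr > 0)
		mmc->ocr_avail = ocr;

	/* a real driver keeps 'supply', calls mmc_regulator_set_ocr() from its
	 * set_ios() handler, and regulator_put()s it on remove */
	return 0;
}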
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 1aca6cebbb78..186ec6ab334d 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -764,12 +764,6 @@ extern int numa_zonelist_order_handler(struct ctl_table *, int, | |||
764 | extern char numa_zonelist_order[]; | 764 | extern char numa_zonelist_order[]; |
765 | #define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ | 765 | #define NUMA_ZONELIST_ORDER_LEN 16 /* string buffer size */ |
766 | 766 | ||
767 | #include <linux/topology.h> | ||
768 | /* Returns the number of the current Node. */ | ||
769 | #ifndef numa_node_id | ||
770 | #define numa_node_id() (cpu_to_node(raw_smp_processor_id())) | ||
771 | #endif | ||
772 | |||
773 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 767 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
774 | 768 | ||
775 | extern struct pglist_data contig_page_data; | 769 | extern struct pglist_data contig_page_data; |
@@ -806,6 +800,14 @@ extern struct zone *next_zone(struct zone *zone); | |||
806 | zone; \ | 800 | zone; \ |
807 | zone = next_zone(zone)) | 801 | zone = next_zone(zone)) |
808 | 802 | ||
803 | #define for_each_populated_zone(zone) \ | ||
804 | for (zone = (first_online_pgdat())->node_zones; \ | ||
805 | zone; \ | ||
806 | zone = next_zone(zone)) \ | ||
807 | if (!populated_zone(zone)) \ | ||
808 | ; /* do nothing */ \ | ||
809 | else | ||
810 | |||
809 | static inline struct zone *zonelist_zone(struct zoneref *zoneref) | 811 | static inline struct zone *zonelist_zone(struct zoneref *zoneref) |
810 | { | 812 | { |
811 | return zoneref->zone; | 813 | return zoneref->zone; |
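for_each_populated_zone() folds the usual populated_zone() check into the iterator. A small sketch of a walker that skips empty zones:

#include <linux/mmzone.h>

static unsigned long count_present_pages(void)
{
	struct zone *zone;
	unsigned long total = 0;

	for_each_populated_zone(zone)
		total += zone->present_pages;

	return total;
}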
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 830bbcd449d6..3a059298cc19 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h | |||
@@ -22,6 +22,8 @@ struct proc_mounts { | |||
22 | int event; | 22 | int event; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | struct fs_struct; | ||
26 | |||
25 | extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, | 27 | extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, |
26 | struct fs_struct *); | 28 | struct fs_struct *); |
27 | extern void __put_mnt_ns(struct mnt_namespace *ns); | 29 | extern void __put_mnt_ns(struct mnt_namespace *ns); |
diff --git a/include/linux/module.h b/include/linux/module.h index 145a75528cc1..627ac082e2a6 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -248,6 +248,10 @@ struct module | |||
248 | const unsigned long *crcs; | 248 | const unsigned long *crcs; |
249 | unsigned int num_syms; | 249 | unsigned int num_syms; |
250 | 250 | ||
251 | /* Kernel parameters. */ | ||
252 | struct kernel_param *kp; | ||
253 | unsigned int num_kp; | ||
254 | |||
251 | /* GPL-only exported symbols. */ | 255 | /* GPL-only exported symbols. */ |
252 | unsigned int num_gpl_syms; | 256 | unsigned int num_gpl_syms; |
253 | const struct kernel_symbol *gpl_syms; | 257 | const struct kernel_symbol *gpl_syms; |
@@ -329,6 +333,11 @@ struct module | |||
329 | unsigned int num_tracepoints; | 333 | unsigned int num_tracepoints; |
330 | #endif | 334 | #endif |
331 | 335 | ||
336 | #ifdef CONFIG_TRACING | ||
337 | const char **trace_bprintk_fmt_start; | ||
338 | unsigned int num_trace_bprintk_fmt; | ||
339 | #endif | ||
340 | |||
332 | #ifdef CONFIG_MODULE_UNLOAD | 341 | #ifdef CONFIG_MODULE_UNLOAD |
333 | /* What modules depend on me? */ | 342 | /* What modules depend on me? */ |
334 | struct list_head modules_which_use_me; | 343 | struct list_head modules_which_use_me; |
@@ -350,6 +359,8 @@ struct module | |||
350 | #define MODULE_ARCH_INIT {} | 359 | #define MODULE_ARCH_INIT {} |
351 | #endif | 360 | #endif |
352 | 361 | ||
362 | extern struct mutex module_mutex; | ||
363 | |||
353 | /* FIXME: It'd be nice to isolate modules during init, too, so they | 364 | /* FIXME: It'd be nice to isolate modules during init, too, so they |
354 | aren't used before they (may) fail. But presently too much code | 365 | aren't used before they (may) fail. But presently too much code |
355 | (IDE & SCSI) require entry into the module during init.*/ | 366 | (IDE & SCSI) require entry into the module during init.*/ |
@@ -358,10 +369,10 @@ static inline int module_is_live(struct module *mod) | |||
358 | return mod->state != MODULE_STATE_GOING; | 369 | return mod->state != MODULE_STATE_GOING; |
359 | } | 370 | } |
360 | 371 | ||
361 | /* Is this address in a module? (second is with no locks, for oops) */ | ||
362 | struct module *module_text_address(unsigned long addr); | ||
363 | struct module *__module_text_address(unsigned long addr); | 372 | struct module *__module_text_address(unsigned long addr); |
364 | int is_module_address(unsigned long addr); | 373 | struct module *__module_address(unsigned long addr); |
374 | bool is_module_address(unsigned long addr); | ||
375 | bool is_module_text_address(unsigned long addr); | ||
365 | 376 | ||
366 | static inline int within_module_core(unsigned long addr, struct module *mod) | 377 | static inline int within_module_core(unsigned long addr, struct module *mod) |
367 | { | 378 | { |
@@ -375,6 +386,31 @@ static inline int within_module_init(unsigned long addr, struct module *mod) | |||
375 | addr < (unsigned long)mod->module_init + mod->init_size; | 386 | addr < (unsigned long)mod->module_init + mod->init_size; |
376 | } | 387 | } |
377 | 388 | ||
389 | /* Search for module by name: must hold module_mutex. */ | ||
390 | struct module *find_module(const char *name); | ||
391 | |||
392 | struct symsearch { | ||
393 | const struct kernel_symbol *start, *stop; | ||
394 | const unsigned long *crcs; | ||
395 | enum { | ||
396 | NOT_GPL_ONLY, | ||
397 | GPL_ONLY, | ||
398 | WILL_BE_GPL_ONLY, | ||
399 | } licence; | ||
400 | bool unused; | ||
401 | }; | ||
402 | |||
403 | /* Search for an exported symbol by name. */ | ||
404 | const struct kernel_symbol *find_symbol(const char *name, | ||
405 | struct module **owner, | ||
406 | const unsigned long **crc, | ||
407 | bool gplok, | ||
408 | bool warn); | ||
409 | |||
410 | /* Walk the exported symbol table */ | ||
411 | bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner, | ||
412 | unsigned int symnum, void *data), void *data); | ||
413 | |||
378 | /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if | 414 | /* Returns 0 and fills in value, defined and namebuf, or -ERANGE if |
379 | symnum out of range. */ | 415 | symnum out of range. */ |
380 | int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, | 416 | int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, |
@@ -383,6 +419,10 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, | |||
383 | /* Look for this name: can be of form module:name. */ | 419 | /* Look for this name: can be of form module:name. */ |
384 | unsigned long module_kallsyms_lookup_name(const char *name); | 420 | unsigned long module_kallsyms_lookup_name(const char *name); |
385 | 421 | ||
422 | int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, | ||
423 | struct module *, unsigned long), | ||
424 | void *data); | ||
425 | |||
386 | extern void __module_put_and_exit(struct module *mod, long code) | 426 | extern void __module_put_and_exit(struct module *mod, long code) |
387 | __attribute__((noreturn)); | 427 | __attribute__((noreturn)); |
388 | #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); | 428 | #define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code); |
@@ -444,6 +484,7 @@ static inline void __module_get(struct module *module) | |||
444 | #define symbol_put_addr(p) do { } while(0) | 484 | #define symbol_put_addr(p) do { } while(0) |
445 | 485 | ||
446 | #endif /* CONFIG_MODULE_UNLOAD */ | 486 | #endif /* CONFIG_MODULE_UNLOAD */ |
487 | int use_module(struct module *a, struct module *b); | ||
447 | 488 | ||
448 | /* This is a #define so the string doesn't get put in every .o file */ | 489 | /* This is a #define so the string doesn't get put in every .o file */ |
449 | #define module_name(mod) \ | 490 | #define module_name(mod) \ |
@@ -490,21 +531,24 @@ search_module_extables(unsigned long addr) | |||
490 | return NULL; | 531 | return NULL; |
491 | } | 532 | } |
492 | 533 | ||
493 | /* Is this address in a module? */ | 534 | static inline struct module *__module_address(unsigned long addr) |
494 | static inline struct module *module_text_address(unsigned long addr) | ||
495 | { | 535 | { |
496 | return NULL; | 536 | return NULL; |
497 | } | 537 | } |
498 | 538 | ||
499 | /* Is this address in a module? (don't take a lock, we're oopsing) */ | ||
500 | static inline struct module *__module_text_address(unsigned long addr) | 539 | static inline struct module *__module_text_address(unsigned long addr) |
501 | { | 540 | { |
502 | return NULL; | 541 | return NULL; |
503 | } | 542 | } |
504 | 543 | ||
505 | static inline int is_module_address(unsigned long addr) | 544 | static inline bool is_module_address(unsigned long addr) |
506 | { | 545 | { |
507 | return 0; | 546 | return false; |
547 | } | ||
548 | |||
549 | static inline bool is_module_text_address(unsigned long addr) | ||
550 | { | ||
551 | return false; | ||
508 | } | 552 | } |
509 | 553 | ||
510 | /* Get/put a kernel symbol (calls should be symmetric) */ | 554 | /* Get/put a kernel symbol (calls should be symmetric) */ |
@@ -559,6 +603,14 @@ static inline unsigned long module_kallsyms_lookup_name(const char *name) | |||
559 | return 0; | 603 | return 0; |
560 | } | 604 | } |
561 | 605 | ||
606 | static inline int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, | ||
607 | struct module *, | ||
608 | unsigned long), | ||
609 | void *data) | ||
610 | { | ||
611 | return 0; | ||
612 | } | ||
613 | |||
562 | static inline int register_module_notifier(struct notifier_block * nb) | 614 | static inline int register_module_notifier(struct notifier_block * nb) |
563 | { | 615 | { |
564 | /* no events will happen anyway, so this can always succeed */ | 616 | /* no events will happen anyway, so this can always succeed */ |
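module.h now exposes the symbol-resolution machinery (module_mutex, find_module(), struct symsearch, find_symbol(), each_symbol()) along with the bool-returning address helpers. A sketch of looking up an exported symbol and its owner; the caller is assumed to satisfy the locking rules (module_mutex or preemption disabled), which the sketch does not show:

#include <linux/kernel.h>
#include <linux/module.h>

static void dump_symbol(const char *name)
{
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	struct module *owner;

	sym = find_symbol(name, &owner, &crc, true /* gplok */, false /* warn */);
	if (sym)
		printk(KERN_INFO "%s = 0x%lx (owner: %s)\n", name, sym->value,
		       owner ? owner->name : "vmlinux");
}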
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index e4af3399ef48..a4f0b931846c 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -138,6 +138,16 @@ extern int parse_args(const char *name, | |||
138 | unsigned num, | 138 | unsigned num, |
139 | int (*unknown)(char *param, char *val)); | 139 | int (*unknown)(char *param, char *val)); |
140 | 140 | ||
141 | /* Called by module remove. */ | ||
142 | #ifdef CONFIG_SYSFS | ||
143 | extern void destroy_params(const struct kernel_param *params, unsigned num); | ||
144 | #else | ||
145 | static inline void destroy_params(const struct kernel_param *params, | ||
146 | unsigned num) | ||
147 | { | ||
148 | } | ||
149 | #endif /* !CONFIG_SYSFS */ | ||
150 | |||
141 | /* All the helper functions */ | 151 | /* All the helper functions */ |
142 | /* The macros to do compile-time type checking stolen from Jakub | 152 | /* The macros to do compile-time type checking stolen from Jakub |
143 | Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ | 153 | Jelinek, who IIRC came up with this idea for the 2.4 module init code. */ |
diff --git a/include/linux/mpage.h b/include/linux/mpage.h index 5c42821da2d1..068a0c9946af 100644 --- a/include/linux/mpage.h +++ b/include/linux/mpage.h | |||
@@ -11,21 +11,11 @@ | |||
11 | */ | 11 | */ |
12 | #ifdef CONFIG_BLOCK | 12 | #ifdef CONFIG_BLOCK |
13 | 13 | ||
14 | struct mpage_data { | ||
15 | struct bio *bio; | ||
16 | sector_t last_block_in_bio; | ||
17 | get_block_t *get_block; | ||
18 | unsigned use_writepage; | ||
19 | }; | ||
20 | |||
21 | struct writeback_control; | 14 | struct writeback_control; |
22 | 15 | ||
23 | struct bio *mpage_bio_submit(int rw, struct bio *bio); | ||
24 | int mpage_readpages(struct address_space *mapping, struct list_head *pages, | 16 | int mpage_readpages(struct address_space *mapping, struct list_head *pages, |
25 | unsigned nr_pages, get_block_t get_block); | 17 | unsigned nr_pages, get_block_t get_block); |
26 | int mpage_readpage(struct page *page, get_block_t get_block); | 18 | int mpage_readpage(struct page *page, get_block_t get_block); |
27 | int __mpage_writepage(struct page *page, struct writeback_control *wbc, | ||
28 | void *data); | ||
29 | int mpage_writepages(struct address_space *mapping, | 19 | int mpage_writepages(struct address_space *mapping, |
30 | struct writeback_control *wbc, get_block_t get_block); | 20 | struct writeback_control *wbc, get_block_t get_block); |
31 | int mpage_writepage(struct page *page, get_block_t *get_block, | 21 | int mpage_writepage(struct page *page, get_block_t *get_block, |
diff --git a/include/linux/msi.h b/include/linux/msi.h index d2b8a1e8ca11..6991ab5b24d1 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -20,20 +20,23 @@ extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); | |||
20 | 20 | ||
21 | struct msi_desc { | 21 | struct msi_desc { |
22 | struct { | 22 | struct { |
23 | __u8 type : 5; /* {0: unused, 5h:MSI, 11h:MSI-X} */ | 23 | __u8 is_msix : 1; |
24 | __u8 multiple: 3; /* log2 number of messages */ | ||
24 | __u8 maskbit : 1; /* mask-pending bit supported ? */ | 25 | __u8 maskbit : 1; /* mask-pending bit supported ? */ |
25 | __u8 masked : 1; | ||
26 | __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ | 26 | __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ |
27 | __u8 pos; /* Location of the msi capability */ | 27 | __u8 pos; /* Location of the msi capability */ |
28 | __u32 maskbits_mask; /* mask bits mask */ | ||
29 | __u16 entry_nr; /* specific enabled entry */ | 28 | __u16 entry_nr; /* specific enabled entry */ |
30 | unsigned default_irq; /* default pre-assigned irq */ | 29 | unsigned default_irq; /* default pre-assigned irq */ |
31 | }msi_attrib; | 30 | } msi_attrib; |
32 | 31 | ||
32 | u32 masked; /* mask bits */ | ||
33 | unsigned int irq; | 33 | unsigned int irq; |
34 | struct list_head list; | 34 | struct list_head list; |
35 | 35 | ||
36 | void __iomem *mask_base; | 36 | union { |
37 | void __iomem *mask_base; | ||
38 | u8 mask_pos; | ||
39 | }; | ||
37 | struct pci_dev *dev; | 40 | struct pci_dev *dev; |
38 | 41 | ||
39 | /* Last set MSI message */ | 42 | /* Last set MSI message */ |
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 3aa5d77c2cdb..5675b63a0631 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/uio.h> | 12 | #include <linux/uio.h> |
13 | #include <linux/notifier.h> | 13 | #include <linux/notifier.h> |
14 | #include <linux/device.h> | ||
14 | 15 | ||
15 | #include <linux/mtd/compatmac.h> | 16 | #include <linux/mtd/compatmac.h> |
16 | #include <mtd/mtd-abi.h> | 17 | #include <mtd/mtd-abi.h> |
@@ -162,6 +163,20 @@ struct mtd_info { | |||
162 | /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ | 163 | /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ |
163 | void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len); | 164 | void (*unpoint) (struct mtd_info *mtd, loff_t from, size_t len); |
164 | 165 | ||
166 | /* Allow NOMMU mmap() to directly map the device (if not NULL) | ||
167 | * - return the address to which the offset maps | ||
168 | * - return -ENOSYS to indicate refusal to do the mapping | ||
169 | */ | ||
170 | unsigned long (*get_unmapped_area) (struct mtd_info *mtd, | ||
171 | unsigned long len, | ||
172 | unsigned long offset, | ||
173 | unsigned long flags); | ||
174 | |||
175 | /* Backing device capabilities for this device | ||
176 | * - provides mmap capabilities | ||
177 | */ | ||
178 | struct backing_dev_info *backing_dev_info; | ||
179 | |||
165 | 180 | ||
166 | int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); | 181 | int (*read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); |
167 | int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); | 182 | int (*write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf); |
@@ -223,6 +238,7 @@ struct mtd_info { | |||
223 | void *priv; | 238 | void *priv; |
224 | 239 | ||
225 | struct module *owner; | 240 | struct module *owner; |
241 | struct device dev; | ||
226 | int usecount; | 242 | int usecount; |
227 | 243 | ||
228 | /* If the driver is something smart, like UBI, it may need to maintain | 244 | /* If the driver is something smart, like UBI, it may need to maintain |
@@ -233,6 +249,11 @@ struct mtd_info { | |||
233 | void (*put_device) (struct mtd_info *mtd); | 249 | void (*put_device) (struct mtd_info *mtd); |
234 | }; | 250 | }; |
235 | 251 | ||
252 | static inline struct mtd_info *dev_to_mtd(struct device *dev) | ||
253 | { | ||
254 | return dev ? container_of(dev, struct mtd_info, dev) : NULL; | ||
255 | } | ||
256 | |||
236 | static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) | 257 | static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) |
237 | { | 258 | { |
238 | if (mtd->erasesize_shift) | 259 | if (mtd->erasesize_shift) |
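Because struct mtd_info now embeds a struct device, code that receives the device (sysfs attributes, class callbacks) can map back with dev_to_mtd(). A sketch of a sysfs show() routine built on it; the attribute itself is hypothetical:

#include <linux/device.h>
#include <linux/mtd/mtd.h>

static ssize_t mtd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct mtd_info *mtd = dev_to_mtd(dev);

	return sprintf(buf, "%s\n", mtd ? mtd->name : "(none)");
}
static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);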
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index db5b63da2a7e..7efb9be34662 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -43,8 +43,8 @@ extern void nand_wait_ready(struct mtd_info *mtd); | |||
43 | * is supported now. If you add a chip with bigger oobsize/page | 43 | * is supported now. If you add a chip with bigger oobsize/page |
44 | * adjust this accordingly. | 44 | * adjust this accordingly. |
45 | */ | 45 | */ |
46 | #define NAND_MAX_OOBSIZE 64 | 46 | #define NAND_MAX_OOBSIZE 128 |
47 | #define NAND_MAX_PAGESIZE 2048 | 47 | #define NAND_MAX_PAGESIZE 4096 |
48 | 48 | ||
49 | /* | 49 | /* |
50 | * Constants for hardware specific CLE/ALE/NCE function | 50 | * Constants for hardware specific CLE/ALE/NCE function |
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h index a45dd831b3f8..7535a74083b9 100644 --- a/include/linux/mtd/partitions.h +++ b/include/linux/mtd/partitions.h | |||
@@ -76,4 +76,16 @@ int __devinit of_mtd_parse_partitions(struct device *dev, | |||
76 | struct device_node *node, | 76 | struct device_node *node, |
77 | struct mtd_partition **pparts); | 77 | struct mtd_partition **pparts); |
78 | 78 | ||
79 | #ifdef CONFIG_MTD_PARTITIONS | ||
80 | static inline int mtd_has_partitions(void) { return 1; } | ||
81 | #else | ||
82 | static inline int mtd_has_partitions(void) { return 0; } | ||
83 | #endif | ||
84 | |||
85 | #ifdef CONFIG_MTD_CMDLINE_PARTS | ||
86 | static inline int mtd_has_cmdlinepart(void) { return 1; } | ||
87 | #else | ||
88 | static inline int mtd_has_cmdlinepart(void) { return 0; } | ||
89 | #endif | ||
90 | |||
79 | #endif | 91 | #endif |
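mtd_has_partitions() and mtd_has_cmdlinepart() let a driver keep a single registration path whichever of CONFIG_MTD_PARTITIONS and CONFIG_MTD_CMDLINE_PARTS is enabled; the compiler simply drops the dead branch. A hedged sketch of a map driver's probe tail follows, assuming the era's add_mtd_partitions(), add_mtd_device() and parse_mtd_partitions() helpers; the function and probe-list names are illustrative.

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static const char *example_part_probes[] = { "cmdlinepart", NULL };

/* Sketch: register parsed partitions when available, else the whole device. */
static int example_register(struct mtd_info *mtd)
{
        struct mtd_partition *parts = NULL;
        int nr_parts = 0;

        if (mtd_has_cmdlinepart())
                nr_parts = parse_mtd_partitions(mtd, example_part_probes,
                                                &parts, 0);

        if (mtd_has_partitions() && nr_parts > 0)
                return add_mtd_partitions(mtd, parts, nr_parts);

        return add_mtd_device(mtd);
}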
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 7a0e5c4f8072..3069ec7e0ab8 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -50,8 +50,10 @@ struct mutex { | |||
50 | atomic_t count; | 50 | atomic_t count; |
51 | spinlock_t wait_lock; | 51 | spinlock_t wait_lock; |
52 | struct list_head wait_list; | 52 | struct list_head wait_list; |
53 | #ifdef CONFIG_DEBUG_MUTEXES | 53 | #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) |
54 | struct thread_info *owner; | 54 | struct thread_info *owner; |
55 | #endif | ||
56 | #ifdef CONFIG_DEBUG_MUTEXES | ||
55 | const char *name; | 57 | const char *name; |
56 | void *magic; | 58 | void *magic; |
57 | #endif | 59 | #endif |
@@ -68,7 +70,6 @@ struct mutex_waiter { | |||
68 | struct list_head list; | 70 | struct list_head list; |
69 | struct task_struct *task; | 71 | struct task_struct *task; |
70 | #ifdef CONFIG_DEBUG_MUTEXES | 72 | #ifdef CONFIG_DEBUG_MUTEXES |
71 | struct mutex *lock; | ||
72 | void *magic; | 73 | void *magic; |
73 | #endif | 74 | #endif |
74 | }; | 75 | }; |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2e7783f4a755..5a96a1a406e9 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -104,7 +104,7 @@ struct wireless_dev; | |||
104 | # else | 104 | # else |
105 | # define LL_MAX_HEADER 96 | 105 | # define LL_MAX_HEADER 96 |
106 | # endif | 106 | # endif |
107 | #elif defined(CONFIG_TR) | 107 | #elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) |
108 | # define LL_MAX_HEADER 48 | 108 | # define LL_MAX_HEADER 48 |
109 | #else | 109 | #else |
110 | # define LL_MAX_HEADER 32 | 110 | # define LL_MAX_HEADER 32 |
@@ -500,7 +500,7 @@ struct netdev_queue { | |||
500 | * | 500 | * |
501 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); | 501 | * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); |
502 | * This function is called when the Media Access Control address | 502 | * This function is called when the Media Access Control address |
503 | * needs to be changed. If not this interface is not defined, the | 503 | * needs to be changed. If this interface is not defined, the |
504 | * mac address can not be changed. | 504 | * mac address can not be changed. |
505 | * | 505 | * |
506 | * int (*ndo_validate_addr)(struct net_device *dev); | 506 | * int (*ndo_validate_addr)(struct net_device *dev); |
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index 29fe9ea1d346..1a865e48b8eb 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h | |||
@@ -100,6 +100,7 @@ enum ctattr_protoinfo_tcp { | |||
100 | enum ctattr_protoinfo_dccp { | 100 | enum ctattr_protoinfo_dccp { |
101 | CTA_PROTOINFO_DCCP_UNSPEC, | 101 | CTA_PROTOINFO_DCCP_UNSPEC, |
102 | CTA_PROTOINFO_DCCP_STATE, | 102 | CTA_PROTOINFO_DCCP_STATE, |
103 | CTA_PROTOINFO_DCCP_ROLE, | ||
103 | __CTA_PROTOINFO_DCCP_MAX, | 104 | __CTA_PROTOINFO_DCCP_MAX, |
104 | }; | 105 | }; |
105 | #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1) | 106 | #define CTA_PROTOINFO_DCCP_MAX (__CTA_PROTOINFO_DCCP_MAX - 1) |
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 7b1a652066c0..c9efe039dc57 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
@@ -354,9 +354,6 @@ struct xt_table | |||
354 | /* What hooks you will enter on */ | 354 | /* What hooks you will enter on */ |
355 | unsigned int valid_hooks; | 355 | unsigned int valid_hooks; |
356 | 356 | ||
357 | /* Lock for the curtain */ | ||
358 | struct mutex lock; | ||
359 | |||
360 | /* Man behind the curtain... */ | 357 | /* Man behind the curtain... */ |
361 | struct xt_table_info *private; | 358 | struct xt_table_info *private; |
362 | 359 | ||
@@ -434,8 +431,74 @@ extern void xt_proto_fini(struct net *net, u_int8_t af); | |||
434 | 431 | ||
435 | extern struct xt_table_info *xt_alloc_table_info(unsigned int size); | 432 | extern struct xt_table_info *xt_alloc_table_info(unsigned int size); |
436 | extern void xt_free_table_info(struct xt_table_info *info); | 433 | extern void xt_free_table_info(struct xt_table_info *info); |
437 | extern void xt_table_entry_swap_rcu(struct xt_table_info *old, | 434 | |
438 | struct xt_table_info *new); | 435 | /* |
436 | * Per-CPU spinlock associated with per-cpu table entries, and | ||
437 | * with a counter for the "reading" side that allows a recursive | ||
438 | * reader to avoid taking the lock and deadlocking. | ||
439 | * | ||
440 | * "reading" is used by ip/arp/ip6 tables rule processing which runs per-cpu. | ||
441 | * It needs to ensure that the rules are not being changed while the packet | ||
442 | * is being processed. In some cases, the read lock will be acquired | ||
443 | * twice on the same CPU; this is okay because of the count. | ||
444 | * | ||
445 | * "writing" is used when reading counters. | ||
446 | * During replace any readers that are using the old tables have to complete | ||
447 | * before freeing the old table. This is handled by the write locking | ||
448 | * necessary for reading the counters. | ||
449 | */ | ||
450 | struct xt_info_lock { | ||
451 | spinlock_t lock; | ||
452 | unsigned char readers; | ||
453 | }; | ||
454 | DECLARE_PER_CPU(struct xt_info_lock, xt_info_locks); | ||
455 | |||
456 | /* | ||
457 | * Note: we need to ensure that preemption is disabled before acquiring | ||
458 | * the per-cpu-variable, so we do it as a two step process rather than | ||
459 | * using "spin_lock_bh()". | ||
460 | * | ||
461 | * We _also_ need to disable bottom half processing before updating our | ||
462 | * nesting count, to make sure that the only kind of re-entrancy is this | ||
463 | * code being called by itself: since the count+lock is not an atomic | ||
464 | * operation, we can allow no races. | ||
465 | * | ||
466 | * _Only_ that special combination of being per-cpu and never getting | ||
467 | * re-entered asynchronously means that the count is safe. | ||
468 | */ | ||
469 | static inline void xt_info_rdlock_bh(void) | ||
470 | { | ||
471 | struct xt_info_lock *lock; | ||
472 | |||
473 | local_bh_disable(); | ||
474 | lock = &__get_cpu_var(xt_info_locks); | ||
475 | if (likely(!lock->readers++)) | ||
476 | spin_lock(&lock->lock); | ||
477 | } | ||
478 | |||
479 | static inline void xt_info_rdunlock_bh(void) | ||
480 | { | ||
481 | struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks); | ||
482 | |||
483 | if (likely(!--lock->readers)) | ||
484 | spin_unlock(&lock->lock); | ||
485 | local_bh_enable(); | ||
486 | } | ||
487 | |||
488 | /* | ||
489 | * The "writer" side needs to get exclusive access to the lock, | ||
490 | * regardless of readers. This must be called with bottom half | ||
491 | * processing (and thus also preemption) disabled. | ||
492 | */ | ||
493 | static inline void xt_info_wrlock(unsigned int cpu) | ||
494 | { | ||
495 | spin_lock(&per_cpu(xt_info_locks, cpu).lock); | ||
496 | } | ||
497 | |||
498 | static inline void xt_info_wrunlock(unsigned int cpu) | ||
499 | { | ||
500 | spin_unlock(&per_cpu(xt_info_locks, cpu).lock); | ||
501 | } | ||
439 | 502 | ||
440 | /* | 503 | /* |
441 | * This helper is performance critical and must be inlined | 504 | * This helper is performance critical and must be inlined |
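The comment block above is the heart of the change that removes the xt_table mutex: packet processing ("readers") takes a per-CPU spinlock guarded by a recursion count, while anything that needs stable counters ("writers") takes each CPU's lock outright with bottom halves disabled. Below is a hedged sketch of both sides, with the rule traversal and counter-summing bodies elided.

#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/netfilter/x_tables.h>

/* Reader side (sketch): the per-cpu packet path, e.g. an ipt_do_table()-style walk. */
static unsigned int example_do_table(void)
{
        unsigned int verdict = 0;       /* placeholder verdict */

        xt_info_rdlock_bh();
        /* ... walk the live xt_table_info and bump this CPU's counters ... */
        xt_info_rdunlock_bh();
        return verdict;
}

/* Writer side (sketch): summing counters must exclude readers, one CPU at a time. */
static void example_sum_counters(struct xt_table_info *info)
{
        unsigned int cpu;

        local_bh_disable();             /* xt_info_wrlock() requires BHs off */
        for_each_possible_cpu(cpu) {
                xt_info_wrlock(cpu);
                /* ... accumulate info->entries[cpu] into the caller's totals ... */
                xt_info_wrunlock(cpu);
        }
        local_bh_enable();
}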
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index de99025f2c5d..2524267210d3 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
@@ -18,7 +18,7 @@ struct netpoll { | |||
18 | const char *name; | 18 | const char *name; |
19 | void (*rx_hook)(struct netpoll *, int, char *, int); | 19 | void (*rx_hook)(struct netpoll *, int, char *, int); |
20 | 20 | ||
21 | u32 local_ip, remote_ip; | 21 | __be32 local_ip, remote_ip; |
22 | u16 local_port, remote_port; | 22 | u16 local_port, remote_port; |
23 | u8 remote_mac[ETH_ALEN]; | 23 | u8 remote_mac[ETH_ALEN]; |
24 | }; | 24 | }; |
diff --git a/include/linux/nfs.h b/include/linux/nfs.h index 54af92c1c70b..214d499718f7 100644 --- a/include/linux/nfs.h +++ b/include/linux/nfs.h | |||
@@ -109,7 +109,6 @@ | |||
109 | NFSERR_FILE_OPEN = 10046, /* v4 */ | 109 | NFSERR_FILE_OPEN = 10046, /* v4 */ |
110 | NFSERR_ADMIN_REVOKED = 10047, /* v4 */ | 110 | NFSERR_ADMIN_REVOKED = 10047, /* v4 */ |
111 | NFSERR_CB_PATH_DOWN = 10048, /* v4 */ | 111 | NFSERR_CB_PATH_DOWN = 10048, /* v4 */ |
112 | NFSERR_REPLAY_ME = 10049 /* v4 */ | ||
113 | }; | 112 | }; |
114 | 113 | ||
115 | /* NFSv2 file types - beware, these are not the same in NFSv3 */ | 114 | /* NFSv2 file types - beware, these are not the same in NFSv3 */ |
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index b912311a56b1..e3f0cbcbd0db 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #define NFS4_FHSIZE 128 | 21 | #define NFS4_FHSIZE 128 |
22 | #define NFS4_MAXPATHLEN PATH_MAX | 22 | #define NFS4_MAXPATHLEN PATH_MAX |
23 | #define NFS4_MAXNAMLEN NAME_MAX | 23 | #define NFS4_MAXNAMLEN NAME_MAX |
24 | #define NFS4_MAX_SESSIONID_LEN 16 | ||
24 | 25 | ||
25 | #define NFS4_ACCESS_READ 0x0001 | 26 | #define NFS4_ACCESS_READ 0x0001 |
26 | #define NFS4_ACCESS_LOOKUP 0x0002 | 27 | #define NFS4_ACCESS_LOOKUP 0x0002 |
@@ -38,6 +39,7 @@ | |||
38 | #define NFS4_OPEN_RESULT_CONFIRM 0x0002 | 39 | #define NFS4_OPEN_RESULT_CONFIRM 0x0002 |
39 | #define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 | 40 | #define NFS4_OPEN_RESULT_LOCKTYPE_POSIX 0x0004 |
40 | 41 | ||
42 | #define NFS4_SHARE_ACCESS_MASK 0x000F | ||
41 | #define NFS4_SHARE_ACCESS_READ 0x0001 | 43 | #define NFS4_SHARE_ACCESS_READ 0x0001 |
42 | #define NFS4_SHARE_ACCESS_WRITE 0x0002 | 44 | #define NFS4_SHARE_ACCESS_WRITE 0x0002 |
43 | #define NFS4_SHARE_ACCESS_BOTH 0x0003 | 45 | #define NFS4_SHARE_ACCESS_BOTH 0x0003 |
@@ -45,6 +47,19 @@ | |||
45 | #define NFS4_SHARE_DENY_WRITE 0x0002 | 47 | #define NFS4_SHARE_DENY_WRITE 0x0002 |
46 | #define NFS4_SHARE_DENY_BOTH 0x0003 | 48 | #define NFS4_SHARE_DENY_BOTH 0x0003 |
47 | 49 | ||
50 | /* nfs41 */ | ||
51 | #define NFS4_SHARE_WANT_MASK 0xFF00 | ||
52 | #define NFS4_SHARE_WANT_NO_PREFERENCE 0x0000 | ||
53 | #define NFS4_SHARE_WANT_READ_DELEG 0x0100 | ||
54 | #define NFS4_SHARE_WANT_WRITE_DELEG 0x0200 | ||
55 | #define NFS4_SHARE_WANT_ANY_DELEG 0x0300 | ||
56 | #define NFS4_SHARE_WANT_NO_DELEG 0x0400 | ||
57 | #define NFS4_SHARE_WANT_CANCEL 0x0500 | ||
58 | |||
59 | #define NFS4_SHARE_WHEN_MASK 0xF0000 | ||
60 | #define NFS4_SHARE_SIGNAL_DELEG_WHEN_RESRC_AVAIL 0x10000 | ||
61 | #define NFS4_SHARE_PUSH_DELEG_WHEN_UNCONTENDED 0x20000 | ||
62 | |||
48 | #define NFS4_SET_TO_SERVER_TIME 0 | 63 | #define NFS4_SET_TO_SERVER_TIME 0 |
49 | #define NFS4_SET_TO_CLIENT_TIME 1 | 64 | #define NFS4_SET_TO_CLIENT_TIME 1 |
50 | 65 | ||
@@ -88,6 +103,31 @@ | |||
88 | #define NFS4_ACE_GENERIC_EXECUTE 0x001200A0 | 103 | #define NFS4_ACE_GENERIC_EXECUTE 0x001200A0 |
89 | #define NFS4_ACE_MASK_ALL 0x001F01FF | 104 | #define NFS4_ACE_MASK_ALL 0x001F01FF |
90 | 105 | ||
106 | #define EXCHGID4_FLAG_SUPP_MOVED_REFER 0x00000001 | ||
107 | #define EXCHGID4_FLAG_SUPP_MOVED_MIGR 0x00000002 | ||
108 | #define EXCHGID4_FLAG_USE_NON_PNFS 0x00010000 | ||
109 | #define EXCHGID4_FLAG_USE_PNFS_MDS 0x00020000 | ||
110 | #define EXCHGID4_FLAG_USE_PNFS_DS 0x00040000 | ||
111 | #define EXCHGID4_FLAG_UPD_CONFIRMED_REC_A 0x40000000 | ||
112 | #define EXCHGID4_FLAG_CONFIRMED_R 0x80000000 | ||
113 | /* | ||
114 | * Since the validity of these bits depends on whether | ||
115 | * they're set in the argument or response, have separate | ||
116 | * invalid flag masks for arg (_A) and resp (_R). | ||
117 | */ | ||
118 | #define EXCHGID4_FLAG_MASK_A 0x40070003 | ||
119 | #define EXCHGID4_FLAG_MASK_R 0x80070003 | ||
120 | |||
121 | #define SEQ4_STATUS_CB_PATH_DOWN 0x00000001 | ||
122 | #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRING 0x00000002 | ||
123 | #define SEQ4_STATUS_CB_GSS_CONTEXTS_EXPIRED 0x00000004 | ||
124 | #define SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED 0x00000008 | ||
125 | #define SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED 0x00000010 | ||
126 | #define SEQ4_STATUS_ADMIN_STATE_REVOKED 0x00000020 | ||
127 | #define SEQ4_STATUS_RECALLABLE_STATE_REVOKED 0x00000040 | ||
128 | #define SEQ4_STATUS_LEASE_MOVED 0x00000080 | ||
129 | #define SEQ4_STATUS_RESTART_RECLAIM_NEEDED 0x00000100 | ||
130 | |||
91 | #define NFS4_MAX_UINT64 (~(u64)0) | 131 | #define NFS4_MAX_UINT64 (~(u64)0) |
92 | 132 | ||
93 | enum nfs4_acl_whotype { | 133 | enum nfs4_acl_whotype { |
@@ -154,6 +194,28 @@ enum nfs_opnum4 { | |||
154 | OP_VERIFY = 37, | 194 | OP_VERIFY = 37, |
155 | OP_WRITE = 38, | 195 | OP_WRITE = 38, |
156 | OP_RELEASE_LOCKOWNER = 39, | 196 | OP_RELEASE_LOCKOWNER = 39, |
197 | |||
198 | /* nfs41 */ | ||
199 | OP_BACKCHANNEL_CTL = 40, | ||
200 | OP_BIND_CONN_TO_SESSION = 41, | ||
201 | OP_EXCHANGE_ID = 42, | ||
202 | OP_CREATE_SESSION = 43, | ||
203 | OP_DESTROY_SESSION = 44, | ||
204 | OP_FREE_STATEID = 45, | ||
205 | OP_GET_DIR_DELEGATION = 46, | ||
206 | OP_GETDEVICEINFO = 47, | ||
207 | OP_GETDEVICELIST = 48, | ||
208 | OP_LAYOUTCOMMIT = 49, | ||
209 | OP_LAYOUTGET = 50, | ||
210 | OP_LAYOUTRETURN = 51, | ||
211 | OP_SECINFO_NO_NAME = 52, | ||
212 | OP_SEQUENCE = 53, | ||
213 | OP_SET_SSV = 54, | ||
214 | OP_TEST_STATEID = 55, | ||
215 | OP_WANT_DELEGATION = 56, | ||
216 | OP_DESTROY_CLIENTID = 57, | ||
217 | OP_RECLAIM_COMPLETE = 58, | ||
218 | |||
157 | OP_ILLEGAL = 10044, | 219 | OP_ILLEGAL = 10044, |
158 | }; | 220 | }; |
159 | 221 | ||
@@ -230,7 +292,48 @@ enum nfsstat4 { | |||
230 | NFS4ERR_DEADLOCK = 10045, | 292 | NFS4ERR_DEADLOCK = 10045, |
231 | NFS4ERR_FILE_OPEN = 10046, | 293 | NFS4ERR_FILE_OPEN = 10046, |
232 | NFS4ERR_ADMIN_REVOKED = 10047, | 294 | NFS4ERR_ADMIN_REVOKED = 10047, |
233 | NFS4ERR_CB_PATH_DOWN = 10048 | 295 | NFS4ERR_CB_PATH_DOWN = 10048, |
296 | |||
297 | /* nfs41 */ | ||
298 | NFS4ERR_BADIOMODE = 10049, | ||
299 | NFS4ERR_BADLAYOUT = 10050, | ||
300 | NFS4ERR_BAD_SESSION_DIGEST = 10051, | ||
301 | NFS4ERR_BADSESSION = 10052, | ||
302 | NFS4ERR_BADSLOT = 10053, | ||
303 | NFS4ERR_COMPLETE_ALREADY = 10054, | ||
304 | NFS4ERR_CONN_NOT_BOUND_TO_SESSION = 10055, | ||
305 | NFS4ERR_DELEG_ALREADY_WANTED = 10056, | ||
306 | NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */ | ||
307 | NFS4ERR_LAYOUTTRYLATER = 10058, | ||
308 | NFS4ERR_LAYOUTUNAVAILABLE = 10059, | ||
309 | NFS4ERR_NOMATCHING_LAYOUT = 10060, | ||
310 | NFS4ERR_RECALLCONFLICT = 10061, | ||
311 | NFS4ERR_UNKNOWN_LAYOUTTYPE = 10062, | ||
312 | NFS4ERR_SEQ_MISORDERED = 10063, /* unexpected seq.id in req */ | ||
313 | NFS4ERR_SEQUENCE_POS = 10064, /* [CB_]SEQ. op not 1st op */ | ||
314 | NFS4ERR_REQ_TOO_BIG = 10065, /* request too big */ | ||
315 | NFS4ERR_REP_TOO_BIG = 10066, /* reply too big */ | ||
316 | NFS4ERR_REP_TOO_BIG_TO_CACHE = 10067, /* rep. not all cached */ | ||
317 | NFS4ERR_RETRY_UNCACHED_REP = 10068, /* retry & rep. uncached */ | ||
318 | NFS4ERR_UNSAFE_COMPOUND = 10069, /* retry/recovery too hard */ | ||
319 | NFS4ERR_TOO_MANY_OPS = 10070, /* too many ops in [CB_]COMP */ | ||
320 | NFS4ERR_OP_NOT_IN_SESSION = 10071, /* op needs [CB_]SEQ. op */ | ||
321 | NFS4ERR_HASH_ALG_UNSUPP = 10072, /* hash alg. not supp. */ | ||
322 | /* Error 10073 is unused. */ | ||
323 | NFS4ERR_CLIENTID_BUSY = 10074, /* clientid has state */ | ||
324 | NFS4ERR_PNFS_IO_HOLE = 10075, /* IO to _SPARSE file hole */ | ||
325 | NFS4ERR_SEQ_FALSE_RETRY = 10076, /* retry not original */ | ||
326 | NFS4ERR_BAD_HIGH_SLOT = 10077, /* sequence arg bad */ | ||
327 | NFS4ERR_DEADSESSION = 10078, /* persistent session dead */ | ||
328 | NFS4ERR_ENCR_ALG_UNSUPP = 10079, /* SSV alg mismatch */ | ||
329 | NFS4ERR_PNFS_NO_LAYOUT = 10080, /* direct I/O with no layout */ | ||
330 | NFS4ERR_NOT_ONLY_OP = 10081, /* bad compound */ | ||
331 | NFS4ERR_WRONG_CRED = 10082, /* permissions:state change */ | ||
332 | NFS4ERR_WRONG_TYPE = 10083, /* current operation mismatch */ | ||
333 | NFS4ERR_DIRDELEG_UNAVAIL = 10084, /* no directory delegation */ | ||
334 | NFS4ERR_REJECT_DELEG = 10085, /* on callback */ | ||
335 | NFS4ERR_RETURNCONFLICT = 10086, /* outstanding layoutreturn */ | ||
336 | NFS4ERR_DELEG_REVOKED = 10087, /* deleg./layout revoked */ | ||
234 | }; | 337 | }; |
235 | 338 | ||
236 | /* | 339 | /* |
@@ -265,7 +368,13 @@ enum opentype4 { | |||
265 | enum createmode4 { | 368 | enum createmode4 { |
266 | NFS4_CREATE_UNCHECKED = 0, | 369 | NFS4_CREATE_UNCHECKED = 0, |
267 | NFS4_CREATE_GUARDED = 1, | 370 | NFS4_CREATE_GUARDED = 1, |
268 | NFS4_CREATE_EXCLUSIVE = 2 | 371 | NFS4_CREATE_EXCLUSIVE = 2, |
372 | /* | ||
373 | * New to NFSv4.1. If session is persistent, | ||
374 | * GUARDED4 MUST be used. Otherwise, use | ||
375 | * EXCLUSIVE4_1 instead of EXCLUSIVE4. | ||
376 | */ | ||
377 | NFS4_CREATE_EXCLUSIVE4_1 = 3 | ||
269 | }; | 378 | }; |
270 | 379 | ||
271 | enum limit_by4 { | 380 | enum limit_by4 { |
@@ -301,6 +410,8 @@ enum lock_type4 { | |||
301 | #define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) | 410 | #define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9) |
302 | #define FATTR4_WORD0_LEASE_TIME (1UL << 10) | 411 | #define FATTR4_WORD0_LEASE_TIME (1UL << 10) |
303 | #define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) | 412 | #define FATTR4_WORD0_RDATTR_ERROR (1UL << 11) |
413 | /* Mandatory in NFSv4.1 */ | ||
414 | #define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11) | ||
304 | 415 | ||
305 | /* Recommended Attributes */ | 416 | /* Recommended Attributes */ |
306 | #define FATTR4_WORD0_ACL (1UL << 12) | 417 | #define FATTR4_WORD0_ACL (1UL << 12) |
@@ -391,6 +502,29 @@ enum { | |||
391 | NFSPROC4_CLNT_GETACL, | 502 | NFSPROC4_CLNT_GETACL, |
392 | NFSPROC4_CLNT_SETACL, | 503 | NFSPROC4_CLNT_SETACL, |
393 | NFSPROC4_CLNT_FS_LOCATIONS, | 504 | NFSPROC4_CLNT_FS_LOCATIONS, |
505 | |||
506 | /* nfs41 */ | ||
507 | NFSPROC4_CLNT_EXCHANGE_ID, | ||
508 | NFSPROC4_CLNT_CREATE_SESSION, | ||
509 | NFSPROC4_CLNT_DESTROY_SESSION, | ||
510 | NFSPROC4_CLNT_SEQUENCE, | ||
511 | NFSPROC4_CLNT_GET_LEASE_TIME, | ||
512 | }; | ||
513 | |||
514 | /* nfs41 types */ | ||
515 | struct nfs4_sessionid { | ||
516 | unsigned char data[NFS4_MAX_SESSIONID_LEN]; | ||
517 | }; | ||
518 | |||
519 | /* Create Session Flags */ | ||
520 | #define SESSION4_PERSIST 0x001 | ||
521 | #define SESSION4_BACK_CHAN 0x002 | ||
522 | #define SESSION4_RDMA 0x004 | ||
523 | |||
524 | enum state_protect_how4 { | ||
525 | SP4_NONE = 0, | ||
526 | SP4_MACH_CRED = 1, | ||
527 | SP4_SSV = 2 | ||
394 | }; | 528 | }; |
395 | 529 | ||
396 | #endif | 530 | #endif |
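The EXCHGID4_FLAG_MASK_A / EXCHGID4_FLAG_MASK_R pair exists because, as the comment in the hunk notes, the same flag word is validated differently in an EXCHANGE_ID argument than in its response. A hedged sketch of the argument-side check follows; the function name and error handling are illustrative.

#include <linux/errno.h>
#include <linux/nfs4.h>

/* Sketch: reject an EXCHANGE_ID argument carrying unknown or response-only flags. */
static int example_check_exchange_id_args(u32 flags)
{
        if (flags & ~EXCHGID4_FLAG_MASK_A)
                return -EINVAL;         /* a server would answer NFS4ERR_INVAL */
        return 0;
}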
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 8cc8807f77d6..fdffb413b192 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -166,8 +166,7 @@ struct nfs_inode { | |||
166 | */ | 166 | */ |
167 | struct radix_tree_root nfs_page_tree; | 167 | struct radix_tree_root nfs_page_tree; |
168 | 168 | ||
169 | unsigned long ncommit, | 169 | unsigned long npages; |
170 | npages; | ||
171 | 170 | ||
172 | /* Open contexts for shared mmap writes */ | 171 | /* Open contexts for shared mmap writes */ |
173 | struct list_head open_files; | 172 | struct list_head open_files; |
@@ -186,6 +185,9 @@ struct nfs_inode { | |||
186 | fmode_t delegation_state; | 185 | fmode_t delegation_state; |
187 | struct rw_semaphore rwsem; | 186 | struct rw_semaphore rwsem; |
188 | #endif /* CONFIG_NFS_V4*/ | 187 | #endif /* CONFIG_NFS_V4*/ |
188 | #ifdef CONFIG_NFS_FSCACHE | ||
189 | struct fscache_cookie *fscache; | ||
190 | #endif | ||
189 | struct inode vfs_inode; | 191 | struct inode vfs_inode; |
190 | }; | 192 | }; |
191 | 193 | ||
@@ -207,6 +209,9 @@ struct nfs_inode { | |||
207 | #define NFS_INO_STALE (1) /* possible stale inode */ | 209 | #define NFS_INO_STALE (1) /* possible stale inode */ |
208 | #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ | 210 | #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ |
209 | #define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */ | 211 | #define NFS_INO_MOUNTPOINT (3) /* inode is remote mountpoint */ |
212 | #define NFS_INO_FLUSHING (4) /* inode is flushing out data */ | ||
213 | #define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ | ||
214 | #define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ | ||
210 | 215 | ||
211 | static inline struct nfs_inode *NFS_I(const struct inode *inode) | 216 | static inline struct nfs_inode *NFS_I(const struct inode *inode) |
212 | { | 217 | { |
@@ -260,6 +265,11 @@ static inline int NFS_STALE(const struct inode *inode) | |||
260 | return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); | 265 | return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); |
261 | } | 266 | } |
262 | 267 | ||
268 | static inline int NFS_FSCACHE(const struct inode *inode) | ||
269 | { | ||
270 | return test_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags); | ||
271 | } | ||
272 | |||
263 | static inline __u64 NFS_FILEID(const struct inode *inode) | 273 | static inline __u64 NFS_FILEID(const struct inode *inode) |
264 | { | 274 | { |
265 | return NFS_I(inode)->fileid; | 275 | return NFS_I(inode)->fileid; |
@@ -506,6 +516,8 @@ extern int nfs_readpages(struct file *, struct address_space *, | |||
506 | struct list_head *, unsigned); | 516 | struct list_head *, unsigned); |
507 | extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); | 517 | extern int nfs_readpage_result(struct rpc_task *, struct nfs_read_data *); |
508 | extern void nfs_readdata_release(void *data); | 518 | extern void nfs_readdata_release(void *data); |
519 | extern int nfs_readpage_async(struct nfs_open_context *, struct inode *, | ||
520 | struct page *); | ||
509 | 521 | ||
510 | /* | 522 | /* |
511 | * Allocate nfs_read_data structures | 523 | * Allocate nfs_read_data structures |
@@ -583,6 +595,7 @@ extern void * nfs_root_data(void); | |||
583 | #define NFSDBG_CALLBACK 0x0100 | 595 | #define NFSDBG_CALLBACK 0x0100 |
584 | #define NFSDBG_CLIENT 0x0200 | 596 | #define NFSDBG_CLIENT 0x0200 |
585 | #define NFSDBG_MOUNT 0x0400 | 597 | #define NFSDBG_MOUNT 0x0400 |
598 | #define NFSDBG_FSCACHE 0x0800 | ||
586 | #define NFSDBG_ALL 0xFFFF | 599 | #define NFSDBG_ALL 0xFFFF |
587 | 600 | ||
588 | #ifdef __KERNEL__ | 601 | #ifdef __KERNEL__ |
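NFS_INO_FSCACHE is a per-inode flag bit and NFS_FSCACHE() is its test; read paths can use it to decide whether the local cache should be consulted before issuing an RPC. A minimal sketch, assuming a hypothetical cache-lookup step that is left as a comment; nfs_readpage_async() is the asynchronous read entry point declared in the hunk above.

#include <linux/nfs_fs.h>

/* Sketch: try the local cache first, fall back to a normal async NFS read. */
static int example_readpage(struct nfs_open_context *ctx,
                            struct inode *inode, struct page *page)
{
        if (NFS_FSCACHE(inode)) {
                /* hypothetical: return 0 here if the page was found in FS-Cache */
        }
        return nfs_readpage_async(ctx, inode, page);
}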
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 9bb81aec91cf..6ad75948cbf7 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -64,6 +64,10 @@ struct nfs_client { | |||
64 | char cl_ipaddr[48]; | 64 | char cl_ipaddr[48]; |
65 | unsigned char cl_id_uniquifier; | 65 | unsigned char cl_id_uniquifier; |
66 | #endif | 66 | #endif |
67 | |||
68 | #ifdef CONFIG_NFS_FSCACHE | ||
69 | struct fscache_cookie *fscache; /* client index cache cookie */ | ||
70 | #endif | ||
67 | }; | 71 | }; |
68 | 72 | ||
69 | /* | 73 | /* |
@@ -96,16 +100,28 @@ struct nfs_server { | |||
96 | unsigned int acdirmin; | 100 | unsigned int acdirmin; |
97 | unsigned int acdirmax; | 101 | unsigned int acdirmax; |
98 | unsigned int namelen; | 102 | unsigned int namelen; |
103 | unsigned int options; /* extra options enabled by mount */ | ||
104 | #define NFS_OPTION_FSCACHE 0x00000001 /* - local caching enabled */ | ||
99 | 105 | ||
100 | struct nfs_fsid fsid; | 106 | struct nfs_fsid fsid; |
101 | __u64 maxfilesize; /* maximum file size */ | 107 | __u64 maxfilesize; /* maximum file size */ |
102 | unsigned long mount_time; /* when this fs was mounted */ | 108 | unsigned long mount_time; /* when this fs was mounted */ |
103 | dev_t s_dev; /* superblock dev numbers */ | 109 | dev_t s_dev; /* superblock dev numbers */ |
104 | 110 | ||
111 | #ifdef CONFIG_NFS_FSCACHE | ||
112 | struct nfs_fscache_key *fscache_key; /* unique key for superblock */ | ||
113 | struct fscache_cookie *fscache; /* superblock cookie */ | ||
114 | #endif | ||
115 | |||
105 | #ifdef CONFIG_NFS_V4 | 116 | #ifdef CONFIG_NFS_V4 |
106 | u32 attr_bitmask[2];/* V4 bitmask representing the set | 117 | u32 attr_bitmask[2];/* V4 bitmask representing the set |
107 | of attributes supported on this | 118 | of attributes supported on this |
108 | filesystem */ | 119 | filesystem */ |
120 | u32 cache_consistency_bitmask[2]; | ||
121 | /* V4 bitmask representing the subset | ||
122 | of change attribute, size, ctime | ||
123 | and mtime attributes supported by | ||
124 | the server */ | ||
109 | u32 acl_bitmask; /* V4 bitmask representing the ACEs | 125 | u32 acl_bitmask; /* V4 bitmask representing the ACEs |
110 | that are supported on this | 126 | that are supported on this |
111 | filesystem */ | 127 | filesystem */ |
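NFS_OPTION_FSCACHE is the first bit in the new nfs_server->options word, set from mount options; superblock setup only acquires FS-Cache cookies when it is present. A short hedged sketch follows; the function name is illustrative and the cookie acquisition is elided.

#include <linux/nfs_fs_sb.h>

/* Sketch: only index this superblock in FS-Cache when the mount asked for it. */
static void example_maybe_enable_fscache(struct nfs_server *server)
{
        if (!(server->options & NFS_OPTION_FSCACHE))
                return;
        /* ... set up server->fscache_key and server->fscache cookies here ... */
}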
diff --git a/include/linux/nfs_iostat.h b/include/linux/nfs_iostat.h index 1cb9a3fed2b3..68b10f5f8907 100644 --- a/include/linux/nfs_iostat.h +++ b/include/linux/nfs_iostat.h | |||
@@ -116,4 +116,16 @@ enum nfs_stat_eventcounters { | |||
116 | __NFSIOS_COUNTSMAX, | 116 | __NFSIOS_COUNTSMAX, |
117 | }; | 117 | }; |
118 | 118 | ||
119 | /* | ||
120 | * NFS local caching servicing counters | ||
121 | */ | ||
122 | enum nfs_stat_fscachecounters { | ||
123 | NFSIOS_FSCACHE_PAGES_READ_OK, | ||
124 | NFSIOS_FSCACHE_PAGES_READ_FAIL, | ||
125 | NFSIOS_FSCACHE_PAGES_WRITTEN_OK, | ||
126 | NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, | ||
127 | NFSIOS_FSCACHE_PAGES_UNCACHED, | ||
128 | __NFSIOS_FSCACHEMAX, | ||
129 | }; | ||
130 | |||
119 | #endif /* _LINUX_NFS_IOSTAT */ | 131 | #endif /* _LINUX_NFS_IOSTAT */ |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 43a713fce11c..b89c34e40bc2 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -27,12 +27,8 @@ static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid | |||
27 | } | 27 | } |
28 | 28 | ||
29 | struct nfs_fattr { | 29 | struct nfs_fattr { |
30 | unsigned short valid; /* which fields are valid */ | 30 | unsigned int valid; /* which fields are valid */ |
31 | __u64 pre_size; /* pre_op_attr.size */ | 31 | umode_t mode; |
32 | struct timespec pre_mtime; /* pre_op_attr.mtime */ | ||
33 | struct timespec pre_ctime; /* pre_op_attr.ctime */ | ||
34 | enum nfs_ftype type; /* always use NFSv2 types */ | ||
35 | __u32 mode; | ||
36 | __u32 nlink; | 32 | __u32 nlink; |
37 | __u32 uid; | 33 | __u32 uid; |
38 | __u32 gid; | 34 | __u32 gid; |
@@ -52,19 +48,55 @@ struct nfs_fattr { | |||
52 | struct timespec atime; | 48 | struct timespec atime; |
53 | struct timespec mtime; | 49 | struct timespec mtime; |
54 | struct timespec ctime; | 50 | struct timespec ctime; |
55 | __u32 bitmap[2]; /* NFSv4 returned attribute bitmap */ | ||
56 | __u64 change_attr; /* NFSv4 change attribute */ | 51 | __u64 change_attr; /* NFSv4 change attribute */ |
57 | __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ | 52 | __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ |
53 | __u64 pre_size; /* pre_op_attr.size */ | ||
54 | struct timespec pre_mtime; /* pre_op_attr.mtime */ | ||
55 | struct timespec pre_ctime; /* pre_op_attr.ctime */ | ||
58 | unsigned long time_start; | 56 | unsigned long time_start; |
59 | unsigned long gencount; | 57 | unsigned long gencount; |
60 | }; | 58 | }; |
61 | 59 | ||
62 | #define NFS_ATTR_WCC 0x0001 /* pre-op WCC data */ | 60 | #define NFS_ATTR_FATTR_TYPE (1U << 0) |
63 | #define NFS_ATTR_FATTR 0x0002 /* post-op attributes */ | 61 | #define NFS_ATTR_FATTR_MODE (1U << 1) |
64 | #define NFS_ATTR_FATTR_V3 0x0004 /* NFSv3 attributes */ | 62 | #define NFS_ATTR_FATTR_NLINK (1U << 2) |
65 | #define NFS_ATTR_FATTR_V4 0x0008 /* NFSv4 change attribute */ | 63 | #define NFS_ATTR_FATTR_OWNER (1U << 3) |
66 | #define NFS_ATTR_WCC_V4 0x0010 /* pre-op change attribute */ | 64 | #define NFS_ATTR_FATTR_GROUP (1U << 4) |
67 | #define NFS_ATTR_FATTR_V4_REFERRAL 0x0020 /* NFSv4 referral */ | 65 | #define NFS_ATTR_FATTR_RDEV (1U << 5) |
66 | #define NFS_ATTR_FATTR_SIZE (1U << 6) | ||
67 | #define NFS_ATTR_FATTR_PRESIZE (1U << 7) | ||
68 | #define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8) | ||
69 | #define NFS_ATTR_FATTR_SPACE_USED (1U << 9) | ||
70 | #define NFS_ATTR_FATTR_FSID (1U << 10) | ||
71 | #define NFS_ATTR_FATTR_FILEID (1U << 11) | ||
72 | #define NFS_ATTR_FATTR_ATIME (1U << 12) | ||
73 | #define NFS_ATTR_FATTR_MTIME (1U << 13) | ||
74 | #define NFS_ATTR_FATTR_CTIME (1U << 14) | ||
75 | #define NFS_ATTR_FATTR_PREMTIME (1U << 15) | ||
76 | #define NFS_ATTR_FATTR_PRECTIME (1U << 16) | ||
77 | #define NFS_ATTR_FATTR_CHANGE (1U << 17) | ||
78 | #define NFS_ATTR_FATTR_PRECHANGE (1U << 18) | ||
79 | #define NFS_ATTR_FATTR_V4_REFERRAL (1U << 19) /* NFSv4 referral */ | ||
80 | |||
81 | #define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \ | ||
82 | | NFS_ATTR_FATTR_MODE \ | ||
83 | | NFS_ATTR_FATTR_NLINK \ | ||
84 | | NFS_ATTR_FATTR_OWNER \ | ||
85 | | NFS_ATTR_FATTR_GROUP \ | ||
86 | | NFS_ATTR_FATTR_RDEV \ | ||
87 | | NFS_ATTR_FATTR_SIZE \ | ||
88 | | NFS_ATTR_FATTR_FSID \ | ||
89 | | NFS_ATTR_FATTR_FILEID \ | ||
90 | | NFS_ATTR_FATTR_ATIME \ | ||
91 | | NFS_ATTR_FATTR_MTIME \ | ||
92 | | NFS_ATTR_FATTR_CTIME) | ||
93 | #define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \ | ||
94 | | NFS_ATTR_FATTR_BLOCKS_USED) | ||
95 | #define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \ | ||
96 | | NFS_ATTR_FATTR_SPACE_USED) | ||
97 | #define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \ | ||
98 | | NFS_ATTR_FATTR_SPACE_USED \ | ||
99 | | NFS_ATTR_FATTR_CHANGE) | ||
68 | 100 | ||
69 | /* | 101 | /* |
70 | * Info on the file system | 102 | * Info on the file system |
@@ -836,6 +868,7 @@ struct nfs_rpc_ops { | |||
836 | int (*lock)(struct file *, int, struct file_lock *); | 868 | int (*lock)(struct file *, int, struct file_lock *); |
837 | int (*lock_check_bounds)(const struct file_lock *); | 869 | int (*lock_check_bounds)(const struct file_lock *); |
838 | void (*clear_acl_cache)(struct inode *); | 870 | void (*clear_acl_cache)(struct inode *); |
871 | void (*close_context)(struct nfs_open_context *ctx, int); | ||
839 | }; | 872 | }; |
840 | 873 | ||
841 | /* | 874 | /* |
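Earlier in this nfs_xdr.h diff the short nfs_fattr `valid` word becomes a full bitmask with one NFS_ATTR_FATTR_* bit per attribute, so callers can see exactly which fields the XDR decoder filled in rather than assuming a whole v2/v3/v4 group. A hedged sketch of the consumer side follows; the function is illustrative, but the fields and flag names come from the hunk above.

#include <linux/fs.h>
#include <linux/nfs_xdr.h>

/* Sketch: apply only the attributes the server actually returned. */
static void example_apply_fattr(struct inode *inode, const struct nfs_fattr *fattr)
{
        if (fattr->valid & NFS_ATTR_FATTR_MODE)
                /* keep the file type bits, take the permission bits */
                inode->i_mode = (inode->i_mode & S_IFMT) |
                                (fattr->mode & S_IALLUGO);
        if (fattr->valid & NFS_ATTR_FATTR_SIZE)
                i_size_write(inode, fattr->size);
        if (fattr->valid & NFS_ATTR_FATTR_MTIME)
                inode->i_mtime = fattr->mtime;
}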
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h index 04b355c801d8..5bccaab81056 100644 --- a/include/linux/nfsd/cache.h +++ b/include/linux/nfsd/cache.h | |||
@@ -76,4 +76,12 @@ void nfsd_reply_cache_shutdown(void); | |||
76 | int nfsd_cache_lookup(struct svc_rqst *, int); | 76 | int nfsd_cache_lookup(struct svc_rqst *, int); |
77 | void nfsd_cache_update(struct svc_rqst *, int, __be32 *); | 77 | void nfsd_cache_update(struct svc_rqst *, int, __be32 *); |
78 | 78 | ||
79 | #ifdef CONFIG_NFSD_V4 | ||
80 | void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp); | ||
81 | #else /* CONFIG_NFSD_V4 */ | ||
82 | static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp) | ||
83 | { | ||
84 | } | ||
85 | #endif /* CONFIG_NFSD_V4 */ | ||
86 | |||
79 | #endif /* NFSCACHE_H */ | 87 | #endif /* NFSCACHE_H */ |
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h index e19f45991b2e..2b49d676d0c9 100644 --- a/include/linux/nfsd/nfsd.h +++ b/include/linux/nfsd/nfsd.h | |||
@@ -23,7 +23,7 @@ | |||
23 | /* | 23 | /* |
24 | * nfsd version | 24 | * nfsd version |
25 | */ | 25 | */ |
26 | #define NFSD_SUPPORTED_MINOR_VERSION 0 | 26 | #define NFSD_SUPPORTED_MINOR_VERSION 1 |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Flags for nfsd_permission | 29 | * Flags for nfsd_permission |
@@ -53,6 +53,7 @@ typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int); | |||
53 | extern struct svc_program nfsd_program; | 53 | extern struct svc_program nfsd_program; |
54 | extern struct svc_version nfsd_version2, nfsd_version3, | 54 | extern struct svc_version nfsd_version2, nfsd_version3, |
55 | nfsd_version4; | 55 | nfsd_version4; |
56 | extern u32 nfsd_supported_minorversion; | ||
56 | extern struct mutex nfsd_mutex; | 57 | extern struct mutex nfsd_mutex; |
57 | extern struct svc_serv *nfsd_serv; | 58 | extern struct svc_serv *nfsd_serv; |
58 | 59 | ||
@@ -105,7 +106,7 @@ void nfsd_close(struct file *); | |||
105 | __be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *, | 106 | __be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *, |
106 | loff_t, struct kvec *, int, unsigned long *); | 107 | loff_t, struct kvec *, int, unsigned long *); |
107 | __be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, | 108 | __be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, |
108 | loff_t, struct kvec *,int, unsigned long, int *); | 109 | loff_t, struct kvec *,int, unsigned long *, int *); |
109 | __be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, | 110 | __be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, |
110 | char *, int *); | 111 | char *, int *); |
111 | __be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, | 112 | __be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, |
@@ -149,6 +150,7 @@ int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *); | |||
149 | 150 | ||
150 | enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; | 151 | enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; |
151 | int nfsd_vers(int vers, enum vers_op change); | 152 | int nfsd_vers(int vers, enum vers_op change); |
153 | int nfsd_minorversion(u32 minorversion, enum vers_op change); | ||
152 | void nfsd_reset_versions(void); | 154 | void nfsd_reset_versions(void); |
153 | int nfsd_create_serv(void); | 155 | int nfsd_create_serv(void); |
154 | 156 | ||
@@ -186,78 +188,119 @@ void nfsd_lockd_shutdown(void); | |||
186 | /* | 188 | /* |
187 | * These macros provide pre-xdr'ed values for faster operation. | 189 | * These macros provide pre-xdr'ed values for faster operation. |
188 | */ | 190 | */ |
189 | #define nfs_ok __constant_htonl(NFS_OK) | 191 | #define nfs_ok cpu_to_be32(NFS_OK) |
190 | #define nfserr_perm __constant_htonl(NFSERR_PERM) | 192 | #define nfserr_perm cpu_to_be32(NFSERR_PERM) |
191 | #define nfserr_noent __constant_htonl(NFSERR_NOENT) | 193 | #define nfserr_noent cpu_to_be32(NFSERR_NOENT) |
192 | #define nfserr_io __constant_htonl(NFSERR_IO) | 194 | #define nfserr_io cpu_to_be32(NFSERR_IO) |
193 | #define nfserr_nxio __constant_htonl(NFSERR_NXIO) | 195 | #define nfserr_nxio cpu_to_be32(NFSERR_NXIO) |
194 | #define nfserr_eagain __constant_htonl(NFSERR_EAGAIN) | 196 | #define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN) |
195 | #define nfserr_acces __constant_htonl(NFSERR_ACCES) | 197 | #define nfserr_acces cpu_to_be32(NFSERR_ACCES) |
196 | #define nfserr_exist __constant_htonl(NFSERR_EXIST) | 198 | #define nfserr_exist cpu_to_be32(NFSERR_EXIST) |
197 | #define nfserr_xdev __constant_htonl(NFSERR_XDEV) | 199 | #define nfserr_xdev cpu_to_be32(NFSERR_XDEV) |
198 | #define nfserr_nodev __constant_htonl(NFSERR_NODEV) | 200 | #define nfserr_nodev cpu_to_be32(NFSERR_NODEV) |
199 | #define nfserr_notdir __constant_htonl(NFSERR_NOTDIR) | 201 | #define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR) |
200 | #define nfserr_isdir __constant_htonl(NFSERR_ISDIR) | 202 | #define nfserr_isdir cpu_to_be32(NFSERR_ISDIR) |
201 | #define nfserr_inval __constant_htonl(NFSERR_INVAL) | 203 | #define nfserr_inval cpu_to_be32(NFSERR_INVAL) |
202 | #define nfserr_fbig __constant_htonl(NFSERR_FBIG) | 204 | #define nfserr_fbig cpu_to_be32(NFSERR_FBIG) |
203 | #define nfserr_nospc __constant_htonl(NFSERR_NOSPC) | 205 | #define nfserr_nospc cpu_to_be32(NFSERR_NOSPC) |
204 | #define nfserr_rofs __constant_htonl(NFSERR_ROFS) | 206 | #define nfserr_rofs cpu_to_be32(NFSERR_ROFS) |
205 | #define nfserr_mlink __constant_htonl(NFSERR_MLINK) | 207 | #define nfserr_mlink cpu_to_be32(NFSERR_MLINK) |
206 | #define nfserr_opnotsupp __constant_htonl(NFSERR_OPNOTSUPP) | 208 | #define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP) |
207 | #define nfserr_nametoolong __constant_htonl(NFSERR_NAMETOOLONG) | 209 | #define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG) |
208 | #define nfserr_notempty __constant_htonl(NFSERR_NOTEMPTY) | 210 | #define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY) |
209 | #define nfserr_dquot __constant_htonl(NFSERR_DQUOT) | 211 | #define nfserr_dquot cpu_to_be32(NFSERR_DQUOT) |
210 | #define nfserr_stale __constant_htonl(NFSERR_STALE) | 212 | #define nfserr_stale cpu_to_be32(NFSERR_STALE) |
211 | #define nfserr_remote __constant_htonl(NFSERR_REMOTE) | 213 | #define nfserr_remote cpu_to_be32(NFSERR_REMOTE) |
212 | #define nfserr_wflush __constant_htonl(NFSERR_WFLUSH) | 214 | #define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH) |
213 | #define nfserr_badhandle __constant_htonl(NFSERR_BADHANDLE) | 215 | #define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE) |
214 | #define nfserr_notsync __constant_htonl(NFSERR_NOT_SYNC) | 216 | #define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC) |
215 | #define nfserr_badcookie __constant_htonl(NFSERR_BAD_COOKIE) | 217 | #define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE) |
216 | #define nfserr_notsupp __constant_htonl(NFSERR_NOTSUPP) | 218 | #define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP) |
217 | #define nfserr_toosmall __constant_htonl(NFSERR_TOOSMALL) | 219 | #define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL) |
218 | #define nfserr_serverfault __constant_htonl(NFSERR_SERVERFAULT) | 220 | #define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT) |
219 | #define nfserr_badtype __constant_htonl(NFSERR_BADTYPE) | 221 | #define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE) |
220 | #define nfserr_jukebox __constant_htonl(NFSERR_JUKEBOX) | 222 | #define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX) |
221 | #define nfserr_denied __constant_htonl(NFSERR_DENIED) | 223 | #define nfserr_denied cpu_to_be32(NFSERR_DENIED) |
222 | #define nfserr_deadlock __constant_htonl(NFSERR_DEADLOCK) | 224 | #define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK) |
223 | #define nfserr_expired __constant_htonl(NFSERR_EXPIRED) | 225 | #define nfserr_expired cpu_to_be32(NFSERR_EXPIRED) |
224 | #define nfserr_bad_cookie __constant_htonl(NFSERR_BAD_COOKIE) | 226 | #define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE) |
225 | #define nfserr_same __constant_htonl(NFSERR_SAME) | 227 | #define nfserr_same cpu_to_be32(NFSERR_SAME) |
226 | #define nfserr_clid_inuse __constant_htonl(NFSERR_CLID_INUSE) | 228 | #define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE) |
227 | #define nfserr_stale_clientid __constant_htonl(NFSERR_STALE_CLIENTID) | 229 | #define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID) |
228 | #define nfserr_resource __constant_htonl(NFSERR_RESOURCE) | 230 | #define nfserr_resource cpu_to_be32(NFSERR_RESOURCE) |
229 | #define nfserr_moved __constant_htonl(NFSERR_MOVED) | 231 | #define nfserr_moved cpu_to_be32(NFSERR_MOVED) |
230 | #define nfserr_nofilehandle __constant_htonl(NFSERR_NOFILEHANDLE) | 232 | #define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE) |
231 | #define nfserr_minor_vers_mismatch __constant_htonl(NFSERR_MINOR_VERS_MISMATCH) | 233 | #define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH) |
232 | #define nfserr_share_denied __constant_htonl(NFSERR_SHARE_DENIED) | 234 | #define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED) |
233 | #define nfserr_stale_stateid __constant_htonl(NFSERR_STALE_STATEID) | 235 | #define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID) |
234 | #define nfserr_old_stateid __constant_htonl(NFSERR_OLD_STATEID) | 236 | #define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID) |
235 | #define nfserr_bad_stateid __constant_htonl(NFSERR_BAD_STATEID) | 237 | #define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID) |
236 | #define nfserr_bad_seqid __constant_htonl(NFSERR_BAD_SEQID) | 238 | #define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID) |
237 | #define nfserr_symlink __constant_htonl(NFSERR_SYMLINK) | 239 | #define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK) |
238 | #define nfserr_not_same __constant_htonl(NFSERR_NOT_SAME) | 240 | #define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME) |
239 | #define nfserr_restorefh __constant_htonl(NFSERR_RESTOREFH) | 241 | #define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH) |
240 | #define nfserr_attrnotsupp __constant_htonl(NFSERR_ATTRNOTSUPP) | 242 | #define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP) |
241 | #define nfserr_bad_xdr __constant_htonl(NFSERR_BAD_XDR) | 243 | #define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR) |
242 | #define nfserr_openmode __constant_htonl(NFSERR_OPENMODE) | 244 | #define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE) |
243 | #define nfserr_locks_held __constant_htonl(NFSERR_LOCKS_HELD) | 245 | #define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD) |
244 | #define nfserr_op_illegal __constant_htonl(NFSERR_OP_ILLEGAL) | 246 | #define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL) |
245 | #define nfserr_grace __constant_htonl(NFSERR_GRACE) | 247 | #define nfserr_grace cpu_to_be32(NFSERR_GRACE) |
246 | #define nfserr_no_grace __constant_htonl(NFSERR_NO_GRACE) | 248 | #define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE) |
247 | #define nfserr_reclaim_bad __constant_htonl(NFSERR_RECLAIM_BAD) | 249 | #define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD) |
248 | #define nfserr_badname __constant_htonl(NFSERR_BADNAME) | 250 | #define nfserr_badname cpu_to_be32(NFSERR_BADNAME) |
249 | #define nfserr_cb_path_down __constant_htonl(NFSERR_CB_PATH_DOWN) | 251 | #define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN) |
250 | #define nfserr_locked __constant_htonl(NFSERR_LOCKED) | 252 | #define nfserr_locked cpu_to_be32(NFSERR_LOCKED) |
251 | #define nfserr_wrongsec __constant_htonl(NFSERR_WRONGSEC) | 253 | #define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC) |
252 | #define nfserr_replay_me __constant_htonl(NFSERR_REPLAY_ME) | 254 | #define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE) |
255 | #define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT) | ||
256 | #define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST) | ||
257 | #define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION) | ||
258 | #define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT) | ||
259 | #define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY) | ||
260 | #define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION) | ||
261 | #define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED) | ||
262 | #define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY) | ||
263 | #define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER) | ||
264 | #define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE) | ||
265 | #define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT) | ||
266 | #define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT) | ||
267 | #define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE) | ||
268 | #define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED) | ||
269 | #define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS) | ||
270 | #define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG) | ||
271 | #define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG) | ||
272 | #define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE) | ||
273 | #define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP) | ||
274 | #define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND) | ||
275 | #define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS) | ||
276 | #define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION) | ||
277 | #define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP) | ||
278 | #define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY) | ||
279 | #define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE) | ||
280 | #define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY) | ||
281 | #define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT) | ||
282 | #define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION) | ||
283 | #define nfserr_encr_alg_unsupp cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP) | ||
284 | #define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT) | ||
285 | #define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP) | ||
286 | #define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED) | ||
287 | #define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE) | ||
288 | #define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL) | ||
289 | #define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG) | ||
290 | #define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT) | ||
291 | #define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED) | ||
253 | 292 | ||
254 | /* error codes for internal use */ | 293 | /* error codes for internal use */ |
255 | /* if a request fails due to kmalloc failure, it gets dropped. | 294 | /* if a request fails due to kmalloc failure, it gets dropped. |
256 | * Client should resend eventually | 295 | * Client should resend eventually |
257 | */ | 296 | */ |
258 | #define nfserr_dropit __constant_htonl(30000) | 297 | #define nfserr_dropit cpu_to_be32(30000) |
259 | /* end-of-file indicator in readdir */ | 298 | /* end-of-file indicator in readdir */ |
260 | #define nfserr_eof __constant_htonl(30001) | 299 | #define nfserr_eof cpu_to_be32(30001) |
300 | /* replay detected */ | ||
301 | #define nfserr_replay_me cpu_to_be32(11001) | ||
302 | /* nfs41 replay detected */ | ||
303 | #define nfserr_replay_cache cpu_to_be32(11002) | ||
261 | 304 | ||
262 | /* Check for dir entries '.' and '..' */ | 305 | /* Check for dir entries '.' and '..' */ |
263 | #define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.')) | 306 | #define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.')) |
@@ -300,7 +343,7 @@ extern struct timeval nfssvc_boot; | |||
300 | * TIME_BACKUP (unlikely to be supported any time soon) | 343 | * TIME_BACKUP (unlikely to be supported any time soon) |
301 | * TIME_CREATE (unlikely to be supported any time soon) | 344 | * TIME_CREATE (unlikely to be supported any time soon) |
302 | */ | 345 | */ |
303 | #define NFSD_SUPPORTED_ATTRS_WORD0 \ | 346 | #define NFSD4_SUPPORTED_ATTRS_WORD0 \ |
304 | (FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \ | 347 | (FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \ |
305 | | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \ | 348 | | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \ |
306 | | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \ | 349 | | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \ |
@@ -312,7 +355,7 @@ extern struct timeval nfssvc_boot; | |||
312 | | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ | 355 | | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ |
313 | | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) | 356 | | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) |
314 | 357 | ||
315 | #define NFSD_SUPPORTED_ATTRS_WORD1 \ | 358 | #define NFSD4_SUPPORTED_ATTRS_WORD1 \ |
316 | (FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \ | 359 | (FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \ |
317 | | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \ | 360 | | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \ |
318 | | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \ | 361 | | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \ |
@@ -320,6 +363,35 @@ extern struct timeval nfssvc_boot; | |||
320 | | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \ | 363 | | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \ |
321 | | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID) | 364 | | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID) |
322 | 365 | ||
366 | #define NFSD4_SUPPORTED_ATTRS_WORD2 0 | ||
367 | |||
368 | #define NFSD4_1_SUPPORTED_ATTRS_WORD0 \ | ||
369 | NFSD4_SUPPORTED_ATTRS_WORD0 | ||
370 | |||
371 | #define NFSD4_1_SUPPORTED_ATTRS_WORD1 \ | ||
372 | NFSD4_SUPPORTED_ATTRS_WORD1 | ||
373 | |||
374 | #define NFSD4_1_SUPPORTED_ATTRS_WORD2 \ | ||
375 | (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) | ||
376 | |||
377 | static inline u32 nfsd_suppattrs0(u32 minorversion) | ||
378 | { | ||
379 | return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 | ||
380 | : NFSD4_SUPPORTED_ATTRS_WORD0; | ||
381 | } | ||
382 | |||
383 | static inline u32 nfsd_suppattrs1(u32 minorversion) | ||
384 | { | ||
385 | return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1 | ||
386 | : NFSD4_SUPPORTED_ATTRS_WORD1; | ||
387 | } | ||
388 | |||
389 | static inline u32 nfsd_suppattrs2(u32 minorversion) | ||
390 | { | ||
391 | return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2 | ||
392 | : NFSD4_SUPPORTED_ATTRS_WORD2; | ||
393 | } | ||
394 | |||
323 | /* These will return ERR_INVAL if specified in GETATTR or READDIR. */ | 395 | /* These will return ERR_INVAL if specified in GETATTR or READDIR. */ |
324 | #define NFSD_WRITEONLY_ATTRS_WORD1 \ | 396 | #define NFSD_WRITEONLY_ATTRS_WORD1 \ |
325 | (FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) | 397 | (FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) |
@@ -330,6 +402,19 @@ extern struct timeval nfssvc_boot; | |||
330 | #define NFSD_WRITEABLE_ATTRS_WORD1 \ | 402 | #define NFSD_WRITEABLE_ATTRS_WORD1 \ |
331 | (FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ | 403 | (FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ |
332 | | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) | 404 | | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) |
405 | #define NFSD_WRITEABLE_ATTRS_WORD2 0 | ||
406 | |||
407 | #define NFSD_SUPPATTR_EXCLCREAT_WORD0 \ | ||
408 | NFSD_WRITEABLE_ATTRS_WORD0 | ||
409 | /* | ||
410 | * we currently store the exclusive create verifier in the v_{a,m}time | ||
411 | * attributes so the client can't set these at create time using EXCLUSIVE4_1 | ||
412 | */ | ||
413 | #define NFSD_SUPPATTR_EXCLCREAT_WORD1 \ | ||
414 | (NFSD_WRITEABLE_ATTRS_WORD1 & \ | ||
415 | ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)) | ||
416 | #define NFSD_SUPPATTR_EXCLCREAT_WORD2 \ | ||
417 | NFSD_WRITEABLE_ATTRS_WORD2 | ||
333 | 418 | ||
334 | #endif /* CONFIG_NFSD_V4 */ | 419 | #endif /* CONFIG_NFSD_V4 */ |
335 | 420 | ||
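nfsd_suppattrs0/1/2() select the supported-attribute words by minorversion, which is how bitmap word 2 gains FATTR4_WORD2_SUPPATTR_EXCLCREAT for NFSv4.1 only. A hedged sketch of how an encoder might clamp a client-supplied bitmap before answering GETATTR; the function name is illustrative.

#include <linux/nfsd/nfsd.h>

/* Sketch: drop any requested attributes this minorversion does not support. */
static void example_clamp_bitmap(u32 minorversion, u32 bmval[3])
{
        bmval[0] &= nfsd_suppattrs0(minorversion);
        bmval[1] &= nfsd_suppattrs1(minorversion);
        bmval[2] &= nfsd_suppattrs2(minorversion);
}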
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index fa317f6c154b..afa19016c4a8 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h | |||
@@ -269,6 +269,13 @@ fh_copy(struct svc_fh *dst, struct svc_fh *src) | |||
269 | return dst; | 269 | return dst; |
270 | } | 270 | } |
271 | 271 | ||
272 | static inline void | ||
273 | fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src) | ||
274 | { | ||
275 | dst->fh_size = src->fh_size; | ||
276 | memcpy(&dst->fh_base, &src->fh_base, src->fh_size); | ||
277 | } | ||
278 | |||
272 | static __inline__ struct svc_fh * | 279 | static __inline__ struct svc_fh * |
273 | fh_init(struct svc_fh *fhp, int maxsize) | 280 | fh_init(struct svc_fh *fhp, int maxsize) |
274 | { | 281 | { |
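fh_copy_shallow() copies just the raw knfsd_fh (its size and opaque bytes) rather than a full svc_fh, which is what the reworked state.h below wants now that delegation recalls and replays hold a struct knfsd_fh instead of a flat length/buffer pair. A hedged sketch; dl_fh aliases cbr_fh via the #define in state.h, and fh_handle is the knfsd_fh embedded in svc_fh, while the function around them is illustrative.

#include <linux/nfsd/nfsfh.h>
#include <linux/nfsd/state.h>

/* Sketch: remember the delegated file's handle for a later CB_RECALL. */
static void example_save_deleg_fh(struct nfs4_delegation *dp,
                                  struct svc_fh *current_fh)
{
        fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
}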
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h index 128298c0362d..4d61c873feed 100644 --- a/include/linux/nfsd/state.h +++ b/include/linux/nfsd/state.h | |||
@@ -66,8 +66,7 @@ struct nfs4_cb_recall { | |||
66 | u32 cbr_ident; | 66 | u32 cbr_ident; |
67 | int cbr_trunc; | 67 | int cbr_trunc; |
68 | stateid_t cbr_stateid; | 68 | stateid_t cbr_stateid; |
69 | u32 cbr_fhlen; | 69 | struct knfsd_fh cbr_fh; |
70 | char cbr_fhval[NFS4_FHSIZE]; | ||
71 | struct nfs4_delegation *cbr_dp; | 70 | struct nfs4_delegation *cbr_dp; |
72 | }; | 71 | }; |
73 | 72 | ||
@@ -86,8 +85,7 @@ struct nfs4_delegation { | |||
86 | }; | 85 | }; |
87 | 86 | ||
88 | #define dl_stateid dl_recall.cbr_stateid | 87 | #define dl_stateid dl_recall.cbr_stateid |
89 | #define dl_fhlen dl_recall.cbr_fhlen | 88 | #define dl_fh dl_recall.cbr_fh |
90 | #define dl_fhval dl_recall.cbr_fhval | ||
91 | 89 | ||
92 | /* client delegation callback info */ | 90 | /* client delegation callback info */ |
93 | struct nfs4_callback { | 91 | struct nfs4_callback { |
@@ -101,6 +99,64 @@ struct nfs4_callback { | |||
101 | struct rpc_clnt * cb_client; | 99 | struct rpc_clnt * cb_client; |
102 | }; | 100 | }; |
103 | 101 | ||
102 | /* Maximum number of slots per session. 128 is useful for long haul TCP */ | ||
103 | #define NFSD_MAX_SLOTS_PER_SESSION 128 | ||
104 | /* Maximum number of pages per slot cache entry */ | ||
105 | #define NFSD_PAGES_PER_SLOT 1 | ||
106 | /* Maximum number of operations per session compound */ | ||
107 | #define NFSD_MAX_OPS_PER_COMPOUND 16 | ||
108 | |||
109 | struct nfsd4_cache_entry { | ||
110 | __be32 ce_status; | ||
111 | struct kvec ce_datav; /* encoded NFSv4.1 data in rq_res.head[0] */ | ||
112 | struct page *ce_respages[NFSD_PAGES_PER_SLOT + 1]; | ||
113 | int ce_cachethis; | ||
114 | short ce_resused; | ||
115 | int ce_opcnt; | ||
116 | int ce_rpchdrlen; | ||
117 | }; | ||
118 | |||
119 | struct nfsd4_slot { | ||
120 | bool sl_inuse; | ||
121 | u32 sl_seqid; | ||
122 | struct nfsd4_cache_entry sl_cache_entry; | ||
123 | }; | ||
124 | |||
125 | struct nfsd4_session { | ||
126 | struct kref se_ref; | ||
127 | struct list_head se_hash; /* hash by sessionid */ | ||
128 | struct list_head se_perclnt; | ||
129 | u32 se_flags; | ||
130 | struct nfs4_client *se_client; /* for expire_client */ | ||
131 | struct nfs4_sessionid se_sessionid; | ||
132 | u32 se_fmaxreq_sz; | ||
133 | u32 se_fmaxresp_sz; | ||
134 | u32 se_fmaxresp_cached; | ||
135 | u32 se_fmaxops; | ||
136 | u32 se_fnumslots; | ||
137 | struct nfsd4_slot se_slots[]; /* forward channel slots */ | ||
138 | }; | ||
139 | |||
140 | static inline void | ||
141 | nfsd4_put_session(struct nfsd4_session *ses) | ||
142 | { | ||
143 | extern void free_session(struct kref *kref); | ||
144 | kref_put(&ses->se_ref, free_session); | ||
145 | } | ||
146 | |||
147 | static inline void | ||
148 | nfsd4_get_session(struct nfsd4_session *ses) | ||
149 | { | ||
150 | kref_get(&ses->se_ref); | ||
151 | } | ||
152 | |||
153 | /* formatted contents of nfs4_sessionid */ | ||
154 | struct nfsd4_sessionid { | ||
155 | clientid_t clientid; | ||
156 | u32 sequence; | ||
157 | u32 reserved; | ||
158 | }; | ||
159 | |||
104 | #define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */ | 160 | #define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */ |
105 | 161 | ||
106 | /* | 162 | /* |
@@ -132,6 +188,12 @@ struct nfs4_client { | |||
132 | struct nfs4_callback cl_callback; /* callback info */ | 188 | struct nfs4_callback cl_callback; /* callback info */ |
133 | atomic_t cl_count; /* ref count */ | 189 | atomic_t cl_count; /* ref count */ |
134 | u32 cl_firststate; /* recovery dir creation */ | 190 | u32 cl_firststate; /* recovery dir creation */ |
191 | |||
192 | /* for nfs41 */ | ||
193 | struct list_head cl_sessions; | ||
194 | struct nfsd4_slot cl_slot; /* create_session slot */ | ||
195 | u32 cl_exchange_flags; | ||
196 | struct nfs4_sessionid cl_sessionid; | ||
135 | }; | 197 | }; |
136 | 198 | ||
137 | /* struct nfs4_client_reset | 199 | /* struct nfs4_client_reset |
@@ -168,8 +230,7 @@ struct nfs4_replay { | |||
168 | unsigned int rp_buflen; | 230 | unsigned int rp_buflen; |
169 | char *rp_buf; | 231 | char *rp_buf; |
170 | unsigned int rp_allocated; | 232 | unsigned int rp_allocated; |
171 | int rp_openfh_len; | 233 | struct knfsd_fh rp_openfh; |
172 | char rp_openfh[NFS4_FHSIZE]; | ||
173 | char rp_ibuf[NFSD4_REPLAY_ISIZE]; | 234 | char rp_ibuf[NFSD4_REPLAY_ISIZE]; |
174 | }; | 235 | }; |
175 | 236 | ||
@@ -217,7 +278,7 @@ struct nfs4_stateowner { | |||
217 | * share_access, share_deny on the file. | 278 |
218 | */ | 279 | */ |
219 | struct nfs4_file { | 280 | struct nfs4_file { |
220 | struct kref fi_ref; | 281 | atomic_t fi_ref; |
221 | struct list_head fi_hash; /* hash by "struct inode *" */ | 282 | struct list_head fi_hash; /* hash by "struct inode *" */ |
222 | struct list_head fi_stateids; | 283 | struct list_head fi_stateids; |
223 | struct list_head fi_delegations; | 284 | struct list_head fi_delegations; |
@@ -259,14 +320,13 @@ struct nfs4_stateid { | |||
259 | }; | 320 | }; |
260 | 321 | ||
261 | /* flags for preprocess_seqid_op() */ | 322 | /* flags for preprocess_seqid_op() */ |
262 | #define CHECK_FH 0x00000001 | 323 | #define HAS_SESSION 0x00000001 |
263 | #define CONFIRM 0x00000002 | 324 | #define CONFIRM 0x00000002 |
264 | #define OPEN_STATE 0x00000004 | 325 | #define OPEN_STATE 0x00000004 |
265 | #define LOCK_STATE 0x00000008 | 326 | #define LOCK_STATE 0x00000008 |
266 | #define RD_STATE 0x00000010 | 327 | #define RD_STATE 0x00000010 |
267 | #define WR_STATE 0x00000020 | 328 | #define WR_STATE 0x00000020 |
268 | #define CLOSE_STATE 0x00000040 | 329 | #define CLOSE_STATE 0x00000040 |
269 | #define DELEG_RET 0x00000080 | ||
270 | 330 | ||
271 | #define seqid_mutating_err(err) \ | 331 | #define seqid_mutating_err(err) \ |
272 | (((err) != nfserr_stale_clientid) && \ | 332 | (((err) != nfserr_stale_clientid) && \ |
@@ -274,7 +334,9 @@ struct nfs4_stateid { | |||
274 | ((err) != nfserr_stale_stateid) && \ | 334 | ((err) != nfserr_stale_stateid) && \ |
275 | ((err) != nfserr_bad_stateid)) | 335 | ((err) != nfserr_bad_stateid)) |
276 | 336 | ||
277 | extern __be32 nfs4_preprocess_stateid_op(struct svc_fh *current_fh, | 337 | struct nfsd4_compound_state; |
338 | |||
339 | extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, | ||
278 | stateid_t *stateid, int flags, struct file **filp); | 340 | stateid_t *stateid, int flags, struct file **filp); |
279 | extern void nfs4_lock_state(void); | 341 | extern void nfs4_lock_state(void); |
280 | extern void nfs4_unlock_state(void); | 342 | extern void nfs4_unlock_state(void); |
@@ -290,7 +352,7 @@ extern void nfsd4_init_recdir(char *recdir_name); | |||
290 | extern int nfsd4_recdir_load(void); | 352 | extern int nfsd4_recdir_load(void); |
291 | extern void nfsd4_shutdown_recdir(void); | 353 | extern void nfsd4_shutdown_recdir(void); |
292 | extern int nfs4_client_to_reclaim(const char *name); | 354 | extern int nfs4_client_to_reclaim(const char *name); |
293 | extern int nfs4_has_reclaimed_state(const char *name); | 355 | extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id); |
294 | extern void nfsd4_recdir_purge_old(void); | 356 | extern void nfsd4_recdir_purge_old(void); |
295 | extern int nfsd4_create_clid_dir(struct nfs4_client *clp); | 357 | extern int nfsd4_create_clid_dir(struct nfs4_client *clp); |
296 | extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); | 358 | extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); |
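Note on the session additions above: struct nfsd4_session ends with a flexible array member (se_slots[]), so each session's forward-channel slot table is sized when the session is allocated rather than fixed at NFSD_MAX_SLOTS_PER_SESSION. A minimal user-space sketch of that allocation pattern follows; the struct and helper names are simplified stand-ins for illustration, not the kernel code.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Simplified stand-ins for the kernel structures above. */
    struct demo_slot {
            int          sl_inuse;
            unsigned int sl_seqid;
    };

    struct demo_session {
            unsigned int se_fnumslots;   /* number of forward-channel slots */
            struct demo_slot se_slots[]; /* flexible array member, sized at alloc time */
    };

    static struct demo_session *alloc_session(unsigned int nslots)
    {
            /* One allocation covers the header plus nslots slot entries. */
            size_t sz = sizeof(struct demo_session) + nslots * sizeof(struct demo_slot);
            struct demo_session *ses = malloc(sz);

            if (!ses)
                    return NULL;
            memset(ses, 0, sz);
            ses->se_fnumslots = nslots;
            return ses;
    }

    int main(void)
    {
            struct demo_session *ses = alloc_session(16);

            if (!ses)
                    return 1;
            ses->se_slots[3].sl_seqid = 42;   /* slots are addressed like a normal array */
            printf("slots=%u, slot[3].seqid=%u\n",
                   ses->se_fnumslots, ses->se_slots[3].sl_seqid);
            free(ses);
            return 0;
    }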
diff --git a/include/linux/nfsd/stats.h b/include/linux/nfsd/stats.h index 7678cfbe9960..2693ef647df6 100644 --- a/include/linux/nfsd/stats.h +++ b/include/linux/nfsd/stats.h | |||
@@ -11,6 +11,11 @@ | |||
11 | 11 | ||
12 | #include <linux/nfs4.h> | 12 | #include <linux/nfs4.h> |
13 | 13 | ||
14 | /* thread usage wraps every million seconds (approx one fortnight) */ | ||
15 | #define NFSD_USAGE_WRAP (HZ*1000000) | ||
16 | |||
17 | #ifdef __KERNEL__ | ||
18 | |||
14 | struct nfsd_stats { | 19 | struct nfsd_stats { |
15 | unsigned int rchits; /* repcache hits */ | 20 | unsigned int rchits; /* repcache hits */ |
16 | unsigned int rcmisses; /* repcache hits */ | 21 | unsigned int rcmisses; /* repcache hits */ |
@@ -35,10 +40,6 @@ struct nfsd_stats { | |||
35 | 40 | ||
36 | }; | 41 | }; |
37 | 42 | ||
38 | /* thread usage wraps every million seconds (approx one fortnight) */ | ||
39 | #define NFSD_USAGE_WRAP (HZ*1000000) | ||
40 | |||
41 | #ifdef __KERNEL__ | ||
42 | 43 | ||
43 | extern struct nfsd_stats nfsdstats; | 44 | extern struct nfsd_stats nfsdstats; |
44 | extern struct svc_stat nfsd_svcstats; | 45 | extern struct svc_stat nfsd_svcstats; |
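The relocated comment above is plain arithmetic: thread usage is counted in jiffies, so the counter wraps after HZ*1000000 jiffies, i.e. one million seconds, which is about 11.6 days, roughly a fortnight. A trivial user-space check (HZ value here is only an example):

    #include <stdio.h>

    int main(void)
    {
            unsigned long hz = 1000;                 /* example HZ value */
            unsigned long wrap = hz * 1000000UL;     /* NFSD_USAGE_WRAP in jiffies */
            double seconds = (double)wrap / hz;      /* one million seconds */

            printf("wrap after %.0f seconds = %.1f days\n",
                   seconds, seconds / 86400.0);      /* ~11.6 days, about a fortnight */
            return 0;
    }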
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h index 27bd3e38ec5a..f80d6013fdc3 100644 --- a/include/linux/nfsd/xdr4.h +++ b/include/linux/nfsd/xdr4.h | |||
@@ -45,10 +45,22 @@ | |||
45 | #define XDR_LEN(n) (((n) + 3) & ~3) | 45 | #define XDR_LEN(n) (((n) + 3) & ~3) |
46 | 46 | ||
47 | struct nfsd4_compound_state { | 47 | struct nfsd4_compound_state { |
48 | struct svc_fh current_fh; | 48 | struct svc_fh current_fh; |
49 | struct svc_fh save_fh; | 49 | struct svc_fh save_fh; |
50 | struct nfs4_stateowner *replay_owner; | 50 | struct nfs4_stateowner *replay_owner; |
51 | }; | 51 | /* For sessions DRC */ |
52 | struct nfsd4_session *session; | ||
53 | struct nfsd4_slot *slot; | ||
54 | __be32 *statp; | ||
55 | size_t iovlen; | ||
56 | u32 minorversion; | ||
57 | u32 status; | ||
58 | }; | ||
59 | |||
60 | static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs) | ||
61 | { | ||
62 | return cs->slot != NULL; | ||
63 | } | ||
52 | 64 | ||
53 | struct nfsd4_change_info { | 65 | struct nfsd4_change_info { |
54 | u32 atomic; | 66 | u32 atomic; |
@@ -90,7 +102,7 @@ struct nfsd4_create { | |||
90 | u32 specdata2; | 102 | u32 specdata2; |
91 | } dev; /* NF4BLK, NF4CHR */ | 103 | } dev; /* NF4BLK, NF4CHR */ |
92 | } u; | 104 | } u; |
93 | u32 cr_bmval[2]; /* request */ | 105 | u32 cr_bmval[3]; /* request */ |
94 | struct iattr cr_iattr; /* request */ | 106 | struct iattr cr_iattr; /* request */ |
95 | struct nfsd4_change_info cr_cinfo; /* response */ | 107 | struct nfsd4_change_info cr_cinfo; /* response */ |
96 | struct nfs4_acl *cr_acl; | 108 | struct nfs4_acl *cr_acl; |
@@ -105,7 +117,7 @@ struct nfsd4_delegreturn { | |||
105 | }; | 117 | }; |
106 | 118 | ||
107 | struct nfsd4_getattr { | 119 | struct nfsd4_getattr { |
108 | u32 ga_bmval[2]; /* request */ | 120 | u32 ga_bmval[3]; /* request */ |
109 | struct svc_fh *ga_fhp; /* response */ | 121 | struct svc_fh *ga_fhp; /* response */ |
110 | }; | 122 | }; |
111 | 123 | ||
@@ -206,11 +218,9 @@ struct nfsd4_open { | |||
206 | stateid_t op_delegate_stateid; /* request - response */ | 218 | stateid_t op_delegate_stateid; /* request - response */ |
207 | u32 op_create; /* request */ | 219 | u32 op_create; /* request */ |
208 | u32 op_createmode; /* request */ | 220 | u32 op_createmode; /* request */ |
209 | u32 op_bmval[2]; /* request */ | 221 | u32 op_bmval[3]; /* request */ |
210 | union { /* request */ | 222 | struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */ |
211 | struct iattr iattr; /* UNCHECKED4,GUARDED4 */ | 223 | nfs4_verifier verf; /* EXCLUSIVE4 */ |
212 | nfs4_verifier verf; /* EXCLUSIVE4 */ | ||
213 | } u; | ||
214 | clientid_t op_clientid; /* request */ | 224 | clientid_t op_clientid; /* request */ |
215 | struct xdr_netobj op_owner; /* request */ | 225 | struct xdr_netobj op_owner; /* request */ |
216 | u32 op_seqid; /* request */ | 226 | u32 op_seqid; /* request */ |
@@ -224,8 +234,8 @@ struct nfsd4_open { | |||
224 | struct nfs4_stateowner *op_stateowner; /* used during processing */ | 234 | struct nfs4_stateowner *op_stateowner; /* used during processing */ |
225 | struct nfs4_acl *op_acl; | 235 | struct nfs4_acl *op_acl; |
226 | }; | 236 | }; |
227 | #define op_iattr u.iattr | 237 | #define op_iattr iattr |
228 | #define op_verf u.verf | 238 | #define op_verf verf |
229 | 239 | ||
230 | struct nfsd4_open_confirm { | 240 | struct nfsd4_open_confirm { |
231 | stateid_t oc_req_stateid /* request */; | 241 | stateid_t oc_req_stateid /* request */; |
@@ -259,7 +269,7 @@ struct nfsd4_readdir { | |||
259 | nfs4_verifier rd_verf; /* request */ | 269 | nfs4_verifier rd_verf; /* request */ |
260 | u32 rd_dircount; /* request */ | 270 | u32 rd_dircount; /* request */ |
261 | u32 rd_maxcount; /* request */ | 271 | u32 rd_maxcount; /* request */ |
262 | u32 rd_bmval[2]; /* request */ | 272 | u32 rd_bmval[3]; /* request */ |
263 | struct svc_rqst *rd_rqstp; /* response */ | 273 | struct svc_rqst *rd_rqstp; /* response */ |
264 | struct svc_fh * rd_fhp; /* response */ | 274 | struct svc_fh * rd_fhp; /* response */ |
265 | 275 | ||
@@ -301,7 +311,7 @@ struct nfsd4_secinfo { | |||
301 | 311 | ||
302 | struct nfsd4_setattr { | 312 | struct nfsd4_setattr { |
303 | stateid_t sa_stateid; /* request */ | 313 | stateid_t sa_stateid; /* request */ |
304 | u32 sa_bmval[2]; /* request */ | 314 | u32 sa_bmval[3]; /* request */ |
305 | struct iattr sa_iattr; /* request */ | 315 | struct iattr sa_iattr; /* request */ |
306 | struct nfs4_acl *sa_acl; | 316 | struct nfs4_acl *sa_acl; |
307 | }; | 317 | }; |
@@ -327,7 +337,7 @@ struct nfsd4_setclientid_confirm { | |||
327 | 337 | ||
328 | /* also used for NVERIFY */ | 338 | /* also used for NVERIFY */ |
329 | struct nfsd4_verify { | 339 | struct nfsd4_verify { |
330 | u32 ve_bmval[2]; /* request */ | 340 | u32 ve_bmval[3]; /* request */ |
331 | u32 ve_attrlen; /* request */ | 341 | u32 ve_attrlen; /* request */ |
332 | char * ve_attrval; /* request */ | 342 | char * ve_attrval; /* request */ |
333 | }; | 343 | }; |
@@ -344,6 +354,54 @@ struct nfsd4_write { | |||
344 | nfs4_verifier wr_verifier; /* response */ | 354 | nfs4_verifier wr_verifier; /* response */ |
345 | }; | 355 | }; |
346 | 356 | ||
357 | struct nfsd4_exchange_id { | ||
358 | nfs4_verifier verifier; | ||
359 | struct xdr_netobj clname; | ||
360 | u32 flags; | ||
361 | clientid_t clientid; | ||
362 | u32 seqid; | ||
363 | int spa_how; | ||
364 | }; | ||
365 | |||
366 | struct nfsd4_channel_attrs { | ||
367 | u32 headerpadsz; | ||
368 | u32 maxreq_sz; | ||
369 | u32 maxresp_sz; | ||
370 | u32 maxresp_cached; | ||
371 | u32 maxops; | ||
372 | u32 maxreqs; | ||
373 | u32 nr_rdma_attrs; | ||
374 | u32 rdma_attrs; | ||
375 | }; | ||
376 | |||
377 | struct nfsd4_create_session { | ||
378 | clientid_t clientid; | ||
379 | struct nfs4_sessionid sessionid; | ||
380 | u32 seqid; | ||
381 | u32 flags; | ||
382 | struct nfsd4_channel_attrs fore_channel; | ||
383 | struct nfsd4_channel_attrs back_channel; | ||
384 | u32 callback_prog; | ||
385 | u32 uid; | ||
386 | u32 gid; | ||
387 | }; | ||
388 | |||
389 | struct nfsd4_sequence { | ||
390 | struct nfs4_sessionid sessionid; /* request/response */ | ||
391 | u32 seqid; /* request/response */ | ||
392 | u32 slotid; /* request/response */ | ||
393 | u32 maxslots; /* request/response */ | ||
394 | u32 cachethis; /* request */ | ||
395 | #if 0 | ||
396 | u32 target_maxslots; /* response */ | ||
397 | u32 status_flags; /* response */ | ||
398 | #endif /* not yet */ | ||
399 | }; | ||
400 | |||
401 | struct nfsd4_destroy_session { | ||
402 | struct nfs4_sessionid sessionid; | ||
403 | }; | ||
404 | |||
347 | struct nfsd4_op { | 405 | struct nfsd4_op { |
348 | int opnum; | 406 | int opnum; |
349 | __be32 status; | 407 | __be32 status; |
@@ -378,6 +436,12 @@ struct nfsd4_op { | |||
378 | struct nfsd4_verify verify; | 436 | struct nfsd4_verify verify; |
379 | struct nfsd4_write write; | 437 | struct nfsd4_write write; |
380 | struct nfsd4_release_lockowner release_lockowner; | 438 | struct nfsd4_release_lockowner release_lockowner; |
439 | |||
440 | /* NFSv4.1 */ | ||
441 | struct nfsd4_exchange_id exchange_id; | ||
442 | struct nfsd4_create_session create_session; | ||
443 | struct nfsd4_destroy_session destroy_session; | ||
444 | struct nfsd4_sequence sequence; | ||
381 | } u; | 445 | } u; |
382 | struct nfs4_replay * replay; | 446 | struct nfs4_replay * replay; |
383 | }; | 447 | }; |
@@ -416,9 +480,22 @@ struct nfsd4_compoundres { | |||
416 | u32 taglen; | 480 | u32 taglen; |
417 | char * tag; | 481 | char * tag; |
418 | u32 opcnt; | 482 | u32 opcnt; |
419 | __be32 * tagp; /* where to encode tag and opcount */ | 483 | __be32 * tagp; /* tag, opcount encode location */ |
484 | struct nfsd4_compound_state cstate; | ||
420 | }; | 485 | }; |
421 | 486 | ||
487 | static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) | ||
488 | { | ||
489 | struct nfsd4_compoundargs *args = resp->rqstp->rq_argp; | ||
490 | return args->opcnt == 1; | ||
491 | } | ||
492 | |||
493 | static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) | ||
494 | { | ||
495 | return !resp->cstate.slot->sl_cache_entry.ce_cachethis || | ||
496 | nfsd4_is_solo_sequence(resp); | ||
497 | } | ||
498 | |||
422 | #define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) | 499 | #define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) |
423 | 500 | ||
424 | static inline void | 501 | static inline void |
@@ -448,7 +525,23 @@ extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp, | |||
448 | extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, | 525 | extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, |
449 | struct nfsd4_compound_state *, | 526 | struct nfsd4_compound_state *, |
450 | struct nfsd4_setclientid_confirm *setclientid_confirm); | 527 | struct nfsd4_setclientid_confirm *setclientid_confirm); |
451 | extern __be32 nfsd4_process_open1(struct nfsd4_open *open); | 528 | extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp); |
529 | extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, | ||
530 | struct nfsd4_sequence *seq); | ||
531 | extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, | ||
532 | struct nfsd4_compound_state *, | ||
533 | struct nfsd4_exchange_id *); | ||
534 | extern __be32 nfsd4_create_session(struct svc_rqst *, | ||
535 | struct nfsd4_compound_state *, | ||
536 | struct nfsd4_create_session *); | ||
537 | extern __be32 nfsd4_sequence(struct svc_rqst *, | ||
538 | struct nfsd4_compound_state *, | ||
539 | struct nfsd4_sequence *); | ||
540 | extern __be32 nfsd4_destroy_session(struct svc_rqst *, | ||
541 | struct nfsd4_compound_state *, | ||
542 | struct nfsd4_destroy_session *); | ||
543 | extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *, | ||
544 | struct nfsd4_open *open); | ||
452 | extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, | 545 | extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, |
453 | struct svc_fh *current_fh, struct nfsd4_open *open); | 546 | struct svc_fh *current_fh, struct nfsd4_open *open); |
454 | extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, | 547 | extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, |
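Several of the attribute bitmaps above grow from two to three 32-bit words (cr_bmval[3], ga_bmval[3], rd_bmval[3], sa_bmval[3], ve_bmval[3], op_bmval[3]) because NFSv4.1 defines attributes whose bit numbers exceed 63. Indexing such a bitmap is word = attr / 32, bit = attr % 32; the sketch below only illustrates that arithmetic with invented helper names, it is not the nfsd encoding code.

    #include <stdio.h>
    #include <stdint.h>

    #define BMVAL_WORDS 3   /* widened from 2 for NFSv4.1 attributes */

    static void bmval_set(uint32_t bmval[BMVAL_WORDS], unsigned int attr)
    {
            bmval[attr / 32] |= 1u << (attr % 32);
    }

    static int bmval_test(const uint32_t bmval[BMVAL_WORDS], unsigned int attr)
    {
            return (bmval[attr / 32] >> (attr % 32)) & 1u;
    }

    int main(void)
    {
            uint32_t bmval[BMVAL_WORDS] = { 0, 0, 0 };

            bmval_set(bmval, 4);    /* an attribute bit in word 0 */
            bmval_set(bmval, 70);   /* word 2, unreachable with only two words */

            printf("attr 4: %d, attr 70: %d, words: %08x %08x %08x\n",
                   bmval_test(bmval, 4), bmval_test(bmval, 70),
                   bmval[0], bmval[1], bmval[2]);
            return 0;
    }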
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h new file mode 100644 index 000000000000..79fec6af3f9f --- /dev/null +++ b/include/linux/nilfs2_fs.h | |||
@@ -0,0 +1,801 @@ | |||
1 | /* | ||
2 | * nilfs2_fs.h - NILFS2 on-disk structures and common declarations. | ||
3 | * | ||
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
19 | * | ||
20 | * Written by Koji Sato <koji@osrg.net> | ||
21 | * Ryusuke Konishi <ryusuke@osrg.net> | ||
22 | */ | ||
23 | /* | ||
24 | * linux/include/linux/ext2_fs.h | ||
25 | * | ||
26 | * Copyright (C) 1992, 1993, 1994, 1995 | ||
27 | * Remy Card (card@masi.ibp.fr) | ||
28 | * Laboratoire MASI - Institut Blaise Pascal | ||
29 | * Universite Pierre et Marie Curie (Paris VI) | ||
30 | * | ||
31 | * from | ||
32 | * | ||
33 | * linux/include/linux/minix_fs.h | ||
34 | * | ||
35 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
36 | */ | ||
37 | |||
38 | #ifndef _LINUX_NILFS_FS_H | ||
39 | #define _LINUX_NILFS_FS_H | ||
40 | |||
41 | #include <linux/types.h> | ||
42 | #include <linux/ioctl.h> | ||
43 | |||
44 | /* | ||
45 | * Inode flags stored in nilfs_inode and on-memory nilfs inode | ||
46 | * | ||
47 | * We define these flags based on ext2-fs for compatibility | ||
48 | * reasons; this avoids problems in chattr(1) | ||
49 | */ | ||
50 | #define NILFS_SECRM_FL 0x00000001 /* Secure deletion */ | ||
51 | #define NILFS_UNRM_FL 0x00000002 /* Undelete */ | ||
52 | #define NILFS_SYNC_FL 0x00000008 /* Synchronous updates */ | ||
53 | #define NILFS_IMMUTABLE_FL 0x00000010 /* Immutable file */ | ||
54 | #define NILFS_APPEND_FL 0x00000020 /* writes to file may only append */ | ||
55 | #define NILFS_NODUMP_FL 0x00000040 /* do not dump file */ | ||
56 | #define NILFS_NOATIME_FL 0x00000080 /* do not update atime */ | ||
57 | /* Reserved for compression usage... */ | ||
58 | #define NILFS_NOTAIL_FL 0x00008000 /* file tail should not be merged */ | ||
59 | #define NILFS_DIRSYNC_FL 0x00010000 /* dirsync behaviour */ | ||
60 | |||
61 | #define NILFS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */ | ||
62 | #define NILFS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */ | ||
63 | |||
64 | |||
65 | #define NILFS_INODE_BMAP_SIZE 7 | ||
66 | /** | ||
67 | * struct nilfs_inode - structure of an inode on disk | ||
68 | * @i_blocks: blocks count | ||
69 | * @i_size: size in bytes | ||
70 | * @i_ctime: creation time (seconds) | ||
71 | * @i_mtime: modification time (seconds) | ||
72 | * @i_ctime_nsec: creation time (nano seconds) | ||
73 | * @i_mtime_nsec: modification time (nano seconds) | ||
74 | * @i_uid: user id | ||
75 | * @i_gid: group id | ||
76 | * @i_mode: file mode | ||
77 | * @i_links_count: links count | ||
78 | * @i_flags: file flags | ||
79 | * @i_bmap: block mapping | ||
80 | * @i_xattr: extended attributes | ||
81 | * @i_generation: file generation (for NFS) | ||
82 | * @i_pad: padding | ||
83 | */ | ||
84 | struct nilfs_inode { | ||
85 | __le64 i_blocks; | ||
86 | __le64 i_size; | ||
87 | __le64 i_ctime; | ||
88 | __le64 i_mtime; | ||
89 | __le32 i_ctime_nsec; | ||
90 | __le32 i_mtime_nsec; | ||
91 | __le32 i_uid; | ||
92 | __le32 i_gid; | ||
93 | __le16 i_mode; | ||
94 | __le16 i_links_count; | ||
95 | __le32 i_flags; | ||
96 | __le64 i_bmap[NILFS_INODE_BMAP_SIZE]; | ||
97 | #define i_device_code i_bmap[0] | ||
98 | __le64 i_xattr; | ||
99 | __le32 i_generation; | ||
100 | __le32 i_pad; | ||
101 | }; | ||
102 | |||
103 | /** | ||
104 | * struct nilfs_super_root - structure of super root | ||
105 | * @sr_sum: check sum | ||
106 | * @sr_bytes: byte count of the structure | ||
107 | * @sr_flags: flags (reserved) | ||
108 | * @sr_nongc_ctime: write time of the last segment not for cleaner operation | ||
109 | * @sr_dat: DAT file inode | ||
110 | * @sr_cpfile: checkpoint file inode | ||
111 | * @sr_sufile: segment usage file inode | ||
112 | */ | ||
113 | struct nilfs_super_root { | ||
114 | __le32 sr_sum; | ||
115 | __le16 sr_bytes; | ||
116 | __le16 sr_flags; | ||
117 | __le64 sr_nongc_ctime; | ||
118 | struct nilfs_inode sr_dat; | ||
119 | struct nilfs_inode sr_cpfile; | ||
120 | struct nilfs_inode sr_sufile; | ||
121 | }; | ||
122 | |||
123 | #define NILFS_SR_MDT_OFFSET(inode_size, i) \ | ||
124 | ((unsigned long)&((struct nilfs_super_root *)0)->sr_dat + \ | ||
125 | (inode_size) * (i)) | ||
126 | #define NILFS_SR_DAT_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 0) | ||
127 | #define NILFS_SR_CPFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 1) | ||
128 | #define NILFS_SR_SUFILE_OFFSET(inode_size) NILFS_SR_MDT_OFFSET(inode_size, 2) | ||
129 | #define NILFS_SR_BYTES (sizeof(struct nilfs_super_root)) | ||
130 | |||
131 | /* | ||
132 | * Maximal mount counts | ||
133 | */ | ||
134 | #define NILFS_DFL_MAX_MNT_COUNT 50 /* 50 mounts */ | ||
135 | |||
136 | /* | ||
137 | * File system states (sbp->s_state, nilfs->ns_mount_state) | ||
138 | */ | ||
139 | #define NILFS_VALID_FS 0x0001 /* Unmounted cleanly */ | ||
140 | #define NILFS_ERROR_FS 0x0002 /* Errors detected */ | ||
141 | #define NILFS_RESIZE_FS 0x0004 /* Resize required */ | ||
142 | |||
143 | /* | ||
144 | * Mount flags (sbi->s_mount_opt) | ||
145 | */ | ||
146 | #define NILFS_MOUNT_ERROR_MODE 0x0070 /* Error mode mask */ | ||
147 | #define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ | ||
148 | #define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ | ||
149 | #define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ | ||
150 | #define NILFS_MOUNT_SNAPSHOT 0x0080 /* Snapshot flag */ | ||
151 | #define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ | ||
152 | #define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order | ||
153 | semantics also for data */ | ||
154 | |||
155 | |||
156 | /** | ||
157 | * struct nilfs_super_block - structure of super block on disk | ||
158 | */ | ||
159 | struct nilfs_super_block { | ||
160 | __le32 s_rev_level; /* Revision level */ | ||
161 | __le16 s_minor_rev_level; /* minor revision level */ | ||
162 | __le16 s_magic; /* Magic signature */ | ||
163 | |||
164 | __le16 s_bytes; /* Bytes count of CRC calculation | ||
165 | for this structure. s_reserved | ||
166 | is excluded. */ | ||
167 | __le16 s_flags; /* flags */ | ||
168 | __le32 s_crc_seed; /* Seed value of CRC calculation */ | ||
169 | __le32 s_sum; /* Check sum of super block */ | ||
170 | |||
171 | __le32 s_log_block_size; /* Block size represented as follows | ||
172 | blocksize = | ||
173 | 1 << (s_log_block_size + 10) */ | ||
174 | __le64 s_nsegments; /* Number of segments in filesystem */ | ||
175 | __le64 s_dev_size; /* block device size in bytes */ | ||
176 | __le64 s_first_data_block; /* 1st seg disk block number */ | ||
177 | __le32 s_blocks_per_segment; /* number of blocks per full segment */ | ||
178 | __le32 s_r_segments_percentage; /* Reserved segments percentage */ | ||
179 | |||
180 | __le64 s_last_cno; /* Last checkpoint number */ | ||
181 | __le64 s_last_pseg; /* disk block addr pseg written last */ | ||
182 | __le64 s_last_seq; /* seq. number of seg written last */ | ||
183 | __le64 s_free_blocks_count; /* Free blocks count */ | ||
184 | |||
185 | __le64 s_ctime; /* Creation time (execution time of | ||
186 | newfs) */ | ||
187 | __le64 s_mtime; /* Mount time */ | ||
188 | __le64 s_wtime; /* Write time */ | ||
189 | __le16 s_mnt_count; /* Mount count */ | ||
190 | __le16 s_max_mnt_count; /* Maximal mount count */ | ||
191 | __le16 s_state; /* File system state */ | ||
192 | __le16 s_errors; /* Behaviour when detecting errors */ | ||
193 | __le64 s_lastcheck; /* time of last check */ | ||
194 | |||
195 | __le32 s_checkinterval; /* max. time between checks */ | ||
196 | __le32 s_creator_os; /* OS */ | ||
197 | __le16 s_def_resuid; /* Default uid for reserved blocks */ | ||
198 | __le16 s_def_resgid; /* Default gid for reserved blocks */ | ||
199 | __le32 s_first_ino; /* First non-reserved inode */ | ||
200 | |||
201 | __le16 s_inode_size; /* Size of an inode */ | ||
202 | __le16 s_dat_entry_size; /* Size of a dat entry */ | ||
203 | __le16 s_checkpoint_size; /* Size of a checkpoint */ | ||
204 | __le16 s_segment_usage_size; /* Size of a segment usage */ | ||
205 | |||
206 | __u8 s_uuid[16]; /* 128-bit uuid for volume */ | ||
207 | char s_volume_name[16]; /* volume name */ | ||
208 | char s_last_mounted[64]; /* directory where last mounted */ | ||
209 | |||
210 | __le32 s_c_interval; /* Commit interval of segment */ | ||
211 | __le32 s_c_block_max; /* Threshold of data amount for | ||
212 | the segment construction */ | ||
213 | __u32 s_reserved[192]; /* padding to the end of the block */ | ||
214 | }; | ||
215 | |||
216 | /* | ||
217 | * Codes for operating systems | ||
218 | */ | ||
219 | #define NILFS_OS_LINUX 0 | ||
220 | /* Codes from 1 to 4 are reserved to keep compatibility with ext2 creator-OS */ | ||
221 | |||
222 | /* | ||
223 | * Revision levels | ||
224 | */ | ||
225 | #define NILFS_CURRENT_REV 2 /* current major revision */ | ||
226 | #define NILFS_MINOR_REV 0 /* minor revision */ | ||
227 | |||
228 | /* | ||
229 | * Bytes count of super_block for CRC-calculation | ||
230 | */ | ||
231 | #define NILFS_SB_BYTES \ | ||
232 | ((long)&((struct nilfs_super_block *)0)->s_reserved) | ||
233 | |||
234 | /* | ||
235 | * Special inode number | ||
236 | */ | ||
237 | #define NILFS_ROOT_INO 2 /* Root file inode */ | ||
238 | #define NILFS_DAT_INO 3 /* DAT file */ | ||
239 | #define NILFS_CPFILE_INO 4 /* checkpoint file */ | ||
240 | #define NILFS_SUFILE_INO 5 /* segment usage file */ | ||
241 | #define NILFS_IFILE_INO 6 /* ifile */ | ||
242 | #define NILFS_ATIME_INO 7 /* Atime file (reserved) */ | ||
243 | #define NILFS_XATTR_INO 8 /* Xattribute file (reserved) */ | ||
244 | #define NILFS_SKETCH_INO 10 /* Sketch file */ | ||
245 | #define NILFS_USER_INO 11 /* First user's file inode number */ | ||
246 | |||
247 | #define NILFS_SB_OFFSET_BYTES 1024 /* byte offset of nilfs superblock */ | ||
248 | #define NILFS_SUPER_MAGIC 0x3434 /* NILFS filesystem magic number */ | ||
249 | |||
250 | #define NILFS_SEG_MIN_BLOCKS 16 /* Minimum number of blocks in | ||
251 | a full segment */ | ||
252 | #define NILFS_PSEG_MIN_BLOCKS 2 /* Minimum number of blocks in | ||
253 | a partial segment */ | ||
254 | #define NILFS_MIN_NRSVSEGS 8 /* Minimum number of reserved | ||
255 | segments */ | ||
256 | |||
257 | /* | ||
258 | * bytes offset of secondary super block | ||
259 | */ | ||
260 | #define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12) | ||
261 | |||
262 | /* | ||
263 | * Maximal count of links to a file | ||
264 | */ | ||
265 | #define NILFS_LINK_MAX 32000 | ||
266 | |||
267 | /* | ||
268 | * Structure of a directory entry | ||
269 | * (Same as ext2) | ||
270 | */ | ||
271 | |||
272 | #define NILFS_NAME_LEN 255 | ||
273 | |||
274 | /* | ||
275 | * The new version of the directory entry. Since V0 structures are | ||
276 | * stored in intel byte order, and the name_len field could never be | ||
277 | * bigger than 255 chars, it's safe to reclaim the extra byte for the | ||
278 | * file_type field. | ||
279 | */ | ||
280 | struct nilfs_dir_entry { | ||
281 | __le64 inode; /* Inode number */ | ||
282 | __le16 rec_len; /* Directory entry length */ | ||
283 | __u8 name_len; /* Name length */ | ||
284 | __u8 file_type; | ||
285 | char name[NILFS_NAME_LEN]; /* File name */ | ||
286 | char pad; | ||
287 | }; | ||
288 | |||
289 | /* | ||
290 | * NILFS directory file types. Only the low 3 bits are used. The | ||
291 | * other bits are reserved for now. | ||
292 | */ | ||
293 | enum { | ||
294 | NILFS_FT_UNKNOWN, | ||
295 | NILFS_FT_REG_FILE, | ||
296 | NILFS_FT_DIR, | ||
297 | NILFS_FT_CHRDEV, | ||
298 | NILFS_FT_BLKDEV, | ||
299 | NILFS_FT_FIFO, | ||
300 | NILFS_FT_SOCK, | ||
301 | NILFS_FT_SYMLINK, | ||
302 | NILFS_FT_MAX | ||
303 | }; | ||
304 | |||
305 | /* | ||
306 | * NILFS_DIR_PAD defines the directory entries boundaries | ||
307 | * | ||
308 | * NOTE: It must be a multiple of 8 | ||
309 | */ | ||
310 | #define NILFS_DIR_PAD 8 | ||
311 | #define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1) | ||
312 | #define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \ | ||
313 | ~NILFS_DIR_ROUND) | ||
314 | |||
315 | |||
316 | /** | ||
317 | * struct nilfs_finfo - file information | ||
318 | * @fi_ino: inode number | ||
319 | * @fi_cno: checkpoint number | ||
320 | * @fi_nblocks: number of blocks (including intermediate blocks) | ||
321 | * @fi_ndatablk: number of file data blocks | ||
322 | */ | ||
323 | struct nilfs_finfo { | ||
324 | __le64 fi_ino; | ||
325 | __le64 fi_cno; | ||
326 | __le32 fi_nblocks; | ||
327 | __le32 fi_ndatablk; | ||
328 | /* array of virtual block numbers */ | ||
329 | }; | ||
330 | |||
331 | /** | ||
332 | * struct nilfs_binfo_v - information for the block to which a virtual block number is assigned | ||
333 | * @bi_vblocknr: virtual block number | ||
334 | * @bi_blkoff: block offset | ||
335 | */ | ||
336 | struct nilfs_binfo_v { | ||
337 | __le64 bi_vblocknr; | ||
338 | __le64 bi_blkoff; | ||
339 | }; | ||
340 | |||
341 | /** | ||
342 | * struct nilfs_binfo_dat - information for the block which belongs to the DAT file | ||
343 | * @bi_blkoff: block offset | ||
344 | * @bi_level: level | ||
345 | * @bi_pad: padding | ||
346 | */ | ||
347 | struct nilfs_binfo_dat { | ||
348 | __le64 bi_blkoff; | ||
349 | __u8 bi_level; | ||
350 | __u8 bi_pad[7]; | ||
351 | }; | ||
352 | |||
353 | /** | ||
354 | * union nilfs_binfo: block information | ||
355 | * @bi_v: nilfs_binfo_v structure | ||
356 | * @bi_dat: nilfs_binfo_dat structure | ||
357 | */ | ||
358 | union nilfs_binfo { | ||
359 | struct nilfs_binfo_v bi_v; | ||
360 | struct nilfs_binfo_dat bi_dat; | ||
361 | }; | ||
362 | |||
363 | /** | ||
364 | * struct nilfs_segment_summary - segment summary | ||
365 | * @ss_datasum: checksum of data | ||
366 | * @ss_sumsum: checksum of segment summary | ||
367 | * @ss_magic: magic number | ||
368 | * @ss_bytes: size of this structure in bytes | ||
369 | * @ss_flags: flags | ||
370 | * @ss_seq: sequence number | ||
371 | * @ss_create: creation timestamp | ||
372 | * @ss_next: next segment | ||
373 | * @ss_nblocks: number of blocks | ||
374 | * @ss_nfinfo: number of finfo structures | ||
375 | * @ss_sumbytes: total size of segment summary in bytes | ||
376 | * @ss_pad: padding | ||
377 | */ | ||
378 | struct nilfs_segment_summary { | ||
379 | __le32 ss_datasum; | ||
380 | __le32 ss_sumsum; | ||
381 | __le32 ss_magic; | ||
382 | __le16 ss_bytes; | ||
383 | __le16 ss_flags; | ||
384 | __le64 ss_seq; | ||
385 | __le64 ss_create; | ||
386 | __le64 ss_next; | ||
387 | __le32 ss_nblocks; | ||
388 | __le32 ss_nfinfo; | ||
389 | __le32 ss_sumbytes; | ||
390 | __le32 ss_pad; | ||
391 | /* array of finfo structures */ | ||
392 | }; | ||
393 | |||
394 | #define NILFS_SEGSUM_MAGIC 0x1eaffa11 /* segment summary magic number */ | ||
395 | |||
396 | /* | ||
397 | * Segment summary flags | ||
398 | */ | ||
399 | #define NILFS_SS_LOGBGN 0x0001 /* begins a logical segment */ | ||
400 | #define NILFS_SS_LOGEND 0x0002 /* ends a logical segment */ | ||
401 | #define NILFS_SS_SR 0x0004 /* has super root */ | ||
402 | #define NILFS_SS_SYNDT 0x0008 /* includes data only updates */ | ||
403 | #define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */ | ||
404 | |||
405 | /** | ||
406 | * struct nilfs_palloc_group_desc - block group descriptor | ||
407 | * @pg_nfrees: number of free entries in block group | ||
408 | */ | ||
409 | struct nilfs_palloc_group_desc { | ||
410 | __le32 pg_nfrees; | ||
411 | }; | ||
412 | |||
413 | /** | ||
414 | * struct nilfs_dat_entry - disk address translation entry | ||
415 | * @de_blocknr: block number | ||
416 | * @de_start: start checkpoint number | ||
417 | * @de_end: end checkpoint number | ||
418 | * @de_rsv: reserved for future use | ||
419 | */ | ||
420 | struct nilfs_dat_entry { | ||
421 | __le64 de_blocknr; | ||
422 | __le64 de_start; | ||
423 | __le64 de_end; | ||
424 | __le64 de_rsv; | ||
425 | }; | ||
426 | |||
427 | /** | ||
428 | * struct nilfs_dat_group_desc - block group descriptor | ||
429 | * @dg_nfrees: number of free virtual block numbers in block group | ||
430 | */ | ||
431 | struct nilfs_dat_group_desc { | ||
432 | __le32 dg_nfrees; | ||
433 | }; | ||
434 | |||
435 | |||
436 | /** | ||
437 | * struct nilfs_snapshot_list - snapshot list | ||
438 | * @ssl_next: next checkpoint number on snapshot list | ||
439 | * @ssl_prev: previous checkpoint number on snapshot list | ||
440 | */ | ||
441 | struct nilfs_snapshot_list { | ||
442 | __le64 ssl_next; | ||
443 | __le64 ssl_prev; | ||
444 | }; | ||
445 | |||
446 | /** | ||
447 | * struct nilfs_checkpoint - checkpoint structure | ||
448 | * @cp_flags: flags | ||
449 | * @cp_checkpoints_count: checkpoints count in a block | ||
450 | * @cp_snapshot_list: snapshot list | ||
451 | * @cp_cno: checkpoint number | ||
452 | * @cp_create: creation timestamp | ||
453 | * @cp_nblk_inc: number of blocks incremented by this checkpoint | ||
454 | * @cp_inodes_count: inodes count | ||
455 | * @cp_blocks_count: blocks count | ||
456 | * @cp_ifile_inode: inode of ifile | ||
457 | */ | ||
458 | struct nilfs_checkpoint { | ||
459 | __le32 cp_flags; | ||
460 | __le32 cp_checkpoints_count; | ||
461 | struct nilfs_snapshot_list cp_snapshot_list; | ||
462 | __le64 cp_cno; | ||
463 | __le64 cp_create; | ||
464 | __le64 cp_nblk_inc; | ||
465 | __le64 cp_inodes_count; | ||
466 | __le64 cp_blocks_count; /* Reserved (might be deleted) */ | ||
467 | |||
468 | /* Do not change the byte offset of ifile inode. | ||
469 | To keep the compatibility of the disk format, | ||
470 | additional fields should be added behind cp_ifile_inode. */ | ||
471 | struct nilfs_inode cp_ifile_inode; | ||
472 | }; | ||
473 | |||
474 | /* checkpoint flags */ | ||
475 | enum { | ||
476 | NILFS_CHECKPOINT_SNAPSHOT, | ||
477 | NILFS_CHECKPOINT_INVALID, | ||
478 | NILFS_CHECKPOINT_SKETCH, | ||
479 | NILFS_CHECKPOINT_MINOR, | ||
480 | }; | ||
481 | |||
482 | #define NILFS_CHECKPOINT_FNS(flag, name) \ | ||
483 | static inline void \ | ||
484 | nilfs_checkpoint_set_##name(struct nilfs_checkpoint *cp) \ | ||
485 | { \ | ||
486 | cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) | \ | ||
487 | (1UL << NILFS_CHECKPOINT_##flag)); \ | ||
488 | } \ | ||
489 | static inline void \ | ||
490 | nilfs_checkpoint_clear_##name(struct nilfs_checkpoint *cp) \ | ||
491 | { \ | ||
492 | cp->cp_flags = cpu_to_le32(le32_to_cpu(cp->cp_flags) & \ | ||
493 | ~(1UL << NILFS_CHECKPOINT_##flag)); \ | ||
494 | } \ | ||
495 | static inline int \ | ||
496 | nilfs_checkpoint_##name(const struct nilfs_checkpoint *cp) \ | ||
497 | { \ | ||
498 | return !!(le32_to_cpu(cp->cp_flags) & \ | ||
499 | (1UL << NILFS_CHECKPOINT_##flag)); \ | ||
500 | } | ||
501 | |||
502 | NILFS_CHECKPOINT_FNS(SNAPSHOT, snapshot) | ||
503 | NILFS_CHECKPOINT_FNS(INVALID, invalid) | ||
504 | NILFS_CHECKPOINT_FNS(MINOR, minor) | ||
505 | |||
506 | /** | ||
507 | * struct nilfs_cpinfo - checkpoint information | ||
508 | * @ci_flags: flags | ||
509 | * @ci_pad: padding | ||
510 | * @ci_cno: checkpoint number | ||
511 | * @ci_create: creation timestamp | ||
512 | * @ci_nblk_inc: number of blocks incremented by this checkpoint | ||
513 | * @ci_inodes_count: inodes count | ||
514 | * @ci_blocks_count: blocks count | ||
515 | * @ci_next: next checkpoint number in snapshot list | ||
516 | */ | ||
517 | struct nilfs_cpinfo { | ||
518 | __u32 ci_flags; | ||
519 | __u32 ci_pad; | ||
520 | __u64 ci_cno; | ||
521 | __u64 ci_create; | ||
522 | __u64 ci_nblk_inc; | ||
523 | __u64 ci_inodes_count; | ||
524 | __u64 ci_blocks_count; | ||
525 | __u64 ci_next; | ||
526 | }; | ||
527 | |||
528 | #define NILFS_CPINFO_FNS(flag, name) \ | ||
529 | static inline int \ | ||
530 | nilfs_cpinfo_##name(const struct nilfs_cpinfo *cpinfo) \ | ||
531 | { \ | ||
532 | return !!(cpinfo->ci_flags & (1UL << NILFS_CHECKPOINT_##flag)); \ | ||
533 | } | ||
534 | |||
535 | NILFS_CPINFO_FNS(SNAPSHOT, snapshot) | ||
536 | NILFS_CPINFO_FNS(INVALID, invalid) | ||
537 | NILFS_CPINFO_FNS(MINOR, minor) | ||
538 | |||
539 | |||
540 | /** | ||
541 | * struct nilfs_cpfile_header - checkpoint file header | ||
542 | * @ch_ncheckpoints: number of checkpoints | ||
543 | * @ch_nsnapshots: number of snapshots | ||
544 | * @ch_snapshot_list: snapshot list | ||
545 | */ | ||
546 | struct nilfs_cpfile_header { | ||
547 | __le64 ch_ncheckpoints; | ||
548 | __le64 ch_nsnapshots; | ||
549 | struct nilfs_snapshot_list ch_snapshot_list; | ||
550 | }; | ||
551 | |||
552 | #define NILFS_CPFILE_FIRST_CHECKPOINT_OFFSET \ | ||
553 | ((sizeof(struct nilfs_cpfile_header) + \ | ||
554 | sizeof(struct nilfs_checkpoint) - 1) / \ | ||
555 | sizeof(struct nilfs_checkpoint)) | ||
556 | |||
557 | /** | ||
558 | * struct nilfs_segment_usage - segment usage | ||
559 | * @su_lastmod: last modified timestamp | ||
560 | * @su_nblocks: number of blocks in segment | ||
561 | * @su_flags: flags | ||
562 | */ | ||
563 | struct nilfs_segment_usage { | ||
564 | __le64 su_lastmod; | ||
565 | __le32 su_nblocks; | ||
566 | __le32 su_flags; | ||
567 | }; | ||
568 | |||
569 | /* segment usage flag */ | ||
570 | enum { | ||
571 | NILFS_SEGMENT_USAGE_ACTIVE, | ||
572 | NILFS_SEGMENT_USAGE_DIRTY, | ||
573 | NILFS_SEGMENT_USAGE_ERROR, | ||
574 | |||
575 | /* ... */ | ||
576 | }; | ||
577 | |||
578 | #define NILFS_SEGMENT_USAGE_FNS(flag, name) \ | ||
579 | static inline void \ | ||
580 | nilfs_segment_usage_set_##name(struct nilfs_segment_usage *su) \ | ||
581 | { \ | ||
582 | su->su_flags = cpu_to_le32(le32_to_cpu(su->su_flags) | \ | ||
583 | (1UL << NILFS_SEGMENT_USAGE_##flag));\ | ||
584 | } \ | ||
585 | static inline void \ | ||
586 | nilfs_segment_usage_clear_##name(struct nilfs_segment_usage *su) \ | ||
587 | { \ | ||
588 | su->su_flags = \ | ||
589 | cpu_to_le32(le32_to_cpu(su->su_flags) & \ | ||
590 | ~(1UL << NILFS_SEGMENT_USAGE_##flag)); \ | ||
591 | } \ | ||
592 | static inline int \ | ||
593 | nilfs_segment_usage_##name(const struct nilfs_segment_usage *su) \ | ||
594 | { \ | ||
595 | return !!(le32_to_cpu(su->su_flags) & \ | ||
596 | (1UL << NILFS_SEGMENT_USAGE_##flag)); \ | ||
597 | } | ||
598 | |||
599 | NILFS_SEGMENT_USAGE_FNS(ACTIVE, active) | ||
600 | NILFS_SEGMENT_USAGE_FNS(DIRTY, dirty) | ||
601 | NILFS_SEGMENT_USAGE_FNS(ERROR, error) | ||
602 | |||
603 | static inline void | ||
604 | nilfs_segment_usage_set_clean(struct nilfs_segment_usage *su) | ||
605 | { | ||
606 | su->su_lastmod = cpu_to_le64(0); | ||
607 | su->su_nblocks = cpu_to_le32(0); | ||
608 | su->su_flags = cpu_to_le32(0); | ||
609 | } | ||
610 | |||
611 | static inline int | ||
612 | nilfs_segment_usage_clean(const struct nilfs_segment_usage *su) | ||
613 | { | ||
614 | return !le32_to_cpu(su->su_flags); | ||
615 | } | ||
616 | |||
617 | /** | ||
618 | * struct nilfs_sufile_header - segment usage file header | ||
619 | * @sh_ncleansegs: number of clean segments | ||
620 | * @sh_ndirtysegs: number of dirty segments | ||
621 | * @sh_last_alloc: last allocated segment number | ||
622 | */ | ||
623 | struct nilfs_sufile_header { | ||
624 | __le64 sh_ncleansegs; | ||
625 | __le64 sh_ndirtysegs; | ||
626 | __le64 sh_last_alloc; | ||
627 | /* ... */ | ||
628 | }; | ||
629 | |||
630 | #define NILFS_SUFILE_FIRST_SEGMENT_USAGE_OFFSET \ | ||
631 | ((sizeof(struct nilfs_sufile_header) + \ | ||
632 | sizeof(struct nilfs_segment_usage) - 1) / \ | ||
633 | sizeof(struct nilfs_segment_usage)) | ||
634 | |||
635 | /** | ||
636 | * nilfs_suinfo - segment usage information | ||
637 | * @sui_lastmod: | ||
638 | * @sui_nblocks: | ||
639 | * @sui_flags: | ||
640 | */ | ||
641 | struct nilfs_suinfo { | ||
642 | __u64 sui_lastmod; | ||
643 | __u32 sui_nblocks; | ||
644 | __u32 sui_flags; | ||
645 | }; | ||
646 | |||
647 | #define NILFS_SUINFO_FNS(flag, name) \ | ||
648 | static inline int \ | ||
649 | nilfs_suinfo_##name(const struct nilfs_suinfo *si) \ | ||
650 | { \ | ||
651 | return si->sui_flags & (1UL << NILFS_SEGMENT_USAGE_##flag); \ | ||
652 | } | ||
653 | |||
654 | NILFS_SUINFO_FNS(ACTIVE, active) | ||
655 | NILFS_SUINFO_FNS(DIRTY, dirty) | ||
656 | NILFS_SUINFO_FNS(ERROR, error) | ||
657 | |||
658 | static inline int nilfs_suinfo_clean(const struct nilfs_suinfo *si) | ||
659 | { | ||
660 | return !si->sui_flags; | ||
661 | } | ||
662 | |||
663 | /* ioctl */ | ||
664 | enum { | ||
665 | NILFS_CHECKPOINT, | ||
666 | NILFS_SNAPSHOT, | ||
667 | }; | ||
668 | |||
669 | /** | ||
670 | * struct nilfs_cpmode - | ||
671 | * @cm_cno: checkpoint number | ||
672 | * @cm_mode: checkpoint mode (NILFS_CHECKPOINT or NILFS_SNAPSHOT) | ||
673 | */ | ||
674 | struct nilfs_cpmode { | ||
675 | __u64 cm_cno; | ||
676 | __u32 cm_mode; | ||
677 | __u32 cm_pad; | ||
678 | }; | ||
679 | |||
680 | /** | ||
681 | * struct nilfs_argv - argument vector | ||
682 | * @v_base: | ||
683 | * @v_nmembs: | ||
684 | * @v_size: | ||
685 | * @v_flags: | ||
686 | * @v_index: | ||
687 | */ | ||
688 | struct nilfs_argv { | ||
689 | __u64 v_base; | ||
690 | __u32 v_nmembs; /* number of members */ | ||
691 | __u16 v_size; /* size of members */ | ||
692 | __u16 v_flags; | ||
693 | __u64 v_index; | ||
694 | }; | ||
695 | |||
696 | /** | ||
697 | * struct nilfs_period - | ||
698 | * @p_start: | ||
699 | * @p_end: | ||
700 | */ | ||
701 | struct nilfs_period { | ||
702 | __u64 p_start; | ||
703 | __u64 p_end; | ||
704 | }; | ||
705 | |||
706 | /** | ||
707 | * struct nilfs_cpstat - | ||
708 | * @cs_cno: checkpoint number | ||
709 | * @cs_ncps: number of checkpoints | ||
710 | * @cs_nsss: number of snapshots | ||
711 | */ | ||
712 | struct nilfs_cpstat { | ||
713 | __u64 cs_cno; | ||
714 | __u64 cs_ncps; | ||
715 | __u64 cs_nsss; | ||
716 | }; | ||
717 | |||
718 | /** | ||
719 | * struct nilfs_sustat - | ||
720 | * @ss_nsegs: number of segments | ||
721 | * @ss_ncleansegs: number of clean segments | ||
722 | * @ss_ndirtysegs: number of dirty segments | ||
723 | * @ss_ctime: creation time of the last segment | ||
724 | * @ss_nongc_ctime: creation time of the last segment not for GC | ||
725 | * @ss_prot_seq: least sequence number of segments which must not be reclaimed | ||
726 | */ | ||
727 | struct nilfs_sustat { | ||
728 | __u64 ss_nsegs; | ||
729 | __u64 ss_ncleansegs; | ||
730 | __u64 ss_ndirtysegs; | ||
731 | __u64 ss_ctime; | ||
732 | __u64 ss_nongc_ctime; | ||
733 | __u64 ss_prot_seq; | ||
734 | }; | ||
735 | |||
736 | /** | ||
737 | * struct nilfs_vinfo - virtual block number information | ||
738 | * @vi_vblocknr: | ||
739 | * @vi_start: | ||
740 | * @vi_end: | ||
741 | * @vi_blocknr: | ||
742 | */ | ||
743 | struct nilfs_vinfo { | ||
744 | __u64 vi_vblocknr; | ||
745 | __u64 vi_start; | ||
746 | __u64 vi_end; | ||
747 | __u64 vi_blocknr; | ||
748 | }; | ||
749 | |||
750 | /** | ||
751 | * struct nilfs_vdesc - | ||
752 | */ | ||
753 | struct nilfs_vdesc { | ||
754 | __u64 vd_ino; | ||
755 | __u64 vd_cno; | ||
756 | __u64 vd_vblocknr; | ||
757 | struct nilfs_period vd_period; | ||
758 | __u64 vd_blocknr; | ||
759 | __u64 vd_offset; | ||
760 | __u32 vd_flags; | ||
761 | __u32 vd_pad; | ||
762 | }; | ||
763 | |||
764 | /** | ||
765 | * struct nilfs_bdesc - | ||
766 | */ | ||
767 | struct nilfs_bdesc { | ||
768 | __u64 bd_ino; | ||
769 | __u64 bd_oblocknr; | ||
770 | __u64 bd_blocknr; | ||
771 | __u64 bd_offset; | ||
772 | __u32 bd_level; | ||
773 | __u32 bd_pad; | ||
774 | }; | ||
775 | |||
776 | #define NILFS_IOCTL_IDENT 'n' | ||
777 | |||
778 | #define NILFS_IOCTL_CHANGE_CPMODE \ | ||
779 | _IOW(NILFS_IOCTL_IDENT, 0x80, struct nilfs_cpmode) | ||
780 | #define NILFS_IOCTL_DELETE_CHECKPOINT \ | ||
781 | _IOW(NILFS_IOCTL_IDENT, 0x81, __u64) | ||
782 | #define NILFS_IOCTL_GET_CPINFO \ | ||
783 | _IOR(NILFS_IOCTL_IDENT, 0x82, struct nilfs_argv) | ||
784 | #define NILFS_IOCTL_GET_CPSTAT \ | ||
785 | _IOR(NILFS_IOCTL_IDENT, 0x83, struct nilfs_cpstat) | ||
786 | #define NILFS_IOCTL_GET_SUINFO \ | ||
787 | _IOR(NILFS_IOCTL_IDENT, 0x84, struct nilfs_argv) | ||
788 | #define NILFS_IOCTL_GET_SUSTAT \ | ||
789 | _IOR(NILFS_IOCTL_IDENT, 0x85, struct nilfs_sustat) | ||
790 | #define NILFS_IOCTL_GET_VINFO \ | ||
791 | _IOWR(NILFS_IOCTL_IDENT, 0x86, struct nilfs_argv) | ||
792 | #define NILFS_IOCTL_GET_BDESCS \ | ||
793 | _IOWR(NILFS_IOCTL_IDENT, 0x87, struct nilfs_argv) | ||
794 | #define NILFS_IOCTL_CLEAN_SEGMENTS \ | ||
795 | _IOW(NILFS_IOCTL_IDENT, 0x88, struct nilfs_argv[5]) | ||
796 | #define NILFS_IOCTL_SYNC \ | ||
797 | _IOR(NILFS_IOCTL_IDENT, 0x8A, __u64) | ||
798 | #define NILFS_IOCTL_RESIZE \ | ||
799 | _IOW(NILFS_IOCTL_IDENT, 0x8B, __u64) | ||
800 | |||
801 | #endif /* _LINUX_NILFS_FS_H */ | ||
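Two formulas in the new superblock layout are worth spelling out: the block size is stored as a logarithm, blocksize = 1 << (s_log_block_size + 10) (so 0 means 1024 bytes and 2 means 4096), and NILFS_SB2_OFFSET_BYTES(devsize) places the secondary superblock at the start of the last full 4 KiB block of the device. A small user-space check of both expressions, for illustration only:

    #include <stdio.h>
    #include <stdint.h>

    /* Same expression as NILFS_SB2_OFFSET_BYTES() in nilfs2_fs.h above. */
    #define SB2_OFFSET_BYTES(devsize)  ((((devsize) >> 12) - 1) << 12)

    int main(void)
    {
            uint32_t log_bsz;
            uint64_t devsize = 8ULL * 1024 * 1024 * 1024;   /* 8 GiB device, for example */

            for (log_bsz = 0; log_bsz <= 2; log_bsz++)
                    printf("s_log_block_size=%u -> block size %u bytes\n",
                           log_bsz, 1u << (log_bsz + 10));

            printf("secondary superblock offset: %llu\n",
                   (unsigned long long)SB2_OFFSET_BYTES(devsize));
            return 0;
    }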
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index afad7dec1b36..7b370c7cfeff 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h | |||
@@ -8,6 +8,7 @@ struct mnt_namespace; | |||
8 | struct uts_namespace; | 8 | struct uts_namespace; |
9 | struct ipc_namespace; | 9 | struct ipc_namespace; |
10 | struct pid_namespace; | 10 | struct pid_namespace; |
11 | struct fs_struct; | ||
11 | 12 | ||
12 | /* | 13 | /* |
13 | * A structure to contain pointers to all per-process | 14 | * A structure to contain pointers to all per-process |
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index 3d327b67d7e2..908406651330 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
@@ -51,6 +51,16 @@ extern int of_register_driver(struct of_platform_driver *drv, | |||
51 | struct bus_type *bus); | 51 | struct bus_type *bus); |
52 | extern void of_unregister_driver(struct of_platform_driver *drv); | 52 | extern void of_unregister_driver(struct of_platform_driver *drv); |
53 | 53 | ||
54 | /* Platform drivers register/unregister */ | ||
55 | static inline int of_register_platform_driver(struct of_platform_driver *drv) | ||
56 | { | ||
57 | return of_register_driver(drv, &of_platform_bus_type); | ||
58 | } | ||
59 | static inline void of_unregister_platform_driver(struct of_platform_driver *drv) | ||
60 | { | ||
61 | of_unregister_driver(drv); | ||
62 | } | ||
63 | |||
54 | #include <asm/of_platform.h> | 64 | #include <asm/of_platform.h> |
55 | 65 | ||
56 | extern struct of_device *of_find_device_by_node(struct device_node *np); | 66 | extern struct of_device *of_find_device_by_node(struct device_node *np); |
diff --git a/include/linux/page-debug-flags.h b/include/linux/page-debug-flags.h new file mode 100644 index 000000000000..b0638fd91e92 --- /dev/null +++ b/include/linux/page-debug-flags.h | |||
@@ -0,0 +1,30 @@ | |||
1 | #ifndef LINUX_PAGE_DEBUG_FLAGS_H | ||
2 | #define LINUX_PAGE_DEBUG_FLAGS_H | ||
3 | |||
4 | /* | ||
5 | * page->debug_flags bits: | ||
6 | * | ||
7 | * PAGE_DEBUG_FLAG_POISON is set for poisoned pages. This is used to | ||
8 | * implement the generic debug pagealloc feature. Pages are filled with | ||
9 | * a poison pattern and this flag is set after free_pages(). Before | ||
10 | * alloc_pages(), poisoned pages are checked for pattern corruption and | ||
11 | * the flag is cleared. | ||
12 | */ | ||
13 | |||
14 | enum page_debug_flags { | ||
15 | PAGE_DEBUG_FLAG_POISON, /* Page is poisoned */ | ||
16 | }; | ||
17 | |||
18 | /* | ||
19 | * Ensure that CONFIG_WANT_PAGE_DEBUG_FLAGS reliably | ||
20 | * gets turned off when no debug features are enabling it! | ||
21 | */ | ||
22 | |||
23 | #ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS | ||
24 | #if !defined(CONFIG_PAGE_POISONING) \ | ||
25 | /* && !defined(CONFIG_PAGE_DEBUG_SOMETHING_ELSE) && ... */ | ||
26 | #error WANT_PAGE_DEBUG_FLAGS is turned on with no debug features! | ||
27 | #endif | ||
28 | #endif /* CONFIG_WANT_PAGE_DEBUG_FLAGS */ | ||
29 | |||
30 | #endif /* LINUX_PAGE_DEBUG_FLAGS_H */ | ||
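The poisoning scheme described in the header comment is straightforward: on free the page is filled with a known pattern and the flag is set, on allocation the pattern is verified and the flag is cleared, so any write to a free page shows up as corruption. The user-space sketch below only illustrates the idea; the pattern value and names are chosen here and are not the kernel's.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define POISON_BYTE 0xaa   /* pattern chosen for this sketch */
    #define PAGE_BYTES  4096

    /* "Free": fill the page with the poison pattern. */
    static void poison_page(unsigned char *page)
    {
            memset(page, POISON_BYTE, PAGE_BYTES);
    }

    /* "Alloc": verify the pattern survived, i.e. nobody wrote to the free page. */
    static int verify_page(const unsigned char *page)
    {
            size_t i;

            for (i = 0; i < PAGE_BYTES; i++)
                    if (page[i] != POISON_BYTE)
                            return 0;   /* corruption: stray write to a free page */
            return 1;
    }

    int main(void)
    {
            unsigned char *page = malloc(PAGE_BYTES);

            if (!page)
                    return 1;
            poison_page(page);
            printf("clean page verifies: %d\n", verify_page(page));

            page[123] = 0;   /* simulate a stray write to a "free" page */
            printf("after stray write verifies: %d\n", verify_page(page));

            free(page);
            return 0;
    }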
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 219a523ecdb0..62214c7d2d93 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -82,6 +82,7 @@ enum pageflags { | |||
82 | PG_arch_1, | 82 | PG_arch_1, |
83 | PG_reserved, | 83 | PG_reserved, |
84 | PG_private, /* If pagecache, has fs-private data */ | 84 | PG_private, /* If pagecache, has fs-private data */ |
85 | PG_private_2, /* If pagecache, has fs aux data */ | ||
85 | PG_writeback, /* Page is under writeback */ | 86 | PG_writeback, /* Page is under writeback */ |
86 | #ifdef CONFIG_PAGEFLAGS_EXTENDED | 87 | #ifdef CONFIG_PAGEFLAGS_EXTENDED |
87 | PG_head, /* A head page */ | 88 | PG_head, /* A head page */ |
@@ -96,6 +97,8 @@ enum pageflags { | |||
96 | PG_swapbacked, /* Page is backed by RAM/swap */ | 97 | PG_swapbacked, /* Page is backed by RAM/swap */ |
97 | #ifdef CONFIG_UNEVICTABLE_LRU | 98 | #ifdef CONFIG_UNEVICTABLE_LRU |
98 | PG_unevictable, /* Page is "unevictable" */ | 99 | PG_unevictable, /* Page is "unevictable" */ |
100 | #endif | ||
101 | #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT | ||
99 | PG_mlocked, /* Page is vma mlocked */ | 102 | PG_mlocked, /* Page is vma mlocked */ |
100 | #endif | 103 | #endif |
101 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR | 104 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR |
@@ -106,6 +109,12 @@ enum pageflags { | |||
106 | /* Filesystems */ | 109 | /* Filesystems */ |
107 | PG_checked = PG_owner_priv_1, | 110 | PG_checked = PG_owner_priv_1, |
108 | 111 | ||
112 | /* Two page bits are conscripted by FS-Cache to maintain local caching | ||
113 | * state. These bits are set on pages belonging to the netfs's inodes | ||
114 | * when those inodes are being locally cached. | ||
115 | */ | ||
116 | PG_fscache = PG_private_2, /* page backed by cache */ | ||
117 | |||
109 | /* XEN */ | 118 | /* XEN */ |
110 | PG_pinned = PG_owner_priv_1, | 119 | PG_pinned = PG_owner_priv_1, |
111 | PG_savepinned = PG_dirty, | 120 | PG_savepinned = PG_dirty, |
@@ -180,7 +189,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } | |||
180 | 189 | ||
181 | struct page; /* forward declaration */ | 190 | struct page; /* forward declaration */ |
182 | 191 | ||
183 | TESTPAGEFLAG(Locked, locked) | 192 | TESTPAGEFLAG(Locked, locked) TESTSETFLAG(Locked, locked) |
184 | PAGEFLAG(Error, error) | 193 | PAGEFLAG(Error, error) |
185 | PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) | 194 | PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced) |
186 | PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) | 195 | PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty) |
@@ -192,8 +201,6 @@ PAGEFLAG(Checked, checked) /* Used by some filesystems */ | |||
192 | PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ | 201 | PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */ |
193 | PAGEFLAG(SavePinned, savepinned); /* Xen */ | 202 | PAGEFLAG(SavePinned, savepinned); /* Xen */ |
194 | PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) | 203 | PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved) |
195 | PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private) | ||
196 | __SETPAGEFLAG(Private, private) | ||
197 | PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) | 204 | PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) |
198 | 205 | ||
199 | __PAGEFLAG(SlobPage, slob_page) | 206 | __PAGEFLAG(SlobPage, slob_page) |
@@ -203,6 +210,16 @@ __PAGEFLAG(SlubFrozen, slub_frozen) | |||
203 | __PAGEFLAG(SlubDebug, slub_debug) | 210 | __PAGEFLAG(SlubDebug, slub_debug) |
204 | 211 | ||
205 | /* | 212 | /* |
213 | * Private page markings that may be used by the filesystem that owns the page | ||
214 | * for its own purposes. | ||
215 | * - PG_private and PG_private_2 cause releasepage() and co to be invoked | ||
216 | */ | ||
217 | PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private) | ||
218 | __CLEARPAGEFLAG(Private, private) | ||
219 | PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2) | ||
220 | PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1) | ||
221 | |||
222 | /* | ||
206 | * Only test-and-set exist for PG_writeback. The unconditional operators are | 223 | * Only test-and-set exist for PG_writeback. The unconditional operators are |
207 | * risky: they bypass page accounting. | 224 | * risky: they bypass page accounting. |
208 | */ | 225 | */ |
@@ -234,20 +251,20 @@ PAGEFLAG_FALSE(SwapCache) | |||
234 | #ifdef CONFIG_UNEVICTABLE_LRU | 251 | #ifdef CONFIG_UNEVICTABLE_LRU |
235 | PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) | 252 | PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) |
236 | TESTCLEARFLAG(Unevictable, unevictable) | 253 | TESTCLEARFLAG(Unevictable, unevictable) |
254 | #else | ||
255 | PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable) | ||
256 | SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable) | ||
257 | __CLEARPAGEFLAG_NOOP(Unevictable) | ||
258 | #endif | ||
237 | 259 | ||
260 | #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT | ||
238 | #define MLOCK_PAGES 1 | 261 | #define MLOCK_PAGES 1 |
239 | PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) | 262 | PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) |
240 | TESTSCFLAG(Mlocked, mlocked) | 263 | TESTSCFLAG(Mlocked, mlocked) |
241 | |||
242 | #else | 264 | #else |
243 | |||
244 | #define MLOCK_PAGES 0 | 265 | #define MLOCK_PAGES 0 |
245 | PAGEFLAG_FALSE(Mlocked) | 266 | PAGEFLAG_FALSE(Mlocked) |
246 | SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) | 267 | SETPAGEFLAG_NOOP(Mlocked) TESTCLEARFLAG_FALSE(Mlocked) |
247 | |||
248 | PAGEFLAG_FALSE(Unevictable) TESTCLEARFLAG_FALSE(Unevictable) | ||
249 | SETPAGEFLAG_NOOP(Unevictable) CLEARPAGEFLAG_NOOP(Unevictable) | ||
250 | __CLEARPAGEFLAG_NOOP(Unevictable) | ||
251 | #endif | 268 | #endif |
252 | 269 | ||
253 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR | 270 | #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR |
@@ -367,9 +384,13 @@ static inline void __ClearPageTail(struct page *page) | |||
367 | 384 | ||
368 | #ifdef CONFIG_UNEVICTABLE_LRU | 385 | #ifdef CONFIG_UNEVICTABLE_LRU |
369 | #define __PG_UNEVICTABLE (1 << PG_unevictable) | 386 | #define __PG_UNEVICTABLE (1 << PG_unevictable) |
370 | #define __PG_MLOCKED (1 << PG_mlocked) | ||
371 | #else | 387 | #else |
372 | #define __PG_UNEVICTABLE 0 | 388 | #define __PG_UNEVICTABLE 0 |
389 | #endif | ||
390 | |||
391 | #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT | ||
392 | #define __PG_MLOCKED (1 << PG_mlocked) | ||
393 | #else | ||
373 | #define __PG_MLOCKED 0 | 394 | #define __PG_MLOCKED 0 |
374 | #endif | 395 | #endif |
375 | 396 | ||
@@ -378,9 +399,10 @@ static inline void __ClearPageTail(struct page *page) | |||
378 | * these flags set. If they are, there is a problem. | 399 |
379 | */ | 400 | */ |
380 | #define PAGE_FLAGS_CHECK_AT_FREE \ | 401 | #define PAGE_FLAGS_CHECK_AT_FREE \ |
381 | (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \ | 402 | (1 << PG_lru | 1 << PG_locked | \ |
382 | 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ | 403 | 1 << PG_private | 1 << PG_private_2 | \ |
383 | 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ | 404 | 1 << PG_buddy | 1 << PG_writeback | 1 << PG_reserved | \ |
405 | 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ | ||
384 | __PG_UNEVICTABLE | __PG_MLOCKED) | 406 | __PG_UNEVICTABLE | __PG_MLOCKED) |
385 | 407 | ||
386 | /* | 408 | /* |
@@ -391,4 +413,16 @@ static inline void __ClearPageTail(struct page *page) | |||
391 | #define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) | 413 | #define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) |
392 | 414 | ||
393 | #endif /* !__GENERATING_BOUNDS_H */ | 415 | #endif /* !__GENERATING_BOUNDS_H */ |
416 | |||
417 | /** | ||
418 | * page_has_private - Determine if page has private stuff | ||
419 | * @page: The page to be checked | ||
420 | * | ||
421 | * Determine if a page has private stuff, indicating that release routines | ||
422 | * should be invoked upon it. | ||
423 | */ | ||
424 | #define page_has_private(page) \ | ||
425 | ((page)->flags & ((1 << PG_private) | \ | ||
426 | (1 << PG_private_2))) | ||
427 | |||
394 | #endif /* PAGE_FLAGS_H */ | 428 | #endif /* PAGE_FLAGS_H */ |
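page_has_private() above is a plain mask test against the two filesystem-private bits, so a page reports private data when either PG_private or PG_private_2 (aliased as PG_fscache) is set. An equivalent user-space sketch of the bit arithmetic, with flag positions invented for the example:

    #include <stdio.h>

    /* Illustrative bit positions; the real values come from enum pageflags. */
    enum demo_pageflags {
            DEMO_PG_private   = 11,
            DEMO_PG_private_2 = 12,
    };

    #define demo_page_has_private(flags) \
            ((flags) & ((1UL << DEMO_PG_private) | (1UL << DEMO_PG_private_2)))

    int main(void)
    {
            unsigned long flags = 0;

            printf("no private bits: %lu\n", demo_page_has_private(flags));

            flags |= 1UL << DEMO_PG_private_2;   /* e.g. the PG_fscache alias */
            printf("private_2 set:   %lu\n", demo_page_has_private(flags));
            return 0;
    }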
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 602cc1fdee90..7339c7bf7331 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -91,24 +91,23 @@ static inline void page_cgroup_init(void) | |||
91 | 91 | ||
92 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 92 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
93 | #include <linux/swap.h> | 93 | #include <linux/swap.h> |
94 | extern struct mem_cgroup * | 94 | extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); |
95 | swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem); | 95 | extern unsigned short lookup_swap_cgroup(swp_entry_t ent); |
96 | extern struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent); | ||
97 | extern int swap_cgroup_swapon(int type, unsigned long max_pages); | 96 | extern int swap_cgroup_swapon(int type, unsigned long max_pages); |
98 | extern void swap_cgroup_swapoff(int type); | 97 | extern void swap_cgroup_swapoff(int type); |
99 | #else | 98 | #else |
100 | #include <linux/swap.h> | 99 | #include <linux/swap.h> |
101 | 100 | ||
102 | static inline | 101 | static inline |
103 | struct mem_cgroup *swap_cgroup_record(swp_entry_t ent, struct mem_cgroup *mem) | 102 | unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) |
104 | { | 103 | { |
105 | return NULL; | 104 | return 0; |
106 | } | 105 | } |
107 | 106 | ||
108 | static inline | 107 | static inline |
109 | struct mem_cgroup *lookup_swap_cgroup(swp_entry_t ent) | 108 | unsigned short lookup_swap_cgroup(swp_entry_t ent) |
110 | { | 109 | { |
111 | return NULL; | 110 | return 0; |
112 | } | 111 | } |
113 | 112 | ||
114 | static inline int | 113 | static inline int |
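The swap-cgroup record/lookup interface above now traffics in 16-bit css ids rather than mem_cgroup pointers, with 0 meaning "nothing recorded". A hedged sketch of a caller under that convention; the function name is made up:

#include <linux/types.h>
#include <linux/page_cgroup.h>

/* Illustrative check: has any cgroup id been recorded for this swap entry? */
static bool example_swap_entry_is_tracked(swp_entry_t ent)
{
	return lookup_swap_cgroup(ent) != 0;	/* 0 == no cgroup recorded */
}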
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 01ca0856caff..34da5230faab 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -18,9 +18,14 @@ | |||
18 | * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page | 18 | * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page |
19 | * allocation mode flags. | 19 | * allocation mode flags. |
20 | */ | 20 | */ |
21 | #define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */ | 21 | enum mapping_flags { |
22 | #define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */ | 22 | AS_EIO = __GFP_BITS_SHIFT + 0, /* IO error on async write */ |
23 | #define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */ | 23 | AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ |
24 | AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ | ||
25 | #ifdef CONFIG_UNEVICTABLE_LRU | ||
26 | AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ | ||
27 | #endif | ||
28 | }; | ||
24 | 29 | ||
25 | static inline void mapping_set_error(struct address_space *mapping, int error) | 30 | static inline void mapping_set_error(struct address_space *mapping, int error) |
26 | { | 31 | { |
@@ -33,7 +38,6 @@ static inline void mapping_set_error(struct address_space *mapping, int error) | |||
33 | } | 38 | } |
34 | 39 | ||
35 | #ifdef CONFIG_UNEVICTABLE_LRU | 40 | #ifdef CONFIG_UNEVICTABLE_LRU |
36 | #define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */ | ||
37 | 41 | ||
38 | static inline void mapping_set_unevictable(struct address_space *mapping) | 42 | static inline void mapping_set_unevictable(struct address_space *mapping) |
39 | { | 43 | { |
@@ -380,6 +384,11 @@ static inline void wait_on_page_writeback(struct page *page) | |||
380 | extern void end_page_writeback(struct page *page); | 384 | extern void end_page_writeback(struct page *page); |
381 | 385 | ||
382 | /* | 386 | /* |
387 | * Add an arbitrary waiter to a page's wait queue | ||
388 | */ | ||
389 | extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); | ||
390 | |||
391 | /* | ||
383 | * Fault a userspace page into pagetables. Return non-zero on a fault. | 392 | * Fault a userspace page into pagetables. Return non-zero on a fault. |
384 | * | 393 | * |
385 | * This assumes that two userspace pages are always sufficient. That's | 394 | * This assumes that two userspace pages are always sufficient. That's |
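With the hunk above the AS_* bits become an enum and AS_UNEVICTABLE moves in with them, while add_page_wait_queue() is exported for callers that need to attach their own waiter to a page. A small sketch of the error-flag side, assuming the existing mapping_set_error() helper; the wrapper below is illustrative:

#include <linux/pagemap.h>

/* Illustrative wrapper: remember an async writeback error so a later fsync() can report it. */
static void example_note_write_error(struct address_space *mapping, int err)
{
	if (err)
		mapping_set_error(mapping, err);	/* sets AS_ENOSPC or AS_EIO as appropriate */
}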
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 7b2886fa7fdc..bab82f4c571c 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h | |||
@@ -24,7 +24,6 @@ void __pagevec_release(struct pagevec *pvec); | |||
24 | void __pagevec_free(struct pagevec *pvec); | 24 | void __pagevec_free(struct pagevec *pvec); |
25 | void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru); | 25 | void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru); |
26 | void pagevec_strip(struct pagevec *pvec); | 26 | void pagevec_strip(struct pagevec *pvec); |
27 | void pagevec_swap_free(struct pagevec *pvec); | ||
28 | unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, | 27 | unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, |
29 | pgoff_t start, unsigned nr_pages); | 28 | pgoff_t start, unsigned nr_pages); |
30 | unsigned pagevec_lookup_tag(struct pagevec *pvec, | 29 | unsigned pagevec_lookup_tag(struct pagevec *pvec, |
diff --git a/include/linux/parport_pc.h b/include/linux/parport_pc.h index ea8c6d84996d..cc1767f5cca8 100644 --- a/include/linux/parport_pc.h +++ b/include/linux/parport_pc.h | |||
@@ -228,10 +228,11 @@ extern void parport_pc_release_resources(struct parport *p); | |||
228 | extern int parport_pc_claim_resources(struct parport *p); | 228 | extern int parport_pc_claim_resources(struct parport *p); |
229 | 229 | ||
230 | /* PCMCIA code will want to get us to look at a port. Provide a mechanism. */ | 230 | /* PCMCIA code will want to get us to look at a port. Provide a mechanism. */ |
231 | extern struct parport *parport_pc_probe_port (unsigned long base, | 231 | extern struct parport *parport_pc_probe_port(unsigned long base, |
232 | unsigned long base_hi, | 232 | unsigned long base_hi, |
233 | int irq, int dma, | 233 | int irq, int dma, |
234 | struct device *dev); | 234 | struct device *dev, |
235 | extern void parport_pc_unregister_port (struct parport *p); | 235 | int irqflags); |
236 | extern void parport_pc_unregister_port(struct parport *p); | ||
236 | 237 | ||
237 | #endif | 238 | #endif |
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 042c166f65d5..092e82e0048c 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h | |||
@@ -10,72 +10,25 @@ | |||
10 | 10 | ||
11 | #include <linux/acpi.h> | 11 | #include <linux/acpi.h> |
12 | 12 | ||
13 | #define OSC_QUERY_TYPE 0 | ||
14 | #define OSC_SUPPORT_TYPE 1 | ||
15 | #define OSC_CONTROL_TYPE 2 | ||
16 | #define OSC_SUPPORT_MASKS 0x1f | ||
17 | |||
18 | /* | ||
19 | * _OSC DW0 Definition | ||
20 | */ | ||
21 | #define OSC_QUERY_ENABLE 1 | ||
22 | #define OSC_REQUEST_ERROR 2 | ||
23 | #define OSC_INVALID_UUID_ERROR 4 | ||
24 | #define OSC_INVALID_REVISION_ERROR 8 | ||
25 | #define OSC_CAPABILITIES_MASK_ERROR 16 | ||
26 | |||
27 | /* | ||
28 | * _OSC DW1 Definition (OS Support Fields) | ||
29 | */ | ||
30 | #define OSC_EXT_PCI_CONFIG_SUPPORT 1 | ||
31 | #define OSC_ACTIVE_STATE_PWR_SUPPORT 2 | ||
32 | #define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4 | ||
33 | #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8 | ||
34 | #define OSC_MSI_SUPPORT 16 | ||
35 | |||
36 | /* | ||
37 | * _OSC DW1 Definition (OS Control Fields) | ||
38 | */ | ||
39 | #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1 | ||
40 | #define OSC_SHPC_NATIVE_HP_CONTROL 2 | ||
41 | #define OSC_PCI_EXPRESS_PME_CONTROL 4 | ||
42 | #define OSC_PCI_EXPRESS_AER_CONTROL 8 | ||
43 | #define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16 | ||
44 | |||
45 | #define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \ | ||
46 | OSC_SHPC_NATIVE_HP_CONTROL | \ | ||
47 | OSC_PCI_EXPRESS_PME_CONTROL | \ | ||
48 | OSC_PCI_EXPRESS_AER_CONTROL | \ | ||
49 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) | ||
50 | |||
51 | #ifdef CONFIG_ACPI | 13 | #ifdef CONFIG_ACPI |
52 | extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags); | ||
53 | int pci_acpi_osc_support(acpi_handle handle, u32 flags); | ||
54 | static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) | 14 | static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) |
55 | { | 15 | { |
56 | /* Find root host bridge */ | 16 | struct pci_bus *pbus = pdev->bus; |
57 | while (pdev->bus->self) | 17 | /* Find a PCI root bus */ |
58 | pdev = pdev->bus->self; | 18 | while (pbus->parent) |
59 | 19 | pbus = pbus->parent; | |
60 | return acpi_get_pci_rootbridge_handle(pci_domain_nr(pdev->bus), | 20 | return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus), |
61 | pdev->bus->number); | 21 | pbus->number); |
62 | } | 22 | } |
63 | 23 | ||
64 | static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) | 24 | static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus) |
65 | { | 25 | { |
66 | int seg = pci_domain_nr(pbus), busnr = pbus->number; | 26 | if (pbus->parent) |
67 | struct pci_dev *bridge = pbus->self; | 27 | return DEVICE_ACPI_HANDLE(&(pbus->self->dev)); |
68 | if (bridge) | 28 | return acpi_get_pci_rootbridge_handle(pci_domain_nr(pbus), |
69 | return DEVICE_ACPI_HANDLE(&(bridge->dev)); | 29 | pbus->number); |
70 | return acpi_get_pci_rootbridge_handle(seg, busnr); | ||
71 | } | 30 | } |
72 | #else | 31 | #else |
73 | #if !defined(AE_ERROR) | ||
74 | typedef u32 acpi_status; | ||
75 | #define AE_ERROR (acpi_status) (0x0001) | ||
76 | #endif | ||
77 | static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) | ||
78 | {return AE_ERROR;} | ||
79 | static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) | 32 | static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) |
80 | { return NULL; } | 33 | { return NULL; } |
81 | #endif | 34 | #endif |
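The rewritten acpi_find_root_bridge_handle() above walks struct pci_bus parent pointers instead of pci_dev self pointers, which also works for buses that have no bridge device behind them. A sketch of the same walk done open-coded in a driver, purely for illustration:

#include <linux/pci.h>

/* Illustrative: find the root bus by walking bus->parent up to the host bridge. */
static struct pci_bus *example_find_root_bus(struct pci_dev *pdev)
{
	struct pci_bus *pbus = pdev->bus;

	while (pbus->parent)
		pbus = pbus->parent;
	return pbus;
}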
diff --git a/include/linux/pci.h b/include/linux/pci.h index 7bd624bfdcfd..72698d89e767 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <asm/atomic.h> | 52 | #include <asm/atomic.h> |
53 | #include <linux/device.h> | 53 | #include <linux/device.h> |
54 | #include <linux/io.h> | 54 | #include <linux/io.h> |
55 | #include <linux/irqreturn.h> | ||
55 | 56 | ||
56 | /* Include the ID list */ | 57 | /* Include the ID list */ |
57 | #include <linux/pci_ids.h> | 58 | #include <linux/pci_ids.h> |
@@ -93,6 +94,12 @@ enum { | |||
93 | /* #6: expansion ROM resource */ | 94 | /* #6: expansion ROM resource */ |
94 | PCI_ROM_RESOURCE, | 95 | PCI_ROM_RESOURCE, |
95 | 96 | ||
97 | /* device specific resources */ | ||
98 | #ifdef CONFIG_PCI_IOV | ||
99 | PCI_IOV_RESOURCES, | ||
100 | PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, | ||
101 | #endif | ||
102 | |||
96 | /* resources assigned to buses behind the bridge */ | 103 | /* resources assigned to buses behind the bridge */ |
97 | #define PCI_BRIDGE_RESOURCE_NUM 4 | 104 | #define PCI_BRIDGE_RESOURCE_NUM 4 |
98 | 105 | ||
@@ -180,6 +187,7 @@ struct pci_cap_saved_state { | |||
180 | 187 | ||
181 | struct pcie_link_state; | 188 | struct pcie_link_state; |
182 | struct pci_vpd; | 189 | struct pci_vpd; |
190 | struct pci_sriov; | ||
183 | 191 | ||
184 | /* | 192 | /* |
185 | * The pci_dev structure is used to describe PCI devices. | 193 | * The pci_dev structure is used to describe PCI devices. |
@@ -257,6 +265,8 @@ struct pci_dev { | |||
257 | unsigned int is_managed:1; | 265 | unsigned int is_managed:1; |
258 | unsigned int is_pcie:1; | 266 | unsigned int is_pcie:1; |
259 | unsigned int state_saved:1; | 267 | unsigned int state_saved:1; |
268 | unsigned int is_physfn:1; | ||
269 | unsigned int is_virtfn:1; | ||
260 | pci_dev_flags_t dev_flags; | 270 | pci_dev_flags_t dev_flags; |
261 | atomic_t enable_cnt; /* pci_enable_device has been called */ | 271 | atomic_t enable_cnt; /* pci_enable_device has been called */ |
262 | 272 | ||
@@ -270,6 +280,12 @@ struct pci_dev { | |||
270 | struct list_head msi_list; | 280 | struct list_head msi_list; |
271 | #endif | 281 | #endif |
272 | struct pci_vpd *vpd; | 282 | struct pci_vpd *vpd; |
283 | #ifdef CONFIG_PCI_IOV | ||
284 | union { | ||
285 | struct pci_sriov *sriov; /* SR-IOV capability related */ | ||
286 | struct pci_dev *physfn; /* the PF this VF is associated with */ | ||
287 | }; | ||
288 | #endif | ||
273 | }; | 289 | }; |
274 | 290 | ||
275 | extern struct pci_dev *alloc_pci_dev(void); | 291 | extern struct pci_dev *alloc_pci_dev(void); |
@@ -341,6 +357,15 @@ struct pci_bus { | |||
341 | #define pci_bus_b(n) list_entry(n, struct pci_bus, node) | 357 | #define pci_bus_b(n) list_entry(n, struct pci_bus, node) |
342 | #define to_pci_bus(n) container_of(n, struct pci_bus, dev) | 358 | #define to_pci_bus(n) container_of(n, struct pci_bus, dev) |
343 | 359 | ||
360 | /* | ||
361 | * Returns true if the pci bus is root (behind host-pci bridge), | ||
362 | * false otherwise | ||
363 | */ | ||
364 | static inline bool pci_is_root_bus(struct pci_bus *pbus) | ||
365 | { | ||
366 | return !(pbus->parent); | ||
367 | } | ||
368 | |||
344 | #ifdef CONFIG_PCI_MSI | 369 | #ifdef CONFIG_PCI_MSI |
345 | static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) | 370 | static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) |
346 | { | 371 | { |
@@ -528,7 +553,7 @@ void pcibios_update_irq(struct pci_dev *, int irq); | |||
528 | /* Generic PCI functions used internally */ | 553 | /* Generic PCI functions used internally */ |
529 | 554 | ||
530 | extern struct pci_bus *pci_find_bus(int domain, int busnr); | 555 | extern struct pci_bus *pci_find_bus(int domain, int busnr); |
531 | void pci_bus_add_devices(struct pci_bus *bus); | 556 | void pci_bus_add_devices(const struct pci_bus *bus); |
532 | struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, | 557 | struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, |
533 | struct pci_ops *ops, void *sysdata); | 558 | struct pci_ops *ops, void *sysdata); |
534 | static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, | 559 | static inline struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops, |
@@ -649,6 +674,11 @@ int __must_check pci_reenable_device(struct pci_dev *); | |||
649 | int __must_check pcim_enable_device(struct pci_dev *pdev); | 674 | int __must_check pcim_enable_device(struct pci_dev *pdev); |
650 | void pcim_pin_device(struct pci_dev *pdev); | 675 | void pcim_pin_device(struct pci_dev *pdev); |
651 | 676 | ||
677 | static inline int pci_is_enabled(struct pci_dev *pdev) | ||
678 | { | ||
679 | return (atomic_read(&pdev->enable_cnt) > 0); | ||
680 | } | ||
681 | |||
652 | static inline int pci_is_managed(struct pci_dev *pdev) | 682 | static inline int pci_is_managed(struct pci_dev *pdev) |
653 | { | 683 | { |
654 | return pdev->is_managed; | 684 | return pdev->is_managed; |
@@ -689,6 +719,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size); | |||
689 | /* Power management related routines */ | 719 | /* Power management related routines */ |
690 | int pci_save_state(struct pci_dev *dev); | 720 | int pci_save_state(struct pci_dev *dev); |
691 | int pci_restore_state(struct pci_dev *dev); | 721 | int pci_restore_state(struct pci_dev *dev); |
722 | int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state); | ||
692 | int pci_set_power_state(struct pci_dev *dev, pci_power_t state); | 723 | int pci_set_power_state(struct pci_dev *dev, pci_power_t state); |
693 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); | 724 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); |
694 | bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); | 725 | bool pci_pme_capable(struct pci_dev *dev, pci_power_t state); |
@@ -701,6 +732,9 @@ int pci_back_from_sleep(struct pci_dev *dev); | |||
701 | 732 | ||
702 | /* Functions for PCI Hotplug drivers to use */ | 733 | /* Functions for PCI Hotplug drivers to use */ |
703 | int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); | 734 | int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap); |
735 | #ifdef CONFIG_HOTPLUG | ||
736 | unsigned int pci_rescan_bus(struct pci_bus *bus); | ||
737 | #endif | ||
704 | 738 | ||
705 | /* Vital product data routines */ | 739 | /* Vital product data routines */ |
706 | ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); | 740 | ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); |
@@ -708,7 +742,7 @@ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void | |||
708 | int pci_vpd_truncate(struct pci_dev *dev, size_t size); | 742 | int pci_vpd_truncate(struct pci_dev *dev, size_t size); |
709 | 743 | ||
710 | /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ | 744 | /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ |
711 | void pci_bus_assign_resources(struct pci_bus *bus); | 745 | void pci_bus_assign_resources(const struct pci_bus *bus); |
712 | void pci_bus_size_bridges(struct pci_bus *bus); | 746 | void pci_bus_size_bridges(struct pci_bus *bus); |
713 | int pci_claim_resource(struct pci_dev *, int); | 747 | int pci_claim_resource(struct pci_dev *, int); |
714 | void pci_assign_unassigned_resources(void); | 748 | void pci_assign_unassigned_resources(void); |
@@ -789,7 +823,7 @@ struct msix_entry { | |||
789 | 823 | ||
790 | 824 | ||
791 | #ifndef CONFIG_PCI_MSI | 825 | #ifndef CONFIG_PCI_MSI |
792 | static inline int pci_enable_msi(struct pci_dev *dev) | 826 | static inline int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec) |
793 | { | 827 | { |
794 | return -1; | 828 | return -1; |
795 | } | 829 | } |
@@ -799,6 +833,10 @@ static inline void pci_msi_shutdown(struct pci_dev *dev) | |||
799 | static inline void pci_disable_msi(struct pci_dev *dev) | 833 | static inline void pci_disable_msi(struct pci_dev *dev) |
800 | { } | 834 | { } |
801 | 835 | ||
836 | static inline int pci_msix_table_size(struct pci_dev *dev) | ||
837 | { | ||
838 | return 0; | ||
839 | } | ||
802 | static inline int pci_enable_msix(struct pci_dev *dev, | 840 | static inline int pci_enable_msix(struct pci_dev *dev, |
803 | struct msix_entry *entries, int nvec) | 841 | struct msix_entry *entries, int nvec) |
804 | { | 842 | { |
@@ -820,9 +858,10 @@ static inline int pci_msi_enabled(void) | |||
820 | return 0; | 858 | return 0; |
821 | } | 859 | } |
822 | #else | 860 | #else |
823 | extern int pci_enable_msi(struct pci_dev *dev); | 861 | extern int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec); |
824 | extern void pci_msi_shutdown(struct pci_dev *dev); | 862 | extern void pci_msi_shutdown(struct pci_dev *dev); |
825 | extern void pci_disable_msi(struct pci_dev *dev); | 863 | extern void pci_disable_msi(struct pci_dev *dev); |
864 | extern int pci_msix_table_size(struct pci_dev *dev); | ||
826 | extern int pci_enable_msix(struct pci_dev *dev, | 865 | extern int pci_enable_msix(struct pci_dev *dev, |
827 | struct msix_entry *entries, int nvec); | 866 | struct msix_entry *entries, int nvec); |
828 | extern void pci_msix_shutdown(struct pci_dev *dev); | 867 | extern void pci_msix_shutdown(struct pci_dev *dev); |
@@ -841,6 +880,8 @@ static inline int pcie_aspm_enabled(void) | |||
841 | extern int pcie_aspm_enabled(void); | 880 | extern int pcie_aspm_enabled(void); |
842 | #endif | 881 | #endif |
843 | 882 | ||
883 | #define pci_enable_msi(pdev) pci_enable_msi_block(pdev, 1) | ||
884 | |||
844 | #ifdef CONFIG_HT_IRQ | 885 | #ifdef CONFIG_HT_IRQ |
845 | /* The functions a driver should call */ | 886 | /* The functions a driver should call */ |
846 | int ht_create_irq(struct pci_dev *dev, int idx); | 887 | int ht_create_irq(struct pci_dev *dev, int idx); |
@@ -1194,5 +1235,23 @@ int pci_ext_cfg_avail(struct pci_dev *dev); | |||
1194 | 1235 | ||
1195 | void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); | 1236 | void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar); |
1196 | 1237 | ||
1238 | #ifdef CONFIG_PCI_IOV | ||
1239 | extern int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn); | ||
1240 | extern void pci_disable_sriov(struct pci_dev *dev); | ||
1241 | extern irqreturn_t pci_sriov_migration(struct pci_dev *dev); | ||
1242 | #else | ||
1243 | static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) | ||
1244 | { | ||
1245 | return -ENODEV; | ||
1246 | } | ||
1247 | static inline void pci_disable_sriov(struct pci_dev *dev) | ||
1248 | { | ||
1249 | } | ||
1250 | static inline irqreturn_t pci_sriov_migration(struct pci_dev *dev) | ||
1251 | { | ||
1252 | return IRQ_NONE; | ||
1253 | } | ||
1254 | #endif | ||
1255 | |||
1197 | #endif /* __KERNEL__ */ | 1256 | #endif /* __KERNEL__ */ |
1198 | #endif /* LINUX_PCI_H */ | 1257 | #endif /* LINUX_PCI_H */ |
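The pci.h changes above add the SR-IOV plumbing (is_physfn/is_virtfn, pci_enable_sriov() and friends), plus pci_is_root_bus(), pci_is_enabled() and the pci_enable_msi_block() rename. A hedged sketch of how a physical-function driver might turn on VFs; the function and its error handling are illustrative, not taken from any real driver:

#include <linux/errno.h>
#include <linux/pci.h>

/* Illustrative PF-side enable: only physical functions may create VFs. */
static int example_enable_vfs(struct pci_dev *pdev, int nr_vfs)
{
	int rc;

	if (!pdev->is_physfn)
		return -ENODEV;

	rc = pci_enable_sriov(pdev, nr_vfs);
	if (rc)
		dev_err(&pdev->dev, "pci_enable_sriov() failed: %d\n", rc);
	return rc;
}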
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 2c9e8080da5e..06ba90c211a5 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -526,6 +526,7 @@ | |||
526 | #define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 | 526 | #define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443 |
527 | #define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 | 527 | #define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443 |
528 | #define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 | 528 | #define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445 |
529 | #define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 | ||
529 | #define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 | 530 | #define PCI_DEVICE_ID_AMD_8111_LPC 0x7468 |
530 | #define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 | 531 | #define PCI_DEVICE_ID_AMD_8111_IDE 0x7469 |
531 | #define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a | 532 | #define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a |
@@ -943,6 +944,32 @@ | |||
943 | #define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 | 944 | #define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801 |
944 | #define PCI_DEVICE_ID_SUN_CASSINI 0xabba | 945 | #define PCI_DEVICE_ID_SUN_CASSINI 0xabba |
945 | 946 | ||
947 | #define PCI_VENDOR_ID_NI 0x1093 | ||
948 | #define PCI_DEVICE_ID_NI_PCI2322 0xd130 | ||
949 | #define PCI_DEVICE_ID_NI_PCI2324 0xd140 | ||
950 | #define PCI_DEVICE_ID_NI_PCI2328 0xd150 | ||
951 | #define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190 | ||
952 | #define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0 | ||
953 | #define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0 | ||
954 | #define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0 | ||
955 | #define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0 | ||
956 | #define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1 | ||
957 | #define PCI_DEVICE_ID_NI_PCI2322I 0xd250 | ||
958 | #define PCI_DEVICE_ID_NI_PCI2324I 0xd270 | ||
959 | #define PCI_DEVICE_ID_NI_PCI23216 0xd2b0 | ||
960 | #define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080 | ||
961 | #define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db | ||
962 | #define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd | ||
963 | #define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df | ||
964 | #define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2 | ||
965 | #define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4 | ||
966 | #define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6 | ||
967 | #define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7 | ||
968 | #define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8 | ||
969 | #define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea | ||
970 | #define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec | ||
971 | #define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee | ||
972 | |||
946 | #define PCI_VENDOR_ID_CMD 0x1095 | 973 | #define PCI_VENDOR_ID_CMD 0x1095 |
947 | #define PCI_DEVICE_ID_CMD_643 0x0643 | 974 | #define PCI_DEVICE_ID_CMD_643 0x0643 |
948 | #define PCI_DEVICE_ID_CMD_646 0x0646 | 975 | #define PCI_DEVICE_ID_CMD_646 0x0646 |
@@ -2232,6 +2259,14 @@ | |||
2232 | #define PCI_DEVICE_ID_TDI_EHCI 0x0101 | 2259 | #define PCI_DEVICE_ID_TDI_EHCI 0x0101 |
2233 | 2260 | ||
2234 | #define PCI_VENDOR_ID_FREESCALE 0x1957 | 2261 | #define PCI_VENDOR_ID_FREESCALE 0x1957 |
2262 | #define PCI_DEVICE_ID_MPC8315E 0x00b4 | ||
2263 | #define PCI_DEVICE_ID_MPC8315 0x00b5 | ||
2264 | #define PCI_DEVICE_ID_MPC8314E 0x00b6 | ||
2265 | #define PCI_DEVICE_ID_MPC8314 0x00b7 | ||
2266 | #define PCI_DEVICE_ID_MPC8378E 0x00c4 | ||
2267 | #define PCI_DEVICE_ID_MPC8378 0x00c5 | ||
2268 | #define PCI_DEVICE_ID_MPC8377E 0x00c6 | ||
2269 | #define PCI_DEVICE_ID_MPC8377 0x00c7 | ||
2235 | #define PCI_DEVICE_ID_MPC8548E 0x0012 | 2270 | #define PCI_DEVICE_ID_MPC8548E 0x0012 |
2236 | #define PCI_DEVICE_ID_MPC8548 0x0013 | 2271 | #define PCI_DEVICE_ID_MPC8548 0x0013 |
2237 | #define PCI_DEVICE_ID_MPC8543E 0x0014 | 2272 | #define PCI_DEVICE_ID_MPC8543E 0x0014 |
@@ -2388,6 +2423,7 @@ | |||
2388 | #define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c | 2423 | #define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c |
2389 | #define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 | 2424 | #define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0 |
2390 | #define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 | 2425 | #define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1 |
2426 | #define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2 | ||
2391 | #define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 | 2427 | #define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3 |
2392 | #define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 | 2428 | #define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5 |
2393 | #define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 | 2429 | #define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6 |
@@ -2478,6 +2514,8 @@ | |||
2478 | #define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 | 2514 | #define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433 |
2479 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 | 2515 | #define PCI_DEVICE_ID_INTEL_82830_HB 0x3575 |
2480 | #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 | 2516 | #define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577 |
2517 | #define PCI_DEVICE_ID_INTEL_82854_HB 0x358c | ||
2518 | #define PCI_DEVICE_ID_INTEL_82854_IG 0x358e | ||
2481 | #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 | 2519 | #define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580 |
2482 | #define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582 | 2520 | #define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582 |
2483 | #define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590 | 2521 | #define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590 |
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h index 027815b4635e..616bf8b3c8b5 100644 --- a/include/linux/pci_regs.h +++ b/include/linux/pci_regs.h | |||
@@ -235,7 +235,7 @@ | |||
235 | #define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ | 235 | #define PCI_PM_CAP_PME_SHIFT 11 /* Start of the PME Mask in PMC */ |
236 | #define PCI_PM_CTRL 4 /* PM control and status register */ | 236 | #define PCI_PM_CTRL 4 /* PM control and status register */ |
237 | #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ | 237 | #define PCI_PM_CTRL_STATE_MASK 0x0003 /* Current power state (D0 to D3) */ |
238 | #define PCI_PM_CTRL_NO_SOFT_RESET 0x0004 /* No reset for D3hot->D0 */ | 238 | #define PCI_PM_CTRL_NO_SOFT_RESET 0x0008 /* No reset for D3hot->D0 */ |
239 | #define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */ | 239 | #define PCI_PM_CTRL_PME_ENABLE 0x0100 /* PME pin enable */ |
240 | #define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */ | 240 | #define PCI_PM_CTRL_DATA_SEL_MASK 0x1e00 /* Data select (??) */ |
241 | #define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */ | 241 | #define PCI_PM_CTRL_DATA_SCALE_MASK 0x6000 /* Data scale (??) */ |
@@ -375,6 +375,8 @@ | |||
375 | #define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ | 375 | #define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ |
376 | #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ | 376 | #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ |
377 | #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */ | 377 | #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */ |
378 | #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */ | ||
379 | #define PCI_EXP_TYPE_RC_EC 0x10 /* Root Complex Event Collector */ | ||
378 | #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ | 380 | #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ |
379 | #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ | 381 | #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ |
380 | #define PCI_EXP_DEVCAP 4 /* Device capabilities */ | 382 | #define PCI_EXP_DEVCAP 4 /* Device capabilities */ |
@@ -487,6 +489,8 @@ | |||
487 | #define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */ | 489 | #define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */ |
488 | #define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ | 490 | #define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ |
489 | #define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */ | 491 | #define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */ |
492 | #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ | ||
493 | #define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */ | ||
490 | 494 | ||
491 | /* Extended Capabilities (PCI-X 2.0 and Express) */ | 495 | /* Extended Capabilities (PCI-X 2.0 and Express) */ |
492 | #define PCI_EXT_CAP_ID(header) (header & 0x0000ffff) | 496 | #define PCI_EXT_CAP_ID(header) (header & 0x0000ffff) |
@@ -498,6 +502,7 @@ | |||
498 | #define PCI_EXT_CAP_ID_DSN 3 | 502 | #define PCI_EXT_CAP_ID_DSN 3 |
499 | #define PCI_EXT_CAP_ID_PWR 4 | 503 | #define PCI_EXT_CAP_ID_PWR 4 |
500 | #define PCI_EXT_CAP_ID_ARI 14 | 504 | #define PCI_EXT_CAP_ID_ARI 14 |
505 | #define PCI_EXT_CAP_ID_SRIOV 16 | ||
501 | 506 | ||
502 | /* Advanced Error Reporting */ | 507 | /* Advanced Error Reporting */ |
503 | #define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ | 508 | #define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */ |
@@ -615,4 +620,35 @@ | |||
615 | #define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */ | 620 | #define PCI_ARI_CTRL_ACS 0x0002 /* ACS Function Groups Enable */ |
616 | #define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */ | 621 | #define PCI_ARI_CTRL_FG(x) (((x) >> 4) & 7) /* Function Group */ |
617 | 622 | ||
623 | /* Single Root I/O Virtualization */ | ||
624 | #define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */ | ||
625 | #define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */ | ||
626 | #define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */ | ||
627 | #define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */ | ||
628 | #define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */ | ||
629 | #define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */ | ||
630 | #define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */ | ||
631 | #define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */ | ||
632 | #define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */ | ||
633 | #define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */ | ||
634 | #define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */ | ||
635 | #define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */ | ||
636 | #define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */ | ||
637 | #define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */ | ||
638 | #define PCI_SRIOV_FUNC_LINK 0x12 /* Function Dependency Link */ | ||
639 | #define PCI_SRIOV_VF_OFFSET 0x14 /* First VF Offset */ | ||
640 | #define PCI_SRIOV_VF_STRIDE 0x16 /* Following VF Stride */ | ||
641 | #define PCI_SRIOV_VF_DID 0x1a /* VF Device ID */ | ||
642 | #define PCI_SRIOV_SUP_PGSIZE 0x1c /* Supported Page Sizes */ | ||
643 | #define PCI_SRIOV_SYS_PGSIZE 0x20 /* System Page Size */ | ||
644 | #define PCI_SRIOV_BAR 0x24 /* VF BAR0 */ | ||
645 | #define PCI_SRIOV_NUM_BARS 6 /* Number of VF BARs */ | ||
646 | #define PCI_SRIOV_VFM 0x3c /* VF Migration State Array Offset*/ | ||
647 | #define PCI_SRIOV_VFM_BIR(x) ((x) & 7) /* State BIR */ | ||
648 | #define PCI_SRIOV_VFM_OFFSET(x) ((x) & ~7) /* State Offset */ | ||
649 | #define PCI_SRIOV_VFM_UA 0x0 /* Inactive.Unavailable */ | ||
650 | #define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */ | ||
651 | #define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */ | ||
652 | #define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */ | ||
653 | |||
618 | #endif /* LINUX_PCI_REGS_H */ | 654 | #endif /* LINUX_PCI_REGS_H */ |
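The new PCI_SRIOV_* constants above describe the SR-IOV extended capability layout. A minimal sketch of reading TotalVFs through it, assuming the standard pci_find_ext_capability()/pci_read_config_word() accessors; the helper name is illustrative:

#include <linux/pci.h>
#include <linux/pci_regs.h>

/* Illustrative: how many virtual functions does this device advertise? */
static u16 example_sriov_total_vfs(struct pci_dev *dev)
{
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	u16 total = 0;

	if (pos)
		pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	return total;
}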
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h index 6cd91e3f9820..b4c79545330b 100644 --- a/include/linux/pcieport_if.h +++ b/include/linux/pcieport_if.h | |||
@@ -16,29 +16,30 @@ | |||
16 | #define PCIE_ANY_PORT 7 | 16 | #define PCIE_ANY_PORT 7 |
17 | 17 | ||
18 | /* Service Type */ | 18 | /* Service Type */ |
19 | #define PCIE_PORT_SERVICE_PME 1 /* Power Management Event */ | 19 | #define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */ |
20 | #define PCIE_PORT_SERVICE_AER 2 /* Advanced Error Reporting */ | 20 | #define PCIE_PORT_SERVICE_PME (1 << PCIE_PORT_SERVICE_PME_SHIFT) |
21 | #define PCIE_PORT_SERVICE_HP 4 /* Native Hotplug */ | 21 | #define PCIE_PORT_SERVICE_AER_SHIFT 1 /* Advanced Error Reporting */ |
22 | #define PCIE_PORT_SERVICE_VC 8 /* Virtual Channel */ | 22 | #define PCIE_PORT_SERVICE_AER (1 << PCIE_PORT_SERVICE_AER_SHIFT) |
23 | #define PCIE_PORT_SERVICE_HP_SHIFT 2 /* Native Hotplug */ | ||
24 | #define PCIE_PORT_SERVICE_HP (1 << PCIE_PORT_SERVICE_HP_SHIFT) | ||
25 | #define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ | ||
26 | #define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) | ||
23 | 27 | ||
24 | /* Root/Upstream/Downstream Port's Interrupt Mode */ | 28 | /* Root/Upstream/Downstream Port's Interrupt Mode */ |
29 | #define PCIE_PORT_NO_IRQ (-1) | ||
25 | #define PCIE_PORT_INTx_MODE 0 | 30 | #define PCIE_PORT_INTx_MODE 0 |
26 | #define PCIE_PORT_MSI_MODE 1 | 31 | #define PCIE_PORT_MSI_MODE 1 |
27 | #define PCIE_PORT_MSIX_MODE 2 | 32 | #define PCIE_PORT_MSIX_MODE 2 |
28 | 33 | ||
29 | struct pcie_port_service_id { | 34 | struct pcie_port_data { |
30 | __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ | 35 | int port_type; /* Type of the port */ |
31 | __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ | 36 | int port_irq_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */ |
32 | __u32 class, class_mask; /* (class,subclass,prog-if) triplet */ | ||
33 | __u32 port_type, service_type; /* Port Entity */ | ||
34 | kernel_ulong_t driver_data; | ||
35 | }; | 37 | }; |
36 | 38 | ||
37 | struct pcie_device { | 39 | struct pcie_device { |
38 | int irq; /* Service IRQ/MSI/MSI-X Vector */ | 40 | int irq; /* Service IRQ/MSI/MSI-X Vector */ |
39 | int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */ | 41 | struct pci_dev *port; /* Root/Upstream/Downstream Port */ |
40 | struct pcie_port_service_id id; /* Service ID */ | 42 | u32 service; /* Port service this device represents */ |
41 | struct pci_dev *port; /* Root/Upstream/Downstream Port */ | ||
42 | void *priv_data; /* Service Private Data */ | 43 | void *priv_data; /* Service Private Data */ |
43 | struct device device; /* Generic Device Interface */ | 44 | struct device device; /* Generic Device Interface */ |
44 | }; | 45 | }; |
@@ -56,10 +57,9 @@ static inline void* get_service_data(struct pcie_device *dev) | |||
56 | 57 | ||
57 | struct pcie_port_service_driver { | 58 | struct pcie_port_service_driver { |
58 | const char *name; | 59 | const char *name; |
59 | int (*probe) (struct pcie_device *dev, | 60 | int (*probe) (struct pcie_device *dev); |
60 | const struct pcie_port_service_id *id); | ||
61 | void (*remove) (struct pcie_device *dev); | 61 | void (*remove) (struct pcie_device *dev); |
62 | int (*suspend) (struct pcie_device *dev, pm_message_t state); | 62 | int (*suspend) (struct pcie_device *dev); |
63 | int (*resume) (struct pcie_device *dev); | 63 | int (*resume) (struct pcie_device *dev); |
64 | 64 | ||
65 | /* Service Error Recovery Handler */ | 65 | /* Service Error Recovery Handler */ |
@@ -68,7 +68,9 @@ struct pcie_port_service_driver { | |||
68 | /* Link Reset Capability - AER service driver specific */ | 68 | /* Link Reset Capability - AER service driver specific */ |
69 | pci_ers_result_t (*reset_link) (struct pci_dev *dev); | 69 | pci_ers_result_t (*reset_link) (struct pci_dev *dev); |
70 | 70 | ||
71 | const struct pcie_port_service_id *id_table; | 71 | int port_type; /* Type of the port this driver can handle */ |
72 | u32 service; /* Port service this device represents */ | ||
73 | |||
72 | struct device_driver driver; | 74 | struct device_driver driver; |
73 | }; | 75 | }; |
74 | #define to_service_driver(d) \ | 76 | #define to_service_driver(d) \ |
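After the rework above, a port service driver names its service with the new shift-based bit definitions and declares the port type directly instead of via a pcie_port_service_id table, and probe() loses the id argument. A hedged sketch of what a driver declaration now looks like; the driver is a stand-in and does nothing:

#include <linux/pcieport_if.h>

/* Illustrative service driver claiming the PME service on any port type. */
static int example_pme_probe(struct pcie_device *dev)
{
	return 0;				/* accept the service device */
}

static struct pcie_port_service_driver example_pme_driver = {
	.name		= "example_pme",
	.port_type	= PCIE_ANY_PORT,
	.service	= PCIE_PORT_SERVICE_PME,
	.probe		= example_pme_probe,
};

Registration still goes through pcie_port_service_register(&example_pme_driver) as before.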
diff --git a/include/linux/pda_power.h b/include/linux/pda_power.h index cb7d10f30763..d4cf7a2ceb3e 100644 --- a/include/linux/pda_power.h +++ b/include/linux/pda_power.h | |||
@@ -31,6 +31,8 @@ struct pda_power_pdata { | |||
31 | unsigned int wait_for_status; /* msecs, default is 500 */ | 31 | unsigned int wait_for_status; /* msecs, default is 500 */ |
32 | unsigned int wait_for_charger; /* msecs, default is 500 */ | 32 | unsigned int wait_for_charger; /* msecs, default is 500 */ |
33 | unsigned int polling_interval; /* msecs, default is 2000 */ | 33 | unsigned int polling_interval; /* msecs, default is 2000 */ |
34 | |||
35 | unsigned long ac_max_uA; /* current to draw when on AC */ | ||
34 | }; | 36 | }; |
35 | 37 | ||
36 | #endif /* __PDA_POWER_H__ */ | 38 | #endif /* __PDA_POWER_H__ */ |
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h new file mode 100644 index 000000000000..8f921d74f49f --- /dev/null +++ b/include/linux/percpu-defs.h | |||
@@ -0,0 +1,84 @@ | |||
1 | #ifndef _LINUX_PERCPU_DEFS_H | ||
2 | #define _LINUX_PERCPU_DEFS_H | ||
3 | |||
4 | /* | ||
5 | * Determine the real variable name from the name visible in the | ||
6 | * kernel sources. | ||
7 | */ | ||
8 | #define per_cpu_var(var) per_cpu__##var | ||
9 | |||
10 | /* | ||
11 | * Base implementations of per-CPU variable declarations and definitions, where | ||
12 | * the section in which the variable is to be placed is provided by the | ||
13 | * 'section' argument. This may be used to affect the parameters governing the | ||
14 | * variable's storage. | ||
15 | * | ||
16 | * NOTE! The sections for the DECLARE and for the DEFINE must match, lest | ||
17 | * linkage errors occur due the compiler generating the wrong code to access | ||
18 | * that section. | ||
19 | */ | ||
20 | #define DECLARE_PER_CPU_SECTION(type, name, section) \ | ||
21 | extern \ | ||
22 | __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ | ||
23 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | ||
24 | |||
25 | #define DEFINE_PER_CPU_SECTION(type, name, section) \ | ||
26 | __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ | ||
27 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | ||
28 | |||
29 | /* | ||
30 | * Variant on the per-CPU variable declaration/definition theme used for | ||
31 | * ordinary per-CPU variables. | ||
32 | */ | ||
33 | #define DECLARE_PER_CPU(type, name) \ | ||
34 | DECLARE_PER_CPU_SECTION(type, name, "") | ||
35 | |||
36 | #define DEFINE_PER_CPU(type, name) \ | ||
37 | DEFINE_PER_CPU_SECTION(type, name, "") | ||
38 | |||
39 | /* | ||
40 | * Declaration/definition used for per-CPU variables that must come first in | ||
41 | * the set of variables. | ||
42 | */ | ||
43 | #define DECLARE_PER_CPU_FIRST(type, name) \ | ||
44 | DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) | ||
45 | |||
46 | #define DEFINE_PER_CPU_FIRST(type, name) \ | ||
47 | DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) | ||
48 | |||
49 | /* | ||
50 | * Declaration/definition used for per-CPU variables that must be cacheline | ||
51 | * aligned under SMP conditions so that, whilst a particular instance of the | ||
52 | * data corresponds to a particular CPU, inefficiencies due to direct access by | ||
53 | * other CPUs are reduced by preventing the data from unnecessarily spanning | ||
54 | * cachelines. | ||
55 | * | ||
56 | * An example of this would be statistical data, where each CPU's set of data | ||
57 | * is updated by that CPU alone, but the data from across all CPUs is collated | ||
58 | * by a CPU processing a read from a proc file. | ||
59 | */ | ||
60 | #define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
61 | DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ | ||
62 | ____cacheline_aligned_in_smp | ||
63 | |||
64 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
65 | DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ | ||
66 | ____cacheline_aligned_in_smp | ||
67 | |||
68 | /* | ||
69 | * Declaration/definition used for per-CPU variables that must be page aligned. | ||
70 | */ | ||
71 | #define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \ | ||
72 | DECLARE_PER_CPU_SECTION(type, name, ".page_aligned") | ||
73 | |||
74 | #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ | ||
75 | DEFINE_PER_CPU_SECTION(type, name, ".page_aligned") | ||
76 | |||
77 | /* | ||
78 | * Intermodule exports for per-CPU variables. | ||
79 | */ | ||
80 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) | ||
81 | #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) | ||
82 | |||
83 | |||
84 | #endif /* _LINUX_PERCPU_DEFS_H */ | ||
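The new percpu-defs.h above collects the DECLARE/DEFINE_PER_CPU family in one place; usage at call sites is unchanged. A minimal sketch with an ordinary per-CPU counter (the variable and function names are illustrative):

#include <linux/percpu.h>

/* Illustrative per-CPU counter bumped with preemption disabled. */
static DEFINE_PER_CPU(unsigned long, example_hits);

static void example_count_hit(void)
{
	get_cpu_var(example_hits)++;		/* get_cpu_var() disables preemption */
	put_cpu_var(example_hits);
}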
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index ee5615d65211..1581ff235c7e 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -9,50 +9,6 @@ | |||
9 | 9 | ||
10 | #include <asm/percpu.h> | 10 | #include <asm/percpu.h> |
11 | 11 | ||
12 | #ifndef PER_CPU_BASE_SECTION | ||
13 | #ifdef CONFIG_SMP | ||
14 | #define PER_CPU_BASE_SECTION ".data.percpu" | ||
15 | #else | ||
16 | #define PER_CPU_BASE_SECTION ".data" | ||
17 | #endif | ||
18 | #endif | ||
19 | |||
20 | #ifdef CONFIG_SMP | ||
21 | |||
22 | #ifdef MODULE | ||
23 | #define PER_CPU_SHARED_ALIGNED_SECTION "" | ||
24 | #else | ||
25 | #define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" | ||
26 | #endif | ||
27 | #define PER_CPU_FIRST_SECTION ".first" | ||
28 | |||
29 | #else | ||
30 | |||
31 | #define PER_CPU_SHARED_ALIGNED_SECTION "" | ||
32 | #define PER_CPU_FIRST_SECTION "" | ||
33 | |||
34 | #endif | ||
35 | |||
36 | #define DEFINE_PER_CPU_SECTION(type, name, section) \ | ||
37 | __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ | ||
38 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | ||
39 | |||
40 | #define DEFINE_PER_CPU(type, name) \ | ||
41 | DEFINE_PER_CPU_SECTION(type, name, "") | ||
42 | |||
43 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
44 | DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ | ||
45 | ____cacheline_aligned_in_smp | ||
46 | |||
47 | #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ | ||
48 | DEFINE_PER_CPU_SECTION(type, name, ".page_aligned") | ||
49 | |||
50 | #define DEFINE_PER_CPU_FIRST(type, name) \ | ||
51 | DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) | ||
52 | |||
53 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) | ||
54 | #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) | ||
55 | |||
56 | /* enough to cover all DEFINE_PER_CPUs in modules */ | 12 | /* enough to cover all DEFINE_PER_CPUs in modules */ |
57 | #ifdef CONFIG_MODULES | 13 | #ifdef CONFIG_MODULES |
58 | #define PERCPU_MODULE_RESERVE (8 << 10) | 14 | #define PERCPU_MODULE_RESERVE (8 << 10) |
@@ -168,4 +124,56 @@ static inline void free_percpu(void *p) | |||
168 | #define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ | 124 | #define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ |
169 | __alignof__(type)) | 125 | __alignof__(type)) |
170 | 126 | ||
127 | /* | ||
128 | * Optional methods for optimized non-lvalue per-cpu variable access. | ||
129 | * | ||
130 | * @var can be a percpu variable or a field of it and its size should | ||
131 | * equal char, int or long. percpu_read() evaluates to a lvalue and | ||
132 | * all others to void. | ||
133 | * | ||
134 | * These operations are guaranteed to be atomic w.r.t. preemption. | ||
135 | * The generic versions use plain get/put_cpu_var(). Archs are | ||
136 | * encouraged to implement single-instruction alternatives which don't | ||
137 | * require preemption protection. | ||
138 | */ | ||
139 | #ifndef percpu_read | ||
140 | # define percpu_read(var) \ | ||
141 | ({ \ | ||
142 | typeof(per_cpu_var(var)) __tmp_var__; \ | ||
143 | __tmp_var__ = get_cpu_var(var); \ | ||
144 | put_cpu_var(var); \ | ||
145 | __tmp_var__; \ | ||
146 | }) | ||
147 | #endif | ||
148 | |||
149 | #define __percpu_generic_to_op(var, val, op) \ | ||
150 | do { \ | ||
151 | get_cpu_var(var) op val; \ | ||
152 | put_cpu_var(var); \ | ||
153 | } while (0) | ||
154 | |||
155 | #ifndef percpu_write | ||
156 | # define percpu_write(var, val) __percpu_generic_to_op(var, (val), =) | ||
157 | #endif | ||
158 | |||
159 | #ifndef percpu_add | ||
160 | # define percpu_add(var, val) __percpu_generic_to_op(var, (val), +=) | ||
161 | #endif | ||
162 | |||
163 | #ifndef percpu_sub | ||
164 | # define percpu_sub(var, val) __percpu_generic_to_op(var, (val), -=) | ||
165 | #endif | ||
166 | |||
167 | #ifndef percpu_and | ||
168 | # define percpu_and(var, val) __percpu_generic_to_op(var, (val), &=) | ||
169 | #endif | ||
170 | |||
171 | #ifndef percpu_or | ||
172 | # define percpu_or(var, val) __percpu_generic_to_op(var, (val), |=) | ||
173 | #endif | ||
174 | |||
175 | #ifndef percpu_xor | ||
176 | # define percpu_xor(var, val) __percpu_generic_to_op(var, (val), ^=) | ||
177 | #endif | ||
178 | |||
171 | #endif /* __LINUX_PERCPU_H */ | 179 | #endif /* __LINUX_PERCPU_H */ |
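The percpu_read/write/add/... accessors added above provide non-lvalue access that architectures may override with single-instruction versions; the generic fallback in the hunk simply brackets the operation with get/put_cpu_var(). A short sketch of how they read at a call site, with illustrative names:

#include <linux/percpu.h>

/* Illustrative use of the generic per-CPU accessors. */
static DEFINE_PER_CPU(int, example_state);

static void example_touch_state(void)
{
	percpu_write(example_state, 0);
	percpu_add(example_state, 3);
	if (percpu_read(example_state) >= 3)
		percpu_sub(example_state, 1);
}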
diff --git a/include/linux/phy.h b/include/linux/phy.h index 32cf14a4b034..97e40cb6b588 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -388,6 +388,12 @@ struct phy_driver { | |||
388 | /* Enables or disables interrupts */ | 388 | /* Enables or disables interrupts */ |
389 | int (*config_intr)(struct phy_device *phydev); | 389 | int (*config_intr)(struct phy_device *phydev); |
390 | 390 | ||
391 | /* | ||
392 | * Checks if the PHY generated an interrupt. | ||
393 | * For multi-PHY devices with shared PHY interrupt pin | ||
394 | */ | ||
395 | int (*did_interrupt)(struct phy_device *phydev); | ||
396 | |||
391 | /* Clears up any memory if needed */ | 397 | /* Clears up any memory if needed */ |
392 | void (*remove)(struct phy_device *phydev); | 398 | void (*remove)(struct phy_device *phydev); |
393 | 399 | ||
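The did_interrupt() callback above lets a driver for multi-PHY packages with a shared interrupt pin report whether its particular PHY actually fired. A hedged sketch; the register offset and status bit below are invented for illustration and do not correspond to any real PHY:

#include <linux/phy.h>

/* Illustrative: poll a (hypothetical) interrupt-status register. */
static int example_did_interrupt(struct phy_device *phydev)
{
	int status = phy_read(phydev, 0x1a);	/* 0x1a is a made-up register */

	return status >= 0 && (status & 0x8000);
}

static struct phy_driver example_phy_driver = {
	.name		= "Example PHY",
	.did_interrupt	= example_did_interrupt,
	/* .phy_id, .features etc. omitted in this sketch */
};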
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 8e4120285f72..c8f038554e80 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h | |||
@@ -134,6 +134,11 @@ struct pipe_buf_operations { | |||
134 | memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ | 134 | memory allocation, whereas PIPE_BUF makes atomicity guarantees. */ |
135 | #define PIPE_SIZE PAGE_SIZE | 135 | #define PIPE_SIZE PAGE_SIZE |
136 | 136 | ||
137 | /* Pipe lock and unlock operations */ | ||
138 | void pipe_lock(struct pipe_inode_info *); | ||
139 | void pipe_unlock(struct pipe_inode_info *); | ||
140 | void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); | ||
141 | |||
137 | /* Drop the inode semaphore and wait for a pipe event, atomically */ | 142 | /* Drop the inode semaphore and wait for a pipe event, atomically */ |
138 | void pipe_wait(struct pipe_inode_info *pipe); | 143 | void pipe_wait(struct pipe_inode_info *pipe); |
139 | 144 | ||
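pipe_lock()/pipe_unlock()/pipe_double_lock() above give splice and friends a sanctioned way to take the pipe mutex(es) instead of open-coding inode locking. A trivial hedged sketch of the single-pipe case; the function is illustrative:

#include <linux/pipe_fs_i.h>

/* Illustrative critical section over one pipe. */
static void example_pipe_touch(struct pipe_inode_info *pipe)
{
	pipe_lock(pipe);
	/* ... inspect pipe->nrbufs / pipe->bufs here ... */
	pipe_unlock(pipe);
}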
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h index 04b4d7330e6d..d745f5b6c7b0 100644 --- a/include/linux/pktcdvd.h +++ b/include/linux/pktcdvd.h | |||
@@ -113,6 +113,7 @@ struct pkt_ctrl_command { | |||
113 | #include <linux/cdrom.h> | 113 | #include <linux/cdrom.h> |
114 | #include <linux/kobject.h> | 114 | #include <linux/kobject.h> |
115 | #include <linux/sysfs.h> | 115 | #include <linux/sysfs.h> |
116 | #include <linux/mempool.h> | ||
116 | 117 | ||
117 | /* default bio write queue congestion marks */ | 118 | /* default bio write queue congestion marks */ |
118 | #define PKT_WRITE_CONGESTION_ON 10000 | 119 | #define PKT_WRITE_CONGESTION_ON 10000 |
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 76e470a299bf..72736fd8223c 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -77,4 +77,46 @@ extern int platform_driver_probe(struct platform_driver *driver, | |||
77 | #define platform_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev) | 77 | #define platform_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev) |
78 | #define platform_set_drvdata(_dev,data) dev_set_drvdata(&(_dev)->dev, (data)) | 78 | #define platform_set_drvdata(_dev,data) dev_set_drvdata(&(_dev)->dev, (data)) |
79 | 79 | ||
80 | /* early platform driver interface */ | ||
81 | struct early_platform_driver { | ||
82 | const char *class_str; | ||
83 | struct platform_driver *pdrv; | ||
84 | struct list_head list; | ||
85 | int requested_id; | ||
86 | }; | ||
87 | |||
88 | #define EARLY_PLATFORM_ID_UNSET -2 | ||
89 | #define EARLY_PLATFORM_ID_ERROR -3 | ||
90 | |||
91 | extern int early_platform_driver_register(struct early_platform_driver *epdrv, | ||
92 | char *buf); | ||
93 | extern void early_platform_add_devices(struct platform_device **devs, int num); | ||
94 | |||
95 | static inline int is_early_platform_device(struct platform_device *pdev) | ||
96 | { | ||
97 | return !pdev->dev.driver; | ||
98 | } | ||
99 | |||
100 | extern void early_platform_driver_register_all(char *class_str); | ||
101 | extern int early_platform_driver_probe(char *class_str, | ||
102 | int nr_probe, int user_only); | ||
103 | extern void early_platform_cleanup(void); | ||
104 | |||
105 | |||
106 | #ifndef MODULE | ||
107 | #define early_platform_init(class_string, platform_driver) \ | ||
108 | static __initdata struct early_platform_driver early_driver = { \ | ||
109 | .class_str = class_string, \ | ||
110 | .pdrv = platform_driver, \ | ||
111 | .requested_id = EARLY_PLATFORM_ID_UNSET, \ | ||
112 | }; \ | ||
113 | static int __init early_platform_driver_setup_func(char *buf) \ | ||
114 | { \ | ||
115 | return early_platform_driver_register(&early_driver, buf); \ | ||
116 | } \ | ||
117 | early_param(class_string, early_platform_driver_setup_func) | ||
118 | #else /* MODULE */ | ||
119 | #define early_platform_init(class_string, platform_driver) | ||
120 | #endif /* MODULE */ | ||
121 | |||
80 | #endif /* _PLATFORM_DEVICE_H_ */ | 122 | #endif /* _PLATFORM_DEVICE_H_ */ |
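The early platform driver interface above lets built-in drivers (early timers and the like) be probed from early_param handling, long before the regular driver model is up. A hedged sketch of wiring a driver into it; the class string, names and empty probe are illustrative only:

#include <linux/init.h>
#include <linux/platform_device.h>

/* Illustrative early platform driver registration. */
static int example_early_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_early_pdrv = {
	.probe	= example_early_probe,
	.driver	= {
		.name	= "example_timer",
	},
};

/* Hooks the driver up under the (illustrative) "earlytimer" class string. */
early_platform_init("earlytimer", &example_early_pdrv);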
diff --git a/include/linux/poison.h b/include/linux/poison.h index 9f31683728fd..6729f7dcd60e 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h | |||
@@ -17,6 +17,9 @@ | |||
17 | */ | 17 | */ |
18 | #define TIMER_ENTRY_STATIC ((void *) 0x74737461) | 18 | #define TIMER_ENTRY_STATIC ((void *) 0x74737461) |
19 | 19 | ||
20 | /********** mm/debug-pagealloc.c **********/ | ||
21 | #define PAGE_POISON 0xaa | ||
22 | |||
20 | /********** mm/slab.c **********/ | 23 | /********** mm/slab.c **********/ |
21 | /* | 24 | /* |
22 | * Magic nums for obj red zoning. | 25 | * Magic nums for obj red zoning. |
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 8ff25e0e7f7a..594c494ac3f0 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h | |||
@@ -73,6 +73,8 @@ enum power_supply_property { | |||
73 | POWER_SUPPLY_PROP_VOLTAGE_AVG, | 73 | POWER_SUPPLY_PROP_VOLTAGE_AVG, |
74 | POWER_SUPPLY_PROP_CURRENT_NOW, | 74 | POWER_SUPPLY_PROP_CURRENT_NOW, |
75 | POWER_SUPPLY_PROP_CURRENT_AVG, | 75 | POWER_SUPPLY_PROP_CURRENT_AVG, |
76 | POWER_SUPPLY_PROP_POWER_NOW, | ||
77 | POWER_SUPPLY_PROP_POWER_AVG, | ||
76 | POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, | 78 | POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, |
77 | POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN, | 79 | POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN, |
78 | POWER_SUPPLY_PROP_CHARGE_FULL, | 80 | POWER_SUPPLY_PROP_CHARGE_FULL, |
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index b8bdb96eff78..fbfa3d44d33d 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -41,9 +41,6 @@ enum { | |||
41 | * while parent/subdir create the directory structure (every | 41 | * while parent/subdir create the directory structure (every |
42 | * /proc file has a parent, but "subdir" is NULL for all | 42 | * /proc file has a parent, but "subdir" is NULL for all |
43 | * non-directory entries). | 43 | * non-directory entries). |
44 | * | ||
45 | * "owner" is used to protect module | ||
46 | * from unloading while proc_dir_entry is in use | ||
47 | */ | 44 | */ |
48 | 45 | ||
49 | typedef int (read_proc_t)(char *page, char **start, off_t off, | 46 | typedef int (read_proc_t)(char *page, char **start, off_t off, |
@@ -70,7 +67,6 @@ struct proc_dir_entry { | |||
70 | * somewhere. | 67 | * somewhere. |
71 | */ | 68 | */ |
72 | const struct file_operations *proc_fops; | 69 | const struct file_operations *proc_fops; |
73 | struct module *owner; | ||
74 | struct proc_dir_entry *next, *parent, *subdir; | 70 | struct proc_dir_entry *next, *parent, *subdir; |
75 | void *data; | 71 | void *data; |
76 | read_proc_t *read_proc; | 72 | read_proc_t *read_proc; |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 98b93ca4db06..67c15653fc23 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -94,6 +94,7 @@ extern void ptrace_notify(int exit_code); | |||
94 | extern void __ptrace_link(struct task_struct *child, | 94 | extern void __ptrace_link(struct task_struct *child, |
95 | struct task_struct *new_parent); | 95 | struct task_struct *new_parent); |
96 | extern void __ptrace_unlink(struct task_struct *child); | 96 | extern void __ptrace_unlink(struct task_struct *child); |
97 | extern void exit_ptrace(struct task_struct *tracer); | ||
97 | extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); | 98 | extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); |
98 | #define PTRACE_MODE_READ 1 | 99 | #define PTRACE_MODE_READ 1 |
99 | #define PTRACE_MODE_ATTACH 2 | 100 | #define PTRACE_MODE_ATTACH 2 |
diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 3945f803d514..7c775751392c 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h | |||
@@ -28,4 +28,4 @@ int pwm_enable(struct pwm_device *pwm); | |||
28 | */ | 28 | */ |
29 | void pwm_disable(struct pwm_device *pwm); | 29 | void pwm_disable(struct pwm_device *pwm); |
30 | 30 | ||
31 | #endif /* __ASM_ARCH_PWM_H */ | 31 | #endif /* __LINUX_PWM_H */ |
diff --git a/include/linux/raid/bitmap.h b/include/linux/raid/bitmap.h deleted file mode 100644 index e98900671ca9..000000000000 --- a/include/linux/raid/bitmap.h +++ /dev/null | |||
@@ -1,288 +0,0 @@ | |||
1 | /* | ||
2 | * bitmap.h: Copyright (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003 | ||
3 | * | ||
4 | * additions: Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. | ||
5 | */ | ||
6 | #ifndef BITMAP_H | ||
7 | #define BITMAP_H 1 | ||
8 | |||
9 | #define BITMAP_MAJOR_LO 3 | ||
10 | /* version 4 insists the bitmap is in little-endian order | ||
11 | * with version 3, it is host-endian which is non-portable | ||
12 | */ | ||
13 | #define BITMAP_MAJOR_HI 4 | ||
14 | #define BITMAP_MAJOR_HOSTENDIAN 3 | ||
15 | |||
16 | #define BITMAP_MINOR 39 | ||
17 | |||
18 | /* | ||
19 | * in-memory bitmap: | ||
20 | * | ||
21 | * Use 16 bit block counters to track pending writes to each "chunk". | ||
22 | * The 2 high order bits are special-purpose, the first is a flag indicating | ||
23 | * whether a resync is needed. The second is a flag indicating whether a | ||
24 | * resync is active. | ||
25 | * This means that the counter is actually 14 bits: | ||
26 | * | ||
27 | * +--------+--------+------------------------------------------------+ | ||
28 | * | resync | resync | counter | | ||
29 | * | needed | active | | | ||
30 | * | (0-1) | (0-1) | (0-16383) | | ||
31 | * +--------+--------+------------------------------------------------+ | ||
32 | * | ||
33 | * The "resync needed" bit is set when: | ||
34 | * a '1' bit is read from storage at startup. | ||
35 | * a write request fails on some drives | ||
36 | * a resync is aborted on a chunk with 'resync active' set | ||
37 | * It is cleared (and resync-active set) when a resync starts across all drives | ||
38 | * of the chunk. | ||
39 | * | ||
40 | * | ||
41 | * The "resync active" bit is set when: | ||
42 | * a resync is started on all drives, and resync_needed is set. | ||
43 | * resync_needed will be cleared (as long as resync_active wasn't already set). | ||
44 | * It is cleared when a resync completes. | ||
45 | * | ||
46 | * The counter counts pending write requests, plus the on-disk bit. | ||
47 | * When the counter is '1' and the resync bits are clear, the on-disk | ||
48 | * bit can be cleared aswell, thus setting the counter to 0. | ||
49 | * When we set a bit, or in the counter (to start a write), if the fields is | ||
50 | * 0, we first set the disk bit and set the counter to 1. | ||
51 | * | ||
52 | * If the counter is 0, the on-disk bit is clear and the stipe is clean | ||
53 | * Anything that dirties the stipe pushes the counter to 2 (at least) | ||
54 | * and sets the on-disk bit (lazily). | ||
55 | * If a periodic sweep find the counter at 2, it is decremented to 1. | ||
56 | * If the sweep find the counter at 1, the on-disk bit is cleared and the | ||
57 | * counter goes to zero. | ||
58 | * | ||
59 | * Also, we'll hijack the "map" pointer itself and use it as two 16 bit block | ||
60 | * counters as a fallback when "page" memory cannot be allocated: | ||
61 | * | ||
62 | * Normal case (page memory allocated): | ||
63 | * | ||
64 | * page pointer (32-bit) | ||
65 | * | ||
66 | * [ ] ------+ | ||
67 | * | | ||
68 | * +-------> [ ][ ]..[ ] (4096 byte page == 2048 counters) | ||
69 | * c1 c2 c2048 | ||
70 | * | ||
71 | * Hijacked case (page memory allocation failed): | ||
72 | * | ||
73 | * hijacked page pointer (32-bit) | ||
74 | * | ||
75 | * [ ][ ] (no page memory allocated) | ||
76 | * counter #1 (16-bit) counter #2 (16-bit) | ||
77 | * | ||
78 | */ | ||
79 | |||
80 | #ifdef __KERNEL__ | ||
81 | |||
82 | #define PAGE_BITS (PAGE_SIZE << 3) | ||
83 | #define PAGE_BIT_SHIFT (PAGE_SHIFT + 3) | ||
84 | |||
85 | typedef __u16 bitmap_counter_t; | ||
86 | #define COUNTER_BITS 16 | ||
87 | #define COUNTER_BIT_SHIFT 4 | ||
88 | #define COUNTER_BYTE_RATIO (COUNTER_BITS / 8) | ||
89 | #define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3) | ||
90 | |||
91 | #define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) | ||
92 | #define RESYNC_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 2))) | ||
93 | #define COUNTER_MAX ((bitmap_counter_t) RESYNC_MASK - 1) | ||
94 | #define NEEDED(x) (((bitmap_counter_t) x) & NEEDED_MASK) | ||
95 | #define RESYNC(x) (((bitmap_counter_t) x) & RESYNC_MASK) | ||
96 | #define COUNTER(x) (((bitmap_counter_t) x) & COUNTER_MAX) | ||
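/*
 * Illustrative sketch, not part of the original header: how a raw
 * counter word decodes with the macros above.  With COUNTER_BITS == 16,
 * NEEDED_MASK == 0x8000, RESYNC_MASK == 0x4000 and COUNTER_MAX == 0x3fff,
 * so a value of 0xc003 means "resync needed", "resync active" and a
 * pending-write count of 3:
 *
 *	bitmap_counter_t c = 0xc003;
 *	NEEDED(c)	-> 0x8000  (non-zero: resync needed)
 *	RESYNC(c)	-> 0x4000  (non-zero: resync active)
 *	COUNTER(c)	-> 3	   (pending writes plus the on-disk bit)
 */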
97 | |||
98 | /* how many counters per page? */ | ||
99 | #define PAGE_COUNTER_RATIO (PAGE_BITS / COUNTER_BITS) | ||
100 | /* same, except a shift value for more efficient bitops */ | ||
101 | #define PAGE_COUNTER_SHIFT (PAGE_BIT_SHIFT - COUNTER_BIT_SHIFT) | ||
102 | /* same, except a mask value for more efficient bitops */ | ||
103 | #define PAGE_COUNTER_MASK (PAGE_COUNTER_RATIO - 1) | ||
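/*
 * Worked numbers, for illustration only (assuming 4K pages): PAGE_BITS
 * is 4096 * 8 = 32768, so PAGE_COUNTER_RATIO = 32768 / 16 = 2048
 * counters per page, PAGE_COUNTER_SHIFT = 15 - 4 = 11 and
 * PAGE_COUNTER_MASK = 0x7ff -- matching the "2048 counters" shown in
 * the diagram near the top of this file.
 */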
104 | |||
105 | #define BITMAP_BLOCK_SIZE 512 | ||
106 | #define BITMAP_BLOCK_SHIFT 9 | ||
107 | |||
108 | /* how many blocks per chunk? (this is variable) */ | ||
109 | #define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->chunksize >> BITMAP_BLOCK_SHIFT) | ||
110 | #define CHUNK_BLOCK_SHIFT(bitmap) ((bitmap)->chunkshift - BITMAP_BLOCK_SHIFT) | ||
111 | #define CHUNK_BLOCK_MASK(bitmap) (CHUNK_BLOCK_RATIO(bitmap) - 1) | ||
112 | |||
113 | /* when hijacked, the counters and bits represent even larger "chunks" */ | ||
114 | /* there will be 1024 chunks represented by each counter in the page pointers */ | ||
115 | #define PAGEPTR_BLOCK_RATIO(bitmap) \ | ||
116 | (CHUNK_BLOCK_RATIO(bitmap) << PAGE_COUNTER_SHIFT >> 1) | ||
117 | #define PAGEPTR_BLOCK_SHIFT(bitmap) \ | ||
118 | (CHUNK_BLOCK_SHIFT(bitmap) + PAGE_COUNTER_SHIFT - 1) | ||
119 | #define PAGEPTR_BLOCK_MASK(bitmap) (PAGEPTR_BLOCK_RATIO(bitmap) - 1) | ||
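/*
 * Illustrative example (not in the original header), assuming a 64KiB
 * chunk and 4K pages: CHUNK_BLOCK_RATIO = 65536 >> 9 = 128 512-byte
 * blocks per chunk and CHUNK_BLOCK_SHIFT = 16 - 9 = 7.  In the hijacked
 * case each 16-bit counter then spans
 * PAGEPTR_BLOCK_RATIO = 128 << 11 >> 1 = 131072 blocks, i.e. the 1024
 * chunks per counter mentioned above.
 */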
120 | |||
121 | /* | ||
122 | * on-disk bitmap: | ||
123 | * | ||
124 | * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap | ||
125 | * file a page at a time. There's a superblock at the start of the file. | ||
126 | */ | ||
127 | |||
128 | /* map chunks (bits) to file pages - offset by the size of the superblock */ | ||
129 | #define CHUNK_BIT_OFFSET(chunk) ((chunk) + (sizeof(bitmap_super_t) << 3)) | ||
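/*
 * For illustration only (not in the original header): bitmap_super_t
 * below is exactly 256 bytes, so CHUNK_BIT_OFFSET(0) = 256 * 8 = 2048,
 * i.e. the first chunk's bit sits 2048 bits into the bitmap file,
 * immediately after the superblock.
 */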
130 | |||
131 | #endif | ||
132 | |||
133 | /* | ||
134 | * bitmap structures: | ||
135 | */ | ||
136 | |||
137 | #define BITMAP_MAGIC 0x6d746962 | ||
138 | |||
139 | /* use these for bitmap->flags and bitmap->sb->state bit-fields */ | ||
140 | enum bitmap_state { | ||
141 | BITMAP_STALE = 0x002, /* the bitmap file is out of date or had -EIO */ | ||
142 | BITMAP_WRITE_ERROR = 0x004, /* A write error has occurred */ | ||
143 | BITMAP_HOSTENDIAN = 0x8000, | ||
144 | }; | ||
145 | |||
146 | /* the superblock at the front of the bitmap file -- little endian */ | ||
147 | typedef struct bitmap_super_s { | ||
148 | __le32 magic; /* 0 BITMAP_MAGIC */ | ||
149 | __le32 version; /* 4 the bitmap major for now, could change... */ | ||
150 | __u8 uuid[16]; /* 8 128 bit uuid - must match md device uuid */ | ||
151 | __le64 events; /* 24 event counter for the bitmap (1)*/ | ||
152 | __le64 events_cleared;/*32 event counter when last bit cleared (2) */ | ||
153 | __le64 sync_size; /* 40 the size of the md device's sync range(3) */ | ||
154 | __le32 state; /* 48 bitmap state information */ | ||
155 | __le32 chunksize; /* 52 the bitmap chunk size in bytes */ | ||
156 | __le32 daemon_sleep; /* 56 seconds between disk flushes */ | ||
157 | __le32 write_behind; /* 60 number of outstanding write-behind writes */ | ||
158 | |||
159 | __u8 pad[256 - 64]; /* set to zero */ | ||
160 | } bitmap_super_t; | ||
161 | |||
162 | /* notes: | ||
163 | * (1) This event counter is updated before the event counter in the md superblock. | ||
164 | * When a bitmap is loaded, it is only accepted if this event counter is equal | ||
165 | * to, or one greater than, the event counter in the superblock. | ||
166 | * (2) This event counter is updated when the other one is, *if and only if* the | ||
167 | * array is not degraded. As bits are not cleared when the array is degraded, | ||
168 | * this represents the last time that any bits were cleared. | ||
169 | * If a device is being added that has an event count with this value or | ||
170 | * higher, it is accepted as conforming to the bitmap. | ||
171 | * (3) This is the number of sectors represented by the bitmap, and is the range that | ||
172 | * resync happens across. For raid1 and raid5/6 it is the size of individual | ||
173 | * devices. For raid10 it is the size of the array. | ||
174 | */ | ||
175 | |||
176 | #ifdef __KERNEL__ | ||
177 | |||
178 | /* the in-memory bitmap is represented by bitmap_pages */ | ||
179 | struct bitmap_page { | ||
180 | /* | ||
181 | * map points to the actual memory page | ||
182 | */ | ||
183 | char *map; | ||
184 | /* | ||
185 | * in emergencies (when map cannot be alloced), hijack the map | ||
186 | * pointer and use it as two counters itself | ||
187 | */ | ||
188 | unsigned int hijacked:1; | ||
189 | /* | ||
190 | * count of dirty bits on the page | ||
191 | */ | ||
192 | unsigned int count:31; | ||
193 | }; | ||
194 | |||
195 | /* keep track of bitmap file pages that have pending writes on them */ | ||
196 | struct page_list { | ||
197 | struct list_head list; | ||
198 | struct page *page; | ||
199 | }; | ||
200 | |||
201 | /* the main bitmap structure - one per mddev */ | ||
202 | struct bitmap { | ||
203 | struct bitmap_page *bp; | ||
204 | unsigned long pages; /* total number of pages in the bitmap */ | ||
205 | unsigned long missing_pages; /* number of pages not yet allocated */ | ||
206 | |||
207 | mddev_t *mddev; /* the md device that the bitmap is for */ | ||
208 | |||
209 | int counter_bits; /* how many bits per block counter */ | ||
210 | |||
211 | /* bitmap chunksize -- how much data does each bit represent? */ | ||
212 | unsigned long chunksize; | ||
213 | unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */ | ||
214 | unsigned long chunks; /* total number of data chunks for the array */ | ||
215 | |||
216 | /* We hold a count on the chunk currently being synced, and drop | ||
217 | * it when the last block is started. If the resync is aborted | ||
218 | * midway, we need to be able to drop that count, so we remember | ||
219 | * the counted chunk. | ||
220 | */ | ||
221 | unsigned long syncchunk; | ||
222 | |||
223 | __u64 events_cleared; | ||
224 | int need_sync; | ||
225 | |||
226 | /* bitmap spinlock */ | ||
227 | spinlock_t lock; | ||
228 | |||
229 | long offset; /* offset from superblock if file is NULL */ | ||
230 | struct file *file; /* backing disk file */ | ||
231 | struct page *sb_page; /* cached copy of the bitmap file superblock */ | ||
232 | struct page **filemap; /* list of cache pages for the file */ | ||
233 | unsigned long *filemap_attr; /* attributes associated w/ filemap pages */ | ||
234 | unsigned long file_pages; /* number of pages in the file */ | ||
235 | int last_page_size; /* bytes in the last page */ | ||
236 | |||
237 | unsigned long flags; | ||
238 | |||
239 | int allclean; | ||
240 | |||
241 | unsigned long max_write_behind; /* write-behind mode */ | ||
242 | atomic_t behind_writes; | ||
243 | |||
244 | /* | ||
245 | * the bitmap daemon - periodically wakes up and sweeps the bitmap | ||
246 | * file, cleaning up bits and flushing out pages to disk as necessary | ||
247 | */ | ||
248 | unsigned long daemon_lastrun; /* jiffies of last run */ | ||
249 | unsigned long daemon_sleep; /* how many seconds between updates? */ | ||
250 | unsigned long last_end_sync; /* when we last called end_sync to | ||
251 | * update bitmap with resync progress */ | ||
252 | |||
253 | atomic_t pending_writes; /* pending writes to the bitmap file */ | ||
254 | wait_queue_head_t write_wait; | ||
255 | wait_queue_head_t overflow_wait; | ||
256 | |||
257 | }; | ||
258 | |||
259 | /* the bitmap API */ | ||
260 | |||
261 | /* these are used only by md/bitmap */ | ||
262 | int bitmap_create(mddev_t *mddev); | ||
263 | void bitmap_flush(mddev_t *mddev); | ||
264 | void bitmap_destroy(mddev_t *mddev); | ||
265 | |||
266 | void bitmap_print_sb(struct bitmap *bitmap); | ||
267 | void bitmap_update_sb(struct bitmap *bitmap); | ||
268 | |||
269 | int bitmap_setallbits(struct bitmap *bitmap); | ||
270 | void bitmap_write_all(struct bitmap *bitmap); | ||
271 | |||
272 | void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e); | ||
273 | |||
274 | /* these are exported */ | ||
275 | int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, | ||
276 | unsigned long sectors, int behind); | ||
277 | void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, | ||
278 | unsigned long sectors, int success, int behind); | ||
279 | int bitmap_start_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int degraded); | ||
280 | void bitmap_end_sync(struct bitmap *bitmap, sector_t offset, int *blocks, int aborted); | ||
281 | void bitmap_close_sync(struct bitmap *bitmap); | ||
282 | void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector); | ||
283 | |||
284 | void bitmap_unplug(struct bitmap *bitmap); | ||
285 | void bitmap_daemon_work(struct bitmap *bitmap); | ||
286 | #endif | ||
287 | |||
288 | #endif | ||
diff --git a/include/linux/raid/linear.h b/include/linux/raid/linear.h deleted file mode 100644 index f38b9c586afb..000000000000 --- a/include/linux/raid/linear.h +++ /dev/null | |||
@@ -1,31 +0,0 @@ | |||
1 | #ifndef _LINEAR_H | ||
2 | #define _LINEAR_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | struct dev_info { | ||
7 | mdk_rdev_t *rdev; | ||
8 | sector_t num_sectors; | ||
9 | sector_t start_sector; | ||
10 | }; | ||
11 | |||
12 | typedef struct dev_info dev_info_t; | ||
13 | |||
14 | struct linear_private_data | ||
15 | { | ||
16 | struct linear_private_data *prev; /* earlier version */ | ||
17 | dev_info_t **hash_table; | ||
18 | sector_t spacing; | ||
19 | sector_t array_sectors; | ||
20 | int sector_shift; /* shift before dividing | ||
21 | * by spacing | ||
22 | */ | ||
23 | dev_info_t disks[0]; | ||
24 | }; | ||
25 | |||
26 | |||
27 | typedef struct linear_private_data linear_conf_t; | ||
28 | |||
29 | #define mddev_to_conf(mddev) ((linear_conf_t *) mddev->private) | ||
30 | |||
31 | #endif | ||
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h deleted file mode 100644 index 82bea14cae1a..000000000000 --- a/include/linux/raid/md.h +++ /dev/null | |||
@@ -1,81 +0,0 @@ | |||
1 | /* | ||
2 | md.h : Multiple Devices driver for Linux | ||
3 | Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman | ||
4 | Copyright (C) 1994-96 Marc ZYNGIER | ||
5 | <zyngier@ufr-info-p7.ibp.fr> or | ||
6 | <maz@gloups.fdn.fr> | ||
7 | |||
8 | This program is free software; you can redistribute it and/or modify | ||
9 | it under the terms of the GNU General Public License as published by | ||
10 | the Free Software Foundation; either version 2, or (at your option) | ||
11 | any later version. | ||
12 | |||
13 | You should have received a copy of the GNU General Public License | ||
14 | (for example /usr/src/linux/COPYING); if not, write to the Free | ||
15 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
16 | */ | ||
17 | |||
18 | #ifndef _MD_H | ||
19 | #define _MD_H | ||
20 | |||
21 | #include <linux/blkdev.h> | ||
22 | #include <linux/seq_file.h> | ||
23 | |||
24 | /* | ||
25 | * 'md_p.h' holds the 'physical' layout of RAID devices | ||
26 | * 'md_u.h' holds the user <=> kernel API | ||
27 | * | ||
28 | * 'md_k.h' holds kernel internal definitions | ||
29 | */ | ||
30 | |||
31 | #include <linux/raid/md_p.h> | ||
32 | #include <linux/raid/md_u.h> | ||
33 | #include <linux/raid/md_k.h> | ||
34 | |||
35 | #ifdef CONFIG_MD | ||
36 | |||
37 | /* | ||
38 | * Different major versions are not compatible. | ||
39 | * Different minor versions are only downward compatible. | ||
40 | * Different patchlevel versions are downward and upward compatible. | ||
41 | */ | ||
42 | #define MD_MAJOR_VERSION 0 | ||
43 | #define MD_MINOR_VERSION 90 | ||
44 | /* | ||
45 | * MD_PATCHLEVEL_VERSION indicates kernel functionality. | ||
46 | * >=1 means different superblock formats are selectable using SET_ARRAY_INFO | ||
47 | * and major_version/minor_version accordingly | ||
48 | * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT | ||
49 | * in the super status byte | ||
50 | * >=3 means that bitmap superblock version 4 is supported, which uses | ||
51 | * little-endian representation rather than host-endian | ||
52 | */ | ||
53 | #define MD_PATCHLEVEL_VERSION 3 | ||
54 | |||
55 | extern int mdp_major; | ||
56 | |||
57 | extern int register_md_personality(struct mdk_personality *p); | ||
58 | extern int unregister_md_personality(struct mdk_personality *p); | ||
59 | extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), | ||
60 | mddev_t *mddev, const char *name); | ||
61 | extern void md_unregister_thread(mdk_thread_t *thread); | ||
62 | extern void md_wakeup_thread(mdk_thread_t *thread); | ||
63 | extern void md_check_recovery(mddev_t *mddev); | ||
64 | extern void md_write_start(mddev_t *mddev, struct bio *bi); | ||
65 | extern void md_write_end(mddev_t *mddev); | ||
66 | extern void md_done_sync(mddev_t *mddev, int blocks, int ok); | ||
67 | extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev); | ||
68 | |||
69 | extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | ||
70 | sector_t sector, int size, struct page *page); | ||
71 | extern void md_super_wait(mddev_t *mddev); | ||
72 | extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, | ||
73 | struct page *page, int rw); | ||
74 | extern void md_do_sync(mddev_t *mddev); | ||
75 | extern void md_new_event(mddev_t *mddev); | ||
76 | extern int md_allow_write(mddev_t *mddev); | ||
77 | extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev); | ||
78 | |||
79 | #endif /* CONFIG_MD */ | ||
80 | #endif | ||
81 | |||
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h deleted file mode 100644 index 9743e4dbc918..000000000000 --- a/include/linux/raid/md_k.h +++ /dev/null | |||
@@ -1,402 +0,0 @@ | |||
1 | /* | ||
2 | md_k.h : kernel internal structure of the Linux MD driver | ||
3 | Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman | ||
4 | |||
5 | This program is free software; you can redistribute it and/or modify | ||
6 | it under the terms of the GNU General Public License as published by | ||
7 | the Free Software Foundation; either version 2, or (at your option) | ||
8 | any later version. | ||
9 | |||
10 | You should have received a copy of the GNU General Public License | ||
11 | (for example /usr/src/linux/COPYING); if not, write to the Free | ||
12 | Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
13 | */ | ||
14 | |||
15 | #ifndef _MD_K_H | ||
16 | #define _MD_K_H | ||
17 | |||
18 | /* and dm-bio-list.h is not under include/linux because.... ??? */ | ||
19 | #include "../../../drivers/md/dm-bio-list.h" | ||
20 | |||
21 | #ifdef CONFIG_BLOCK | ||
22 | |||
23 | #define LEVEL_MULTIPATH (-4) | ||
24 | #define LEVEL_LINEAR (-1) | ||
25 | #define LEVEL_FAULTY (-5) | ||
26 | |||
27 | /* we need a value for 'no level specified' and 0 | ||
28 | * means 'raid0', so we need something else. This is | ||
29 | * for internal use only | ||
30 | */ | ||
31 | #define LEVEL_NONE (-1000000) | ||
32 | |||
33 | #define MaxSector (~(sector_t)0) | ||
34 | |||
35 | typedef struct mddev_s mddev_t; | ||
36 | typedef struct mdk_rdev_s mdk_rdev_t; | ||
37 | |||
38 | /* | ||
39 | * options passed in raidrun: | ||
40 | */ | ||
41 | |||
42 | /* Currently this must fit in an 'int' */ | ||
43 | #define MAX_CHUNK_SIZE (1<<30) | ||
44 | |||
45 | /* | ||
46 | * MD's 'extended' device | ||
47 | */ | ||
48 | struct mdk_rdev_s | ||
49 | { | ||
50 | struct list_head same_set; /* RAID devices within the same set */ | ||
51 | |||
52 | sector_t size; /* Device size (in blocks) */ | ||
53 | mddev_t *mddev; /* RAID array if running */ | ||
54 | long last_events; /* IO event timestamp */ | ||
55 | |||
56 | struct block_device *bdev; /* block device handle */ | ||
57 | |||
58 | struct page *sb_page; | ||
59 | int sb_loaded; | ||
60 | __u64 sb_events; | ||
61 | sector_t data_offset; /* start of data in array */ | ||
62 | sector_t sb_start; /* offset of the super block (in 512byte sectors) */ | ||
63 | int sb_size; /* bytes in the superblock */ | ||
64 | int preferred_minor; /* autorun support */ | ||
65 | |||
66 | struct kobject kobj; | ||
67 | |||
68 | /* A device can be in one of three states based on two flags: | ||
69 | * Not working: faulty==1 in_sync==0 | ||
70 | * Fully working: faulty==0 in_sync==1 | ||
71 | * Working, but not | ||
72 | * in sync with array | ||
73 | * faulty==0 in_sync==0 | ||
74 | * | ||
75 | * It can never have faulty==1, in_sync==1 | ||
76 | * This reduces the burden of testing multiple flags in many cases | ||
77 | */ | ||
78 | |||
79 | unsigned long flags; | ||
80 | #define Faulty 1 /* device is known to have a fault */ | ||
81 | #define In_sync 2 /* device is in_sync with rest of array */ | ||
82 | #define WriteMostly 4 /* Avoid reading if at all possible */ | ||
83 | #define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ | ||
84 | #define AllReserved 6 /* If whole device is reserved for | ||
85 | * one array */ | ||
86 | #define AutoDetected 7 /* added by auto-detect */ | ||
87 | #define Blocked 8 /* An error occurred on an externally | ||
88 | * managed array, don't allow writes | ||
89 | * until it is cleared */ | ||
90 | #define StateChanged 9 /* Faulty or Blocked has changed during | ||
91 | * interrupt, so it needs to be | ||
92 | * notified by the thread */ | ||
93 | wait_queue_head_t blocked_wait; | ||
94 | |||
95 | int desc_nr; /* descriptor index in the superblock */ | ||
96 | int raid_disk; /* role of device in array */ | ||
97 | int saved_raid_disk; /* role that device used to have in the | ||
98 | * array and could again if we did a partial | ||
99 | * resync from the bitmap | ||
100 | */ | ||
101 | sector_t recovery_offset;/* If this device has been partially | ||
102 | * recovered, this is where we were | ||
103 | * up to. | ||
104 | */ | ||
105 | |||
106 | atomic_t nr_pending; /* number of pending requests. | ||
107 | * only maintained for arrays that | ||
108 | * support hot removal | ||
109 | */ | ||
110 | atomic_t read_errors; /* number of consecutive read errors that | ||
111 | * we have tried to ignore. | ||
112 | */ | ||
113 | atomic_t corrected_errors; /* number of corrected read errors, | ||
114 | * for reporting to userspace and storing | ||
115 | * in superblock. | ||
116 | */ | ||
117 | struct work_struct del_work; /* used for delayed sysfs removal */ | ||
118 | |||
119 | struct sysfs_dirent *sysfs_state; /* handle for 'state' | ||
120 | * sysfs entry */ | ||
121 | }; | ||
122 | |||
123 | struct mddev_s | ||
124 | { | ||
125 | void *private; | ||
126 | struct mdk_personality *pers; | ||
127 | dev_t unit; | ||
128 | int md_minor; | ||
129 | struct list_head disks; | ||
130 | unsigned long flags; | ||
131 | #define MD_CHANGE_DEVS 0 /* Some device status has changed */ | ||
132 | #define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ | ||
133 | #define MD_CHANGE_PENDING 2 /* superblock update in progress */ | ||
134 | |||
135 | int ro; | ||
136 | |||
137 | struct gendisk *gendisk; | ||
138 | |||
139 | struct kobject kobj; | ||
140 | int hold_active; | ||
141 | #define UNTIL_IOCTL 1 | ||
142 | #define UNTIL_STOP 2 | ||
143 | |||
144 | /* Superblock information */ | ||
145 | int major_version, | ||
146 | minor_version, | ||
147 | patch_version; | ||
148 | int persistent; | ||
149 | int external; /* metadata is | ||
150 | * managed externally */ | ||
151 | char metadata_type[17]; /* externally set*/ | ||
152 | int chunk_size; | ||
153 | time_t ctime, utime; | ||
154 | int level, layout; | ||
155 | char clevel[16]; | ||
156 | int raid_disks; | ||
157 | int max_disks; | ||
158 | sector_t size; /* used size of component devices */ | ||
159 | sector_t array_sectors; /* exported array size */ | ||
160 | __u64 events; | ||
161 | |||
162 | char uuid[16]; | ||
163 | |||
164 | /* If the array is being reshaped, we need to record the | ||
165 | * new shape and an indication of where we are up to. | ||
166 | * This is written to the superblock. | ||
167 | * If reshape_position is MaxSector, then no reshape is happening (yet). | ||
168 | */ | ||
169 | sector_t reshape_position; | ||
170 | int delta_disks, new_level, new_layout, new_chunk; | ||
171 | |||
172 | struct mdk_thread_s *thread; /* management thread */ | ||
173 | struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ | ||
174 | sector_t curr_resync; /* last block scheduled */ | ||
175 | unsigned long resync_mark; /* a recent timestamp */ | ||
176 | sector_t resync_mark_cnt;/* blocks written at resync_mark */ | ||
177 | sector_t curr_mark_cnt; /* blocks scheduled now */ | ||
178 | |||
179 | sector_t resync_max_sectors; /* may be set by personality */ | ||
180 | |||
181 | sector_t resync_mismatches; /* count of sectors where | ||
182 | * parity/replica mismatch found | ||
183 | */ | ||
184 | |||
185 | /* allow user-space to request suspension of IO to regions of the array */ | ||
186 | sector_t suspend_lo; | ||
187 | sector_t suspend_hi; | ||
188 | /* if zero, use the system-wide default */ | ||
189 | int sync_speed_min; | ||
190 | int sync_speed_max; | ||
191 | |||
192 | /* resync even though the same disks are shared among md-devices */ | ||
193 | int parallel_resync; | ||
194 | |||
195 | int ok_start_degraded; | ||
196 | /* recovery/resync flags | ||
197 | * NEEDED: we might need to start a resync/recover | ||
198 | * RUNNING: a thread is running, or about to be started | ||
199 | * SYNC: actually doing a resync, not a recovery | ||
200 | * RECOVER: doing recovery, or need to try it. | ||
201 | * INTR: resync needs to be aborted for some reason | ||
202 | * DONE: thread is done and is waiting to be reaped | ||
203 | * REQUEST: user-space has requested a sync (used with SYNC) | ||
204 | * CHECK: user-space request for check-only, no repair | ||
205 | * RESHAPE: A reshape is happening | ||
206 | * | ||
207 | * If neither SYNC nor RESHAPE is set, then it is a recovery. | ||
208 | */ | ||
209 | #define MD_RECOVERY_RUNNING 0 | ||
210 | #define MD_RECOVERY_SYNC 1 | ||
211 | #define MD_RECOVERY_RECOVER 2 | ||
212 | #define MD_RECOVERY_INTR 3 | ||
213 | #define MD_RECOVERY_DONE 4 | ||
214 | #define MD_RECOVERY_NEEDED 5 | ||
215 | #define MD_RECOVERY_REQUESTED 6 | ||
216 | #define MD_RECOVERY_CHECK 7 | ||
217 | #define MD_RECOVERY_RESHAPE 8 | ||
218 | #define MD_RECOVERY_FROZEN 9 | ||
219 | |||
220 | unsigned long recovery; | ||
221 | int recovery_disabled; /* if we detect that recovery | ||
222 | * will always fail, set this | ||
223 | * so we don't loop trying */ | ||
224 | |||
225 | int in_sync; /* know to not need resync */ | ||
226 | struct mutex reconfig_mutex; | ||
227 | atomic_t active; /* general refcount */ | ||
228 | atomic_t openers; /* number of active opens */ | ||
229 | |||
230 | int changed; /* true if we might need to reread partition info */ | ||
231 | int degraded; /* whether md should consider | ||
232 | * adding a spare | ||
233 | */ | ||
234 | int barriers_work; /* initialised to true, cleared as soon | ||
235 | * as a barrier request to slave | ||
236 | * fails. Only supported | ||
237 | */ | ||
238 | struct bio *biolist; /* bios that need to be retried | ||
239 | * because BIO_RW_BARRIER is not supported | ||
240 | */ | ||
241 | |||
242 | atomic_t recovery_active; /* blocks scheduled, but not written */ | ||
243 | wait_queue_head_t recovery_wait; | ||
244 | sector_t recovery_cp; | ||
245 | sector_t resync_min; /* user requested sync | ||
246 | * starts here */ | ||
247 | sector_t resync_max; /* resync should pause | ||
248 | * when it gets here */ | ||
249 | |||
250 | struct sysfs_dirent *sysfs_state; /* handle for 'array_state' | ||
251 | * file in sysfs. | ||
252 | */ | ||
253 | struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */ | ||
254 | |||
255 | struct work_struct del_work; /* used for delayed sysfs removal */ | ||
256 | |||
257 | spinlock_t write_lock; | ||
258 | wait_queue_head_t sb_wait; /* for waiting on superblock updates */ | ||
259 | atomic_t pending_writes; /* number of active superblock writes */ | ||
260 | |||
261 | unsigned int safemode; /* if set, update "clean" superblock | ||
262 | * when no writes pending. | ||
263 | */ | ||
264 | unsigned int safemode_delay; | ||
265 | struct timer_list safemode_timer; | ||
266 | atomic_t writes_pending; | ||
267 | struct request_queue *queue; /* for plugging ... */ | ||
268 | |||
269 | atomic_t write_behind; /* outstanding async IO */ | ||
270 | unsigned int max_write_behind; /* 0 = sync */ | ||
271 | |||
272 | struct bitmap *bitmap; /* the bitmap for the device */ | ||
273 | struct file *bitmap_file; /* the bitmap file */ | ||
274 | long bitmap_offset; /* offset from superblock of | ||
275 | * start of bitmap. May be | ||
276 | * negative, but not '0' | ||
277 | */ | ||
278 | long default_bitmap_offset; /* this is the offset to use when | ||
279 | * hot-adding a bitmap. It should | ||
280 | * eventually be settable by sysfs. | ||
281 | */ | ||
282 | |||
283 | struct list_head all_mddevs; | ||
284 | }; | ||
285 | |||
286 | |||
287 | static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev) | ||
288 | { | ||
289 | int faulty = test_bit(Faulty, &rdev->flags); | ||
290 | if (atomic_dec_and_test(&rdev->nr_pending) && faulty) | ||
291 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); | ||
292 | } | ||
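/*
 * Illustrative note, not in the original header: dropping the last
 * pending I/O reference on a device that has meanwhile been marked
 * Faulty raises MD_RECOVERY_NEEDED, so the md thread can notice and
 * finish detaching the failed device once nothing references it.
 */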
293 | |||
294 | static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) | ||
295 | { | ||
296 | atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); | ||
297 | } | ||
298 | |||
299 | struct mdk_personality | ||
300 | { | ||
301 | char *name; | ||
302 | int level; | ||
303 | struct list_head list; | ||
304 | struct module *owner; | ||
305 | int (*make_request)(struct request_queue *q, struct bio *bio); | ||
306 | int (*run)(mddev_t *mddev); | ||
307 | int (*stop)(mddev_t *mddev); | ||
308 | void (*status)(struct seq_file *seq, mddev_t *mddev); | ||
309 | /* error_handler must set ->faulty and clear ->in_sync | ||
310 | * if appropriate, and should abort recovery if needed | ||
311 | */ | ||
312 | void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev); | ||
313 | int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev); | ||
314 | int (*hot_remove_disk) (mddev_t *mddev, int number); | ||
315 | int (*spare_active) (mddev_t *mddev); | ||
316 | sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); | ||
317 | int (*resize) (mddev_t *mddev, sector_t sectors); | ||
318 | int (*check_reshape) (mddev_t *mddev); | ||
319 | int (*start_reshape) (mddev_t *mddev); | ||
320 | int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); | ||
321 | /* quiesce moves between quiescence states | ||
322 | * 0 - fully active | ||
323 | * 1 - no new requests allowed | ||
324 | * others - reserved | ||
325 | */ | ||
326 | void (*quiesce) (mddev_t *mddev, int state); | ||
327 | }; | ||
328 | |||
329 | |||
330 | struct md_sysfs_entry { | ||
331 | struct attribute attr; | ||
332 | ssize_t (*show)(mddev_t *, char *); | ||
333 | ssize_t (*store)(mddev_t *, const char *, size_t); | ||
334 | }; | ||
335 | |||
336 | |||
337 | static inline char * mdname (mddev_t * mddev) | ||
338 | { | ||
339 | return mddev->gendisk ? mddev->gendisk->disk_name : "mdX"; | ||
340 | } | ||
341 | |||
342 | /* | ||
343 | * iterates through some rdev ringlist. It's safe to remove the | ||
344 | * current 'rdev'. Don't touch 'tmp' though. | ||
345 | */ | ||
346 | #define rdev_for_each_list(rdev, tmp, head) \ | ||
347 | list_for_each_entry_safe(rdev, tmp, head, same_set) | ||
348 | |||
349 | /* | ||
350 | * iterates through the 'same array disks' ringlist | ||
351 | */ | ||
352 | #define rdev_for_each(rdev, tmp, mddev) \ | ||
353 | list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set) | ||
354 | |||
355 | #define rdev_for_each_rcu(rdev, mddev) \ | ||
356 | list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set) | ||
357 | |||
358 | typedef struct mdk_thread_s { | ||
359 | void (*run) (mddev_t *mddev); | ||
360 | mddev_t *mddev; | ||
361 | wait_queue_head_t wqueue; | ||
362 | unsigned long flags; | ||
363 | struct task_struct *tsk; | ||
364 | unsigned long timeout; | ||
365 | } mdk_thread_t; | ||
366 | |||
367 | #define THREAD_WAKEUP 0 | ||
368 | |||
369 | #define __wait_event_lock_irq(wq, condition, lock, cmd) \ | ||
370 | do { \ | ||
371 | wait_queue_t __wait; \ | ||
372 | init_waitqueue_entry(&__wait, current); \ | ||
373 | \ | ||
374 | add_wait_queue(&wq, &__wait); \ | ||
375 | for (;;) { \ | ||
376 | set_current_state(TASK_UNINTERRUPTIBLE); \ | ||
377 | if (condition) \ | ||
378 | break; \ | ||
379 | spin_unlock_irq(&lock); \ | ||
380 | cmd; \ | ||
381 | schedule(); \ | ||
382 | spin_lock_irq(&lock); \ | ||
383 | } \ | ||
384 | current->state = TASK_RUNNING; \ | ||
385 | remove_wait_queue(&wq, &__wait); \ | ||
386 | } while (0) | ||
387 | |||
388 | #define wait_event_lock_irq(wq, condition, lock, cmd) \ | ||
389 | do { \ | ||
390 | if (condition) \ | ||
391 | break; \ | ||
392 | __wait_event_lock_irq(wq, condition, lock, cmd); \ | ||
393 | } while (0) | ||
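/*
 * Hedged usage sketch (not part of the original header; the conf_t
 * field names and the unplug helper are illustrative): callers enter
 * with the spinlock held and sleep until the condition holds, with
 * 'cmd' run each time the lock is dropped around schedule(), e.g.
 *
 *	spin_lock_irq(&conf->resync_lock);
 *	wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 *			    conf->resync_lock,
 *			    unplug_slaves(conf->mddev));
 *	spin_unlock_irq(&conf->resync_lock);
 */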
394 | |||
395 | static inline void safe_put_page(struct page *p) | ||
396 | { | ||
397 | if (p) put_page(p); | ||
398 | } | ||
399 | |||
400 | #endif /* CONFIG_BLOCK */ | ||
401 | #endif | ||
402 | |||
diff --git a/include/linux/raid/md_u.h b/include/linux/raid/md_u.h index 7192035fc4b0..fb1abb3367e9 100644 --- a/include/linux/raid/md_u.h +++ b/include/linux/raid/md_u.h | |||
@@ -15,6 +15,24 @@ | |||
15 | #ifndef _MD_U_H | 15 | #ifndef _MD_U_H |
16 | #define _MD_U_H | 16 | #define _MD_U_H |
17 | 17 | ||
18 | /* | ||
19 | * Different major versions are not compatible. | ||
20 | * Different minor versions are only downward compatible. | ||
21 | * Different patchlevel versions are downward and upward compatible. | ||
22 | */ | ||
23 | #define MD_MAJOR_VERSION 0 | ||
24 | #define MD_MINOR_VERSION 90 | ||
25 | /* | ||
26 | * MD_PATCHLEVEL_VERSION indicates kernel functionality. | ||
27 | * >=1 means different superblock formats are selectable using SET_ARRAY_INFO | ||
28 | * and major_version/minor_version accordingly | ||
29 | * >=2 means that Internal bitmaps are supported by setting MD_SB_BITMAP_PRESENT | ||
30 | * in the super status byte | ||
31 | * >=3 means that bitmap superblock version 4 is supported, which uses | ||
32 | * little-endian representation rather than host-endian | ||
33 | */ | ||
34 | #define MD_PATCHLEVEL_VERSION 3 | ||
35 | |||
18 | /* ioctls */ | 36 | /* ioctls */ |
19 | 37 | ||
20 | /* status */ | 38 | /* status */ |
@@ -46,6 +64,12 @@ | |||
46 | #define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) | 64 | #define STOP_ARRAY_RO _IO (MD_MAJOR, 0x33) |
47 | #define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) | 65 | #define RESTART_ARRAY_RW _IO (MD_MAJOR, 0x34) |
48 | 66 | ||
67 | /* 63 partitions with the alternate major number (mdp) */ | ||
68 | #define MdpMinorShift 6 | ||
69 | #ifdef __KERNEL__ | ||
70 | extern int mdp_major; | ||
71 | #endif | ||
72 | |||
49 | typedef struct mdu_version_s { | 73 | typedef struct mdu_version_s { |
50 | int major; | 74 | int major; |
51 | int minor; | 75 | int minor; |
@@ -85,6 +109,17 @@ typedef struct mdu_array_info_s { | |||
85 | 109 | ||
86 | } mdu_array_info_t; | 110 | } mdu_array_info_t; |
87 | 111 | ||
112 | /* non-obvious values for 'level' */ | ||
113 | #define LEVEL_MULTIPATH (-4) | ||
114 | #define LEVEL_LINEAR (-1) | ||
115 | #define LEVEL_FAULTY (-5) | ||
116 | |||
117 | /* we need a value for 'no level specified' and 0 | ||
118 | * means 'raid0', so we need something else. This is | ||
119 | * for internal use only | ||
120 | */ | ||
121 | #define LEVEL_NONE (-1000000) | ||
122 | |||
88 | typedef struct mdu_disk_info_s { | 123 | typedef struct mdu_disk_info_s { |
89 | /* | 124 | /* |
90 | * configuration/status of one particular disk | 125 | * configuration/status of one particular disk |
diff --git a/include/linux/raid/multipath.h b/include/linux/raid/multipath.h deleted file mode 100644 index 6f53fc177a47..000000000000 --- a/include/linux/raid/multipath.h +++ /dev/null | |||
@@ -1,42 +0,0 @@ | |||
1 | #ifndef _MULTIPATH_H | ||
2 | #define _MULTIPATH_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | struct multipath_info { | ||
7 | mdk_rdev_t *rdev; | ||
8 | }; | ||
9 | |||
10 | struct multipath_private_data { | ||
11 | mddev_t *mddev; | ||
12 | struct multipath_info *multipaths; | ||
13 | int raid_disks; | ||
14 | int working_disks; | ||
15 | spinlock_t device_lock; | ||
16 | struct list_head retry_list; | ||
17 | |||
18 | mempool_t *pool; | ||
19 | }; | ||
20 | |||
21 | typedef struct multipath_private_data multipath_conf_t; | ||
22 | |||
23 | /* | ||
24 | * this is the only point in the RAID code where we violate | ||
25 | * C type safety. mddev->private is an 'opaque' pointer. | ||
26 | */ | ||
27 | #define mddev_to_conf(mddev) ((multipath_conf_t *) mddev->private) | ||
28 | |||
29 | /* | ||
30 | * this is our 'private' 'collective' MULTIPATH buffer head. | ||
31 | * it contains information about what kind of IO operations were started | ||
32 | * for this MULTIPATH operation, and about their status: | ||
33 | */ | ||
34 | |||
35 | struct multipath_bh { | ||
36 | mddev_t *mddev; | ||
37 | struct bio *master_bio; | ||
38 | struct bio bio; | ||
39 | int path; | ||
40 | struct list_head retry_list; | ||
41 | }; | ||
42 | #endif | ||
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h new file mode 100644 index 000000000000..d92480f8285c --- /dev/null +++ b/include/linux/raid/pq.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2003 H. Peter Anvin - All Rights Reserved | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation, Inc., 53 Temple Place Ste 330, | ||
8 | * Boston MA 02111-1307, USA; either version 2 of the License, or | ||
9 | * (at your option) any later version; incorporated herein by reference. | ||
10 | * | ||
11 | * ----------------------------------------------------------------------- */ | ||
12 | |||
13 | #ifndef LINUX_RAID_RAID6_H | ||
14 | #define LINUX_RAID_RAID6_H | ||
15 | |||
16 | #ifdef __KERNEL__ | ||
17 | |||
18 | /* Set to 1 to use kernel-wide empty_zero_page */ | ||
19 | #define RAID6_USE_EMPTY_ZERO_PAGE 0 | ||
20 | #include <linux/blkdev.h> | ||
21 | |||
22 | /* We need a pre-zeroed page... if we don't want to use the kernel-provided | ||
23 | one, define it here */ | ||
24 | #if RAID6_USE_EMPTY_ZERO_PAGE | ||
25 | # define raid6_empty_zero_page empty_zero_page | ||
26 | #else | ||
27 | extern const char raid6_empty_zero_page[PAGE_SIZE]; | ||
28 | #endif | ||
29 | |||
30 | #else /* ! __KERNEL__ */ | ||
31 | /* Used for testing in user space */ | ||
32 | |||
33 | #include <errno.h> | ||
34 | #include <inttypes.h> | ||
35 | #include <limits.h> | ||
36 | #include <stddef.h> | ||
37 | #include <sys/mman.h> | ||
38 | #include <sys/types.h> | ||
39 | |||
40 | /* Not standard, but glibc defines it */ | ||
41 | #define BITS_PER_LONG __WORDSIZE | ||
42 | |||
43 | typedef uint8_t u8; | ||
44 | typedef uint16_t u16; | ||
45 | typedef uint32_t u32; | ||
46 | typedef uint64_t u64; | ||
47 | |||
48 | #ifndef PAGE_SIZE | ||
49 | # define PAGE_SIZE 4096 | ||
50 | #endif | ||
51 | extern const char raid6_empty_zero_page[PAGE_SIZE]; | ||
52 | |||
53 | #define __init | ||
54 | #define __exit | ||
55 | #define __attribute_const__ __attribute__((const)) | ||
56 | #define noinline __attribute__((noinline)) | ||
57 | |||
58 | #define preempt_enable() | ||
59 | #define preempt_disable() | ||
60 | #define cpu_has_feature(x) 1 | ||
61 | #define enable_kernel_altivec() | ||
62 | #define disable_kernel_altivec() | ||
63 | |||
64 | #define EXPORT_SYMBOL(sym) | ||
65 | #define MODULE_LICENSE(licence) | ||
66 | #define subsys_initcall(x) | ||
67 | #define module_exit(x) | ||
68 | #endif /* __KERNEL__ */ | ||
69 | |||
70 | /* Routine choices */ | ||
71 | struct raid6_calls { | ||
72 | void (*gen_syndrome)(int, size_t, void **); | ||
73 | int (*valid)(void); /* Returns 1 if this routine set is usable */ | ||
74 | const char *name; /* Name of this routine set */ | ||
75 | int prefer; /* Has special performance attribute */ | ||
76 | }; | ||
77 | |||
78 | /* Selected algorithm */ | ||
79 | extern struct raid6_calls raid6_call; | ||
80 | |||
81 | /* Algorithm list */ | ||
82 | extern const struct raid6_calls * const raid6_algos[]; | ||
83 | int raid6_select_algo(void); | ||
84 | |||
85 | /* Return values from chk_syndrome */ | ||
86 | #define RAID6_OK 0 | ||
87 | #define RAID6_P_BAD 1 | ||
88 | #define RAID6_Q_BAD 2 | ||
89 | #define RAID6_PQ_BAD 3 | ||
90 | |||
91 | /* Galois field tables */ | ||
92 | extern const u8 raid6_gfmul[256][256] __attribute__((aligned(256))); | ||
93 | extern const u8 raid6_gfexp[256] __attribute__((aligned(256))); | ||
94 | extern const u8 raid6_gfinv[256] __attribute__((aligned(256))); | ||
95 | extern const u8 raid6_gfexi[256] __attribute__((aligned(256))); | ||
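/*
 * Illustrative note (not in the original header): these are lookup
 * tables over GF(2^8) with the RAID-6 generator polynomial, so for
 * example raid6_gfmul[a][b] is the field product (raid6_gfmul[2][3] == 6)
 * and raid6_gfmul[a][raid6_gfinv[a]] == 1 for any non-zero a.
 */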
96 | |||
97 | /* Recovery routines */ | ||
98 | void raid6_2data_recov(int disks, size_t bytes, int faila, int failb, | ||
99 | void **ptrs); | ||
100 | void raid6_datap_recov(int disks, size_t bytes, int faila, void **ptrs); | ||
101 | void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, | ||
102 | void **ptrs); | ||
103 | |||
104 | /* Some definitions to allow code to be compiled for testing in userspace */ | ||
105 | #ifndef __KERNEL__ | ||
106 | |||
107 | # define jiffies raid6_jiffies() | ||
108 | # define printk printf | ||
109 | # define GFP_KERNEL 0 | ||
110 | # define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \ | ||
111 | PROT_READ|PROT_WRITE, \ | ||
112 | MAP_PRIVATE|MAP_ANONYMOUS,\ | ||
113 | 0, 0)) | ||
114 | # define free_pages(x, y) munmap((void *)(x), (y)*PAGE_SIZE) | ||
115 | |||
116 | static inline void cpu_relax(void) | ||
117 | { | ||
118 | /* Nothing */ | ||
119 | } | ||
120 | |||
121 | #undef HZ | ||
122 | #define HZ 1000 | ||
123 | static inline uint32_t raid6_jiffies(void) | ||
124 | { | ||
125 | struct timeval tv; | ||
126 | gettimeofday(&tv, NULL); | ||
127 | return tv.tv_sec*1000 + tv.tv_usec/1000; | ||
128 | } | ||
129 | |||
130 | #endif /* ! __KERNEL__ */ | ||
131 | |||
132 | #endif /* LINUX_RAID_RAID6_H */ | ||
diff --git a/include/linux/raid/raid0.h b/include/linux/raid/raid0.h deleted file mode 100644 index fd42aa87c391..000000000000 --- a/include/linux/raid/raid0.h +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | #ifndef _RAID0_H | ||
2 | #define _RAID0_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | struct strip_zone | ||
7 | { | ||
8 | sector_t zone_start; /* Zone offset in md_dev (in sectors) */ | ||
9 | sector_t dev_start; /* Zone offset in real dev (in sectors) */ | ||
10 | sector_t sectors; /* Zone size in sectors */ | ||
11 | int nb_dev; /* # of devices attached to the zone */ | ||
12 | mdk_rdev_t **dev; /* Devices attached to the zone */ | ||
13 | }; | ||
14 | |||
15 | struct raid0_private_data | ||
16 | { | ||
17 | struct strip_zone **hash_table; /* Table of indexes into strip_zone */ | ||
18 | struct strip_zone *strip_zone; | ||
19 | mdk_rdev_t **devlist; /* lists of rdevs, pointed to by strip_zone->dev */ | ||
20 | int nr_strip_zones; | ||
21 | |||
22 | sector_t spacing; | ||
23 | int sector_shift; /* shift this before divide by spacing */ | ||
24 | }; | ||
25 | |||
26 | typedef struct raid0_private_data raid0_conf_t; | ||
27 | |||
28 | #define mddev_to_conf(mddev) ((raid0_conf_t *) mddev->private) | ||
29 | |||
30 | #endif | ||
diff --git a/include/linux/raid/raid1.h b/include/linux/raid/raid1.h deleted file mode 100644 index 0a9ba7c3302e..000000000000 --- a/include/linux/raid/raid1.h +++ /dev/null | |||
@@ -1,134 +0,0 @@ | |||
1 | #ifndef _RAID1_H | ||
2 | #define _RAID1_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | typedef struct mirror_info mirror_info_t; | ||
7 | |||
8 | struct mirror_info { | ||
9 | mdk_rdev_t *rdev; | ||
10 | sector_t head_position; | ||
11 | }; | ||
12 | |||
13 | /* | ||
14 | * memory pools need a pointer to the mddev, so they can force an unplug | ||
15 | * when memory is tight, and a count of the number of drives that the | ||
16 | * pool was allocated for, so they know how much to allocate and free. | ||
17 | * mddev->raid_disks cannot be used, as it can change while a pool is active. | ||
18 | * These two values are stored in a kmalloced struct. | ||
19 | */ | ||
20 | |||
21 | struct pool_info { | ||
22 | mddev_t *mddev; | ||
23 | int raid_disks; | ||
24 | }; | ||
25 | |||
26 | |||
27 | typedef struct r1bio_s r1bio_t; | ||
28 | |||
29 | struct r1_private_data_s { | ||
30 | mddev_t *mddev; | ||
31 | mirror_info_t *mirrors; | ||
32 | int raid_disks; | ||
33 | int last_used; | ||
34 | sector_t next_seq_sect; | ||
35 | spinlock_t device_lock; | ||
36 | |||
37 | struct list_head retry_list; | ||
38 | /* queue pending writes and submit them on unplug */ | ||
39 | struct bio_list pending_bio_list; | ||
40 | /* queue of writes that have been unplugged */ | ||
41 | struct bio_list flushing_bio_list; | ||
42 | |||
43 | /* for use when syncing mirrors: */ | ||
44 | |||
45 | spinlock_t resync_lock; | ||
46 | int nr_pending; | ||
47 | int nr_waiting; | ||
48 | int nr_queued; | ||
49 | int barrier; | ||
50 | sector_t next_resync; | ||
51 | int fullsync; /* set to 1 if a full sync is needed, | ||
52 | * (fresh device added). | ||
53 | * Cleared when a sync completes. | ||
54 | */ | ||
55 | |||
56 | wait_queue_head_t wait_barrier; | ||
57 | |||
58 | struct pool_info *poolinfo; | ||
59 | |||
60 | struct page *tmppage; | ||
61 | |||
62 | mempool_t *r1bio_pool; | ||
63 | mempool_t *r1buf_pool; | ||
64 | }; | ||
65 | |||
66 | typedef struct r1_private_data_s conf_t; | ||
67 | |||
68 | /* | ||
69 | * this is the only point in the RAID code where we violate | ||
70 | * C type safety. mddev->private is an 'opaque' pointer. | ||
71 | */ | ||
72 | #define mddev_to_conf(mddev) ((conf_t *) mddev->private) | ||
73 | |||
74 | /* | ||
75 | * this is our 'private' RAID1 bio. | ||
76 | * | ||
77 | * it contains information about what kind of IO operations were started | ||
78 | * for this RAID1 operation, and about their status: | ||
79 | */ | ||
80 | |||
81 | struct r1bio_s { | ||
82 | atomic_t remaining; /* 'have we finished' count, | ||
83 | * used from IRQ handlers | ||
84 | */ | ||
85 | atomic_t behind_remaining; /* number of write-behind ios remaining | ||
86 | * in this BehindIO request | ||
87 | */ | ||
88 | sector_t sector; | ||
89 | int sectors; | ||
90 | unsigned long state; | ||
91 | mddev_t *mddev; | ||
92 | /* | ||
93 | * original bio going to /dev/mdx | ||
94 | */ | ||
95 | struct bio *master_bio; | ||
96 | /* | ||
97 | * if the IO is in READ direction, then this is where we read | ||
98 | */ | ||
99 | int read_disk; | ||
100 | |||
101 | struct list_head retry_list; | ||
102 | struct bitmap_update *bitmap_update; | ||
103 | /* | ||
104 | * if the IO is in WRITE direction, then multiple bios are used. | ||
105 | * We choose the number when they are allocated. | ||
106 | */ | ||
107 | struct bio *bios[0]; | ||
108 | /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ | ||
109 | }; | ||
110 | |||
111 | /* when we get a read error on a read-only array, we redirect to another | ||
112 | * device without failing the first device, or trying to over-write to | ||
113 | * correct the read error. To keep track of bad blocks on a per-bio | ||
114 | * level, we store IO_BLOCKED in the appropriate 'bios' pointer | ||
115 | */ | ||
116 | #define IO_BLOCKED ((struct bio*)1) | ||
117 | |||
118 | /* bits for r1bio.state */ | ||
119 | #define R1BIO_Uptodate 0 | ||
120 | #define R1BIO_IsSync 1 | ||
121 | #define R1BIO_Degraded 2 | ||
122 | #define R1BIO_BehindIO 3 | ||
123 | #define R1BIO_Barrier 4 | ||
124 | #define R1BIO_BarrierRetry 5 | ||
125 | /* For write-behind requests, we call bi_end_io when | ||
126 | * the last non-write-behind device completes, providing | ||
127 | * any write was successful. Otherwise we call when | ||
128 | * any write-behind write succeeds, otherwise we call | ||
129 | * with failure when last write completes (and all failed). | ||
130 | * Record that bi_end_io was called with this flag... | ||
131 | */ | ||
132 | #define R1BIO_Returned 6 | ||
133 | |||
134 | #endif | ||
diff --git a/include/linux/raid/raid10.h b/include/linux/raid/raid10.h deleted file mode 100644 index e9091cfeb286..000000000000 --- a/include/linux/raid/raid10.h +++ /dev/null | |||
@@ -1,123 +0,0 @@ | |||
1 | #ifndef _RAID10_H | ||
2 | #define _RAID10_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | typedef struct mirror_info mirror_info_t; | ||
7 | |||
8 | struct mirror_info { | ||
9 | mdk_rdev_t *rdev; | ||
10 | sector_t head_position; | ||
11 | }; | ||
12 | |||
13 | typedef struct r10bio_s r10bio_t; | ||
14 | |||
15 | struct r10_private_data_s { | ||
16 | mddev_t *mddev; | ||
17 | mirror_info_t *mirrors; | ||
18 | int raid_disks; | ||
19 | spinlock_t device_lock; | ||
20 | |||
21 | /* geometry */ | ||
22 | int near_copies; /* number of copies laid out raid0 style */ | ||
23 | int far_copies; /* number of copies laid out | ||
24 | * at large strides across drives | ||
25 | */ | ||
26 | int far_offset; /* far_copies are offset by 1 stripe | ||
27 | * instead of many | ||
28 | */ | ||
29 | int copies; /* near_copies * far_copies. | ||
30 | * must be <= raid_disks | ||
31 | */ | ||
32 | sector_t stride; /* distance between far copies. | ||
33 | * This is size / far_copies unless | ||
34 | * far_offset, in which case it is | ||
35 | * 1 stripe. | ||
36 | */ | ||
37 | |||
38 | int chunk_shift; /* shift from chunks to sectors */ | ||
39 | sector_t chunk_mask; | ||
40 | |||
41 | struct list_head retry_list; | ||
42 | /* queue pending writes and submit them on unplug */ | ||
43 | struct bio_list pending_bio_list; | ||
44 | |||
45 | |||
46 | spinlock_t resync_lock; | ||
47 | int nr_pending; | ||
48 | int nr_waiting; | ||
49 | int nr_queued; | ||
50 | int barrier; | ||
51 | sector_t next_resync; | ||
52 | int fullsync; /* set to 1 if a full sync is needed, | ||
53 | * (fresh device added). | ||
54 | * Cleared when a sync completes. | ||
55 | */ | ||
56 | |||
57 | wait_queue_head_t wait_barrier; | ||
58 | |||
59 | mempool_t *r10bio_pool; | ||
60 | mempool_t *r10buf_pool; | ||
61 | struct page *tmppage; | ||
62 | }; | ||
63 | |||
64 | typedef struct r10_private_data_s conf_t; | ||
65 | |||
66 | /* | ||
67 | * this is the only point in the RAID code where we violate | ||
68 | * C type safety. mddev->private is an 'opaque' pointer. | ||
69 | */ | ||
70 | #define mddev_to_conf(mddev) ((conf_t *) mddev->private) | ||
71 | |||
72 | /* | ||
73 | * this is our 'private' RAID10 bio. | ||
74 | * | ||
75 | * it contains information about what kind of IO operations were started | ||
76 | * for this RAID10 operation, and about their status: | ||
77 | */ | ||
78 | |||
79 | struct r10bio_s { | ||
80 | atomic_t remaining; /* 'have we finished' count, | ||
81 | * used from IRQ handlers | ||
82 | */ | ||
83 | sector_t sector; /* virtual sector number */ | ||
84 | int sectors; | ||
85 | unsigned long state; | ||
86 | mddev_t *mddev; | ||
87 | /* | ||
88 | * original bio going to /dev/mdx | ||
89 | */ | ||
90 | struct bio *master_bio; | ||
91 | /* | ||
92 | * if the IO is in READ direction, then this is where we read | ||
93 | */ | ||
94 | int read_slot; | ||
95 | |||
96 | struct list_head retry_list; | ||
97 | /* | ||
98 | * if the IO is in WRITE direction, then multiple bios are used, | ||
99 | * one for each copy. | ||
100 | * When resyncing we also use one for each copy. | ||
101 | * When reconstructing, we use 2 bios, one for read, one for write. | ||
102 | * We choose the number when they are allocated. | ||
103 | */ | ||
104 | struct { | ||
105 | struct bio *bio; | ||
106 | sector_t addr; | ||
107 | int devnum; | ||
108 | } devs[0]; | ||
109 | }; | ||
110 | |||
111 | /* when we get a read error on a read-only array, we redirect to another | ||
112 | * device without failing the first device, or trying to over-write to | ||
113 | * correct the read error. To keep track of bad blocks on a per-bio | ||
114 | * level, we store IO_BLOCKED in the appropriate 'bios' pointer | ||
115 | */ | ||
116 | #define IO_BLOCKED ((struct bio*)1) | ||
117 | |||
118 | /* bits for r10bio.state */ | ||
119 | #define R10BIO_Uptodate 0 | ||
120 | #define R10BIO_IsSync 1 | ||
121 | #define R10BIO_IsRecover 2 | ||
122 | #define R10BIO_Degraded 3 | ||
123 | #endif | ||
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h deleted file mode 100644 index 3b2672792457..000000000000 --- a/include/linux/raid/raid5.h +++ /dev/null | |||
@@ -1,402 +0,0 @@ | |||
1 | #ifndef _RAID5_H | ||
2 | #define _RAID5_H | ||
3 | |||
4 | #include <linux/raid/md.h> | ||
5 | #include <linux/raid/xor.h> | ||
6 | |||
7 | /* | ||
8 | * | ||
9 | * Each stripe contains one buffer per disc. Each buffer can be in | ||
10 | * one of a number of states stored in "flags". Changes between | ||
11 | * these states happen *almost* exclusively under a per-stripe | ||
12 | * spinlock. Some very specific changes can happen in bi_end_io, and | ||
13 | * these are not protected by the spin lock. | ||
14 | * | ||
15 | * The flag bits that are used to represent these states are: | ||
16 | * R5_UPTODATE and R5_LOCKED | ||
17 | * | ||
18 | * State Empty == !UPTODATE, !LOCK | ||
19 | * We have no data, and there is no active request | ||
20 | * State Want == !UPTODATE, LOCK | ||
21 | * A read request is being submitted for this block | ||
22 | * State Dirty == UPTODATE, LOCK | ||
23 | * Some new data is in this buffer, and it is being written out | ||
24 | * State Clean == UPTODATE, !LOCK | ||
25 | * We have valid data which is the same as on disc | ||
26 | * | ||
27 | * The possible state transitions are: | ||
28 | * | ||
29 | * Empty -> Want - on read or write to get old data for parity calc | ||
30 | * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE) | ||
31 | * Empty -> Clean - on compute_block when computing a block for failed drive | ||
32 | * Want -> Empty - on failed read | ||
33 | * Want -> Clean - on successful completion of read request | ||
34 | * Dirty -> Clean - on successful completion of write request | ||
35 | * Dirty -> Clean - on failed write | ||
36 | * Clean -> Dirty - on compute_parity to satisfy write/sync (RECONSTRUCT or RMW) | ||
37 | * | ||
38 | * The Want->Empty, Want->Clean, Dirty->Clean, transitions | ||
39 | * all happen in b_end_io at interrupt time. | ||
40 | * Each sets the Uptodate bit before releasing the Lock bit. | ||
41 | * This leaves one multi-stage transition: | ||
42 | * Want->Dirty->Clean | ||
43 | * This is safe because thinking that a Clean buffer is actually dirty | ||
44 | * will at worst delay some action, and the stripe will be scheduled | ||
45 | * for attention after the transition is complete. | ||
46 | * | ||
47 | * There is one possibility that is not covered by these states. That | ||
48 | * is if one drive has failed and there is a spare being rebuilt. We | ||
49 | * can't distinguish between a clean block that has been generated | ||
50 | * from parity calculations, and a clean block that has been | ||
51 | * successfully written to the spare ( or to parity when resyncing). | ||
52 | * To distinguish these states we have a stripe bit STRIPE_INSYNC that | ||
53 | * is set whenever a write is scheduled to the spare, or to the parity | ||
54 | * disc if there is no spare. A sync request clears this bit, and | ||
55 | * when we find it set with no buffers locked, we know the sync is | ||
56 | * complete. | ||
57 | * | ||
58 | * Buffers for the md device that arrive via make_request are attached | ||
59 | * to the appropriate stripe in one of two lists linked on b_reqnext. | ||
60 | * One list (bh_read) for read requests, one (bh_write) for write. | ||
61 | * There should never be more than one buffer on the two lists | ||
62 | * together, but we are not guaranteed of that so we allow for more. | ||
63 | * | ||
64 | * If a buffer is on the read list when the associated cache buffer is | ||
65 | * Uptodate, the data is copied into the read buffer and its b_end_io | ||
66 | * routine is called. This may happen in the end_request routine only | ||
67 | * if the buffer has just successfully been read. end_request should | ||
68 | * remove the buffers from the list and then set the Uptodate bit on | ||
69 | * the buffer. Other threads may do this only if they first check | ||
70 | * that the Uptodate bit is set. Once they have checked that they may | ||
71 | * take buffers off the read queue. | ||
72 | * | ||
73 | * When a buffer on the write list is committed for write it is copied | ||
74 | * into the cache buffer, which is then marked dirty, and moved onto a | ||
75 | * third list, the written list (bh_written). Once both the parity | ||
76 | * block and the cached buffer are successfully written, any buffer on | ||
77 | * a written list can be returned with b_end_io. | ||
78 | * | ||
79 | * The write list and read list both act as fifos. The read list is | ||
80 | * protected by the device_lock. The write and written lists are | ||
81 | * protected by the stripe lock. The device_lock, which can be | ||
82 | * claimed while the stripe lock is held, is only for list | ||
83 | * manipulations and will only be held for a very short time. It can | ||
84 | * be claimed from interrupts. | ||
85 | * | ||
86 | * | ||
87 | * Stripes in the stripe cache can be on one of two lists (or on | ||
88 | * neither). The "inactive_list" contains stripes which are not | ||
89 | * currently being used for any request. They can freely be reused | ||
90 | * for another stripe. The "handle_list" contains stripes that need | ||
91 | * to be handled in some way. Both of these are fifo queues. Each | ||
92 | * stripe is also (potentially) linked to a hash bucket in the hash | ||
93 | * table so that it can be found by sector number. Stripes that are | ||
94 | * not hashed must be on the inactive_list, and will normally be at | ||
95 | * the front. All stripes start life this way. | ||
96 | * | ||
97 | * The inactive_list, handle_list and hash bucket lists are all protected by the | ||
98 | * device_lock. | ||
99 | * - stripes on the inactive_list never have their stripe_lock held. | ||
100 | * - stripes have a reference counter. If count==0, they are on a list. | ||
101 | * - If a stripe might need handling, STRIPE_HANDLE is set. | ||
102 | * - When refcount reaches zero, then if STRIPE_HANDLE it is put on | ||
103 | * handle_list else inactive_list | ||
104 | * | ||
105 | * This, combined with the fact that STRIPE_HANDLE is only ever | ||
106 | * cleared while a stripe has a non-zero count means that if the | ||
107 | * refcount is 0 and STRIPE_HANDLE is set, then it is on the | ||
108 | * handle_list and if refcount is 0 and STRIPE_HANDLE is not set, then | ||
109 | * the stripe is on inactive_list. | ||
110 | * | ||
111 | * The possible transitions are: | ||
112 | * activate an unhashed/inactive stripe (get_active_stripe()) | ||
113 | * lockdev check-hash unlink-stripe cnt++ clean-stripe hash-stripe unlockdev | ||
114 | * activate a hashed, possibly active stripe (get_active_stripe()) | ||
115 | * lockdev check-hash if(!cnt++)unlink-stripe unlockdev | ||
116 | * attach a request to an active stripe (add_stripe_bh()) | ||
117 | * lockdev attach-buffer unlockdev | ||
118 | * handle a stripe (handle_stripe()) | ||
119 | * lockstripe clrSTRIPE_HANDLE ... | ||
120 | * (lockdev check-buffers unlockdev) .. | ||
121 | * change-state .. | ||
122 | * record io/ops needed unlockstripe schedule io/ops | ||
123 | * release an active stripe (release_stripe()) | ||
124 | * lockdev if (!--cnt) { if STRIPE_HANDLE, add to handle_list else add to inactive-list } unlockdev | ||
125 | * | ||
126 | * The refcount counts each thread that has activated the stripe, | ||
127 | * plus raid5d if it is handling it, plus one for each active request | ||
128 | * on a cached buffer, and plus one if the stripe is undergoing stripe | ||
129 | * operations. | ||
130 | * | ||
131 | * Stripe operations are performed outside the stripe lock, | ||
132 | * the stripe operations are: | ||
133 | * -copying data between the stripe cache and user application buffers | ||
134 | * -computing blocks to save a disk access, or to recover a missing block | ||
135 | * -updating the parity on a write operation (reconstruct write and | ||
136 | * read-modify-write) | ||
137 | * -checking parity correctness | ||
138 | * -running i/o to disk | ||
139 | * These operations are carried out by raid5_run_ops which uses the async_tx | ||
140 | * api to (optionally) offload operations to dedicated hardware engines. | ||
141 | * When requesting an operation, handle_stripe sets the pending bit for the | ||
142 | * operation and increments the count. raid5_run_ops is then run whenever | ||
143 | * the count is non-zero. | ||
144 | * There are some critical dependencies between the operations that prevent some | ||
145 | * from being requested while another is in flight. | ||
146 | * 1/ Parity check operations destroy the in-cache version of the parity block, | ||
147 | * so we prevent parity-dependent operations like writes and compute_blocks | ||
148 | * from starting while a check is in progress. Some dma engines can perform | ||
149 | * the check without damaging the parity block; in these cases the parity | ||
150 | * block is re-marked up to date (assuming the check was successful) and is | ||
151 | * not re-read from disk. | ||
152 | * 2/ When a write operation is requested we immediately lock the affected | ||
153 | * blocks, and mark them as not up to date. This causes new read requests | ||
154 | * to be held off, as well as parity checks and compute block operations. | ||
155 | * 3/ Once a compute block operation has been requested handle_stripe treats | ||
156 | * that block as if it is up to date. raid5_run_ops guarantees that any | ||
157 | * operation that is dependent on the compute block result is initiated after | ||
158 | * the compute block completes. | ||
159 | */ | ||
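A minimal sketch of the request model described above (pending bit plus reference count); this follows the documented rules rather than the exact handle_stripe code, and the helper name is invented:

	static void request_compute_sketch(struct stripe_head *sh,
					   struct stripe_head_state *s, int target)
	{
		struct r5dev *dev = &sh->dev[target];

		sh->ops.target = target;		/* block to (re)compute */
		set_bit(R5_Wantcompute, &dev->flags);	/* treated as uptodate, per rule 3 above */
		set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
		s->req_compute = 1;
		atomic_inc(&sh->count);			/* "+1 while undergoing stripe operations" */
	}

raid5_run_ops then dispatches on the recorded ops_request bits once handle_stripe has finished collecting requests.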
160 | |||
161 | /* | ||
162 | * Operations state - intermediate states that are visible outside of sh->lock | ||
163 | * In general _idle indicates nothing is running, _run indicates a data | ||
164 | * processing operation is active, and _result means the data processing result | ||
165 | * is stable and can be acted upon. For simple operations like biofill and | ||
166 | * compute that only have an _idle and _run state they are indicated with | ||
167 | * sh->state flags (STRIPE_BIOFILL_RUN and STRIPE_COMPUTE_RUN) | ||
168 | */ | ||
169 | /** | ||
170 | * enum check_states - handles syncing / repairing a stripe | ||
171 | * @check_state_idle - check operations are quiesced | ||
172 | * @check_state_run - check operation is running | ||
173 | * @check_state_check_result - set outside lock when check result is valid | ||
174 | * @check_state_compute_run - check failed and we are repairing | ||
175 | * @check_state_compute_result - set outside lock when compute result is valid | ||
176 | */ | ||
177 | enum check_states { | ||
178 | check_state_idle = 0, | ||
179 | check_state_run, /* parity check */ | ||
180 | check_state_check_result, | ||
181 | check_state_compute_run, /* parity repair */ | ||
182 | check_state_compute_result, | ||
183 | }; | ||
184 | |||
185 | /** | ||
186 | * enum reconstruct_states - handles writing or expanding a stripe | ||
187 | */ | ||
188 | enum reconstruct_states { | ||
189 | reconstruct_state_idle = 0, | ||
190 | reconstruct_state_prexor_drain_run, /* prexor-write */ | ||
191 | reconstruct_state_drain_run, /* write */ | ||
192 | reconstruct_state_run, /* expand */ | ||
193 | reconstruct_state_prexor_drain_result, | ||
194 | reconstruct_state_drain_result, | ||
195 | reconstruct_state_result, | ||
196 | }; | ||
197 | |||
198 | struct stripe_head { | ||
199 | struct hlist_node hash; | ||
200 | struct list_head lru; /* inactive_list or handle_list */ | ||
201 | struct raid5_private_data *raid_conf; | ||
202 | sector_t sector; /* sector of this row */ | ||
203 | int pd_idx; /* parity disk index */ | ||
204 | unsigned long state; /* state flags */ | ||
205 | atomic_t count; /* nr of active thread/requests */ | ||
206 | spinlock_t lock; | ||
207 | int bm_seq; /* sequence number for bitmap flushes */ | ||
208 | int disks; /* disks in stripe */ | ||
209 | enum check_states check_state; | ||
210 | enum reconstruct_states reconstruct_state; | ||
211 | /* stripe_operations | ||
212 | * @target - STRIPE_OP_COMPUTE_BLK target | ||
213 | */ | ||
214 | struct stripe_operations { | ||
215 | int target; | ||
216 | u32 zero_sum_result; | ||
217 | } ops; | ||
218 | struct r5dev { | ||
219 | struct bio req; | ||
220 | struct bio_vec vec; | ||
221 | struct page *page; | ||
222 | struct bio *toread, *read, *towrite, *written; | ||
223 | sector_t sector; /* sector of this page */ | ||
224 | unsigned long flags; | ||
225 | } dev[1]; /* allocated with extra space depending on RAID geometry */ | ||
226 | }; | ||
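A minimal sketch of how the trailing dev[1] array is sized (illustrative; the structure below declares a slab_cache used for allocating stripes, but plain kzalloc() keeps this sketch self-contained):

	static struct stripe_head *alloc_stripe_sketch(int disks)
	{
		struct stripe_head *sh;

		/* one r5dev is inside the struct; add room for the remaining disks */
		sh = kzalloc(sizeof(*sh) + (disks - 1) * sizeof(struct r5dev),
			     GFP_KERNEL);
		if (sh)
			sh->disks = disks;
		return sh;
	}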
227 | |||
228 | /* stripe_head_state - collects and tracks the dynamic state of a stripe_head | ||
229 | * for handle_stripe. It is only valid under spin_lock(sh->lock); | ||
230 | */ | ||
231 | struct stripe_head_state { | ||
232 | int syncing, expanding, expanded; | ||
233 | int locked, uptodate, to_read, to_write, failed, written; | ||
234 | int to_fill, compute, req_compute, non_overwrite; | ||
235 | int failed_num; | ||
236 | unsigned long ops_request; | ||
237 | }; | ||
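For illustration only, a hedged sketch of how handle_stripe might fill this structure: zero it, then count per-device conditions while holding sh->lock (the real accounting in raid5.c is more involved):

	static void collect_state_sketch(struct stripe_head *sh,
					 struct stripe_head_state *s)
	{
		int i;

		memset(s, 0, sizeof(*s));
		for (i = 0; i < sh->disks; i++) {
			struct r5dev *dev = &sh->dev[i];

			if (test_bit(R5_LOCKED, &dev->flags))
				s->locked++;
			if (test_bit(R5_UPTODATE, &dev->flags))
				s->uptodate++;
			if (dev->toread)
				s->to_read++;
			if (dev->towrite)
				s->to_write++;
		}
	}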
238 | |||
239 | /* r6_state - extra state data only relevant to r6 */ | ||
240 | struct r6_state { | ||
241 | int p_failed, q_failed, qd_idx, failed_num[2]; | ||
242 | }; | ||
243 | |||
244 | /* Flags */ | ||
245 | #define R5_UPTODATE 0 /* page contains current data */ | ||
246 | #define R5_LOCKED 1 /* IO has been submitted on "req" */ | ||
247 | #define R5_OVERWRITE 2 /* towrite covers whole page */ | ||
248 | /* and some that are internal to handle_stripe */ | ||
249 | #define R5_Insync 3 /* rdev && rdev->in_sync at start */ | ||
250 | #define R5_Wantread 4 /* want to schedule a read */ | ||
251 | #define R5_Wantwrite 5 | ||
252 | #define R5_Overlap 7 /* There is a pending overlapping request on this block */ | ||
253 | #define R5_ReadError 8 /* seen a read error here recently */ | ||
254 | #define R5_ReWrite 9 /* have tried to over-write the readerror */ | ||
255 | |||
256 | #define R5_Expanded 10 /* This block now has post-expand data */ | ||
257 | #define R5_Wantcompute 11 /* compute_block in progress treat as | ||
258 | * uptodate | ||
259 | */ | ||
260 | #define R5_Wantfill 12 /* dev->toread contains a bio that needs | ||
261 | * filling | ||
262 | */ | ||
263 | #define R5_Wantdrain 13 /* dev->towrite needs to be drained */ | ||
264 | /* | ||
265 | * Write method | ||
266 | */ | ||
267 | #define RECONSTRUCT_WRITE 1 | ||
268 | #define READ_MODIFY_WRITE 2 | ||
269 | /* not a write method, but a compute_parity mode */ | ||
270 | #define CHECK_PARITY 3 | ||
271 | |||
272 | /* | ||
273 | * Stripe state | ||
274 | */ | ||
275 | #define STRIPE_HANDLE 2 | ||
276 | #define STRIPE_SYNCING 3 | ||
277 | #define STRIPE_INSYNC 4 | ||
278 | #define STRIPE_PREREAD_ACTIVE 5 | ||
279 | #define STRIPE_DELAYED 6 | ||
280 | #define STRIPE_DEGRADED 7 | ||
281 | #define STRIPE_BIT_DELAY 8 | ||
282 | #define STRIPE_EXPANDING 9 | ||
283 | #define STRIPE_EXPAND_SOURCE 10 | ||
284 | #define STRIPE_EXPAND_READY 11 | ||
285 | #define STRIPE_IO_STARTED 12 /* do not count towards 'bypass_count' */ | ||
286 | #define STRIPE_FULL_WRITE 13 /* all blocks are set to be overwritten */ | ||
287 | #define STRIPE_BIOFILL_RUN 14 | ||
288 | #define STRIPE_COMPUTE_RUN 15 | ||
289 | /* | ||
290 | * Operation request flags | ||
291 | */ | ||
292 | #define STRIPE_OP_BIOFILL 0 | ||
293 | #define STRIPE_OP_COMPUTE_BLK 1 | ||
294 | #define STRIPE_OP_PREXOR 2 | ||
295 | #define STRIPE_OP_BIODRAIN 3 | ||
296 | #define STRIPE_OP_POSTXOR 4 | ||
297 | #define STRIPE_OP_CHECK 5 | ||
298 | |||
299 | /* | ||
300 | * Plugging: | ||
301 | * | ||
302 | * To improve write throughput, we need to delay the handling of some | ||
303 | * stripes until there has been a chance that several write requests | ||
304 | * for the one stripe have all been collected. | ||
305 | * In particular, any write request that would require pre-reading | ||
306 | * is put on a "delayed" queue until there are no stripes currently | ||
307 | * in a pre-read phase. Further, if the "delayed" queue is empty when | ||
308 | * a stripe is put on it then we "plug" the queue and do not process it | ||
309 | * until an unplug call is made (i.e. unplug_io_fn() is called). | ||
310 | * | ||
311 | * When preread is initiated on a stripe, we set PREREAD_ACTIVE and add | ||
312 | * it to the count of prereading stripes. | ||
313 | * When write is initiated, or the stripe refcnt == 0 (just in case) we | ||
314 | * clear the PREREAD_ACTIVE flag and decrement the count | ||
315 | * Whenever the 'handle' queue is empty and the device is not plugged, we | ||
316 | * move any stripes from delayed to handle and clear the DELAYED flag and set | ||
317 | * PREREAD_ACTIVE. | ||
318 | * In handle_stripe, if we find pre-reading is necessary, we do it if | ||
319 | * PREREAD_ACTIVE is set, else we set DELAYED, which will send it to the delayed queue. | ||
320 | * HANDLE gets cleared if handle_stripe leaves nothing locked. | ||
321 | */ | ||
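A minimal sketch of the per-stripe decision described above (illustrative only): a stripe that needs pre-reading proceeds if it already holds PREREAD_ACTIVE, otherwise it is marked DELAYED (and HANDLE) so that it ends up on the delayed list until an unplug promotes it back to handle_list:

	static void maybe_delay_preread_sketch(struct stripe_head *sh)
	{
		if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
			/* ... schedule the pre-reads now ... */
		} else {
			set_bit(STRIPE_DELAYED, &sh->state);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}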
322 | |||
323 | |||
324 | struct disk_info { | ||
325 | mdk_rdev_t *rdev; | ||
326 | }; | ||
327 | |||
328 | struct raid5_private_data { | ||
329 | struct hlist_head *stripe_hashtbl; | ||
330 | mddev_t *mddev; | ||
331 | struct disk_info *spare; | ||
332 | int chunk_size, level, algorithm; | ||
333 | int max_degraded; | ||
334 | int raid_disks; | ||
335 | int max_nr_stripes; | ||
336 | |||
337 | /* used during an expand */ | ||
338 | sector_t expand_progress; /* MaxSector when no expand happening */ | ||
339 | sector_t expand_lo; /* from here up to expand_progress it is out-of-bounds | ||
340 | * as we haven't flushed the metadata yet | ||
341 | */ | ||
342 | int previous_raid_disks; | ||
343 | |||
344 | struct list_head handle_list; /* stripes needing handling */ | ||
345 | struct list_head hold_list; /* preread ready stripes */ | ||
346 | struct list_head delayed_list; /* stripes that have plugged requests */ | ||
347 | struct list_head bitmap_list; /* stripes delayed awaiting bitmap update */ | ||
348 | struct bio *retry_read_aligned; /* currently retrying aligned bios */ | ||
349 | struct bio *retry_read_aligned_list; /* aligned bios retry list */ | ||
350 | atomic_t preread_active_stripes; /* stripes with scheduled io */ | ||
351 | atomic_t active_aligned_reads; | ||
352 | atomic_t pending_full_writes; /* full write backlog */ | ||
353 | int bypass_count; /* bypassed prereads */ | ||
354 | int bypass_threshold; /* preread nice */ | ||
355 | struct list_head *last_hold; /* detect hold_list promotions */ | ||
356 | |||
357 | atomic_t reshape_stripes; /* stripes with pending writes for reshape */ | ||
358 | /* unfortunately we need two cache names as we temporarily have | ||
359 | * two caches. | ||
360 | */ | ||
361 | int active_name; | ||
362 | char cache_name[2][20]; | ||
363 | struct kmem_cache *slab_cache; /* for allocating stripes */ | ||
364 | |||
365 | int seq_flush, seq_write; | ||
366 | int quiesce; | ||
367 | |||
368 | int fullsync; /* set to 1 if a full sync is needed, | ||
369 | * (fresh device added). | ||
370 | * Cleared when a sync completes. | ||
371 | */ | ||
372 | |||
373 | struct page *spare_page; /* Used when checking P/Q in raid6 */ | ||
374 | |||
375 | /* | ||
376 | * Free stripes pool | ||
377 | */ | ||
378 | atomic_t active_stripes; | ||
379 | struct list_head inactive_list; | ||
380 | wait_queue_head_t wait_for_stripe; | ||
381 | wait_queue_head_t wait_for_overlap; | ||
382 | int inactive_blocked; /* release of inactive stripes blocked, | ||
383 | * waiting for 25% to be free | ||
384 | */ | ||
385 | int pool_size; /* number of disks in stripeheads in pool */ | ||
386 | spinlock_t device_lock; | ||
387 | struct disk_info *disks; | ||
388 | }; | ||
389 | |||
390 | typedef struct raid5_private_data raid5_conf_t; | ||
391 | |||
392 | #define mddev_to_conf(mddev) ((raid5_conf_t *) mddev->private) | ||
393 | |||
394 | /* | ||
395 | * Our supported algorithms | ||
396 | */ | ||
397 | #define ALGORITHM_LEFT_ASYMMETRIC 0 | ||
398 | #define ALGORITHM_RIGHT_ASYMMETRIC 1 | ||
399 | #define ALGORITHM_LEFT_SYMMETRIC 2 | ||
400 | #define ALGORITHM_RIGHT_SYMMETRIC 3 | ||
401 | |||
402 | #endif | ||
diff --git a/include/linux/raid/xor.h b/include/linux/raid/xor.h index 3e120587eada..5a210959e3f8 100644 --- a/include/linux/raid/xor.h +++ b/include/linux/raid/xor.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _XOR_H | 1 | #ifndef _XOR_H |
2 | #define _XOR_H | 2 | #define _XOR_H |
3 | 3 | ||
4 | #include <linux/raid/md.h> | ||
5 | |||
6 | #define MAX_XOR_BLOCKS 4 | 4 | #define MAX_XOR_BLOCKS 4 |
7 | 5 | ||
8 | extern void xor_blocks(unsigned int count, unsigned int bytes, | 6 | extern void xor_blocks(unsigned int count, unsigned int bytes, |
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h index 80044a4f3ab9..bfd92e1e5d2c 100644 --- a/include/linux/rcuclassic.h +++ b/include/linux/rcuclassic.h | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/cache.h> | 36 | #include <linux/cache.h> |
37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
38 | #include <linux/threads.h> | 38 | #include <linux/threads.h> |
39 | #include <linux/percpu.h> | ||
40 | #include <linux/cpumask.h> | 39 | #include <linux/cpumask.h> |
41 | #include <linux/seqlock.h> | 40 | #include <linux/seqlock.h> |
42 | 41 | ||
@@ -108,25 +107,14 @@ struct rcu_data { | |||
108 | struct rcu_head barrier; | 107 | struct rcu_head barrier; |
109 | }; | 108 | }; |
110 | 109 | ||
111 | DECLARE_PER_CPU(struct rcu_data, rcu_data); | ||
112 | DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); | ||
113 | |||
114 | /* | 110 | /* |
115 | * Increment the quiescent state counter. | 111 | * Increment the quiescent state counter. |
116 | * The counter is a bit degenerated: We do not need to know | 112 | * The counter is a bit degenerated: We do not need to know |
117 | * how many quiescent states passed, just if there was at least | 113 | * how many quiescent states passed, just if there was at least |
118 | * one since the start of the grace period. Thus just a flag. | 114 | * one since the start of the grace period. Thus just a flag. |
119 | */ | 115 | */ |
120 | static inline void rcu_qsctr_inc(int cpu) | 116 | extern void rcu_qsctr_inc(int cpu); |
121 | { | 117 | extern void rcu_bh_qsctr_inc(int cpu); |
122 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | ||
123 | rdp->passed_quiesc = 1; | ||
124 | } | ||
125 | static inline void rcu_bh_qsctr_inc(int cpu) | ||
126 | { | ||
127 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); | ||
128 | rdp->passed_quiesc = 1; | ||
129 | } | ||
130 | 118 | ||
131 | extern int rcu_pending(int cpu); | 119 | extern int rcu_pending(int cpu); |
132 | extern int rcu_needs_cpu(int cpu); | 120 | extern int rcu_needs_cpu(int cpu); |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 528343e6da51..15fbb3ca634d 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <linux/cache.h> | 36 | #include <linux/cache.h> |
37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
38 | #include <linux/threads.h> | 38 | #include <linux/threads.h> |
39 | #include <linux/percpu.h> | ||
40 | #include <linux/cpumask.h> | 39 | #include <linux/cpumask.h> |
41 | #include <linux/seqlock.h> | 40 | #include <linux/seqlock.h> |
42 | #include <linux/lockdep.h> | 41 | #include <linux/lockdep.h> |
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h index 74304b4538d8..fce522782ffa 100644 --- a/include/linux/rcupreempt.h +++ b/include/linux/rcupreempt.h | |||
@@ -36,34 +36,19 @@ | |||
36 | #include <linux/cache.h> | 36 | #include <linux/cache.h> |
37 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
38 | #include <linux/threads.h> | 38 | #include <linux/threads.h> |
39 | #include <linux/percpu.h> | 39 | #include <linux/smp.h> |
40 | #include <linux/cpumask.h> | 40 | #include <linux/cpumask.h> |
41 | #include <linux/seqlock.h> | 41 | #include <linux/seqlock.h> |
42 | 42 | ||
43 | struct rcu_dyntick_sched { | 43 | extern void rcu_qsctr_inc(int cpu); |
44 | int dynticks; | 44 | static inline void rcu_bh_qsctr_inc(int cpu) { } |
45 | int dynticks_snap; | ||
46 | int sched_qs; | ||
47 | int sched_qs_snap; | ||
48 | int sched_dynticks_snap; | ||
49 | }; | ||
50 | |||
51 | DECLARE_PER_CPU(struct rcu_dyntick_sched, rcu_dyntick_sched); | ||
52 | |||
53 | static inline void rcu_qsctr_inc(int cpu) | ||
54 | { | ||
55 | struct rcu_dyntick_sched *rdssp = &per_cpu(rcu_dyntick_sched, cpu); | ||
56 | |||
57 | rdssp->sched_qs++; | ||
58 | } | ||
59 | #define rcu_bh_qsctr_inc(cpu) | ||
60 | 45 | ||
61 | /* | 46 | /* |
62 | * Someone might want to pass call_rcu_bh as a function pointer. | 47 | * Someone might want to pass call_rcu_bh as a function pointer. |
63 | * So this needs to just be a rename and not a macro function. | 48 | * So this needs to just be a rename and not a macro function. |
64 | * (no parentheses) | 49 | * (no parentheses) |
65 | */ | 50 | */ |
66 | #define call_rcu_bh call_rcu | 51 | #define call_rcu_bh call_rcu |
67 | 52 | ||
68 | /** | 53 | /** |
69 | * call_rcu_sched - Queue RCU callback for invocation after sched grace period. | 54 | * call_rcu_sched - Queue RCU callback for invocation after sched grace period. |
@@ -117,30 +102,12 @@ extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); | |||
117 | struct softirq_action; | 102 | struct softirq_action; |
118 | 103 | ||
119 | #ifdef CONFIG_NO_HZ | 104 | #ifdef CONFIG_NO_HZ |
120 | 105 | extern void rcu_enter_nohz(void); | |
121 | static inline void rcu_enter_nohz(void) | 106 | extern void rcu_exit_nohz(void); |
122 | { | 107 | #else |
123 | static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); | 108 | # define rcu_enter_nohz() do { } while (0) |
124 | 109 | # define rcu_exit_nohz() do { } while (0) | |
125 | smp_mb(); /* CPUs seeing ++ must see prior RCU read-side crit sects */ | 110 | #endif |
126 | __get_cpu_var(rcu_dyntick_sched).dynticks++; | ||
127 | WARN_ON_RATELIMIT(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1, &rs); | ||
128 | } | ||
129 | |||
130 | static inline void rcu_exit_nohz(void) | ||
131 | { | ||
132 | static DEFINE_RATELIMIT_STATE(rs, 10 * HZ, 1); | ||
133 | |||
134 | __get_cpu_var(rcu_dyntick_sched).dynticks++; | ||
135 | smp_mb(); /* CPUs seeing ++ must see later RCU read-side crit sects */ | ||
136 | WARN_ON_RATELIMIT(!(__get_cpu_var(rcu_dyntick_sched).dynticks & 0x1), | ||
137 | &rs); | ||
138 | } | ||
139 | |||
140 | #else /* CONFIG_NO_HZ */ | ||
141 | #define rcu_enter_nohz() do { } while (0) | ||
142 | #define rcu_exit_nohz() do { } while (0) | ||
143 | #endif /* CONFIG_NO_HZ */ | ||
144 | 111 | ||
145 | /* | 112 | /* |
146 | * A context switch is a grace period for rcupreempt synchronize_rcu() | 113 | * A context switch is a grace period for rcupreempt synchronize_rcu() |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index a722fb67bb2d..58b2aa5312b9 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/cache.h> | 33 | #include <linux/cache.h> |
34 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
35 | #include <linux/threads.h> | 35 | #include <linux/threads.h> |
36 | #include <linux/percpu.h> | ||
37 | #include <linux/cpumask.h> | 36 | #include <linux/cpumask.h> |
38 | #include <linux/seqlock.h> | 37 | #include <linux/seqlock.h> |
39 | 38 | ||
@@ -162,9 +161,8 @@ struct rcu_data { | |||
162 | unsigned long offline_fqs; /* Kicked due to being offline. */ | 161 | unsigned long offline_fqs; /* Kicked due to being offline. */ |
163 | unsigned long resched_ipi; /* Sent a resched IPI. */ | 162 | unsigned long resched_ipi; /* Sent a resched IPI. */ |
164 | 163 | ||
165 | /* 5) state to allow this CPU to force_quiescent_state on others */ | 164 | /* 5) For future __rcu_pending statistics. */ |
166 | long n_rcu_pending; /* rcu_pending() calls since boot. */ | 165 | long n_rcu_pending; /* rcu_pending() calls since boot. */ |
167 | long n_rcu_pending_force_qs; /* when to force quiescent states. */ | ||
168 | 166 | ||
169 | int cpu; | 167 | int cpu; |
170 | }; | 168 | }; |
@@ -236,30 +234,8 @@ struct rcu_state { | |||
236 | #endif /* #ifdef CONFIG_NO_HZ */ | 234 | #endif /* #ifdef CONFIG_NO_HZ */ |
237 | }; | 235 | }; |
238 | 236 | ||
239 | extern struct rcu_state rcu_state; | 237 | extern void rcu_qsctr_inc(int cpu); |
240 | DECLARE_PER_CPU(struct rcu_data, rcu_data); | 238 | extern void rcu_bh_qsctr_inc(int cpu); |
241 | |||
242 | extern struct rcu_state rcu_bh_state; | ||
243 | DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); | ||
244 | |||
245 | /* | ||
246 | * Increment the quiescent state counter. | ||
247 | * The counter is a bit degenerated: We do not need to know | ||
248 | * how many quiescent states passed, just if there was at least | ||
249 | * one since the start of the grace period. Thus just a flag. | ||
250 | */ | ||
251 | static inline void rcu_qsctr_inc(int cpu) | ||
252 | { | ||
253 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | ||
254 | rdp->passed_quiesc = 1; | ||
255 | rdp->passed_quiesc_completed = rdp->completed; | ||
256 | } | ||
257 | static inline void rcu_bh_qsctr_inc(int cpu) | ||
258 | { | ||
259 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); | ||
260 | rdp->passed_quiesc = 1; | ||
261 | rdp->passed_quiesc_completed = rdp->completed; | ||
262 | } | ||
263 | 239 | ||
264 | extern int rcu_pending(int cpu); | 240 | extern int rcu_pending(int cpu); |
265 | extern int rcu_needs_cpu(int cpu); | 241 | extern int rcu_needs_cpu(int cpu); |
diff --git a/include/linux/regulator/bq24022.h b/include/linux/regulator/bq24022.h index e84b0a9feda5..a6d014005d49 100644 --- a/include/linux/regulator/bq24022.h +++ b/include/linux/regulator/bq24022.h | |||
@@ -10,6 +10,8 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | struct regulator_init_data; | ||
14 | |||
13 | /** | 15 | /** |
14 | * bq24022_mach_info - platform data for bq24022 | 16 | * bq24022_mach_info - platform data for bq24022 |
15 | * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging | 17 | * @gpio_nce: GPIO line connected to the nCE pin, used to enable / disable charging |
@@ -18,4 +20,5 @@ | |||
18 | struct bq24022_mach_info { | 20 | struct bq24022_mach_info { |
19 | int gpio_nce; | 21 | int gpio_nce; |
20 | int gpio_iset2; | 22 | int gpio_iset2; |
23 | struct regulator_init_data *init_data; | ||
21 | }; | 24 | }; |
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 801bf77ff4e2..277f4b964df5 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. | 4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. |
5 | * | 5 | * |
6 | * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> | 6 | * Author: Liam Girdwood <lrg@slimlogic.co.uk> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -88,6 +88,7 @@ | |||
88 | * FAIL Regulator output has failed. | 88 | * FAIL Regulator output has failed. |
89 | * OVER_TEMP Regulator over temp. | 89 | * OVER_TEMP Regulator over temp. |
90 | * FORCE_DISABLE Regulator shut down by software. | 90 | * FORCE_DISABLE Regulator shut down by software. |
91 | * VOLTAGE_CHANGE Regulator voltage changed. | ||
91 | * | 92 | * |
92 | * NOTE: These events can be OR'ed together when passed into handler. | 93 | * NOTE: These events can be OR'ed together when passed into handler. |
93 | */ | 94 | */ |
@@ -98,6 +99,7 @@ | |||
98 | #define REGULATOR_EVENT_FAIL 0x08 | 99 | #define REGULATOR_EVENT_FAIL 0x08 |
99 | #define REGULATOR_EVENT_OVER_TEMP 0x10 | 100 | #define REGULATOR_EVENT_OVER_TEMP 0x10 |
100 | #define REGULATOR_EVENT_FORCE_DISABLE 0x20 | 101 | #define REGULATOR_EVENT_FORCE_DISABLE 0x20 |
102 | #define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40 | ||
101 | 103 | ||
102 | struct regulator; | 104 | struct regulator; |
103 | 105 | ||
@@ -140,6 +142,8 @@ int regulator_bulk_disable(int num_consumers, | |||
140 | void regulator_bulk_free(int num_consumers, | 142 | void regulator_bulk_free(int num_consumers, |
141 | struct regulator_bulk_data *consumers); | 143 | struct regulator_bulk_data *consumers); |
142 | 144 | ||
145 | int regulator_count_voltages(struct regulator *regulator); | ||
146 | int regulator_list_voltage(struct regulator *regulator, unsigned selector); | ||
143 | int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); | 147 | int regulator_set_voltage(struct regulator *regulator, int min_uV, int max_uV); |
144 | int regulator_get_voltage(struct regulator *regulator); | 148 | int regulator_get_voltage(struct regulator *regulator); |
145 | int regulator_set_current_limit(struct regulator *regulator, | 149 | int regulator_set_current_limit(struct regulator *regulator, |
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 2dae05705f13..225f733e7533 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. | 4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. |
5 | * | 5 | * |
6 | * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> | 6 | * Author: Liam Girdwood <lrg@slimlogic.co.uk> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -21,25 +21,39 @@ | |||
21 | struct regulator_dev; | 21 | struct regulator_dev; |
22 | struct regulator_init_data; | 22 | struct regulator_init_data; |
23 | 23 | ||
24 | enum regulator_status { | ||
25 | REGULATOR_STATUS_OFF, | ||
26 | REGULATOR_STATUS_ON, | ||
27 | REGULATOR_STATUS_ERROR, | ||
28 | /* fast/normal/idle/standby are flavors of "on" */ | ||
29 | REGULATOR_STATUS_FAST, | ||
30 | REGULATOR_STATUS_NORMAL, | ||
31 | REGULATOR_STATUS_IDLE, | ||
32 | REGULATOR_STATUS_STANDBY, | ||
33 | }; | ||
34 | |||
24 | /** | 35 | /** |
25 | * struct regulator_ops - regulator operations. | 36 | * struct regulator_ops - regulator operations. |
26 | * | 37 | * |
27 | * This struct describes regulator operations which can be implemented by | 38 | * @enable: Configure the regulator as enabled. |
28 | * regulator chip drivers. | 39 | * @disable: Configure the regulator as disabled. |
29 | * | ||
30 | * @enable: Enable the regulator. | ||
31 | * @disable: Disable the regulator. | ||
32 | * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. | 40 | * @is_enabled: Return 1 if the regulator is enabled, 0 otherwise. |
33 | * | 41 | * |
34 | * @set_voltage: Set the voltage for the regulator within the range specified. | 42 | * @set_voltage: Set the voltage for the regulator within the range specified. |
35 | * The driver should select the voltage closest to min_uV. | 43 | * The driver should select the voltage closest to min_uV. |
36 | * @get_voltage: Return the currently configured voltage for the regulator. | 44 | * @get_voltage: Return the currently configured voltage for the regulator. |
45 | * @list_voltage: Return one of the supported voltages, in microvolts; zero | ||
46 | * if the selector indicates a voltage that is unusable on this system; | ||
47 | * or negative errno. Selectors range from zero to one less than | ||
48 | * regulator_desc.n_voltages. Voltages may be reported in any order. | ||
37 | * | 49 | * |
38 | * @set_current_limit: Configure a limit for a current-limited regulator. | 50 | * @set_current_limit: Configure a limit for a current-limited regulator. |
39 | * @get_current_limit: Get the limit for a current-limited regulator. | 51 | * @get_current_limit: Get the configured limit for a current-limited regulator. |
40 | * | 52 | * |
41 | * @set_mode: Set the operating mode for the regulator. | 53 | * @set_mode: Set the configured operating mode for the regulator. |
42 | * @get_mode: Get the current operating mode for the regulator. | 54 | * @get_mode: Get the configured operating mode for the regulator. |
55 | * @get_status: Return actual (not as-configured) status of regulator, as a | ||
56 | * REGULATOR_STATUS value (or negative errno) | ||
43 | * @get_optimum_mode: Get the most efficient operating mode for the regulator | 57 | * @get_optimum_mode: Get the most efficient operating mode for the regulator |
44 | * when running with the specified parameters. | 58 | * when running with the specified parameters. |
45 | * | 59 | * |
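A hypothetical driver-side sketch of the new list_voltage() operation documented in the hunk above (the voltage table and names are invented, not taken from any real driver):

	/* hypothetical table of selectable outputs, in microvolts */
	static const int my_voltages[] = { 1800000, 2500000, 3300000 };

	static int my_list_voltage(struct regulator_dev *rdev, unsigned selector)
	{
		if (selector >= ARRAY_SIZE(my_voltages))
			return -EINVAL;
		return my_voltages[selector];	/* 0 would mean "unusable on this board" */
	}

	/* .list_voltage = my_list_voltage goes in the driver's regulator_ops;
	 * n_voltages = ARRAY_SIZE(my_voltages) goes in its regulator_desc. */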
@@ -51,9 +65,15 @@ struct regulator_init_data; | |||
51 | * suspended. | 65 | * suspended. |
52 | * @set_suspend_mode: Set the operating mode for the regulator when the | 66 | * @set_suspend_mode: Set the operating mode for the regulator when the |
53 | * system is suspended. | 67 | * system is suspended. |
68 | * | ||
69 | * This struct describes regulator operations which can be implemented by | ||
70 | * regulator chip drivers. | ||
54 | */ | 71 | */ |
55 | struct regulator_ops { | 72 | struct regulator_ops { |
56 | 73 | ||
74 | /* enumerate supported voltages */ | ||
75 | int (*list_voltage) (struct regulator_dev *, unsigned selector); | ||
76 | |||
57 | /* get/set regulator voltage */ | 77 | /* get/set regulator voltage */ |
58 | int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); | 78 | int (*set_voltage) (struct regulator_dev *, int min_uV, int max_uV); |
59 | int (*get_voltage) (struct regulator_dev *); | 79 | int (*get_voltage) (struct regulator_dev *); |
@@ -72,6 +92,13 @@ struct regulator_ops { | |||
72 | int (*set_mode) (struct regulator_dev *, unsigned int mode); | 92 | int (*set_mode) (struct regulator_dev *, unsigned int mode); |
73 | unsigned int (*get_mode) (struct regulator_dev *); | 93 | unsigned int (*get_mode) (struct regulator_dev *); |
74 | 94 | ||
95 | /* report regulator status ... most other accessors report | ||
96 | * control inputs, this reports results of combining inputs | ||
97 | * from Linux (and other sources) with the actual load. | ||
98 | * returns REGULATOR_STATUS_* or negative errno. | ||
99 | */ | ||
100 | int (*get_status)(struct regulator_dev *); | ||
101 | |||
75 | /* get most efficient regulator operating mode for load */ | 102 | /* get most efficient regulator operating mode for load */ |
76 | unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, | 103 | unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, |
77 | int output_uV, int load_uA); | 104 | int output_uV, int load_uA); |
@@ -106,6 +133,7 @@ enum regulator_type { | |||
106 | * | 133 | * |
107 | * @name: Identifying name for the regulator. | 134 | * @name: Identifying name for the regulator. |
108 | * @id: Numerical identifier for the regulator. | 135 | * @id: Numerical identifier for the regulator. |
136 | * @n_voltages: Number of selectors available for ops.list_voltage(). | ||
109 | * @ops: Regulator operations table. | 137 | * @ops: Regulator operations table. |
110 | * @irq: Interrupt number for the regulator. | 138 | * @irq: Interrupt number for the regulator. |
111 | * @type: Indicates if the regulator is a voltage or current regulator. | 139 | * @type: Indicates if the regulator is a voltage or current regulator. |
@@ -114,14 +142,48 @@ enum regulator_type { | |||
114 | struct regulator_desc { | 142 | struct regulator_desc { |
115 | const char *name; | 143 | const char *name; |
116 | int id; | 144 | int id; |
145 | unsigned n_voltages; | ||
117 | struct regulator_ops *ops; | 146 | struct regulator_ops *ops; |
118 | int irq; | 147 | int irq; |
119 | enum regulator_type type; | 148 | enum regulator_type type; |
120 | struct module *owner; | 149 | struct module *owner; |
121 | }; | 150 | }; |
122 | 151 | ||
152 | /* | ||
153 | * struct regulator_dev | ||
154 | * | ||
155 | * Voltage / Current regulator class device. One for each | ||
156 | * regulator. | ||
157 | * | ||
158 | * This should *not* be used directly by anything except the regulator | ||
159 | * core and notification injection (which should take the mutex and do | ||
160 | * no other direct access). | ||
161 | */ | ||
162 | struct regulator_dev { | ||
163 | struct regulator_desc *desc; | ||
164 | int use_count; | ||
165 | |||
166 | /* lists we belong to */ | ||
167 | struct list_head list; /* list of all regulators */ | ||
168 | struct list_head slist; /* list of supplied regulators */ | ||
169 | |||
170 | /* lists we own */ | ||
171 | struct list_head consumer_list; /* consumers we supply */ | ||
172 | struct list_head supply_list; /* regulators we supply */ | ||
173 | |||
174 | struct blocking_notifier_head notifier; | ||
175 | struct mutex mutex; /* consumer lock */ | ||
176 | struct module *owner; | ||
177 | struct device dev; | ||
178 | struct regulation_constraints *constraints; | ||
179 | struct regulator_dev *supply; /* for tree */ | ||
180 | |||
181 | void *reg_data; /* regulator_dev data */ | ||
182 | }; | ||
183 | |||
123 | struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, | 184 | struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, |
124 | struct device *dev, void *driver_data); | 185 | struct device *dev, struct regulator_init_data *init_data, |
186 | void *driver_data); | ||
125 | void regulator_unregister(struct regulator_dev *rdev); | 187 | void regulator_unregister(struct regulator_dev *rdev); |
126 | 188 | ||
127 | int regulator_notifier_call_chain(struct regulator_dev *rdev, | 189 | int regulator_notifier_call_chain(struct regulator_dev *rdev, |
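A hypothetical probe() sketch using the updated regulator_register() prototype above, with init_data now passed explicitly (my_ops, my_desc and my_platform_data are invented for illustration):

	static struct regulator_ops my_ops;	/* .enable, .list_voltage, ... */

	static struct regulator_desc my_desc = {
		.name		= "my-ldo",
		.id		= 0,
		.n_voltages	= 3,
		.ops		= &my_ops,
		.type		= REGULATOR_VOLTAGE,
		.owner		= THIS_MODULE,
	};

	static int my_probe(struct platform_device *pdev)
	{
		struct my_platform_data *pdata = pdev->dev.platform_data;
		struct regulator_dev *rdev;

		rdev = regulator_register(&my_desc, &pdev->dev,
					  pdata->init_data, NULL);
		if (IS_ERR(rdev))
			return PTR_ERR(rdev);
		platform_set_drvdata(pdev, rdev);
		return 0;
	}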
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 1387a5d2190e..91b4da31f1b5 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h | |||
@@ -14,9 +14,12 @@ | |||
14 | #ifndef __REGULATOR_FIXED_H | 14 | #ifndef __REGULATOR_FIXED_H |
15 | #define __REGULATOR_FIXED_H | 15 | #define __REGULATOR_FIXED_H |
16 | 16 | ||
17 | struct regulator_init_data; | ||
18 | |||
17 | struct fixed_voltage_config { | 19 | struct fixed_voltage_config { |
18 | const char *supply_name; | 20 | const char *supply_name; |
19 | int microvolts; | 21 | int microvolts; |
22 | struct regulator_init_data *init_data; | ||
20 | }; | 23 | }; |
21 | 24 | ||
22 | #endif | 25 | #endif |
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 3794773b23d2..bac64fa390f2 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. | 4 | * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. |
5 | * | 5 | * |
6 | * Author: Liam Girdwood <lg@opensource.wolfsonmicro.com> | 6 | * Author: Liam Girdwood <lrg@slimlogic.co.uk> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
@@ -73,7 +73,9 @@ struct regulator_state { | |||
73 | * | 73 | * |
74 | * @always_on: Set if the regulator should never be disabled. | 74 | * @always_on: Set if the regulator should never be disabled. |
75 | * @boot_on: Set if the regulator is enabled when the system is initially | 75 | * @boot_on: Set if the regulator is enabled when the system is initially |
76 | * started. | 76 | * started. If the regulator is not enabled by the hardware or |
77 | * bootloader then it will be enabled when the constraints are | ||
78 | * applied. | ||
77 | * @apply_uV: Apply the voltage constraint when initialising. | 79 | * @apply_uV: Apply the voltage constraint when initialising. |
78 | * | 80 | * |
79 | * @input_uV: Input voltage for regulator when supplied by another regulator. | 81 | * @input_uV: Input voltage for regulator when supplied by another regulator. |
@@ -83,6 +85,7 @@ struct regulator_state { | |||
83 | * @state_standby: State for regulator when system is suspended in standby | 85 | * @state_standby: State for regulator when system is suspended in standby |
84 | * mode. | 86 | * mode. |
85 | * @initial_state: Suspend state to set by default. | 87 | * @initial_state: Suspend state to set by default. |
88 | * @initial_mode: Mode to set at startup. | ||
86 | */ | 89 | */ |
87 | struct regulation_constraints { | 90 | struct regulation_constraints { |
88 | 91 | ||
@@ -111,6 +114,9 @@ struct regulation_constraints { | |||
111 | struct regulator_state state_standby; | 114 | struct regulator_state state_standby; |
112 | suspend_state_t initial_state; /* suspend state to set at init */ | 115 | suspend_state_t initial_state; /* suspend state to set at init */ |
113 | 116 | ||
117 | /* mode to set on startup */ | ||
118 | unsigned int initial_mode; | ||
119 | |||
114 | /* constraint flags */ | 120 | /* constraint flags */ |
115 | unsigned always_on:1; /* regulator never off when system is on */ | 121 | unsigned always_on:1; /* regulator never off when system is on */ |
116 | unsigned boot_on:1; /* bootloader/firmware enabled regulator */ | 122 | unsigned boot_on:1; /* bootloader/firmware enabled regulator */ |
@@ -160,4 +166,6 @@ struct regulator_init_data { | |||
160 | 166 | ||
161 | int regulator_suspend_prepare(suspend_state_t state); | 167 | int regulator_suspend_prepare(suspend_state_t state); |
162 | 168 | ||
169 | void regulator_has_full_constraints(void); | ||
170 | |||
163 | #endif | 171 | #endif |
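A hypothetical board-file sketch tying together the additions in fixed.h and machine.h above: constraints carried in a regulator_init_data, that init_data handed to a fixed regulator through its platform data, and the new regulator_has_full_constraints() call once all regulators are declared (names are invented):

	static struct regulator_init_data board_3v3_data = {
		.constraints = {
			.always_on	= 1,
			.boot_on	= 1,
		},
	};

	static struct fixed_voltage_config board_3v3_config = {
		.supply_name	= "board-3v3",
		.microvolts	= 3300000,
		.init_data	= &board_3v3_data,
	};

	/* ... register the fixed-regulator platform device, then: */
	regulator_has_full_constraints();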
diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h index fe00f781a622..8cc65757e47a 100644 --- a/include/linux/reiserfs_acl.h +++ b/include/linux/reiserfs_acl.h | |||
@@ -49,13 +49,12 @@ static inline int reiserfs_acl_count(size_t size) | |||
49 | #ifdef CONFIG_REISERFS_FS_POSIX_ACL | 49 | #ifdef CONFIG_REISERFS_FS_POSIX_ACL |
50 | struct posix_acl *reiserfs_get_acl(struct inode *inode, int type); | 50 | struct posix_acl *reiserfs_get_acl(struct inode *inode, int type); |
51 | int reiserfs_acl_chmod(struct inode *inode); | 51 | int reiserfs_acl_chmod(struct inode *inode); |
52 | int reiserfs_inherit_default_acl(struct inode *dir, struct dentry *dentry, | 52 | int reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th, |
53 | struct inode *dir, struct dentry *dentry, | ||
53 | struct inode *inode); | 54 | struct inode *inode); |
54 | int reiserfs_cache_default_acl(struct inode *dir); | 55 | int reiserfs_cache_default_acl(struct inode *dir); |
55 | extern int reiserfs_xattr_posix_acl_init(void) __init; | 56 | extern struct xattr_handler reiserfs_posix_acl_default_handler; |
56 | extern int reiserfs_xattr_posix_acl_exit(void); | 57 | extern struct xattr_handler reiserfs_posix_acl_access_handler; |
57 | extern struct reiserfs_xattr_handler posix_acl_default_handler; | ||
58 | extern struct reiserfs_xattr_handler posix_acl_access_handler; | ||
59 | 58 | ||
60 | static inline void reiserfs_init_acl_access(struct inode *inode) | 59 | static inline void reiserfs_init_acl_access(struct inode *inode) |
61 | { | 60 | { |
@@ -75,23 +74,14 @@ static inline struct posix_acl *reiserfs_get_acl(struct inode *inode, int type) | |||
75 | return NULL; | 74 | return NULL; |
76 | } | 75 | } |
77 | 76 | ||
78 | static inline int reiserfs_xattr_posix_acl_init(void) | ||
79 | { | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | static inline int reiserfs_xattr_posix_acl_exit(void) | ||
84 | { | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | static inline int reiserfs_acl_chmod(struct inode *inode) | 77 | static inline int reiserfs_acl_chmod(struct inode *inode) |
89 | { | 78 | { |
90 | return 0; | 79 | return 0; |
91 | } | 80 | } |
92 | 81 | ||
93 | static inline int | 82 | static inline int |
94 | reiserfs_inherit_default_acl(const struct inode *dir, struct dentry *dentry, | 83 | reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th, |
84 | const struct inode *dir, struct dentry *dentry, | ||
95 | struct inode *inode) | 85 | struct inode *inode) |
96 | { | 86 | { |
97 | return 0; | 87 | return 0; |
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index e356c99f0659..2245c78d5876 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h | |||
@@ -58,8 +58,6 @@ | |||
58 | #define reiserfs_write_lock( sb ) lock_kernel() | 58 | #define reiserfs_write_lock( sb ) lock_kernel() |
59 | #define reiserfs_write_unlock( sb ) unlock_kernel() | 59 | #define reiserfs_write_unlock( sb ) unlock_kernel() |
60 | 60 | ||
61 | /* xattr stuff */ | ||
62 | #define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem) | ||
63 | struct fid; | 61 | struct fid; |
64 | 62 | ||
65 | /* in reading the #defines, it may help to understand that they employ | 63 | /* in reading the #defines, it may help to understand that they employ |
@@ -104,15 +102,21 @@ struct fid; | |||
104 | */ | 102 | */ |
105 | #define REISERFS_DEBUG_CODE 5 /* extra messages to help find/debug errors */ | 103 | #define REISERFS_DEBUG_CODE 5 /* extra messages to help find/debug errors */ |
106 | 104 | ||
107 | void reiserfs_warning(struct super_block *s, const char *fmt, ...); | 105 | void __reiserfs_warning(struct super_block *s, const char *id, |
106 | const char *func, const char *fmt, ...); | ||
107 | #define reiserfs_warning(s, id, fmt, args...) \ | ||
108 | __reiserfs_warning(s, id, __func__, fmt, ##args) | ||
108 | /* assertions handling */ | 109 | /* assertions handling */ |
109 | 110 | ||
110 | /** always check a condition and panic if it's false. */ | 111 | /** always check a condition and panic if it's false. */ |
111 | #define __RASSERT( cond, scond, format, args... ) \ | 112 | #define __RASSERT(cond, scond, format, args...) \ |
112 | if( !( cond ) ) \ | 113 | do { \ |
113 | reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at " \ | 114 | if (!(cond)) \ |
114 | __FILE__ ":%i:%s: " format "\n", \ | 115 | reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \ |
115 | in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __func__ , ##args ) | 116 | __FILE__ ":%i:%s: " format "\n", \ |
117 | in_interrupt() ? -1 : task_pid_nr(current), \ | ||
118 | __LINE__, __func__ , ##args); \ | ||
119 | } while (0) | ||
116 | 120 | ||
117 | #define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args) | 121 | #define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args) |
118 | 122 | ||
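For illustration, a hypothetical call under the new reiserfs_warning() signature above (sb and uniqueness are assumed to be in scope); the message id is now a separate argument and __func__ is supplied by the macro itself:

	reiserfs_warning(sb, "vs-500", "unknown uniqueness %d", uniqueness);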
@@ -196,7 +200,11 @@ struct reiserfs_super_block { | |||
196 | __le32 s_flags; /* Right now used only by inode-attributes, if enabled */ | 200 | __le32 s_flags; /* Right now used only by inode-attributes, if enabled */ |
197 | unsigned char s_uuid[16]; /* filesystem unique identifier */ | 201 | unsigned char s_uuid[16]; /* filesystem unique identifier */ |
198 | unsigned char s_label[16]; /* filesystem volume label */ | 202 | unsigned char s_label[16]; /* filesystem volume label */ |
199 | char s_unused[88]; /* zero filled by mkreiserfs and | 203 | __le16 s_mnt_count; /* Count of mounts since last fsck */ |
204 | __le16 s_max_mnt_count; /* Maximum mounts before check */ | ||
205 | __le32 s_lastcheck; /* Timestamp of last fsck */ | ||
206 | __le32 s_check_interval; /* Interval between checks */ | ||
207 | char s_unused[76]; /* zero filled by mkreiserfs and | ||
200 | * reiserfs_convert_objectid_map_v1() | 208 | * reiserfs_convert_objectid_map_v1() |
201 | * so any additions must be updated | 209 | * so any additions must be updated |
202 | * there as well. */ | 210 | * there as well. */ |
@@ -578,10 +586,8 @@ static inline int uniqueness2type(__u32 uniqueness) | |||
578 | return TYPE_DIRECT; | 586 | return TYPE_DIRECT; |
579 | case V1_DIRENTRY_UNIQUENESS: | 587 | case V1_DIRENTRY_UNIQUENESS: |
580 | return TYPE_DIRENTRY; | 588 | return TYPE_DIRENTRY; |
581 | default: | ||
582 | reiserfs_warning(NULL, "vs-500: unknown uniqueness %d", | ||
583 | uniqueness); | ||
584 | case V1_ANY_UNIQUENESS: | 589 | case V1_ANY_UNIQUENESS: |
590 | default: | ||
585 | return TYPE_ANY; | 591 | return TYPE_ANY; |
586 | } | 592 | } |
587 | } | 593 | } |
@@ -598,9 +604,8 @@ static inline __u32 type2uniqueness(int type) | |||
598 | return V1_DIRECT_UNIQUENESS; | 604 | return V1_DIRECT_UNIQUENESS; |
599 | case TYPE_DIRENTRY: | 605 | case TYPE_DIRENTRY: |
600 | return V1_DIRENTRY_UNIQUENESS; | 606 | return V1_DIRENTRY_UNIQUENESS; |
601 | default: | ||
602 | reiserfs_warning(NULL, "vs-501: unknown type %d", type); | ||
603 | case TYPE_ANY: | 607 | case TYPE_ANY: |
608 | default: | ||
604 | return V1_ANY_UNIQUENESS; | 609 | return V1_ANY_UNIQUENESS; |
605 | } | 610 | } |
606 | } | 611 | } |
@@ -712,9 +717,9 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key) | |||
712 | #define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key))) | 717 | #define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key))) |
713 | #define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key))) | 718 | #define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key))) |
714 | 719 | ||
715 | #define I_K_KEY_IN_ITEM(p_s_ih, p_s_key, n_blocksize) \ | 720 | #define I_K_KEY_IN_ITEM(ih, key, n_blocksize) \ |
716 | ( ! COMP_SHORT_KEYS(p_s_ih, p_s_key) && \ | 721 | (!COMP_SHORT_KEYS(ih, key) && \ |
717 | I_OFF_BYTE_IN_ITEM(p_s_ih, k_offset (p_s_key), n_blocksize) ) | 722 | I_OFF_BYTE_IN_ITEM(ih, k_offset(key), n_blocksize)) |
718 | 723 | ||
719 | /* maximal length of item */ | 724 | /* maximal length of item */ |
720 | #define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE) | 725 | #define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE) |
@@ -770,25 +775,25 @@ struct block_head { | |||
770 | #define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level. */ | 775 | #define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level. */ |
771 | 776 | ||
772 | /* Given the buffer head of a formatted node, resolve to the block head of that node. */ | 777 | /* Given the buffer head of a formatted node, resolve to the block head of that node. */ |
773 | #define B_BLK_HEAD(p_s_bh) ((struct block_head *)((p_s_bh)->b_data)) | 778 | #define B_BLK_HEAD(bh) ((struct block_head *)((bh)->b_data)) |
774 | /* Number of items that are in buffer. */ | 779 | /* Number of items that are in buffer. */ |
775 | #define B_NR_ITEMS(p_s_bh) (blkh_nr_item(B_BLK_HEAD(p_s_bh))) | 780 | #define B_NR_ITEMS(bh) (blkh_nr_item(B_BLK_HEAD(bh))) |
776 | #define B_LEVEL(p_s_bh) (blkh_level(B_BLK_HEAD(p_s_bh))) | 781 | #define B_LEVEL(bh) (blkh_level(B_BLK_HEAD(bh))) |
777 | #define B_FREE_SPACE(p_s_bh) (blkh_free_space(B_BLK_HEAD(p_s_bh))) | 782 | #define B_FREE_SPACE(bh) (blkh_free_space(B_BLK_HEAD(bh))) |
778 | 783 | ||
779 | #define PUT_B_NR_ITEMS(p_s_bh,val) do { set_blkh_nr_item(B_BLK_HEAD(p_s_bh),val); } while (0) | 784 | #define PUT_B_NR_ITEMS(bh, val) do { set_blkh_nr_item(B_BLK_HEAD(bh), val); } while (0) |
780 | #define PUT_B_LEVEL(p_s_bh,val) do { set_blkh_level(B_BLK_HEAD(p_s_bh),val); } while (0) | 785 | #define PUT_B_LEVEL(bh, val) do { set_blkh_level(B_BLK_HEAD(bh), val); } while (0) |
781 | #define PUT_B_FREE_SPACE(p_s_bh,val) do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0) | 786 | #define PUT_B_FREE_SPACE(bh, val) do { set_blkh_free_space(B_BLK_HEAD(bh), val); } while (0) |
782 | 787 | ||
783 | /* Get right delimiting key. -- little endian */ | 788 | /* Get right delimiting key. -- little endian */ |
784 | #define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh)))) | 789 | #define B_PRIGHT_DELIM_KEY(bh) (&(blk_right_delim_key(B_BLK_HEAD(bh)))) |
785 | 790 | ||
786 | /* Does the buffer contain a disk leaf. */ | 791 | /* Does the buffer contain a disk leaf. */ |
787 | #define B_IS_ITEMS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL) | 792 | #define B_IS_ITEMS_LEVEL(bh) (B_LEVEL(bh) == DISK_LEAF_NODE_LEVEL) |
788 | 793 | ||
789 | /* Does the buffer contain a disk internal node */ | 794 | /* Does the buffer contain a disk internal node */ |
790 | #define B_IS_KEYS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) > DISK_LEAF_NODE_LEVEL \ | 795 | #define B_IS_KEYS_LEVEL(bh) (B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \ |
791 | && B_LEVEL(p_s_bh) <= MAX_HEIGHT) | 796 | && B_LEVEL(bh) <= MAX_HEIGHT) |
792 | 797 | ||
793 | /***************************************************************************/ | 798 | /***************************************************************************/ |
794 | /* STAT DATA */ | 799 | /* STAT DATA */ |
@@ -1138,12 +1143,13 @@ struct disk_child { | |||
1138 | #define put_dc_size(dc_p, val) do { (dc_p)->dc_size = cpu_to_le16(val); } while(0) | 1143 | #define put_dc_size(dc_p, val) do { (dc_p)->dc_size = cpu_to_le16(val); } while(0) |
1139 | 1144 | ||
1140 | /* Get disk child by buffer header and position in the tree node. */ | 1145 | /* Get disk child by buffer header and position in the tree node. */ |
1141 | #define B_N_CHILD(p_s_bh,n_pos) ((struct disk_child *)\ | 1146 | #define B_N_CHILD(bh, n_pos) ((struct disk_child *)\ |
1142 | ((p_s_bh)->b_data+BLKH_SIZE+B_NR_ITEMS(p_s_bh)*KEY_SIZE+DC_SIZE*(n_pos))) | 1147 | ((bh)->b_data + BLKH_SIZE + B_NR_ITEMS(bh) * KEY_SIZE + DC_SIZE * (n_pos))) |
1143 | 1148 | ||
1144 | /* Get disk child number by buffer header and position in the tree node. */ | 1149 | /* Get disk child number by buffer header and position in the tree node. */ |
1145 | #define B_N_CHILD_NUM(p_s_bh,n_pos) (dc_block_number(B_N_CHILD(p_s_bh,n_pos))) | 1150 | #define B_N_CHILD_NUM(bh, n_pos) (dc_block_number(B_N_CHILD(bh, n_pos))) |
1146 | #define PUT_B_N_CHILD_NUM(p_s_bh,n_pos, val) (put_dc_block_number(B_N_CHILD(p_s_bh,n_pos), val )) | 1151 | #define PUT_B_N_CHILD_NUM(bh, n_pos, val) \ |
1152 | (put_dc_block_number(B_N_CHILD(bh, n_pos), val)) | ||
1147 | 1153 | ||
1148 | /* maximal value of field child_size in structure disk_child */ | 1154 | /* maximal value of field child_size in structure disk_child */ |
1149 | /* child size is the combined size of all items and their headers */ | 1155 | /* child size is the combined size of all items and their headers */ |
@@ -1214,33 +1220,33 @@ struct treepath { | |||
1214 | struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,} | 1220 | struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,} |
1215 | 1221 | ||
1216 | /* Get path element by path and path position. */ | 1222 | /* Get path element by path and path position. */ |
1217 | #define PATH_OFFSET_PELEMENT(p_s_path,n_offset) ((p_s_path)->path_elements +(n_offset)) | 1223 | #define PATH_OFFSET_PELEMENT(path, n_offset) ((path)->path_elements + (n_offset)) |
1218 | 1224 | ||
1219 | /* Get buffer header at the path by path and path position. */ | 1225 | /* Get buffer header at the path by path and path position. */ |
1220 | #define PATH_OFFSET_PBUFFER(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_buffer) | 1226 | #define PATH_OFFSET_PBUFFER(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_buffer) |
1221 | 1227 | ||
1222 | /* Get position in the element at the path by path and path position. */ | 1228 | /* Get position in the element at the path by path and path position. */ |
1223 | #define PATH_OFFSET_POSITION(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_position) | 1229 | #define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position) |
1224 | 1230 | ||
1225 | #define PATH_PLAST_BUFFER(p_s_path) (PATH_OFFSET_PBUFFER((p_s_path), (p_s_path)->path_length)) | 1231 | #define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length)) |
1226 | /* you know, to the person who didn't | 1232 | /* you know, to the person who didn't |
1227 | write this the macro name does not | 1233 | write this the macro name does not |
1228 | at first suggest what it does. | 1234 | at first suggest what it does. |
1229 | Maybe POSITION_FROM_PATH_END? Or | 1235 | Maybe POSITION_FROM_PATH_END? Or |
1230 | maybe we should just focus on | 1236 | maybe we should just focus on |
1231 | dumping paths... -Hans */ | 1237 | dumping paths... -Hans */ |
1232 | #define PATH_LAST_POSITION(p_s_path) (PATH_OFFSET_POSITION((p_s_path), (p_s_path)->path_length)) | 1238 | #define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length)) |
1233 | 1239 | ||
1234 | #define PATH_PITEM_HEAD(p_s_path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_path),PATH_LAST_POSITION(p_s_path)) | 1240 | #define PATH_PITEM_HEAD(path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path)) |
1235 | 1241 | ||
1236 | /* in do_balance leaf has h == 0 in contrast with path structure, | 1242 | /* in do_balance leaf has h == 0 in contrast with path structure, |
1237 | where root has level == 0. That is why we need these defines */ | 1243 | where root has level == 0. That is why we need these defines */ |
1238 | #define PATH_H_PBUFFER(p_s_path, h) PATH_OFFSET_PBUFFER (p_s_path, p_s_path->path_length - (h)) /* tb->S[h] */ | 1244 | #define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h)) /* tb->S[h] */ |
1239 | #define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */ | 1245 | #define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */ |
1240 | #define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h)) | 1246 | #define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h)) |
1241 | #define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */ | 1247 | #define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */ |
1242 | 1248 | ||
1243 | #define PATH_H_PATH_OFFSET(p_s_path, n_h) ((p_s_path)->path_length - (n_h)) | 1249 | #define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h)) |
1244 | 1250 | ||
1245 | #define get_last_bh(path) PATH_PLAST_BUFFER(path) | 1251 | #define get_last_bh(path) PATH_PLAST_BUFFER(path) |
1246 | #define get_ih(path) PATH_PITEM_HEAD(path) | 1252 | #define get_ih(path) PATH_PITEM_HEAD(path) |
@@ -1470,6 +1476,16 @@ struct buffer_info { | |||
1470 | int bi_position; | 1476 | int bi_position; |
1471 | }; | 1477 | }; |
1472 | 1478 | ||
1479 | static inline struct super_block *sb_from_tb(struct tree_balance *tb) | ||
1480 | { | ||
1481 | return tb ? tb->tb_sb : NULL; | ||
1482 | } | ||
1483 | |||
1484 | static inline struct super_block *sb_from_bi(struct buffer_info *bi) | ||
1485 | { | ||
1486 | return bi ? sb_from_tb(bi->tb) : NULL; | ||
1487 | } | ||
1488 | |||
1473 | /* there are 4 types of items: stat data, directory item, indirect, direct. | 1489 | /* there are 4 types of items: stat data, directory item, indirect, direct. |
1474 | +-------------------+------------+--------------+------------+ | 1490 | +-------------------+------------+--------------+------------+ |
1475 | | | k_offset | k_uniqueness | mergeable? | | 1491 | | | k_offset | k_uniqueness | mergeable? | |
@@ -1520,7 +1536,7 @@ extern struct item_operations *item_ops[TYPE_ANY + 1]; | |||
1520 | #define COMP_SHORT_KEYS comp_short_keys | 1536 | #define COMP_SHORT_KEYS comp_short_keys |
1521 | 1537 | ||
1522 | /* number of blocks pointed to by the indirect item */ | 1538 | /* number of blocks pointed to by the indirect item */ |
1523 | #define I_UNFM_NUM(p_s_ih) ( ih_item_len(p_s_ih) / UNFM_P_SIZE ) | 1539 | #define I_UNFM_NUM(ih) (ih_item_len(ih) / UNFM_P_SIZE) |
1524 | 1540 | ||
1525 | /* the used space within the unformatted node corresponding to pos within the item pointed to by ih */ | 1541 | /* the used space within the unformatted node corresponding to pos within the item pointed to by ih */ |
1526 | #define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - ih_free_space(ih) : (size)) | 1542 | #define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - ih_free_space(ih) : (size)) |
@@ -1623,6 +1639,10 @@ struct reiserfs_journal_header { | |||
1623 | #define JOURNAL_MAX_COMMIT_AGE 30 | 1639 | #define JOURNAL_MAX_COMMIT_AGE 30 |
1624 | #define JOURNAL_MAX_TRANS_AGE 30 | 1640 | #define JOURNAL_MAX_TRANS_AGE 30 |
1625 | #define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9) | 1641 | #define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9) |
1642 | #define JOURNAL_BLOCKS_PER_OBJECT(sb) (JOURNAL_PER_BALANCE_CNT * 3 + \ | ||
1643 | 2 * (REISERFS_QUOTA_INIT_BLOCKS(sb) + \ | ||
1644 | REISERFS_QUOTA_TRANS_BLOCKS(sb))) | ||
1645 | |||
1626 | #ifdef CONFIG_QUOTA | 1646 | #ifdef CONFIG_QUOTA |
1627 | /* We need to update data and inode (atime) */ | 1647 | /* We need to update data and inode (atime) */ |
1628 | #define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? 2 : 0) | 1648 | #define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? 2 : 0) |
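
JOURNAL_BLOCKS_PER_OBJECT() is the journal-space upper bound for creating or deleting one object: three tree balances plus the quota init and transaction blocks. A minimal sketch of sizing a transaction with it; the wrapper function is illustrative, and journal_begin() is the routine declared further down in this header:

/* Sketch: reserve enough journal blocks for one new object. */
static int example_begin_object_trans(struct reiserfs_transaction_handle *th,
                                      struct super_block *sb)
{
        unsigned long jbegin_count = JOURNAL_BLOCKS_PER_OBJECT(sb);

        return journal_begin(th, sb, jbegin_count);
}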
@@ -1697,7 +1717,7 @@ struct reiserfs_transaction_handle { | |||
1697 | int t_refcount; | 1717 | int t_refcount; |
1698 | int t_blocks_logged; /* number of blocks this writer has logged */ | 1718 | int t_blocks_logged; /* number of blocks this writer has logged */ |
1699 | int t_blocks_allocated; /* number of blocks this writer allocated */ | 1719 | int t_blocks_allocated; /* number of blocks this writer allocated */ |
1700 | unsigned long t_trans_id; /* sanity check, equals the current trans id */ | 1720 | unsigned int t_trans_id; /* sanity check, equals the current trans id */ |
1701 | void *t_handle_save; /* save existing current->journal_info */ | 1721 | void *t_handle_save; /* save existing current->journal_info */ |
1702 | unsigned displace_new_blocks:1; /* if new block allocation occurs, that block | 1722 | unsigned displace_new_blocks:1; /* if new block allocation occurs, that block |
1702 | unsigned displace_new_blocks:1; /* if new block allocation occurs, that block | 1722 | unsigned displace_new_blocks:1; /* if new block allocation occurs, that block |
1703 | should be displaced from others */ | 1723 | should be displaced from others */ |
@@ -1773,13 +1793,13 @@ int journal_end_sync(struct reiserfs_transaction_handle *, struct super_block *, | |||
1773 | int journal_mark_freed(struct reiserfs_transaction_handle *, | 1793 | int journal_mark_freed(struct reiserfs_transaction_handle *, |
1774 | struct super_block *, b_blocknr_t blocknr); | 1794 | struct super_block *, b_blocknr_t blocknr); |
1775 | int journal_transaction_should_end(struct reiserfs_transaction_handle *, int); | 1795 | int journal_transaction_should_end(struct reiserfs_transaction_handle *, int); |
1776 | int reiserfs_in_journal(struct super_block *p_s_sb, unsigned int bmap_nr, | 1796 | int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr, |
1777 | int bit_nr, int searchall, b_blocknr_t *next); | 1797 | int bit_nr, int searchall, b_blocknr_t *next); |
1778 | int journal_begin(struct reiserfs_transaction_handle *, | 1798 | int journal_begin(struct reiserfs_transaction_handle *, |
1779 | struct super_block *p_s_sb, unsigned long); | 1799 | struct super_block *sb, unsigned long); |
1780 | int journal_join_abort(struct reiserfs_transaction_handle *, | 1800 | int journal_join_abort(struct reiserfs_transaction_handle *, |
1781 | struct super_block *p_s_sb, unsigned long); | 1801 | struct super_block *sb, unsigned long); |
1782 | void reiserfs_journal_abort(struct super_block *sb, int errno); | 1802 | void reiserfs_abort_journal(struct super_block *sb, int errno); |
1783 | void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...); | 1803 | void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...); |
1784 | int reiserfs_allocate_list_bitmaps(struct super_block *s, | 1804 | int reiserfs_allocate_list_bitmaps(struct super_block *s, |
1785 | struct reiserfs_list_bitmap *, unsigned int); | 1805 | struct reiserfs_list_bitmap *, unsigned int); |
@@ -1796,8 +1816,8 @@ int reiserfs_convert_objectid_map_v1(struct super_block *); | |||
1796 | 1816 | ||
1797 | /* stree.c */ | 1817 | /* stree.c */ |
1798 | int B_IS_IN_TREE(const struct buffer_head *); | 1818 | int B_IS_IN_TREE(const struct buffer_head *); |
1799 | extern void copy_item_head(struct item_head *p_v_to, | 1819 | extern void copy_item_head(struct item_head *to, |
1800 | const struct item_head *p_v_from); | 1820 | const struct item_head *from); |
1801 | 1821 | ||
1802 | // first key is in cpu form, second - le | 1822 | // first key is in cpu form, second - le |
1803 | extern int comp_short_keys(const struct reiserfs_key *le_key, | 1823 | extern int comp_short_keys(const struct reiserfs_key *le_key, |
@@ -1832,20 +1852,20 @@ static inline void copy_key(struct reiserfs_key *to, | |||
1832 | memcpy(to, from, KEY_SIZE); | 1852 | memcpy(to, from, KEY_SIZE); |
1833 | } | 1853 | } |
1834 | 1854 | ||
1835 | int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path); | 1855 | int comp_items(const struct item_head *stored_ih, const struct treepath *path); |
1836 | const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path, | 1856 | const struct reiserfs_key *get_rkey(const struct treepath *chk_path, |
1837 | const struct super_block *p_s_sb); | 1857 | const struct super_block *sb); |
1838 | int search_by_key(struct super_block *, const struct cpu_key *, | 1858 | int search_by_key(struct super_block *, const struct cpu_key *, |
1839 | struct treepath *, int); | 1859 | struct treepath *, int); |
1840 | #define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL) | 1860 | #define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL) |
1841 | int search_for_position_by_key(struct super_block *p_s_sb, | 1861 | int search_for_position_by_key(struct super_block *sb, |
1842 | const struct cpu_key *p_s_cpu_key, | 1862 | const struct cpu_key *cpu_key, |
1843 | struct treepath *p_s_search_path); | 1863 | struct treepath *search_path); |
1844 | extern void decrement_bcount(struct buffer_head *p_s_bh); | 1864 | extern void decrement_bcount(struct buffer_head *bh); |
1845 | void decrement_counters_in_path(struct treepath *p_s_search_path); | 1865 | void decrement_counters_in_path(struct treepath *search_path); |
1846 | void pathrelse(struct treepath *p_s_search_path); | 1866 | void pathrelse(struct treepath *search_path); |
1847 | int reiserfs_check_path(struct treepath *p); | 1867 | int reiserfs_check_path(struct treepath *p); |
1848 | void pathrelse_and_restore(struct super_block *s, struct treepath *p_s_search_path); | 1868 | void pathrelse_and_restore(struct super_block *s, struct treepath *search_path); |
1849 | 1869 | ||
1850 | int reiserfs_insert_item(struct reiserfs_transaction_handle *th, | 1870 | int reiserfs_insert_item(struct reiserfs_transaction_handle *th, |
1851 | struct treepath *path, | 1871 | struct treepath *path, |
@@ -1868,14 +1888,14 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th, | |||
1868 | int reiserfs_delete_item(struct reiserfs_transaction_handle *th, | 1888 | int reiserfs_delete_item(struct reiserfs_transaction_handle *th, |
1869 | struct treepath *path, | 1889 | struct treepath *path, |
1870 | const struct cpu_key *key, | 1890 | const struct cpu_key *key, |
1871 | struct inode *inode, struct buffer_head *p_s_un_bh); | 1891 | struct inode *inode, struct buffer_head *un_bh); |
1872 | 1892 | ||
1873 | void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, | 1893 | void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th, |
1874 | struct inode *inode, struct reiserfs_key *key); | 1894 | struct inode *inode, struct reiserfs_key *key); |
1875 | int reiserfs_delete_object(struct reiserfs_transaction_handle *th, | 1895 | int reiserfs_delete_object(struct reiserfs_transaction_handle *th, |
1876 | struct inode *p_s_inode); | 1896 | struct inode *inode); |
1877 | int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, | 1897 | int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, |
1878 | struct inode *p_s_inode, struct page *, | 1898 | struct inode *inode, struct page *, |
1879 | int update_timestamps); | 1899 | int update_timestamps); |
1880 | 1900 | ||
1881 | #define i_block_size(inode) ((inode)->i_sb->s_blocksize) | 1901 | #define i_block_size(inode) ((inode)->i_sb->s_blocksize) |
@@ -1919,10 +1939,12 @@ void make_le_item_head(struct item_head *ih, const struct cpu_key *key, | |||
1919 | loff_t offset, int type, int length, int entry_count); | 1939 | loff_t offset, int type, int length, int entry_count); |
1920 | struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key); | 1940 | struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key); |
1921 | 1941 | ||
1942 | struct reiserfs_security_handle; | ||
1922 | int reiserfs_new_inode(struct reiserfs_transaction_handle *th, | 1943 | int reiserfs_new_inode(struct reiserfs_transaction_handle *th, |
1923 | struct inode *dir, int mode, | 1944 | struct inode *dir, int mode, |
1924 | const char *symname, loff_t i_size, | 1945 | const char *symname, loff_t i_size, |
1925 | struct dentry *dentry, struct inode *inode); | 1946 | struct dentry *dentry, struct inode *inode, |
1947 | struct reiserfs_security_handle *security); | ||
1926 | 1948 | ||
1927 | void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th, | 1949 | void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th, |
1928 | struct inode *inode, loff_t size); | 1950 | struct inode *inode, loff_t size); |
@@ -1980,7 +2002,7 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset, | |||
1980 | #define PROC_INFO_MAX( sb, field, value ) VOID_V | 2002 | #define PROC_INFO_MAX( sb, field, value ) VOID_V |
1981 | #define PROC_INFO_INC( sb, field ) VOID_V | 2003 | #define PROC_INFO_INC( sb, field ) VOID_V |
1982 | #define PROC_INFO_ADD( sb, field, val ) VOID_V | 2004 | #define PROC_INFO_ADD( sb, field, val ) VOID_V |
1983 | #define PROC_INFO_BH_STAT( p_s_sb, p_s_bh, n_node_level ) VOID_V | 2005 | #define PROC_INFO_BH_STAT(sb, bh, n_node_level) VOID_V |
1984 | #endif | 2006 | #endif |
1985 | 2007 | ||
1986 | /* dir.c */ | 2008 | /* dir.c */ |
@@ -1988,6 +2010,7 @@ extern const struct inode_operations reiserfs_dir_inode_operations; | |||
1988 | extern const struct inode_operations reiserfs_symlink_inode_operations; | 2010 | extern const struct inode_operations reiserfs_symlink_inode_operations; |
1989 | extern const struct inode_operations reiserfs_special_inode_operations; | 2011 | extern const struct inode_operations reiserfs_special_inode_operations; |
1990 | extern const struct file_operations reiserfs_dir_operations; | 2012 | extern const struct file_operations reiserfs_dir_operations; |
2013 | int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t *); | ||
1991 | 2014 | ||
1992 | /* tail_conversion.c */ | 2015 | /* tail_conversion.c */ |
1993 | int direct2indirect(struct reiserfs_transaction_handle *, struct inode *, | 2016 | int direct2indirect(struct reiserfs_transaction_handle *, struct inode *, |
@@ -2004,13 +2027,20 @@ extern const struct address_space_operations reiserfs_address_space_operations; | |||
2004 | 2027 | ||
2005 | /* fix_nodes.c */ | 2028 | /* fix_nodes.c */ |
2006 | 2029 | ||
2007 | int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, | 2030 | int fix_nodes(int n_op_mode, struct tree_balance *tb, |
2008 | struct item_head *p_s_ins_ih, const void *); | 2031 | struct item_head *ins_ih, const void *); |
2009 | void unfix_nodes(struct tree_balance *); | 2032 | void unfix_nodes(struct tree_balance *); |
2010 | 2033 | ||
2011 | /* prints.c */ | 2034 | /* prints.c */ |
2012 | void reiserfs_panic(struct super_block *s, const char *fmt, ...) | 2035 | void __reiserfs_panic(struct super_block *s, const char *id, |
2036 | const char *function, const char *fmt, ...) | ||
2013 | __attribute__ ((noreturn)); | 2037 | __attribute__ ((noreturn)); |
2038 | #define reiserfs_panic(s, id, fmt, args...) \ | ||
2039 | __reiserfs_panic(s, id, __func__, fmt, ##args) | ||
2040 | void __reiserfs_error(struct super_block *s, const char *id, | ||
2041 | const char *function, const char *fmt, ...); | ||
2042 | #define reiserfs_error(s, id, fmt, args...) \ | ||
2043 | __reiserfs_error(s, id, __func__, fmt, ##args) | ||
2014 | void reiserfs_info(struct super_block *s, const char *fmt, ...); | 2044 | void reiserfs_info(struct super_block *s, const char *fmt, ...); |
2015 | void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...); | 2045 | void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...); |
2016 | void print_indirect_item(struct buffer_head *bh, int item_num); | 2046 | void print_indirect_item(struct buffer_head *bh, int item_num); |
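
reiserfs_panic() now takes a message id and picks up __func__ automatically, and a non-fatal reiserfs_error() counterpart is added with the same shape. A hedged sketch of the new calling convention; the "vs-" ids, the messages and the surrounding checks are illustrative:

/* Sketch: non-fatal report vs. hard stop with the reworked helpers. */
static void example_reports(struct super_block *sb, struct buffer_head *bh,
                            int level)
{
        if (!buffer_uptodate(bh))
                reiserfs_error(sb, "vs-5150", "block %llu is not up to date",
                               (unsigned long long)bh->b_blocknr);

        if (level > MAX_HEIGHT)
                reiserfs_panic(sb, "vs-5155", "tree level %d is out of range",
                               level);
}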
@@ -2047,7 +2077,7 @@ void leaf_paste_in_buffer(struct buffer_info *bi, int pasted_item_num, | |||
2047 | int zeros_number); | 2077 | int zeros_number); |
2048 | void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num, | 2078 | void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num, |
2049 | int pos_in_item, int cut_size); | 2079 | int pos_in_item, int cut_size); |
2050 | void leaf_paste_entries(struct buffer_head *bh, int item_num, int before, | 2080 | void leaf_paste_entries(struct buffer_info *bi, int item_num, int before, |
2051 | int new_entry_count, struct reiserfs_de_head *new_dehs, | 2081 | int new_entry_count, struct reiserfs_de_head *new_dehs, |
2052 | const char *records, int paste_size); | 2082 | const char *records, int paste_size); |
2053 | /* ibalance.c */ | 2083 | /* ibalance.c */ |
@@ -2203,6 +2233,6 @@ long reiserfs_compat_ioctl(struct file *filp, | |||
2203 | unsigned int cmd, unsigned long arg); | 2233 | unsigned int cmd, unsigned long arg); |
2204 | int reiserfs_unpack(struct inode *inode, struct file *filp); | 2234 | int reiserfs_unpack(struct inode *inode, struct file *filp); |
2205 | 2235 | ||
2206 | |||
2207 | #endif /* __KERNEL__ */ | 2236 | #endif /* __KERNEL__ */ |
2237 | |||
2208 | #endif /* _LINUX_REISER_FS_H */ | 2238 | #endif /* _LINUX_REISER_FS_H */ |
diff --git a/include/linux/reiserfs_fs_i.h b/include/linux/reiserfs_fs_i.h index ce3663fb0101..76360b36ac33 100644 --- a/include/linux/reiserfs_fs_i.h +++ b/include/linux/reiserfs_fs_i.h | |||
@@ -51,7 +51,7 @@ struct reiserfs_inode_info { | |||
51 | /* we use these for fsync or O_SYNC to decide which transaction | 51 | /* we use these for fsync or O_SYNC to decide which transaction |
52 | ** needs to be committed in order for this inode to be properly | 52 | ** needs to be committed in order for this inode to be properly |
53 | ** flushed */ | 53 | ** flushed */ |
54 | unsigned long i_trans_id; | 54 | unsigned int i_trans_id; |
55 | struct reiserfs_journal_list *i_jl; | 55 | struct reiserfs_journal_list *i_jl; |
56 | struct mutex i_mmap; | 56 | struct mutex i_mmap; |
57 | #ifdef CONFIG_REISERFS_FS_POSIX_ACL | 57 | #ifdef CONFIG_REISERFS_FS_POSIX_ACL |
@@ -59,7 +59,7 @@ struct reiserfs_inode_info { | |||
59 | struct posix_acl *i_acl_default; | 59 | struct posix_acl *i_acl_default; |
60 | #endif | 60 | #endif |
61 | #ifdef CONFIG_REISERFS_FS_XATTR | 61 | #ifdef CONFIG_REISERFS_FS_XATTR |
62 | struct rw_semaphore xattr_sem; | 62 | struct rw_semaphore i_xattr_sem; |
63 | #endif | 63 | #endif |
64 | struct inode vfs_inode; | 64 | struct inode vfs_inode; |
65 | }; | 65 | }; |
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index bda6b562a1e0..6b361d23a499 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h | |||
@@ -14,7 +14,7 @@ typedef enum { | |||
14 | } reiserfs_super_block_flags; | 14 | } reiserfs_super_block_flags; |
15 | 15 | ||
16 | /* struct reiserfs_super_block accessors/mutators | 16 | /* struct reiserfs_super_block accessors/mutators |
17 | * since this is a disk structure, it will always be in | 17 | * since this is a disk structure, it will always be in |
18 | * little endian format. */ | 18 | * little endian format. */ |
19 | #define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count)) | 19 | #define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count)) |
20 | #define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v)) | 20 | #define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v)) |
@@ -73,6 +73,9 @@ typedef enum { | |||
73 | #define sb_version(sbp) (le16_to_cpu((sbp)->s_v1.s_version)) | 73 | #define sb_version(sbp) (le16_to_cpu((sbp)->s_v1.s_version)) |
74 | #define set_sb_version(sbp,v) ((sbp)->s_v1.s_version = cpu_to_le16(v)) | 74 | #define set_sb_version(sbp,v) ((sbp)->s_v1.s_version = cpu_to_le16(v)) |
75 | 75 | ||
76 | #define sb_mnt_count(sbp) (le16_to_cpu((sbp)->s_mnt_count)) | ||
77 | #define set_sb_mnt_count(sbp, v) ((sbp)->s_mnt_count = cpu_to_le16(v)) | ||
78 | |||
76 | #define sb_reserved_for_journal(sbp) \ | 79 | #define sb_reserved_for_journal(sbp) \ |
77 | (le16_to_cpu((sbp)->s_v1.s_reserved_for_journal)) | 80 | (le16_to_cpu((sbp)->s_v1.s_reserved_for_journal)) |
78 | #define set_sb_reserved_for_journal(sbp,v) \ | 81 | #define set_sb_reserved_for_journal(sbp,v) \ |
@@ -80,16 +83,16 @@ typedef enum { | |||
80 | 83 | ||
81 | /* LOGGING -- */ | 84 | /* LOGGING -- */ |
82 | 85 | ||
83 | /* These all interrelate for performance. | 86 | /* These all interrelate for performance. |
84 | ** | 87 | ** |
85 | ** If the journal block count is smaller than n transactions, you lose speed. | 88 | ** If the journal block count is smaller than n transactions, you lose speed. |
86 | ** I don't know what n is yet, I'm guessing 8-16. | 89 | ** I don't know what n is yet, I'm guessing 8-16. |
87 | ** | 90 | ** |
88 | ** typical transaction size depends on the application, how often fsync is | 91 | ** typical transaction size depends on the application, how often fsync is |
89 | ** called, and how many metadata blocks you dirty in a 30 second period. | 92 | ** called, and how many metadata blocks you dirty in a 30 second period. |
90 | ** The more small files (<16k) you use, the larger your transactions will | 93 | ** The more small files (<16k) you use, the larger your transactions will |
91 | ** be. | 94 | ** be. |
92 | ** | 95 | ** |
93 | ** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal | 96 | ** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal |
94 | ** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough | 97 | ** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough |
95 | ** to prevent wrapping before dirty meta blocks get to disk. | 98 | ** to prevent wrapping before dirty meta blocks get to disk. |
@@ -153,7 +156,7 @@ struct reiserfs_journal_list { | |||
153 | atomic_t j_commit_left; | 156 | atomic_t j_commit_left; |
154 | atomic_t j_older_commits_done; /* all commits older than this on disk */ | 157 | atomic_t j_older_commits_done; /* all commits older than this on disk */ |
155 | struct mutex j_commit_mutex; | 158 | struct mutex j_commit_mutex; |
156 | unsigned long j_trans_id; | 159 | unsigned int j_trans_id; |
157 | time_t j_timestamp; | 160 | time_t j_timestamp; |
158 | struct reiserfs_list_bitmap *j_list_bitmap; | 161 | struct reiserfs_list_bitmap *j_list_bitmap; |
159 | struct buffer_head *j_commit_bh; /* commit buffer head */ | 162 | struct buffer_head *j_commit_bh; /* commit buffer head */ |
@@ -182,7 +185,7 @@ struct reiserfs_journal { | |||
182 | int j_1st_reserved_block; /* first block on s_dev of reserved area journal */ | 185 | int j_1st_reserved_block; /* first block on s_dev of reserved area journal */ |
183 | 186 | ||
184 | unsigned long j_state; | 187 | unsigned long j_state; |
185 | unsigned long j_trans_id; | 188 | unsigned int j_trans_id; |
186 | unsigned long j_mount_id; | 189 | unsigned long j_mount_id; |
187 | unsigned long j_start; /* start of current waiting commit (index into j_ap_blocks) */ | 190 | unsigned long j_start; /* start of current waiting commit (index into j_ap_blocks) */ |
188 | unsigned long j_len; /* length of current waiting commit */ | 191 | unsigned long j_len; /* length of current waiting commit */ |
@@ -190,7 +193,7 @@ struct reiserfs_journal { | |||
190 | atomic_t j_wcount; /* count of writers for current commit */ | 193 | atomic_t j_wcount; /* count of writers for current commit */ |
191 | unsigned long j_bcount; /* batch count. allows turning X transactions into 1 */ | 194 | unsigned long j_bcount; /* batch count. allows turning X transactions into 1 */ |
192 | unsigned long j_first_unflushed_offset; /* first unflushed transactions offset */ | 195 | unsigned long j_first_unflushed_offset; /* first unflushed transactions offset */ |
193 | unsigned long j_last_flush_trans_id; /* last fully flushed journal timestamp */ | 196 | unsigned j_last_flush_trans_id; /* last fully flushed journal timestamp */ |
194 | struct buffer_head *j_header_bh; | 197 | struct buffer_head *j_header_bh; |
195 | 198 | ||
196 | time_t j_trans_start_time; /* time this transaction started */ | 199 | time_t j_trans_start_time; /* time this transaction started */ |
@@ -223,10 +226,10 @@ struct reiserfs_journal { | |||
223 | int j_num_work_lists; /* number that need attention from kreiserfsd */ | 226 | int j_num_work_lists; /* number that need attention from kreiserfsd */ |
224 | 227 | ||
225 | /* debugging to make sure things are flushed in order */ | 228 | /* debugging to make sure things are flushed in order */ |
226 | int j_last_flush_id; | 229 | unsigned int j_last_flush_id; |
227 | 230 | ||
228 | /* debugging to make sure things are committed in order */ | 231 | /* debugging to make sure things are committed in order */ |
229 | int j_last_commit_id; | 232 | unsigned int j_last_commit_id; |
230 | 233 | ||
231 | struct list_head j_bitmap_nodes; | 234 | struct list_head j_bitmap_nodes; |
232 | struct list_head j_dirty_buffers; | 235 | struct list_head j_dirty_buffers; |
@@ -239,7 +242,7 @@ struct reiserfs_journal { | |||
239 | 242 | ||
240 | struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */ | 243 | struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */ |
241 | struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */ | 244 | struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */ |
242 | struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all | 245 | struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all |
243 | the transactions */ | 246 | the transactions */ |
244 | struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */ | 247 | struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */ |
245 | int j_persistent_trans; | 248 | int j_persistent_trans; |
@@ -399,10 +402,7 @@ struct reiserfs_sb_info { | |||
399 | int reserved_blocks; /* amount of blocks reserved for further allocations */ | 402 | int reserved_blocks; /* amount of blocks reserved for further allocations */ |
400 | spinlock_t bitmap_lock; /* this lock is now only used to protect reserved_blocks variable */ | 403 | spinlock_t bitmap_lock; /* this lock is now only used to protect reserved_blocks variable */ |
401 | struct dentry *priv_root; /* root of /.reiserfs_priv */ | 404 | struct dentry *priv_root; /* root of /.reiserfs_priv */ |
402 | #ifdef CONFIG_REISERFS_FS_XATTR | ||
403 | struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */ | 405 | struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */ |
404 | struct rw_semaphore xattr_dir_sem; | ||
405 | #endif | ||
406 | int j_errno; | 406 | int j_errno; |
407 | #ifdef CONFIG_QUOTA | 407 | #ifdef CONFIG_QUOTA |
408 | char *s_qf_names[MAXQUOTAS]; | 408 | char *s_qf_names[MAXQUOTAS]; |
@@ -426,7 +426,7 @@ enum reiserfs_mount_options { | |||
426 | partition will be dealt with in a | 426 | partition will be dealt with in a |
427 | manner of 3.5.x */ | 427 | manner of 3.5.x */ |
428 | 428 | ||
429 | /* -o hash={tea, rupasov, r5, detect} is meant for properly mounting | 429 | /* -o hash={tea, rupasov, r5, detect} is meant for properly mounting |
430 | ** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option | 430 | ** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option |
431 | ** is not required. If the normal autodetection code can't determine which | 431 | ** is not required. If the normal autodetection code can't determine which |
432 | ** hash to use (because both hashes had the same value for a file) | 432 | ** hash to use (because both hashes had the same value for a file) |
@@ -451,7 +451,6 @@ enum reiserfs_mount_options { | |||
451 | REISERFS_NO_UNHASHED_RELOCATION, | 451 | REISERFS_NO_UNHASHED_RELOCATION, |
452 | REISERFS_HASHED_RELOCATION, | 452 | REISERFS_HASHED_RELOCATION, |
453 | REISERFS_ATTRS, | 453 | REISERFS_ATTRS, |
454 | REISERFS_XATTRS, | ||
455 | REISERFS_XATTRS_USER, | 454 | REISERFS_XATTRS_USER, |
456 | REISERFS_POSIXACL, | 455 | REISERFS_POSIXACL, |
457 | REISERFS_BARRIER_NONE, | 456 | REISERFS_BARRIER_NONE, |
@@ -489,7 +488,7 @@ enum reiserfs_mount_options { | |||
489 | #define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG)) | 488 | #define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG)) |
490 | #define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED)) | 489 | #define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED)) |
491 | #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK)) | 490 | #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK)) |
492 | #define reiserfs_xattrs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS)) | 491 | #define reiserfs_xattrs(s) ((s)->s_xattr != NULL) |
493 | #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER)) | 492 | #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER)) |
494 | #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL)) | 493 | #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL)) |
495 | #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s)) | 494 | #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s)) |
diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index af135ae895db..dcae01e63e40 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h | |||
@@ -15,6 +15,12 @@ struct reiserfs_xattr_header { | |||
15 | __le32 h_hash; /* hash of the value */ | 15 | __le32 h_hash; /* hash of the value */ |
16 | }; | 16 | }; |
17 | 17 | ||
18 | struct reiserfs_security_handle { | ||
19 | char *name; | ||
20 | void *value; | ||
21 | size_t length; | ||
22 | }; | ||
23 | |||
18 | #ifdef __KERNEL__ | 24 | #ifdef __KERNEL__ |
19 | 25 | ||
20 | #include <linux/init.h> | 26 | #include <linux/init.h> |
@@ -29,22 +35,13 @@ struct iattr; | |||
29 | struct super_block; | 35 | struct super_block; |
30 | struct nameidata; | 36 | struct nameidata; |
31 | 37 | ||
32 | struct reiserfs_xattr_handler { | 38 | int reiserfs_xattr_register_handlers(void) __init; |
33 | char *prefix; | 39 | void reiserfs_xattr_unregister_handlers(void); |
34 | int (*init) (void); | 40 | int reiserfs_xattr_init(struct super_block *sb, int mount_flags); |
35 | void (*exit) (void); | 41 | int reiserfs_delete_xattrs(struct inode *inode); |
36 | int (*get) (struct inode * inode, const char *name, void *buffer, | 42 | int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); |
37 | size_t size); | ||
38 | int (*set) (struct inode * inode, const char *name, const void *buffer, | ||
39 | size_t size, int flags); | ||
40 | int (*del) (struct inode * inode, const char *name); | ||
41 | int (*list) (struct inode * inode, const char *name, int namelen, | ||
42 | char *out); | ||
43 | struct list_head handlers; | ||
44 | }; | ||
45 | 43 | ||
46 | #ifdef CONFIG_REISERFS_FS_XATTR | 44 | #ifdef CONFIG_REISERFS_FS_XATTR |
47 | #define is_reiserfs_priv_object(inode) IS_PRIVATE(inode) | ||
48 | #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) | 45 | #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) |
49 | ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name, | 46 | ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name, |
50 | void *buffer, size_t size); | 47 | void *buffer, size_t size); |
@@ -52,104 +49,97 @@ int reiserfs_setxattr(struct dentry *dentry, const char *name, | |||
52 | const void *value, size_t size, int flags); | 49 | const void *value, size_t size, int flags); |
53 | ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size); | 50 | ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size); |
54 | int reiserfs_removexattr(struct dentry *dentry, const char *name); | 51 | int reiserfs_removexattr(struct dentry *dentry, const char *name); |
55 | int reiserfs_delete_xattrs(struct inode *inode); | ||
56 | int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs); | ||
57 | int reiserfs_xattr_init(struct super_block *sb, int mount_flags); | ||
58 | int reiserfs_permission(struct inode *inode, int mask); | 52 | int reiserfs_permission(struct inode *inode, int mask); |
59 | 53 | ||
60 | int reiserfs_xattr_del(struct inode *, const char *); | 54 | int reiserfs_xattr_get(struct inode *, const char *, void *, size_t); |
61 | int reiserfs_xattr_get(const struct inode *, const char *, void *, size_t); | ||
62 | int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int); | 55 | int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int); |
63 | 56 | int reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *, | |
64 | extern struct reiserfs_xattr_handler user_handler; | 57 | struct inode *, const char *, const void *, |
65 | extern struct reiserfs_xattr_handler trusted_handler; | 58 | size_t, int); |
66 | extern struct reiserfs_xattr_handler security_handler; | 59 | |
67 | 60 | extern struct xattr_handler reiserfs_xattr_user_handler; | |
68 | int reiserfs_xattr_register_handlers(void) __init; | 61 | extern struct xattr_handler reiserfs_xattr_trusted_handler; |
69 | void reiserfs_xattr_unregister_handlers(void); | 62 | extern struct xattr_handler reiserfs_xattr_security_handler; |
70 | 63 | #ifdef CONFIG_REISERFS_FS_SECURITY | |
71 | static inline void reiserfs_write_lock_xattrs(struct super_block *sb) | 64 | int reiserfs_security_init(struct inode *dir, struct inode *inode, |
72 | { | 65 | struct reiserfs_security_handle *sec); |
73 | down_write(&REISERFS_XATTR_DIR_SEM(sb)); | 66 | int reiserfs_security_write(struct reiserfs_transaction_handle *th, |
74 | } | 67 | struct inode *inode, |
75 | static inline void reiserfs_write_unlock_xattrs(struct super_block *sb) | 68 | struct reiserfs_security_handle *sec); |
76 | { | 69 | void reiserfs_security_free(struct reiserfs_security_handle *sec); |
77 | up_write(&REISERFS_XATTR_DIR_SEM(sb)); | 70 | #endif |
78 | } | 71 | |
79 | static inline void reiserfs_read_lock_xattrs(struct super_block *sb) | 72 | #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header)) |
80 | { | 73 | static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size) |
81 | down_read(&REISERFS_XATTR_DIR_SEM(sb)); | ||
82 | } | ||
83 | |||
84 | static inline void reiserfs_read_unlock_xattrs(struct super_block *sb) | ||
85 | { | 74 | { |
86 | up_read(&REISERFS_XATTR_DIR_SEM(sb)); | 75 | loff_t ret = 0; |
76 | if (reiserfs_file_data_log(inode)) { | ||
77 | ret = _ROUND_UP(xattr_size(size), inode->i_sb->s_blocksize); | ||
78 | ret >>= inode->i_sb->s_blocksize_bits; | ||
79 | } | ||
80 | return ret; | ||
87 | } | 81 | } |
88 | 82 | ||
89 | static inline void reiserfs_write_lock_xattr_i(struct inode *inode) | 83 | /* We may have to create up to 3 objects: xattr root, xattr dir, xattr file. |
90 | { | 84 | * Let's try to be smart about it. |
91 | down_write(&REISERFS_I(inode)->xattr_sem); | 85 | * xattr root: We cache it. If it's not cached, we may need to create it. |
92 | } | 86 | * xattr dir: If anything has been loaded for this inode, we can set a flag |
93 | static inline void reiserfs_write_unlock_xattr_i(struct inode *inode) | 87 | * saying so. |
88 | * xattr file: Since we don't cache xattrs, we can't tell. We always include | ||
89 | * blocks for it. | ||
90 | * | ||
91 | * However, since root and dir can be created between calls - YOU MUST SAVE | ||
92 | * THIS VALUE. | ||
93 | */ | ||
94 | static inline size_t reiserfs_xattr_jcreate_nblocks(struct inode *inode) | ||
94 | { | 95 | { |
95 | up_write(&REISERFS_I(inode)->xattr_sem); | 96 | size_t nblocks = JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); |
96 | } | ||
97 | static inline void reiserfs_read_lock_xattr_i(struct inode *inode) | ||
98 | { | ||
99 | down_read(&REISERFS_I(inode)->xattr_sem); | ||
100 | } | ||
101 | 97 | ||
102 | static inline void reiserfs_read_unlock_xattr_i(struct inode *inode) | 98 | if ((REISERFS_I(inode)->i_flags & i_has_xattr_dir) == 0) { |
103 | { | 99 | nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); |
104 | up_read(&REISERFS_I(inode)->xattr_sem); | 100 | if (REISERFS_SB(inode->i_sb)->xattr_root == NULL) |
105 | } | 101 | nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); |
102 | } | ||
106 | 103 | ||
107 | static inline void reiserfs_mark_inode_private(struct inode *inode) | 104 | return nblocks; |
108 | { | ||
109 | inode->i_flags |= S_PRIVATE; | ||
110 | } | 105 | } |
111 | 106 | ||
112 | static inline void reiserfs_init_xattr_rwsem(struct inode *inode) | 107 | static inline void reiserfs_init_xattr_rwsem(struct inode *inode) |
113 | { | 108 | { |
114 | init_rwsem(&REISERFS_I(inode)->xattr_sem); | 109 | init_rwsem(&REISERFS_I(inode)->i_xattr_sem); |
115 | } | 110 | } |
116 | 111 | ||
117 | #else | 112 | #else |
118 | 113 | ||
119 | #define is_reiserfs_priv_object(inode) 0 | ||
120 | #define reiserfs_mark_inode_private(inode) do {;} while(0) | ||
121 | #define reiserfs_getxattr NULL | 114 | #define reiserfs_getxattr NULL |
122 | #define reiserfs_setxattr NULL | 115 | #define reiserfs_setxattr NULL |
123 | #define reiserfs_listxattr NULL | 116 | #define reiserfs_listxattr NULL |
124 | #define reiserfs_removexattr NULL | 117 | #define reiserfs_removexattr NULL |
125 | #define reiserfs_write_lock_xattrs(sb) do {;} while(0) | ||
126 | #define reiserfs_write_unlock_xattrs(sb) do {;} while(0) | ||
127 | #define reiserfs_read_lock_xattrs(sb) | ||
128 | #define reiserfs_read_unlock_xattrs(sb) | ||
129 | 118 | ||
130 | #define reiserfs_permission NULL | 119 | #define reiserfs_permission NULL |
131 | 120 | ||
132 | #define reiserfs_xattr_register_handlers() 0 | 121 | static inline void reiserfs_init_xattr_rwsem(struct inode *inode) |
133 | #define reiserfs_xattr_unregister_handlers() | ||
134 | |||
135 | static inline int reiserfs_delete_xattrs(struct inode *inode) | ||
136 | { | 122 | { |
137 | return 0; | 123 | } |
138 | }; | 124 | #endif /* CONFIG_REISERFS_FS_XATTR */ |
139 | static inline int reiserfs_chown_xattrs(struct inode *inode, | 125 | |
140 | struct iattr *attrs) | 126 | #ifndef CONFIG_REISERFS_FS_SECURITY |
127 | static inline int reiserfs_security_init(struct inode *dir, | ||
128 | struct inode *inode, | ||
129 | struct reiserfs_security_handle *sec) | ||
141 | { | 130 | { |
142 | return 0; | 131 | return 0; |
143 | }; | 132 | } |
144 | static inline int reiserfs_xattr_init(struct super_block *sb, int mount_flags) | 133 | static inline int |
134 | reiserfs_security_write(struct reiserfs_transaction_handle *th, | ||
135 | struct inode *inode, | ||
136 | struct reiserfs_security_handle *sec) | ||
145 | { | 137 | { |
146 | sb->s_flags = (sb->s_flags & ~MS_POSIXACL); /* to be sure */ | ||
147 | return 0; | 138 | return 0; |
148 | }; | ||
149 | static inline void reiserfs_init_xattr_rwsem(struct inode *inode) | ||
150 | { | ||
151 | } | 139 | } |
152 | #endif /* CONFIG_REISERFS_FS_XATTR */ | 140 | static inline void reiserfs_security_free(struct reiserfs_security_handle *sec) |
141 | {} | ||
142 | #endif | ||
153 | 143 | ||
154 | #endif /* __KERNEL__ */ | 144 | #endif /* __KERNEL__ */ |
155 | 145 | ||
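
The xattr rework replaces the private reiserfs_xattr_handler list with generic struct xattr_handler instances and threads struct reiserfs_security_handle through inode creation: reiserfs_security_init() fetches the LSM label before the transaction is sized, reiserfs_xattr_jcreate_nblocks() accounts for the blocks it may need, and reiserfs_security_write() stores it from inside the transaction. A hedged lifecycle sketch, assuming CONFIG_REISERFS_FS_XATTR; the wrapper and the block arithmetic are illustrative, and treating a non-negative reiserfs_security_init() return as extra journal blocks is an assumption noted in the comment:

/* Sketch: attach a security xattr while creating "inode" under "dir". */
static int example_new_inode_with_security(struct reiserfs_transaction_handle *th,
                                           struct inode *dir, struct inode *inode,
                                           struct dentry *dentry, int mode)
{
        struct reiserfs_security_handle security;
        unsigned long jblocks;
        int err;

        err = reiserfs_security_init(dir, inode, &security);
        if (err < 0)
                return err;
        /* assumption: a non-negative return is extra journal blocks to reserve */
        jblocks = JOURNAL_PER_BALANCE_CNT * 2 +
                  reiserfs_xattr_jcreate_nblocks(inode) + err;

        err = journal_begin(th, dir->i_sb, jblocks);
        if (!err) {
                err = reiserfs_new_inode(th, dir, mode, NULL, 0,
                                         dentry, inode, &security);
                if (!err)
                        err = reiserfs_security_write(th, inode, &security);
                journal_end(th, dir->i_sb, jblocks);
        }
        reiserfs_security_free(&security);
        return err;
}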
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index b3b359660082..e1b7b2173885 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -8,7 +8,7 @@ struct ring_buffer; | |||
8 | struct ring_buffer_iter; | 8 | struct ring_buffer_iter; |
9 | 9 | ||
10 | /* | 10 | /* |
11 | * Don't reference this struct directly, use functions below. | 11 | * Don't refer to this struct directly, use functions below. |
12 | */ | 12 | */ |
13 | struct ring_buffer_event { | 13 | struct ring_buffer_event { |
14 | u32 type:2, len:3, time_delta:27; | 14 | u32 type:2, len:3, time_delta:27; |
@@ -18,10 +18,13 @@ struct ring_buffer_event { | |||
18 | /** | 18 | /** |
19 | * enum ring_buffer_type - internal ring buffer types | 19 | * enum ring_buffer_type - internal ring buffer types |
20 | * | 20 | * |
21 | * @RINGBUF_TYPE_PADDING: Left over page padding | 21 | * @RINGBUF_TYPE_PADDING: Left over page padding or discarded event |
22 | * array is ignored | 22 | * If time_delta is 0: |
23 | * size is variable depending on how much | 23 | * array is ignored |
24 | * size is variable depending on how much | ||
24 | * padding is needed | 25 | * padding is needed |
26 | * If time_delta is non zero: | ||
27 | * everything else same as RINGBUF_TYPE_DATA | ||
25 | * | 28 | * |
26 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta | 29 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta |
27 | * array[0] = time delta (28 .. 59) | 30 | * array[0] = time delta (28 .. 59) |
@@ -65,6 +68,8 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) | |||
65 | return event->time_delta; | 68 | return event->time_delta; |
66 | } | 69 | } |
67 | 70 | ||
71 | void ring_buffer_event_discard(struct ring_buffer_event *event); | ||
72 | |||
68 | /* | 73 | /* |
69 | * size is in bytes for each per CPU buffer. | 74 | * size is in bytes for each per CPU buffer. |
70 | */ | 75 | */ |
@@ -74,13 +79,10 @@ void ring_buffer_free(struct ring_buffer *buffer); | |||
74 | 79 | ||
75 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); | 80 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); |
76 | 81 | ||
77 | struct ring_buffer_event * | 82 | struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, |
78 | ring_buffer_lock_reserve(struct ring_buffer *buffer, | 83 | unsigned long length); |
79 | unsigned long length, | ||
80 | unsigned long *flags); | ||
81 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, | 84 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, |
82 | struct ring_buffer_event *event, | 85 | struct ring_buffer_event *event); |
83 | unsigned long flags); | ||
84 | int ring_buffer_write(struct ring_buffer *buffer, | 86 | int ring_buffer_write(struct ring_buffer *buffer, |
85 | unsigned long length, void *data); | 87 | unsigned long length, void *data); |
86 | 88 | ||
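
ring_buffer_lock_reserve() and ring_buffer_unlock_commit() lose the caller-visible flags argument; interrupt state is now handled inside the ring buffer, and a reserved event that turns out to be unwanted can be dropped with the new ring_buffer_event_discard(). A hedged sketch of the updated write path; the payload struct and the wrapper function are illustrative:

/* Sketch: reserve, fill and commit one event with the new signatures. */
struct example_entry {
        u64 ts;
        u32 value;
};

static int example_write(struct ring_buffer *buffer, u32 value)
{
        struct ring_buffer_event *event;
        struct example_entry *entry;

        event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
        if (!event)
                return -EBUSY;

        entry = ring_buffer_event_data(event);
        entry->ts = ring_buffer_time_stamp(buffer, raw_smp_processor_id());
        entry->value = value;

        return ring_buffer_unlock_commit(buffer, event);
}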
@@ -121,17 +123,19 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer); | |||
121 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); | 123 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); |
122 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); | 124 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); |
123 | 125 | ||
124 | u64 ring_buffer_time_stamp(int cpu); | 126 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); |
125 | void ring_buffer_normalize_time_stamp(int cpu, u64 *ts); | 127 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, |
128 | int cpu, u64 *ts); | ||
129 | void ring_buffer_set_clock(struct ring_buffer *buffer, | ||
130 | u64 (*clock)(void)); | ||
131 | |||
132 | size_t ring_buffer_page_len(void *page); | ||
126 | 133 | ||
127 | void tracing_on(void); | ||
128 | void tracing_off(void); | ||
129 | void tracing_off_permanent(void); | ||
130 | 134 | ||
131 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); | 135 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer); |
132 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); | 136 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); |
133 | int ring_buffer_read_page(struct ring_buffer *buffer, | 137 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, |
134 | void **data_page, int cpu, int full); | 138 | size_t len, int cpu, int full); |
135 | 139 | ||
136 | enum ring_buffer_flags { | 140 | enum ring_buffer_flags { |
137 | RB_FL_OVERWRITE = 1 << 0, | 141 | RB_FL_OVERWRITE = 1 << 0, |
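
ring_buffer_read_page() also grows a len argument so callers can say how much of the page they actually want, and the timestamp helpers now take the buffer whose clock should be used. A hedged sketch of draining one CPU's page; the wrapper and the cpu/full choices are illustrative, and the exact return-value convention is not shown in this hunk:

/* Sketch: copy up to a page of events out of cpu 0. */
static int example_read_page(struct ring_buffer *buffer)
{
        void *page = ring_buffer_alloc_read_page(buffer);
        int ret;

        if (!page)
                return -ENOMEM;

        ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, 0, 0);
        ring_buffer_free_read_page(buffer, page);
        return ret;
}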
diff --git a/include/linux/rotary_encoder.h b/include/linux/rotary_encoder.h new file mode 100644 index 000000000000..12d63a30c347 --- /dev/null +++ b/include/linux/rotary_encoder.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ROTARY_ENCODER_H__ | ||
2 | #define __ROTARY_ENCODER_H__ | ||
3 | |||
4 | struct rotary_encoder_platform_data { | ||
5 | unsigned int steps; | ||
6 | unsigned int axis; | ||
7 | unsigned int gpio_a; | ||
8 | unsigned int gpio_b; | ||
9 | unsigned int inverted_a; | ||
10 | unsigned int inverted_b; | ||
11 | }; | ||
12 | |||
13 | #endif /* __ROTARY_ENCODER_H__ */ | ||
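
The new rotary_encoder_platform_data describes a GPIO-connected rotary encoder to the generic input driver: steps per revolution, the input axis to report, the two phase GPIOs and whether each line is inverted. A hedged sketch of the board-code side; the GPIO numbers and axis choice are illustrative, and the "rotary-encoder" device name assumes the matching platform driver registers under that name:

/* Sketch: board support registering one encoder. */
static struct rotary_encoder_platform_data example_encoder_pdata = {
        .steps          = 24,
        .axis           = ABS_X,        /* illustrative axis choice */
        .gpio_a         = 42,           /* illustrative GPIO numbers */
        .gpio_b         = 43,
        .inverted_a     = 0,
        .inverted_b     = 0,
};

static struct platform_device example_encoder_device = {
        .name   = "rotary-encoder",
        .id     = 0,
        .dev    = {
                .platform_data = &example_encoder_pdata,
        },
};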
diff --git a/include/linux/rtc-v3020.h b/include/linux/rtc-v3020.h index bf74e63c98fe..8ba646e610d9 100644 --- a/include/linux/rtc-v3020.h +++ b/include/linux/rtc-v3020.h | |||
@@ -14,6 +14,12 @@ | |||
14 | * is used depends on the board. */ | 14 | * is used depends on the board. */ |
15 | struct v3020_platform_data { | 15 | struct v3020_platform_data { |
16 | int leftshift; /* (1<<(leftshift)) & readl() */ | 16 | int leftshift; /* (1<<(leftshift)) & readl() */ |
17 | |||
18 | int use_gpio:1; | ||
19 | unsigned int gpio_cs; | ||
20 | unsigned int gpio_wr; | ||
21 | unsigned int gpio_rd; | ||
22 | unsigned int gpio_io; | ||
17 | }; | 23 | }; |
18 | 24 | ||
19 | #define V3020_STATUS_0 0x00 | 25 | #define V3020_STATUS_0 0x00 |
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 4046b75563c1..60f88a7fb13d 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
@@ -99,6 +99,7 @@ struct rtc_pll_info { | |||
99 | 99 | ||
100 | #ifdef __KERNEL__ | 100 | #ifdef __KERNEL__ |
101 | 101 | ||
102 | #include <linux/types.h> | ||
102 | #include <linux/interrupt.h> | 103 | #include <linux/interrupt.h> |
103 | 104 | ||
104 | extern int rtc_month_days(unsigned int month, unsigned int year); | 105 | extern int rtc_month_days(unsigned int month, unsigned int year); |
@@ -232,6 +233,11 @@ int rtc_register(rtc_task_t *task); | |||
232 | int rtc_unregister(rtc_task_t *task); | 233 | int rtc_unregister(rtc_task_t *task); |
233 | int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); | 234 | int rtc_control(rtc_task_t *t, unsigned int cmd, unsigned long arg); |
234 | 235 | ||
236 | static inline bool is_leap_year(unsigned int year) | ||
237 | { | ||
238 | return (!(year % 4) && (year % 100)) || !(year % 400); | ||
239 | } | ||
240 | |||
235 | #endif /* __KERNEL__ */ | 241 | #endif /* __KERNEL__ */ |
236 | 242 | ||
237 | #endif /* _LINUX_RTC_H_ */ | 243 | #endif /* _LINUX_RTC_H_ */ |
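
is_leap_year() centralizes the Gregorian rule that several RTC drivers open-coded: a year is a leap year if it is divisible by 4 but not by 100, or if it is divisible by 400, so 2000 and 2004 qualify while 1900 and 2100 do not. A minimal sketch of driver-side use, mirroring what rtc_month_days() already provides; month is 0-based, and the table and wrapper are illustrative:

/* Sketch: days in a month, with February corrected by the new helper. */
static const unsigned char example_days_in_month[] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};

static unsigned int example_month_days(unsigned int month, unsigned int year)
{
        return example_days_in_month[month] +
               (month == 1 && is_leap_year(year) ? 1 : 0);
}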
diff --git a/include/linux/sched.h b/include/linux/sched.h index d3b787c7aef3..3fa82b353c98 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -68,7 +68,7 @@ struct sched_param { | |||
68 | #include <linux/smp.h> | 68 | #include <linux/smp.h> |
69 | #include <linux/sem.h> | 69 | #include <linux/sem.h> |
70 | #include <linux/signal.h> | 70 | #include <linux/signal.h> |
71 | #include <linux/fs_struct.h> | 71 | #include <linux/path.h> |
72 | #include <linux/compiler.h> | 72 | #include <linux/compiler.h> |
73 | #include <linux/completion.h> | 73 | #include <linux/completion.h> |
74 | #include <linux/pid.h> | 74 | #include <linux/pid.h> |
@@ -97,6 +97,7 @@ struct futex_pi_state; | |||
97 | struct robust_list_head; | 97 | struct robust_list_head; |
98 | struct bio; | 98 | struct bio; |
99 | struct bts_tracer; | 99 | struct bts_tracer; |
100 | struct fs_struct; | ||
100 | 101 | ||
101 | /* | 102 | /* |
102 | * List of flags we want to share for kernel threads, | 103 | * List of flags we want to share for kernel threads, |
@@ -137,6 +138,8 @@ extern unsigned long nr_uninterruptible(void); | |||
137 | extern unsigned long nr_active(void); | 138 | extern unsigned long nr_active(void); |
138 | extern unsigned long nr_iowait(void); | 139 | extern unsigned long nr_iowait(void); |
139 | 140 | ||
141 | extern unsigned long get_parent_ip(unsigned long addr); | ||
142 | |||
140 | struct seq_file; | 143 | struct seq_file; |
141 | struct cfs_rq; | 144 | struct cfs_rq; |
142 | struct task_group; | 145 | struct task_group; |
@@ -202,7 +205,8 @@ extern unsigned long long time_sync_thresh; | |||
202 | #define task_is_stopped_or_traced(task) \ | 205 | #define task_is_stopped_or_traced(task) \ |
203 | ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) | 206 | ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) |
204 | #define task_contributes_to_load(task) \ | 207 | #define task_contributes_to_load(task) \ |
205 | ((task->state & TASK_UNINTERRUPTIBLE) != 0) | 208 | ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ |
209 | (task->flags & PF_FROZEN) == 0) | ||
206 | 210 | ||
207 | #define __set_task_state(tsk, state_value) \ | 211 | #define __set_task_state(tsk, state_value) \ |
208 | do { (tsk)->state = (state_value); } while (0) | 212 | do { (tsk)->state = (state_value); } while (0) |
@@ -297,17 +301,11 @@ extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, | |||
297 | struct file *filp, void __user *buffer, | 301 | struct file *filp, void __user *buffer, |
298 | size_t *lenp, loff_t *ppos); | 302 | size_t *lenp, loff_t *ppos); |
299 | extern unsigned int softlockup_panic; | 303 | extern unsigned int softlockup_panic; |
300 | extern unsigned long sysctl_hung_task_check_count; | ||
301 | extern unsigned long sysctl_hung_task_timeout_secs; | ||
302 | extern unsigned long sysctl_hung_task_warnings; | ||
303 | extern int softlockup_thresh; | 304 | extern int softlockup_thresh; |
304 | #else | 305 | #else |
305 | static inline void softlockup_tick(void) | 306 | static inline void softlockup_tick(void) |
306 | { | 307 | { |
307 | } | 308 | } |
308 | static inline void spawn_softlockup_task(void) | ||
309 | { | ||
310 | } | ||
311 | static inline void touch_softlockup_watchdog(void) | 309 | static inline void touch_softlockup_watchdog(void) |
312 | { | 310 | { |
313 | } | 311 | } |
@@ -316,6 +314,15 @@ static inline void touch_all_softlockup_watchdogs(void) | |||
316 | } | 314 | } |
317 | #endif | 315 | #endif |
318 | 316 | ||
317 | #ifdef CONFIG_DETECT_HUNG_TASK | ||
318 | extern unsigned int sysctl_hung_task_panic; | ||
319 | extern unsigned long sysctl_hung_task_check_count; | ||
320 | extern unsigned long sysctl_hung_task_timeout_secs; | ||
321 | extern unsigned long sysctl_hung_task_warnings; | ||
322 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | ||
323 | struct file *filp, void __user *buffer, | ||
324 | size_t *lenp, loff_t *ppos); | ||
325 | #endif | ||
319 | 326 | ||
320 | /* Attach to any functions which should be ignored in wchan output. */ | 327 | /* Attach to any functions which should be ignored in wchan output. */ |
321 | #define __sched __attribute__((__section__(".sched.text"))) | 328 | #define __sched __attribute__((__section__(".sched.text"))) |
@@ -331,7 +338,9 @@ extern signed long schedule_timeout(signed long timeout); | |||
331 | extern signed long schedule_timeout_interruptible(signed long timeout); | 338 | extern signed long schedule_timeout_interruptible(signed long timeout); |
332 | extern signed long schedule_timeout_killable(signed long timeout); | 339 | extern signed long schedule_timeout_killable(signed long timeout); |
333 | extern signed long schedule_timeout_uninterruptible(signed long timeout); | 340 | extern signed long schedule_timeout_uninterruptible(signed long timeout); |
341 | asmlinkage void __schedule(void); | ||
334 | asmlinkage void schedule(void); | 342 | asmlinkage void schedule(void); |
343 | extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner); | ||
335 | 344 | ||
336 | struct nsproxy; | 345 | struct nsproxy; |
337 | struct user_namespace; | 346 | struct user_namespace; |
@@ -389,8 +398,15 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); | |||
389 | (mm)->hiwater_vm = (mm)->total_vm; \ | 398 | (mm)->hiwater_vm = (mm)->total_vm; \ |
390 | } while (0) | 399 | } while (0) |
391 | 400 | ||
392 | #define get_mm_hiwater_rss(mm) max((mm)->hiwater_rss, get_mm_rss(mm)) | 401 | static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm) |
393 | #define get_mm_hiwater_vm(mm) max((mm)->hiwater_vm, (mm)->total_vm) | 402 | { |
403 | return max(mm->hiwater_rss, get_mm_rss(mm)); | ||
404 | } | ||
405 | |||
406 | static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm) | ||
407 | { | ||
408 | return max(mm->hiwater_vm, mm->total_vm); | ||
409 | } | ||
394 | 410 | ||
395 | extern void set_dumpable(struct mm_struct *mm, int value); | 411 | extern void set_dumpable(struct mm_struct *mm, int value); |
396 | extern int get_dumpable(struct mm_struct *mm); | 412 | extern int get_dumpable(struct mm_struct *mm); |
@@ -538,25 +554,8 @@ struct signal_struct { | |||
538 | 554 | ||
539 | struct list_head cpu_timers[3]; | 555 | struct list_head cpu_timers[3]; |
540 | 556 | ||
541 | /* job control IDs */ | ||
542 | |||
543 | /* | ||
544 | * pgrp and session fields are deprecated. | ||
545 | * use the task_session_Xnr and task_pgrp_Xnr routines below | ||
546 | */ | ||
547 | |||
548 | union { | ||
549 | pid_t pgrp __deprecated; | ||
550 | pid_t __pgrp; | ||
551 | }; | ||
552 | |||
553 | struct pid *tty_old_pgrp; | 557 | struct pid *tty_old_pgrp; |
554 | 558 | ||
555 | union { | ||
556 | pid_t session __deprecated; | ||
557 | pid_t __session; | ||
558 | }; | ||
559 | |||
560 | /* boolean value for session group leader */ | 559 | /* boolean value for session group leader */ |
561 | int leader; | 560 | int leader; |
562 | 561 | ||
@@ -1260,9 +1259,8 @@ struct task_struct { | |||
1260 | /* ipc stuff */ | 1259 | /* ipc stuff */ |
1261 | struct sysv_sem sysvsem; | 1260 | struct sysv_sem sysvsem; |
1262 | #endif | 1261 | #endif |
1263 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 1262 | #ifdef CONFIG_DETECT_HUNG_TASK |
1264 | /* hung task detection */ | 1263 | /* hung task detection */ |
1265 | unsigned long last_switch_timestamp; | ||
1266 | unsigned long last_switch_count; | 1264 | unsigned long last_switch_count; |
1267 | #endif | 1265 | #endif |
1268 | /* CPU-specific state of this task */ | 1266 | /* CPU-specific state of this task */ |
@@ -1299,6 +1297,11 @@ struct task_struct { | |||
1299 | /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ | 1297 | /* Protection of (de-)allocation: mm, files, fs, tty, keyrings */ |
1300 | spinlock_t alloc_lock; | 1298 | spinlock_t alloc_lock; |
1301 | 1299 | ||
1300 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
1301 | /* IRQ handler threads */ | ||
1302 | struct irqaction *irqaction; | ||
1303 | #endif | ||
1304 | |||
1302 | /* Protection of the PI data structures: */ | 1305 | /* Protection of the PI data structures: */ |
1303 | spinlock_t pi_lock; | 1306 | spinlock_t pi_lock; |
1304 | 1307 | ||
@@ -1334,6 +1337,7 @@ struct task_struct { | |||
1334 | int lockdep_depth; | 1337 | int lockdep_depth; |
1335 | unsigned int lockdep_recursion; | 1338 | unsigned int lockdep_recursion; |
1336 | struct held_lock held_locks[MAX_LOCK_DEPTH]; | 1339 | struct held_lock held_locks[MAX_LOCK_DEPTH]; |
1340 | gfp_t lockdep_reclaim_gfp; | ||
1337 | #endif | 1341 | #endif |
1338 | 1342 | ||
1339 | /* journalling filesystem info */ | 1343 | /* journalling filesystem info */ |
@@ -1411,6 +1415,8 @@ struct task_struct { | |||
1411 | int curr_ret_stack; | 1415 | int curr_ret_stack; |
1412 | /* Stack of return addresses for return function tracing */ | 1416 | /* Stack of return addresses for return function tracing */ |
1413 | struct ftrace_ret_stack *ret_stack; | 1417 | struct ftrace_ret_stack *ret_stack; |
1418 | /* time stamp for last schedule */ | ||
1419 | unsigned long long ftrace_timestamp; | ||
1414 | /* | 1420 | /* |
1415 | * Number of functions that haven't been traced | 1421 | * Number of functions that haven't been traced |
1416 | * because of depth overrun. | 1422 | * because of depth overrun. |
@@ -1459,16 +1465,6 @@ static inline int rt_task(struct task_struct *p) | |||
1459 | return rt_prio(p->prio); | 1465 | return rt_prio(p->prio); |
1460 | } | 1466 | } |
1461 | 1467 | ||
1462 | static inline void set_task_session(struct task_struct *tsk, pid_t session) | ||
1463 | { | ||
1464 | tsk->signal->__session = session; | ||
1465 | } | ||
1466 | |||
1467 | static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp) | ||
1468 | { | ||
1469 | tsk->signal->__pgrp = pgrp; | ||
1470 | } | ||
1471 | |||
1472 | static inline struct pid *task_pid(struct task_struct *task) | 1468 | static inline struct pid *task_pid(struct task_struct *task) |
1473 | { | 1469 | { |
1474 | return task->pids[PIDTYPE_PID].pid; | 1470 | return task->pids[PIDTYPE_PID].pid; |
@@ -1479,6 +1475,11 @@ static inline struct pid *task_tgid(struct task_struct *task) | |||
1479 | return task->group_leader->pids[PIDTYPE_PID].pid; | 1475 | return task->group_leader->pids[PIDTYPE_PID].pid; |
1480 | } | 1476 | } |
1481 | 1477 | ||
1478 | /* | ||
1479 | * Without tasklist or rcu lock it is not safe to dereference | ||
1480 | * the result of task_pgrp/task_session even if task == current, | ||
1481 | * we can race with another thread doing sys_setsid/sys_setpgid. | ||
1482 | */ | ||
1482 | static inline struct pid *task_pgrp(struct task_struct *task) | 1483 | static inline struct pid *task_pgrp(struct task_struct *task) |
1483 | { | 1484 | { |
1484 | return task->group_leader->pids[PIDTYPE_PGID].pid; | 1485 | return task->group_leader->pids[PIDTYPE_PGID].pid; |
@@ -1504,17 +1505,23 @@ struct pid_namespace; | |||
1504 | * | 1505 | * |
1505 | * see also pid_nr() etc in include/linux/pid.h | 1506 | * see also pid_nr() etc in include/linux/pid.h |
1506 | */ | 1507 | */ |
1508 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, | ||
1509 | struct pid_namespace *ns); | ||
1507 | 1510 | ||
1508 | static inline pid_t task_pid_nr(struct task_struct *tsk) | 1511 | static inline pid_t task_pid_nr(struct task_struct *tsk) |
1509 | { | 1512 | { |
1510 | return tsk->pid; | 1513 | return tsk->pid; |
1511 | } | 1514 | } |
1512 | 1515 | ||
1513 | pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | 1516 | static inline pid_t task_pid_nr_ns(struct task_struct *tsk, |
1517 | struct pid_namespace *ns) | ||
1518 | { | ||
1519 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); | ||
1520 | } | ||
1514 | 1521 | ||
1515 | static inline pid_t task_pid_vnr(struct task_struct *tsk) | 1522 | static inline pid_t task_pid_vnr(struct task_struct *tsk) |
1516 | { | 1523 | { |
1517 | return pid_vnr(task_pid(tsk)); | 1524 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); |
1518 | } | 1525 | } |
1519 | 1526 | ||
1520 | 1527 | ||
@@ -1531,31 +1538,34 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk) | |||
1531 | } | 1538 | } |
1532 | 1539 | ||
1533 | 1540 | ||
1534 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) | 1541 | static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, |
1542 | struct pid_namespace *ns) | ||
1535 | { | 1543 | { |
1536 | return tsk->signal->__pgrp; | 1544 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); |
1537 | } | 1545 | } |
1538 | 1546 | ||
1539 | pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | ||
1540 | |||
1541 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) | 1547 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) |
1542 | { | 1548 | { |
1543 | return pid_vnr(task_pgrp(tsk)); | 1549 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); |
1544 | } | 1550 | } |
1545 | 1551 | ||
1546 | 1552 | ||
1547 | static inline pid_t task_session_nr(struct task_struct *tsk) | 1553 | static inline pid_t task_session_nr_ns(struct task_struct *tsk, |
1554 | struct pid_namespace *ns) | ||
1548 | { | 1555 | { |
1549 | return tsk->signal->__session; | 1556 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); |
1550 | } | 1557 | } |
1551 | 1558 | ||
1552 | pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); | ||
1553 | |||
1554 | static inline pid_t task_session_vnr(struct task_struct *tsk) | 1559 | static inline pid_t task_session_vnr(struct task_struct *tsk) |
1555 | { | 1560 | { |
1556 | return pid_vnr(task_session(tsk)); | 1561 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); |
1557 | } | 1562 | } |
1558 | 1563 | ||
1564 | /* obsolete, do not use */ | ||
1565 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) | ||
1566 | { | ||
1567 | return task_pgrp_nr_ns(tsk, &init_pid_ns); | ||
1568 | } | ||
1559 | 1569 | ||
1560 | /** | 1570 | /** |
1561 | * pid_alive - check that a task structure is not stale | 1571 | * pid_alive - check that a task structure is not stale |
@@ -1966,7 +1976,8 @@ extern void mm_release(struct task_struct *, struct mm_struct *); | |||
1966 | /* Allocate a new mm structure and copy contents from tsk->mm */ | 1976 | /* Allocate a new mm structure and copy contents from tsk->mm */ |
1967 | extern struct mm_struct *dup_mm(struct task_struct *tsk); | 1977 | extern struct mm_struct *dup_mm(struct task_struct *tsk); |
1968 | 1978 | ||
1969 | extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *); | 1979 | extern int copy_thread(unsigned long, unsigned long, unsigned long, |
1980 | struct task_struct *, struct pt_regs *); | ||
1970 | extern void flush_thread(void); | 1981 | extern void flush_thread(void); |
1971 | extern void exit_thread(void); | 1982 | extern void exit_thread(void); |
1972 | 1983 | ||
@@ -2051,6 +2062,11 @@ static inline int thread_group_empty(struct task_struct *p) | |||
2051 | #define delay_group_leader(p) \ | 2062 | #define delay_group_leader(p) \ |
2052 | (thread_group_leader(p) && !thread_group_empty(p)) | 2063 | (thread_group_leader(p) && !thread_group_empty(p)) |
2053 | 2064 | ||
2065 | static inline int task_detached(struct task_struct *p) | ||
2066 | { | ||
2067 | return p->exit_signal == -1; | ||
2068 | } | ||
2069 | |||
2054 | /* | 2070 | /* |
2055 | * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring | 2071 | * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring |
2056 | * subscriptions and synchronises with wait4(). Also used in procfs. Also | 2072 | * subscriptions and synchronises with wait4(). Also used in procfs. Also |
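
The sched.h hunk above routes the *_nr_ns()/*_vnr() helpers through a single __task_pid_nr_ns(), and the new comment warns that the struct pid returned by task_pgrp()/task_session() may only be dereferenced under rcu_read_lock() or tasklist_lock. A minimal illustrative sketch of honouring that rule — the helper name is invented and is not part of the patch:

    /* Hypothetical helper: pgrp of @tsk as seen from the caller's pid namespace. */
    static pid_t my_task_pgrp_vnr(struct task_struct *tsk)
    {
    	pid_t nr;

    	/* task_pgrp() may only be dereferenced under rcu_read_lock()
    	 * (or tasklist_lock); see the comment added above. */
    	rcu_read_lock();
    	nr = pid_vnr(task_pgrp(tsk));
    	rcu_read_unlock();

    	return nr;
    }
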
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 262a8dccfa81..167c33361d9c 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h | |||
@@ -21,6 +21,8 @@ extern long prctl_set_seccomp(unsigned long); | |||
21 | 21 | ||
22 | #else /* CONFIG_SECCOMP */ | 22 | #else /* CONFIG_SECCOMP */ |
23 | 23 | ||
24 | #include <linux/errno.h> | ||
25 | |||
24 | typedef struct { } seccomp_t; | 26 | typedef struct { } seccomp_t; |
25 | 27 | ||
26 | #define secure_computing(x) do { } while (0) | 28 | #define secure_computing(x) do { } while (0) |
diff --git a/include/linux/section-names.h b/include/linux/section-names.h new file mode 100644 index 000000000000..c956f4eb2adf --- /dev/null +++ b/include/linux/section-names.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef __LINUX_SECTION_NAMES_H | ||
2 | #define __LINUX_SECTION_NAMES_H | ||
3 | |||
4 | #define HEAD_TEXT_SECTION .head.text | ||
5 | |||
6 | #endif /* !__LINUX_SECTION_NAMES_H */ | ||
diff --git a/include/linux/security.h b/include/linux/security.h index 54ed15799a83..d5fd6163606f 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
33 | #include <linux/key.h> | 33 | #include <linux/key.h> |
34 | #include <linux/xfrm.h> | 34 | #include <linux/xfrm.h> |
35 | #include <linux/gfp.h> | ||
35 | #include <net/flow.h> | 36 | #include <net/flow.h> |
36 | 37 | ||
37 | /* Maximum number of letters for an LSM name string */ | 38 | /* Maximum number of letters for an LSM name string */ |
@@ -2953,5 +2954,28 @@ static inline void securityfs_remove(struct dentry *dentry) | |||
2953 | 2954 | ||
2954 | #endif | 2955 | #endif |
2955 | 2956 | ||
2957 | #ifdef CONFIG_SECURITY | ||
2958 | |||
2959 | static inline char *alloc_secdata(void) | ||
2960 | { | ||
2961 | return (char *)get_zeroed_page(GFP_KERNEL); | ||
2962 | } | ||
2963 | |||
2964 | static inline void free_secdata(void *secdata) | ||
2965 | { | ||
2966 | free_page((unsigned long)secdata); | ||
2967 | } | ||
2968 | |||
2969 | #else | ||
2970 | |||
2971 | static inline char *alloc_secdata(void) | ||
2972 | { | ||
2973 | return (char *)1; | ||
2974 | } | ||
2975 | |||
2976 | static inline void free_secdata(void *secdata) | ||
2977 | { } | ||
2978 | #endif /* CONFIG_SECURITY */ | ||
2979 | |||
2956 | #endif /* ! __LINUX_SECURITY_H */ | 2980 | #endif /* ! __LINUX_SECURITY_H */ |
2957 | 2981 | ||
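
The alloc_secdata()/free_secdata() definitions give LSM-aware callers a zeroed, page-sized scratch buffer for security-context data (or a dummy pointer when CONFIG_SECURITY is off). A hedged usage sketch, not taken from the patch; the function name is invented:

    /* Hypothetical caller: copy mount data through the LSM, then discard it. */
    static int my_copy_mount_secdata(void *data)
    {
    	char *secdata = alloc_secdata();
    	int err;

    	if (!secdata)
    		return -ENOMEM;

    	err = security_sb_copy_data(data, secdata);
    	/* ... hand secdata to security_sb_kern_mount() or similar ... */
    	free_secdata(secdata);
    	return err;
    }
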
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index f616f31576d7..004f3b3342c5 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h | |||
@@ -55,7 +55,7 @@ int seq_bitmap(struct seq_file *m, const unsigned long *bits, | |||
55 | unsigned int nr_bits); | 55 | unsigned int nr_bits); |
56 | static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) | 56 | static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask) |
57 | { | 57 | { |
58 | return seq_bitmap(m, mask->bits, nr_cpu_ids); | 58 | return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids); |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) | 61 | static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) |
@@ -63,12 +63,13 @@ static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask) | |||
63 | return seq_bitmap(m, mask->bits, MAX_NUMNODES); | 63 | return seq_bitmap(m, mask->bits, MAX_NUMNODES); |
64 | } | 64 | } |
65 | 65 | ||
66 | int seq_bitmap_list(struct seq_file *m, unsigned long *bits, | 66 | int seq_bitmap_list(struct seq_file *m, const unsigned long *bits, |
67 | unsigned int nr_bits); | 67 | unsigned int nr_bits); |
68 | 68 | ||
69 | static inline int seq_cpumask_list(struct seq_file *m, cpumask_t *mask) | 69 | static inline int seq_cpumask_list(struct seq_file *m, |
70 | const struct cpumask *mask) | ||
70 | { | 71 | { |
71 | return seq_bitmap_list(m, mask->bits, NR_CPUS); | 72 | return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids); |
72 | } | 73 | } |
73 | 74 | ||
74 | static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask) | 75 | static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask) |
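
seq_cpumask_list() now takes a const struct cpumask * and prints only nr_cpu_ids bits. A minimal sketch of a seq_file show routine using it; the callback name is invented:

    /* Hypothetical seq_file show callback printing the online CPUs as a list. */
    static int my_cpus_show(struct seq_file *m, void *v)
    {
    	seq_puts(m, "online cpus: ");
    	seq_cpumask_list(m, cpu_online_mask);	/* e.g. "0-3,6" */
    	seq_putc(m, '\n');
    	return 0;
    }
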
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index df9245c7bd3b..57a97e52e58d 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -164,6 +164,9 @@ | |||
164 | /* NWPSERIAL */ | 164 | /* NWPSERIAL */ |
165 | #define PORT_NWPSERIAL 85 | 165 | #define PORT_NWPSERIAL 85 |
166 | 166 | ||
167 | /* MAX3100 */ | ||
168 | #define PORT_MAX3100 86 | ||
169 | |||
167 | #ifdef __KERNEL__ | 170 | #ifdef __KERNEL__ |
168 | 171 | ||
169 | #include <linux/compiler.h> | 172 | #include <linux/compiler.h> |
@@ -277,7 +280,7 @@ struct uart_port { | |||
277 | struct uart_icount icount; /* statistics */ | 280 | struct uart_icount icount; /* statistics */ |
278 | 281 | ||
279 | struct console *cons; /* struct console, if any */ | 282 | struct console *cons; /* struct console, if any */ |
280 | #ifdef CONFIG_SERIAL_CORE_CONSOLE | 283 | #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ) |
281 | unsigned long sysrq; /* sysrq timeout */ | 284 | unsigned long sysrq; /* sysrq timeout */ |
282 | #endif | 285 | #endif |
283 | 286 | ||
diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h new file mode 100644 index 000000000000..4976befb6aeb --- /dev/null +++ b/include/linux/serial_max3100.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Copyright (C) 2007 Christian Pellegrin | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | */ | ||
10 | |||
11 | |||
12 | #ifndef _LINUX_SERIAL_MAX3100_H | ||
13 | #define _LINUX_SERIAL_MAX3100_H 1 | ||
14 | |||
15 | |||
16 | /** | ||
17 | * struct plat_max3100 - MAX3100 SPI UART platform data | ||
18 | * @loopback: force MAX3100 in loopback | ||
19 | * @crystal: 1 for 3.6864 MHz, 0 for 1.8432 MHz | ||
20 | * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook | ||
21 | * called on suspend and resume to activate it. | ||
22 | * @poll_time: poll time for the CTS signal in ms; 0 disables polling | ||
23 | * (no hardware flow control, but lower CPU usage) | ||
24 | * | ||
25 | * You should use this structure in your machine description to specify | ||
26 | * how the MAX3100 is connected. Example: | ||
27 | * | ||
28 | * static struct plat_max3100 max3100_plat_data = { | ||
29 | * .loopback = 0, | ||
30 | * .crystal = 0, | ||
31 | * .poll_time = 100, | ||
32 | * }; | ||
33 | * | ||
34 | * static struct spi_board_info spi_board_info[] = { | ||
35 | * { | ||
36 | * .modalias = "max3100", | ||
37 | * .platform_data = &max3100_plat_data, | ||
38 | * .irq = IRQ_EINT12, | ||
39 | * .max_speed_hz = 5*1000*1000, | ||
40 | * .chip_select = 0, | ||
41 | * }, | ||
42 | * }; | ||
43 | * | ||
44 | **/ | ||
45 | struct plat_max3100 { | ||
46 | int loopback; | ||
47 | int crystal; | ||
48 | void (*max3100_hw_suspend) (int suspend); | ||
49 | int poll_time; | ||
50 | }; | ||
51 | |||
52 | #endif | ||
diff --git a/include/linux/sht15.h b/include/linux/sht15.h new file mode 100644 index 000000000000..046bce05ecab --- /dev/null +++ b/include/linux/sht15.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * sht15.h - support for the SHT15 Temperature and Humidity Sensor | ||
3 | * | ||
4 | * Copyright (c) 2009 Jonathan Cameron | ||
5 | * | ||
6 | * Copyright (c) 2007 Wouter Horre | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | /** | ||
14 | * struct sht15_platform_data - sht15 connectivity info | ||
15 | * @gpio_data: GPIO number to which the bidirectional data line is connected | ||
16 | * @gpio_sck: GPIO number to which the data clock is connected | ||
17 | * @supply_mv: supply voltage in mV; overridden by a regulator if available | ||
18 | **/ | ||
19 | struct sht15_platform_data { | ||
20 | int gpio_data; | ||
21 | int gpio_sck; | ||
22 | int supply_mv; | ||
23 | }; | ||
24 | |||
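
struct sht15_platform_data is consumed as ordinary platform data. A hedged board-code sketch — the GPIO numbers, supply voltage, and the "sht15" device name are assumptions, not taken from the patch:

    /* Hypothetical board wiring for the sensor. */
    static struct sht15_platform_data my_sht15_pdata = {
    	.gpio_data = 16,	/* bidirectional data line */
    	.gpio_sck  = 17,	/* clock line */
    	.supply_mv = 3300,	/* 3.3 V supply, used if no regulator is found */
    };

    static struct platform_device my_sht15_device = {
    	.name = "sht15",
    	.id   = -1,
    	.dev  = {
    		.platform_data = &my_sht15_pdata,
    	},
    };
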
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 55d67300fa10..5fd389162f01 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -168,6 +168,7 @@ struct skb_shared_hwtstamps { | |||
168 | * @software: generate software time stamp | 168 | * @software: generate software time stamp |
169 | * @in_progress: device driver is going to provide | 169 | * @in_progress: device driver is going to provide |
170 | * hardware time stamp | 170 | * hardware time stamp |
171 | * @flags: all shared_tx flags | ||
171 | * | 172 | * |
172 | * These flags are attached to packets as part of the | 173 | * These flags are attached to packets as part of the |
173 | * &skb_shared_info. Use skb_tx() to get a pointer. | 174 | * &skb_shared_info. Use skb_tx() to get a pointer. |
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 6ca6a7b66d75..5ac9b0bcaf9a 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ | 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ |
15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ | 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ |
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <trace/kmemtrace.h> | ||
17 | 18 | ||
18 | /* Size description struct for general caches. */ | 19 | /* Size description struct for general caches. */ |
19 | struct cache_sizes { | 20 | struct cache_sizes { |
@@ -28,8 +29,26 @@ extern struct cache_sizes malloc_sizes[]; | |||
28 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | 29 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); |
29 | void *__kmalloc(size_t size, gfp_t flags); | 30 | void *__kmalloc(size_t size, gfp_t flags); |
30 | 31 | ||
31 | static inline void *kmalloc(size_t size, gfp_t flags) | 32 | #ifdef CONFIG_KMEMTRACE |
33 | extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags); | ||
34 | extern size_t slab_buffer_size(struct kmem_cache *cachep); | ||
35 | #else | ||
36 | static __always_inline void * | ||
37 | kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags) | ||
32 | { | 38 | { |
39 | return kmem_cache_alloc(cachep, flags); | ||
40 | } | ||
41 | static inline size_t slab_buffer_size(struct kmem_cache *cachep) | ||
42 | { | ||
43 | return 0; | ||
44 | } | ||
45 | #endif | ||
46 | |||
47 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | ||
48 | { | ||
49 | struct kmem_cache *cachep; | ||
50 | void *ret; | ||
51 | |||
33 | if (__builtin_constant_p(size)) { | 52 | if (__builtin_constant_p(size)) { |
34 | int i = 0; | 53 | int i = 0; |
35 | 54 | ||
@@ -47,10 +66,17 @@ static inline void *kmalloc(size_t size, gfp_t flags) | |||
47 | found: | 66 | found: |
48 | #ifdef CONFIG_ZONE_DMA | 67 | #ifdef CONFIG_ZONE_DMA |
49 | if (flags & GFP_DMA) | 68 | if (flags & GFP_DMA) |
50 | return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep, | 69 | cachep = malloc_sizes[i].cs_dmacachep; |
51 | flags); | 70 | else |
52 | #endif | 71 | #endif |
53 | return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags); | 72 | cachep = malloc_sizes[i].cs_cachep; |
73 | |||
74 | ret = kmem_cache_alloc_notrace(cachep, flags); | ||
75 | |||
76 | trace_kmalloc(_THIS_IP_, ret, | ||
77 | size, slab_buffer_size(cachep), flags); | ||
78 | |||
79 | return ret; | ||
54 | } | 80 | } |
55 | return __kmalloc(size, flags); | 81 | return __kmalloc(size, flags); |
56 | } | 82 | } |
@@ -59,8 +85,25 @@ found: | |||
59 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); | 85 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); |
60 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 86 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
61 | 87 | ||
62 | static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | 88 | #ifdef CONFIG_KMEMTRACE |
89 | extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, | ||
90 | gfp_t flags, | ||
91 | int nodeid); | ||
92 | #else | ||
93 | static __always_inline void * | ||
94 | kmem_cache_alloc_node_notrace(struct kmem_cache *cachep, | ||
95 | gfp_t flags, | ||
96 | int nodeid) | ||
97 | { | ||
98 | return kmem_cache_alloc_node(cachep, flags, nodeid); | ||
99 | } | ||
100 | #endif | ||
101 | |||
102 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | ||
63 | { | 103 | { |
104 | struct kmem_cache *cachep; | ||
105 | void *ret; | ||
106 | |||
64 | if (__builtin_constant_p(size)) { | 107 | if (__builtin_constant_p(size)) { |
65 | int i = 0; | 108 | int i = 0; |
66 | 109 | ||
@@ -78,11 +121,18 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
78 | found: | 121 | found: |
79 | #ifdef CONFIG_ZONE_DMA | 122 | #ifdef CONFIG_ZONE_DMA |
80 | if (flags & GFP_DMA) | 123 | if (flags & GFP_DMA) |
81 | return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep, | 124 | cachep = malloc_sizes[i].cs_dmacachep; |
82 | flags, node); | 125 | else |
83 | #endif | 126 | #endif |
84 | return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep, | 127 | cachep = malloc_sizes[i].cs_cachep; |
85 | flags, node); | 128 | |
129 | ret = kmem_cache_alloc_node_notrace(cachep, flags, node); | ||
130 | |||
131 | trace_kmalloc_node(_THIS_IP_, ret, | ||
132 | size, slab_buffer_size(cachep), | ||
133 | flags, node); | ||
134 | |||
135 | return ret; | ||
86 | } | 136 | } |
87 | return __kmalloc_node(size, flags, node); | 137 | return __kmalloc_node(size, flags, node); |
88 | } | 138 | } |
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index 59a3fa476ab9..0ec00b39d006 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h | |||
@@ -3,14 +3,15 @@ | |||
3 | 3 | ||
4 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 4 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
5 | 5 | ||
6 | static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | 6 | static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, |
7 | gfp_t flags) | ||
7 | { | 8 | { |
8 | return kmem_cache_alloc_node(cachep, flags, -1); | 9 | return kmem_cache_alloc_node(cachep, flags, -1); |
9 | } | 10 | } |
10 | 11 | ||
11 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 12 | void *__kmalloc_node(size_t size, gfp_t flags, int node); |
12 | 13 | ||
13 | static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | 14 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) |
14 | { | 15 | { |
15 | return __kmalloc_node(size, flags, node); | 16 | return __kmalloc_node(size, flags, node); |
16 | } | 17 | } |
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
23 | * kmalloc is the normal method of allocating memory | 24 | * kmalloc is the normal method of allocating memory |
24 | * in the kernel. | 25 | * in the kernel. |
25 | */ | 26 | */ |
26 | static inline void *kmalloc(size_t size, gfp_t flags) | 27 | static __always_inline void *kmalloc(size_t size, gfp_t flags) |
27 | { | 28 | { |
28 | return __kmalloc_node(size, flags, -1); | 29 | return __kmalloc_node(size, flags, -1); |
29 | } | 30 | } |
30 | 31 | ||
31 | static inline void *__kmalloc(size_t size, gfp_t flags) | 32 | static __always_inline void *__kmalloc(size_t size, gfp_t flags) |
32 | { | 33 | { |
33 | return kmalloc(size, flags); | 34 | return kmalloc(size, flags); |
34 | } | 35 | } |
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h new file mode 100644 index 000000000000..b65c8881f07a --- /dev/null +++ b/include/linux/slow-work.h | |||
@@ -0,0 +1,95 @@ | |||
1 | /* Worker thread pool for slow items, such as filesystem lookups or mkdirs | ||
2 | * | ||
3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | * | ||
11 | * See Documentation/slow-work.txt | ||
12 | */ | ||
13 | |||
14 | #ifndef _LINUX_SLOW_WORK_H | ||
15 | #define _LINUX_SLOW_WORK_H | ||
16 | |||
17 | #ifdef CONFIG_SLOW_WORK | ||
18 | |||
19 | #include <linux/sysctl.h> | ||
20 | |||
21 | struct slow_work; | ||
22 | |||
23 | /* | ||
24 | * The operations used to support slow work items | ||
25 | */ | ||
26 | struct slow_work_ops { | ||
27 | /* get a ref on a work item | ||
28 | * - return 0 if successful, -ve if not | ||
29 | */ | ||
30 | int (*get_ref)(struct slow_work *work); | ||
31 | |||
32 | /* discard a ref to a work item */ | ||
33 | void (*put_ref)(struct slow_work *work); | ||
34 | |||
35 | /* execute a work item */ | ||
36 | void (*execute)(struct slow_work *work); | ||
37 | }; | ||
38 | |||
39 | /* | ||
40 | * A slow work item | ||
41 | * - A reference is held on the parent object by the thread pool when it is | ||
42 | * queued | ||
43 | */ | ||
44 | struct slow_work { | ||
45 | unsigned long flags; | ||
46 | #define SLOW_WORK_PENDING 0 /* item pending (further) execution */ | ||
47 | #define SLOW_WORK_EXECUTING 1 /* item currently executing */ | ||
48 | #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ | ||
49 | #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ | ||
50 | const struct slow_work_ops *ops; /* operations table for this item */ | ||
51 | struct list_head link; /* link in queue */ | ||
52 | }; | ||
53 | |||
54 | /** | ||
55 | * slow_work_init - Initialise a slow work item | ||
56 | * @work: The work item to initialise | ||
57 | * @ops: The operations to use to handle the slow work item | ||
58 | * | ||
59 | * Initialise a slow work item. | ||
60 | */ | ||
61 | static inline void slow_work_init(struct slow_work *work, | ||
62 | const struct slow_work_ops *ops) | ||
63 | { | ||
64 | work->flags = 0; | ||
65 | work->ops = ops; | ||
66 | INIT_LIST_HEAD(&work->link); | ||
67 | } | ||
68 | |||
69 | /** | ||
70 | * vslow_work_init - Initialise a very slow work item | ||
71 | * @work: The work item to initialise | ||
72 | * @ops: The operations to use to handle the slow work item | ||
73 | * | ||
74 | * Initialise a very slow work item. This item will be restricted such that | ||
75 | * only a certain number of the pool threads will be able to execute items of | ||
76 | * this type. | ||
77 | */ | ||
78 | static inline void vslow_work_init(struct slow_work *work, | ||
79 | const struct slow_work_ops *ops) | ||
80 | { | ||
81 | work->flags = 1 << SLOW_WORK_VERY_SLOW; | ||
82 | work->ops = ops; | ||
83 | INIT_LIST_HEAD(&work->link); | ||
84 | } | ||
85 | |||
86 | extern int slow_work_enqueue(struct slow_work *work); | ||
87 | extern int slow_work_register_user(void); | ||
88 | extern void slow_work_unregister_user(void); | ||
89 | |||
90 | #ifdef CONFIG_SYSCTL | ||
91 | extern ctl_table slow_work_sysctls[]; | ||
92 | #endif | ||
93 | |||
94 | #endif /* CONFIG_SLOW_WORK */ | ||
95 | #endif /* _LINUX_SLOW_WORK_H */ | ||
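
The slow-work API above is used by giving each item a static ops table, initialising it, and enqueueing it; the thread pool takes a reference (via get_ref/put_ref) while the item is queued. A minimal illustrative sketch with made-up names:

    /* Hypothetical slow-work user: one long-running item per module. */
    static int my_get_ref(struct slow_work *work)  { return 0; }
    static void my_put_ref(struct slow_work *work) { }
    static void my_execute(struct slow_work *work)
    {
    	/* runs in a pool thread; may sleep, do filesystem I/O, etc. */
    }

    static const struct slow_work_ops my_slow_ops = {
    	.get_ref = my_get_ref,
    	.put_ref = my_put_ref,
    	.execute = my_execute,
    };

    static struct slow_work my_work;

    static int __init my_module_init(void)
    {
    	int ret = slow_work_register_user();

    	if (ret < 0)
    		return ret;
    	slow_work_init(&my_work, &my_slow_ops);
    	return slow_work_enqueue(&my_work);
    }
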
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index e37b6aa8a9fb..5046f90c1171 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | #include <linux/kobject.h> | 12 | #include <linux/kobject.h> |
13 | #include <trace/kmemtrace.h> | ||
13 | 14 | ||
14 | enum stat_item { | 15 | enum stat_item { |
15 | ALLOC_FASTPATH, /* Allocation from cpu slab */ | 16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
@@ -217,13 +218,30 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) | |||
217 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | 218 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); |
218 | void *__kmalloc(size_t size, gfp_t flags); | 219 | void *__kmalloc(size_t size, gfp_t flags); |
219 | 220 | ||
221 | #ifdef CONFIG_KMEMTRACE | ||
222 | extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags); | ||
223 | #else | ||
224 | static __always_inline void * | ||
225 | kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags) | ||
226 | { | ||
227 | return kmem_cache_alloc(s, gfpflags); | ||
228 | } | ||
229 | #endif | ||
230 | |||
220 | static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | 231 | static __always_inline void *kmalloc_large(size_t size, gfp_t flags) |
221 | { | 232 | { |
222 | return (void *)__get_free_pages(flags | __GFP_COMP, get_order(size)); | 233 | unsigned int order = get_order(size); |
234 | void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order); | ||
235 | |||
236 | trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags); | ||
237 | |||
238 | return ret; | ||
223 | } | 239 | } |
224 | 240 | ||
225 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | 241 | static __always_inline void *kmalloc(size_t size, gfp_t flags) |
226 | { | 242 | { |
243 | void *ret; | ||
244 | |||
227 | if (__builtin_constant_p(size)) { | 245 | if (__builtin_constant_p(size)) { |
228 | if (size > SLUB_MAX_SIZE) | 246 | if (size > SLUB_MAX_SIZE) |
229 | return kmalloc_large(size, flags); | 247 | return kmalloc_large(size, flags); |
@@ -234,7 +252,11 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) | |||
234 | if (!s) | 252 | if (!s) |
235 | return ZERO_SIZE_PTR; | 253 | return ZERO_SIZE_PTR; |
236 | 254 | ||
237 | return kmem_cache_alloc(s, flags); | 255 | ret = kmem_cache_alloc_notrace(s, flags); |
256 | |||
257 | trace_kmalloc(_THIS_IP_, ret, size, s->size, flags); | ||
258 | |||
259 | return ret; | ||
238 | } | 260 | } |
239 | } | 261 | } |
240 | return __kmalloc(size, flags); | 262 | return __kmalloc(size, flags); |
@@ -244,8 +266,24 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags) | |||
244 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | 266 | void *__kmalloc_node(size_t size, gfp_t flags, int node); |
245 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | 267 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); |
246 | 268 | ||
269 | #ifdef CONFIG_KMEMTRACE | ||
270 | extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
271 | gfp_t gfpflags, | ||
272 | int node); | ||
273 | #else | ||
274 | static __always_inline void * | ||
275 | kmem_cache_alloc_node_notrace(struct kmem_cache *s, | ||
276 | gfp_t gfpflags, | ||
277 | int node) | ||
278 | { | ||
279 | return kmem_cache_alloc_node(s, gfpflags, node); | ||
280 | } | ||
281 | #endif | ||
282 | |||
247 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | 283 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) |
248 | { | 284 | { |
285 | void *ret; | ||
286 | |||
249 | if (__builtin_constant_p(size) && | 287 | if (__builtin_constant_p(size) && |
250 | size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { | 288 | size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) { |
251 | struct kmem_cache *s = kmalloc_slab(size); | 289 | struct kmem_cache *s = kmalloc_slab(size); |
@@ -253,7 +291,12 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
253 | if (!s) | 291 | if (!s) |
254 | return ZERO_SIZE_PTR; | 292 | return ZERO_SIZE_PTR; |
255 | 293 | ||
256 | return kmem_cache_alloc_node(s, flags, node); | 294 | ret = kmem_cache_alloc_node_notrace(s, flags, node); |
295 | |||
296 | trace_kmalloc_node(_THIS_IP_, ret, | ||
297 | size, s->size, flags, node); | ||
298 | |||
299 | return ret; | ||
257 | } | 300 | } |
258 | return __kmalloc_node(size, flags, node); | 301 | return __kmalloc_node(size, flags, node); |
259 | } | 302 | } |
diff --git a/include/linux/smp.h b/include/linux/smp.h index bbacb7baa446..a69db820eed6 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -38,7 +38,7 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, | |||
38 | /* | 38 | /* |
39 | * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. | 39 | * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc. |
40 | * (defined in asm header): | 40 | * (defined in asm header): |
41 | */ | 41 | */ |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * stops all CPUs but the current one: | 44 | * stops all CPUs but the current one: |
@@ -82,7 +82,8 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info, | |||
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | 84 | ||
85 | void __smp_call_function_single(int cpuid, struct call_single_data *data); | 85 | void __smp_call_function_single(int cpuid, struct call_single_data *data, |
86 | int wait); | ||
86 | 87 | ||
87 | /* | 88 | /* |
88 | * Generic and arch helpers | 89 | * Generic and arch helpers |
@@ -121,6 +122,8 @@ extern unsigned int setup_max_cpus; | |||
121 | 122 | ||
122 | #else /* !SMP */ | 123 | #else /* !SMP */ |
123 | 124 | ||
125 | static inline void smp_send_stop(void) { } | ||
126 | |||
124 | /* | 127 | /* |
125 | * These macros fold the SMP functionality into a single CPU system | 128 | * These macros fold the SMP functionality into a single CPU system |
126 | */ | 129 | */ |
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h index f41ffd7c2dd9..34c4475ac4a2 100644 --- a/include/linux/sonypi.h +++ b/include/linux/sonypi.h | |||
@@ -103,6 +103,14 @@ | |||
103 | #define SONYPI_EVENT_WIRELESS_OFF 61 | 103 | #define SONYPI_EVENT_WIRELESS_OFF 61 |
104 | #define SONYPI_EVENT_ZOOM_IN_PRESSED 62 | 104 | #define SONYPI_EVENT_ZOOM_IN_PRESSED 62 |
105 | #define SONYPI_EVENT_ZOOM_OUT_PRESSED 63 | 105 | #define SONYPI_EVENT_ZOOM_OUT_PRESSED 63 |
106 | #define SONYPI_EVENT_CD_EJECT_PRESSED 64 | ||
107 | #define SONYPI_EVENT_MODEKEY_PRESSED 65 | ||
108 | #define SONYPI_EVENT_PKEY_P4 66 | ||
109 | #define SONYPI_EVENT_PKEY_P5 67 | ||
110 | #define SONYPI_EVENT_SETTINGKEY_PRESSED 68 | ||
111 | #define SONYPI_EVENT_VOLUME_INC_PRESSED 69 | ||
112 | #define SONYPI_EVENT_VOLUME_DEC_PRESSED 70 | ||
113 | #define SONYPI_EVENT_BRIGHTNESS_PRESSED 71 | ||
106 | 114 | ||
107 | /* get/set brightness */ | 115 | /* get/set brightness */ |
108 | #define SONYPI_IOCGBRT _IOR('v', 0, __u8) | 116 | #define SONYPI_IOCGBRT _IOR('v', 0, __u8) |
diff --git a/include/linux/spi/ad7879.h b/include/linux/spi/ad7879.h new file mode 100644 index 000000000000..4231104c9afa --- /dev/null +++ b/include/linux/spi/ad7879.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* linux/spi/ad7879.h */ | ||
2 | |||
3 | /* Touchscreen characteristics vary between boards and models. The | ||
4 | * platform_data for the device's "struct device" holds this information. | ||
5 | * | ||
6 | * It's OK if the min/max values are zero. | ||
7 | */ | ||
8 | struct ad7879_platform_data { | ||
9 | u16 model; /* 7879 */ | ||
10 | u16 x_plate_ohms; | ||
11 | u16 x_min, x_max; | ||
12 | u16 y_min, y_max; | ||
13 | u16 pressure_min, pressure_max; | ||
14 | |||
15 | /* [0..255] 0 = OFF. Starts at 1 = 550us and goes | ||
16 | * all the way to 9.440ms in steps of 35us. | ||
17 | */ | ||
18 | u8 pen_down_acc_interval; | ||
19 | /* [0..15] Starts at 0=128us and goes all the | ||
20 | * way to 4.096ms in steps of 128us. | ||
21 | */ | ||
22 | u8 first_conversion_delay; | ||
23 | /* [0..3] 0 = 2us, 1 = 4us, 2 = 8us, 3 = 16us */ | ||
24 | u8 acquisition_time; | ||
25 | /* [0..3] Average X middle samples 0 = 2, 1 = 4, 2 = 8, 3 = 16 */ | ||
26 | u8 averaging; | ||
27 | /* [0..3] Perform X measurements 0 = OFF, | ||
28 | * 1 = 4, 2 = 8, 3 = 16 (median > averaging) | ||
29 | */ | ||
30 | u8 median; | ||
31 | /* 1 = AUX/VBAT/GPIO set to GPIO Output */ | ||
32 | u8 gpio_output; | ||
33 | /* Initial GPIO pin state (valid if gpio_output = 1) */ | ||
34 | u8 gpio_default; | ||
35 | }; | ||
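
Like the other touchscreen controllers here, the ad7879 gets its tuning through platform_data attached to the SPI board info. A hedged example; the field values, bus wiring, and array name are invented:

    /* Hypothetical board data for an AD7879 touchscreen on SPI bus 1. */
    static const struct ad7879_platform_data my_ad7879_pdata = {
    	.model                  = 7879,
    	.x_plate_ohms           = 620,
    	.pressure_max           = 10000,
    	.first_conversion_delay = 3,
    	.acquisition_time       = 1,
    	.averaging              = 1,
    	.median                 = 2,
    };

    static struct spi_board_info my_spi_devices[] __initdata = {
    	{
    		.modalias      = "ad7879",
    		.platform_data = &my_ad7879_pdata,
    		.bus_num       = 1,
    		.chip_select   = 0,
    		.max_speed_hz  = 5000000,
    		/* .irq = ...   board-specific pen-down interrupt */
    	},
    };
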
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h index 05eab2f11e63..2ea20320c093 100644 --- a/include/linux/spi/ads7846.h +++ b/include/linux/spi/ads7846.h | |||
@@ -51,5 +51,6 @@ struct ads7846_platform_data { | |||
51 | void **filter_data); | 51 | void **filter_data); |
52 | int (*filter) (void *filter_data, int data_idx, int *val); | 52 | int (*filter) (void *filter_data, int data_idx, int *val); |
53 | void (*filter_cleanup)(void *filter_data); | 53 | void (*filter_cleanup)(void *filter_data); |
54 | void (*wait_for_sync)(void); | ||
54 | }; | 55 | }; |
55 | 56 | ||
diff --git a/include/linux/spi/eeprom.h b/include/linux/spi/eeprom.h index 1085212c446e..306e7b1c69ed 100644 --- a/include/linux/spi/eeprom.h +++ b/include/linux/spi/eeprom.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __LINUX_SPI_EEPROM_H | 1 | #ifndef __LINUX_SPI_EEPROM_H |
2 | #define __LINUX_SPI_EEPROM_H | 2 | #define __LINUX_SPI_EEPROM_H |
3 | 3 | ||
4 | #include <linux/memory.h> | ||
5 | |||
4 | /* | 6 | /* |
5 | * Put one of these structures in platform_data for SPI EEPROMS handled | 7 | * Put one of these structures in platform_data for SPI EEPROMS handled |
6 | * by the "at25" driver. On SPI, most EEPROMS understand the same core | 8 | * by the "at25" driver. On SPI, most EEPROMS understand the same core |
@@ -17,6 +19,10 @@ struct spi_eeprom { | |||
17 | #define EE_ADDR2 0x0002 /* 16 bit addrs */ | 19 | #define EE_ADDR2 0x0002 /* 16 bit addrs */ |
18 | #define EE_ADDR3 0x0004 /* 24 bit addrs */ | 20 | #define EE_ADDR3 0x0004 /* 24 bit addrs */ |
19 | #define EE_READONLY 0x0008 /* disallow writes */ | 21 | #define EE_READONLY 0x0008 /* disallow writes */ |
22 | |||
23 | /* for exporting this chip's data to other kernel code */ | ||
24 | void (*setup)(struct memory_accessor *mem, void *context); | ||
25 | void *context; | ||
20 | }; | 26 | }; |
21 | 27 | ||
22 | #endif /* __LINUX_SPI_EEPROM_H */ | 28 | #endif /* __LINUX_SPI_EEPROM_H */ |
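
The new setup hook hands board code a struct memory_accessor once the at25 part probes, so other kernel code can read the EEPROM contents. A hedged sketch; the MAC-address use and all names are assumptions, not taken from the patch:

    /* Hypothetical consumer: pull a MAC address out of the EEPROM at probe time. */
    static void my_eeprom_setup(struct memory_accessor *mem, void *context)
    {
    	char mac[6];

    	if (mem->read(mem, mac, 0, sizeof(mac)) == sizeof(mac))
    		pr_info("board MAC from EEPROM: %pM\n", mac);
    }

    static struct spi_eeprom my_at25_pdata = {
    	.name      = "at25256",
    	.byte_len  = 32768,
    	.page_size = 64,
    	.flags     = EE_ADDR2,
    	.setup     = my_eeprom_setup,
    	.context   = NULL,
    };
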
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 68bb1c501d0d..a0faa18f7b1b 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -204,6 +204,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
204 | * SPI slaves, and are numbered from zero to num_chipselects. | 204 | * SPI slaves, and are numbered from zero to num_chipselects. |
205 | * each slave has a chipselect signal, but it's common that not | 205 | * each slave has a chipselect signal, but it's common that not |
206 | * every chipselect is connected to a slave. | 206 | * every chipselect is connected to a slave. |
207 | * @dma_alignment: SPI controller constraint on DMA buffers alignment. | ||
207 | * @setup: updates the device mode and clocking records used by a | 208 | * @setup: updates the device mode and clocking records used by a |
208 | * device's SPI controller; protocol code may call this. This | 209 | * device's SPI controller; protocol code may call this. This |
209 | * must fail if an unrecognized or unsupported mode is requested. | 210 | * must fail if an unrecognized or unsupported mode is requested. |
@@ -239,7 +240,17 @@ struct spi_master { | |||
239 | */ | 240 | */ |
240 | u16 num_chipselect; | 241 | u16 num_chipselect; |
241 | 242 | ||
242 | /* setup mode and clock, etc (spi driver may call many times) */ | 243 | /* some SPI controllers pose alignment requirements on DMAable |
244 | * buffers; let protocol drivers know about these requirements. | ||
245 | */ | ||
246 | u16 dma_alignment; | ||
247 | |||
248 | /* Setup mode and clock, etc (spi driver may call many times). | ||
249 | * | ||
250 | * IMPORTANT: this may be called when transfers to another | ||
251 | * device are active. DO NOT UPDATE SHARED REGISTERS in ways | ||
252 | * which could break those transfers. | ||
253 | */ | ||
243 | int (*setup)(struct spi_device *spi); | 254 | int (*setup)(struct spi_device *spi); |
244 | 255 | ||
245 | /* bidirectional bulk transfers | 256 | /* bidirectional bulk transfers |
diff --git a/include/linux/spi/spi_gpio.h b/include/linux/spi/spi_gpio.h index 0f01a0f1f40c..ca6782ee4b9f 100644 --- a/include/linux/spi/spi_gpio.h +++ b/include/linux/spi/spi_gpio.h | |||
@@ -25,10 +25,16 @@ | |||
25 | * ... | 25 | * ... |
26 | * }; | 26 | * }; |
27 | * | 27 | * |
28 | * If chipselect is not used (there's only one device on the bus), assign | ||
29 | * SPI_GPIO_NO_CHIPSELECT to the controller_data: | ||
30 | * .controller_data = (void *) SPI_GPIO_NO_CHIPSELECT; | ||
31 | * | ||
28 | * If the bitbanged bus is later switched to a "native" controller, | 32 | * If the bitbanged bus is later switched to a "native" controller, |
29 | * that platform_device and controller_data should be removed. | 33 | * that platform_device and controller_data should be removed. |
30 | */ | 34 | */ |
31 | 35 | ||
36 | #define SPI_GPIO_NO_CHIPSELECT ((unsigned long)-1l) | ||
37 | |||
32 | /** | 38 | /** |
33 | * struct spi_gpio_platform_data - parameter for bitbanged SPI master | 39 | * struct spi_gpio_platform_data - parameter for bitbanged SPI master |
34 | * @sck: number of the GPIO used for clock output | 40 | * @sck: number of the GPIO used for clock output |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index a0c66a2e00ad..252b245cfcf4 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -153,9 +153,11 @@ do { \ | |||
153 | extern int _raw_spin_trylock(spinlock_t *lock); | 153 | extern int _raw_spin_trylock(spinlock_t *lock); |
154 | extern void _raw_spin_unlock(spinlock_t *lock); | 154 | extern void _raw_spin_unlock(spinlock_t *lock); |
155 | extern void _raw_read_lock(rwlock_t *lock); | 155 | extern void _raw_read_lock(rwlock_t *lock); |
156 | #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) | ||
156 | extern int _raw_read_trylock(rwlock_t *lock); | 157 | extern int _raw_read_trylock(rwlock_t *lock); |
157 | extern void _raw_read_unlock(rwlock_t *lock); | 158 | extern void _raw_read_unlock(rwlock_t *lock); |
158 | extern void _raw_write_lock(rwlock_t *lock); | 159 | extern void _raw_write_lock(rwlock_t *lock); |
160 | #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) | ||
159 | extern int _raw_write_trylock(rwlock_t *lock); | 161 | extern int _raw_write_trylock(rwlock_t *lock); |
160 | extern void _raw_write_unlock(rwlock_t *lock); | 162 | extern void _raw_write_unlock(rwlock_t *lock); |
161 | #else | 163 | #else |
@@ -165,9 +167,13 @@ do { \ | |||
165 | # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) | 167 | # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) |
166 | # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) | 168 | # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) |
167 | # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) | 169 | # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) |
170 | # define _raw_read_lock_flags(lock, flags) \ | ||
171 | __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) | ||
168 | # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) | 172 | # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) |
169 | # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) | 173 | # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) |
170 | # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) | 174 | # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) |
175 | # define _raw_write_lock_flags(lock, flags) \ | ||
176 | __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) | ||
171 | # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) | 177 | # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) |
172 | # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) | 178 | # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) |
173 | #endif | 179 | #endif |
diff --git a/include/linux/splice.h b/include/linux/splice.h index 528dcb93c2f2..5f3faa9d15ae 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h | |||
@@ -36,6 +36,8 @@ struct splice_desc { | |||
36 | void *data; /* cookie */ | 36 | void *data; /* cookie */ |
37 | } u; | 37 | } u; |
38 | loff_t pos; /* file position */ | 38 | loff_t pos; /* file position */ |
39 | size_t num_spliced; /* number of bytes already spliced */ | ||
40 | bool need_wakeup; /* need to wake up writer */ | ||
39 | }; | 41 | }; |
40 | 42 | ||
41 | struct partial_page { | 43 | struct partial_page { |
@@ -66,6 +68,16 @@ extern ssize_t splice_from_pipe(struct pipe_inode_info *, struct file *, | |||
66 | splice_actor *); | 68 | splice_actor *); |
67 | extern ssize_t __splice_from_pipe(struct pipe_inode_info *, | 69 | extern ssize_t __splice_from_pipe(struct pipe_inode_info *, |
68 | struct splice_desc *, splice_actor *); | 70 | struct splice_desc *, splice_actor *); |
71 | extern int splice_from_pipe_feed(struct pipe_inode_info *, struct splice_desc *, | ||
72 | splice_actor *); | ||
73 | extern int splice_from_pipe_next(struct pipe_inode_info *, | ||
74 | struct splice_desc *); | ||
75 | extern void splice_from_pipe_begin(struct splice_desc *); | ||
76 | extern void splice_from_pipe_end(struct pipe_inode_info *, | ||
77 | struct splice_desc *); | ||
78 | extern int pipe_to_file(struct pipe_inode_info *, struct pipe_buffer *, | ||
79 | struct splice_desc *); | ||
80 | |||
69 | extern ssize_t splice_to_pipe(struct pipe_inode_info *, | 81 | extern ssize_t splice_to_pipe(struct pipe_inode_info *, |
70 | struct splice_pipe_desc *); | 82 | struct splice_pipe_desc *); |
71 | extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, | 83 | extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, |
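
The new begin/next/feed/end helpers appear intended to split the splice_from_pipe() loop into pieces a filesystem can drive itself, dropping and retaking its own locks between iterations. A hedged sketch of the calling pattern, modelled on __splice_from_pipe() in fs/splice.c; the wrapper name is invented:

    /* Sketch of the calling pattern (cf. __splice_from_pipe in fs/splice.c). */
    static ssize_t my_splice_loop(struct pipe_inode_info *pipe,
    			      struct splice_desc *sd, splice_actor *actor)
    {
    	int ret;

    	splice_from_pipe_begin(sd);
    	do {
    		ret = splice_from_pipe_next(pipe, sd);
    		if (ret > 0)
    			ret = splice_from_pipe_feed(pipe, sd, actor);
    		/* a filesystem could drop/retake its own locks here */
    	} while (ret > 0);
    	splice_from_pipe_end(pipe, sd);

    	return sd->num_spliced ? sd->num_spliced : ret;
    }
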
diff --git a/include/linux/string.h b/include/linux/string.h index d18fc198aa2f..489019ef1694 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -10,8 +10,10 @@ | |||
10 | #include <linux/compiler.h> /* for inline */ | 10 | #include <linux/compiler.h> /* for inline */ |
11 | #include <linux/types.h> /* for size_t */ | 11 | #include <linux/types.h> /* for size_t */ |
12 | #include <linux/stddef.h> /* for NULL */ | 12 | #include <linux/stddef.h> /* for NULL */ |
13 | #include <stdarg.h> | ||
13 | 14 | ||
14 | extern char *strndup_user(const char __user *, long); | 15 | extern char *strndup_user(const char __user *, long); |
16 | extern void *memdup_user(const void __user *, size_t); | ||
15 | 17 | ||
16 | /* | 18 | /* |
17 | * Include machine specific inline routines | 19 | * Include machine specific inline routines |
@@ -111,8 +113,23 @@ extern void argv_free(char **argv); | |||
111 | 113 | ||
112 | extern bool sysfs_streq(const char *s1, const char *s2); | 114 | extern bool sysfs_streq(const char *s1, const char *s2); |
113 | 115 | ||
116 | #ifdef CONFIG_BINARY_PRINTF | ||
117 | int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); | ||
118 | int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf); | ||
119 | int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); | ||
120 | #endif | ||
121 | |||
114 | extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, | 122 | extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, |
115 | const void *from, size_t available); | 123 | const void *from, size_t available); |
116 | 124 | ||
125 | /** | ||
126 | * strstarts - does @str start with @prefix? | ||
127 | * @str: string to examine | ||
128 | * @prefix: prefix to look for. | ||
129 | */ | ||
130 | static inline bool strstarts(const char *str, const char *prefix) | ||
131 | { | ||
132 | return strncmp(str, prefix, strlen(prefix)) == 0; | ||
133 | } | ||
117 | #endif | 134 | #endif |
118 | #endif /* _LINUX_STRING_H_ */ | 135 | #endif /* _LINUX_STRING_H_ */ |
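
Two of the additions here are small conveniences: memdup_user() copies a user buffer into a freshly allocated kernel buffer (returning an ERR_PTR on failure), and strstarts() is a readable prefix test. A brief illustrative sketch with invented names:

    /* Hypothetical ioctl helper: copy a user blob, then act on a "debug=" option.
     * Assumes the buffer is NUL-terminated, i.e. len includes the trailing '\0'. */
    static int my_handle_option(const char __user *ubuf, size_t len)
    {
    	char *opt = memdup_user(ubuf, len);

    	if (IS_ERR(opt))
    		return PTR_ERR(opt);

    	if (strstarts(opt, "debug="))
    		pr_debug("debug level: %s\n", opt + strlen("debug="));

    	kfree(opt);
    	return 0;
    }
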
diff --git a/include/linux/stringify.h b/include/linux/stringify.h index 0b4388356c87..841cec8ed525 100644 --- a/include/linux/stringify.h +++ b/include/linux/stringify.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * converts to "bar". | 6 | * converts to "bar". |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define __stringify_1(x) #x | 9 | #define __stringify_1(x...) #x |
10 | #define __stringify(x) __stringify_1(x) | 10 | #define __stringify(x...) __stringify_1(x) |
11 | 11 | ||
12 | #endif /* !__LINUX_STRINGIFY_H */ | 12 | #endif /* !__LINUX_STRINGIFY_H */ |
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 3435d24bfe55..2a30775959e9 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h | |||
@@ -24,6 +24,15 @@ | |||
24 | */ | 24 | */ |
25 | typedef int (*svc_thread_fn)(void *); | 25 | typedef int (*svc_thread_fn)(void *); |
26 | 26 | ||
27 | /* statistics for svc_pool structures */ | ||
28 | struct svc_pool_stats { | ||
29 | unsigned long packets; | ||
30 | unsigned long sockets_queued; | ||
31 | unsigned long threads_woken; | ||
32 | unsigned long overloads_avoided; | ||
33 | unsigned long threads_timedout; | ||
34 | }; | ||
35 | |||
27 | /* | 36 | /* |
28 | * | 37 | * |
29 | * RPC service thread pool. | 38 | * RPC service thread pool. |
@@ -41,6 +50,8 @@ struct svc_pool { | |||
41 | struct list_head sp_sockets; /* pending sockets */ | 50 | struct list_head sp_sockets; /* pending sockets */ |
42 | unsigned int sp_nrthreads; /* # of threads in pool */ | 51 | unsigned int sp_nrthreads; /* # of threads in pool */ |
43 | struct list_head sp_all_threads; /* all server threads */ | 52 | struct list_head sp_all_threads; /* all server threads */ |
53 | int sp_nwaking; /* number of threads woken but not yet active */ | ||
54 | struct svc_pool_stats sp_stats; /* statistics on pool operation */ | ||
44 | } ____cacheline_aligned_in_smp; | 55 | } ____cacheline_aligned_in_smp; |
45 | 56 | ||
46 | /* | 57 | /* |
@@ -69,7 +80,6 @@ struct svc_serv { | |||
69 | struct list_head sv_tempsocks; /* all temporary sockets */ | 80 | struct list_head sv_tempsocks; /* all temporary sockets */ |
70 | int sv_tmpcnt; /* count of temporary sockets */ | 81 | int sv_tmpcnt; /* count of temporary sockets */ |
71 | struct timer_list sv_temptimer; /* timer for aging temporary sockets */ | 82 | struct timer_list sv_temptimer; /* timer for aging temporary sockets */ |
72 | sa_family_t sv_family; /* listener's address family */ | ||
73 | 83 | ||
74 | char * sv_name; /* service name */ | 84 | char * sv_name; /* service name */ |
75 | 85 | ||
@@ -84,6 +94,8 @@ struct svc_serv { | |||
84 | struct module * sv_module; /* optional module to count when | 94 | struct module * sv_module; /* optional module to count when |
85 | * adding threads */ | 95 | * adding threads */ |
86 | svc_thread_fn sv_function; /* main function for threads */ | 96 | svc_thread_fn sv_function; /* main function for threads */ |
97 | unsigned int sv_drc_max_pages; /* Total pages for DRC */ | ||
98 | unsigned int sv_drc_pages_used;/* DRC pages used */ | ||
87 | }; | 99 | }; |
88 | 100 | ||
89 | /* | 101 | /* |
@@ -219,6 +231,7 @@ struct svc_rqst { | |||
219 | struct svc_cred rq_cred; /* auth info */ | 231 | struct svc_cred rq_cred; /* auth info */ |
220 | void * rq_xprt_ctxt; /* transport specific context ptr */ | 232 | void * rq_xprt_ctxt; /* transport specific context ptr */ |
221 | struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ | 233 | struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ |
234 | int rq_usedeferral; /* use deferral */ | ||
222 | 235 | ||
223 | size_t rq_xprt_hlen; /* xprt header len */ | 236 | size_t rq_xprt_hlen; /* xprt header len */ |
224 | struct xdr_buf rq_arg; | 237 | struct xdr_buf rq_arg; |
@@ -264,6 +277,7 @@ struct svc_rqst { | |||
264 | * cache pages */ | 277 | * cache pages */ |
265 | wait_queue_head_t rq_wait; /* synchronization */ | 278 | wait_queue_head_t rq_wait; /* synchronization */ |
266 | struct task_struct *rq_task; /* service thread */ | 279 | struct task_struct *rq_task; /* service thread */ |
280 | int rq_waking; /* 1 if thread is being woken */ | ||
267 | }; | 281 | }; |
268 | 282 | ||
269 | /* | 283 | /* |
@@ -385,19 +399,20 @@ struct svc_procedure { | |||
385 | /* | 399 | /* |
386 | * Function prototypes. | 400 | * Function prototypes. |
387 | */ | 401 | */ |
388 | struct svc_serv *svc_create(struct svc_program *, unsigned int, sa_family_t, | 402 | struct svc_serv *svc_create(struct svc_program *, unsigned int, |
389 | void (*shutdown)(struct svc_serv *)); | 403 | void (*shutdown)(struct svc_serv *)); |
390 | struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, | 404 | struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, |
391 | struct svc_pool *pool); | 405 | struct svc_pool *pool); |
392 | void svc_exit_thread(struct svc_rqst *); | 406 | void svc_exit_thread(struct svc_rqst *); |
393 | struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, | 407 | struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int, |
394 | sa_family_t, void (*shutdown)(struct svc_serv *), | 408 | void (*shutdown)(struct svc_serv *), |
395 | svc_thread_fn, struct module *); | 409 | svc_thread_fn, struct module *); |
396 | int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); | 410 | int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int); |
411 | int svc_pool_stats_open(struct svc_serv *serv, struct file *file); | ||
397 | void svc_destroy(struct svc_serv *); | 412 | void svc_destroy(struct svc_serv *); |
398 | int svc_process(struct svc_rqst *); | 413 | int svc_process(struct svc_rqst *); |
399 | int svc_register(const struct svc_serv *, const unsigned short, | 414 | int svc_register(const struct svc_serv *, const int, |
400 | const unsigned short); | 415 | const unsigned short, const unsigned short); |
401 | 416 | ||
402 | void svc_wake_up(struct svc_serv *); | 417 | void svc_wake_up(struct svc_serv *); |
403 | void svc_reserve(struct svc_rqst *rqstp, int space); | 418 | void svc_reserve(struct svc_rqst *rqstp, int space); |
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index 0127daca4354..0d9cb6ef28b0 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h | |||
@@ -71,7 +71,8 @@ int svc_reg_xprt_class(struct svc_xprt_class *); | |||
71 | void svc_unreg_xprt_class(struct svc_xprt_class *); | 71 | void svc_unreg_xprt_class(struct svc_xprt_class *); |
72 | void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *, | 72 | void svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *, |
73 | struct svc_serv *); | 73 | struct svc_serv *); |
74 | int svc_create_xprt(struct svc_serv *, char *, unsigned short, int); | 74 | int svc_create_xprt(struct svc_serv *, const char *, const int, |
75 | const unsigned short, int); | ||
75 | void svc_xprt_enqueue(struct svc_xprt *xprt); | 76 | void svc_xprt_enqueue(struct svc_xprt *xprt); |
76 | void svc_xprt_received(struct svc_xprt *); | 77 | void svc_xprt_received(struct svc_xprt *); |
77 | void svc_xprt_put(struct svc_xprt *xprt); | 78 | void svc_xprt_put(struct svc_xprt *xprt); |
@@ -80,7 +81,8 @@ void svc_close_xprt(struct svc_xprt *xprt); | |||
80 | void svc_delete_xprt(struct svc_xprt *xprt); | 81 | void svc_delete_xprt(struct svc_xprt *xprt); |
81 | int svc_port_is_privileged(struct sockaddr *sin); | 82 | int svc_port_is_privileged(struct sockaddr *sin); |
82 | int svc_print_xprts(char *buf, int maxlen); | 83 | int svc_print_xprts(char *buf, int maxlen); |
83 | struct svc_xprt *svc_find_xprt(struct svc_serv *, char *, int, int); | 84 | struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, |
85 | const sa_family_t af, const unsigned short port); | ||
84 | int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen); | 86 | int svc_xprt_names(struct svc_serv *serv, char *buf, int buflen); |
85 | 87 | ||
86 | static inline void svc_xprt_get(struct svc_xprt *xprt) | 88 | static inline void svc_xprt_get(struct svc_xprt *xprt) |
@@ -88,29 +90,32 @@ static inline void svc_xprt_get(struct svc_xprt *xprt) | |||
88 | kref_get(&xprt->xpt_ref); | 90 | kref_get(&xprt->xpt_ref); |
89 | } | 91 | } |
90 | static inline void svc_xprt_set_local(struct svc_xprt *xprt, | 92 | static inline void svc_xprt_set_local(struct svc_xprt *xprt, |
91 | struct sockaddr *sa, int salen) | 93 | const struct sockaddr *sa, |
94 | const size_t salen) | ||
92 | { | 95 | { |
93 | memcpy(&xprt->xpt_local, sa, salen); | 96 | memcpy(&xprt->xpt_local, sa, salen); |
94 | xprt->xpt_locallen = salen; | 97 | xprt->xpt_locallen = salen; |
95 | } | 98 | } |
96 | static inline void svc_xprt_set_remote(struct svc_xprt *xprt, | 99 | static inline void svc_xprt_set_remote(struct svc_xprt *xprt, |
97 | struct sockaddr *sa, int salen) | 100 | const struct sockaddr *sa, |
101 | const size_t salen) | ||
98 | { | 102 | { |
99 | memcpy(&xprt->xpt_remote, sa, salen); | 103 | memcpy(&xprt->xpt_remote, sa, salen); |
100 | xprt->xpt_remotelen = salen; | 104 | xprt->xpt_remotelen = salen; |
101 | } | 105 | } |
102 | static inline unsigned short svc_addr_port(struct sockaddr *sa) | 106 | static inline unsigned short svc_addr_port(const struct sockaddr *sa) |
103 | { | 107 | { |
104 | unsigned short ret = 0; | 108 | const struct sockaddr_in *sin = (const struct sockaddr_in *)sa; |
109 | const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa; | ||
110 | |||
105 | switch (sa->sa_family) { | 111 | switch (sa->sa_family) { |
106 | case AF_INET: | 112 | case AF_INET: |
107 | ret = ntohs(((struct sockaddr_in *)sa)->sin_port); | 113 | return ntohs(sin->sin_port); |
108 | break; | ||
109 | case AF_INET6: | 114 | case AF_INET6: |
110 | ret = ntohs(((struct sockaddr_in6 *)sa)->sin6_port); | 115 | return ntohs(sin6->sin6_port); |
111 | break; | ||
112 | } | 116 | } |
113 | return ret; | 117 | |
118 | return 0; | ||
114 | } | 119 | } |
115 | 120 | ||
116 | static inline size_t svc_addr_len(struct sockaddr *sa) | 121 | static inline size_t svc_addr_len(struct sockaddr *sa) |
@@ -124,36 +129,39 @@ static inline size_t svc_addr_len(struct sockaddr *sa) | |||
124 | return -EAFNOSUPPORT; | 129 | return -EAFNOSUPPORT; |
125 | } | 130 | } |
126 | 131 | ||
127 | static inline unsigned short svc_xprt_local_port(struct svc_xprt *xprt) | 132 | static inline unsigned short svc_xprt_local_port(const struct svc_xprt *xprt) |
128 | { | 133 | { |
129 | return svc_addr_port((struct sockaddr *)&xprt->xpt_local); | 134 | return svc_addr_port((const struct sockaddr *)&xprt->xpt_local); |
130 | } | 135 | } |
131 | 136 | ||
132 | static inline unsigned short svc_xprt_remote_port(struct svc_xprt *xprt) | 137 | static inline unsigned short svc_xprt_remote_port(const struct svc_xprt *xprt) |
133 | { | 138 | { |
134 | return svc_addr_port((struct sockaddr *)&xprt->xpt_remote); | 139 | return svc_addr_port((const struct sockaddr *)&xprt->xpt_remote); |
135 | } | 140 | } |
136 | 141 | ||
137 | static inline char *__svc_print_addr(struct sockaddr *addr, | 142 | static inline char *__svc_print_addr(const struct sockaddr *addr, |
138 | char *buf, size_t len) | 143 | char *buf, const size_t len) |
139 | { | 144 | { |
145 | const struct sockaddr_in *sin = (const struct sockaddr_in *)addr; | ||
146 | const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)addr; | ||
147 | |||
140 | switch (addr->sa_family) { | 148 | switch (addr->sa_family) { |
141 | case AF_INET: | 149 | case AF_INET: |
142 | snprintf(buf, len, "%pI4, port=%u", | 150 | snprintf(buf, len, "%pI4, port=%u", &sin->sin_addr, |
143 | &((struct sockaddr_in *)addr)->sin_addr, | 151 | ntohs(sin->sin_port)); |
144 | ntohs(((struct sockaddr_in *) addr)->sin_port)); | ||
145 | break; | 152 | break; |
146 | 153 | ||
147 | case AF_INET6: | 154 | case AF_INET6: |
148 | snprintf(buf, len, "%pI6, port=%u", | 155 | snprintf(buf, len, "%pI6, port=%u", |
149 | &((struct sockaddr_in6 *)addr)->sin6_addr, | 156 | &sin6->sin6_addr, |
150 | ntohs(((struct sockaddr_in6 *) addr)->sin6_port)); | 157 | ntohs(sin6->sin6_port)); |
151 | break; | 158 | break; |
152 | 159 | ||
153 | default: | 160 | default: |
154 | snprintf(buf, len, "unknown address type: %d", addr->sa_family); | 161 | snprintf(buf, len, "unknown address type: %d", addr->sa_family); |
155 | break; | 162 | break; |
156 | } | 163 | } |
164 | |||
157 | return buf; | 165 | return buf; |
158 | } | 166 | } |
159 | #endif /* SUNRPC_SVC_XPRT_H */ | 167 | #endif /* SUNRPC_SVC_XPRT_H */ |
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 49e1eb454465..d8910b68e1bd 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h | |||
@@ -69,27 +69,27 @@ struct xdr_buf { | |||
69 | * pre-xdr'ed macros. | 69 | * pre-xdr'ed macros. |
70 | */ | 70 | */ |
71 | 71 | ||
72 | #define xdr_zero __constant_htonl(0) | 72 | #define xdr_zero cpu_to_be32(0) |
73 | #define xdr_one __constant_htonl(1) | 73 | #define xdr_one cpu_to_be32(1) |
74 | #define xdr_two __constant_htonl(2) | 74 | #define xdr_two cpu_to_be32(2) |
75 | 75 | ||
76 | #define rpc_success __constant_htonl(RPC_SUCCESS) | 76 | #define rpc_success cpu_to_be32(RPC_SUCCESS) |
77 | #define rpc_prog_unavail __constant_htonl(RPC_PROG_UNAVAIL) | 77 | #define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL) |
78 | #define rpc_prog_mismatch __constant_htonl(RPC_PROG_MISMATCH) | 78 | #define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH) |
79 | #define rpc_proc_unavail __constant_htonl(RPC_PROC_UNAVAIL) | 79 | #define rpc_proc_unavail cpu_to_be32(RPC_PROC_UNAVAIL) |
80 | #define rpc_garbage_args __constant_htonl(RPC_GARBAGE_ARGS) | 80 | #define rpc_garbage_args cpu_to_be32(RPC_GARBAGE_ARGS) |
81 | #define rpc_system_err __constant_htonl(RPC_SYSTEM_ERR) | 81 | #define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR) |
82 | #define rpc_drop_reply __constant_htonl(RPC_DROP_REPLY) | 82 | #define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY) |
83 | 83 | ||
84 | #define rpc_auth_ok __constant_htonl(RPC_AUTH_OK) | 84 | #define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK) |
85 | #define rpc_autherr_badcred __constant_htonl(RPC_AUTH_BADCRED) | 85 | #define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED) |
86 | #define rpc_autherr_rejectedcred __constant_htonl(RPC_AUTH_REJECTEDCRED) | 86 | #define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED) |
87 | #define rpc_autherr_badverf __constant_htonl(RPC_AUTH_BADVERF) | 87 | #define rpc_autherr_badverf cpu_to_be32(RPC_AUTH_BADVERF) |
88 | #define rpc_autherr_rejectedverf __constant_htonl(RPC_AUTH_REJECTEDVERF) | 88 | #define rpc_autherr_rejectedverf cpu_to_be32(RPC_AUTH_REJECTEDVERF) |
89 | #define rpc_autherr_tooweak __constant_htonl(RPC_AUTH_TOOWEAK) | 89 | #define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) |
90 | #define rpcsec_gsserr_credproblem __constant_htonl(RPCSEC_GSS_CREDPROBLEM) | 90 | #define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) |
91 | #define rpcsec_gsserr_ctxproblem __constant_htonl(RPCSEC_GSS_CTXPROBLEM) | 91 | #define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) |
92 | #define rpc_autherr_oldseqnum __constant_htonl(101) | 92 | #define rpc_autherr_oldseqnum cpu_to_be32(101) |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * Miscellaneous XDR helper functions | 95 | * Miscellaneous XDR helper functions |
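The switch from __constant_htonl() to cpu_to_be32() does not change the on-wire values; it gives the constants the __be32 type that sparse endianness checking expects. A small hedged sketch of how such pre-encoded words are typically written into a reply buffer (encode_accept_ok() is a made-up helper):

#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/xdr.h>

/* Sketch: the constants are already big-endian at compile time, so no
 * run-time byte swapping happens here. */
static __be32 *encode_accept_ok(__be32 *p)
{
	*p++ = rpc_success;	/* cpu_to_be32(RPC_SUCCESS) */
	*p++ = xdr_zero;	/* four zero bytes on the wire */
	return p;
}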
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 11fc71d50c1e..08afe43118f4 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -235,6 +235,7 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 * | |||
235 | */ | 235 | */ |
236 | int xprt_register_transport(struct xprt_class *type); | 236 | int xprt_register_transport(struct xprt_class *type); |
237 | int xprt_unregister_transport(struct xprt_class *type); | 237 | int xprt_unregister_transport(struct xprt_class *type); |
238 | int xprt_load_transport(const char *); | ||
238 | void xprt_set_retrans_timeout_def(struct rpc_task *task); | 239 | void xprt_set_retrans_timeout_def(struct rpc_task *task); |
239 | void xprt_set_retrans_timeout_rtt(struct rpc_task *task); | 240 | void xprt_set_retrans_timeout_rtt(struct rpc_task *task); |
240 | void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); | 241 | void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status); |
@@ -259,6 +260,8 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); | |||
259 | #define XPRT_BOUND (4) | 260 | #define XPRT_BOUND (4) |
260 | #define XPRT_BINDING (5) | 261 | #define XPRT_BINDING (5) |
261 | #define XPRT_CLOSING (6) | 262 | #define XPRT_CLOSING (6) |
263 | #define XPRT_CONNECTION_ABORT (7) | ||
264 | #define XPRT_CONNECTION_CLOSE (8) | ||
262 | 265 | ||
263 | static inline void xprt_set_connected(struct rpc_xprt *xprt) | 266 | static inline void xprt_set_connected(struct rpc_xprt *xprt) |
264 | { | 267 | { |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index c7d9bb1832ba..795032edfc46 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -1,9 +1,6 @@ | |||
1 | #ifndef _LINUX_SUSPEND_H | 1 | #ifndef _LINUX_SUSPEND_H |
2 | #define _LINUX_SUSPEND_H | 2 | #define _LINUX_SUSPEND_H |
3 | 3 | ||
4 | #if defined(CONFIG_X86) || defined(CONFIG_FRV) || defined(CONFIG_PPC32) || defined(CONFIG_PPC64) | ||
5 | #include <asm/suspend.h> | ||
6 | #endif | ||
7 | #include <linux/swap.h> | 4 | #include <linux/swap.h> |
8 | #include <linux/notifier.h> | 5 | #include <linux/notifier.h> |
9 | #include <linux/init.h> | 6 | #include <linux/init.h> |
@@ -61,10 +58,17 @@ typedef int __bitwise suspend_state_t; | |||
61 | * by @begin(). | 58 | * by @begin(). |
62 | * @prepare() is called right after devices have been suspended (ie. the | 59 | * @prepare() is called right after devices have been suspended (ie. the |
63 | * appropriate .suspend() method has been executed for each device) and | 60 | * appropriate .suspend() method has been executed for each device) and |
64 | * before the nonboot CPUs are disabled (it is executed with IRQs enabled). | 61 | * before device drivers' late suspend callbacks are executed. It returns |
65 | * This callback is optional. It returns 0 on success or a negative | 62 | * 0 on success or a negative error code otherwise, in which case the |
66 | * error code otherwise, in which case the system cannot enter the desired | 63 | * system cannot enter the desired sleep state (@prepare_late(), @enter(), |
67 | * sleep state (@enter() and @finish() will not be called in that case). | 64 | * @wake(), and @finish() will not be called in that case). |
65 | * | ||
66 | * @prepare_late: Finish preparing the platform for entering the system sleep | ||
67 | * state indicated by @begin(). | ||
68 | * @prepare_late is called before disabling nonboot CPUs and after | ||
69 | * device drivers' late suspend callbacks have been executed. It returns | ||
70 | * 0 on success or a negative error code otherwise, in which case the | ||
71 | * system cannot enter the desired sleep state (@enter() and @wake() will not be called in that case). | ||
68 | * | 72 | * |
69 | * @enter: Enter the system sleep state indicated by @begin() or represented by | 73 | * @enter: Enter the system sleep state indicated by @begin() or represented by |
70 | * the argument if @begin() is not implemented. | 74 | * the argument if @begin() is not implemented. |
@@ -72,19 +76,26 @@ typedef int __bitwise suspend_state_t; | |||
72 | * error code otherwise, in which case the system cannot enter the desired | 76 | * error code otherwise, in which case the system cannot enter the desired |
73 | * sleep state. | 77 | * sleep state. |
74 | * | 78 | * |
75 | * @finish: Called when the system has just left a sleep state, right after | 79 | * @wake: Called when the system has just left a sleep state, right after |
76 | * the nonboot CPUs have been enabled and before devices are resumed (it is | 80 | * the nonboot CPUs have been enabled and before device drivers' early |
77 | * executed with IRQs enabled). | 81 | * resume callbacks are executed. |
82 | * This callback is optional, but should be implemented by the platforms | ||
83 | * that implement @prepare_late(). If implemented, it is always called | ||
84 | * after @enter(), even if @enter() fails. | ||
85 | * | ||
86 | * @finish: Finish wake-up of the platform. | ||
87 | * @finish is called right prior to calling device drivers' regular resume | ||
88 | * callbacks. | ||
78 | * This callback is optional, but should be implemented by the platforms | 89 | * This callback is optional, but should be implemented by the platforms |
79 | * that implement @prepare(). If implemented, it is always called after | 90 | * that implement @prepare(). If implemented, it is always called after |
80 | * @enter() (even if @enter() fails). | 91 | * @enter() and @wake(), if implemented, even if any of them fails. |
81 | * | 92 | * |
82 | * @end: Called by the PM core right after resuming devices, to indicate to | 93 | * @end: Called by the PM core right after resuming devices, to indicate to |
83 | * the platform that the system has returned to the working state or | 94 | * the platform that the system has returned to the working state or |
84 | * the transition to the sleep state has been aborted. | 95 | * the transition to the sleep state has been aborted. |
85 | * This callback is optional, but should be implemented by the platforms | 96 | * This callback is optional, but should be implemented by the platforms |
86 | * that implement @begin(), but platforms implementing @begin() should | 97 | * that implement @begin(). Accordingly, platforms implementing @begin() |
87 | * also provide a @end() which cleans up transitions aborted before | 98 | * should also provide a @end() which cleans up transitions aborted before |
88 | * @enter(). | 99 | * @enter(). |
89 | * | 100 | * |
90 | * @recover: Recover the platform from a suspend failure. | 101 | * @recover: Recover the platform from a suspend failure. |
@@ -96,7 +107,9 @@ struct platform_suspend_ops { | |||
96 | int (*valid)(suspend_state_t state); | 107 | int (*valid)(suspend_state_t state); |
97 | int (*begin)(suspend_state_t state); | 108 | int (*begin)(suspend_state_t state); |
98 | int (*prepare)(void); | 109 | int (*prepare)(void); |
110 | int (*prepare_late)(void); | ||
99 | int (*enter)(suspend_state_t state); | 111 | int (*enter)(suspend_state_t state); |
112 | void (*wake)(void); | ||
100 | void (*finish)(void); | 113 | void (*finish)(void); |
101 | void (*end)(void); | 114 | void (*end)(void); |
102 | void (*recover)(void); | 115 | void (*recover)(void); |
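For illustration, a hedged sketch of how a platform might populate the extended ops table; the my_board_*() callbacks are hypothetical, and only the field names and callback ordering come from the documentation above.

#include <linux/suspend.h>

/* Hypothetical platform hooks; real implementations would touch hardware. */
static int my_board_prepare(void)
{
	return 0;	/* runs before drivers' late suspend callbacks */
}

static int my_board_prepare_late(void)
{
	return 0;	/* runs after late suspend, before nonboot CPUs go down */
}

static int my_board_enter(suspend_state_t state)
{
	return 0;	/* actually enter the sleep state */
}

static void my_board_wake(void)
{
	/* runs after nonboot CPUs are back, before early resume callbacks */
}

static void my_board_finish(void)
{
	/* runs before drivers' regular resume callbacks */
}

static struct platform_suspend_ops my_board_suspend_ops = {
	.valid		= suspend_valid_only_mem,
	.prepare	= my_board_prepare,
	.prepare_late	= my_board_prepare_late,
	.enter		= my_board_enter,
	.wake		= my_board_wake,
	.finish		= my_board_finish,
};

A platform would normally hand such a table to suspend_set_ops() during early init; only the two new fields differ from the pre-existing pattern.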
diff --git a/include/linux/swap.h b/include/linux/swap.h index d30215578877..62d81435347a 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -212,7 +212,7 @@ static inline void lru_cache_add_active_file(struct page *page) | |||
212 | 212 | ||
213 | /* linux/mm/vmscan.c */ | 213 | /* linux/mm/vmscan.c */ |
214 | extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | 214 | extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, |
215 | gfp_t gfp_mask); | 215 | gfp_t gfp_mask, nodemask_t *mask); |
216 | extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, | 216 | extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, |
217 | gfp_t gfp_mask, bool noswap, | 217 | gfp_t gfp_mask, bool noswap, |
218 | unsigned int swappiness); | 218 | unsigned int swappiness); |
@@ -382,6 +382,11 @@ static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, | |||
382 | return NULL; | 382 | return NULL; |
383 | } | 383 | } |
384 | 384 | ||
385 | static inline int swap_writepage(struct page *p, struct writeback_control *wbc) | ||
386 | { | ||
387 | return 0; | ||
388 | } | ||
389 | |||
385 | static inline struct page *lookup_swap_cache(swp_entry_t swp) | 390 | static inline struct page *lookup_swap_cache(swp_entry_t swp) |
386 | { | 391 | { |
387 | return NULL; | 392 | return NULL; |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index dedd3c0cfe30..ac9ff54f7cb3 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
@@ -31,7 +31,7 @@ extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, | |||
31 | phys_addr_t address); | 31 | phys_addr_t address); |
32 | extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); | 32 | extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); |
33 | 33 | ||
34 | extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); | 34 | extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size); |
35 | 35 | ||
36 | extern void | 36 | extern void |
37 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 37 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
@@ -41,20 +41,13 @@ extern void | |||
41 | swiotlb_free_coherent(struct device *hwdev, size_t size, | 41 | swiotlb_free_coherent(struct device *hwdev, size_t size, |
42 | void *vaddr, dma_addr_t dma_handle); | 42 | void *vaddr, dma_addr_t dma_handle); |
43 | 43 | ||
44 | extern dma_addr_t | 44 | extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, |
45 | swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir); | 45 | unsigned long offset, size_t size, |
46 | 46 | enum dma_data_direction dir, | |
47 | extern void | 47 | struct dma_attrs *attrs); |
48 | swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, | 48 | extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, |
49 | size_t size, int dir); | 49 | size_t size, enum dma_data_direction dir, |
50 | 50 | struct dma_attrs *attrs); | |
51 | extern dma_addr_t | ||
52 | swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, | ||
53 | int dir, struct dma_attrs *attrs); | ||
54 | |||
55 | extern void | ||
56 | swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, | ||
57 | size_t size, int dir, struct dma_attrs *attrs); | ||
58 | 51 | ||
59 | extern int | 52 | extern int |
60 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, | 53 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, |
@@ -66,36 +59,38 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents, | |||
66 | 59 | ||
67 | extern int | 60 | extern int |
68 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, | 61 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, |
69 | int dir, struct dma_attrs *attrs); | 62 | enum dma_data_direction dir, struct dma_attrs *attrs); |
70 | 63 | ||
71 | extern void | 64 | extern void |
72 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, | 65 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
73 | int nelems, int dir, struct dma_attrs *attrs); | 66 | int nelems, enum dma_data_direction dir, |
67 | struct dma_attrs *attrs); | ||
74 | 68 | ||
75 | extern void | 69 | extern void |
76 | swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, | 70 | swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, |
77 | size_t size, int dir); | 71 | size_t size, enum dma_data_direction dir); |
78 | 72 | ||
79 | extern void | 73 | extern void |
80 | swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, | 74 | swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, |
81 | int nelems, int dir); | 75 | int nelems, enum dma_data_direction dir); |
82 | 76 | ||
83 | extern void | 77 | extern void |
84 | swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, | 78 | swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, |
85 | size_t size, int dir); | 79 | size_t size, enum dma_data_direction dir); |
86 | 80 | ||
87 | extern void | 81 | extern void |
88 | swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, | 82 | swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, |
89 | int nelems, int dir); | 83 | int nelems, enum dma_data_direction dir); |
90 | 84 | ||
91 | extern void | 85 | extern void |
92 | swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, | 86 | swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, |
93 | unsigned long offset, size_t size, int dir); | 87 | unsigned long offset, size_t size, |
88 | enum dma_data_direction dir); | ||
94 | 89 | ||
95 | extern void | 90 | extern void |
96 | swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, | 91 | swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, |
97 | unsigned long offset, size_t size, | 92 | unsigned long offset, size_t size, |
98 | int dir); | 93 | enum dma_data_direction dir); |
99 | 94 | ||
100 | extern int | 95 | extern int |
101 | swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); | 96 | swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr); |
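The direction arguments are now the typed enum dma_data_direction, and the single-buffer mappers give way to page-based ones. Below is a hedged sketch of the new calling convention; example_dma_read() is invented, and in practice drivers reach these routines through the generic DMA API rather than calling swiotlb directly.

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

/* Sketch: map one page for a device-to-memory transfer, sync, then unmap. */
static int example_dma_read(struct device *hwdev, struct page *page)
{
	dma_addr_t dma_addr;

	dma_addr = swiotlb_map_page(hwdev, page, 0, PAGE_SIZE,
				    DMA_FROM_DEVICE, NULL);
	if (swiotlb_dma_mapping_error(hwdev, dma_addr))
		return -ENOMEM;

	/* ... program the device to DMA into the page here ... */

	swiotlb_sync_single_for_cpu(hwdev, dma_addr, PAGE_SIZE,
				    DMA_FROM_DEVICE);
	swiotlb_unmap_page(hwdev, dma_addr, PAGE_SIZE,
			   DMA_FROM_DEVICE, NULL);
	return 0;
}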
diff --git a/include/linux/synclink.h b/include/linux/synclink.h index 99b8bdb17b2b..0ff2779c44d0 100644 --- a/include/linux/synclink.h +++ b/include/linux/synclink.h | |||
@@ -125,6 +125,7 @@ | |||
125 | #define MGSL_MODE_MONOSYNC 3 | 125 | #define MGSL_MODE_MONOSYNC 3 |
126 | #define MGSL_MODE_BISYNC 4 | 126 | #define MGSL_MODE_BISYNC 4 |
127 | #define MGSL_MODE_RAW 6 | 127 | #define MGSL_MODE_RAW 6 |
128 | #define MGSL_MODE_BASE_CLOCK 7 | ||
128 | 129 | ||
129 | #define MGSL_BUS_TYPE_ISA 1 | 130 | #define MGSL_BUS_TYPE_ISA 1 |
130 | #define MGSL_BUS_TYPE_EISA 2 | 131 | #define MGSL_BUS_TYPE_EISA 2 |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index f9f900cfd066..40617c1d8976 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -65,6 +65,7 @@ struct old_linux_dirent; | |||
65 | #include <asm/signal.h> | 65 | #include <asm/signal.h> |
66 | #include <linux/quota.h> | 66 | #include <linux/quota.h> |
67 | #include <linux/key.h> | 67 | #include <linux/key.h> |
68 | #include <trace/syscall.h> | ||
68 | 69 | ||
69 | #define __SC_DECL1(t1, a1) t1 a1 | 70 | #define __SC_DECL1(t1, a1) t1 a1 |
70 | #define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) | 71 | #define __SC_DECL2(t2, a2, ...) t2 a2, __SC_DECL1(__VA_ARGS__) |
@@ -95,7 +96,46 @@ struct old_linux_dirent; | |||
95 | #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) | 96 | #define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__) |
96 | #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) | 97 | #define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__) |
97 | 98 | ||
99 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
100 | #define __SC_STR_ADECL1(t, a) #a | ||
101 | #define __SC_STR_ADECL2(t, a, ...) #a, __SC_STR_ADECL1(__VA_ARGS__) | ||
102 | #define __SC_STR_ADECL3(t, a, ...) #a, __SC_STR_ADECL2(__VA_ARGS__) | ||
103 | #define __SC_STR_ADECL4(t, a, ...) #a, __SC_STR_ADECL3(__VA_ARGS__) | ||
104 | #define __SC_STR_ADECL5(t, a, ...) #a, __SC_STR_ADECL4(__VA_ARGS__) | ||
105 | #define __SC_STR_ADECL6(t, a, ...) #a, __SC_STR_ADECL5(__VA_ARGS__) | ||
106 | |||
107 | #define __SC_STR_TDECL1(t, a) #t | ||
108 | #define __SC_STR_TDECL2(t, a, ...) #t, __SC_STR_TDECL1(__VA_ARGS__) | ||
109 | #define __SC_STR_TDECL3(t, a, ...) #t, __SC_STR_TDECL2(__VA_ARGS__) | ||
110 | #define __SC_STR_TDECL4(t, a, ...) #t, __SC_STR_TDECL3(__VA_ARGS__) | ||
111 | #define __SC_STR_TDECL5(t, a, ...) #t, __SC_STR_TDECL4(__VA_ARGS__) | ||
112 | #define __SC_STR_TDECL6(t, a, ...) #t, __SC_STR_TDECL5(__VA_ARGS__) | ||
113 | |||
114 | #define SYSCALL_METADATA(sname, nb) \ | ||
115 | static const struct syscall_metadata __used \ | ||
116 | __attribute__((__aligned__(4))) \ | ||
117 | __attribute__((section("__syscalls_metadata"))) \ | ||
118 | __syscall_meta_##sname = { \ | ||
119 | .name = "sys"#sname, \ | ||
120 | .nb_args = nb, \ | ||
121 | .types = types_##sname, \ | ||
122 | .args = args_##sname, \ | ||
123 | } | ||
124 | |||
125 | #define SYSCALL_DEFINE0(sname) \ | ||
126 | static const struct syscall_metadata __used \ | ||
127 | __attribute__((__aligned__(4))) \ | ||
128 | __attribute__((section("__syscalls_metadata"))) \ | ||
129 | __syscall_meta_##sname = { \ | ||
130 | .name = "sys_"#sname, \ | ||
131 | .nb_args = 0, \ | ||
132 | }; \ | ||
133 | asmlinkage long sys_##sname(void) | ||
134 | |||
135 | #else | ||
98 | #define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) | 136 | #define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void) |
137 | #endif | ||
138 | |||
99 | #define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) | 139 | #define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__) |
100 | #define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) | 140 | #define SYSCALL_DEFINE2(name, ...) SYSCALL_DEFINEx(2, _##name, __VA_ARGS__) |
101 | #define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) | 141 | #define SYSCALL_DEFINE3(name, ...) SYSCALL_DEFINEx(3, _##name, __VA_ARGS__) |
@@ -108,7 +148,7 @@ struct old_linux_dirent; | |||
108 | asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \ | 148 | asm ("\t.globl " #alias "\n\t.set " #alias ", " #name "\n" \ |
109 | "\t.globl ." #alias "\n\t.set ." #alias ", ." #name) | 149 | "\t.globl ." #alias "\n\t.set ." #alias ", ." #name) |
110 | #else | 150 | #else |
111 | #ifdef CONFIG_ALPHA | 151 | #if defined(CONFIG_ALPHA) || defined(CONFIG_MIPS) |
112 | #define SYSCALL_ALIAS(alias, name) \ | 152 | #define SYSCALL_ALIAS(alias, name) \ |
113 | asm ( #alias " = " #name "\n\t.globl " #alias) | 153 | asm ( #alias " = " #name "\n\t.globl " #alias) |
114 | #else | 154 | #else |
@@ -117,10 +157,26 @@ struct old_linux_dirent; | |||
117 | #endif | 157 | #endif |
118 | #endif | 158 | #endif |
119 | 159 | ||
160 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
161 | #define SYSCALL_DEFINEx(x, sname, ...) \ | ||
162 | static const char *types_##sname[] = { \ | ||
163 | __SC_STR_TDECL##x(__VA_ARGS__) \ | ||
164 | }; \ | ||
165 | static const char *args_##sname[] = { \ | ||
166 | __SC_STR_ADECL##x(__VA_ARGS__) \ | ||
167 | }; \ | ||
168 | SYSCALL_METADATA(sname, x); \ | ||
169 | __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) | ||
170 | #else | ||
171 | #define SYSCALL_DEFINEx(x, sname, ...) \ | ||
172 | __SYSCALL_DEFINEx(x, sname, __VA_ARGS__) | ||
173 | #endif | ||
174 | |||
120 | #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS | 175 | #ifdef CONFIG_HAVE_SYSCALL_WRAPPERS |
121 | 176 | ||
122 | #define SYSCALL_DEFINE(name) static inline long SYSC_##name | 177 | #define SYSCALL_DEFINE(name) static inline long SYSC_##name |
123 | #define SYSCALL_DEFINEx(x, name, ...) \ | 178 | |
179 | #define __SYSCALL_DEFINEx(x, name, ...) \ | ||
124 | asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ | 180 | asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)); \ |
125 | static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ | 181 | static inline long SYSC##name(__SC_DECL##x(__VA_ARGS__)); \ |
126 | asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ | 182 | asmlinkage long SyS##name(__SC_LONG##x(__VA_ARGS__)) \ |
@@ -134,7 +190,7 @@ struct old_linux_dirent; | |||
134 | #else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ | 190 | #else /* CONFIG_HAVE_SYSCALL_WRAPPERS */ |
135 | 191 | ||
136 | #define SYSCALL_DEFINE(name) asmlinkage long sys_##name | 192 | #define SYSCALL_DEFINE(name) asmlinkage long sys_##name |
137 | #define SYSCALL_DEFINEx(x, name, ...) \ | 193 | #define __SYSCALL_DEFINEx(x, name, ...) \ |
138 | asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) | 194 | asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__)) |
139 | 195 | ||
140 | #endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ | 196 | #endif /* CONFIG_HAVE_SYSCALL_WRAPPERS */ |
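With CONFIG_FTRACE_SYSCALLS enabled, each SYSCALL_DEFINEn() now records the syscall's argument names and types in the __syscalls_metadata section before delegating to __SYSCALL_DEFINEx(). Roughly, and ignoring the syscall-wrapper variant, a definition such as SYSCALL_DEFINE2(dup2, ...) expands along the lines of the hand-written sketch below (illustrative only, not literal preprocessor output):

/* Sketch of the expansion of:
 *	SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
 */
static const char *types__dup2[] = { "unsigned int", "unsigned int" };
static const char *args__dup2[]  = { "oldfd", "newfd" };

static const struct syscall_metadata __used
	__attribute__((__aligned__(4)))
	__attribute__((section("__syscalls_metadata")))
	__syscall_meta__dup2 = {
	.name		= "sys_dup2",
	.nb_args	= 2,
	.types		= types__dup2,
	.args		= args__dup2,
};

asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd)
{
	return 0;	/* placeholder for the body written inside the macro */
}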
@@ -461,6 +517,10 @@ asmlinkage long sys_pread64(unsigned int fd, char __user *buf, | |||
461 | size_t count, loff_t pos); | 517 | size_t count, loff_t pos); |
462 | asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, | 518 | asmlinkage long sys_pwrite64(unsigned int fd, const char __user *buf, |
463 | size_t count, loff_t pos); | 519 | size_t count, loff_t pos); |
520 | asmlinkage long sys_preadv(unsigned long fd, const struct iovec __user *vec, | ||
521 | unsigned long vlen, unsigned long pos_l, unsigned long pos_h); | ||
522 | asmlinkage long sys_pwritev(unsigned long fd, const struct iovec __user *vec, | ||
523 | unsigned long vlen, unsigned long pos_l, unsigned long pos_h); | ||
464 | asmlinkage long sys_getcwd(char __user *buf, unsigned long size); | 524 | asmlinkage long sys_getcwd(char __user *buf, unsigned long size); |
465 | asmlinkage long sys_mkdir(const char __user *pathname, int mode); | 525 | asmlinkage long sys_mkdir(const char __user *pathname, int mode); |
466 | asmlinkage long sys_chdir(const char __user *filename); | 526 | asmlinkage long sys_chdir(const char __user *filename); |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 917707e6151d..1de8b9eb841b 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -27,27 +27,46 @@ | |||
27 | 27 | ||
28 | #include <linux/idr.h> | 28 | #include <linux/idr.h> |
29 | #include <linux/device.h> | 29 | #include <linux/device.h> |
30 | #include <linux/workqueue.h> | ||
30 | 31 | ||
31 | struct thermal_zone_device; | 32 | struct thermal_zone_device; |
32 | struct thermal_cooling_device; | 33 | struct thermal_cooling_device; |
33 | 34 | ||
35 | enum thermal_device_mode { | ||
36 | THERMAL_DEVICE_DISABLED = 0, | ||
37 | THERMAL_DEVICE_ENABLED, | ||
38 | }; | ||
39 | |||
40 | enum thermal_trip_type { | ||
41 | THERMAL_TRIP_ACTIVE = 0, | ||
42 | THERMAL_TRIP_PASSIVE, | ||
43 | THERMAL_TRIP_HOT, | ||
44 | THERMAL_TRIP_CRITICAL, | ||
45 | }; | ||
46 | |||
34 | struct thermal_zone_device_ops { | 47 | struct thermal_zone_device_ops { |
35 | int (*bind) (struct thermal_zone_device *, | 48 | int (*bind) (struct thermal_zone_device *, |
36 | struct thermal_cooling_device *); | 49 | struct thermal_cooling_device *); |
37 | int (*unbind) (struct thermal_zone_device *, | 50 | int (*unbind) (struct thermal_zone_device *, |
38 | struct thermal_cooling_device *); | 51 | struct thermal_cooling_device *); |
39 | int (*get_temp) (struct thermal_zone_device *, char *); | 52 | int (*get_temp) (struct thermal_zone_device *, unsigned long *); |
40 | int (*get_mode) (struct thermal_zone_device *, char *); | 53 | int (*get_mode) (struct thermal_zone_device *, |
41 | int (*set_mode) (struct thermal_zone_device *, const char *); | 54 | enum thermal_device_mode *); |
42 | int (*get_trip_type) (struct thermal_zone_device *, int, char *); | 55 | int (*set_mode) (struct thermal_zone_device *, |
43 | int (*get_trip_temp) (struct thermal_zone_device *, int, char *); | 56 | enum thermal_device_mode); |
57 | int (*get_trip_type) (struct thermal_zone_device *, int, | ||
58 | enum thermal_trip_type *); | ||
59 | int (*get_trip_temp) (struct thermal_zone_device *, int, | ||
60 | unsigned long *); | ||
44 | int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); | 61 | int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *); |
62 | int (*notify) (struct thermal_zone_device *, int, | ||
63 | enum thermal_trip_type); | ||
45 | }; | 64 | }; |
46 | 65 | ||
47 | struct thermal_cooling_device_ops { | 66 | struct thermal_cooling_device_ops { |
48 | int (*get_max_state) (struct thermal_cooling_device *, char *); | 67 | int (*get_max_state) (struct thermal_cooling_device *, unsigned long *); |
49 | int (*get_cur_state) (struct thermal_cooling_device *, char *); | 68 | int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *); |
50 | int (*set_cur_state) (struct thermal_cooling_device *, unsigned int); | 69 | int (*set_cur_state) (struct thermal_cooling_device *, unsigned long); |
51 | }; | 70 | }; |
52 | 71 | ||
53 | #define THERMAL_TRIPS_NONE -1 | 72 | #define THERMAL_TRIPS_NONE -1 |
@@ -88,11 +107,19 @@ struct thermal_zone_device { | |||
88 | struct device device; | 107 | struct device device; |
89 | void *devdata; | 108 | void *devdata; |
90 | int trips; | 109 | int trips; |
110 | int tc1; | ||
111 | int tc2; | ||
112 | int passive_delay; | ||
113 | int polling_delay; | ||
114 | int last_temperature; | ||
115 | bool passive; | ||
116 | unsigned int forced_passive; | ||
91 | struct thermal_zone_device_ops *ops; | 117 | struct thermal_zone_device_ops *ops; |
92 | struct list_head cooling_devices; | 118 | struct list_head cooling_devices; |
93 | struct idr idr; | 119 | struct idr idr; |
94 | struct mutex lock; /* protect cooling devices list */ | 120 | struct mutex lock; /* protect cooling devices list */ |
95 | struct list_head node; | 121 | struct list_head node; |
122 | struct delayed_work poll_queue; | ||
96 | #if defined(CONFIG_THERMAL_HWMON) | 123 | #if defined(CONFIG_THERMAL_HWMON) |
97 | struct list_head hwmon_node; | 124 | struct list_head hwmon_node; |
98 | struct thermal_hwmon_device *hwmon; | 125 | struct thermal_hwmon_device *hwmon; |
@@ -104,13 +131,16 @@ struct thermal_zone_device { | |||
104 | struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, | 131 | struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, |
105 | struct | 132 | struct |
106 | thermal_zone_device_ops | 133 | thermal_zone_device_ops |
107 | *); | 134 | *, int tc1, int tc2, |
135 | int passive_freq, | ||
136 | int polling_freq); | ||
108 | void thermal_zone_device_unregister(struct thermal_zone_device *); | 137 | void thermal_zone_device_unregister(struct thermal_zone_device *); |
109 | 138 | ||
110 | int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, | 139 | int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, |
111 | struct thermal_cooling_device *); | 140 | struct thermal_cooling_device *); |
112 | int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, | 141 | int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, |
113 | struct thermal_cooling_device *); | 142 | struct thermal_cooling_device *); |
143 | void thermal_zone_device_update(struct thermal_zone_device *); | ||
114 | struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, | 144 | struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, |
115 | struct | 145 | struct |
116 | thermal_cooling_device_ops | 146 | thermal_cooling_device_ops |
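Because the zone and cooling-device callbacks now use typed out-parameters and enums instead of sysfs-formatted strings, a driver's ops shrink to simple assignments. A hedged sketch follows; the my_zone_*() helpers and the constant readings are invented, and millidegrees Celsius is assumed as the reporting unit.

#include <linux/thermal.h>

/* Sketch: a trivial zone that always reports 42.000 C. */
static int my_zone_get_temp(struct thermal_zone_device *tz,
			    unsigned long *temp)
{
	*temp = 42000;			/* placeholder reading */
	return 0;
}

static int my_zone_get_trip_type(struct thermal_zone_device *tz, int trip,
				 enum thermal_trip_type *type)
{
	*type = THERMAL_TRIP_CRITICAL;	/* single critical trip point */
	return 0;
}

static int my_zone_get_trip_temp(struct thermal_zone_device *tz, int trip,
				 unsigned long *temp)
{
	*temp = 100000;			/* placeholder trip temperature */
	return 0;
}

static struct thermal_zone_device_ops my_zone_ops = {
	.get_temp	= my_zone_get_temp,
	.get_trip_type	= my_zone_get_trip_type,
	.get_trip_temp	= my_zone_get_trip_temp,
};

Registration would then go through the extended thermal_zone_device_register(), whose new trailing arguments feed the tc1/tc2 and passive/polling delay fields added to struct thermal_zone_device above.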
diff --git a/include/linux/timer.h b/include/linux/timer.h index e2d662e3416e..6cdb6f3331f1 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/ktime.h> | 5 | #include <linux/ktime.h> |
6 | #include <linux/stddef.h> | 6 | #include <linux/stddef.h> |
7 | #include <linux/debugobjects.h> | 7 | #include <linux/debugobjects.h> |
8 | #include <linux/stringify.h> | ||
8 | 9 | ||
9 | struct tvec_base; | 10 | struct tvec_base; |
10 | 11 | ||
@@ -21,52 +22,126 @@ struct timer_list { | |||
21 | char start_comm[16]; | 22 | char start_comm[16]; |
22 | int start_pid; | 23 | int start_pid; |
23 | #endif | 24 | #endif |
25 | #ifdef CONFIG_LOCKDEP | ||
26 | struct lockdep_map lockdep_map; | ||
27 | #endif | ||
24 | }; | 28 | }; |
25 | 29 | ||
26 | extern struct tvec_base boot_tvec_bases; | 30 | extern struct tvec_base boot_tvec_bases; |
27 | 31 | ||
32 | #ifdef CONFIG_LOCKDEP | ||
33 | /* | ||
34 | * NB: because we have to copy the lockdep_map, setting the lockdep_map key | ||
35 | * (second argument) here is required, otherwise it could be initialised to | ||
36 | * the copy of the lockdep_map later! We use the "<file>:<line>" string | ||
37 | * as the name of the lockdep_map and the pointer to it as the key. | ||
38 | */ | ||
39 | #define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) \ | ||
40 | .lockdep_map = STATIC_LOCKDEP_MAP_INIT(_kn, &_kn), | ||
41 | #else | ||
42 | #define __TIMER_LOCKDEP_MAP_INITIALIZER(_kn) | ||
43 | #endif | ||
44 | |||
28 | #define TIMER_INITIALIZER(_function, _expires, _data) { \ | 45 | #define TIMER_INITIALIZER(_function, _expires, _data) { \ |
29 | .entry = { .prev = TIMER_ENTRY_STATIC }, \ | 46 | .entry = { .prev = TIMER_ENTRY_STATIC }, \ |
30 | .function = (_function), \ | 47 | .function = (_function), \ |
31 | .expires = (_expires), \ | 48 | .expires = (_expires), \ |
32 | .data = (_data), \ | 49 | .data = (_data), \ |
33 | .base = &boot_tvec_bases, \ | 50 | .base = &boot_tvec_bases, \ |
51 | __TIMER_LOCKDEP_MAP_INITIALIZER( \ | ||
52 | __FILE__ ":" __stringify(__LINE__)) \ | ||
34 | } | 53 | } |
35 | 54 | ||
36 | #define DEFINE_TIMER(_name, _function, _expires, _data) \ | 55 | #define DEFINE_TIMER(_name, _function, _expires, _data) \ |
37 | struct timer_list _name = \ | 56 | struct timer_list _name = \ |
38 | TIMER_INITIALIZER(_function, _expires, _data) | 57 | TIMER_INITIALIZER(_function, _expires, _data) |
39 | 58 | ||
40 | void init_timer(struct timer_list *timer); | 59 | void init_timer_key(struct timer_list *timer, |
41 | void init_timer_deferrable(struct timer_list *timer); | 60 | const char *name, |
61 | struct lock_class_key *key); | ||
62 | void init_timer_deferrable_key(struct timer_list *timer, | ||
63 | const char *name, | ||
64 | struct lock_class_key *key); | ||
65 | |||
66 | #ifdef CONFIG_LOCKDEP | ||
67 | #define init_timer(timer) \ | ||
68 | do { \ | ||
69 | static struct lock_class_key __key; \ | ||
70 | init_timer_key((timer), #timer, &__key); \ | ||
71 | } while (0) | ||
72 | |||
73 | #define init_timer_deferrable(timer) \ | ||
74 | do { \ | ||
75 | static struct lock_class_key __key; \ | ||
76 | init_timer_deferrable_key((timer), #timer, &__key); \ | ||
77 | } while (0) | ||
78 | |||
79 | #define init_timer_on_stack(timer) \ | ||
80 | do { \ | ||
81 | static struct lock_class_key __key; \ | ||
82 | init_timer_on_stack_key((timer), #timer, &__key); \ | ||
83 | } while (0) | ||
84 | |||
85 | #define setup_timer(timer, fn, data) \ | ||
86 | do { \ | ||
87 | static struct lock_class_key __key; \ | ||
88 | setup_timer_key((timer), #timer, &__key, (fn), (data));\ | ||
89 | } while (0) | ||
90 | |||
91 | #define setup_timer_on_stack(timer, fn, data) \ | ||
92 | do { \ | ||
93 | static struct lock_class_key __key; \ | ||
94 | setup_timer_on_stack_key((timer), #timer, &__key, \ | ||
95 | (fn), (data)); \ | ||
96 | } while (0) | ||
97 | #else | ||
98 | #define init_timer(timer)\ | ||
99 | init_timer_key((timer), NULL, NULL) | ||
100 | #define init_timer_deferrable(timer)\ | ||
101 | init_timer_deferrable_key((timer), NULL, NULL) | ||
102 | #define init_timer_on_stack(timer)\ | ||
103 | init_timer_on_stack_key((timer), NULL, NULL) | ||
104 | #define setup_timer(timer, fn, data)\ | ||
105 | setup_timer_key((timer), NULL, NULL, (fn), (data)) | ||
106 | #define setup_timer_on_stack(timer, fn, data)\ | ||
107 | setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data)) | ||
108 | #endif | ||
42 | 109 | ||
43 | #ifdef CONFIG_DEBUG_OBJECTS_TIMERS | 110 | #ifdef CONFIG_DEBUG_OBJECTS_TIMERS |
44 | extern void init_timer_on_stack(struct timer_list *timer); | 111 | extern void init_timer_on_stack_key(struct timer_list *timer, |
112 | const char *name, | ||
113 | struct lock_class_key *key); | ||
45 | extern void destroy_timer_on_stack(struct timer_list *timer); | 114 | extern void destroy_timer_on_stack(struct timer_list *timer); |
46 | #else | 115 | #else |
47 | static inline void destroy_timer_on_stack(struct timer_list *timer) { } | 116 | static inline void destroy_timer_on_stack(struct timer_list *timer) { } |
48 | static inline void init_timer_on_stack(struct timer_list *timer) | 117 | static inline void init_timer_on_stack_key(struct timer_list *timer, |
118 | const char *name, | ||
119 | struct lock_class_key *key) | ||
49 | { | 120 | { |
50 | init_timer(timer); | 121 | init_timer_key(timer, name, key); |
51 | } | 122 | } |
52 | #endif | 123 | #endif |
53 | 124 | ||
54 | static inline void setup_timer(struct timer_list * timer, | 125 | static inline void setup_timer_key(struct timer_list * timer, |
126 | const char *name, | ||
127 | struct lock_class_key *key, | ||
55 | void (*function)(unsigned long), | 128 | void (*function)(unsigned long), |
56 | unsigned long data) | 129 | unsigned long data) |
57 | { | 130 | { |
58 | timer->function = function; | 131 | timer->function = function; |
59 | timer->data = data; | 132 | timer->data = data; |
60 | init_timer(timer); | 133 | init_timer_key(timer, name, key); |
61 | } | 134 | } |
62 | 135 | ||
63 | static inline void setup_timer_on_stack(struct timer_list *timer, | 136 | static inline void setup_timer_on_stack_key(struct timer_list *timer, |
137 | const char *name, | ||
138 | struct lock_class_key *key, | ||
64 | void (*function)(unsigned long), | 139 | void (*function)(unsigned long), |
65 | unsigned long data) | 140 | unsigned long data) |
66 | { | 141 | { |
67 | timer->function = function; | 142 | timer->function = function; |
68 | timer->data = data; | 143 | timer->data = data; |
69 | init_timer_on_stack(timer); | 144 | init_timer_on_stack_key(timer, name, key); |
70 | } | 145 | } |
71 | 146 | ||
72 | /** | 147 | /** |
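The user-visible spelling of the timer helpers is unchanged; under CONFIG_LOCKDEP they now expand to the *_key() variants with a static lock class per call site, and without lockdep the name and key are simply NULL. A hedged usage sketch (my_timeout() and the one-second period are placeholders):

#include <linux/jiffies.h>
#include <linux/timer.h>

static struct timer_list my_timer;

static void my_timeout(unsigned long data)
{
	/* placeholder callback; 'data' is whatever was passed to setup_timer() */
}

static void example_arm(void)
{
	/* Under lockdep this becomes setup_timer_key() with a key unique
	 * to this call site, so lock dependencies are tracked per user. */
	setup_timer(&my_timer, my_timeout, 0UL);
	mod_timer(&my_timer, jiffies + HZ);	/* fire in roughly one second */
}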
diff --git a/include/linux/timeriomem-rng.h b/include/linux/timeriomem-rng.h index dd253177f65f..3e08a1c86830 100644 --- a/include/linux/timeriomem-rng.h +++ b/include/linux/timeriomem-rng.h | |||
@@ -14,7 +14,7 @@ struct timeriomem_rng_data { | |||
14 | struct completion completion; | 14 | struct completion completion; |
15 | unsigned int present:1; | 15 | unsigned int present:1; |
16 | 16 | ||
17 | u32 __iomem *address; | 17 | void __iomem *address; |
18 | 18 | ||
19 | /* measures in usecs */ | 19 | /* measures in usecs */ |
20 | unsigned int period; | 20 | unsigned int period; |
diff --git a/include/linux/topology.h b/include/linux/topology.h index a16b9e06f2e5..7402c1a27c4f 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -38,11 +38,7 @@ | |||
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | #ifndef nr_cpus_node | 40 | #ifndef nr_cpus_node |
41 | #define nr_cpus_node(node) \ | 41 | #define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) |
42 | ({ \ | ||
43 | node_to_cpumask_ptr(__tmp__, node); \ | ||
44 | cpus_weight(*__tmp__); \ | ||
45 | }) | ||
46 | #endif | 42 | #endif |
47 | 43 | ||
48 | #define for_each_node_with_cpus(node) \ | 44 | #define for_each_node_with_cpus(node) \ |
@@ -200,4 +196,9 @@ int arch_update_cpu_topology(void); | |||
200 | #define topology_core_cpumask(cpu) cpumask_of(cpu) | 196 | #define topology_core_cpumask(cpu) cpumask_of(cpu) |
201 | #endif | 197 | #endif |
202 | 198 | ||
199 | /* Returns the number of the current Node. */ | ||
200 | #ifndef numa_node_id | ||
201 | #define numa_node_id() (cpu_to_node(raw_smp_processor_id())) | ||
202 | #endif | ||
203 | |||
203 | #endif /* _LINUX_TOPOLOGY_H */ | 204 | #endif /* _LINUX_TOPOLOGY_H */ |
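numa_node_id() resolves to the node of the CPU the caller happens to be running on; a short hedged sketch of node-local allocation (alloc_on_local_node() is a made-up wrapper, and the result is only a hint when preemption can migrate the task afterwards):

#include <linux/slab.h>
#include <linux/topology.h>

/* Sketch: allocate memory on the node backing the current CPU. */
static void *alloc_on_local_node(size_t size)
{
	return kmalloc_node(size, GFP_KERNEL, numa_node_id());
}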
diff --git a/include/linux/trace_clock.h b/include/linux/trace_clock.h new file mode 100644 index 000000000000..7a8130384087 --- /dev/null +++ b/include/linux/trace_clock.h | |||
@@ -0,0 +1,19 @@ | |||
1 | #ifndef _LINUX_TRACE_CLOCK_H | ||
2 | #define _LINUX_TRACE_CLOCK_H | ||
3 | |||
4 | /* | ||
5 | * 3 trace clock variants, with differing scalability/precision | ||
6 | * tradeoffs: | ||
7 | * | ||
8 | * - local: CPU-local trace clock | ||
9 | * - medium: scalable global clock with some jitter | ||
10 | * - global: globally monotonic, serialized clock | ||
11 | */ | ||
12 | #include <linux/compiler.h> | ||
13 | #include <linux/types.h> | ||
14 | |||
15 | extern u64 notrace trace_clock_local(void); | ||
16 | extern u64 notrace trace_clock(void); | ||
17 | extern u64 notrace trace_clock_global(void); | ||
18 | |||
19 | #endif /* _LINUX_TRACE_CLOCK_H */ | ||
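A short hedged sketch of the intended use: a tracer takes whichever variant matches its ordering needs (the example_timestamps() helper is invented; the trade-off comments restate the header's own description).

#include <linux/trace_clock.h>

/* Sketch: one reading from each clock flavour.
 * local  - cheapest, per-CPU, may drift between CPUs
 * medium - trace_clock(), scalable with some cross-CPU jitter
 * global - serialized and monotonic across CPUs, most expensive */
static void example_timestamps(u64 *local, u64 *medium, u64 *global)
{
	*local  = trace_clock_local();
	*medium = trace_clock();
	*global = trace_clock_global();
}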
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 6186a789d6c7..c7aa154f4bfc 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h | |||
@@ -388,17 +388,14 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info, | |||
388 | * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal | 388 | * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal |
389 | * @task: task receiving the signal | 389 | * @task: task receiving the signal |
390 | * @sig: signal number being sent | 390 | * @sig: signal number being sent |
391 | * @handler: %SIG_IGN or %SIG_DFL | ||
392 | * | 391 | * |
393 | * Return zero iff tracing doesn't care to examine this ignored signal, | 392 | * Return zero iff tracing doesn't care to examine this ignored signal, |
394 | * so it can short-circuit normal delivery and never even get queued. | 393 | * so it can short-circuit normal delivery and never even get queued. |
395 | * Either @handler is %SIG_DFL and @sig's default is ignore, or it's %SIG_IGN. | ||
396 | * | 394 | * |
397 | * Called with @task->sighand->siglock held. | 395 | * Called with @task->sighand->siglock held. |
398 | */ | 396 | */ |
399 | static inline int tracehook_consider_ignored_signal(struct task_struct *task, | 397 | static inline int tracehook_consider_ignored_signal(struct task_struct *task, |
400 | int sig, | 398 | int sig) |
401 | void __user *handler) | ||
402 | { | 399 | { |
403 | return (task_ptrace(task) & PT_PTRACED) != 0; | 400 | return (task_ptrace(task) & PT_PTRACED) != 0; |
404 | } | 401 | } |
@@ -407,19 +404,17 @@ static inline int tracehook_consider_ignored_signal(struct task_struct *task, | |||
407 | * tracehook_consider_fatal_signal - suppress special handling of fatal signal | 404 | * tracehook_consider_fatal_signal - suppress special handling of fatal signal |
408 | * @task: task receiving the signal | 405 | * @task: task receiving the signal |
409 | * @sig: signal number being sent | 406 | * @sig: signal number being sent |
410 | * @handler: %SIG_DFL or %SIG_IGN | ||
411 | * | 407 | * |
412 | * Return nonzero to prevent special handling of this termination signal. | 408 | * Return nonzero to prevent special handling of this termination signal. |
413 | * Normally @handler is %SIG_DFL. It can be %SIG_IGN if @sig is ignored, | 409 | * Normally the handler for the signal is %SIG_DFL. It can be %SIG_IGN if @sig is |
414 | * in which case force_sig() is about to reset it to %SIG_DFL. | 410 | * ignored, in which case force_sig() is about to reset it to %SIG_DFL. |
415 | * When this returns zero, this signal might cause a quick termination | 411 | * When this returns zero, this signal might cause a quick termination |
416 | * that does not give the debugger a chance to intercept the signal. | 412 | * that does not give the debugger a chance to intercept the signal. |
417 | * | 413 | * |
418 | * Called with or without @task->sighand->siglock held. | 414 | * Called with or without @task->sighand->siglock held. |
419 | */ | 415 | */ |
420 | static inline int tracehook_consider_fatal_signal(struct task_struct *task, | 416 | static inline int tracehook_consider_fatal_signal(struct task_struct *task, |
421 | int sig, | 417 | int sig) |
422 | void __user *handler) | ||
423 | { | 418 | { |
424 | return (task_ptrace(task) & PT_PTRACED) != 0; | 419 | return (task_ptrace(task) & PT_PTRACED) != 0; |
425 | } | 420 | } |
@@ -507,7 +502,7 @@ static inline int tracehook_notify_jctl(int notify, int why) | |||
507 | static inline int tracehook_notify_death(struct task_struct *task, | 502 | static inline int tracehook_notify_death(struct task_struct *task, |
508 | void **death_cookie, int group_dead) | 503 | void **death_cookie, int group_dead) |
509 | { | 504 | { |
510 | if (task->exit_signal == -1) | 505 | if (task_detached(task)) |
511 | return task->ptrace ? SIGCHLD : DEATH_REAP; | 506 | return task->ptrace ? SIGCHLD : DEATH_REAP; |
512 | 507 | ||
513 | /* | 508 | /* |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 757005458366..d35a7ee7611f 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -31,8 +31,8 @@ struct tracepoint { | |||
31 | * Keep in sync with vmlinux.lds.h. | 31 | * Keep in sync with vmlinux.lds.h. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define TPPROTO(args...) args | 34 | #define TP_PROTO(args...) args |
35 | #define TPARGS(args...) args | 35 | #define TP_ARGS(args...) args |
36 | 36 | ||
37 | #ifdef CONFIG_TRACEPOINTS | 37 | #ifdef CONFIG_TRACEPOINTS |
38 | 38 | ||
@@ -65,7 +65,7 @@ struct tracepoint { | |||
65 | { \ | 65 | { \ |
66 | if (unlikely(__tracepoint_##name.state)) \ | 66 | if (unlikely(__tracepoint_##name.state)) \ |
67 | __DO_TRACE(&__tracepoint_##name, \ | 67 | __DO_TRACE(&__tracepoint_##name, \ |
68 | TPPROTO(proto), TPARGS(args)); \ | 68 | TP_PROTO(proto), TP_ARGS(args)); \ |
69 | } \ | 69 | } \ |
70 | static inline int register_trace_##name(void (*probe)(proto)) \ | 70 | static inline int register_trace_##name(void (*probe)(proto)) \ |
71 | { \ | 71 | { \ |
@@ -153,4 +153,114 @@ static inline void tracepoint_synchronize_unregister(void) | |||
153 | synchronize_sched(); | 153 | synchronize_sched(); |
154 | } | 154 | } |
155 | 155 | ||
156 | #define PARAMS(args...) args | ||
157 | #define TRACE_FORMAT(name, proto, args, fmt) \ | ||
158 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | ||
159 | |||
160 | |||
161 | /* | ||
162 | * For use with the TRACE_EVENT macro: | ||
163 | * | ||
164 | * We define a tracepoint, its arguments, its printk format | ||
165 | * and its 'fast binary record' layout. | ||
166 | * | ||
167 | * Firstly, name your tracepoint via TRACE_EVENT(name, ...): the | ||
168 | * 'subsystem_event' notation is fine. | ||
169 | * | ||
170 | * Think about this whole construct as the | ||
171 | * 'trace_sched_switch() function' from now on. | ||
172 | * | ||
173 | * | ||
174 | * TRACE_EVENT(sched_switch, | ||
175 | * | ||
176 | * * | ||
177 | * * A function has a regular function-arguments | ||
178 | * * prototype; declare it via TP_PROTO(): | ||
179 | * * | ||
180 | * | ||
181 | * TP_PROTO(struct rq *rq, struct task_struct *prev, | ||
182 | * struct task_struct *next), | ||
183 | * | ||
184 | * * | ||
185 | * * Define the call signature of the 'function'. | ||
186 | * * (Design sidenote: we use this instead of a | ||
187 | * * TP_PROTO1/TP_PROTO2/TP_PROTO3 ugliness.) | ||
188 | * * | ||
189 | * | ||
190 | * TP_ARGS(rq, prev, next), | ||
191 | * | ||
192 | * * | ||
193 | * * Fast binary tracing: define the trace record via | ||
194 | * * TP_STRUCT__entry(). You can think about it like a | ||
195 | * * regular C structure local variable definition. | ||
196 | * * | ||
197 | * * This is how the trace record is structured and will | ||
198 | * * be saved into the ring buffer. These are the fields | ||
199 | * * that will be exposed to user-space in | ||
200 | * * /debug/tracing/events/<*>/format. | ||
201 | * * | ||
202 | * * The declared 'local variable' is called '__entry' | ||
203 | * * | ||
204 | * * __field(pid_t, prev_pid) is equivalent to a standard declaration: | ||
205 | * * | ||
206 | * * pid_t prev_pid; | ||
207 | * * | ||
208 | * * __array(char, prev_comm, TASK_COMM_LEN) is equivalent to: | ||
209 | * * | ||
210 | * * char prev_comm[TASK_COMM_LEN]; | ||
211 | * * | ||
212 | * | ||
213 | * TP_STRUCT__entry( | ||
214 | * __array( char, prev_comm, TASK_COMM_LEN ) | ||
215 | * __field( pid_t, prev_pid ) | ||
216 | * __field( int, prev_prio ) | ||
217 | * __array( char, next_comm, TASK_COMM_LEN ) | ||
218 | * __field( pid_t, next_pid ) | ||
219 | * __field( int, next_prio ) | ||
220 | * ), | ||
221 | * | ||
222 | * * | ||
223 | * * Assign the entry into the trace record, by embedding | ||
224 | * * a full C statement block into TP_fast_assign(). You | ||
225 | * * can refer to the trace record as '__entry' - | ||
226 | * * otherwise you can put arbitrary C code in here. | ||
227 | * * | ||
228 | * * Note: this C code will execute every time a trace event | ||
229 | * * happens, on an active tracepoint. | ||
230 | * * | ||
231 | * | ||
232 | * TP_fast_assign( | ||
233 | * memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); | ||
234 | * __entry->prev_pid = prev->pid; | ||
235 | * __entry->prev_prio = prev->prio; | ||
236 | * memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); | ||
237 | * __entry->next_pid = next->pid; | ||
238 | * __entry->next_prio = next->prio; | ||
239 | * ) | ||
240 | * | ||
241 | * * | ||
242 | * * Formatted output of a trace record via TP_printk(). | ||
243 | * * This is how the tracepoint will appear under ftrace | ||
244 | * * plugins that make use of this tracepoint. | ||
245 | * * | ||
246 | * * (raw-binary tracing won't actually perform this step.) | ||
247 | * * | ||
248 | * | ||
249 | * TP_printk("task %s:%d [%d] ==> %s:%d [%d]", | ||
250 | * __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, | ||
251 | * __entry->next_comm, __entry->next_pid, __entry->next_prio), | ||
252 | * | ||
253 | * ); | ||
254 | * | ||
255 | * This macro construct is thus used for the regular printk format | ||
256 | * tracing setup; it is used to construct a function-pointer-based | ||
257 | * tracepoint callback (this is used by programmatic plugins and | ||
258 | * can also be used by generic instrumentation like SystemTap), and | ||
259 | * it is also used to expose a structured trace record in | ||
260 | * /debug/tracing/events/. | ||
261 | */ | ||
262 | |||
263 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ | ||
264 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | ||
265 | |||
156 | #endif | 266 | #endif |
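To complement the sched_switch walkthrough in the comment above, here is a minimal hedged sketch of a plain tracepoint using the renamed TP_PROTO()/TP_ARGS() wrappers; the example_event name and probe are hypothetical, and as usual DEFINE_TRACE() belongs in exactly one compilation unit.

#include <linux/tracepoint.h>

/* Normally in a header: declare the tracepoint and its signature. */
DECLARE_TRACE(example_event,
	TP_PROTO(int value),
	TP_ARGS(value));

/* In one .c file: emit the tracepoint definition itself. */
DEFINE_TRACE(example_event);

/* A probe that runs whenever trace_example_event(value) fires. */
static void example_probe(int value)
{
	/* inspect 'value' here */
}

static int example_attach(void)
{
	return register_trace_example_event(example_probe);
}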
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 08e088334dba..bcba84ea2d86 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
@@ -252,8 +252,6 @@ struct tty_operations { | |||
252 | void (*set_ldisc)(struct tty_struct *tty); | 252 | void (*set_ldisc)(struct tty_struct *tty); |
253 | void (*wait_until_sent)(struct tty_struct *tty, int timeout); | 253 | void (*wait_until_sent)(struct tty_struct *tty, int timeout); |
254 | void (*send_xchar)(struct tty_struct *tty, char ch); | 254 | void (*send_xchar)(struct tty_struct *tty, char ch); |
255 | int (*read_proc)(char *page, char **start, off_t off, | ||
256 | int count, int *eof, void *data); | ||
257 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 255 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
258 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 256 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
259 | unsigned int set, unsigned int clear); | 257 | unsigned int set, unsigned int clear); |
@@ -264,6 +262,7 @@ struct tty_operations { | |||
264 | int (*poll_get_char)(struct tty_driver *driver, int line); | 262 | int (*poll_get_char)(struct tty_driver *driver, int line); |
265 | void (*poll_put_char)(struct tty_driver *driver, int line, char ch); | 263 | void (*poll_put_char)(struct tty_driver *driver, int line, char ch); |
266 | #endif | 264 | #endif |
265 | const struct file_operations *proc_fops; | ||
267 | }; | 266 | }; |
268 | 267 | ||
269 | struct tty_driver { | 268 | struct tty_driver { |
@@ -310,7 +309,8 @@ extern void tty_set_operations(struct tty_driver *driver, | |||
310 | extern struct tty_driver *tty_find_polling_driver(char *name, int *line); | 309 | extern struct tty_driver *tty_find_polling_driver(char *name, int *line); |
311 | 310 | ||
312 | extern void tty_driver_kref_put(struct tty_driver *driver); | 311 | extern void tty_driver_kref_put(struct tty_driver *driver); |
313 | extern inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) | 312 | |
313 | static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) | ||
314 | { | 314 | { |
315 | kref_get(&d->kref); | 315 | kref_get(&d->kref); |
316 | return d; | 316 | return d; |
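With read_proc gone, a tty driver exposes its /proc entry through the new proc_fops pointer, typically backed by seq_file. A hedged sketch follows; the my_tty_*() names are placeholders and the single_open() pattern is just one common way to wire it up.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Sketch: emit one informational line for the driver's /proc entry. */
static int my_tty_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "my_tty: example output\n");
	return 0;
}

static int my_tty_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_tty_proc_show, NULL);
}

static const struct file_operations my_tty_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= my_tty_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Wired up alongside the other tty_operations:
 *	.proc_fops = &my_tty_proc_fops,
 */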
diff --git a/include/linux/usb.h b/include/linux/usb.h index c6b2ab41b908..3aa2cd1f8d08 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -1387,6 +1387,7 @@ extern int usb_string(struct usb_device *dev, int index, | |||
1387 | extern int usb_clear_halt(struct usb_device *dev, int pipe); | 1387 | extern int usb_clear_halt(struct usb_device *dev, int pipe); |
1388 | extern int usb_reset_configuration(struct usb_device *dev); | 1388 | extern int usb_reset_configuration(struct usb_device *dev); |
1389 | extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate); | 1389 | extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate); |
1390 | extern void usb_reset_endpoint(struct usb_device *dev, unsigned int epaddr); | ||
1390 | 1391 | ||
1391 | /* this request isn't really synchronous, but it belongs with the others */ | 1392 | /* this request isn't really synchronous, but it belongs with the others */ |
1392 | extern int usb_driver_set_configuration(struct usb_device *udev, int config); | 1393 | extern int usb_driver_set_configuration(struct usb_device *udev, int config); |
@@ -1491,14 +1492,6 @@ void usb_sg_wait(struct usb_sg_request *io); | |||
1491 | #define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL) | 1492 | #define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL) |
1492 | #define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK) | 1493 | #define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK) |
1493 | 1494 | ||
1494 | /* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */ | ||
1495 | #define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1) | ||
1496 | #define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep))) | ||
1497 | #define usb_settoggle(dev, ep, out, bit) \ | ||
1498 | ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \ | ||
1499 | ((bit) << (ep))) | ||
1500 | |||
1501 | |||
1502 | static inline unsigned int __create_pipe(struct usb_device *dev, | 1495 | static inline unsigned int __create_pipe(struct usb_device *dev, |
1503 | unsigned int endpoint) | 1496 | unsigned int endpoint) |
1504 | { | 1497 | { |
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h index 3c86ed25a04c..c24124a42ce5 100644 --- a/include/linux/usb/cdc.h +++ b/include/linux/usb/cdc.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #define USB_CDC_SUBCLASS_DMM 0x09 | 17 | #define USB_CDC_SUBCLASS_DMM 0x09 |
18 | #define USB_CDC_SUBCLASS_MDLM 0x0a | 18 | #define USB_CDC_SUBCLASS_MDLM 0x0a |
19 | #define USB_CDC_SUBCLASS_OBEX 0x0b | 19 | #define USB_CDC_SUBCLASS_OBEX 0x0b |
20 | #define USB_CDC_SUBCLASS_EEM 0x0c | ||
20 | 21 | ||
21 | #define USB_CDC_PROTO_NONE 0 | 22 | #define USB_CDC_PROTO_NONE 0 |
22 | 23 | ||
@@ -28,6 +29,8 @@ | |||
28 | #define USB_CDC_ACM_PROTO_AT_CDMA 6 | 29 | #define USB_CDC_ACM_PROTO_AT_CDMA 6 |
29 | #define USB_CDC_ACM_PROTO_VENDOR 0xff | 30 | #define USB_CDC_ACM_PROTO_VENDOR 0xff |
30 | 31 | ||
32 | #define USB_CDC_PROTO_EEM 7 | ||
33 | |||
31 | /*-------------------------------------------------------------------------*/ | 34 | /*-------------------------------------------------------------------------*/ |
32 | 35 | ||
33 | /* | 36 | /* |
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h index d6aad0ea6033..d43755669261 100644 --- a/include/linux/usb/musb.h +++ b/include/linux/usb/musb.h | |||
@@ -7,6 +7,9 @@ | |||
7 | * key configuration differences between boards. | 7 | * key configuration differences between boards. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef __LINUX_USB_MUSB_H | ||
11 | #define __LINUX_USB_MUSB_H | ||
12 | |||
10 | /* The USB role is defined by the connector used on the board, so long as | 13 | /* The USB role is defined by the connector used on the board, so long as |
11 | * standards are being followed. (Developer boards sometimes won't.) | 14 | * standards are being followed. (Developer boards sometimes won't.) |
12 | */ | 15 | */ |
@@ -101,3 +104,5 @@ extern int __init tusb6010_setup_interface( | |||
101 | extern int tusb6010_platform_retime(unsigned is_refclk); | 104 | extern int tusb6010_platform_retime(unsigned is_refclk); |
102 | 105 | ||
103 | #endif /* OMAP2 */ | 106 | #endif /* OMAP2 */ |
107 | |||
108 | #endif /* __LINUX_USB_MUSB_H */ | ||
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index b95842542590..625e9e4639c6 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
@@ -29,7 +29,7 @@ | |||
29 | /** | 29 | /** |
30 | * usb_serial_port: structure for the specific ports of a device. | 30 | * usb_serial_port: structure for the specific ports of a device. |
31 | * @serial: pointer back to the struct usb_serial owner of this port. | 31 | * @serial: pointer back to the struct usb_serial owner of this port. |
32 | * @tty: pointer to the corresponding tty for this port. | 32 | * @port: pointer to the corresponding tty_port for this port. |
33 | * @lock: spinlock to grab when updating portions of this structure. | 33 | * @lock: spinlock to grab when updating portions of this structure. |
34 | * @mutex: mutex used to synchronize serial_open() and serial_close() | 34 | * @mutex: mutex used to synchronize serial_open() and serial_close() |
35 | * access for this port. | 35 | * access for this port. |
@@ -44,19 +44,22 @@ | |||
44 | * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe | 44 | * @interrupt_out_endpointAddress: endpoint address for the interrupt out pipe |
45 | * for this port. | 45 | * for this port. |
46 | * @bulk_in_buffer: pointer to the bulk in buffer for this port. | 46 | * @bulk_in_buffer: pointer to the bulk in buffer for this port. |
47 | * @bulk_in_size: the size of the bulk_in_buffer, in bytes. | ||
47 | * @read_urb: pointer to the bulk in struct urb for this port. | 48 | * @read_urb: pointer to the bulk in struct urb for this port. |
48 | * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this | 49 | * @bulk_in_endpointAddress: endpoint address for the bulk in pipe for this |
49 | * port. | 50 | * port. |
50 | * @bulk_out_buffer: pointer to the bulk out buffer for this port. | 51 | * @bulk_out_buffer: pointer to the bulk out buffer for this port. |
51 | * @bulk_out_size: the size of the bulk_out_buffer, in bytes. | 52 | * @bulk_out_size: the size of the bulk_out_buffer, in bytes. |
52 | * @write_urb: pointer to the bulk out struct urb for this port. | 53 | * @write_urb: pointer to the bulk out struct urb for this port. |
54 | * @write_urb_busy: port's writing status | ||
53 | * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this | 55 | * @bulk_out_endpointAddress: endpoint address for the bulk out pipe for this |
54 | * port. | 56 | * port. |
55 | * @write_wait: a wait_queue_head_t used by the port. | 57 | * @write_wait: a wait_queue_head_t used by the port. |
56 | * @work: work queue entry for the line discipline waking up. | 58 | * @work: work queue entry for the line discipline waking up. |
57 | * @open_count: number of times this port has been opened. | ||
58 | * @throttled: nonzero if the read urb is inactive to throttle the device | 59 | * @throttled: nonzero if the read urb is inactive to throttle the device |
59 | * @throttle_req: nonzero if the tty wants to throttle us | 60 | * @throttle_req: nonzero if the tty wants to throttle us |
61 | * @console: attached usb serial console | ||
62 | * @dev: pointer to the serial device | ||
60 | * | 63 | * |
61 | * This structure is used by the usb-serial core and drivers for the specific | 64 | * This structure is used by the usb-serial core and drivers for the specific |
62 | * ports of a device. | 65 | * ports of a device. |
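Editor's note: the kernel-doc above now documents @bulk_in_size, @write_urb_busy, @console and @dev, but the patch itself contains no usage. Below is a minimal, hypothetical write path showing how a driver might use the documented @lock, @write_urb_busy and bulk-out fields; the function name, locking discipline and error handling are assumptions loosely modelled on the generic usb-serial driver, not part of this patch.

	/* Sketch only -- not part of this patch. */
	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <linux/tty.h>
	#include <linux/usb.h>
	#include <linux/usb/serial.h>

	static int example_write(struct tty_struct *tty, struct usb_serial_port *port,
				 const unsigned char *buf, int count)
	{
		unsigned long flags;

		spin_lock_irqsave(&port->lock, flags);
		if (port->write_urb_busy) {	/* previous write still in flight */
			spin_unlock_irqrestore(&port->lock, flags);
			return 0;
		}
		port->write_urb_busy = 1;
		spin_unlock_irqrestore(&port->lock, flags);

		count = min_t(int, count, port->bulk_out_size);
		memcpy(port->bulk_out_buffer, buf, count);
		port->write_urb->transfer_buffer_length = count;

		if (usb_submit_urb(port->write_urb, GFP_ATOMIC)) {
			port->write_urb_busy = 0;
			return 0;
		}
		return count;
	}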
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h index 5f401b644ed5..429c631d2aad 100644 --- a/include/linux/usb/wusb.h +++ b/include/linux/usb/wusb.h | |||
@@ -80,8 +80,7 @@ struct wusb_ckhdid { | |||
80 | u8 data[16]; | 80 | u8 data[16]; |
81 | } __attribute__((packed)); | 81 | } __attribute__((packed)); |
82 | 82 | ||
83 | const static | 83 | static const struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; |
84 | struct wusb_ckhdid wusb_ckhdid_zero = { .data = { 0 } }; | ||
85 | 84 | ||
86 | #define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) | 85 | #define WUSB_CKHDID_STRSIZE (3 * sizeof(struct wusb_ckhdid) + 1) |
87 | 86 | ||
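Editor's note: the wusb.h hunk only reorders the declaration specifiers. `const static` and `static const` mean the same thing, but C treats a storage-class specifier that is not first as an obsolescent form, and gcc emits a style warning for it (via -Wold-style-declaration, enabled by -Wextra). Illustration only, with hypothetical names:

	const static int bad_order  = 16;	/* warns: storage class not at beginning of declaration */
	static const int good_order = 16;	/* preferred spelling, identical meaning */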
diff --git a/include/linux/video_decoder.h b/include/linux/video_decoder.h deleted file mode 100644 index e26c0c86a6ea..000000000000 --- a/include/linux/video_decoder.h +++ /dev/null | |||
@@ -1,48 +0,0 @@ | |||
1 | #ifndef _LINUX_VIDEO_DECODER_H | ||
2 | #define _LINUX_VIDEO_DECODER_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | #define HAVE_VIDEO_DECODER 1 | ||
7 | |||
8 | struct video_decoder_capability { /* this name is too long */ | ||
9 | __u32 flags; | ||
10 | #define VIDEO_DECODER_PAL 1 /* can decode PAL signal */ | ||
11 | #define VIDEO_DECODER_NTSC 2 /* can decode NTSC */ | ||
12 | #define VIDEO_DECODER_SECAM 4 /* can decode SECAM */ | ||
13 | #define VIDEO_DECODER_AUTO 8 /* can autosense norm */ | ||
14 | #define VIDEO_DECODER_CCIR 16 /* CCIR-601 pixel rate (720 pixels per line) instead of square pixel rate */ | ||
15 | int inputs; /* number of inputs */ | ||
16 | int outputs; /* number of outputs */ | ||
17 | }; | ||
18 | |||
19 | /* | ||
20 | DECODER_GET_STATUS returns the following flags. The only one you need is | ||
21 | DECODER_STATUS_GOOD, the others are just nice things to know. | ||
22 | */ | ||
23 | #define DECODER_STATUS_GOOD 1 /* receiving acceptable input */ | ||
24 | #define DECODER_STATUS_COLOR 2 /* receiving color information */ | ||
25 | #define DECODER_STATUS_PAL 4 /* auto detected */ | ||
26 | #define DECODER_STATUS_NTSC 8 /* auto detected */ | ||
27 | #define DECODER_STATUS_SECAM 16 /* auto detected */ | ||
28 | |||
29 | struct video_decoder_init { | ||
30 | unsigned char len; | ||
31 | const unsigned char *data; | ||
32 | }; | ||
33 | |||
34 | #define DECODER_GET_CAPABILITIES _IOR('d', 1, struct video_decoder_capability) | ||
35 | #define DECODER_GET_STATUS _IOR('d', 2, int) | ||
36 | #define DECODER_SET_NORM _IOW('d', 3, int) | ||
37 | #define DECODER_SET_INPUT _IOW('d', 4, int) /* 0 <= input < #inputs */ | ||
38 | #define DECODER_SET_OUTPUT _IOW('d', 5, int) /* 0 <= output < #outputs */ | ||
39 | #define DECODER_ENABLE_OUTPUT _IOW('d', 6, int) /* boolean output enable control */ | ||
40 | #define DECODER_SET_PICTURE _IOW('d', 7, struct video_picture) | ||
41 | #define DECODER_SET_GPIO _IOW('d', 8, int) /* switch general purpose pin */ | ||
42 | #define DECODER_INIT _IOW('d', 9, struct video_decoder_init) /* init internal registers at once */ | ||
43 | #define DECODER_SET_VBI_BYPASS _IOW('d', 10, int) /* switch vbi bypass */ | ||
44 | |||
45 | #define DECODER_DUMP _IO('d', 192) /* debug hook */ | ||
46 | |||
47 | |||
48 | #endif | ||
diff --git a/include/linux/video_encoder.h b/include/linux/video_encoder.h deleted file mode 100644 index b7b6423bbb8a..000000000000 --- a/include/linux/video_encoder.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | #ifndef _LINUX_VIDEO_ENCODER_H | ||
2 | #define _LINUX_VIDEO_ENCODER_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | struct video_encoder_capability { /* this name is too long */ | ||
7 | __u32 flags; | ||
8 | #define VIDEO_ENCODER_PAL 1 /* can encode PAL signal */ | ||
9 | #define VIDEO_ENCODER_NTSC 2 /* can encode NTSC */ | ||
10 | #define VIDEO_ENCODER_SECAM 4 /* can encode SECAM */ | ||
11 | #define VIDEO_ENCODER_CCIR 16 /* CCIR-601 pixel rate (720 pixels per line) instead of square pixel rate */ | ||
12 | int inputs; /* number of inputs */ | ||
13 | int outputs; /* number of outputs */ | ||
14 | }; | ||
15 | |||
16 | #define ENCODER_GET_CAPABILITIES _IOR('e', 1, struct video_encoder_capability) | ||
17 | #define ENCODER_SET_NORM _IOW('e', 2, int) | ||
18 | #define ENCODER_SET_INPUT _IOW('e', 3, int) /* 0 <= input < #inputs */ | ||
19 | #define ENCODER_SET_OUTPUT _IOW('e', 4, int) /* 0 <= output < #outputs */ | ||
20 | #define ENCODER_ENABLE_OUTPUT _IOW('e', 5, int) /* boolean output enable control */ | ||
21 | |||
22 | |||
23 | #endif | ||
diff --git a/include/linux/videodev.h b/include/linux/videodev.h index 837f392fbe97..b19eab140977 100644 --- a/include/linux/videodev.h +++ b/include/linux/videodev.h | |||
@@ -16,6 +16,23 @@ | |||
16 | #include <linux/ioctl.h> | 16 | #include <linux/ioctl.h> |
17 | #include <linux/videodev2.h> | 17 | #include <linux/videodev2.h> |
18 | 18 | ||
19 | #if defined(__MIN_V4L1) && defined (__KERNEL__) | ||
20 | |||
21 | /* | ||
22 | * Used by those V4L2 core functions that need a minimum V4L1 support, | ||
23 | * in order to allow V4L1 Compatibility code compilation. | ||
24 | */ | ||
25 | |||
26 | struct video_mbuf | ||
27 | { | ||
28 | int size; /* Total memory to map */ | ||
29 | int frames; /* Frames */ | ||
30 | int offsets[VIDEO_MAX_FRAME]; | ||
31 | }; | ||
32 | |||
33 | #define VIDIOCGMBUF _IOR('v',20, struct video_mbuf) /* Memory map buffer info */ | ||
34 | |||
35 | #else | ||
19 | #if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__) | 36 | #if defined(CONFIG_VIDEO_V4L1_COMPAT) || !defined (__KERNEL__) |
20 | 37 | ||
21 | #define VID_TYPE_CAPTURE 1 /* Can capture */ | 38 | #define VID_TYPE_CAPTURE 1 /* Can capture */ |
@@ -312,6 +329,7 @@ struct video_code | |||
312 | #define VID_PLAY_END_MARK 14 | 329 | #define VID_PLAY_END_MARK 14 |
313 | 330 | ||
314 | #endif /* CONFIG_VIDEO_V4L1_COMPAT */ | 331 | #endif /* CONFIG_VIDEO_V4L1_COMPAT */ |
332 | #endif /* __MIN_V4L1 */ | ||
315 | 333 | ||
316 | #endif /* __LINUX_VIDEODEV_H */ | 334 | #endif /* __LINUX_VIDEODEV_H */ |
317 | 335 | ||
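Editor's note: the new __MIN_V4L1 branch lets V4L2 core code that only needs struct video_mbuf and VIDIOCGMBUF get them without pulling in the full V4L1 compatibility definitions. A hypothetical in-kernel user (not part of this patch; the function and its body are illustrative assumptions) would look like:

	/* Hypothetical kernel source file: request only the minimal V4L1 bits. */
	#define __MIN_V4L1
	#include <linux/videodev.h>

	static void example_reset_mbuf(struct video_mbuf *mbuf)
	{
		int i;

		/* only struct video_mbuf and VIDIOCGMBUF are visible in this mode */
		mbuf->size = 0;
		mbuf->frames = 0;
		for (i = 0; i < VIDEO_MAX_FRAME; i++)
			mbuf->offsets[i] = 0;
	}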
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 5571dbe1c0ad..ebb2ea6b4995 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h | |||
@@ -344,6 +344,8 @@ struct v4l2_pix_format { | |||
344 | #define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ | 344 | #define V4L2_PIX_FMT_SPCA508 v4l2_fourcc('S', '5', '0', '8') /* YUVY per line */ |
345 | #define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ | 345 | #define V4L2_PIX_FMT_SPCA561 v4l2_fourcc('S', '5', '6', '1') /* compressed GBRG bayer */ |
346 | #define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ | 346 | #define V4L2_PIX_FMT_PAC207 v4l2_fourcc('P', '2', '0', '7') /* compressed BGGR bayer */ |
347 | #define V4L2_PIX_FMT_MR97310A v4l2_fourcc('M', '3', '1', '0') /* compressed BGGR bayer */ | ||
348 | #define V4L2_PIX_FMT_SQ905C v4l2_fourcc('9', '0', '5', 'C') /* compressed RGGB bayer */ | ||
347 | #define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */ | 349 | #define V4L2_PIX_FMT_PJPG v4l2_fourcc('P', 'J', 'P', 'G') /* Pixart 73xx JPEG */ |
348 | #define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */ | 350 | #define V4L2_PIX_FMT_YVYU v4l2_fourcc('Y', 'V', 'Y', 'U') /* 16 YVU 4:2:2 */ |
349 | 351 | ||
@@ -735,6 +737,11 @@ struct v4l2_input { | |||
735 | #define V4L2_IN_ST_NO_SIGNAL 0x00000002 | 737 | #define V4L2_IN_ST_NO_SIGNAL 0x00000002 |
736 | #define V4L2_IN_ST_NO_COLOR 0x00000004 | 738 | #define V4L2_IN_ST_NO_COLOR 0x00000004 |
737 | 739 | ||
740 | /* field 'status' - sensor orientation */ | ||
741 | /* If sensor is mounted upside down set both bits */ | ||
742 | #define V4L2_IN_ST_HFLIP 0x00000010 /* Frames are flipped horizontally */ | ||
743 | #define V4L2_IN_ST_VFLIP 0x00000020 /* Frames are flipped vertically */ | ||
744 | |||
738 | /* field 'status' - analog */ | 745 | /* field 'status' - analog */ |
739 | #define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */ | 746 | #define V4L2_IN_ST_NO_H_LOCK 0x00000100 /* No horizontal sync lock */ |
740 | #define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */ | 747 | #define V4L2_IN_ST_COLOR_KILL 0x00000200 /* Color killer is active */ |
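Editor's note: the two new status bits report sensor orientation through the input status word returned by VIDIOC_ENUMINPUT. A short userspace sketch that checks them for the currently selected input; the function name and error handling are assumptions, not part of the patch.

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Returns 1 if both flip bits are set (sensor mounted upside down),
	 * 0 if not, -1 on error. */
	static int sensor_upside_down(int fd)
	{
		struct v4l2_input input;
		int index;

		if (ioctl(fd, VIDIOC_G_INPUT, &index) < 0)
			return -1;

		memset(&input, 0, sizeof(input));
		input.index = index;
		if (ioctl(fd, VIDIOC_ENUMINPUT, &input) < 0)
			return -1;

		return (input.status & V4L2_IN_ST_HFLIP) &&
		       (input.status & V4L2_IN_ST_VFLIP) ? 1 : 0;
	}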
@@ -829,6 +836,7 @@ struct v4l2_querymenu { | |||
829 | #define V4L2_CTRL_FLAG_UPDATE 0x0008 | 836 | #define V4L2_CTRL_FLAG_UPDATE 0x0008 |
830 | #define V4L2_CTRL_FLAG_INACTIVE 0x0010 | 837 | #define V4L2_CTRL_FLAG_INACTIVE 0x0010 |
831 | #define V4L2_CTRL_FLAG_SLIDER 0x0020 | 838 | #define V4L2_CTRL_FLAG_SLIDER 0x0020 |
839 | #define V4L2_CTRL_FLAG_WRITE_ONLY 0x0040 | ||
832 | 840 | ||
833 | /* Query flag, to be ORed with the control ID */ | 841 | /* Query flag, to be ORed with the control ID */ |
834 | #define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000 | 842 | #define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000 |
@@ -879,8 +887,15 @@ enum v4l2_power_line_frequency { | |||
879 | #define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28) | 887 | #define V4L2_CID_BACKLIGHT_COMPENSATION (V4L2_CID_BASE+28) |
880 | #define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29) | 888 | #define V4L2_CID_CHROMA_AGC (V4L2_CID_BASE+29) |
881 | #define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30) | 889 | #define V4L2_CID_COLOR_KILLER (V4L2_CID_BASE+30) |
890 | #define V4L2_CID_COLORFX (V4L2_CID_BASE+31) | ||
891 | enum v4l2_colorfx { | ||
892 | V4L2_COLORFX_NONE = 0, | ||
893 | V4L2_COLORFX_BW = 1, | ||
894 | V4L2_COLORFX_SEPIA = 2, | ||
895 | }; | ||
896 | |||
882 | /* last CID + 1 */ | 897 | /* last CID + 1 */ |
883 | #define V4L2_CID_LASTP1 (V4L2_CID_BASE+31) | 898 | #define V4L2_CID_LASTP1 (V4L2_CID_BASE+32) |
884 | 899 | ||
885 | /* MPEG-class control IDs defined by V4L2 */ | 900 | /* MPEG-class control IDs defined by V4L2 */ |
886 | #define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900) | 901 | #define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900) |
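Editor's note: V4L2_CID_COLORFX is an ordinary user-class control, so it is set through VIDIOC_S_CTRL like any other. Userspace sketch, illustrative only; the helper name is an assumption.

	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	/* Select the sepia colour effect on an opened video device. */
	static int set_sepia(int fd)
	{
		struct v4l2_control ctrl = {
			.id    = V4L2_CID_COLORFX,
			.value = V4L2_COLORFX_SEPIA,
		};

		return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
	}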
@@ -1339,6 +1354,53 @@ struct v4l2_sliced_vbi_data { | |||
1339 | }; | 1354 | }; |
1340 | 1355 | ||
1341 | /* | 1356 | /* |
1357 | * Sliced VBI data inserted into MPEG Streams | ||
1358 | */ | ||
1359 | |||
1360 | /* | ||
1361 | * V4L2_MPEG_STREAM_VBI_FMT_IVTV: | ||
1362 | * | ||
1363 | * Structure of payload contained in an MPEG 2 Private Stream 1 PES Packet in an | ||
1364 | * MPEG-2 Program Pack that contains V4L2_MPEG_STREAM_VBI_FMT_IVTV Sliced VBI | ||
1365 | * data | ||
1366 | * | ||
1367 | * Note, the MPEG-2 Program Pack and Private Stream 1 PES packet header | ||
1368 | * definitions are not included here. See the MPEG-2 specifications for details | ||
1369 | * on these headers. | ||
1370 | */ | ||
1371 | |||
1372 | /* Line type IDs */ | ||
1373 | #define V4L2_MPEG_VBI_IVTV_TELETEXT_B (1) | ||
1374 | #define V4L2_MPEG_VBI_IVTV_CAPTION_525 (4) | ||
1375 | #define V4L2_MPEG_VBI_IVTV_WSS_625 (5) | ||
1376 | #define V4L2_MPEG_VBI_IVTV_VPS (7) | ||
1377 | |||
1378 | struct v4l2_mpeg_vbi_itv0_line { | ||
1379 | __u8 id; /* One of V4L2_MPEG_VBI_IVTV_* above */ | ||
1380 | __u8 data[42]; /* Sliced VBI data for the line */ | ||
1381 | } __attribute__ ((packed)); | ||
1382 | |||
1383 | struct v4l2_mpeg_vbi_itv0 { | ||
1384 | __le32 linemask[2]; /* Bitmasks of VBI service lines present */ | ||
1385 | struct v4l2_mpeg_vbi_itv0_line line[35]; | ||
1386 | } __attribute__ ((packed)); | ||
1387 | |||
1388 | struct v4l2_mpeg_vbi_ITV0 { | ||
1389 | struct v4l2_mpeg_vbi_itv0_line line[36]; | ||
1390 | } __attribute__ ((packed)); | ||
1391 | |||
1392 | #define V4L2_MPEG_VBI_IVTV_MAGIC0 "itv0" | ||
1393 | #define V4L2_MPEG_VBI_IVTV_MAGIC1 "ITV0" | ||
1394 | |||
1395 | struct v4l2_mpeg_vbi_fmt_ivtv { | ||
1396 | __u8 magic[4]; | ||
1397 | union { | ||
1398 | struct v4l2_mpeg_vbi_itv0 itv0; | ||
1399 | struct v4l2_mpeg_vbi_ITV0 ITV0; | ||
1400 | }; | ||
1401 | } __attribute__ ((packed)); | ||
1402 | |||
1403 | /* | ||
1342 | * A G G R E G A T E S T R U C T U R E S | 1404 | * A G G R E G A T E S T R U C T U R E S |
1343 | */ | 1405 | */ |
1344 | 1406 | ||
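Editor's note: the magic[] field tells a consumer of the PES payload which member of the anonymous union is valid. A minimal sketch of that dispatch; the function is hypothetical and not part of the patch.

	#include <string.h>
	#include <linux/videodev2.h>

	/* Returns the maximum number of line[] entries that may be present,
	 * or -1 if the payload does not carry IVTV sliced VBI data. */
	static int ivtv_vbi_max_lines(const struct v4l2_mpeg_vbi_fmt_ivtv *vbi)
	{
		if (memcmp(vbi->magic, V4L2_MPEG_VBI_IVTV_MAGIC0, 4) == 0)
			return 35;	/* "itv0": lines flagged by itv0.linemask[] */
		if (memcmp(vbi->magic, V4L2_MPEG_VBI_IVTV_MAGIC1, 4) == 0)
			return 36;	/* "ITV0": all 36 ITV0.line[] entries present */
		return -1;
	}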
@@ -1403,14 +1465,6 @@ struct v4l2_dbg_chip_ident { | |||
1403 | __u32 revision; /* chip revision, chip specific */ | 1465 | __u32 revision; /* chip revision, chip specific */ |
1404 | } __attribute__ ((packed)); | 1466 | } __attribute__ ((packed)); |
1405 | 1467 | ||
1406 | /* VIDIOC_G_CHIP_IDENT_OLD: Deprecated, do not use */ | ||
1407 | struct v4l2_chip_ident_old { | ||
1408 | __u32 match_type; /* Match type */ | ||
1409 | __u32 match_chip; /* Match this chip, meaning determined by match_type */ | ||
1410 | __u32 ident; /* chip identifier as specified in <media/v4l2-chip-ident.h> */ | ||
1411 | __u32 revision; /* chip revision, chip specific */ | ||
1412 | }; | ||
1413 | |||
1414 | /* | 1468 | /* |
1415 | * I O C T L C O D E S F O R V I D E O D E V I C E S | 1469 | * I O C T L C O D E S F O R V I D E O D E V I C E S |
1416 | * | 1470 | * |
@@ -1488,8 +1542,6 @@ struct v4l2_chip_ident_old { | |||
1488 | /* Experimental, meant for debugging, testing and internal use. | 1542 | /* Experimental, meant for debugging, testing and internal use. |
1489 | Never use this ioctl in applications! */ | 1543 | Never use this ioctl in applications! */ |
1490 | #define VIDIOC_DBG_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_dbg_chip_ident) | 1544 | #define VIDIOC_DBG_G_CHIP_IDENT _IOWR('V', 81, struct v4l2_dbg_chip_ident) |
1491 | /* This is deprecated and will go away in 2.6.30 */ | ||
1492 | #define VIDIOC_G_CHIP_IDENT_OLD _IOWR('V', 81, struct v4l2_chip_ident_old) | ||
1493 | #endif | 1545 | #endif |
1494 | 1546 | ||
1495 | #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) | 1547 | #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) |
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 242348bb3766..cec79adbe3ea 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
@@ -4,6 +4,7 @@ | |||
4 | * compatible drivers/servers. */ | 4 | * compatible drivers/servers. */ |
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | #include <linux/virtio_config.h> | 6 | #include <linux/virtio_config.h> |
7 | #include <linux/if_ether.h> | ||
7 | 8 | ||
8 | /* The ID for virtio_net */ | 9 | /* The ID for virtio_net */ |
9 | #define VIRTIO_ID_NET 1 | 10 | #define VIRTIO_ID_NET 1 |
diff --git a/include/linux/wait.h b/include/linux/wait.h index a210ede73b56..bc024632f365 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -135,8 +135,11 @@ static inline void __remove_wait_queue(wait_queue_head_t *head, | |||
135 | void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | 135 | void __wake_up_common(wait_queue_head_t *q, unsigned int mode, |
136 | int nr_exclusive, int sync, void *key); | 136 | int nr_exclusive, int sync, void *key); |
137 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 137 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
138 | extern void __wake_up_locked(wait_queue_head_t *q, unsigned int mode); | 138 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
139 | extern void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); | 139 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, |
140 | void *key); | ||
141 | void __wake_up_locked(wait_queue_head_t *q, unsigned int mode); | ||
142 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); | ||
140 | void __wake_up_bit(wait_queue_head_t *, void *, int); | 143 | void __wake_up_bit(wait_queue_head_t *, void *, int); |
141 | int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); | 144 | int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); |
142 | int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); | 145 | int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); |
@@ -155,21 +158,17 @@ wait_queue_head_t *bit_waitqueue(void *, int); | |||
155 | #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) | 158 | #define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL) |
156 | #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) | 159 | #define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1) |
157 | 160 | ||
158 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
159 | /* | 161 | /* |
160 | * macro to avoid include hell | 162 | * Wakeup macros to be used to report events to the targets. |
161 | */ | 163 | */ |
162 | #define wake_up_nested(x, s) \ | 164 | #define wake_up_poll(x, m) \ |
163 | do { \ | 165 | __wake_up(x, TASK_NORMAL, 1, (void *) (m)) |
164 | unsigned long flags; \ | 166 | #define wake_up_locked_poll(x, m) \ |
165 | \ | 167 | __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) |
166 | spin_lock_irqsave_nested(&(x)->lock, flags, (s)); \ | 168 | #define wake_up_interruptible_poll(x, m) \ |
167 | wake_up_locked(x); \ | 169 | __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) |
168 | spin_unlock_irqrestore(&(x)->lock, flags); \ | 170 | #define wake_up_interruptible_sync_poll(x, m) \ |
169 | } while (0) | 171 | __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) |
170 | #else | ||
171 | #define wake_up_nested(x, s) wake_up(x) | ||
172 | #endif | ||
173 | 172 | ||
174 | #define __wait_event(wq, condition) \ | 173 | #define __wait_event(wq, condition) \ |
175 | do { \ | 174 | do { \ |
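Editor's note: the poll-key macros that replace wake_up_nested() let the waker pass the poll events as the wakeup key, so keyed waiters (epoll entries, for instance) can skip wakeups for events they are not watching. A minimal in-kernel sketch with hypothetical names:

	#include <linux/poll.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(example_wait);

	/* Called when new data becomes readable: wake only those waiters
	 * whose key matches the readability events. */
	static void example_data_arrived(void)
	{
		wake_up_interruptible_poll(&example_wait, POLLIN | POLLRDNORM);
	}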
@@ -441,13 +440,15 @@ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, | |||
441 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | 440 | int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
442 | int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | 441 | int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); |
443 | 442 | ||
444 | #define DEFINE_WAIT(name) \ | 443 | #define DEFINE_WAIT_FUNC(name, function) \ |
445 | wait_queue_t name = { \ | 444 | wait_queue_t name = { \ |
446 | .private = current, \ | 445 | .private = current, \ |
447 | .func = autoremove_wake_function, \ | 446 | .func = function, \ |
448 | .task_list = LIST_HEAD_INIT((name).task_list), \ | 447 | .task_list = LIST_HEAD_INIT((name).task_list), \ |
449 | } | 448 | } |
450 | 449 | ||
450 | #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) | ||
451 | |||
451 | #define DEFINE_WAIT_BIT(name, word, bit) \ | 452 | #define DEFINE_WAIT_BIT(name, word, bit) \ |
452 | struct wait_bit_queue name = { \ | 453 | struct wait_bit_queue name = { \ |
453 | .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \ | 454 | .key = __WAIT_BIT_KEY_INITIALIZER(word, bit), \ |
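Editor's note: DEFINE_WAIT_FUNC() generalises DEFINE_WAIT() by letting the caller pick the wake callback. A rough sketch, assuming a hypothetical example_wq/example_ready pair and using default_wake_function purely for illustration (unlike autoremove_wake_function it leaves the entry on the queue):

	#include <linux/sched.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(example_wq);
	static int example_ready;

	static void example_wait_for_event(void)
	{
		DEFINE_WAIT_FUNC(wait, default_wake_function);

		prepare_to_wait(&example_wq, &wait, TASK_INTERRUPTIBLE);
		if (!example_ready)	/* a real caller would loop and recheck */
			schedule();
		finish_wait(&example_wq, &wait);
	}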
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 3cd51e579ab1..13e1adf55c4c 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -41,6 +41,11 @@ struct delayed_work { | |||
41 | struct timer_list timer; | 41 | struct timer_list timer; |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static inline struct delayed_work *to_delayed_work(struct work_struct *work) | ||
45 | { | ||
46 | return container_of(work, struct delayed_work, work); | ||
47 | } | ||
48 | |||
44 | struct execute_work { | 49 | struct execute_work { |
45 | struct work_struct work; | 50 | struct work_struct work; |
46 | }; | 51 | }; |
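Editor's note: to_delayed_work() wraps the container_of() step that every delayed-work handler used to open-code. Typical use inside a handler that re-arms itself; the structure and names are hypothetical.

	#include <linux/jiffies.h>
	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct example_dev {
		struct delayed_work	poll_work;
		/* ... */
	};

	static void example_poll(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);
		struct example_dev *dev = container_of(dwork, struct example_dev,
						       poll_work);

		/* ... poll the hardware via dev ..., then re-arm: */
		schedule_delayed_work(&dev->poll_work, HZ);
	}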
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 7300ecdc480c..9c1ed1fb6ddb 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -109,8 +109,8 @@ extern int dirty_background_ratio; | |||
109 | extern unsigned long dirty_background_bytes; | 109 | extern unsigned long dirty_background_bytes; |
110 | extern int vm_dirty_ratio; | 110 | extern int vm_dirty_ratio; |
111 | extern unsigned long vm_dirty_bytes; | 111 | extern unsigned long vm_dirty_bytes; |
112 | extern int dirty_writeback_interval; | 112 | extern unsigned int dirty_writeback_interval; |
113 | extern int dirty_expire_interval; | 113 | extern unsigned int dirty_expire_interval; |
114 | extern int vm_highmem_is_dirtyable; | 114 | extern int vm_highmem_is_dirtyable; |
115 | extern int block_dump; | 115 | extern int block_dump; |
116 | extern int laptop_mode; | 116 | extern int laptop_mode; |
@@ -168,6 +168,8 @@ void writeback_set_ratelimit(void); | |||
168 | /* pdflush.c */ | 168 | /* pdflush.c */ |
169 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl | 169 | extern int nr_pdflush_threads; /* Global so it can be exported to sysctl |
170 | read-only. */ | 170 | read-only. */ |
171 | extern int nr_pdflush_threads_max; /* Global so it can be exported to sysctl */ | ||
172 | extern int nr_pdflush_threads_min; /* Global so it can be exported to sysctl */ | ||
171 | 173 | ||
172 | 174 | ||
173 | #endif /* WRITEBACK_H */ | 175 | #endif /* WRITEBACK_H */ |