Diffstat (limited to 'include/linux')
178 files changed, 3338 insertions, 4612 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index f72914db2a11..756f831cbdd5 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -118,6 +118,7 @@ header-y += mtio.h
118 | header-y += ncp_no.h | 118 | header-y += ncp_no.h |
119 | header-y += neighbour.h | 119 | header-y += neighbour.h |
120 | header-y += net_dropmon.h | 120 | header-y += net_dropmon.h |
121 | header-y += net_tstamp.h | ||
121 | header-y += netfilter_arp.h | 122 | header-y += netfilter_arp.h |
122 | header-y += netrom.h | 123 | header-y += netrom.h |
123 | header-y += nfs2.h | 124 | header-y += nfs2.h |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index dfcd920c3e54..b926afe8c03e 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -80,7 +80,7 @@ char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
80 | void __acpi_unmap_table(char *map, unsigned long size); | 80 | void __acpi_unmap_table(char *map, unsigned long size); |
81 | int early_acpi_boot_init(void); | 81 | int early_acpi_boot_init(void); |
82 | int acpi_boot_init (void); | 82 | int acpi_boot_init (void); |
83 | int acpi_boot_table_init (void); | 83 | void acpi_boot_table_init (void); |
84 | int acpi_mps_check (void); | 84 | int acpi_mps_check (void); |
85 | int acpi_numa_init (void); | 85 | int acpi_numa_init (void); |
86 | 86 | ||
@@ -240,7 +240,7 @@ extern int pnpacpi_disabled;
240 | #define PXM_INVAL (-1) | 240 | #define PXM_INVAL (-1) |
241 | #define NID_INVAL (-1) | 241 | #define NID_INVAL (-1) |
242 | 242 | ||
243 | int acpi_check_resource_conflict(struct resource *res); | 243 | int acpi_check_resource_conflict(const struct resource *res); |
244 | 244 | ||
245 | int acpi_check_region(resource_size_t start, resource_size_t n, | 245 | int acpi_check_region(resource_size_t start, resource_size_t n, |
246 | const char *name); | 246 | const char *name); |
@@ -251,12 +251,19 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
251 | void __init acpi_no_s4_hw_signature(void); | 251 | void __init acpi_no_s4_hw_signature(void); |
252 | void __init acpi_old_suspend_ordering(void); | 252 | void __init acpi_old_suspend_ordering(void); |
253 | void __init acpi_s4_no_nvs(void); | 253 | void __init acpi_s4_no_nvs(void); |
254 | void __init acpi_set_sci_en_on_resume(void); | ||
254 | #endif /* CONFIG_PM_SLEEP */ | 255 | #endif /* CONFIG_PM_SLEEP */ |
255 | 256 | ||
257 | struct acpi_osc_context { | ||
258 | char *uuid_str; /* uuid string */ | ||
259 | int rev; | ||
260 | struct acpi_buffer cap; /* arg2/arg3 */ | ||
261 | struct acpi_buffer ret; /* free by caller if success */ | ||
262 | }; | ||
263 | |||
256 | #define OSC_QUERY_TYPE 0 | 264 | #define OSC_QUERY_TYPE 0 |
257 | #define OSC_SUPPORT_TYPE 1 | 265 | #define OSC_SUPPORT_TYPE 1 |
258 | #define OSC_CONTROL_TYPE 2 | 266 | #define OSC_CONTROL_TYPE 2 |
259 | #define OSC_SUPPORT_MASKS 0x1f | ||
260 | 267 | ||
261 | /* _OSC DW0 Definition */ | 268 | /* _OSC DW0 Definition */ |
262 | #define OSC_QUERY_ENABLE 1 | 269 | #define OSC_QUERY_ENABLE 1 |
@@ -265,12 +272,23 @@ void __init acpi_s4_no_nvs(void);
265 | #define OSC_INVALID_REVISION_ERROR 8 | 272 | #define OSC_INVALID_REVISION_ERROR 8 |
266 | #define OSC_CAPABILITIES_MASK_ERROR 16 | 273 | #define OSC_CAPABILITIES_MASK_ERROR 16 |
267 | 274 | ||
275 | acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); | ||
276 | |||
277 | /* platform-wide _OSC bits */ | ||
278 | #define OSC_SB_PAD_SUPPORT 1 | ||
279 | #define OSC_SB_PPC_OST_SUPPORT 2 | ||
280 | #define OSC_SB_PR3_SUPPORT 4 | ||
281 | #define OSC_SB_CPUHP_OST_SUPPORT 8 | ||
282 | #define OSC_SB_APEI_SUPPORT 16 | ||
283 | |||
284 | /* PCI defined _OSC bits */ | ||
268 | /* _OSC DW1 Definition (OS Support Fields) */ | 285 | /* _OSC DW1 Definition (OS Support Fields) */ |
269 | #define OSC_EXT_PCI_CONFIG_SUPPORT 1 | 286 | #define OSC_EXT_PCI_CONFIG_SUPPORT 1 |
270 | #define OSC_ACTIVE_STATE_PWR_SUPPORT 2 | 287 | #define OSC_ACTIVE_STATE_PWR_SUPPORT 2 |
271 | #define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4 | 288 | #define OSC_CLOCK_PWR_CAPABILITY_SUPPORT 4 |
272 | #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8 | 289 | #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 8 |
273 | #define OSC_MSI_SUPPORT 16 | 290 | #define OSC_MSI_SUPPORT 16 |
291 | #define OSC_PCI_SUPPORT_MASKS 0x1f | ||
274 | 292 | ||
275 | /* _OSC DW1 Definition (OS Control Fields) */ | 293 | /* _OSC DW1 Definition (OS Control Fields) */ |
276 | #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1 | 294 | #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 1 |
@@ -279,7 +297,7 @@ void __init acpi_s4_no_nvs(void);
279 | #define OSC_PCI_EXPRESS_AER_CONTROL 8 | 297 | #define OSC_PCI_EXPRESS_AER_CONTROL 8 |
280 | #define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16 | 298 | #define OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL 16 |
281 | 299 | ||
282 | #define OSC_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \ | 300 | #define OSC_PCI_CONTROL_MASKS (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL | \ |
283 | OSC_SHPC_NATIVE_HP_CONTROL | \ | 301 | OSC_SHPC_NATIVE_HP_CONTROL | \ |
284 | OSC_PCI_EXPRESS_PME_CONTROL | \ | 302 | OSC_PCI_EXPRESS_PME_CONTROL | \ |
285 | OSC_PCI_EXPRESS_AER_CONTROL | \ | 303 | OSC_PCI_EXPRESS_AER_CONTROL | \ |
@@ -303,9 +321,9 @@ static inline int acpi_boot_init(void)
303 | return 0; | 321 | return 0; |
304 | } | 322 | } |
305 | 323 | ||
306 | static inline int acpi_boot_table_init(void) | 324 | static inline void acpi_boot_table_init(void) |
307 | { | 325 | { |
308 | return 0; | 326 | return; |
309 | } | 327 | } |
310 | 328 | ||
311 | static inline int acpi_mps_check(void) | 329 | static inline int acpi_mps_check(void) |
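The acpi.h hunks above introduce struct acpi_osc_context, acpi_run_osc() and the platform-wide _OSC support bits. A minimal sketch of how a caller might drive that interface follows; the wrapper name, the dummy UUID string and the chosen capability bits are illustrative assumptions, not part of this patch.

#include <linux/acpi.h>
#include <linux/slab.h>

/* Illustrative only: run a query-mode _OSC against a handle. */
static acpi_status example_query_osc(acpi_handle handle)
{
	u32 capbuf[3];
	struct acpi_osc_context context = {
		.uuid_str = "00000000-0000-0000-0000-000000000000", /* dummy */
		.rev = 1,
		.cap.length = sizeof(capbuf),
		.cap.pointer = capbuf,
	};
	acpi_status status;

	capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
	capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PAD_SUPPORT | OSC_SB_PPC_OST_SUPPORT;
	capbuf[OSC_CONTROL_TYPE] = 0;

	status = acpi_run_osc(handle, &context);
	if (ACPI_SUCCESS(status))
		kfree(context.ret.pointer);	/* "free by caller if success" */

	return status;
}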
diff --git a/include/linux/aio.h b/include/linux/aio.h
index aea219d7d8d1..811dbb369379 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -102,7 +102,6 @@ struct kiocb {
102 | } ki_obj; | 102 | } ki_obj; |
103 | 103 | ||
104 | __u64 ki_user_data; /* user's data for completion */ | 104 | __u64 ki_user_data; /* user's data for completion */ |
105 | wait_queue_t ki_wait; | ||
106 | loff_t ki_pos; | 105 | loff_t ki_pos; |
107 | 106 | ||
108 | void *private; | 107 | void *private; |
@@ -140,7 +139,6 @@ struct kiocb {
140 | (x)->ki_dtor = NULL; \ | 139 | (x)->ki_dtor = NULL; \ |
141 | (x)->ki_obj.tsk = tsk; \ | 140 | (x)->ki_obj.tsk = tsk; \ |
142 | (x)->ki_user_data = 0; \ | 141 | (x)->ki_user_data = 0; \ |
143 | init_wait((&(x)->ki_wait)); \ | ||
144 | } while (0) | 142 | } while (0) |
145 | 143 | ||
146 | #define AIO_RING_MAGIC 0xa10a10a1 | 144 | #define AIO_RING_MAGIC 0xa10a10a1 |
@@ -223,8 +221,6 @@ struct mm_struct;
223 | static inline void exit_aio(struct mm_struct *mm) { } | 221 | static inline void exit_aio(struct mm_struct *mm) { } |
224 | #endif /* CONFIG_AIO */ | 222 | #endif /* CONFIG_AIO */ |
225 | 223 | ||
226 | #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait) | ||
227 | |||
228 | static inline struct kiocb *list_kiocb(struct list_head *h) | 224 | static inline struct kiocb *list_kiocb(struct list_head *h) |
229 | { | 225 | { |
230 | return list_entry(h, struct kiocb, ki_list); | 226 | return list_entry(h, struct kiocb, ki_list); |
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 57b1846a3c87..3e09b345f4d6 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -3,8 +3,6 @@
3 | 3 | ||
4 | #define ATMEL_MCI_MAX_NR_SLOTS 2 | 4 | #define ATMEL_MCI_MAX_NR_SLOTS 2 |
5 | 5 | ||
6 | #include <linux/dw_dmac.h> | ||
7 | |||
8 | /** | 6 | /** |
9 | * struct mci_slot_pdata - board-specific per-slot configuration | 7 | * struct mci_slot_pdata - board-specific per-slot configuration |
10 | * @bus_width: Number of data lines wired up the slot | 8 | * @bus_width: Number of data lines wired up the slot |
@@ -34,7 +32,7 @@ struct mci_slot_pdata {
34 | * @slot: Per-slot configuration data. | 32 | * @slot: Per-slot configuration data. |
35 | */ | 33 | */ |
36 | struct mci_platform_data { | 34 | struct mci_platform_data { |
37 | struct dw_dma_slave dma_slave; | 35 | struct mci_dma_data *dma_slave; |
38 | struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; | 36 | struct mci_slot_pdata slot[ATMEL_MCI_MAX_NR_SLOTS]; |
39 | }; | 37 | }; |
40 | 38 | ||
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 0f5f57858a23..8c4f884db6b4 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -36,18 +36,18 @@ struct backlight_device;
36 | struct fb_info; | 36 | struct fb_info; |
37 | 37 | ||
38 | struct backlight_ops { | 38 | struct backlight_ops { |
39 | unsigned int options; | 39 | const unsigned int options; |
40 | 40 | ||
41 | #define BL_CORE_SUSPENDRESUME (1 << 0) | 41 | #define BL_CORE_SUSPENDRESUME (1 << 0) |
42 | 42 | ||
43 | /* Notify the backlight driver some property has changed */ | 43 | /* Notify the backlight driver some property has changed */ |
44 | int (*update_status)(struct backlight_device *); | 44 | int (* const update_status)(struct backlight_device *); |
45 | /* Return the current backlight brightness (accounting for power, | 45 | /* Return the current backlight brightness (accounting for power, |
46 | fb_blank etc.) */ | 46 | fb_blank etc.) */ |
47 | int (*get_brightness)(struct backlight_device *); | 47 | int (* const get_brightness)(struct backlight_device *); |
48 | /* Check if given framebuffer device is the one bound to this backlight; | 48 | /* Check if given framebuffer device is the one bound to this backlight; |
49 | return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ | 49 | return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */ |
50 | int (*check_fb)(struct fb_info *); | 50 | int (* const check_fb)(struct fb_info *); |
51 | }; | 51 | }; |
52 | 52 | ||
53 | /* This structure defines all the properties of a backlight */ | 53 | /* This structure defines all the properties of a backlight */ |
@@ -86,7 +86,7 @@ struct backlight_device {
86 | registered this device has been unloaded, and if class_get_devdata() | 86 | registered this device has been unloaded, and if class_get_devdata() |
87 | points to something in the body of that driver, it is also invalid. */ | 87 | points to something in the body of that driver, it is also invalid. */ |
88 | struct mutex ops_lock; | 88 | struct mutex ops_lock; |
89 | struct backlight_ops *ops; | 89 | const struct backlight_ops *ops; |
90 | 90 | ||
91 | /* The framebuffer notifier block */ | 91 | /* The framebuffer notifier block */ |
92 | struct notifier_block fb_notif; | 92 | struct notifier_block fb_notif; |
@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
103 | } | 103 | } |
104 | 104 | ||
105 | extern struct backlight_device *backlight_device_register(const char *name, | 105 | extern struct backlight_device *backlight_device_register(const char *name, |
106 | struct device *dev, void *devdata, struct backlight_ops *ops); | 106 | struct device *dev, void *devdata, const struct backlight_ops *ops); |
107 | extern void backlight_device_unregister(struct backlight_device *bd); | 107 | extern void backlight_device_unregister(struct backlight_device *bd); |
108 | extern void backlight_force_update(struct backlight_device *bd, | 108 | extern void backlight_force_update(struct backlight_device *bd, |
109 | enum backlight_update_reason reason); | 109 | enum backlight_update_reason reason); |
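With backlight_ops constified and backlight_device_register() now taking a const pointer, a driver's ops table can live in read-only data. A minimal sketch under that assumption; the example_* names are hypothetical.

#include <linux/backlight.h>

/* Hypothetical driver callbacks; only the const qualifier is the point here. */
static int example_bl_update_status(struct backlight_device *bd)
{
	return 0;
}

static int example_bl_get_brightness(struct backlight_device *bd)
{
	return bd->props.brightness;
}

/* The ops table can now be placed in .rodata. */
static const struct backlight_ops example_bl_ops = {
	.options	= BL_CORE_SUSPENDRESUME,
	.update_status	= example_bl_update_status,
	.get_brightness	= example_bl_get_brightness,
};

/* ...later, in probe():
 *	bd = backlight_device_register("example", dev, priv, &example_bl_ops);
 */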
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index aece486ac734..89c6249fc561 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -68,6 +68,14 @@ struct linux_binprm{
68 | 68 | ||
69 | #define BINPRM_MAX_RECURSION 4 | 69 | #define BINPRM_MAX_RECURSION 4 |
70 | 70 | ||
71 | /* Function parameter for binfmt->coredump */ | ||
72 | struct coredump_params { | ||
73 | long signr; | ||
74 | struct pt_regs *regs; | ||
75 | struct file *file; | ||
76 | unsigned long limit; | ||
77 | }; | ||
78 | |||
71 | /* | 79 | /* |
72 | * This structure defines the functions that are used to load the binary formats that | 80 | * This structure defines the functions that are used to load the binary formats that |
73 | * linux accepts. | 81 | * linux accepts. |
@@ -77,7 +85,7 @@ struct linux_binfmt {
77 | struct module *module; | 85 | struct module *module; |
78 | int (*load_binary)(struct linux_binprm *, struct pt_regs * regs); | 86 | int (*load_binary)(struct linux_binprm *, struct pt_regs * regs); |
79 | int (*load_shlib)(struct file *); | 87 | int (*load_shlib)(struct file *); |
80 | int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit); | 88 | int (*core_dump)(struct coredump_params *cprm); |
81 | unsigned long min_coredump; /* minimal dump size */ | 89 | unsigned long min_coredump; /* minimal dump size */ |
82 | int hasvdso; | 90 | int hasvdso; |
83 | }; | 91 | }; |
@@ -101,6 +109,7 @@ extern int prepare_binprm(struct linux_binprm *);
101 | extern int __must_check remove_arg_zero(struct linux_binprm *); | 109 | extern int __must_check remove_arg_zero(struct linux_binprm *); |
102 | extern int search_binary_handler(struct linux_binprm *,struct pt_regs *); | 110 | extern int search_binary_handler(struct linux_binprm *,struct pt_regs *); |
103 | extern int flush_old_exec(struct linux_binprm * bprm); | 111 | extern int flush_old_exec(struct linux_binprm * bprm); |
112 | extern void setup_new_exec(struct linux_binprm * bprm); | ||
104 | 113 | ||
105 | extern int suid_dumpable; | 114 | extern int suid_dumpable; |
106 | #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ | 115 | #define SUID_DUMP_DISABLE 0 /* No setuid dumping */ |
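The core_dump hook above now receives a single struct coredump_params instead of the old (signr, regs, file, limit) argument list. A hedged sketch of what a handler looks like after the change; the example format and the limit check are illustrative, not taken from any real binfmt.

#include <linux/module.h>
#include <linux/binfmts.h>

/* Hypothetical dumper: the old four arguments arrive bundled in *cprm. */
static int example_core_dump(struct coredump_params *cprm)
{
	if (cprm->limit < PAGE_SIZE)	/* illustrative RLIMIT_CORE check */
		return 0;

	/* use cprm->signr, cprm->regs and cprm->file here ... */
	return 1;			/* non-zero: a dump was written */
}

static struct linux_binfmt example_format = {
	.module		= THIS_MODULE,
	.core_dump	= example_core_dump,
	.min_coredump	= PAGE_SIZE,	/* other hooks omitted for brevity */
};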
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 756d78b8c1c5..daf8c480c786 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -42,6 +42,9 @@
42 | * bitmap_empty(src, nbits) Are all bits zero in *src? | 42 | * bitmap_empty(src, nbits) Are all bits zero in *src? |
43 | * bitmap_full(src, nbits) Are all bits set in *src? | 43 | * bitmap_full(src, nbits) Are all bits set in *src? |
44 | * bitmap_weight(src, nbits) Hamming Weight: number set bits | 44 | * bitmap_weight(src, nbits) Hamming Weight: number set bits |
45 | * bitmap_set(dst, pos, nbits) Set specified bit area | ||
46 | * bitmap_clear(dst, pos, nbits) Clear specified bit area | ||
47 | * bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area | ||
45 | * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n | 48 | * bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n |
46 | * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n | 49 | * bitmap_shift_left(dst, src, n, nbits) *dst = *src << n |
47 | * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) | 50 | * bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src) |
@@ -108,6 +111,14 @@ extern int __bitmap_subset(const unsigned long *bitmap1,
108 | const unsigned long *bitmap2, int bits); | 111 | const unsigned long *bitmap2, int bits); |
109 | extern int __bitmap_weight(const unsigned long *bitmap, int bits); | 112 | extern int __bitmap_weight(const unsigned long *bitmap, int bits); |
110 | 113 | ||
114 | extern void bitmap_set(unsigned long *map, int i, int len); | ||
115 | extern void bitmap_clear(unsigned long *map, int start, int nr); | ||
116 | extern unsigned long bitmap_find_next_zero_area(unsigned long *map, | ||
117 | unsigned long size, | ||
118 | unsigned long start, | ||
119 | unsigned int nr, | ||
120 | unsigned long align_mask); | ||
121 | |||
111 | extern int bitmap_scnprintf(char *buf, unsigned int len, | 122 | extern int bitmap_scnprintf(char *buf, unsigned int len, |
112 | const unsigned long *src, int nbits); | 123 | const unsigned long *src, int nbits); |
113 | extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, | 124 | extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, |
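The newly exported bitmap_set(), bitmap_clear() and bitmap_find_next_zero_area() are typically combined into a find-then-mark allocator for contiguous, aligned runs of bits. A minimal sketch of that pattern; the map size, the names and the absence of locking are assumptions made for illustration.

#include <linux/bitmap.h>

#define EXAMPLE_NR_BITS	1024

static DECLARE_BITMAP(example_map, EXAMPLE_NR_BITS);

/* Reserve 'count' consecutive bits, aligned to 'align' (a power of two). */
static int example_alloc_region(unsigned int count, unsigned int align)
{
	unsigned long pos;

	pos = bitmap_find_next_zero_area(example_map, EXAMPLE_NR_BITS,
					 0, count, align - 1);
	if (pos >= EXAMPLE_NR_BITS)
		return -1;		/* no free run large enough */

	bitmap_set(example_map, pos, count);
	return pos;
}

static void example_free_region(int pos, unsigned int count)
{
	bitmap_clear(example_map, pos, count);
}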
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 784a919aa0d0..5c8018977efa 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -845,7 +845,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
845 | * blk_rq_err_bytes() : bytes left till the next error boundary | 845 | * blk_rq_err_bytes() : bytes left till the next error boundary |
846 | * blk_rq_sectors() : sectors left in the entire request | 846 | * blk_rq_sectors() : sectors left in the entire request |
847 | * blk_rq_cur_sectors() : sectors left in the current segment | 847 | * blk_rq_cur_sectors() : sectors left in the current segment |
848 | * blk_rq_err_sectors() : sectors left till the next error boundary | ||
849 | */ | 848 | */ |
850 | static inline sector_t blk_rq_pos(const struct request *rq) | 849 | static inline sector_t blk_rq_pos(const struct request *rq) |
851 | { | 850 | { |
@@ -874,11 +873,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
874 | return blk_rq_cur_bytes(rq) >> 9; | 873 | return blk_rq_cur_bytes(rq) >> 9; |
875 | } | 874 | } |
876 | 875 | ||
877 | static inline unsigned int blk_rq_err_sectors(const struct request *rq) | ||
878 | { | ||
879 | return blk_rq_err_bytes(rq) >> 9; | ||
880 | } | ||
881 | |||
882 | /* | 876 | /* |
883 | * Request issue related functions. | 877 | * Request issue related functions. |
884 | */ | 878 | */ |
@@ -944,6 +938,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
944 | extern void blk_set_default_limits(struct queue_limits *lim); | 938 | extern void blk_set_default_limits(struct queue_limits *lim); |
945 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | 939 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, |
946 | sector_t offset); | 940 | sector_t offset); |
941 | extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev, | ||
942 | sector_t offset); | ||
947 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, | 943 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, |
948 | sector_t offset); | 944 | sector_t offset); |
949 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); | 945 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); |
@@ -1116,11 +1112,18 @@ static inline int queue_alignment_offset(struct request_queue *q)
1116 | return q->limits.alignment_offset; | 1112 | return q->limits.alignment_offset; |
1117 | } | 1113 | } |
1118 | 1114 | ||
1115 | static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset) | ||
1116 | { | ||
1117 | unsigned int granularity = max(lim->physical_block_size, lim->io_min); | ||
1118 | |||
1119 | offset &= granularity - 1; | ||
1120 | return (granularity + lim->alignment_offset - offset) & (granularity - 1); | ||
1121 | } | ||
1122 | |||
1119 | static inline int queue_sector_alignment_offset(struct request_queue *q, | 1123 | static inline int queue_sector_alignment_offset(struct request_queue *q, |
1120 | sector_t sector) | 1124 | sector_t sector) |
1121 | { | 1125 | { |
1122 | return ((sector << 9) - q->limits.alignment_offset) | 1126 | return queue_limit_alignment_offset(&q->limits, sector << 9); |
1123 | & (q->limits.io_min - 1); | ||
1124 | } | 1127 | } |
1125 | 1128 | ||
1126 | static inline int bdev_alignment_offset(struct block_device *bdev) | 1129 | static inline int bdev_alignment_offset(struct block_device *bdev) |
@@ -1147,8 +1150,11 @@ static inline int queue_discard_alignment(struct request_queue *q)
1147 | static inline int queue_sector_discard_alignment(struct request_queue *q, | 1150 | static inline int queue_sector_discard_alignment(struct request_queue *q, |
1148 | sector_t sector) | 1151 | sector_t sector) |
1149 | { | 1152 | { |
1150 | return ((sector << 9) - q->limits.discard_alignment) | 1153 | struct queue_limits *lim = &q->limits; |
1151 | & (q->limits.discard_granularity - 1); | 1154 | unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); |
1155 | |||
1156 | return (lim->discard_granularity + lim->discard_alignment - alignment) | ||
1157 | & (lim->discard_granularity - 1); | ||
1152 | } | 1158 | } |
1153 | 1159 | ||
1154 | static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) | 1160 | static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) |
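queue_sector_alignment_offset() is reworked above to defer to the new queue_limit_alignment_offset(), which measures misalignment against the larger of the physical block size and io_min rather than io_min alone. A standalone sketch of the same arithmetic on plain integers, with made-up values, to show what it computes.

#include <stdio.h>

/* Mirrors the queue_limit_alignment_offset() calculation; values are made up. */
static unsigned int limit_alignment_offset(unsigned int physical_block_size,
					   unsigned int io_min,
					   unsigned int alignment_offset,
					   unsigned long long offset_bytes)
{
	unsigned int granularity = physical_block_size > io_min ?
				   physical_block_size : io_min;
	unsigned int off = offset_bytes & (granularity - 1);

	return (granularity + alignment_offset - off) & (granularity - 1);
}

int main(void)
{
	/* 4KiB granularity, 512-byte alignment_offset, I/O starting at byte 3584 */
	printf("%u\n", limit_alignment_offset(4096, 4096, 512, 3584));
	return 0;	/* prints 1024: bytes to the next aligned boundary */
}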
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 1ed2a5cc03f5..3db7767d2a17 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -51,6 +51,15 @@ struct can_priv {
51 | struct sk_buff **echo_skb; | 51 | struct sk_buff **echo_skb; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | /* | ||
55 | * get_can_dlc(value) - helper macro to cast a given data length code (dlc) | ||
56 | * to __u8 and ensure the dlc value to be max. 8 bytes. | ||
57 | * | ||
58 | * To be used in the CAN netdriver receive path to ensure conformance with | ||
59 | * ISO 11898-1 Chapter 8.4.2.3 (DLC field) | ||
60 | */ | ||
61 | #define get_can_dlc(i) (min_t(__u8, (i), 8)) | ||
62 | |||
54 | struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); | 63 | struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); |
55 | void free_candev(struct net_device *dev); | 64 | void free_candev(struct net_device *dev); |
56 | 65 | ||
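The new get_can_dlc() macro clamps whatever data length code the controller reports to the 8-byte CAN maximum. A sketch of a receive-path fragment using it; the frame_info layout and the use of an alloc_can_skb() helper are assumptions made for illustration.

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/netdevice.h>

/* Hypothetical RX fragment: clamp the hardware DLC before it reaches the stack. */
static void example_rx(struct net_device *dev, u32 frame_info)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	skb = alloc_can_skb(dev, &cf);		/* assumed helper */
	if (!skb)
		return;

	cf->can_dlc = get_can_dlc(frame_info & 0x0f);	/* ISO 11898-1 limit */
	/* ... copy cf->can_dlc bytes of payload ... */
	netif_rx(skb);
}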
diff --git a/include/linux/cs5535.h b/include/linux/cs5535.h
new file mode 100644
index 000000000000..d5a1d4810b80
--- /dev/null
+++ b/include/linux/cs5535.h
@@ -0,0 +1,172 @@
1 | /* | ||
2 | * AMD CS5535/CS5536 definitions | ||
3 | * Copyright (C) 2006 Advanced Micro Devices, Inc. | ||
4 | * Copyright (C) 2009 Andres Salomon <dilinger@collabora.co.uk> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of version 2 of the GNU General Public License | ||
8 | * as published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef _CS5535_H | ||
12 | #define _CS5535_H | ||
13 | |||
14 | /* MSRs */ | ||
15 | #define MSR_GLIU_P2D_RO0 0x10000029 | ||
16 | |||
17 | #define MSR_LX_GLD_MSR_CONFIG 0x48002001 | ||
18 | #define MSR_LX_MSR_PADSEL 0x48002011 /* NOT 0x48000011; the data | ||
19 | * sheet has the wrong value */ | ||
20 | #define MSR_GLCP_SYS_RSTPLL 0x4C000014 | ||
21 | #define MSR_GLCP_DOTPLL 0x4C000015 | ||
22 | |||
23 | #define MSR_LBAR_SMB 0x5140000B | ||
24 | #define MSR_LBAR_GPIO 0x5140000C | ||
25 | #define MSR_LBAR_MFGPT 0x5140000D | ||
26 | #define MSR_LBAR_ACPI 0x5140000E | ||
27 | #define MSR_LBAR_PMS 0x5140000F | ||
28 | |||
29 | #define MSR_DIVIL_SOFT_RESET 0x51400017 | ||
30 | |||
31 | #define MSR_PIC_YSEL_LOW 0x51400020 | ||
32 | #define MSR_PIC_YSEL_HIGH 0x51400021 | ||
33 | #define MSR_PIC_ZSEL_LOW 0x51400022 | ||
34 | #define MSR_PIC_ZSEL_HIGH 0x51400023 | ||
35 | #define MSR_PIC_IRQM_LPC 0x51400025 | ||
36 | |||
37 | #define MSR_MFGPT_IRQ 0x51400028 | ||
38 | #define MSR_MFGPT_NR 0x51400029 | ||
39 | #define MSR_MFGPT_SETUP 0x5140002B | ||
40 | |||
41 | #define MSR_LX_SPARE_MSR 0x80000011 /* DC-specific */ | ||
42 | |||
43 | #define MSR_GX_GLD_MSR_CONFIG 0xC0002001 | ||
44 | #define MSR_GX_MSR_PADSEL 0xC0002011 | ||
45 | |||
46 | /* resource sizes */ | ||
47 | #define LBAR_GPIO_SIZE 0xFF | ||
48 | #define LBAR_MFGPT_SIZE 0x40 | ||
49 | #define LBAR_ACPI_SIZE 0x40 | ||
50 | #define LBAR_PMS_SIZE 0x80 | ||
51 | |||
52 | /* VSA2 magic values */ | ||
53 | #define VSA_VRC_INDEX 0xAC1C | ||
54 | #define VSA_VRC_DATA 0xAC1E | ||
55 | #define VSA_VR_UNLOCK 0xFC53 /* unlock virtual register */ | ||
56 | #define VSA_VR_SIGNATURE 0x0003 | ||
57 | #define VSA_VR_MEM_SIZE 0x0200 | ||
58 | #define AMD_VSA_SIG 0x4132 /* signature is ascii 'VSA2' */ | ||
59 | #define GSW_VSA_SIG 0x534d /* General Software signature */ | ||
60 | |||
61 | #include <linux/io.h> | ||
62 | |||
63 | static inline int cs5535_has_vsa2(void) | ||
64 | { | ||
65 | static int has_vsa2 = -1; | ||
66 | |||
67 | if (has_vsa2 == -1) { | ||
68 | uint16_t val; | ||
69 | |||
70 | /* | ||
71 | * The VSA has virtual registers that we can query for a | ||
72 | * signature. | ||
73 | */ | ||
74 | outw(VSA_VR_UNLOCK, VSA_VRC_INDEX); | ||
75 | outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX); | ||
76 | |||
77 | val = inw(VSA_VRC_DATA); | ||
78 | has_vsa2 = (val == AMD_VSA_SIG || val == GSW_VSA_SIG); | ||
79 | } | ||
80 | |||
81 | return has_vsa2; | ||
82 | } | ||
83 | |||
84 | /* GPIOs */ | ||
85 | #define GPIO_OUTPUT_VAL 0x00 | ||
86 | #define GPIO_OUTPUT_ENABLE 0x04 | ||
87 | #define GPIO_OUTPUT_OPEN_DRAIN 0x08 | ||
88 | #define GPIO_OUTPUT_INVERT 0x0C | ||
89 | #define GPIO_OUTPUT_AUX1 0x10 | ||
90 | #define GPIO_OUTPUT_AUX2 0x14 | ||
91 | #define GPIO_PULL_UP 0x18 | ||
92 | #define GPIO_PULL_DOWN 0x1C | ||
93 | #define GPIO_INPUT_ENABLE 0x20 | ||
94 | #define GPIO_INPUT_INVERT 0x24 | ||
95 | #define GPIO_INPUT_FILTER 0x28 | ||
96 | #define GPIO_INPUT_EVENT_COUNT 0x2C | ||
97 | #define GPIO_READ_BACK 0x30 | ||
98 | #define GPIO_INPUT_AUX1 0x34 | ||
99 | #define GPIO_EVENTS_ENABLE 0x38 | ||
100 | #define GPIO_LOCK_ENABLE 0x3C | ||
101 | #define GPIO_POSITIVE_EDGE_EN 0x40 | ||
102 | #define GPIO_NEGATIVE_EDGE_EN 0x44 | ||
103 | #define GPIO_POSITIVE_EDGE_STS 0x48 | ||
104 | #define GPIO_NEGATIVE_EDGE_STS 0x4C | ||
105 | |||
106 | #define GPIO_MAP_X 0xE0 | ||
107 | #define GPIO_MAP_Y 0xE4 | ||
108 | #define GPIO_MAP_Z 0xE8 | ||
109 | #define GPIO_MAP_W 0xEC | ||
110 | |||
111 | void cs5535_gpio_set(unsigned offset, unsigned int reg); | ||
112 | void cs5535_gpio_clear(unsigned offset, unsigned int reg); | ||
113 | int cs5535_gpio_isset(unsigned offset, unsigned int reg); | ||
114 | |||
115 | /* MFGPTs */ | ||
116 | |||
117 | #define MFGPT_MAX_TIMERS 8 | ||
118 | #define MFGPT_TIMER_ANY (-1) | ||
119 | |||
120 | #define MFGPT_DOMAIN_WORKING 1 | ||
121 | #define MFGPT_DOMAIN_STANDBY 2 | ||
122 | #define MFGPT_DOMAIN_ANY (MFGPT_DOMAIN_WORKING | MFGPT_DOMAIN_STANDBY) | ||
123 | |||
124 | #define MFGPT_CMP1 0 | ||
125 | #define MFGPT_CMP2 1 | ||
126 | |||
127 | #define MFGPT_EVENT_IRQ 0 | ||
128 | #define MFGPT_EVENT_NMI 1 | ||
129 | #define MFGPT_EVENT_RESET 3 | ||
130 | |||
131 | #define MFGPT_REG_CMP1 0 | ||
132 | #define MFGPT_REG_CMP2 2 | ||
133 | #define MFGPT_REG_COUNTER 4 | ||
134 | #define MFGPT_REG_SETUP 6 | ||
135 | |||
136 | #define MFGPT_SETUP_CNTEN (1 << 15) | ||
137 | #define MFGPT_SETUP_CMP2 (1 << 14) | ||
138 | #define MFGPT_SETUP_CMP1 (1 << 13) | ||
139 | #define MFGPT_SETUP_SETUP (1 << 12) | ||
140 | #define MFGPT_SETUP_STOPEN (1 << 11) | ||
141 | #define MFGPT_SETUP_EXTEN (1 << 10) | ||
142 | #define MFGPT_SETUP_REVEN (1 << 5) | ||
143 | #define MFGPT_SETUP_CLKSEL (1 << 4) | ||
144 | |||
145 | struct cs5535_mfgpt_timer; | ||
146 | |||
147 | extern uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, | ||
148 | uint16_t reg); | ||
149 | extern void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg, | ||
150 | uint16_t value); | ||
151 | |||
152 | extern int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp, | ||
153 | int event, int enable); | ||
154 | extern int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, | ||
155 | int *irq, int enable); | ||
156 | extern struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer, | ||
157 | int domain); | ||
158 | extern void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer); | ||
159 | |||
160 | static inline int cs5535_mfgpt_setup_irq(struct cs5535_mfgpt_timer *timer, | ||
161 | int cmp, int *irq) | ||
162 | { | ||
163 | return cs5535_mfgpt_set_irq(timer, cmp, irq, 1); | ||
164 | } | ||
165 | |||
166 | static inline int cs5535_mfgpt_release_irq(struct cs5535_mfgpt_timer *timer, | ||
167 | int cmp, int *irq) | ||
168 | { | ||
169 | return cs5535_mfgpt_set_irq(timer, cmp, irq, 0); | ||
170 | } | ||
171 | |||
172 | #endif | ||
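The MFGPT half of the new header amounts to a small allocate/program/free API. A sketch of claiming a timer and starting it counting toward a compare event; the setup bits and compare value are arbitrary illustrations, not a tested programming sequence.

#include <linux/cs5535.h>
#include <linux/errno.h>

/* Illustrative only: grab any available MFGPT timer and start it counting. */
static struct cs5535_mfgpt_timer *example_timer;

static int example_start_timer(void)
{
	example_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY,
						 MFGPT_DOMAIN_WORKING);
	if (!example_timer)
		return -ENODEV;

	/* arbitrary compare value; the CMP2 event fires when the counter hits it */
	cs5535_mfgpt_write(example_timer, MFGPT_REG_CMP2, 0x8000);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_COUNTER, 0);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_SETUP,
			   MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);

	return 0;
}

static void example_stop_timer(void)
{
	cs5535_mfgpt_free_timer(example_timer);
}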
diff --git a/include/linux/ctype.h b/include/linux/ctype.h
index afa36392297a..a3d6ee0044f9 100644
--- a/include/linux/ctype.h
+++ b/include/linux/ctype.h
@@ -15,7 +15,7 @@
15 | #define _X 0x40 /* hex digit */ | 15 | #define _X 0x40 /* hex digit */ |
16 | #define _SP 0x80 /* hard space (0x20) */ | 16 | #define _SP 0x80 /* hard space (0x20) */ |
17 | 17 | ||
18 | extern unsigned char _ctype[]; | 18 | extern const unsigned char _ctype[]; |
19 | 19 | ||
20 | #define __ismask(x) (_ctype[(int)(unsigned char)(x)]) | 20 | #define __ismask(x) (_ctype[(int)(unsigned char)(x)]) |
21 | 21 | ||
@@ -27,6 +27,7 @@ extern unsigned char _ctype[];
27 | #define islower(c) ((__ismask(c)&(_L)) != 0) | 27 | #define islower(c) ((__ismask(c)&(_L)) != 0) |
28 | #define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) | 28 | #define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) |
29 | #define ispunct(c) ((__ismask(c)&(_P)) != 0) | 29 | #define ispunct(c) ((__ismask(c)&(_P)) != 0) |
30 | /* Note: isspace() must return false for %NUL-terminator */ | ||
30 | #define isspace(c) ((__ismask(c)&(_S)) != 0) | 31 | #define isspace(c) ((__ismask(c)&(_S)) != 0) |
31 | #define isupper(c) ((__ismask(c)&(_U)) != 0) | 32 | #define isupper(c) ((__ismask(c)&(_U)) != 0) |
32 | #define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) | 33 | #define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) |
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 12ff8c3f1d05..5032b9a31ae7 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
@@ -25,7 +25,7 @@ static void *malloc(int size)
25 | void *p; | 25 | void *p; |
26 | 26 | ||
27 | if (size < 0) | 27 | if (size < 0) |
28 | error("Malloc error"); | 28 | return NULL; |
29 | if (!malloc_ptr) | 29 | if (!malloc_ptr) |
30 | malloc_ptr = free_mem_ptr; | 30 | malloc_ptr = free_mem_ptr; |
31 | 31 | ||
@@ -35,7 +35,7 @@ static void *malloc(int size)
35 | malloc_ptr += size; | 35 | malloc_ptr += size; |
36 | 36 | ||
37 | if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) | 37 | if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) |
38 | error("Out of memory"); | 38 | return NULL; |
39 | 39 | ||
40 | malloc_count++; | 40 | malloc_count++; |
41 | return p; | 41 | return p; |
diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h
new file mode 100644
index 000000000000..987229752519
--- /dev/null
+++ b/include/linux/decompress/unlzo.h
@@ -0,0 +1,10 @@
1 | #ifndef DECOMPRESS_UNLZO_H | ||
2 | #define DECOMPRESS_UNLZO_H | ||
3 | |||
4 | int unlzo(unsigned char *inbuf, int len, | ||
5 | int(*fill)(void*, unsigned int), | ||
6 | int(*flush)(void*, unsigned int), | ||
7 | unsigned char *output, | ||
8 | int *pos, | ||
9 | void(*error)(char *x)); | ||
10 | #endif | ||
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index df7607e6dce8..d4c9c0b88adc 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -235,7 +235,7 @@ void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
235 | const char *dm_device_name(struct mapped_device *md); | 235 | const char *dm_device_name(struct mapped_device *md); |
236 | int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); | 236 | int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); |
237 | struct gendisk *dm_disk(struct mapped_device *md); | 237 | struct gendisk *dm_disk(struct mapped_device *md); |
238 | int dm_suspended(struct mapped_device *md); | 238 | int dm_suspended(struct dm_target *ti); |
239 | int dm_noflush_suspending(struct dm_target *ti); | 239 | int dm_noflush_suspending(struct dm_target *ti); |
240 | union map_info *dm_get_mapinfo(struct bio *bio); | 240 | union map_info *dm_get_mapinfo(struct bio *bio); |
241 | union map_info *dm_get_rq_mapinfo(struct request *rq); | 241 | union map_info *dm_get_rq_mapinfo(struct request *rq); |
@@ -276,7 +276,7 @@ void dm_table_unplug_all(struct dm_table *t);
276 | /* | 276 | /* |
277 | * Table reference counting. | 277 | * Table reference counting. |
278 | */ | 278 | */ |
279 | struct dm_table *dm_get_table(struct mapped_device *md); | 279 | struct dm_table *dm_get_live_table(struct mapped_device *md); |
280 | void dm_table_get(struct dm_table *t); | 280 | void dm_table_get(struct dm_table *t); |
281 | void dm_table_put(struct dm_table *t); | 281 | void dm_table_put(struct dm_table *t); |
282 | 282 | ||
@@ -295,8 +295,10 @@ void dm_table_event(struct dm_table *t);
295 | 295 | ||
296 | /* | 296 | /* |
297 | * The device must be suspended before calling this method. | 297 | * The device must be suspended before calling this method. |
298 | * Returns the previous table, which the caller must destroy. | ||
298 | */ | 299 | */ |
299 | int dm_swap_table(struct mapped_device *md, struct dm_table *t); | 300 | struct dm_table *dm_swap_table(struct mapped_device *md, |
301 | struct dm_table *t); | ||
300 | 302 | ||
301 | /* | 303 | /* |
302 | * A wrapper around vmalloc. | 304 | * A wrapper around vmalloc. |
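dm_swap_table() now returns the previous table rather than an int, and the updated comment makes the caller responsible for destroying it. A sketch of the resulting caller-side pattern, assuming dm_table_destroy() and that failures are reported as ERR_PTR values.

#include <linux/device-mapper.h>
#include <linux/err.h>

/*
 * Sketch of the new calling convention: the device must already be suspended,
 * and whatever table comes back is the caller's to destroy.
 */
static int example_swap(struct mapped_device *md, struct dm_table *new_table)
{
	struct dm_table *old_table;

	old_table = dm_swap_table(md, new_table);
	if (IS_ERR(old_table))
		return PTR_ERR(old_table);

	if (old_table)
		dm_table_destroy(old_table);	/* previous live table */

	return 0;
}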
diff --git a/include/linux/device.h b/include/linux/device.h
index 2a73d9bcbc9c..a62799f2ab00 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -166,9 +166,9 @@ struct driver_attribute driver_attr_##_name = \
166 | __ATTR(_name, _mode, _show, _store) | 166 | __ATTR(_name, _mode, _show, _store) |
167 | 167 | ||
168 | extern int __must_check driver_create_file(struct device_driver *driver, | 168 | extern int __must_check driver_create_file(struct device_driver *driver, |
169 | struct driver_attribute *attr); | 169 | const struct driver_attribute *attr); |
170 | extern void driver_remove_file(struct device_driver *driver, | 170 | extern void driver_remove_file(struct device_driver *driver, |
171 | struct driver_attribute *attr); | 171 | const struct driver_attribute *attr); |
172 | 172 | ||
173 | extern int __must_check driver_add_kobj(struct device_driver *drv, | 173 | extern int __must_check driver_add_kobj(struct device_driver *drv, |
174 | struct kobject *kobj, | 174 | struct kobject *kobj, |
@@ -319,13 +319,13 @@ struct device_attribute {
319 | struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) | 319 | struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) |
320 | 320 | ||
321 | extern int __must_check device_create_file(struct device *device, | 321 | extern int __must_check device_create_file(struct device *device, |
322 | struct device_attribute *entry); | 322 | const struct device_attribute *entry); |
323 | extern void device_remove_file(struct device *dev, | 323 | extern void device_remove_file(struct device *dev, |
324 | struct device_attribute *attr); | 324 | const struct device_attribute *attr); |
325 | extern int __must_check device_create_bin_file(struct device *dev, | 325 | extern int __must_check device_create_bin_file(struct device *dev, |
326 | struct bin_attribute *attr); | 326 | const struct bin_attribute *attr); |
327 | extern void device_remove_bin_file(struct device *dev, | 327 | extern void device_remove_bin_file(struct device *dev, |
328 | struct bin_attribute *attr); | 328 | const struct bin_attribute *attr); |
329 | extern int device_schedule_callback_owner(struct device *dev, | 329 | extern int device_schedule_callback_owner(struct device *dev, |
330 | void (*func)(struct device *dev), struct module *owner); | 330 | void (*func)(struct device *dev), struct module *owner); |
331 | 331 | ||
diff --git a/include/linux/dm-dirty-log.h b/include/linux/dm-dirty-log.h
index 5e8b11d88f6f..7084503c3405 100644
--- a/include/linux/dm-dirty-log.h
+++ b/include/linux/dm-dirty-log.h
@@ -21,6 +21,7 @@ struct dm_dirty_log_type;
21 | 21 | ||
22 | struct dm_dirty_log { | 22 | struct dm_dirty_log { |
23 | struct dm_dirty_log_type *type; | 23 | struct dm_dirty_log_type *type; |
24 | int (*flush_callback_fn)(struct dm_target *ti); | ||
24 | void *context; | 25 | void *context; |
25 | }; | 26 | }; |
26 | 27 | ||
@@ -136,8 +137,9 @@ int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type);
136 | * type->constructor/destructor() directly. | 137 | * type->constructor/destructor() directly. |
137 | */ | 138 | */ |
138 | struct dm_dirty_log *dm_dirty_log_create(const char *type_name, | 139 | struct dm_dirty_log *dm_dirty_log_create(const char *type_name, |
139 | struct dm_target *ti, | 140 | struct dm_target *ti, |
140 | unsigned argc, char **argv); | 141 | int (*flush_callback_fn)(struct dm_target *ti), |
142 | unsigned argc, char **argv); | ||
141 | void dm_dirty_log_destroy(struct dm_dirty_log *log); | 143 | void dm_dirty_log_destroy(struct dm_dirty_log *log); |
142 | 144 | ||
143 | #endif /* __KERNEL__ */ | 145 | #endif /* __KERNEL__ */ |
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h
index 2ab84c83c31a..aa95508d2f95 100644
--- a/include/linux/dm-ioctl.h
+++ b/include/linux/dm-ioctl.h
@@ -1,6 +1,6 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited. | 2 | * Copyright (C) 2001 - 2003 Sistina Software (UK) Limited. |
3 | * Copyright (C) 2004 - 2005 Red Hat, Inc. All rights reserved. | 3 | * Copyright (C) 2004 - 2009 Red Hat, Inc. All rights reserved. |
4 | * | 4 | * |
5 | * This file is released under the LGPL. | 5 | * This file is released under the LGPL. |
6 | */ | 6 | */ |
@@ -266,9 +266,9 @@ enum {
266 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) | 266 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) |
267 | 267 | ||
268 | #define DM_VERSION_MAJOR 4 | 268 | #define DM_VERSION_MAJOR 4 |
269 | #define DM_VERSION_MINOR 15 | 269 | #define DM_VERSION_MINOR 16 |
270 | #define DM_VERSION_PATCHLEVEL 0 | 270 | #define DM_VERSION_PATCHLEVEL 0 |
271 | #define DM_VERSION_EXTRA "-ioctl (2009-04-01)" | 271 | #define DM_VERSION_EXTRA "-ioctl (2009-11-05)" |
272 | 272 | ||
273 | /* Status bits */ | 273 | /* Status bits */ |
274 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ | 274 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ |
@@ -309,4 +309,11 @@ enum {
309 | */ | 309 | */ |
310 | #define DM_NOFLUSH_FLAG (1 << 11) /* In */ | 310 | #define DM_NOFLUSH_FLAG (1 << 11) /* In */ |
311 | 311 | ||
312 | /* | ||
313 | * If set, any table information returned will relate to the inactive | ||
314 | * table instead of the live one. Always check DM_INACTIVE_PRESENT_FLAG | ||
315 | * is set before using the data returned. | ||
316 | */ | ||
317 | #define DM_QUERY_INACTIVE_TABLE_FLAG (1 << 12) /* In */ | ||
318 | |||
312 | #endif /* _LINUX_DM_IOCTL_H */ | 319 | #endif /* _LINUX_DM_IOCTL_H */ |
diff --git a/include/linux/dm-region-hash.h b/include/linux/dm-region-hash.h
index a9e652a41373..9e2a7a401df5 100644
--- a/include/linux/dm-region-hash.h
+++ b/include/linux/dm-region-hash.h
@@ -78,8 +78,7 @@ void dm_rh_dec(struct dm_region_hash *rh, region_t region);
78 | /* Delay bios on regions. */ | 78 | /* Delay bios on regions. */ |
79 | void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); | 79 | void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio); |
80 | 80 | ||
81 | void dm_rh_mark_nosync(struct dm_region_hash *rh, | 81 | void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio); |
82 | struct bio *bio, unsigned done, int error); | ||
83 | 82 | ||
84 | /* | 83 | /* |
85 | * Region recovery control. | 84 | * Region recovery control. |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 2b9f2ac7ed60..78784982b33e 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -74,7 +74,7 @@ enum dma_transaction_type {
74 | * control completion, and communicate status. | 74 | * control completion, and communicate status. |
75 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of | 75 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of |
76 | * this transaction | 76 | * this transaction |
77 | * @DMA_CTRL_ACK - the descriptor cannot be reused until the client | 77 | * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client |
78 | * acknowledges receipt, i.e. has has a chance to establish any dependency | 78 | * acknowledges receipt, i.e. has has a chance to establish any dependency |
79 | * chains | 79 | * chains |
80 | * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) | 80 | * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) |
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index e84f4733cb55..78962272338a 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
53 | 53 | ||
54 | 54 | ||
55 | extern const char *drbd_buildtag(void); | 55 | extern const char *drbd_buildtag(void); |
56 | #define REL_VERSION "8.3.6" | 56 | #define REL_VERSION "8.3.7" |
57 | #define API_VERSION 88 | 57 | #define API_VERSION 88 |
58 | #define PRO_VERSION_MIN 86 | 58 | #define PRO_VERSION_MIN 86 |
59 | #define PRO_VERSION_MAX 91 | 59 | #define PRO_VERSION_MAX 91 |
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index db5721ad50d1..a4d82f895994 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -69,6 +69,7 @@ NL_PACKET(disconnect, 6, )
69 | 69 | ||
70 | NL_PACKET(resize, 7, | 70 | NL_PACKET(resize, 7, |
71 | NL_INT64( 29, T_MAY_IGNORE, resize_size) | 71 | NL_INT64( 29, T_MAY_IGNORE, resize_size) |
72 | NL_BIT( 68, T_MAY_IGNORE, resize_force) | ||
72 | ) | 73 | ) |
73 | 74 | ||
74 | NL_PACKET(syncer_conf, 8, | 75 | NL_PACKET(syncer_conf, 8, |
diff --git a/include/linux/dst.h b/include/linux/dst.h
deleted file mode 100644
index e26fed84b1aa..000000000000
--- a/include/linux/dst.h
+++ /dev/null
@@ -1,587 +0,0 @@
1 | /* | ||
2 | * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> | ||
3 | * All rights reserved. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License as published by | ||
7 | * the Free Software Foundation; either version 2 of the License, or | ||
8 | * (at your option) any later version. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #ifndef __DST_H | ||
17 | #define __DST_H | ||
18 | |||
19 | #include <linux/types.h> | ||
20 | #include <linux/connector.h> | ||
21 | |||
22 | #define DST_NAMELEN 32 | ||
23 | #define DST_NAME "dst" | ||
24 | |||
25 | enum { | ||
26 | /* Remove node with given id from storage */ | ||
27 | DST_DEL_NODE = 0, | ||
28 | /* Add remote node with given id to the storage */ | ||
29 | DST_ADD_REMOTE, | ||
30 | /* Add local node with given id to the storage to be exported and used by remote peers */ | ||
31 | DST_ADD_EXPORT, | ||
32 | /* Crypto initialization command (hash/cipher used to protect the connection) */ | ||
33 | DST_CRYPTO, | ||
34 | /* Security attributes for given connection (permissions for example) */ | ||
35 | DST_SECURITY, | ||
36 | /* Register given node in the block layer subsystem */ | ||
37 | DST_START, | ||
38 | DST_CMD_MAX | ||
39 | }; | ||
40 | |||
41 | struct dst_ctl | ||
42 | { | ||
43 | /* Storage name */ | ||
44 | char name[DST_NAMELEN]; | ||
45 | /* Command flags */ | ||
46 | __u32 flags; | ||
47 | /* Command itself (see above) */ | ||
48 | __u32 cmd; | ||
49 | /* Maximum number of pages per single request in this device */ | ||
50 | __u32 max_pages; | ||
51 | /* Stale/error transaction scanning timeout in milliseconds */ | ||
52 | __u32 trans_scan_timeout; | ||
53 | /* Maximum number of retry sends before completing transaction as broken */ | ||
54 | __u32 trans_max_retries; | ||
55 | /* Storage size */ | ||
56 | __u64 size; | ||
57 | }; | ||
58 | |||
59 | /* Reply command carries completion status */ | ||
60 | struct dst_ctl_ack | ||
61 | { | ||
62 | struct cn_msg msg; | ||
63 | int error; | ||
64 | int unused[3]; | ||
65 | }; | ||
66 | |||
67 | /* | ||
68 | * Unfortunaltely socket address structure is not exported to userspace | ||
69 | * and is redefined there. | ||
70 | */ | ||
71 | #define SADDR_MAX_DATA 128 | ||
72 | |||
73 | struct saddr { | ||
74 | /* address family, AF_xxx */ | ||
75 | unsigned short sa_family; | ||
76 | /* 14 bytes of protocol address */ | ||
77 | char sa_data[SADDR_MAX_DATA]; | ||
78 | /* Number of bytes used in sa_data */ | ||
79 | unsigned short sa_data_len; | ||
80 | }; | ||
81 | |||
82 | /* Address structure */ | ||
83 | struct dst_network_ctl | ||
84 | { | ||
85 | /* Socket type: datagram, stream...*/ | ||
86 | unsigned int type; | ||
87 | /* Let me guess, is it a Jupiter diameter? */ | ||
88 | unsigned int proto; | ||
89 | /* Peer's address */ | ||
90 | struct saddr addr; | ||
91 | }; | ||
92 | |||
93 | struct dst_crypto_ctl | ||
94 | { | ||
95 | /* Cipher and hash names */ | ||
96 | char cipher_algo[DST_NAMELEN]; | ||
97 | char hash_algo[DST_NAMELEN]; | ||
98 | |||
99 | /* Key sizes. Can be zero for digest for example */ | ||
100 | unsigned int cipher_keysize, hash_keysize; | ||
101 | /* Alignment. Calculated by the DST itself. */ | ||
102 | unsigned int crypto_attached_size; | ||
103 | /* Number of threads to perform crypto operations */ | ||
104 | int thread_num; | ||
105 | }; | ||
106 | |||
107 | /* Export security attributes have this bits checked in when client connects */ | ||
108 | #define DST_PERM_READ (1<<0) | ||
109 | #define DST_PERM_WRITE (1<<1) | ||
110 | |||
111 | /* | ||
112 | * Right now it is simple model, where each remote address | ||
113 | * is assigned to set of permissions it is allowed to perform. | ||
114 | * In real world block device does not know anything but | ||
115 | * reading and writing, so it should be more than enough. | ||
116 | */ | ||
117 | struct dst_secure_user | ||
118 | { | ||
119 | unsigned int permissions; | ||
120 | struct saddr addr; | ||
121 | }; | ||
122 | |||
123 | /* | ||
124 | * Export control command: device to export and network address to accept | ||
125 | * clients to work with given device | ||
126 | */ | ||
127 | struct dst_export_ctl | ||
128 | { | ||
129 | char device[DST_NAMELEN]; | ||
130 | struct dst_network_ctl ctl; | ||
131 | }; | ||
132 | |||
133 | enum { | ||
134 | DST_CFG = 1, /* Request remote configuration */ | ||
135 | DST_IO, /* IO command */ | ||
136 | DST_IO_RESPONSE, /* IO response */ | ||
137 | DST_PING, /* Keepalive message */ | ||
138 | DST_NCMD_MAX, | ||
139 | }; | ||
140 | |||
141 | struct dst_cmd | ||
142 | { | ||
143 | /* Network command itself, see above */ | ||
144 | __u32 cmd; | ||
145 | /* | ||
146 | * Size of the attached data | ||
147 | * (in most cases, for READ command it means how many bytes were requested) | ||
148 | */ | ||
149 | __u32 size; | ||
150 | /* Crypto size: number of attached bytes with digest/hmac */ | ||
151 | __u32 csize; | ||
152 | /* Here we can carry secret data */ | ||
153 | __u32 reserved; | ||
154 | /* Read/write bits, see how they are encoded in bio structure */ | ||
155 | __u64 rw; | ||
156 | /* BIO flags */ | ||
157 | __u64 flags; | ||
158 | /* Unique command id (like transaction ID) */ | ||
159 | __u64 id; | ||
160 | /* Sector to start IO from */ | ||
161 | __u64 sector; | ||
162 | /* Hash data is placed after this header */ | ||
163 | __u8 hash[0]; | ||
164 | }; | ||
165 | |||
166 | /* | ||
167 | * Convert command to/from network byte order. | ||
168 | * We do not use hton*() functions, since there is | ||
169 | * no 64-bit implementation. | ||
170 | */ | ||
171 | static inline void dst_convert_cmd(struct dst_cmd *c) | ||
172 | { | ||
173 | c->cmd = __cpu_to_be32(c->cmd); | ||
174 | c->csize = __cpu_to_be32(c->csize); | ||
175 | c->size = __cpu_to_be32(c->size); | ||
176 | c->sector = __cpu_to_be64(c->sector); | ||
177 | c->id = __cpu_to_be64(c->id); | ||
178 | c->flags = __cpu_to_be64(c->flags); | ||
179 | c->rw = __cpu_to_be64(c->rw); | ||
180 | } | ||
181 | |||
182 | /* Transaction id */ | ||
183 | typedef __u64 dst_gen_t; | ||
184 | |||
185 | #ifdef __KERNEL__ | ||
186 | |||
187 | #include <linux/blkdev.h> | ||
188 | #include <linux/bio.h> | ||
189 | #include <linux/device.h> | ||
190 | #include <linux/mempool.h> | ||
191 | #include <linux/net.h> | ||
192 | #include <linux/poll.h> | ||
193 | #include <linux/rbtree.h> | ||
194 | |||
195 | #ifdef CONFIG_DST_DEBUG | ||
196 | #define dprintk(f, a...) printk(KERN_NOTICE f, ##a) | ||
197 | #else | ||
198 | static inline void __attribute__ ((format (printf, 1, 2))) | ||
199 | dprintk(const char *fmt, ...) {} | ||
200 | #endif | ||
201 | |||
202 | struct dst_node; | ||
203 | |||
204 | struct dst_trans | ||
205 | { | ||
206 | /* DST node we are working with */ | ||
207 | struct dst_node *n; | ||
208 | |||
209 | /* Entry inside transaction tree */ | ||
210 | struct rb_node trans_entry; | ||
211 | |||
212 | /* Merlin kills this transaction when this memory cell equals zero */ | ||
213 | atomic_t refcnt; | ||
214 | |||
215 | /* How this transaction should be processed by crypto engine */ | ||
216 | short enc; | ||
217 | /* How many times this transaction was resent */ | ||
218 | short retries; | ||
219 | /* Completion status */ | ||
220 | int error; | ||
221 | |||
222 | /* When did we send it to the remote peer */ | ||
223 | long send_time; | ||
224 | |||
225 | /* My name is... | ||
226 | * Well, computers does not speak, they have unique id instead */ | ||
227 | dst_gen_t gen; | ||
228 | |||
229 | /* Block IO we are working with */ | ||
230 | struct bio *bio; | ||
231 | |||
232 | /* Network command for above block IO request */ | ||
233 | struct dst_cmd cmd; | ||
234 | }; | ||
235 | |||
236 | struct dst_crypto_engine | ||
237 | { | ||
238 | /* What should we do with all block requests */ | ||
239 | struct crypto_hash *hash; | ||
240 | struct crypto_ablkcipher *cipher; | ||
241 | |||
242 | /* Pool of pages used to encrypt data into before sending */ | ||
243 | int page_num; | ||
244 | struct page **pages; | ||
245 | |||
246 | /* What to do with current request */ | ||
247 | int enc; | ||
248 | /* Who we are and where do we go */ | ||
249 | struct scatterlist *src, *dst; | ||
250 | |||
251 | /* Maximum timeout waiting for encryption to be completed */ | ||
252 | long timeout; | ||
253 | /* IV is a 64-bit sequential counter */ | ||
254 | u64 iv; | ||
255 | |||
256 | /* Secret data */ | ||
257 | void *private; | ||
258 | |||
259 | /* Cached temporary data lives here */ | ||
260 | int size; | ||
261 | void *data; | ||
262 | }; | ||
263 | |||
264 | struct dst_state | ||
265 | { | ||
266 | /* The main state protection */ | ||
267 | struct mutex state_lock; | ||
268 | |||
269 | /* Polling machinery for sockets */ | ||
270 | wait_queue_t wait; | ||
271 | wait_queue_head_t *whead; | ||
272 | /* Most of events are being waited here */ | ||
273 | wait_queue_head_t thread_wait; | ||
274 | |||
275 | /* Who owns this? */ | ||
276 | struct dst_node *node; | ||
277 | |||
278 | /* Network address for this state */ | ||
279 | struct dst_network_ctl ctl; | ||
280 | |||
281 | /* Permissions to work with: read-only or rw connection */ | ||
282 | u32 permissions; | ||
283 | |||
284 | /* Called when we need to clean private data */ | ||
285 | void (* cleanup)(struct dst_state *st); | ||
286 | |||
287 | /* Used by the server: BIO completion queues BIOs here */ | ||
288 | struct list_head request_list; | ||
289 | spinlock_t request_lock; | ||
290 | |||
291 | /* Guess what? No, it is not number of planets */ | ||
292 | atomic_t refcnt; | ||
293 | |||
294 | /* This flags is set when connection should be dropped */ | ||
295 | int need_exit; | ||
296 | |||
297 | /* | ||
298 | * Socket to work with. Second pointer is used for | ||
299 | * lockless check if socket was changed before performing | ||
300 | * next action (like working with cached polling result) | ||
301 | */ | ||
302 | struct socket *socket, *read_socket; | ||
303 | |||
304 | /* Cached preallocated data */ | ||
305 | void *data; | ||
306 | unsigned int size; | ||
307 | |||
308 | /* Currently processed command */ | ||
309 | struct dst_cmd cmd; | ||
310 | }; | ||
311 | |||
312 | struct dst_info | ||
313 | { | ||
314 | /* Device size */ | ||
315 | u64 size; | ||
316 | |||
317 | /* Local device name for export devices */ | ||
318 | char local[DST_NAMELEN]; | ||
319 | |||
320 | /* Network setup */ | ||
321 | struct dst_network_ctl net; | ||
322 | |||
323 | /* Sysfs bits use this */ | ||
324 | struct device device; | ||
325 | }; | ||
326 | |||
327 | struct dst_node | ||
328 | { | ||
329 | struct list_head node_entry; | ||
330 | |||
331 | /* Hi, my name is stored here */ | ||
332 | char name[DST_NAMELEN]; | ||
333 | /* My cache name is stored here */ | ||
334 | char cache_name[DST_NAMELEN]; | ||
335 | |||
336 | /* Block device attached to given node. | ||
337 | * Only valid for exporting nodes */ | ||
338 | struct block_device *bdev; | ||
339 | /* Network state machine for given peer */ | ||
340 | struct dst_state *state; | ||
341 | |||
342 | /* Block IO machinery */ | ||
343 | struct request_queue *queue; | ||
344 | struct gendisk *disk; | ||
345 | |||
346 | /* Number of threads in processing pool */ | ||
347 | int thread_num; | ||
348 | /* Maximum number of pages in single IO */ | ||
349 | int max_pages; | ||
350 | |||
351 | /* I'm that big in bytes */ | ||
352 | loff_t size; | ||
353 | |||
354 | /* Exported to userspace node information */ | ||
355 | struct dst_info *info; | ||
356 | |||
357 | /* | ||
358 | * Security attribute list. | ||
359 | * Used only by exporting node currently. | ||
360 | */ | ||
361 | struct list_head security_list; | ||
362 | struct mutex security_lock; | ||
363 | |||
364 | /* | ||
365 | * When this unerflows below zero, university collapses. | ||
366 | * But this will not happen, since node will be freed, | ||
367 | * when reference counter reaches zero. | ||
368 | */ | ||
369 | atomic_t refcnt; | ||
370 | |||
371 | /* How precisely should I be started? */ | ||
372 | int (*start)(struct dst_node *); | ||
373 | |||
374 | /* Crypto capabilities */ | ||
375 | struct dst_crypto_ctl crypto; | ||
376 | u8 *hash_key; | ||
377 | u8 *cipher_key; | ||
378 | |||
379 | /* Pool of processing thread */ | ||
380 | struct thread_pool *pool; | ||
381 | |||
382 | /* Transaction IDs live here */ | ||
383 | atomic_long_t gen; | ||
384 | |||
385 | /* | ||
386 | * How frequently and how many times transaction | ||
387 | * tree should be scanned to drop stale objects. | ||
388 | */ | ||
389 | long trans_scan_timeout; | ||
390 | int trans_max_retries; | ||
391 | |||
392 | /* Small gnomes live here */ | ||
393 | struct rb_root trans_root; | ||
394 | struct mutex trans_lock; | ||
395 | |||
396 | /* | ||
397 | * Transaction cache/memory pool. | ||
398 | * It is big enough to contain not only transaction | ||
399 | * itself, but additional crypto data (digest/hmac). | ||
400 | */ | ||
401 | struct kmem_cache *trans_cache; | ||
402 | mempool_t *trans_pool; | ||
403 | |||
404 | /* This entity scans transaction tree */ | ||
405 | struct delayed_work trans_work; | ||
406 | |||
407 | wait_queue_head_t wait; | ||
408 | }; | ||
409 | |||
410 | /* Kernel representation of the security attribute */ | ||
411 | struct dst_secure | ||
412 | { | ||
413 | struct list_head sec_entry; | ||
414 | struct dst_secure_user sec; | ||
415 | }; | ||
416 | |||
417 | int dst_process_bio(struct dst_node *n, struct bio *bio); | ||
418 | |||
419 | int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r); | ||
420 | int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le); | ||
421 | |||
422 | static inline struct dst_state *dst_state_get(struct dst_state *st) | ||
423 | { | ||
424 | BUG_ON(atomic_read(&st->refcnt) == 0); | ||
425 | atomic_inc(&st->refcnt); | ||
426 | return st; | ||
427 | } | ||
428 | |||
429 | void dst_state_put(struct dst_state *st); | ||
430 | |||
431 | struct dst_state *dst_state_alloc(struct dst_node *n); | ||
432 | int dst_state_socket_create(struct dst_state *st); | ||
433 | void dst_state_socket_release(struct dst_state *st); | ||
434 | |||
435 | void dst_state_exit_connected(struct dst_state *st); | ||
436 | |||
437 | int dst_state_schedule_receiver(struct dst_state *st); | ||
438 | |||
439 | void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str); | ||
440 | |||
441 | static inline void dst_state_lock(struct dst_state *st) | ||
442 | { | ||
443 | mutex_lock(&st->state_lock); | ||
444 | } | ||
445 | |||
446 | static inline void dst_state_unlock(struct dst_state *st) | ||
447 | { | ||
448 | mutex_unlock(&st->state_lock); | ||
449 | } | ||
450 | |||
451 | void dst_poll_exit(struct dst_state *st); | ||
452 | int dst_poll_init(struct dst_state *st); | ||
453 | |||
454 | static inline unsigned int dst_state_poll(struct dst_state *st) | ||
455 | { | ||
456 | unsigned int revents = POLLHUP | POLLERR; | ||
457 | |||
458 | dst_state_lock(st); | ||
459 | if (st->socket) | ||
460 | revents = st->socket->ops->poll(NULL, st->socket, NULL); | ||
461 | dst_state_unlock(st); | ||
462 | |||
463 | return revents; | ||
464 | } | ||
465 | |||
466 | static inline int dst_thread_setup(void *private, void *data) | ||
467 | { | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | void dst_node_put(struct dst_node *n); | ||
472 | |||
473 | static inline struct dst_node *dst_node_get(struct dst_node *n) | ||
474 | { | ||
475 | atomic_inc(&n->refcnt); | ||
476 | return n; | ||
477 | } | ||
478 | |||
479 | int dst_data_recv(struct dst_state *st, void *data, unsigned int size); | ||
480 | int dst_recv_cdata(struct dst_state *st, void *cdata); | ||
481 | int dst_data_send_header(struct socket *sock, | ||
482 | void *data, unsigned int size, int more); | ||
483 | |||
484 | int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio); | ||
485 | |||
486 | int dst_process_io(struct dst_state *st); | ||
487 | int dst_export_crypto(struct dst_node *n, struct bio *bio); | ||
488 | int dst_export_send_bio(struct bio *bio); | ||
489 | int dst_start_export(struct dst_node *n); | ||
490 | |||
491 | int __init dst_export_init(void); | ||
492 | void dst_export_exit(void); | ||
493 | |||
494 | /* Private structure for export block IO requests */ | ||
495 | struct dst_export_priv | ||
496 | { | ||
497 | struct list_head request_entry; | ||
498 | struct dst_state *state; | ||
499 | struct bio *bio; | ||
500 | struct dst_cmd cmd; | ||
501 | }; | ||
502 | |||
503 | static inline void dst_trans_get(struct dst_trans *t) | ||
504 | { | ||
505 | atomic_inc(&t->refcnt); | ||
506 | } | ||
507 | |||
508 | struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen); | ||
509 | int dst_trans_remove(struct dst_trans *t); | ||
510 | int dst_trans_remove_nolock(struct dst_trans *t); | ||
511 | void dst_trans_put(struct dst_trans *t); | ||
512 | |||
513 | /* | ||
514 | * Convert bio into network command. | ||
515 | */ | ||
516 | static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd, | ||
517 | u32 command, u64 id) | ||
518 | { | ||
519 | cmd->cmd = command; | ||
520 | cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS; | ||
521 | cmd->rw = bio->bi_rw; | ||
522 | cmd->size = bio->bi_size; | ||
523 | cmd->csize = 0; | ||
524 | cmd->id = id; | ||
525 | cmd->sector = bio->bi_sector; | ||
526 | }; | ||
527 | |||
528 | int dst_trans_send(struct dst_trans *t); | ||
529 | int dst_trans_crypto(struct dst_trans *t); | ||
530 | |||
531 | int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl); | ||
532 | void dst_node_crypto_exit(struct dst_node *n); | ||
533 | |||
534 | static inline int dst_need_crypto(struct dst_node *n) | ||
535 | { | ||
536 | struct dst_crypto_ctl *c = &n->crypto; | ||
537 | /* | ||
538 | * Logical OR would be appropriate here, but the bitwise one | ||
539 | * produces better code (no branch), so it is used instead. | ||
540 | */ | ||
541 | return (c->hash_algo[0] | c->cipher_algo[0]); | ||
542 | } | ||
543 | |||
544 | int dst_node_trans_init(struct dst_node *n, unsigned int size); | ||
545 | void dst_node_trans_exit(struct dst_node *n); | ||
546 | |||
547 | /* | ||
548 | * Pool of threads. | ||
549 | * The ready list contains threads currently free to be used; | ||
550 | * the active list contains threads with work scheduled for them. | ||
551 | * Callers can wait on the given queue until a thread becomes ready. | ||
552 | */ | ||
553 | struct thread_pool | ||
554 | { | ||
555 | int thread_num; | ||
556 | struct mutex thread_lock; | ||
557 | struct list_head ready_list, active_list; | ||
558 | |||
559 | wait_queue_head_t wait; | ||
560 | }; | ||
561 | |||
562 | void thread_pool_del_worker(struct thread_pool *p); | ||
563 | void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id); | ||
564 | int thread_pool_add_worker(struct thread_pool *p, | ||
565 | char *name, | ||
566 | unsigned int id, | ||
567 | void *(* init)(void *data), | ||
568 | void (* cleanup)(void *data), | ||
569 | void *data); | ||
570 | |||
571 | void thread_pool_destroy(struct thread_pool *p); | ||
572 | struct thread_pool *thread_pool_create(int num, char *name, | ||
573 | void *(* init)(void *data), | ||
574 | void (* cleanup)(void *data), | ||
575 | void *data); | ||
576 | |||
577 | int thread_pool_schedule(struct thread_pool *p, | ||
578 | int (* setup)(void *stored_private, void *setup_data), | ||
579 | int (* action)(void *stored_private, void *setup_data), | ||
580 | void *setup_data, long timeout); | ||
581 | int thread_pool_schedule_private(struct thread_pool *p, | ||
582 | int (* setup)(void *private, void *data), | ||
583 | int (* action)(void *private, void *data), | ||
584 | void *data, long timeout, void *id); | ||
585 | |||
586 | #endif /* __KERNEL__ */ | ||
587 | #endif /* __DST_H */ | ||
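The dst.h interfaces above lean on a plain atomic get/put reference count: dst_node_get()/dst_state_get() take a reference, the matching *_put() drops it, and the object is freed when the counter reaches zero. A minimal userspace sketch of that idiom, using C11 atomics and invented object_get()/object_put() names (nothing here is taken from dst.h beyond the pattern):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct object {
        atomic_int refcnt;
        /* payload would live here */
    };

    /* Take an extra reference; the caller must already hold one. */
    static struct object *object_get(struct object *obj)
    {
        atomic_fetch_add(&obj->refcnt, 1);
        return obj;
    }

    /* Drop a reference and free the object when the last one is gone. */
    static void object_put(struct object *obj)
    {
        if (atomic_fetch_sub(&obj->refcnt, 1) == 1) {
            printf("last reference dropped, freeing\n");
            free(obj);
        }
    }

    int main(void)
    {
        struct object *obj = malloc(sizeof(*obj));

        atomic_init(&obj->refcnt, 1);   /* creation reference */
        object_get(obj);                /* a second user appears */
        object_put(obj);                /* ...and goes away */
        object_put(obj);                /* creation reference dropped, freed */
        return 0;
    }

dst_state_get() additionally BUG()s if it ever sees a zero count, which catches a use-after-free at the point of acquisition rather than later.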
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index a0d9422a1569..f8c2e1767500 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h | |||
@@ -57,8 +57,7 @@ extern int ddebug_remove_module(char *mod_name); | |||
57 | { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ | 57 | { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ |
58 | DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ | 58 | DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ |
59 | if (__dynamic_dbg_enabled(descriptor)) \ | 59 | if (__dynamic_dbg_enabled(descriptor)) \ |
60 | printk(KERN_DEBUG KBUILD_MODNAME ":" pr_fmt(fmt), \ | 60 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \ |
61 | ##__VA_ARGS__); \ | ||
62 | } while (0) | 61 | } while (0) |
63 | 62 | ||
64 | 63 | ||
@@ -69,9 +68,7 @@ extern int ddebug_remove_module(char *mod_name); | |||
69 | { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ | 68 | { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ |
70 | DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ | 69 | DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ |
71 | if (__dynamic_dbg_enabled(descriptor)) \ | 70 | if (__dynamic_dbg_enabled(descriptor)) \ |
72 | dev_printk(KERN_DEBUG, dev, \ | 71 | dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ |
73 | KBUILD_MODNAME ": " fmt, \ | ||
74 | ##__VA_ARGS__); \ | ||
75 | } while (0) | 72 | } while (0) |
76 | 73 | ||
77 | #else | 74 | #else |
@@ -81,8 +78,10 @@ static inline int ddebug_remove_module(char *mod) | |||
81 | return 0; | 78 | return 0; |
82 | } | 79 | } |
83 | 80 | ||
84 | #define dynamic_pr_debug(fmt, ...) do { } while (0) | 81 | #define dynamic_pr_debug(fmt, ...) \ |
85 | #define dynamic_dev_dbg(dev, format, ...) do { } while (0) | 82 | do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) |
83 | #define dynamic_dev_dbg(dev, format, ...) \ | ||
84 | do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) | ||
86 | #endif | 85 | #endif |
87 | 86 | ||
88 | #endif | 87 | #endif |
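The dynamic_debug.h change replaces the empty dynamic_pr_debug()/dynamic_dev_dbg() stubs with do { if (0) printk(...); } while (0) bodies. The point of the if (0) form is that the arguments stay visible to the compiler, so format strings are still type-checked and variables used only in debug statements do not trigger unused warnings, while the dead call itself is optimized away. A small userspace sketch of the same idiom (dbg_printf() is an invented name; ##__VA_ARGS__ is the same GNU extension the kernel macros rely on):

    #include <stdio.h>

    /*
     * The call is dead code that the optimizer removes, but the compiler
     * still checks the format string against its arguments and still
     * "sees" the variables, so -Wformat and -Wunused keep working.
     */
    #define dbg_printf(fmt, ...) \
        do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)

    int main(void)
    {
        int requests = 42;

        /* Produces no output, but "%s" instead of "%d" here would
         * still be caught at compile time. */
        dbg_printf("handled %d requests\n", requests);
        return 0;
    }

Note, as an aside, that the dynamic_dev_dbg() stub shown above names its parameter format but its body passes fmt to dev_printk(); as written it only expands cleanly where a fmt symbol happens to be in scope, which looks like an oversight in this hunk.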
diff --git a/include/linux/efi.h b/include/linux/efi.h index ce4581fbc08b..fb737bc19a8c 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -280,11 +280,7 @@ efi_guidcmp (efi_guid_t left, efi_guid_t right) | |||
280 | static inline char * | 280 | static inline char * |
281 | efi_guid_unparse(efi_guid_t *guid, char *out) | 281 | efi_guid_unparse(efi_guid_t *guid, char *out) |
282 | { | 282 | { |
283 | sprintf(out, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x", | 283 | sprintf(out, "%pUl", guid->b); |
284 | guid->b[3], guid->b[2], guid->b[1], guid->b[0], | ||
285 | guid->b[5], guid->b[4], guid->b[7], guid->b[6], | ||
286 | guid->b[8], guid->b[9], guid->b[10], guid->b[11], | ||
287 | guid->b[12], guid->b[13], guid->b[14], guid->b[15]); | ||
288 | return out; | 284 | return out; |
289 | } | 285 | } |
290 | 286 | ||
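The efi_guid_unparse() change swaps a hand-rolled sprintf() for the kernel's %pUl printk extension, which prints a 16-byte GUID with the first three fields interpreted as little-endian. For reference, here is a userspace sketch of the byte ordering the removed code implemented (and that %pUl reproduces); the GUID value is only an example:

    #include <stdio.h>

    /*
     * Format a 16-byte GUID the way the removed sprintf() did: the first
     * three fields are stored little-endian, the rest in byte order.
     */
    static void guid_unparse_le(const unsigned char b[16], char out[37])
    {
        snprintf(out, 37,
                 "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
                 "%02x%02x%02x%02x%02x%02x",
                 b[3], b[2], b[1], b[0], b[5], b[4], b[7], b[6],
                 b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15]);
    }

    int main(void)
    {
        const unsigned char guid[16] = {
            0x61, 0xdf, 0xe4, 0x8b, 0xca, 0x93, 0xd2, 0x11,
            0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c,
        };
        char buf[37];

        guid_unparse_le(guid, buf);
        printf("%s\n", buf);    /* 8be4df61-93ca-11d2-aa0d-00e098032b8c */
        return 0;
    }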
diff --git a/include/linux/elf.h b/include/linux/elf.h index 90a4ed0ea0e5..0cc4d55151b7 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h | |||
@@ -361,7 +361,7 @@ typedef struct elf64_shdr { | |||
361 | #define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ | 361 | #define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ |
362 | #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ | 362 | #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ |
363 | #define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ | 363 | #define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ |
364 | #define NT_PRXSTATUS 0x300 /* s390 upper register halves */ | 364 | #define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */ |
365 | 365 | ||
366 | 366 | ||
367 | /* Note header in a PT_NOTE section */ | 367 | /* Note header in a PT_NOTE section */ |
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h index 90d1c2184112..9a33c5f7e126 100644 --- a/include/linux/enclosure.h +++ b/include/linux/enclosure.h | |||
@@ -42,6 +42,8 @@ enum enclosure_status { | |||
42 | ENCLOSURE_STATUS_NOT_INSTALLED, | 42 | ENCLOSURE_STATUS_NOT_INSTALLED, |
43 | ENCLOSURE_STATUS_UNKNOWN, | 43 | ENCLOSURE_STATUS_UNKNOWN, |
44 | ENCLOSURE_STATUS_UNAVAILABLE, | 44 | ENCLOSURE_STATUS_UNAVAILABLE, |
45 | /* last element for counting purposes */ | ||
46 | ENCLOSURE_STATUS_MAX | ||
45 | }; | 47 | }; |
46 | 48 | ||
47 | /* SFF-8485 activity light settings */ | 49 | /* SFF-8485 activity light settings */ |
diff --git a/include/linux/err.h b/include/linux/err.h index ec87f3142bf3..1b12642636c7 100644 --- a/include/linux/err.h +++ b/include/linux/err.h | |||
@@ -34,6 +34,11 @@ static inline long IS_ERR(const void *ptr) | |||
34 | return IS_ERR_VALUE((unsigned long)ptr); | 34 | return IS_ERR_VALUE((unsigned long)ptr); |
35 | } | 35 | } |
36 | 36 | ||
37 | static inline long IS_ERR_OR_NULL(const void *ptr) | ||
38 | { | ||
39 | return !ptr || IS_ERR_VALUE((unsigned long)ptr); | ||
40 | } | ||
41 | |||
37 | /** | 42 | /** |
38 | * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type | 43 | * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type |
39 | * @ptr: The pointer to cast. | 44 | * @ptr: The pointer to cast. |
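The IS_ERR_OR_NULL() helper added here rounds out the error-pointer API: kernel code often encodes a small negative errno directly in a pointer return value instead of returning NULL plus a separate error code. A self-contained userspace re-implementation of the idiom follows; the helper bodies closely mirror include/linux/err.h (minus the __must_check/unlikely annotations), while lookup_thing() is an invented example:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO       4095
    #define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

    /* Encode a negative errno in the top MAX_ERRNO values of the address space. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline long IS_ERR(const void *ptr)
    {
        return IS_ERR_VALUE((unsigned long)ptr);
    }
    static inline long IS_ERR_OR_NULL(const void *ptr)
    {
        return !ptr || IS_ERR_VALUE((unsigned long)ptr);
    }

    static int the_thing = 42;

    /* A lookup that reports "not found" as an encoded -ENOENT. */
    static void *lookup_thing(int exists)
    {
        return exists ? (void *)&the_thing : ERR_PTR(-ENOENT);
    }

    int main(void)
    {
        void *p = lookup_thing(0);

        if (IS_ERR_OR_NULL(p))
            printf("lookup failed: %ld\n", IS_ERR(p) ? PTR_ERR(p) : -EINVAL);
        else
            printf("found value %d\n", *(int *)p);
        return 0;
    }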
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index 94dd10366a78..91bb4f27238c 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | #include <linux/fcntl.h> | 11 | #include <linux/fcntl.h> |
12 | #include <linux/file.h> | 12 | #include <linux/file.h> |
13 | #include <linux/wait.h> | ||
13 | 14 | ||
14 | /* | 15 | /* |
15 | * CAREFUL: Check include/asm-generic/fcntl.h when defining | 16 | * CAREFUL: Check include/asm-generic/fcntl.h when defining |
@@ -34,6 +35,9 @@ struct file *eventfd_fget(int fd); | |||
34 | struct eventfd_ctx *eventfd_ctx_fdget(int fd); | 35 | struct eventfd_ctx *eventfd_ctx_fdget(int fd); |
35 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); | 36 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); |
36 | int eventfd_signal(struct eventfd_ctx *ctx, int n); | 37 | int eventfd_signal(struct eventfd_ctx *ctx, int n); |
38 | ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); | ||
39 | int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, | ||
40 | __u64 *cnt); | ||
37 | 41 | ||
38 | #else /* CONFIG_EVENTFD */ | 42 | #else /* CONFIG_EVENTFD */ |
39 | 43 | ||
@@ -61,6 +65,18 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) | |||
61 | 65 | ||
62 | } | 66 | } |
63 | 67 | ||
68 | static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, | ||
69 | __u64 *cnt) | ||
70 | { | ||
71 | return -ENOSYS; | ||
72 | } | ||
73 | |||
74 | static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, | ||
75 | wait_queue_t *wait, __u64 *cnt) | ||
76 | { | ||
77 | return -ENOSYS; | ||
78 | } | ||
79 | |||
64 | #endif | 80 | #endif |
65 | 81 | ||
66 | #endif /* _LINUX_EVENTFD_H */ | 82 | #endif /* _LINUX_EVENTFD_H */ |
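The new eventfd_ctx_read() and eventfd_ctx_remove_wait_queue() prototypes (with -ENOSYS stubs when CONFIG_EVENTFD is off) let in-kernel consumers read the eventfd counter and detach from its waitqueue without going through a struct file. The counter semantics they operate on are the same ones userspace sees through the eventfd(2) syscall, demonstrated below:

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/eventfd.h>
    #include <unistd.h>

    int main(void)
    {
        uint64_t val;
        int efd = eventfd(0, 0);        /* 64-bit counter starts at 0 */

        if (efd < 0) {
            perror("eventfd");
            return 1;
        }

        /* Each write adds its value to the counter... */
        val = 3;
        if (write(efd, &val, sizeof(val)) != sizeof(val))
            return 1;
        val = 4;
        if (write(efd, &val, sizeof(val)) != sizeof(val))
            return 1;

        /* ...and a read returns the accumulated count and resets it
         * to zero (unless EFD_SEMAPHORE was requested). */
        if (read(efd, &val, sizeof(val)) != sizeof(val))
            return 1;
        printf("counter read back: %llu\n", (unsigned long long)val); /* 7 */

        close(efd);
        return 0;
    }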
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 27e772cefb6a..dc12f416a49f 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h | |||
@@ -97,7 +97,7 @@ struct fid { | |||
97 | * @get_name: find the name for a given inode in a given directory | 97 | * @get_name: find the name for a given inode in a given directory |
98 | * @get_parent: find the parent of a given directory | 98 | * @get_parent: find the parent of a given directory |
99 | * | 99 | * |
100 | * See Documentation/filesystems/Exporting for details on how to use | 100 | * See Documentation/filesystems/nfs/Exporting for details on how to use |
101 | * this interface correctly. | 101 | * this interface correctly. |
102 | * | 102 | * |
103 | * encode_fh: | 103 | * encode_fh: |
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h index f07f34de2f0e..258088ab3c6b 100644 --- a/include/linux/ext3_fs_sb.h +++ b/include/linux/ext3_fs_sb.h | |||
@@ -72,6 +72,8 @@ struct ext3_sb_info { | |||
72 | struct inode * s_journal_inode; | 72 | struct inode * s_journal_inode; |
73 | struct journal_s * s_journal; | 73 | struct journal_s * s_journal; |
74 | struct list_head s_orphan; | 74 | struct list_head s_orphan; |
75 | struct mutex s_orphan_lock; | ||
76 | struct mutex s_resize_lock; | ||
75 | unsigned long s_commit_interval; | 77 | unsigned long s_commit_interval; |
76 | struct block_device *journal_bdev; | 78 | struct block_device *journal_bdev; |
77 | #ifdef CONFIG_JBD_DEBUG | 79 | #ifdef CONFIG_JBD_DEBUG |
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h index cf82d519be40..d7b5ddca99c2 100644 --- a/include/linux/ext3_jbd.h +++ b/include/linux/ext3_jbd.h | |||
@@ -44,13 +44,13 @@ | |||
44 | 44 | ||
45 | #define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \ | 45 | #define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \ |
46 | EXT3_XATTR_TRANS_BLOCKS - 2 + \ | 46 | EXT3_XATTR_TRANS_BLOCKS - 2 + \ |
47 | 2*EXT3_QUOTA_TRANS_BLOCKS(sb)) | 47 | EXT3_MAXQUOTAS_TRANS_BLOCKS(sb)) |
48 | 48 | ||
49 | /* Delete operations potentially hit one directory's namespace plus an | 49 | /* Delete operations potentially hit one directory's namespace plus an |
50 | * entire inode, plus arbitrary amounts of bitmap/indirection data. Be | 50 | * entire inode, plus arbitrary amounts of bitmap/indirection data. Be |
51 | * generous. We can grow the delete transaction later if necessary. */ | 51 | * generous. We can grow the delete transaction later if necessary. */ |
52 | 52 | ||
53 | #define EXT3_DELETE_TRANS_BLOCKS(sb) (2 * EXT3_DATA_TRANS_BLOCKS(sb) + 64) | 53 | #define EXT3_DELETE_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64) |
54 | 54 | ||
55 | /* Define an arbitrary limit for the amount of data we will anticipate | 55 | /* Define an arbitrary limit for the amount of data we will anticipate |
56 | * writing to any given transaction. For unbounded transactions such as | 56 | * writing to any given transaction. For unbounded transactions such as |
@@ -86,6 +86,9 @@ | |||
86 | #define EXT3_QUOTA_INIT_BLOCKS(sb) 0 | 86 | #define EXT3_QUOTA_INIT_BLOCKS(sb) 0 |
87 | #define EXT3_QUOTA_DEL_BLOCKS(sb) 0 | 87 | #define EXT3_QUOTA_DEL_BLOCKS(sb) 0 |
88 | #endif | 88 | #endif |
89 | #define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb)) | ||
90 | #define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb)) | ||
91 | #define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb)) | ||
89 | 92 | ||
90 | int | 93 | int |
91 | ext3_mark_iloc_dirty(handle_t *handle, | 94 | ext3_mark_iloc_dirty(handle_t *handle, |
diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h index 934e22d65801..d830747f5c0b 100644 --- a/include/linux/fiemap.h +++ b/include/linux/fiemap.h | |||
@@ -62,5 +62,7 @@ struct fiemap { | |||
62 | #define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively | 62 | #define FIEMAP_EXTENT_MERGED 0x00001000 /* File does not natively |
63 | * support extents. Result | 63 | * support extents. Result |
64 | * merged for efficiency. */ | 64 | * merged for efficiency. */ |
65 | #define FIEMAP_EXTENT_SHARED 0x00002000 /* Space shared with other | ||
66 | * files. */ | ||
65 | 67 | ||
66 | #endif /* _LINUX_FIEMAP_H */ | 68 | #endif /* _LINUX_FIEMAP_H */ |
diff --git a/include/linux/file.h b/include/linux/file.h index 335a0a5c316e..5555508fd517 100644 --- a/include/linux/file.h +++ b/include/linux/file.h | |||
@@ -18,11 +18,9 @@ extern void drop_file_write_access(struct file *file); | |||
18 | struct file_operations; | 18 | struct file_operations; |
19 | struct vfsmount; | 19 | struct vfsmount; |
20 | struct dentry; | 20 | struct dentry; |
21 | extern int init_file(struct file *, struct vfsmount *mnt, | 21 | struct path; |
22 | struct dentry *dentry, fmode_t mode, | 22 | extern struct file *alloc_file(struct path *, fmode_t mode, |
23 | const struct file_operations *fop); | 23 | const struct file_operations *fop); |
24 | extern struct file *alloc_file(struct vfsmount *, struct dentry *dentry, | ||
25 | fmode_t mode, const struct file_operations *fop); | ||
26 | 24 | ||
27 | static inline void fput_light(struct file *file, int fput_needed) | 25 | static inline void fput_light(struct file *file, int fput_needed) |
28 | { | 26 | { |
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index c6b3ca3af6df..520ecf86cbb3 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h | |||
@@ -340,6 +340,9 @@ struct fw_cdev_send_response { | |||
340 | * The @closure field is passed back to userspace in the response event. | 340 | * The @closure field is passed back to userspace in the response event. |
341 | * The @handle field is an out parameter, returning a handle to the allocated | 341 | * The @handle field is an out parameter, returning a handle to the allocated |
342 | * range to be used for later deallocation of the range. | 342 | * range to be used for later deallocation of the range. |
343 | * | ||
344 | * The address range is allocated on all local nodes. The address allocation | ||
345 | * is exclusive except for the FCP command and response registers. | ||
343 | */ | 346 | */ |
344 | struct fw_cdev_allocate { | 347 | struct fw_cdev_allocate { |
345 | __u64 offset; | 348 | __u64 offset; |
@@ -377,7 +380,7 @@ struct fw_cdev_initiate_bus_reset { | |||
377 | * @immediate: If non-zero, immediate key to insert before pointer | 380 | * @immediate: If non-zero, immediate key to insert before pointer |
378 | * @key: Upper 8 bits of root directory pointer | 381 | * @key: Upper 8 bits of root directory pointer |
379 | * @data: Userspace pointer to contents of descriptor block | 382 | * @data: Userspace pointer to contents of descriptor block |
380 | * @length: Length of descriptor block data, in bytes | 383 | * @length: Length of descriptor block data, in quadlets |
381 | * @handle: Handle to the descriptor, written by the kernel | 384 | * @handle: Handle to the descriptor, written by the kernel |
382 | * | 385 | * |
383 | * Add a descriptor block and optionally a preceding immediate key to the local | 386 | * Add a descriptor block and optionally a preceding immediate key to the local |
@@ -391,6 +394,8 @@ struct fw_cdev_initiate_bus_reset { | |||
391 | * If not 0, the @immediate field specifies an immediate key which will be | 394 | * If not 0, the @immediate field specifies an immediate key which will be |
392 | * inserted before the root directory pointer. | 395 | * inserted before the root directory pointer. |
393 | * | 396 | * |
397 | * @immediate, @key, and @data array elements are CPU-endian quadlets. | ||
398 | * | ||
394 | * If successful, the kernel adds the descriptor and writes back a handle to the | 399 | * If successful, the kernel adds the descriptor and writes back a handle to the |
395 | * kernel-side object to be used for later removal of the descriptor block and | 400 | * kernel-side object to be used for later removal of the descriptor block and |
396 | * immediate key. | 401 | * immediate key. |
diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 9416a461b696..a0e67150a729 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h | |||
@@ -248,8 +248,8 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, | |||
248 | void *data, size_t length, | 248 | void *data, size_t length, |
249 | void *callback_data); | 249 | void *callback_data); |
250 | /* | 250 | /* |
251 | * Important note: The callback must guarantee that either fw_send_response() | 251 | * Important note: Except for the FCP registers, the callback must guarantee |
252 | * or kfree() is called on the @request. | 252 | * that either fw_send_response() or kfree() is called on the @request. |
253 | */ | 253 | */ |
254 | typedef void (*fw_address_callback_t)(struct fw_card *card, | 254 | typedef void (*fw_address_callback_t)(struct fw_card *card, |
255 | struct fw_request *request, | 255 | struct fw_request *request, |
diff --git a/include/linux/fs.h b/include/linux/fs.h index a057f48eb156..b1bcb275b596 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -152,6 +152,7 @@ struct inodes_stat_t { | |||
152 | #define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) | 152 | #define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) |
153 | #define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) | 153 | #define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) |
154 | #define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO)) | 154 | #define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO)) |
155 | #define WRITE_META (WRITE | (1 << BIO_RW_META)) | ||
155 | #define SWRITE_SYNC_PLUG \ | 156 | #define SWRITE_SYNC_PLUG \ |
156 | (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) | 157 | (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) |
157 | #define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) | 158 | #define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) |
@@ -1094,10 +1095,6 @@ struct file_lock { | |||
1094 | 1095 | ||
1095 | extern void send_sigio(struct fown_struct *fown, int fd, int band); | 1096 | extern void send_sigio(struct fown_struct *fown, int fd, int band); |
1096 | 1097 | ||
1097 | /* fs/sync.c */ | ||
1098 | extern int do_sync_mapping_range(struct address_space *mapping, loff_t offset, | ||
1099 | loff_t endbyte, unsigned int flags); | ||
1100 | |||
1101 | #ifdef CONFIG_FILE_LOCKING | 1098 | #ifdef CONFIG_FILE_LOCKING |
1102 | extern int fcntl_getlk(struct file *, struct flock __user *); | 1099 | extern int fcntl_getlk(struct file *, struct flock __user *); |
1103 | extern int fcntl_setlk(unsigned int, struct file *, unsigned int, | 1100 | extern int fcntl_setlk(unsigned int, struct file *, unsigned int, |
@@ -1590,7 +1587,7 @@ struct super_operations { | |||
1590 | * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at | 1587 | * until that flag is cleared. I_WILL_FREE, I_FREEING and I_CLEAR are set at |
1591 | * various stages of removing an inode. | 1588 | * various stages of removing an inode. |
1592 | * | 1589 | * |
1593 | * Two bits are used for locking and completion notification, I_LOCK and I_SYNC. | 1590 | * Two bits are used for locking and completion notification, I_NEW and I_SYNC. |
1594 | * | 1591 | * |
1595 | * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on | 1592 | * I_DIRTY_SYNC Inode is dirty, but doesn't have to be written on |
1596 | * fdatasync(). i_atime is the usual cause. | 1593 | * fdatasync(). i_atime is the usual cause. |
@@ -1599,8 +1596,14 @@ struct super_operations { | |||
1599 | * don't have to write inode on fdatasync() when only | 1596 | * don't have to write inode on fdatasync() when only |
1600 | * mtime has changed in it. | 1597 | * mtime has changed in it. |
1601 | * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. | 1598 | * I_DIRTY_PAGES Inode has dirty pages. Inode itself may be clean. |
1602 | * I_NEW get_new_inode() sets i_state to I_LOCK|I_NEW. Both | 1599 | * I_NEW Serves as both a mutex and completion notification. |
1603 | * are cleared by unlock_new_inode(), called from iget(). | 1600 | * New inodes set I_NEW. If two processes both create |
1601 | * the same inode, one of them will release its inode and | ||
1602 | * wait for I_NEW to be released before returning. | ||
1603 | * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can | ||
1604 | * also cause waiting on I_NEW, without I_NEW actually | ||
1605 | * being set. find_inode() uses this to prevent returning | ||
1606 | * nearly-dead inodes. | ||
1604 | * I_WILL_FREE Must be set when calling write_inode_now() if i_count | 1607 | * I_WILL_FREE Must be set when calling write_inode_now() if i_count |
1605 | * is zero. I_FREEING must be set when I_WILL_FREE is | 1608 | * is zero. I_FREEING must be set when I_WILL_FREE is |
1606 | * cleared. | 1609 | * cleared. |
@@ -1614,35 +1617,23 @@ struct super_operations { | |||
1614 | * prohibited for many purposes. iget() must wait for | 1617 | * prohibited for many purposes. iget() must wait for |
1615 | * the inode to be completely released, then create it | 1618 | * the inode to be completely released, then create it |
1616 | * anew. Other functions will just ignore such inodes, | 1619 | * anew. Other functions will just ignore such inodes, |
1617 | * if appropriate. I_LOCK is used for waiting. | 1620 | * if appropriate. I_NEW is used for waiting. |
1618 | * | 1621 | * |
1619 | * I_LOCK Serves as both a mutex and completion notification. | 1622 | * I_SYNC Synchronized write of dirty inode data. The bit is |
1620 | * New inodes set I_LOCK. If two processes both create | 1623 | * set during data writeback, and cleared with a wakeup |
1621 | * the same inode, one of them will release its inode and | 1624 | * on the bit address once it is done. |
1622 | * wait for I_LOCK to be released before returning. | ||
1623 | * Inodes in I_WILL_FREE, I_FREEING or I_CLEAR state can | ||
1624 | * also cause waiting on I_LOCK, without I_LOCK actually | ||
1625 | * being set. find_inode() uses this to prevent returning | ||
1626 | * nearly-dead inodes. | ||
1627 | * I_SYNC Similar to I_LOCK, but limited in scope to writeback | ||
1628 | * of inode dirty data. Having a separate lock for this | ||
1629 | * purpose reduces latency and prevents some filesystem- | ||
1630 | * specific deadlocks. | ||
1631 | * | 1625 | * |
1632 | * Q: What is the difference between I_WILL_FREE and I_FREEING? | 1626 | * Q: What is the difference between I_WILL_FREE and I_FREEING? |
1633 | * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on | ||
1634 | * I_CLEAR? If not, why? | ||
1635 | */ | 1627 | */ |
1636 | #define I_DIRTY_SYNC 1 | 1628 | #define I_DIRTY_SYNC 1 |
1637 | #define I_DIRTY_DATASYNC 2 | 1629 | #define I_DIRTY_DATASYNC 2 |
1638 | #define I_DIRTY_PAGES 4 | 1630 | #define I_DIRTY_PAGES 4 |
1639 | #define I_NEW 8 | 1631 | #define __I_NEW 3 |
1632 | #define I_NEW (1 << __I_NEW) | ||
1640 | #define I_WILL_FREE 16 | 1633 | #define I_WILL_FREE 16 |
1641 | #define I_FREEING 32 | 1634 | #define I_FREEING 32 |
1642 | #define I_CLEAR 64 | 1635 | #define I_CLEAR 64 |
1643 | #define __I_LOCK 7 | 1636 | #define __I_SYNC 7 |
1644 | #define I_LOCK (1 << __I_LOCK) | ||
1645 | #define __I_SYNC 8 | ||
1646 | #define I_SYNC (1 << __I_SYNC) | 1637 | #define I_SYNC (1 << __I_SYNC) |
1647 | 1638 | ||
1648 | #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) | 1639 | #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) |
@@ -2189,7 +2180,6 @@ static inline void insert_inode_hash(struct inode *inode) { | |||
2189 | __insert_inode_hash(inode, inode->i_ino); | 2180 | __insert_inode_hash(inode, inode->i_ino); |
2190 | } | 2181 | } |
2191 | 2182 | ||
2192 | extern struct file * get_empty_filp(void); | ||
2193 | extern void file_move(struct file *f, struct list_head *list); | 2183 | extern void file_move(struct file *f, struct list_head *list); |
2194 | extern void file_kill(struct file *f); | 2184 | extern void file_kill(struct file *f); |
2195 | #ifdef CONFIG_BLOCK | 2185 | #ifdef CONFIG_BLOCK |
@@ -2264,9 +2254,11 @@ ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
2264 | int lock_type); | 2254 | int lock_type); |
2265 | 2255 | ||
2266 | enum { | 2256 | enum { |
2267 | DIO_LOCKING = 1, /* need locking between buffered and direct access */ | 2257 | /* need locking between buffered and direct access */ |
2268 | DIO_NO_LOCKING, /* bdev; no locking at all between buffered/direct */ | 2258 | DIO_LOCKING = 0x01, |
2269 | DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */ | 2259 | |
2260 | /* filesystem does not support filling holes */ | ||
2261 | DIO_SKIP_HOLES = 0x02, | ||
2270 | }; | 2262 | }; |
2271 | 2263 | ||
2272 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, | 2264 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, |
@@ -2275,7 +2267,8 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, | |||
2275 | dio_iodone_t end_io) | 2267 | dio_iodone_t end_io) |
2276 | { | 2268 | { |
2277 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 2269 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
2278 | nr_segs, get_block, end_io, DIO_LOCKING); | 2270 | nr_segs, get_block, end_io, |
2271 | DIO_LOCKING | DIO_SKIP_HOLES); | ||
2279 | } | 2272 | } |
2280 | 2273 | ||
2281 | static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, | 2274 | static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, |
@@ -2284,16 +2277,7 @@ static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, | |||
2284 | dio_iodone_t end_io) | 2277 | dio_iodone_t end_io) |
2285 | { | 2278 | { |
2286 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 2279 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
2287 | nr_segs, get_block, end_io, DIO_NO_LOCKING); | 2280 | nr_segs, get_block, end_io, 0); |
2288 | } | ||
2289 | |||
2290 | static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb, | ||
2291 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | ||
2292 | loff_t offset, unsigned long nr_segs, get_block_t get_block, | ||
2293 | dio_iodone_t end_io) | ||
2294 | { | ||
2295 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | ||
2296 | nr_segs, get_block, end_io, DIO_OWN_LOCKING); | ||
2297 | } | 2281 | } |
2298 | #endif | 2282 | #endif |
2299 | 2283 | ||
@@ -2313,6 +2297,7 @@ extern const struct inode_operations page_symlink_inode_operations; | |||
2313 | extern int generic_readlink(struct dentry *, char __user *, int); | 2297 | extern int generic_readlink(struct dentry *, char __user *, int); |
2314 | extern void generic_fillattr(struct inode *, struct kstat *); | 2298 | extern void generic_fillattr(struct inode *, struct kstat *); |
2315 | extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 2299 | extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
2300 | void __inode_add_bytes(struct inode *inode, loff_t bytes); | ||
2316 | void inode_add_bytes(struct inode *inode, loff_t bytes); | 2301 | void inode_add_bytes(struct inode *inode, loff_t bytes); |
2317 | void inode_sub_bytes(struct inode *inode, loff_t bytes); | 2302 | void inode_sub_bytes(struct inode *inode, loff_t bytes); |
2318 | loff_t inode_get_bytes(struct inode *inode); | 2303 | loff_t inode_get_bytes(struct inode *inode); |
@@ -2478,5 +2463,8 @@ int proc_nr_files(struct ctl_table *table, int write, | |||
2478 | 2463 | ||
2479 | int __init get_filesystem_list(char *buf); | 2464 | int __init get_filesystem_list(char *buf); |
2480 | 2465 | ||
2466 | #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) | ||
2467 | #define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE)) | ||
2468 | |||
2481 | #endif /* __KERNEL__ */ | 2469 | #endif /* __KERNEL__ */ |
2482 | #endif /* _LINUX_FS_H */ | 2470 | #endif /* _LINUX_FS_H */ |
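Of the two macros added at the bottom of fs.h, ACC_MODE() is the more cryptic: the octal string literal "\004\002\006\006" is a four-entry lookup table indexed by the low two O_ACCMODE bits of the open() flags, yielding the permission mask needed for access checks; the values line up with the kernel's MAY_READ (4) and MAY_WRITE (2) bits. A quick userspace illustration, with the MAY_* values defined locally here:

    #include <fcntl.h>
    #include <stdio.h>

    #define MAY_WRITE   0x2     /* same values the kernel uses */
    #define MAY_READ    0x4

    /*
     * "\004\002\006\006" is indexed by flags & O_ACCMODE:
     *   O_RDONLY (0) -> 04, O_WRONLY (1) -> 02, O_RDWR (2) -> 06.
     */
    #define ACC_MODE(x) ("\004\002\006\006"[(x) & O_ACCMODE])

    int main(void)
    {
        printf("O_RDONLY        -> 0%o\n", ACC_MODE(O_RDONLY));          /* 04 */
        printf("O_WRONLY        -> 0%o\n", ACC_MODE(O_WRONLY));          /* 02 */
        printf("O_RDWR          -> 0%o\n", ACC_MODE(O_RDWR));            /* 06 */
        printf("O_RDWR|O_APPEND -> 0%o\n", ACC_MODE(O_RDWR | O_APPEND)); /* 06 */
        return 0;
    }

OPEN_FMODE() uses the neighbouring trick: (flags + 1) & O_ACCMODE maps O_RDONLY/O_WRONLY/O_RDWR to FMODE_READ, FMODE_WRITE, and FMODE_READ|FMODE_WRITE respectively.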
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h index bb516ceeefc9..da317c7163ab 100644 --- a/include/linux/fs_stack.h +++ b/include/linux/fs_stack.h | |||
@@ -8,10 +8,8 @@ | |||
8 | #include <linux/fs.h> | 8 | #include <linux/fs.h> |
9 | 9 | ||
10 | /* externs for fs/stack.c */ | 10 | /* externs for fs/stack.c */ |
11 | extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src, | 11 | extern void fsstack_copy_attr_all(struct inode *dest, const struct inode *src); |
12 | int (*get_nlinks)(struct inode *)); | 12 | extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src); |
13 | |||
14 | extern void fsstack_copy_inode_size(struct inode *dst, const struct inode *src); | ||
15 | 13 | ||
16 | /* inlines */ | 14 | /* inlines */ |
17 | static inline void fsstack_copy_attr_atime(struct inode *dest, | 15 | static inline void fsstack_copy_attr_atime(struct inode *dest, |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 38f8d6553831..2233c98d80df 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -131,7 +131,7 @@ struct ftrace_event_call { | |||
131 | void *mod; | 131 | void *mod; |
132 | void *data; | 132 | void *data; |
133 | 133 | ||
134 | atomic_t profile_count; | 134 | int profile_count; |
135 | int (*profile_enable)(struct ftrace_event_call *); | 135 | int (*profile_enable)(struct ftrace_event_call *); |
136 | void (*profile_disable)(struct ftrace_event_call *); | 136 | void (*profile_disable)(struct ftrace_event_call *); |
137 | }; | 137 | }; |
@@ -158,7 +158,7 @@ enum { | |||
158 | FILTER_PTR_STRING, | 158 | FILTER_PTR_STRING, |
159 | }; | 159 | }; |
160 | 160 | ||
161 | extern int trace_define_common_fields(struct ftrace_event_call *call); | 161 | extern int trace_event_raw_init(struct ftrace_event_call *call); |
162 | extern int trace_define_field(struct ftrace_event_call *call, const char *type, | 162 | extern int trace_define_field(struct ftrace_event_call *call, const char *type, |
163 | const char *name, int offset, int size, | 163 | const char *name, int offset, int size, |
164 | int is_signed, int filter_type); | 164 | int is_signed, int filter_type); |
diff --git a/include/linux/generic_acl.h b/include/linux/generic_acl.h index 886f5faa08cb..ca666d18ed67 100644 --- a/include/linux/generic_acl.h +++ b/include/linux/generic_acl.h | |||
@@ -1,36 +1,15 @@ | |||
1 | /* | 1 | #ifndef LINUX_GENERIC_ACL_H |
2 | * include/linux/generic_acl.h | 2 | #define LINUX_GENERIC_ACL_H |
3 | * | ||
4 | * (C) 2005 Andreas Gruenbacher <agruen@suse.de> | ||
5 | * | ||
6 | * This file is released under the GPL. | ||
7 | */ | ||
8 | 3 | ||
9 | #ifndef GENERIC_ACL_H | 4 | #include <linux/xattr.h> |
10 | #define GENERIC_ACL_H | ||
11 | 5 | ||
12 | #include <linux/posix_acl.h> | 6 | struct inode; |
13 | #include <linux/posix_acl_xattr.h> | ||
14 | 7 | ||
15 | /** | 8 | extern struct xattr_handler generic_acl_access_handler; |
16 | * struct generic_acl_operations - filesystem operations | 9 | extern struct xattr_handler generic_acl_default_handler; |
17 | * | ||
18 | * Filesystems must make these operations available to the generic | ||
19 | * operations. | ||
20 | */ | ||
21 | struct generic_acl_operations { | ||
22 | struct posix_acl *(*getacl)(struct inode *, int); | ||
23 | void (*setacl)(struct inode *, int, struct posix_acl *); | ||
24 | }; | ||
25 | 10 | ||
26 | size_t generic_acl_list(struct inode *, struct generic_acl_operations *, int, | 11 | int generic_acl_init(struct inode *, struct inode *); |
27 | char *, size_t); | 12 | int generic_acl_chmod(struct inode *); |
28 | int generic_acl_get(struct inode *, struct generic_acl_operations *, int, | 13 | int generic_check_acl(struct inode *inode, int mask); |
29 | void *, size_t); | ||
30 | int generic_acl_set(struct inode *, struct generic_acl_operations *, int, | ||
31 | const void *, size_t); | ||
32 | int generic_acl_init(struct inode *, struct inode *, | ||
33 | struct generic_acl_operations *); | ||
34 | int generic_acl_chmod(struct inode *, struct generic_acl_operations *); | ||
35 | 14 | ||
36 | #endif | 15 | #endif /* LINUX_GENERIC_ACL_H */ |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index c6c0c41af35f..9717081c75ad 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -256,9 +256,9 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, | |||
256 | #define part_stat_read(part, field) \ | 256 | #define part_stat_read(part, field) \ |
257 | ({ \ | 257 | ({ \ |
258 | typeof((part)->dkstats->field) res = 0; \ | 258 | typeof((part)->dkstats->field) res = 0; \ |
259 | int i; \ | 259 | unsigned int _cpu; \ |
260 | for_each_possible_cpu(i) \ | 260 | for_each_possible_cpu(_cpu) \ |
261 | res += per_cpu_ptr((part)->dkstats, i)->field; \ | 261 | res += per_cpu_ptr((part)->dkstats, _cpu)->field; \ |
262 | res; \ | 262 | res; \ |
263 | }) | 263 | }) |
264 | 264 | ||
diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 059bd189d35d..4e949a5b5b85 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h | |||
@@ -99,6 +99,12 @@ static inline int gpio_export_link(struct device *dev, const char *name, | |||
99 | return -EINVAL; | 99 | return -EINVAL; |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline int gpio_sysfs_set_active_low(unsigned gpio, int value) | ||
103 | { | ||
104 | /* GPIO can never have been requested */ | ||
105 | WARN_ON(1); | ||
106 | return -EINVAL; | ||
107 | } | ||
102 | 108 | ||
103 | static inline void gpio_unexport(unsigned gpio) | 109 | static inline void gpio_unexport(unsigned gpio) |
104 | { | 110 | { |
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 211ff4497269..ab2cc20e21a5 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -46,7 +46,7 @@ void kmap_flush_unused(void); | |||
46 | 46 | ||
47 | static inline unsigned int nr_free_highpages(void) { return 0; } | 47 | static inline unsigned int nr_free_highpages(void) { return 0; } |
48 | 48 | ||
49 | #define totalhigh_pages 0 | 49 | #define totalhigh_pages 0UL |
50 | 50 | ||
51 | #ifndef ARCH_HAS_KMAP | 51 | #ifndef ARCH_HAS_KMAP |
52 | static inline void *kmap(struct page *page) | 52 | static inline void *kmap(struct page *page) |
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index af634e95871d..5d86fb2309d2 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -169,7 +169,7 @@ struct hrtimer_clock_base { | |||
169 | * @max_hang_time: Maximum time spent in hrtimer_interrupt | 169 | * @max_hang_time: Maximum time spent in hrtimer_interrupt |
170 | */ | 170 | */ |
171 | struct hrtimer_cpu_base { | 171 | struct hrtimer_cpu_base { |
172 | spinlock_t lock; | 172 | raw_spinlock_t lock; |
173 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | 173 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; |
174 | #ifdef CONFIG_HIGH_RES_TIMERS | 174 | #ifdef CONFIG_HIGH_RES_TIMERS |
175 | ktime_t expires_next; | 175 | ktime_t expires_next; |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 41a59afc70fa..78b4bc64c006 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -23,6 +23,12 @@ void reset_vma_resv_huge_pages(struct vm_area_struct *vma); | |||
23 | int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); | 23 | int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); |
24 | int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); | 24 | int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); |
25 | int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); | 25 | int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); |
26 | |||
27 | #ifdef CONFIG_NUMA | ||
28 | int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, | ||
29 | void __user *, size_t *, loff_t *); | ||
30 | #endif | ||
31 | |||
26 | int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); | 32 | int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *); |
27 | int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, | 33 | int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, |
28 | struct page **, struct vm_area_struct **, | 34 | struct page **, struct vm_area_struct **, |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 419ab546b266..02fc617782ef 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -110,7 +110,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, | |||
110 | * @driver: Device driver model driver | 110 | * @driver: Device driver model driver |
111 | * @id_table: List of I2C devices supported by this driver | 111 | * @id_table: List of I2C devices supported by this driver |
112 | * @detect: Callback for device detection | 112 | * @detect: Callback for device detection |
113 | * @address_data: The I2C addresses to probe (for detect) | 113 | * @address_list: The I2C addresses to probe (for detect) |
114 | * @clients: List of detected clients we created (for i2c-core use only) | 114 | * @clients: List of detected clients we created (for i2c-core use only) |
115 | * | 115 | * |
116 | * The driver.owner field should be set to the module owner of this driver. | 116 | * The driver.owner field should be set to the module owner of this driver. |
@@ -161,8 +161,8 @@ struct i2c_driver { | |||
161 | const struct i2c_device_id *id_table; | 161 | const struct i2c_device_id *id_table; |
162 | 162 | ||
163 | /* Device detection callback for automatic device creation */ | 163 | /* Device detection callback for automatic device creation */ |
164 | int (*detect)(struct i2c_client *, int kind, struct i2c_board_info *); | 164 | int (*detect)(struct i2c_client *, struct i2c_board_info *); |
165 | const struct i2c_client_address_data *address_data; | 165 | const unsigned short *address_list; |
166 | struct list_head clients; | 166 | struct list_head clients; |
167 | }; | 167 | }; |
168 | #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) | 168 | #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver) |
@@ -391,14 +391,6 @@ static inline void i2c_unlock_adapter(struct i2c_adapter *adapter) | |||
391 | #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ | 391 | #define I2C_CLASS_DDC (1<<3) /* DDC bus on graphics adapters */ |
392 | #define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ | 392 | #define I2C_CLASS_SPD (1<<7) /* SPD EEPROMs and similar */ |
393 | 393 | ||
394 | /* i2c_client_address_data is the struct for holding default client | ||
395 | * addresses for a driver and for the parameters supplied on the | ||
396 | * command line | ||
397 | */ | ||
398 | struct i2c_client_address_data { | ||
399 | const unsigned short *normal_i2c; | ||
400 | }; | ||
401 | |||
402 | /* Internal numbers to terminate lists */ | 394 | /* Internal numbers to terminate lists */ |
403 | #define I2C_CLIENT_END 0xfffeU | 395 | #define I2C_CLIENT_END 0xfffeU |
404 | 396 | ||
@@ -576,82 +568,4 @@ union i2c_smbus_data { | |||
576 | #define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */ | 568 | #define I2C_SMBUS_BLOCK_PROC_CALL 7 /* SMBus 2.0 */ |
577 | #define I2C_SMBUS_I2C_BLOCK_DATA 8 | 569 | #define I2C_SMBUS_I2C_BLOCK_DATA 8 |
578 | 570 | ||
579 | |||
580 | #ifdef __KERNEL__ | ||
581 | |||
582 | /* These defines are used for probing i2c client addresses */ | ||
583 | /* The length of the option lists */ | ||
584 | #define I2C_CLIENT_MAX_OPTS 48 | ||
585 | |||
586 | /* Default fill of many variables */ | ||
587 | #define I2C_CLIENT_DEFAULTS {I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
588 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
589 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
590 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
591 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
592 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
593 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
594 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
595 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
596 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
597 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
598 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
599 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
600 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
601 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END, \ | ||
602 | I2C_CLIENT_END, I2C_CLIENT_END, I2C_CLIENT_END} | ||
603 | |||
604 | /* I2C_CLIENT_MODULE_PARM creates a module parameter, and puts it in the | ||
605 | module header */ | ||
606 | |||
607 | #define I2C_CLIENT_MODULE_PARM(var,desc) \ | ||
608 | static unsigned short var[I2C_CLIENT_MAX_OPTS] = I2C_CLIENT_DEFAULTS; \ | ||
609 | static unsigned int var##_num; \ | ||
610 | module_param_array(var, short, &var##_num, 0); \ | ||
611 | MODULE_PARM_DESC(var, desc) | ||
612 | |||
613 | #define I2C_CLIENT_INSMOD_COMMON \ | ||
614 | static const struct i2c_client_address_data addr_data = { \ | ||
615 | .normal_i2c = normal_i2c, \ | ||
616 | } | ||
617 | |||
618 | /* These are the ones you want to use in your own drivers. Pick the one | ||
619 | which matches the number of devices the driver differenciates between. */ | ||
620 | #define I2C_CLIENT_INSMOD \ | ||
621 | I2C_CLIENT_INSMOD_COMMON | ||
622 | |||
623 | #define I2C_CLIENT_INSMOD_1(chip1) \ | ||
624 | enum chips { any_chip, chip1 }; \ | ||
625 | I2C_CLIENT_INSMOD_COMMON | ||
626 | |||
627 | #define I2C_CLIENT_INSMOD_2(chip1, chip2) \ | ||
628 | enum chips { any_chip, chip1, chip2 }; \ | ||
629 | I2C_CLIENT_INSMOD_COMMON | ||
630 | |||
631 | #define I2C_CLIENT_INSMOD_3(chip1, chip2, chip3) \ | ||
632 | enum chips { any_chip, chip1, chip2, chip3 }; \ | ||
633 | I2C_CLIENT_INSMOD_COMMON | ||
634 | |||
635 | #define I2C_CLIENT_INSMOD_4(chip1, chip2, chip3, chip4) \ | ||
636 | enum chips { any_chip, chip1, chip2, chip3, chip4 }; \ | ||
637 | I2C_CLIENT_INSMOD_COMMON | ||
638 | |||
639 | #define I2C_CLIENT_INSMOD_5(chip1, chip2, chip3, chip4, chip5) \ | ||
640 | enum chips { any_chip, chip1, chip2, chip3, chip4, chip5 }; \ | ||
641 | I2C_CLIENT_INSMOD_COMMON | ||
642 | |||
643 | #define I2C_CLIENT_INSMOD_6(chip1, chip2, chip3, chip4, chip5, chip6) \ | ||
644 | enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6 }; \ | ||
645 | I2C_CLIENT_INSMOD_COMMON | ||
646 | |||
647 | #define I2C_CLIENT_INSMOD_7(chip1, chip2, chip3, chip4, chip5, chip6, chip7) \ | ||
648 | enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \ | ||
649 | chip7 }; \ | ||
650 | I2C_CLIENT_INSMOD_COMMON | ||
651 | |||
652 | #define I2C_CLIENT_INSMOD_8(chip1, chip2, chip3, chip4, chip5, chip6, chip7, chip8) \ | ||
653 | enum chips { any_chip, chip1, chip2, chip3, chip4, chip5, chip6, \ | ||
654 | chip7, chip8 }; \ | ||
655 | I2C_CLIENT_INSMOD_COMMON | ||
656 | #endif /* __KERNEL__ */ | ||
657 | #endif /* _LINUX_I2C_H */ | 571 | #endif /* _LINUX_I2C_H */ |
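The i2c.h changes retire struct i2c_client_address_data and the I2C_CLIENT_INSMOD_* macro family: a detect-capable driver now points .address_list at a bare array of 7-bit addresses terminated by I2C_CLIENT_END, and the detect() callback loses its "kind" argument. A sketch of how such a list is declared and walked; the probing itself lives in i2c-core, and the loop below only illustrates the terminator convention:

    #include <stdio.h>

    #define I2C_CLIENT_END  0xfffeU

    /* What a driver now supplies via .address_list */
    static const unsigned short normal_i2c[] = {
        0x48, 0x49, 0x4a, I2C_CLIENT_END
    };

    int main(void)
    {
        const unsigned short *addr;

        /* i2c-core walks the array until it hits the terminator */
        for (addr = normal_i2c; *addr != I2C_CLIENT_END; addr++)
            printf("would probe 7-bit address 0x%02x\n", *addr);
        return 0;
    }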
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h index fc5db826b48e..02c9af374741 100644 --- a/include/linux/i2c/adp5588.h +++ b/include/linux/i2c/adp5588.h | |||
@@ -89,4 +89,16 @@ struct adp5588_kpad_platform_data { | |||
89 | unsigned short unlock_key2; /* Unlock Key 2 */ | 89 | unsigned short unlock_key2; /* Unlock Key 2 */ |
90 | }; | 90 | }; |
91 | 91 | ||
92 | struct adp5588_gpio_platform_data { | ||
93 | unsigned gpio_start; /* GPIO Chip base # */ | ||
94 | unsigned pullup_dis_mask; /* Pull-Up Disable Mask */ | ||
95 | int (*setup)(struct i2c_client *client, | ||
96 | int gpio, unsigned ngpio, | ||
97 | void *context); | ||
98 | int (*teardown)(struct i2c_client *client, | ||
99 | int gpio, unsigned ngpio, | ||
100 | void *context); | ||
101 | void *context; | ||
102 | }; | ||
103 | |||
92 | #endif | 104 | #endif |
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index d9724a28c0c2..163c840437d6 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
@@ -832,7 +832,7 @@ struct ieee80211_ht_cap { | |||
832 | #define IEEE80211_HT_CAP_DELAY_BA 0x0400 | 832 | #define IEEE80211_HT_CAP_DELAY_BA 0x0400 |
833 | #define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 | 833 | #define IEEE80211_HT_CAP_MAX_AMSDU 0x0800 |
834 | #define IEEE80211_HT_CAP_DSSSCCK40 0x1000 | 834 | #define IEEE80211_HT_CAP_DSSSCCK40 0x1000 |
835 | #define IEEE80211_HT_CAP_PSMP_SUPPORT 0x2000 | 835 | #define IEEE80211_HT_CAP_RESERVED 0x2000 |
836 | #define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000 | 836 | #define IEEE80211_HT_CAP_40MHZ_INTOLERANT 0x4000 |
837 | #define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000 | 837 | #define IEEE80211_HT_CAP_LSIG_TXOP_PROT 0x8000 |
838 | 838 | ||
diff --git a/include/linux/ima.h b/include/linux/ima.h index 0e3f2a4c25f6..99dc6d5cf7e5 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h | |||
@@ -13,18 +13,14 @@ | |||
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | struct linux_binprm; | 14 | struct linux_binprm; |
15 | 15 | ||
16 | #define IMA_COUNT_UPDATE 1 | ||
17 | #define IMA_COUNT_LEAVE 0 | ||
18 | |||
19 | #ifdef CONFIG_IMA | 16 | #ifdef CONFIG_IMA |
20 | extern int ima_bprm_check(struct linux_binprm *bprm); | 17 | extern int ima_bprm_check(struct linux_binprm *bprm); |
21 | extern int ima_inode_alloc(struct inode *inode); | 18 | extern int ima_inode_alloc(struct inode *inode); |
22 | extern void ima_inode_free(struct inode *inode); | 19 | extern void ima_inode_free(struct inode *inode); |
23 | extern int ima_path_check(struct path *path, int mask, int update_counts); | 20 | extern int ima_path_check(struct path *path, int mask); |
24 | extern void ima_file_free(struct file *file); | 21 | extern void ima_file_free(struct file *file); |
25 | extern int ima_file_mmap(struct file *file, unsigned long prot); | 22 | extern int ima_file_mmap(struct file *file, unsigned long prot); |
26 | extern void ima_counts_get(struct file *file); | 23 | extern void ima_counts_get(struct file *file); |
27 | extern void ima_counts_put(struct path *path, int mask); | ||
28 | 24 | ||
29 | #else | 25 | #else |
30 | static inline int ima_bprm_check(struct linux_binprm *bprm) | 26 | static inline int ima_bprm_check(struct linux_binprm *bprm) |
@@ -42,7 +38,7 @@ static inline void ima_inode_free(struct inode *inode) | |||
42 | return; | 38 | return; |
43 | } | 39 | } |
44 | 40 | ||
45 | static inline int ima_path_check(struct path *path, int mask, int update_counts) | 41 | static inline int ima_path_check(struct path *path, int mask) |
46 | { | 42 | { |
47 | return 0; | 43 | return 0; |
48 | } | 44 | } |
@@ -62,9 +58,5 @@ static inline void ima_counts_get(struct file *file) | |||
62 | return; | 58 | return; |
63 | } | 59 | } |
64 | 60 | ||
65 | static inline void ima_counts_put(struct path *path, int mask) | ||
66 | { | ||
67 | return; | ||
68 | } | ||
69 | #endif /* CONFIG_IMA_H */ | 61 | #endif /* CONFIG_IMA_H */ |
70 | #endif /* _LINUX_IMA_H */ | 62 | #endif /* _LINUX_IMA_H */ |
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 699e85c01a4d..b2304929434e 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
@@ -81,6 +81,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) | |||
81 | #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) | 81 | #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) |
82 | #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) | 82 | #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) |
83 | #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) | 83 | #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) |
84 | #define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) | ||
84 | #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ | 85 | #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ |
85 | ACCEPT_SOURCE_ROUTE) | 86 | ACCEPT_SOURCE_ROUTE) |
86 | #define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL) | 87 | #define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL) |
diff --git a/include/linux/init.h b/include/linux/init.h index ff8bde520d03..ab1d31f9352b 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -149,6 +149,8 @@ void prepare_namespace(void); | |||
149 | 149 | ||
150 | extern void (*late_time_init)(void); | 150 | extern void (*late_time_init)(void); |
151 | 151 | ||
152 | extern int initcall_debug; | ||
153 | |||
152 | #endif | 154 | #endif |
153 | 155 | ||
154 | #ifndef MODULE | 156 | #ifndef MODULE |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 8d10aa7fd4c9..abec69b63d7e 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -165,7 +165,7 @@ extern struct cred init_cred; | |||
165 | .journal_info = NULL, \ | 165 | .journal_info = NULL, \ |
166 | .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ | 166 | .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ |
167 | .fs_excl = ATOMIC_INIT(0), \ | 167 | .fs_excl = ATOMIC_INIT(0), \ |
168 | .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ | 168 | .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ |
169 | .timer_slack_ns = 50000, /* 50 usec default slack */ \ | 169 | .timer_slack_ns = 50000, /* 50 usec default slack */ \ |
170 | .pids = { \ | 170 | .pids = { \ |
171 | [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ | 171 | [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 4f0a72a9740c..9310c699a37d 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -332,6 +332,7 @@ struct intel_iommu { | |||
332 | #ifdef CONFIG_INTR_REMAP | 332 | #ifdef CONFIG_INTR_REMAP |
333 | struct ir_table *ir_table; /* Interrupt remapping info */ | 333 | struct ir_table *ir_table; /* Interrupt remapping info */ |
334 | #endif | 334 | #endif |
335 | int node; | ||
335 | }; | 336 | }; |
336 | 337 | ||
337 | static inline void __iommu_flush_cache( | 338 | static inline void __iommu_flush_cache( |
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index a63235996309..78ef023227d4 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h | |||
@@ -4,32 +4,6 @@ | |||
4 | #include <linux/radix-tree.h> | 4 | #include <linux/radix-tree.h> |
5 | #include <linux/rcupdate.h> | 5 | #include <linux/rcupdate.h> |
6 | 6 | ||
7 | /* | ||
8 | * This is the per-process anticipatory I/O scheduler state. | ||
9 | */ | ||
10 | struct as_io_context { | ||
11 | spinlock_t lock; | ||
12 | |||
13 | void (*dtor)(struct as_io_context *aic); /* destructor */ | ||
14 | void (*exit)(struct as_io_context *aic); /* called on task exit */ | ||
15 | |||
16 | unsigned long state; | ||
17 | atomic_t nr_queued; /* queued reads & sync writes */ | ||
18 | atomic_t nr_dispatched; /* number of requests gone to the drivers */ | ||
19 | |||
20 | /* IO History tracking */ | ||
21 | /* Thinktime */ | ||
22 | unsigned long last_end_request; | ||
23 | unsigned long ttime_total; | ||
24 | unsigned long ttime_samples; | ||
25 | unsigned long ttime_mean; | ||
26 | /* Layout pattern */ | ||
27 | unsigned int seek_samples; | ||
28 | sector_t last_request_pos; | ||
29 | u64 seek_total; | ||
30 | sector_t seek_mean; | ||
31 | }; | ||
32 | |||
33 | struct cfq_queue; | 7 | struct cfq_queue; |
34 | struct cfq_io_context { | 8 | struct cfq_io_context { |
35 | void *key; | 9 | void *key; |
@@ -78,7 +52,6 @@ struct io_context { | |||
78 | unsigned long last_waited; /* Time last woken after wait for request */ | 52 | unsigned long last_waited; /* Time last woken after wait for request */ |
79 | int nr_batch_requests; /* Number of requests left in the batch */ | 53 | int nr_batch_requests; /* Number of requests left in the batch */ |
80 | 54 | ||
81 | struct as_io_context *aic; | ||
82 | struct radix_tree_root radix_root; | 55 | struct radix_tree_root radix_root; |
83 | struct hlist_head cic_list; | 56 | struct hlist_head cic_list; |
84 | void *ioc_data; | 57 | void *ioc_data; |
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index 3b068e5b5671..64d1b638745d 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h | |||
@@ -14,14 +14,11 @@ static inline unsigned long iommu_device_max_index(unsigned long size, | |||
14 | extern int iommu_is_span_boundary(unsigned int index, unsigned int nr, | 14 | extern int iommu_is_span_boundary(unsigned int index, unsigned int nr, |
15 | unsigned long shift, | 15 | unsigned long shift, |
16 | unsigned long boundary_size); | 16 | unsigned long boundary_size); |
17 | extern void iommu_area_reserve(unsigned long *map, unsigned long i, int len); | ||
18 | extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, | 17 | extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, |
19 | unsigned long start, unsigned int nr, | 18 | unsigned long start, unsigned int nr, |
20 | unsigned long shift, | 19 | unsigned long shift, |
21 | unsigned long boundary_size, | 20 | unsigned long boundary_size, |
22 | unsigned long align_mask); | 21 | unsigned long align_mask); |
23 | extern void iommu_area_free(unsigned long *map, unsigned long start, | ||
24 | unsigned int nr); | ||
25 | 22 | ||
26 | extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len, | 23 | extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len, |
27 | unsigned long io_page_size); | 24 | unsigned long io_page_size); |
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 83aa81297ea3..7129504e053d 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h | |||
@@ -126,11 +126,11 @@ extern int allocate_resource(struct resource *root, struct resource *new, | |||
126 | int adjust_resource(struct resource *res, resource_size_t start, | 126 | int adjust_resource(struct resource *res, resource_size_t start, |
127 | resource_size_t size); | 127 | resource_size_t size); |
128 | resource_size_t resource_alignment(struct resource *res); | 128 | resource_size_t resource_alignment(struct resource *res); |
129 | static inline resource_size_t resource_size(struct resource *res) | 129 | static inline resource_size_t resource_size(const struct resource *res) |
130 | { | 130 | { |
131 | return res->end - res->start + 1; | 131 | return res->end - res->start + 1; |
132 | } | 132 | } |
133 | static inline unsigned long resource_type(struct resource *res) | 133 | static inline unsigned long resource_type(const struct resource *res) |
134 | { | 134 | { |
135 | return res->flags & IORESOURCE_TYPE_BITS; | 135 | return res->flags & IORESOURCE_TYPE_BITS; |
136 | } | 136 | } |
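The constification of resource_size() and resource_type() above does not change their callers; a minimal sketch of the usual pattern, assuming a driver that already obtained a valid MEM resource (the function name is illustrative, not part of this patch):

#include <linux/ioport.h>
#include <linux/io.h>

/* resource_size() is end - start + 1, so the length handed to ioremap()
 * cannot pick up the classic off-by-one. */
static void __iomem *example_map_resource(const struct resource *res)
{
	if (resource_type(res) != IORESOURCE_MEM)
		return NULL;
	return ioremap(res->start, resource_size(res));
}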
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index e408722a84c7..07baa38bce37 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h | |||
@@ -87,7 +87,7 @@ extern int mq_init_ns(struct ipc_namespace *ns); | |||
87 | /* default values */ | 87 | /* default values */ |
88 | #define DFLT_QUEUESMAX 256 /* max number of message queues */ | 88 | #define DFLT_QUEUESMAX 256 /* max number of message queues */ |
89 | #define DFLT_MSGMAX 10 /* max number of messages in each queue */ | 89 | #define DFLT_MSGMAX 10 /* max number of messages in each queue */ |
90 | #define HARD_MSGMAX (131072/sizeof(void *)) | 90 | #define HARD_MSGMAX (32768*sizeof(void *)/4) |
91 | #define DFLT_MSGSIZEMAX 8192 /* max message size */ | 91 | #define DFLT_MSGSIZEMAX 8192 /* max message size */ |
92 | #else | 92 | #else |
93 | static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; } | 93 | static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; } |
diff --git a/include/linux/irq.h b/include/linux/irq.h index a287cfc0b1a6..451481c082b5 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -192,7 +192,7 @@ struct irq_desc { | |||
192 | unsigned int irq_count; /* For detecting broken IRQs */ | 192 | unsigned int irq_count; /* For detecting broken IRQs */ |
193 | unsigned long last_unhandled; /* Aging timer for unhandled count */ | 193 | unsigned long last_unhandled; /* Aging timer for unhandled count */ |
194 | unsigned int irqs_unhandled; | 194 | unsigned int irqs_unhandled; |
195 | spinlock_t lock; | 195 | raw_spinlock_t lock; |
196 | #ifdef CONFIG_SMP | 196 | #ifdef CONFIG_SMP |
197 | cpumask_var_t affinity; | 197 | cpumask_var_t affinity; |
198 | unsigned int node; | 198 | unsigned int node; |
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index f1011f7f3d41..638ce4554c76 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h | |||
@@ -653,6 +653,7 @@ struct transaction_s | |||
653 | * waiting for it to finish. | 653 | * waiting for it to finish. |
654 | */ | 654 | */ |
655 | unsigned int t_synchronous_commit:1; | 655 | unsigned int t_synchronous_commit:1; |
656 | unsigned int t_flushed_data_blocks:1; | ||
656 | 657 | ||
657 | /* | 658 | /* |
658 | * For use by the filesystem to store fs-specific data | 659 | * For use by the filesystem to store fs-specific data |
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index 792274269f2b..d8e9b3d1c23c 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h | |||
@@ -107,18 +107,6 @@ static inline void print_symbol(const char *fmt, unsigned long addr) | |||
107 | __builtin_extract_return_addr((void *)addr)); | 107 | __builtin_extract_return_addr((void *)addr)); |
108 | } | 108 | } |
109 | 109 | ||
110 | /* | ||
111 | * Pretty-print a function pointer. This function is deprecated. | ||
112 | * Please use the "%pF" vsprintf format instead. | ||
113 | */ | ||
114 | static inline void __deprecated print_fn_descriptor_symbol(const char *fmt, void *addr) | ||
115 | { | ||
116 | #if defined(CONFIG_IA64) || defined(CONFIG_PPC64) | ||
117 | addr = *(void **)addr; | ||
118 | #endif | ||
119 | print_symbol(fmt, (unsigned long)addr); | ||
120 | } | ||
121 | |||
122 | static inline void print_ip_sym(unsigned long ip) | 110 | static inline void print_ip_sym(unsigned long ip) |
123 | { | 111 | { |
124 | printk("[<%p>] %pS\n", (void *) ip, (void *) ip); | 112 | printk("[<%p>] %pS\n", (void *) ip, (void *) ip); |
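With print_fn_descriptor_symbol() gone, callers are expected to use the %pF printk format directly (and %pS for plain text addresses, as print_ip_sym() above does). A hedged sketch of the replacement style; the function name and messages are invented for illustration:

#include <linux/kernel.h>

/* Illustrative only: report which callback is about to run. */
static void report_handler(void (*handler)(void))
{
	/* %pF resolves a function pointer (including ia64/ppc64 function
	 * descriptors) to symbol+offset, replacing the removed helper. */
	printk(KERN_INFO "invoking handler %pF\n", handler);

	/* %pS does the same for a plain text address. */
	printk(KERN_INFO "called from %pS\n", __builtin_return_address(0));
}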
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h new file mode 100644 index 000000000000..bd92a89f4b0a --- /dev/null +++ b/include/linux/kernel-page-flags.h | |||
@@ -0,0 +1,46 @@ | |||
1 | #ifndef LINUX_KERNEL_PAGE_FLAGS_H | ||
2 | #define LINUX_KERNEL_PAGE_FLAGS_H | ||
3 | |||
4 | /* | ||
5 | * Stable page flag bits exported to user space | ||
6 | */ | ||
7 | |||
8 | #define KPF_LOCKED 0 | ||
9 | #define KPF_ERROR 1 | ||
10 | #define KPF_REFERENCED 2 | ||
11 | #define KPF_UPTODATE 3 | ||
12 | #define KPF_DIRTY 4 | ||
13 | #define KPF_LRU 5 | ||
14 | #define KPF_ACTIVE 6 | ||
15 | #define KPF_SLAB 7 | ||
16 | #define KPF_WRITEBACK 8 | ||
17 | #define KPF_RECLAIM 9 | ||
18 | #define KPF_BUDDY 10 | ||
19 | |||
20 | /* 11-20: new additions in 2.6.31 */ | ||
21 | #define KPF_MMAP 11 | ||
22 | #define KPF_ANON 12 | ||
23 | #define KPF_SWAPCACHE 13 | ||
24 | #define KPF_SWAPBACKED 14 | ||
25 | #define KPF_COMPOUND_HEAD 15 | ||
26 | #define KPF_COMPOUND_TAIL 16 | ||
27 | #define KPF_HUGE 17 | ||
28 | #define KPF_UNEVICTABLE 18 | ||
29 | #define KPF_HWPOISON 19 | ||
30 | #define KPF_NOPAGE 20 | ||
31 | |||
32 | #define KPF_KSM 21 | ||
33 | |||
34 | /* kernel hacking assistance bits | ||
35 | * WARNING: subject to change, never rely on them! | ||
36 | */ | ||
37 | #define KPF_RESERVED 32 | ||
38 | #define KPF_MLOCKED 33 | ||
39 | #define KPF_MAPPEDTODISK 34 | ||
40 | #define KPF_PRIVATE 35 | ||
41 | #define KPF_PRIVATE_2 36 | ||
42 | #define KPF_OWNER_PRIVATE 37 | ||
43 | #define KPF_ARCH 38 | ||
44 | #define KPF_UNCACHED 39 | ||
45 | |||
46 | #endif /* LINUX_KERNEL_PAGE_FLAGS_H */ | ||
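These KPF_* numbers are bit positions in the per-page 64-bit words the kernel exports to user space; a hedged user-space sketch, assuming the /proc/kpageflags interface described in Documentation/vm/pagemap.txt (one u64 per page frame, readable by root) and an arbitrary example pfn:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

#define KPF_LRU   5
#define KPF_ANON 12

/* Read the 64-bit flag word for one page frame from /proc/kpageflags. */
static int read_kpageflags(unsigned long pfn, uint64_t *flags)
{
	int fd = open("/proc/kpageflags", O_RDONLY);

	if (fd < 0)
		return -1;
	if (lseek(fd, pfn * sizeof(*flags), SEEK_SET) == (off_t)-1 ||
	    read(fd, flags, sizeof(*flags)) != (ssize_t)sizeof(*flags)) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	unsigned long pfn = 1024;	/* arbitrary example frame */
	uint64_t flags;

	if (read_kpageflags(pfn, &flags) == 0)
		printf("pfn %lu: LRU=%d ANON=%d\n", pfn,
		       (int)!!(flags & (1ULL << KPF_LRU)),
		       (int)!!(flags & (1ULL << KPF_ANON)));
	return 0;
}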
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3fa4c590cf12..328bca609b9b 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -251,10 +251,10 @@ extern int printk_delay_msec; | |||
251 | * Print a one-time message (analogous to WARN_ONCE() et al): | 251 | * Print a one-time message (analogous to WARN_ONCE() et al): |
252 | */ | 252 | */ |
253 | #define printk_once(x...) ({ \ | 253 | #define printk_once(x...) ({ \ |
254 | static bool __print_once = true; \ | 254 | static bool __print_once; \ |
255 | \ | 255 | \ |
256 | if (__print_once) { \ | 256 | if (!__print_once) { \ |
257 | __print_once = false; \ | 257 | __print_once = true; \ |
258 | printk(x); \ | 258 | printk(x); \ |
259 | } \ | 259 | } \ |
260 | }) | 260 | }) |
@@ -397,15 +397,58 @@ static inline char *pack_hex_byte(char *buf, u8 byte) | |||
397 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | 397 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) |
398 | #elif defined(CONFIG_DYNAMIC_DEBUG) | 398 | #elif defined(CONFIG_DYNAMIC_DEBUG) |
399 | /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ | 399 | /* dynamic_pr_debug() uses pr_fmt() internally so we don't need it here */ |
400 | #define pr_debug(fmt, ...) do { \ | 400 | #define pr_debug(fmt, ...) \ |
401 | dynamic_pr_debug(fmt, ##__VA_ARGS__); \ | 401 | dynamic_pr_debug(fmt, ##__VA_ARGS__) |
402 | } while (0) | ||
403 | #else | 402 | #else |
404 | #define pr_debug(fmt, ...) \ | 403 | #define pr_debug(fmt, ...) \ |
405 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) | 404 | ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; }) |
406 | #endif | 405 | #endif |
407 | 406 | ||
408 | /* | 407 | /* |
408 | * ratelimited messages with local ratelimit_state, | ||
409 | * no local ratelimit_state used in the !PRINTK case | ||
410 | */ | ||
411 | #ifdef CONFIG_PRINTK | ||
412 | #define printk_ratelimited(fmt, ...) ({ \ | ||
413 | static struct ratelimit_state _rs = { \ | ||
414 | .interval = DEFAULT_RATELIMIT_INTERVAL, \ | ||
415 | .burst = DEFAULT_RATELIMIT_BURST, \ | ||
416 | }; \ | ||
417 | \ | ||
418 | if (!__ratelimit(&_rs)) \ | ||
419 | printk(fmt, ##__VA_ARGS__); \ | ||
420 | }) | ||
421 | #else | ||
422 | /* No effect, but we still get type checking even in the !PRINTK case: */ | ||
423 | #define printk_ratelimited printk | ||
424 | #endif | ||
425 | |||
426 | #define pr_emerg_ratelimited(fmt, ...) \ | ||
427 | printk_ratelimited(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__) | ||
428 | #define pr_alert_ratelimited(fmt, ...) \ | ||
429 | printk_ratelimited(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__) | ||
430 | #define pr_crit_ratelimited(fmt, ...) \ | ||
431 | printk_ratelimited(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__) | ||
432 | #define pr_err_ratelimited(fmt, ...) \ | ||
433 | printk_ratelimited(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__) | ||
434 | #define pr_warning_ratelimited(fmt, ...) \ | ||
435 | printk_ratelimited(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__) | ||
436 | #define pr_notice_ratelimited(fmt, ...) \ | ||
437 | printk_ratelimited(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__) | ||
438 | #define pr_info_ratelimited(fmt, ...) \ | ||
439 | printk_ratelimited(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | ||
440 | /* no pr_cont_ratelimited, don't do that... */ | ||
441 | /* If you are writing a driver, please use dev_dbg instead */ | ||
442 | #if defined(DEBUG) | ||
443 | #define pr_debug_ratelimited(fmt, ...) \ | ||
444 | printk_ratelimited(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) | ||
445 | #else | ||
446 | #define pr_debug_ratelimited(fmt, ...) \ | ||
447 | ({ if (0) printk_ratelimited(KERN_DEBUG pr_fmt(fmt), \ | ||
448 | ##__VA_ARGS__); 0; }) | ||
449 | #endif | ||
450 | |||
451 | /* | ||
409 | * General tracing related utility functions - trace_printk(), | 452 | * General tracing related utility functions - trace_printk(), |
410 | * tracing_on/tracing_off and tracing_start()/tracing_stop | 453 | * tracing_on/tracing_off and tracing_start()/tracing_stop |
411 | * | 454 | * |
@@ -492,6 +535,8 @@ extern int | |||
492 | __trace_printk(unsigned long ip, const char *fmt, ...) | 535 | __trace_printk(unsigned long ip, const char *fmt, ...) |
493 | __attribute__ ((format (printf, 2, 3))); | 536 | __attribute__ ((format (printf, 2, 3))); |
494 | 537 | ||
538 | extern void trace_dump_stack(void); | ||
539 | |||
495 | /* | 540 | /* |
496 | * The double __builtin_constant_p is because gcc will give us an error | 541 | * The double __builtin_constant_p is because gcc will give us an error |
497 | * if we try to allocate the static variable to fmt if it is not a | 542 | * if we try to allocate the static variable to fmt if it is not a |
@@ -525,6 +570,7 @@ trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); | |||
525 | static inline void tracing_start(void) { } | 570 | static inline void tracing_start(void) { } |
526 | static inline void tracing_stop(void) { } | 571 | static inline void tracing_stop(void) { } |
527 | static inline void ftrace_off_permanent(void) { } | 572 | static inline void ftrace_off_permanent(void) { } |
573 | static inline void trace_dump_stack(void) { } | ||
528 | static inline int | 574 | static inline int |
529 | trace_printk(const char *fmt, ...) | 575 | trace_printk(const char *fmt, ...) |
530 | { | 576 | { |
@@ -688,6 +734,10 @@ struct sysinfo { | |||
688 | /* Force a compilation error if condition is constant and true */ | 734 | /* Force a compilation error if condition is constant and true */ |
689 | #define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)])) | 735 | #define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)])) |
690 | 736 | ||
737 | /* Force a compilation error if a constant expression is not a power of 2 */ | ||
738 | #define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ | ||
739 | BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) | ||
740 | |||
691 | /* Force a compilation error if condition is true, but also produce a | 741 | /* Force a compilation error if condition is true, but also produce a |
692 | result (of value 0 and type size_t), so the expression can be used | 742 | result (of value 0 and type size_t), so the expression can be used |
693 | e.g. in a structure initializer (or where-ever else comma expressions | 743 | e.g. in a structure initializer (or where-ever else comma expressions |
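The new printk_ratelimited()/pr_*_ratelimited() helpers and BUILD_BUG_ON_NOT_POWER_OF_2() from the hunks above combine as in this minimal sketch; the ring size and the error path are invented for illustration:

#include <linux/kernel.h>
#include <linux/ratelimit.h>

#define EXAMPLE_RING_SIZE 64	/* invented for illustration */

/* Each pr_err_ratelimited() expansion carries its own ratelimit_state,
 * so a burst of failures from this path is throttled on its own. */
static void example_handle_overrun(int dropped)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(EXAMPLE_RING_SIZE);

	pr_err_ratelimited("example: ring overrun, dropped %d entries\n",
			   dropped);
}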
diff --git a/include/linux/kexec.h b/include/linux/kexec.h index adc34f2c6eff..c356b6914ffd 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h | |||
@@ -206,6 +206,8 @@ extern size_t vmcoreinfo_max_size; | |||
206 | 206 | ||
207 | int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, | 207 | int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, |
208 | unsigned long long *crash_size, unsigned long long *crash_base); | 208 | unsigned long long *crash_size, unsigned long long *crash_base); |
209 | int crash_shrink_memory(unsigned long new_size); | ||
210 | size_t crash_get_memory_size(void); | ||
209 | 211 | ||
210 | #else /* !CONFIG_KEXEC */ | 212 | #else /* !CONFIG_KEXEC */ |
211 | struct pt_regs; | 213 | struct pt_regs; |
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index ad6bdf5a5970..6f6c5f300af6 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * A simple kernel FIFO implementation. | 2 | * A generic kernel FIFO implementation. |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net> | ||
4 | * Copyright (C) 2004 Stelian Pop <stelian@popies.net> | 5 | * Copyright (C) 2004 Stelian Pop <stelian@popies.net> |
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -18,6 +19,25 @@ | |||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
19 | * | 20 | * |
20 | */ | 21 | */ |
22 | |||
23 | /* | ||
24 | * Howto porting drivers to the new generic fifo API: | ||
25 | * | ||
26 | * - Modify the declaration of the "struct kfifo *" object into an | ||
27 | * in-place "struct kfifo" object | ||
28 | * - Init the in-place object with kfifo_alloc() or kfifo_init() | ||
29 | * Note: The address of the in-place "struct kfifo" object must be | ||
30 | * passed as the first argument to these functions | ||
31 | * - Replace calls to __kfifo_put with kfifo_in and __kfifo_get | ||
32 | * with kfifo_out | ||
33 | * - Replace calls to kfifo_put with kfifo_in_locked and kfifo_get | ||
34 | * with kfifo_out_locked | ||
35 | * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc | ||
36 | * must now be passed to kfifo_in_locked and kfifo_out_locked | ||
37 | * as the last parameter. | ||
38 | * - All functions formerly named __kfifo_* have been renamed to kfifo_* | ||
39 | */ | ||
40 | |||
21 | #ifndef _LINUX_KFIFO_H | 41 | #ifndef _LINUX_KFIFO_H |
22 | #define _LINUX_KFIFO_H | 42 | #define _LINUX_KFIFO_H |
23 | 43 | ||
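Applied to a driver that used the old pointer-based, internally locked kfifo, the porting steps above amount to roughly the following before/after sketch (the example_port structure, the rx_fifo name and the 256-byte size are invented; 256 keeps the power-of-two requirement):

#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

/* Old style (removed by this patch):
 *
 *	struct kfifo *rx_fifo = kfifo_alloc(256, GFP_KERNEL, &rx_lock);
 *	kfifo_put(rx_fifo, buf, len);
 *	kfifo_get(rx_fifo, buf, len);
 */

/* New style: the fifo lives in the driver structure and the spinlock is
 * handed to each locked call instead of to kfifo_alloc(). */
struct example_port {
	struct kfifo rx_fifo;
	spinlock_t rx_lock;
};

static int example_port_init(struct example_port *p)
{
	spin_lock_init(&p->rx_lock);
	return kfifo_alloc(&p->rx_fifo, 256, GFP_KERNEL);
}

static unsigned int example_port_rx(struct example_port *p,
				    const void *buf, unsigned int len)
{
	return kfifo_in_locked(&p->rx_fifo, buf, len, &p->rx_lock);
}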
@@ -29,26 +49,82 @@ struct kfifo { | |||
29 | unsigned int size; /* the size of the allocated buffer */ | 49 | unsigned int size; /* the size of the allocated buffer */ |
30 | unsigned int in; /* data is added at offset (in % size) */ | 50 | unsigned int in; /* data is added at offset (in % size) */ |
31 | unsigned int out; /* data is extracted from off. (out % size) */ | 51 | unsigned int out; /* data is extracted from off. (out % size) */ |
32 | spinlock_t *lock; /* protects concurrent modifications */ | ||
33 | }; | 52 | }; |
34 | 53 | ||
35 | extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, | 54 | /* |
36 | gfp_t gfp_mask, spinlock_t *lock); | 55 | * Macros for declaration and initialization of the kfifo datatype |
37 | extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, | 56 | */ |
38 | spinlock_t *lock); | 57 | |
58 | /* helper macro */ | ||
59 | #define __kfifo_initializer(s, b) \ | ||
60 | (struct kfifo) { \ | ||
61 | .size = s, \ | ||
62 | .in = 0, \ | ||
63 | .out = 0, \ | ||
64 | .buffer = b \ | ||
65 | } | ||
66 | |||
67 | /** | ||
68 | * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer | ||
69 | * @name: name of the declared kfifo datatype | ||
70 | * @size: size of the fifo buffer. Must be a power of two. | ||
71 | * | ||
72 | * Note1: the macro can be used inside struct or union declaration | ||
73 | * Note2: the macro creates two objects: | ||
74 | * A kfifo object with the given name and a buffer for the kfifo | ||
75 | * object named name##kfifo_buffer | ||
76 | */ | ||
77 | #define DECLARE_KFIFO(name, size) \ | ||
78 | union { \ | ||
79 | struct kfifo name; \ | ||
80 | unsigned char name##kfifo_buffer[size + sizeof(struct kfifo)]; \ | ||
81 | } | ||
82 | |||
83 | /** | ||
84 | * INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO | ||
85 | * @name: name of the declared kfifo datatype | ||
86 | */ | ||
87 | #define INIT_KFIFO(name) \ | ||
88 | name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \ | ||
89 | sizeof(struct kfifo), name##kfifo_buffer) | ||
90 | |||
91 | /** | ||
92 | * DEFINE_KFIFO - macro to define and initialize a kfifo | ||
93 | * @name: name of the declared kfifo datatype | ||
94 | * @size: size of the fifo buffer. Must be a power of two. | ||
95 | * | ||
96 | * Note1: the macro can be used for global and local kfifo data type variables | ||
97 | * Note2: the macro creates two objects: | ||
98 | * A kfifo object with the given name and a buffer for the kfifo | ||
99 | * object named name##kfifo_buffer | ||
100 | */ | ||
101 | #define DEFINE_KFIFO(name, size) \ | ||
102 | unsigned char name##kfifo_buffer[size]; \ | ||
103 | struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer) | ||
104 | |||
105 | #undef __kfifo_initializer | ||
106 | |||
107 | extern void kfifo_init(struct kfifo *fifo, void *buffer, | ||
108 | unsigned int size); | ||
109 | extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size, | ||
110 | gfp_t gfp_mask); | ||
39 | extern void kfifo_free(struct kfifo *fifo); | 111 | extern void kfifo_free(struct kfifo *fifo); |
40 | extern unsigned int __kfifo_put(struct kfifo *fifo, | 112 | extern unsigned int kfifo_in(struct kfifo *fifo, |
41 | const unsigned char *buffer, unsigned int len); | 113 | const void *from, unsigned int len); |
42 | extern unsigned int __kfifo_get(struct kfifo *fifo, | 114 | extern __must_check unsigned int kfifo_out(struct kfifo *fifo, |
43 | unsigned char *buffer, unsigned int len); | 115 | void *to, unsigned int len); |
116 | extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo, | ||
117 | void *to, unsigned int len, unsigned offset); | ||
44 | 118 | ||
45 | /** | 119 | /** |
46 | * __kfifo_reset - removes the entire FIFO contents, no locking version | 120 | * kfifo_initialized - Check if kfifo is initialized. |
47 | * @fifo: the fifo to be emptied. | 121 | * @fifo: fifo to check |
122 | * Return %true if FIFO is initialized, otherwise %false. | ||
123 | * Assumes the fifo was 0 before. | ||
48 | */ | 124 | */ |
49 | static inline void __kfifo_reset(struct kfifo *fifo) | 125 | static inline bool kfifo_initialized(struct kfifo *fifo) |
50 | { | 126 | { |
51 | fifo->in = fifo->out = 0; | 127 | return fifo->buffer != 0; |
52 | } | 128 | } |
53 | 129 | ||
54 | /** | 130 | /** |
@@ -57,96 +133,484 @@ static inline void __kfifo_reset(struct kfifo *fifo) | |||
57 | */ | 133 | */ |
58 | static inline void kfifo_reset(struct kfifo *fifo) | 134 | static inline void kfifo_reset(struct kfifo *fifo) |
59 | { | 135 | { |
60 | unsigned long flags; | 136 | fifo->in = fifo->out = 0; |
137 | } | ||
61 | 138 | ||
62 | spin_lock_irqsave(fifo->lock, flags); | 139 | /** |
140 | * kfifo_reset_out - skip FIFO contents | ||
141 | * @fifo: the fifo to be emptied. | ||
142 | */ | ||
143 | static inline void kfifo_reset_out(struct kfifo *fifo) | ||
144 | { | ||
145 | smp_mb(); | ||
146 | fifo->out = fifo->in; | ||
147 | } | ||
63 | 148 | ||
64 | __kfifo_reset(fifo); | 149 | /** |
150 | * kfifo_size - returns the size of the fifo in bytes | ||
151 | * @fifo: the fifo to be used. | ||
152 | */ | ||
153 | static inline __must_check unsigned int kfifo_size(struct kfifo *fifo) | ||
154 | { | ||
155 | return fifo->size; | ||
156 | } | ||
65 | 157 | ||
66 | spin_unlock_irqrestore(fifo->lock, flags); | 158 | /** |
159 | * kfifo_len - returns the number of used bytes in the FIFO | ||
160 | * @fifo: the fifo to be used. | ||
161 | */ | ||
162 | static inline unsigned int kfifo_len(struct kfifo *fifo) | ||
163 | { | ||
164 | register unsigned int out; | ||
165 | |||
166 | out = fifo->out; | ||
167 | smp_rmb(); | ||
168 | return fifo->in - out; | ||
67 | } | 169 | } |
68 | 170 | ||
69 | /** | 171 | /** |
70 | * kfifo_put - puts some data into the FIFO | 172 | * kfifo_is_empty - returns true if the fifo is empty |
71 | * @fifo: the fifo to be used. | 173 | * @fifo: the fifo to be used. |
72 | * @buffer: the data to be added. | 174 | */ |
73 | * @len: the length of the data to be added. | 175 | static inline __must_check int kfifo_is_empty(struct kfifo *fifo) |
176 | { | ||
177 | return fifo->in == fifo->out; | ||
178 | } | ||
179 | |||
180 | /** | ||
181 | * kfifo_is_full - returns true if the fifo is full | ||
182 | * @fifo: the fifo to be used. | ||
183 | */ | ||
184 | static inline __must_check int kfifo_is_full(struct kfifo *fifo) | ||
185 | { | ||
186 | return kfifo_len(fifo) == kfifo_size(fifo); | ||
187 | } | ||
188 | |||
189 | /** | ||
190 | * kfifo_avail - returns the number of bytes available in the FIFO | ||
191 | * @fifo: the fifo to be used. | ||
192 | */ | ||
193 | static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo) | ||
194 | { | ||
195 | return kfifo_size(fifo) - kfifo_len(fifo); | ||
196 | } | ||
197 | |||
198 | /** | ||
199 | * kfifo_in_locked - puts some data into the FIFO using a spinlock for locking | ||
200 | * @fifo: the fifo to be used. | ||
201 | * @from: the data to be added. | ||
202 | * @n: the length of the data to be added. | ||
203 | * @lock: pointer to the spinlock to use for locking. | ||
74 | * | 204 | * |
75 | * This function copies at most @len bytes from the @buffer into | 205 | * This function copies at most @len bytes from the @from buffer into |
76 | * the FIFO depending on the free space, and returns the number of | 206 | * the FIFO depending on the free space, and returns the number of |
77 | * bytes copied. | 207 | * bytes copied. |
78 | */ | 208 | */ |
79 | static inline unsigned int kfifo_put(struct kfifo *fifo, | 209 | static inline unsigned int kfifo_in_locked(struct kfifo *fifo, |
80 | const unsigned char *buffer, unsigned int len) | 210 | const void *from, unsigned int n, spinlock_t *lock) |
81 | { | 211 | { |
82 | unsigned long flags; | 212 | unsigned long flags; |
83 | unsigned int ret; | 213 | unsigned int ret; |
84 | 214 | ||
85 | spin_lock_irqsave(fifo->lock, flags); | 215 | spin_lock_irqsave(lock, flags); |
86 | 216 | ||
87 | ret = __kfifo_put(fifo, buffer, len); | 217 | ret = kfifo_in(fifo, from, n); |
88 | 218 | ||
89 | spin_unlock_irqrestore(fifo->lock, flags); | 219 | spin_unlock_irqrestore(lock, flags); |
90 | 220 | ||
91 | return ret; | 221 | return ret; |
92 | } | 222 | } |
93 | 223 | ||
94 | /** | 224 | /** |
95 | * kfifo_get - gets some data from the FIFO | 225 | * kfifo_out_locked - gets some data from the FIFO using a spinlock for locking |
96 | * @fifo: the fifo to be used. | 226 | * @fifo: the fifo to be used. |
97 | * @buffer: where the data must be copied. | 227 | * @to: where the data must be copied. |
98 | * @len: the size of the destination buffer. | 228 | * @n: the size of the destination buffer. |
229 | * @lock: pointer to the spinlock to use for locking. | ||
99 | * | 230 | * |
100 | * This function copies at most @len bytes from the FIFO into the | 231 | * This function copies at most @len bytes from the FIFO into the |
101 | * @buffer and returns the number of copied bytes. | 232 | * @to buffer and returns the number of copied bytes. |
102 | */ | 233 | */ |
103 | static inline unsigned int kfifo_get(struct kfifo *fifo, | 234 | static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo, |
104 | unsigned char *buffer, unsigned int len) | 235 | void *to, unsigned int n, spinlock_t *lock) |
105 | { | 236 | { |
106 | unsigned long flags; | 237 | unsigned long flags; |
107 | unsigned int ret; | 238 | unsigned int ret; |
108 | 239 | ||
109 | spin_lock_irqsave(fifo->lock, flags); | 240 | spin_lock_irqsave(lock, flags); |
241 | |||
242 | ret = kfifo_out(fifo, to, n); | ||
243 | |||
244 | spin_unlock_irqrestore(lock, flags); | ||
245 | |||
246 | return ret; | ||
247 | } | ||
248 | |||
249 | extern void kfifo_skip(struct kfifo *fifo, unsigned int len); | ||
250 | |||
251 | extern __must_check int kfifo_from_user(struct kfifo *fifo, | ||
252 | const void __user *from, unsigned int n, unsigned *lenout); | ||
253 | |||
254 | extern __must_check int kfifo_to_user(struct kfifo *fifo, | ||
255 | void __user *to, unsigned int n, unsigned *lenout); | ||
256 | |||
257 | /* | ||
258 | * __kfifo_add_out internal helper function for updating the out offset | ||
259 | */ | ||
260 | static inline void __kfifo_add_out(struct kfifo *fifo, | ||
261 | unsigned int off) | ||
262 | { | ||
263 | smp_mb(); | ||
264 | fifo->out += off; | ||
265 | } | ||
266 | |||
267 | /* | ||
268 | * __kfifo_add_in internal helper function for updating the in offset | ||
269 | */ | ||
270 | static inline void __kfifo_add_in(struct kfifo *fifo, | ||
271 | unsigned int off) | ||
272 | { | ||
273 | smp_wmb(); | ||
274 | fifo->in += off; | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * __kfifo_off internal helper function for calculating the index of a | ||
279 | * given offset | ||
280 | */ | ||
281 | static inline unsigned int __kfifo_off(struct kfifo *fifo, unsigned int off) | ||
282 | { | ||
283 | return off & (fifo->size - 1); | ||
284 | } | ||
285 | |||
286 | /* | ||
287 | * __kfifo_peek_n internal helper function for determining the length of | ||
288 | * the next record in the fifo | ||
289 | */ | ||
290 | static inline unsigned int __kfifo_peek_n(struct kfifo *fifo, | ||
291 | unsigned int recsize) | ||
292 | { | ||
293 | #define __KFIFO_GET(fifo, off, shift) \ | ||
294 | ((fifo)->buffer[__kfifo_off((fifo), (fifo)->out+(off))] << (shift)) | ||
295 | |||
296 | unsigned int l; | ||
297 | |||
298 | l = __KFIFO_GET(fifo, 0, 0); | ||
299 | |||
300 | if (--recsize) | ||
301 | l |= __KFIFO_GET(fifo, 1, 8); | ||
302 | |||
303 | return l; | ||
304 | #undef __KFIFO_GET | ||
305 | } | ||
306 | |||
307 | /* | ||
308 | * __kfifo_poke_n internal helper function for storing the length of | ||
309 | * the next record into the fifo | ||
310 | */ | ||
311 | static inline void __kfifo_poke_n(struct kfifo *fifo, | ||
312 | unsigned int recsize, unsigned int n) | ||
313 | { | ||
314 | #define __KFIFO_PUT(fifo, off, val, shift) \ | ||
315 | ( \ | ||
316 | (fifo)->buffer[__kfifo_off((fifo), (fifo)->in+(off))] = \ | ||
317 | (unsigned char)((val) >> (shift)) \ | ||
318 | ) | ||
319 | |||
320 | __KFIFO_PUT(fifo, 0, n, 0); | ||
321 | |||
322 | if (--recsize) | ||
323 | __KFIFO_PUT(fifo, 1, n, 8); | ||
324 | #undef __KFIFO_PUT | ||
325 | } | ||
326 | |||
327 | /* | ||
328 | * __kfifo_in_... internal functions for putting data into the fifo | ||
329 | * do not call it directly, use kfifo_in_rec() instead | ||
330 | */ | ||
331 | extern unsigned int __kfifo_in_n(struct kfifo *fifo, | ||
332 | const void *from, unsigned int n, unsigned int recsize); | ||
110 | 333 | ||
111 | ret = __kfifo_get(fifo, buffer, len); | 334 | extern unsigned int __kfifo_in_generic(struct kfifo *fifo, |
335 | const void *from, unsigned int n, unsigned int recsize); | ||
112 | 336 | ||
113 | /* | 337 | static inline unsigned int __kfifo_in_rec(struct kfifo *fifo, |
114 | * optimization: if the FIFO is empty, set the indices to 0 | 338 | const void *from, unsigned int n, unsigned int recsize) |
115 | * so we don't wrap the next time | 339 | { |
116 | */ | 340 | unsigned int ret; |
117 | if (fifo->in == fifo->out) | ||
118 | fifo->in = fifo->out = 0; | ||
119 | 341 | ||
120 | spin_unlock_irqrestore(fifo->lock, flags); | 342 | ret = __kfifo_in_n(fifo, from, n, recsize); |
121 | 343 | ||
344 | if (likely(ret == 0)) { | ||
345 | if (recsize) | ||
346 | __kfifo_poke_n(fifo, recsize, n); | ||
347 | __kfifo_add_in(fifo, n + recsize); | ||
348 | } | ||
122 | return ret; | 349 | return ret; |
123 | } | 350 | } |
124 | 351 | ||
125 | /** | 352 | /** |
126 | * __kfifo_len - returns the number of bytes available in the FIFO, no locking version | 353 | * kfifo_in_rec - puts some record data into the FIFO |
127 | * @fifo: the fifo to be used. | 354 | * @fifo: the fifo to be used. |
355 | * @from: the data to be added. | ||
356 | * @n: the length of the data to be added. | ||
357 | * @recsize: size of record field | ||
358 | * | ||
359 | * This function copies @n bytes from the @from into the FIFO and returns | ||
360 | * the number of bytes which cannot be copied. | ||
361 | * A returned value greater than the @n value means that the record doesn't | ||
362 | * fit into the buffer. | ||
363 | * | ||
364 | * Note that with only one concurrent reader and one concurrent | ||
365 | * writer, you don't need extra locking to use these functions. | ||
366 | */ | ||
367 | static inline __must_check unsigned int kfifo_in_rec(struct kfifo *fifo, | ||
368 | void *from, unsigned int n, unsigned int recsize) | ||
369 | { | ||
370 | if (!__builtin_constant_p(recsize)) | ||
371 | return __kfifo_in_generic(fifo, from, n, recsize); | ||
372 | return __kfifo_in_rec(fifo, from, n, recsize); | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * __kfifo_out_... internal functions for getting data from the fifo | ||
377 | * do not call it directly, use kfifo_out_rec() instead | ||
128 | */ | 378 | */ |
129 | static inline unsigned int __kfifo_len(struct kfifo *fifo) | 379 | extern unsigned int __kfifo_out_n(struct kfifo *fifo, |
380 | void *to, unsigned int reclen, unsigned int recsize); | ||
381 | |||
382 | extern unsigned int __kfifo_out_generic(struct kfifo *fifo, | ||
383 | void *to, unsigned int n, | ||
384 | unsigned int recsize, unsigned int *total); | ||
385 | |||
386 | static inline unsigned int __kfifo_out_rec(struct kfifo *fifo, | ||
387 | void *to, unsigned int n, unsigned int recsize, | ||
388 | unsigned int *total) | ||
130 | { | 389 | { |
131 | return fifo->in - fifo->out; | 390 | unsigned int l; |
391 | |||
392 | if (!recsize) { | ||
393 | l = n; | ||
394 | if (total) | ||
395 | *total = l; | ||
396 | } else { | ||
397 | l = __kfifo_peek_n(fifo, recsize); | ||
398 | if (total) | ||
399 | *total = l; | ||
400 | if (n < l) | ||
401 | return l; | ||
402 | } | ||
403 | |||
404 | return __kfifo_out_n(fifo, to, l, recsize); | ||
132 | } | 405 | } |
133 | 406 | ||
134 | /** | 407 | /** |
135 | * kfifo_len - returns the number of bytes available in the FIFO | 408 | * kfifo_out_rec - gets some record data from the FIFO |
136 | * @fifo: the fifo to be used. | 409 | * @fifo: the fifo to be used. |
410 | * @to: where the data must be copied. | ||
411 | * @n: the size of the destination buffer. | ||
412 | * @recsize: size of record field | ||
413 | * @total: pointer where the total number of copied bytes should be stored | ||
414 | * | ||
415 | * This function copies at most @n bytes from the FIFO to @to and returns the | ||
416 | * number of bytes which cannot be copied. | ||
417 | * A returned value greater than the @n value means that the record doesn't | ||
418 | * fit into the @to buffer. | ||
419 | * | ||
420 | * Note that with only one concurrent reader and one concurrent | ||
421 | * writer, you don't need extra locking to use these functions. | ||
137 | */ | 422 | */ |
138 | static inline unsigned int kfifo_len(struct kfifo *fifo) | 423 | static inline __must_check unsigned int kfifo_out_rec(struct kfifo *fifo, |
424 | void *to, unsigned int n, unsigned int recsize, | ||
425 | unsigned int *total) | ||
426 | |||
139 | { | 427 | { |
140 | unsigned long flags; | 428 | if (!__builtin_constant_p(recsize)) |
141 | unsigned int ret; | 429 | return __kfifo_out_generic(fifo, to, n, recsize, total); |
430 | return __kfifo_out_rec(fifo, to, n, recsize, total); | ||
431 | } | ||
142 | 432 | ||
143 | spin_lock_irqsave(fifo->lock, flags); | 433 | /* |
434 | * __kfifo_from_user_... internal functions for transferring from user space into | ||
435 | * the fifo. do not call it directly, use kfifo_from_user_rec() instead | ||
436 | */ | ||
437 | extern unsigned int __kfifo_from_user_n(struct kfifo *fifo, | ||
438 | const void __user *from, unsigned int n, unsigned int recsize); | ||
439 | |||
440 | extern unsigned int __kfifo_from_user_generic(struct kfifo *fifo, | ||
441 | const void __user *from, unsigned int n, unsigned int recsize); | ||
144 | 442 | ||
145 | ret = __kfifo_len(fifo); | 443 | static inline unsigned int __kfifo_from_user_rec(struct kfifo *fifo, |
444 | const void __user *from, unsigned int n, unsigned int recsize) | ||
445 | { | ||
446 | unsigned int ret; | ||
146 | 447 | ||
147 | spin_unlock_irqrestore(fifo->lock, flags); | 448 | ret = __kfifo_from_user_n(fifo, from, n, recsize); |
148 | 449 | ||
450 | if (likely(ret == 0)) { | ||
451 | if (recsize) | ||
452 | __kfifo_poke_n(fifo, recsize, n); | ||
453 | __kfifo_add_in(fifo, n + recsize); | ||
454 | } | ||
149 | return ret; | 455 | return ret; |
150 | } | 456 | } |
151 | 457 | ||
458 | /** | ||
459 | * kfifo_from_user_rec - puts some data from user space into the FIFO | ||
460 | * @fifo: the fifo to be used. | ||
461 | * @from: pointer to the data to be added. | ||
462 | * @n: the length of the data to be added. | ||
463 | * @recsize: size of record field | ||
464 | * | ||
465 | * This function copies @n bytes from the @from into the | ||
466 | * FIFO and returns the number of bytes which cannot be copied. | ||
467 | * | ||
468 | * If the returned value is equal to or less than the @n value, the copy_from_user() | ||
469 | * function has failed. Otherwise the record doesn't fit into the buffer. | ||
470 | * | ||
471 | * Note that with only one concurrent reader and one concurrent | ||
472 | * writer, you don't need extra locking to use these functions. | ||
473 | */ | ||
474 | static inline __must_check unsigned int kfifo_from_user_rec(struct kfifo *fifo, | ||
475 | const void __user *from, unsigned int n, unsigned int recsize) | ||
476 | { | ||
477 | if (!__builtin_constant_p(recsize)) | ||
478 | return __kfifo_from_user_generic(fifo, from, n, recsize); | ||
479 | return __kfifo_from_user_rec(fifo, from, n, recsize); | ||
480 | } | ||
481 | |||
482 | /* | ||
483 | * __kfifo_to_user_... internal functions for transferring fifo data into user space | ||
484 | * do not call it directly, use kfifo_to_user_rec() instead | ||
485 | */ | ||
486 | extern unsigned int __kfifo_to_user_n(struct kfifo *fifo, | ||
487 | void __user *to, unsigned int n, unsigned int reclen, | ||
488 | unsigned int recsize); | ||
489 | |||
490 | extern unsigned int __kfifo_to_user_generic(struct kfifo *fifo, | ||
491 | void __user *to, unsigned int n, unsigned int recsize, | ||
492 | unsigned int *total); | ||
493 | |||
494 | static inline unsigned int __kfifo_to_user_rec(struct kfifo *fifo, | ||
495 | void __user *to, unsigned int n, | ||
496 | unsigned int recsize, unsigned int *total) | ||
497 | { | ||
498 | unsigned int l; | ||
499 | |||
500 | if (!recsize) { | ||
501 | l = n; | ||
502 | if (total) | ||
503 | *total = l; | ||
504 | } else { | ||
505 | l = __kfifo_peek_n(fifo, recsize); | ||
506 | if (total) | ||
507 | *total = l; | ||
508 | if (n < l) | ||
509 | return l; | ||
510 | } | ||
511 | |||
512 | return __kfifo_to_user_n(fifo, to, n, l, recsize); | ||
513 | } | ||
514 | |||
515 | /** | ||
516 | * kfifo_to_user_rec - gets data from the FIFO and write it to user space | ||
517 | * @fifo: the fifo to be used. | ||
518 | * @to: where the data must be copied. | ||
519 | * @n: the size of the destination buffer. | ||
520 | * @recsize: size of record field | ||
521 | * @total: pointer where the total number of copied bytes should be stored | ||
522 | * | ||
523 | * This function copies at most @n bytes from the FIFO to the @to. | ||
524 | * In case of an error, the function returns the number of bytes which cannot | ||
525 | * be copied. | ||
526 | * If the returned value is equal to or less than the @n value, the copy_to_user() | ||
527 | * function has failed. Otherwise the record doesn't fit into the @to buffer. | ||
528 | * | ||
529 | * Note that with only one concurrent reader and one concurrent | ||
530 | * writer, you don't need extra locking to use these functions. | ||
531 | */ | ||
532 | static inline __must_check unsigned int kfifo_to_user_rec(struct kfifo *fifo, | ||
533 | void __user *to, unsigned int n, unsigned int recsize, | ||
534 | unsigned int *total) | ||
535 | { | ||
536 | if (!__builtin_constant_p(recsize)) | ||
537 | return __kfifo_to_user_generic(fifo, to, n, recsize, total); | ||
538 | return __kfifo_to_user_rec(fifo, to, n, recsize, total); | ||
539 | } | ||
540 | |||
541 | /* | ||
542 | * __kfifo_peek_... internal functions for peeking into the next fifo record | ||
543 | * do not call it directly, use kfifo_peek_rec() instead | ||
544 | */ | ||
545 | extern unsigned int __kfifo_peek_generic(struct kfifo *fifo, | ||
546 | unsigned int recsize); | ||
547 | |||
548 | /** | ||
549 | * kfifo_peek_rec - gets the size of the next FIFO record data | ||
550 | * @fifo: the fifo to be used. | ||
551 | * @recsize: size of record field | ||
552 | * | ||
553 | * This function returns the size of the next FIFO record, in bytes. | ||
554 | */ | ||
555 | static inline __must_check unsigned int kfifo_peek_rec(struct kfifo *fifo, | ||
556 | unsigned int recsize) | ||
557 | { | ||
558 | if (!__builtin_constant_p(recsize)) | ||
559 | return __kfifo_peek_generic(fifo, recsize); | ||
560 | if (!recsize) | ||
561 | return kfifo_len(fifo); | ||
562 | return __kfifo_peek_n(fifo, recsize); | ||
563 | } | ||
564 | |||
565 | /* | ||
566 | * __kfifo_skip_... internal functions for skipping the next fifo record | ||
567 | * do not call it directly, use kfifo_skip_rec() instead | ||
568 | */ | ||
569 | extern void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize); | ||
570 | |||
571 | static inline void __kfifo_skip_rec(struct kfifo *fifo, | ||
572 | unsigned int recsize) | ||
573 | { | ||
574 | unsigned int l; | ||
575 | |||
576 | if (recsize) { | ||
577 | l = __kfifo_peek_n(fifo, recsize); | ||
578 | |||
579 | if (l + recsize <= kfifo_len(fifo)) { | ||
580 | __kfifo_add_out(fifo, l + recsize); | ||
581 | return; | ||
582 | } | ||
583 | } | ||
584 | kfifo_reset_out(fifo); | ||
585 | } | ||
586 | |||
587 | /** | ||
588 | * kfifo_skip_rec - skip the next fifo out record | ||
589 | * @fifo: the fifo to be used. | ||
590 | * @recsize: size of record field | ||
591 | * | ||
592 | * This function skips the next FIFO record | ||
593 | */ | ||
594 | static inline void kfifo_skip_rec(struct kfifo *fifo, | ||
595 | unsigned int recsize) | ||
596 | { | ||
597 | if (!__builtin_constant_p(recsize)) | ||
598 | __kfifo_skip_generic(fifo, recsize); | ||
599 | else | ||
600 | __kfifo_skip_rec(fifo, recsize); | ||
601 | } | ||
602 | |||
603 | /** | ||
604 | * kfifo_avail_rec - returns the number of bytes available in a record FIFO | ||
605 | * @fifo: the fifo to be used. | ||
606 | * @recsize: size of record field | ||
607 | */ | ||
608 | static inline __must_check unsigned int kfifo_avail_rec(struct kfifo *fifo, | ||
609 | unsigned int recsize) | ||
610 | { | ||
611 | unsigned int l = kfifo_size(fifo) - kfifo_len(fifo); | ||
612 | |||
613 | return (l > recsize) ? l - recsize : 0; | ||
614 | } | ||
615 | |||
152 | #endif | 616 | #endif |
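For the single-producer/single-consumer case that the comments above call out as lock-free, the declaration macros and the unlocked kfifo_in()/kfifo_out() pair combine as in this hedged sketch (the log_fifo name and the 128-byte size are illustrative; the size must be a power of two):

#include <linux/kfifo.h>

/* DEFINE_KFIFO creates both the 128-byte buffer and an initialized
 * struct kfifo named log_fifo. */
DEFINE_KFIFO(log_fifo, 128);

/* Producer side, e.g. an interrupt handler: one writer, no lock needed. */
static void log_push(const void *msg, unsigned int len)
{
	if (kfifo_avail(&log_fifo) >= len)
		kfifo_in(&log_fifo, msg, len);
}

/* Consumer side, e.g. a work item: one reader, no lock needed.
 * Returns the number of bytes actually copied out. */
static unsigned int log_pop(void *buf, unsigned int len)
{
	return kfifo_out(&log_fifo, buf, len);
}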
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h index 6adcc297e354..19ec41a183f5 100644 --- a/include/linux/kgdb.h +++ b/include/linux/kgdb.h | |||
@@ -29,8 +29,7 @@ struct pt_regs; | |||
29 | * | 29 | * |
30 | * On some architectures it is required to skip a breakpoint | 30 | * On some architectures it is required to skip a breakpoint |
31 | * exception when it occurs after a breakpoint has been removed. | 31 | * exception when it occurs after a breakpoint has been removed. |
32 | * This can be implemented in the architecture specific portion of | 32 | * This can be implemented in the architecture specific portion of kgdb. |
33 | * for kgdb. | ||
34 | */ | 33 | */ |
35 | extern int kgdb_skipexception(int exception, struct pt_regs *regs); | 34 | extern int kgdb_skipexception(int exception, struct pt_regs *regs); |
36 | 35 | ||
@@ -65,7 +64,7 @@ struct uart_port; | |||
65 | /** | 64 | /** |
66 | * kgdb_breakpoint - compiled in breakpoint | 65 | * kgdb_breakpoint - compiled in breakpoint |
67 | * | 66 | * |
68 | * This will be impelmented a static inline per architecture. This | 67 | * This will be implemented as a static inline per architecture. This |
69 | * function is called by the kgdb core to execute an architecture | 68 | * function is called by the kgdb core to execute an architecture |
70 | * specific trap to cause kgdb to enter the exception processing. | 69 | * specific trap to cause kgdb to enter the exception processing. |
71 | * | 70 | * |
@@ -190,7 +189,7 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code, | |||
190 | * @flags: Current IRQ state | 189 | * @flags: Current IRQ state |
191 | * | 190 | * |
192 | * On SMP systems, we need to get the attention of the other CPUs | 191 | * On SMP systems, we need to get the attention of the other CPUs |
193 | * and get them be in a known state. This should do what is needed | 192 | * and get them into a known state. This should do what is needed |
194 | * to get the other CPUs to call kgdb_wait(). Note that on some arches, | 193 | * to get the other CPUs to call kgdb_wait(). Note that on some arches, |
195 | * the NMI approach is not used for rounding up all the CPUs. For example, | 194 | * the NMI approach is not used for rounding up all the CPUs. For example, |
196 | * in case of MIPS, smp_call_function() is used to roundup CPUs. In | 195 | * in case of MIPS, smp_call_function() is used to roundup CPUs. In |
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h index e880d4cf9e22..08d7dc4ddf40 100644 --- a/include/linux/kmemcheck.h +++ b/include/linux/kmemcheck.h | |||
@@ -36,6 +36,56 @@ int kmemcheck_hide_addr(unsigned long address); | |||
36 | 36 | ||
37 | bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size); | 37 | bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size); |
38 | 38 | ||
39 | /* | ||
40 | * Bitfield annotations | ||
41 | * | ||
42 | * How to use: If you have a struct using bitfields, for example | ||
43 | * | ||
44 | * struct a { | ||
45 | * int x:8, y:8; | ||
46 | * }; | ||
47 | * | ||
48 | * then this should be rewritten as | ||
49 | * | ||
50 | * struct a { | ||
51 | * kmemcheck_bitfield_begin(flags); | ||
52 | * int x:8, y:8; | ||
53 | * kmemcheck_bitfield_end(flags); | ||
54 | * }; | ||
55 | * | ||
56 | * Now the "flags_begin" and "flags_end" members may be used to refer to the | ||
57 | * beginning and end, respectively, of the bitfield (and things like | ||
58 | * &x.flags_begin is allowed). As soon as the struct is allocated, the bit- | ||
59 | * fields should be annotated: | ||
60 | * | ||
61 | * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL); | ||
62 | * kmemcheck_annotate_bitfield(a, flags); | ||
63 | */ | ||
64 | #define kmemcheck_bitfield_begin(name) \ | ||
65 | int name##_begin[0]; | ||
66 | |||
67 | #define kmemcheck_bitfield_end(name) \ | ||
68 | int name##_end[0]; | ||
69 | |||
70 | #define kmemcheck_annotate_bitfield(ptr, name) \ | ||
71 | do { \ | ||
72 | int _n; \ | ||
73 | \ | ||
74 | if (!ptr) \ | ||
75 | break; \ | ||
76 | \ | ||
77 | _n = (long) &((ptr)->name##_end) \ | ||
78 | - (long) &((ptr)->name##_begin); \ | ||
79 | MAYBE_BUILD_BUG_ON(_n < 0); \ | ||
80 | \ | ||
81 | kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \ | ||
82 | } while (0) | ||
83 | |||
84 | #define kmemcheck_annotate_variable(var) \ | ||
85 | do { \ | ||
86 | kmemcheck_mark_initialized(&(var), sizeof(var)); \ | ||
87 | } while (0) \ | ||
88 | |||
39 | #else | 89 | #else |
40 | #define kmemcheck_enabled 0 | 90 | #define kmemcheck_enabled 0 |
41 | 91 | ||
@@ -106,60 +156,16 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) | |||
106 | return true; | 156 | return true; |
107 | } | 157 | } |
108 | 158 | ||
109 | #endif /* CONFIG_KMEMCHECK */ | 159 | #define kmemcheck_bitfield_begin(name) |
110 | 160 | #define kmemcheck_bitfield_end(name) | |
111 | /* | 161 | #define kmemcheck_annotate_bitfield(ptr, name) \ |
112 | * Bitfield annotations | 162 | do { \ |
113 | * | 163 | } while (0) |
114 | * How to use: If you have a struct using bitfields, for example | ||
115 | * | ||
116 | * struct a { | ||
117 | * int x:8, y:8; | ||
118 | * }; | ||
119 | * | ||
120 | * then this should be rewritten as | ||
121 | * | ||
122 | * struct a { | ||
123 | * kmemcheck_bitfield_begin(flags); | ||
124 | * int x:8, y:8; | ||
125 | * kmemcheck_bitfield_end(flags); | ||
126 | * }; | ||
127 | * | ||
128 | * Now the "flags_begin" and "flags_end" members may be used to refer to the | ||
129 | * beginning and end, respectively, of the bitfield (and things like | ||
130 | * &x.flags_begin is allowed). As soon as the struct is allocated, the bit- | ||
131 | * fields should be annotated: | ||
132 | * | ||
133 | * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL); | ||
134 | * kmemcheck_annotate_bitfield(a, flags); | ||
135 | * | ||
136 | * Note: We provide the same definitions for both kmemcheck and non- | ||
137 | * kmemcheck kernels. This makes it harder to introduce accidental errors. It | ||
138 | * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield(). | ||
139 | */ | ||
140 | #define kmemcheck_bitfield_begin(name) \ | ||
141 | int name##_begin[0]; | ||
142 | |||
143 | #define kmemcheck_bitfield_end(name) \ | ||
144 | int name##_end[0]; | ||
145 | 164 | ||
146 | #define kmemcheck_annotate_bitfield(ptr, name) \ | 165 | #define kmemcheck_annotate_variable(var) \ |
147 | do { \ | 166 | do { \ |
148 | int _n; \ | ||
149 | \ | ||
150 | if (!ptr) \ | ||
151 | break; \ | ||
152 | \ | ||
153 | _n = (long) &((ptr)->name##_end) \ | ||
154 | - (long) &((ptr)->name##_begin); \ | ||
155 | MAYBE_BUILD_BUG_ON(_n < 0); \ | ||
156 | \ | ||
157 | kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \ | ||
158 | } while (0) | 167 | } while (0) |
159 | 168 | ||
160 | #define kmemcheck_annotate_variable(var) \ | 169 | #endif /* CONFIG_KMEMCHECK */ |
161 | do { \ | ||
162 | kmemcheck_mark_initialized(&(var), sizeof(var)); \ | ||
163 | } while (0) \ | ||
164 | 170 | ||
165 | #endif /* LINUX_KMEMCHECK_H */ | 171 | #endif /* LINUX_KMEMCHECK_H */ |
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 3c7497d46ee9..99d9a6766f7e 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h | |||
@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset, | |||
32 | size_t size) __ref; | 32 | size_t size) __ref; |
33 | extern void kmemleak_not_leak(const void *ptr) __ref; | 33 | extern void kmemleak_not_leak(const void *ptr) __ref; |
34 | extern void kmemleak_ignore(const void *ptr) __ref; | 34 | extern void kmemleak_ignore(const void *ptr) __ref; |
35 | extern void kmemleak_scan_area(const void *ptr, unsigned long offset, | 35 | extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; |
36 | size_t length, gfp_t gfp) __ref; | ||
37 | extern void kmemleak_no_scan(const void *ptr) __ref; | 36 | extern void kmemleak_no_scan(const void *ptr) __ref; |
38 | 37 | ||
39 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | 38 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, |
@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr) | |||
84 | static inline void kmemleak_ignore(const void *ptr) | 83 | static inline void kmemleak_ignore(const void *ptr) |
85 | { | 84 | { |
86 | } | 85 | } |
87 | static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, | 86 | static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) |
88 | size_t length, gfp_t gfp) | ||
89 | { | 87 | { |
90 | } | 88 | } |
91 | static inline void kmemleak_erase(void **ptr) | 89 | static inline void kmemleak_erase(void **ptr) |
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h new file mode 100644 index 000000000000..24b44145a886 --- /dev/null +++ b/include/linux/kmsg_dump.h | |||
@@ -0,0 +1,61 @@ | |||
1 | /* | ||
2 | * include/linux/kmsg_dump.h | ||
3 | * | ||
4 | * Copyright (C) 2009 Net Insight AB | ||
5 | * | ||
6 | * Author: Simon Kagstrom <simon.kagstrom@netinsight.net> | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file COPYING in the main directory of this archive | ||
10 | * for more details. | ||
11 | */ | ||
12 | #ifndef _LINUX_KMSG_DUMP_H | ||
13 | #define _LINUX_KMSG_DUMP_H | ||
14 | |||
15 | #include <linux/list.h> | ||
16 | |||
17 | enum kmsg_dump_reason { | ||
18 | KMSG_DUMP_OOPS, | ||
19 | KMSG_DUMP_PANIC, | ||
20 | KMSG_DUMP_KEXEC, | ||
21 | }; | ||
22 | |||
23 | /** | ||
24 | * struct kmsg_dumper - kernel crash message dumper structure | ||
25 | * @dump: The callback which gets called on crashes. The buffer is passed | ||
26 | * as two sections, where s1 (length l1) contains the older | ||
27 | * messages and s2 (length l2) contains the newer. | ||
28 | * @list: Entry in the dumper list (private) | ||
29 | * @registered: Flag that specifies if this is already registered | ||
30 | */ | ||
31 | struct kmsg_dumper { | ||
32 | void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason, | ||
33 | const char *s1, unsigned long l1, | ||
34 | const char *s2, unsigned long l2); | ||
35 | struct list_head list; | ||
36 | int registered; | ||
37 | }; | ||
38 | |||
39 | #ifdef CONFIG_PRINTK | ||
40 | void kmsg_dump(enum kmsg_dump_reason reason); | ||
41 | |||
42 | int kmsg_dump_register(struct kmsg_dumper *dumper); | ||
43 | |||
44 | int kmsg_dump_unregister(struct kmsg_dumper *dumper); | ||
45 | #else | ||
46 | static inline void kmsg_dump(enum kmsg_dump_reason reason) | ||
47 | { | ||
48 | } | ||
49 | |||
50 | static inline int kmsg_dump_register(struct kmsg_dumper *dumper) | ||
51 | { | ||
52 | return -EINVAL; | ||
53 | } | ||
54 | |||
55 | static inline int kmsg_dump_unregister(struct kmsg_dumper *dumper) | ||
56 | { | ||
57 | return -EINVAL; | ||
58 | } | ||
59 | #endif | ||
60 | |||
61 | #endif /* _LINUX_KMSG_DUMP_H */ | ||
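A client of this new interface registers a struct kmsg_dumper whose callback receives the tail of the log buffer in two chunks; a hedged sketch of such a client (the example_ names and the panic-only filter are illustrative, and the actual storage step is left as a comment):

#include <linux/kmsg_dump.h>
#include <linux/init.h>
#include <linux/module.h>

/* The callback gets the last kernel messages split in two chunks: s1/l1 is
 * the older part of the ring buffer, s2/l2 the newer part. */
static void example_dump(struct kmsg_dumper *dumper,
			 enum kmsg_dump_reason reason,
			 const char *s1, unsigned long l1,
			 const char *s2, unsigned long l2)
{
	if (reason != KMSG_DUMP_PANIC)
		return;
	/* Persist s1[0..l1) followed by s2[0..l2) to non-volatile storage. */
}

static struct kmsg_dumper example_dumper = {
	.dump = example_dump,
};

static int __init example_dump_init(void)
{
	return kmsg_dump_register(&example_dumper);
}
module_init(example_dump_init);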
diff --git a/include/linux/ksm.h b/include/linux/ksm.h index a485c14ecd5d..43bdab769fc3 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h | |||
@@ -9,8 +9,12 @@ | |||
9 | 9 | ||
10 | #include <linux/bitops.h> | 10 | #include <linux/bitops.h> |
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <linux/pagemap.h> | ||
13 | #include <linux/rmap.h> | ||
12 | #include <linux/sched.h> | 14 | #include <linux/sched.h> |
13 | #include <linux/vmstat.h> | 15 | |
16 | struct stable_node; | ||
17 | struct mem_cgroup; | ||
14 | 18 | ||
15 | #ifdef CONFIG_KSM | 19 | #ifdef CONFIG_KSM |
16 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | 20 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
@@ -34,46 +38,110 @@ static inline void ksm_exit(struct mm_struct *mm) | |||
34 | /* | 38 | /* |
35 | * A KSM page is one of those write-protected "shared pages" or "merged pages" | 39 | * A KSM page is one of those write-protected "shared pages" or "merged pages" |
36 | * which KSM maps into multiple mms, wherever identical anonymous page content | 40 | * which KSM maps into multiple mms, wherever identical anonymous page content |
37 | * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma. | 41 | * is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any |
42 | * anon_vma, but to that page's node of the stable tree. | ||
38 | */ | 43 | */ |
39 | static inline int PageKsm(struct page *page) | 44 | static inline int PageKsm(struct page *page) |
40 | { | 45 | { |
41 | return ((unsigned long)page->mapping == PAGE_MAPPING_ANON); | 46 | return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == |
47 | (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); | ||
48 | } | ||
49 | |||
50 | static inline struct stable_node *page_stable_node(struct page *page) | ||
51 | { | ||
52 | return PageKsm(page) ? page_rmapping(page) : NULL; | ||
53 | } | ||
54 | |||
55 | static inline void set_page_stable_node(struct page *page, | ||
56 | struct stable_node *stable_node) | ||
57 | { | ||
58 | page->mapping = (void *)stable_node + | ||
59 | (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); | ||
42 | } | 60 | } |
43 | 61 | ||
44 | /* | 62 | /* |
45 | * But we have to avoid the checking which page_add_anon_rmap() performs. | 63 | * When do_swap_page() first faults in from swap what used to be a KSM page, |
64 | * no problem, it will be assigned to this vma's anon_vma; but thereafter, | ||
65 | * it might be faulted into a different anon_vma (or perhaps to a different | ||
66 | * offset in the same anon_vma). do_swap_page() cannot do all the locking | ||
67 | * needed to reconstitute a cross-anon_vma KSM page: for now it has to make | ||
68 | * a copy, and leave remerging the pages to a later pass of ksmd. | ||
69 | * | ||
70 | * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, | ||
71 | * but what if the vma was unmerged while the page was swapped out? | ||
46 | */ | 72 | */ |
47 | static inline void page_add_ksm_rmap(struct page *page) | 73 | struct page *ksm_does_need_to_copy(struct page *page, |
74 | struct vm_area_struct *vma, unsigned long address); | ||
75 | static inline struct page *ksm_might_need_to_copy(struct page *page, | ||
76 | struct vm_area_struct *vma, unsigned long address) | ||
48 | { | 77 | { |
49 | if (atomic_inc_and_test(&page->_mapcount)) { | 78 | struct anon_vma *anon_vma = page_anon_vma(page); |
50 | page->mapping = (void *) PAGE_MAPPING_ANON; | 79 | |
51 | __inc_zone_page_state(page, NR_ANON_PAGES); | 80 | if (!anon_vma || |
52 | } | 81 | (anon_vma == vma->anon_vma && |
82 | page->index == linear_page_index(vma, address))) | ||
83 | return page; | ||
84 | |||
85 | return ksm_does_need_to_copy(page, vma, address); | ||
53 | } | 86 | } |
87 | |||
88 | int page_referenced_ksm(struct page *page, | ||
89 | struct mem_cgroup *memcg, unsigned long *vm_flags); | ||
90 | int try_to_unmap_ksm(struct page *page, enum ttu_flags flags); | ||
91 | int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, | ||
92 | struct vm_area_struct *, unsigned long, void *), void *arg); | ||
93 | void ksm_migrate_page(struct page *newpage, struct page *oldpage); | ||
94 | |||
54 | #else /* !CONFIG_KSM */ | 95 | #else /* !CONFIG_KSM */ |
55 | 96 | ||
97 | static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) | ||
98 | { | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | static inline void ksm_exit(struct mm_struct *mm) | ||
103 | { | ||
104 | } | ||
105 | |||
106 | static inline int PageKsm(struct page *page) | ||
107 | { | ||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | #ifdef CONFIG_MMU | ||
56 | static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | 112 | static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
57 | unsigned long end, int advice, unsigned long *vm_flags) | 113 | unsigned long end, int advice, unsigned long *vm_flags) |
58 | { | 114 | { |
59 | return 0; | 115 | return 0; |
60 | } | 116 | } |
61 | 117 | ||
62 | static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) | 118 | static inline struct page *ksm_might_need_to_copy(struct page *page, |
119 | struct vm_area_struct *vma, unsigned long address) | ||
120 | { | ||
121 | return page; | ||
122 | } | ||
123 | |||
124 | static inline int page_referenced_ksm(struct page *page, | ||
125 | struct mem_cgroup *memcg, unsigned long *vm_flags) | ||
63 | { | 126 | { |
64 | return 0; | 127 | return 0; |
65 | } | 128 | } |
66 | 129 | ||
67 | static inline void ksm_exit(struct mm_struct *mm) | 130 | static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) |
68 | { | 131 | { |
132 | return 0; | ||
69 | } | 133 | } |
70 | 134 | ||
71 | static inline int PageKsm(struct page *page) | 135 | static inline int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page*, |
136 | struct vm_area_struct *, unsigned long, void *), void *arg) | ||
72 | { | 137 | { |
73 | return 0; | 138 | return 0; |
74 | } | 139 | } |
75 | 140 | ||
76 | /* No stub required for page_add_ksm_rmap(page) */ | 141 | static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) |
142 | { | ||
143 | } | ||
144 | #endif /* CONFIG_MMU */ | ||
77 | #endif /* !CONFIG_KSM */ | 145 | #endif /* !CONFIG_KSM */ |
78 | 146 | ||
79 | #endif | 147 | #endif /* __LINUX_KSM_H */ |
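Editor's note on the ksm.h rework above: PageKsm() becomes a pure page->mapping flag test and page_add_ksm_rmap() is replaced by hooks that generic mm code calls directly. A minimal sketch of how a swap-in path might use the new copy hook; the wrapper and its name are invented for illustration, the real caller being do_swap_page() as the header comment says:

#include <linux/ksm.h>
#include <linux/mm.h>

/* Hypothetical helper around the new KSM swap-in hook. */
static struct page *swapin_fixup_ksm(struct page *page,
				     struct vm_area_struct *vma,
				     unsigned long address)
{
	/*
	 * ksm_might_need_to_copy() hands back the same page when it can be
	 * mapped at (vma, address) directly, or a private copy when the old
	 * KSM page would land in the wrong anon_vma or offset.  With
	 * CONFIG_KSM=n the inline stub makes this a no-op.
	 */
	return ksm_might_need_to_copy(page, vma, address);
}

PageKsm() itself is now cheap enough for rmap code to branch on, since it only compares the PAGE_MAPPING_FLAGS bits of page->mapping.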
diff --git a/include/linux/leds-lp3944.h b/include/linux/leds-lp3944.h index afc9f9fd70f5..2618aa9063bc 100644 --- a/include/linux/leds-lp3944.h +++ b/include/linux/leds-lp3944.h | |||
@@ -12,9 +12,6 @@ | |||
12 | #ifndef __LINUX_LEDS_LP3944_H | 12 | #ifndef __LINUX_LEDS_LP3944_H |
13 | #define __LINUX_LEDS_LP3944_H | 13 | #define __LINUX_LEDS_LP3944_H |
14 | 14 | ||
15 | #include <linux/leds.h> | ||
16 | #include <linux/workqueue.h> | ||
17 | |||
18 | #define LP3944_LED0 0 | 15 | #define LP3944_LED0 0 |
19 | #define LP3944_LED1 1 | 16 | #define LP3944_LED1 1 |
20 | #define LP3944_LED2 2 | 17 | #define LP3944_LED2 2 |
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h index 96eea90f01a8..f158eb1149aa 100644 --- a/include/linux/leds-pca9532.h +++ b/include/linux/leds-pca9532.h | |||
@@ -32,7 +32,7 @@ struct pca9532_led { | |||
32 | struct i2c_client *client; | 32 | struct i2c_client *client; |
33 | char *name; | 33 | char *name; |
34 | struct led_classdev ldev; | 34 | struct led_classdev ldev; |
35 | struct work_struct work; | 35 | struct work_struct work; |
36 | enum pca9532_type type; | 36 | enum pca9532_type type; |
37 | enum pca9532_state state; | 37 | enum pca9532_state state; |
38 | }; | 38 | }; |
diff --git a/include/linux/leds-regulator.h b/include/linux/leds-regulator.h new file mode 100644 index 000000000000..5a8eb389aab8 --- /dev/null +++ b/include/linux/leds-regulator.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * leds-regulator.h - platform data structure for regulator driven LEDs. | ||
3 | * | ||
4 | * Copyright (C) 2009 Antonio Ospite <ospite@studenti.unina.it> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #ifndef __LINUX_LEDS_REGULATOR_H | ||
13 | #define __LINUX_LEDS_REGULATOR_H | ||
14 | |||
15 | /* | ||
16 | * Use "vled" as supply id when declaring the regulator consumer: | ||
17 | * | ||
18 | * static struct regulator_consumer_supply pcap_regulator_VVIB_consumers [] = { | ||
19 | * { .dev_name = "leds-regulator.0", supply = "vled" }, | ||
20 | * }; | ||
21 | * | ||
22 | * If you have several regulator driven LEDs, you can append a numerical id to | ||
23 | * .dev_name as done above, and use the same id when declaring the platform | ||
24 | * device: | ||
25 | * | ||
26 | * static struct led_regulator_platform_data a780_vibrator_data = { | ||
27 | * .name = "a780::vibrator", | ||
28 | * }; | ||
29 | * | ||
30 | * static struct platform_device a780_vibrator = { | ||
31 | * .name = "leds-regulator", | ||
32 | * .id = 0, | ||
33 | * .dev = { | ||
34 | * .platform_data = &a780_vibrator_data, | ||
35 | * }, | ||
36 | * }; | ||
37 | */ | ||
38 | |||
39 | #include <linux/leds.h> | ||
40 | |||
41 | struct led_regulator_platform_data { | ||
42 | char *name; /* LED name as expected by LED class */ | ||
43 | enum led_brightness brightness; /* initial brightness value */ | ||
44 | }; | ||
45 | |||
46 | #endif /* __LINUX_LEDS_REGULATOR_H */ | ||
diff --git a/include/linux/libata.h b/include/linux/libata.h index 6a9c4ddd3d95..73112250862c 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -354,6 +354,9 @@ enum { | |||
354 | /* max tries if error condition is still set after ->error_handler */ | 354 | /* max tries if error condition is still set after ->error_handler */ |
355 | ATA_EH_MAX_TRIES = 5, | 355 | ATA_EH_MAX_TRIES = 5, |
356 | 356 | ||
357 | /* sometimes resuming a link requires several retries */ | ||
358 | ATA_LINK_RESUME_TRIES = 5, | ||
359 | |||
357 | /* how hard are we gonna try to probe/recover devices */ | 360 | /* how hard are we gonna try to probe/recover devices */ |
358 | ATA_PROBE_MAX_TRIES = 3, | 361 | ATA_PROBE_MAX_TRIES = 3, |
359 | ATA_EH_DEV_TRIES = 3, | 362 | ATA_EH_DEV_TRIES = 3, |
diff --git a/include/linux/lis3lv02d.h b/include/linux/lis3lv02d.h index 3cc2f2c53e4c..f1ca0dcc1628 100644 --- a/include/linux/lis3lv02d.h +++ b/include/linux/lis3lv02d.h | |||
@@ -43,6 +43,21 @@ struct lis3lv02d_platform_data { | |||
43 | #define LIS3_WAKEUP_Z_HI (1 << 5) | 43 | #define LIS3_WAKEUP_Z_HI (1 << 5) |
44 | unsigned char wakeup_flags; | 44 | unsigned char wakeup_flags; |
45 | unsigned char wakeup_thresh; | 45 | unsigned char wakeup_thresh; |
46 | #define LIS3_NO_MAP 0 | ||
47 | #define LIS3_DEV_X 1 | ||
48 | #define LIS3_DEV_Y 2 | ||
49 | #define LIS3_DEV_Z 3 | ||
50 | #define LIS3_INV_DEV_X -1 | ||
51 | #define LIS3_INV_DEV_Y -2 | ||
52 | #define LIS3_INV_DEV_Z -3 | ||
53 | s8 axis_x; | ||
54 | s8 axis_y; | ||
55 | s8 axis_z; | ||
56 | int (*setup_resources)(void); | ||
57 | int (*release_resources)(void); | ||
58 | /* Limits for selftest are specified in chip data sheet */ | ||
59 | s16 st_min_limits[3]; /* min pass limit x, y, z */ | ||
60 | s16 st_max_limits[3]; /* max pass limit x, y, z */ | ||
46 | }; | 61 | }; |
47 | 62 | ||
48 | #endif /* __LIS3LV02D_H_ */ | 63 | #endif /* __LIS3LV02D_H_ */ |
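Editor's note: the new axis_x/axis_y/axis_z fields let a board remap or invert the accelerometer axes with the LIS3_DEV_*/LIS3_INV_DEV_* codes, and st_min_limits/st_max_limits carry the datasheet selftest window. A hedged board-file sketch; the remapping and limit values are placeholders, not taken from any datasheet:

#include <linux/lis3lv02d.h>

static struct lis3lv02d_platform_data board_lis3_pdata = {
	/* example remap: board X is the chip's Y, board Y is the inverted chip X */
	.axis_x		= LIS3_DEV_Y,
	.axis_y		= LIS3_INV_DEV_X,
	.axis_z		= LIS3_DEV_Z,
	/* placeholder selftest pass window per axis (x, y, z) */
	.st_min_limits	= { 120, 120, 140 },
	.st_max_limits	= { 550, 550, 750 },
};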
diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h new file mode 100644 index 000000000000..1a2df2efb771 --- /dev/null +++ b/include/linux/list_sort.h | |||
@@ -0,0 +1,11 @@ | |||
1 | #ifndef _LINUX_LIST_SORT_H | ||
2 | #define _LINUX_LIST_SORT_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | struct list_head; | ||
7 | |||
8 | void list_sort(void *priv, struct list_head *head, | ||
9 | int (*cmp)(void *priv, struct list_head *a, | ||
10 | struct list_head *b)); | ||
11 | #endif | ||
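Editor's note: list_sort() sorts an intrusive list in place; the comparator receives two list_head pointers plus the opaque priv cookie and recovers the containing objects itself. A small usage sketch; the element type and key are invented:

#include <linux/list.h>
#include <linux/list_sort.h>

struct item {				/* hypothetical element type */
	struct list_head node;
	int key;
};

static int item_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct item *ia = list_entry(a, struct item, node);
	struct item *ib = list_entry(b, struct item, node);

	/* negative / zero / positive, qsort-style */
	return ia->key - ib->key;
}

/* then: list_sort(NULL, &my_list, item_cmp); sorts my_list by ->key */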
diff --git a/include/linux/lmb.h b/include/linux/lmb.h index 2442e3f3d033..ef82b8fcbddb 100644 --- a/include/linux/lmb.h +++ b/include/linux/lmb.h | |||
@@ -54,6 +54,7 @@ extern u64 __init lmb_phys_mem_size(void); | |||
54 | extern u64 lmb_end_of_DRAM(void); | 54 | extern u64 lmb_end_of_DRAM(void); |
55 | extern void __init lmb_enforce_memory_limit(u64 memory_limit); | 55 | extern void __init lmb_enforce_memory_limit(u64 memory_limit); |
56 | extern int __init lmb_is_reserved(u64 addr); | 56 | extern int __init lmb_is_reserved(u64 addr); |
57 | extern int lmb_is_region_reserved(u64 base, u64 size); | ||
57 | extern int lmb_find(struct lmb_property *res); | 58 | extern int lmb_find(struct lmb_property *res); |
58 | 59 | ||
59 | extern void lmb_dump_all(void); | 60 | extern void lmb_dump_all(void); |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index bf9213b2db8f..1f9b119f4ace 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -54,6 +54,11 @@ extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru); | |||
54 | extern void mem_cgroup_del_lru(struct page *page); | 54 | extern void mem_cgroup_del_lru(struct page *page); |
55 | extern void mem_cgroup_move_lists(struct page *page, | 55 | extern void mem_cgroup_move_lists(struct page *page, |
56 | enum lru_list from, enum lru_list to); | 56 | enum lru_list from, enum lru_list to); |
57 | |||
58 | /* For coalescing uncharges to reduce memcg overhead */ | ||
59 | extern void mem_cgroup_uncharge_start(void); | ||
60 | extern void mem_cgroup_uncharge_end(void); | ||
61 | |||
57 | extern void mem_cgroup_uncharge_page(struct page *page); | 62 | extern void mem_cgroup_uncharge_page(struct page *page); |
58 | extern void mem_cgroup_uncharge_cache_page(struct page *page); | 63 | extern void mem_cgroup_uncharge_cache_page(struct page *page); |
59 | extern int mem_cgroup_shmem_charge_fallback(struct page *page, | 64 | extern int mem_cgroup_shmem_charge_fallback(struct page *page, |
@@ -68,6 +73,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, | |||
68 | extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); | 73 | extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); |
69 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); | 74 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); |
70 | 75 | ||
76 | extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); | ||
71 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | 77 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); |
72 | 78 | ||
73 | static inline | 79 | static inline |
@@ -80,6 +86,8 @@ int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) | |||
80 | return cgroup == mem; | 86 | return cgroup == mem; |
81 | } | 87 | } |
82 | 88 | ||
89 | extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem); | ||
90 | |||
83 | extern int | 91 | extern int |
84 | mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr); | 92 | mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr); |
85 | extern void mem_cgroup_end_migration(struct mem_cgroup *mem, | 93 | extern void mem_cgroup_end_migration(struct mem_cgroup *mem, |
@@ -117,7 +125,7 @@ static inline bool mem_cgroup_disabled(void) | |||
117 | } | 125 | } |
118 | 126 | ||
119 | extern bool mem_cgroup_oom_called(struct task_struct *task); | 127 | extern bool mem_cgroup_oom_called(struct task_struct *task); |
120 | void mem_cgroup_update_mapped_file_stat(struct page *page, int val); | 128 | void mem_cgroup_update_file_mapped(struct page *page, int val); |
121 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | 129 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, |
122 | gfp_t gfp_mask, int nid, | 130 | gfp_t gfp_mask, int nid, |
123 | int zid); | 131 | int zid); |
@@ -151,6 +159,14 @@ static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr) | |||
151 | { | 159 | { |
152 | } | 160 | } |
153 | 161 | ||
162 | static inline void mem_cgroup_uncharge_start(void) | ||
163 | { | ||
164 | } | ||
165 | |||
166 | static inline void mem_cgroup_uncharge_end(void) | ||
167 | { | ||
168 | } | ||
169 | |||
154 | static inline void mem_cgroup_uncharge_page(struct page *page) | 170 | static inline void mem_cgroup_uncharge_page(struct page *page) |
155 | { | 171 | { |
156 | } | 172 | } |
@@ -189,6 +205,11 @@ mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to) | |||
189 | { | 205 | { |
190 | } | 206 | } |
191 | 207 | ||
208 | static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) | ||
209 | { | ||
210 | return NULL; | ||
211 | } | ||
212 | |||
192 | static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem) | 213 | static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem) |
193 | { | 214 | { |
194 | return 1; | 215 | return 1; |
@@ -200,6 +221,11 @@ static inline int task_in_mem_cgroup(struct task_struct *task, | |||
200 | return 1; | 221 | return 1; |
201 | } | 222 | } |
202 | 223 | ||
224 | static inline struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem) | ||
225 | { | ||
226 | return NULL; | ||
227 | } | ||
228 | |||
203 | static inline int | 229 | static inline int |
204 | mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) | 230 | mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr) |
205 | { | 231 | { |
@@ -274,7 +300,7 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) | |||
274 | { | 300 | { |
275 | } | 301 | } |
276 | 302 | ||
277 | static inline void mem_cgroup_update_mapped_file_stat(struct page *page, | 303 | static inline void mem_cgroup_update_file_mapped(struct page *page, |
278 | int val) | 304 | int val) |
279 | { | 305 | { |
280 | } | 306 | } |
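Editor's note: the new mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end() pair brackets a batch of uncharges so per-page accounting can be coalesced, and the added stubs keep non-memcg builds unchanged. A hedged sketch of the intended calling pattern; the batch-release helper is invented:

#include <linux/memcontrol.h>
#include <linux/mm.h>

/* Hypothetical: tear down an array of pages with one accounting flush. */
static void release_page_batch(struct page **pages, int nr)
{
	int i;

	mem_cgroup_uncharge_start();
	for (i = 0; i < nr; i++)
		mem_cgroup_uncharge_page(pages[i]);	/* coalesced inside */
	mem_cgroup_uncharge_end();
}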
diff --git a/include/linux/memory.h b/include/linux/memory.h index 37fa19b34ef5..1adfe779eb99 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
@@ -50,6 +50,19 @@ struct memory_notify { | |||
50 | int status_change_nid; | 50 | int status_change_nid; |
51 | }; | 51 | }; |
52 | 52 | ||
53 | /* | ||
54 | * During pageblock isolation, count the number of pages within the | ||
55 | * range [start_pfn, start_pfn + nr_pages) which are owned by code | ||
56 | * in the notifier chain. | ||
57 | */ | ||
58 | #define MEM_ISOLATE_COUNT (1<<0) | ||
59 | |||
60 | struct memory_isolate_notify { | ||
61 | unsigned long start_pfn; /* Start of range to check */ | ||
62 | unsigned int nr_pages; /* # pages in range to check */ | ||
63 | unsigned int pages_found; /* # pages found to be owned by callbacks */ | ||
64 | }; | ||
65 | |||
53 | struct notifier_block; | 66 | struct notifier_block; |
54 | struct mem_section; | 67 | struct mem_section; |
55 | 68 | ||
@@ -76,14 +89,28 @@ static inline int memory_notify(unsigned long val, void *v) | |||
76 | { | 89 | { |
77 | return 0; | 90 | return 0; |
78 | } | 91 | } |
92 | static inline int register_memory_isolate_notifier(struct notifier_block *nb) | ||
93 | { | ||
94 | return 0; | ||
95 | } | ||
96 | static inline void unregister_memory_isolate_notifier(struct notifier_block *nb) | ||
97 | { | ||
98 | } | ||
99 | static inline int memory_isolate_notify(unsigned long val, void *v) | ||
100 | { | ||
101 | return 0; | ||
102 | } | ||
79 | #else | 103 | #else |
80 | extern int register_memory_notifier(struct notifier_block *nb); | 104 | extern int register_memory_notifier(struct notifier_block *nb); |
81 | extern void unregister_memory_notifier(struct notifier_block *nb); | 105 | extern void unregister_memory_notifier(struct notifier_block *nb); |
106 | extern int register_memory_isolate_notifier(struct notifier_block *nb); | ||
107 | extern void unregister_memory_isolate_notifier(struct notifier_block *nb); | ||
82 | extern int register_new_memory(int, struct mem_section *); | 108 | extern int register_new_memory(int, struct mem_section *); |
83 | extern int unregister_memory_section(struct mem_section *); | 109 | extern int unregister_memory_section(struct mem_section *); |
84 | extern int memory_dev_init(void); | 110 | extern int memory_dev_init(void); |
85 | extern int remove_memory_block(unsigned long, struct mem_section *, int); | 111 | extern int remove_memory_block(unsigned long, struct mem_section *, int); |
86 | extern int memory_notify(unsigned long val, void *v); | 112 | extern int memory_notify(unsigned long val, void *v); |
113 | extern int memory_isolate_notify(unsigned long val, void *v); | ||
87 | extern struct memory_block *find_memory_block(struct mem_section *); | 114 | extern struct memory_block *find_memory_block(struct mem_section *); |
88 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) | 115 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) |
89 | enum mem_add_context { BOOT, HOTPLUG }; | 116 | enum mem_add_context { BOOT, HOTPLUG }; |
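Editor's note: register_memory_isolate_notifier() lets a driver that hands pages back on demand (a balloon-style driver, for instance) report how many pages in a pageblock it already owns when isolation is attempted. A hedged sketch of such a callback, assuming the chain is invoked with MEM_ISOLATE_COUNT as the action and a struct memory_isolate_notify as the argument; the ownership test is a placeholder:

#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/types.h>

/* Placeholder for a driver-specific "do we own this pfn?" test. */
static bool my_page_is_owned(unsigned long pfn)
{
	return false;
}

static int my_isolate_cb(struct notifier_block *nb,
			 unsigned long action, void *arg)
{
	struct memory_isolate_notify *mi = arg;
	unsigned long pfn;

	if (action != MEM_ISOLATE_COUNT)
		return NOTIFY_OK;

	for (pfn = mi->start_pfn; pfn < mi->start_pfn + mi->nr_pages; pfn++)
		if (my_page_is_owned(pfn))
			mi->pages_found++;

	return NOTIFY_OK;
}

static struct notifier_block my_isolate_nb = {
	.notifier_call	= my_isolate_cb,
};

/* registered once at init: register_memory_isolate_notifier(&my_isolate_nb); */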
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index fed969281a41..35b07b773e6c 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -69,7 +69,6 @@ extern void online_page(struct page *page); | |||
69 | /* VM interface that may be used by firmware interface */ | 69 | /* VM interface that may be used by firmware interface */ |
70 | extern int online_pages(unsigned long, unsigned long); | 70 | extern int online_pages(unsigned long, unsigned long); |
71 | extern void __offline_isolated_pages(unsigned long, unsigned long); | 71 | extern void __offline_isolated_pages(unsigned long, unsigned long); |
72 | extern int offline_pages(unsigned long, unsigned long, unsigned long); | ||
73 | 72 | ||
74 | /* reasonably generic interface to expand the physical pages in a zone */ | 73 | /* reasonably generic interface to expand the physical pages in a zone */ |
75 | extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn, | 74 | extern int __add_pages(int nid, struct zone *zone, unsigned long start_pfn, |
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 085c903fe0f1..1cc966cd3e5f 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h | |||
@@ -201,6 +201,7 @@ extern void mpol_fix_fork_child_flag(struct task_struct *p); | |||
201 | extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, | 201 | extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, |
202 | unsigned long addr, gfp_t gfp_flags, | 202 | unsigned long addr, gfp_t gfp_flags, |
203 | struct mempolicy **mpol, nodemask_t **nodemask); | 203 | struct mempolicy **mpol, nodemask_t **nodemask); |
204 | extern bool init_nodemask_of_mempolicy(nodemask_t *mask); | ||
204 | extern unsigned slab_node(struct mempolicy *policy); | 205 | extern unsigned slab_node(struct mempolicy *policy); |
205 | 206 | ||
206 | extern enum zone_type policy_zone; | 207 | extern enum zone_type policy_zone; |
@@ -328,6 +329,8 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, | |||
328 | return node_zonelist(0, gfp_flags); | 329 | return node_zonelist(0, gfp_flags); |
329 | } | 330 | } |
330 | 331 | ||
332 | static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; } | ||
333 | |||
331 | static inline int do_migrate_pages(struct mm_struct *mm, | 334 | static inline int do_migrate_pages(struct mm_struct *mm, |
332 | const nodemask_t *from_nodes, | 335 | const nodemask_t *from_nodes, |
333 | const nodemask_t *to_nodes, int flags) | 336 | const nodemask_t *to_nodes, int flags) |
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h index d9034cc87f18..3398bd9aab11 100644 --- a/include/linux/mfd/pcf50633/core.h +++ b/include/linux/mfd/pcf50633/core.h | |||
@@ -29,7 +29,12 @@ struct pcf50633_platform_data { | |||
29 | char **batteries; | 29 | char **batteries; |
30 | int num_batteries; | 30 | int num_batteries; |
31 | 31 | ||
32 | int charging_restart_interval; | 32 | /* |
33 | * Should be set according to the reference resistor used, see | ||
34 | * I_{ch(ref)} charger reference current in the pcf50633 User | ||
35 | * Manual. | ||
36 | */ | ||
37 | int charger_reference_current_ma; | ||
33 | 38 | ||
34 | /* Callbacks */ | 39 | /* Callbacks */ |
35 | void (*probe_done)(struct pcf50633 *); | 40 | void (*probe_done)(struct pcf50633 *); |
diff --git a/include/linux/mfd/pcf50633/mbc.h b/include/linux/mfd/pcf50633/mbc.h index 4119579acf2c..df4f5fa88de3 100644 --- a/include/linux/mfd/pcf50633/mbc.h +++ b/include/linux/mfd/pcf50633/mbc.h | |||
@@ -128,6 +128,7 @@ enum pcf50633_reg_mbcs3 { | |||
128 | int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); | 128 | int pcf50633_mbc_usb_curlim_set(struct pcf50633 *pcf, int ma); |
129 | 129 | ||
130 | int pcf50633_mbc_get_status(struct pcf50633 *); | 130 | int pcf50633_mbc_get_status(struct pcf50633 *); |
131 | int pcf50633_mbc_get_usb_online_status(struct pcf50633 *); | ||
131 | 132 | ||
132 | #endif | 133 | #endif |
133 | 134 | ||
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index 6b9c5d06690c..9cb1834deffa 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h | |||
@@ -2,6 +2,8 @@ | |||
2 | #define MFD_TMIO_H | 2 | #define MFD_TMIO_H |
3 | 3 | ||
4 | #include <linux/fb.h> | 4 | #include <linux/fb.h> |
5 | #include <linux/io.h> | ||
6 | #include <linux/platform_device.h> | ||
5 | 7 | ||
6 | #define tmio_ioread8(addr) readb(addr) | 8 | #define tmio_ioread8(addr) readb(addr) |
7 | #define tmio_ioread16(addr) readw(addr) | 9 | #define tmio_ioread16(addr) readw(addr) |
@@ -18,11 +20,48 @@ | |||
18 | writew((val) >> 16, (addr) + 2); \ | 20 | writew((val) >> 16, (addr) + 2); \ |
19 | } while (0) | 21 | } while (0) |
20 | 22 | ||
23 | #define CNF_CMD 0x04 | ||
24 | #define CNF_CTL_BASE 0x10 | ||
25 | #define CNF_INT_PIN 0x3d | ||
26 | #define CNF_STOP_CLK_CTL 0x40 | ||
27 | #define CNF_GCLK_CTL 0x41 | ||
28 | #define CNF_SD_CLK_MODE 0x42 | ||
29 | #define CNF_PIN_STATUS 0x44 | ||
30 | #define CNF_PWR_CTL_1 0x48 | ||
31 | #define CNF_PWR_CTL_2 0x49 | ||
32 | #define CNF_PWR_CTL_3 0x4a | ||
33 | #define CNF_CARD_DETECT_MODE 0x4c | ||
34 | #define CNF_SD_SLOT 0x50 | ||
35 | #define CNF_EXT_GCLK_CTL_1 0xf0 | ||
36 | #define CNF_EXT_GCLK_CTL_2 0xf1 | ||
37 | #define CNF_EXT_GCLK_CTL_3 0xf9 | ||
38 | #define CNF_SD_LED_EN_1 0xfa | ||
39 | #define CNF_SD_LED_EN_2 0xfe | ||
40 | |||
41 | #define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/ | ||
42 | |||
43 | #define sd_config_write8(base, shift, reg, val) \ | ||
44 | tmio_iowrite8((val), (base) + ((reg) << (shift))) | ||
45 | #define sd_config_write16(base, shift, reg, val) \ | ||
46 | tmio_iowrite16((val), (base) + ((reg) << (shift))) | ||
47 | #define sd_config_write32(base, shift, reg, val) \ | ||
48 | do { \ | ||
49 | tmio_iowrite16((val), (base) + ((reg) << (shift))); \ | ||
50 | tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \ | ||
51 | } while (0) | ||
52 | |||
53 | int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); | ||
54 | int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); | ||
55 | void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state); | ||
56 | void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state); | ||
57 | |||
21 | /* | 58 | /* |
22 | * data for the MMC controller | 59 | * data for the MMC controller |
23 | */ | 60 | */ |
24 | struct tmio_mmc_data { | 61 | struct tmio_mmc_data { |
25 | const unsigned int hclk; | 62 | const unsigned int hclk; |
63 | void (*set_pwr)(struct platform_device *host, int state); | ||
64 | void (*set_clk_div)(struct platform_device *host, int state); | ||
26 | }; | 65 | }; |
27 | 66 | ||
28 | /* | 67 | /* |
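Editor's note: the CNF_* offsets, the sd_config_write*() accessors (which scale register offsets by a per-variant shift) and the tmio_core_mmc_*() helpers move SD/MMC cell bring-up into shared code, while tmio_mmc_data gains board callbacks for power and clock-divider control. A hedged glue sketch; the clock rate and the enable hook's wiring are illustrative only:

#include <linux/mfd/tmio.h>

/* Hypothetical board hook: switch the card's supply on or off. */
static void board_mmc_pwr(struct platform_device *host, int state)
{
	/* e.g. toggle a GPIO-controlled regulator here */
}

static struct tmio_mmc_data board_mmc_data = {
	.hclk		= 24000000,	/* assumed bus clock in Hz */
	.set_pwr	= board_mmc_pwr,
};

/* MFD cell enable hook deferring to the new shared core helper. */
static int board_mmc_enable(void __iomem *cnf, int shift, unsigned long ctl_base)
{
	return tmio_core_mmc_enable(cnf, shift, ctl_base);
}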
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h index 415c228743d5..fd322aca33ba 100644 --- a/include/linux/mfd/wm831x/pdata.h +++ b/include/linux/mfd/wm831x/pdata.h | |||
@@ -41,6 +41,23 @@ struct wm831x_battery_pdata { | |||
41 | int timeout; /** Charge cycle timeout, in minutes */ | 41 | int timeout; /** Charge cycle timeout, in minutes */ |
42 | }; | 42 | }; |
43 | 43 | ||
44 | /** | ||
45 | * Configuration for the WM831x DC-DC BuckWise convertors. This | ||
46 | * should be passed as driver_data in the regulator_init_data. | ||
47 | * | ||
48 | * Currently all the configuration is for the fast DVS switching | ||
49 | * support of the devices. This allows MFPs on the device to be | ||
50 | * configured as an input to switch between two output voltages, | ||
51 | * allowing voltage transitions without the expense of an access over | ||
52 | * I2C or SPI buses. | ||
53 | */ | ||
54 | struct wm831x_buckv_pdata { | ||
55 | int dvs_gpio; /** CPU GPIO to use for DVS switching */ | ||
56 | int dvs_control_src; /** Hardware DVS source to use (1 or 2) */ | ||
57 | int dvs_init_state; /** DVS state to expect on startup */ | ||
58 | int dvs_state_gpio; /** CPU GPIO to use for monitoring status */ | ||
59 | }; | ||
60 | |||
44 | /* Sources for status LED configuration. Values are register values | 61 | /* Sources for status LED configuration. Values are register values |
45 | * plus 1 to allow for a zero default for preserve. | 62 | * plus 1 to allow for a zero default for preserve. |
46 | */ | 63 | */ |
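Editor's note: as the new comment says, wm831x_buckv_pdata is passed through regulator_init_data.driver_data, so the DVS wiring travels with the regulator constraints. A hedged board sketch; the GPIO numbers, DVS source and voltage range are placeholders:

#include <linux/mfd/wm831x/pdata.h>
#include <linux/regulator/machine.h>

static struct wm831x_buckv_pdata board_dc1_dvs = {
	.dvs_gpio	 = 101,	/* placeholder CPU GPIO driving the DVS input */
	.dvs_control_src = 1,
	.dvs_init_state	 = 0,
	.dvs_state_gpio	 = 102,	/* placeholder GPIO monitoring DVS status */
};

static struct regulator_init_data board_dc1 = {
	.constraints = {
		.min_uV	= 600000,
		.max_uV	= 1800000,	/* illustrative range only */
	},
	.driver_data	= &board_dc1_dvs,
};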
diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h index be3264e286e0..e786fe9841ef 100644 --- a/include/linux/mfd/wm8350/pmic.h +++ b/include/linux/mfd/wm8350/pmic.h | |||
@@ -666,20 +666,20 @@ | |||
666 | #define WM8350_ISINK_FLASH_DUR_64MS (1 << 8) | 666 | #define WM8350_ISINK_FLASH_DUR_64MS (1 << 8) |
667 | #define WM8350_ISINK_FLASH_DUR_96MS (2 << 8) | 667 | #define WM8350_ISINK_FLASH_DUR_96MS (2 << 8) |
668 | #define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8) | 668 | #define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8) |
669 | #define WM8350_ISINK_FLASH_ON_INSTANT (0 << 4) | 669 | #define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0) |
670 | #define WM8350_ISINK_FLASH_ON_0_25S (1 << 4) | 670 | #define WM8350_ISINK_FLASH_ON_0_25S (1 << 0) |
671 | #define WM8350_ISINK_FLASH_ON_0_50S (2 << 4) | 671 | #define WM8350_ISINK_FLASH_ON_0_50S (2 << 0) |
672 | #define WM8350_ISINK_FLASH_ON_1_00S (3 << 4) | 672 | #define WM8350_ISINK_FLASH_ON_1_00S (3 << 0) |
673 | #define WM8350_ISINK_FLASH_ON_1_95S (1 << 4) | 673 | #define WM8350_ISINK_FLASH_ON_1_95S (1 << 0) |
674 | #define WM8350_ISINK_FLASH_ON_3_91S (2 << 4) | 674 | #define WM8350_ISINK_FLASH_ON_3_91S (2 << 0) |
675 | #define WM8350_ISINK_FLASH_ON_7_80S (3 << 4) | 675 | #define WM8350_ISINK_FLASH_ON_7_80S (3 << 0) |
676 | #define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 0) | 676 | #define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4) |
677 | #define WM8350_ISINK_FLASH_OFF_0_25S (1 << 0) | 677 | #define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4) |
678 | #define WM8350_ISINK_FLASH_OFF_0_50S (2 << 0) | 678 | #define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4) |
679 | #define WM8350_ISINK_FLASH_OFF_1_00S (3 << 0) | 679 | #define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4) |
680 | #define WM8350_ISINK_FLASH_OFF_1_95S (1 << 0) | 680 | #define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4) |
681 | #define WM8350_ISINK_FLASH_OFF_3_91S (2 << 0) | 681 | #define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4) |
682 | #define WM8350_ISINK_FLASH_OFF_7_80S (3 << 0) | 682 | #define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4) |
683 | 683 | ||
684 | /* | 684 | /* |
685 | * Regulator Interrupts. | 685 | * Regulator Interrupts. |
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 527602cdea1c..7f085c97c799 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -12,7 +12,8 @@ typedef struct page *new_page_t(struct page *, unsigned long private, int **); | |||
12 | extern int putback_lru_pages(struct list_head *l); | 12 | extern int putback_lru_pages(struct list_head *l); |
13 | extern int migrate_page(struct address_space *, | 13 | extern int migrate_page(struct address_space *, |
14 | struct page *, struct page *); | 14 | struct page *, struct page *); |
15 | extern int migrate_pages(struct list_head *l, new_page_t x, unsigned long); | 15 | extern int migrate_pages(struct list_head *l, new_page_t x, |
16 | unsigned long private, int offlining); | ||
16 | 17 | ||
17 | extern int fail_migrate_page(struct address_space *, | 18 | extern int fail_migrate_page(struct address_space *, |
18 | struct page *, struct page *); | 19 | struct page *, struct page *); |
@@ -26,10 +27,7 @@ extern int migrate_vmas(struct mm_struct *mm, | |||
26 | 27 | ||
27 | static inline int putback_lru_pages(struct list_head *l) { return 0; } | 28 | static inline int putback_lru_pages(struct list_head *l) { return 0; } |
28 | static inline int migrate_pages(struct list_head *l, new_page_t x, | 29 | static inline int migrate_pages(struct list_head *l, new_page_t x, |
29 | unsigned long private) { return -ENOSYS; } | 30 | unsigned long private, int offlining) { return -ENOSYS; } |
30 | |||
31 | static inline int migrate_pages_to(struct list_head *pagelist, | ||
32 | struct vm_area_struct *vma, int dest) { return 0; } | ||
33 | 31 | ||
34 | static inline int migrate_prep(void) { return -ENOSYS; } | 32 | static inline int migrate_prep(void) { return -ENOSYS; } |
35 | 33 | ||
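Editor's note: migrate_pages() grows a fourth argument, offlining, presumably so memory hot-remove style callers can mark their migrations; existing callers pass 0. A hedged caller sketch; new_anywhere() is an invented new_page_t allocator:

#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm.h>

static struct page *new_anywhere(struct page *p, unsigned long private,
				 int **result)
{
	return alloc_page(GFP_HIGHUSER_MOVABLE);
}

static int drain_list(struct list_head *pagelist, int offlining)
{
	/* last argument is the new offlining flag */
	return migrate_pages(pagelist, new_anywhere, 0, offlining);
}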
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index ce7cc6c7bcbb..e92d1bfdb330 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -61,6 +61,7 @@ enum { | |||
61 | MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, | 61 | MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8, |
62 | MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, | 62 | MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9, |
63 | MLX4_DEV_CAP_FLAG_DPDP = 1 << 12, | 63 | MLX4_DEV_CAP_FLAG_DPDP = 1 << 12, |
64 | MLX4_DEV_CAP_FLAG_BLH = 1 << 15, | ||
64 | MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, | 65 | MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16, |
65 | MLX4_DEV_CAP_FLAG_APM = 1 << 17, | 66 | MLX4_DEV_CAP_FLAG_APM = 1 << 17, |
66 | MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, | 67 | MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18, |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 24c395694f4d..60c467bfbabd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -620,13 +620,22 @@ void page_address_init(void); | |||
620 | /* | 620 | /* |
621 | * On an anonymous page mapped into a user virtual memory area, | 621 | * On an anonymous page mapped into a user virtual memory area, |
622 | * page->mapping points to its anon_vma, not to a struct address_space; | 622 | * page->mapping points to its anon_vma, not to a struct address_space; |
623 | * with the PAGE_MAPPING_ANON bit set to distinguish it. | 623 | * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. |
624 | * | ||
625 | * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, | ||
626 | * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit; | ||
627 | * and then page->mapping points, not to an anon_vma, but to a private | ||
628 | * structure which KSM associates with that merged page. See ksm.h. | ||
629 | * | ||
630 | * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used. | ||
624 | * | 631 | * |
625 | * Please note that, confusingly, "page_mapping" refers to the inode | 632 | * Please note that, confusingly, "page_mapping" refers to the inode |
626 | * address_space which maps the page from disk; whereas "page_mapped" | 633 | * address_space which maps the page from disk; whereas "page_mapped" |
627 | * refers to user virtual address space into which the page is mapped. | 634 | * refers to user virtual address space into which the page is mapped. |
628 | */ | 635 | */ |
629 | #define PAGE_MAPPING_ANON 1 | 636 | #define PAGE_MAPPING_ANON 1 |
637 | #define PAGE_MAPPING_KSM 2 | ||
638 | #define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM) | ||
630 | 639 | ||
631 | extern struct address_space swapper_space; | 640 | extern struct address_space swapper_space; |
632 | static inline struct address_space *page_mapping(struct page *page) | 641 | static inline struct address_space *page_mapping(struct page *page) |
@@ -634,16 +643,19 @@ static inline struct address_space *page_mapping(struct page *page) | |||
634 | struct address_space *mapping = page->mapping; | 643 | struct address_space *mapping = page->mapping; |
635 | 644 | ||
636 | VM_BUG_ON(PageSlab(page)); | 645 | VM_BUG_ON(PageSlab(page)); |
637 | #ifdef CONFIG_SWAP | ||
638 | if (unlikely(PageSwapCache(page))) | 646 | if (unlikely(PageSwapCache(page))) |
639 | mapping = &swapper_space; | 647 | mapping = &swapper_space; |
640 | else | 648 | else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) |
641 | #endif | ||
642 | if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON)) | ||
643 | mapping = NULL; | 649 | mapping = NULL; |
644 | return mapping; | 650 | return mapping; |
645 | } | 651 | } |
646 | 652 | ||
653 | /* Neutral page->mapping pointer to address_space or anon_vma or other */ | ||
654 | static inline void *page_rmapping(struct page *page) | ||
655 | { | ||
656 | return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); | ||
657 | } | ||
658 | |||
647 | static inline int PageAnon(struct page *page) | 659 | static inline int PageAnon(struct page *page) |
648 | { | 660 | { |
649 | return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; | 661 | return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; |
@@ -758,6 +770,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlb, | |||
758 | * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry | 770 | * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry |
759 | * @pte_entry: if set, called for each non-empty PTE (4th-level) entry | 771 | * @pte_entry: if set, called for each non-empty PTE (4th-level) entry |
760 | * @pte_hole: if set, called for each hole at all levels | 772 | * @pte_hole: if set, called for each hole at all levels |
773 | * @hugetlb_entry: if set, called for each hugetlb entry | ||
761 | * | 774 | * |
762 | * (see walk_page_range for more details) | 775 | * (see walk_page_range for more details) |
763 | */ | 776 | */ |
@@ -767,6 +780,8 @@ struct mm_walk { | |||
767 | int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); | 780 | int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); |
768 | int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); | 781 | int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); |
769 | int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); | 782 | int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); |
783 | int (*hugetlb_entry)(pte_t *, unsigned long, unsigned long, | ||
784 | struct mm_walk *); | ||
770 | struct mm_struct *mm; | 785 | struct mm_struct *mm; |
771 | void *private; | 786 | void *private; |
772 | }; | 787 | }; |
@@ -1022,6 +1037,9 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn, | |||
1022 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, | 1037 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, |
1023 | unsigned long end_pfn); | 1038 | unsigned long end_pfn); |
1024 | extern void remove_all_active_ranges(void); | 1039 | extern void remove_all_active_ranges(void); |
1040 | void sort_node_map(void); | ||
1041 | unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, | ||
1042 | unsigned long end_pfn); | ||
1025 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, | 1043 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, |
1026 | unsigned long end_pfn); | 1044 | unsigned long end_pfn); |
1027 | extern void get_pfn_range_for_nid(unsigned int nid, | 1045 | extern void get_pfn_range_for_nid(unsigned int nid, |
@@ -1071,6 +1089,7 @@ extern void zone_pcp_update(struct zone *zone); | |||
1071 | 1089 | ||
1072 | /* nommu.c */ | 1090 | /* nommu.c */ |
1073 | extern atomic_long_t mmap_pages_allocated; | 1091 | extern atomic_long_t mmap_pages_allocated; |
1092 | extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); | ||
1074 | 1093 | ||
1075 | /* prio_tree.c */ | 1094 | /* prio_tree.c */ |
1076 | void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); | 1095 | void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); |
@@ -1316,11 +1335,17 @@ extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim, | |||
1316 | size_t size); | 1335 | size_t size); |
1317 | extern void refund_locked_memory(struct mm_struct *mm, size_t size); | 1336 | extern void refund_locked_memory(struct mm_struct *mm, size_t size); |
1318 | 1337 | ||
1338 | enum mf_flags { | ||
1339 | MF_COUNT_INCREASED = 1 << 0, | ||
1340 | }; | ||
1319 | extern void memory_failure(unsigned long pfn, int trapno); | 1341 | extern void memory_failure(unsigned long pfn, int trapno); |
1320 | extern int __memory_failure(unsigned long pfn, int trapno, int ref); | 1342 | extern int __memory_failure(unsigned long pfn, int trapno, int flags); |
1343 | extern int unpoison_memory(unsigned long pfn); | ||
1321 | extern int sysctl_memory_failure_early_kill; | 1344 | extern int sysctl_memory_failure_early_kill; |
1322 | extern int sysctl_memory_failure_recovery; | 1345 | extern int sysctl_memory_failure_recovery; |
1346 | extern void shake_page(struct page *p, int access); | ||
1323 | extern atomic_long_t mce_bad_pages; | 1347 | extern atomic_long_t mce_bad_pages; |
1348 | extern int soft_offline_page(struct page *page, int flags); | ||
1324 | 1349 | ||
1325 | #endif /* __KERNEL__ */ | 1350 | #endif /* __KERNEL__ */ |
1326 | #endif /* _LINUX_MM_H */ | 1351 | #endif /* _LINUX_MM_H */ |
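Editor's note: with PAGE_MAPPING_KSM added alongside PAGE_MAPPING_ANON, the two low bits of page->mapping now encode three cases, and page_rmapping() strips them off to give the neutral pointer. A small illustrative decoder, not actual kernel code, merely restating the scheme the mm.h comment describes:

#include <linux/mm.h>

static const char *mapping_kind(struct page *page)
{
	unsigned long m = (unsigned long)page->mapping;

	if (!m)
		return "no mapping set";
	if ((m & PAGE_MAPPING_FLAGS) == (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM))
		return "KSM: page_rmapping() is the stable tree node";
	if (m & PAGE_MAPPING_ANON)
		return "anonymous: page_rmapping() is the anon_vma";
	return "file-backed: mapping is the address_space";
}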
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 84a524afb3dc..36f96271306c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -122,7 +122,9 @@ struct vm_region { | |||
122 | unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ | 122 | unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ |
123 | struct file *vm_file; /* the backing file or NULL */ | 123 | struct file *vm_file; /* the backing file or NULL */ |
124 | 124 | ||
125 | atomic_t vm_usage; /* region usage count */ | 125 | int vm_usage; /* region usage count (access under nommu_region_sem) */ |
126 | bool vm_icache_flushed : 1; /* true if the icache has been flushed for | ||
127 | * this region */ | ||
126 | }; | 128 | }; |
127 | 129 | ||
128 | /* | 130 | /* |
@@ -203,10 +205,12 @@ struct mm_struct { | |||
203 | struct vm_area_struct * mmap; /* list of VMAs */ | 205 | struct vm_area_struct * mmap; /* list of VMAs */ |
204 | struct rb_root mm_rb; | 206 | struct rb_root mm_rb; |
205 | struct vm_area_struct * mmap_cache; /* last find_vma result */ | 207 | struct vm_area_struct * mmap_cache; /* last find_vma result */ |
208 | #ifdef CONFIG_MMU | ||
206 | unsigned long (*get_unmapped_area) (struct file *filp, | 209 | unsigned long (*get_unmapped_area) (struct file *filp, |
207 | unsigned long addr, unsigned long len, | 210 | unsigned long addr, unsigned long len, |
208 | unsigned long pgoff, unsigned long flags); | 211 | unsigned long pgoff, unsigned long flags); |
209 | void (*unmap_area) (struct mm_struct *mm, unsigned long addr); | 212 | void (*unmap_area) (struct mm_struct *mm, unsigned long addr); |
213 | #endif | ||
210 | unsigned long mmap_base; /* base of mmap area */ | 214 | unsigned long mmap_base; /* base of mmap area */ |
211 | unsigned long task_size; /* size of task vm space */ | 215 | unsigned long task_size; /* size of task vm space */ |
212 | unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ | 216 | unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ |
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 8a5509877192..ee24ef8ab616 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef LINUX_MM_DEBUG_H | 1 | #ifndef LINUX_MM_DEBUG_H |
2 | #define LINUX_MM_DEBUG_H 1 | 2 | #define LINUX_MM_DEBUG_H 1 |
3 | 3 | ||
4 | #include <linux/autoconf.h> | ||
5 | |||
6 | #ifdef CONFIG_DEBUG_VM | 4 | #ifdef CONFIG_DEBUG_VM |
7 | #define VM_BUG_ON(cond) BUG_ON(cond) | 5 | #define VM_BUG_ON(cond) BUG_ON(cond) |
8 | #else | 6 | #else |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6f7561730d88..30fe668c2542 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <linux/seqlock.h> | 15 | #include <linux/seqlock.h> |
16 | #include <linux/nodemask.h> | 16 | #include <linux/nodemask.h> |
17 | #include <linux/pageblock-flags.h> | 17 | #include <linux/pageblock-flags.h> |
18 | #include <linux/bounds.h> | 18 | #include <generated/bounds.h> |
19 | #include <asm/atomic.h> | 19 | #include <asm/atomic.h> |
20 | #include <asm/page.h> | 20 | #include <asm/page.h> |
21 | 21 | ||
diff --git a/include/linux/module.h b/include/linux/module.h index 482efc865acf..6cb1a3cab5d3 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -25,8 +25,10 @@ | |||
25 | /* Not Yet Implemented */ | 25 | /* Not Yet Implemented */ |
26 | #define MODULE_SUPPORTED_DEVICE(name) | 26 | #define MODULE_SUPPORTED_DEVICE(name) |
27 | 27 | ||
28 | /* some toolchains uses a `_' prefix for all user symbols */ | 28 | /* Some toolchains use a `_' prefix for all user symbols. */ |
29 | #ifndef MODULE_SYMBOL_PREFIX | 29 | #ifdef CONFIG_SYMBOL_PREFIX |
30 | #define MODULE_SYMBOL_PREFIX CONFIG_SYMBOL_PREFIX | ||
31 | #else | ||
30 | #define MODULE_SYMBOL_PREFIX "" | 32 | #define MODULE_SYMBOL_PREFIX "" |
31 | #endif | 33 | #endif |
32 | 34 | ||
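Editor's note: MODULE_SYMBOL_PREFIX now comes from the CONFIG_SYMBOL_PREFIX string rather than an externally supplied define, and it still pastes onto symbol-name string literals at compile time. A trivial, purely illustrative use (the symbol name is hypothetical):

#include <linux/module.h>

/* Expands to "_my_export" on a CONFIG_SYMBOL_PREFIX="_" build, else "my_export". */
static const char my_export_name[] = MODULE_SYMBOL_PREFIX "my_export";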
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index fff8c53e5434..9c3757c5759d 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h | |||
@@ -19,22 +19,21 @@ | |||
19 | 19 | ||
20 | /** | 20 | /** |
21 | * struct nand_bbt_descr - bad block table descriptor | 21 | * struct nand_bbt_descr - bad block table descriptor |
22 | * @options: options for this descriptor | 22 | * @options: options for this descriptor |
23 | * @pages: the page(s) where we find the bbt, used with | 23 | * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE |
24 | * option BBT_ABSPAGE when bbt is searched, | 24 | * when bbt is searched, then we store the found bbts pages here. |
25 | * then we store the found bbts pages here. | 25 | * Its an array and supports up to 8 chips now |
26 | * Its an array and supports up to 8 chips now | 26 | * @offs: offset of the pattern in the oob area of the page |
27 | * @offs: offset of the pattern in the oob area of the page | 27 | * @veroffs: offset of the bbt version counter in the oob are of the page |
28 | * @veroffs: offset of the bbt version counter in the oob area of the page | 28 | * @version: version read from the bbt page during scan |
29 | * @version: version read from the bbt page during scan | 29 | * @len: length of the pattern, if 0 no pattern check is performed |
30 | * @len: length of the pattern, if 0 no pattern check is performed | 30 | * @maxblocks: maximum number of blocks to search for a bbt. This number of |
31 | * @maxblocks: maximum number of blocks to search for a bbt. This | 31 | * blocks is reserved at the end of the device where the tables are |
32 | * number of blocks is reserved at the end of the device | 32 | * written. |
33 | * where the tables are written. | 33 | * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than |
34 | * @reserved_block_code: if non-0, this pattern denotes a reserved | 34 | * bad) block in the stored bbt |
35 | * (rather than bad) block in the stored bbt | 35 | * @pattern: pattern to identify bad block table or factory marked good / |
36 | * @pattern: pattern to identify bad block table or factory marked | 36 | * bad blocks, can be NULL, if len = 0 |
37 | * good / bad blocks, can be NULL, if len = 0 | ||
38 | * | 37 | * |
39 | * Descriptor for the bad block table marker and the descriptor for the | 38 | * Descriptor for the bad block table marker and the descriptor for the |
40 | * pattern which identifies good and bad blocks. The assumption is made | 39 | * pattern which identifies good and bad blocks. The assumption is made |
@@ -90,7 +89,9 @@ struct nand_bbt_descr { | |||
90 | /* | 89 | /* |
91 | * Constants for oob configuration | 90 | * Constants for oob configuration |
92 | */ | 91 | */ |
93 | #define ONENAND_BADBLOCK_POS 0 | 92 | #define NAND_SMALL_BADBLOCK_POS 5 |
93 | #define NAND_LARGE_BADBLOCK_POS 0 | ||
94 | #define ONENAND_BADBLOCK_POS 0 | ||
94 | 95 | ||
95 | /* | 96 | /* |
96 | * Bad block scanning errors | 97 | * Bad block scanning errors |
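Editor's note: this kernel-doc is now the single copy (the duplicate in nand.h is removed further down), and the NAND_SMALL/LARGE_BADBLOCK_POS constants land here beside it. A hedged example of a nand_bbt_descr a board driver might supply, assuming the usual NAND_BBT_* option flags from this header; the pattern and flag choice are illustrative:

#include <linux/types.h>
#include <linux/mtd/bbm.h>

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

/* Factory bad-block marker at byte 0 of OOB on large-page chips,
 * checked on the first and second page of each block. */
static struct nand_bbt_descr board_badblock_pattern = {
	.options	= NAND_BBT_SCAN2NDPAGE,
	.offs		= NAND_LARGE_BADBLOCK_POS,
	.len		= 2,
	.pattern	= scan_ff_pattern,
};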
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 88d3d8fbf9f2..df89f4275232 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h | |||
@@ -518,10 +518,11 @@ struct cfi_fixup { | |||
518 | #define CFI_MFR_ANY 0xffff | 518 | #define CFI_MFR_ANY 0xffff |
519 | #define CFI_ID_ANY 0xffff | 519 | #define CFI_ID_ANY 0xffff |
520 | 520 | ||
521 | #define CFI_MFR_AMD 0x0001 | 521 | #define CFI_MFR_AMD 0x0001 |
522 | #define CFI_MFR_ATMEL 0x001F | 522 | #define CFI_MFR_INTEL 0x0089 |
523 | #define CFI_MFR_SAMSUNG 0x00EC | 523 | #define CFI_MFR_ATMEL 0x001F |
524 | #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ | 524 | #define CFI_MFR_SAMSUNG 0x00EC |
525 | #define CFI_MFR_ST 0x0020 /* STMicroelectronics */ | ||
525 | 526 | ||
526 | void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); | 527 | void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups); |
527 | 528 | ||
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index d4f38c5fd44e..d0bf422ae374 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h | |||
@@ -38,6 +38,15 @@ typedef enum { | |||
38 | FL_XIP_WHILE_ERASING, | 38 | FL_XIP_WHILE_ERASING, |
39 | FL_XIP_WHILE_WRITING, | 39 | FL_XIP_WHILE_WRITING, |
40 | FL_SHUTDOWN, | 40 | FL_SHUTDOWN, |
41 | /* These 2 come from nand_state_t, which has been unified here */ | ||
42 | FL_READING, | ||
43 | FL_CACHEDPRG, | ||
44 | /* These 4 come from onenand_state_t, which has been unified here */ | ||
45 | FL_RESETING, | ||
46 | FL_OTPING, | ||
47 | FL_PREPARING_ERASE, | ||
48 | FL_VERIFYING_ERASE, | ||
49 | |||
41 | FL_UNKNOWN | 50 | FL_UNKNOWN |
42 | } flstate_t; | 51 | } flstate_t; |
43 | 52 | ||
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 7a232a9bdd62..ccab9dfc5217 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -21,6 +21,8 @@ | |||
21 | #include <linux/wait.h> | 21 | #include <linux/wait.h> |
22 | #include <linux/spinlock.h> | 22 | #include <linux/spinlock.h> |
23 | #include <linux/mtd/mtd.h> | 23 | #include <linux/mtd/mtd.h> |
24 | #include <linux/mtd/flashchip.h> | ||
25 | #include <linux/mtd/bbm.h> | ||
24 | 26 | ||
25 | struct mtd_info; | 27 | struct mtd_info; |
26 | /* Scan and identify a NAND device */ | 28 | /* Scan and identify a NAND device */ |
@@ -168,7 +170,6 @@ typedef enum { | |||
168 | /* Chip does not allow subpage writes */ | 170 | /* Chip does not allow subpage writes */ |
169 | #define NAND_NO_SUBPAGE_WRITE 0x00000200 | 171 | #define NAND_NO_SUBPAGE_WRITE 0x00000200 |
170 | 172 | ||
171 | |||
172 | /* Options valid for Samsung large page devices */ | 173 | /* Options valid for Samsung large page devices */ |
173 | #define NAND_SAMSUNG_LP_OPTIONS \ | 174 | #define NAND_SAMSUNG_LP_OPTIONS \ |
174 | (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK) | 175 | (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK) |
@@ -194,6 +195,9 @@ typedef enum { | |||
194 | /* This option is defined if the board driver allocates its own buffers | 195 | /* This option is defined if the board driver allocates its own buffers |
195 | (e.g. because it needs them DMA-coherent */ | 196 | (e.g. because it needs them DMA-coherent */ |
196 | #define NAND_OWN_BUFFERS 0x00040000 | 197 | #define NAND_OWN_BUFFERS 0x00040000 |
198 | /* Chip may not exist, so silence any errors in scan */ | ||
199 | #define NAND_SCAN_SILENT_NODEV 0x00080000 | ||
200 | |||
197 | /* Options set by nand scan */ | 201 | /* Options set by nand scan */ |
198 | /* Nand scan has allocated controller struct */ | 202 | /* Nand scan has allocated controller struct */ |
199 | #define NAND_CONTROLLER_ALLOC 0x80000000 | 203 | #define NAND_CONTROLLER_ALLOC 0x80000000 |
@@ -202,20 +206,6 @@ typedef enum { | |||
202 | #define NAND_CI_CHIPNR_MSK 0x03 | 206 | #define NAND_CI_CHIPNR_MSK 0x03 |
203 | #define NAND_CI_CELLTYPE_MSK 0x0C | 207 | #define NAND_CI_CELLTYPE_MSK 0x0C |
204 | 208 | ||
205 | /* | ||
206 | * nand_state_t - chip states | ||
207 | * Enumeration for NAND flash chip state | ||
208 | */ | ||
209 | typedef enum { | ||
210 | FL_READY, | ||
211 | FL_READING, | ||
212 | FL_WRITING, | ||
213 | FL_ERASING, | ||
214 | FL_SYNCING, | ||
215 | FL_CACHEDPRG, | ||
216 | FL_PM_SUSPENDED, | ||
217 | } nand_state_t; | ||
218 | |||
219 | /* Keep gcc happy */ | 209 | /* Keep gcc happy */ |
220 | struct nand_chip; | 210 | struct nand_chip; |
221 | 211 | ||
@@ -402,7 +392,7 @@ struct nand_chip { | |||
402 | uint8_t cellinfo; | 392 | uint8_t cellinfo; |
403 | int badblockpos; | 393 | int badblockpos; |
404 | 394 | ||
405 | nand_state_t state; | 395 | flstate_t state; |
406 | 396 | ||
407 | uint8_t *oob_poi; | 397 | uint8_t *oob_poi; |
408 | struct nand_hw_control *controller; | 398 | struct nand_hw_control *controller; |
@@ -470,75 +460,6 @@ struct nand_manufacturers { | |||
470 | extern struct nand_flash_dev nand_flash_ids[]; | 460 | extern struct nand_flash_dev nand_flash_ids[]; |
471 | extern struct nand_manufacturers nand_manuf_ids[]; | 461 | extern struct nand_manufacturers nand_manuf_ids[]; |
472 | 462 | ||
473 | /** | ||
474 | * struct nand_bbt_descr - bad block table descriptor | ||
475 | * @options: options for this descriptor | ||
476 | * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE | ||
477 | * when bbt is searched, then we store the found bbts pages here. | ||
478 | * Its an array and supports up to 8 chips now | ||
479 | * @offs: offset of the pattern in the oob area of the page | ||
480 | * @veroffs: offset of the bbt version counter in the oob are of the page | ||
481 | * @version: version read from the bbt page during scan | ||
482 | * @len: length of the pattern, if 0 no pattern check is performed | ||
483 | * @maxblocks: maximum number of blocks to search for a bbt. This number of | ||
484 | * blocks is reserved at the end of the device where the tables are | ||
485 | * written. | ||
486 | * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than | ||
487 | * bad) block in the stored bbt | ||
488 | * @pattern: pattern to identify bad block table or factory marked good / | ||
489 | * bad blocks, can be NULL, if len = 0 | ||
490 | * | ||
491 | * Descriptor for the bad block table marker and the descriptor for the | ||
492 | * pattern which identifies good and bad blocks. The assumption is made | ||
493 | * that the pattern and the version count are always located in the oob area | ||
494 | * of the first block. | ||
495 | */ | ||
496 | struct nand_bbt_descr { | ||
497 | int options; | ||
498 | int pages[NAND_MAX_CHIPS]; | ||
499 | int offs; | ||
500 | int veroffs; | ||
501 | uint8_t version[NAND_MAX_CHIPS]; | ||
502 | int len; | ||
503 | int maxblocks; | ||
504 | int reserved_block_code; | ||
505 | uint8_t *pattern; | ||
506 | }; | ||
507 | |||
508 | /* Options for the bad block table descriptors */ | ||
509 | |||
510 | /* The number of bits used per block in the bbt on the device */ | ||
511 | #define NAND_BBT_NRBITS_MSK 0x0000000F | ||
512 | #define NAND_BBT_1BIT 0x00000001 | ||
513 | #define NAND_BBT_2BIT 0x00000002 | ||
514 | #define NAND_BBT_4BIT 0x00000004 | ||
515 | #define NAND_BBT_8BIT 0x00000008 | ||
516 | /* The bad block table is in the last good block of the device */ | ||
517 | #define NAND_BBT_LASTBLOCK 0x00000010 | ||
518 | /* The bbt is at the given page, else we must scan for the bbt */ | ||
519 | #define NAND_BBT_ABSPAGE 0x00000020 | ||
520 | /* The bbt is at the given page, else we must scan for the bbt */ | ||
521 | #define NAND_BBT_SEARCH 0x00000040 | ||
522 | /* bbt is stored per chip on multichip devices */ | ||
523 | #define NAND_BBT_PERCHIP 0x00000080 | ||
524 | /* bbt has a version counter at offset veroffs */ | ||
525 | #define NAND_BBT_VERSION 0x00000100 | ||
526 | /* Create a bbt if none axists */ | ||
527 | #define NAND_BBT_CREATE 0x00000200 | ||
528 | /* Search good / bad pattern through all pages of a block */ | ||
529 | #define NAND_BBT_SCANALLPAGES 0x00000400 | ||
530 | /* Scan block empty during good / bad block scan */ | ||
531 | #define NAND_BBT_SCANEMPTY 0x00000800 | ||
532 | /* Write bbt if neccecary */ | ||
533 | #define NAND_BBT_WRITE 0x00001000 | ||
534 | /* Read and write back block contents when writing bbt */ | ||
535 | #define NAND_BBT_SAVECONTENT 0x00002000 | ||
536 | /* Search good / bad pattern on the first and the second page */ | ||
537 | #define NAND_BBT_SCAN2NDPAGE 0x00004000 | ||
538 | |||
539 | /* The maximum number of blocks to scan for a bbt */ | ||
540 | #define NAND_BBT_SCAN_MAXBLOCKS 4 | ||
541 | |||
542 | extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd); | 463 | extern int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd); |
543 | extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs); | 464 | extern int nand_update_bbt(struct mtd_info *mtd, loff_t offs); |
544 | extern int nand_default_bbt(struct mtd_info *mtd); | 465 | extern int nand_default_bbt(struct mtd_info *mtd); |
@@ -548,12 +469,6 @@ extern int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, | |||
548 | extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, | 469 | extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, |
549 | size_t * retlen, uint8_t * buf); | 470 | size_t * retlen, uint8_t * buf); |
550 | 471 | ||
551 | /* | ||
552 | * Constants for oob configuration | ||
553 | */ | ||
554 | #define NAND_SMALL_BADBLOCK_POS 5 | ||
555 | #define NAND_LARGE_BADBLOCK_POS 0 | ||
556 | |||
557 | /** | 472 | /** |
558 | * struct platform_nand_chip - chip level device structure | 473 | * struct platform_nand_chip - chip level device structure |
559 | * @nr_chips: max. number of chips to scan for | 474 | * @nr_chips: max. number of chips to scan for |
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 052ea8ca2434..41bc013571d0 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h | |||
@@ -16,7 +16,13 @@ | |||
16 | struct mtd_info; | 16 | struct mtd_info; |
17 | 17 | ||
18 | /* | 18 | /* |
19 | * Calculate 3 byte ECC code for 256 byte block | 19 | * Calculate 3 byte ECC code for eccsize byte block |
20 | */ | ||
21 | void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize, | ||
22 | u_char *ecc_code); | ||
23 | |||
24 | /* | ||
25 | * Calculate 3 byte ECC code for 256/512 byte block | ||
20 | */ | 26 | */ |
21 | int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); | 27 | int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); |
22 | 28 | ||
@@ -27,7 +33,7 @@ int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc, | |||
27 | unsigned int eccsize); | 33 | unsigned int eccsize); |
28 | 34 | ||
29 | /* | 35 | /* |
30 | * Detect and correct a 1 bit error for 256 byte block | 36 | * Detect and correct a 1 bit error for 256/512 byte block |
31 | */ | 37 | */ |
32 | int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); | 38 | int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); |
33 | 39 | ||
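Editor's note: __nand_calculate_ecc() and __nand_correct_data() now take the block size explicitly, so the same software ECC serves both 256- and 512-byte steps. A hedged sketch of round-tripping one 512-byte block; the buffers and caller are invented:

#include <linux/mtd/nand_ecc.h>

static int check_block_512(u_char *read_back, const u_char *written)
{
	u_char ecc_stored[3], ecc_calc[3];

	__nand_calculate_ecc(written, 512, ecc_stored);	/* at write time */
	__nand_calculate_ecc(read_back, 512, ecc_calc);	/* at read time */

	/* 0 or 1 corrected bitflips on success, negative if uncorrectable */
	return __nand_correct_data(read_back, ecc_stored, ecc_calc, 512);
}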
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index 4e49f3350678..5509eb06b326 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * linux/include/linux/mtd/onenand.h | 2 | * linux/include/linux/mtd/onenand.h |
3 | * | 3 | * |
4 | * Copyright (C) 2005-2007 Samsung Electronics | 4 | * Copyright © 2005-2009 Samsung Electronics |
5 | * Kyungmin Park <kyungmin.park@samsung.com> | 5 | * Kyungmin Park <kyungmin.park@samsung.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/completion.h> | 16 | #include <linux/completion.h> |
17 | #include <linux/mtd/flashchip.h> | ||
17 | #include <linux/mtd/onenand_regs.h> | 18 | #include <linux/mtd/onenand_regs.h> |
18 | #include <linux/mtd/bbm.h> | 19 | #include <linux/mtd/bbm.h> |
19 | 20 | ||
@@ -25,22 +26,6 @@ extern int onenand_scan(struct mtd_info *mtd, int max_chips); | |||
25 | /* Free resources held by the OneNAND device */ | 26 | /* Free resources held by the OneNAND device */ |
26 | extern void onenand_release(struct mtd_info *mtd); | 27 | extern void onenand_release(struct mtd_info *mtd); |
27 | 28 | ||
28 | /* | ||
29 | * onenand_state_t - chip states | ||
30 | * Enumeration for OneNAND flash chip state | ||
31 | */ | ||
32 | typedef enum { | ||
33 | FL_READY, | ||
34 | FL_READING, | ||
35 | FL_WRITING, | ||
36 | FL_ERASING, | ||
37 | FL_SYNCING, | ||
38 | FL_LOCKING, | ||
39 | FL_RESETING, | ||
40 | FL_OTPING, | ||
41 | FL_PM_SUSPENDED, | ||
42 | } onenand_state_t; | ||
43 | |||
44 | /** | 29 | /** |
45 | * struct onenand_bufferram - OneNAND BufferRAM Data | 30 | * struct onenand_bufferram - OneNAND BufferRAM Data |
46 | * @blockpage: block & page address in BufferRAM | 31 | * @blockpage: block & page address in BufferRAM |
@@ -137,7 +122,7 @@ struct onenand_chip { | |||
137 | 122 | ||
138 | spinlock_t chip_lock; | 123 | spinlock_t chip_lock; |
139 | wait_queue_head_t wq; | 124 | wait_queue_head_t wq; |
140 | onenand_state_t state; | 125 | flstate_t state; |
141 | unsigned char *page_buf; | 126 | unsigned char *page_buf; |
142 | unsigned char *oob_buf; | 127 | unsigned char *oob_buf; |
143 | 128 | ||
@@ -152,6 +137,8 @@ struct onenand_chip { | |||
152 | /* | 137 | /* |
153 | * Helper macros | 138 | * Helper macros |
154 | */ | 139 | */ |
140 | #define ONENAND_PAGES_PER_BLOCK (1<<6) | ||
141 | |||
155 | #define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index) | 142 | #define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index) |
156 | #define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1) | 143 | #define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1) |
157 | #define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1) | 144 | #define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1) |
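A short sketch of how the BufferRAM helper macros above are used: the two BufferRAMs are selected by a single index bit, so the helpers are just XOR operations on this->bufferram_index (and ONENAND_PAGES_PER_BLOCK is 1 << 6, i.e. 64 pages per erase block). The function below is illustrative only.

#include <linux/mtd/onenand.h>

static void example_switch_bufferram(struct onenand_chip *this)
{
	int cur  = ONENAND_CURRENT_BUFFERRAM(this);	/* 0 or 1 */
	int next = ONENAND_NEXT_BUFFERRAM(this);	/* the other one */

	/* ... issue a load into BufferRAM 'next' while 'cur' is consumed ... */

	ONENAND_SET_NEXT_BUFFERRAM(this);		/* flip the index bit */

	(void)cur;
	(void)next;
}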
diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index acadbf53a69f..cd6f3b431195 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h | |||
@@ -131,6 +131,8 @@ | |||
131 | #define ONENAND_CMD_LOCK_TIGHT (0x2C) | 131 | #define ONENAND_CMD_LOCK_TIGHT (0x2C) |
132 | #define ONENAND_CMD_UNLOCK_ALL (0x27) | 132 | #define ONENAND_CMD_UNLOCK_ALL (0x27) |
133 | #define ONENAND_CMD_ERASE (0x94) | 133 | #define ONENAND_CMD_ERASE (0x94) |
134 | #define ONENAND_CMD_MULTIBLOCK_ERASE (0x95) | ||
135 | #define ONENAND_CMD_ERASE_VERIFY (0x71) | ||
134 | #define ONENAND_CMD_RESET (0xF0) | 136 | #define ONENAND_CMD_RESET (0xF0) |
135 | #define ONENAND_CMD_OTP_ACCESS (0x65) | 137 | #define ONENAND_CMD_OTP_ACCESS (0x65) |
136 | #define ONENAND_CMD_READID (0x90) | 138 | #define ONENAND_CMD_READID (0x90) |
diff --git a/include/linux/mtd/pismo.h b/include/linux/mtd/pismo.h new file mode 100644 index 000000000000..8dfb7e1421c5 --- /dev/null +++ b/include/linux/mtd/pismo.h | |||
@@ -0,0 +1,17 @@ | |||
1 | /* | ||
2 | * PISMO memory driver - http://www.pismoworld.org/ | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License. | ||
7 | */ | ||
8 | #ifndef __LINUX_MTD_PISMO_H | ||
9 | #define __LINUX_MTD_PISMO_H | ||
10 | |||
11 | struct pismo_pdata { | ||
12 | void (*set_vpp)(void *, int); | ||
13 | void *vpp_data; | ||
14 | phys_addr_t cs_addrs[5]; | ||
15 | }; | ||
16 | |||
17 | #endif | ||
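A sketch of board-level platform data for the new pismo.h above; the callback, cookie and chip-select addresses below are invented for illustration only.

#include <linux/types.h>
#include <linux/mtd/pismo.h>

/* Hypothetical board callback driving the VPP enable line, e.g. via a GPIO. */
static void example_board_set_vpp(void *data, int on)
{
	/* board-specific VPP control */
}

static struct pismo_pdata example_pismo_pdata = {
	.set_vpp  = example_board_set_vpp,
	.vpp_data = NULL,
	/* one physical base address per PISMO chip select (made-up values) */
	.cs_addrs = { 0x10000000, 0x14000000, 0x18000000,
		      0x1c000000, 0x20000000 },
};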
diff --git a/include/linux/namei.h b/include/linux/namei.h index 028946750289..05b441d93642 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
@@ -72,8 +72,6 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *, | |||
72 | 72 | ||
73 | extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, | 73 | extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, |
74 | int (*open)(struct inode *, struct file *)); | 74 | int (*open)(struct inode *, struct file *)); |
75 | extern struct file *nameidata_to_filp(struct nameidata *nd, int flags); | ||
76 | extern void release_open_intent(struct nameidata *); | ||
77 | 75 | ||
78 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); | 76 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); |
79 | 77 | ||
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 51071b335751..89b28812ec24 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _LINUX_NFS_XDR_H | 2 | #define _LINUX_NFS_XDR_H |
3 | 3 | ||
4 | #include <linux/nfsacl.h> | 4 | #include <linux/nfsacl.h> |
5 | #include <linux/nfs3.h> | ||
5 | 6 | ||
6 | /* | 7 | /* |
7 | * To change the maximum rsize and wsize supported by the NFS client, adjust | 8 | * To change the maximum rsize and wsize supported by the NFS client, adjust |
diff --git a/include/linux/nfsacl.h b/include/linux/nfsacl.h index 43011b69297c..f321b578edeb 100644 --- a/include/linux/nfsacl.h +++ b/include/linux/nfsacl.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #ifdef __KERNEL__ | 29 | #ifdef __KERNEL__ |
30 | 30 | ||
31 | #include <linux/posix_acl.h> | 31 | #include <linux/posix_acl.h> |
32 | #include <linux/sunrpc/xdr.h> | ||
32 | 33 | ||
33 | /* Maximum number of ACL entries over NFS */ | 34 | /* Maximum number of ACL entries over NFS */ |
34 | #define NFS_ACL_MAX_ENTRIES 1024 | 35 | #define NFS_ACL_MAX_ENTRIES 1024 |
diff --git a/include/linux/nfsd/cache.h b/include/linux/nfsd/cache.h deleted file mode 100644 index 3a3f58934f5e..000000000000 --- a/include/linux/nfsd/cache.h +++ /dev/null | |||
@@ -1,86 +0,0 @@ | |||
1 | /* | ||
2 | * include/linux/nfsd/cache.h | ||
3 | * | ||
4 | * Request reply cache. This was heavily inspired by the | ||
5 | * implementation in 4.3BSD/4.4BSD. | ||
6 | * | ||
7 | * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> | ||
8 | */ | ||
9 | |||
10 | #ifndef NFSCACHE_H | ||
11 | #define NFSCACHE_H | ||
12 | |||
13 | #include <linux/in.h> | ||
14 | #include <linux/uio.h> | ||
15 | |||
16 | /* | ||
17 | * Representation of a reply cache entry. | ||
18 | */ | ||
19 | struct svc_cacherep { | ||
20 | struct hlist_node c_hash; | ||
21 | struct list_head c_lru; | ||
22 | |||
23 | unsigned char c_state, /* unused, inprog, done */ | ||
24 | c_type, /* status, buffer */ | ||
25 | c_secure : 1; /* req came from port < 1024 */ | ||
26 | struct sockaddr_in c_addr; | ||
27 | __be32 c_xid; | ||
28 | u32 c_prot; | ||
29 | u32 c_proc; | ||
30 | u32 c_vers; | ||
31 | unsigned long c_timestamp; | ||
32 | union { | ||
33 | struct kvec u_vec; | ||
34 | __be32 u_status; | ||
35 | } c_u; | ||
36 | }; | ||
37 | |||
38 | #define c_replvec c_u.u_vec | ||
39 | #define c_replstat c_u.u_status | ||
40 | |||
41 | /* cache entry states */ | ||
42 | enum { | ||
43 | RC_UNUSED, | ||
44 | RC_INPROG, | ||
45 | RC_DONE | ||
46 | }; | ||
47 | |||
48 | /* return values */ | ||
49 | enum { | ||
50 | RC_DROPIT, | ||
51 | RC_REPLY, | ||
52 | RC_DOIT, | ||
53 | RC_INTR | ||
54 | }; | ||
55 | |||
56 | /* | ||
57 | * Cache types. | ||
58 | * We may want to add more types one day, e.g. for diropres and | ||
59 | * attrstat replies. Using cache entries with fixed length instead | ||
60 | * of buffer pointers may be more efficient. | ||
61 | */ | ||
62 | enum { | ||
63 | RC_NOCACHE, | ||
64 | RC_REPLSTAT, | ||
65 | RC_REPLBUFF, | ||
66 | }; | ||
67 | |||
68 | /* | ||
69 | * If requests are retransmitted within this interval, they're dropped. | ||
70 | */ | ||
71 | #define RC_DELAY (HZ/5) | ||
72 | |||
73 | int nfsd_reply_cache_init(void); | ||
74 | void nfsd_reply_cache_shutdown(void); | ||
75 | int nfsd_cache_lookup(struct svc_rqst *, int); | ||
76 | void nfsd_cache_update(struct svc_rqst *, int, __be32 *); | ||
77 | |||
78 | #ifdef CONFIG_NFSD_V4 | ||
79 | void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp); | ||
80 | #else /* CONFIG_NFSD_V4 */ | ||
81 | static inline void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp) | ||
82 | { | ||
83 | } | ||
84 | #endif /* CONFIG_NFSD_V4 */ | ||
85 | |||
86 | #endif /* NFSCACHE_H */ | ||
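A sketch of the dispatch-side pattern implied by the reply-cache return values and prototypes in the removed cache.h above (the declarations are assumed to be in scope; the header now lives with the rest of the nfsd-private code). The proc() placeholder stands in for the actual NFS procedure.

/* Sketch: wrap request processing with the reply cache hooks. */
static int example_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
	switch (nfsd_cache_lookup(rqstp, RC_REPLSTAT)) {
	case RC_DROPIT:		/* recent retransmission: drop the request */
		return 0;
	case RC_REPLY:		/* cached reply has already been queued */
		return 1;
	case RC_DOIT:		/* new request: process it below */
		break;
	}

	/* ... run the procedure, filling *statp with the status ... */

	nfsd_cache_update(rqstp, RC_REPLSTAT, statp);	/* cache the result */
	return 1;
}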
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index a6d9ef2bb34a..8ae78a61eea4 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | # include <linux/types.h> | 13 | # include <linux/types.h> |
14 | #ifdef __KERNEL__ | 14 | #ifdef __KERNEL__ |
15 | # include <linux/in.h> | 15 | # include <linux/nfsd/nfsfh.h> |
16 | #endif | 16 | #endif |
17 | 17 | ||
18 | /* | 18 | /* |
@@ -39,11 +39,23 @@ | |||
39 | #define NFSEXP_FSID 0x2000 | 39 | #define NFSEXP_FSID 0x2000 |
40 | #define NFSEXP_CROSSMOUNT 0x4000 | 40 | #define NFSEXP_CROSSMOUNT 0x4000 |
41 | #define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ | 41 | #define NFSEXP_NOACL 0x8000 /* reserved for possible ACL related use */ |
42 | #define NFSEXP_ALLFLAGS 0xFE3F | 42 | /* |
43 | * The NFSEXP_V4ROOT flag causes the kernel to give access only to NFSv4 | ||
44 | * clients, and only to the single directory that is the root of the | ||
45 | * export; further lookup and readdir operations are treated as if every | ||
46 | * subdirectory was a mountpoint, and ignored if they are not themselves | ||
47 | * exported. This is used by nfsd and mountd to construct the NFSv4 | ||
48 | * pseudofilesystem, which provides access only to paths leading to each | ||
49 | * exported filesystem. | ||
50 | */ | ||
51 | #define NFSEXP_V4ROOT 0x10000 | ||
52 | /* All flags that we claim to support. (Note we don't support NOACL.) */ | ||
53 | #define NFSEXP_ALLFLAGS 0x17E3F | ||
43 | 54 | ||
44 | /* The flags that may vary depending on security flavor: */ | 55 | /* The flags that may vary depending on security flavor: */ |
45 | #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ | 56 | #define NFSEXP_SECINFO_FLAGS (NFSEXP_READONLY | NFSEXP_ROOTSQUASH \ |
46 | | NFSEXP_ALLSQUASH) | 57 | | NFSEXP_ALLSQUASH \ |
58 | | NFSEXP_INSECURE_PORT) | ||
47 | 59 | ||
48 | #ifdef __KERNEL__ | 60 | #ifdef __KERNEL__ |
49 | 61 | ||
@@ -108,7 +120,6 @@ struct svc_expkey { | |||
108 | struct path ek_path; | 120 | struct path ek_path; |
109 | }; | 121 | }; |
110 | 122 | ||
111 | #define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT)) | ||
112 | #define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) | 123 | #define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC)) |
113 | #define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) | 124 | #define EX_NOHIDE(exp) ((exp)->ex_flags & NFSEXP_NOHIDE) |
114 | #define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) | 125 | #define EX_WGATHER(exp) ((exp)->ex_flags & NFSEXP_GATHERED_WRITES) |
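A tiny sketch of how the new NFSEXP_V4ROOT bit would typically be tested on an export entry; the helper name is hypothetical, but ex_flags is the same field used by the EX_* macros above.

#include <linux/nfsd/export.h>

/* Hypothetical helper: true if this export is part of the NFSv4
 * pseudofilesystem root described in the comment above. */
static inline int example_ex_v4root(struct svc_export *exp)
{
	return exp->ex_flags & NFSEXP_V4ROOT;
}

Note that the new NFSEXP_ALLFLAGS value 0x17E3F is the old 0xFE3F with the unsupported NFSEXP_NOACL bit (0x8000) dropped and the new NFSEXP_V4ROOT bit (0x10000) added.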
diff --git a/include/linux/nfsd/nfsd.h b/include/linux/nfsd/nfsd.h deleted file mode 100644 index 510ffdd5020e..000000000000 --- a/include/linux/nfsd/nfsd.h +++ /dev/null | |||
@@ -1,424 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/linux/nfsd/nfsd.h | ||
3 | * | ||
4 | * Hodge-podge collection of knfsd-related stuff. | ||
5 | * I will sort this out later. | ||
6 | * | ||
7 | * Copyright (C) 1995-1997 Olaf Kirch <okir@monad.swb.de> | ||
8 | */ | ||
9 | |||
10 | #ifndef LINUX_NFSD_NFSD_H | ||
11 | #define LINUX_NFSD_NFSD_H | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <linux/unistd.h> | ||
15 | #include <linux/fs.h> | ||
16 | #include <linux/posix_acl.h> | ||
17 | #include <linux/mount.h> | ||
18 | |||
19 | #include <linux/nfsd/debug.h> | ||
20 | #include <linux/nfsd/nfsfh.h> | ||
21 | #include <linux/nfsd/export.h> | ||
22 | #include <linux/nfsd/stats.h> | ||
23 | /* | ||
24 | * nfsd version | ||
25 | */ | ||
26 | #define NFSD_SUPPORTED_MINOR_VERSION 1 | ||
27 | |||
28 | /* | ||
29 | * Flags for nfsd_permission | ||
30 | */ | ||
31 | #define NFSD_MAY_NOP 0 | ||
32 | #define NFSD_MAY_EXEC 1 /* == MAY_EXEC */ | ||
33 | #define NFSD_MAY_WRITE 2 /* == MAY_WRITE */ | ||
34 | #define NFSD_MAY_READ 4 /* == MAY_READ */ | ||
35 | #define NFSD_MAY_SATTR 8 | ||
36 | #define NFSD_MAY_TRUNC 16 | ||
37 | #define NFSD_MAY_LOCK 32 | ||
38 | #define NFSD_MAY_OWNER_OVERRIDE 64 | ||
39 | #define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/ | ||
40 | #define NFSD_MAY_BYPASS_GSS_ON_ROOT 256 | ||
41 | |||
42 | #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE) | ||
43 | #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC) | ||
44 | |||
45 | /* | ||
46 | * Callback function for readdir | ||
47 | */ | ||
48 | struct readdir_cd { | ||
49 | __be32 err; /* 0, nfserr, or nfserr_eof */ | ||
50 | }; | ||
51 | typedef int (*nfsd_dirop_t)(struct inode *, struct dentry *, int, int); | ||
52 | |||
53 | extern struct svc_program nfsd_program; | ||
54 | extern struct svc_version nfsd_version2, nfsd_version3, | ||
55 | nfsd_version4; | ||
56 | extern u32 nfsd_supported_minorversion; | ||
57 | extern struct mutex nfsd_mutex; | ||
58 | extern struct svc_serv *nfsd_serv; | ||
59 | extern spinlock_t nfsd_drc_lock; | ||
60 | extern unsigned int nfsd_drc_max_mem; | ||
61 | extern unsigned int nfsd_drc_mem_used; | ||
62 | |||
63 | extern const struct seq_operations nfs_exports_op; | ||
64 | |||
65 | /* | ||
66 | * Function prototypes. | ||
67 | */ | ||
68 | int nfsd_svc(unsigned short port, int nrservs); | ||
69 | int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp); | ||
70 | |||
71 | int nfsd_nrthreads(void); | ||
72 | int nfsd_nrpools(void); | ||
73 | int nfsd_get_nrthreads(int n, int *); | ||
74 | int nfsd_set_nrthreads(int n, int *); | ||
75 | |||
76 | /* nfsd/vfs.c */ | ||
77 | int fh_lock_parent(struct svc_fh *, struct dentry *); | ||
78 | int nfsd_racache_init(int); | ||
79 | void nfsd_racache_shutdown(void); | ||
80 | int nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp, | ||
81 | struct svc_export **expp); | ||
82 | __be32 nfsd_lookup(struct svc_rqst *, struct svc_fh *, | ||
83 | const char *, unsigned int, struct svc_fh *); | ||
84 | __be32 nfsd_lookup_dentry(struct svc_rqst *, struct svc_fh *, | ||
85 | const char *, unsigned int, | ||
86 | struct svc_export **, struct dentry **); | ||
87 | __be32 nfsd_setattr(struct svc_rqst *, struct svc_fh *, | ||
88 | struct iattr *, int, time_t); | ||
89 | #ifdef CONFIG_NFSD_V4 | ||
90 | __be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *, | ||
91 | struct nfs4_acl *); | ||
92 | int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **); | ||
93 | #endif /* CONFIG_NFSD_V4 */ | ||
94 | __be32 nfsd_create(struct svc_rqst *, struct svc_fh *, | ||
95 | char *name, int len, struct iattr *attrs, | ||
96 | int type, dev_t rdev, struct svc_fh *res); | ||
97 | #ifdef CONFIG_NFSD_V3 | ||
98 | __be32 nfsd_access(struct svc_rqst *, struct svc_fh *, u32 *, u32 *); | ||
99 | __be32 nfsd_create_v3(struct svc_rqst *, struct svc_fh *, | ||
100 | char *name, int len, struct iattr *attrs, | ||
101 | struct svc_fh *res, int createmode, | ||
102 | u32 *verifier, int *truncp, int *created); | ||
103 | __be32 nfsd_commit(struct svc_rqst *, struct svc_fh *, | ||
104 | loff_t, unsigned long); | ||
105 | #endif /* CONFIG_NFSD_V3 */ | ||
106 | __be32 nfsd_open(struct svc_rqst *, struct svc_fh *, int, | ||
107 | int, struct file **); | ||
108 | void nfsd_close(struct file *); | ||
109 | __be32 nfsd_read(struct svc_rqst *, struct svc_fh *, struct file *, | ||
110 | loff_t, struct kvec *, int, unsigned long *); | ||
111 | __be32 nfsd_write(struct svc_rqst *, struct svc_fh *,struct file *, | ||
112 | loff_t, struct kvec *,int, unsigned long *, int *); | ||
113 | __be32 nfsd_readlink(struct svc_rqst *, struct svc_fh *, | ||
114 | char *, int *); | ||
115 | __be32 nfsd_symlink(struct svc_rqst *, struct svc_fh *, | ||
116 | char *name, int len, char *path, int plen, | ||
117 | struct svc_fh *res, struct iattr *); | ||
118 | __be32 nfsd_link(struct svc_rqst *, struct svc_fh *, | ||
119 | char *, int, struct svc_fh *); | ||
120 | __be32 nfsd_rename(struct svc_rqst *, | ||
121 | struct svc_fh *, char *, int, | ||
122 | struct svc_fh *, char *, int); | ||
123 | __be32 nfsd_remove(struct svc_rqst *, | ||
124 | struct svc_fh *, char *, int); | ||
125 | __be32 nfsd_unlink(struct svc_rqst *, struct svc_fh *, int type, | ||
126 | char *name, int len); | ||
127 | int nfsd_truncate(struct svc_rqst *, struct svc_fh *, | ||
128 | unsigned long size); | ||
129 | __be32 nfsd_readdir(struct svc_rqst *, struct svc_fh *, | ||
130 | loff_t *, struct readdir_cd *, filldir_t); | ||
131 | __be32 nfsd_statfs(struct svc_rqst *, struct svc_fh *, | ||
132 | struct kstatfs *, int access); | ||
133 | |||
134 | int nfsd_notify_change(struct inode *, struct iattr *); | ||
135 | __be32 nfsd_permission(struct svc_rqst *, struct svc_export *, | ||
136 | struct dentry *, int); | ||
137 | int nfsd_sync_dir(struct dentry *dp); | ||
138 | |||
139 | #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) | ||
140 | #ifdef CONFIG_NFSD_V2_ACL | ||
141 | extern struct svc_version nfsd_acl_version2; | ||
142 | #else | ||
143 | #define nfsd_acl_version2 NULL | ||
144 | #endif | ||
145 | #ifdef CONFIG_NFSD_V3_ACL | ||
146 | extern struct svc_version nfsd_acl_version3; | ||
147 | #else | ||
148 | #define nfsd_acl_version3 NULL | ||
149 | #endif | ||
150 | struct posix_acl *nfsd_get_posix_acl(struct svc_fh *, int); | ||
151 | int nfsd_set_posix_acl(struct svc_fh *, int, struct posix_acl *); | ||
152 | #endif | ||
153 | |||
154 | enum vers_op {NFSD_SET, NFSD_CLEAR, NFSD_TEST, NFSD_AVAIL }; | ||
155 | int nfsd_vers(int vers, enum vers_op change); | ||
156 | int nfsd_minorversion(u32 minorversion, enum vers_op change); | ||
157 | void nfsd_reset_versions(void); | ||
158 | int nfsd_create_serv(void); | ||
159 | |||
160 | extern int nfsd_max_blksize; | ||
161 | |||
162 | /* | ||
163 | * NFSv4 State | ||
164 | */ | ||
165 | #ifdef CONFIG_NFSD_V4 | ||
166 | extern unsigned int max_delegations; | ||
167 | int nfs4_state_init(void); | ||
168 | void nfsd4_free_slabs(void); | ||
169 | int nfs4_state_start(void); | ||
170 | void nfs4_state_shutdown(void); | ||
171 | time_t nfs4_lease_time(void); | ||
172 | void nfs4_reset_lease(time_t leasetime); | ||
173 | int nfs4_reset_recoverydir(char *recdir); | ||
174 | #else | ||
175 | static inline int nfs4_state_init(void) { return 0; } | ||
176 | static inline void nfsd4_free_slabs(void) { } | ||
177 | static inline int nfs4_state_start(void) { return 0; } | ||
178 | static inline void nfs4_state_shutdown(void) { } | ||
179 | static inline time_t nfs4_lease_time(void) { return 0; } | ||
180 | static inline void nfs4_reset_lease(time_t leasetime) { } | ||
181 | static inline int nfs4_reset_recoverydir(char *recdir) { return 0; } | ||
182 | #endif | ||
183 | |||
184 | /* | ||
185 | * lockd binding | ||
186 | */ | ||
187 | void nfsd_lockd_init(void); | ||
188 | void nfsd_lockd_shutdown(void); | ||
189 | |||
190 | |||
191 | /* | ||
192 | * These macros provide pre-xdr'ed values for faster operation. | ||
193 | */ | ||
194 | #define nfs_ok cpu_to_be32(NFS_OK) | ||
195 | #define nfserr_perm cpu_to_be32(NFSERR_PERM) | ||
196 | #define nfserr_noent cpu_to_be32(NFSERR_NOENT) | ||
197 | #define nfserr_io cpu_to_be32(NFSERR_IO) | ||
198 | #define nfserr_nxio cpu_to_be32(NFSERR_NXIO) | ||
199 | #define nfserr_eagain cpu_to_be32(NFSERR_EAGAIN) | ||
200 | #define nfserr_acces cpu_to_be32(NFSERR_ACCES) | ||
201 | #define nfserr_exist cpu_to_be32(NFSERR_EXIST) | ||
202 | #define nfserr_xdev cpu_to_be32(NFSERR_XDEV) | ||
203 | #define nfserr_nodev cpu_to_be32(NFSERR_NODEV) | ||
204 | #define nfserr_notdir cpu_to_be32(NFSERR_NOTDIR) | ||
205 | #define nfserr_isdir cpu_to_be32(NFSERR_ISDIR) | ||
206 | #define nfserr_inval cpu_to_be32(NFSERR_INVAL) | ||
207 | #define nfserr_fbig cpu_to_be32(NFSERR_FBIG) | ||
208 | #define nfserr_nospc cpu_to_be32(NFSERR_NOSPC) | ||
209 | #define nfserr_rofs cpu_to_be32(NFSERR_ROFS) | ||
210 | #define nfserr_mlink cpu_to_be32(NFSERR_MLINK) | ||
211 | #define nfserr_opnotsupp cpu_to_be32(NFSERR_OPNOTSUPP) | ||
212 | #define nfserr_nametoolong cpu_to_be32(NFSERR_NAMETOOLONG) | ||
213 | #define nfserr_notempty cpu_to_be32(NFSERR_NOTEMPTY) | ||
214 | #define nfserr_dquot cpu_to_be32(NFSERR_DQUOT) | ||
215 | #define nfserr_stale cpu_to_be32(NFSERR_STALE) | ||
216 | #define nfserr_remote cpu_to_be32(NFSERR_REMOTE) | ||
217 | #define nfserr_wflush cpu_to_be32(NFSERR_WFLUSH) | ||
218 | #define nfserr_badhandle cpu_to_be32(NFSERR_BADHANDLE) | ||
219 | #define nfserr_notsync cpu_to_be32(NFSERR_NOT_SYNC) | ||
220 | #define nfserr_badcookie cpu_to_be32(NFSERR_BAD_COOKIE) | ||
221 | #define nfserr_notsupp cpu_to_be32(NFSERR_NOTSUPP) | ||
222 | #define nfserr_toosmall cpu_to_be32(NFSERR_TOOSMALL) | ||
223 | #define nfserr_serverfault cpu_to_be32(NFSERR_SERVERFAULT) | ||
224 | #define nfserr_badtype cpu_to_be32(NFSERR_BADTYPE) | ||
225 | #define nfserr_jukebox cpu_to_be32(NFSERR_JUKEBOX) | ||
226 | #define nfserr_denied cpu_to_be32(NFSERR_DENIED) | ||
227 | #define nfserr_deadlock cpu_to_be32(NFSERR_DEADLOCK) | ||
228 | #define nfserr_expired cpu_to_be32(NFSERR_EXPIRED) | ||
229 | #define nfserr_bad_cookie cpu_to_be32(NFSERR_BAD_COOKIE) | ||
230 | #define nfserr_same cpu_to_be32(NFSERR_SAME) | ||
231 | #define nfserr_clid_inuse cpu_to_be32(NFSERR_CLID_INUSE) | ||
232 | #define nfserr_stale_clientid cpu_to_be32(NFSERR_STALE_CLIENTID) | ||
233 | #define nfserr_resource cpu_to_be32(NFSERR_RESOURCE) | ||
234 | #define nfserr_moved cpu_to_be32(NFSERR_MOVED) | ||
235 | #define nfserr_nofilehandle cpu_to_be32(NFSERR_NOFILEHANDLE) | ||
236 | #define nfserr_minor_vers_mismatch cpu_to_be32(NFSERR_MINOR_VERS_MISMATCH) | ||
237 | #define nfserr_share_denied cpu_to_be32(NFSERR_SHARE_DENIED) | ||
238 | #define nfserr_stale_stateid cpu_to_be32(NFSERR_STALE_STATEID) | ||
239 | #define nfserr_old_stateid cpu_to_be32(NFSERR_OLD_STATEID) | ||
240 | #define nfserr_bad_stateid cpu_to_be32(NFSERR_BAD_STATEID) | ||
241 | #define nfserr_bad_seqid cpu_to_be32(NFSERR_BAD_SEQID) | ||
242 | #define nfserr_symlink cpu_to_be32(NFSERR_SYMLINK) | ||
243 | #define nfserr_not_same cpu_to_be32(NFSERR_NOT_SAME) | ||
244 | #define nfserr_restorefh cpu_to_be32(NFSERR_RESTOREFH) | ||
245 | #define nfserr_attrnotsupp cpu_to_be32(NFSERR_ATTRNOTSUPP) | ||
246 | #define nfserr_bad_xdr cpu_to_be32(NFSERR_BAD_XDR) | ||
247 | #define nfserr_openmode cpu_to_be32(NFSERR_OPENMODE) | ||
248 | #define nfserr_locks_held cpu_to_be32(NFSERR_LOCKS_HELD) | ||
249 | #define nfserr_op_illegal cpu_to_be32(NFSERR_OP_ILLEGAL) | ||
250 | #define nfserr_grace cpu_to_be32(NFSERR_GRACE) | ||
251 | #define nfserr_no_grace cpu_to_be32(NFSERR_NO_GRACE) | ||
252 | #define nfserr_reclaim_bad cpu_to_be32(NFSERR_RECLAIM_BAD) | ||
253 | #define nfserr_badname cpu_to_be32(NFSERR_BADNAME) | ||
254 | #define nfserr_cb_path_down cpu_to_be32(NFSERR_CB_PATH_DOWN) | ||
255 | #define nfserr_locked cpu_to_be32(NFSERR_LOCKED) | ||
256 | #define nfserr_wrongsec cpu_to_be32(NFSERR_WRONGSEC) | ||
257 | #define nfserr_badiomode cpu_to_be32(NFS4ERR_BADIOMODE) | ||
258 | #define nfserr_badlayout cpu_to_be32(NFS4ERR_BADLAYOUT) | ||
259 | #define nfserr_bad_session_digest cpu_to_be32(NFS4ERR_BAD_SESSION_DIGEST) | ||
260 | #define nfserr_badsession cpu_to_be32(NFS4ERR_BADSESSION) | ||
261 | #define nfserr_badslot cpu_to_be32(NFS4ERR_BADSLOT) | ||
262 | #define nfserr_complete_already cpu_to_be32(NFS4ERR_COMPLETE_ALREADY) | ||
263 | #define nfserr_conn_not_bound_to_session cpu_to_be32(NFS4ERR_CONN_NOT_BOUND_TO_SESSION) | ||
264 | #define nfserr_deleg_already_wanted cpu_to_be32(NFS4ERR_DELEG_ALREADY_WANTED) | ||
265 | #define nfserr_back_chan_busy cpu_to_be32(NFS4ERR_BACK_CHAN_BUSY) | ||
266 | #define nfserr_layouttrylater cpu_to_be32(NFS4ERR_LAYOUTTRYLATER) | ||
267 | #define nfserr_layoutunavailable cpu_to_be32(NFS4ERR_LAYOUTUNAVAILABLE) | ||
268 | #define nfserr_nomatching_layout cpu_to_be32(NFS4ERR_NOMATCHING_LAYOUT) | ||
269 | #define nfserr_recallconflict cpu_to_be32(NFS4ERR_RECALLCONFLICT) | ||
270 | #define nfserr_unknown_layouttype cpu_to_be32(NFS4ERR_UNKNOWN_LAYOUTTYPE) | ||
271 | #define nfserr_seq_misordered cpu_to_be32(NFS4ERR_SEQ_MISORDERED) | ||
272 | #define nfserr_sequence_pos cpu_to_be32(NFS4ERR_SEQUENCE_POS) | ||
273 | #define nfserr_req_too_big cpu_to_be32(NFS4ERR_REQ_TOO_BIG) | ||
274 | #define nfserr_rep_too_big cpu_to_be32(NFS4ERR_REP_TOO_BIG) | ||
275 | #define nfserr_rep_too_big_to_cache cpu_to_be32(NFS4ERR_REP_TOO_BIG_TO_CACHE) | ||
276 | #define nfserr_retry_uncached_rep cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP) | ||
277 | #define nfserr_unsafe_compound cpu_to_be32(NFS4ERR_UNSAFE_COMPOUND) | ||
278 | #define nfserr_too_many_ops cpu_to_be32(NFS4ERR_TOO_MANY_OPS) | ||
279 | #define nfserr_op_not_in_session cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION) | ||
280 | #define nfserr_hash_alg_unsupp cpu_to_be32(NFS4ERR_HASH_ALG_UNSUPP) | ||
281 | #define nfserr_clientid_busy cpu_to_be32(NFS4ERR_CLIENTID_BUSY) | ||
282 | #define nfserr_pnfs_io_hole cpu_to_be32(NFS4ERR_PNFS_IO_HOLE) | ||
283 | #define nfserr_seq_false_retry cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY) | ||
284 | #define nfserr_bad_high_slot cpu_to_be32(NFS4ERR_BAD_HIGH_SLOT) | ||
285 | #define nfserr_deadsession cpu_to_be32(NFS4ERR_DEADSESSION) | ||
286 | #define nfserr_encr_alg_unsupp cpu_to_be32(NFS4ERR_ENCR_ALG_UNSUPP) | ||
287 | #define nfserr_pnfs_no_layout cpu_to_be32(NFS4ERR_PNFS_NO_LAYOUT) | ||
288 | #define nfserr_not_only_op cpu_to_be32(NFS4ERR_NOT_ONLY_OP) | ||
289 | #define nfserr_wrong_cred cpu_to_be32(NFS4ERR_WRONG_CRED) | ||
290 | #define nfserr_wrong_type cpu_to_be32(NFS4ERR_WRONG_TYPE) | ||
291 | #define nfserr_dirdeleg_unavail cpu_to_be32(NFS4ERR_DIRDELEG_UNAVAIL) | ||
292 | #define nfserr_reject_deleg cpu_to_be32(NFS4ERR_REJECT_DELEG) | ||
293 | #define nfserr_returnconflict cpu_to_be32(NFS4ERR_RETURNCONFLICT) | ||
294 | #define nfserr_deleg_revoked cpu_to_be32(NFS4ERR_DELEG_REVOKED) | ||
295 | |||
296 | /* error codes for internal use */ | ||
297 | /* if a request fails due to kmalloc failure, it gets dropped. | ||
298 | * Client should resend eventually | ||
299 | */ | ||
300 | #define nfserr_dropit cpu_to_be32(30000) | ||
301 | /* end-of-file indicator in readdir */ | ||
302 | #define nfserr_eof cpu_to_be32(30001) | ||
303 | /* replay detected */ | ||
304 | #define nfserr_replay_me cpu_to_be32(11001) | ||
305 | /* nfs41 replay detected */ | ||
306 | #define nfserr_replay_cache cpu_to_be32(11002) | ||
307 | |||
308 | /* Check for dir entries '.' and '..' */ | ||
309 | #define isdotent(n, l) (l < 3 && n[0] == '.' && (l == 1 || n[1] == '.')) | ||
310 | |||
311 | /* | ||
312 | * Time of server startup | ||
313 | */ | ||
314 | extern struct timeval nfssvc_boot; | ||
315 | |||
316 | #ifdef CONFIG_NFSD_V4 | ||
317 | |||
318 | /* before processing a COMPOUND operation, we have to check that there | ||
319 | * is enough space in the buffer for XDR encode to succeed. otherwise, | ||
320 | * we might process an operation with side effects, and be unable to | ||
321 | * tell the client that the operation succeeded. | ||
322 | * | ||
323 | * COMPOUND_SLACK_SPACE - this is the minimum bytes of buffer space | ||
324 | * needed to encode an "ordinary" _successful_ operation. (GETATTR, | ||
325 | * READ, READDIR, and READLINK have their own buffer checks.) if we | ||
326 | * fall below this level, we fail the next operation with NFS4ERR_RESOURCE. | ||
327 | * | ||
328 | * COMPOUND_ERR_SLACK_SPACE - this is the minimum bytes of buffer space | ||
329 | * needed to encode an operation which has failed with NFS4ERR_RESOURCE. | ||
330 | * care is taken to ensure that we never fall below this level for any | ||
331 | * reason. | ||
332 | */ | ||
333 | #define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */ | ||
334 | #define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */ | ||
335 | |||
336 | #define NFSD_LEASE_TIME (nfs4_lease_time()) | ||
337 | #define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */ | ||
338 | |||
339 | /* | ||
340 | * The following attributes are currently not supported by the NFSv4 server: | ||
341 | * ARCHIVE (deprecated anyway) | ||
342 | * HIDDEN (unlikely to be supported any time soon) | ||
343 | * MIMETYPE (unlikely to be supported any time soon) | ||
344 | * QUOTA_* (will be supported in a forthcoming patch) | ||
345 | * SYSTEM (unlikely to be supported any time soon) | ||
346 | * TIME_BACKUP (unlikely to be supported any time soon) | ||
347 | * TIME_CREATE (unlikely to be supported any time soon) | ||
348 | */ | ||
349 | #define NFSD4_SUPPORTED_ATTRS_WORD0 \ | ||
350 | (FATTR4_WORD0_SUPPORTED_ATTRS | FATTR4_WORD0_TYPE | FATTR4_WORD0_FH_EXPIRE_TYPE \ | ||
351 | | FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE | FATTR4_WORD0_LINK_SUPPORT \ | ||
352 | | FATTR4_WORD0_SYMLINK_SUPPORT | FATTR4_WORD0_NAMED_ATTR | FATTR4_WORD0_FSID \ | ||
353 | | FATTR4_WORD0_UNIQUE_HANDLES | FATTR4_WORD0_LEASE_TIME | FATTR4_WORD0_RDATTR_ERROR \ | ||
354 | | FATTR4_WORD0_ACLSUPPORT | FATTR4_WORD0_CANSETTIME | FATTR4_WORD0_CASE_INSENSITIVE \ | ||
355 | | FATTR4_WORD0_CASE_PRESERVING | FATTR4_WORD0_CHOWN_RESTRICTED \ | ||
356 | | FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FILEID | FATTR4_WORD0_FILES_AVAIL \ | ||
357 | | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_HOMOGENEOUS \ | ||
358 | | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \ | ||
359 | | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL) | ||
360 | |||
361 | #define NFSD4_SUPPORTED_ATTRS_WORD1 \ | ||
362 | (FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \ | ||
363 | | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP | FATTR4_WORD1_RAWDEV \ | ||
364 | | FATTR4_WORD1_SPACE_AVAIL | FATTR4_WORD1_SPACE_FREE | FATTR4_WORD1_SPACE_TOTAL \ | ||
365 | | FATTR4_WORD1_SPACE_USED | FATTR4_WORD1_TIME_ACCESS | FATTR4_WORD1_TIME_ACCESS_SET \ | ||
366 | | FATTR4_WORD1_TIME_DELTA | FATTR4_WORD1_TIME_METADATA \ | ||
367 | | FATTR4_WORD1_TIME_MODIFY | FATTR4_WORD1_TIME_MODIFY_SET | FATTR4_WORD1_MOUNTED_ON_FILEID) | ||
368 | |||
369 | #define NFSD4_SUPPORTED_ATTRS_WORD2 0 | ||
370 | |||
371 | #define NFSD4_1_SUPPORTED_ATTRS_WORD0 \ | ||
372 | NFSD4_SUPPORTED_ATTRS_WORD0 | ||
373 | |||
374 | #define NFSD4_1_SUPPORTED_ATTRS_WORD1 \ | ||
375 | NFSD4_SUPPORTED_ATTRS_WORD1 | ||
376 | |||
377 | #define NFSD4_1_SUPPORTED_ATTRS_WORD2 \ | ||
378 | (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) | ||
379 | |||
380 | static inline u32 nfsd_suppattrs0(u32 minorversion) | ||
381 | { | ||
382 | return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 | ||
383 | : NFSD4_SUPPORTED_ATTRS_WORD0; | ||
384 | } | ||
385 | |||
386 | static inline u32 nfsd_suppattrs1(u32 minorversion) | ||
387 | { | ||
388 | return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD1 | ||
389 | : NFSD4_SUPPORTED_ATTRS_WORD1; | ||
390 | } | ||
391 | |||
392 | static inline u32 nfsd_suppattrs2(u32 minorversion) | ||
393 | { | ||
394 | return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD2 | ||
395 | : NFSD4_SUPPORTED_ATTRS_WORD2; | ||
396 | } | ||
397 | |||
398 | /* These will return ERR_INVAL if specified in GETATTR or READDIR. */ | ||
399 | #define NFSD_WRITEONLY_ATTRS_WORD1 \ | ||
400 | (FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) | ||
401 | |||
402 | /* These are the only attrs allowed in CREATE/OPEN/SETATTR. */ | ||
403 | #define NFSD_WRITEABLE_ATTRS_WORD0 \ | ||
404 | (FATTR4_WORD0_SIZE | FATTR4_WORD0_ACL ) | ||
405 | #define NFSD_WRITEABLE_ATTRS_WORD1 \ | ||
406 | (FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ | ||
407 | | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) | ||
408 | #define NFSD_WRITEABLE_ATTRS_WORD2 0 | ||
409 | |||
410 | #define NFSD_SUPPATTR_EXCLCREAT_WORD0 \ | ||
411 | NFSD_WRITEABLE_ATTRS_WORD0 | ||
412 | /* | ||
413 | * we currently store the exclusive create verifier in the v_{a,m}time | ||
414 | * attributes so the client can't set these at create time using EXCLUSIVE4_1 | ||
415 | */ | ||
416 | #define NFSD_SUPPATTR_EXCLCREAT_WORD1 \ | ||
417 | (NFSD_WRITEABLE_ATTRS_WORD1 & \ | ||
418 | ~(FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)) | ||
419 | #define NFSD_SUPPATTR_EXCLCREAT_WORD2 \ | ||
420 | NFSD_WRITEABLE_ATTRS_WORD2 | ||
421 | |||
422 | #endif /* CONFIG_NFSD_V4 */ | ||
423 | |||
424 | #endif /* LINUX_NFSD_NFSD_H */ | ||
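A hypothetical helper illustrating the pre-xdr'ed status values defined in the removed nfsd.h above: nfs_ok and the nfserr_* constants are already big-endian __be32, so a handler can return them straight into the XDR-encoded reply without a per-request cpu_to_be32(). The declarations (fh_verify, NFSD_MAY_READ, nfs_ok) are assumed to be in scope.

static __be32 example_check_read(struct svc_rqst *rqstp, struct svc_fh *fhp)
{
	__be32 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);

	if (err)
		return err;	/* e.g. nfserr_acces, already in network order */
	return nfs_ok;
}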
diff --git a/include/linux/nfsd/nfsfh.h b/include/linux/nfsd/nfsfh.h index 8f641c908450..65e333afaee4 100644 --- a/include/linux/nfsd/nfsfh.h +++ b/include/linux/nfsd/nfsfh.h | |||
@@ -16,11 +16,9 @@ | |||
16 | 16 | ||
17 | # include <linux/types.h> | 17 | # include <linux/types.h> |
18 | #ifdef __KERNEL__ | 18 | #ifdef __KERNEL__ |
19 | # include <linux/string.h> | 19 | # include <linux/sunrpc/svc.h> |
20 | # include <linux/fs.h> | ||
21 | #endif | 20 | #endif |
22 | #include <linux/nfsd/const.h> | 21 | #include <linux/nfsd/const.h> |
23 | #include <linux/nfsd/debug.h> | ||
24 | 22 | ||
25 | /* | 23 | /* |
26 | * This is the old "dentry style" Linux NFSv2 file handle. | 24 | * This is the old "dentry style" Linux NFSv2 file handle. |
@@ -164,208 +162,6 @@ typedef struct svc_fh { | |||
164 | 162 | ||
165 | } svc_fh; | 163 | } svc_fh; |
166 | 164 | ||
167 | enum nfsd_fsid { | ||
168 | FSID_DEV = 0, | ||
169 | FSID_NUM, | ||
170 | FSID_MAJOR_MINOR, | ||
171 | FSID_ENCODE_DEV, | ||
172 | FSID_UUID4_INUM, | ||
173 | FSID_UUID8, | ||
174 | FSID_UUID16, | ||
175 | FSID_UUID16_INUM, | ||
176 | }; | ||
177 | |||
178 | enum fsid_source { | ||
179 | FSIDSOURCE_DEV, | ||
180 | FSIDSOURCE_FSID, | ||
181 | FSIDSOURCE_UUID, | ||
182 | }; | ||
183 | extern enum fsid_source fsid_source(struct svc_fh *fhp); | ||
184 | |||
185 | |||
186 | /* This might look a little large to "inline" but in all calls except | ||
187 | * one, 'vers' is constant so most of the function disappears. | ||
188 | */ | ||
189 | static inline void mk_fsid(int vers, u32 *fsidv, dev_t dev, ino_t ino, | ||
190 | u32 fsid, unsigned char *uuid) | ||
191 | { | ||
192 | u32 *up; | ||
193 | switch(vers) { | ||
194 | case FSID_DEV: | ||
195 | fsidv[0] = htonl((MAJOR(dev)<<16) | | ||
196 | MINOR(dev)); | ||
197 | fsidv[1] = ino_t_to_u32(ino); | ||
198 | break; | ||
199 | case FSID_NUM: | ||
200 | fsidv[0] = fsid; | ||
201 | break; | ||
202 | case FSID_MAJOR_MINOR: | ||
203 | fsidv[0] = htonl(MAJOR(dev)); | ||
204 | fsidv[1] = htonl(MINOR(dev)); | ||
205 | fsidv[2] = ino_t_to_u32(ino); | ||
206 | break; | ||
207 | |||
208 | case FSID_ENCODE_DEV: | ||
209 | fsidv[0] = new_encode_dev(dev); | ||
210 | fsidv[1] = ino_t_to_u32(ino); | ||
211 | break; | ||
212 | |||
213 | case FSID_UUID4_INUM: | ||
214 | /* 4 byte fsid and inode number */ | ||
215 | up = (u32*)uuid; | ||
216 | fsidv[0] = ino_t_to_u32(ino); | ||
217 | fsidv[1] = up[0] ^ up[1] ^ up[2] ^ up[3]; | ||
218 | break; | ||
219 | |||
220 | case FSID_UUID8: | ||
221 | /* 8 byte fsid */ | ||
222 | up = (u32*)uuid; | ||
223 | fsidv[0] = up[0] ^ up[2]; | ||
224 | fsidv[1] = up[1] ^ up[3]; | ||
225 | break; | ||
226 | |||
227 | case FSID_UUID16: | ||
228 | /* 16 byte fsid - NFSv3+ only */ | ||
229 | memcpy(fsidv, uuid, 16); | ||
230 | break; | ||
231 | |||
232 | case FSID_UUID16_INUM: | ||
233 | /* 8 byte inode and 16 byte fsid */ | ||
234 | *(u64*)fsidv = (u64)ino; | ||
235 | memcpy(fsidv+2, uuid, 16); | ||
236 | break; | ||
237 | default: BUG(); | ||
238 | } | ||
239 | } | ||
240 | |||
241 | static inline int key_len(int type) | ||
242 | { | ||
243 | switch(type) { | ||
244 | case FSID_DEV: return 8; | ||
245 | case FSID_NUM: return 4; | ||
246 | case FSID_MAJOR_MINOR: return 12; | ||
247 | case FSID_ENCODE_DEV: return 8; | ||
248 | case FSID_UUID4_INUM: return 8; | ||
249 | case FSID_UUID8: return 8; | ||
250 | case FSID_UUID16: return 16; | ||
251 | case FSID_UUID16_INUM: return 24; | ||
252 | default: return 0; | ||
253 | } | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * Shorthand for dprintk()'s | ||
258 | */ | ||
259 | extern char * SVCFH_fmt(struct svc_fh *fhp); | ||
260 | |||
261 | /* | ||
262 | * Function prototypes | ||
263 | */ | ||
264 | __be32 fh_verify(struct svc_rqst *, struct svc_fh *, int, int); | ||
265 | __be32 fh_compose(struct svc_fh *, struct svc_export *, struct dentry *, struct svc_fh *); | ||
266 | __be32 fh_update(struct svc_fh *); | ||
267 | void fh_put(struct svc_fh *); | ||
268 | |||
269 | static __inline__ struct svc_fh * | ||
270 | fh_copy(struct svc_fh *dst, struct svc_fh *src) | ||
271 | { | ||
272 | WARN_ON(src->fh_dentry || src->fh_locked); | ||
273 | |||
274 | *dst = *src; | ||
275 | return dst; | ||
276 | } | ||
277 | |||
278 | static inline void | ||
279 | fh_copy_shallow(struct knfsd_fh *dst, struct knfsd_fh *src) | ||
280 | { | ||
281 | dst->fh_size = src->fh_size; | ||
282 | memcpy(&dst->fh_base, &src->fh_base, src->fh_size); | ||
283 | } | ||
284 | |||
285 | static __inline__ struct svc_fh * | ||
286 | fh_init(struct svc_fh *fhp, int maxsize) | ||
287 | { | ||
288 | memset(fhp, 0, sizeof(*fhp)); | ||
289 | fhp->fh_maxsize = maxsize; | ||
290 | return fhp; | ||
291 | } | ||
292 | |||
293 | #ifdef CONFIG_NFSD_V3 | ||
294 | /* | ||
295 | * Fill in the pre_op attr for the wcc data | ||
296 | */ | ||
297 | static inline void | ||
298 | fill_pre_wcc(struct svc_fh *fhp) | ||
299 | { | ||
300 | struct inode *inode; | ||
301 | |||
302 | inode = fhp->fh_dentry->d_inode; | ||
303 | if (!fhp->fh_pre_saved) { | ||
304 | fhp->fh_pre_mtime = inode->i_mtime; | ||
305 | fhp->fh_pre_ctime = inode->i_ctime; | ||
306 | fhp->fh_pre_size = inode->i_size; | ||
307 | fhp->fh_pre_change = inode->i_version; | ||
308 | fhp->fh_pre_saved = 1; | ||
309 | } | ||
310 | } | ||
311 | |||
312 | extern void fill_post_wcc(struct svc_fh *); | ||
313 | #else | ||
314 | #define fill_pre_wcc(ignored) | ||
315 | #define fill_post_wcc(notused) | ||
316 | #endif /* CONFIG_NFSD_V3 */ | ||
317 | |||
318 | |||
319 | /* | ||
320 | * Lock a file handle/inode | ||
321 | * NOTE: both fh_lock and fh_unlock are done "by hand" in | ||
322 | * vfs.c:nfsd_rename as it needs to grab 2 i_mutex's at once, | ||
323 | * so any changes here should be reflected there. | ||
324 | */ | ||
325 | |||
326 | static inline void | ||
327 | fh_lock_nested(struct svc_fh *fhp, unsigned int subclass) | ||
328 | { | ||
329 | struct dentry *dentry = fhp->fh_dentry; | ||
330 | struct inode *inode; | ||
331 | |||
332 | dfprintk(FILEOP, "nfsd: fh_lock(%s) locked = %d\n", | ||
333 | SVCFH_fmt(fhp), fhp->fh_locked); | ||
334 | |||
335 | BUG_ON(!dentry); | ||
336 | |||
337 | if (fhp->fh_locked) { | ||
338 | printk(KERN_WARNING "fh_lock: %s/%s already locked!\n", | ||
339 | dentry->d_parent->d_name.name, dentry->d_name.name); | ||
340 | return; | ||
341 | } | ||
342 | |||
343 | inode = dentry->d_inode; | ||
344 | mutex_lock_nested(&inode->i_mutex, subclass); | ||
345 | fill_pre_wcc(fhp); | ||
346 | fhp->fh_locked = 1; | ||
347 | } | ||
348 | |||
349 | static inline void | ||
350 | fh_lock(struct svc_fh *fhp) | ||
351 | { | ||
352 | fh_lock_nested(fhp, I_MUTEX_NORMAL); | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * Unlock a file handle/inode | ||
357 | */ | ||
358 | static inline void | ||
359 | fh_unlock(struct svc_fh *fhp) | ||
360 | { | ||
361 | BUG_ON(!fhp->fh_dentry); | ||
362 | |||
363 | if (fhp->fh_locked) { | ||
364 | fill_post_wcc(fhp); | ||
365 | mutex_unlock(&fhp->fh_dentry->d_inode->i_mutex); | ||
366 | fhp->fh_locked = 0; | ||
367 | } | ||
368 | } | ||
369 | #endif /* __KERNEL__ */ | 165 | #endif /* __KERNEL__ */ |
370 | 166 | ||
371 | 167 | ||
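A sketch of building a filehandle fsid key for the FSID_ENCODE_DEV case, using the mk_fsid()/key_len() helpers that this patch removes from the public nfsfh.h; the wrapper function is invented for illustration.

#include <linux/types.h>

static void example_build_fsid(dev_t dev, ino_t ino)
{
	u32 fsidv[6];				/* large enough for any FSID variant */
	int len = key_len(FSID_ENCODE_DEV);	/* 8 bytes: two u32 words */

	mk_fsid(FSID_ENCODE_DEV, fsidv, dev, ino, 0, NULL);
	/* fsidv[0] = new_encode_dev(dev), fsidv[1] = the inode number */

	(void)len;
}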
diff --git a/include/linux/nfsd/state.h b/include/linux/nfsd/state.h deleted file mode 100644 index b38d11324189..000000000000 --- a/include/linux/nfsd/state.h +++ /dev/null | |||
@@ -1,404 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/nfsd/state.h | ||
3 | * | ||
4 | * Copyright (c) 2001 The Regents of the University of Michigan. | ||
5 | * All rights reserved. | ||
6 | * | ||
7 | * Kendrick Smith <kmsmith@umich.edu> | ||
8 | * Andy Adamson <andros@umich.edu> | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or without | ||
11 | * modification, are permitted provided that the following conditions | ||
12 | * are met: | ||
13 | * | ||
14 | * 1. Redistributions of source code must retain the above copyright | ||
15 | * notice, this list of conditions and the following disclaimer. | ||
16 | * 2. Redistributions in binary form must reproduce the above copyright | ||
17 | * notice, this list of conditions and the following disclaimer in the | ||
18 | * documentation and/or other materials provided with the distribution. | ||
19 | * 3. Neither the name of the University nor the names of its | ||
20 | * contributors may be used to endorse or promote products derived | ||
21 | * from this software without specific prior written permission. | ||
22 | * | ||
23 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
24 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
25 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
26 | * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | ||
27 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
28 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
29 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
30 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
31 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | ||
32 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
33 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
34 | * | ||
35 | */ | ||
36 | |||
37 | #ifndef _NFSD4_STATE_H | ||
38 | #define _NFSD4_STATE_H | ||
39 | |||
40 | #include <linux/list.h> | ||
41 | #include <linux/kref.h> | ||
42 | #include <linux/sunrpc/clnt.h> | ||
43 | |||
44 | typedef struct { | ||
45 | u32 cl_boot; | ||
46 | u32 cl_id; | ||
47 | } clientid_t; | ||
48 | |||
49 | typedef struct { | ||
50 | u32 so_boot; | ||
51 | u32 so_stateownerid; | ||
52 | u32 so_fileid; | ||
53 | } stateid_opaque_t; | ||
54 | |||
55 | typedef struct { | ||
56 | u32 si_generation; | ||
57 | stateid_opaque_t si_opaque; | ||
58 | } stateid_t; | ||
59 | #define si_boot si_opaque.so_boot | ||
60 | #define si_stateownerid si_opaque.so_stateownerid | ||
61 | #define si_fileid si_opaque.so_fileid | ||
62 | |||
63 | struct nfsd4_cb_sequence { | ||
64 | /* args/res */ | ||
65 | u32 cbs_minorversion; | ||
66 | struct nfs4_client *cbs_clp; | ||
67 | }; | ||
68 | |||
69 | struct nfs4_delegation { | ||
70 | struct list_head dl_perfile; | ||
71 | struct list_head dl_perclnt; | ||
72 | struct list_head dl_recall_lru; /* delegation recalled */ | ||
73 | atomic_t dl_count; /* ref count */ | ||
74 | struct nfs4_client *dl_client; | ||
75 | struct nfs4_file *dl_file; | ||
76 | struct file_lock *dl_flock; | ||
77 | struct file *dl_vfs_file; | ||
78 | u32 dl_type; | ||
79 | time_t dl_time; | ||
80 | /* For recall: */ | ||
81 | u32 dl_ident; | ||
82 | stateid_t dl_stateid; | ||
83 | struct knfsd_fh dl_fh; | ||
84 | int dl_retries; | ||
85 | }; | ||
86 | |||
87 | /* client delegation callback info */ | ||
88 | struct nfs4_cb_conn { | ||
89 | /* SETCLIENTID info */ | ||
90 | struct sockaddr_storage cb_addr; | ||
91 | size_t cb_addrlen; | ||
92 | u32 cb_prog; | ||
93 | u32 cb_minorversion; | ||
94 | u32 cb_ident; /* minorversion 0 only */ | ||
95 | /* RPC client info */ | ||
96 | atomic_t cb_set; /* successful CB_NULL call */ | ||
97 | struct rpc_clnt * cb_client; | ||
98 | }; | ||
99 | |||
100 | /* Maximum number of slots per session. 160 is useful for long haul TCP */ | ||
101 | #define NFSD_MAX_SLOTS_PER_SESSION 160 | ||
102 | /* Maximum number of operations per session compound */ | ||
103 | #define NFSD_MAX_OPS_PER_COMPOUND 16 | ||
104 | /* Maximum session per slot cache size */ | ||
105 | #define NFSD_SLOT_CACHE_SIZE 1024 | ||
106 | /* Maximum number of NFSD_SLOT_CACHE_SIZE slots per session */ | ||
107 | #define NFSD_CACHE_SIZE_SLOTS_PER_SESSION 32 | ||
108 | #define NFSD_MAX_MEM_PER_SESSION \ | ||
109 | (NFSD_CACHE_SIZE_SLOTS_PER_SESSION * NFSD_SLOT_CACHE_SIZE) | ||
110 | |||
111 | struct nfsd4_slot { | ||
112 | bool sl_inuse; | ||
113 | bool sl_cachethis; | ||
114 | u16 sl_opcnt; | ||
115 | u32 sl_seqid; | ||
116 | __be32 sl_status; | ||
117 | u32 sl_datalen; | ||
118 | char sl_data[]; | ||
119 | }; | ||
120 | |||
121 | struct nfsd4_channel_attrs { | ||
122 | u32 headerpadsz; | ||
123 | u32 maxreq_sz; | ||
124 | u32 maxresp_sz; | ||
125 | u32 maxresp_cached; | ||
126 | u32 maxops; | ||
127 | u32 maxreqs; | ||
128 | u32 nr_rdma_attrs; | ||
129 | u32 rdma_attrs; | ||
130 | }; | ||
131 | |||
132 | struct nfsd4_create_session { | ||
133 | clientid_t clientid; | ||
134 | struct nfs4_sessionid sessionid; | ||
135 | u32 seqid; | ||
136 | u32 flags; | ||
137 | struct nfsd4_channel_attrs fore_channel; | ||
138 | struct nfsd4_channel_attrs back_channel; | ||
139 | u32 callback_prog; | ||
140 | u32 uid; | ||
141 | u32 gid; | ||
142 | }; | ||
143 | |||
144 | /* The single slot clientid cache structure */ | ||
145 | struct nfsd4_clid_slot { | ||
146 | u32 sl_seqid; | ||
147 | __be32 sl_status; | ||
148 | struct nfsd4_create_session sl_cr_ses; | ||
149 | }; | ||
150 | |||
151 | struct nfsd4_session { | ||
152 | struct kref se_ref; | ||
153 | struct list_head se_hash; /* hash by sessionid */ | ||
154 | struct list_head se_perclnt; | ||
155 | u32 se_flags; | ||
156 | struct nfs4_client *se_client; /* for expire_client */ | ||
157 | struct nfs4_sessionid se_sessionid; | ||
158 | struct nfsd4_channel_attrs se_fchannel; | ||
159 | struct nfsd4_channel_attrs se_bchannel; | ||
160 | struct nfsd4_slot *se_slots[]; /* forward channel slots */ | ||
161 | }; | ||
162 | |||
163 | static inline void | ||
164 | nfsd4_put_session(struct nfsd4_session *ses) | ||
165 | { | ||
166 | extern void free_session(struct kref *kref); | ||
167 | kref_put(&ses->se_ref, free_session); | ||
168 | } | ||
169 | |||
170 | static inline void | ||
171 | nfsd4_get_session(struct nfsd4_session *ses) | ||
172 | { | ||
173 | kref_get(&ses->se_ref); | ||
174 | } | ||
175 | |||
176 | /* formatted contents of nfs4_sessionid */ | ||
177 | struct nfsd4_sessionid { | ||
178 | clientid_t clientid; | ||
179 | u32 sequence; | ||
180 | u32 reserved; | ||
181 | }; | ||
182 | |||
183 | #define HEXDIR_LEN 33 /* hex version of 16 byte md5 of cl_name plus '\0' */ | ||
184 | |||
185 | /* | ||
186 | * struct nfs4_client - one per client. Clientids live here. | ||
187 | * o Each nfs4_client is hashed by clientid. | ||
188 | * | ||
189 | * o Each nfs4_client is also hashed by name | ||
190 | * (the opaque quantity initially sent by the client to identify itself). | ||
191 | * | ||
192 | * o cl_perclient list is used to ensure no dangling stateowner references | ||
193 | * when we expire the nfs4_client | ||
194 | */ | ||
195 | struct nfs4_client { | ||
196 | struct list_head cl_idhash; /* hash by cl_clientid.id */ | ||
197 | struct list_head cl_strhash; /* hash by cl_name */ | ||
198 | struct list_head cl_openowners; | ||
199 | struct list_head cl_delegations; | ||
200 | struct list_head cl_lru; /* tail queue */ | ||
201 | struct xdr_netobj cl_name; /* id generated by client */ | ||
202 | char cl_recdir[HEXDIR_LEN]; /* recovery dir */ | ||
203 | nfs4_verifier cl_verifier; /* generated by client */ | ||
204 | time_t cl_time; /* time of last lease renewal */ | ||
205 | struct sockaddr_storage cl_addr; /* client ipaddress */ | ||
206 | u32 cl_flavor; /* setclientid pseudoflavor */ | ||
207 | char *cl_principal; /* setclientid principal name */ | ||
208 | struct svc_cred cl_cred; /* setclientid principal */ | ||
209 | clientid_t cl_clientid; /* generated by server */ | ||
210 | nfs4_verifier cl_confirm; /* generated by server */ | ||
211 | struct nfs4_cb_conn cl_cb_conn; /* callback info */ | ||
212 | atomic_t cl_count; /* ref count */ | ||
213 | u32 cl_firststate; /* recovery dir creation */ | ||
214 | |||
215 | /* for nfs41 */ | ||
216 | struct list_head cl_sessions; | ||
217 | struct nfsd4_clid_slot cl_cs_slot; /* create_session slot */ | ||
218 | u32 cl_exchange_flags; | ||
219 | struct nfs4_sessionid cl_sessionid; | ||
220 | |||
221 | /* for nfs41 callbacks */ | ||
222 | /* We currently support a single back channel with a single slot */ | ||
223 | unsigned long cl_cb_slot_busy; | ||
224 | u32 cl_cb_seq_nr; | ||
225 | struct svc_xprt *cl_cb_xprt; /* 4.1 callback transport */ | ||
226 | struct rpc_wait_queue cl_cb_waitq; /* backchannel callers may */ | ||
227 | /* wait here for slots */ | ||
228 | }; | ||
229 | |||
230 | /* struct nfs4_client_reclaim | ||
231 | * one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl | ||
232 | * upon lease reset, or from upcall to state_daemon (to read in state | ||
233 | * from non-volatile storage) upon reboot. | ||
234 | */ | ||
235 | struct nfs4_client_reclaim { | ||
236 | struct list_head cr_strhash; /* hash by cr_name */ | ||
237 | char cr_recdir[HEXDIR_LEN]; /* recover dir */ | ||
238 | }; | ||
239 | |||
240 | static inline void | ||
241 | update_stateid(stateid_t *stateid) | ||
242 | { | ||
243 | stateid->si_generation++; | ||
244 | } | ||
245 | |||
246 | /* A reasonable value for REPLAY_ISIZE was estimated as follows: | ||
247 | * The OPEN response, typically the largest, requires | ||
248 | * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) + | ||
249 | * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) + | ||
250 | * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes | ||
251 | */ | ||
252 | |||
253 | #define NFSD4_REPLAY_ISIZE 112 | ||
254 | |||
255 | /* | ||
256 | * Replay buffer, where the result of the last seqid-mutating operation | ||
257 | * is cached. | ||
258 | */ | ||
259 | struct nfs4_replay { | ||
260 | __be32 rp_status; | ||
261 | unsigned int rp_buflen; | ||
262 | char *rp_buf; | ||
263 | unsigned int rp_allocated; | ||
264 | struct knfsd_fh rp_openfh; | ||
265 | char rp_ibuf[NFSD4_REPLAY_ISIZE]; | ||
266 | }; | ||
267 | |||
268 | /* | ||
269 | * nfs4_stateowner can either be an open_owner, or a lock_owner | ||
270 | * | ||
271 | * so_idhash: stateid_hashtbl[] for open owner, lockstateid_hashtbl[] | ||
272 | * for lock_owner | ||
273 | * so_strhash: ownerstr_hashtbl[] for open_owner, lock_ownerstr_hashtbl[] | ||
274 | * for lock_owner | ||
275 | * so_perclient: nfs4_client->cl_perclient entry - used when nfs4_client | ||
276 | * struct is reaped. | ||
277 | * so_perfilestate: heads the list of nfs4_stateid (either open or lock) | ||
278 | * and is used to ensure no dangling nfs4_stateid references when we | ||
279 | * release a stateowner. | ||
280 | * so_perlockowner: (open) nfs4_stateid->st_perlockowner entry - used when | ||
281 | * close is called to reap associated byte-range locks | ||
282 | * so_close_lru: (open) stateowner is placed on this list instead of being | ||
283 | * reaped (when so_perfilestate is empty) to hold the last close replay. | ||
284 | * reaped by the laundromat thread after the lease period. | ||
285 | */ | ||
286 | struct nfs4_stateowner { | ||
287 | struct kref so_ref; | ||
288 | struct list_head so_idhash; /* hash by so_id */ | ||
289 | struct list_head so_strhash; /* hash by op_name */ | ||
290 | struct list_head so_perclient; | ||
291 | struct list_head so_stateids; | ||
292 | struct list_head so_perstateid; /* for lockowners only */ | ||
293 | struct list_head so_close_lru; /* tail queue */ | ||
294 | time_t so_time; /* time of placement on so_close_lru */ | ||
295 | int so_is_open_owner; /* 1=openowner,0=lockowner */ | ||
296 | u32 so_id; | ||
297 | struct nfs4_client * so_client; | ||
298 | /* after increment in ENCODE_SEQID_OP_TAIL, represents the next | ||
299 | * sequence id expected from the client: */ | ||
300 | u32 so_seqid; | ||
301 | struct xdr_netobj so_owner; /* open owner name */ | ||
302 | int so_confirmed; /* successful OPEN_CONFIRM? */ | ||
303 | struct nfs4_replay so_replay; | ||
304 | }; | ||
305 | |||
306 | /* | ||
307 | * nfs4_file: a file opened by some number of (open) nfs4_stateowners. | ||
308 | * o fi_perfile list is used to search for conflicting | ||
309 | * share_access, share_deny on the file. | ||
310 | */ | ||
311 | struct nfs4_file { | ||
312 | atomic_t fi_ref; | ||
313 | struct list_head fi_hash; /* hash by "struct inode *" */ | ||
314 | struct list_head fi_stateids; | ||
315 | struct list_head fi_delegations; | ||
316 | struct inode *fi_inode; | ||
317 | u32 fi_id; /* used with stateowner->so_id | ||
318 | * for stateid_hashtbl hash */ | ||
319 | bool fi_had_conflict; | ||
320 | }; | ||
321 | |||
322 | /* | ||
323 | * nfs4_stateid can either be an open stateid or (eventually) a lock stateid | ||
324 | * | ||
325 | * (open)nfs4_stateid: one per (open)nfs4_stateowner, nfs4_file | ||
326 | * | ||
327 | * st_hash: stateid_hashtbl[] entry or lockstateid_hashtbl entry | ||
328 | * st_perfile: file_hashtbl[] entry. | ||
329 | * st_perfile_state: nfs4_stateowner->so_perfilestate | ||
330 | * st_perlockowner: (open stateid) list of lock nfs4_stateowners | ||
331 | * st_access_bmap: used only for open stateid | ||
332 | * st_deny_bmap: used only for open stateid | ||
333 | * st_openstp: open stateid lock stateid was derived from | ||
334 | * | ||
335 | * XXX: open stateids and lock stateids have diverged sufficiently that | ||
336 | * we should consider defining separate structs for the two cases. | ||
337 | */ | ||
338 | |||
339 | struct nfs4_stateid { | ||
340 | struct list_head st_hash; | ||
341 | struct list_head st_perfile; | ||
342 | struct list_head st_perstateowner; | ||
343 | struct list_head st_lockowners; | ||
344 | struct nfs4_stateowner * st_stateowner; | ||
345 | struct nfs4_file * st_file; | ||
346 | stateid_t st_stateid; | ||
347 | struct file * st_vfs_file; | ||
348 | unsigned long st_access_bmap; | ||
349 | unsigned long st_deny_bmap; | ||
350 | struct nfs4_stateid * st_openstp; | ||
351 | }; | ||
352 | |||
353 | /* flags for preprocess_seqid_op() */ | ||
354 | #define HAS_SESSION 0x00000001 | ||
355 | #define CONFIRM 0x00000002 | ||
356 | #define OPEN_STATE 0x00000004 | ||
357 | #define LOCK_STATE 0x00000008 | ||
358 | #define RD_STATE 0x00000010 | ||
359 | #define WR_STATE 0x00000020 | ||
360 | #define CLOSE_STATE 0x00000040 | ||
361 | |||
362 | #define seqid_mutating_err(err) \ | ||
363 | (((err) != nfserr_stale_clientid) && \ | ||
364 | ((err) != nfserr_bad_seqid) && \ | ||
365 | ((err) != nfserr_stale_stateid) && \ | ||
366 | ((err) != nfserr_bad_stateid)) | ||
367 | |||
368 | struct nfsd4_compound_state; | ||
369 | |||
370 | extern __be32 nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate, | ||
371 | stateid_t *stateid, int flags, struct file **filp); | ||
372 | extern void nfs4_lock_state(void); | ||
373 | extern void nfs4_unlock_state(void); | ||
374 | extern int nfs4_in_grace(void); | ||
375 | extern __be32 nfs4_check_open_reclaim(clientid_t *clid); | ||
376 | extern void put_nfs4_client(struct nfs4_client *clp); | ||
377 | extern void nfs4_free_stateowner(struct kref *kref); | ||
378 | extern int set_callback_cred(void); | ||
379 | extern void nfsd4_probe_callback(struct nfs4_client *clp); | ||
380 | extern void nfsd4_cb_recall(struct nfs4_delegation *dp); | ||
381 | extern void nfs4_put_delegation(struct nfs4_delegation *dp); | ||
382 | extern __be32 nfs4_make_rec_clidname(char *clidname, struct xdr_netobj *clname); | ||
383 | extern void nfsd4_init_recdir(char *recdir_name); | ||
384 | extern int nfsd4_recdir_load(void); | ||
385 | extern void nfsd4_shutdown_recdir(void); | ||
386 | extern int nfs4_client_to_reclaim(const char *name); | ||
387 | extern int nfs4_has_reclaimed_state(const char *name, bool use_exchange_id); | ||
388 | extern void nfsd4_recdir_purge_old(void); | ||
389 | extern int nfsd4_create_clid_dir(struct nfs4_client *clp); | ||
390 | extern void nfsd4_remove_clid_dir(struct nfs4_client *clp); | ||
391 | |||
392 | static inline void | ||
393 | nfs4_put_stateowner(struct nfs4_stateowner *so) | ||
394 | { | ||
395 | kref_put(&so->so_ref, nfs4_free_stateowner); | ||
396 | } | ||
397 | |||
398 | static inline void | ||
399 | nfs4_get_stateowner(struct nfs4_stateowner *so) | ||
400 | { | ||
401 | kref_get(&so->so_ref); | ||
402 | } | ||
403 | |||
404 | #endif /* NFSD4_STATE_H */ | ||
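A sketch of the stateowner reference-count discipline implied by the helpers at the end of the removed state.h above: take a reference while a request uses the open owner, drop it when done, and let the final put free it via nfs4_free_stateowner(). The function is illustrative only.

static void example_use_stateowner(struct nfs4_stateowner *sop)
{
	nfs4_get_stateowner(sop);	/* kref_get(&sop->so_ref) */

	/* ... operate on sop->so_seqid, sop->so_replay, ... */

	nfs4_put_stateowner(sop);	/* kref_put(..., nfs4_free_stateowner) */
}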
diff --git a/include/linux/nfsd/syscall.h b/include/linux/nfsd/syscall.h index 7a3b565b898f..812bc1e160dc 100644 --- a/include/linux/nfsd/syscall.h +++ b/include/linux/nfsd/syscall.h | |||
@@ -9,14 +9,8 @@ | |||
9 | #ifndef NFSD_SYSCALL_H | 9 | #ifndef NFSD_SYSCALL_H |
10 | #define NFSD_SYSCALL_H | 10 | #define NFSD_SYSCALL_H |
11 | 11 | ||
12 | # include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #ifdef __KERNEL__ | ||
14 | # include <linux/in.h> | ||
15 | #endif | ||
16 | #include <linux/posix_types.h> | ||
17 | #include <linux/nfsd/const.h> | ||
18 | #include <linux/nfsd/export.h> | 13 | #include <linux/nfsd/export.h> |
19 | #include <linux/nfsd/nfsfh.h> | ||
20 | 14 | ||
21 | /* | 15 | /* |
22 | * Version of the syscall interface | 16 | * Version of the syscall interface |
diff --git a/include/linux/nfsd/xdr.h b/include/linux/nfsd/xdr.h deleted file mode 100644 index a0132ef58f21..000000000000 --- a/include/linux/nfsd/xdr.h +++ /dev/null | |||
@@ -1,177 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/linux/nfsd/xdr.h | ||
3 | * | ||
4 | * XDR types for nfsd. This is mainly a typing exercise. | ||
5 | */ | ||
6 | |||
7 | #ifndef LINUX_NFSD_H | ||
8 | #define LINUX_NFSD_H | ||
9 | |||
10 | #include <linux/fs.h> | ||
11 | #include <linux/vfs.h> | ||
12 | #include <linux/nfs.h> | ||
13 | |||
14 | struct nfsd_fhandle { | ||
15 | struct svc_fh fh; | ||
16 | }; | ||
17 | |||
18 | struct nfsd_sattrargs { | ||
19 | struct svc_fh fh; | ||
20 | struct iattr attrs; | ||
21 | }; | ||
22 | |||
23 | struct nfsd_diropargs { | ||
24 | struct svc_fh fh; | ||
25 | char * name; | ||
26 | unsigned int len; | ||
27 | }; | ||
28 | |||
29 | struct nfsd_readargs { | ||
30 | struct svc_fh fh; | ||
31 | __u32 offset; | ||
32 | __u32 count; | ||
33 | int vlen; | ||
34 | }; | ||
35 | |||
36 | struct nfsd_writeargs { | ||
37 | svc_fh fh; | ||
38 | __u32 offset; | ||
39 | int len; | ||
40 | int vlen; | ||
41 | }; | ||
42 | |||
43 | struct nfsd_createargs { | ||
44 | struct svc_fh fh; | ||
45 | char * name; | ||
46 | unsigned int len; | ||
47 | struct iattr attrs; | ||
48 | }; | ||
49 | |||
50 | struct nfsd_renameargs { | ||
51 | struct svc_fh ffh; | ||
52 | char * fname; | ||
53 | unsigned int flen; | ||
54 | struct svc_fh tfh; | ||
55 | char * tname; | ||
56 | unsigned int tlen; | ||
57 | }; | ||
58 | |||
59 | struct nfsd_readlinkargs { | ||
60 | struct svc_fh fh; | ||
61 | char * buffer; | ||
62 | }; | ||
63 | |||
64 | struct nfsd_linkargs { | ||
65 | struct svc_fh ffh; | ||
66 | struct svc_fh tfh; | ||
67 | char * tname; | ||
68 | unsigned int tlen; | ||
69 | }; | ||
70 | |||
71 | struct nfsd_symlinkargs { | ||
72 | struct svc_fh ffh; | ||
73 | char * fname; | ||
74 | unsigned int flen; | ||
75 | char * tname; | ||
76 | unsigned int tlen; | ||
77 | struct iattr attrs; | ||
78 | }; | ||
79 | |||
80 | struct nfsd_readdirargs { | ||
81 | struct svc_fh fh; | ||
82 | __u32 cookie; | ||
83 | __u32 count; | ||
84 | __be32 * buffer; | ||
85 | }; | ||
86 | |||
87 | struct nfsd_attrstat { | ||
88 | struct svc_fh fh; | ||
89 | struct kstat stat; | ||
90 | }; | ||
91 | |||
92 | struct nfsd_diropres { | ||
93 | struct svc_fh fh; | ||
94 | struct kstat stat; | ||
95 | }; | ||
96 | |||
97 | struct nfsd_readlinkres { | ||
98 | int len; | ||
99 | }; | ||
100 | |||
101 | struct nfsd_readres { | ||
102 | struct svc_fh fh; | ||
103 | unsigned long count; | ||
104 | struct kstat stat; | ||
105 | }; | ||
106 | |||
107 | struct nfsd_readdirres { | ||
108 | int count; | ||
109 | |||
110 | struct readdir_cd common; | ||
111 | __be32 * buffer; | ||
112 | int buflen; | ||
113 | __be32 * offset; | ||
114 | }; | ||
115 | |||
116 | struct nfsd_statfsres { | ||
117 | struct kstatfs stats; | ||
118 | }; | ||
119 | |||
120 | /* | ||
121 | * Storage requirements for XDR arguments and results. | ||
122 | */ | ||
123 | union nfsd_xdrstore { | ||
124 | struct nfsd_sattrargs sattr; | ||
125 | struct nfsd_diropargs dirop; | ||
126 | struct nfsd_readargs read; | ||
127 | struct nfsd_writeargs write; | ||
128 | struct nfsd_createargs create; | ||
129 | struct nfsd_renameargs rename; | ||
130 | struct nfsd_linkargs link; | ||
131 | struct nfsd_symlinkargs symlink; | ||
132 | struct nfsd_readdirargs readdir; | ||
133 | }; | ||
134 | |||
135 | #define NFS2_SVC_XDRSIZE sizeof(union nfsd_xdrstore) | ||
136 | |||
137 | |||
138 | int nfssvc_decode_void(struct svc_rqst *, __be32 *, void *); | ||
139 | int nfssvc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *); | ||
140 | int nfssvc_decode_sattrargs(struct svc_rqst *, __be32 *, | ||
141 | struct nfsd_sattrargs *); | ||
142 | int nfssvc_decode_diropargs(struct svc_rqst *, __be32 *, | ||
143 | struct nfsd_diropargs *); | ||
144 | int nfssvc_decode_readargs(struct svc_rqst *, __be32 *, | ||
145 | struct nfsd_readargs *); | ||
146 | int nfssvc_decode_writeargs(struct svc_rqst *, __be32 *, | ||
147 | struct nfsd_writeargs *); | ||
148 | int nfssvc_decode_createargs(struct svc_rqst *, __be32 *, | ||
149 | struct nfsd_createargs *); | ||
150 | int nfssvc_decode_renameargs(struct svc_rqst *, __be32 *, | ||
151 | struct nfsd_renameargs *); | ||
152 | int nfssvc_decode_readlinkargs(struct svc_rqst *, __be32 *, | ||
153 | struct nfsd_readlinkargs *); | ||
154 | int nfssvc_decode_linkargs(struct svc_rqst *, __be32 *, | ||
155 | struct nfsd_linkargs *); | ||
156 | int nfssvc_decode_symlinkargs(struct svc_rqst *, __be32 *, | ||
157 | struct nfsd_symlinkargs *); | ||
158 | int nfssvc_decode_readdirargs(struct svc_rqst *, __be32 *, | ||
159 | struct nfsd_readdirargs *); | ||
160 | int nfssvc_encode_void(struct svc_rqst *, __be32 *, void *); | ||
161 | int nfssvc_encode_attrstat(struct svc_rqst *, __be32 *, struct nfsd_attrstat *); | ||
162 | int nfssvc_encode_diropres(struct svc_rqst *, __be32 *, struct nfsd_diropres *); | ||
163 | int nfssvc_encode_readlinkres(struct svc_rqst *, __be32 *, struct nfsd_readlinkres *); | ||
164 | int nfssvc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd_readres *); | ||
165 | int nfssvc_encode_statfsres(struct svc_rqst *, __be32 *, struct nfsd_statfsres *); | ||
166 | int nfssvc_encode_readdirres(struct svc_rqst *, __be32 *, struct nfsd_readdirres *); | ||
167 | |||
168 | int nfssvc_encode_entry(void *, const char *name, | ||
169 | int namlen, loff_t offset, u64 ino, unsigned int); | ||
170 | |||
171 | int nfssvc_release_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *); | ||
172 | |||
173 | /* Helper functions for NFSv2 ACL code */ | ||
174 | __be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp); | ||
175 | __be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp); | ||
176 | |||
177 | #endif /* LINUX_NFSD_H */ | ||
diff --git a/include/linux/nfsd/xdr3.h b/include/linux/nfsd/xdr3.h deleted file mode 100644 index 421eddd65a25..000000000000 --- a/include/linux/nfsd/xdr3.h +++ /dev/null | |||
@@ -1,346 +0,0 @@ | |||
1 | /* | ||
2 | * linux/include/linux/nfsd/xdr3.h | ||
3 | * | ||
4 | * XDR types for NFSv3 in nfsd. | ||
5 | * | ||
6 | * Copyright (C) 1996-1998, Olaf Kirch <okir@monad.swb.de> | ||
7 | */ | ||
8 | |||
9 | #ifndef _LINUX_NFSD_XDR3_H | ||
10 | #define _LINUX_NFSD_XDR3_H | ||
11 | |||
12 | #include <linux/nfsd/xdr.h> | ||
13 | |||
14 | struct nfsd3_sattrargs { | ||
15 | struct svc_fh fh; | ||
16 | struct iattr attrs; | ||
17 | int check_guard; | ||
18 | time_t guardtime; | ||
19 | }; | ||
20 | |||
21 | struct nfsd3_diropargs { | ||
22 | struct svc_fh fh; | ||
23 | char * name; | ||
24 | unsigned int len; | ||
25 | }; | ||
26 | |||
27 | struct nfsd3_accessargs { | ||
28 | struct svc_fh fh; | ||
29 | unsigned int access; | ||
30 | }; | ||
31 | |||
32 | struct nfsd3_readargs { | ||
33 | struct svc_fh fh; | ||
34 | __u64 offset; | ||
35 | __u32 count; | ||
36 | int vlen; | ||
37 | }; | ||
38 | |||
39 | struct nfsd3_writeargs { | ||
40 | svc_fh fh; | ||
41 | __u64 offset; | ||
42 | __u32 count; | ||
43 | int stable; | ||
44 | __u32 len; | ||
45 | int vlen; | ||
46 | }; | ||
47 | |||
48 | struct nfsd3_createargs { | ||
49 | struct svc_fh fh; | ||
50 | char * name; | ||
51 | unsigned int len; | ||
52 | int createmode; | ||
53 | struct iattr attrs; | ||
54 | __be32 * verf; | ||
55 | }; | ||
56 | |||
57 | struct nfsd3_mknodargs { | ||
58 | struct svc_fh fh; | ||
59 | char * name; | ||
60 | unsigned int len; | ||
61 | __u32 ftype; | ||
62 | __u32 major, minor; | ||
63 | struct iattr attrs; | ||
64 | }; | ||
65 | |||
66 | struct nfsd3_renameargs { | ||
67 | struct svc_fh ffh; | ||
68 | char * fname; | ||
69 | unsigned int flen; | ||
70 | struct svc_fh tfh; | ||
71 | char * tname; | ||
72 | unsigned int tlen; | ||
73 | }; | ||
74 | |||
75 | struct nfsd3_readlinkargs { | ||
76 | struct svc_fh fh; | ||
77 | char * buffer; | ||
78 | }; | ||
79 | |||
80 | struct nfsd3_linkargs { | ||
81 | struct svc_fh ffh; | ||
82 | struct svc_fh tfh; | ||
83 | char * tname; | ||
84 | unsigned int tlen; | ||
85 | }; | ||
86 | |||
87 | struct nfsd3_symlinkargs { | ||
88 | struct svc_fh ffh; | ||
89 | char * fname; | ||
90 | unsigned int flen; | ||
91 | char * tname; | ||
92 | unsigned int tlen; | ||
93 | struct iattr attrs; | ||
94 | }; | ||
95 | |||
96 | struct nfsd3_readdirargs { | ||
97 | struct svc_fh fh; | ||
98 | __u64 cookie; | ||
99 | __u32 dircount; | ||
100 | __u32 count; | ||
101 | __be32 * verf; | ||
102 | __be32 * buffer; | ||
103 | }; | ||
104 | |||
105 | struct nfsd3_commitargs { | ||
106 | struct svc_fh fh; | ||
107 | __u64 offset; | ||
108 | __u32 count; | ||
109 | }; | ||
110 | |||
111 | struct nfsd3_getaclargs { | ||
112 | struct svc_fh fh; | ||
113 | int mask; | ||
114 | }; | ||
115 | |||
116 | struct posix_acl; | ||
117 | struct nfsd3_setaclargs { | ||
118 | struct svc_fh fh; | ||
119 | int mask; | ||
120 | struct posix_acl *acl_access; | ||
121 | struct posix_acl *acl_default; | ||
122 | }; | ||
123 | |||
124 | struct nfsd3_attrstat { | ||
125 | __be32 status; | ||
126 | struct svc_fh fh; | ||
127 | struct kstat stat; | ||
128 | }; | ||
129 | |||
130 | /* LOOKUP, CREATE, MKDIR, SYMLINK, MKNOD */ | ||
131 | struct nfsd3_diropres { | ||
132 | __be32 status; | ||
133 | struct svc_fh dirfh; | ||
134 | struct svc_fh fh; | ||
135 | }; | ||
136 | |||
137 | struct nfsd3_accessres { | ||
138 | __be32 status; | ||
139 | struct svc_fh fh; | ||
140 | __u32 access; | ||
141 | }; | ||
142 | |||
143 | struct nfsd3_readlinkres { | ||
144 | __be32 status; | ||
145 | struct svc_fh fh; | ||
146 | __u32 len; | ||
147 | }; | ||
148 | |||
149 | struct nfsd3_readres { | ||
150 | __be32 status; | ||
151 | struct svc_fh fh; | ||
152 | unsigned long count; | ||
153 | int eof; | ||
154 | }; | ||
155 | |||
156 | struct nfsd3_writeres { | ||
157 | __be32 status; | ||
158 | struct svc_fh fh; | ||
159 | unsigned long count; | ||
160 | int committed; | ||
161 | }; | ||
162 | |||
163 | struct nfsd3_renameres { | ||
164 | __be32 status; | ||
165 | struct svc_fh ffh; | ||
166 | struct svc_fh tfh; | ||
167 | }; | ||
168 | |||
169 | struct nfsd3_linkres { | ||
170 | __be32 status; | ||
171 | struct svc_fh tfh; | ||
172 | struct svc_fh fh; | ||
173 | }; | ||
174 | |||
175 | struct nfsd3_readdirres { | ||
176 | __be32 status; | ||
177 | struct svc_fh fh; | ||
178 | int count; | ||
179 | __be32 verf[2]; | ||
180 | |||
181 | struct readdir_cd common; | ||
182 | __be32 * buffer; | ||
183 | int buflen; | ||
184 | __be32 * offset; | ||
185 | __be32 * offset1; | ||
186 | struct svc_rqst * rqstp; | ||
187 | |||
188 | }; | ||
189 | |||
190 | struct nfsd3_fsstatres { | ||
191 | __be32 status; | ||
192 | struct kstatfs stats; | ||
193 | __u32 invarsec; | ||
194 | }; | ||
195 | |||
196 | struct nfsd3_fsinfores { | ||
197 | __be32 status; | ||
198 | __u32 f_rtmax; | ||
199 | __u32 f_rtpref; | ||
200 | __u32 f_rtmult; | ||
201 | __u32 f_wtmax; | ||
202 | __u32 f_wtpref; | ||
203 | __u32 f_wtmult; | ||
204 | __u32 f_dtpref; | ||
205 | __u64 f_maxfilesize; | ||
206 | __u32 f_properties; | ||
207 | }; | ||
208 | |||
209 | struct nfsd3_pathconfres { | ||
210 | __be32 status; | ||
211 | __u32 p_link_max; | ||
212 | __u32 p_name_max; | ||
213 | __u32 p_no_trunc; | ||
214 | __u32 p_chown_restricted; | ||
215 | __u32 p_case_insensitive; | ||
216 | __u32 p_case_preserving; | ||
217 | }; | ||
218 | |||
219 | struct nfsd3_commitres { | ||
220 | __be32 status; | ||
221 | struct svc_fh fh; | ||
222 | }; | ||
223 | |||
224 | struct nfsd3_getaclres { | ||
225 | __be32 status; | ||
226 | struct svc_fh fh; | ||
227 | int mask; | ||
228 | struct posix_acl *acl_access; | ||
229 | struct posix_acl *acl_default; | ||
230 | }; | ||
231 | |||
232 | /* dummy type for release */ | ||
233 | struct nfsd3_fhandle_pair { | ||
234 | __u32 dummy; | ||
235 | struct svc_fh fh1; | ||
236 | struct svc_fh fh2; | ||
237 | }; | ||
238 | |||
239 | /* | ||
240 | * Storage requirements for XDR arguments and results. | ||
241 | */ | ||
242 | union nfsd3_xdrstore { | ||
243 | struct nfsd3_sattrargs sattrargs; | ||
244 | struct nfsd3_diropargs diropargs; | ||
245 | struct nfsd3_readargs readargs; | ||
246 | struct nfsd3_writeargs writeargs; | ||
247 | struct nfsd3_createargs createargs; | ||
248 | struct nfsd3_renameargs renameargs; | ||
249 | struct nfsd3_linkargs linkargs; | ||
250 | struct nfsd3_symlinkargs symlinkargs; | ||
251 | struct nfsd3_readdirargs readdirargs; | ||
252 | struct nfsd3_diropres diropres; | ||
253 | struct nfsd3_accessres accessres; | ||
254 | struct nfsd3_readlinkres readlinkres; | ||
255 | struct nfsd3_readres readres; | ||
256 | struct nfsd3_writeres writeres; | ||
257 | struct nfsd3_renameres renameres; | ||
258 | struct nfsd3_linkres linkres; | ||
259 | struct nfsd3_readdirres readdirres; | ||
260 | struct nfsd3_fsstatres fsstatres; | ||
261 | struct nfsd3_fsinfores fsinfores; | ||
262 | struct nfsd3_pathconfres pathconfres; | ||
263 | struct nfsd3_commitres commitres; | ||
264 | struct nfsd3_getaclres getaclres; | ||
265 | }; | ||
266 | |||
267 | #define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore) | ||
268 | |||
269 | int nfs3svc_decode_fhandle(struct svc_rqst *, __be32 *, struct nfsd_fhandle *); | ||
270 | int nfs3svc_decode_sattrargs(struct svc_rqst *, __be32 *, | ||
271 | struct nfsd3_sattrargs *); | ||
272 | int nfs3svc_decode_diropargs(struct svc_rqst *, __be32 *, | ||
273 | struct nfsd3_diropargs *); | ||
274 | int nfs3svc_decode_accessargs(struct svc_rqst *, __be32 *, | ||
275 | struct nfsd3_accessargs *); | ||
276 | int nfs3svc_decode_readargs(struct svc_rqst *, __be32 *, | ||
277 | struct nfsd3_readargs *); | ||
278 | int nfs3svc_decode_writeargs(struct svc_rqst *, __be32 *, | ||
279 | struct nfsd3_writeargs *); | ||
280 | int nfs3svc_decode_createargs(struct svc_rqst *, __be32 *, | ||
281 | struct nfsd3_createargs *); | ||
282 | int nfs3svc_decode_mkdirargs(struct svc_rqst *, __be32 *, | ||
283 | struct nfsd3_createargs *); | ||
284 | int nfs3svc_decode_mknodargs(struct svc_rqst *, __be32 *, | ||
285 | struct nfsd3_mknodargs *); | ||
286 | int nfs3svc_decode_renameargs(struct svc_rqst *, __be32 *, | ||
287 | struct nfsd3_renameargs *); | ||
288 | int nfs3svc_decode_readlinkargs(struct svc_rqst *, __be32 *, | ||
289 | struct nfsd3_readlinkargs *); | ||
290 | int nfs3svc_decode_linkargs(struct svc_rqst *, __be32 *, | ||
291 | struct nfsd3_linkargs *); | ||
292 | int nfs3svc_decode_symlinkargs(struct svc_rqst *, __be32 *, | ||
293 | struct nfsd3_symlinkargs *); | ||
294 | int nfs3svc_decode_readdirargs(struct svc_rqst *, __be32 *, | ||
295 | struct nfsd3_readdirargs *); | ||
296 | int nfs3svc_decode_readdirplusargs(struct svc_rqst *, __be32 *, | ||
297 | struct nfsd3_readdirargs *); | ||
298 | int nfs3svc_decode_commitargs(struct svc_rqst *, __be32 *, | ||
299 | struct nfsd3_commitargs *); | ||
300 | int nfs3svc_encode_voidres(struct svc_rqst *, __be32 *, void *); | ||
301 | int nfs3svc_encode_attrstat(struct svc_rqst *, __be32 *, | ||
302 | struct nfsd3_attrstat *); | ||
303 | int nfs3svc_encode_wccstat(struct svc_rqst *, __be32 *, | ||
304 | struct nfsd3_attrstat *); | ||
305 | int nfs3svc_encode_diropres(struct svc_rqst *, __be32 *, | ||
306 | struct nfsd3_diropres *); | ||
307 | int nfs3svc_encode_accessres(struct svc_rqst *, __be32 *, | ||
308 | struct nfsd3_accessres *); | ||
309 | int nfs3svc_encode_readlinkres(struct svc_rqst *, __be32 *, | ||
310 | struct nfsd3_readlinkres *); | ||
311 | int nfs3svc_encode_readres(struct svc_rqst *, __be32 *, struct nfsd3_readres *); | ||
312 | int nfs3svc_encode_writeres(struct svc_rqst *, __be32 *, struct nfsd3_writeres *); | ||
313 | int nfs3svc_encode_createres(struct svc_rqst *, __be32 *, | ||
314 | struct nfsd3_diropres *); | ||
315 | int nfs3svc_encode_renameres(struct svc_rqst *, __be32 *, | ||
316 | struct nfsd3_renameres *); | ||
317 | int nfs3svc_encode_linkres(struct svc_rqst *, __be32 *, | ||
318 | struct nfsd3_linkres *); | ||
319 | int nfs3svc_encode_readdirres(struct svc_rqst *, __be32 *, | ||
320 | struct nfsd3_readdirres *); | ||
321 | int nfs3svc_encode_fsstatres(struct svc_rqst *, __be32 *, | ||
322 | struct nfsd3_fsstatres *); | ||
323 | int nfs3svc_encode_fsinfores(struct svc_rqst *, __be32 *, | ||
324 | struct nfsd3_fsinfores *); | ||
325 | int nfs3svc_encode_pathconfres(struct svc_rqst *, __be32 *, | ||
326 | struct nfsd3_pathconfres *); | ||
327 | int nfs3svc_encode_commitres(struct svc_rqst *, __be32 *, | ||
328 | struct nfsd3_commitres *); | ||
329 | |||
330 | int nfs3svc_release_fhandle(struct svc_rqst *, __be32 *, | ||
331 | struct nfsd3_attrstat *); | ||
332 | int nfs3svc_release_fhandle2(struct svc_rqst *, __be32 *, | ||
333 | struct nfsd3_fhandle_pair *); | ||
334 | int nfs3svc_encode_entry(void *, const char *name, | ||
335 | int namlen, loff_t offset, u64 ino, | ||
336 | unsigned int); | ||
337 | int nfs3svc_encode_entry_plus(void *, const char *name, | ||
338 | int namlen, loff_t offset, u64 ino, | ||
339 | unsigned int); | ||
340 | /* Helper functions for NFSv3 ACL code */ | ||
341 | __be32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p, | ||
342 | struct svc_fh *fhp); | ||
343 | __be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp); | ||
344 | |||
345 | |||
346 | #endif /* _LINUX_NFSD_XDR3_H */ | ||
diff --git a/include/linux/nfsd/xdr4.h b/include/linux/nfsd/xdr4.h deleted file mode 100644 index 73164c2b3d29..000000000000 --- a/include/linux/nfsd/xdr4.h +++ /dev/null | |||
@@ -1,563 +0,0 @@ | |||
1 | /* | ||
2 | * include/linux/nfsd/xdr4.h | ||
3 | * | ||
4 | * Server-side types for NFSv4. | ||
5 | * | ||
6 | * Copyright (c) 2002 The Regents of the University of Michigan. | ||
7 | * All rights reserved. | ||
8 | * | ||
9 | * Kendrick Smith <kmsmith@umich.edu> | ||
10 | * Andy Adamson <andros@umich.edu> | ||
11 | * | ||
12 | * Redistribution and use in source and binary forms, with or without | ||
13 | * modification, are permitted provided that the following conditions | ||
14 | * are met: | ||
15 | * | ||
16 | * 1. Redistributions of source code must retain the above copyright | ||
17 | * notice, this list of conditions and the following disclaimer. | ||
18 | * 2. Redistributions in binary form must reproduce the above copyright | ||
19 | * notice, this list of conditions and the following disclaimer in the | ||
20 | * documentation and/or other materials provided with the distribution. | ||
21 | * 3. Neither the name of the University nor the names of its | ||
22 | * contributors may be used to endorse or promote products derived | ||
23 | * from this software without specific prior written permission. | ||
24 | * | ||
25 | * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED | ||
26 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | ||
27 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | ||
28 | * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE | ||
29 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
32 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | ||
33 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | ||
34 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | ||
35 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
36 | * | ||
37 | */ | ||
38 | |||
39 | #ifndef _LINUX_NFSD_XDR4_H | ||
40 | #define _LINUX_NFSD_XDR4_H | ||
41 | |||
42 | #include <linux/nfs4.h> | ||
43 | |||
44 | #define NFSD4_MAX_TAGLEN 128 | ||
45 | #define XDR_LEN(n) (((n) + 3) & ~3) | ||
46 | |||
47 | struct nfsd4_compound_state { | ||
48 | struct svc_fh current_fh; | ||
49 | struct svc_fh save_fh; | ||
50 | struct nfs4_stateowner *replay_owner; | ||
51 | /* For sessions DRC */ | ||
52 | struct nfsd4_session *session; | ||
53 | struct nfsd4_slot *slot; | ||
54 | __be32 *datap; | ||
55 | size_t iovlen; | ||
56 | u32 minorversion; | ||
57 | u32 status; | ||
58 | }; | ||
59 | |||
60 | static inline bool nfsd4_has_session(struct nfsd4_compound_state *cs) | ||
61 | { | ||
62 | return cs->slot != NULL; | ||
63 | } | ||
64 | |||
65 | struct nfsd4_change_info { | ||
66 | u32 atomic; | ||
67 | bool change_supported; | ||
68 | u32 before_ctime_sec; | ||
69 | u32 before_ctime_nsec; | ||
70 | u64 before_change; | ||
71 | u32 after_ctime_sec; | ||
72 | u32 after_ctime_nsec; | ||
73 | u64 after_change; | ||
74 | }; | ||
75 | |||
76 | struct nfsd4_access { | ||
77 | u32 ac_req_access; /* request */ | ||
78 | u32 ac_supported; /* response */ | ||
79 | u32 ac_resp_access; /* response */ | ||
80 | }; | ||
81 | |||
82 | struct nfsd4_close { | ||
83 | u32 cl_seqid; /* request */ | ||
84 | stateid_t cl_stateid; /* request+response */ | ||
85 | struct nfs4_stateowner * cl_stateowner; /* response */ | ||
86 | }; | ||
87 | |||
88 | struct nfsd4_commit { | ||
89 | u64 co_offset; /* request */ | ||
90 | u32 co_count; /* request */ | ||
91 | nfs4_verifier co_verf; /* response */ | ||
92 | }; | ||
93 | |||
94 | struct nfsd4_create { | ||
95 | u32 cr_namelen; /* request */ | ||
96 | char * cr_name; /* request */ | ||
97 | u32 cr_type; /* request */ | ||
98 | union { /* request */ | ||
99 | struct { | ||
100 | u32 namelen; | ||
101 | char *name; | ||
102 | } link; /* NF4LNK */ | ||
103 | struct { | ||
104 | u32 specdata1; | ||
105 | u32 specdata2; | ||
106 | } dev; /* NF4BLK, NF4CHR */ | ||
107 | } u; | ||
108 | u32 cr_bmval[3]; /* request */ | ||
109 | struct iattr cr_iattr; /* request */ | ||
110 | struct nfsd4_change_info cr_cinfo; /* response */ | ||
111 | struct nfs4_acl *cr_acl; | ||
112 | }; | ||
113 | #define cr_linklen u.link.namelen | ||
114 | #define cr_linkname u.link.name | ||
115 | #define cr_specdata1 u.dev.specdata1 | ||
116 | #define cr_specdata2 u.dev.specdata2 | ||
117 | |||
118 | struct nfsd4_delegreturn { | ||
119 | stateid_t dr_stateid; | ||
120 | }; | ||
121 | |||
122 | struct nfsd4_getattr { | ||
123 | u32 ga_bmval[3]; /* request */ | ||
124 | struct svc_fh *ga_fhp; /* response */ | ||
125 | }; | ||
126 | |||
127 | struct nfsd4_link { | ||
128 | u32 li_namelen; /* request */ | ||
129 | char * li_name; /* request */ | ||
130 | struct nfsd4_change_info li_cinfo; /* response */ | ||
131 | }; | ||
132 | |||
133 | struct nfsd4_lock_denied { | ||
134 | clientid_t ld_clientid; | ||
135 | struct nfs4_stateowner *ld_sop; | ||
136 | u64 ld_start; | ||
137 | u64 ld_length; | ||
138 | u32 ld_type; | ||
139 | }; | ||
140 | |||
141 | struct nfsd4_lock { | ||
142 | /* request */ | ||
143 | u32 lk_type; | ||
144 | u32 lk_reclaim; /* boolean */ | ||
145 | u64 lk_offset; | ||
146 | u64 lk_length; | ||
147 | u32 lk_is_new; | ||
148 | union { | ||
149 | struct { | ||
150 | u32 open_seqid; | ||
151 | stateid_t open_stateid; | ||
152 | u32 lock_seqid; | ||
153 | clientid_t clientid; | ||
154 | struct xdr_netobj owner; | ||
155 | } new; | ||
156 | struct { | ||
157 | stateid_t lock_stateid; | ||
158 | u32 lock_seqid; | ||
159 | } old; | ||
160 | } v; | ||
161 | |||
162 | /* response */ | ||
163 | union { | ||
164 | struct { | ||
165 | stateid_t stateid; | ||
166 | } ok; | ||
167 | struct nfsd4_lock_denied denied; | ||
168 | } u; | ||
169 | /* The lk_replay_owner is the open owner in the open_to_lock_owner | ||
170 | * case and the lock owner otherwise: */ | ||
171 | struct nfs4_stateowner *lk_replay_owner; | ||
172 | }; | ||
173 | #define lk_new_open_seqid v.new.open_seqid | ||
174 | #define lk_new_open_stateid v.new.open_stateid | ||
175 | #define lk_new_lock_seqid v.new.lock_seqid | ||
176 | #define lk_new_clientid v.new.clientid | ||
177 | #define lk_new_owner v.new.owner | ||
178 | #define lk_old_lock_stateid v.old.lock_stateid | ||
179 | #define lk_old_lock_seqid v.old.lock_seqid | ||
180 | |||
181 | #define lk_rflags u.ok.rflags | ||
182 | #define lk_resp_stateid u.ok.stateid | ||
183 | #define lk_denied u.denied | ||
184 | |||
185 | |||
186 | struct nfsd4_lockt { | ||
187 | u32 lt_type; | ||
188 | clientid_t lt_clientid; | ||
189 | struct xdr_netobj lt_owner; | ||
190 | u64 lt_offset; | ||
191 | u64 lt_length; | ||
192 | struct nfs4_stateowner * lt_stateowner; | ||
193 | struct nfsd4_lock_denied lt_denied; | ||
194 | }; | ||
195 | |||
196 | |||
197 | struct nfsd4_locku { | ||
198 | u32 lu_type; | ||
199 | u32 lu_seqid; | ||
200 | stateid_t lu_stateid; | ||
201 | u64 lu_offset; | ||
202 | u64 lu_length; | ||
203 | struct nfs4_stateowner *lu_stateowner; | ||
204 | }; | ||
205 | |||
206 | |||
207 | struct nfsd4_lookup { | ||
208 | u32 lo_len; /* request */ | ||
209 | char * lo_name; /* request */ | ||
210 | }; | ||
211 | |||
212 | struct nfsd4_putfh { | ||
213 | u32 pf_fhlen; /* request */ | ||
214 | char *pf_fhval; /* request */ | ||
215 | }; | ||
216 | |||
217 | struct nfsd4_open { | ||
218 | u32 op_claim_type; /* request */ | ||
219 | struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */ | ||
220 | u32 op_delegate_type; /* request - CLAIM_PREV only */ | ||
221 | stateid_t op_delegate_stateid; /* request - response */ | ||
222 | u32 op_create; /* request */ | ||
223 | u32 op_createmode; /* request */ | ||
224 | u32 op_bmval[3]; /* request */ | ||
225 | struct iattr iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */ | ||
226 | nfs4_verifier verf; /* EXCLUSIVE4 */ | ||
227 | clientid_t op_clientid; /* request */ | ||
228 | struct xdr_netobj op_owner; /* request */ | ||
229 | u32 op_seqid; /* request */ | ||
230 | u32 op_share_access; /* request */ | ||
231 | u32 op_share_deny; /* request */ | ||
232 | stateid_t op_stateid; /* response */ | ||
233 | u32 op_recall; /* recall */ | ||
234 | struct nfsd4_change_info op_cinfo; /* response */ | ||
235 | u32 op_rflags; /* response */ | ||
236 | int op_truncate; /* used during processing */ | ||
237 | struct nfs4_stateowner *op_stateowner; /* used during processing */ | ||
238 | struct nfs4_acl *op_acl; | ||
239 | }; | ||
240 | #define op_iattr iattr | ||
241 | #define op_verf verf | ||
242 | |||
243 | struct nfsd4_open_confirm { | ||
244 | stateid_t oc_req_stateid /* request */; | ||
245 | u32 oc_seqid /* request */; | ||
246 | stateid_t oc_resp_stateid /* response */; | ||
247 | struct nfs4_stateowner * oc_stateowner; /* response */ | ||
248 | }; | ||
249 | |||
250 | struct nfsd4_open_downgrade { | ||
251 | stateid_t od_stateid; | ||
252 | u32 od_seqid; | ||
253 | u32 od_share_access; | ||
254 | u32 od_share_deny; | ||
255 | struct nfs4_stateowner *od_stateowner; | ||
256 | }; | ||
257 | |||
258 | |||
259 | struct nfsd4_read { | ||
260 | stateid_t rd_stateid; /* request */ | ||
261 | u64 rd_offset; /* request */ | ||
262 | u32 rd_length; /* request */ | ||
263 | int rd_vlen; | ||
264 | struct file *rd_filp; | ||
265 | |||
266 | struct svc_rqst *rd_rqstp; /* response */ | ||
267 | struct svc_fh * rd_fhp; /* response */ | ||
268 | }; | ||
269 | |||
270 | struct nfsd4_readdir { | ||
271 | u64 rd_cookie; /* request */ | ||
272 | nfs4_verifier rd_verf; /* request */ | ||
273 | u32 rd_dircount; /* request */ | ||
274 | u32 rd_maxcount; /* request */ | ||
275 | u32 rd_bmval[3]; /* request */ | ||
276 | struct svc_rqst *rd_rqstp; /* response */ | ||
277 | struct svc_fh * rd_fhp; /* response */ | ||
278 | |||
279 | struct readdir_cd common; | ||
280 | __be32 * buffer; | ||
281 | int buflen; | ||
282 | __be32 * offset; | ||
283 | }; | ||
284 | |||
285 | struct nfsd4_release_lockowner { | ||
286 | clientid_t rl_clientid; | ||
287 | struct xdr_netobj rl_owner; | ||
288 | }; | ||
289 | struct nfsd4_readlink { | ||
290 | struct svc_rqst *rl_rqstp; /* request */ | ||
291 | struct svc_fh * rl_fhp; /* request */ | ||
292 | }; | ||
293 | |||
294 | struct nfsd4_remove { | ||
295 | u32 rm_namelen; /* request */ | ||
296 | char * rm_name; /* request */ | ||
297 | struct nfsd4_change_info rm_cinfo; /* response */ | ||
298 | }; | ||
299 | |||
300 | struct nfsd4_rename { | ||
301 | u32 rn_snamelen; /* request */ | ||
302 | char * rn_sname; /* request */ | ||
303 | u32 rn_tnamelen; /* request */ | ||
304 | char * rn_tname; /* request */ | ||
305 | struct nfsd4_change_info rn_sinfo; /* response */ | ||
306 | struct nfsd4_change_info rn_tinfo; /* response */ | ||
307 | }; | ||
308 | |||
309 | struct nfsd4_secinfo { | ||
310 | u32 si_namelen; /* request */ | ||
311 | char *si_name; /* request */ | ||
312 | struct svc_export *si_exp; /* response */ | ||
313 | }; | ||
314 | |||
315 | struct nfsd4_setattr { | ||
316 | stateid_t sa_stateid; /* request */ | ||
317 | u32 sa_bmval[3]; /* request */ | ||
318 | struct iattr sa_iattr; /* request */ | ||
319 | struct nfs4_acl *sa_acl; | ||
320 | }; | ||
321 | |||
322 | struct nfsd4_setclientid { | ||
323 | nfs4_verifier se_verf; /* request */ | ||
324 | u32 se_namelen; /* request */ | ||
325 | char * se_name; /* request */ | ||
326 | u32 se_callback_prog; /* request */ | ||
327 | u32 se_callback_netid_len; /* request */ | ||
328 | char * se_callback_netid_val; /* request */ | ||
329 | u32 se_callback_addr_len; /* request */ | ||
330 | char * se_callback_addr_val; /* request */ | ||
331 | u32 se_callback_ident; /* request */ | ||
332 | clientid_t se_clientid; /* response */ | ||
333 | nfs4_verifier se_confirm; /* response */ | ||
334 | }; | ||
335 | |||
336 | struct nfsd4_setclientid_confirm { | ||
337 | clientid_t sc_clientid; | ||
338 | nfs4_verifier sc_confirm; | ||
339 | }; | ||
340 | |||
341 | /* also used for NVERIFY */ | ||
342 | struct nfsd4_verify { | ||
343 | u32 ve_bmval[3]; /* request */ | ||
344 | u32 ve_attrlen; /* request */ | ||
345 | char * ve_attrval; /* request */ | ||
346 | }; | ||
347 | |||
348 | struct nfsd4_write { | ||
349 | stateid_t wr_stateid; /* request */ | ||
350 | u64 wr_offset; /* request */ | ||
351 | u32 wr_stable_how; /* request */ | ||
352 | u32 wr_buflen; /* request */ | ||
353 | int wr_vlen; | ||
354 | |||
355 | u32 wr_bytes_written; /* response */ | ||
356 | u32 wr_how_written; /* response */ | ||
357 | nfs4_verifier wr_verifier; /* response */ | ||
358 | }; | ||
359 | |||
360 | struct nfsd4_exchange_id { | ||
361 | nfs4_verifier verifier; | ||
362 | struct xdr_netobj clname; | ||
363 | u32 flags; | ||
364 | clientid_t clientid; | ||
365 | u32 seqid; | ||
366 | int spa_how; | ||
367 | }; | ||
368 | |||
369 | struct nfsd4_sequence { | ||
370 | struct nfs4_sessionid sessionid; /* request/response */ | ||
371 | u32 seqid; /* request/response */ | ||
372 | u32 slotid; /* request/response */ | ||
373 | u32 maxslots; /* request/response */ | ||
374 | u32 cachethis; /* request */ | ||
375 | #if 0 | ||
376 | u32 target_maxslots; /* response */ | ||
377 | u32 status_flags; /* response */ | ||
378 | #endif /* not yet */ | ||
379 | }; | ||
380 | |||
381 | struct nfsd4_destroy_session { | ||
382 | struct nfs4_sessionid sessionid; | ||
383 | }; | ||
384 | |||
385 | struct nfsd4_op { | ||
386 | int opnum; | ||
387 | __be32 status; | ||
388 | union { | ||
389 | struct nfsd4_access access; | ||
390 | struct nfsd4_close close; | ||
391 | struct nfsd4_commit commit; | ||
392 | struct nfsd4_create create; | ||
393 | struct nfsd4_delegreturn delegreturn; | ||
394 | struct nfsd4_getattr getattr; | ||
395 | struct svc_fh * getfh; | ||
396 | struct nfsd4_link link; | ||
397 | struct nfsd4_lock lock; | ||
398 | struct nfsd4_lockt lockt; | ||
399 | struct nfsd4_locku locku; | ||
400 | struct nfsd4_lookup lookup; | ||
401 | struct nfsd4_verify nverify; | ||
402 | struct nfsd4_open open; | ||
403 | struct nfsd4_open_confirm open_confirm; | ||
404 | struct nfsd4_open_downgrade open_downgrade; | ||
405 | struct nfsd4_putfh putfh; | ||
406 | struct nfsd4_read read; | ||
407 | struct nfsd4_readdir readdir; | ||
408 | struct nfsd4_readlink readlink; | ||
409 | struct nfsd4_remove remove; | ||
410 | struct nfsd4_rename rename; | ||
411 | clientid_t renew; | ||
412 | struct nfsd4_secinfo secinfo; | ||
413 | struct nfsd4_setattr setattr; | ||
414 | struct nfsd4_setclientid setclientid; | ||
415 | struct nfsd4_setclientid_confirm setclientid_confirm; | ||
416 | struct nfsd4_verify verify; | ||
417 | struct nfsd4_write write; | ||
418 | struct nfsd4_release_lockowner release_lockowner; | ||
419 | |||
420 | /* NFSv4.1 */ | ||
421 | struct nfsd4_exchange_id exchange_id; | ||
422 | struct nfsd4_create_session create_session; | ||
423 | struct nfsd4_destroy_session destroy_session; | ||
424 | struct nfsd4_sequence sequence; | ||
425 | } u; | ||
426 | struct nfs4_replay * replay; | ||
427 | }; | ||
428 | |||
429 | struct nfsd4_compoundargs { | ||
430 | /* scratch variables for XDR decode */ | ||
431 | __be32 * p; | ||
432 | __be32 * end; | ||
433 | struct page ** pagelist; | ||
434 | int pagelen; | ||
435 | __be32 tmp[8]; | ||
436 | __be32 * tmpp; | ||
437 | struct tmpbuf { | ||
438 | struct tmpbuf *next; | ||
439 | void (*release)(const void *); | ||
440 | void *buf; | ||
441 | } *to_free; | ||
442 | |||
443 | struct svc_rqst *rqstp; | ||
444 | |||
445 | u32 taglen; | ||
446 | char * tag; | ||
447 | u32 minorversion; | ||
448 | u32 opcnt; | ||
449 | struct nfsd4_op *ops; | ||
450 | struct nfsd4_op iops[8]; | ||
451 | }; | ||
452 | |||
453 | struct nfsd4_compoundres { | ||
454 | /* scratch variables for XDR encode */ | ||
455 | __be32 * p; | ||
456 | __be32 * end; | ||
457 | struct xdr_buf * xbuf; | ||
458 | struct svc_rqst * rqstp; | ||
459 | |||
460 | u32 taglen; | ||
461 | char * tag; | ||
462 | u32 opcnt; | ||
463 | __be32 * tagp; /* tag, opcount encode location */ | ||
464 | struct nfsd4_compound_state cstate; | ||
465 | }; | ||
466 | |||
467 | static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) | ||
468 | { | ||
469 | struct nfsd4_compoundargs *args = resp->rqstp->rq_argp; | ||
470 | return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE; | ||
471 | } | ||
472 | |||
473 | static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) | ||
474 | { | ||
475 | return !resp->cstate.slot->sl_cachethis || nfsd4_is_solo_sequence(resp); | ||
476 | } | ||
477 | |||
478 | #define NFS4_SVC_XDRSIZE sizeof(struct nfsd4_compoundargs) | ||
479 | |||
480 | static inline void | ||
481 | set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp) | ||
482 | { | ||
483 | BUG_ON(!fhp->fh_pre_saved || !fhp->fh_post_saved); | ||
484 | cinfo->atomic = 1; | ||
485 | cinfo->change_supported = IS_I_VERSION(fhp->fh_dentry->d_inode); | ||
486 | if (cinfo->change_supported) { | ||
487 | cinfo->before_change = fhp->fh_pre_change; | ||
488 | cinfo->after_change = fhp->fh_post_change; | ||
489 | } else { | ||
490 | cinfo->before_ctime_sec = fhp->fh_pre_ctime.tv_sec; | ||
491 | cinfo->before_ctime_nsec = fhp->fh_pre_ctime.tv_nsec; | ||
492 | cinfo->after_ctime_sec = fhp->fh_post_attr.ctime.tv_sec; | ||
493 | cinfo->after_ctime_nsec = fhp->fh_post_attr.ctime.tv_nsec; | ||
494 | } | ||
495 | } | ||
496 | |||
497 | int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *); | ||
498 | int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *, | ||
499 | struct nfsd4_compoundargs *); | ||
500 | int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *, | ||
501 | struct nfsd4_compoundres *); | ||
502 | void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *); | ||
503 | void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op); | ||
504 | __be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, | ||
505 | struct dentry *dentry, __be32 *buffer, int *countp, | ||
506 | u32 *bmval, struct svc_rqst *, int ignore_crossmnt); | ||
507 | extern __be32 nfsd4_setclientid(struct svc_rqst *rqstp, | ||
508 | struct nfsd4_compound_state *, | ||
509 | struct nfsd4_setclientid *setclid); | ||
510 | extern __be32 nfsd4_setclientid_confirm(struct svc_rqst *rqstp, | ||
511 | struct nfsd4_compound_state *, | ||
512 | struct nfsd4_setclientid_confirm *setclientid_confirm); | ||
513 | extern void nfsd4_store_cache_entry(struct nfsd4_compoundres *resp); | ||
514 | extern __be32 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp, | ||
515 | struct nfsd4_sequence *seq); | ||
516 | extern __be32 nfsd4_exchange_id(struct svc_rqst *rqstp, | ||
517 | struct nfsd4_compound_state *, | ||
518 | struct nfsd4_exchange_id *); | ||
519 | extern __be32 nfsd4_create_session(struct svc_rqst *, | ||
520 | struct nfsd4_compound_state *, | ||
521 | struct nfsd4_create_session *); | ||
522 | extern __be32 nfsd4_sequence(struct svc_rqst *, | ||
523 | struct nfsd4_compound_state *, | ||
524 | struct nfsd4_sequence *); | ||
525 | extern __be32 nfsd4_destroy_session(struct svc_rqst *, | ||
526 | struct nfsd4_compound_state *, | ||
527 | struct nfsd4_destroy_session *); | ||
528 | extern __be32 nfsd4_process_open1(struct nfsd4_compound_state *, | ||
529 | struct nfsd4_open *open); | ||
530 | extern __be32 nfsd4_process_open2(struct svc_rqst *rqstp, | ||
531 | struct svc_fh *current_fh, struct nfsd4_open *open); | ||
532 | extern __be32 nfsd4_open_confirm(struct svc_rqst *rqstp, | ||
533 | struct nfsd4_compound_state *, struct nfsd4_open_confirm *oc); | ||
534 | extern __be32 nfsd4_close(struct svc_rqst *rqstp, | ||
535 | struct nfsd4_compound_state *, | ||
536 | struct nfsd4_close *close); | ||
537 | extern __be32 nfsd4_open_downgrade(struct svc_rqst *rqstp, | ||
538 | struct nfsd4_compound_state *, | ||
539 | struct nfsd4_open_downgrade *od); | ||
540 | extern __be32 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *, | ||
541 | struct nfsd4_lock *lock); | ||
542 | extern __be32 nfsd4_lockt(struct svc_rqst *rqstp, | ||
543 | struct nfsd4_compound_state *, | ||
544 | struct nfsd4_lockt *lockt); | ||
545 | extern __be32 nfsd4_locku(struct svc_rqst *rqstp, | ||
546 | struct nfsd4_compound_state *, | ||
547 | struct nfsd4_locku *locku); | ||
548 | extern __be32 | ||
549 | nfsd4_release_lockowner(struct svc_rqst *rqstp, | ||
550 | struct nfsd4_compound_state *, | ||
551 | struct nfsd4_release_lockowner *rlockowner); | ||
552 | extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *); | ||
553 | extern __be32 nfsd4_delegreturn(struct svc_rqst *rqstp, | ||
554 | struct nfsd4_compound_state *, struct nfsd4_delegreturn *dr); | ||
555 | extern __be32 nfsd4_renew(struct svc_rqst *rqstp, | ||
556 | struct nfsd4_compound_state *, clientid_t *clid); | ||
557 | #endif | ||
558 | |||
559 | /* | ||
560 | * Local variables: | ||
561 | * c-basic-offset: 8 | ||
562 | * End: | ||
563 | */ | ||
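XDR_LEN() in the xdr4.h removed above rounds a byte count up to the next 4-byte XDR quadword boundary, which is how variable-length fields (owners, tags, names) are padded on the wire. The expression is self-contained and easy to check in isolation:

#include <assert.h>
#include <stdio.h>

/* Round n up to a multiple of 4, the same expression as nfsd's XDR_LEN(). */
#define XDR_LEN(n)	(((n) + 3) & ~3)

int main(void)
{
	assert(XDR_LEN(0) == 0);
	assert(XDR_LEN(1) == 4);
	assert(XDR_LEN(4) == 4);
	assert(XDR_LEN(5) == 8);
	printf("XDR_LEN(13) = %d\n", XDR_LEN(13));	/* prints 16 */
	return 0;
}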
diff --git a/include/linux/node.h b/include/linux/node.h index 681a697b9a86..06292dac3eab 100644 --- a/include/linux/node.h +++ b/include/linux/node.h | |||
@@ -21,13 +21,19 @@ | |||
21 | 21 | ||
22 | #include <linux/sysdev.h> | 22 | #include <linux/sysdev.h> |
23 | #include <linux/cpumask.h> | 23 | #include <linux/cpumask.h> |
24 | #include <linux/workqueue.h> | ||
24 | 25 | ||
25 | struct node { | 26 | struct node { |
26 | struct sys_device sysdev; | 27 | struct sys_device sysdev; |
28 | |||
29 | #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS) | ||
30 | struct work_struct node_work; | ||
31 | #endif | ||
27 | }; | 32 | }; |
28 | 33 | ||
29 | struct memory_block; | 34 | struct memory_block; |
30 | extern struct node node_devices[]; | 35 | extern struct node node_devices[]; |
36 | typedef void (*node_registration_func_t)(struct node *); | ||
31 | 37 | ||
32 | extern int register_node(struct node *, int, struct node *); | 38 | extern int register_node(struct node *, int, struct node *); |
33 | extern void unregister_node(struct node *node); | 39 | extern void unregister_node(struct node *node); |
@@ -39,6 +45,11 @@ extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); | |||
39 | extern int register_mem_sect_under_node(struct memory_block *mem_blk, | 45 | extern int register_mem_sect_under_node(struct memory_block *mem_blk, |
40 | int nid); | 46 | int nid); |
41 | extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk); | 47 | extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk); |
48 | |||
49 | #ifdef CONFIG_HUGETLBFS | ||
50 | extern void register_hugetlbfs_with_node(node_registration_func_t doregister, | ||
51 | node_registration_func_t unregister); | ||
52 | #endif | ||
42 | #else | 53 | #else |
43 | static inline int register_one_node(int nid) | 54 | static inline int register_one_node(int nid) |
44 | { | 55 | { |
@@ -65,6 +76,11 @@ static inline int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) | |||
65 | { | 76 | { |
66 | return 0; | 77 | return 0; |
67 | } | 78 | } |
79 | |||
80 | static inline void register_hugetlbfs_with_node(node_registration_func_t reg, | ||
81 | node_registration_func_t unreg) | ||
82 | { | ||
83 | } | ||
68 | #endif | 84 | #endif |
69 | 85 | ||
70 | #define to_node(sys_device) container_of(sys_device, struct node, sysdev) | 86 | #define to_node(sys_device) container_of(sys_device, struct node, sysdev) |
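The to_node() macro above is the usual container_of() pattern: given a pointer to the sysdev member embedded in struct node, recover the enclosing node. A standalone illustration of the same pointer arithmetic, with toy stand-ins for the kernel-only struct sys_device and a minimal container_of() (the kernel version adds type checking):

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of: step back from a member to its enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Toy stand-ins for the kernel types. */
struct sys_device { int id; };
struct node {
	int nid;
	struct sys_device sysdev;	/* embedded member */
};

#define to_node(sys_device) container_of(sys_device, struct node, sysdev)

int main(void)
{
	struct node n = { .nid = 3 };
	struct sys_device *dev = &n.sysdev;	/* what a driver-core callback sees */

	printf("recovered nid = %d\n", to_node(dev)->nid);	/* prints 3 */
	return 0;
}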
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index b359c4a9ec9e..454997cccbd8 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h | |||
@@ -245,14 +245,19 @@ static inline int __next_node(int n, const nodemask_t *srcp) | |||
245 | return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); | 245 | return min_t(int,MAX_NUMNODES,find_next_bit(srcp->bits, MAX_NUMNODES, n+1)); |
246 | } | 246 | } |
247 | 247 | ||
248 | static inline void init_nodemask_of_node(nodemask_t *mask, int node) | ||
249 | { | ||
250 | nodes_clear(*mask); | ||
251 | node_set(node, *mask); | ||
252 | } | ||
253 | |||
248 | #define nodemask_of_node(node) \ | 254 | #define nodemask_of_node(node) \ |
249 | ({ \ | 255 | ({ \ |
250 | typeof(_unused_nodemask_arg_) m; \ | 256 | typeof(_unused_nodemask_arg_) m; \ |
251 | if (sizeof(m) == sizeof(unsigned long)) { \ | 257 | if (sizeof(m) == sizeof(unsigned long)) { \ |
252 | m.bits[0] = 1UL<<(node); \ | 258 | m.bits[0] = 1UL << (node); \ |
253 | } else { \ | 259 | } else { \ |
254 | nodes_clear(m); \ | 260 | init_nodemask_of_node(&m, (node)); \ |
255 | node_set((node), m); \ | ||
256 | } \ | 261 | } \ |
257 | m; \ | 262 | m; \ |
258 | }) | 263 | }) |
@@ -480,15 +485,17 @@ static inline int num_node_state(enum node_states state) | |||
480 | #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) | 485 | #define for_each_online_node(node) for_each_node_state(node, N_ONLINE) |
481 | 486 | ||
482 | /* | 487 | /* |
483 | * For nodemask scratch area. (See CPUMASK_ALLOC() in cpumask.h) | 488 | * For nodemask scratch area. |
489 | * NODEMASK_ALLOC(type, name, gfp_flags) allocates an object with a specified | ||
490 | * type and name, using gfp_flags when the mask has to be kmalloc'ed. | ||
484 | */ | 491 | */ |
485 | 492 | #if NODES_SHIFT > 8 /* nodemask_t > 256 bits */ | |
486 | #if NODES_SHIFT > 8 /* nodemask_t > 64 bytes */ | 493 | #define NODEMASK_ALLOC(type, name, gfp_flags) \ |
487 | #define NODEMASK_ALLOC(x, m) struct x *m = kmalloc(sizeof(*m), GFP_KERNEL) | 494 | type *name = kmalloc(sizeof(*name), gfp_flags) |
488 | #define NODEMASK_FREE(m) kfree(m) | 495 | #define NODEMASK_FREE(m) kfree(m) |
489 | #else | 496 | #else |
490 | #define NODEMASK_ALLOC(x, m) struct x _m, *m = &_m | 497 | #define NODEMASK_ALLOC(type, name, gfp_flags) type _name, *name = &_name |
491 | #define NODEMASK_FREE(m) | 498 | #define NODEMASK_FREE(m) do {} while (0) |
492 | #endif | 499 | #endif |
493 | 500 | ||
494 | /* An example structure for using NODEMASK_ALLOC, used in mempolicy. */ | 501 |
@@ -497,8 +504,10 @@ struct nodemask_scratch { | |||
497 | nodemask_t mask2; | 504 | nodemask_t mask2; |
498 | }; | 505 | }; |
499 | 506 | ||
500 | #define NODEMASK_SCRATCH(x) NODEMASK_ALLOC(nodemask_scratch, x) | 507 | #define NODEMASK_SCRATCH(x) \ |
501 | #define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) | 508 | NODEMASK_ALLOC(struct nodemask_scratch, x, \ |
509 | GFP_KERNEL | __GFP_NORETRY) | ||
510 | #define NODEMASK_SCRATCH_FREE(x) NODEMASK_FREE(x) | ||
502 | 511 | ||
503 | 512 | ||
504 | #endif /* __LINUX_NODEMASK_H */ | 513 | #endif /* __LINUX_NODEMASK_H */ |
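The reworked NODEMASK_ALLOC() above keeps one calling convention while switching between a kmalloc()ed mask (large NODES_SHIFT) and a plain on-stack variable (small NODES_SHIFT); either way the caller pairs it with NODEMASK_FREE(). A userspace sketch of the same pattern, with a toy mask type, malloc() standing in for kmalloc(), and an init_nodemask_of_node() lookalike (names and sizes here are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for nodemask_t: one bit per node. */
#define MAX_NUMNODES 64
typedef struct {
	unsigned long bits[MAX_NUMNODES / (8 * sizeof(unsigned long))];
} nodemask_t;

/* Flip LARGE_MASK to 0 for the on-stack variant, where FREE is a no-op,
 * mirroring the NODES_SHIFT > 8 switch in nodemask.h. */
#define LARGE_MASK 1
#if LARGE_MASK
#define NODEMASK_ALLOC(type, name, flags) type *name = malloc(sizeof(*name))
#define NODEMASK_FREE(m) free(m)
#else
#define NODEMASK_ALLOC(type, name, flags) type _##name, *name = &_##name
#define NODEMASK_FREE(m) do {} while (0)
#endif

/* Clear the mask, then set exactly one node bit (nodes_clear + node_set). */
static void init_nodemask_of_node(nodemask_t *mask, int node)
{
	memset(mask, 0, sizeof(*mask));
	mask->bits[node / (8 * sizeof(unsigned long))] |=
		1UL << (node % (8 * sizeof(unsigned long)));
}

int main(void)
{
	NODEMASK_ALLOC(nodemask_t, mask, 0);	/* flags ignored in this toy */

	if (!mask)
		return 1;
	init_nodemask_of_node(mask, 3);
	printf("bits[0] = %#lx\n", mask->bits[0]);	/* prints 0x8 */
	NODEMASK_FREE(mask);
	return 0;
}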
diff --git a/include/linux/numa.h b/include/linux/numa.h index a31a7301b159..3aaa31603a86 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h | |||
@@ -10,4 +10,6 @@ | |||
10 | 10 | ||
11 | #define MAX_NUMNODES (1 << NODES_SHIFT) | 11 | #define MAX_NUMNODES (1 << NODES_SHIFT) |
12 | 12 | ||
13 | #define NUMA_NO_NODE (-1) | ||
14 | |||
13 | #endif /* _LINUX_NUMA_H */ | 15 | #endif /* _LINUX_NUMA_H */ |
diff --git a/include/linux/oom.h b/include/linux/oom.h index 6aac5fe4f6f1..537662315627 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #ifdef __KERNEL__ | 10 | #ifdef __KERNEL__ |
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/nodemask.h> | ||
13 | 14 | ||
14 | struct zonelist; | 15 | struct zonelist; |
15 | struct notifier_block; | 16 | struct notifier_block; |
@@ -26,7 +27,8 @@ enum oom_constraint { | |||
26 | extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags); | 27 | extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags); |
27 | extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); | 28 | extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); |
28 | 29 | ||
29 | extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order); | 30 | extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, |
31 | int order, nodemask_t *mask); | ||
30 | extern int register_oom_notifier(struct notifier_block *nb); | 32 | extern int register_oom_notifier(struct notifier_block *nb); |
31 | extern int unregister_oom_notifier(struct notifier_block *nb); | 33 | extern int unregister_oom_notifier(struct notifier_block *nb); |
32 | 34 | ||
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6b202b173955..5b59f35dcb8f 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | #ifndef __GENERATING_BOUNDS_H | 9 | #ifndef __GENERATING_BOUNDS_H |
10 | #include <linux/mm_types.h> | 10 | #include <linux/mm_types.h> |
11 | #include <linux/bounds.h> | 11 | #include <generated/bounds.h> |
12 | #endif /* !__GENERATING_BOUNDS_H */ | 12 | #endif /* !__GENERATING_BOUNDS_H */ |
13 | 13 | ||
14 | /* | 14 | /* |
@@ -99,7 +99,7 @@ enum pageflags { | |||
99 | PG_buddy, /* Page is free, on buddy lists */ | 99 | PG_buddy, /* Page is free, on buddy lists */ |
100 | PG_swapbacked, /* Page is backed by RAM/swap */ | 100 | PG_swapbacked, /* Page is backed by RAM/swap */ |
101 | PG_unevictable, /* Page is "unevictable" */ | 101 | PG_unevictable, /* Page is "unevictable" */ |
102 | #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT | 102 | #ifdef CONFIG_MMU |
103 | PG_mlocked, /* Page is vma mlocked */ | 103 | PG_mlocked, /* Page is vma mlocked */ |
104 | #endif | 104 | #endif |
105 | #ifdef CONFIG_ARCH_USES_PG_UNCACHED | 105 | #ifdef CONFIG_ARCH_USES_PG_UNCACHED |
@@ -259,12 +259,10 @@ PAGEFLAG_FALSE(SwapCache) | |||
259 | PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) | 259 | PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable) |
260 | TESTCLEARFLAG(Unevictable, unevictable) | 260 | TESTCLEARFLAG(Unevictable, unevictable) |
261 | 261 | ||
262 | #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT | 262 | #ifdef CONFIG_MMU |
263 | #define MLOCK_PAGES 1 | ||
264 | PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) | 263 | PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked) |
265 | TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) | 264 | TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked) |
266 | #else | 265 | #else |
267 | #define MLOCK_PAGES 0 | ||
268 | PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) | 266 | PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked) |
269 | TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) | 267 | TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked) |
270 | #endif | 268 | #endif |
@@ -277,13 +275,15 @@ PAGEFLAG_FALSE(Uncached) | |||
277 | 275 | ||
278 | #ifdef CONFIG_MEMORY_FAILURE | 276 | #ifdef CONFIG_MEMORY_FAILURE |
279 | PAGEFLAG(HWPoison, hwpoison) | 277 | PAGEFLAG(HWPoison, hwpoison) |
280 | TESTSETFLAG(HWPoison, hwpoison) | 278 | TESTSCFLAG(HWPoison, hwpoison) |
281 | #define __PG_HWPOISON (1UL << PG_hwpoison) | 279 | #define __PG_HWPOISON (1UL << PG_hwpoison) |
282 | #else | 280 | #else |
283 | PAGEFLAG_FALSE(HWPoison) | 281 | PAGEFLAG_FALSE(HWPoison) |
284 | #define __PG_HWPOISON 0 | 282 | #define __PG_HWPOISON 0 |
285 | #endif | 283 | #endif |
286 | 284 | ||
285 | u64 stable_page_flags(struct page *page); | ||
286 | |||
287 | static inline int PageUptodate(struct page *page) | 287 | static inline int PageUptodate(struct page *page) |
288 | { | 288 | { |
289 | int ret = test_bit(PG_uptodate, &(page)->flags); | 289 | int ret = test_bit(PG_uptodate, &(page)->flags); |
@@ -393,7 +393,7 @@ static inline void __ClearPageTail(struct page *page) | |||
393 | 393 | ||
394 | #endif /* !PAGEFLAGS_EXTENDED */ | 394 | #endif /* !PAGEFLAGS_EXTENDED */ |
395 | 395 | ||
396 | #ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT | 396 | #ifdef CONFIG_MMU |
397 | #define __PG_MLOCKED (1 << PG_mlocked) | 397 | #define __PG_MLOCKED (1 << PG_mlocked) |
398 | #else | 398 | #else |
399 | #define __PG_MLOCKED 0 | 399 | #define __PG_MLOCKED 0 |
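The PG_mlocked hunks above lean on the PAGEFLAG()/PAGEFLAG_FALSE() generators: one invocation emits a whole family of PageFoo()/SetPageFoo()/ClearPageFoo() helpers, and the _FALSE form stubs them out when the flag is configured away (here, on !CONFIG_MMU). A compact, non-atomic userspace imitation of that generator idiom (the real macros take a PG_* enum name and use atomic bitops):

#include <stdio.h>

struct page { unsigned long flags; };

/* Generate Page/SetPage/ClearPage helpers for one bit. */
#define PAGEFLAG(uname, bit)						\
static inline int Page##uname(struct page *p)				\
	{ return !!(p->flags & (1UL << (bit))); }			\
static inline void SetPage##uname(struct page *p)			\
	{ p->flags |= 1UL << (bit); }					\
static inline void ClearPage##uname(struct page *p)			\
	{ p->flags &= ~(1UL << (bit)); }

/* Stub generator for flags that are compiled out (PAGEFLAG_FALSE). */
#define PAGEFLAG_FALSE(uname)						\
static inline int Page##uname(struct page *p) { (void)p; return 0; }	\
static inline void SetPage##uname(struct page *p) { (void)p; }		\
static inline void ClearPage##uname(struct page *p) { (void)p; }

#define HAVE_MLOCKED 1		/* stand-in for CONFIG_MMU */
#if HAVE_MLOCKED
PAGEFLAG(Mlocked, 5)
#else
PAGEFLAG_FALSE(Mlocked)
#endif

int main(void)
{
	struct page p = { 0 };

	SetPageMlocked(&p);
	printf("mlocked=%d flags=%#lx\n", PageMlocked(&p), p.flags);
	ClearPageMlocked(&p);
	printf("mlocked=%d\n", PageMlocked(&p));
	return 0;
}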
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 4b938d4f3ac2..b0e4eb126236 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -57,6 +57,8 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \ | |||
57 | static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ | 57 | static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ |
58 | { return test_and_clear_bit(PCG_##lname, &pc->flags); } | 58 | { return test_and_clear_bit(PCG_##lname, &pc->flags); } |
59 | 59 | ||
60 | TESTPCGFLAG(Locked, LOCK) | ||
61 | |||
60 | /* Cache flag is set only once (at allocation) */ | 62 | /* Cache flag is set only once (at allocation) */ |
61 | TESTPCGFLAG(Cache, CACHE) | 63 | TESTPCGFLAG(Cache, CACHE) |
62 | CLEARPCGFLAG(Cache, CACHE) | 64 | CLEARPCGFLAG(Cache, CACHE) |
@@ -86,11 +88,6 @@ static inline void lock_page_cgroup(struct page_cgroup *pc) | |||
86 | bit_spin_lock(PCG_LOCK, &pc->flags); | 88 | bit_spin_lock(PCG_LOCK, &pc->flags); |
87 | } | 89 | } |
88 | 90 | ||
89 | static inline int trylock_page_cgroup(struct page_cgroup *pc) | ||
90 | { | ||
91 | return bit_spin_trylock(PCG_LOCK, &pc->flags); | ||
92 | } | ||
93 | |||
94 | static inline void unlock_page_cgroup(struct page_cgroup *pc) | 91 | static inline void unlock_page_cgroup(struct page_cgroup *pc) |
95 | { | 92 | { |
96 | bit_spin_unlock(PCG_LOCK, &pc->flags); | 93 | bit_spin_unlock(PCG_LOCK, &pc->flags); |
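lock_page_cgroup()/unlock_page_cgroup() shown above are thin wrappers around a bit spinlock on the PCG_LOCK bit of pc->flags; the trylock helper is removed here, apparently having lost its last caller. A bit spinlock is essentially an atomic test-and-set of one bit in the flags word; a rough userspace approximation with C11 atomics (not the kernel's bit_spin_lock(), which also disables preemption):

#include <stdatomic.h>
#include <stdio.h>

/* Spin until this caller is the one that flipped `bit` from 0 to 1. */
static void bit_spin_lock(int bit, atomic_ulong *word)
{
	unsigned long mask = 1UL << bit;

	while (atomic_fetch_or_explicit(word, mask, memory_order_acquire) & mask)
		;	/* bit already set: someone else holds the lock */
}

static void bit_spin_unlock(int bit, atomic_ulong *word)
{
	atomic_fetch_and_explicit(word, ~(1UL << bit), memory_order_release);
}

#define PCG_LOCK 0	/* lowest flag bit, as in page_cgroup's flag enum */

struct page_cgroup { atomic_ulong flags; };

int main(void)
{
	struct page_cgroup pc;

	atomic_init(&pc.flags, 0);
	bit_spin_lock(PCG_LOCK, &pc.flags);	/* lock_page_cgroup()   */
	printf("locked, flags=%#lx\n", (unsigned long)atomic_load(&pc.flags));
	bit_spin_unlock(PCG_LOCK, &pc.flags);	/* unlock_page_cgroup() */
	printf("unlocked, flags=%#lx\n", (unsigned long)atomic_load(&pc.flags));
	return 0;
}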
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ed5d7501e181..3c62ed408492 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -253,6 +253,8 @@ extern struct page * read_cache_page_async(struct address_space *mapping, | |||
253 | extern struct page * read_cache_page(struct address_space *mapping, | 253 | extern struct page * read_cache_page(struct address_space *mapping, |
254 | pgoff_t index, filler_t *filler, | 254 | pgoff_t index, filler_t *filler, |
255 | void *data); | 255 | void *data); |
256 | extern struct page * read_cache_page_gfp(struct address_space *mapping, | ||
257 | pgoff_t index, gfp_t gfp_mask); | ||
256 | extern int read_cache_pages(struct address_space *mapping, | 258 | extern int read_cache_pages(struct address_space *mapping, |
257 | struct list_head *pages, filler_t *filler, void *data); | 259 | struct list_head *pages, filler_t *filler, void *data); |
258 | 260 | ||
diff --git a/include/linux/pci.h b/include/linux/pci.h index bf1e67080849..174e5392e51e 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -243,6 +243,7 @@ struct pci_dev { | |||
243 | unsigned int d2_support:1; /* Low power state D2 is supported */ | 243 | unsigned int d2_support:1; /* Low power state D2 is supported */ |
244 | unsigned int no_d1d2:1; /* Only allow D0 and D3 */ | 244 | unsigned int no_d1d2:1; /* Only allow D0 and D3 */ |
245 | unsigned int wakeup_prepared:1; | 245 | unsigned int wakeup_prepared:1; |
246 | unsigned int d3_delay; /* D3->D0 transition time in ms */ | ||
246 | 247 | ||
247 | #ifdef CONFIG_PCIEASPM | 248 | #ifdef CONFIG_PCIEASPM |
248 | struct pcie_link_state *link_state; /* ASPM link state. */ | 249 | struct pcie_link_state *link_state; /* ASPM link state. */ |
@@ -566,6 +567,9 @@ void pcibios_align_resource(void *, struct resource *, resource_size_t, | |||
566 | resource_size_t); | 567 | resource_size_t); |
567 | void pcibios_update_irq(struct pci_dev *, int irq); | 568 | void pcibios_update_irq(struct pci_dev *, int irq); |
568 | 569 | ||
570 | /* Weak but can be overridden by arch */ | ||
571 | void pci_fixup_cardbus(struct pci_bus *); | ||
572 | |||
569 | /* Generic PCI functions used internally */ | 573 | /* Generic PCI functions used internally */ |
570 | 574 | ||
571 | extern struct pci_bus *pci_find_bus(int domain, int busnr); | 575 | extern struct pci_bus *pci_find_bus(int domain, int busnr); |
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h deleted file mode 100644 index e3fb25606706..000000000000 --- a/include/linux/perf_counter.h +++ /dev/null | |||
@@ -1,444 +0,0 @@ | |||
1 | /* | ||
2 | * NOTE: this file will be removed in a future kernel release, it is | ||
3 | * provided as a courtesy copy of user-space code that relies on the | ||
4 | * old (pre-rename) symbols and constants. | ||
5 | * | ||
6 | * Performance events: | ||
7 | * | ||
8 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | ||
9 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | ||
10 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra | ||
11 | * | ||
12 | * Data type definitions, declarations, prototypes. | ||
13 | * | ||
14 | * Started by: Thomas Gleixner and Ingo Molnar | ||
15 | * | ||
16 | * For licencing details see kernel-base/COPYING | ||
17 | */ | ||
18 | #ifndef _LINUX_PERF_COUNTER_H | ||
19 | #define _LINUX_PERF_COUNTER_H | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | #include <linux/ioctl.h> | ||
23 | #include <asm/byteorder.h> | ||
24 | |||
25 | /* | ||
26 | * User-space ABI bits: | ||
27 | */ | ||
28 | |||
29 | /* | ||
30 | * attr.type | ||
31 | */ | ||
32 | enum perf_type_id { | ||
33 | PERF_TYPE_HARDWARE = 0, | ||
34 | PERF_TYPE_SOFTWARE = 1, | ||
35 | PERF_TYPE_TRACEPOINT = 2, | ||
36 | PERF_TYPE_HW_CACHE = 3, | ||
37 | PERF_TYPE_RAW = 4, | ||
38 | |||
39 | PERF_TYPE_MAX, /* non-ABI */ | ||
40 | }; | ||
41 | |||
42 | /* | ||
43 | * Generalized performance counter event types, used by the | ||
44 | * attr.event_id parameter of the sys_perf_counter_open() | ||
45 | * syscall: | ||
46 | */ | ||
47 | enum perf_hw_id { | ||
48 | /* | ||
49 | * Common hardware events, generalized by the kernel: | ||
50 | */ | ||
51 | PERF_COUNT_HW_CPU_CYCLES = 0, | ||
52 | PERF_COUNT_HW_INSTRUCTIONS = 1, | ||
53 | PERF_COUNT_HW_CACHE_REFERENCES = 2, | ||
54 | PERF_COUNT_HW_CACHE_MISSES = 3, | ||
55 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | ||
56 | PERF_COUNT_HW_BRANCH_MISSES = 5, | ||
57 | PERF_COUNT_HW_BUS_CYCLES = 6, | ||
58 | |||
59 | PERF_COUNT_HW_MAX, /* non-ABI */ | ||
60 | }; | ||
61 | |||
62 | /* | ||
63 | * Generalized hardware cache counters: | ||
64 | * | ||
65 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x | ||
66 | * { read, write, prefetch } x | ||
67 | * { accesses, misses } | ||
68 | */ | ||
69 | enum perf_hw_cache_id { | ||
70 | PERF_COUNT_HW_CACHE_L1D = 0, | ||
71 | PERF_COUNT_HW_CACHE_L1I = 1, | ||
72 | PERF_COUNT_HW_CACHE_LL = 2, | ||
73 | PERF_COUNT_HW_CACHE_DTLB = 3, | ||
74 | PERF_COUNT_HW_CACHE_ITLB = 4, | ||
75 | PERF_COUNT_HW_CACHE_BPU = 5, | ||
76 | |||
77 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ | ||
78 | }; | ||
79 | |||
80 | enum perf_hw_cache_op_id { | ||
81 | PERF_COUNT_HW_CACHE_OP_READ = 0, | ||
82 | PERF_COUNT_HW_CACHE_OP_WRITE = 1, | ||
83 | PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, | ||
84 | |||
85 | PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ | ||
86 | }; | ||
87 | |||
88 | enum perf_hw_cache_op_result_id { | ||
89 | PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, | ||
90 | PERF_COUNT_HW_CACHE_RESULT_MISS = 1, | ||
91 | |||
92 | PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ | ||
93 | }; | ||
94 | |||
95 | /* | ||
96 | * Special "software" counters provided by the kernel, even if the hardware | ||
97 | * does not support performance counters. These counters measure various | ||
98 | * physical and sw events of the kernel (and allow the profiling of them as | ||
99 | * well): | ||
100 | */ | ||
101 | enum perf_sw_ids { | ||
102 | PERF_COUNT_SW_CPU_CLOCK = 0, | ||
103 | PERF_COUNT_SW_TASK_CLOCK = 1, | ||
104 | PERF_COUNT_SW_PAGE_FAULTS = 2, | ||
105 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, | ||
106 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | ||
107 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | ||
108 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | ||
109 | PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, | ||
110 | PERF_COUNT_SW_EMULATION_FAULTS = 8, | ||
111 | |||
112 | PERF_COUNT_SW_MAX, /* non-ABI */ | ||
113 | }; | ||
114 | |||
115 | /* | ||
116 | * Bits that can be set in attr.sample_type to request information | ||
117 | * in the overflow packets. | ||
118 | */ | ||
119 | enum perf_counter_sample_format { | ||
120 | PERF_SAMPLE_IP = 1U << 0, | ||
121 | PERF_SAMPLE_TID = 1U << 1, | ||
122 | PERF_SAMPLE_TIME = 1U << 2, | ||
123 | PERF_SAMPLE_ADDR = 1U << 3, | ||
124 | PERF_SAMPLE_READ = 1U << 4, | ||
125 | PERF_SAMPLE_CALLCHAIN = 1U << 5, | ||
126 | PERF_SAMPLE_ID = 1U << 6, | ||
127 | PERF_SAMPLE_CPU = 1U << 7, | ||
128 | PERF_SAMPLE_PERIOD = 1U << 8, | ||
129 | PERF_SAMPLE_STREAM_ID = 1U << 9, | ||
130 | PERF_SAMPLE_RAW = 1U << 10, | ||
131 | |||
132 | PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ | ||
133 | }; | ||
134 | |||
135 | /* | ||
136 | * The format of the data returned by read() on a perf counter fd, | ||
137 | * as specified by attr.read_format: | ||
138 | * | ||
139 | * struct read_format { | ||
140 | * { u64 value; | ||
141 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | ||
142 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
143 | * { u64 id; } && PERF_FORMAT_ID | ||
144 | * } && !PERF_FORMAT_GROUP | ||
145 | * | ||
146 | * { u64 nr; | ||
147 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | ||
148 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
149 | * { u64 value; | ||
150 | * { u64 id; } && PERF_FORMAT_ID | ||
151 | * } cntr[nr]; | ||
152 | * } && PERF_FORMAT_GROUP | ||
153 | * }; | ||
154 | */ | ||
155 | enum perf_counter_read_format { | ||
156 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, | ||
157 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, | ||
158 | PERF_FORMAT_ID = 1U << 2, | ||
159 | PERF_FORMAT_GROUP = 1U << 3, | ||
160 | |||
161 | PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ | ||
162 | }; | ||
163 | |||
164 | #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ | ||
165 | |||
166 | /* | ||
167 | * Hardware event to monitor via a performance monitoring counter: | ||
168 | */ | ||
169 | struct perf_counter_attr { | ||
170 | |||
171 | /* | ||
172 | * Major type: hardware/software/tracepoint/etc. | ||
173 | */ | ||
174 | __u32 type; | ||
175 | |||
176 | /* | ||
177 | * Size of the attr structure, for fwd/bwd compat. | ||
178 | */ | ||
179 | __u32 size; | ||
180 | |||
181 | /* | ||
182 | * Type specific configuration information. | ||
183 | */ | ||
184 | __u64 config; | ||
185 | |||
186 | union { | ||
187 | __u64 sample_period; | ||
188 | __u64 sample_freq; | ||
189 | }; | ||
190 | |||
191 | __u64 sample_type; | ||
192 | __u64 read_format; | ||
193 | |||
194 | __u64 disabled : 1, /* off by default */ | ||
195 | inherit : 1, /* children inherit it */ | ||
196 | pinned : 1, /* must always be on PMU */ | ||
197 | exclusive : 1, /* only group on PMU */ | ||
198 | exclude_user : 1, /* don't count user */ | ||
199 | exclude_kernel : 1, /* ditto kernel */ | ||
200 | exclude_hv : 1, /* ditto hypervisor */ | ||
201 | exclude_idle : 1, /* don't count when idle */ | ||
202 | mmap : 1, /* include mmap data */ | ||
203 | comm : 1, /* include comm data */ | ||
204 | freq : 1, /* use freq, not period */ | ||
205 | inherit_stat : 1, /* per task counts */ | ||
206 | enable_on_exec : 1, /* next exec enables */ | ||
207 | task : 1, /* trace fork/exit */ | ||
208 | watermark : 1, /* wakeup_watermark */ | ||
209 | |||
210 | __reserved_1 : 49; | ||
211 | |||
212 | union { | ||
213 | __u32 wakeup_events; /* wakeup every n events */ | ||
214 | __u32 wakeup_watermark; /* bytes before wakeup */ | ||
215 | }; | ||
216 | __u32 __reserved_2; | ||
217 | |||
218 | __u64 __reserved_3; | ||
219 | }; | ||
220 | |||
221 | /* | ||
222 | * Ioctls that can be done on a perf counter fd: | ||
223 | */ | ||
224 | #define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) | ||
225 | #define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) | ||
226 | #define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) | ||
227 | #define PERF_COUNTER_IOC_RESET _IO ('$', 3) | ||
228 | #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) | ||
229 | #define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) | ||
230 | #define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *) | ||
231 | |||
232 | enum perf_counter_ioc_flags { | ||
233 | PERF_IOC_FLAG_GROUP = 1U << 0, | ||
234 | }; | ||
235 | |||
236 | /* | ||
237 | * Structure of the page that can be mapped via mmap | ||
238 | */ | ||
239 | struct perf_counter_mmap_page { | ||
240 | __u32 version; /* version number of this structure */ | ||
241 | __u32 compat_version; /* lowest version this is compat with */ | ||
242 | |||
243 | /* | ||
244 | * Bits needed to read the hw counters in user-space. | ||
245 | * | ||
246 | * u32 seq; | ||
247 | * s64 count; | ||
248 | * | ||
249 | * do { | ||
250 | * seq = pc->lock; | ||
251 | * | ||
252 | * barrier() | ||
253 | * if (pc->index) { | ||
254 | * count = pmc_read(pc->index - 1); | ||
255 | * count += pc->offset; | ||
256 | * } else | ||
257 | * goto regular_read; | ||
258 | * | ||
259 | * barrier(); | ||
260 | * } while (pc->lock != seq); | ||
261 | * | ||
262 | * NOTE: for obvious reasons this only works on self-monitoring | ||
263 | * processes. | ||
264 | */ | ||
265 | __u32 lock; /* seqlock for synchronization */ | ||
266 | __u32 index; /* hardware counter identifier */ | ||
267 | __s64 offset; /* add to hardware counter value */ | ||
268 | __u64 time_enabled; /* time counter active */ | ||
269 | __u64 time_running; /* time counter on cpu */ | ||
270 | |||
271 | /* | ||
272 | * Hole for extension of the self monitor capabilities | ||
273 | */ | ||
274 | |||
275 | __u64 __reserved[123]; /* align to 1k */ | ||
276 | |||
277 | /* | ||
278 | * Control data for the mmap() data buffer. | ||
279 | * | ||
280 | * User-space reading the @data_head value should issue an rmb(), on | ||
281 | * SMP capable platforms, after reading this value -- see | ||
282 | * perf_counter_wakeup(). | ||
283 | * | ||
284 | * When the mapping is PROT_WRITE the @data_tail value should be | ||
285 | * written by userspace to reflect the last read data. In this case | ||
286 | * the kernel will not over-write unread data. | ||
287 | */ | ||
288 | __u64 data_head; /* head in the data section */ | ||
289 | __u64 data_tail; /* user-space written tail */ | ||
290 | }; | ||
291 | |||
292 | #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) | ||
293 | #define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) | ||
294 | #define PERF_EVENT_MISC_KERNEL (1 << 0) | ||
295 | #define PERF_EVENT_MISC_USER (2 << 0) | ||
296 | #define PERF_EVENT_MISC_HYPERVISOR (3 << 0) | ||
297 | |||
298 | struct perf_event_header { | ||
299 | __u32 type; | ||
300 | __u16 misc; | ||
301 | __u16 size; | ||
302 | }; | ||
303 | |||
304 | enum perf_event_type { | ||
305 | |||
306 | /* | ||
307 | * The MMAP events record the PROT_EXEC mappings so that we can | ||
308 | * correlate userspace IPs to code. They have the following structure: | ||
309 | * | ||
310 | * struct { | ||
311 | * struct perf_event_header header; | ||
312 | * | ||
313 | * u32 pid, tid; | ||
314 | * u64 addr; | ||
315 | * u64 len; | ||
316 | * u64 pgoff; | ||
317 | * char filename[]; | ||
318 | * }; | ||
319 | */ | ||
320 | PERF_EVENT_MMAP = 1, | ||
321 | |||
322 | /* | ||
323 | * struct { | ||
324 | * struct perf_event_header header; | ||
325 | * u64 id; | ||
326 | * u64 lost; | ||
327 | * }; | ||
328 | */ | ||
329 | PERF_EVENT_LOST = 2, | ||
330 | |||
331 | /* | ||
332 | * struct { | ||
333 | * struct perf_event_header header; | ||
334 | * | ||
335 | * u32 pid, tid; | ||
336 | * char comm[]; | ||
337 | * }; | ||
338 | */ | ||
339 | PERF_EVENT_COMM = 3, | ||
340 | |||
341 | /* | ||
342 | * struct { | ||
343 | * struct perf_event_header header; | ||
344 | * u32 pid, ppid; | ||
345 | * u32 tid, ptid; | ||
346 | * u64 time; | ||
347 | * }; | ||
348 | */ | ||
349 | PERF_EVENT_EXIT = 4, | ||
350 | |||
351 | /* | ||
352 | * struct { | ||
353 | * struct perf_event_header header; | ||
354 | * u64 time; | ||
355 | * u64 id; | ||
356 | * u64 stream_id; | ||
357 | * }; | ||
358 | */ | ||
359 | PERF_EVENT_THROTTLE = 5, | ||
360 | PERF_EVENT_UNTHROTTLE = 6, | ||
361 | |||
362 | /* | ||
363 | * struct { | ||
364 | * struct perf_event_header header; | ||
365 | * u32 pid, ppid; | ||
366 | * u32 tid, ptid; | ||
367 | * u64 time; | ||
368 | * }; | ||
369 | */ | ||
370 | PERF_EVENT_FORK = 7, | ||
371 | |||
372 | /* | ||
373 | * struct { | ||
374 | * struct perf_event_header header; | ||
375 | * u32 pid, tid; | ||
376 | * | ||
377 | * struct read_format values; | ||
378 | * }; | ||
379 | */ | ||
380 | PERF_EVENT_READ = 8, | ||
381 | |||
382 | /* | ||
383 | * struct { | ||
384 | * struct perf_event_header header; | ||
385 | * | ||
386 | * { u64 ip; } && PERF_SAMPLE_IP | ||
387 | * { u32 pid, tid; } && PERF_SAMPLE_TID | ||
388 | * { u64 time; } && PERF_SAMPLE_TIME | ||
389 | * { u64 addr; } && PERF_SAMPLE_ADDR | ||
390 | * { u64 id; } && PERF_SAMPLE_ID | ||
391 | * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID | ||
392 | * { u32 cpu, res; } && PERF_SAMPLE_CPU | ||
393 | * { u64 period; } && PERF_SAMPLE_PERIOD | ||
394 | * | ||
395 | * { struct read_format values; } && PERF_SAMPLE_READ | ||
396 | * | ||
397 | * { u64 nr, | ||
398 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN | ||
399 | * | ||
400 | * # | ||
401 | * # The RAW record below is opaque data wrt the ABI | ||
402 | * # | ||
403 | * # That is, the ABI doesn't make any promises wrt | ||
404 | * # the stability of its content; it may vary depending | ||
405 | * # on event, hardware, kernel version and phase of | ||
406 | * # the moon. | ||
407 | * # | ||
408 | * # In other words, PERF_SAMPLE_RAW contents are not an ABI. | ||
409 | * # | ||
410 | * | ||
411 | * { u32 size; | ||
412 | * char data[size];}&& PERF_SAMPLE_RAW | ||
413 | * }; | ||
414 | */ | ||
415 | PERF_EVENT_SAMPLE = 9, | ||
416 | |||
417 | PERF_EVENT_MAX, /* non-ABI */ | ||
418 | }; | ||
419 | |||
420 | enum perf_callchain_context { | ||
421 | PERF_CONTEXT_HV = (__u64)-32, | ||
422 | PERF_CONTEXT_KERNEL = (__u64)-128, | ||
423 | PERF_CONTEXT_USER = (__u64)-512, | ||
424 | |||
425 | PERF_CONTEXT_GUEST = (__u64)-2048, | ||
426 | PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, | ||
427 | PERF_CONTEXT_GUEST_USER = (__u64)-2560, | ||
428 | |||
429 | PERF_CONTEXT_MAX = (__u64)-4095, | ||
430 | }; | ||
431 | |||
432 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) | ||
433 | #define PERF_FLAG_FD_OUTPUT (1U << 1) | ||
434 | |||
435 | /* | ||
436 | * In case some app still references the old symbols: | ||
437 | */ | ||
438 | |||
439 | #define __NR_perf_counter_open __NR_perf_event_open | ||
440 | |||
441 | #define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE | ||
442 | #define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE | ||
443 | |||
444 | #endif /* _LINUX_PERF_COUNTER_H */ | ||
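The mmap-page comment in the removed header documents a lock-less, seqlock-style read of a self-monitored counter. Below is a minimal user-space sketch of that loop written against the renamed perf_event ABI (struct perf_event_mmap_page keeps the same lock/index/offset fields); pmc_read() is a placeholder for an architecture-specific raw counter read such as RDPMC on x86 and is an assumption here, not something the ABI provides.

#include <stdint.h>
#include <linux/perf_event.h>

/* Placeholder: architecture-specific raw PMC read (e.g. RDPMC on x86). */
extern uint64_t pmc_read(uint32_t idx);

/*
 * Return the current count, or -1 if the event is not currently loaded
 * on a hardware counter (the caller should then fall back to read(2)).
 */
static int64_t self_monitor_read(volatile struct perf_event_mmap_page *pc)
{
	uint32_t seq;
	int64_t count;

	do {
		seq = pc->lock;			/* snapshot the seqlock */
		__sync_synchronize();		/* full barrier; barrier() in the pseudo-code above */

		if (!pc->index)			/* not on a PMC right now */
			return -1;
		count = pmc_read(pc->index - 1) + pc->offset;

		__sync_synchronize();
	} while (pc->lock != seq);		/* retry if the kernel updated the page */

	return count;
}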
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 64a53f74c9a9..8fa71874113f 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -211,17 +211,11 @@ struct perf_event_attr { | |||
211 | __u32 wakeup_watermark; /* bytes before wakeup */ | 211 | __u32 wakeup_watermark; /* bytes before wakeup */ |
212 | }; | 212 | }; |
213 | 213 | ||
214 | struct { /* Hardware breakpoint info */ | ||
215 | __u64 bp_addr; | ||
216 | __u32 bp_type; | ||
217 | __u32 bp_len; | ||
218 | __u64 __bp_reserved_1; | ||
219 | __u64 __bp_reserved_2; | ||
220 | }; | ||
221 | |||
222 | __u32 __reserved_2; | 214 | __u32 __reserved_2; |
223 | 215 | ||
224 | __u64 __reserved_3; | 216 | __u64 bp_addr; |
217 | __u32 bp_type; | ||
218 | __u32 bp_len; | ||
225 | }; | 219 | }; |
226 | 220 | ||
227 | /* | 221 | /* |
@@ -681,7 +675,7 @@ struct perf_event_context { | |||
681 | * Protect the states of the events in the list, | 675 | * Protect the states of the events in the list, |
682 | * nr_active, and the list: | 676 | * nr_active, and the list: |
683 | */ | 677 | */ |
684 | spinlock_t lock; | 678 | raw_spinlock_t lock; |
685 | /* | 679 | /* |
686 | * Protect the list of events. Locking either mutex or lock | 680 | * Protect the list of events. Locking either mutex or lock |
687 | * is sufficient to ensure the list doesn't change; to change | 681 | * is sufficient to ensure the list doesn't change; to change |
@@ -820,9 +814,14 @@ extern int perf_event_overflow(struct perf_event *event, int nmi, | |||
820 | */ | 814 | */ |
821 | static inline int is_software_event(struct perf_event *event) | 815 | static inline int is_software_event(struct perf_event *event) |
822 | { | 816 | { |
823 | return (event->attr.type != PERF_TYPE_RAW) && | 817 | switch (event->attr.type) { |
824 | (event->attr.type != PERF_TYPE_HARDWARE) && | 818 | case PERF_TYPE_SOFTWARE: |
825 | (event->attr.type != PERF_TYPE_HW_CACHE); | 819 | case PERF_TYPE_TRACEPOINT: |
820 | /* for now the breakpoint stuff also works as software event */ | ||
821 | case PERF_TYPE_BREAKPOINT: | ||
822 | return 1; | ||
823 | } | ||
824 | return 0; | ||
826 | } | 825 | } |
827 | 826 | ||
828 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 827 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
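With the anonymous struct gone, the breakpoint fields sit directly at the tail of perf_event_attr. A hedged user-space sketch of programming a write watchpoint through them; the HW_BREAKPOINT_* constants are assumed to come from <linux/hw_breakpoint.h>, and the helper name is made up.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

/* Watch 4 bytes at 'addr' for writes; returns a perf event fd or -1. */
static int watch_write(void *addr)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type	= PERF_TYPE_BREAKPOINT;
	attr.size	= sizeof(attr);
	attr.bp_addr	= (uintptr_t)addr;	/* fields now live at the attr tail */
	attr.bp_type	= HW_BREAKPOINT_W;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;

	/* pid 0 = current task, cpu -1 = any CPU, no group fd, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}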
diff --git a/include/linux/phy.h b/include/linux/phy.h index b1368b8f6572..6a7eb402165d 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -447,6 +447,7 @@ struct phy_device* get_phy_device(struct mii_bus *bus, int addr); | |||
447 | int phy_device_register(struct phy_device *phy); | 447 | int phy_device_register(struct phy_device *phy); |
448 | int phy_clear_interrupt(struct phy_device *phydev); | 448 | int phy_clear_interrupt(struct phy_device *phydev); |
449 | int phy_config_interrupt(struct phy_device *phydev, u32 interrupts); | 449 | int phy_config_interrupt(struct phy_device *phydev, u32 interrupts); |
450 | int phy_init_hw(struct phy_device *phydev); | ||
450 | int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | 451 | int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, |
451 | u32 flags, phy_interface_t interface); | 452 | u32 flags, phy_interface_t interface); |
452 | struct phy_device * phy_attach(struct net_device *dev, | 453 | struct phy_device * phy_attach(struct net_device *dev, |
@@ -484,6 +485,7 @@ void phy_driver_unregister(struct phy_driver *drv); | |||
484 | int phy_driver_register(struct phy_driver *new_driver); | 485 | int phy_driver_register(struct phy_driver *new_driver); |
485 | void phy_prepare_link(struct phy_device *phydev, | 486 | void phy_prepare_link(struct phy_device *phydev, |
486 | void (*adjust_link)(struct net_device *)); | 487 | void (*adjust_link)(struct net_device *)); |
488 | void phy_state_machine(struct work_struct *work); | ||
487 | void phy_start_machine(struct phy_device *phydev, | 489 | void phy_start_machine(struct phy_device *phydev, |
488 | void (*handler)(struct net_device *)); | 490 | void (*handler)(struct net_device *)); |
489 | void phy_stop_machine(struct phy_device *phydev); | 491 | void phy_stop_machine(struct phy_device *phydev); |
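phy_init_hw() gives MAC drivers a way to reprogram an attached PHY from a known state (board fixups plus the driver's config_init). A short illustrative sketch of a caller; the surrounding function name is made up.

#include <linux/phy.h>

static int demo_reinit_phy(struct phy_device *phydev)
{
	int err;

	err = phy_init_hw(phydev);	/* re-apply fixups and config_init */
	if (err)
		return err;

	return phy_start_aneg(phydev);	/* then restart autonegotiation */
}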
diff --git a/include/linux/plist.h b/include/linux/plist.h index 45926d77d6ac..8227f717c70f 100644 --- a/include/linux/plist.h +++ b/include/linux/plist.h | |||
@@ -81,7 +81,8 @@ struct plist_head { | |||
81 | struct list_head prio_list; | 81 | struct list_head prio_list; |
82 | struct list_head node_list; | 82 | struct list_head node_list; |
83 | #ifdef CONFIG_DEBUG_PI_LIST | 83 | #ifdef CONFIG_DEBUG_PI_LIST |
84 | spinlock_t *lock; | 84 | raw_spinlock_t *rawlock; |
85 | spinlock_t *spinlock; | ||
85 | #endif | 86 | #endif |
86 | }; | 87 | }; |
87 | 88 | ||
@@ -91,9 +92,11 @@ struct plist_node { | |||
91 | }; | 92 | }; |
92 | 93 | ||
93 | #ifdef CONFIG_DEBUG_PI_LIST | 94 | #ifdef CONFIG_DEBUG_PI_LIST |
94 | # define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock | 95 | # define PLIST_HEAD_LOCK_INIT(_lock) .spinlock = _lock |
96 | # define PLIST_HEAD_LOCK_INIT_RAW(_lock) .rawlock = _lock | ||
95 | #else | 97 | #else |
96 | # define PLIST_HEAD_LOCK_INIT(_lock) | 98 | # define PLIST_HEAD_LOCK_INIT(_lock) |
99 | # define PLIST_HEAD_LOCK_INIT_RAW(_lock) | ||
97 | #endif | 100 | #endif |
98 | 101 | ||
99 | #define _PLIST_HEAD_INIT(head) \ | 102 | #define _PLIST_HEAD_INIT(head) \ |
@@ -107,11 +110,22 @@ struct plist_node { | |||
107 | */ | 110 | */ |
108 | #define PLIST_HEAD_INIT(head, _lock) \ | 111 | #define PLIST_HEAD_INIT(head, _lock) \ |
109 | { \ | 112 | { \ |
110 | _PLIST_HEAD_INIT(head), \ | 113 | _PLIST_HEAD_INIT(head), \ |
111 | PLIST_HEAD_LOCK_INIT(&(_lock)) \ | 114 | PLIST_HEAD_LOCK_INIT(&(_lock)) \ |
112 | } | 115 | } |
113 | 116 | ||
114 | /** | 117 | /** |
118 | * PLIST_HEAD_INIT_RAW - static struct plist_head initializer | ||
119 | * @head: struct plist_head variable name | ||
120 | * @_lock: lock to initialize for this list | ||
121 | */ | ||
122 | #define PLIST_HEAD_INIT_RAW(head, _lock) \ | ||
123 | { \ | ||
124 | _PLIST_HEAD_INIT(head), \ | ||
125 | PLIST_HEAD_LOCK_INIT_RAW(&(_lock)) \ | ||
126 | } | ||
127 | |||
128 | /** | ||
115 | * PLIST_NODE_INIT - static struct plist_node initializer | 129 | * PLIST_NODE_INIT - static struct plist_node initializer |
116 | * @node: struct plist_node variable name | 130 | * @node: struct plist_node variable name |
117 | * @__prio: initial node priority | 131 | * @__prio: initial node priority |
@@ -119,13 +133,13 @@ struct plist_node { | |||
119 | #define PLIST_NODE_INIT(node, __prio) \ | 133 | #define PLIST_NODE_INIT(node, __prio) \ |
120 | { \ | 134 | { \ |
121 | .prio = (__prio), \ | 135 | .prio = (__prio), \ |
122 | .plist = { _PLIST_HEAD_INIT((node).plist) }, \ | 136 | .plist = { _PLIST_HEAD_INIT((node).plist) }, \ |
123 | } | 137 | } |
124 | 138 | ||
125 | /** | 139 | /** |
126 | * plist_head_init - dynamic struct plist_head initializer | 140 | * plist_head_init - dynamic struct plist_head initializer |
127 | * @head: &struct plist_head pointer | 141 | * @head: &struct plist_head pointer |
128 | * @lock: list spinlock, remembered for debugging | 142 | * @lock: spinlock protecting the list (debugging) |
129 | */ | 143 | */ |
130 | static inline void | 144 | static inline void |
131 | plist_head_init(struct plist_head *head, spinlock_t *lock) | 145 | plist_head_init(struct plist_head *head, spinlock_t *lock) |
@@ -133,7 +147,24 @@ plist_head_init(struct plist_head *head, spinlock_t *lock) | |||
133 | INIT_LIST_HEAD(&head->prio_list); | 147 | INIT_LIST_HEAD(&head->prio_list); |
134 | INIT_LIST_HEAD(&head->node_list); | 148 | INIT_LIST_HEAD(&head->node_list); |
135 | #ifdef CONFIG_DEBUG_PI_LIST | 149 | #ifdef CONFIG_DEBUG_PI_LIST |
136 | head->lock = lock; | 150 | head->spinlock = lock; |
151 | head->rawlock = NULL; | ||
152 | #endif | ||
153 | } | ||
154 | |||
155 | /** | ||
156 | * plist_head_init_raw - dynamic struct plist_head initializer | ||
157 | * @head: &struct plist_head pointer | ||
158 | * @lock: raw_spinlock protecting the list (debugging) | ||
159 | */ | ||
160 | static inline void | ||
161 | plist_head_init_raw(struct plist_head *head, raw_spinlock_t *lock) | ||
162 | { | ||
163 | INIT_LIST_HEAD(&head->prio_list); | ||
164 | INIT_LIST_HEAD(&head->node_list); | ||
165 | #ifdef CONFIG_DEBUG_PI_LIST | ||
166 | head->rawlock = lock; | ||
167 | head->spinlock = NULL; | ||
137 | #endif | 168 | #endif |
138 | } | 169 | } |
139 | 170 | ||
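The _RAW variants let a plist head be debug-checked against a raw_spinlock_t instead of a spinlock_t, which the rt_mutex change further down relies on. A minimal sketch of both forms, with illustrative names, assuming the usual DEFINE_RAW_SPINLOCK/raw_spin_lock_init helpers:

#include <linux/plist.h>
#include <linux/spinlock.h>

/* Static form: head is associated with a raw_spinlock_t for debug checks. */
static DEFINE_RAW_SPINLOCK(demo_lock);
static struct plist_head demo_head = PLIST_HEAD_INIT_RAW(demo_head, demo_lock);

/* Dynamic form. */
static void demo_plist_setup(struct plist_head *head, raw_spinlock_t *lock)
{
	raw_spin_lock_init(lock);
	plist_head_init_raw(head, lock);
}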
diff --git a/include/linux/pm.h b/include/linux/pm.h index 0d65934246af..198b8f9fe05e 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -219,7 +219,7 @@ struct dev_pm_ops { | |||
219 | * to RAM and hibernation. | 219 | * to RAM and hibernation. |
220 | */ | 220 | */ |
221 | #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ | 221 | #define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \ |
222 | struct dev_pm_ops name = { \ | 222 | const struct dev_pm_ops name = { \ |
223 | .suspend = suspend_fn, \ | 223 | .suspend = suspend_fn, \ |
224 | .resume = resume_fn, \ | 224 | .resume = resume_fn, \ |
225 | .freeze = suspend_fn, \ | 225 | .freeze = suspend_fn, \ |
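Because the macro now emits a const object, drivers reference it through a const pointer; a sketch of typical usage (driver names are made up):

#include <linux/pm.h>
#include <linux/platform_device.h>

static int demo_suspend(struct device *dev) { return 0; }
static int demo_resume(struct device *dev) { return 0; }

/* Expands to: static const struct dev_pm_ops demo_pm_ops = { ... }; */
static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name	= "demo",
		.pm	= &demo_pm_ops,
	},
};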
diff --git a/include/linux/pnp.h b/include/linux/pnp.h index fddfafaed024..7c4193eb0072 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h | |||
@@ -334,6 +334,19 @@ extern struct pnp_protocol pnpbios_protocol; | |||
334 | #define pnp_device_is_pnpbios(dev) 0 | 334 | #define pnp_device_is_pnpbios(dev) 0 |
335 | #endif | 335 | #endif |
336 | 336 | ||
337 | #ifdef CONFIG_PNPACPI | ||
338 | extern struct pnp_protocol pnpacpi_protocol; | ||
339 | |||
340 | static inline struct acpi_device *pnp_acpi_device(struct pnp_dev *dev) | ||
341 | { | ||
342 | if (dev->protocol == &pnpacpi_protocol) | ||
343 | return dev->data; | ||
344 | return NULL; | ||
345 | } | ||
346 | #else | ||
347 | #define pnp_acpi_device(dev) 0 | ||
348 | #endif | ||
349 | |||
337 | /* status */ | 350 | /* status */ |
338 | #define PNP_READY 0x0000 | 351 | #define PNP_READY 0x0000 |
339 | #define PNP_ATTACHED 0x0001 | 352 | #define PNP_ATTACHED 0x0001 |
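pnp_acpi_device() hands callers the struct acpi_device behind a PNPACPI device, or NULL for other protocols. A hedged sketch of a caller, assuming CONFIG_PNPACPI is enabled; the helper name is illustrative.

#include <linux/pnp.h>
#include <linux/acpi.h>

/* Return the ACPI handle behind a PNP device, or NULL if it isn't ACPI-backed. */
static acpi_handle demo_pnp_to_handle(struct pnp_dev *dev)
{
	struct acpi_device *adev = pnp_acpi_device(dev);

	return adev ? adev->handle : NULL;
}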
diff --git a/include/linux/poison.h b/include/linux/poison.h index 7fc194aef8c2..2110a81c5e2a 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h | |||
@@ -2,13 +2,25 @@ | |||
2 | #define _LINUX_POISON_H | 2 | #define _LINUX_POISON_H |
3 | 3 | ||
4 | /********** include/linux/list.h **********/ | 4 | /********** include/linux/list.h **********/ |
5 | |||
6 | /* | ||
7 | * Architectures might want to move the poison pointer offset | ||
8 | * into some well-recognized area such as 0xdead000000000000, | ||
9 | * that is also not mappable by user-space exploits: | ||
10 | */ | ||
11 | #ifdef CONFIG_ILLEGAL_POINTER_VALUE | ||
12 | # define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL) | ||
13 | #else | ||
14 | # define POISON_POINTER_DELTA 0 | ||
15 | #endif | ||
16 | |||
5 | /* | 17 | /* |
6 | * These are non-NULL pointers that will result in page faults | 18 | * These are non-NULL pointers that will result in page faults |
7 | * under normal circumstances, used to verify that nobody uses | 19 | * under normal circumstances, used to verify that nobody uses |
8 | * non-initialized list entries. | 20 | * non-initialized list entries. |
9 | */ | 21 | */ |
10 | #define LIST_POISON1 ((void *) 0x00100100) | 22 | #define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) |
11 | #define LIST_POISON2 ((void *) 0x00200200) | 23 | #define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) |
12 | 24 | ||
13 | /********** include/linux/timer.h **********/ | 25 | /********** include/linux/timer.h **********/ |
14 | /* | 26 | /* |
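A worked example of the new offset (the config value is illustrative): with CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 on a 64-bit build,

	LIST_POISON1 = 0x00100100 + 0xdead000000000000 = 0xdead000000100100
	LIST_POISON2 = 0x00200200 + 0xdead000000000000 = 0xdead000000200200

so both poison values land in a region user space cannot map, while builds without the option keep the historical small constants.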
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 7456d7d87a19..56f2d63a5cbb 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -105,12 +105,7 @@ static inline int ptrace_reparented(struct task_struct *child) | |||
105 | { | 105 | { |
106 | return child->real_parent != child->parent; | 106 | return child->real_parent != child->parent; |
107 | } | 107 | } |
108 | static inline void ptrace_link(struct task_struct *child, | 108 | |
109 | struct task_struct *new_parent) | ||
110 | { | ||
111 | if (unlikely(child->ptrace)) | ||
112 | __ptrace_link(child, new_parent); | ||
113 | } | ||
114 | static inline void ptrace_unlink(struct task_struct *child) | 109 | static inline void ptrace_unlink(struct task_struct *child) |
115 | { | 110 | { |
116 | if (unlikely(child->ptrace)) | 111 | if (unlikely(child->ptrace)) |
@@ -169,9 +164,9 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace) | |||
169 | INIT_LIST_HEAD(&child->ptraced); | 164 | INIT_LIST_HEAD(&child->ptraced); |
170 | child->parent = child->real_parent; | 165 | child->parent = child->real_parent; |
171 | child->ptrace = 0; | 166 | child->ptrace = 0; |
172 | if (unlikely(ptrace)) { | 167 | if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) { |
173 | child->ptrace = current->ptrace; | 168 | child->ptrace = current->ptrace; |
174 | ptrace_link(child, current->parent); | 169 | __ptrace_link(child, current->parent); |
175 | } | 170 | } |
176 | } | 171 | } |
177 | 172 | ||
@@ -278,6 +273,18 @@ static inline void user_enable_block_step(struct task_struct *task) | |||
278 | } | 273 | } |
279 | #endif /* arch_has_block_step */ | 274 | #endif /* arch_has_block_step */ |
280 | 275 | ||
276 | #ifdef ARCH_HAS_USER_SINGLE_STEP_INFO | ||
277 | extern void user_single_step_siginfo(struct task_struct *tsk, | ||
278 | struct pt_regs *regs, siginfo_t *info); | ||
279 | #else | ||
280 | static inline void user_single_step_siginfo(struct task_struct *tsk, | ||
281 | struct pt_regs *regs, siginfo_t *info) | ||
282 | { | ||
283 | memset(info, 0, sizeof(*info)); | ||
284 | info->si_signo = SIGTRAP; | ||
285 | } | ||
286 | #endif | ||
287 | |||
281 | #ifndef arch_ptrace_stop_needed | 288 | #ifndef arch_ptrace_stop_needed |
282 | /** | 289 | /** |
283 | * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called | 290 | * arch_ptrace_stop_needed - Decide whether arch_ptrace_stop() should be called |
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index 7a9754c96775..01b3d759f1fc 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h | |||
@@ -10,7 +10,7 @@ struct platform_pwm_backlight_data { | |||
10 | unsigned int dft_brightness; | 10 | unsigned int dft_brightness; |
11 | unsigned int pwm_period_ns; | 11 | unsigned int pwm_period_ns; |
12 | int (*init)(struct device *dev); | 12 | int (*init)(struct device *dev); |
13 | int (*notify)(int brightness); | 13 | int (*notify)(struct device *dev, int brightness); |
14 | void (*exit)(struct device *dev); | 14 | void (*exit)(struct device *dev); |
15 | }; | 15 | }; |
16 | 16 | ||
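Board files supplying this platform data now receive the device in the notify hook. A minimal sketch with made-up board names:

#include <linux/pwm_backlight.h>

static int demo_bl_notify(struct device *dev, int brightness)
{
	/* e.g. clamp the requested level based on board state */
	return brightness;
}

static struct platform_pwm_backlight_data demo_bl_data = {
	.max_brightness	= 255,
	.dft_brightness	= 128,
	.pwm_period_ns	= 1000000,	/* 1 ms period */
	.notify		= demo_bl_notify,
};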
diff --git a/include/linux/quota.h b/include/linux/quota.h index e70e62194243..a6861f117480 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
@@ -315,8 +315,9 @@ struct dquot_operations { | |||
315 | int (*claim_space) (struct inode *, qsize_t); | 315 | int (*claim_space) (struct inode *, qsize_t); |
316 | /* release rsved quota for delayed alloc */ | 316 | /* release rsved quota for delayed alloc */ |
317 | void (*release_rsv) (struct inode *, qsize_t); | 317 | void (*release_rsv) (struct inode *, qsize_t); |
318 | /* get reserved quota for delayed alloc */ | 318 | /* get reserved quota for delayed alloc, value returned is managed by |
319 | qsize_t (*get_reserved_space) (struct inode *); | 319 | * quota code only */ |
320 | qsize_t *(*get_reserved_space) (struct inode *); | ||
320 | }; | 321 | }; |
321 | 322 | ||
322 | /* Operations handling requests from userspace */ | 323 | /* Operations handling requests from userspace */ |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index c4ba9a78721e..96cc307ed9f4 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -101,4 +101,9 @@ static inline void exit_rcu(void) | |||
101 | { | 101 | { |
102 | } | 102 | } |
103 | 103 | ||
104 | static inline int rcu_preempt_depth(void) | ||
105 | { | ||
106 | return 0; | ||
107 | } | ||
108 | |||
104 | #endif /* __LINUX_RCUTINY_H */ | 109 | #endif /* __LINUX_RCUTINY_H */ |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index c93eee5911b0..8044b1b94333 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -45,6 +45,12 @@ extern void __rcu_read_unlock(void); | |||
45 | extern void synchronize_rcu(void); | 45 | extern void synchronize_rcu(void); |
46 | extern void exit_rcu(void); | 46 | extern void exit_rcu(void); |
47 | 47 | ||
48 | /* | ||
49 | * Defined as a macro because this is a very low-level header | ||
50 | * included from areas that don't even know about current | ||
51 | */ | ||
52 | #define rcu_preempt_depth() (current->rcu_read_lock_nesting) | ||
53 | |||
48 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 54 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
49 | 55 | ||
50 | static inline void __rcu_read_lock(void) | 56 | static inline void __rcu_read_lock(void) |
@@ -63,6 +69,11 @@ static inline void exit_rcu(void) | |||
63 | { | 69 | { |
64 | } | 70 | } |
65 | 71 | ||
72 | static inline int rcu_preempt_depth(void) | ||
73 | { | ||
74 | return 0; | ||
75 | } | ||
76 | |||
66 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | 77 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
67 | 78 | ||
68 | static inline void __rcu_read_lock_bh(void) | 79 | static inline void __rcu_read_lock_bh(void) |
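rcu_preempt_depth() lets generic code ask whether the current task is inside a preemptible-RCU read-side critical section (it is always 0 when that flavour is not built in). A small illustrative check, not taken from the kernel sources:

#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>

/* Warn if reached from inside rcu_read_lock() on a preemptible-RCU kernel. */
static void demo_assert_outside_rcu(void)
{
	WARN_ON_ONCE(rcu_preempt_depth() != 0);
}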
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 490c5b37b6d7..030d92255c7a 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h | |||
@@ -35,6 +35,8 @@ | |||
35 | #ifndef __LINUX_REGULATOR_CONSUMER_H_ | 35 | #ifndef __LINUX_REGULATOR_CONSUMER_H_ |
36 | #define __LINUX_REGULATOR_CONSUMER_H_ | 36 | #define __LINUX_REGULATOR_CONSUMER_H_ |
37 | 37 | ||
38 | #include <linux/device.h> | ||
39 | |||
38 | /* | 40 | /* |
39 | * Regulator operating modes. | 41 | * Regulator operating modes. |
40 | * | 42 | * |
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 87f5f176d4ef..234a8476cba8 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
@@ -43,16 +43,20 @@ struct regulator; | |||
43 | /** | 43 | /** |
44 | * struct regulator_state - regulator state during low power system states | 44 | * struct regulator_state - regulator state during low power system states |
45 | * | 45 | * |
46 | * This describes a regulator's state during a system wide low power state. | 46 | * This describes a regulator's state during a system wide low power |
47 | * state. One of enabled or disabled must be set for the | ||
48 | * configuration to be applied. | ||
47 | * | 49 | * |
48 | * @uV: Operating voltage during suspend. | 50 | * @uV: Operating voltage during suspend. |
49 | * @mode: Operating mode during suspend. | 51 | * @mode: Operating mode during suspend. |
50 | * @enabled: Enabled during suspend. | 52 | * @enabled: Enabled during suspend. |
53 | * @disabled: Disabled during suspend. | ||
51 | */ | 54 | */ |
52 | struct regulator_state { | 55 | struct regulator_state { |
53 | int uV; /* suspend voltage */ | 56 | int uV; /* suspend voltage */ |
54 | unsigned int mode; /* suspend regulator operating mode */ | 57 | unsigned int mode; /* suspend regulator operating mode */ |
55 | int enabled; /* is regulator enabled in this suspend state */ | 58 | int enabled; /* is regulator enabled in this suspend state */ |
59 | int disabled; /* is the regulator disabled in this suspend state */ | ||
56 | }; | 60 | }; |
57 | 61 | ||
58 | /** | 62 | /** |
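With the added flag, machine constraints can now force a supply off in a given system suspend state; exactly one of .enabled or .disabled should be set. A sketch with illustrative names:

#include <linux/regulator/machine.h>

static struct regulator_init_data demo_ldo_init = {
	.constraints = {
		.name		= "demo-ldo",
		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
		.state_mem = {
			.disabled = 1,	/* keep the LDO off during suspend-to-RAM */
		},
	},
};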
diff --git a/include/linux/regulator/max8660.h b/include/linux/regulator/max8660.h new file mode 100644 index 000000000000..9936763621c7 --- /dev/null +++ b/include/linux/regulator/max8660.h | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * max8660.h -- Voltage regulation for the Maxim 8660/8661 | ||
3 | * | ||
4 | * Copyright (C) 2009 Wolfram Sang, Pengutronix e.K. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; version 2 of the License. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #ifndef __LINUX_REGULATOR_MAX8660_H | ||
21 | #define __LINUX_REGULATOR_MAX8660_H | ||
22 | |||
23 | #include <linux/regulator/machine.h> | ||
24 | |||
25 | enum { | ||
26 | MAX8660_V3, | ||
27 | MAX8660_V4, | ||
28 | MAX8660_V5, | ||
29 | MAX8660_V6, | ||
30 | MAX8660_V7, | ||
31 | MAX8660_V_END, | ||
32 | }; | ||
33 | |||
34 | /** | ||
35 | * max8660_subdev_data - regulator subdev data | ||
36 | * @id: regulator id | ||
37 | * @name: regulator name | ||
38 | * @platform_data: regulator init data | ||
39 | */ | ||
40 | struct max8660_subdev_data { | ||
41 | int id; | ||
42 | char *name; | ||
43 | struct regulator_init_data *platform_data; | ||
44 | }; | ||
45 | |||
46 | /** | ||
47 | * max8660_platform_data - platform data for max8660 | ||
48 | * @num_subdevs: number of regulators used | ||
49 | * @subdevs: pointer to regulators used | ||
50 | * @en34_is_high: if EN34 is driven high, regulators cannot be en-/disabled. | ||
51 | */ | ||
52 | struct max8660_platform_data { | ||
53 | int num_subdevs; | ||
54 | struct max8660_subdev_data *subdevs; | ||
55 | unsigned en34_is_high:1; | ||
56 | }; | ||
57 | #endif | ||
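Board code wires the chip up by pointing the I2C device's platform data at something like the sketch below; the regulator init data and names are placeholders.

#include <linux/kernel.h>
#include <linux/regulator/max8660.h>

static struct regulator_init_data demo_v3_init;		/* constraints elided */

static struct max8660_subdev_data demo_max8660_subdevs[] = {
	{
		.id		= MAX8660_V3,
		.name		= "vcc_core",
		.platform_data	= &demo_v3_init,
	},
};

static struct max8660_platform_data demo_max8660_pdata = {
	.num_subdevs	= ARRAY_SIZE(demo_max8660_subdevs),
	.subdevs	= demo_max8660_subdevs,
	.en34_is_high	= 1,		/* EN34 strapped high on this board */
};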
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index a05b4a20768d..1ba3cf6edfbb 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h | |||
@@ -62,6 +62,12 @@ void reiserfs_write_unlock(struct super_block *s); | |||
62 | int reiserfs_write_lock_once(struct super_block *s); | 62 | int reiserfs_write_lock_once(struct super_block *s); |
63 | void reiserfs_write_unlock_once(struct super_block *s, int lock_depth); | 63 | void reiserfs_write_unlock_once(struct super_block *s, int lock_depth); |
64 | 64 | ||
65 | #ifdef CONFIG_REISERFS_CHECK | ||
66 | void reiserfs_lock_check_recursive(struct super_block *s); | ||
67 | #else | ||
68 | static inline void reiserfs_lock_check_recursive(struct super_block *s) { } | ||
69 | #endif | ||
70 | |||
65 | /* | 71 | /* |
66 | * Several mutexes depend on the write lock. | 72 | * Several mutexes depend on the write lock. |
67 | * However sometimes we want to relax the write lock while we hold | 73 | * However sometimes we want to relax the write lock while we hold |
@@ -92,11 +98,31 @@ void reiserfs_write_unlock_once(struct super_block *s, int lock_depth); | |||
92 | static inline void reiserfs_mutex_lock_safe(struct mutex *m, | 98 | static inline void reiserfs_mutex_lock_safe(struct mutex *m, |
93 | struct super_block *s) | 99 | struct super_block *s) |
94 | { | 100 | { |
101 | reiserfs_lock_check_recursive(s); | ||
95 | reiserfs_write_unlock(s); | 102 | reiserfs_write_unlock(s); |
96 | mutex_lock(m); | 103 | mutex_lock(m); |
97 | reiserfs_write_lock(s); | 104 | reiserfs_write_lock(s); |
98 | } | 105 | } |
99 | 106 | ||
107 | static inline void | ||
108 | reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass, | ||
109 | struct super_block *s) | ||
110 | { | ||
111 | reiserfs_lock_check_recursive(s); | ||
112 | reiserfs_write_unlock(s); | ||
113 | mutex_lock_nested(m, subclass); | ||
114 | reiserfs_write_lock(s); | ||
115 | } | ||
116 | |||
117 | static inline void | ||
118 | reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s) | ||
119 | { | ||
120 | reiserfs_lock_check_recursive(s); | ||
121 | reiserfs_write_unlock(s); | ||
122 | down_read(sem); | ||
123 | reiserfs_write_lock(s); | ||
124 | } | ||
125 | |||
100 | /* | 126 | /* |
101 | * When we schedule, we usually want to also release the write lock, | 127 | * When we schedule, we usually want to also release the write lock, |
102 | * according to the previous bkl based locking scheme of reiserfs. | 128 | * according to the previous bkl based locking scheme of reiserfs. |
@@ -2051,25 +2077,12 @@ void set_de_name_and_namelen(struct reiserfs_dir_entry *de); | |||
2051 | int search_by_entry_key(struct super_block *sb, const struct cpu_key *key, | 2077 | int search_by_entry_key(struct super_block *sb, const struct cpu_key *key, |
2052 | struct treepath *path, struct reiserfs_dir_entry *de); | 2078 | struct treepath *path, struct reiserfs_dir_entry *de); |
2053 | struct dentry *reiserfs_get_parent(struct dentry *); | 2079 | struct dentry *reiserfs_get_parent(struct dentry *); |
2054 | /* procfs.c */ | ||
2055 | |||
2056 | #if defined( CONFIG_PROC_FS ) && defined( CONFIG_REISERFS_PROC_INFO ) | ||
2057 | #define REISERFS_PROC_INFO | ||
2058 | #else | ||
2059 | #undef REISERFS_PROC_INFO | ||
2060 | #endif | ||
2061 | 2080 | ||
2081 | #ifdef CONFIG_REISERFS_PROC_INFO | ||
2062 | int reiserfs_proc_info_init(struct super_block *sb); | 2082 | int reiserfs_proc_info_init(struct super_block *sb); |
2063 | int reiserfs_proc_info_done(struct super_block *sb); | 2083 | int reiserfs_proc_info_done(struct super_block *sb); |
2064 | struct proc_dir_entry *reiserfs_proc_register_global(char *name, | ||
2065 | read_proc_t * func); | ||
2066 | void reiserfs_proc_unregister_global(const char *name); | ||
2067 | int reiserfs_proc_info_global_init(void); | 2084 | int reiserfs_proc_info_global_init(void); |
2068 | int reiserfs_proc_info_global_done(void); | 2085 | int reiserfs_proc_info_global_done(void); |
2069 | int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset, | ||
2070 | int count, int *eof, void *data); | ||
2071 | |||
2072 | #if defined( REISERFS_PROC_INFO ) | ||
2073 | 2086 | ||
2074 | #define PROC_EXP( e ) e | 2087 | #define PROC_EXP( e ) e |
2075 | 2088 | ||
@@ -2084,6 +2097,26 @@ int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset, | |||
2084 | PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \ | 2097 | PROC_INFO_ADD( sb, free_at[ ( level ) ], B_FREE_SPACE( bh ) ); \ |
2085 | PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) ) | 2098 | PROC_INFO_ADD( sb, items_at[ ( level ) ], B_NR_ITEMS( bh ) ) |
2086 | #else | 2099 | #else |
2100 | static inline int reiserfs_proc_info_init(struct super_block *sb) | ||
2101 | { | ||
2102 | return 0; | ||
2103 | } | ||
2104 | |||
2105 | static inline int reiserfs_proc_info_done(struct super_block *sb) | ||
2106 | { | ||
2107 | return 0; | ||
2108 | } | ||
2109 | |||
2110 | static inline int reiserfs_proc_info_global_init(void) | ||
2111 | { | ||
2112 | return 0; | ||
2113 | } | ||
2114 | |||
2115 | static inline int reiserfs_proc_info_global_done(void) | ||
2116 | { | ||
2117 | return 0; | ||
2118 | } | ||
2119 | |||
2087 | #define PROC_EXP( e ) | 2120 | #define PROC_EXP( e ) |
2088 | #define VOID_V ( ( void ) 0 ) | 2121 | #define VOID_V ( ( void ) 0 ) |
2089 | #define PROC_INFO_MAX( sb, field, value ) VOID_V | 2122 | #define PROC_INFO_MAX( sb, field, value ) VOID_V |
diff --git a/include/linux/resource.h b/include/linux/resource.h index 40fc7e626082..f1e914eefeab 100644 --- a/include/linux/resource.h +++ b/include/linux/resource.h | |||
@@ -3,8 +3,6 @@ | |||
3 | 3 | ||
4 | #include <linux/time.h> | 4 | #include <linux/time.h> |
5 | 5 | ||
6 | struct task_struct; | ||
7 | |||
8 | /* | 6 | /* |
9 | * Resource control/accounting header file for linux | 7 | * Resource control/accounting header file for linux |
10 | */ | 8 | */ |
@@ -70,6 +68,12 @@ struct rlimit { | |||
70 | */ | 68 | */ |
71 | #include <asm/resource.h> | 69 | #include <asm/resource.h> |
72 | 70 | ||
71 | #ifdef __KERNEL__ | ||
72 | |||
73 | struct task_struct; | ||
74 | |||
73 | int getrusage(struct task_struct *p, int who, struct rusage __user *ru); | 75 | int getrusage(struct task_struct *p, int who, struct rusage __user *ru); |
74 | 76 | ||
77 | #endif /* __KERNEL__ */ | ||
78 | |||
75 | #endif | 79 | #endif |
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index cb0ba7032609..b019ae64e2ab 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
@@ -26,6 +26,9 @@ | |||
26 | */ | 26 | */ |
27 | struct anon_vma { | 27 | struct anon_vma { |
28 | spinlock_t lock; /* Serialize access to vma list */ | 28 | spinlock_t lock; /* Serialize access to vma list */ |
29 | #ifdef CONFIG_KSM | ||
30 | atomic_t ksm_refcount; | ||
31 | #endif | ||
29 | /* | 32 | /* |
30 | * NOTE: the LSB of the head.next is set by | 33 | * NOTE: the LSB of the head.next is set by |
31 | * mm_take_all_locks() _after_ taking the above lock. So the | 34 | * mm_take_all_locks() _after_ taking the above lock. So the |
@@ -38,6 +41,34 @@ struct anon_vma { | |||
38 | }; | 41 | }; |
39 | 42 | ||
40 | #ifdef CONFIG_MMU | 43 | #ifdef CONFIG_MMU |
44 | #ifdef CONFIG_KSM | ||
45 | static inline void ksm_refcount_init(struct anon_vma *anon_vma) | ||
46 | { | ||
47 | atomic_set(&anon_vma->ksm_refcount, 0); | ||
48 | } | ||
49 | |||
50 | static inline int ksm_refcount(struct anon_vma *anon_vma) | ||
51 | { | ||
52 | return atomic_read(&anon_vma->ksm_refcount); | ||
53 | } | ||
54 | #else | ||
55 | static inline void ksm_refcount_init(struct anon_vma *anon_vma) | ||
56 | { | ||
57 | } | ||
58 | |||
59 | static inline int ksm_refcount(struct anon_vma *anon_vma) | ||
60 | { | ||
61 | return 0; | ||
62 | } | ||
63 | #endif /* CONFIG_KSM */ | ||
64 | |||
65 | static inline struct anon_vma *page_anon_vma(struct page *page) | ||
66 | { | ||
67 | if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != | ||
68 | PAGE_MAPPING_ANON) | ||
69 | return NULL; | ||
70 | return page_rmapping(page); | ||
71 | } | ||
41 | 72 | ||
42 | static inline void anon_vma_lock(struct vm_area_struct *vma) | 73 | static inline void anon_vma_lock(struct vm_area_struct *vma) |
43 | { | 74 | { |
@@ -62,6 +93,7 @@ void __anon_vma_merge(struct vm_area_struct *, struct vm_area_struct *); | |||
62 | void anon_vma_unlink(struct vm_area_struct *); | 93 | void anon_vma_unlink(struct vm_area_struct *); |
63 | void anon_vma_link(struct vm_area_struct *); | 94 | void anon_vma_link(struct vm_area_struct *); |
64 | void __anon_vma_link(struct vm_area_struct *); | 95 | void __anon_vma_link(struct vm_area_struct *); |
96 | void anon_vma_free(struct anon_vma *); | ||
65 | 97 | ||
66 | /* | 98 | /* |
67 | * rmap interfaces called when adding or removing pte of page | 99 | * rmap interfaces called when adding or removing pte of page |
@@ -81,6 +113,9 @@ static inline void page_dup_rmap(struct page *page) | |||
81 | */ | 113 | */ |
82 | int page_referenced(struct page *, int is_locked, | 114 | int page_referenced(struct page *, int is_locked, |
83 | struct mem_cgroup *cnt, unsigned long *vm_flags); | 115 | struct mem_cgroup *cnt, unsigned long *vm_flags); |
116 | int page_referenced_one(struct page *, struct vm_area_struct *, | ||
117 | unsigned long address, unsigned int *mapcount, unsigned long *vm_flags); | ||
118 | |||
84 | enum ttu_flags { | 119 | enum ttu_flags { |
85 | TTU_UNMAP = 0, /* unmap mode */ | 120 | TTU_UNMAP = 0, /* unmap mode */ |
86 | TTU_MIGRATION = 1, /* migration mode */ | 121 | TTU_MIGRATION = 1, /* migration mode */ |
@@ -94,6 +129,8 @@ enum ttu_flags { | |||
94 | #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) | 129 | #define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) |
95 | 130 | ||
96 | int try_to_unmap(struct page *, enum ttu_flags flags); | 131 | int try_to_unmap(struct page *, enum ttu_flags flags); |
132 | int try_to_unmap_one(struct page *, struct vm_area_struct *, | ||
133 | unsigned long address, enum ttu_flags flags); | ||
97 | 134 | ||
98 | /* | 135 | /* |
99 | * Called from mm/filemap_xip.c to unmap empty zero page | 136 | * Called from mm/filemap_xip.c to unmap empty zero page |
@@ -127,6 +164,12 @@ struct anon_vma *page_lock_anon_vma(struct page *page); | |||
127 | void page_unlock_anon_vma(struct anon_vma *anon_vma); | 164 | void page_unlock_anon_vma(struct anon_vma *anon_vma); |
128 | int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); | 165 | int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma); |
129 | 166 | ||
167 | /* | ||
168 | * Called by migrate.c to remove migration ptes, but might be used more later. | ||
169 | */ | ||
170 | int rmap_walk(struct page *page, int (*rmap_one)(struct page *, | ||
171 | struct vm_area_struct *, unsigned long, void *), void *arg); | ||
172 | |||
130 | #else /* !CONFIG_MMU */ | 173 | #else /* !CONFIG_MMU */ |
131 | 174 | ||
132 | #define anon_vma_init() do {} while (0) | 175 | #define anon_vma_init() do {} while (0) |
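rmap_walk() invokes the supplied function once for every VMA that maps the page; a callback is expected to return SWAP_AGAIN to keep the walk going. A skeletal, illustrative callback matching the prototype above:

#include <linux/mm.h>
#include <linux/rmap.h>

/* Count the mappings of 'page'; arg points at the running total. */
static int demo_rmap_one(struct page *page, struct vm_area_struct *vma,
			 unsigned long address, void *arg)
{
	unsigned long *nr = arg;

	(*nr)++;
	return SWAP_AGAIN;	/* keep walking */
}

static unsigned long demo_count_mappings(struct page *page)
{
	unsigned long nr = 0;

	rmap_walk(page, demo_rmap_one, &nr);
	return nr;
}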
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index f19b00b7d530..281d8fd775e8 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h | |||
@@ -24,7 +24,7 @@ | |||
24 | * @owner: the mutex owner | 24 | * @owner: the mutex owner |
25 | */ | 25 | */ |
26 | struct rt_mutex { | 26 | struct rt_mutex { |
27 | spinlock_t wait_lock; | 27 | raw_spinlock_t wait_lock; |
28 | struct plist_head wait_list; | 28 | struct plist_head wait_list; |
29 | struct task_struct *owner; | 29 | struct task_struct *owner; |
30 | #ifdef CONFIG_DEBUG_RT_MUTEXES | 30 | #ifdef CONFIG_DEBUG_RT_MUTEXES |
@@ -63,8 +63,8 @@ struct hrtimer_sleeper; | |||
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | #define __RT_MUTEX_INITIALIZER(mutexname) \ | 65 | #define __RT_MUTEX_INITIALIZER(mutexname) \ |
66 | { .wait_lock = __SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ | 66 | { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ |
67 | , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \ | 67 | , .wait_list = PLIST_HEAD_INIT_RAW(mutexname.wait_list, mutexname.wait_lock) \ |
68 | , .owner = NULL \ | 68 | , .owner = NULL \ |
69 | __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} | 69 | __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} |
70 | 70 | ||
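Users of rt_mutex are unaffected by the wait_lock type change; the usual pattern still looks like this (sketch, names illustrative):

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(demo_rtmutex);	/* expands via __RT_MUTEX_INITIALIZER */

static void demo_critical_section(void)
{
	rt_mutex_lock(&demo_rtmutex);
	/* priority-inheriting critical section */
	rt_mutex_unlock(&demo_rtmutex);
}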
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 14fc906ed602..05330fc5b436 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -368,11 +368,9 @@ enum { | |||
368 | #define RTAX_MAX (__RTAX_MAX - 1) | 368 | #define RTAX_MAX (__RTAX_MAX - 1) |
369 | 369 | ||
370 | #define RTAX_FEATURE_ECN 0x00000001 | 370 | #define RTAX_FEATURE_ECN 0x00000001 |
371 | #define RTAX_FEATURE_NO_SACK 0x00000002 | 371 | #define RTAX_FEATURE_SACK 0x00000002 |
372 | #define RTAX_FEATURE_NO_TSTAMP 0x00000004 | 372 | #define RTAX_FEATURE_TIMESTAMP 0x00000004 |
373 | #define RTAX_FEATURE_ALLFRAG 0x00000008 | 373 | #define RTAX_FEATURE_ALLFRAG 0x00000008 |
374 | #define RTAX_FEATURE_NO_WSCALE 0x00000010 | ||
375 | #define RTAX_FEATURE_NO_DSACK 0x00000020 | ||
376 | 374 | ||
377 | struct rta_session { | 375 | struct rta_session { |
378 | __u8 proto; | 376 | __u8 proto; |
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h new file mode 100644 index 000000000000..71e0b00b6f2c --- /dev/null +++ b/include/linux/rwlock.h | |||
@@ -0,0 +1,125 @@ | |||
1 | #ifndef __LINUX_RWLOCK_H | ||
2 | #define __LINUX_RWLOCK_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | /* | ||
9 | * rwlock related methods | ||
10 | * | ||
11 | * split out from spinlock.h | ||
12 | * | ||
13 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
14 | * Released under the General Public License (GPL). | ||
15 | */ | ||
16 | |||
17 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
18 | extern void __rwlock_init(rwlock_t *lock, const char *name, | ||
19 | struct lock_class_key *key); | ||
20 | # define rwlock_init(lock) \ | ||
21 | do { \ | ||
22 | static struct lock_class_key __key; \ | ||
23 | \ | ||
24 | __rwlock_init((lock), #lock, &__key); \ | ||
25 | } while (0) | ||
26 | #else | ||
27 | # define rwlock_init(lock) \ | ||
28 | do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) | ||
29 | #endif | ||
30 | |||
31 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
32 | extern void do_raw_read_lock(rwlock_t *lock); | ||
33 | #define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock) | ||
34 | extern int do_raw_read_trylock(rwlock_t *lock); | ||
35 | extern void do_raw_read_unlock(rwlock_t *lock); | ||
36 | extern void do_raw_write_lock(rwlock_t *lock); | ||
37 | #define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock) | ||
38 | extern int do_raw_write_trylock(rwlock_t *lock); | ||
39 | extern void do_raw_write_unlock(rwlock_t *lock); | ||
40 | #else | ||
41 | # define do_raw_read_lock(rwlock) arch_read_lock(&(rwlock)->raw_lock) | ||
42 | # define do_raw_read_lock_flags(lock, flags) \ | ||
43 | arch_read_lock_flags(&(lock)->raw_lock, *(flags)) | ||
44 | # define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock) | ||
45 | # define do_raw_read_unlock(rwlock) arch_read_unlock(&(rwlock)->raw_lock) | ||
46 | # define do_raw_write_lock(rwlock) arch_write_lock(&(rwlock)->raw_lock) | ||
47 | # define do_raw_write_lock_flags(lock, flags) \ | ||
48 | arch_write_lock_flags(&(lock)->raw_lock, *(flags)) | ||
49 | # define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock) | ||
50 | # define do_raw_write_unlock(rwlock) arch_write_unlock(&(rwlock)->raw_lock) | ||
51 | #endif | ||
52 | |||
53 | #define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock) | ||
54 | #define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock) | ||
55 | |||
56 | /* | ||
57 | * Define the various rw_lock methods. Note we define these | ||
58 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various | ||
59 | * methods are defined as nops in the case they are not required. | ||
60 | */ | ||
61 | #define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock)) | ||
62 | #define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock)) | ||
63 | |||
64 | #define write_lock(lock) _raw_write_lock(lock) | ||
65 | #define read_lock(lock) _raw_read_lock(lock) | ||
66 | |||
67 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
68 | |||
69 | #define read_lock_irqsave(lock, flags) \ | ||
70 | do { \ | ||
71 | typecheck(unsigned long, flags); \ | ||
72 | flags = _raw_read_lock_irqsave(lock); \ | ||
73 | } while (0) | ||
74 | #define write_lock_irqsave(lock, flags) \ | ||
75 | do { \ | ||
76 | typecheck(unsigned long, flags); \ | ||
77 | flags = _raw_write_lock_irqsave(lock); \ | ||
78 | } while (0) | ||
79 | |||
80 | #else | ||
81 | |||
82 | #define read_lock_irqsave(lock, flags) \ | ||
83 | do { \ | ||
84 | typecheck(unsigned long, flags); \ | ||
85 | _raw_read_lock_irqsave(lock, flags); \ | ||
86 | } while (0) | ||
87 | #define write_lock_irqsave(lock, flags) \ | ||
88 | do { \ | ||
89 | typecheck(unsigned long, flags); \ | ||
90 | _raw_write_lock_irqsave(lock, flags); \ | ||
91 | } while (0) | ||
92 | |||
93 | #endif | ||
94 | |||
95 | #define read_lock_irq(lock) _raw_read_lock_irq(lock) | ||
96 | #define read_lock_bh(lock) _raw_read_lock_bh(lock) | ||
97 | #define write_lock_irq(lock) _raw_write_lock_irq(lock) | ||
98 | #define write_lock_bh(lock) _raw_write_lock_bh(lock) | ||
99 | #define read_unlock(lock) _raw_read_unlock(lock) | ||
100 | #define write_unlock(lock) _raw_write_unlock(lock) | ||
101 | #define read_unlock_irq(lock) _raw_read_unlock_irq(lock) | ||
102 | #define write_unlock_irq(lock) _raw_write_unlock_irq(lock) | ||
103 | |||
104 | #define read_unlock_irqrestore(lock, flags) \ | ||
105 | do { \ | ||
106 | typecheck(unsigned long, flags); \ | ||
107 | _raw_read_unlock_irqrestore(lock, flags); \ | ||
108 | } while (0) | ||
109 | #define read_unlock_bh(lock) _raw_read_unlock_bh(lock) | ||
110 | |||
111 | #define write_unlock_irqrestore(lock, flags) \ | ||
112 | do { \ | ||
113 | typecheck(unsigned long, flags); \ | ||
114 | _raw_write_unlock_irqrestore(lock, flags); \ | ||
115 | } while (0) | ||
116 | #define write_unlock_bh(lock) _raw_write_unlock_bh(lock) | ||
117 | |||
118 | #define write_trylock_irqsave(lock, flags) \ | ||
119 | ({ \ | ||
120 | local_irq_save(flags); \ | ||
121 | write_trylock(lock) ? \ | ||
122 | 1 : ({ local_irq_restore(flags); 0; }); \ | ||
123 | }) | ||
124 | |||
125 | #endif /* __LINUX_RWLOCK_H */ | ||
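For completeness, typical use of the wrappers collected in the new header; rwlock.h itself is pulled in via <linux/spinlock.h> and, per the guard above, must not be included directly (the protected data and names are illustrative):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_rwlock);
static int demo_value;

static int demo_get(void)
{
	unsigned long flags;
	int v;

	read_lock_irqsave(&demo_rwlock, flags);		/* readers may run concurrently */
	v = demo_value;
	read_unlock_irqrestore(&demo_rwlock, flags);
	return v;
}

static void demo_set(int v)
{
	write_lock_bh(&demo_rwlock);			/* exclusive, BH-disabled writer */
	demo_value = v;
	write_unlock_bh(&demo_rwlock);
}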
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h new file mode 100644 index 000000000000..9c9f0495d37c --- /dev/null +++ b/include/linux/rwlock_api_smp.h | |||
@@ -0,0 +1,282 @@ | |||
1 | #ifndef __LINUX_RWLOCK_API_SMP_H | ||
2 | #define __LINUX_RWLOCK_API_SMP_H | ||
3 | |||
4 | #ifndef __LINUX_SPINLOCK_API_SMP_H | ||
5 | # error "please don't include this file directly" | ||
6 | #endif | ||
7 | |||
8 | /* | ||
9 | * include/linux/rwlock_api_smp.h | ||
10 | * | ||
11 | * spinlock API declarations on SMP (and debug) | ||
12 | * (implemented in kernel/spinlock.c) | ||
13 | * | ||
14 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
15 | * Released under the General Public License (GPL). | ||
16 | */ | ||
17 | |||
18 | void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock); | ||
19 | void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock); | ||
20 | void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock); | ||
21 | void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock); | ||
22 | void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock); | ||
23 | void __lockfunc _raw_write_lock_irq(rwlock_t *lock) __acquires(lock); | ||
24 | unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock) | ||
25 | __acquires(lock); | ||
26 | unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock) | ||
27 | __acquires(lock); | ||
28 | int __lockfunc _raw_read_trylock(rwlock_t *lock); | ||
29 | int __lockfunc _raw_write_trylock(rwlock_t *lock); | ||
30 | void __lockfunc _raw_read_unlock(rwlock_t *lock) __releases(lock); | ||
31 | void __lockfunc _raw_write_unlock(rwlock_t *lock) __releases(lock); | ||
32 | void __lockfunc _raw_read_unlock_bh(rwlock_t *lock) __releases(lock); | ||
33 | void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) __releases(lock); | ||
34 | void __lockfunc _raw_read_unlock_irq(rwlock_t *lock) __releases(lock); | ||
35 | void __lockfunc _raw_write_unlock_irq(rwlock_t *lock) __releases(lock); | ||
36 | void __lockfunc | ||
37 | _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
38 | __releases(lock); | ||
39 | void __lockfunc | ||
40 | _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
41 | __releases(lock); | ||
42 | |||
43 | #ifdef CONFIG_INLINE_READ_LOCK | ||
44 | #define _raw_read_lock(lock) __raw_read_lock(lock) | ||
45 | #endif | ||
46 | |||
47 | #ifdef CONFIG_INLINE_WRITE_LOCK | ||
48 | #define _raw_write_lock(lock) __raw_write_lock(lock) | ||
49 | #endif | ||
50 | |||
51 | #ifdef CONFIG_INLINE_READ_LOCK_BH | ||
52 | #define _raw_read_lock_bh(lock) __raw_read_lock_bh(lock) | ||
53 | #endif | ||
54 | |||
55 | #ifdef CONFIG_INLINE_WRITE_LOCK_BH | ||
56 | #define _raw_write_lock_bh(lock) __raw_write_lock_bh(lock) | ||
57 | #endif | ||
58 | |||
59 | #ifdef CONFIG_INLINE_READ_LOCK_IRQ | ||
60 | #define _raw_read_lock_irq(lock) __raw_read_lock_irq(lock) | ||
61 | #endif | ||
62 | |||
63 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQ | ||
64 | #define _raw_write_lock_irq(lock) __raw_write_lock_irq(lock) | ||
65 | #endif | ||
66 | |||
67 | #ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE | ||
68 | #define _raw_read_lock_irqsave(lock) __raw_read_lock_irqsave(lock) | ||
69 | #endif | ||
70 | |||
71 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE | ||
72 | #define _raw_write_lock_irqsave(lock) __raw_write_lock_irqsave(lock) | ||
73 | #endif | ||
74 | |||
75 | #ifdef CONFIG_INLINE_READ_TRYLOCK | ||
76 | #define _raw_read_trylock(lock) __raw_read_trylock(lock) | ||
77 | #endif | ||
78 | |||
79 | #ifdef CONFIG_INLINE_WRITE_TRYLOCK | ||
80 | #define _raw_write_trylock(lock) __raw_write_trylock(lock) | ||
81 | #endif | ||
82 | |||
83 | #ifdef CONFIG_INLINE_READ_UNLOCK | ||
84 | #define _raw_read_unlock(lock) __raw_read_unlock(lock) | ||
85 | #endif | ||
86 | |||
87 | #ifdef CONFIG_INLINE_WRITE_UNLOCK | ||
88 | #define _raw_write_unlock(lock) __raw_write_unlock(lock) | ||
89 | #endif | ||
90 | |||
91 | #ifdef CONFIG_INLINE_READ_UNLOCK_BH | ||
92 | #define _raw_read_unlock_bh(lock) __raw_read_unlock_bh(lock) | ||
93 | #endif | ||
94 | |||
95 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_BH | ||
96 | #define _raw_write_unlock_bh(lock) __raw_write_unlock_bh(lock) | ||
97 | #endif | ||
98 | |||
99 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQ | ||
100 | #define _raw_read_unlock_irq(lock) __raw_read_unlock_irq(lock) | ||
101 | #endif | ||
102 | |||
103 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ | ||
104 | #define _raw_write_unlock_irq(lock) __raw_write_unlock_irq(lock) | ||
105 | #endif | ||
106 | |||
107 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE | ||
108 | #define _raw_read_unlock_irqrestore(lock, flags) \ | ||
109 | __raw_read_unlock_irqrestore(lock, flags) | ||
110 | #endif | ||
111 | |||
112 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
113 | #define _raw_write_unlock_irqrestore(lock, flags) \ | ||
114 | __raw_write_unlock_irqrestore(lock, flags) | ||
115 | #endif | ||
116 | |||
117 | static inline int __raw_read_trylock(rwlock_t *lock) | ||
118 | { | ||
119 | preempt_disable(); | ||
120 | if (do_raw_read_trylock(lock)) { | ||
121 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); | ||
122 | return 1; | ||
123 | } | ||
124 | preempt_enable(); | ||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static inline int __raw_write_trylock(rwlock_t *lock) | ||
129 | { | ||
130 | preempt_disable(); | ||
131 | if (do_raw_write_trylock(lock)) { | ||
132 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
133 | return 1; | ||
134 | } | ||
135 | preempt_enable(); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * If lockdep is enabled then we use the non-preemption spin-ops | ||
141 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | ||
142 | * not re-enabled during lock-acquire (which the preempt-spin-ops do): | ||
143 | */ | ||
144 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | ||
145 | |||
146 | static inline void __raw_read_lock(rwlock_t *lock) | ||
147 | { | ||
148 | preempt_disable(); | ||
149 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
150 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); | ||
151 | } | ||
152 | |||
153 | static inline unsigned long __raw_read_lock_irqsave(rwlock_t *lock) | ||
154 | { | ||
155 | unsigned long flags; | ||
156 | |||
157 | local_irq_save(flags); | ||
158 | preempt_disable(); | ||
159 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
160 | LOCK_CONTENDED_FLAGS(lock, do_raw_read_trylock, do_raw_read_lock, | ||
161 | do_raw_read_lock_flags, &flags); | ||
162 | return flags; | ||
163 | } | ||
164 | |||
165 | static inline void __raw_read_lock_irq(rwlock_t *lock) | ||
166 | { | ||
167 | local_irq_disable(); | ||
168 | preempt_disable(); | ||
169 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
170 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); | ||
171 | } | ||
172 | |||
173 | static inline void __raw_read_lock_bh(rwlock_t *lock) | ||
174 | { | ||
175 | local_bh_disable(); | ||
176 | preempt_disable(); | ||
177 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
178 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); | ||
179 | } | ||
180 | |||
181 | static inline unsigned long __raw_write_lock_irqsave(rwlock_t *lock) | ||
182 | { | ||
183 | unsigned long flags; | ||
184 | |||
185 | local_irq_save(flags); | ||
186 | preempt_disable(); | ||
187 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
188 | LOCK_CONTENDED_FLAGS(lock, do_raw_write_trylock, do_raw_write_lock, | ||
189 | do_raw_write_lock_flags, &flags); | ||
190 | return flags; | ||
191 | } | ||
192 | |||
193 | static inline void __raw_write_lock_irq(rwlock_t *lock) | ||
194 | { | ||
195 | local_irq_disable(); | ||
196 | preempt_disable(); | ||
197 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
198 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); | ||
199 | } | ||
200 | |||
201 | static inline void __raw_write_lock_bh(rwlock_t *lock) | ||
202 | { | ||
203 | local_bh_disable(); | ||
204 | preempt_disable(); | ||
205 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
206 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); | ||
207 | } | ||
208 | |||
209 | static inline void __raw_write_lock(rwlock_t *lock) | ||
210 | { | ||
211 | preempt_disable(); | ||
212 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
213 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); | ||
214 | } | ||
215 | |||
216 | #endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */ | ||
217 | |||
218 | static inline void __raw_write_unlock(rwlock_t *lock) | ||
219 | { | ||
220 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
221 | do_raw_write_unlock(lock); | ||
222 | preempt_enable(); | ||
223 | } | ||
224 | |||
225 | static inline void __raw_read_unlock(rwlock_t *lock) | ||
226 | { | ||
227 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
228 | do_raw_read_unlock(lock); | ||
229 | preempt_enable(); | ||
230 | } | ||
231 | |||
232 | static inline void | ||
233 | __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
234 | { | ||
235 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
236 | do_raw_read_unlock(lock); | ||
237 | local_irq_restore(flags); | ||
238 | preempt_enable(); | ||
239 | } | ||
240 | |||
241 | static inline void __raw_read_unlock_irq(rwlock_t *lock) | ||
242 | { | ||
243 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
244 | do_raw_read_unlock(lock); | ||
245 | local_irq_enable(); | ||
246 | preempt_enable(); | ||
247 | } | ||
248 | |||
249 | static inline void __raw_read_unlock_bh(rwlock_t *lock) | ||
250 | { | ||
251 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
252 | do_raw_read_unlock(lock); | ||
253 | preempt_enable_no_resched(); | ||
254 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
255 | } | ||
256 | |||
257 | static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, | ||
258 | unsigned long flags) | ||
259 | { | ||
260 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
261 | do_raw_write_unlock(lock); | ||
262 | local_irq_restore(flags); | ||
263 | preempt_enable(); | ||
264 | } | ||
265 | |||
266 | static inline void __raw_write_unlock_irq(rwlock_t *lock) | ||
267 | { | ||
268 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
269 | do_raw_write_unlock(lock); | ||
270 | local_irq_enable(); | ||
271 | preempt_enable(); | ||
272 | } | ||
273 | |||
274 | static inline void __raw_write_unlock_bh(rwlock_t *lock) | ||
275 | { | ||
276 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
277 | do_raw_write_unlock(lock); | ||
278 | preempt_enable_no_resched(); | ||
279 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
280 | } | ||
281 | |||
282 | #endif /* __LINUX_RWLOCK_API_SMP_H */ | ||
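A minimal usage sketch (assumed, not part of the patch): the _raw_read_*/_raw_write_* entry points above back the ordinary rwlock wrappers that callers reach through <linux/rwlock.h>, which <linux/spinlock.h> pulls in. The lock and data names below are hypothetical.

#include <linux/spinlock.h>	/* pulls in <linux/rwlock.h> */

static DEFINE_RWLOCK(example_lock);	/* hypothetical lock */
static int example_value;		/* hypothetical shared data */

static int example_read(void)
{
	int v;

	read_lock(&example_lock);	/* concurrent readers allowed */
	v = example_value;
	read_unlock(&example_lock);
	return v;
}

static void example_update(int v)
{
	write_lock_irq(&example_lock);	/* exclusive writer, IRQs off */
	example_value = v;
	write_unlock_irq(&example_lock);
}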
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h new file mode 100644 index 000000000000..bd31808c7d8e --- /dev/null +++ b/include/linux/rwlock_types.h | |||
@@ -0,0 +1,56 @@ | |||
1 | #ifndef __LINUX_RWLOCK_TYPES_H | ||
2 | #define __LINUX_RWLOCK_TYPES_H | ||
3 | |||
4 | /* | ||
5 | * include/linux/rwlock_types.h - generic rwlock type definitions | ||
6 | * and initializers | ||
7 | * | ||
8 | * portions Copyright 2005, Red Hat, Inc., Ingo Molnar | ||
9 | * Released under the General Public License (GPL). | ||
10 | */ | ||
11 | typedef struct { | ||
12 | arch_rwlock_t raw_lock; | ||
13 | #ifdef CONFIG_GENERIC_LOCKBREAK | ||
14 | unsigned int break_lock; | ||
15 | #endif | ||
16 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
17 | unsigned int magic, owner_cpu; | ||
18 | void *owner; | ||
19 | #endif | ||
20 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
21 | struct lockdep_map dep_map; | ||
22 | #endif | ||
23 | } rwlock_t; | ||
24 | |||
25 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
26 | |||
27 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
28 | # define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } | ||
29 | #else | ||
30 | # define RW_DEP_MAP_INIT(lockname) | ||
31 | #endif | ||
32 | |||
33 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
34 | #define __RW_LOCK_UNLOCKED(lockname) \ | ||
35 | (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ | ||
36 | .magic = RWLOCK_MAGIC, \ | ||
37 | .owner = SPINLOCK_OWNER_INIT, \ | ||
38 | .owner_cpu = -1, \ | ||
39 | RW_DEP_MAP_INIT(lockname) } | ||
40 | #else | ||
41 | #define __RW_LOCK_UNLOCKED(lockname) \ | ||
42 | (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ | ||
43 | RW_DEP_MAP_INIT(lockname) } | ||
44 | #endif | ||
45 | |||
46 | /* | ||
47 | * RW_LOCK_UNLOCKED defeats lockdep state tracking and is hence | ||
48 | * deprecated. | ||
49 | * | ||
50 | * Please use DEFINE_RWLOCK() or __RW_LOCK_UNLOCKED() as appropriate. | ||
51 | */ | ||
52 | #define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) | ||
53 | |||
54 | #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) | ||
55 | |||
56 | #endif /* __LINUX_RWLOCK_TYPES_H */ | ||
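For reference, a short sketch of the two initialization styles this header supports, assuming rwlock_init() from <linux/rwlock.h>; the context struct and names are hypothetical.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_static_lock);	/* compile-time definition */

struct example_ctx {
	rwlock_t lock;				/* embedded lock */
};

static void example_ctx_init(struct example_ctx *ctx)
{
	rwlock_init(&ctx->lock);		/* run-time init, lockdep-friendly */
	/* __RW_LOCK_UNLOCKED(name) may also be used in a static initializer;
	 * bare RW_LOCK_UNLOCKED is deprecated as noted above. */
}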
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h index 6c3c0f6c261f..bdfcc2527970 100644 --- a/include/linux/rwsem-spinlock.h +++ b/include/linux/rwsem-spinlock.h | |||
@@ -68,11 +68,7 @@ extern int __down_write_trylock(struct rw_semaphore *sem); | |||
68 | extern void __up_read(struct rw_semaphore *sem); | 68 | extern void __up_read(struct rw_semaphore *sem); |
69 | extern void __up_write(struct rw_semaphore *sem); | 69 | extern void __up_write(struct rw_semaphore *sem); |
70 | extern void __downgrade_write(struct rw_semaphore *sem); | 70 | extern void __downgrade_write(struct rw_semaphore *sem); |
71 | 71 | extern int rwsem_is_locked(struct rw_semaphore *sem); | |
72 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
73 | { | ||
74 | return (sem->activity != 0); | ||
75 | } | ||
76 | 72 | ||
77 | #endif /* __KERNEL__ */ | 73 | #endif /* __KERNEL__ */ |
78 | #endif /* _LINUX_RWSEM_SPINLOCK_H */ | 74 | #endif /* _LINUX_RWSEM_SPINLOCK_H */ |
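A brief sketch of how rwsem_is_locked() is typically used, namely as an assertion in a path that requires the semaphore to be held; the helper name is hypothetical.

#include <linux/rwsem.h>

static void example_requires_sem_held(struct rw_semaphore *sem)
{
	BUG_ON(!rwsem_is_locked(sem));	/* caller must hold sem */
	/* ... work that depends on sem being held ... */
}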
diff --git a/include/linux/sched.h b/include/linux/sched.h index 294eb2f80144..abdfacc58653 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -192,6 +192,12 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
192 | #define TASK_DEAD 64 | 192 | #define TASK_DEAD 64 |
193 | #define TASK_WAKEKILL 128 | 193 | #define TASK_WAKEKILL 128 |
194 | #define TASK_WAKING 256 | 194 | #define TASK_WAKING 256 |
195 | #define TASK_STATE_MAX 512 | ||
196 | |||
197 | #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" | ||
198 | |||
199 | extern char ___assert_task_state[1 - 2*!!( | ||
200 | sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; | ||
195 | 201 | ||
196 | /* Convenience macros for the sake of set_task_state */ | 202 | /* Convenience macros for the sake of set_task_state */ |
197 | #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) | 203 | #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) |
@@ -371,6 +377,8 @@ extern int sysctl_max_map_count; | |||
371 | 377 | ||
372 | #include <linux/aio.h> | 378 | #include <linux/aio.h> |
373 | 379 | ||
380 | #ifdef CONFIG_MMU | ||
381 | extern void arch_pick_mmap_layout(struct mm_struct *mm); | ||
374 | extern unsigned long | 382 | extern unsigned long |
375 | arch_get_unmapped_area(struct file *, unsigned long, unsigned long, | 383 | arch_get_unmapped_area(struct file *, unsigned long, unsigned long, |
376 | unsigned long, unsigned long); | 384 | unsigned long, unsigned long); |
@@ -380,6 +388,9 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, | |||
380 | unsigned long flags); | 388 | unsigned long flags); |
381 | extern void arch_unmap_area(struct mm_struct *, unsigned long); | 389 | extern void arch_unmap_area(struct mm_struct *, unsigned long); |
382 | extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); | 390 | extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); |
391 | #else | ||
392 | static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} | ||
393 | #endif | ||
383 | 394 | ||
384 | #if USE_SPLIT_PTLOCKS | 395 | #if USE_SPLIT_PTLOCKS |
385 | /* | 396 | /* |
@@ -1091,7 +1102,8 @@ struct sched_class { | |||
1091 | enum cpu_idle_type idle); | 1102 | enum cpu_idle_type idle); |
1092 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 1103 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); |
1093 | void (*post_schedule) (struct rq *this_rq); | 1104 | void (*post_schedule) (struct rq *this_rq); |
1094 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); | 1105 | void (*task_waking) (struct rq *this_rq, struct task_struct *task); |
1106 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); | ||
1095 | 1107 | ||
1096 | void (*set_cpus_allowed)(struct task_struct *p, | 1108 | void (*set_cpus_allowed)(struct task_struct *p, |
1097 | const struct cpumask *newmask); | 1109 | const struct cpumask *newmask); |
@@ -1115,7 +1127,7 @@ struct sched_class { | |||
1115 | struct task_struct *task); | 1127 | struct task_struct *task); |
1116 | 1128 | ||
1117 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1129 | #ifdef CONFIG_FAIR_GROUP_SCHED |
1118 | void (*moved_group) (struct task_struct *p); | 1130 | void (*moved_group) (struct task_struct *p, int on_rq); |
1119 | #endif | 1131 | #endif |
1120 | }; | 1132 | }; |
1121 | 1133 | ||
@@ -1357,7 +1369,7 @@ struct task_struct { | |||
1357 | char comm[TASK_COMM_LEN]; /* executable name excluding path | 1369 | char comm[TASK_COMM_LEN]; /* executable name excluding path |
1358 | - access with [gs]et_task_comm (which lock | 1370 | - access with [gs]et_task_comm (which lock |
1359 | it with task_lock()) | 1371 | it with task_lock()) |
1360 | - initialized normally by flush_old_exec */ | 1372 | - initialized normally by setup_new_exec */ |
1361 | /* file system info */ | 1373 | /* file system info */ |
1362 | int link_count, total_link_count; | 1374 | int link_count, total_link_count; |
1363 | #ifdef CONFIG_SYSVIPC | 1375 | #ifdef CONFIG_SYSVIPC |
@@ -1409,7 +1421,7 @@ struct task_struct { | |||
1409 | #endif | 1421 | #endif |
1410 | 1422 | ||
1411 | /* Protection of the PI data structures: */ | 1423 | /* Protection of the PI data structures: */ |
1412 | spinlock_t pi_lock; | 1424 | raw_spinlock_t pi_lock; |
1413 | 1425 | ||
1414 | #ifdef CONFIG_RT_MUTEXES | 1426 | #ifdef CONFIG_RT_MUTEXES |
1415 | /* PI waiters blocked on a rt_mutex held by this task */ | 1427 | /* PI waiters blocked on a rt_mutex held by this task */ |
@@ -1542,10 +1554,18 @@ struct task_struct { | |||
1542 | unsigned long trace_recursion; | 1554 | unsigned long trace_recursion; |
1543 | #endif /* CONFIG_TRACING */ | 1555 | #endif /* CONFIG_TRACING */ |
1544 | unsigned long stack_start; | 1556 | unsigned long stack_start; |
1557 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR /* memcg uses this to do batch job */ | ||
1558 | struct memcg_batch_info { | ||
1559 | int do_batch; /* incremented when batch uncharge started */ | ||
1560 | struct mem_cgroup *memcg; /* target memcg of uncharge */ | ||
1561 | unsigned long bytes; /* uncharged usage */ | ||
1562 | unsigned long memsw_bytes; /* uncharged mem+swap usage */ | ||
1563 | } memcg_batch; | ||
1564 | #endif | ||
1545 | }; | 1565 | }; |
1546 | 1566 | ||
1547 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1567 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
1548 | #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) | 1568 | #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) |
1549 | 1569 | ||
1550 | /* | 1570 | /* |
1551 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT | 1571 | * Priority of a process goes from 0..MAX_PRIO-1, valid RT |
@@ -2073,7 +2093,6 @@ extern int kill_proc_info(int, struct siginfo *, pid_t); | |||
2073 | extern int do_notify_parent(struct task_struct *, int); | 2093 | extern int do_notify_parent(struct task_struct *, int); |
2074 | extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); | 2094 | extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); |
2075 | extern void force_sig(int, struct task_struct *); | 2095 | extern void force_sig(int, struct task_struct *); |
2076 | extern void force_sig_specific(int, struct task_struct *); | ||
2077 | extern int send_sig(int, struct task_struct *, int); | 2096 | extern int send_sig(int, struct task_struct *, int); |
2078 | extern void zap_other_threads(struct task_struct *p); | 2097 | extern void zap_other_threads(struct task_struct *p); |
2079 | extern struct sigqueue *sigqueue_alloc(void); | 2098 | extern struct sigqueue *sigqueue_alloc(void); |
@@ -2092,11 +2111,6 @@ static inline int kill_cad_pid(int sig, int priv) | |||
2092 | #define SEND_SIG_PRIV ((struct siginfo *) 1) | 2111 | #define SEND_SIG_PRIV ((struct siginfo *) 1) |
2093 | #define SEND_SIG_FORCED ((struct siginfo *) 2) | 2112 | #define SEND_SIG_FORCED ((struct siginfo *) 2) |
2094 | 2113 | ||
2095 | static inline int is_si_special(const struct siginfo *info) | ||
2096 | { | ||
2097 | return info <= SEND_SIG_FORCED; | ||
2098 | } | ||
2099 | |||
2100 | /* | 2114 | /* |
2101 | * True if we are on the alternate signal stack. | 2115 | * True if we are on the alternate signal stack. |
2102 | */ | 2116 | */ |
@@ -2482,8 +2496,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) | |||
2482 | 2496 | ||
2483 | #endif /* CONFIG_SMP */ | 2497 | #endif /* CONFIG_SMP */ |
2484 | 2498 | ||
2485 | extern void arch_pick_mmap_layout(struct mm_struct *mm); | ||
2486 | |||
2487 | #ifdef CONFIG_TRACING | 2499 | #ifdef CONFIG_TRACING |
2488 | extern void | 2500 | extern void |
2489 | __trace_special(void *__tr, void *__data, | 2501 | __trace_special(void *__tr, void *__data, |
@@ -2592,7 +2604,27 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) | |||
2592 | } | 2604 | } |
2593 | #endif /* CONFIG_MM_OWNER */ | 2605 | #endif /* CONFIG_MM_OWNER */ |
2594 | 2606 | ||
2595 | #define TASK_STATE_TO_CHAR_STR "RSDTtZX" | 2607 | static inline unsigned long task_rlimit(const struct task_struct *tsk, |
2608 | unsigned int limit) | ||
2609 | { | ||
2610 | return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); | ||
2611 | } | ||
2612 | |||
2613 | static inline unsigned long task_rlimit_max(const struct task_struct *tsk, | ||
2614 | unsigned int limit) | ||
2615 | { | ||
2616 | return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); | ||
2617 | } | ||
2618 | |||
2619 | static inline unsigned long rlimit(unsigned int limit) | ||
2620 | { | ||
2621 | return task_rlimit(current, limit); | ||
2622 | } | ||
2623 | |||
2624 | static inline unsigned long rlimit_max(unsigned int limit) | ||
2625 | { | ||
2626 | return task_rlimit_max(current, limit); | ||
2627 | } | ||
2596 | 2628 | ||
2597 | #endif /* __KERNEL__ */ | 2629 | #endif /* __KERNEL__ */ |
2598 | 2630 | ||
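The new task_rlimit()/rlimit() accessors above wrap the ACCESS_ONCE() read of signal->rlim[]; a minimal sketch of a caller follows (the helper and its check are hypothetical).

#include <linux/sched.h>
#include <linux/resource.h>

static int example_within_stack_limit(unsigned long size)
{
	/* rlimit() reads current->signal->rlim[RLIMIT_STACK].rlim_cur */
	return size <= rlimit(RLIMIT_STACK);
}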
diff --git a/include/linux/security.h b/include/linux/security.h index 466cbadbd1ef..2c627d361c02 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -95,8 +95,13 @@ struct seq_file; | |||
95 | extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); | 95 | extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb); |
96 | extern int cap_netlink_recv(struct sk_buff *skb, int cap); | 96 | extern int cap_netlink_recv(struct sk_buff *skb, int cap); |
97 | 97 | ||
98 | #ifdef CONFIG_MMU | ||
98 | extern unsigned long mmap_min_addr; | 99 | extern unsigned long mmap_min_addr; |
99 | extern unsigned long dac_mmap_min_addr; | 100 | extern unsigned long dac_mmap_min_addr; |
101 | #else | ||
102 | #define dac_mmap_min_addr 0UL | ||
103 | #endif | ||
104 | |||
100 | /* | 105 | /* |
101 | * Values used in the task_security_ops calls | 106 | * Values used in the task_security_ops calls |
102 | */ | 107 | */ |
@@ -121,6 +126,7 @@ struct request_sock; | |||
121 | #define LSM_UNSAFE_PTRACE 2 | 126 | #define LSM_UNSAFE_PTRACE 2 |
122 | #define LSM_UNSAFE_PTRACE_CAP 4 | 127 | #define LSM_UNSAFE_PTRACE_CAP 4 |
123 | 128 | ||
129 | #ifdef CONFIG_MMU | ||
124 | /* | 130 | /* |
125 | * If a hint addr is less than mmap_min_addr change hint to be as | 131 | * If a hint addr is less than mmap_min_addr change hint to be as |
126 | * low as possible but still greater than mmap_min_addr | 132 | * low as possible but still greater than mmap_min_addr |
@@ -135,6 +141,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint) | |||
135 | } | 141 | } |
136 | extern int mmap_min_addr_handler(struct ctl_table *table, int write, | 142 | extern int mmap_min_addr_handler(struct ctl_table *table, int write, |
137 | void __user *buffer, size_t *lenp, loff_t *ppos); | 143 | void __user *buffer, size_t *lenp, loff_t *ppos); |
144 | #endif | ||
138 | 145 | ||
139 | #ifdef CONFIG_SECURITY | 146 | #ifdef CONFIG_SECURITY |
140 | 147 | ||
diff --git a/include/linux/sem.h b/include/linux/sem.h index 1b191c176bcd..8a4adbef8a0f 100644 --- a/include/linux/sem.h +++ b/include/linux/sem.h | |||
@@ -86,6 +86,7 @@ struct task_struct; | |||
86 | struct sem { | 86 | struct sem { |
87 | int semval; /* current value */ | 87 | int semval; /* current value */ |
88 | int sempid; /* pid of last operation */ | 88 | int sempid; /* pid of last operation */ |
89 | struct list_head sem_pending; /* pending single-sop operations */ | ||
89 | }; | 90 | }; |
90 | 91 | ||
91 | /* One sem_array data structure for each set of semaphores in the system. */ | 92 | /* One sem_array data structure for each set of semaphores in the system. */ |
@@ -96,11 +97,13 @@ struct sem_array { | |||
96 | struct sem *sem_base; /* ptr to first semaphore in array */ | 97 | struct sem *sem_base; /* ptr to first semaphore in array */ |
97 | struct list_head sem_pending; /* pending operations to be processed */ | 98 | struct list_head sem_pending; /* pending operations to be processed */ |
98 | struct list_head list_id; /* undo requests on this array */ | 99 | struct list_head list_id; /* undo requests on this array */ |
99 | unsigned long sem_nsems; /* no. of semaphores in array */ | 100 | int sem_nsems; /* no. of semaphores in array */ |
101 | int complex_count; /* pending complex operations */ | ||
100 | }; | 102 | }; |
101 | 103 | ||
102 | /* One queue for each sleeping process in the system. */ | 104 | /* One queue for each sleeping process in the system. */ |
103 | struct sem_queue { | 105 | struct sem_queue { |
106 | struct list_head simple_list; /* queue of pending operations */ | ||
104 | struct list_head list; /* queue of pending operations */ | 107 | struct list_head list; /* queue of pending operations */ |
105 | struct task_struct *sleeper; /* this process */ | 108 | struct task_struct *sleeper; /* this process */ |
106 | struct sem_undo *undo; /* undo structure */ | 109 | struct sem_undo *undo; /* undo structure */ |
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index deee7afd8d66..e164291fb3e7 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
@@ -41,20 +41,4 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode) | |||
41 | extern int init_tmpfs(void); | 41 | extern int init_tmpfs(void); |
42 | extern int shmem_fill_super(struct super_block *sb, void *data, int silent); | 42 | extern int shmem_fill_super(struct super_block *sb, void *data, int silent); |
43 | 43 | ||
44 | #ifdef CONFIG_TMPFS_POSIX_ACL | ||
45 | int shmem_check_acl(struct inode *, int); | ||
46 | int shmem_acl_init(struct inode *, struct inode *); | ||
47 | |||
48 | extern struct xattr_handler shmem_xattr_acl_access_handler; | ||
49 | extern struct xattr_handler shmem_xattr_acl_default_handler; | ||
50 | |||
51 | extern struct generic_acl_operations shmem_acl_ops; | ||
52 | |||
53 | #else | ||
54 | static inline int shmem_acl_init(struct inode *inode, struct inode *dir) | ||
55 | { | ||
56 | return 0; | ||
57 | } | ||
58 | #endif /* CONFIG_TMPFS_POSIX_ACL */ | ||
59 | |||
60 | #endif | 44 | #endif |
diff --git a/include/linux/sm501-regs.h b/include/linux/sm501-regs.h index d53642d2d899..67ed2c542831 100644 --- a/include/linux/sm501-regs.h +++ b/include/linux/sm501-regs.h | |||
@@ -31,6 +31,8 @@ | |||
31 | #define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11) | 31 | #define SM501_SYSCTRL_PCI_SUBSYS_LOCK (1<<11) |
32 | #define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15) | 32 | #define SM501_SYSCTRL_PCI_BURST_READ_EN (1<<15) |
33 | 33 | ||
34 | #define SM501_SYSCTRL_2D_ENGINE_STATUS (1<<19) | ||
35 | |||
34 | /* miscellaneous control */ | 36 | /* miscellaneous control */ |
35 | 37 | ||
36 | #define SM501_MISC_CONTROL (0x000004) | 38 | #define SM501_MISC_CONTROL (0x000004) |
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h index 34c4475ac4a2..4f95c1aac2fd 100644 --- a/include/linux/sonypi.h +++ b/include/linux/sonypi.h | |||
@@ -111,6 +111,7 @@ | |||
111 | #define SONYPI_EVENT_VOLUME_INC_PRESSED 69 | 111 | #define SONYPI_EVENT_VOLUME_INC_PRESSED 69 |
112 | #define SONYPI_EVENT_VOLUME_DEC_PRESSED 70 | 112 | #define SONYPI_EVENT_VOLUME_DEC_PRESSED 70 |
113 | #define SONYPI_EVENT_BRIGHTNESS_PRESSED 71 | 113 | #define SONYPI_EVENT_BRIGHTNESS_PRESSED 71 |
114 | #define SONYPI_EVENT_MEDIA_PRESSED 72 | ||
114 | 115 | ||
115 | /* get/set brightness */ | 116 | /* get/set brightness */ |
116 | #define SONYPI_IOCGBRT _IOR('v', 0, __u8) | 117 | #define SONYPI_IOCGBRT _IOR('v', 0, __u8) |
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h new file mode 100644 index 000000000000..51b3e771a9a3 --- /dev/null +++ b/include/linux/spi/dw_spi.h | |||
@@ -0,0 +1,212 @@ | |||
1 | #ifndef DW_SPI_HEADER_H | ||
2 | #define DW_SPI_HEADER_H | ||
3 | #include <linux/io.h> | ||
4 | |||
5 | /* Bit fields in CTRLR0 */ | ||
6 | #define SPI_DFS_OFFSET 0 | ||
7 | |||
8 | #define SPI_FRF_OFFSET 4 | ||
9 | #define SPI_FRF_SPI 0x0 | ||
10 | #define SPI_FRF_SSP 0x1 | ||
11 | #define SPI_FRF_MICROWIRE 0x2 | ||
12 | #define SPI_FRF_RESV 0x3 | ||
13 | |||
14 | #define SPI_MODE_OFFSET 6 | ||
15 | #define SPI_SCPH_OFFSET 6 | ||
16 | #define SPI_SCOL_OFFSET 7 | ||
17 | #define SPI_TMOD_OFFSET 8 | ||
18 | #define SPI_TMOD_TR 0x0 /* xmit & recv */ | ||
19 | #define SPI_TMOD_TO 0x1 /* xmit only */ | ||
20 | #define SPI_TMOD_RO 0x2 /* recv only */ | ||
21 | #define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ | ||
22 | |||
23 | #define SPI_SLVOE_OFFSET 10 | ||
24 | #define SPI_SRL_OFFSET 11 | ||
25 | #define SPI_CFS_OFFSET 12 | ||
26 | |||
27 | /* Bit fields in SR, 7 bits */ | ||
28 | #define SR_MASK 0x7f /* cover 7 bits */ | ||
29 | #define SR_BUSY (1 << 0) | ||
30 | #define SR_TF_NOT_FULL (1 << 1) | ||
31 | #define SR_TF_EMPT (1 << 2) | ||
32 | #define SR_RF_NOT_EMPT (1 << 3) | ||
33 | #define SR_RF_FULL (1 << 4) | ||
34 | #define SR_TX_ERR (1 << 5) | ||
35 | #define SR_DCOL (1 << 6) | ||
36 | |||
37 | /* Bit fields in ISR, IMR, RISR, 7 bits */ | ||
38 | #define SPI_INT_TXEI (1 << 0) | ||
39 | #define SPI_INT_TXOI (1 << 1) | ||
40 | #define SPI_INT_RXUI (1 << 2) | ||
41 | #define SPI_INT_RXOI (1 << 3) | ||
42 | #define SPI_INT_RXFI (1 << 4) | ||
43 | #define SPI_INT_MSTI (1 << 5) | ||
44 | |||
45 | /* TX RX interrupt level threshold, max can be 256 */ | ||
46 | #define SPI_INT_THRESHOLD 32 | ||
47 | |||
48 | enum dw_ssi_type { | ||
49 | SSI_MOTO_SPI = 0, | ||
50 | SSI_TI_SSP, | ||
51 | SSI_NS_MICROWIRE, | ||
52 | }; | ||
53 | |||
54 | struct dw_spi_reg { | ||
55 | u32 ctrl0; | ||
56 | u32 ctrl1; | ||
57 | u32 ssienr; | ||
58 | u32 mwcr; | ||
59 | u32 ser; | ||
60 | u32 baudr; | ||
61 | u32 txfltr; | ||
62 | u32 rxfltr; | ||
63 | u32 txflr; | ||
64 | u32 rxflr; | ||
65 | u32 sr; | ||
66 | u32 imr; | ||
67 | u32 isr; | ||
68 | u32 risr; | ||
69 | u32 txoicr; | ||
70 | u32 rxoicr; | ||
71 | u32 rxuicr; | ||
72 | u32 msticr; | ||
73 | u32 icr; | ||
74 | u32 dmacr; | ||
75 | u32 dmatdlr; | ||
76 | u32 dmardlr; | ||
77 | u32 idr; | ||
78 | u32 version; | ||
79 | u32 dr; /* Currently operated as 32 bits, | ||
80 | though only the low 16 bits matter */ | ||
81 | } __packed; | ||
82 | |||
83 | struct dw_spi { | ||
84 | struct spi_master *master; | ||
85 | struct spi_device *cur_dev; | ||
86 | struct device *parent_dev; | ||
87 | enum dw_ssi_type type; | ||
88 | |||
89 | void __iomem *regs; | ||
90 | unsigned long paddr; | ||
91 | u32 iolen; | ||
92 | int irq; | ||
93 | u32 max_freq; /* max bus freq supported */ | ||
94 | |||
95 | u16 bus_num; | ||
96 | u16 num_cs; /* number of supported slave devices */ | ||
97 | |||
98 | /* Driver message queue */ | ||
99 | struct workqueue_struct *workqueue; | ||
100 | struct work_struct pump_messages; | ||
101 | spinlock_t lock; | ||
102 | struct list_head queue; | ||
103 | int busy; | ||
104 | int run; | ||
105 | |||
106 | /* Message Transfer pump */ | ||
107 | struct tasklet_struct pump_transfers; | ||
108 | |||
109 | /* Current message transfer state info */ | ||
110 | struct spi_message *cur_msg; | ||
111 | struct spi_transfer *cur_transfer; | ||
112 | struct chip_data *cur_chip; | ||
113 | struct chip_data *prev_chip; | ||
114 | size_t len; | ||
115 | void *tx; | ||
116 | void *tx_end; | ||
117 | void *rx; | ||
118 | void *rx_end; | ||
119 | int dma_mapped; | ||
120 | dma_addr_t rx_dma; | ||
121 | dma_addr_t tx_dma; | ||
122 | size_t rx_map_len; | ||
123 | size_t tx_map_len; | ||
124 | u8 n_bytes; /* 1 or 2 bytes per word in the current op */ | ||
125 | u8 max_bits_per_word; /* maximum is 16 bits */ | ||
126 | u32 dma_width; | ||
127 | int cs_change; | ||
128 | int (*write)(struct dw_spi *dws); | ||
129 | int (*read)(struct dw_spi *dws); | ||
130 | irqreturn_t (*transfer_handler)(struct dw_spi *dws); | ||
131 | void (*cs_control)(u32 command); | ||
132 | |||
133 | /* Dma info */ | ||
134 | int dma_inited; | ||
135 | struct dma_chan *txchan; | ||
136 | struct dma_chan *rxchan; | ||
137 | int txdma_done; | ||
138 | int rxdma_done; | ||
139 | u64 tx_param; | ||
140 | u64 rx_param; | ||
141 | struct device *dma_dev; | ||
142 | dma_addr_t dma_addr; | ||
143 | |||
144 | /* Bus interface info */ | ||
145 | void *priv; | ||
146 | #ifdef CONFIG_DEBUG_FS | ||
147 | struct dentry *debugfs; | ||
148 | #endif | ||
149 | }; | ||
150 | |||
151 | #define dw_readl(dw, name) \ | ||
152 | __raw_readl(&(((struct dw_spi_reg *)dw->regs)->name)) | ||
153 | #define dw_writel(dw, name, val) \ | ||
154 | __raw_writel((val), &(((struct dw_spi_reg *)dw->regs)->name)) | ||
155 | #define dw_readw(dw, name) \ | ||
156 | __raw_readw(&(((struct dw_spi_reg *)dw->regs)->name)) | ||
157 | #define dw_writew(dw, name, val) \ | ||
158 | __raw_writew((val), &(((struct dw_spi_reg *)dw->regs)->name)) | ||
159 | |||
160 | static inline void spi_enable_chip(struct dw_spi *dws, int enable) | ||
161 | { | ||
162 | dw_writel(dws, ssienr, (enable ? 1 : 0)); | ||
163 | } | ||
164 | |||
165 | static inline void spi_set_clk(struct dw_spi *dws, u16 div) | ||
166 | { | ||
167 | dw_writel(dws, baudr, div); | ||
168 | } | ||
169 | |||
170 | static inline void spi_chip_sel(struct dw_spi *dws, u16 cs) | ||
171 | { | ||
172 | if (cs > dws->num_cs) | ||
173 | return; | ||
174 | dw_writel(dws, ser, 1 << cs); | ||
175 | } | ||
176 | |||
177 | /* Disable IRQ bits */ | ||
178 | static inline void spi_mask_intr(struct dw_spi *dws, u32 mask) | ||
179 | { | ||
180 | u32 new_mask; | ||
181 | |||
182 | new_mask = dw_readl(dws, imr) & ~mask; | ||
183 | dw_writel(dws, imr, new_mask); | ||
184 | } | ||
185 | |||
186 | /* Enable IRQ bits */ | ||
187 | static inline void spi_umask_intr(struct dw_spi *dws, u32 mask) | ||
188 | { | ||
189 | u32 new_mask; | ||
190 | |||
191 | new_mask = dw_readl(dws, imr) | mask; | ||
192 | dw_writel(dws, imr, new_mask); | ||
193 | } | ||
194 | |||
195 | /* | ||
196 | * Each SPI slave device that works with the dw_spi controller should | ||
197 | * have such a structure claiming its working mode (PIO/DMA etc.), | ||
198 | * which can be saved in the "controller_data" member of | ||
199 | * struct spi_device | ||
200 | */ | ||
201 | struct dw_spi_chip { | ||
202 | u8 poll_mode; /* 0 for controller polling mode */ | ||
203 | u8 type; /* SPI/SSP/Microwire */ | ||
204 | u8 enable_dma; | ||
205 | void (*cs_control)(u32 command); | ||
206 | }; | ||
207 | |||
208 | extern int dw_spi_add_host(struct dw_spi *dws); | ||
209 | extern void dw_spi_remove_host(struct dw_spi *dws); | ||
210 | extern int dw_spi_suspend_host(struct dw_spi *dws); | ||
211 | extern int dw_spi_resume_host(struct dw_spi *dws); | ||
212 | #endif /* DW_SPI_HEADER_H */ | ||
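A sketch of how a board file would hand per-slave settings to this driver through the controller_data pointer described above; the slave name, clock rate, and registration path via struct spi_board_info are assumptions for illustration.

#include <linux/spi/spi.h>
#include <linux/spi/dw_spi.h>

static struct dw_spi_chip example_chip_info = {
	.poll_mode  = 0,		/* interrupt-driven transfers */
	.type       = SSI_MOTO_SPI,	/* Motorola SPI framing */
	.enable_dma = 0,
};

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias        = "example-spi-dev",	/* hypothetical slave */
		.max_speed_hz    = 1000000,
		.bus_num         = 0,
		.chip_select     = 0,
		.controller_data = &example_chip_info,	/* picked up by dw_spi */
	},
};

/* Registered from board init code with
 * spi_register_board_info(example_board_info, ARRAY_SIZE(example_board_info)).
 */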
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 71dccfeb0d88..86088213334a 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -8,13 +8,13 @@ | |||
8 | * | 8 | * |
9 | * on SMP builds: | 9 | * on SMP builds: |
10 | * | 10 | * |
11 | * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the | 11 | * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the |
12 | * initializers | 12 | * initializers |
13 | * | 13 | * |
14 | * linux/spinlock_types.h: | 14 | * linux/spinlock_types.h: |
15 | * defines the generic type and initializers | 15 | * defines the generic type and initializers |
16 | * | 16 | * |
17 | * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel | 17 | * asm/spinlock.h: contains the arch_spin_*()/etc. lowlevel |
18 | * implementations, mostly inline assembly code | 18 | * implementations, mostly inline assembly code |
19 | * | 19 | * |
20 | * (also included on UP-debug builds:) | 20 | * (also included on UP-debug builds:) |
@@ -34,7 +34,7 @@ | |||
34 | * defines the generic type and initializers | 34 | * defines the generic type and initializers |
35 | * | 35 | * |
36 | * linux/spinlock_up.h: | 36 | * linux/spinlock_up.h: |
37 | * contains the __raw_spin_*()/etc. version of UP | 37 | * contains the arch_spin_*()/etc. version of UP |
38 | * builds. (which are NOPs on non-debug, non-preempt | 38 | * builds. (which are NOPs on non-debug, non-preempt |
39 | * builds) | 39 | * builds) |
40 | * | 40 | * |
@@ -75,12 +75,12 @@ | |||
75 | #define __lockfunc __attribute__((section(".spinlock.text"))) | 75 | #define __lockfunc __attribute__((section(".spinlock.text"))) |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * Pull the raw_spinlock_t and raw_rwlock_t definitions: | 78 | * Pull the arch_spinlock_t and arch_rwlock_t definitions: |
79 | */ | 79 | */ |
80 | #include <linux/spinlock_types.h> | 80 | #include <linux/spinlock_types.h> |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them): | 83 | * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them): |
84 | */ | 84 | */ |
85 | #ifdef CONFIG_SMP | 85 | #ifdef CONFIG_SMP |
86 | # include <asm/spinlock.h> | 86 | # include <asm/spinlock.h> |
@@ -89,45 +89,31 @@ | |||
89 | #endif | 89 | #endif |
90 | 90 | ||
91 | #ifdef CONFIG_DEBUG_SPINLOCK | 91 | #ifdef CONFIG_DEBUG_SPINLOCK |
92 | extern void __spin_lock_init(spinlock_t *lock, const char *name, | 92 | extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, |
93 | struct lock_class_key *key); | 93 | struct lock_class_key *key); |
94 | # define spin_lock_init(lock) \ | 94 | # define raw_spin_lock_init(lock) \ |
95 | do { \ | 95 | do { \ |
96 | static struct lock_class_key __key; \ | 96 | static struct lock_class_key __key; \ |
97 | \ | 97 | \ |
98 | __spin_lock_init((lock), #lock, &__key); \ | 98 | __raw_spin_lock_init((lock), #lock, &__key); \ |
99 | } while (0) | 99 | } while (0) |
100 | 100 | ||
101 | #else | 101 | #else |
102 | # define spin_lock_init(lock) \ | 102 | # define raw_spin_lock_init(lock) \ |
103 | do { *(lock) = __SPIN_LOCK_UNLOCKED(lock); } while (0) | 103 | do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0) |
104 | #endif | 104 | #endif |
105 | 105 | ||
106 | #ifdef CONFIG_DEBUG_SPINLOCK | 106 | #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) |
107 | extern void __rwlock_init(rwlock_t *lock, const char *name, | ||
108 | struct lock_class_key *key); | ||
109 | # define rwlock_init(lock) \ | ||
110 | do { \ | ||
111 | static struct lock_class_key __key; \ | ||
112 | \ | ||
113 | __rwlock_init((lock), #lock, &__key); \ | ||
114 | } while (0) | ||
115 | #else | ||
116 | # define rwlock_init(lock) \ | ||
117 | do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0) | ||
118 | #endif | ||
119 | |||
120 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) | ||
121 | 107 | ||
122 | #ifdef CONFIG_GENERIC_LOCKBREAK | 108 | #ifdef CONFIG_GENERIC_LOCKBREAK |
123 | #define spin_is_contended(lock) ((lock)->break_lock) | 109 | #define raw_spin_is_contended(lock) ((lock)->break_lock) |
124 | #else | 110 | #else |
125 | 111 | ||
126 | #ifdef __raw_spin_is_contended | 112 | #ifdef arch_spin_is_contended |
127 | #define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) | 113 | #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) |
128 | #else | 114 | #else |
129 | #define spin_is_contended(lock) (((void)(lock), 0)) | 115 | #define raw_spin_is_contended(lock) (((void)(lock), 0)) |
130 | #endif /*__raw_spin_is_contended*/ | 116 | #endif /*arch_spin_is_contended*/ |
131 | #endif | 117 | #endif |
132 | 118 | ||
133 | /* The lock does not imply full memory barrier. */ | 119 | /* The lock does not imply full memory barrier. */ |
@@ -136,182 +122,260 @@ static inline void smp_mb__after_lock(void) { smp_mb(); } | |||
136 | #endif | 122 | #endif |
137 | 123 | ||
138 | /** | 124 | /** |
139 | * spin_unlock_wait - wait until the spinlock gets unlocked | 125 | * raw_spin_unlock_wait - wait until the spinlock gets unlocked |
140 | * @lock: the spinlock in question. | 126 | * @lock: the spinlock in question. |
141 | */ | 127 | */ |
142 | #define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock) | 128 | #define raw_spin_unlock_wait(lock) arch_spin_unlock_wait(&(lock)->raw_lock) |
143 | 129 | ||
144 | #ifdef CONFIG_DEBUG_SPINLOCK | 130 | #ifdef CONFIG_DEBUG_SPINLOCK |
145 | extern void _raw_spin_lock(spinlock_t *lock); | 131 | extern void do_raw_spin_lock(raw_spinlock_t *lock); |
146 | #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) | 132 | #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock) |
147 | extern int _raw_spin_trylock(spinlock_t *lock); | 133 | extern int do_raw_spin_trylock(raw_spinlock_t *lock); |
148 | extern void _raw_spin_unlock(spinlock_t *lock); | 134 | extern void do_raw_spin_unlock(raw_spinlock_t *lock); |
149 | extern void _raw_read_lock(rwlock_t *lock); | ||
150 | #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) | ||
151 | extern int _raw_read_trylock(rwlock_t *lock); | ||
152 | extern void _raw_read_unlock(rwlock_t *lock); | ||
153 | extern void _raw_write_lock(rwlock_t *lock); | ||
154 | #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) | ||
155 | extern int _raw_write_trylock(rwlock_t *lock); | ||
156 | extern void _raw_write_unlock(rwlock_t *lock); | ||
157 | #else | 135 | #else |
158 | # define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock) | 136 | static inline void do_raw_spin_lock(raw_spinlock_t *lock) |
159 | # define _raw_spin_lock_flags(lock, flags) \ | 137 | { |
160 | __raw_spin_lock_flags(&(lock)->raw_lock, *(flags)) | 138 | arch_spin_lock(&lock->raw_lock); |
161 | # define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock) | 139 | } |
162 | # define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock) | 140 | |
163 | # define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock) | 141 | static inline void |
164 | # define _raw_read_lock_flags(lock, flags) \ | 142 | do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) |
165 | __raw_read_lock_flags(&(lock)->raw_lock, *(flags)) | 143 | { |
166 | # define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock) | 144 | arch_spin_lock_flags(&lock->raw_lock, *flags); |
167 | # define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock) | 145 | } |
168 | # define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock) | 146 | |
169 | # define _raw_write_lock_flags(lock, flags) \ | 147 | static inline int do_raw_spin_trylock(raw_spinlock_t *lock) |
170 | __raw_write_lock_flags(&(lock)->raw_lock, *(flags)) | 148 | { |
171 | # define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock) | 149 | return arch_spin_trylock(&(lock)->raw_lock); |
172 | # define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock) | 150 | } |
151 | |||
152 | static inline void do_raw_spin_unlock(raw_spinlock_t *lock) | ||
153 | { | ||
154 | arch_spin_unlock(&lock->raw_lock); | ||
155 | } | ||
173 | #endif | 156 | #endif |
174 | 157 | ||
175 | #define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock) | ||
176 | #define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock) | ||
177 | |||
178 | /* | 158 | /* |
179 | * Define the various spin_lock and rw_lock methods. Note we define these | 159 | * Define the various spin_lock methods. Note we define these |
180 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various | 160 | * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The |
181 | * methods are defined as nops in the case they are not required. | 161 | * various methods are defined as nops in the case they are not |
162 | * required. | ||
182 | */ | 163 | */ |
183 | #define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock)) | 164 | #define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock)) |
184 | #define read_trylock(lock) __cond_lock(lock, _read_trylock(lock)) | ||
185 | #define write_trylock(lock) __cond_lock(lock, _write_trylock(lock)) | ||
186 | 165 | ||
187 | #define spin_lock(lock) _spin_lock(lock) | 166 | #define raw_spin_lock(lock) _raw_spin_lock(lock) |
188 | 167 | ||
189 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 168 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
190 | # define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass) | 169 | # define raw_spin_lock_nested(lock, subclass) \ |
191 | # define spin_lock_nest_lock(lock, nest_lock) \ | 170 | _raw_spin_lock_nested(lock, subclass) |
171 | |||
172 | # define raw_spin_lock_nest_lock(lock, nest_lock) \ | ||
192 | do { \ | 173 | do { \ |
193 | typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ | 174 | typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\ |
194 | _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ | 175 | _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ |
195 | } while (0) | 176 | } while (0) |
196 | #else | 177 | #else |
197 | # define spin_lock_nested(lock, subclass) _spin_lock(lock) | 178 | # define raw_spin_lock_nested(lock, subclass) _raw_spin_lock(lock) |
198 | # define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock) | 179 | # define raw_spin_lock_nest_lock(lock, nest_lock) _raw_spin_lock(lock) |
199 | #endif | 180 | #endif |
200 | 181 | ||
201 | #define write_lock(lock) _write_lock(lock) | ||
202 | #define read_lock(lock) _read_lock(lock) | ||
203 | |||
204 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | 182 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) |
205 | 183 | ||
206 | #define spin_lock_irqsave(lock, flags) \ | 184 | #define raw_spin_lock_irqsave(lock, flags) \ |
207 | do { \ | 185 | do { \ |
208 | typecheck(unsigned long, flags); \ | 186 | typecheck(unsigned long, flags); \ |
209 | flags = _spin_lock_irqsave(lock); \ | 187 | flags = _raw_spin_lock_irqsave(lock); \ |
210 | } while (0) | ||
211 | #define read_lock_irqsave(lock, flags) \ | ||
212 | do { \ | ||
213 | typecheck(unsigned long, flags); \ | ||
214 | flags = _read_lock_irqsave(lock); \ | ||
215 | } while (0) | ||
216 | #define write_lock_irqsave(lock, flags) \ | ||
217 | do { \ | ||
218 | typecheck(unsigned long, flags); \ | ||
219 | flags = _write_lock_irqsave(lock); \ | ||
220 | } while (0) | 188 | } while (0) |
221 | 189 | ||
222 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 190 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
223 | #define spin_lock_irqsave_nested(lock, flags, subclass) \ | 191 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
224 | do { \ | 192 | do { \ |
225 | typecheck(unsigned long, flags); \ | 193 | typecheck(unsigned long, flags); \ |
226 | flags = _spin_lock_irqsave_nested(lock, subclass); \ | 194 | flags = _raw_spin_lock_irqsave_nested(lock, subclass); \ |
227 | } while (0) | 195 | } while (0) |
228 | #else | 196 | #else |
229 | #define spin_lock_irqsave_nested(lock, flags, subclass) \ | 197 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
230 | do { \ | 198 | do { \ |
231 | typecheck(unsigned long, flags); \ | 199 | typecheck(unsigned long, flags); \ |
232 | flags = _spin_lock_irqsave(lock); \ | 200 | flags = _raw_spin_lock_irqsave(lock); \ |
233 | } while (0) | 201 | } while (0) |
234 | #endif | 202 | #endif |
235 | 203 | ||
236 | #else | 204 | #else |
237 | 205 | ||
238 | #define spin_lock_irqsave(lock, flags) \ | 206 | #define raw_spin_lock_irqsave(lock, flags) \ |
239 | do { \ | ||
240 | typecheck(unsigned long, flags); \ | ||
241 | _spin_lock_irqsave(lock, flags); \ | ||
242 | } while (0) | ||
243 | #define read_lock_irqsave(lock, flags) \ | ||
244 | do { \ | ||
245 | typecheck(unsigned long, flags); \ | ||
246 | _read_lock_irqsave(lock, flags); \ | ||
247 | } while (0) | ||
248 | #define write_lock_irqsave(lock, flags) \ | ||
249 | do { \ | 207 | do { \ |
250 | typecheck(unsigned long, flags); \ | 208 | typecheck(unsigned long, flags); \ |
251 | _write_lock_irqsave(lock, flags); \ | 209 | _raw_spin_lock_irqsave(lock, flags); \ |
252 | } while (0) | 210 | } while (0) |
253 | #define spin_lock_irqsave_nested(lock, flags, subclass) \ | ||
254 | spin_lock_irqsave(lock, flags) | ||
255 | 211 | ||
256 | #endif | 212 | #define raw_spin_lock_irqsave_nested(lock, flags, subclass) \ |
213 | raw_spin_lock_irqsave(lock, flags) | ||
257 | 214 | ||
258 | #define spin_lock_irq(lock) _spin_lock_irq(lock) | 215 | #endif |
259 | #define spin_lock_bh(lock) _spin_lock_bh(lock) | ||
260 | #define read_lock_irq(lock) _read_lock_irq(lock) | ||
261 | #define read_lock_bh(lock) _read_lock_bh(lock) | ||
262 | #define write_lock_irq(lock) _write_lock_irq(lock) | ||
263 | #define write_lock_bh(lock) _write_lock_bh(lock) | ||
264 | #define spin_unlock(lock) _spin_unlock(lock) | ||
265 | #define read_unlock(lock) _read_unlock(lock) | ||
266 | #define write_unlock(lock) _write_unlock(lock) | ||
267 | #define spin_unlock_irq(lock) _spin_unlock_irq(lock) | ||
268 | #define read_unlock_irq(lock) _read_unlock_irq(lock) | ||
269 | #define write_unlock_irq(lock) _write_unlock_irq(lock) | ||
270 | |||
271 | #define spin_unlock_irqrestore(lock, flags) \ | ||
272 | do { \ | ||
273 | typecheck(unsigned long, flags); \ | ||
274 | _spin_unlock_irqrestore(lock, flags); \ | ||
275 | } while (0) | ||
276 | #define spin_unlock_bh(lock) _spin_unlock_bh(lock) | ||
277 | 216 | ||
278 | #define read_unlock_irqrestore(lock, flags) \ | 217 | #define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock) |
279 | do { \ | 218 | #define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock) |
280 | typecheck(unsigned long, flags); \ | 219 | #define raw_spin_unlock(lock) _raw_spin_unlock(lock) |
281 | _read_unlock_irqrestore(lock, flags); \ | 220 | #define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock) |
282 | } while (0) | ||
283 | #define read_unlock_bh(lock) _read_unlock_bh(lock) | ||
284 | 221 | ||
285 | #define write_unlock_irqrestore(lock, flags) \ | 222 | #define raw_spin_unlock_irqrestore(lock, flags) \ |
286 | do { \ | 223 | do { \ |
287 | typecheck(unsigned long, flags); \ | 224 | typecheck(unsigned long, flags); \ |
288 | _write_unlock_irqrestore(lock, flags); \ | 225 | _raw_spin_unlock_irqrestore(lock, flags); \ |
289 | } while (0) | 226 | } while (0) |
290 | #define write_unlock_bh(lock) _write_unlock_bh(lock) | 227 | #define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock) |
291 | 228 | ||
292 | #define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock)) | 229 | #define raw_spin_trylock_bh(lock) \ |
230 | __cond_lock(lock, _raw_spin_trylock_bh(lock)) | ||
293 | 231 | ||
294 | #define spin_trylock_irq(lock) \ | 232 | #define raw_spin_trylock_irq(lock) \ |
295 | ({ \ | 233 | ({ \ |
296 | local_irq_disable(); \ | 234 | local_irq_disable(); \ |
297 | spin_trylock(lock) ? \ | 235 | raw_spin_trylock(lock) ? \ |
298 | 1 : ({ local_irq_enable(); 0; }); \ | 236 | 1 : ({ local_irq_enable(); 0; }); \ |
299 | }) | 237 | }) |
300 | 238 | ||
301 | #define spin_trylock_irqsave(lock, flags) \ | 239 | #define raw_spin_trylock_irqsave(lock, flags) \ |
302 | ({ \ | 240 | ({ \ |
303 | local_irq_save(flags); \ | 241 | local_irq_save(flags); \ |
304 | spin_trylock(lock) ? \ | 242 | raw_spin_trylock(lock) ? \ |
305 | 1 : ({ local_irq_restore(flags); 0; }); \ | 243 | 1 : ({ local_irq_restore(flags); 0; }); \ |
306 | }) | 244 | }) |
307 | 245 | ||
308 | #define write_trylock_irqsave(lock, flags) \ | 246 | /** |
309 | ({ \ | 247 | * raw_spin_can_lock - would raw_spin_trylock() succeed? |
310 | local_irq_save(flags); \ | 248 | * @lock: the spinlock in question. |
311 | write_trylock(lock) ? \ | 249 | */ |
312 | 1 : ({ local_irq_restore(flags); 0; }); \ | 250 | #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) |
251 | |||
252 | /* Include rwlock functions */ | ||
253 | #include <linux/rwlock.h> | ||
254 | |||
255 | /* | ||
256 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: | ||
257 | */ | ||
258 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
259 | # include <linux/spinlock_api_smp.h> | ||
260 | #else | ||
261 | # include <linux/spinlock_api_up.h> | ||
262 | #endif | ||
263 | |||
264 | /* | ||
265 | * Map the spin_lock functions to the raw variants for PREEMPT_RT=n | ||
266 | */ | ||
267 | |||
268 | static inline raw_spinlock_t *spinlock_check(spinlock_t *lock) | ||
269 | { | ||
270 | return &lock->rlock; | ||
271 | } | ||
272 | |||
273 | #define spin_lock_init(_lock) \ | ||
274 | do { \ | ||
275 | spinlock_check(_lock); \ | ||
276 | raw_spin_lock_init(&(_lock)->rlock); \ | ||
277 | } while (0) | ||
278 | |||
279 | static inline void spin_lock(spinlock_t *lock) | ||
280 | { | ||
281 | raw_spin_lock(&lock->rlock); | ||
282 | } | ||
283 | |||
284 | static inline void spin_lock_bh(spinlock_t *lock) | ||
285 | { | ||
286 | raw_spin_lock_bh(&lock->rlock); | ||
287 | } | ||
288 | |||
289 | static inline int spin_trylock(spinlock_t *lock) | ||
290 | { | ||
291 | return raw_spin_trylock(&lock->rlock); | ||
292 | } | ||
293 | |||
294 | #define spin_lock_nested(lock, subclass) \ | ||
295 | do { \ | ||
296 | raw_spin_lock_nested(spinlock_check(lock), subclass); \ | ||
297 | } while (0) | ||
298 | |||
299 | #define spin_lock_nest_lock(lock, nest_lock) \ | ||
300 | do { \ | ||
301 | raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \ | ||
302 | } while (0) | ||
303 | |||
304 | static inline void spin_lock_irq(spinlock_t *lock) | ||
305 | { | ||
306 | raw_spin_lock_irq(&lock->rlock); | ||
307 | } | ||
308 | |||
309 | #define spin_lock_irqsave(lock, flags) \ | ||
310 | do { \ | ||
311 | raw_spin_lock_irqsave(spinlock_check(lock), flags); \ | ||
312 | } while (0) | ||
313 | |||
314 | #define spin_lock_irqsave_nested(lock, flags, subclass) \ | ||
315 | do { \ | ||
316 | raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \ | ||
317 | } while (0) | ||
318 | |||
319 | static inline void spin_unlock(spinlock_t *lock) | ||
320 | { | ||
321 | raw_spin_unlock(&lock->rlock); | ||
322 | } | ||
323 | |||
324 | static inline void spin_unlock_bh(spinlock_t *lock) | ||
325 | { | ||
326 | raw_spin_unlock_bh(&lock->rlock); | ||
327 | } | ||
328 | |||
329 | static inline void spin_unlock_irq(spinlock_t *lock) | ||
330 | { | ||
331 | raw_spin_unlock_irq(&lock->rlock); | ||
332 | } | ||
333 | |||
334 | static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | ||
335 | { | ||
336 | raw_spin_unlock_irqrestore(&lock->rlock, flags); | ||
337 | } | ||
338 | |||
339 | static inline int spin_trylock_bh(spinlock_t *lock) | ||
340 | { | ||
341 | return raw_spin_trylock_bh(&lock->rlock); | ||
342 | } | ||
343 | |||
344 | static inline int spin_trylock_irq(spinlock_t *lock) | ||
345 | { | ||
346 | return raw_spin_trylock_irq(&lock->rlock); | ||
347 | } | ||
348 | |||
349 | #define spin_trylock_irqsave(lock, flags) \ | ||
350 | ({ \ | ||
351 | raw_spin_trylock_irqsave(spinlock_check(lock), flags); \ | ||
313 | }) | 352 | }) |
314 | 353 | ||
354 | static inline void spin_unlock_wait(spinlock_t *lock) | ||
355 | { | ||
356 | raw_spin_unlock_wait(&lock->rlock); | ||
357 | } | ||
358 | |||
359 | static inline int spin_is_locked(spinlock_t *lock) | ||
360 | { | ||
361 | return raw_spin_is_locked(&lock->rlock); | ||
362 | } | ||
363 | |||
364 | static inline int spin_is_contended(spinlock_t *lock) | ||
365 | { | ||
366 | return raw_spin_is_contended(&lock->rlock); | ||
367 | } | ||
368 | |||
369 | static inline int spin_can_lock(spinlock_t *lock) | ||
370 | { | ||
371 | return raw_spin_can_lock(&lock->rlock); | ||
372 | } | ||
373 | |||
374 | static inline void assert_spin_locked(spinlock_t *lock) | ||
375 | { | ||
376 | assert_raw_spin_locked(&lock->rlock); | ||
377 | } | ||
378 | |||
315 | /* | 379 | /* |
316 | * Pull the atomic_t declaration: | 380 | * Pull the atomic_t declaration: |
317 | * (asm-mips/atomic.h needs above definitions) | 381 | * (asm-mips/atomic.h needs above definitions) |
@@ -329,19 +393,4 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | |||
329 | #define atomic_dec_and_lock(atomic, lock) \ | 393 | #define atomic_dec_and_lock(atomic, lock) \ |
330 | __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) | 394 | __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) |
331 | 395 | ||
332 | /** | ||
333 | * spin_can_lock - would spin_trylock() succeed? | ||
334 | * @lock: the spinlock in question. | ||
335 | */ | ||
336 | #define spin_can_lock(lock) (!spin_is_locked(lock)) | ||
337 | |||
338 | /* | ||
339 | * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: | ||
340 | */ | ||
341 | #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) | ||
342 | # include <linux/spinlock_api_smp.h> | ||
343 | #else | ||
344 | # include <linux/spinlock_api_up.h> | ||
345 | #endif | ||
346 | |||
347 | #endif /* __LINUX_SPINLOCK_H */ | 396 | #endif /* __LINUX_SPINLOCK_H */ |
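After this split, spinlock_t wraps a raw_spinlock_t in its ->rlock member and the spin_*() wrappers above map straight onto raw_spin_*(). A minimal consumer sketch, assuming DEFINE_SPINLOCK() from <linux/spinlock_types.h>; the lock and counter names are hypothetical.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* ordinary spinlock_t */
static unsigned long example_counter;

static void example_inc(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);	/* maps to raw_spin_lock_irqsave() */
	example_counter++;
	spin_unlock_irqrestore(&example_lock, flags);
}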
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 8264a7f459bc..e253ccd7a604 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h | |||
@@ -17,165 +17,76 @@ | |||
17 | 17 | ||
18 | int in_lock_functions(unsigned long addr); | 18 | int in_lock_functions(unsigned long addr); |
19 | 19 | ||
20 | #define assert_spin_locked(x) BUG_ON(!spin_is_locked(x)) | 20 | #define assert_raw_spin_locked(x) BUG_ON(!raw_spin_is_locked(x)) |
21 | 21 | ||
22 | void __lockfunc _spin_lock(spinlock_t *lock) __acquires(lock); | 22 | void __lockfunc _raw_spin_lock(raw_spinlock_t *lock) __acquires(lock); |
23 | void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass) | 23 | void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) |
24 | __acquires(lock); | 24 | __acquires(lock); |
25 | void __lockfunc _spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *map) | 25 | void __lockfunc |
26 | __acquires(lock); | 26 | _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) |
27 | void __lockfunc _read_lock(rwlock_t *lock) __acquires(lock); | 27 | __acquires(lock); |
28 | void __lockfunc _write_lock(rwlock_t *lock) __acquires(lock); | 28 | void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock) __acquires(lock); |
29 | void __lockfunc _spin_lock_bh(spinlock_t *lock) __acquires(lock); | 29 | void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock) |
30 | void __lockfunc _read_lock_bh(rwlock_t *lock) __acquires(lock); | 30 | __acquires(lock); |
31 | void __lockfunc _write_lock_bh(rwlock_t *lock) __acquires(lock); | 31 | |
32 | void __lockfunc _spin_lock_irq(spinlock_t *lock) __acquires(lock); | 32 | unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock) |
33 | void __lockfunc _read_lock_irq(rwlock_t *lock) __acquires(lock); | 33 | __acquires(lock); |
34 | void __lockfunc _write_lock_irq(rwlock_t *lock) __acquires(lock); | 34 | unsigned long __lockfunc |
35 | unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) | 35 | _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) |
36 | __acquires(lock); | 36 | __acquires(lock); |
37 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) | 37 | int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock); |
38 | __acquires(lock); | 38 | int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock); |
39 | unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | 39 | void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock) __releases(lock); |
40 | __acquires(lock); | 40 | void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) __releases(lock); |
41 | unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | 41 | void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock) __releases(lock); |
42 | __acquires(lock); | 42 | void __lockfunc |
43 | int __lockfunc _spin_trylock(spinlock_t *lock); | 43 | _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) |
44 | int __lockfunc _read_trylock(rwlock_t *lock); | 44 | __releases(lock); |
45 | int __lockfunc _write_trylock(rwlock_t *lock); | ||
46 | int __lockfunc _spin_trylock_bh(spinlock_t *lock); | ||
47 | void __lockfunc _spin_unlock(spinlock_t *lock) __releases(lock); | ||
48 | void __lockfunc _read_unlock(rwlock_t *lock) __releases(lock); | ||
49 | void __lockfunc _write_unlock(rwlock_t *lock) __releases(lock); | ||
50 | void __lockfunc _spin_unlock_bh(spinlock_t *lock) __releases(lock); | ||
51 | void __lockfunc _read_unlock_bh(rwlock_t *lock) __releases(lock); | ||
52 | void __lockfunc _write_unlock_bh(rwlock_t *lock) __releases(lock); | ||
53 | void __lockfunc _spin_unlock_irq(spinlock_t *lock) __releases(lock); | ||
54 | void __lockfunc _read_unlock_irq(rwlock_t *lock) __releases(lock); | ||
55 | void __lockfunc _write_unlock_irq(rwlock_t *lock) __releases(lock); | ||
56 | void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) | ||
57 | __releases(lock); | ||
58 | void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
59 | __releases(lock); | ||
60 | void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
61 | __releases(lock); | ||
62 | 45 | ||
63 | #ifdef CONFIG_INLINE_SPIN_LOCK | 46 | #ifdef CONFIG_INLINE_SPIN_LOCK |
64 | #define _spin_lock(lock) __spin_lock(lock) | 47 | #define _raw_spin_lock(lock) __raw_spin_lock(lock) |
65 | #endif | ||
66 | |||
67 | #ifdef CONFIG_INLINE_READ_LOCK | ||
68 | #define _read_lock(lock) __read_lock(lock) | ||
69 | #endif | ||
70 | |||
71 | #ifdef CONFIG_INLINE_WRITE_LOCK | ||
72 | #define _write_lock(lock) __write_lock(lock) | ||
73 | #endif | 48 | #endif |
74 | 49 | ||
75 | #ifdef CONFIG_INLINE_SPIN_LOCK_BH | 50 | #ifdef CONFIG_INLINE_SPIN_LOCK_BH |
76 | #define _spin_lock_bh(lock) __spin_lock_bh(lock) | 51 | #define _raw_spin_lock_bh(lock) __raw_spin_lock_bh(lock) |
77 | #endif | ||
78 | |||
79 | #ifdef CONFIG_INLINE_READ_LOCK_BH | ||
80 | #define _read_lock_bh(lock) __read_lock_bh(lock) | ||
81 | #endif | ||
82 | |||
83 | #ifdef CONFIG_INLINE_WRITE_LOCK_BH | ||
84 | #define _write_lock_bh(lock) __write_lock_bh(lock) | ||
85 | #endif | 52 | #endif |
86 | 53 | ||
87 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ | 54 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQ |
88 | #define _spin_lock_irq(lock) __spin_lock_irq(lock) | 55 | #define _raw_spin_lock_irq(lock) __raw_spin_lock_irq(lock) |
89 | #endif | ||
90 | |||
91 | #ifdef CONFIG_INLINE_READ_LOCK_IRQ | ||
92 | #define _read_lock_irq(lock) __read_lock_irq(lock) | ||
93 | #endif | ||
94 | |||
95 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQ | ||
96 | #define _write_lock_irq(lock) __write_lock_irq(lock) | ||
97 | #endif | 56 | #endif |
98 | 57 | ||
99 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE | 58 | #ifdef CONFIG_INLINE_SPIN_LOCK_IRQSAVE |
100 | #define _spin_lock_irqsave(lock) __spin_lock_irqsave(lock) | 59 | #define _raw_spin_lock_irqsave(lock) __raw_spin_lock_irqsave(lock) |
101 | #endif | ||
102 | |||
103 | #ifdef CONFIG_INLINE_READ_LOCK_IRQSAVE | ||
104 | #define _read_lock_irqsave(lock) __read_lock_irqsave(lock) | ||
105 | #endif | ||
106 | |||
107 | #ifdef CONFIG_INLINE_WRITE_LOCK_IRQSAVE | ||
108 | #define _write_lock_irqsave(lock) __write_lock_irqsave(lock) | ||
109 | #endif | 60 | #endif |
110 | 61 | ||
111 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK | 62 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK |
112 | #define _spin_trylock(lock) __spin_trylock(lock) | 63 | #define _raw_spin_trylock(lock) __raw_spin_trylock(lock) |
113 | #endif | ||
114 | |||
115 | #ifdef CONFIG_INLINE_READ_TRYLOCK | ||
116 | #define _read_trylock(lock) __read_trylock(lock) | ||
117 | #endif | ||
118 | |||
119 | #ifdef CONFIG_INLINE_WRITE_TRYLOCK | ||
120 | #define _write_trylock(lock) __write_trylock(lock) | ||
121 | #endif | 64 | #endif |
122 | 65 | ||
123 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH | 66 | #ifdef CONFIG_INLINE_SPIN_TRYLOCK_BH |
124 | #define _spin_trylock_bh(lock) __spin_trylock_bh(lock) | 67 | #define _raw_spin_trylock_bh(lock) __raw_spin_trylock_bh(lock) |
125 | #endif | 68 | #endif |
126 | 69 | ||
127 | #ifdef CONFIG_INLINE_SPIN_UNLOCK | 70 | #ifdef CONFIG_INLINE_SPIN_UNLOCK |
128 | #define _spin_unlock(lock) __spin_unlock(lock) | 71 | #define _raw_spin_unlock(lock) __raw_spin_unlock(lock) |
129 | #endif | ||
130 | |||
131 | #ifdef CONFIG_INLINE_READ_UNLOCK | ||
132 | #define _read_unlock(lock) __read_unlock(lock) | ||
133 | #endif | ||
134 | |||
135 | #ifdef CONFIG_INLINE_WRITE_UNLOCK | ||
136 | #define _write_unlock(lock) __write_unlock(lock) | ||
137 | #endif | 72 | #endif |
138 | 73 | ||
139 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH | 74 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_BH |
140 | #define _spin_unlock_bh(lock) __spin_unlock_bh(lock) | 75 | #define _raw_spin_unlock_bh(lock) __raw_spin_unlock_bh(lock) |
141 | #endif | ||
142 | |||
143 | #ifdef CONFIG_INLINE_READ_UNLOCK_BH | ||
144 | #define _read_unlock_bh(lock) __read_unlock_bh(lock) | ||
145 | #endif | ||
146 | |||
147 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_BH | ||
148 | #define _write_unlock_bh(lock) __write_unlock_bh(lock) | ||
149 | #endif | 76 | #endif |
150 | 77 | ||
151 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ | 78 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQ |
152 | #define _spin_unlock_irq(lock) __spin_unlock_irq(lock) | 79 | #define _raw_spin_unlock_irq(lock) __raw_spin_unlock_irq(lock) |
153 | #endif | ||
154 | |||
155 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQ | ||
156 | #define _read_unlock_irq(lock) __read_unlock_irq(lock) | ||
157 | #endif | ||
158 | |||
159 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQ | ||
160 | #define _write_unlock_irq(lock) __write_unlock_irq(lock) | ||
161 | #endif | 80 | #endif |
162 | 81 | ||
163 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE | 82 | #ifdef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE |
164 | #define _spin_unlock_irqrestore(lock, flags) __spin_unlock_irqrestore(lock, flags) | 83 | #define _raw_spin_unlock_irqrestore(lock, flags) __raw_spin_unlock_irqrestore(lock, flags) |
165 | #endif | ||
166 | |||
167 | #ifdef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE | ||
168 | #define _read_unlock_irqrestore(lock, flags) __read_unlock_irqrestore(lock, flags) | ||
169 | #endif | ||
170 | |||
171 | #ifdef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE | ||
172 | #define _write_unlock_irqrestore(lock, flags) __write_unlock_irqrestore(lock, flags) | ||
173 | #endif | 84 | #endif |
174 | 85 | ||
175 | static inline int __spin_trylock(spinlock_t *lock) | 86 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
176 | { | 87 | { |
177 | preempt_disable(); | 88 | preempt_disable(); |
178 | if (_raw_spin_trylock(lock)) { | 89 | if (do_raw_spin_trylock(lock)) { |
179 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | 90 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
180 | return 1; | 91 | return 1; |
181 | } | 92 | } |
@@ -183,28 +94,6 @@ static inline int __spin_trylock(spinlock_t *lock) | |||
183 | return 0; | 94 | return 0; |
184 | } | 95 | } |
185 | 96 | ||
186 | static inline int __read_trylock(rwlock_t *lock) | ||
187 | { | ||
188 | preempt_disable(); | ||
189 | if (_raw_read_trylock(lock)) { | ||
190 | rwlock_acquire_read(&lock->dep_map, 0, 1, _RET_IP_); | ||
191 | return 1; | ||
192 | } | ||
193 | preempt_enable(); | ||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | static inline int __write_trylock(rwlock_t *lock) | ||
198 | { | ||
199 | preempt_disable(); | ||
200 | if (_raw_write_trylock(lock)) { | ||
201 | rwlock_acquire(&lock->dep_map, 0, 1, _RET_IP_); | ||
202 | return 1; | ||
203 | } | ||
204 | preempt_enable(); | ||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | /* | 97 | /* |
209 | * If lockdep is enabled then we use the non-preemption spin-ops | 98 | * If lockdep is enabled then we use the non-preemption spin-ops |
210 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are | 99 | * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are |
@@ -212,14 +101,7 @@ static inline int __write_trylock(rwlock_t *lock) | |||
212 | */ | 101 | */ |
213 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) | 102 | #if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC) |
214 | 103 | ||
215 | static inline void __read_lock(rwlock_t *lock) | 104 | static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock) |
216 | { | ||
217 | preempt_disable(); | ||
218 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
219 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
220 | } | ||
221 | |||
222 | static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) | ||
223 | { | 105 | { |
224 | unsigned long flags; | 106 | unsigned long flags; |
225 | 107 | ||
@@ -228,205 +110,79 @@ static inline unsigned long __spin_lock_irqsave(spinlock_t *lock) | |||
228 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | 110 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
229 | /* | 111 | /* |
230 | * On lockdep we dont want the hand-coded irq-enable of | 112 | * On lockdep we dont want the hand-coded irq-enable of |
231 | * _raw_spin_lock_flags() code, because lockdep assumes | 113 | * do_raw_spin_lock_flags() code, because lockdep assumes |
232 | * that interrupts are not re-enabled during lock-acquire: | 114 | * that interrupts are not re-enabled during lock-acquire: |
233 | */ | 115 | */ |
234 | #ifdef CONFIG_LOCKDEP | 116 | #ifdef CONFIG_LOCKDEP |
235 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | 117 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
236 | #else | 118 | #else |
237 | _raw_spin_lock_flags(lock, &flags); | 119 | do_raw_spin_lock_flags(lock, &flags); |
238 | #endif | 120 | #endif |
239 | return flags; | 121 | return flags; |
240 | } | 122 | } |
241 | 123 | ||
242 | static inline void __spin_lock_irq(spinlock_t *lock) | 124 | static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) |
243 | { | 125 | { |
244 | local_irq_disable(); | 126 | local_irq_disable(); |
245 | preempt_disable(); | 127 | preempt_disable(); |
246 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | 128 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
247 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | 129 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
248 | } | 130 | } |
249 | 131 | ||
250 | static inline void __spin_lock_bh(spinlock_t *lock) | 132 | static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) |
251 | { | 133 | { |
252 | local_bh_disable(); | 134 | local_bh_disable(); |
253 | preempt_disable(); | 135 | preempt_disable(); |
254 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | 136 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
255 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | 137 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
256 | } | ||
257 | |||
258 | static inline unsigned long __read_lock_irqsave(rwlock_t *lock) | ||
259 | { | ||
260 | unsigned long flags; | ||
261 | |||
262 | local_irq_save(flags); | ||
263 | preempt_disable(); | ||
264 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
265 | LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, | ||
266 | _raw_read_lock_flags, &flags); | ||
267 | return flags; | ||
268 | } | ||
269 | |||
270 | static inline void __read_lock_irq(rwlock_t *lock) | ||
271 | { | ||
272 | local_irq_disable(); | ||
273 | preempt_disable(); | ||
274 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
275 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
276 | } | ||
277 | |||
278 | static inline void __read_lock_bh(rwlock_t *lock) | ||
279 | { | ||
280 | local_bh_disable(); | ||
281 | preempt_disable(); | ||
282 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | ||
283 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | ||
284 | } | ||
285 | |||
286 | static inline unsigned long __write_lock_irqsave(rwlock_t *lock) | ||
287 | { | ||
288 | unsigned long flags; | ||
289 | |||
290 | local_irq_save(flags); | ||
291 | preempt_disable(); | ||
292 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
293 | LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, | ||
294 | _raw_write_lock_flags, &flags); | ||
295 | return flags; | ||
296 | } | ||
297 | |||
298 | static inline void __write_lock_irq(rwlock_t *lock) | ||
299 | { | ||
300 | local_irq_disable(); | ||
301 | preempt_disable(); | ||
302 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
303 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
304 | } | 138 | } |
305 | 139 | ||
306 | static inline void __write_lock_bh(rwlock_t *lock) | 140 | static inline void __raw_spin_lock(raw_spinlock_t *lock) |
307 | { | ||
308 | local_bh_disable(); | ||
309 | preempt_disable(); | ||
310 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
311 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
312 | } | ||
313 | |||
314 | static inline void __spin_lock(spinlock_t *lock) | ||
315 | { | 141 | { |
316 | preempt_disable(); | 142 | preempt_disable(); |
317 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | 143 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
318 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | 144 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
319 | } | ||
320 | |||
321 | static inline void __write_lock(rwlock_t *lock) | ||
322 | { | ||
323 | preempt_disable(); | ||
324 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | ||
325 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | ||
326 | } | 145 | } |
327 | 146 | ||
328 | #endif /* CONFIG_PREEMPT */ | 147 | #endif /* CONFIG_PREEMPT */ |
329 | 148 | ||
330 | static inline void __spin_unlock(spinlock_t *lock) | 149 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) |
331 | { | 150 | { |
332 | spin_release(&lock->dep_map, 1, _RET_IP_); | 151 | spin_release(&lock->dep_map, 1, _RET_IP_); |
333 | _raw_spin_unlock(lock); | 152 | do_raw_spin_unlock(lock); |
334 | preempt_enable(); | ||
335 | } | ||
336 | |||
337 | static inline void __write_unlock(rwlock_t *lock) | ||
338 | { | ||
339 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
340 | _raw_write_unlock(lock); | ||
341 | preempt_enable(); | ||
342 | } | ||
343 | |||
344 | static inline void __read_unlock(rwlock_t *lock) | ||
345 | { | ||
346 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
347 | _raw_read_unlock(lock); | ||
348 | preempt_enable(); | 153 | preempt_enable(); |
349 | } | 154 | } |
350 | 155 | ||
351 | static inline void __spin_unlock_irqrestore(spinlock_t *lock, | 156 | static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, |
352 | unsigned long flags) | 157 | unsigned long flags) |
353 | { | 158 | { |
354 | spin_release(&lock->dep_map, 1, _RET_IP_); | 159 | spin_release(&lock->dep_map, 1, _RET_IP_); |
355 | _raw_spin_unlock(lock); | 160 | do_raw_spin_unlock(lock); |
356 | local_irq_restore(flags); | 161 | local_irq_restore(flags); |
357 | preempt_enable(); | 162 | preempt_enable(); |
358 | } | 163 | } |
359 | 164 | ||
360 | static inline void __spin_unlock_irq(spinlock_t *lock) | 165 | static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock) |
361 | { | 166 | { |
362 | spin_release(&lock->dep_map, 1, _RET_IP_); | 167 | spin_release(&lock->dep_map, 1, _RET_IP_); |
363 | _raw_spin_unlock(lock); | 168 | do_raw_spin_unlock(lock); |
364 | local_irq_enable(); | 169 | local_irq_enable(); |
365 | preempt_enable(); | 170 | preempt_enable(); |
366 | } | 171 | } |
367 | 172 | ||
368 | static inline void __spin_unlock_bh(spinlock_t *lock) | 173 | static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) |
369 | { | 174 | { |
370 | spin_release(&lock->dep_map, 1, _RET_IP_); | 175 | spin_release(&lock->dep_map, 1, _RET_IP_); |
371 | _raw_spin_unlock(lock); | 176 | do_raw_spin_unlock(lock); |
372 | preempt_enable_no_resched(); | ||
373 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
374 | } | ||
375 | |||
376 | static inline void __read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) | ||
377 | { | ||
378 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
379 | _raw_read_unlock(lock); | ||
380 | local_irq_restore(flags); | ||
381 | preempt_enable(); | ||
382 | } | ||
383 | |||
384 | static inline void __read_unlock_irq(rwlock_t *lock) | ||
385 | { | ||
386 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
387 | _raw_read_unlock(lock); | ||
388 | local_irq_enable(); | ||
389 | preempt_enable(); | ||
390 | } | ||
391 | |||
392 | static inline void __read_unlock_bh(rwlock_t *lock) | ||
393 | { | ||
394 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
395 | _raw_read_unlock(lock); | ||
396 | preempt_enable_no_resched(); | 177 | preempt_enable_no_resched(); |
397 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | 178 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); |
398 | } | 179 | } |
399 | 180 | ||
400 | static inline void __write_unlock_irqrestore(rwlock_t *lock, | 181 | static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) |
401 | unsigned long flags) | ||
402 | { | ||
403 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
404 | _raw_write_unlock(lock); | ||
405 | local_irq_restore(flags); | ||
406 | preempt_enable(); | ||
407 | } | ||
408 | |||
409 | static inline void __write_unlock_irq(rwlock_t *lock) | ||
410 | { | ||
411 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
412 | _raw_write_unlock(lock); | ||
413 | local_irq_enable(); | ||
414 | preempt_enable(); | ||
415 | } | ||
416 | |||
417 | static inline void __write_unlock_bh(rwlock_t *lock) | ||
418 | { | ||
419 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | ||
420 | _raw_write_unlock(lock); | ||
421 | preempt_enable_no_resched(); | ||
422 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
423 | } | ||
424 | |||
425 | static inline int __spin_trylock_bh(spinlock_t *lock) | ||
426 | { | 182 | { |
427 | local_bh_disable(); | 183 | local_bh_disable(); |
428 | preempt_disable(); | 184 | preempt_disable(); |
429 | if (_raw_spin_trylock(lock)) { | 185 | if (do_raw_spin_trylock(lock)) { |
430 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | 186 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
431 | return 1; | 187 | return 1; |
432 | } | 188 | } |
@@ -435,4 +191,6 @@ static inline int __spin_trylock_bh(spinlock_t *lock) | |||
435 | return 0; | 191 | return 0; |
436 | } | 192 | } |
437 | 193 | ||
194 | #include <linux/rwlock_api_smp.h> | ||
195 | |||
438 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ | 196 | #endif /* __LINUX_SPINLOCK_API_SMP_H */ |
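With this change the out-of-line SMP entry points carry a _raw_ prefix and operate on raw_spinlock_t, and all rwlock declarations move to <linux/rwlock_api_smp.h>. Ordinary code keeps using the wrapper macros from <linux/spinlock.h>; a short usage sketch, assuming the raw_spin_lock_irqsave()/raw_spin_unlock_irqrestore() wrappers layered on top of these functions (lock and counter names are illustrative):

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RAW_SPINLOCK(hw_event_lock);      /* stays a spinning lock */
static u64 hw_event_count;

static void hw_event_note(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&hw_event_lock, flags);
        hw_event_count++;
        raw_spin_unlock_irqrestore(&hw_event_lock, flags);
}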
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index 04e1d3164576..af1f47229e70 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h | |||
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | #define in_lock_functions(ADDR) 0 | 17 | #define in_lock_functions(ADDR) 0 |
18 | 18 | ||
19 | #define assert_spin_locked(lock) do { (void)(lock); } while (0) | 19 | #define assert_raw_spin_locked(lock) do { (void)(lock); } while (0) |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * In the UP-nondebug case there's no real locking going on, so the | 22 | * In the UP-nondebug case there's no real locking going on, so the |
@@ -40,7 +40,8 @@ | |||
40 | do { preempt_enable(); __release(lock); (void)(lock); } while (0) | 40 | do { preempt_enable(); __release(lock); (void)(lock); } while (0) |
41 | 41 | ||
42 | #define __UNLOCK_BH(lock) \ | 42 | #define __UNLOCK_BH(lock) \ |
43 | do { preempt_enable_no_resched(); local_bh_enable(); __release(lock); (void)(lock); } while (0) | 43 | do { preempt_enable_no_resched(); local_bh_enable(); \ |
44 | __release(lock); (void)(lock); } while (0) | ||
44 | 45 | ||
45 | #define __UNLOCK_IRQ(lock) \ | 46 | #define __UNLOCK_IRQ(lock) \ |
46 | do { local_irq_enable(); __UNLOCK(lock); } while (0) | 47 | do { local_irq_enable(); __UNLOCK(lock); } while (0) |
@@ -48,34 +49,37 @@ | |||
48 | #define __UNLOCK_IRQRESTORE(lock, flags) \ | 49 | #define __UNLOCK_IRQRESTORE(lock, flags) \ |
49 | do { local_irq_restore(flags); __UNLOCK(lock); } while (0) | 50 | do { local_irq_restore(flags); __UNLOCK(lock); } while (0) |
50 | 51 | ||
51 | #define _spin_lock(lock) __LOCK(lock) | 52 | #define _raw_spin_lock(lock) __LOCK(lock) |
52 | #define _spin_lock_nested(lock, subclass) __LOCK(lock) | 53 | #define _raw_spin_lock_nested(lock, subclass) __LOCK(lock) |
53 | #define _read_lock(lock) __LOCK(lock) | 54 | #define _raw_read_lock(lock) __LOCK(lock) |
54 | #define _write_lock(lock) __LOCK(lock) | 55 | #define _raw_write_lock(lock) __LOCK(lock) |
55 | #define _spin_lock_bh(lock) __LOCK_BH(lock) | 56 | #define _raw_spin_lock_bh(lock) __LOCK_BH(lock) |
56 | #define _read_lock_bh(lock) __LOCK_BH(lock) | 57 | #define _raw_read_lock_bh(lock) __LOCK_BH(lock) |
57 | #define _write_lock_bh(lock) __LOCK_BH(lock) | 58 | #define _raw_write_lock_bh(lock) __LOCK_BH(lock) |
58 | #define _spin_lock_irq(lock) __LOCK_IRQ(lock) | 59 | #define _raw_spin_lock_irq(lock) __LOCK_IRQ(lock) |
59 | #define _read_lock_irq(lock) __LOCK_IRQ(lock) | 60 | #define _raw_read_lock_irq(lock) __LOCK_IRQ(lock) |
60 | #define _write_lock_irq(lock) __LOCK_IRQ(lock) | 61 | #define _raw_write_lock_irq(lock) __LOCK_IRQ(lock) |
61 | #define _spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | 62 | #define _raw_spin_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
62 | #define _read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | 63 | #define _raw_read_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
63 | #define _write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) | 64 | #define _raw_write_lock_irqsave(lock, flags) __LOCK_IRQSAVE(lock, flags) |
64 | #define _spin_trylock(lock) ({ __LOCK(lock); 1; }) | 65 | #define _raw_spin_trylock(lock) ({ __LOCK(lock); 1; }) |
65 | #define _read_trylock(lock) ({ __LOCK(lock); 1; }) | 66 | #define _raw_read_trylock(lock) ({ __LOCK(lock); 1; }) |
66 | #define _write_trylock(lock) ({ __LOCK(lock); 1; }) | 67 | #define _raw_write_trylock(lock) ({ __LOCK(lock); 1; }) |
67 | #define _spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) | 68 | #define _raw_spin_trylock_bh(lock) ({ __LOCK_BH(lock); 1; }) |
68 | #define _spin_unlock(lock) __UNLOCK(lock) | 69 | #define _raw_spin_unlock(lock) __UNLOCK(lock) |
69 | #define _read_unlock(lock) __UNLOCK(lock) | 70 | #define _raw_read_unlock(lock) __UNLOCK(lock) |
70 | #define _write_unlock(lock) __UNLOCK(lock) | 71 | #define _raw_write_unlock(lock) __UNLOCK(lock) |
71 | #define _spin_unlock_bh(lock) __UNLOCK_BH(lock) | 72 | #define _raw_spin_unlock_bh(lock) __UNLOCK_BH(lock) |
72 | #define _write_unlock_bh(lock) __UNLOCK_BH(lock) | 73 | #define _raw_write_unlock_bh(lock) __UNLOCK_BH(lock) |
73 | #define _read_unlock_bh(lock) __UNLOCK_BH(lock) | 74 | #define _raw_read_unlock_bh(lock) __UNLOCK_BH(lock) |
74 | #define _spin_unlock_irq(lock) __UNLOCK_IRQ(lock) | 75 | #define _raw_spin_unlock_irq(lock) __UNLOCK_IRQ(lock) |
75 | #define _read_unlock_irq(lock) __UNLOCK_IRQ(lock) | 76 | #define _raw_read_unlock_irq(lock) __UNLOCK_IRQ(lock) |
76 | #define _write_unlock_irq(lock) __UNLOCK_IRQ(lock) | 77 | #define _raw_write_unlock_irq(lock) __UNLOCK_IRQ(lock) |
77 | #define _spin_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | 78 | #define _raw_spin_unlock_irqrestore(lock, flags) \ |
78 | #define _read_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | 79 | __UNLOCK_IRQRESTORE(lock, flags) |
79 | #define _write_unlock_irqrestore(lock, flags) __UNLOCK_IRQRESTORE(lock, flags) | 80 | #define _raw_read_unlock_irqrestore(lock, flags) \ |
81 | __UNLOCK_IRQRESTORE(lock, flags) | ||
82 | #define _raw_write_unlock_irqrestore(lock, flags) \ | ||
83 | __UNLOCK_IRQRESTORE(lock, flags) | ||
80 | 84 | ||
81 | #endif /* __LINUX_SPINLOCK_API_UP_H */ | 85 | #endif /* __LINUX_SPINLOCK_API_UP_H */ |
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 68d88f71f1a2..851b7783720d 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h | |||
@@ -17,8 +17,8 @@ | |||
17 | 17 | ||
18 | #include <linux/lockdep.h> | 18 | #include <linux/lockdep.h> |
19 | 19 | ||
20 | typedef struct { | 20 | typedef struct raw_spinlock { |
21 | raw_spinlock_t raw_lock; | 21 | arch_spinlock_t raw_lock; |
22 | #ifdef CONFIG_GENERIC_LOCKBREAK | 22 | #ifdef CONFIG_GENERIC_LOCKBREAK |
23 | unsigned int break_lock; | 23 | unsigned int break_lock; |
24 | #endif | 24 | #endif |
@@ -29,26 +29,10 @@ typedef struct { | |||
29 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 29 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
30 | struct lockdep_map dep_map; | 30 | struct lockdep_map dep_map; |
31 | #endif | 31 | #endif |
32 | } spinlock_t; | 32 | } raw_spinlock_t; |
33 | 33 | ||
34 | #define SPINLOCK_MAGIC 0xdead4ead | 34 | #define SPINLOCK_MAGIC 0xdead4ead |
35 | 35 | ||
36 | typedef struct { | ||
37 | raw_rwlock_t raw_lock; | ||
38 | #ifdef CONFIG_GENERIC_LOCKBREAK | ||
39 | unsigned int break_lock; | ||
40 | #endif | ||
41 | #ifdef CONFIG_DEBUG_SPINLOCK | ||
42 | unsigned int magic, owner_cpu; | ||
43 | void *owner; | ||
44 | #endif | ||
45 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
46 | struct lockdep_map dep_map; | ||
47 | #endif | ||
48 | } rwlock_t; | ||
49 | |||
50 | #define RWLOCK_MAGIC 0xdeaf1eed | ||
51 | |||
52 | #define SPINLOCK_OWNER_INIT ((void *)-1L) | 36 | #define SPINLOCK_OWNER_INIT ((void *)-1L) |
53 | 37 | ||
54 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 38 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
@@ -57,44 +41,56 @@ typedef struct { | |||
57 | # define SPIN_DEP_MAP_INIT(lockname) | 41 | # define SPIN_DEP_MAP_INIT(lockname) |
58 | #endif | 42 | #endif |
59 | 43 | ||
60 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 44 | #ifdef CONFIG_DEBUG_SPINLOCK |
61 | # define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } | 45 | # define SPIN_DEBUG_INIT(lockname) \ |
46 | .magic = SPINLOCK_MAGIC, \ | ||
47 | .owner_cpu = -1, \ | ||
48 | .owner = SPINLOCK_OWNER_INIT, | ||
62 | #else | 49 | #else |
63 | # define RW_DEP_MAP_INIT(lockname) | 50 | # define SPIN_DEBUG_INIT(lockname) |
64 | #endif | 51 | #endif |
65 | 52 | ||
66 | #ifdef CONFIG_DEBUG_SPINLOCK | 53 | #define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ |
67 | # define __SPIN_LOCK_UNLOCKED(lockname) \ | 54 | { \ |
68 | (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ | 55 | .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ |
69 | .magic = SPINLOCK_MAGIC, \ | 56 | SPIN_DEBUG_INIT(lockname) \ |
70 | .owner = SPINLOCK_OWNER_INIT, \ | 57 | SPIN_DEP_MAP_INIT(lockname) } |
71 | .owner_cpu = -1, \ | 58 | |
72 | SPIN_DEP_MAP_INIT(lockname) } | 59 | #define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ |
73 | #define __RW_LOCK_UNLOCKED(lockname) \ | 60 | (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) |
74 | (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ | 61 | |
75 | .magic = RWLOCK_MAGIC, \ | 62 | #define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) |
76 | .owner = SPINLOCK_OWNER_INIT, \ | 63 | |
77 | .owner_cpu = -1, \ | 64 | typedef struct spinlock { |
78 | RW_DEP_MAP_INIT(lockname) } | 65 | union { |
79 | #else | 66 | struct raw_spinlock rlock; |
80 | # define __SPIN_LOCK_UNLOCKED(lockname) \ | 67 | |
81 | (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \ | 68 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
82 | SPIN_DEP_MAP_INIT(lockname) } | 69 | # define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) |
83 | #define __RW_LOCK_UNLOCKED(lockname) \ | 70 | struct { |
84 | (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \ | 71 | u8 __padding[LOCK_PADSIZE]; |
85 | RW_DEP_MAP_INIT(lockname) } | 72 | struct lockdep_map dep_map; |
73 | }; | ||
86 | #endif | 74 | #endif |
75 | }; | ||
76 | } spinlock_t; | ||
77 | |||
78 | #define __SPIN_LOCK_INITIALIZER(lockname) \ | ||
79 | { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } | ||
80 | |||
81 | #define __SPIN_LOCK_UNLOCKED(lockname) \ | ||
82 | (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) | ||
87 | 83 | ||
88 | /* | 84 | /* |
89 | * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and | 85 | * SPIN_LOCK_UNLOCKED defeats lockdep state tracking and is hence |
90 | * are hence deprecated. | 86 | * deprecated. |
91 | * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or | 87 | * Please use DEFINE_SPINLOCK() or __SPIN_LOCK_UNLOCKED() as |
92 | * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate. | 88 | * appropriate. |
93 | */ | 89 | */ |
94 | #define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) | 90 | #define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init) |
95 | #define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init) | ||
96 | 91 | ||
97 | #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) | 92 | #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) |
98 | #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) | 93 | |
94 | #include <linux/rwlock_types.h> | ||
99 | 95 | ||
100 | #endif /* __LINUX_SPINLOCK_TYPES_H */ | 96 | #endif /* __LINUX_SPINLOCK_TYPES_H */ |
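spinlock_t is now a thin wrapper around struct raw_spinlock: the anonymous union keeps the lockdep dep_map at the same offset in both types, so &lock->rlock can be handed straight to the raw API, and the rwlock type moves out to <linux/rwlock_types.h>. A short sketch of how the two initializers are meant to be used (structure and names are illustrative):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(obj_list_lock);          /* ordinary spinlock_t */
static DEFINE_RAW_SPINLOCK(timebase_lock);      /* raw_spinlock_t, never converted */

struct my_obj {                 /* run-time init for a lock embedded in an object */
        spinlock_t lock;
        int value;
};

static void my_obj_init(struct my_obj *obj)
{
        spin_lock_init(&obj->lock);
        obj->value = 0;
}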
diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 04135b0e198e..c09b6407ae1b 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h | |||
@@ -16,22 +16,22 @@ | |||
16 | 16 | ||
17 | typedef struct { | 17 | typedef struct { |
18 | volatile unsigned int slock; | 18 | volatile unsigned int slock; |
19 | } raw_spinlock_t; | 19 | } arch_spinlock_t; |
20 | 20 | ||
21 | #define __RAW_SPIN_LOCK_UNLOCKED { 1 } | 21 | #define __ARCH_SPIN_LOCK_UNLOCKED { 1 } |
22 | 22 | ||
23 | #else | 23 | #else |
24 | 24 | ||
25 | typedef struct { } raw_spinlock_t; | 25 | typedef struct { } arch_spinlock_t; |
26 | 26 | ||
27 | #define __RAW_SPIN_LOCK_UNLOCKED { } | 27 | #define __ARCH_SPIN_LOCK_UNLOCKED { } |
28 | 28 | ||
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | typedef struct { | 31 | typedef struct { |
32 | /* no debug version on UP */ | 32 | /* no debug version on UP */ |
33 | } raw_rwlock_t; | 33 | } arch_rwlock_t; |
34 | 34 | ||
35 | #define __RAW_RW_LOCK_UNLOCKED { } | 35 | #define __ARCH_RW_LOCK_UNLOCKED { } |
36 | 36 | ||
37 | #endif /* __LINUX_SPINLOCK_TYPES_UP_H */ | 37 | #endif /* __LINUX_SPINLOCK_TYPES_UP_H */ |
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index d4841ed8215b..b14f6a91e19f 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h | |||
@@ -18,21 +18,21 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #ifdef CONFIG_DEBUG_SPINLOCK | 20 | #ifdef CONFIG_DEBUG_SPINLOCK |
21 | #define __raw_spin_is_locked(x) ((x)->slock == 0) | 21 | #define arch_spin_is_locked(x) ((x)->slock == 0) |
22 | 22 | ||
23 | static inline void __raw_spin_lock(raw_spinlock_t *lock) | 23 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
24 | { | 24 | { |
25 | lock->slock = 0; | 25 | lock->slock = 0; |
26 | } | 26 | } |
27 | 27 | ||
28 | static inline void | 28 | static inline void |
29 | __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | 29 | arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) |
30 | { | 30 | { |
31 | local_irq_save(flags); | 31 | local_irq_save(flags); |
32 | lock->slock = 0; | 32 | lock->slock = 0; |
33 | } | 33 | } |
34 | 34 | ||
35 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 35 | static inline int arch_spin_trylock(arch_spinlock_t *lock) |
36 | { | 36 | { |
37 | char oldval = lock->slock; | 37 | char oldval = lock->slock; |
38 | 38 | ||
@@ -41,7 +41,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) | |||
41 | return oldval > 0; | 41 | return oldval > 0; |
42 | } | 42 | } |
43 | 43 | ||
44 | static inline void __raw_spin_unlock(raw_spinlock_t *lock) | 44 | static inline void arch_spin_unlock(arch_spinlock_t *lock) |
45 | { | 45 | { |
46 | lock->slock = 1; | 46 | lock->slock = 1; |
47 | } | 47 | } |
@@ -49,28 +49,28 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
49 | /* | 49 | /* |
50 | * Read-write spinlocks. No debug version. | 50 | * Read-write spinlocks. No debug version. |
51 | */ | 51 | */ |
52 | #define __raw_read_lock(lock) do { (void)(lock); } while (0) | 52 | #define arch_read_lock(lock) do { (void)(lock); } while (0) |
53 | #define __raw_write_lock(lock) do { (void)(lock); } while (0) | 53 | #define arch_write_lock(lock) do { (void)(lock); } while (0) |
54 | #define __raw_read_trylock(lock) ({ (void)(lock); 1; }) | 54 | #define arch_read_trylock(lock) ({ (void)(lock); 1; }) |
55 | #define __raw_write_trylock(lock) ({ (void)(lock); 1; }) | 55 | #define arch_write_trylock(lock) ({ (void)(lock); 1; }) |
56 | #define __raw_read_unlock(lock) do { (void)(lock); } while (0) | 56 | #define arch_read_unlock(lock) do { (void)(lock); } while (0) |
57 | #define __raw_write_unlock(lock) do { (void)(lock); } while (0) | 57 | #define arch_write_unlock(lock) do { (void)(lock); } while (0) |
58 | 58 | ||
59 | #else /* DEBUG_SPINLOCK */ | 59 | #else /* DEBUG_SPINLOCK */ |
60 | #define __raw_spin_is_locked(lock) ((void)(lock), 0) | 60 | #define arch_spin_is_locked(lock) ((void)(lock), 0) |
61 | /* for sched.c and kernel_lock.c: */ | 61 | /* for sched.c and kernel_lock.c: */ |
62 | # define __raw_spin_lock(lock) do { (void)(lock); } while (0) | 62 | # define arch_spin_lock(lock) do { (void)(lock); } while (0) |
63 | # define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) | 63 | # define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) |
64 | # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) | 64 | # define arch_spin_unlock(lock) do { (void)(lock); } while (0) |
65 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) | 65 | # define arch_spin_trylock(lock) ({ (void)(lock); 1; }) |
66 | #endif /* DEBUG_SPINLOCK */ | 66 | #endif /* DEBUG_SPINLOCK */ |
67 | 67 | ||
68 | #define __raw_spin_is_contended(lock) (((void)(lock), 0)) | 68 | #define arch_spin_is_contended(lock) (((void)(lock), 0)) |
69 | 69 | ||
70 | #define __raw_read_can_lock(lock) (((void)(lock), 1)) | 70 | #define arch_read_can_lock(lock) (((void)(lock), 1)) |
71 | #define __raw_write_can_lock(lock) (((void)(lock), 1)) | 71 | #define arch_write_can_lock(lock) (((void)(lock), 1)) |
72 | 72 | ||
73 | #define __raw_spin_unlock_wait(lock) \ | 73 | #define arch_spin_unlock_wait(lock) \ |
74 | do { cpu_relax(); } while (__raw_spin_is_locked(lock)) | 74 | do { cpu_relax(); } while (arch_spin_is_locked(lock)) |
75 | 75 | ||
76 | #endif /* __LINUX_SPINLOCK_UP_H */ | 76 | #endif /* __LINUX_SPINLOCK_UP_H */ |
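The lowest layer is renamed from __raw_spin_*()/raw_spinlock_t to arch_spin_*()/arch_spinlock_t, which also affects the few callers that bypass lockdep and use the arch layer directly (early-boot and debug paths). A sketch of the new spelling at such a call site (the lock itself is illustrative):

#include <linux/spinlock.h>

static arch_spinlock_t early_dbg_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void early_dbg_path(void)
{
        /* was: __raw_spin_lock(&early_dbg_lock) on the old raw_spinlock_t */
        arch_spin_lock(&early_dbg_lock);
        /* ... work that must not go through lockdep ... */
        arch_spin_unlock(&early_dbg_lock);
}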
diff --git a/include/linux/string.h b/include/linux/string.h index b8508868d5ad..a716ee2a8adb 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -62,9 +62,20 @@ extern char * strnchr(const char *, size_t, int); | |||
62 | #ifndef __HAVE_ARCH_STRRCHR | 62 | #ifndef __HAVE_ARCH_STRRCHR |
63 | extern char * strrchr(const char *,int); | 63 | extern char * strrchr(const char *,int); |
64 | #endif | 64 | #endif |
65 | extern char * __must_check strstrip(char *); | 65 | extern char * __must_check skip_spaces(const char *); |
66 | |||
67 | extern char *strim(char *); | ||
68 | |||
69 | static inline __must_check char *strstrip(char *str) | ||
70 | { | ||
71 | return strim(str); | ||
72 | } | ||
73 | |||
66 | #ifndef __HAVE_ARCH_STRSTR | 74 | #ifndef __HAVE_ARCH_STRSTR |
67 | extern char * strstr(const char *,const char *); | 75 | extern char * strstr(const char *, const char *); |
76 | #endif | ||
77 | #ifndef __HAVE_ARCH_STRNSTR | ||
78 | extern char * strnstr(const char *, const char *, size_t); | ||
68 | #endif | 79 | #endif |
69 | #ifndef __HAVE_ARCH_STRLEN | 80 | #ifndef __HAVE_ARCH_STRLEN |
70 | extern __kernel_size_t strlen(const char *); | 81 | extern __kernel_size_t strlen(const char *); |
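strstrip() becomes an inline wrapper around the new strim(), so callers that do not care about the return value can use strim() without tripping __must_check, and skip_spaces() is exported for the leading-whitespace half on its own. A small usage sketch (the buffer and parsing step are illustrative):

#include <linux/string.h>

static void parse_config_line(char *buf)
{
        char *val;

        /* strim() trims trailing whitespace in place and returns a pointer
         * past any leading whitespace. */
        val = strim(buf);
        if (!*val)
                return;         /* line was blank */
        /* ... parse 'val' here ... */
}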
diff --git a/include/linux/sunrpc/debug.h b/include/linux/sunrpc/debug.h index 10709cbe96fd..c2786f20016f 100644 --- a/include/linux/sunrpc/debug.h +++ b/include/linux/sunrpc/debug.h | |||
@@ -28,9 +28,6 @@ | |||
28 | 28 | ||
29 | #ifdef __KERNEL__ | 29 | #ifdef __KERNEL__ |
30 | 30 | ||
31 | #include <linux/timer.h> | ||
32 | #include <linux/workqueue.h> | ||
33 | |||
34 | /* | 31 | /* |
35 | * Enable RPC debugging/profiling. | 32 | * Enable RPC debugging/profiling. |
36 | */ | 33 | */ |
diff --git a/include/linux/sunrpc/rpc_rdma.h b/include/linux/sunrpc/rpc_rdma.h index 87b895d5c786..b78f16b1dea3 100644 --- a/include/linux/sunrpc/rpc_rdma.h +++ b/include/linux/sunrpc/rpc_rdma.h | |||
@@ -40,6 +40,8 @@ | |||
40 | #ifndef _LINUX_SUNRPC_RPC_RDMA_H | 40 | #ifndef _LINUX_SUNRPC_RPC_RDMA_H |
41 | #define _LINUX_SUNRPC_RPC_RDMA_H | 41 | #define _LINUX_SUNRPC_RPC_RDMA_H |
42 | 42 | ||
43 | #include <linux/types.h> | ||
44 | |||
43 | struct rpcrdma_segment { | 45 | struct rpcrdma_segment { |
44 | __be32 rs_handle; /* Registered memory handle */ | 46 | __be32 rs_handle; /* Registered memory handle */ |
45 | __be32 rs_length; /* Length of the chunk in bytes */ | 47 | __be32 rs_length; /* Length of the chunk in bytes */ |
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 1906782ec86b..7bc7fd5291ce 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -173,7 +173,8 @@ struct rpc_task_setup { | |||
173 | #define RPC_PRIORITY_LOW (-1) | 173 | #define RPC_PRIORITY_LOW (-1) |
174 | #define RPC_PRIORITY_NORMAL (0) | 174 | #define RPC_PRIORITY_NORMAL (0) |
175 | #define RPC_PRIORITY_HIGH (1) | 175 | #define RPC_PRIORITY_HIGH (1) |
176 | #define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW) | 176 | #define RPC_PRIORITY_PRIVILEGED (2) |
177 | #define RPC_NR_PRIORITY (1 + RPC_PRIORITY_PRIVILEGED - RPC_PRIORITY_LOW) | ||
177 | 178 | ||
178 | struct rpc_timer { | 179 | struct rpc_timer { |
179 | struct timer_list timer; | 180 | struct timer_list timer; |
@@ -229,6 +230,7 @@ void rpc_wake_up_queued_task(struct rpc_wait_queue *, | |||
229 | void rpc_wake_up(struct rpc_wait_queue *); | 230 | void rpc_wake_up(struct rpc_wait_queue *); |
230 | struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); | 231 | struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *); |
231 | void rpc_wake_up_status(struct rpc_wait_queue *, int); | 232 | void rpc_wake_up_status(struct rpc_wait_queue *, int); |
233 | int rpc_queue_empty(struct rpc_wait_queue *); | ||
232 | void rpc_delay(struct rpc_task *, unsigned long); | 234 | void rpc_delay(struct rpc_task *, unsigned long); |
233 | void * rpc_malloc(struct rpc_task *, size_t); | 235 | void * rpc_malloc(struct rpc_task *, size_t); |
234 | void rpc_free(void *); | 236 | void rpc_free(void *); |
@@ -254,6 +256,16 @@ static inline int rpc_wait_for_completion_task(struct rpc_task *task) | |||
254 | return __rpc_wait_for_completion_task(task, NULL); | 256 | return __rpc_wait_for_completion_task(task, NULL); |
255 | } | 257 | } |
256 | 258 | ||
259 | static inline void rpc_task_set_priority(struct rpc_task *task, unsigned char prio) | ||
260 | { | ||
261 | task->tk_priority = prio - RPC_PRIORITY_LOW; | ||
262 | } | ||
263 | |||
264 | static inline int rpc_task_has_priority(struct rpc_task *task, unsigned char prio) | ||
265 | { | ||
266 | return (task->tk_priority + RPC_PRIORITY_LOW == prio); | ||
267 | } | ||
268 | |||
257 | #ifdef RPC_DEBUG | 269 | #ifdef RPC_DEBUG |
258 | static inline const char * rpc_qname(struct rpc_wait_queue *q) | 270 | static inline const char * rpc_qname(struct rpc_wait_queue *q) |
259 | { | 271 | { |
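The priority helpers above store tk_priority as an offset from RPC_PRIORITY_LOW, so the new RPC_PRIORITY_PRIVILEGED level (2) lands at internal index 3 and RPC_NR_PRIORITY grows to 4. A sketch of how a caller might mark one task as privileged (the task pointer is assumed to come from the usual rpc_run_task() path):

#include <linux/sunrpc/sched.h>

static void boost_state_recovery(struct rpc_task *task)
{
        /* queue this task ahead of normal-priority traffic from now on */
        rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
}

static int is_privileged(struct rpc_task *task)
{
        /* true once tk_priority encodes PRIVILEGED - LOW (== 3) */
        return rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED);
}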
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 52e8cb0a7569..5a3085b9b394 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h | |||
@@ -29,7 +29,6 @@ struct svc_pool_stats { | |||
29 | unsigned long packets; | 29 | unsigned long packets; |
30 | unsigned long sockets_queued; | 30 | unsigned long sockets_queued; |
31 | unsigned long threads_woken; | 31 | unsigned long threads_woken; |
32 | unsigned long overloads_avoided; | ||
33 | unsigned long threads_timedout; | 32 | unsigned long threads_timedout; |
34 | }; | 33 | }; |
35 | 34 | ||
@@ -50,7 +49,6 @@ struct svc_pool { | |||
50 | struct list_head sp_sockets; /* pending sockets */ | 49 | struct list_head sp_sockets; /* pending sockets */ |
51 | unsigned int sp_nrthreads; /* # of threads in pool */ | 50 | unsigned int sp_nrthreads; /* # of threads in pool */ |
52 | struct list_head sp_all_threads; /* all server threads */ | 51 | struct list_head sp_all_threads; /* all server threads */ |
53 | int sp_nwaking; /* number of threads woken but not yet active */ | ||
54 | struct svc_pool_stats sp_stats; /* statistics on pool operation */ | 52 | struct svc_pool_stats sp_stats; /* statistics on pool operation */ |
55 | } ____cacheline_aligned_in_smp; | 53 | } ____cacheline_aligned_in_smp; |
56 | 54 | ||
@@ -275,16 +273,11 @@ struct svc_rqst { | |||
275 | struct auth_domain * rq_client; /* RPC peer info */ | 273 | struct auth_domain * rq_client; /* RPC peer info */ |
276 | struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ | 274 | struct auth_domain * rq_gssclient; /* "gss/"-style peer info */ |
277 | struct svc_cacherep * rq_cacherep; /* cache info */ | 275 | struct svc_cacherep * rq_cacherep; /* cache info */ |
278 | struct knfsd_fh * rq_reffh; /* Referrence filehandle, used to | ||
279 | * determine what device number | ||
280 | * to report (real or virtual) | ||
281 | */ | ||
282 | int rq_splice_ok; /* turned off in gss privacy | 276 | int rq_splice_ok; /* turned off in gss privacy |
283 | * to prevent encrypting page | 277 | * to prevent encrypting page |
284 | * cache pages */ | 278 | * cache pages */ |
285 | wait_queue_head_t rq_wait; /* synchronization */ | 279 | wait_queue_head_t rq_wait; /* synchronization */ |
286 | struct task_struct *rq_task; /* service thread */ | 280 | struct task_struct *rq_task; /* service thread */ |
287 | int rq_waking; /* 1 if thread is being woken */ | ||
288 | }; | 281 | }; |
289 | 282 | ||
290 | /* | 283 | /* |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 4ec90019c1a4..a2602a8207a6 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -145,38 +145,43 @@ enum { | |||
145 | SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ | 145 | SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ |
146 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ | 146 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ |
147 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ | 147 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ |
148 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ | ||
148 | /* add others here before... */ | 149 | /* add others here before... */ |
149 | SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ | 150 | SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */ |
150 | }; | 151 | }; |
151 | 152 | ||
152 | #define SWAP_CLUSTER_MAX 32 | 153 | #define SWAP_CLUSTER_MAX 32 |
153 | 154 | ||
154 | #define SWAP_MAP_MAX 0x7ffe | 155 | #define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */ |
155 | #define SWAP_MAP_BAD 0x7fff | 156 | #define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */ |
156 | #define SWAP_HAS_CACHE 0x8000 /* There is a swap cache of entry. */ | 157 | #define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */ |
157 | #define SWAP_COUNT_MASK (~SWAP_HAS_CACHE) | 158 | #define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */ |
159 | #define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */ | ||
160 | #define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */ | ||
161 | |||
158 | /* | 162 | /* |
159 | * The in-memory structure used to track swap areas. | 163 | * The in-memory structure used to track swap areas. |
160 | */ | 164 | */ |
161 | struct swap_info_struct { | 165 | struct swap_info_struct { |
162 | unsigned long flags; | 166 | unsigned long flags; /* SWP_USED etc: see above */ |
163 | int prio; /* swap priority */ | 167 | signed short prio; /* swap priority of this type */ |
164 | int next; /* next entry on swap list */ | 168 | signed char type; /* strange name for an index */ |
165 | struct file *swap_file; | 169 | signed char next; /* next type on the swap list */ |
166 | struct block_device *bdev; | 170 | unsigned int max; /* extent of the swap_map */ |
167 | struct list_head extent_list; | 171 | unsigned char *swap_map; /* vmalloc'ed array of usage counts */ |
168 | struct swap_extent *curr_swap_extent; | 172 | unsigned int lowest_bit; /* index of first free in swap_map */ |
169 | unsigned short *swap_map; | 173 | unsigned int highest_bit; /* index of last free in swap_map */ |
170 | unsigned int lowest_bit; | 174 | unsigned int pages; /* total of usable pages of swap */ |
171 | unsigned int highest_bit; | 175 | unsigned int inuse_pages; /* number of those currently in use */ |
176 | unsigned int cluster_next; /* likely index for next allocation */ | ||
177 | unsigned int cluster_nr; /* countdown to next cluster search */ | ||
172 | unsigned int lowest_alloc; /* while preparing discard cluster */ | 178 | unsigned int lowest_alloc; /* while preparing discard cluster */ |
173 | unsigned int highest_alloc; /* while preparing discard cluster */ | 179 | unsigned int highest_alloc; /* while preparing discard cluster */ |
174 | unsigned int cluster_next; | 180 | struct swap_extent *curr_swap_extent; |
175 | unsigned int cluster_nr; | 181 | struct swap_extent first_swap_extent; |
176 | unsigned int pages; | 182 | struct block_device *bdev; /* swap device or bdev of swap file */ |
177 | unsigned int max; | 183 | struct file *swap_file; /* seldom referenced */ |
178 | unsigned int inuse_pages; | 184 | unsigned int old_block_size; /* seldom referenced */ |
179 | unsigned int old_block_size; | ||
180 | }; | 185 | }; |
181 | 186 | ||
182 | struct swap_list_t { | 187 | struct swap_list_t { |
@@ -273,6 +278,7 @@ extern int scan_unevictable_register_node(struct node *node); | |||
273 | extern void scan_unevictable_unregister_node(struct node *node); | 278 | extern void scan_unevictable_unregister_node(struct node *node); |
274 | 279 | ||
275 | extern int kswapd_run(int nid); | 280 | extern int kswapd_run(int nid); |
281 | extern void kswapd_stop(int nid); | ||
276 | 282 | ||
277 | #ifdef CONFIG_MMU | 283 | #ifdef CONFIG_MMU |
278 | /* linux/mm/shmem.c */ | 284 | /* linux/mm/shmem.c */ |
@@ -309,17 +315,18 @@ extern long total_swap_pages; | |||
309 | extern void si_swapinfo(struct sysinfo *); | 315 | extern void si_swapinfo(struct sysinfo *); |
310 | extern swp_entry_t get_swap_page(void); | 316 | extern swp_entry_t get_swap_page(void); |
311 | extern swp_entry_t get_swap_page_of_type(int); | 317 | extern swp_entry_t get_swap_page_of_type(int); |
312 | extern void swap_duplicate(swp_entry_t); | ||
313 | extern int swapcache_prepare(swp_entry_t); | ||
314 | extern int valid_swaphandles(swp_entry_t, unsigned long *); | 318 | extern int valid_swaphandles(swp_entry_t, unsigned long *); |
319 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); | ||
320 | extern void swap_shmem_alloc(swp_entry_t); | ||
321 | extern int swap_duplicate(swp_entry_t); | ||
322 | extern int swapcache_prepare(swp_entry_t); | ||
315 | extern void swap_free(swp_entry_t); | 323 | extern void swap_free(swp_entry_t); |
316 | extern void swapcache_free(swp_entry_t, struct page *page); | 324 | extern void swapcache_free(swp_entry_t, struct page *page); |
317 | extern int free_swap_and_cache(swp_entry_t); | 325 | extern int free_swap_and_cache(swp_entry_t); |
318 | extern int swap_type_of(dev_t, sector_t, struct block_device **); | 326 | extern int swap_type_of(dev_t, sector_t, struct block_device **); |
319 | extern unsigned int count_swap_pages(int, int); | 327 | extern unsigned int count_swap_pages(int, int); |
320 | extern sector_t map_swap_page(struct swap_info_struct *, pgoff_t); | 328 | extern sector_t map_swap_page(struct page *, struct block_device **); |
321 | extern sector_t swapdev_block(int, pgoff_t); | 329 | extern sector_t swapdev_block(int, pgoff_t); |
322 | extern struct swap_info_struct *get_swap_info_struct(unsigned); | ||
323 | extern int reuse_swap_page(struct page *); | 330 | extern int reuse_swap_page(struct page *); |
324 | extern int try_to_free_swap(struct page *); | 331 | extern int try_to_free_swap(struct page *); |
325 | struct backing_dev_info; | 332 | struct backing_dev_info; |
@@ -384,8 +391,18 @@ static inline void show_swap_cache_info(void) | |||
384 | #define free_swap_and_cache(swp) is_migration_entry(swp) | 391 | #define free_swap_and_cache(swp) is_migration_entry(swp) |
385 | #define swapcache_prepare(swp) is_migration_entry(swp) | 392 | #define swapcache_prepare(swp) is_migration_entry(swp) |
386 | 393 | ||
387 | static inline void swap_duplicate(swp_entry_t swp) | 394 | static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) |
388 | { | 395 | { |
396 | return 0; | ||
397 | } | ||
398 | |||
399 | static inline void swap_shmem_alloc(swp_entry_t swp) | ||
400 | { | ||
401 | } | ||
402 | |||
403 | static inline int swap_duplicate(swp_entry_t swp) | ||
404 | { | ||
405 | return 0; | ||
389 | } | 406 | } |
390 | 407 | ||
391 | static inline void swap_free(swp_entry_t swp) | 408 | static inline void swap_free(swp_entry_t swp) |
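The swap_map entries shrink from 16-bit to 8-bit values: the low bits hold the duplication count (up to SWAP_MAP_MAX), 0x3f marks a bad slot, SWAP_HAS_CACHE flags a swap-cache page, and COUNT_CONTINUED means the true count continues in a continuation page, which is also why swap_duplicate() now returns int instead of void. A freestanding C sketch of how a first-level byte decodes under this encoding (rough illustration only; the real decoding lives in mm/swapfile.c):

#include <stdio.h>

#define SWAP_MAP_MAX    0x3e    /* max in-place duplication count */
#define SWAP_MAP_BAD    0x3f    /* bad page slot */
#define SWAP_HAS_CACHE  0x40    /* entry also has a swap-cache page */
#define COUNT_CONTINUED 0x80    /* count continues in a continuation page */
#define SWAP_MAP_SHMEM  0xbf    /* owned by shmem/tmpfs */

static void decode_swap_map(unsigned char ent)
{
        unsigned char count = ent & 0x3f;       /* low 6 bits */

        if (ent == SWAP_MAP_SHMEM) {
                printf("shmem/tmpfs-owned entry\n");
                return;
        }
        if (count == SWAP_MAP_BAD) {
                printf("bad slot\n");
                return;
        }
        printf("count=%u%s%s\n", count,
               (ent & SWAP_HAS_CACHE) ? " +swapcache" : "",
               (ent & COUNT_CONTINUED) ? " +continued" : "");
}

int main(void)
{
        decode_swap_map(0x01);                  /* single reference */
        decode_swap_map(0x41);                  /* referenced and in swap cache */
        decode_swap_map(COUNT_CONTINUED | 0x02);/* count overflowed into continuation */
        return 0;
}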
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 939a61507ac5..207466a49f3d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -102,12 +102,10 @@ struct perf_event_attr; | |||
102 | #ifdef CONFIG_EVENT_PROFILE | 102 | #ifdef CONFIG_EVENT_PROFILE |
103 | 103 | ||
104 | #define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ | 104 | #define TRACE_SYS_ENTER_PROFILE_INIT(sname) \ |
105 | .profile_count = ATOMIC_INIT(-1), \ | ||
106 | .profile_enable = prof_sysenter_enable, \ | 105 | .profile_enable = prof_sysenter_enable, \ |
107 | .profile_disable = prof_sysenter_disable, | 106 | .profile_disable = prof_sysenter_disable, |
108 | 107 | ||
109 | #define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ | 108 | #define TRACE_SYS_EXIT_PROFILE_INIT(sname) \ |
110 | .profile_count = ATOMIC_INIT(-1), \ | ||
111 | .profile_enable = prof_sysexit_enable, \ | 109 | .profile_enable = prof_sysexit_enable, \ |
112 | .profile_disable = prof_sysexit_disable, | 110 | .profile_disable = prof_sysexit_disable, |
113 | #else | 111 | #else |
@@ -145,7 +143,7 @@ struct perf_event_attr; | |||
145 | .name = "sys_enter"#sname, \ | 143 | .name = "sys_enter"#sname, \ |
146 | .system = "syscalls", \ | 144 | .system = "syscalls", \ |
147 | .event = &enter_syscall_print_##sname, \ | 145 | .event = &enter_syscall_print_##sname, \ |
148 | .raw_init = init_syscall_trace, \ | 146 | .raw_init = trace_event_raw_init, \ |
149 | .show_format = syscall_enter_format, \ | 147 | .show_format = syscall_enter_format, \ |
150 | .define_fields = syscall_enter_define_fields, \ | 148 | .define_fields = syscall_enter_define_fields, \ |
151 | .regfunc = reg_event_syscall_enter, \ | 149 | .regfunc = reg_event_syscall_enter, \ |
@@ -167,7 +165,7 @@ struct perf_event_attr; | |||
167 | .name = "sys_exit"#sname, \ | 165 | .name = "sys_exit"#sname, \ |
168 | .system = "syscalls", \ | 166 | .system = "syscalls", \ |
169 | .event = &exit_syscall_print_##sname, \ | 167 | .event = &exit_syscall_print_##sname, \ |
170 | .raw_init = init_syscall_trace, \ | 168 | .raw_init = trace_event_raw_init, \ |
171 | .show_format = syscall_exit_format, \ | 169 | .show_format = syscall_exit_format, \ |
172 | .define_fields = syscall_exit_define_fields, \ | 170 | .define_fields = syscall_exit_define_fields, \ |
173 | .regfunc = reg_event_syscall_exit, \ | 171 | .regfunc = reg_event_syscall_exit, \ |
@@ -197,7 +195,7 @@ struct perf_event_attr; | |||
197 | static const struct syscall_metadata __used \ | 195 | static const struct syscall_metadata __used \ |
198 | __attribute__((__aligned__(4))) \ | 196 | __attribute__((__aligned__(4))) \ |
199 | __attribute__((section("__syscalls_metadata"))) \ | 197 | __attribute__((section("__syscalls_metadata"))) \ |
200 | __syscall_meta_##sname = { \ | 198 | __syscall_meta__##sname = { \ |
201 | .name = "sys_"#sname, \ | 199 | .name = "sys_"#sname, \ |
202 | .nb_args = 0, \ | 200 | .nb_args = 0, \ |
203 | .enter_event = &event_enter__##sname, \ | 201 | .enter_event = &event_enter__##sname, \ |
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 877ba039e6a4..bd27fbc9db62 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
@@ -482,6 +482,7 @@ enum | |||
482 | NET_IPV4_CONF_ARP_ACCEPT=21, | 482 | NET_IPV4_CONF_ARP_ACCEPT=21, |
483 | NET_IPV4_CONF_ARP_NOTIFY=22, | 483 | NET_IPV4_CONF_ARP_NOTIFY=22, |
484 | NET_IPV4_CONF_ACCEPT_LOCAL=23, | 484 | NET_IPV4_CONF_ACCEPT_LOCAL=23, |
485 | NET_IPV4_CONF_SRC_VMARK=24, | ||
485 | __NET_IPV4_CONF_MAX | 486 | __NET_IPV4_CONF_MAX |
486 | }; | 487 | }; |
487 | 488 | ||
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 9d68fed50f11..cfa83083a2d4 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
@@ -99,8 +99,9 @@ int __must_check sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, | |||
99 | void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); | 99 | void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); |
100 | 100 | ||
101 | int __must_check sysfs_create_bin_file(struct kobject *kobj, | 101 | int __must_check sysfs_create_bin_file(struct kobject *kobj, |
102 | struct bin_attribute *attr); | 102 | const struct bin_attribute *attr); |
103 | void sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr); | 103 | void sysfs_remove_bin_file(struct kobject *kobj, |
104 | const struct bin_attribute *attr); | ||
104 | 105 | ||
105 | int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target, | 106 | int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target, |
106 | const char *name); | 107 | const char *name); |
@@ -175,13 +176,13 @@ static inline void sysfs_remove_file(struct kobject *kobj, | |||
175 | } | 176 | } |
176 | 177 | ||
177 | static inline int sysfs_create_bin_file(struct kobject *kobj, | 178 | static inline int sysfs_create_bin_file(struct kobject *kobj, |
178 | struct bin_attribute *attr) | 179 | const struct bin_attribute *attr) |
179 | { | 180 | { |
180 | return 0; | 181 | return 0; |
181 | } | 182 | } |
182 | 183 | ||
183 | static inline void sysfs_remove_bin_file(struct kobject *kobj, | 184 | static inline void sysfs_remove_bin_file(struct kobject *kobj, |
184 | struct bin_attribute *attr) | 185 | const struct bin_attribute *attr) |
185 | { | 186 | { |
186 | } | 187 | } |
187 | 188 | ||
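sysfs_create_bin_file() and sysfs_remove_bin_file() now take a const struct bin_attribute *, so drivers can declare their binary attributes const. A sketch of a driver-side declaration under the new prototypes (attribute name, size and the read callback body are illustrative; the callback signature is assumed from this kernel's struct bin_attribute):

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *attr,
                           char *buf, loff_t off, size_t count)
{
        /* copy up to 'count' bytes of device data into 'buf' here */
        return 0;
}

static const struct bin_attribute eeprom_attr = {
        .attr   = { .name = "eeprom", .mode = 0444 },
        .size   = 256,
        .read   = eeprom_read,
};

static int my_driver_add_files(struct kobject *kobj)
{
        return sysfs_create_bin_file(kobj, &eeprom_attr);
}

static void my_driver_remove_files(struct kobject *kobj)
{
        sysfs_remove_bin_file(kobj, &eeprom_attr);
}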
diff --git a/include/linux/timb_gpio.h b/include/linux/timb_gpio.h new file mode 100644 index 000000000000..ce456eaae861 --- /dev/null +++ b/include/linux/timb_gpio.h | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * timb_gpio.h timberdale FPGA GPIO driver, platform data definition | ||
3 | * Copyright (c) 2009 Intel Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef _LINUX_TIMB_GPIO_H | ||
20 | #define _LINUX_TIMB_GPIO_H | ||
21 | |||
22 | /** | ||
23 | * struct timbgpio_platform_data - Platform data of the Timberdale GPIO driver | ||
24 | * @gpio_base: Number of the first GPIO pin; set to -1 for | ||
25 | * dynamic number allocation. | ||
26 | * @nr_pins: Number of pins supported by the hardware (1-32) | ||
27 | * @irq_base: If IRQs are supported by the hardware, this is the base | ||
28 | * IRQ number. One IRQ per pin will be used. Set to | ||
29 | * -1 if IRQs are not supported. | ||
30 | */ | ||
31 | struct timbgpio_platform_data { | ||
32 | int gpio_base; | ||
33 | int nr_pins; | ||
34 | int irq_base; | ||
35 | }; | ||
36 | |||
37 | #endif | ||
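The new header only carries the platform data; a board file is expected to hand a filled-in timbgpio_platform_data to the GPIO driver through a platform device. A sketch of that wiring (the "timb-gpio" device name and the pin/IRQ values are assumptions, not taken from this header):

#include <linux/platform_device.h>
#include <linux/timb_gpio.h>

static struct timbgpio_platform_data timb_gpio_pdata = {
        .gpio_base      = -1,   /* let gpiolib allocate the base number */
        .nr_pins        = 32,   /* full 32-pin block */
        .irq_base       = -1,   /* no per-pin interrupts in this sketch */
};

static struct platform_device timb_gpio_device = {
        .name   = "timb-gpio",          /* assumed driver name */
        .id     = -1,
        .dev    = {
                .platform_data = &timb_gpio_pdata,
        },
};

/* registered from board/bus setup code, e.g.:
 *      platform_device_register(&timb_gpio_device);
 */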
diff --git a/include/linux/topology.h b/include/linux/topology.h index 57e63579bfdd..5b81156780b1 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -99,7 +99,7 @@ int arch_update_cpu_topology(void); | |||
99 | | 1*SD_WAKE_AFFINE \ | 99 | | 1*SD_WAKE_AFFINE \ |
100 | | 1*SD_SHARE_CPUPOWER \ | 100 | | 1*SD_SHARE_CPUPOWER \ |
101 | | 0*SD_POWERSAVINGS_BALANCE \ | 101 | | 0*SD_POWERSAVINGS_BALANCE \ |
102 | | 0*SD_SHARE_PKG_RESOURCES \ | 102 | | 1*SD_SHARE_PKG_RESOURCES \ |
103 | | 0*SD_SERIALIZE \ | 103 | | 0*SD_SERIALIZE \ |
104 | | 0*SD_PREFER_SIBLING \ | 104 | | 0*SD_PREFER_SIBLING \ |
105 | , \ | 105 | , \ |
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index 1eb44a924e56..10db0102a890 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h | |||
@@ -134,6 +134,13 @@ static inline __must_check int tracehook_report_syscall_entry( | |||
134 | */ | 134 | */ |
135 | static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) | 135 | static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step) |
136 | { | 136 | { |
137 | if (step) { | ||
138 | siginfo_t info; | ||
139 | user_single_step_siginfo(current, regs, &info); | ||
140 | force_sig_info(SIGTRAP, &info, current); | ||
141 | return; | ||
142 | } | ||
143 | |||
137 | ptrace_report_syscall(regs); | 144 | ptrace_report_syscall(regs); |
138 | } | 145 | } |
139 | 146 | ||
diff --git a/include/linux/tty.h b/include/linux/tty.h index 405a9035fe40..6abfcf5b5887 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -350,8 +350,6 @@ extern void tty_write_flush(struct tty_struct *); | |||
350 | 350 | ||
351 | extern struct ktermios tty_std_termios; | 351 | extern struct ktermios tty_std_termios; |
352 | 352 | ||
353 | extern int kmsg_redirect; | ||
354 | |||
355 | extern void console_init(void); | 353 | extern void console_init(void); |
356 | extern int vcs_init(void); | 354 | extern int vcs_init(void); |
357 | 355 | ||
@@ -466,7 +464,7 @@ extern int tty_port_alloc_xmit_buf(struct tty_port *port); | |||
466 | extern void tty_port_free_xmit_buf(struct tty_port *port); | 464 | extern void tty_port_free_xmit_buf(struct tty_port *port); |
467 | extern void tty_port_put(struct tty_port *port); | 465 | extern void tty_port_put(struct tty_port *port); |
468 | 466 | ||
469 | extern inline struct tty_port *tty_port_get(struct tty_port *port) | 467 | static inline struct tty_port *tty_port_get(struct tty_port *port) |
470 | { | 468 | { |
471 | if (port) | 469 | if (port) |
472 | kref_get(&port->kref); | 470 | kref_get(&port->kref); |
@@ -488,7 +486,7 @@ extern void tty_port_close(struct tty_port *port, | |||
488 | struct tty_struct *tty, struct file *filp); | 486 | struct tty_struct *tty, struct file *filp); |
489 | extern int tty_port_open(struct tty_port *port, | 487 | extern int tty_port_open(struct tty_port *port, |
490 | struct tty_struct *tty, struct file *filp); | 488 | struct tty_struct *tty, struct file *filp); |
491 | extern inline int tty_port_users(struct tty_port *port) | 489 | static inline int tty_port_users(struct tty_port *port) |
492 | { | 490 | { |
493 | return port->count + port->blocked_open; | 491 | return port->count + port->blocked_open; |
494 | } | 492 | } |
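
With the helpers now properly static inline, their intended pairing looks roughly like the sketch below (illustrative only; the function name and pr_debug message are placeholders).

/* Sketch of the reference-counting helpers above. */
static void example_use_port(struct tty_port *port)
{
	port = tty_port_get(port);	/* take a reference, NULL-safe */
	if (!port)
		return;

	if (tty_port_users(port))
		pr_debug("port still has openers\n");

	tty_port_put(port);		/* drop the reference */
}
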
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 6b58367d145e..d512d98dfb7d 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h | |||
@@ -94,6 +94,7 @@ static inline unsigned long __copy_from_user_nocache(void *to, | |||
94 | * happens, handle that and return -EFAULT. | 94 | * happens, handle that and return -EFAULT. |
95 | */ | 95 | */ |
96 | extern long probe_kernel_read(void *dst, void *src, size_t size); | 96 | extern long probe_kernel_read(void *dst, void *src, size_t size); |
97 | extern long __probe_kernel_read(void *dst, void *src, size_t size); | ||
97 | 98 | ||
98 | /* | 99 | /* |
99 | * probe_kernel_write(): safely attempt to write to a location | 100 | * probe_kernel_write(): safely attempt to write to a location |
@@ -104,6 +105,7 @@ extern long probe_kernel_read(void *dst, void *src, size_t size); | |||
104 | * Safely write to address @dst from the buffer at @src. If a kernel fault | 105 | * Safely write to address @dst from the buffer at @src. If a kernel fault |
105 | * happens, handle that and return -EFAULT. | 106 | * happens, handle that and return -EFAULT. |
106 | */ | 107 | */ |
107 | extern long probe_kernel_write(void *dst, void *src, size_t size); | 108 | extern long notrace probe_kernel_write(void *dst, void *src, size_t size); |
109 | extern long notrace __probe_kernel_write(void *dst, void *src, size_t size); | ||
108 | 110 | ||
109 | #endif /* __LINUX_UACCESS_H__ */ | 111 | #endif /* __LINUX_UACCESS_H__ */ |
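
A minimal sketch of the intended use of probe_kernel_read(): reading through a possibly invalid kernel pointer without risking a fault. The helper name example_peek is hypothetical.

/* Sketch: safely peek at a kernel address that may not be mapped.
 * probe_kernel_read() returns -EFAULT instead of oopsing on a bad pointer. */
static unsigned long example_peek(void *addr)
{
	unsigned long val;

	if (probe_kernel_read(&val, addr, sizeof(val)))
		return 0;	/* address was not readable */
	return val;
}
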
diff --git a/include/linux/usb.h b/include/linux/usb.h index e101a2d04d75..d7ace1b80f09 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -192,6 +192,7 @@ struct usb_interface { | |||
192 | unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ | 192 | unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */ |
193 | unsigned needs_binding:1; /* needs delayed unbind/rebind */ | 193 | unsigned needs_binding:1; /* needs delayed unbind/rebind */ |
194 | unsigned reset_running:1; | 194 | unsigned reset_running:1; |
195 | unsigned resetting_device:1; /* true: bandwidth alloc after reset */ | ||
195 | 196 | ||
196 | struct device dev; /* interface specific device info */ | 197 | struct device dev; /* interface specific device info */ |
197 | struct device *usb_dev; | 198 | struct device *usb_dev; |
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index acf6e457c04b..1819396ed501 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/kref.h> | 16 | #include <linux/kref.h> |
17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
18 | #include <linux/sysrq.h> | 18 | #include <linux/sysrq.h> |
19 | #include <linux/kfifo.h> | ||
19 | 20 | ||
20 | #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ | 21 | #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ |
21 | #define SERIAL_TTY_MINORS 254 /* loads of devices :) */ | 22 | #define SERIAL_TTY_MINORS 254 /* loads of devices :) */ |
@@ -94,7 +95,7 @@ struct usb_serial_port { | |||
94 | unsigned char *bulk_out_buffer; | 95 | unsigned char *bulk_out_buffer; |
95 | int bulk_out_size; | 96 | int bulk_out_size; |
96 | struct urb *write_urb; | 97 | struct urb *write_urb; |
97 | struct kfifo *write_fifo; | 98 | struct kfifo write_fifo; |
98 | int write_urb_busy; | 99 | int write_urb_busy; |
99 | __u8 bulk_out_endpointAddress; | 100 | __u8 bulk_out_endpointAddress; |
100 | 101 | ||
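
With write_fifo now embedded rather than pointed to, a driver allocates the fifo in place. A rough sketch, assuming the kfifo_alloc()/kfifo_in() interface of this kernel series; the function names and buffer size are illustrative.

#include <linux/kfifo.h>

/* Sketch: the fifo is embedded in usb_serial_port, so it is allocated
 * in place rather than assigned from a kfifo_alloc() return value. */
static int example_port_setup(struct usb_serial_port *port)
{
	/* allocate the backing buffer for the embedded fifo */
	if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

static void example_port_queue(struct usb_serial_port *port,
			       const unsigned char *buf, int count)
{
	/* stage outgoing data; the write URB drains it later */
	kfifo_in(&port->write_fifo, buf, count);
}
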
diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h index 79b9837d9ca0..cf97b5b9d1fe 100644 --- a/include/linux/vermagic.h +++ b/include/linux/vermagic.h | |||
@@ -1,4 +1,4 @@ | |||
1 | #include <linux/utsrelease.h> | 1 | #include <generated/utsrelease.h> |
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | 3 | ||
4 | /* Simple sanity version stamp for modules. */ | 4 | /* Simple sanity version stamp for modules. */ |
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 32b92298fd79..d4962a782b8a 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h | |||
@@ -294,6 +294,7 @@ struct v4l2_pix_format { | |||
294 | 294 | ||
295 | /* Grey formats */ | 295 | /* Grey formats */ |
296 | #define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ | 296 | #define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ |
297 | #define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */ | ||
297 | #define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ | 298 | #define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ |
298 | 299 | ||
299 | /* Palette formats */ | 300 | /* Palette formats */ |
@@ -329,7 +330,11 @@ struct v4l2_pix_format { | |||
329 | #define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ | 330 | #define V4L2_PIX_FMT_SBGGR8 v4l2_fourcc('B', 'A', '8', '1') /* 8 BGBG.. GRGR.. */ |
330 | #define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ | 331 | #define V4L2_PIX_FMT_SGBRG8 v4l2_fourcc('G', 'B', 'R', 'G') /* 8 GBGB.. RGRG.. */ |
331 | #define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */ | 332 | #define V4L2_PIX_FMT_SGRBG8 v4l2_fourcc('G', 'R', 'B', 'G') /* 8 GRGR.. BGBG.. */ |
332 | #define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10bit raw bayer */ | 333 | #define V4L2_PIX_FMT_SRGGB8 v4l2_fourcc('R', 'G', 'G', 'B') /* 8 RGRG.. GBGB.. */ |
334 | #define V4L2_PIX_FMT_SBGGR10 v4l2_fourcc('B', 'G', '1', '0') /* 10 BGBG.. GRGR.. */ | ||
335 | #define V4L2_PIX_FMT_SGBRG10 v4l2_fourcc('G', 'B', '1', '0') /* 10 GBGB.. RGRG.. */ | ||
336 | #define V4L2_PIX_FMT_SGRBG10 v4l2_fourcc('B', 'A', '1', '0') /* 10 GRGR.. BGBG.. */ | ||
337 | #define V4L2_PIX_FMT_SRGGB10 v4l2_fourcc('R', 'G', '1', '0') /* 10 RGRG.. GBGB.. */ | ||
333 | /* 10bit raw bayer DPCM compressed to 8 bits */ | 338 | /* 10bit raw bayer DPCM compressed to 8 bits */ |
334 | #define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0') | 339 | #define V4L2_PIX_FMT_SGRBG10DPCM8 v4l2_fourcc('B', 'D', '1', '0') |
335 | /* | 340 | /* |
@@ -732,6 +737,99 @@ struct v4l2_standard { | |||
732 | }; | 737 | }; |
733 | 738 | ||
734 | /* | 739 | /* |
740 | * V I D E O T I M I N G S D V P R E S E T | ||
741 | */ | ||
742 | struct v4l2_dv_preset { | ||
743 | __u32 preset; | ||
744 | __u32 reserved[4]; | ||
745 | }; | ||
746 | |||
747 | /* | ||
748 | * D V P R E S E T S E N U M E R A T I O N | ||
749 | */ | ||
750 | struct v4l2_dv_enum_preset { | ||
751 | __u32 index; | ||
752 | __u32 preset; | ||
753 | __u8 name[32]; /* Name of the preset timing */ | ||
754 | __u32 width; | ||
755 | __u32 height; | ||
756 | __u32 reserved[4]; | ||
757 | }; | ||
758 | |||
759 | /* | ||
760 | * D V P R E S E T V A L U E S | ||
761 | */ | ||
762 | #define V4L2_DV_INVALID 0 | ||
763 | #define V4L2_DV_480P59_94 1 /* BT.1362 */ | ||
764 | #define V4L2_DV_576P50 2 /* BT.1362 */ | ||
765 | #define V4L2_DV_720P24 3 /* SMPTE 296M */ | ||
766 | #define V4L2_DV_720P25 4 /* SMPTE 296M */ | ||
767 | #define V4L2_DV_720P30 5 /* SMPTE 296M */ | ||
768 | #define V4L2_DV_720P50 6 /* SMPTE 296M */ | ||
769 | #define V4L2_DV_720P59_94 7 /* SMPTE 274M */ | ||
770 | #define V4L2_DV_720P60 8 /* SMPTE 274M/296M */ | ||
771 | #define V4L2_DV_1080I29_97 9 /* BT.1120/ SMPTE 274M */ | ||
772 | #define V4L2_DV_1080I30 10 /* BT.1120/ SMPTE 274M */ | ||
773 | #define V4L2_DV_1080I25 11 /* BT.1120 */ | ||
774 | #define V4L2_DV_1080I50 12 /* SMPTE 296M */ | ||
775 | #define V4L2_DV_1080I60 13 /* SMPTE 296M */ | ||
776 | #define V4L2_DV_1080P24 14 /* SMPTE 296M */ | ||
777 | #define V4L2_DV_1080P25 15 /* SMPTE 296M */ | ||
778 | #define V4L2_DV_1080P30 16 /* SMPTE 296M */ | ||
779 | #define V4L2_DV_1080P50 17 /* BT.1120 */ | ||
780 | #define V4L2_DV_1080P60 18 /* BT.1120 */ | ||
781 | |||
782 | /* | ||
783 | * D V B T T I M I N G S | ||
784 | */ | ||
785 | |||
786 | /* BT.656/BT.1120 timing data */ | ||
787 | struct v4l2_bt_timings { | ||
788 | __u32 width; /* width in pixels */ | ||
789 | __u32 height; /* height in lines */ | ||
790 | __u32 interlaced; /* Interlaced or progressive */ | ||
791 | __u32 polarities; /* Positive or negative polarity */ | ||
792 | __u64 pixelclock; /* Pixel clock in Hz. Ex. 74.25MHz->74250000 */ | ||
793 | __u32 hfrontporch; /* Horizontal front porch in pixels */ | ||
794 | __u32 hsync; /* Horizontal Sync length in pixels */ | ||
795 | __u32 hbackporch; /* Horizontal back porch in pixels */ | ||
796 | __u32 vfrontporch; /* Vertical front porch in lines */ | ||
797 | __u32 vsync; /* Vertical Sync length in lines */ | ||
798 | __u32 vbackporch; /* Vertical back porch in lines */ | ||
799 | __u32 il_vfrontporch; /* Vertical front porch for bottom field of | ||
800 | * interlaced field formats | ||
801 | */ | ||
802 | __u32 il_vsync; /* Vertical sync length for bottom field of | ||
803 | * interlaced field formats | ||
804 | */ | ||
805 | __u32 il_vbackporch; /* Vertical back porch for bottom field of | ||
806 | * interlaced field formats | ||
807 | */ | ||
808 | __u32 reserved[16]; | ||
809 | } __attribute__ ((packed)); | ||
810 | |||
811 | /* Interlaced or progressive format */ | ||
812 | #define V4L2_DV_PROGRESSIVE 0 | ||
813 | #define V4L2_DV_INTERLACED 1 | ||
814 | |||
815 | /* Polarities. If bit is not set, it is assumed to be negative polarity */ | ||
816 | #define V4L2_DV_VSYNC_POS_POL 0x00000001 | ||
817 | #define V4L2_DV_HSYNC_POS_POL 0x00000002 | ||
818 | |||
819 | |||
820 | /* DV timings */ | ||
821 | struct v4l2_dv_timings { | ||
822 | __u32 type; | ||
823 | union { | ||
824 | struct v4l2_bt_timings bt; | ||
825 | __u32 reserved[32]; | ||
826 | }; | ||
827 | } __attribute__ ((packed)); | ||
828 | |||
829 | /* Values for the type field */ | ||
830 | #define V4L2_DV_BT_656_1120 0 /* BT.656/1120 timing type */ | ||
831 | |||
832 | /* | ||
735 | * V I D E O I N P U T S | 833 | * V I D E O I N P U T S |
736 | */ | 834 | */ |
737 | struct v4l2_input { | 835 | struct v4l2_input { |
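
A userspace sketch of filling the new custom-timings structure for a 1280x720p60 signal; the CEA-861-style timing numbers below are illustrative, not taken from this header.

#include <string.h>
#include <linux/videodev2.h>

/* Sketch: describe a progressive 1280x720 @ 60 Hz signal with the new
 * BT.656/1120 timings structure. */
static void example_fill_720p60(struct v4l2_dv_timings *t)
{
	memset(t, 0, sizeof(*t));
	t->type           = V4L2_DV_BT_656_1120;
	t->bt.width       = 1280;
	t->bt.height      = 720;
	t->bt.interlaced  = V4L2_DV_PROGRESSIVE;
	t->bt.polarities  = V4L2_DV_VSYNC_POS_POL | V4L2_DV_HSYNC_POS_POL;
	t->bt.pixelclock  = 74250000;	/* 74.25 MHz */
	t->bt.hfrontporch = 110;
	t->bt.hsync       = 40;
	t->bt.hbackporch  = 220;
	t->bt.vfrontporch = 5;
	t->bt.vsync       = 5;
	t->bt.vbackporch  = 20;
}
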
@@ -742,7 +840,8 @@ struct v4l2_input { | |||
742 | __u32 tuner; /* Associated tuner */ | 840 | __u32 tuner; /* Associated tuner */ |
743 | v4l2_std_id std; | 841 | v4l2_std_id std; |
744 | __u32 status; | 842 | __u32 status; |
745 | __u32 reserved[4]; | 843 | __u32 capabilities; |
844 | __u32 reserved[3]; | ||
746 | }; | 845 | }; |
747 | 846 | ||
748 | /* Values for the 'type' field */ | 847 | /* Values for the 'type' field */ |
@@ -773,6 +872,11 @@ struct v4l2_input { | |||
773 | #define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */ | 872 | #define V4L2_IN_ST_NO_ACCESS 0x02000000 /* Conditional access denied */ |
774 | #define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */ | 873 | #define V4L2_IN_ST_VTR 0x04000000 /* VTR time constant */ |
775 | 874 | ||
875 | /* capabilities flags */ | ||
876 | #define V4L2_IN_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */ | ||
877 | #define V4L2_IN_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ | ||
878 | #define V4L2_IN_CAP_STD 0x00000004 /* Supports S_STD */ | ||
879 | |||
776 | /* | 880 | /* |
777 | * V I D E O O U T P U T S | 881 | * V I D E O O U T P U T S |
778 | */ | 882 | */ |
@@ -783,13 +887,19 @@ struct v4l2_output { | |||
783 | __u32 audioset; /* Associated audios (bitfield) */ | 887 | __u32 audioset; /* Associated audios (bitfield) */ |
784 | __u32 modulator; /* Associated modulator */ | 888 | __u32 modulator; /* Associated modulator */ |
785 | v4l2_std_id std; | 889 | v4l2_std_id std; |
786 | __u32 reserved[4]; | 890 | __u32 capabilities; |
891 | __u32 reserved[3]; | ||
787 | }; | 892 | }; |
788 | /* Values for the 'type' field */ | 893 | /* Values for the 'type' field */ |
789 | #define V4L2_OUTPUT_TYPE_MODULATOR 1 | 894 | #define V4L2_OUTPUT_TYPE_MODULATOR 1 |
790 | #define V4L2_OUTPUT_TYPE_ANALOG 2 | 895 | #define V4L2_OUTPUT_TYPE_ANALOG 2 |
791 | #define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3 | 896 | #define V4L2_OUTPUT_TYPE_ANALOGVGAOVERLAY 3 |
792 | 897 | ||
898 | /* capabilities flags */ | ||
899 | #define V4L2_OUT_CAP_PRESETS 0x00000001 /* Supports S_DV_PRESET */ | ||
900 | #define V4L2_OUT_CAP_CUSTOM_TIMINGS 0x00000002 /* Supports S_DV_TIMINGS */ | ||
901 | #define V4L2_OUT_CAP_STD 0x00000004 /* Supports S_STD */ | ||
902 | |||
793 | /* | 903 | /* |
794 | * C O N T R O L S | 904 | * C O N T R O L S |
795 | */ | 905 | */ |
@@ -1624,6 +1734,13 @@ struct v4l2_dbg_chip_ident { | |||
1624 | #endif | 1734 | #endif |
1625 | 1735 | ||
1626 | #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) | 1736 | #define VIDIOC_S_HW_FREQ_SEEK _IOW('V', 82, struct v4l2_hw_freq_seek) |
1737 | #define VIDIOC_ENUM_DV_PRESETS _IOWR('V', 83, struct v4l2_dv_enum_preset) | ||
1738 | #define VIDIOC_S_DV_PRESET _IOWR('V', 84, struct v4l2_dv_preset) | ||
1739 | #define VIDIOC_G_DV_PRESET _IOWR('V', 85, struct v4l2_dv_preset) | ||
1740 | #define VIDIOC_QUERY_DV_PRESET _IOR('V', 86, struct v4l2_dv_preset) | ||
1741 | #define VIDIOC_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings) | ||
1742 | #define VIDIOC_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings) | ||
1743 | |||
1627 | /* Reminder: when adding new ioctls please add support for them to | 1744 | /* Reminder: when adding new ioctls please add support for them to |
1628 | drivers/media/video/v4l2-compat-ioctl32.c as well! */ | 1745 | drivers/media/video/v4l2-compat-ioctl32.c as well! */ |
1629 | 1746 | ||
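
The expected userspace flow for the new preset ioctls is roughly: query what the receiver currently detects, then program that preset. A hedged sketch, with error handling reduced to return codes.

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Sketch: query the detected preset and switch the device to it. */
static int example_set_detected_preset(int fd)
{
	struct v4l2_dv_preset preset = { 0 };

	if (ioctl(fd, VIDIOC_QUERY_DV_PRESET, &preset) < 0)
		return -1;
	if (preset.preset == V4L2_DV_INVALID)
		return -1;		/* no stable signal detected */
	return ioctl(fd, VIDIOC_S_DV_PRESET, &preset);
}
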
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index d85889710f9b..ee03bba9c5df 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -40,6 +40,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | |||
40 | PGSCAN_ZONE_RECLAIM_FAILED, | 40 | PGSCAN_ZONE_RECLAIM_FAILED, |
41 | #endif | 41 | #endif |
42 | PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, | 42 | PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, |
43 | KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, | ||
44 | KSWAPD_SKIP_CONGESTION_WAIT, | ||
43 | PAGEOUTRUN, ALLOCSTALL, PGROTATED, | 45 | PAGEOUTRUN, ALLOCSTALL, PGROTATED, |
44 | #ifdef CONFIG_HUGETLB_PAGE | 46 | #ifdef CONFIG_HUGETLB_PAGE |
45 | HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, | 47 | HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, |
diff --git a/include/linux/vt.h b/include/linux/vt.h index 7ffa11f06232..d5dd0bc408fd 100644 --- a/include/linux/vt.h +++ b/include/linux/vt.h | |||
@@ -84,4 +84,23 @@ struct vt_setactivate { | |||
84 | 84 | ||
85 | #define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ | 85 | #define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ |
86 | 86 | ||
87 | #ifdef __KERNEL__ | ||
88 | |||
89 | #ifdef CONFIG_VT_CONSOLE | ||
90 | |||
91 | extern int vt_kmsg_redirect(int new); | ||
92 | |||
93 | #else | ||
94 | |||
95 | static inline int vt_kmsg_redirect(int new) | ||
96 | { | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | #endif | ||
101 | |||
102 | #endif /* __KERNEL__ */ | ||
103 | |||
104 | #define vt_get_kmsg_redirect() vt_kmsg_redirect(-1) | ||
105 | |||
87 | #endif /* _LINUX_VT_H */ | 106 | #endif /* _LINUX_VT_H */ |
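
A sketch of the intended calling convention for the new helper, in place of direct pokes at the removed kmsg_redirect variable; the wrapper function and pr_info messages are illustrative.

/* Sketch: passing a console number installs a new redirect target and
 * returns the previous one; vt_get_kmsg_redirect() (i.e. passing -1)
 * only queries the current setting. */
static void example_redirect_kmsg(int vt)
{
	int old = vt_kmsg_redirect(vt);	/* redirect kernel messages to 'vt' */

	if (old)
		pr_info("kmsg was previously redirected to vt%d\n", old);

	/* query without changing anything */
	pr_info("kmsg now goes to vt%d\n", vt_get_kmsg_redirect());
}
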
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 705f01fe413a..76e8903cd204 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -70,6 +70,7 @@ struct writeback_control { | |||
70 | struct bdi_writeback; | 70 | struct bdi_writeback; |
71 | int inode_wait(void *); | 71 | int inode_wait(void *); |
72 | void writeback_inodes_sb(struct super_block *); | 72 | void writeback_inodes_sb(struct super_block *); |
73 | int writeback_inodes_sb_if_idle(struct super_block *); | ||
73 | void sync_inodes_sb(struct super_block *); | 74 | void sync_inodes_sb(struct super_block *); |
74 | void writeback_inodes_wbc(struct writeback_control *wbc); | 75 | void writeback_inodes_wbc(struct writeback_control *wbc); |
75 | long wb_do_writeback(struct bdi_writeback *wb, int force_wait); | 76 | long wb_do_writeback(struct bdi_writeback *wb, int force_wait); |
@@ -79,8 +80,7 @@ void wakeup_flusher_threads(long nr_pages); | |||
79 | static inline void wait_on_inode(struct inode *inode) | 80 | static inline void wait_on_inode(struct inode *inode) |
80 | { | 81 | { |
81 | might_sleep(); | 82 | might_sleep(); |
82 | wait_on_bit(&inode->i_state, __I_LOCK, inode_wait, | 83 | wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE); |
83 | TASK_UNINTERRUPTIBLE); | ||
84 | } | 84 | } |
85 | static inline void inode_sync_wait(struct inode *inode) | 85 | static inline void inode_sync_wait(struct inode *inode) |
86 | { | 86 | { |
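
A hedged sketch of how a filesystem might use the new helper to nudge writeback only when the flusher is otherwise idle; that the return value indicates whether writeback was actually started is an assumption here.

/* Sketch: kick background writeback before blocking for free space,
 * but only if the backing device is not already busy writing. */
static void example_prepare_for_reservation(struct super_block *sb)
{
	if (writeback_inodes_sb_if_idle(sb))	/* assumed: nonzero = started */
		pr_debug("started writeback on %s\n", sb->s_id);
}
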
diff --git a/include/linux/xattr.h b/include/linux/xattr.h index 5c84af8c5f6f..fb9b7e6e1e2d 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h | |||
@@ -38,12 +38,13 @@ struct dentry; | |||
38 | 38 | ||
39 | struct xattr_handler { | 39 | struct xattr_handler { |
40 | char *prefix; | 40 | char *prefix; |
41 | size_t (*list)(struct inode *inode, char *list, size_t list_size, | 41 | int flags; /* fs private flags passed back to the handlers */ |
42 | const char *name, size_t name_len); | 42 | size_t (*list)(struct dentry *dentry, char *list, size_t list_size, |
43 | int (*get)(struct inode *inode, const char *name, void *buffer, | 43 | const char *name, size_t name_len, int handler_flags); |
44 | size_t size); | 44 | int (*get)(struct dentry *dentry, const char *name, void *buffer, |
45 | int (*set)(struct inode *inode, const char *name, const void *buffer, | 45 | size_t size, int handler_flags); |
46 | size_t size, int flags); | 46 | int (*set)(struct dentry *dentry, const char *name, const void *buffer, |
47 | size_t size, int flags, int handler_flags); | ||
47 | }; | 48 | }; |
48 | 49 | ||
49 | ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); | 50 | ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); |
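
A minimal sketch of a handler adapted to the new dentry-based, flag-carrying callbacks; the choice of the "user." prefix and the stub bodies are placeholders, not a real filesystem's implementation.

/* Sketch: a filesystem handler under the new interface. The fs-private
 * .flags value is handed back to each callback as handler_flags. */
static int example_xattr_get(struct dentry *dentry, const char *name,
			     void *buffer, size_t size, int handler_flags)
{
	/* look the attribute up on dentry->d_inode here */
	return -ENODATA;
}

static int example_xattr_set(struct dentry *dentry, const char *name,
			     const void *buffer, size_t size, int flags,
			     int handler_flags)
{
	/* store the attribute; 'flags' carries XATTR_CREATE/XATTR_REPLACE */
	return 0;
}

static struct xattr_handler example_xattr_handler = {
	.prefix = XATTR_USER_PREFIX,
	.flags  = 0,
	.get    = example_xattr_get,
	.set    = example_xattr_set,
};
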