Diffstat (limited to 'include/linux')
98 files changed, 2758 insertions, 817 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 3f0eaa397ef5..b3afd2219ad2 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -135,6 +135,7 @@ header-y += posix_types.h
 header-y += ppdev.h
 header-y += prctl.h
 header-y += qnxtypes.h
+header-y += qnx4_fs.h
 header-y += radeonfb.h
 header-y += raw.h
 header-y += resource.h
@@ -308,7 +309,6 @@ unifdef-y += poll.h
 unifdef-y += ppp_defs.h
 unifdef-y += ppp-comp.h
 unifdef-y += ptrace.h
-unifdef-y += qnx4_fs.h
 unifdef-y += quota.h
 unifdef-y += random.h
 unifdef-y += irqnr.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 88be890ee3c7..51b4b0a5ce8c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -119,7 +119,7 @@ extern int pci_mmcfg_config_num;
 extern int sbf_port;
 extern unsigned long acpi_realmode_flags;

-int acpi_register_gsi (u32 gsi, int triggering, int polarity);
+int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);

 #ifdef CONFIG_X86_IO_APIC
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b214fd672a2..12737be58601 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,12 +218,12 @@ struct bio {
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
 #define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))

-static inline unsigned int bio_cur_sectors(struct bio *bio)
+static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
 	if (bio->bi_vcnt)
-		return bio_iovec(bio)->bv_len >> 9;
+		return bio_iovec(bio)->bv_len;
 	else /* dataless requests such as discard */
-		return bio->bi_size >> 9;
+		return bio->bi_size;
 }

 static inline void *bio_data(struct bio *bio)
@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
 	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
-	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 #define BIO_SEG_BOUNDARY(q, b1, b2) \
 	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))

@@ -506,7 +506,7 @@ static inline int bio_has_data(struct bio *bio)
 }

 /*
- * BIO list managment for use by remapping drivers (e.g. DM or MD).
+ * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
  * A bio_list anchors a singly-linked list of bios chained through the bi_next
  * member of the bio. The bio_list also caches the last list member to allow
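
The bio_list comment above documents the list helpers defined immediately below it in bio.h. A minimal usage sketch, not part of this patch, of the pattern it describes (a remapping driver queuing bios for later submission); deferred_bios, defer_bio(), drain_deferred_bios() and handle_bio() are illustrative names only, and locking is omitted:

#include <linux/bio.h>

static void handle_bio(struct bio *bio);	/* illustrative callback */

/* static zero-initialisation is equivalent to bio_list_init() */
static struct bio_list deferred_bios;

static void defer_bio(struct bio *bio)
{
	bio_list_add(&deferred_bios, bio);	/* append; bi_next is the list link */
}

static void drain_deferred_bios(void)
{
	struct bio *bio;

	while ((bio = bio_list_pop(&deferred_bios)) != NULL)	/* FIFO drain */
		handle_bio(bio);
}
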
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4f71f1a4af7..0b1a6cae9de1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -166,19 +166,9 @@ struct request {
 	enum rq_cmd_type_bits cmd_type;
 	unsigned long atomic_flags;

-	/* Maintain bio traversal state for part by part I/O submission.
-	 * hard_* are block layer internals, no driver should touch them!
-	 */
-
-	sector_t sector;		/* next sector to submit */
-	sector_t hard_sector;		/* next sector to complete */
-	unsigned long nr_sectors;	/* no. of sectors left to submit */
-	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
-	/* no. of sectors left to submit in the current segment */
-	unsigned int current_nr_sectors;
-
-	/* no. of sectors left to complete in the current segment */
-	unsigned int hard_cur_sectors;
+	/* the following two fields are internal, NEVER access directly */
+	sector_t __sector;		/* sector cursor */
+	unsigned int __data_len;	/* total data len */

 	struct bio *bio;
 	struct bio *biotail;
@@ -211,8 +201,8 @@ struct request {

 	unsigned short ioprio;

-	void *special;
-	char *buffer;
+	void *special;		/* opaque pointer available for LLD use */
+	char *buffer;		/* kaddr of the current segment if available */

 	int tag;
 	int errors;
@@ -226,10 +216,9 @@ struct request {
 	unsigned char __cmd[BLK_MAX_CDB];
 	unsigned char *cmd;

-	unsigned int data_len;
 	unsigned int extra_len;	/* length of alignment and padding */
 	unsigned int sense_len;
-	void *data;
+	unsigned int resid_len;	/* residual count */
 	void *sense;

 	unsigned long deadline;
@@ -318,6 +307,26 @@ struct blk_cmd_filter {
 	struct kobject kobj;
 };

+struct queue_limits {
+	unsigned long		bounce_pfn;
+	unsigned long		seg_boundary_mask;
+
+	unsigned int		max_hw_sectors;
+	unsigned int		max_sectors;
+	unsigned int		max_segment_size;
+	unsigned int		physical_block_size;
+	unsigned int		alignment_offset;
+	unsigned int		io_min;
+	unsigned int		io_opt;
+
+	unsigned short		logical_block_size;
+	unsigned short		max_hw_segments;
+	unsigned short		max_phys_segments;
+
+	unsigned char		misaligned;
+	unsigned char		no_cluster;
+};
+
 struct request_queue
 {
 	/*
@@ -369,7 +378,6 @@ struct request_queue
 	/*
 	 * queue needs bounce pages for pages above this limit
 	 */
-	unsigned long		bounce_pfn;
 	gfp_t			bounce_gfp;

 	/*
@@ -398,14 +406,6 @@ struct request_queue
 	unsigned int		nr_congestion_off;
 	unsigned int		nr_batching;

-	unsigned int		max_sectors;
-	unsigned int		max_hw_sectors;
-	unsigned short		max_phys_segments;
-	unsigned short		max_hw_segments;
-	unsigned short		hardsect_size;
-	unsigned int		max_segment_size;
-
-	unsigned long		seg_boundary_mask;
 	void			*dma_drain_buffer;
 	unsigned int		dma_drain_size;
 	unsigned int		dma_pad_mask;
@@ -415,12 +415,14 @@ struct request_queue
 	struct list_head	tag_busy_list;

 	unsigned int		nr_sorted;
-	unsigned int		in_flight;
+	unsigned int		in_flight[2];

 	unsigned int		rq_timeout;
 	struct timer_list	timeout;
 	struct list_head	timeout_list;

+	struct queue_limits	limits;
+
 	/*
 	 * sg stuff
 	 */
@@ -522,6 +524,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
 	__clear_bit(flag, &q->queue_flags);
 }

+static inline int queue_in_flight(struct request_queue *q)
+{
+	return q->in_flight[0] + q->in_flight[1];
+}
+
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
 	WARN_ON_ONCE(!queue_is_locked(q));
@@ -752,10 +759,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+					gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+			     struct bio_set *bs, gfp_t gfp_mask,
+			     int (*bio_ctr)(struct bio *, struct bio *, void *),
+			     void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
 extern void blk_plug_device(struct request_queue *);
@@ -768,12 +782,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  struct scsi_ioctl_command __user *);

 /*
- * Temporary export, until SCSI gets fixed up.
- */
-extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
-			     struct bio *bio);
-
-/*
  * A queue has just exitted congestion.  Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
@@ -798,7 +806,6 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
@@ -831,41 +838,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
 		blk_run_backing_dev(mapping->backing_dev_info, NULL);
 }

-extern void blkdev_dequeue_request(struct request *req);
+/*
+ * blk_rq_pos()		: the current sector
+ * blk_rq_bytes()		: bytes left in the entire request
+ * blk_rq_cur_bytes()	: bytes left in the current segment
+ * blk_rq_sectors()		: sectors left in the entire request
+ * blk_rq_cur_sectors()	: sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+	return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+	return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+	return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+	return blk_rq_cur_bytes(rq) >> 9;
+}
+
+/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);

 /*
- * blk_end_request() and friends.
- * __blk_end_request() and end_request() must be called with
- * the request queue spinlock acquired.
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ *
+ * blk_end_request() and friends.  __blk_end_request() must be called
+ * with the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
  * blk_end_request() for parts of the original function.
  * This prevents code duplication in drivers.
  */
-extern int blk_end_request(struct request *rq, int error,
-				unsigned int nr_bytes);
-extern int __blk_end_request(struct request *rq, int error,
-				unsigned int nr_bytes);
-extern int blk_end_bidi_request(struct request *rq, int error,
-				unsigned int nr_bytes, unsigned int bidi_bytes);
-extern void end_request(struct request *, int);
-extern int blk_end_request_callback(struct request *rq, int error,
-				unsigned int nr_bytes,
-				int (drv_callback)(struct request *));
+extern bool blk_update_request(struct request *rq, int error,
+			       unsigned int nr_bytes);
+extern bool blk_end_request(struct request *rq, int error,
+			    unsigned int nr_bytes);
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+			      unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
+
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 extern void blk_abort_queue(struct request_queue *);
-extern void blk_update_request(struct request *rq, int error,
-			       unsigned int nr_bytes);
-
-/*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
- */
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);

 /*
  * Access functions for manipulating queue properties
@@ -877,10 +916,20 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+				       unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+			    sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
@@ -967,19 +1016,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);

 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+	return q->limits.bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+	return q->limits.seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+	return q->limits.max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+	return q->limits.max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+	return q->limits.max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+	return q->limits.max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+	return q->limits.max_segment_size;
+}
+
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
 	int retval = 512;

-	if (q && q->hardsect_size)
-		retval = q->hardsect_size;
+	if (q && q->limits.logical_block_size)
+		retval = q->limits.logical_block_size;

 	return retval;
 }

-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+{
+	return queue_logical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+	return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+	return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+	return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+	if (q && q->limits.misaligned)
+		return -1;
+
+	if (q && q->limits.alignment_offset)
+		return q->limits.alignment_offset;
+
+	return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+						sector_t sector)
 {
-	return queue_hardsect_size(bdev_get_queue(bdev));
+	return ((sector << 9) - q->limits.alignment_offset)
+		& (q->limits.io_min - 1);
 }

 static inline int queue_dma_alignment(struct request_queue *q)
@@ -1109,6 +1226,8 @@ struct block_device_operations {
 	int (*direct_access) (struct block_device *, sector_t,
						void **, unsigned long *);
 	int (*media_changed) (struct gendisk *);
+	unsigned long long (*set_capacity) (struct gendisk *,
+						unsigned long long);
 	int (*revalidate_disk) (struct gendisk *);
 	int (*getgeo)(struct block_device *, struct hd_geometry *);
 	struct module *owner;
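
The blkdev.h hunks above replace direct access to rq->sector/rq->hard_* and the old elv_next_request()/end_request() pair with the blk_rq_*() accessors and the peek/start/fetch plus end-request helpers. A minimal request_fn sketch, not part of this patch, of the intended driver pattern; example_request_fn() and do_transfer() are illustrative names:

#include <linux/blkdev.h>

static int do_transfer(char *buf, sector_t pos, unsigned int nbytes);	/* illustrative */

static void example_request_fn(struct request_queue *q)
{
	/* request_fn runs with the queue lock held, so the __blk_* variants apply */
	struct request *rq = blk_fetch_request(q);	/* peek + start in one call */

	while (rq) {
		int err = do_transfer(rq->buffer, blk_rq_pos(rq),
				      blk_rq_cur_bytes(rq));

		/* finish the current chunk; fetch the next request once rq is done */
		if (!__blk_end_request_cur(rq, err ? -EIO : 0))
			rq = blk_fetch_request(q);
	}
}
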
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index d960889e92ef..7e4350ece0f8 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -116,9 +116,9 @@ struct blk_io_trace {
  * The remap event
  */
 struct blk_io_trace_remap {
-	__be32 device;
 	__be32 device_from;
-	__be64 sector;
+	__be32 device_to;
+	__be64 sector_from;
 };

 enum {
@@ -165,8 +165,9 @@ struct blk_trace {

 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
 extern void blk_trace_shutdown(struct request_queue *);
-extern int do_blk_trace_setup(struct request_queue *q,
-	char *name, dev_t dev, struct blk_user_trace_setup *buts);
+extern int do_blk_trace_setup(struct request_queue *q, char *name,
+			      dev_t dev, struct block_device *bdev,
+			      struct blk_user_trace_setup *buts);
 extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);

 /**
@@ -193,22 +194,42 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
 extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
				void *data, size_t len);
 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+			   struct block_device *bdev,
			   char __user *arg);
 extern int blk_trace_startstop(struct request_queue *q, int start);
 extern int blk_trace_remove(struct request_queue *q);
+extern int blk_trace_init_sysfs(struct device *dev);

 extern struct attribute_group blk_trace_attr_group;

 #else /* !CONFIG_BLK_DEV_IO_TRACE */
-#define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
-#define blk_trace_shutdown(q)			do { } while (0)
-#define do_blk_trace_setup(q, name, dev, buts)	(-ENOTTY)
-#define blk_add_driver_data(q, rq, data, len)	do {} while (0)
-#define blk_trace_setup(q, name, dev, arg)	(-ENOTTY)
-#define blk_trace_startstop(q, start)		(-ENOTTY)
-#define blk_trace_remove(q)			(-ENOTTY)
-#define blk_add_trace_msg(q, fmt, ...)		do { } while (0)
+# define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
+# define blk_trace_shutdown(q)				do { } while (0)
+# define do_blk_trace_setup(q, name, dev, bdev, buts)	(-ENOTTY)
+# define blk_add_driver_data(q, rq, data, len)		do {} while (0)
+# define blk_trace_setup(q, name, dev, bdev, arg)	(-ENOTTY)
+# define blk_trace_startstop(q, start)			(-ENOTTY)
+# define blk_trace_remove(q)				(-ENOTTY)
+# define blk_add_trace_msg(q, fmt, ...)			do { } while (0)
+static inline int blk_trace_init_sysfs(struct device *dev)
+{
+	return 0;
+}

 #endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK)
+
+static inline int blk_cmd_buf_len(struct request *rq)
+{
+	return blk_pc_request(rq) ? rq->cmd_len * 3 : 1;
+}
+
+extern void blk_dump_cmd(char *buf, struct request *rq);
+extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
+extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
+
+#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
+
 #endif /* __KERNEL__ */
 #endif
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index fb4591977b03..f389e319a454 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -28,6 +28,8 @@ int cdev_add(struct cdev *, dev_t, unsigned);

 void cdev_del(struct cdev *);

+int cdev_index(struct inode *inode);
+
 void cd_forget(struct inode *);

 extern struct backing_dev_info directly_mappable_cdev_bdi;
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 5a40d14daa9f..c56457c8334e 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -288,7 +288,15 @@ static inline cycle_t clocksource_read(struct clocksource *cs)
  */
 static inline int clocksource_enable(struct clocksource *cs)
 {
-	return cs->enable ? cs->enable(cs) : 0;
+	int ret = 0;
+
+	if (cs->enable)
+		ret = cs->enable(cs);
+
+	/* save mult_orig on enable */
+	cs->mult_orig = cs->mult;
+
+	return ret;
 }

 /**
diff --git a/include/linux/compat.h b/include/linux/compat.h
index f2ded21f9a3c..af931ee43dd8 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -222,6 +222,8 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
 int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from);
 int get_compat_sigevent(struct sigevent *event,
		const struct compat_sigevent __user *u_event);
+long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
+				  struct compat_siginfo __user *uinfo);

 static inline int compat_timeval_compare(struct compat_timeval *lhs,
		struct compat_timeval *rhs)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 37bcb50a4d7c..04fb5135b4e1 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -261,6 +261,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define __section(S) __attribute__ ((__section__(#S)))
 #endif

+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
 /*
  * Prevent the compiler from merging or refetching accesses.  The compiler
  * is also forbidden from reordering successive instances of ACCESS_ONCE(),
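
__same_type() added above wraps __builtin_types_compatible_p() over typeof(), so it can compare two expressions or an expression against a bare type name. A small compile-time sketch, not part of this patch, of what it reports; same_type_demo() is an illustrative name:

#include <linux/compiler.h>
#include <linux/kernel.h>	/* BUILD_BUG_ON() */

static void same_type_demo(void)
{
	int x;
	int arr[4];

	(void)x;
	(void)arr;

	/* an expression can be checked against a type name */
	BUILD_BUG_ON(!__same_type(x, int));

	/* an array is not the same type as a pointer to its first element,
	 * which is what array-vs-pointer sanity checks build on */
	BUILD_BUG_ON(__same_type(arr, &arr[0]));
}
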
diff --git a/include/linux/cramfs_fs.h b/include/linux/cramfs_fs.h
index 3be4e5a27d82..6fc2bed368b8 100644
--- a/include/linux/cramfs_fs.h
+++ b/include/linux/cramfs_fs.h
@@ -2,9 +2,8 @@
 #define __CRAMFS_H

 #include <linux/types.h>
+#include <linux/magic.h>

-#define CRAMFS_MAGIC		0x28cd3d45	/* some random number */
-#define CRAMFS_MAGIC_WEND	0x453dcd28	/* magic number with the wrong endianess */
 #define CRAMFS_SIGNATURE	"Compressed ROMFS"

 /*
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 788850ba4e75..1fbdea4f08eb 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -142,19 +142,6 @@ struct CYZ_BOOT_CTRL {


 #ifndef DP_WINDOW_SIZE
-/* #include "cyclomz.h" */
-/****************** ****************** *******************/
-/*
- *	The data types defined below are used in all ZFIRM interface
- *	data structures. They accomodate differences between HW
- *	architectures and compilers.
- */
-
-typedef __u64 ucdouble;		/* 64 bits, unsigned */
-typedef __u32 uclong;		/* 32 bits, unsigned */
-typedef __u16 ucshort;		/* 16 bits, unsigned */
-typedef __u8 ucchar;		/* 8 bits, unsigned */
-
 /*
  *	Memory Window Sizes
  */
@@ -507,16 +494,20 @@ struct ZFW_CTRL {

 /* Per card data structure */
 struct cyclades_card {
 	void __iomem *base_addr;
-	void __iomem *ctl_addr;
-	int irq;
-	unsigned int num_chips;	/* 0 if card absent, -1 if Z/PCI, else Y */
-	unsigned int first_line;	/* minor number of first channel on card */
-	unsigned int nports;	/* Number of ports in the card */
-	int bus_index;		/* address shift - 0 for ISA, 1 for PCI */
-	int intr_enabled;	/* FW Interrupt flag - 0 disabled, 1 enabled */
-	spinlock_t card_lock;
-	struct cyclades_port *ports;
+	union {
+		void __iomem *p9050;
+		struct RUNTIME_9060 __iomem *p9060;
+	} ctl_addr;
+	int irq;
+	unsigned int num_chips;	/* 0 if card absent, -1 if Z/PCI, else Y */
+	unsigned int first_line;	/* minor number of first channel on card */
+	unsigned int nports;	/* Number of ports in the card */
+	int bus_index;		/* address shift - 0 for ISA, 1 for PCI */
+	int intr_enabled;	/* FW Interrupt flag - 0 disabled, 1 enabled */
+	u32 hw_ver;
+	spinlock_t card_lock;
+	struct cyclades_port *ports;
 };

 /***************************************
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 15156364d196..30b93b2a01a4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -180,10 +180,12 @@ d_iput:	no		no		no       yes
 #define DCACHE_REFERENCED	0x0008  /* Recently used, don't discard. */
 #define DCACHE_UNHASHED		0x0010

-#define DCACHE_INOTIFY_PARENT_WATCHED	0x0020 /* Parent inode is watched */
+#define DCACHE_INOTIFY_PARENT_WATCHED	0x0020 /* Parent inode is watched by inotify */

 #define DCACHE_COOKIE		0x0040	/* For use by dcookie subsystem */

+#define DCACHE_FSNOTIFY_PARENT_WATCHED	0x0080 /* Parent inode is watched by some fsnotify listener */
+
 extern spinlock_t dcache_lock;
 extern seqlock_t rename_lock;

@@ -351,6 +353,11 @@ static inline int d_unhashed(struct dentry *dentry)
 	return (dentry->d_flags & DCACHE_UNHASHED);
 }

+static inline int d_unlinked(struct dentry *dentry)
+{
+	return d_unhashed(dentry) && !IS_ROOT(dentry);
+}
+
 static inline struct dentry *dget_parent(struct dentry *dentry)
 {
 	struct dentry *ret;
@@ -368,7 +375,7 @@ static inline int d_mountpoint(struct dentry *dentry)
 	return dentry->d_mounted;
 }

-extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *);
+extern struct vfsmount *lookup_mnt(struct path *);
 extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);

 extern int sysctl_vfs_cache_pressure;
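
d_unlinked() added above names the test that callers previously open-coded as d_unhashed(dentry) && !IS_ROOT(dentry): true for a dentry whose name was removed while it is still in use, false for filesystem roots, which are unhashed by design. A small sketch, not part of this patch; example_mark_deleted() is an illustrative name:

#include <linux/dcache.h>
#include <linux/seq_file.h>

static void example_mark_deleted(struct dentry *dentry, struct seq_file *m)
{
	if (d_unlinked(dentry))			/* unhashed but not a root */
		seq_puts(m, " (deleted)");	/* flag paths whose name is gone */
}
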
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ded2d7c42668..49c2362977fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -149,7 +149,7 @@ struct io_restrictions {
 	unsigned max_hw_sectors;
 	unsigned max_sectors;
 	unsigned max_segment_size;
-	unsigned short hardsect_size;
+	unsigned short logical_block_size;
 	unsigned short max_hw_segments;
 	unsigned short max_phys_segments;
 	unsigned char no_cluster; /* inverted so that 0 is default */
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index 28d53cb7b5a2..171ad8aedc83 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -32,6 +32,8 @@ extern void dma_debug_add_bus(struct bus_type *bus);

 extern void dma_debug_init(u32 num_entries);

+extern int dma_debug_resize_entries(u32 num_entries);
+
 extern void debug_dma_map_page(struct device *dev, struct page *page,
			       size_t offset, size_t size,
			       int direction, dma_addr_t dma_addr,
@@ -91,6 +93,11 @@ static inline void dma_debug_init(u32 num_entries)
 {
 }

+static inline int dma_debug_resize_entries(u32 num_entries)
+{
+	return 0;
+}
+
 static inline void debug_dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction, dma_addr_t dma_addr,
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e397dc342cda..10ff5c498824 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -108,6 +108,7 @@ struct irte {
 };
 #ifdef CONFIG_INTR_REMAP
 extern int intr_remapping_enabled;
+extern int intr_remapping_supported(void);
 extern int enable_intr_remapping(int);
 extern void disable_intr_remapping(void);
 extern int reenable_intr_remapping(int);
@@ -157,6 +158,8 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic)
 }
 #define irq_remapped(irq)		(0)
 #define enable_intr_remapping(mode)	(-1)
+#define disable_intr_remapping()	(0)
+#define reenable_intr_remapping(mode)	(0)
 #define intr_remapping_enabled		(0)
 #endif

diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index 102a902b4396..ecc06286226d 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -10,7 +10,7 @@

 struct dnotify_struct {
 	struct dnotify_struct *	dn_next;
-	unsigned long		dn_mask;
+	__u32			dn_mask;
 	int			dn_fd;
 	struct file *		dn_filp;
 	fl_owner_t		dn_owner;
@@ -21,23 +21,18 @@ struct dnotify_struct {

 #ifdef CONFIG_DNOTIFY

-extern void __inode_dir_notify(struct inode *, unsigned long);
+#define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\
+			    FS_MODIFY | FS_MODIFY_CHILD |\
+			    FS_ACCESS | FS_ACCESS_CHILD |\
+			    FS_ATTRIB | FS_ATTRIB_CHILD |\
+			    FS_CREATE | FS_DN_RENAME |\
+			    FS_MOVED_FROM | FS_MOVED_TO)
+
 extern void dnotify_flush(struct file *, fl_owner_t);
 extern int fcntl_dirnotify(int, struct file *, unsigned long);
-extern void dnotify_parent(struct dentry *, unsigned long);
-
-static inline void inode_dir_notify(struct inode *inode, unsigned long event)
-{
-	if (inode->i_dnotify_mask & (event))
-		__inode_dir_notify(inode, event);
-}

 #else

-static inline void __inode_dir_notify(struct inode *inode, unsigned long event)
-{
-}
-
 static inline void dnotify_flush(struct file *filp, fl_owner_t id)
 {
 }
@@ -47,14 +42,6 @@ static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
 	return -EINVAL;
 }

-static inline void dnotify_parent(struct dentry *dentry, unsigned long event)
-{
-}
-
-static inline void inode_dir_notify(struct inode *inode, unsigned long event)
-{
-}
-
 #endif	/* CONFIG_DNOTIFY */

 #endif	/* __KERNEL __ */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c59b769f62b0..1cb3372e65d8 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *);
 extern void elv_merge_requests(struct request_queue *, struct request *,
			       struct request *);
 extern void elv_merged_request(struct request_queue *, struct request *, int);
-extern void elv_dequeue_request(struct request_queue *, struct request *);
 extern void elv_requeue_request(struct request_queue *, struct request *);
 extern int elv_queue_empty(struct request_queue *);
-extern struct request *elv_next_request(struct request_queue *q);
 extern struct request *elv_former_request(struct request_queue *, struct request *);
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
 extern int elv_register_queue(struct request_queue *q);
@@ -171,7 +169,7 @@ enum {
 	ELV_MQUEUE_MUST,
 };

-#define rq_end_sector(rq)	((rq)->sector + (rq)->nr_sectors)
+#define rq_end_sector(rq)	(blk_rq_pos(rq) + blk_rq_sectors(rq))
 #define rb_entry_rq(node)	rb_entry((node), struct request, rb_node)

 /*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3b534e527e09..ede84fa7da5d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -729,8 +729,8 @@ struct inode {
 	struct timespec		i_atime;
 	struct timespec		i_mtime;
 	struct timespec		i_ctime;
-	unsigned int		i_blkbits;
 	blkcnt_t		i_blocks;
+	unsigned int		i_blkbits;
 	unsigned short		i_bytes;
 	umode_t			i_mode;
 	spinlock_t		i_lock;	/* i_blocks, i_bytes, maybe i_size */
@@ -751,13 +751,12 @@ struct inode {
 		struct block_device	*i_bdev;
 		struct cdev		*i_cdev;
 	};
-	int			i_cindex;

 	__u32			i_generation;

-#ifdef CONFIG_DNOTIFY
-	unsigned long		i_dnotify_mask; /* Directory notify events */
-	struct dnotify_struct	*i_dnotify; /* for directory notifications */
+#ifdef CONFIG_FSNOTIFY
+	__u32			i_fsnotify_mask; /* all events this inode cares about */
+	struct hlist_head	i_fsnotify_mark_entries; /* fsnotify mark entries */
 #endif

 #ifdef CONFIG_INOTIFY
@@ -1321,7 +1320,7 @@ struct super_block {
 	struct rw_semaphore	s_umount;
 	struct mutex		s_lock;
 	int			s_count;
-	int			s_need_sync_fs;
+	int			s_need_sync;
 	atomic_t		s_active;
 #ifdef CONFIG_SECURITY
 	void			*s_security;
@@ -1372,11 +1371,6 @@ struct super_block {
 	 * generic_show_options()
 	 */
 	char *s_options;
-
-	/*
-	 * storage for asynchronous operations
-	 */
-	struct list_head s_async_list;
 };

 extern struct timespec current_fs_time(struct super_block *sb);
@@ -1800,7 +1794,7 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
 extern int may_umount_tree(struct vfsmount *);
 extern int may_umount(struct vfsmount *);
 extern long do_mount(char *, char *, char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *);
+extern struct vfsmount *collect_mounts(struct path *);
 extern void drop_collected_mounts(struct vfsmount *);

 extern int vfs_statfs(struct dentry *, struct kstatfs *);
@@ -1947,8 +1941,6 @@ extern struct super_block *freeze_bdev(struct block_device *);
 extern void emergency_thaw_all(void);
 extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
 extern int fsync_bdev(struct block_device *);
-extern int fsync_super(struct super_block *);
-extern int fsync_no_super(struct block_device *);
 #else
 static inline void bd_forget(struct inode *inode) {}
 static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -1964,6 +1956,7 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
 	return 0;
 }
 #endif
+extern int sync_filesystem(struct super_block *);
 extern const struct file_operations def_blk_fops;
 extern const struct file_operations def_chr_fops;
 extern const struct file_operations bad_sock_fops;
@@ -2082,12 +2075,8 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,

 extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync);
 extern void sync_supers(void);
-extern void sync_filesystems(int wait);
-extern void __fsync_super(struct super_block *sb);
 extern void emergency_sync(void);
 extern void emergency_remount(void);
-extern int do_remount_sb(struct super_block *sb, int flags,
-			 void *data, int force);
 #ifdef CONFIG_BLOCK
 extern sector_t bmap(struct inode *, sector_t);
 #endif
@@ -2205,6 +2194,8 @@ extern int generic_segment_checks(const struct iovec *iov,
 /* fs/splice.c */
 extern ssize_t generic_file_splice_read(struct file *, loff_t *,
		struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t default_file_splice_read(struct file *, loff_t *,
+		struct pipe_inode_info *, size_t, unsigned int);
 extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
		struct file *, loff_t *, size_t, unsigned int);
 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
@@ -2354,6 +2345,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count);
 extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
			loff_t *ppos, const void *from, size_t available);

+extern int simple_fsync(struct file *, struct dentry *, int);
+
 #ifdef CONFIG_MIGRATION
 extern int buffer_migrate_page(struct address_space *,
				struct page *, struct page *);
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 00fbd5b245c9..936f9aa8bb97 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -13,6 +13,7 @@

 #include <linux/dnotify.h>
 #include <linux/inotify.h>
+#include <linux/fsnotify_backend.h>
 #include <linux/audit.h>

 /*
@@ -22,19 +23,45 @@
 static inline void fsnotify_d_instantiate(struct dentry *entry,
						struct inode *inode)
 {
+	__fsnotify_d_instantiate(entry, inode);
+
 	inotify_d_instantiate(entry, inode);
 }

+/* Notify this dentry's parent about a child's events. */
+static inline void fsnotify_parent(struct dentry *dentry, __u32 mask)
+{
+	__fsnotify_parent(dentry, mask);
+
+	inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
+}
+
 /*
  * fsnotify_d_move - entry has been moved
  * Called with dcache_lock and entry->d_lock held.
  */
 static inline void fsnotify_d_move(struct dentry *entry)
 {
+	/*
+	 * On move we need to update entry->d_flags to indicate if the new parent
+	 * cares about events from this entry.
+	 */
+	__fsnotify_update_dcache_flags(entry);
+
 	inotify_d_move(entry);
 }

 /*
+ * fsnotify_link_count - inode's link count changed
+ */
+static inline void fsnotify_link_count(struct inode *inode)
+{
+	inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
+
+	fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+}
+
+/*
  * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
  */
 static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
@@ -42,42 +69,62 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, | |||
42 | int isdir, struct inode *target, struct dentry *moved) | 69 | int isdir, struct inode *target, struct dentry *moved) |
43 | { | 70 | { |
44 | struct inode *source = moved->d_inode; | 71 | struct inode *source = moved->d_inode; |
45 | u32 cookie = inotify_get_cookie(); | 72 | u32 in_cookie = inotify_get_cookie(); |
73 | u32 fs_cookie = fsnotify_get_cookie(); | ||
74 | __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM); | ||
75 | __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO); | ||
46 | 76 | ||
47 | if (old_dir == new_dir) | 77 | if (old_dir == new_dir) |
48 | inode_dir_notify(old_dir, DN_RENAME); | 78 | old_dir_mask |= FS_DN_RENAME; |
49 | else { | ||
50 | inode_dir_notify(old_dir, DN_DELETE); | ||
51 | inode_dir_notify(new_dir, DN_CREATE); | ||
52 | } | ||
53 | 79 | ||
54 | if (isdir) | 80 | if (isdir) { |
55 | isdir = IN_ISDIR; | 81 | isdir = IN_ISDIR; |
56 | inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name, | 82 | old_dir_mask |= FS_IN_ISDIR; |
83 | new_dir_mask |= FS_IN_ISDIR; | ||
84 | } | ||
85 | |||
86 | inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name, | ||
57 | source); | 87 | source); |
58 | inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name, | 88 | inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name, |
59 | source); | 89 | source); |
60 | 90 | ||
91 | fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie); | ||
92 | fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie); | ||
93 | |||
61 | if (target) { | 94 | if (target) { |
62 | inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL); | 95 | inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL); |
63 | inotify_inode_is_dead(target); | 96 | inotify_inode_is_dead(target); |
97 | |||
98 | /* this is really a link_count change not a removal */ | ||
99 | fsnotify_link_count(target); | ||
64 | } | 100 | } |
65 | 101 | ||
66 | if (source) { | 102 | if (source) { |
67 | inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); | 103 | inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); |
104 | fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
68 | } | 105 | } |
69 | audit_inode_child(new_name, moved, new_dir); | 106 | audit_inode_child(new_name, moved, new_dir); |
70 | } | 107 | } |
71 | 108 | ||
72 | /* | 109 | /* |
110 | * fsnotify_inode_delete - an inode is being evicted from cache, cleanup is needed | ||
111 | */ | ||
112 | static inline void fsnotify_inode_delete(struct inode *inode) | ||
113 | { | ||
114 | __fsnotify_inode_delete(inode); | ||
115 | } | ||
116 | |||
117 | /* | ||
73 | * fsnotify_nameremove - a filename was removed from a directory | 118 | * fsnotify_nameremove - a filename was removed from a directory |
74 | */ | 119 | */ |
75 | static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) | 120 | static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) |
76 | { | 121 | { |
122 | __u32 mask = FS_DELETE; | ||
123 | |||
77 | if (isdir) | 124 | if (isdir) |
78 | isdir = IN_ISDIR; | 125 | mask |= FS_IN_ISDIR; |
79 | dnotify_parent(dentry, DN_DELETE); | 126 | |
80 | inotify_dentry_parent_queue_event(dentry, IN_DELETE|isdir, 0, dentry->d_name.name); | 127 | fsnotify_parent(dentry, mask); |
81 | } | 128 | } |
82 | 129 | ||
83 | /* | 130 | /* |
@@ -87,14 +134,9 @@ static inline void fsnotify_inoderemove(struct inode *inode) | |||
87 | { | 134 | { |
88 | inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL); | 135 | inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL); |
89 | inotify_inode_is_dead(inode); | 136 | inotify_inode_is_dead(inode); |
90 | } | ||
91 | 137 | ||
92 | /* | 138 | fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0); |
93 | * fsnotify_link_count - inode's link count changed | 139 | __fsnotify_inode_delete(inode); |
94 | */ | ||
95 | static inline void fsnotify_link_count(struct inode *inode) | ||
96 | { | ||
97 | inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL); | ||
98 | } | 140 | } |
99 | 141 | ||
100 | /* | 142 | /* |
@@ -102,10 +144,11 @@ static inline void fsnotify_link_count(struct inode *inode) | |||
102 | */ | 144 | */ |
103 | static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) | 145 | static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) |
104 | { | 146 | { |
105 | inode_dir_notify(inode, DN_CREATE); | ||
106 | inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, | 147 | inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, |
107 | dentry->d_inode); | 148 | dentry->d_inode); |
108 | audit_inode_child(dentry->d_name.name, dentry, inode); | 149 | audit_inode_child(dentry->d_name.name, dentry, inode); |
150 | |||
151 | fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); | ||
109 | } | 152 | } |
110 | 153 | ||
111 | /* | 154 | /* |
@@ -115,11 +158,12 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) | |||
115 | */ | 158 | */ |
116 | static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) | 159 | static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) |
117 | { | 160 | { |
118 | inode_dir_notify(dir, DN_CREATE); | ||
119 | inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name, | 161 | inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name, |
120 | inode); | 162 | inode); |
121 | fsnotify_link_count(inode); | 163 | fsnotify_link_count(inode); |
122 | audit_inode_child(new_dentry->d_name.name, new_dentry, dir); | 164 | audit_inode_child(new_dentry->d_name.name, new_dentry, dir); |
165 | |||
166 | fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0); | ||
123 | } | 167 | } |
124 | 168 | ||
125 | /* | 169 | /* |
@@ -127,10 +171,13 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct | |||
127 | */ | 171 | */ |
128 | static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) | 172 | static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) |
129 | { | 173 | { |
130 | inode_dir_notify(inode, DN_CREATE); | 174 | __u32 mask = (FS_CREATE | FS_IN_ISDIR); |
131 | inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, | 175 | struct inode *d_inode = dentry->d_inode; |
132 | dentry->d_name.name, dentry->d_inode); | 176 | |
177 | inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode); | ||
133 | audit_inode_child(dentry->d_name.name, dentry, inode); | 178 | audit_inode_child(dentry->d_name.name, dentry, inode); |
179 | |||
180 | fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); | ||
134 | } | 181 | } |
135 | 182 | ||
136 | /* | 183 | /* |
@@ -139,14 +186,15 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) | |||
139 | static inline void fsnotify_access(struct dentry *dentry) | 186 | static inline void fsnotify_access(struct dentry *dentry) |
140 | { | 187 | { |
141 | struct inode *inode = dentry->d_inode; | 188 | struct inode *inode = dentry->d_inode; |
142 | u32 mask = IN_ACCESS; | 189 | __u32 mask = FS_ACCESS; |
143 | 190 | ||
144 | if (S_ISDIR(inode->i_mode)) | 191 | if (S_ISDIR(inode->i_mode)) |
145 | mask |= IN_ISDIR; | 192 | mask |= FS_IN_ISDIR; |
146 | 193 | ||
147 | dnotify_parent(dentry, DN_ACCESS); | ||
148 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | ||
149 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 194 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
195 | |||
196 | fsnotify_parent(dentry, mask); | ||
197 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
150 | } | 198 | } |
151 | 199 | ||
152 | /* | 200 | /* |
@@ -155,14 +203,15 @@ static inline void fsnotify_access(struct dentry *dentry) | |||
155 | static inline void fsnotify_modify(struct dentry *dentry) | 203 | static inline void fsnotify_modify(struct dentry *dentry) |
156 | { | 204 | { |
157 | struct inode *inode = dentry->d_inode; | 205 | struct inode *inode = dentry->d_inode; |
158 | u32 mask = IN_MODIFY; | 206 | __u32 mask = FS_MODIFY; |
159 | 207 | ||
160 | if (S_ISDIR(inode->i_mode)) | 208 | if (S_ISDIR(inode->i_mode)) |
161 | mask |= IN_ISDIR; | 209 | mask |= FS_IN_ISDIR; |
162 | 210 | ||
163 | dnotify_parent(dentry, DN_MODIFY); | ||
164 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | ||
165 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 211 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
212 | |||
213 | fsnotify_parent(dentry, mask); | ||
214 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
166 | } | 215 | } |
167 | 216 | ||
168 | /* | 217 | /* |
@@ -171,13 +220,15 @@ static inline void fsnotify_modify(struct dentry *dentry) | |||
171 | static inline void fsnotify_open(struct dentry *dentry) | 220 | static inline void fsnotify_open(struct dentry *dentry) |
172 | { | 221 | { |
173 | struct inode *inode = dentry->d_inode; | 222 | struct inode *inode = dentry->d_inode; |
174 | u32 mask = IN_OPEN; | 223 | __u32 mask = FS_OPEN; |
175 | 224 | ||
176 | if (S_ISDIR(inode->i_mode)) | 225 | if (S_ISDIR(inode->i_mode)) |
177 | mask |= IN_ISDIR; | 226 | mask |= FS_IN_ISDIR; |
178 | 227 | ||
179 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | ||
180 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 228 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
229 | |||
230 | fsnotify_parent(dentry, mask); | ||
231 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
181 | } | 232 | } |
182 | 233 | ||
183 | /* | 234 | /* |
@@ -187,15 +238,16 @@ static inline void fsnotify_close(struct file *file) | |||
187 | { | 238 | { |
188 | struct dentry *dentry = file->f_path.dentry; | 239 | struct dentry *dentry = file->f_path.dentry; |
189 | struct inode *inode = dentry->d_inode; | 240 | struct inode *inode = dentry->d_inode; |
190 | const char *name = dentry->d_name.name; | ||
191 | fmode_t mode = file->f_mode; | 241 | fmode_t mode = file->f_mode; |
192 | u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE; | 242 | __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; |
193 | 243 | ||
194 | if (S_ISDIR(inode->i_mode)) | 244 | if (S_ISDIR(inode->i_mode)) |
195 | mask |= IN_ISDIR; | 245 | mask |= FS_IN_ISDIR; |
196 | 246 | ||
197 | inotify_dentry_parent_queue_event(dentry, mask, 0, name); | ||
198 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 247 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
248 | |||
249 | fsnotify_parent(dentry, mask); | ||
250 | fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0); | ||
199 | } | 251 | } |
200 | 252 | ||
201 | /* | 253 | /* |
@@ -204,13 +256,15 @@ static inline void fsnotify_close(struct file *file) | |||
204 | static inline void fsnotify_xattr(struct dentry *dentry) | 256 | static inline void fsnotify_xattr(struct dentry *dentry) |
205 | { | 257 | { |
206 | struct inode *inode = dentry->d_inode; | 258 | struct inode *inode = dentry->d_inode; |
207 | u32 mask = IN_ATTRIB; | 259 | __u32 mask = FS_ATTRIB; |
208 | 260 | ||
209 | if (S_ISDIR(inode->i_mode)) | 261 | if (S_ISDIR(inode->i_mode)) |
210 | mask |= IN_ISDIR; | 262 | mask |= FS_IN_ISDIR; |
211 | 263 | ||
212 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | ||
213 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 264 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
265 | |||
266 | fsnotify_parent(dentry, mask); | ||
267 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
214 | } | 268 | } |
215 | 269 | ||
216 | /* | 270 | /* |
@@ -220,50 +274,37 @@ static inline void fsnotify_xattr(struct dentry *dentry) | |||
220 | static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) | 274 | static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) |
221 | { | 275 | { |
222 | struct inode *inode = dentry->d_inode; | 276 | struct inode *inode = dentry->d_inode; |
223 | int dn_mask = 0; | 277 | __u32 mask = 0; |
224 | u32 in_mask = 0; | 278 | |
279 | if (ia_valid & ATTR_UID) | ||
280 | mask |= FS_ATTRIB; | ||
281 | if (ia_valid & ATTR_GID) | ||
282 | mask |= FS_ATTRIB; | ||
283 | if (ia_valid & ATTR_SIZE) | ||
284 | mask |= FS_MODIFY; | ||
225 | 285 | ||
226 | if (ia_valid & ATTR_UID) { | ||
227 | in_mask |= IN_ATTRIB; | ||
228 | dn_mask |= DN_ATTRIB; | ||
229 | } | ||
230 | if (ia_valid & ATTR_GID) { | ||
231 | in_mask |= IN_ATTRIB; | ||
232 | dn_mask |= DN_ATTRIB; | ||
233 | } | ||
234 | if (ia_valid & ATTR_SIZE) { | ||
235 | in_mask |= IN_MODIFY; | ||
236 | dn_mask |= DN_MODIFY; | ||
237 | } | ||
238 | /* both times implies a utime(s) call */ | 286 | /* both times implies a utime(s) call */ |
239 | if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) | 287 | if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) |
240 | { | 288 | mask |= FS_ATTRIB; |
241 | in_mask |= IN_ATTRIB; | 289 | else if (ia_valid & ATTR_ATIME) |
242 | dn_mask |= DN_ATTRIB; | 290 | mask |= FS_ACCESS; |
243 | } else if (ia_valid & ATTR_ATIME) { | 291 | else if (ia_valid & ATTR_MTIME) |
244 | in_mask |= IN_ACCESS; | 292 | mask |= FS_MODIFY; |
245 | dn_mask |= DN_ACCESS; | 293 | |
246 | } else if (ia_valid & ATTR_MTIME) { | 294 | if (ia_valid & ATTR_MODE) |
247 | in_mask |= IN_MODIFY; | 295 | mask |= FS_ATTRIB; |
248 | dn_mask |= DN_MODIFY; | ||
249 | } | ||
250 | if (ia_valid & ATTR_MODE) { | ||
251 | in_mask |= IN_ATTRIB; | ||
252 | dn_mask |= DN_ATTRIB; | ||
253 | } | ||
254 | 296 | ||
255 | if (dn_mask) | 297 | if (mask) { |
256 | dnotify_parent(dentry, dn_mask); | ||
257 | if (in_mask) { | ||
258 | if (S_ISDIR(inode->i_mode)) | 298 | if (S_ISDIR(inode->i_mode)) |
259 | in_mask |= IN_ISDIR; | 299 | mask |= FS_IN_ISDIR; |
260 | inotify_inode_queue_event(inode, in_mask, 0, NULL, NULL); | 300 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
261 | inotify_dentry_parent_queue_event(dentry, in_mask, 0, | 301 | |
262 | dentry->d_name.name); | 302 | fsnotify_parent(dentry, mask); |
303 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
263 | } | 304 | } |
264 | } | 305 | } |
265 | 306 | ||
266 | #ifdef CONFIG_INOTIFY /* inotify helpers */ | 307 | #if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY) /* notify helpers */ |
267 | 308 | ||
268 | /* | 309 | /* |
269 | * fsnotify_oldname_init - save off the old filename before we change it | 310 | * fsnotify_oldname_init - save off the old filename before we change it |
@@ -281,7 +322,7 @@ static inline void fsnotify_oldname_free(const char *old_name) | |||
281 | kfree(old_name); | 322 | kfree(old_name); |
282 | } | 323 | } |
283 | 324 | ||
284 | #else /* CONFIG_INOTIFY */ | 325 | #else /* CONFIG_INOTIFY || CONFIG_FSNOTIFY */ |
285 | 326 | ||
286 | static inline const char *fsnotify_oldname_init(const char *name) | 327 | static inline const char *fsnotify_oldname_init(const char *name) |
287 | { | 328 | { |
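Each hook above now follows the same shape: the legacy dnotify/inotify calls stay for now, and a parallel fsnotify_parent()/fsnotify() pair is added so the new backend observes identical events. A hedged sketch of how a VFS call site ends up driving these helpers; the surrounding read function is illustrative of where the hook sits, not code from this patch:

	#include <linux/fs.h>
	#include <linux/fsnotify.h>

	/* sketch: a read path reporting access through the single fsnotify_access() hook */
	static ssize_t example_read(struct file *file, char __user *buf,
				    size_t len, loff_t *ppos)
	{
		ssize_t ret = do_sync_read(file, buf, len, ppos);

		if (ret > 0)
			fsnotify_access(file->f_path.dentry); /* fans out to dnotify, inotify, fsnotify */
		return ret;
	}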
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h new file mode 100644 index 000000000000..44848aa830dc --- /dev/null +++ b/include/linux/fsnotify_backend.h | |||
@@ -0,0 +1,387 @@ | |||
1 | /* | ||
2 | * Filesystem access notification for Linux | ||
3 | * | ||
4 | * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef __LINUX_FSNOTIFY_BACKEND_H | ||
8 | #define __LINUX_FSNOTIFY_BACKEND_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <linux/idr.h> /* inotify uses this */ | ||
13 | #include <linux/fs.h> /* struct inode */ | ||
14 | #include <linux/list.h> | ||
15 | #include <linux/path.h> /* struct path */ | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/types.h> | ||
18 | |||
19 | #include <asm/atomic.h> | ||
20 | |||
21 | /* | ||
22 | * IN_* from inotfy.h lines up EXACTLY with FS_*, this is so we can easily | ||
23 | * convert between them. dnotify only needs conversion at watch creation | ||
24 | * so no perf loss there. fanotify isn't defined yet, so it can use the | ||
25 | * holes if it needs more events. | ||
26 | */ | ||
27 | #define FS_ACCESS 0x00000001 /* File was accessed */ | ||
28 | #define FS_MODIFY 0x00000002 /* File was modified */ | ||
29 | #define FS_ATTRIB 0x00000004 /* Metadata changed */ | ||
30 | #define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */ | ||
31 | #define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */ | ||
32 | #define FS_OPEN 0x00000020 /* File was opened */ | ||
33 | #define FS_MOVED_FROM 0x00000040 /* File was moved from X */ | ||
34 | #define FS_MOVED_TO 0x00000080 /* File was moved to Y */ | ||
35 | #define FS_CREATE 0x00000100 /* Subfile was created */ | ||
36 | #define FS_DELETE 0x00000200 /* Subfile was deleted */ | ||
37 | #define FS_DELETE_SELF 0x00000400 /* Self was deleted */ | ||
38 | #define FS_MOVE_SELF 0x00000800 /* Self was moved */ | ||
39 | |||
40 | #define FS_UNMOUNT 0x00002000 /* inode on umount fs */ | ||
41 | #define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ | ||
42 | #define FS_IN_IGNORED 0x00008000 /* last inotify event here */ | ||
43 | |||
44 | #define FS_IN_ISDIR 0x40000000 /* event occurred against dir */ | ||
45 | #define FS_IN_ONESHOT 0x80000000 /* only send event once */ | ||
46 | |||
47 | #define FS_DN_RENAME 0x10000000 /* file renamed */ | ||
48 | #define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ | ||
49 | |||
50 | /* This inode cares about things that happen to its children. Always set for | ||
51 | * dnotify and inotify. */ | ||
52 | #define FS_EVENT_ON_CHILD 0x08000000 | ||
53 | |||
54 | /* This is a list of all events that may get sent to a parent based on fs events | ||
55 | * happening to inodes inside that directory */ | ||
56 | #define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ | ||
57 | FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ | ||
58 | FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ | ||
59 | FS_DELETE) | ||
60 | |||
61 | /* listeners that hard code group numbers near the top */ | ||
62 | #define DNOTIFY_GROUP_NUM UINT_MAX | ||
63 | #define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1) | ||
64 | |||
65 | struct fsnotify_group; | ||
66 | struct fsnotify_event; | ||
67 | struct fsnotify_mark_entry; | ||
68 | struct fsnotify_event_private_data; | ||
69 | |||
70 | /* | ||
71 | * Each group must define these ops. The fsnotify infrastructure will call | ||
72 | * these operations for each relevant group. | ||
73 | * | ||
74 | * should_send_event - given a group, inode, and mask this function determines | ||
75 | * if the group is interested in this event. | ||
76 | * handle_event - main call for a group to handle an fs event | ||
77 | * free_group_priv - called when a group refcnt hits 0 to clean up the private union | ||
78 | * freeing-mark - this means that a mark has been flagged to die when everything | ||
79 | * finishes using it. The function is supplied with what must be a | ||
80 | * valid group and inode to use to clean up. | ||
81 | */ | ||
82 | struct fsnotify_ops { | ||
83 | bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask); | ||
84 | int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event); | ||
85 | void (*free_group_priv)(struct fsnotify_group *group); | ||
86 | void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group); | ||
87 | void (*free_event_priv)(struct fsnotify_event_private_data *priv); | ||
88 | }; | ||
89 | |||
90 | /* | ||
91 | * A group is a "thing" that wants to receive notification about filesystem | ||
92 | * events. The mask holds the subset of event types this group cares about. | ||
93 | * refcnt on a group is up to the implementor and at any moment if it goes 0 | ||
94 | * everything will be cleaned up. | ||
95 | */ | ||
96 | struct fsnotify_group { | ||
97 | /* | ||
98 | * global list of all groups receiving events from fsnotify. | ||
99 | * anchored by fsnotify_groups and protected by either fsnotify_grp_mutex | ||
100 | * or fsnotify_grp_srcu depending on write vs read. | ||
101 | */ | ||
102 | struct list_head group_list; | ||
103 | |||
104 | /* | ||
105 | * Defines all of the event types in which this group is interested. | ||
106 | * This mask is a bitwise OR of the FS_* events from above. Each time | ||
107 | * this mask changes for a group (if it changes) the correct functions | ||
108 | * must be called to update the global structures which indicate global | ||
109 | * interest in event types. | ||
110 | */ | ||
111 | __u32 mask; | ||
112 | |||
113 | /* | ||
114 | * How the refcnt is used is up to each group. When the refcnt hits 0 | ||
115 | * fsnotify will clean up all of the resources associated with this group. | ||
116 | * As an example, the dnotify group will always have a refcnt=1 and that | ||
117 | * will never change. Inotify, on the other hand, has a group per | ||
118 | * inotify_init() and the refcnt will hit 0 only when that fd has been | ||
119 | * closed. | ||
120 | */ | ||
121 | atomic_t refcnt; /* things with interest in this group */ | ||
122 | unsigned int group_num; /* simply prevents accidental group collision */ | ||
123 | |||
124 | const struct fsnotify_ops *ops; /* how this group handles things */ | ||
125 | |||
126 | /* needed to send notification to userspace */ | ||
127 | struct mutex notification_mutex; /* protect the notification_list */ | ||
128 | struct list_head notification_list; /* list of event_holder this group needs to send to userspace */ | ||
129 | wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */ | ||
130 | unsigned int q_len; /* events on the queue */ | ||
131 | unsigned int max_events; /* maximum events allowed on the list */ | ||
132 | |||
133 | /* stores all fastpath entries associated with this group so they can be cleaned on unregister */ | ||
134 | spinlock_t mark_lock; /* protect mark_entries list */ | ||
135 | atomic_t num_marks; /* 1 for each mark entry and 1 for not being | ||
136 | * past the point of no return when freeing | ||
137 | * a group */ | ||
138 | struct list_head mark_entries; /* all inode mark entries for this group */ | ||
139 | |||
140 | /* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */ | ||
141 | bool on_group_list; | ||
142 | |||
143 | /* groups can define private fields here or use the void *private */ | ||
144 | union { | ||
145 | void *private; | ||
146 | #ifdef CONFIG_INOTIFY_USER | ||
147 | struct inotify_group_private_data { | ||
148 | spinlock_t idr_lock; | ||
149 | struct idr idr; | ||
150 | u32 last_wd; | ||
151 | struct fasync_struct *fa; /* async notification */ | ||
152 | struct user_struct *user; | ||
153 | } inotify_data; | ||
154 | #endif | ||
155 | }; | ||
156 | }; | ||
157 | |||
158 | /* | ||
159 | * A single event can be queued in multiple group->notification_lists. | ||
160 | * | ||
161 | * each group->notification_list will point to an event_holder which in turn points | ||
162 | * to the actual event that needs to be sent to userspace. | ||
163 | * | ||
164 | * Seemed cheaper to create a refcnt'd event and a small holder for every group | ||
165 | * than create a different event for every group | ||
166 | * | ||
167 | */ | ||
168 | struct fsnotify_event_holder { | ||
169 | struct fsnotify_event *event; | ||
170 | struct list_head event_list; | ||
171 | }; | ||
172 | |||
173 | /* | ||
174 | * Inotify needs to tack data onto an event. This struct lets us later find the | ||
175 | * correct private data of the correct group. | ||
176 | */ | ||
177 | struct fsnotify_event_private_data { | ||
178 | struct fsnotify_group *group; | ||
179 | struct list_head event_list; | ||
180 | }; | ||
181 | |||
182 | /* | ||
183 | * all of the information about the original object we want to now send to | ||
184 | * a group. If you want to carry more info from the accessing task to the | ||
185 | * listener this structure is where you need to be adding fields. | ||
186 | */ | ||
187 | struct fsnotify_event { | ||
188 | /* | ||
189 | * If we create an event we are also likely going to need a holder | ||
190 | * to link to a group. So embed one holder in the event. Means only | ||
191 | * one allocation for the common case where we only have one group | ||
192 | */ | ||
193 | struct fsnotify_event_holder holder; | ||
194 | spinlock_t lock; /* protection for the associated event_holder and private_list */ | ||
195 | /* to_tell may ONLY be dereferenced during handle_event(). */ | ||
196 | struct inode *to_tell; /* either the inode the event happened to or its parent */ | ||
197 | /* | ||
198 | * depending on the event type we should have either a path or inode | ||
199 | * We hold a reference on path, but NOT on inode. Since we have the ref on | ||
200 | * the path, it may be dereferenced at any point during this object's | ||
201 | * lifetime. That reference is dropped when this object's refcnt hits | ||
202 | * 0. If this event contains an inode instead of a path, the inode may | ||
203 | * ONLY be used during handle_event(). | ||
204 | */ | ||
205 | union { | ||
206 | struct path path; | ||
207 | struct inode *inode; | ||
208 | }; | ||
209 | /* when calling fsnotify tell it if the data is a path or inode */ | ||
210 | #define FSNOTIFY_EVENT_NONE 0 | ||
211 | #define FSNOTIFY_EVENT_PATH 1 | ||
212 | #define FSNOTIFY_EVENT_INODE 2 | ||
213 | #define FSNOTIFY_EVENT_FILE 3 | ||
214 | int data_type; /* which of the above union we have */ | ||
215 | atomic_t refcnt; /* how many groups still are using/need to send this event */ | ||
216 | __u32 mask; /* the type of access, bitwise OR for FS_* event types */ | ||
217 | |||
218 | u32 sync_cookie; /* used to correlate events, namely inotify mv events */ | ||
219 | char *file_name; | ||
220 | size_t name_len; | ||
221 | |||
222 | struct list_head private_data_list; /* groups can store private data here */ | ||
223 | }; | ||
224 | |||
225 | /* | ||
226 | * a mark is simply an entry attached to an in core inode which allows an | ||
227 | * fsnotify listener to indicate they are either no longer interested in events | ||
228 | * of a type matching mask or only interested in those events. | ||
229 | * | ||
230 | * these are flushed when an inode is evicted from core and may be flushed | ||
231 | * when the inode is modified (as seen by fsnotify_access). Some fsnotify users | ||
232 | * (such as dnotify) will flush these when the open fd is closed and not at | ||
233 | * inode eviction or modification. | ||
234 | */ | ||
235 | struct fsnotify_mark_entry { | ||
236 | __u32 mask; /* mask this mark entry is for */ | ||
237 | /* we hold ref for each i_list and g_list. also one ref for each 'thing' | ||
238 | * in kernel that found and may be using this mark. */ | ||
239 | atomic_t refcnt; /* active things looking at this mark */ | ||
240 | struct inode *inode; /* inode this entry is associated with */ | ||
241 | struct fsnotify_group *group; /* group this mark entry is for */ | ||
242 | struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */ | ||
243 | struct list_head g_list; /* list of mark_entries by group->mark_entries */ | ||
244 | spinlock_t lock; /* protect group, inode, and killme */ | ||
245 | struct list_head free_i_list; /* tmp list used when freeing this mark */ | ||
246 | struct list_head free_g_list; /* tmp list used when freeing this mark */ | ||
247 | void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */ | ||
248 | }; | ||
249 | |||
250 | #ifdef CONFIG_FSNOTIFY | ||
251 | |||
252 | /* called from the vfs helpers */ | ||
253 | |||
254 | /* main fsnotify call to send events */ | ||
255 | extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, | ||
256 | const char *name, u32 cookie); | ||
257 | extern void __fsnotify_parent(struct dentry *dentry, __u32 mask); | ||
258 | extern void __fsnotify_inode_delete(struct inode *inode); | ||
259 | extern u32 fsnotify_get_cookie(void); | ||
260 | |||
261 | static inline int fsnotify_inode_watches_children(struct inode *inode) | ||
262 | { | ||
263 | /* FS_EVENT_ON_CHILD is set if the inode may care */ | ||
264 | if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD)) | ||
265 | return 0; | ||
266 | /* this inode might care about child events, does it care about the | ||
267 | * specific set of events that can happen on a child? */ | ||
268 | return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD; | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * Update the dentry with a flag indicating the interest of its parent to receive | ||
273 | * filesystem events when those events happen to this dentry->d_inode. | ||
274 | */ | ||
275 | static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) | ||
276 | { | ||
277 | struct dentry *parent; | ||
278 | |||
279 | assert_spin_locked(&dcache_lock); | ||
280 | assert_spin_locked(&dentry->d_lock); | ||
281 | |||
282 | parent = dentry->d_parent; | ||
283 | if (fsnotify_inode_watches_children(parent->d_inode)) | ||
284 | dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; | ||
285 | else | ||
286 | dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * fsnotify_d_instantiate - instantiate a dentry for inode | ||
291 | * Called with dcache_lock held. | ||
292 | */ | ||
293 | static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) | ||
294 | { | ||
295 | if (!inode) | ||
296 | return; | ||
297 | |||
298 | assert_spin_locked(&dcache_lock); | ||
299 | |||
300 | spin_lock(&dentry->d_lock); | ||
301 | __fsnotify_update_dcache_flags(dentry); | ||
302 | spin_unlock(&dentry->d_lock); | ||
303 | } | ||
304 | |||
305 | /* called from fsnotify listeners, such as fanotify or dnotify */ | ||
306 | |||
307 | /* must call when a group changes its ->mask */ | ||
308 | extern void fsnotify_recalc_global_mask(void); | ||
309 | /* get a reference to an existing or create a new group */ | ||
310 | extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, | ||
311 | __u32 mask, | ||
312 | const struct fsnotify_ops *ops); | ||
313 | /* run all marks associated with this group and update group->mask */ | ||
314 | extern void fsnotify_recalc_group_mask(struct fsnotify_group *group); | ||
315 | /* drop reference on a group from fsnotify_obtain_group */ | ||
316 | extern void fsnotify_put_group(struct fsnotify_group *group); | ||
317 | |||
318 | /* take a reference to an event */ | ||
319 | extern void fsnotify_get_event(struct fsnotify_event *event); | ||
320 | extern void fsnotify_put_event(struct fsnotify_event *event); | ||
321 | /* find private data previously attached to an event and unlink it */ | ||
322 | extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, | ||
323 | struct fsnotify_event *event); | ||
324 | |||
325 | /* attach the event to the group notification queue */ | ||
326 | extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event, | ||
327 | struct fsnotify_event_private_data *priv); | ||
328 | /* true if the group notification queue is empty */ | ||
329 | extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); | ||
330 | /* return, but do not dequeue the first event on the notification queue */ | ||
331 | extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group); | ||
332 | /* return AND dequeue the first event on the notification queue */ | ||
333 | extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group); | ||
334 | |||
335 | /* functions used to manipulate the marks attached to inodes */ | ||
336 | |||
337 | /* run all marks associated with an inode and update inode->i_fsnotify_mask */ | ||
338 | extern void fsnotify_recalc_inode_mask(struct inode *inode); | ||
339 | extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry)); | ||
340 | /* find (and take a reference) to a mark associated with group and inode */ | ||
341 | extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode); | ||
342 | /* attach the mark to both the group and the inode */ | ||
343 | extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode); | ||
344 | /* given a mark, flag it to be freed when all references are dropped */ | ||
345 | extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry); | ||
346 | /* run all the marks in a group, and flag them to be freed */ | ||
347 | extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); | ||
348 | extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry); | ||
349 | extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry); | ||
350 | extern void fsnotify_unmount_inodes(struct list_head *list); | ||
351 | |||
352 | /* put here because inotify does some weird stuff when destroying watches */ | ||
353 | extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, | ||
354 | void *data, int data_is, const char *name, | ||
355 | u32 cookie); | ||
356 | |||
357 | #else | ||
358 | |||
359 | static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, | ||
360 | const char *name, u32 cookie) | ||
361 | {} | ||
362 | |||
363 | static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask) | ||
364 | {} | ||
365 | |||
366 | static inline void __fsnotify_inode_delete(struct inode *inode) | ||
367 | {} | ||
368 | |||
369 | static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) | ||
370 | {} | ||
371 | |||
372 | static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) | ||
373 | {} | ||
374 | |||
375 | static inline u32 fsnotify_get_cookie(void) | ||
376 | { | ||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | static inline void fsnotify_unmount_inodes(struct list_head *list) | ||
381 | {} | ||
382 | |||
383 | #endif /* CONFIG_FSNOTIFY */ | ||
384 | |||
385 | #endif /* __KERNEL__ */ | ||
386 | |||
387 | #endif /* __LINUX_FSNOTIFY_BACKEND_H */ | ||
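For an in-kernel listener the contract of this header is: implement an fsnotify_ops, obtain a group, and optionally attach mark entries to the inodes of interest. A minimal sketch built only from the declarations above; the group number and event mask are arbitrary illustration choices, and the ERR_PTR error convention for fsnotify_obtain_group() is an assumption taken from how dnotify-style callers use it:

	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/fsnotify_backend.h>

	static bool example_should_send(struct fsnotify_group *group,
					struct inode *inode, __u32 mask)
	{
		return true;	/* take every event the group mask lets through */
	}

	static int example_handle_event(struct fsnotify_group *group,
					struct fsnotify_event *event)
	{
		pr_info("fsnotify: mask=0x%x on inode %p\n", event->mask, event->to_tell);
		return 0;
	}

	static const struct fsnotify_ops example_ops = {
		.should_send_event	= example_should_send,
		.handle_event		= example_handle_event,
	};

	static struct fsnotify_group *example_group;

	static int __init example_listener_init(void)
	{
		/* group number only has to avoid the hard-coded dnotify/inotify slots */
		example_group = fsnotify_obtain_group(INOTIFY_GROUP_NUM - 1,
						      FS_CREATE | FS_DELETE, &example_ops);
		return IS_ERR(example_group) ? PTR_ERR(example_group) : 0;
	}

A real listener would additionally call fsnotify_init_mark()/fsnotify_add_mark() to pin its interest to specific inodes.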
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 8a0c2f221e6b..39b95c56587e 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -233,8 +233,6 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size); | |||
233 | 233 | ||
234 | extern int skip_trace(unsigned long ip); | 234 | extern int skip_trace(unsigned long ip); |
235 | 235 | ||
236 | extern void ftrace_release(void *start, unsigned long size); | ||
237 | |||
238 | extern void ftrace_disable_daemon(void); | 236 | extern void ftrace_disable_daemon(void); |
239 | extern void ftrace_enable_daemon(void); | 237 | extern void ftrace_enable_daemon(void); |
240 | #else | 238 | #else |
@@ -325,13 +323,8 @@ static inline void __ftrace_enabled_restore(int enabled) | |||
325 | 323 | ||
326 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 324 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
327 | extern void ftrace_init(void); | 325 | extern void ftrace_init(void); |
328 | extern void ftrace_init_module(struct module *mod, | ||
329 | unsigned long *start, unsigned long *end); | ||
330 | #else | 326 | #else |
331 | static inline void ftrace_init(void) { } | 327 | static inline void ftrace_init(void) { } |
332 | static inline void | ||
333 | ftrace_init_module(struct module *mod, | ||
334 | unsigned long *start, unsigned long *end) { } | ||
335 | #endif | 328 | #endif |
336 | 329 | ||
337 | /* | 330 | /* |
@@ -368,6 +361,7 @@ struct ftrace_ret_stack { | |||
368 | unsigned long ret; | 361 | unsigned long ret; |
369 | unsigned long func; | 362 | unsigned long func; |
370 | unsigned long long calltime; | 363 | unsigned long long calltime; |
364 | unsigned long long subtime; | ||
371 | }; | 365 | }; |
372 | 366 | ||
373 | /* | 367 | /* |
@@ -379,8 +373,6 @@ extern void return_to_handler(void); | |||
379 | 373 | ||
380 | extern int | 374 | extern int |
381 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); | 375 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); |
382 | extern void | ||
383 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); | ||
384 | 376 | ||
385 | /* | 377 | /* |
386 | * Sometimes we don't want to trace a function with the function | 378 | * Sometimes we don't want to trace a function with the function |
@@ -496,8 +488,15 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk) | |||
496 | 488 | ||
497 | extern int ftrace_dump_on_oops; | 489 | extern int ftrace_dump_on_oops; |
498 | 490 | ||
491 | #ifdef CONFIG_PREEMPT | ||
492 | #define INIT_TRACE_RECURSION .trace_recursion = 0, | ||
493 | #endif | ||
494 | |||
499 | #endif /* CONFIG_TRACING */ | 495 | #endif /* CONFIG_TRACING */ |
500 | 496 | ||
497 | #ifndef INIT_TRACE_RECURSION | ||
498 | #define INIT_TRACE_RECURSION | ||
499 | #endif | ||
501 | 500 | ||
502 | #ifdef CONFIG_HW_BRANCH_TRACER | 501 | #ifdef CONFIG_HW_BRANCH_TRACER |
503 | 502 | ||
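The INIT_TRACE_RECURSION pair above is the usual pattern for optional struct-initializer fragments: define the field initializer only in the configuration where the field exists, and provide an empty fallback so the consuming initializer (presumably the init task's) can always spell the macro. A small self-contained sketch of a consumer, assuming a struct with a matching trace_recursion field:

	#include <linux/ftrace.h>

	struct example_ctx {
		int			pid;
	#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPT)
		unsigned long		trace_recursion;
	#endif
	};

	/* expands to ".trace_recursion = 0," or to nothing, so this always compiles */
	static struct example_ctx example_init_ctx = {
		.pid = 0,
		INIT_TRACE_RECURSION
	};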
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h new file mode 100644 index 000000000000..5c093ffc655b --- /dev/null +++ b/include/linux/ftrace_event.h | |||
@@ -0,0 +1,172 @@ | |||
1 | #ifndef _LINUX_FTRACE_EVENT_H | ||
2 | #define _LINUX_FTRACE_EVENT_H | ||
3 | |||
4 | #include <linux/trace_seq.h> | ||
5 | #include <linux/ring_buffer.h> | ||
6 | #include <linux/percpu.h> | ||
7 | |||
8 | struct trace_array; | ||
9 | struct tracer; | ||
10 | struct dentry; | ||
11 | |||
12 | DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq); | ||
13 | |||
14 | struct trace_print_flags { | ||
15 | unsigned long mask; | ||
16 | const char *name; | ||
17 | }; | ||
18 | |||
19 | const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim, | ||
20 | unsigned long flags, | ||
21 | const struct trace_print_flags *flag_array); | ||
22 | |||
23 | const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, | ||
24 | const struct trace_print_flags *symbol_array); | ||
25 | |||
26 | /* | ||
27 | * The trace entry - the most basic unit of tracing. This is what | ||
28 | * is printed in the end as a single line in the trace output, such as: | ||
29 | * | ||
30 | * bash-15816 [01] 235.197585: idle_cpu <- irq_enter | ||
31 | */ | ||
32 | struct trace_entry { | ||
33 | unsigned short type; | ||
34 | unsigned char flags; | ||
35 | unsigned char preempt_count; | ||
36 | int pid; | ||
37 | int tgid; | ||
38 | }; | ||
39 | |||
40 | #define FTRACE_MAX_EVENT \ | ||
41 | ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) | ||
42 | |||
43 | /* | ||
44 | * Trace iterator - used by printout routines that present trace | ||
45 | * results to users and that might sleep, etc.: | ||
46 | */ | ||
47 | struct trace_iterator { | ||
48 | struct trace_array *tr; | ||
49 | struct tracer *trace; | ||
50 | void *private; | ||
51 | int cpu_file; | ||
52 | struct mutex mutex; | ||
53 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; | ||
54 | unsigned long iter_flags; | ||
55 | |||
56 | /* The below is zeroed out in pipe_read */ | ||
57 | struct trace_seq seq; | ||
58 | struct trace_entry *ent; | ||
59 | int cpu; | ||
60 | u64 ts; | ||
61 | |||
62 | loff_t pos; | ||
63 | long idx; | ||
64 | |||
65 | cpumask_var_t started; | ||
66 | }; | ||
67 | |||
68 | |||
69 | typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, | ||
70 | int flags); | ||
71 | struct trace_event { | ||
72 | struct hlist_node node; | ||
73 | struct list_head list; | ||
74 | int type; | ||
75 | trace_print_func trace; | ||
76 | trace_print_func raw; | ||
77 | trace_print_func hex; | ||
78 | trace_print_func binary; | ||
79 | }; | ||
80 | |||
81 | extern int register_ftrace_event(struct trace_event *event); | ||
82 | extern int unregister_ftrace_event(struct trace_event *event); | ||
83 | |||
84 | /* Return values for print_line callback */ | ||
85 | enum print_line_t { | ||
86 | TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ | ||
87 | TRACE_TYPE_HANDLED = 1, | ||
88 | TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */ | ||
89 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ | ||
90 | }; | ||
91 | |||
92 | |||
93 | struct ring_buffer_event * | ||
94 | trace_current_buffer_lock_reserve(int type, unsigned long len, | ||
95 | unsigned long flags, int pc); | ||
96 | void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, | ||
97 | unsigned long flags, int pc); | ||
98 | void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, | ||
99 | unsigned long flags, int pc); | ||
100 | void trace_current_buffer_discard_commit(struct ring_buffer_event *event); | ||
101 | |||
102 | void tracing_record_cmdline(struct task_struct *tsk); | ||
103 | |||
104 | struct ftrace_event_call { | ||
105 | struct list_head list; | ||
106 | char *name; | ||
107 | char *system; | ||
108 | struct dentry *dir; | ||
109 | struct trace_event *event; | ||
110 | int enabled; | ||
111 | int (*regfunc)(void); | ||
112 | void (*unregfunc)(void); | ||
113 | int id; | ||
114 | int (*raw_init)(void); | ||
115 | int (*show_format)(struct trace_seq *s); | ||
116 | int (*define_fields)(void); | ||
117 | struct list_head fields; | ||
118 | int filter_active; | ||
119 | void *filter; | ||
120 | void *mod; | ||
121 | |||
122 | #ifdef CONFIG_EVENT_PROFILE | ||
123 | atomic_t profile_count; | ||
124 | int (*profile_enable)(struct ftrace_event_call *); | ||
125 | void (*profile_disable)(struct ftrace_event_call *); | ||
126 | #endif | ||
127 | }; | ||
128 | |||
129 | #define MAX_FILTER_PRED 32 | ||
130 | #define MAX_FILTER_STR_VAL 128 | ||
131 | |||
132 | extern int init_preds(struct ftrace_event_call *call); | ||
133 | extern void destroy_preds(struct ftrace_event_call *call); | ||
134 | extern int filter_match_preds(struct ftrace_event_call *call, void *rec); | ||
135 | extern int filter_current_check_discard(struct ftrace_event_call *call, | ||
136 | void *rec, | ||
137 | struct ring_buffer_event *event); | ||
138 | |||
139 | extern int trace_define_field(struct ftrace_event_call *call, char *type, | ||
140 | char *name, int offset, int size, int is_signed); | ||
141 | |||
142 | #define is_signed_type(type) (((type)(-1)) < 0) | ||
143 | |||
144 | int trace_set_clr_event(const char *system, const char *event, int set); | ||
145 | |||
146 | /* | ||
147 | * The double __builtin_constant_p is because gcc will give us an error | ||
148 | * if we try to initialize the static variable with fmt when it is not a | ||
149 | * constant, even with the outer if statement optimized out. | ||
150 | */ | ||
151 | #define event_trace_printk(ip, fmt, args...) \ | ||
152 | do { \ | ||
153 | __trace_printk_check_format(fmt, ##args); \ | ||
154 | tracing_record_cmdline(current); \ | ||
155 | if (__builtin_constant_p(fmt)) { \ | ||
156 | static const char *trace_printk_fmt \ | ||
157 | __attribute__((section("__trace_printk_fmt"))) = \ | ||
158 | __builtin_constant_p(fmt) ? fmt : NULL; \ | ||
159 | \ | ||
160 | __trace_bprintk(ip, trace_printk_fmt, ##args); \ | ||
161 | } else \ | ||
162 | __trace_printk(ip, fmt, ##args); \ | ||
163 | } while (0) | ||
164 | |||
165 | #define __common_field(type, item, is_signed) \ | ||
166 | ret = trace_define_field(event_call, #type, "common_" #item, \ | ||
167 | offsetof(typeof(field.ent), item), \ | ||
168 | sizeof(field.ent.item), is_signed); \ | ||
169 | if (ret) \ | ||
170 | return ret; | ||
171 | |||
172 | #endif /* _LINUX_FTRACE_EVENT_H */ | ||
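The core output hook in this header is register_ftrace_event(): a plugin hands the core a struct trace_event whose callbacks format entries of its type into the iterator's trace_seq. A hedged sketch; that a type of 0 asks the core to assign a free id, and that 0 is the failure return, reflect the contemporaneous tracing core and should be treated as assumptions rather than guarantees of this header:

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/ftrace_event.h>
	#include <linux/trace_seq.h>

	static enum print_line_t example_trace_output(struct trace_iterator *iter,
						      int flags)
	{
		trace_seq_printf(&iter->seq, "example event on cpu %d\n", iter->cpu);
		return TRACE_TYPE_HANDLED;
	}

	static struct trace_event example_trace_event = {
		.type	= 0,			/* assumed: 0 means "assign me an id" */
		.trace	= example_trace_output,
	};

	static int __init example_event_init(void)
	{
		return register_ftrace_event(&example_trace_event) ? 0 : -ENODEV;
	}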
diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 162e5defe683..d41ed593f79f 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h | |||
@@ -121,6 +121,13 @@ struct fuse_file_lock { | |||
121 | #define FUSE_BIG_WRITES (1 << 5) | 121 | #define FUSE_BIG_WRITES (1 << 5) |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * CUSE INIT request/reply flags | ||
125 | * | ||
126 | * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl | ||
127 | */ | ||
128 | #define CUSE_UNRESTRICTED_IOCTL (1 << 0) | ||
129 | |||
130 | /** | ||
124 | * Release flags | 131 | * Release flags |
125 | */ | 132 | */ |
126 | #define FUSE_RELEASE_FLUSH (1 << 0) | 133 | #define FUSE_RELEASE_FLUSH (1 << 0) |
@@ -210,6 +217,9 @@ enum fuse_opcode { | |||
210 | FUSE_DESTROY = 38, | 217 | FUSE_DESTROY = 38, |
211 | FUSE_IOCTL = 39, | 218 | FUSE_IOCTL = 39, |
212 | FUSE_POLL = 40, | 219 | FUSE_POLL = 40, |
220 | |||
221 | /* CUSE specific operations */ | ||
222 | CUSE_INIT = 4096, | ||
213 | }; | 223 | }; |
214 | 224 | ||
215 | enum fuse_notify_code { | 225 | enum fuse_notify_code { |
@@ -401,6 +411,27 @@ struct fuse_init_out { | |||
401 | __u32 max_write; | 411 | __u32 max_write; |
402 | }; | 412 | }; |
403 | 413 | ||
414 | #define CUSE_INIT_INFO_MAX 4096 | ||
415 | |||
416 | struct cuse_init_in { | ||
417 | __u32 major; | ||
418 | __u32 minor; | ||
419 | __u32 unused; | ||
420 | __u32 flags; | ||
421 | }; | ||
422 | |||
423 | struct cuse_init_out { | ||
424 | __u32 major; | ||
425 | __u32 minor; | ||
426 | __u32 unused; | ||
427 | __u32 flags; | ||
428 | __u32 max_read; | ||
429 | __u32 max_write; | ||
430 | __u32 dev_major; /* chardev major */ | ||
431 | __u32 dev_minor; /* chardev minor */ | ||
432 | __u32 spare[10]; | ||
433 | }; | ||
434 | |||
404 | struct fuse_interrupt_in { | 435 | struct fuse_interrupt_in { |
405 | __u64 unique; | 436 | __u64 unique; |
406 | }; | 437 | }; |
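CUSE rides on the FUSE transport to implement plain character devices in userspace: the kernel issues CUSE_INIT and the server answers with a cuse_init_out giving protocol version, flags, transfer limits and the desired char-dev numbers, optionally followed by up to CUSE_INIT_INFO_MAX bytes of key=value info. A hedged userspace sketch of building the fixed-size part of that reply; echoing the kernel's version and the particular limits and device numbers are illustrative choices, not requirements stated by this header:

	#include <string.h>
	#include <linux/fuse.h>

	/* sketch: a CUSE server filling in the fixed part of its CUSE_INIT reply */
	static void example_fill_cuse_init(struct cuse_init_out *out,
					   const struct cuse_init_in *in)
	{
		memset(out, 0, sizeof(*out));
		out->major	= in->major;	/* accept the kernel's protocol version */
		out->minor	= in->minor;
		out->flags	= in->flags & CUSE_UNRESTRICTED_IOCTL;
		out->max_read	= 65536;
		out->max_write	= 65536;
		out->dev_major	= 240;		/* illustrative char-dev numbers */
		out->dev_minor	= 0;
	}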
diff --git a/include/linux/futex.h b/include/linux/futex.h index 3bf5bb5a34f9..34956c8fdebf 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h | |||
@@ -23,6 +23,8 @@ union ktime; | |||
23 | #define FUTEX_TRYLOCK_PI 8 | 23 | #define FUTEX_TRYLOCK_PI 8 |
24 | #define FUTEX_WAIT_BITSET 9 | 24 | #define FUTEX_WAIT_BITSET 9 |
25 | #define FUTEX_WAKE_BITSET 10 | 25 | #define FUTEX_WAKE_BITSET 10 |
26 | #define FUTEX_WAIT_REQUEUE_PI 11 | ||
27 | #define FUTEX_CMP_REQUEUE_PI 12 | ||
26 | 28 | ||
27 | #define FUTEX_PRIVATE_FLAG 128 | 29 | #define FUTEX_PRIVATE_FLAG 128 |
28 | #define FUTEX_CLOCK_REALTIME 256 | 30 | #define FUTEX_CLOCK_REALTIME 256 |
@@ -38,6 +40,10 @@ union ktime; | |||
38 | #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) | 40 | #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) |
39 | #define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG) | 41 | #define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG) |
40 | #define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG) | 42 | #define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG) |
43 | #define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \ | ||
44 | FUTEX_PRIVATE_FLAG) | ||
45 | #define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \ | ||
46 | FUTEX_PRIVATE_FLAG) | ||
41 | 47 | ||
42 | /* | 48 | /* |
43 | * Support for robust futexes: the kernel cleans up held futexes at | 49 | * Support for robust futexes: the kernel cleans up held futexes at |
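The two new opcodes implement requeue-PI: a waiter blocks on an ordinary futex with FUTEX_WAIT_REQUEUE_PI and names the PI futex it should end up on; the waker uses FUTEX_CMP_REQUEUE_PI to wake one waiter and requeue the rest onto that PI futex, which is what PI-aware condition variables need. A rough userspace sketch of the calling convention; the exact argument layout (nr_requeue in the timeout slot, nr_wake fixed at 1, the expected value in val3) is an assumption based on the multiplexed futex(2) interface:

	#include <stdint.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/futex.h>

	static long sys_futex(uint32_t *uaddr, int op, uint32_t val, void *timeout,
			      uint32_t *uaddr2, uint32_t val3)
	{
		return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
	}

	/* waiter: block on cond_futex (expected to hold val), wake up owning mutex_futex */
	static long wait_requeue_pi(uint32_t *cond_futex, uint32_t val,
				    uint32_t *mutex_futex)
	{
		return sys_futex(cond_futex, FUTEX_WAIT_REQUEUE_PI, val, NULL,
				 mutex_futex, 0);
	}

	/* waker: wake one waiter, requeue up to nr_requeue others onto the PI futex */
	static long cmp_requeue_pi(uint32_t *cond_futex, uint32_t expected,
				   uint32_t *mutex_futex, long nr_requeue)
	{
		return sys_futex(cond_futex, FUTEX_CMP_REQUEUE_PI, 1 /* nr_wake */,
				 (void *)nr_requeue, mutex_futex, expected);
	}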
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index a1a28caed23d..7cbd38d363a2 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -90,6 +90,7 @@ struct disk_stats { | |||
90 | struct hd_struct { | 90 | struct hd_struct { |
91 | sector_t start_sect; | 91 | sector_t start_sect; |
92 | sector_t nr_sects; | 92 | sector_t nr_sects; |
93 | sector_t alignment_offset; | ||
93 | struct device __dev; | 94 | struct device __dev; |
94 | struct kobject *holder_dir; | 95 | struct kobject *holder_dir; |
95 | int policy, partno; | 96 | int policy, partno; |
@@ -113,6 +114,7 @@ struct hd_struct { | |||
113 | #define GENHD_FL_UP 16 | 114 | #define GENHD_FL_UP 16 |
114 | #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 | 115 | #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 |
115 | #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ | 116 | #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ |
117 | #define GENHD_FL_NATIVE_CAPACITY 128 | ||
116 | 118 | ||
117 | #define BLK_SCSI_MAX_CMDS (256) | 119 | #define BLK_SCSI_MAX_CMDS (256) |
118 | #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) | 120 | #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0bbc15f54536..3760e7c5de02 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -85,6 +85,9 @@ struct vm_area_struct; | |||
85 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ | 85 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ |
86 | __GFP_NORETRY|__GFP_NOMEMALLOC) | 86 | __GFP_NORETRY|__GFP_NOMEMALLOC) |
87 | 87 | ||
88 | /* Control slab gfp mask during early boot */ | ||
89 | #define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) | ||
90 | |||
88 | /* Control allocation constraints */ | 91 | /* Control allocation constraints */ |
89 | #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) | 92 | #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) |
90 | 93 | ||
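SLAB_GFP_BOOT_MASK lets the slab allocators silently strip __GFP_WAIT, __GFP_IO and __GFP_FS from callers while interrupts and the I/O paths are not yet usable during early boot. A hedged sketch of the intended pattern inside an allocator; the variable and function names are illustrative, not the actual slab code:

	#include <linux/cache.h>
	#include <linux/gfp.h>

	/* start out restrictive; widen once the system can sleep and do I/O */
	static gfp_t example_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;

	void example_allocator_fully_up(void)
	{
		example_gfp_mask = __GFP_BITS_MASK;
	}

	gfp_t example_clamp_gfp(gfp_t flags)
	{
		return flags & example_gfp_mask;
	}

Note that the macro body is not parenthesized, so it is safest used as an initializer or assignment (as here) rather than inline in a larger expression.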
diff --git a/include/linux/ide.h b/include/linux/ide.h index 9fed365a598b..a6c6a2fad7c8 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -26,6 +26,9 @@ | |||
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | #include <asm/mutex.h> | 27 | #include <asm/mutex.h> |
28 | 28 | ||
29 | /* for request_sense */ | ||
30 | #include <linux/cdrom.h> | ||
31 | |||
29 | #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) | 32 | #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) |
30 | # define SUPPORT_VLB_SYNC 0 | 33 | # define SUPPORT_VLB_SYNC 0 |
31 | #else | 34 | #else |
@@ -175,7 +178,7 @@ typedef u8 hwif_chipset_t; | |||
175 | /* | 178 | /* |
176 | * Structure to hold all information about the location of this port | 179 | * Structure to hold all information about the location of this port |
177 | */ | 180 | */ |
178 | typedef struct hw_regs_s { | 181 | struct ide_hw { |
179 | union { | 182 | union { |
180 | struct ide_io_ports io_ports; | 183 | struct ide_io_ports io_ports; |
181 | unsigned long io_ports_array[IDE_NR_PORTS]; | 184 | unsigned long io_ports_array[IDE_NR_PORTS]; |
@@ -183,12 +186,11 @@ typedef struct hw_regs_s { | |||
183 | 186 | ||
184 | int irq; /* our irq number */ | 187 | int irq; /* our irq number */ |
185 | ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ | 188 | ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ |
186 | hwif_chipset_t chipset; | ||
187 | struct device *dev, *parent; | 189 | struct device *dev, *parent; |
188 | unsigned long config; | 190 | unsigned long config; |
189 | } hw_regs_t; | 191 | }; |
190 | 192 | ||
191 | static inline void ide_std_init_ports(hw_regs_t *hw, | 193 | static inline void ide_std_init_ports(struct ide_hw *hw, |
192 | unsigned long io_addr, | 194 | unsigned long io_addr, |
193 | unsigned long ctl_addr) | 195 | unsigned long ctl_addr) |
194 | { | 196 | { |
@@ -215,21 +217,12 @@ static inline void ide_std_init_ports(hw_regs_t *hw, | |||
215 | 217 | ||
216 | /* | 218 | /* |
217 | * Special Driver Flags | 219 | * Special Driver Flags |
218 | * | ||
219 | * set_geometry : respecify drive geometry | ||
220 | * recalibrate : seek to cyl 0 | ||
221 | * set_multmode : set multmode count | ||
222 | * reserved : unused | ||
223 | */ | 220 | */ |
224 | typedef union { | 221 | enum { |
225 | unsigned all : 8; | 222 | IDE_SFLAG_SET_GEOMETRY = (1 << 0), |
226 | struct { | 223 | IDE_SFLAG_RECALIBRATE = (1 << 1), |
227 | unsigned set_geometry : 1; | 224 | IDE_SFLAG_SET_MULTMODE = (1 << 2), |
228 | unsigned recalibrate : 1; | 225 | }; |
229 | unsigned set_multmode : 1; | ||
230 | unsigned reserved : 5; | ||
231 | } b; | ||
232 | } special_t; | ||
233 | 226 | ||
234 | /* | 227 | /* |
235 | * Status returned from various ide_ functions | 228 | * Status returned from various ide_ functions |
@@ -324,7 +317,6 @@ struct ide_cmd { | |||
324 | unsigned int cursg_ofs; | 317 | unsigned int cursg_ofs; |
325 | 318 | ||
326 | struct request *rq; /* copy of request */ | 319 | struct request *rq; /* copy of request */ |
327 | void *special; /* valid_t generally */ | ||
328 | }; | 320 | }; |
329 | 321 | ||
330 | /* ATAPI packet command flags */ | 322 | /* ATAPI packet command flags */ |
@@ -360,11 +352,7 @@ struct ide_atapi_pc { | |||
360 | 352 | ||
361 | /* data buffer */ | 353 | /* data buffer */ |
362 | u8 *buf; | 354 | u8 *buf; |
363 | /* current buffer position */ | ||
364 | u8 *cur_pos; | ||
365 | int buf_size; | 355 | int buf_size; |
366 | /* missing/available data on the current buffer */ | ||
367 | int b_count; | ||
368 | 356 | ||
369 | /* the corresponding request */ | 357 | /* the corresponding request */ |
370 | struct request *rq; | 358 | struct request *rq; |
@@ -377,10 +365,6 @@ struct ide_atapi_pc { | |||
377 | */ | 365 | */ |
378 | u8 pc_buf[IDE_PC_BUFFER_SIZE]; | 366 | u8 pc_buf[IDE_PC_BUFFER_SIZE]; |
379 | 367 | ||
380 | /* idetape only */ | ||
381 | struct idetape_bh *bh; | ||
382 | char *b_data; | ||
383 | |||
384 | unsigned long timeout; | 368 | unsigned long timeout; |
385 | }; | 369 | }; |
386 | 370 | ||
@@ -397,6 +381,7 @@ struct ide_drive_s; | |||
397 | struct ide_disk_ops { | 381 | struct ide_disk_ops { |
398 | int (*check)(struct ide_drive_s *, const char *); | 382 | int (*check)(struct ide_drive_s *, const char *); |
399 | int (*get_capacity)(struct ide_drive_s *); | 383 | int (*get_capacity)(struct ide_drive_s *); |
384 | u64 (*set_capacity)(struct ide_drive_s *, u64); | ||
400 | void (*setup)(struct ide_drive_s *); | 385 | void (*setup)(struct ide_drive_s *); |
401 | void (*flush)(struct ide_drive_s *); | 386 | void (*flush)(struct ide_drive_s *); |
402 | int (*init_media)(struct ide_drive_s *, struct gendisk *); | 387 | int (*init_media)(struct ide_drive_s *, struct gendisk *); |
@@ -474,6 +459,8 @@ enum { | |||
474 | IDE_DFLAG_NICE1 = (1 << 5), | 459 | IDE_DFLAG_NICE1 = (1 << 5), |
475 | /* device is physically present */ | 460 | /* device is physically present */ |
476 | IDE_DFLAG_PRESENT = (1 << 6), | 461 | IDE_DFLAG_PRESENT = (1 << 6), |
462 | /* disable Host Protected Area */ | ||
463 | IDE_DFLAG_NOHPA = (1 << 7), | ||
477 | /* id read from device (synthetic if not set) */ | 464 | /* id read from device (synthetic if not set) */ |
478 | IDE_DFLAG_ID_READ = (1 << 8), | 465 | IDE_DFLAG_ID_READ = (1 << 8), |
479 | IDE_DFLAG_NOPROBE = (1 << 9), | 466 | IDE_DFLAG_NOPROBE = (1 << 9), |
@@ -512,6 +499,7 @@ enum { | |||
512 | /* write protect */ | 499 | /* write protect */ |
513 | IDE_DFLAG_WP = (1 << 29), | 500 | IDE_DFLAG_WP = (1 << 29), |
514 | IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), | 501 | IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), |
502 | IDE_DFLAG_NIEN_QUIRK = (1 << 31), | ||
515 | }; | 503 | }; |
516 | 504 | ||
517 | struct ide_drive_s { | 505 | struct ide_drive_s { |
@@ -536,14 +524,13 @@ struct ide_drive_s { | |||
536 | unsigned long sleep; /* sleep until this time */ | 524 | unsigned long sleep; /* sleep until this time */ |
537 | unsigned long timeout; /* max time to wait for irq */ | 525 | unsigned long timeout; /* max time to wait for irq */ |
538 | 526 | ||
539 | special_t special; /* special action flags */ | 527 | u8 special_flags; /* special action flags */ |
540 | 528 | ||
541 | u8 select; /* basic drive/head select reg value */ | 529 | u8 select; /* basic drive/head select reg value */ |
542 | u8 retry_pio; /* retrying dma capable host in pio */ | 530 | u8 retry_pio; /* retrying dma capable host in pio */ |
543 | u8 waiting_for_dma; /* dma currently in progress */ | 531 | u8 waiting_for_dma; /* dma currently in progress */ |
544 | u8 dma; /* atapi dma flag */ | 532 | u8 dma; /* atapi dma flag */ |
545 | 533 | ||
546 | u8 quirk_list; /* considered quirky, set for a specific host */ | ||
547 | u8 init_speed; /* transfer rate set at boot */ | 534 | u8 init_speed; /* transfer rate set at boot */ |
548 | u8 current_speed; /* current transfer rate set */ | 535 | u8 current_speed; /* current transfer rate set */ |
549 | u8 desired_speed; /* desired transfer rate set */ | 536 | u8 desired_speed; /* desired transfer rate set */ |
@@ -568,8 +555,7 @@ struct ide_drive_s { | |||
568 | unsigned int drive_data; /* used by set_pio_mode/dev_select() */ | 555 | unsigned int drive_data; /* used by set_pio_mode/dev_select() */ |
569 | unsigned int failures; /* current failure count */ | 556 | unsigned int failures; /* current failure count */ |
570 | unsigned int max_failures; /* maximum allowed failure count */ | 557 | unsigned int max_failures; /* maximum allowed failure count */ |
571 | u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */ | 558 | u64 probed_capacity;/* initial/native media capacity */ |
572 | |||
573 | u64 capacity64; /* total number of sectors */ | 559 | u64 capacity64; /* total number of sectors */ |
574 | 560 | ||
575 | int lun; /* logical unit */ | 561 | int lun; /* logical unit */ |
@@ -593,16 +579,16 @@ struct ide_drive_s { | |||
593 | /* callback for packet commands */ | 579 | /* callback for packet commands */ |
594 | int (*pc_callback)(struct ide_drive_s *, int); | 580 | int (*pc_callback)(struct ide_drive_s *, int); |
595 | 581 | ||
596 | void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *); | ||
597 | int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *, | ||
598 | unsigned int, int); | ||
599 | |||
600 | ide_startstop_t (*irq_handler)(struct ide_drive_s *); | 582 | ide_startstop_t (*irq_handler)(struct ide_drive_s *); |
601 | 583 | ||
602 | unsigned long atapi_flags; | 584 | unsigned long atapi_flags; |
603 | 585 | ||
604 | struct ide_atapi_pc request_sense_pc; | 586 | struct ide_atapi_pc request_sense_pc; |
605 | struct request request_sense_rq; | 587 | |
588 | /* current sense rq and buffer */ | ||
589 | bool sense_rq_armed; | ||
590 | struct request sense_rq; | ||
591 | struct request_sense sense_data; | ||
606 | }; | 592 | }; |
607 | 593 | ||
608 | typedef struct ide_drive_s ide_drive_t; | 594 | typedef struct ide_drive_s ide_drive_t; |
@@ -1174,7 +1160,10 @@ int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *); | |||
1174 | int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); | 1160 | int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); |
1175 | int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); | 1161 | int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); |
1176 | void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); | 1162 | void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); |
1177 | void ide_retry_pc(ide_drive_t *, struct gendisk *); | 1163 | void ide_retry_pc(ide_drive_t *drive); |
1164 | |||
1165 | void ide_prep_sense(ide_drive_t *drive, struct request *rq); | ||
1166 | int ide_queue_sense_rq(ide_drive_t *drive, void *special); | ||
1178 | 1167 | ||
1179 | int ide_cd_expiry(ide_drive_t *); | 1168 | int ide_cd_expiry(ide_drive_t *); |
1180 | 1169 | ||
@@ -1225,7 +1214,7 @@ static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev) | |||
1225 | } | 1214 | } |
1226 | 1215 | ||
1227 | void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, | 1216 | void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, |
1228 | hw_regs_t *, hw_regs_t **); | 1217 | struct ide_hw *, struct ide_hw **); |
1229 | void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); | 1218 | void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); |
1230 | 1219 | ||
1231 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI | 1220 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI |
@@ -1464,16 +1453,18 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} | |||
1464 | void ide_register_region(struct gendisk *); | 1453 | void ide_register_region(struct gendisk *); |
1465 | void ide_unregister_region(struct gendisk *); | 1454 | void ide_unregister_region(struct gendisk *); |
1466 | 1455 | ||
1456 | void ide_check_nien_quirk_list(ide_drive_t *); | ||
1467 | void ide_undecoded_slave(ide_drive_t *); | 1457 | void ide_undecoded_slave(ide_drive_t *); |
1468 | 1458 | ||
1469 | void ide_port_apply_params(ide_hwif_t *); | 1459 | void ide_port_apply_params(ide_hwif_t *); |
1470 | int ide_sysfs_register_port(ide_hwif_t *); | 1460 | int ide_sysfs_register_port(ide_hwif_t *); |
1471 | 1461 | ||
1472 | struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); | 1462 | struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **, |
1463 | unsigned int); | ||
1473 | void ide_host_free(struct ide_host *); | 1464 | void ide_host_free(struct ide_host *); |
1474 | int ide_host_register(struct ide_host *, const struct ide_port_info *, | 1465 | int ide_host_register(struct ide_host *, const struct ide_port_info *, |
1475 | hw_regs_t **); | 1466 | struct ide_hw **); |
1476 | int ide_host_add(const struct ide_port_info *, hw_regs_t **, | 1467 | int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int, |
1477 | struct ide_host **); | 1468 | struct ide_host **); |
1478 | void ide_host_remove(struct ide_host *); | 1469 | void ide_host_remove(struct ide_host *); |
1479 | int ide_legacy_device_add(const struct ide_port_info *, unsigned long); | 1470 | int ide_legacy_device_add(const struct ide_port_info *, unsigned long); |
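The ide.h changes above rename hw_regs_t to struct ide_hw and make ide_host_alloc()/ide_host_add() take an explicit port count instead of a NULL-terminated array. A minimal sketch of a single-port host probe against the new signature; the probe function, the base/ctl/irq values and the NULL port_info are illustrative, and it assumes ide_std_init_ports() keeps its existing role of filling hw.io_ports[]:

#include <linux/string.h>
#include <linux/ide.h>

static int example_probe_one_port(unsigned long base, unsigned long ctl,
				  int irq)
{
	struct ide_hw hw, *hws[] = { &hw };
	struct ide_host *host;

	memset(&hw, 0, sizeof(hw));
	ide_std_init_ports(&hw, base, ctl);	/* fill hw.io_ports[] */
	hw.irq = irq;

	/* the port array and its length are now passed explicitly */
	return ide_host_add(NULL, hws, 1, &host);
}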
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index cfe4fe1b7132..60e8934d10b5 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
@@ -79,6 +79,7 @@ | |||
79 | #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ | 79 | #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ |
80 | #define ETH_P_TIPC 0x88CA /* TIPC */ | 80 | #define ETH_P_TIPC 0x88CA /* TIPC */ |
81 | #define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ | 81 | #define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ |
82 | #define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ | ||
82 | #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ | 83 | #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ |
83 | 84 | ||
84 | /* | 85 | /* |
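ETH_P_FIP gives FCoE Initialization Protocol control frames their own ethertype next to ETH_P_FCOE. A trivial sketch of a receive-path test; the helper name is made up:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* FIP control traffic is now distinguishable from FCoE data frames */
static bool example_frame_is_fip(const struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_FIP);
}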
diff --git a/include/linux/ima.h b/include/linux/ima.h index 0e2aa45cb0ce..b1b827d091a9 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h | |||
@@ -13,14 +13,17 @@ | |||
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | struct linux_binprm; | 14 | struct linux_binprm; |
15 | 15 | ||
16 | #define IMA_COUNT_UPDATE 1 | ||
17 | #define IMA_COUNT_LEAVE 0 | ||
18 | |||
16 | #ifdef CONFIG_IMA | 19 | #ifdef CONFIG_IMA |
17 | extern int ima_bprm_check(struct linux_binprm *bprm); | 20 | extern int ima_bprm_check(struct linux_binprm *bprm); |
18 | extern int ima_inode_alloc(struct inode *inode); | 21 | extern int ima_inode_alloc(struct inode *inode); |
19 | extern void ima_inode_free(struct inode *inode); | 22 | extern void ima_inode_free(struct inode *inode); |
20 | extern int ima_path_check(struct path *path, int mask); | 23 | extern int ima_path_check(struct path *path, int mask, int update_counts); |
21 | extern void ima_file_free(struct file *file); | 24 | extern void ima_file_free(struct file *file); |
22 | extern int ima_file_mmap(struct file *file, unsigned long prot); | 25 | extern int ima_file_mmap(struct file *file, unsigned long prot); |
23 | extern void ima_shm_check(struct file *file); | 26 | extern void ima_counts_get(struct file *file); |
24 | 27 | ||
25 | #else | 28 | #else |
26 | static inline int ima_bprm_check(struct linux_binprm *bprm) | 29 | static inline int ima_bprm_check(struct linux_binprm *bprm) |
@@ -38,7 +41,7 @@ static inline void ima_inode_free(struct inode *inode) | |||
38 | return; | 41 | return; |
39 | } | 42 | } |
40 | 43 | ||
41 | static inline int ima_path_check(struct path *path, int mask) | 44 | static inline int ima_path_check(struct path *path, int mask, int update_counts) |
42 | { | 45 | { |
43 | return 0; | 46 | return 0; |
44 | } | 47 | } |
@@ -53,7 +56,7 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot) | |||
53 | return 0; | 56 | return 0; |
54 | } | 57 | } |
55 | 58 | ||
56 | static inline void ima_shm_check(struct file *file) | 59 | static inline void ima_counts_get(struct file *file) |
57 | { | 60 | { |
58 | return; | 61 | return; |
59 | } | 62 | } |
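ima_path_check() grows an update_counts argument (IMA_COUNT_UPDATE or IMA_COUNT_LEAVE) and ima_shm_check() becomes ima_counts_get(). A hedged sketch of a caller using the new prototypes; the open helper itself is hypothetical:

#include <linux/fs.h>
#include <linux/ima.h>

static int example_open_check(struct path *path, struct file *file)
{
	int rc;

	/* measure the path and bump the IMA open counters in one step */
	rc = ima_path_check(path, MAY_READ, IMA_COUNT_UPDATE);
	if (rc)
		return rc;

	/* callers such as shmem only need the counters updated */
	ima_counts_get(file);
	return 0;
}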
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d87247d2641f..28b1f30601b5 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -108,6 +108,15 @@ extern struct group_info init_groups; | |||
108 | 108 | ||
109 | extern struct cred init_cred; | 109 | extern struct cred init_cred; |
110 | 110 | ||
111 | #ifdef CONFIG_PERF_COUNTERS | ||
112 | # define INIT_PERF_COUNTERS(tsk) \ | ||
113 | .perf_counter_mutex = \ | ||
114 | __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \ | ||
115 | .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list), | ||
116 | #else | ||
117 | # define INIT_PERF_COUNTERS(tsk) | ||
118 | #endif | ||
119 | |||
111 | /* | 120 | /* |
112 | * INIT_TASK is used to set up the first task table, touch at | 121 | * INIT_TASK is used to set up the first task table, touch at |
113 | * your own risk!. Base=0, limit=0x1fffff (=2MB) | 122 | * your own risk!. Base=0, limit=0x1fffff (=2MB) |
@@ -145,8 +154,8 @@ extern struct cred init_cred; | |||
145 | .group_leader = &tsk, \ | 154 | .group_leader = &tsk, \ |
146 | .real_cred = &init_cred, \ | 155 | .real_cred = &init_cred, \ |
147 | .cred = &init_cred, \ | 156 | .cred = &init_cred, \ |
148 | .cred_exec_mutex = \ | 157 | .cred_guard_mutex = \ |
149 | __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \ | 158 | __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \ |
150 | .comm = "swapper", \ | 159 | .comm = "swapper", \ |
151 | .thread = INIT_THREAD, \ | 160 | .thread = INIT_THREAD, \ |
152 | .fs = &init_fs, \ | 161 | .fs = &init_fs, \ |
@@ -171,9 +180,11 @@ extern struct cred init_cred; | |||
171 | }, \ | 180 | }, \ |
172 | .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ | 181 | .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ |
173 | INIT_IDS \ | 182 | INIT_IDS \ |
183 | INIT_PERF_COUNTERS(tsk) \ | ||
174 | INIT_TRACE_IRQFLAGS \ | 184 | INIT_TRACE_IRQFLAGS \ |
175 | INIT_LOCKDEP \ | 185 | INIT_LOCKDEP \ |
176 | INIT_FTRACE_GRAPH \ | 186 | INIT_FTRACE_GRAPH \ |
187 | INIT_TRACE_RECURSION \ | ||
177 | } | 188 | } |
178 | 189 | ||
179 | 190 | ||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 91bb76f44f14..ff374ceface0 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -566,6 +566,6 @@ struct irq_desc; | |||
566 | extern int early_irq_init(void); | 566 | extern int early_irq_init(void); |
567 | extern int arch_probe_nr_irqs(void); | 567 | extern int arch_probe_nr_irqs(void); |
568 | extern int arch_early_irq_init(void); | 568 | extern int arch_early_irq_init(void); |
569 | extern int arch_init_chip_data(struct irq_desc *desc, int cpu); | 569 | extern int arch_init_chip_data(struct irq_desc *desc, int node); |
570 | 570 | ||
571 | #endif | 571 | #endif |
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 08b987bccf89..dd05434fa45f 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h | |||
@@ -64,7 +64,7 @@ struct cfq_io_context { | |||
64 | * and kmalloc'ed. These could be shared between processes. | 64 | * and kmalloc'ed. These could be shared between processes. |
65 | */ | 65 | */ |
66 | struct io_context { | 66 | struct io_context { |
67 | atomic_t refcount; | 67 | atomic_long_t refcount; |
68 | atomic_t nr_tasks; | 68 | atomic_t nr_tasks; |
69 | 69 | ||
70 | /* all the fields below are protected by this lock */ | 70 | /* all the fields below are protected by this lock */ |
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) | |||
91 | * if ref count is zero, don't allow sharing (ioc is going away, it's | 91 | * if ref count is zero, don't allow sharing (ioc is going away, it's |
92 | * a race). | 92 | * a race). |
93 | */ | 93 | */ |
94 | if (ioc && atomic_inc_not_zero(&ioc->refcount)) { | 94 | if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) { |
95 | atomic_inc(&ioc->nr_tasks); | 95 | atomic_long_inc(&ioc->refcount); |
96 | return ioc; | 96 | return ioc; |
97 | } | 97 | } |
98 | 98 | ||
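The io_context reference count is widened from atomic_t to atomic_long_t, so any open-coded get/put has to switch to the atomic_long_* helpers. A minimal sketch; the function names are illustrative:

#include <linux/iocontext.h>

static void example_ioc_get(struct io_context *ioc)
{
	atomic_long_inc(&ioc->refcount);
}

static int example_ioc_put(struct io_context *ioc)
{
	/* non-zero when the last reference was dropped */
	return atomic_long_dec_and_test(&ioc->refcount);
}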
diff --git a/include/linux/irq.h b/include/linux/irq.h index b7cbeed972e4..1e50c34f0062 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -117,7 +117,7 @@ struct irq_chip { | |||
117 | void (*eoi)(unsigned int irq); | 117 | void (*eoi)(unsigned int irq); |
118 | 118 | ||
119 | void (*end)(unsigned int irq); | 119 | void (*end)(unsigned int irq); |
120 | void (*set_affinity)(unsigned int irq, | 120 | int (*set_affinity)(unsigned int irq, |
121 | const struct cpumask *dest); | 121 | const struct cpumask *dest); |
122 | int (*retrigger)(unsigned int irq); | 122 | int (*retrigger)(unsigned int irq); |
123 | int (*set_type)(unsigned int irq, unsigned int flow_type); | 123 | int (*set_type)(unsigned int irq, unsigned int flow_type); |
@@ -187,7 +187,7 @@ struct irq_desc { | |||
187 | spinlock_t lock; | 187 | spinlock_t lock; |
188 | #ifdef CONFIG_SMP | 188 | #ifdef CONFIG_SMP |
189 | cpumask_var_t affinity; | 189 | cpumask_var_t affinity; |
190 | unsigned int cpu; | 190 | unsigned int node; |
191 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 191 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
192 | cpumask_var_t pending_mask; | 192 | cpumask_var_t pending_mask; |
193 | #endif | 193 | #endif |
@@ -201,26 +201,23 @@ struct irq_desc { | |||
201 | } ____cacheline_internodealigned_in_smp; | 201 | } ____cacheline_internodealigned_in_smp; |
202 | 202 | ||
203 | extern void arch_init_copy_chip_data(struct irq_desc *old_desc, | 203 | extern void arch_init_copy_chip_data(struct irq_desc *old_desc, |
204 | struct irq_desc *desc, int cpu); | 204 | struct irq_desc *desc, int node); |
205 | extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); | 205 | extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); |
206 | 206 | ||
207 | #ifndef CONFIG_SPARSE_IRQ | 207 | #ifndef CONFIG_SPARSE_IRQ |
208 | extern struct irq_desc irq_desc[NR_IRQS]; | 208 | extern struct irq_desc irq_desc[NR_IRQS]; |
209 | #else /* CONFIG_SPARSE_IRQ */ | 209 | #endif |
210 | extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu); | ||
211 | #endif /* CONFIG_SPARSE_IRQ */ | ||
212 | |||
213 | extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); | ||
214 | 210 | ||
215 | static inline struct irq_desc * | 211 | #ifdef CONFIG_NUMA_IRQ_DESC |
216 | irq_remap_to_desc(unsigned int irq, struct irq_desc *desc) | 212 | extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); |
217 | { | ||
218 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC | ||
219 | return irq_to_desc(irq); | ||
220 | #else | 213 | #else |
214 | static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | ||
215 | { | ||
221 | return desc; | 216 | return desc; |
222 | #endif | ||
223 | } | 217 | } |
218 | #endif | ||
219 | |||
220 | extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); | ||
224 | 221 | ||
225 | /* | 222 | /* |
226 | * Migration helpers for obsolete names, they will go away: | 223 | * Migration helpers for obsolete names, they will go away: |
@@ -386,7 +383,7 @@ extern void set_irq_noprobe(unsigned int irq); | |||
386 | extern void set_irq_probe(unsigned int irq); | 383 | extern void set_irq_probe(unsigned int irq); |
387 | 384 | ||
388 | /* Handle dynamic irq creation and destruction */ | 385 | /* Handle dynamic irq creation and destruction */ |
389 | extern unsigned int create_irq_nr(unsigned int irq_want); | 386 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); |
390 | extern int create_irq(void); | 387 | extern int create_irq(void); |
391 | extern void destroy_irq(unsigned int irq); | 388 | extern void destroy_irq(unsigned int irq); |
392 | 389 | ||
@@ -424,47 +421,44 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); | |||
424 | 421 | ||
425 | #ifdef CONFIG_SMP | 422 | #ifdef CONFIG_SMP |
426 | /** | 423 | /** |
427 | * init_alloc_desc_masks - allocate cpumasks for irq_desc | 424 | * alloc_desc_masks - allocate cpumasks for irq_desc |
428 | * @desc: pointer to irq_desc struct | 425 | * @desc: pointer to irq_desc struct |
429 | * @cpu: cpu which will be handling the cpumasks | 426 | * @cpu: cpu which will be handling the cpumasks |
430 | * @boot: true if need bootmem | 427 | * @boot: true if need bootmem |
431 | * | 428 | * |
432 | * Allocates affinity and pending_mask cpumask if required. | 429 | * Allocates affinity and pending_mask cpumask if required. |
433 | * Returns true if successful (or not required). | 430 | * Returns true if successful (or not required). |
434 | * Side effect: affinity has all bits set, pending_mask has all bits clear. | ||
435 | */ | 431 | */ |
436 | static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | 432 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, |
437 | bool boot) | 433 | bool boot) |
438 | { | 434 | { |
439 | int node; | 435 | gfp_t gfp = GFP_ATOMIC; |
440 | |||
441 | if (boot) { | ||
442 | alloc_bootmem_cpumask_var(&desc->affinity); | ||
443 | cpumask_setall(desc->affinity); | ||
444 | |||
445 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
446 | alloc_bootmem_cpumask_var(&desc->pending_mask); | ||
447 | cpumask_clear(desc->pending_mask); | ||
448 | #endif | ||
449 | return true; | ||
450 | } | ||
451 | 436 | ||
452 | node = cpu_to_node(cpu); | 437 | if (boot) |
438 | gfp = GFP_NOWAIT; | ||
453 | 439 | ||
454 | if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node)) | 440 | #ifdef CONFIG_CPUMASK_OFFSTACK |
441 | if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) | ||
455 | return false; | 442 | return false; |
456 | cpumask_setall(desc->affinity); | ||
457 | 443 | ||
458 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 444 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
459 | if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) { | 445 | if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { |
460 | free_cpumask_var(desc->affinity); | 446 | free_cpumask_var(desc->affinity); |
461 | return false; | 447 | return false; |
462 | } | 448 | } |
463 | cpumask_clear(desc->pending_mask); | 449 | #endif |
464 | #endif | 450 | #endif |
465 | return true; | 451 | return true; |
466 | } | 452 | } |
467 | 453 | ||
454 | static inline void init_desc_masks(struct irq_desc *desc) | ||
455 | { | ||
456 | cpumask_setall(desc->affinity); | ||
457 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
458 | cpumask_clear(desc->pending_mask); | ||
459 | #endif | ||
460 | } | ||
461 | |||
468 | /** | 462 | /** |
469 | * init_copy_desc_masks - copy cpumasks for irq_desc | 463 | * init_copy_desc_masks - copy cpumasks for irq_desc |
470 | * @old_desc: pointer to old irq_desc struct | 464 | * @old_desc: pointer to old irq_desc struct |
@@ -478,7 +472,7 @@ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | |||
478 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 472 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, |
479 | struct irq_desc *new_desc) | 473 | struct irq_desc *new_desc) |
480 | { | 474 | { |
481 | #ifdef CONFIG_CPUMASKS_OFFSTACK | 475 | #ifdef CONFIG_CPUMASK_OFFSTACK |
482 | cpumask_copy(new_desc->affinity, old_desc->affinity); | 476 | cpumask_copy(new_desc->affinity, old_desc->affinity); |
483 | 477 | ||
484 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 478 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
@@ -499,12 +493,16 @@ static inline void free_desc_masks(struct irq_desc *old_desc, | |||
499 | 493 | ||
500 | #else /* !CONFIG_SMP */ | 494 | #else /* !CONFIG_SMP */ |
501 | 495 | ||
502 | static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | 496 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, |
503 | bool boot) | 497 | bool boot) |
504 | { | 498 | { |
505 | return true; | 499 | return true; |
506 | } | 500 | } |
507 | 501 | ||
502 | static inline void init_desc_masks(struct irq_desc *desc) | ||
503 | { | ||
504 | } | ||
505 | |||
508 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 506 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, |
509 | struct irq_desc *new_desc) | 507 | struct irq_desc *new_desc) |
510 | { | 508 | { |
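init_alloc_desc_masks() is split in two: alloc_desc_masks() only allocates the off-stack cpumasks (using GFP_NOWAIT during early boot instead of bootmem) and init_desc_masks() sets their initial contents, with both keyed by NUMA node rather than CPU. A sketch of the intended calling sequence; the wrapper is hypothetical:

#include <linux/irq.h>

static bool example_setup_desc_masks(struct irq_desc *desc, int node, bool boot)
{
	if (!alloc_desc_masks(desc, node, boot))
		return false;		/* off-stack cpumask allocation failed */

	init_desc_masks(desc);		/* affinity: all CPUs, pending: none */
	return true;
}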
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 0c8b89f28a95..a77c6007dc99 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
@@ -81,7 +81,12 @@ static inline unsigned int kstat_irqs(unsigned int irq) | |||
81 | return sum; | 81 | return sum; |
82 | } | 82 | } |
83 | 83 | ||
84 | |||
85 | /* | ||
86 | * Lock/unlock the current runqueue - to extract task statistics: | ||
87 | */ | ||
84 | extern unsigned long long task_delta_exec(struct task_struct *); | 88 | extern unsigned long long task_delta_exec(struct task_struct *); |
89 | |||
85 | extern void account_user_time(struct task_struct *, cputime_t, cputime_t); | 90 | extern void account_user_time(struct task_struct *, cputime_t, cputime_t); |
86 | extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); | 91 | extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); |
87 | extern void account_steal_time(cputime_t); | 92 | extern void account_steal_time(cputime_t); |
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h new file mode 100644 index 000000000000..7796aed6cdd5 --- /dev/null +++ b/include/linux/kmemleak.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * include/linux/kmemleak.h | ||
3 | * | ||
4 | * Copyright (C) 2008 ARM Limited | ||
5 | * Written by Catalin Marinas <catalin.marinas@arm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #ifndef __KMEMLEAK_H | ||
22 | #define __KMEMLEAK_H | ||
23 | |||
24 | #ifdef CONFIG_DEBUG_KMEMLEAK | ||
25 | |||
26 | extern void kmemleak_init(void); | ||
27 | extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, | ||
28 | gfp_t gfp); | ||
29 | extern void kmemleak_free(const void *ptr); | ||
30 | extern void kmemleak_padding(const void *ptr, unsigned long offset, | ||
31 | size_t size); | ||
32 | extern void kmemleak_not_leak(const void *ptr); | ||
33 | extern void kmemleak_ignore(const void *ptr); | ||
34 | extern void kmemleak_scan_area(const void *ptr, unsigned long offset, | ||
35 | size_t length, gfp_t gfp); | ||
36 | extern void kmemleak_no_scan(const void *ptr); | ||
37 | |||
38 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | ||
39 | int min_count, unsigned long flags, | ||
40 | gfp_t gfp) | ||
41 | { | ||
42 | if (!(flags & SLAB_NOLEAKTRACE)) | ||
43 | kmemleak_alloc(ptr, size, min_count, gfp); | ||
44 | } | ||
45 | |||
46 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) | ||
47 | { | ||
48 | if (!(flags & SLAB_NOLEAKTRACE)) | ||
49 | kmemleak_free(ptr); | ||
50 | } | ||
51 | |||
52 | static inline void kmemleak_erase(void **ptr) | ||
53 | { | ||
54 | *ptr = NULL; | ||
55 | } | ||
56 | |||
57 | #else | ||
58 | |||
59 | static inline void kmemleak_init(void) | ||
60 | { | ||
61 | } | ||
62 | static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count, | ||
63 | gfp_t gfp) | ||
64 | { | ||
65 | } | ||
66 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | ||
67 | int min_count, unsigned long flags, | ||
68 | gfp_t gfp) | ||
69 | { | ||
70 | } | ||
71 | static inline void kmemleak_free(const void *ptr) | ||
72 | { | ||
73 | } | ||
74 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) | ||
75 | { | ||
76 | } | ||
77 | static inline void kmemleak_not_leak(const void *ptr) | ||
78 | { | ||
79 | } | ||
80 | static inline void kmemleak_ignore(const void *ptr) | ||
81 | { | ||
82 | } | ||
83 | static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, | ||
84 | size_t length, gfp_t gfp) | ||
85 | { | ||
86 | } | ||
87 | static inline void kmemleak_erase(void **ptr) | ||
88 | { | ||
89 | } | ||
90 | static inline void kmemleak_no_scan(const void *ptr) | ||
91 | { | ||
92 | } | ||
93 | |||
94 | #endif /* CONFIG_DEBUG_KMEMLEAK */ | ||
95 | |||
96 | #endif /* __KMEMLEAK_H */ | ||
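kmemleak.h is a new interface; the *_recursive() helpers are meant to be called from the slab allocators themselves, so kmalloc()/kfree() objects are tracked automatically, while memory obtained elsewhere has to be reported by hand. A sketch of that manual pattern for a page-allocator-backed buffer; the wrapper functions are hypothetical:

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>

static void *example_raw_alloc(size_t size)
{
	void *obj = (void *)__get_free_pages(GFP_KERNEL, get_order(size));

	if (obj)
		/* min_count = 1: report a leak if no reference to obj is found */
		kmemleak_alloc(obj, size, 1, GFP_KERNEL);
	return obj;
}

static void example_raw_free(void *obj, size_t size)
{
	kmemleak_free(obj);
	free_pages((unsigned long)obj, get_order(size));
}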
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h new file mode 100644 index 000000000000..b616d3930c3b --- /dev/null +++ b/include/linux/kmemtrace.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
3 | * | ||
4 | * This file is released under GPL version 2. | ||
5 | */ | ||
6 | |||
7 | #ifndef _LINUX_KMEMTRACE_H | ||
8 | #define _LINUX_KMEMTRACE_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <trace/events/kmem.h> | ||
13 | |||
14 | #ifdef CONFIG_KMEMTRACE | ||
15 | extern void kmemtrace_init(void); | ||
16 | #else | ||
17 | static inline void kmemtrace_init(void) | ||
18 | { | ||
19 | } | ||
20 | #endif | ||
21 | |||
22 | #endif /* __KERNEL__ */ | ||
23 | |||
24 | #endif /* _LINUX_KMEMTRACE_H */ | ||
25 | |||
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 8cc137911b34..3db5d8d37485 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -119,7 +119,7 @@ struct kvm_run { | |||
119 | __u32 error_code; | 119 | __u32 error_code; |
120 | } ex; | 120 | } ex; |
121 | /* KVM_EXIT_IO */ | 121 | /* KVM_EXIT_IO */ |
122 | struct kvm_io { | 122 | struct { |
123 | #define KVM_EXIT_IO_IN 0 | 123 | #define KVM_EXIT_IO_IN 0 |
124 | #define KVM_EXIT_IO_OUT 1 | 124 | #define KVM_EXIT_IO_OUT 1 |
125 | __u8 direction; | 125 | __u8 direction; |
@@ -224,10 +224,10 @@ struct kvm_interrupt { | |||
224 | /* for KVM_GET_DIRTY_LOG */ | 224 | /* for KVM_GET_DIRTY_LOG */ |
225 | struct kvm_dirty_log { | 225 | struct kvm_dirty_log { |
226 | __u32 slot; | 226 | __u32 slot; |
227 | __u32 padding; | 227 | __u32 padding1; |
228 | union { | 228 | union { |
229 | void __user *dirty_bitmap; /* one bit per page */ | 229 | void __user *dirty_bitmap; /* one bit per page */ |
230 | __u64 padding; | 230 | __u64 padding2; |
231 | }; | 231 | }; |
232 | }; | 232 | }; |
233 | 233 | ||
@@ -409,6 +409,10 @@ struct kvm_trace_rec { | |||
409 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT | 409 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT |
410 | #define KVM_CAP_DEVICE_DEASSIGNMENT 27 | 410 | #define KVM_CAP_DEVICE_DEASSIGNMENT 27 |
411 | #endif | 411 | #endif |
412 | #ifdef __KVM_HAVE_MSIX | ||
413 | #define KVM_CAP_DEVICE_MSIX 28 | ||
414 | #endif | ||
415 | #define KVM_CAP_ASSIGN_DEV_IRQ 29 | ||
412 | /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ | 416 | /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ |
413 | #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 | 417 | #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 |
414 | 418 | ||
@@ -482,11 +486,18 @@ struct kvm_irq_routing { | |||
482 | #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ | 486 | #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ |
483 | struct kvm_assigned_pci_dev) | 487 | struct kvm_assigned_pci_dev) |
484 | #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) | 488 | #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) |
489 | /* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ | ||
485 | #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \ | 490 | #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \ |
486 | struct kvm_assigned_irq) | 491 | struct kvm_assigned_irq) |
492 | #define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) | ||
487 | #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) | 493 | #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) |
488 | #define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ | 494 | #define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ |
489 | struct kvm_assigned_pci_dev) | 495 | struct kvm_assigned_pci_dev) |
496 | #define KVM_ASSIGN_SET_MSIX_NR \ | ||
497 | _IOW(KVMIO, 0x73, struct kvm_assigned_msix_nr) | ||
498 | #define KVM_ASSIGN_SET_MSIX_ENTRY \ | ||
499 | _IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry) | ||
500 | #define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) | ||
490 | 501 | ||
491 | /* | 502 | /* |
492 | * ioctls for vcpu fds | 503 | * ioctls for vcpu fds |
@@ -577,6 +588,8 @@ struct kvm_debug_guest { | |||
577 | #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) | 588 | #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) |
578 | #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) | 589 | #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) |
579 | 590 | ||
591 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) | ||
592 | |||
580 | struct kvm_assigned_pci_dev { | 593 | struct kvm_assigned_pci_dev { |
581 | __u32 assigned_dev_id; | 594 | __u32 assigned_dev_id; |
582 | __u32 busnr; | 595 | __u32 busnr; |
@@ -587,6 +600,17 @@ struct kvm_assigned_pci_dev { | |||
587 | }; | 600 | }; |
588 | }; | 601 | }; |
589 | 602 | ||
603 | #define KVM_DEV_IRQ_HOST_INTX (1 << 0) | ||
604 | #define KVM_DEV_IRQ_HOST_MSI (1 << 1) | ||
605 | #define KVM_DEV_IRQ_HOST_MSIX (1 << 2) | ||
606 | |||
607 | #define KVM_DEV_IRQ_GUEST_INTX (1 << 8) | ||
608 | #define KVM_DEV_IRQ_GUEST_MSI (1 << 9) | ||
609 | #define KVM_DEV_IRQ_GUEST_MSIX (1 << 10) | ||
610 | |||
611 | #define KVM_DEV_IRQ_HOST_MASK 0x00ff | ||
612 | #define KVM_DEV_IRQ_GUEST_MASK 0xff00 | ||
613 | |||
590 | struct kvm_assigned_irq { | 614 | struct kvm_assigned_irq { |
591 | __u32 assigned_dev_id; | 615 | __u32 assigned_dev_id; |
592 | __u32 host_irq; | 616 | __u32 host_irq; |
@@ -602,9 +626,19 @@ struct kvm_assigned_irq { | |||
602 | }; | 626 | }; |
603 | }; | 627 | }; |
604 | 628 | ||
605 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) | ||
606 | 629 | ||
607 | #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION KVM_DEV_IRQ_ASSIGN_ENABLE_MSI | 630 | struct kvm_assigned_msix_nr { |
608 | #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI (1 << 0) | 631 | __u32 assigned_dev_id; |
632 | __u16 entry_nr; | ||
633 | __u16 padding; | ||
634 | }; | ||
635 | |||
636 | #define KVM_MAX_MSIX_PER_DEV 512 | ||
637 | struct kvm_assigned_msix_entry { | ||
638 | __u32 assigned_dev_id; | ||
639 | __u32 gsi; | ||
640 | __u16 entry; /* The index of entry in the MSI-X table */ | ||
641 | __u16 padding[3]; | ||
642 | }; | ||
609 | 643 | ||
610 | #endif | 644 | #endif |
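The kvm.h hunks split the assigned-device interrupt flags into host and guest halves and add MSI-X ioctls (KVM_ASSIGN_SET_MSIX_NR, KVM_ASSIGN_SET_MSIX_ENTRY, KVM_DEASSIGN_DEV_IRQ). A userspace sketch of the implied MSI-X setup flow; the VM fd, device id and GSI numbers are placeholders and error handling is minimal:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_assign_msix(int vm_fd, __u32 dev_id, unsigned int nr,
			       const __u32 *gsi)
{
	struct kvm_assigned_msix_nr nr_msg = {
		.assigned_dev_id = dev_id,
		.entry_nr = nr,
	};
	struct kvm_assigned_msix_entry ent = { .assigned_dev_id = dev_id };
	unsigned int i;

	/* declare how many MSI-X entries the device will use */
	if (ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_NR, &nr_msg) < 0)
		return -1;

	for (i = 0; i < nr; i++) {
		ent.entry = i;		/* index into the device's MSI-X table */
		ent.gsi = gsi[i];	/* routing entry delivered to the guest */
		if (ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_ENTRY, &ent) < 0)
			return -1;
	}
	return 0;
}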
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 894a56e365e8..aacc5449f586 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -38,6 +38,7 @@ | |||
38 | #define KVM_REQ_UNHALT 6 | 38 | #define KVM_REQ_UNHALT 6 |
39 | #define KVM_REQ_MMU_SYNC 7 | 39 | #define KVM_REQ_MMU_SYNC 7 |
40 | #define KVM_REQ_KVMCLOCK_UPDATE 8 | 40 | #define KVM_REQ_KVMCLOCK_UPDATE 8 |
41 | #define KVM_REQ_KICK 9 | ||
41 | 42 | ||
42 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 | 43 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
43 | 44 | ||
@@ -72,7 +73,6 @@ struct kvm_vcpu { | |||
72 | struct mutex mutex; | 73 | struct mutex mutex; |
73 | int cpu; | 74 | int cpu; |
74 | struct kvm_run *run; | 75 | struct kvm_run *run; |
75 | int guest_mode; | ||
76 | unsigned long requests; | 76 | unsigned long requests; |
77 | unsigned long guest_debug; | 77 | unsigned long guest_debug; |
78 | int fpu_active; | 78 | int fpu_active; |
@@ -298,6 +298,7 @@ int kvm_arch_hardware_setup(void); | |||
298 | void kvm_arch_hardware_unsetup(void); | 298 | void kvm_arch_hardware_unsetup(void); |
299 | void kvm_arch_check_processor_compat(void *rtn); | 299 | void kvm_arch_check_processor_compat(void *rtn); |
300 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); | 300 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); |
301 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | ||
301 | 302 | ||
302 | void kvm_free_physmem(struct kvm *kvm); | 303 | void kvm_free_physmem(struct kvm *kvm); |
303 | 304 | ||
@@ -319,6 +320,13 @@ struct kvm_irq_ack_notifier { | |||
319 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); | 320 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); |
320 | }; | 321 | }; |
321 | 322 | ||
323 | #define KVM_ASSIGNED_MSIX_PENDING 0x1 | ||
324 | struct kvm_guest_msix_entry { | ||
325 | u32 vector; | ||
326 | u16 entry; | ||
327 | u16 flags; | ||
328 | }; | ||
329 | |||
322 | struct kvm_assigned_dev_kernel { | 330 | struct kvm_assigned_dev_kernel { |
323 | struct kvm_irq_ack_notifier ack_notifier; | 331 | struct kvm_irq_ack_notifier ack_notifier; |
324 | struct work_struct interrupt_work; | 332 | struct work_struct interrupt_work; |
@@ -326,18 +334,18 @@ struct kvm_assigned_dev_kernel { | |||
326 | int assigned_dev_id; | 334 | int assigned_dev_id; |
327 | int host_busnr; | 335 | int host_busnr; |
328 | int host_devfn; | 336 | int host_devfn; |
337 | unsigned int entries_nr; | ||
329 | int host_irq; | 338 | int host_irq; |
330 | bool host_irq_disabled; | 339 | bool host_irq_disabled; |
340 | struct msix_entry *host_msix_entries; | ||
331 | int guest_irq; | 341 | int guest_irq; |
332 | #define KVM_ASSIGNED_DEV_GUEST_INTX (1 << 0) | 342 | struct kvm_guest_msix_entry *guest_msix_entries; |
333 | #define KVM_ASSIGNED_DEV_GUEST_MSI (1 << 1) | ||
334 | #define KVM_ASSIGNED_DEV_HOST_INTX (1 << 8) | ||
335 | #define KVM_ASSIGNED_DEV_HOST_MSI (1 << 9) | ||
336 | unsigned long irq_requested_type; | 343 | unsigned long irq_requested_type; |
337 | int irq_source_id; | 344 | int irq_source_id; |
338 | int flags; | 345 | int flags; |
339 | struct pci_dev *dev; | 346 | struct pci_dev *dev; |
340 | struct kvm *kvm; | 347 | struct kvm *kvm; |
348 | spinlock_t assigned_dev_lock; | ||
341 | }; | 349 | }; |
342 | 350 | ||
343 | struct kvm_irq_mask_notifier { | 351 | struct kvm_irq_mask_notifier { |
@@ -360,6 +368,9 @@ void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian); | |||
360 | int kvm_request_irq_source_id(struct kvm *kvm); | 368 | int kvm_request_irq_source_id(struct kvm *kvm); |
361 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); | 369 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); |
362 | 370 | ||
371 | /* For vcpu->arch.iommu_flags */ | ||
372 | #define KVM_IOMMU_CACHE_COHERENCY 0x1 | ||
373 | |||
363 | #ifdef CONFIG_IOMMU_API | 374 | #ifdef CONFIG_IOMMU_API |
364 | int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, | 375 | int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, |
365 | unsigned long npages); | 376 | unsigned long npages); |
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 2b8318c83e53..fb46efbeabec 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h | |||
@@ -40,4 +40,31 @@ typedef unsigned long hfn_t; | |||
40 | 40 | ||
41 | typedef hfn_t pfn_t; | 41 | typedef hfn_t pfn_t; |
42 | 42 | ||
43 | union kvm_ioapic_redirect_entry { | ||
44 | u64 bits; | ||
45 | struct { | ||
46 | u8 vector; | ||
47 | u8 delivery_mode:3; | ||
48 | u8 dest_mode:1; | ||
49 | u8 delivery_status:1; | ||
50 | u8 polarity:1; | ||
51 | u8 remote_irr:1; | ||
52 | u8 trig_mode:1; | ||
53 | u8 mask:1; | ||
54 | u8 reserve:7; | ||
55 | u8 reserved[4]; | ||
56 | u8 dest_id; | ||
57 | } fields; | ||
58 | }; | ||
59 | |||
60 | struct kvm_lapic_irq { | ||
61 | u32 vector; | ||
62 | u32 delivery_mode; | ||
63 | u32 dest_mode; | ||
64 | u32 level; | ||
65 | u32 trig_mode; | ||
66 | u32 shorthand; | ||
67 | u32 dest_id; | ||
68 | }; | ||
69 | |||
43 | #endif /* __KVM_TYPES_H__ */ | 70 | #endif /* __KVM_TYPES_H__ */ |
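kvm_types.h gains a shared layout for I/O APIC redirection entries and a generic kvm_lapic_irq carrier. A sketch of unpacking one into the other; the helper is hypothetical, and the level/shorthand choices are only an example:

#include <linux/kvm_types.h>

static void example_entry_to_irq(const union kvm_ioapic_redirect_entry *e,
				 struct kvm_lapic_irq *irq)
{
	irq->vector = e->fields.vector;
	irq->delivery_mode = e->fields.delivery_mode;
	irq->dest_mode = e->fields.dest_mode;
	irq->trig_mode = e->fields.trig_mode;
	irq->dest_id = e->fields.dest_id;
	irq->level = 1;		/* assert */
	irq->shorthand = 0;	/* no destination shorthand */
}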
diff --git a/include/linux/lguest.h b/include/linux/lguest.h index 175e63f4a8c0..7bc1440fc473 100644 --- a/include/linux/lguest.h +++ b/include/linux/lguest.h | |||
@@ -30,6 +30,10 @@ struct lguest_data | |||
30 | /* Wallclock time set by the Host. */ | 30 | /* Wallclock time set by the Host. */ |
31 | struct timespec time; | 31 | struct timespec time; |
32 | 32 | ||
33 | /* Interrupt pending set by the Host. The Guest should do a hypercall | ||
34 | * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */ | ||
35 | int irq_pending; | ||
36 | |||
33 | /* Async hypercall ring. Instead of directly making hypercalls, we can | 37 | /* Async hypercall ring. Instead of directly making hypercalls, we can |
34 | * place them in here for processing the next time the Host wants. | 38 | * place them in here for processing the next time the Host wants. |
35 | * This batching can be quite efficient. */ | 39 | * This batching can be quite efficient. */ |
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index a53407a4165c..bfefbdf7498a 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h | |||
@@ -57,7 +57,8 @@ enum lguest_req | |||
57 | LHREQ_INITIALIZE, /* + base, pfnlimit, start */ | 57 | LHREQ_INITIALIZE, /* + base, pfnlimit, start */ |
58 | LHREQ_GETDMA, /* No longer used */ | 58 | LHREQ_GETDMA, /* No longer used */ |
59 | LHREQ_IRQ, /* + irq */ | 59 | LHREQ_IRQ, /* + irq */ |
60 | LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ | 60 | LHREQ_BREAK, /* No longer used */ |
61 | LHREQ_EVENTFD, /* + address, fd. */ | ||
61 | }; | 62 | }; |
62 | 63 | ||
63 | /* The alignment to use between consumer and producer parts of vring. | 64 | /* The alignment to use between consumer and producer parts of vring. |
diff --git a/include/linux/loop.h b/include/linux/loop.h index 40725447f5e0..66c194e2d9b9 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h | |||
@@ -56,8 +56,7 @@ struct loop_device { | |||
56 | gfp_t old_gfp_mask; | 56 | gfp_t old_gfp_mask; |
57 | 57 | ||
58 | spinlock_t lo_lock; | 58 | spinlock_t lo_lock; |
59 | struct bio *lo_bio; | 59 | struct bio_list lo_bio_list; |
60 | struct bio *lo_biotail; | ||
61 | int lo_state; | 60 | int lo_state; |
62 | struct mutex lo_ctl_mutex; | 61 | struct mutex lo_ctl_mutex; |
63 | struct task_struct *lo_thread; | 62 | struct task_struct *lo_thread; |
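The loop driver's hand-rolled lo_bio/lo_biotail chain becomes a struct bio_list, so queueing reduces to the generic list helpers from linux/bio.h. A minimal sketch mirroring how the driver might use it (lo_bio_list is protected by lo_lock); the function names are illustrative:

#include <linux/bio.h>
#include <linux/loop.h>

static void example_loop_add_bio(struct loop_device *lo, struct bio *bio)
{
	spin_lock_irq(&lo->lo_lock);
	bio_list_add(&lo->lo_bio_list, bio);	/* append at the tail */
	spin_unlock_irq(&lo->lo_lock);
}

static struct bio *example_loop_get_bio(struct loop_device *lo)
{
	struct bio *bio;

	spin_lock_irq(&lo->lo_lock);
	bio = bio_list_pop(&lo->lo_bio_list);	/* NULL when empty */
	spin_unlock_irq(&lo->lo_lock);
	return bio;
}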
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h new file mode 100644 index 000000000000..e461b2c3d711 --- /dev/null +++ b/include/linux/lsm_audit.h | |||
@@ -0,0 +1,111 @@ | |||
1 | /* | ||
2 | * Common LSM logging functions | ||
3 | * Heavily borrowed from selinux/avc.h | ||
4 | * | ||
5 | * Author : Etienne BASSET <etienne.basset@ensta.org> | ||
6 | * | ||
7 | * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil> | ||
8 | * All BUGS to : Etienne BASSET <etienne.basset@ensta.org> | ||
9 | */ | ||
10 | #ifndef _LSM_COMMON_LOGGING_ | ||
11 | #define _LSM_COMMON_LOGGING_ | ||
12 | |||
13 | #include <linux/stddef.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/kdev_t.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/audit.h> | ||
20 | #include <linux/in6.h> | ||
21 | #include <linux/path.h> | ||
22 | #include <linux/key.h> | ||
23 | #include <linux/skbuff.h> | ||
24 | #include <asm/system.h> | ||
25 | |||
26 | |||
27 | /* Auxiliary data to use in generating the audit record. */ | ||
28 | struct common_audit_data { | ||
29 | char type; | ||
30 | #define LSM_AUDIT_DATA_FS 1 | ||
31 | #define LSM_AUDIT_DATA_NET 2 | ||
32 | #define LSM_AUDIT_DATA_CAP 3 | ||
33 | #define LSM_AUDIT_DATA_IPC 4 | ||
34 | #define LSM_AUDIT_DATA_TASK 5 | ||
35 | #define LSM_AUDIT_DATA_KEY 6 | ||
36 | struct task_struct *tsk; | ||
37 | union { | ||
38 | struct { | ||
39 | struct path path; | ||
40 | struct inode *inode; | ||
41 | } fs; | ||
42 | struct { | ||
43 | int netif; | ||
44 | struct sock *sk; | ||
45 | u16 family; | ||
46 | __be16 dport; | ||
47 | __be16 sport; | ||
48 | union { | ||
49 | struct { | ||
50 | __be32 daddr; | ||
51 | __be32 saddr; | ||
52 | } v4; | ||
53 | struct { | ||
54 | struct in6_addr daddr; | ||
55 | struct in6_addr saddr; | ||
56 | } v6; | ||
57 | } fam; | ||
58 | } net; | ||
59 | int cap; | ||
60 | int ipc_id; | ||
61 | struct task_struct *tsk; | ||
62 | #ifdef CONFIG_KEYS | ||
63 | struct { | ||
64 | key_serial_t key; | ||
65 | char *key_desc; | ||
66 | } key_struct; | ||
67 | #endif | ||
68 | } u; | ||
69 | const char *function; | ||
70 | /* this union contains LSM specific data */ | ||
71 | union { | ||
72 | /* SMACK data */ | ||
73 | struct smack_audit_data { | ||
74 | char *subject; | ||
75 | char *object; | ||
76 | char *request; | ||
77 | int result; | ||
78 | } smack_audit_data; | ||
79 | /* SELinux data */ | ||
80 | struct { | ||
81 | u32 ssid; | ||
82 | u32 tsid; | ||
83 | u16 tclass; | ||
84 | u32 requested; | ||
85 | u32 audited; | ||
86 | struct av_decision *avd; | ||
87 | int result; | ||
88 | } selinux_audit_data; | ||
89 | } lsm_priv; | ||
90 | /* these callback will be implemented by a specific LSM */ | ||
91 | void (*lsm_pre_audit)(struct audit_buffer *, void *); | ||
92 | void (*lsm_post_audit)(struct audit_buffer *, void *); | ||
93 | }; | ||
94 | |||
95 | #define v4info fam.v4 | ||
96 | #define v6info fam.v6 | ||
97 | |||
98 | int ipv4_skb_to_auditdata(struct sk_buff *skb, | ||
99 | struct common_audit_data *ad, u8 *proto); | ||
100 | |||
101 | int ipv6_skb_to_auditdata(struct sk_buff *skb, | ||
102 | struct common_audit_data *ad, u8 *proto); | ||
103 | |||
104 | /* Initialize an LSM audit data structure. */ | ||
105 | #define COMMON_AUDIT_DATA_INIT(_d, _t) \ | ||
106 | { memset((_d), 0, sizeof(struct common_audit_data)); \ | ||
107 | (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; } | ||
108 | |||
109 | void common_lsm_audit(struct common_audit_data *a); | ||
110 | |||
111 | #endif | ||
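lsm_audit.h gives LSMs a common way to build audit records: fill a common_audit_data, optionally attach pre/post callbacks for module-specific fields, then call common_lsm_audit(). A sketch for a capability check; the LSM prefix and the callback body are made up:

#include <linux/lsm_audit.h>

static void example_pre_audit(struct audit_buffer *ab, void *a)
{
	audit_log_format(ab, "example_lsm: ");
}

static void example_audit_cap(struct task_struct *tsk, int cap)
{
	struct common_audit_data ad;

	COMMON_AUDIT_DATA_INIT(&ad, CAP);	/* type = LSM_AUDIT_DATA_CAP */
	ad.tsk = tsk;
	ad.u.cap = cap;
	ad.lsm_pre_audit = example_pre_audit;

	common_lsm_audit(&ad);
}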
diff --git a/include/linux/magic.h b/include/linux/magic.h index 5b4e28bcb788..1923327b9869 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h | |||
@@ -6,9 +6,12 @@ | |||
6 | #define AFS_SUPER_MAGIC 0x5346414F | 6 | #define AFS_SUPER_MAGIC 0x5346414F |
7 | #define AUTOFS_SUPER_MAGIC 0x0187 | 7 | #define AUTOFS_SUPER_MAGIC 0x0187 |
8 | #define CODA_SUPER_MAGIC 0x73757245 | 8 | #define CODA_SUPER_MAGIC 0x73757245 |
9 | #define CRAMFS_MAGIC 0x28cd3d45 /* some random number */ | ||
10 | #define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */ | ||
9 | #define DEBUGFS_MAGIC 0x64626720 | 11 | #define DEBUGFS_MAGIC 0x64626720 |
10 | #define SYSFS_MAGIC 0x62656572 | 12 | #define SYSFS_MAGIC 0x62656572 |
11 | #define SECURITYFS_MAGIC 0x73636673 | 13 | #define SECURITYFS_MAGIC 0x73636673 |
14 | #define SELINUX_MAGIC 0xf97cff8c | ||
12 | #define TMPFS_MAGIC 0x01021994 | 15 | #define TMPFS_MAGIC 0x01021994 |
13 | #define SQUASHFS_MAGIC 0x73717368 | 16 | #define SQUASHFS_MAGIC 0x73717368 |
14 | #define EFS_SUPER_MAGIC 0x414A53 | 17 | #define EFS_SUPER_MAGIC 0x414A53 |
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h deleted file mode 100644 index 1f76b1ebf627..000000000000 --- a/include/linux/mg_disk.h +++ /dev/null | |||
@@ -1,206 +0,0 @@ | |||
1 | /* | ||
2 | * include/linux/mg_disk.c | ||
3 | * | ||
4 | * Support for the mGine m[g]flash IO mode. | ||
5 | * Based on legacy hd.c | ||
6 | * | ||
7 | * (c) 2008 mGine Co.,LTD | ||
8 | * (c) 2008 unsik Kim <donari75@gmail.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef __MG_DISK_H__ | ||
16 | #define __MG_DISK_H__ | ||
17 | |||
18 | #include <linux/blkdev.h> | ||
19 | #include <linux/ata.h> | ||
20 | |||
21 | /* name for block device */ | ||
22 | #define MG_DISK_NAME "mgd" | ||
23 | /* name for platform device */ | ||
24 | #define MG_DEV_NAME "mg_disk" | ||
25 | |||
26 | #define MG_DISK_MAJ 0 | ||
27 | #define MG_DISK_MAX_PART 16 | ||
28 | #define MG_SECTOR_SIZE 512 | ||
29 | #define MG_MAX_SECTS 256 | ||
30 | |||
31 | /* Register offsets */ | ||
32 | #define MG_BUFF_OFFSET 0x8000 | ||
33 | #define MG_STORAGE_BUFFER_SIZE 0x200 | ||
34 | #define MG_REG_OFFSET 0xC000 | ||
35 | #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ | ||
36 | #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ | ||
37 | #define MG_REG_SECT_CNT (MG_REG_OFFSET + 4) | ||
38 | #define MG_REG_SECT_NUM (MG_REG_OFFSET + 6) | ||
39 | #define MG_REG_CYL_LOW (MG_REG_OFFSET + 8) | ||
40 | #define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA) | ||
41 | #define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC) | ||
42 | #define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */ | ||
43 | #define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */ | ||
44 | #define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10) | ||
45 | #define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12) | ||
46 | |||
47 | /* "Drive Select/Head Register" bit values */ | ||
48 | #define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */ | ||
49 | #define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON) | ||
50 | #define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON) | ||
51 | #define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON) | ||
52 | |||
53 | |||
54 | /* "Device Control Register" bit values */ | ||
55 | #define MG_REG_CTRL_INTR_ENABLE 0x0 | ||
56 | #define MG_REG_CTRL_INTR_DISABLE (0x1<<1) | ||
57 | #define MG_REG_CTRL_RESET (0x1<<2) | ||
58 | #define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0 | ||
59 | #define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4) | ||
60 | #define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0 | ||
61 | #define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5) | ||
62 | #define MG_REG_CTRL_DPD_DISABLE 0x0 | ||
63 | #define MG_REG_CTRL_DPD_ENABLE (0x1<<6) | ||
64 | |||
65 | /* Status register bit */ | ||
66 | /* error bit in status register */ | ||
67 | #define MG_REG_STATUS_BIT_ERROR 0x01 | ||
68 | /* corrected error in status register */ | ||
69 | #define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04 | ||
70 | /* data request bit in status register */ | ||
71 | #define MG_REG_STATUS_BIT_DATA_REQ 0x08 | ||
72 | /* DSC - Drive Seek Complete */ | ||
73 | #define MG_REG_STATUS_BIT_SEEK_DONE 0x10 | ||
74 | /* DWF - Drive Write Fault */ | ||
75 | #define MG_REG_STATUS_BIT_WRITE_FAULT 0x20 | ||
76 | #define MG_REG_STATUS_BIT_READY 0x40 | ||
77 | #define MG_REG_STATUS_BIT_BUSY 0x80 | ||
78 | |||
79 | /* handy status */ | ||
80 | #define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE) | ||
81 | #define MG_READY_OK(s) (((s) & (MG_STAT_READY | \ | ||
82 | (MG_REG_STATUS_BIT_BUSY | \ | ||
83 | MG_REG_STATUS_BIT_WRITE_FAULT | \ | ||
84 | MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY) | ||
85 | |||
86 | /* Error register */ | ||
87 | #define MG_REG_ERR_AMNF 0x01 | ||
88 | #define MG_REG_ERR_ABRT 0x04 | ||
89 | #define MG_REG_ERR_IDNF 0x10 | ||
90 | #define MG_REG_ERR_UNC 0x40 | ||
91 | #define MG_REG_ERR_BBK 0x80 | ||
92 | |||
93 | /* error code for others */ | ||
94 | #define MG_ERR_NONE 0 | ||
95 | #define MG_ERR_TIMEOUT 0x100 | ||
96 | #define MG_ERR_INIT_STAT 0x101 | ||
97 | #define MG_ERR_TRANSLATION 0x102 | ||
98 | #define MG_ERR_CTRL_RST 0x103 | ||
99 | #define MG_ERR_INV_STAT 0x104 | ||
100 | #define MG_ERR_RSTOUT 0x105 | ||
101 | |||
102 | #define MG_MAX_ERRORS 6 /* Max read/write errors */ | ||
103 | |||
104 | /* command */ | ||
105 | #define MG_CMD_RD 0x20 | ||
106 | #define MG_CMD_WR 0x30 | ||
107 | #define MG_CMD_SLEEP 0x99 | ||
108 | #define MG_CMD_WAKEUP 0xC3 | ||
109 | #define MG_CMD_ID 0xEC | ||
110 | #define MG_CMD_WR_CONF 0x3C | ||
111 | #define MG_CMD_RD_CONF 0x40 | ||
112 | |||
113 | /* operation mode */ | ||
114 | #define MG_OP_CASCADE (1 << 0) | ||
115 | #define MG_OP_CASCADE_SYNC_RD (1 << 1) | ||
116 | #define MG_OP_CASCADE_SYNC_WR (1 << 2) | ||
117 | #define MG_OP_INTERLEAVE (1 << 3) | ||
118 | |||
119 | /* synchronous */ | ||
120 | #define MG_BURST_LAT_4 (3 << 4) | ||
121 | #define MG_BURST_LAT_5 (4 << 4) | ||
122 | #define MG_BURST_LAT_6 (5 << 4) | ||
123 | #define MG_BURST_LAT_7 (6 << 4) | ||
124 | #define MG_BURST_LAT_8 (7 << 4) | ||
125 | #define MG_BURST_LEN_4 (1 << 1) | ||
126 | #define MG_BURST_LEN_8 (2 << 1) | ||
127 | #define MG_BURST_LEN_16 (3 << 1) | ||
128 | #define MG_BURST_LEN_32 (4 << 1) | ||
129 | #define MG_BURST_LEN_CONT (0 << 1) | ||
130 | |||
131 | /* timeout value (unit: ms) */ | ||
132 | #define MG_TMAX_CONF_TO_CMD 1 | ||
133 | #define MG_TMAX_WAIT_RD_DRQ 10 | ||
134 | #define MG_TMAX_WAIT_WR_DRQ 500 | ||
135 | #define MG_TMAX_RST_TO_BUSY 10 | ||
136 | #define MG_TMAX_HDRST_TO_RDY 500 | ||
137 | #define MG_TMAX_SWRST_TO_RDY 500 | ||
138 | #define MG_TMAX_RSTOUT 3000 | ||
139 | |||
140 | /* device attribution */ | ||
141 | /* use mflash as boot device */ | ||
142 | #define MG_BOOT_DEV (1 << 0) | ||
143 | /* use mflash as storage device */ | ||
144 | #define MG_STORAGE_DEV (1 << 1) | ||
145 | /* same as MG_STORAGE_DEV, but bootloader already done reset sequence */ | ||
146 | #define MG_STORAGE_DEV_SKIP_RST (1 << 2) | ||
147 | |||
148 | #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST) | ||
149 | |||
150 | /* names of GPIO resource */ | ||
151 | #define MG_RST_PIN "mg_rst" | ||
152 | /* except MG_BOOT_DEV, reset-out pin should be assigned */ | ||
153 | #define MG_RSTOUT_PIN "mg_rstout" | ||
154 | |||
155 | /* private driver data */ | ||
156 | struct mg_drv_data { | ||
157 | /* disk resource */ | ||
158 | u32 use_polling; | ||
159 | |||
160 | /* device attribution */ | ||
161 | u32 dev_attr; | ||
162 | |||
163 | /* internally used */ | ||
164 | struct mg_host *host; | ||
165 | }; | ||
166 | |||
167 | /* main structure for mflash driver */ | ||
168 | struct mg_host { | ||
169 | struct device *dev; | ||
170 | |||
171 | struct request_queue *breq; | ||
172 | spinlock_t lock; | ||
173 | struct gendisk *gd; | ||
174 | |||
175 | struct timer_list timer; | ||
176 | void (*mg_do_intr) (struct mg_host *); | ||
177 | |||
178 | u16 id[ATA_ID_WORDS]; | ||
179 | |||
180 | u16 cyls; | ||
181 | u16 heads; | ||
182 | u16 sectors; | ||
183 | u32 n_sectors; | ||
184 | u32 nres_sectors; | ||
185 | |||
186 | void __iomem *dev_base; | ||
187 | unsigned int irq; | ||
188 | unsigned int rst; | ||
189 | unsigned int rstout; | ||
190 | |||
191 | u32 major; | ||
192 | u32 error; | ||
193 | }; | ||
194 | |||
195 | /* | ||
196 | * Debugging macro and defines | ||
197 | */ | ||
198 | #undef DO_MG_DEBUG | ||
199 | #ifdef DO_MG_DEBUG | ||
200 | # define MG_DBG(fmt, args...) \ | ||
201 | printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args) | ||
202 | #else /* CONFIG_MG_DEBUG */ | ||
203 | # define MG_DBG(fmt, args...) do { } while (0) | ||
204 | #endif /* CONFIG_MG_DEBUG */ | ||
205 | |||
206 | #endif | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index bff1f0d475c7..ad613ed66ab0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -19,6 +19,7 @@ struct anon_vma; | |||
19 | struct file_ra_state; | 19 | struct file_ra_state; |
20 | struct user_struct; | 20 | struct user_struct; |
21 | struct writeback_control; | 21 | struct writeback_control; |
22 | struct rlimit; | ||
22 | 23 | ||
23 | #ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */ | 24 | #ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */ |
24 | extern unsigned long max_mapnr; | 25 | extern unsigned long max_mapnr; |
@@ -580,12 +581,10 @@ static inline void set_page_links(struct page *page, enum zone_type zone, | |||
580 | */ | 581 | */ |
581 | static inline unsigned long round_hint_to_min(unsigned long hint) | 582 | static inline unsigned long round_hint_to_min(unsigned long hint) |
582 | { | 583 | { |
583 | #ifdef CONFIG_SECURITY | ||
584 | hint &= PAGE_MASK; | 584 | hint &= PAGE_MASK; |
585 | if (((void *)hint != NULL) && | 585 | if (((void *)hint != NULL) && |
586 | (hint < mmap_min_addr)) | 586 | (hint < mmap_min_addr)) |
587 | return PAGE_ALIGN(mmap_min_addr); | 587 | return PAGE_ALIGN(mmap_min_addr); |
588 | #endif | ||
589 | return hint; | 588 | return hint; |
590 | } | 589 | } |
591 | 590 | ||
@@ -1031,8 +1030,6 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn, | |||
1031 | unsigned long end_pfn); | 1030 | unsigned long end_pfn); |
1032 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, | 1031 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, |
1033 | unsigned long end_pfn); | 1032 | unsigned long end_pfn); |
1034 | extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn, | ||
1035 | unsigned long end_pfn); | ||
1036 | extern void remove_all_active_ranges(void); | 1033 | extern void remove_all_active_ranges(void); |
1037 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, | 1034 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, |
1038 | unsigned long end_pfn); | 1035 | unsigned long end_pfn); |
@@ -1319,8 +1316,8 @@ int vmemmap_populate_basepages(struct page *start_page, | |||
1319 | int vmemmap_populate(struct page *start_page, unsigned long pages, int node); | 1316 | int vmemmap_populate(struct page *start_page, unsigned long pages, int node); |
1320 | void vmemmap_populate_print_last(void); | 1317 | void vmemmap_populate_print_last(void); |
1321 | 1318 | ||
1322 | extern void *alloc_locked_buffer(size_t size); | 1319 | extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim, |
1323 | extern void free_locked_buffer(void *buffer, size_t size); | 1320 | size_t size); |
1324 | extern void release_locked_buffer(void *buffer, size_t size); | 1321 | extern void refund_locked_memory(struct mm_struct *mm, size_t size); |
1325 | #endif /* __KERNEL__ */ | 1322 | #endif /* __KERNEL__ */ |
1326 | #endif /* _LINUX_MM_H */ | 1323 | #endif /* _LINUX_MM_H */ |
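alloc_locked_buffer()/free_locked_buffer() give way to account_locked_memory() and refund_locked_memory(), which handle only the RLIMIT_MEMLOCK accounting and leave the allocation to the caller. A hedged sketch of the implied pattern; the allocation itself and the helper name are illustrative:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

static void *example_pinned_alloc(size_t size)
{
	struct rlimit *rlim = current->signal->rlim + RLIMIT_MEMLOCK;
	void *buf;

	if (account_locked_memory(current->mm, rlim, size))
		return NULL;	/* charge would exceed the mlock limit */

	buf = vmalloc(size);
	if (!buf)
		refund_locked_memory(current->mm, size);	/* undo the charge */
	return buf;
}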
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h index 3d1b7bde1283..97491f78b08c 100644 --- a/include/linux/mmiotrace.h +++ b/include/linux/mmiotrace.h | |||
@@ -30,6 +30,8 @@ extern unsigned int kmmio_count; | |||
30 | 30 | ||
31 | extern int register_kmmio_probe(struct kmmio_probe *p); | 31 | extern int register_kmmio_probe(struct kmmio_probe *p); |
32 | extern void unregister_kmmio_probe(struct kmmio_probe *p); | 32 | extern void unregister_kmmio_probe(struct kmmio_probe *p); |
33 | extern int kmmio_init(void); | ||
34 | extern void kmmio_cleanup(void); | ||
33 | 35 | ||
34 | #ifdef CONFIG_MMIOTRACE | 36 | #ifdef CONFIG_MMIOTRACE |
35 | /* kmmio is active by some kmmio_probes? */ | 37 | /* kmmio is active by some kmmio_probes? */ |
diff --git a/include/linux/module.h b/include/linux/module.h index 627ac082e2a6..a7bc6e7b43a7 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -77,6 +77,7 @@ search_extable(const struct exception_table_entry *first, | |||
77 | void sort_extable(struct exception_table_entry *start, | 77 | void sort_extable(struct exception_table_entry *start, |
78 | struct exception_table_entry *finish); | 78 | struct exception_table_entry *finish); |
79 | void sort_main_extable(void); | 79 | void sort_main_extable(void); |
80 | void trim_init_extable(struct module *m); | ||
80 | 81 | ||
81 | #ifdef MODULE | 82 | #ifdef MODULE |
82 | #define MODULE_GENERIC_TABLE(gtype,name) \ | 83 | #define MODULE_GENERIC_TABLE(gtype,name) \ |
@@ -337,6 +338,14 @@ struct module | |||
337 | const char **trace_bprintk_fmt_start; | 338 | const char **trace_bprintk_fmt_start; |
338 | unsigned int num_trace_bprintk_fmt; | 339 | unsigned int num_trace_bprintk_fmt; |
339 | #endif | 340 | #endif |
341 | #ifdef CONFIG_EVENT_TRACING | ||
342 | struct ftrace_event_call *trace_events; | ||
343 | unsigned int num_trace_events; | ||
344 | #endif | ||
345 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | ||
346 | unsigned long *ftrace_callsites; | ||
347 | unsigned int num_ftrace_callsites; | ||
348 | #endif | ||
340 | 349 | ||
341 | #ifdef CONFIG_MODULE_UNLOAD | 350 | #ifdef CONFIG_MODULE_UNLOAD |
342 | /* What modules depend on me? */ | 351 | /* What modules depend on me? */ |
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index a4f0b931846c..6547c3cdbc4c 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -36,9 +36,14 @@ typedef int (*param_set_fn)(const char *val, struct kernel_param *kp); | |||
36 | /* Returns length written or -errno. Buffer is 4k (ie. be short!) */ | 36 | /* Returns length written or -errno. Buffer is 4k (ie. be short!) */ |
37 | typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp); | 37 | typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp); |
38 | 38 | ||
39 | /* Flag bits for kernel_param.flags */ | ||
40 | #define KPARAM_KMALLOCED 1 | ||
41 | #define KPARAM_ISBOOL 2 | ||
42 | |||
39 | struct kernel_param { | 43 | struct kernel_param { |
40 | const char *name; | 44 | const char *name; |
41 | unsigned int perm; | 45 | u16 perm; |
46 | u16 flags; | ||
42 | param_set_fn set; | 47 | param_set_fn set; |
43 | param_get_fn get; | 48 | param_get_fn get; |
44 | union { | 49 | union { |
@@ -79,7 +84,7 @@ struct kparam_array | |||
79 | parameters. perm sets the visibility in sysfs: 000 means it's | 84 | parameters. perm sets the visibility in sysfs: 000 means it's |
80 | not there, read bits mean it's readable, write bits mean it's | 85 | not there, read bits mean it's readable, write bits mean it's |
81 | writable. */ | 86 | writable. */ |
82 | #define __module_param_call(prefix, name, set, get, arg, perm) \ | 87 | #define __module_param_call(prefix, name, set, get, arg, isbool, perm) \ |
83 | /* Default value instead of permissions? */ \ | 88 | /* Default value instead of permissions? */ \ |
84 | static int __param_perm_check_##name __attribute__((unused)) = \ | 89 | static int __param_perm_check_##name __attribute__((unused)) = \ |
85 | BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \ | 90 | BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \ |
@@ -88,10 +93,13 @@ struct kparam_array | |||
88 | static struct kernel_param __moduleparam_const __param_##name \ | 93 | static struct kernel_param __moduleparam_const __param_##name \ |
89 | __used \ | 94 | __used \ |
90 | __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ | 95 | __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ |
91 | = { __param_str_##name, perm, set, get, { arg } } | 96 | = { __param_str_##name, perm, isbool ? KPARAM_ISBOOL : 0, \ |
97 | set, get, { arg } } | ||
92 | 98 | ||
93 | #define module_param_call(name, set, get, arg, perm) \ | 99 | #define module_param_call(name, set, get, arg, perm) \ |
94 | __module_param_call(MODULE_PARAM_PREFIX, name, set, get, arg, perm) | 100 | __module_param_call(MODULE_PARAM_PREFIX, \ |
101 | name, set, get, arg, \ | ||
102 | __same_type(*(arg), bool), perm) | ||
95 | 103 | ||
96 | /* Helper functions: type is byte, short, ushort, int, uint, long, | 104 | /* Helper functions: type is byte, short, ushort, int, uint, long, |
97 | ulong, charp, bool or invbool, or XXX if you define param_get_XXX, | 105 | ulong, charp, bool or invbool, or XXX if you define param_get_XXX, |
@@ -120,15 +128,16 @@ struct kparam_array | |||
120 | #define core_param(name, var, type, perm) \ | 128 | #define core_param(name, var, type, perm) \ |
121 | param_check_##type(name, &(var)); \ | 129 | param_check_##type(name, &(var)); \ |
122 | __module_param_call("", name, param_set_##type, param_get_##type, \ | 130 | __module_param_call("", name, param_set_##type, param_get_##type, \ |
123 | &var, perm) | 131 | &var, __same_type(var, bool), perm) |
124 | #endif /* !MODULE */ | 132 | #endif /* !MODULE */ |
125 | 133 | ||
126 | /* Actually copy string: maxlen param is usually sizeof(string). */ | 134 | /* Actually copy string: maxlen param is usually sizeof(string). */ |
127 | #define module_param_string(name, string, len, perm) \ | 135 | #define module_param_string(name, string, len, perm) \ |
128 | static const struct kparam_string __param_string_##name \ | 136 | static const struct kparam_string __param_string_##name \ |
129 | = { len, string }; \ | 137 | = { len, string }; \ |
130 | module_param_call(name, param_set_copystring, param_get_string, \ | 138 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
131 | .str = &__param_string_##name, perm); \ | 139 | param_set_copystring, param_get_string, \ |
140 | .str = &__param_string_##name, 0, perm); \ | ||
132 | __MODULE_PARM_TYPE(name, "string") | 141 | __MODULE_PARM_TYPE(name, "string") |
133 | 142 | ||
134 | /* Called on module insert or kernel boot */ | 143 | /* Called on module insert or kernel boot */ |
@@ -186,21 +195,30 @@ extern int param_set_charp(const char *val, struct kernel_param *kp); | |||
186 | extern int param_get_charp(char *buffer, struct kernel_param *kp); | 195 | extern int param_get_charp(char *buffer, struct kernel_param *kp); |
187 | #define param_check_charp(name, p) __param_check(name, p, char *) | 196 | #define param_check_charp(name, p) __param_check(name, p, char *) |
188 | 197 | ||
198 | /* For historical reasons "bool" parameters can be (unsigned) "int". */ | ||
189 | extern int param_set_bool(const char *val, struct kernel_param *kp); | 199 | extern int param_set_bool(const char *val, struct kernel_param *kp); |
190 | extern int param_get_bool(char *buffer, struct kernel_param *kp); | 200 | extern int param_get_bool(char *buffer, struct kernel_param *kp); |
191 | #define param_check_bool(name, p) __param_check(name, p, int) | 201 | #define param_check_bool(name, p) \ |
202 | static inline void __check_##name(void) \ | ||
203 | { \ | ||
204 | BUILD_BUG_ON(!__same_type(*(p), bool) && \ | ||
205 | !__same_type(*(p), unsigned int) && \ | ||
206 | !__same_type(*(p), int)); \ | ||
207 | } | ||
192 | 208 | ||
193 | extern int param_set_invbool(const char *val, struct kernel_param *kp); | 209 | extern int param_set_invbool(const char *val, struct kernel_param *kp); |
194 | extern int param_get_invbool(char *buffer, struct kernel_param *kp); | 210 | extern int param_get_invbool(char *buffer, struct kernel_param *kp); |
195 | #define param_check_invbool(name, p) __param_check(name, p, int) | 211 | #define param_check_invbool(name, p) __param_check(name, p, bool) |
196 | 212 | ||
197 | /* Comma-separated array: *nump is set to number they actually specified. */ | 213 | /* Comma-separated array: *nump is set to number they actually specified. */ |
198 | #define module_param_array_named(name, array, type, nump, perm) \ | 214 | #define module_param_array_named(name, array, type, nump, perm) \ |
199 | static const struct kparam_array __param_arr_##name \ | 215 | static const struct kparam_array __param_arr_##name \ |
200 | = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\ | 216 | = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\ |
201 | sizeof(array[0]), array }; \ | 217 | sizeof(array[0]), array }; \ |
202 | module_param_call(name, param_array_set, param_array_get, \ | 218 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
203 | .arr = &__param_arr_##name, perm); \ | 219 | param_array_set, param_array_get, \ |
220 | .arr = &__param_arr_##name, \ | ||
221 | __same_type(array[0], bool), perm); \ | ||
204 | __MODULE_PARM_TYPE(name, "array of " #type) | 222 | __MODULE_PARM_TYPE(name, "array of " #type) |
205 | 223 | ||
206 | #define module_param_array(name, type, nump, perm) \ | 224 | #define module_param_array(name, type, nump, perm) \ |
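A hedged usage sketch of the bool-aware macros above: with the new param_check_bool(), the variable behind a bool parameter may be bool, int or unsigned int, and KPARAM_ISBOOL is recorded automatically by module_param_call()/__module_param_call(). The module below is hypothetical.

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool enable_feature = true;	/* a plain bool is now accepted */
module_param(enable_feature, bool, 0444);
MODULE_PARM_DESC(enable_feature, "Enable the example feature");

static unsigned int legacy_flag;	/* historical int-backed bool still builds */
module_param(legacy_flag, bool, 0644);
MODULE_PARM_DESC(legacy_flag, "Legacy unsigned int used as a bool parameter");

MODULE_LICENSE("GPL");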
diff --git a/include/linux/mount.h b/include/linux/mount.h index 51f55f903aff..5d5275364867 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h | |||
@@ -30,7 +30,7 @@ struct mnt_namespace; | |||
30 | #define MNT_STRICTATIME 0x80 | 30 | #define MNT_STRICTATIME 0x80 |
31 | 31 | ||
32 | #define MNT_SHRINKABLE 0x100 | 32 | #define MNT_SHRINKABLE 0x100 |
33 | #define MNT_IMBALANCED_WRITE_COUNT 0x200 /* just for debugging */ | 33 | #define MNT_WRITE_HOLD 0x200 |
34 | 34 | ||
35 | #define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ | 35 | #define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ |
36 | #define MNT_UNBINDABLE 0x2000 /* if the vfsmount is an unbindable mount */ | 36 | #define MNT_UNBINDABLE 0x2000 /* if the vfsmount is an unbindable mount */ |

@@ -65,13 +65,22 @@ struct vfsmount { | |||
65 | int mnt_expiry_mark; /* true if marked for expiry */ | 65 | int mnt_expiry_mark; /* true if marked for expiry */ |
66 | int mnt_pinned; | 66 | int mnt_pinned; |
67 | int mnt_ghosts; | 67 | int mnt_ghosts; |
68 | /* | 68 | #ifdef CONFIG_SMP |
69 | * This value is not stable unless all of the mnt_writers[] spinlocks | 69 | int *mnt_writers; |
70 | * are held, and all mnt_writer[]s on this mount have 0 as their ->count | 70 | #else |
71 | */ | 71 | int mnt_writers; |
72 | atomic_t __mnt_writers; | 72 | #endif |
73 | }; | 73 | }; |
74 | 74 | ||
75 | static inline int *get_mnt_writers_ptr(struct vfsmount *mnt) | ||
76 | { | ||
77 | #ifdef CONFIG_SMP | ||
78 | return mnt->mnt_writers; | ||
79 | #else | ||
80 | return &mnt->mnt_writers; | ||
81 | #endif | ||
82 | } | ||
83 | |||
75 | static inline struct vfsmount *mntget(struct vfsmount *mnt) | 84 | static inline struct vfsmount *mntget(struct vfsmount *mnt) |
76 | { | 85 | { |
77 | if (mnt) | 86 | if (mnt) |
@@ -79,7 +88,11 @@ static inline struct vfsmount *mntget(struct vfsmount *mnt) | |||
79 | return mnt; | 88 | return mnt; |
80 | } | 89 | } |
81 | 90 | ||
91 | struct file; /* forward dec */ | ||
92 | |||
82 | extern int mnt_want_write(struct vfsmount *mnt); | 93 | extern int mnt_want_write(struct vfsmount *mnt); |
94 | extern int mnt_want_write_file(struct file *file); | ||
95 | extern int mnt_clone_write(struct vfsmount *mnt); | ||
83 | extern void mnt_drop_write(struct vfsmount *mnt); | 96 | extern void mnt_drop_write(struct vfsmount *mnt); |
84 | extern void mntput_no_expire(struct vfsmount *mnt); | 97 | extern void mntput_no_expire(struct vfsmount *mnt); |
85 | extern void mnt_pin(struct vfsmount *mnt); | 98 | extern void mnt_pin(struct vfsmount *mnt); |
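A sketch of how the write-count interfaces declared above are meant to be paired: a write reference is taken before modifying anything on the mount and dropped afterwards (mnt_want_write_file() is the variant for an already-open struct file). The helper name is made up and the update itself is elided.

#include <linux/mount.h>
#include <linux/path.h>

static int example_update_inode(struct path *path)
{
	int err;

	err = mnt_want_write(path->mnt);	/* fails if the mount is read-only */
	if (err)
		return err;

	/* ... modify the inode / write metadata here ... */

	mnt_drop_write(path->mnt);
	return 0;
}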
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 3069ec7e0ab8..878cab4f5fcc 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -150,5 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); | |||
150 | */ | 150 | */ |
151 | extern int mutex_trylock(struct mutex *lock); | 151 | extern int mutex_trylock(struct mutex *lock); |
152 | extern void mutex_unlock(struct mutex *lock); | 152 | extern void mutex_unlock(struct mutex *lock); |
153 | extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); | ||
153 | 154 | ||
154 | #endif | 155 | #endif |
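The new atomic_dec_and_mutex_lock() takes the mutex only when the atomic count actually drops to zero. A minimal teardown sketch with a hypothetical refcounted object type:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/atomic.h>

static DEFINE_MUTEX(example_list_lock);		/* protects example_list */
static LIST_HEAD(example_list);

struct example_obj {
	atomic_t		refcnt;
	struct list_head	node;
};

static void example_put(struct example_obj *obj)
{
	/* returns non-zero, with example_list_lock held, only on the final put */
	if (!atomic_dec_and_mutex_lock(&obj->refcnt, &example_list_lock))
		return;

	list_del(&obj->node);			/* last reference: unlink under the lock */
	mutex_unlock(&example_list_lock);
	kfree(obj);
}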
diff --git a/include/linux/namei.h b/include/linux/namei.h index 518098fe63af..d870ae2faedc 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
@@ -18,6 +18,7 @@ enum { MAX_NESTED_LINKS = 8 }; | |||
18 | struct nameidata { | 18 | struct nameidata { |
19 | struct path path; | 19 | struct path path; |
20 | struct qstr last; | 20 | struct qstr last; |
21 | struct path root; | ||
21 | unsigned int flags; | 22 | unsigned int flags; |
22 | int last_type; | 23 | int last_type; |
23 | unsigned depth; | 24 | unsigned depth; |
@@ -77,8 +78,8 @@ extern void release_open_intent(struct nameidata *); | |||
77 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); | 78 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); |
78 | extern struct dentry *lookup_one_noperm(const char *, struct dentry *); | 79 | extern struct dentry *lookup_one_noperm(const char *, struct dentry *); |
79 | 80 | ||
80 | extern int follow_down(struct vfsmount **, struct dentry **); | 81 | extern int follow_down(struct path *); |
81 | extern int follow_up(struct vfsmount **, struct dentry **); | 82 | extern int follow_up(struct path *); |
82 | 83 | ||
83 | extern struct dentry *lock_rename(struct dentry *, struct dentry *); | 84 | extern struct dentry *lock_rename(struct dentry *, struct dentry *); |
84 | extern void unlock_rename(struct dentry *, struct dentry *); | 85 | extern void unlock_rename(struct dentry *, struct dentry *); |
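follow_down() and follow_up() now operate on a single struct path instead of separate vfsmount/dentry pointers. A sketch of the updated calling pattern; the surrounding function is hypothetical.

#include <linux/namei.h>
#include <linux/dcache.h>
#include <linux/path.h>

static void example_resolve_mounts(struct path *path)
{
	/* step onto whatever is mounted on top of this dentry */
	while (d_mountpoint(path->dentry) && follow_down(path))
		;
	/* *path now refers to the topmost filesystem mounted here */
}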
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index bcd0201589f8..a6d9ef2bb34a 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h | |||
@@ -125,11 +125,9 @@ void nfsd_export_flush(void); | |||
125 | void exp_readlock(void); | 125 | void exp_readlock(void); |
126 | void exp_readunlock(void); | 126 | void exp_readunlock(void); |
127 | struct svc_export * rqst_exp_get_by_name(struct svc_rqst *, | 127 | struct svc_export * rqst_exp_get_by_name(struct svc_rqst *, |
128 | struct vfsmount *, | 128 | struct path *); |
129 | struct dentry *); | ||
130 | struct svc_export * rqst_exp_parent(struct svc_rqst *, | 129 | struct svc_export * rqst_exp_parent(struct svc_rqst *, |
131 | struct vfsmount *mnt, | 130 | struct path *); |
132 | struct dentry *dentry); | ||
133 | int exp_rootfh(struct auth_domain *, | 131 | int exp_rootfh(struct auth_domain *, |
134 | char *path, struct knfsd_fh *, int maxsize); | 132 | char *path, struct knfsd_fh *, int maxsize); |
135 | __be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *); | 133 | __be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *); |
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 7339c7bf7331..13f126c89ae8 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -18,7 +18,19 @@ struct page_cgroup { | |||
18 | }; | 18 | }; |
19 | 19 | ||
20 | void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat); | 20 | void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat); |
21 | void __init page_cgroup_init(void); | 21 | |
22 | #ifdef CONFIG_SPARSEMEM | ||
23 | static inline void __init page_cgroup_init_flatmem(void) | ||
24 | { | ||
25 | } | ||
26 | extern void __init page_cgroup_init(void); | ||
27 | #else | ||
28 | void __init page_cgroup_init_flatmem(void); | ||
29 | static inline void __init page_cgroup_init(void) | ||
30 | { | ||
31 | } | ||
32 | #endif | ||
33 | |||
22 | struct page_cgroup *lookup_page_cgroup(struct page *page); | 34 | struct page_cgroup *lookup_page_cgroup(struct page *page); |
23 | 35 | ||
24 | enum { | 36 | enum { |
@@ -87,6 +99,10 @@ static inline void page_cgroup_init(void) | |||
87 | { | 99 | { |
88 | } | 100 | } |
89 | 101 | ||
102 | static inline void __init page_cgroup_init_flatmem(void) | ||
103 | { | ||
104 | } | ||
105 | |||
90 | #endif | 106 | #endif |
91 | 107 | ||
92 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 108 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 28fe766393a3..19f8e6d1a4d2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2008,10 +2008,12 @@ | |||
2008 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 | 2008 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 |
2009 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C | 2009 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C |
2010 | #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 | 2010 | #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 |
2011 | #define PCI_DEVICE_ID_OXSEMI_C950 0x950B | ||
2011 | #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 | 2012 | #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 |
2012 | #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 | 2013 | #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 |
2013 | #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 | 2014 | #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 |
2014 | #define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 | 2015 | #define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 |
2016 | #define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001 | ||
2015 | 2017 | ||
2016 | #define PCI_VENDOR_ID_CHELSIO 0x1425 | 2018 | #define PCI_VENDOR_ID_CHELSIO 0x1425 |
2017 | 2019 | ||
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 1581ff235c7e..26fd9d12f050 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -86,7 +86,12 @@ struct percpu_data { | |||
86 | void *ptrs[1]; | 86 | void *ptrs[1]; |
87 | }; | 87 | }; |
88 | 88 | ||
89 | /* pointer disguising messes up the kmemleak objects tracking */ | ||
90 | #ifndef CONFIG_DEBUG_KMEMLEAK | ||
89 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) | 91 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) |
92 | #else | ||
93 | #define __percpu_disguise(pdata) (struct percpu_data *)(pdata) | ||
94 | #endif | ||
90 | 95 | ||
91 | #define per_cpu_ptr(ptr, cpu) \ | 96 | #define per_cpu_ptr(ptr, cpu) \ |
92 | ({ \ | 97 | ({ \ |
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h new file mode 100644 index 000000000000..6e133954e2e4 --- /dev/null +++ b/include/linux/perf_counter.h | |||
@@ -0,0 +1,697 @@ | |||
1 | /* | ||
2 | * Performance counters: | ||
3 | * | ||
4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | ||
5 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | ||
6 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra | ||
7 | * | ||
8 | * Data type definitions, declarations, prototypes. | ||
9 | * | ||
10 | * Started by: Thomas Gleixner and Ingo Molnar | ||
11 | * | ||
12 | * For licensing details see kernel-base/COPYING | ||
13 | */ | ||
14 | #ifndef _LINUX_PERF_COUNTER_H | ||
15 | #define _LINUX_PERF_COUNTER_H | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | #include <linux/ioctl.h> | ||
19 | #include <asm/byteorder.h> | ||
20 | |||
21 | /* | ||
22 | * User-space ABI bits: | ||
23 | */ | ||
24 | |||
25 | /* | ||
26 | * attr.type | ||
27 | */ | ||
28 | enum perf_type_id { | ||
29 | PERF_TYPE_HARDWARE = 0, | ||
30 | PERF_TYPE_SOFTWARE = 1, | ||
31 | PERF_TYPE_TRACEPOINT = 2, | ||
32 | PERF_TYPE_HW_CACHE = 3, | ||
33 | PERF_TYPE_RAW = 4, | ||
34 | |||
35 | PERF_TYPE_MAX, /* non-ABI */ | ||
36 | }; | ||
37 | |||
38 | /* | ||
39 | * Generalized performance counter event types, used by the | ||
40 | * attr.event_id parameter of the sys_perf_counter_open() | ||
41 | * syscall: | ||
42 | */ | ||
43 | enum perf_hw_id { | ||
44 | /* | ||
45 | * Common hardware events, generalized by the kernel: | ||
46 | */ | ||
47 | PERF_COUNT_HW_CPU_CYCLES = 0, | ||
48 | PERF_COUNT_HW_INSTRUCTIONS = 1, | ||
49 | PERF_COUNT_HW_CACHE_REFERENCES = 2, | ||
50 | PERF_COUNT_HW_CACHE_MISSES = 3, | ||
51 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | ||
52 | PERF_COUNT_HW_BRANCH_MISSES = 5, | ||
53 | PERF_COUNT_HW_BUS_CYCLES = 6, | ||
54 | |||
55 | PERF_COUNT_HW_MAX, /* non-ABI */ | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * Generalized hardware cache counters: | ||
60 | * | ||
61 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x | ||
62 | * { read, write, prefetch } x | ||
63 | * { accesses, misses } | ||
64 | */ | ||
65 | enum perf_hw_cache_id { | ||
66 | PERF_COUNT_HW_CACHE_L1D = 0, | ||
67 | PERF_COUNT_HW_CACHE_L1I = 1, | ||
68 | PERF_COUNT_HW_CACHE_LL = 2, | ||
69 | PERF_COUNT_HW_CACHE_DTLB = 3, | ||
70 | PERF_COUNT_HW_CACHE_ITLB = 4, | ||
71 | PERF_COUNT_HW_CACHE_BPU = 5, | ||
72 | |||
73 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ | ||
74 | }; | ||
75 | |||
76 | enum perf_hw_cache_op_id { | ||
77 | PERF_COUNT_HW_CACHE_OP_READ = 0, | ||
78 | PERF_COUNT_HW_CACHE_OP_WRITE = 1, | ||
79 | PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, | ||
80 | |||
81 | PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ | ||
82 | }; | ||
83 | |||
84 | enum perf_hw_cache_op_result_id { | ||
85 | PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, | ||
86 | PERF_COUNT_HW_CACHE_RESULT_MISS = 1, | ||
87 | |||
88 | PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ | ||
89 | }; | ||
90 | |||
91 | /* | ||
92 | * Special "software" counters provided by the kernel, even if the hardware | ||
93 | * does not support performance counters. These counters measure various | ||
94 | * physical and sw events of the kernel (and allow the profiling of them as | ||
95 | * well): | ||
96 | */ | ||
97 | enum perf_sw_ids { | ||
98 | PERF_COUNT_SW_CPU_CLOCK = 0, | ||
99 | PERF_COUNT_SW_TASK_CLOCK = 1, | ||
100 | PERF_COUNT_SW_PAGE_FAULTS = 2, | ||
101 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, | ||
102 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | ||
103 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | ||
104 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | ||
105 | |||
106 | PERF_COUNT_SW_MAX, /* non-ABI */ | ||
107 | }; | ||
108 | |||
109 | /* | ||
110 | * Bits that can be set in attr.sample_type to request information | ||
111 | * in the overflow packets. | ||
112 | */ | ||
113 | enum perf_counter_sample_format { | ||
114 | PERF_SAMPLE_IP = 1U << 0, | ||
115 | PERF_SAMPLE_TID = 1U << 1, | ||
116 | PERF_SAMPLE_TIME = 1U << 2, | ||
117 | PERF_SAMPLE_ADDR = 1U << 3, | ||
118 | PERF_SAMPLE_GROUP = 1U << 4, | ||
119 | PERF_SAMPLE_CALLCHAIN = 1U << 5, | ||
120 | PERF_SAMPLE_ID = 1U << 6, | ||
121 | PERF_SAMPLE_CPU = 1U << 7, | ||
122 | PERF_SAMPLE_PERIOD = 1U << 8, | ||
123 | }; | ||
124 | |||
125 | /* | ||
126 | * Bits that can be set in attr.read_format to request that | ||
127 | * reads on the counter should return the indicated quantities, | ||
128 | * in increasing order of bit value, after the counter value. | ||
129 | */ | ||
130 | enum perf_counter_read_format { | ||
131 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, | ||
132 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, | ||
133 | PERF_FORMAT_ID = 1U << 2, | ||
134 | }; | ||
135 | |||
136 | /* | ||
137 | * Hardware event to monitor via a performance monitoring counter: | ||
138 | */ | ||
139 | struct perf_counter_attr { | ||
140 | /* | ||
141 | * Major type: hardware/software/tracepoint/etc. | ||
142 | */ | ||
143 | __u32 type; | ||
144 | __u32 __reserved_1; | ||
145 | |||
146 | /* | ||
147 | * Type specific configuration information. | ||
148 | */ | ||
149 | __u64 config; | ||
150 | |||
151 | union { | ||
152 | __u64 sample_period; | ||
153 | __u64 sample_freq; | ||
154 | }; | ||
155 | |||
156 | __u64 sample_type; | ||
157 | __u64 read_format; | ||
158 | |||
159 | __u64 disabled : 1, /* off by default */ | ||
160 | inherit : 1, /* children inherit it */ | ||
161 | pinned : 1, /* must always be on PMU */ | ||
162 | exclusive : 1, /* only group on PMU */ | ||
163 | exclude_user : 1, /* don't count user */ | ||
164 | exclude_kernel : 1, /* ditto kernel */ | ||
165 | exclude_hv : 1, /* ditto hypervisor */ | ||
166 | exclude_idle : 1, /* don't count when idle */ | ||
167 | mmap : 1, /* include mmap data */ | ||
168 | comm : 1, /* include comm data */ | ||
169 | freq : 1, /* use freq, not period */ | ||
170 | |||
171 | __reserved_2 : 53; | ||
172 | |||
173 | __u32 wakeup_events; /* wakeup every n events */ | ||
174 | __u32 __reserved_3; | ||
175 | |||
176 | __u64 __reserved_4; | ||
177 | }; | ||
178 | |||
179 | /* | ||
180 | * Ioctls that can be done on a perf counter fd: | ||
181 | */ | ||
182 | #define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) | ||
183 | #define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) | ||
184 | #define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) | ||
185 | #define PERF_COUNTER_IOC_RESET _IO ('$', 3) | ||
186 | #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) | ||
187 | |||
188 | enum perf_counter_ioc_flags { | ||
189 | PERF_IOC_FLAG_GROUP = 1U << 0, | ||
190 | }; | ||
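A user-space sketch tying the attr structure and the ioctls together: open a CPU-cycles counter on the current task, then enable it. The syscall number __NR_perf_counter_open is assumed to be provided by <asm/unistd.h> on a kernel carrying this ABI; error handling is minimal.

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

static int open_cycle_counter(void)
{
	struct perf_counter_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type     = PERF_TYPE_HARDWARE;
	attr.config   = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;			/* created disabled, enabled below */

	/* pid = 0 (current task), cpu = -1 (any CPU), no group, no flags */
	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
	if (fd >= 0)
		ioctl(fd, PERF_COUNTER_IOC_ENABLE, 0);
	return fd;
}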
191 | |||
192 | /* | ||
193 | * Structure of the page that can be mapped via mmap | ||
194 | */ | ||
195 | struct perf_counter_mmap_page { | ||
196 | __u32 version; /* version number of this structure */ | ||
197 | __u32 compat_version; /* lowest version this is compat with */ | ||
198 | |||
199 | /* | ||
200 | * Bits needed to read the hw counters in user-space. | ||
201 | * | ||
202 | * u32 seq; | ||
203 | * s64 count; | ||
204 | * | ||
205 | * do { | ||
206 | * seq = pc->lock; | ||
207 | * | ||
208 | * barrier() | ||
209 | * if (pc->index) { | ||
210 | * count = pmc_read(pc->index - 1); | ||
211 | * count += pc->offset; | ||
212 | * } else | ||
213 | * goto regular_read; | ||
214 | * | ||
215 | * barrier(); | ||
216 | * } while (pc->lock != seq); | ||
217 | * | ||
218 | * NOTE: for obvious reasons this only works on self-monitoring | ||
219 | * processes. | ||
220 | */ | ||
221 | __u32 lock; /* seqlock for synchronization */ | ||
222 | __u32 index; /* hardware counter identifier */ | ||
223 | __s64 offset; /* add to hardware counter value */ | ||
224 | |||
225 | /* | ||
226 | * Control data for the mmap() data buffer. | ||
227 | * | ||
228 | * User-space reading this value should issue an rmb(), on SMP capable | ||
229 | * platforms, after reading this value -- see perf_counter_wakeup(). | ||
230 | */ | ||
231 | __u64 data_head; /* head in the data section */ | ||
232 | }; | ||
233 | |||
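The pseudo-code in the comment above can be turned into a small user-space helper. This is a sketch only: pmc_read() stands in for the architecture-specific instruction that reads hardware counter n (e.g. RDPMC on x86), regular_read() for the read(2) fallback on the counter fd, and barrier() is just a compiler barrier; none of these names exist in this header.

#include <linux/perf_counter.h>

#define barrier()	asm volatile("" ::: "memory")

extern __u64 pmc_read(unsigned int idx);	/* placeholder: arch counter read */
extern __u64 regular_read(int fd);		/* placeholder: read(2) fallback */

static __u64 read_self_counter(volatile struct perf_counter_mmap_page *pc, int fd)
{
	__u32 seq;
	__s64 count;

	do {
		seq = pc->lock;			/* seqlock: retry if it changes */
		barrier();

		if (!pc->index)
			return regular_read(fd);	/* counter not on a PMC right now */

		count = pmc_read(pc->index - 1) + pc->offset;

		barrier();
	} while (pc->lock != seq);

	return count;
}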
234 | #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) | ||
235 | #define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) | ||
236 | #define PERF_EVENT_MISC_KERNEL (1 << 0) | ||
237 | #define PERF_EVENT_MISC_USER (2 << 0) | ||
238 | #define PERF_EVENT_MISC_HYPERVISOR (3 << 0) | ||
239 | #define PERF_EVENT_MISC_OVERFLOW (1 << 2) | ||
240 | |||
241 | struct perf_event_header { | ||
242 | __u32 type; | ||
243 | __u16 misc; | ||
244 | __u16 size; | ||
245 | }; | ||
246 | |||
247 | enum perf_event_type { | ||
248 | |||
249 | /* | ||
250 | * The MMAP events record the PROT_EXEC mappings so that we can | ||
251 | * correlate userspace IPs to code. They have the following structure: | ||
252 | * | ||
253 | * struct { | ||
254 | * struct perf_event_header header; | ||
255 | * | ||
256 | * u32 pid, tid; | ||
257 | * u64 addr; | ||
258 | * u64 len; | ||
259 | * u64 pgoff; | ||
260 | * char filename[]; | ||
261 | * }; | ||
262 | */ | ||
263 | PERF_EVENT_MMAP = 1, | ||
264 | |||
265 | /* | ||
266 | * struct { | ||
267 | * struct perf_event_header header; | ||
268 | * | ||
269 | * u32 pid, tid; | ||
270 | * char comm[]; | ||
271 | * }; | ||
272 | */ | ||
273 | PERF_EVENT_COMM = 3, | ||
274 | |||
275 | /* | ||
276 | * struct { | ||
277 | * struct perf_event_header header; | ||
278 | * u64 time; | ||
279 | * u64 id; | ||
280 | * u64 sample_period; | ||
281 | * }; | ||
282 | */ | ||
283 | PERF_EVENT_PERIOD = 4, | ||
284 | |||
285 | /* | ||
286 | * struct { | ||
287 | * struct perf_event_header header; | ||
288 | * u64 time; | ||
289 | * u64 id; | ||
290 | * }; | ||
291 | */ | ||
292 | PERF_EVENT_THROTTLE = 5, | ||
293 | PERF_EVENT_UNTHROTTLE = 6, | ||
294 | |||
295 | /* | ||
296 | * struct { | ||
297 | * struct perf_event_header header; | ||
298 | * u32 pid, ppid; | ||
299 | * }; | ||
300 | */ | ||
301 | PERF_EVENT_FORK = 7, | ||
302 | |||
303 | /* | ||
304 | * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field | ||
305 | * will be PERF_RECORD_* | ||
306 | * | ||
307 | * struct { | ||
308 | * struct perf_event_header header; | ||
309 | * | ||
310 | * { u64 ip; } && PERF_RECORD_IP | ||
311 | * { u32 pid, tid; } && PERF_RECORD_TID | ||
312 | * { u64 time; } && PERF_RECORD_TIME | ||
313 | * { u64 addr; } && PERF_RECORD_ADDR | ||
314 | * { u64 config; } && PERF_RECORD_CONFIG | ||
315 | * { u32 cpu, res; } && PERF_RECORD_CPU | ||
316 | * | ||
317 | * { u64 nr; | ||
318 | * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP | ||
319 | * | ||
320 | * { u16 nr, | ||
321 | * hv, | ||
322 | * kernel, | ||
323 | * user; | ||
324 | * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN | ||
325 | * }; | ||
326 | */ | ||
327 | }; | ||
328 | |||
329 | #ifdef __KERNEL__ | ||
330 | /* | ||
331 | * Kernel-internal data types and definitions: | ||
332 | */ | ||
333 | |||
334 | #ifdef CONFIG_PERF_COUNTERS | ||
335 | # include <asm/perf_counter.h> | ||
336 | #endif | ||
337 | |||
338 | #include <linux/list.h> | ||
339 | #include <linux/mutex.h> | ||
340 | #include <linux/rculist.h> | ||
341 | #include <linux/rcupdate.h> | ||
342 | #include <linux/spinlock.h> | ||
343 | #include <linux/hrtimer.h> | ||
344 | #include <linux/fs.h> | ||
345 | #include <linux/pid_namespace.h> | ||
346 | #include <asm/atomic.h> | ||
347 | |||
348 | struct task_struct; | ||
349 | |||
350 | /** | ||
351 | * struct hw_perf_counter - performance counter hardware details: | ||
352 | */ | ||
353 | struct hw_perf_counter { | ||
354 | #ifdef CONFIG_PERF_COUNTERS | ||
355 | union { | ||
356 | struct { /* hardware */ | ||
357 | u64 config; | ||
358 | unsigned long config_base; | ||
359 | unsigned long counter_base; | ||
360 | int idx; | ||
361 | }; | ||
362 | union { /* software */ | ||
363 | atomic64_t count; | ||
364 | struct hrtimer hrtimer; | ||
365 | }; | ||
366 | }; | ||
367 | atomic64_t prev_count; | ||
368 | u64 sample_period; | ||
369 | u64 last_period; | ||
370 | atomic64_t period_left; | ||
371 | u64 interrupts; | ||
372 | |||
373 | u64 freq_count; | ||
374 | u64 freq_interrupts; | ||
375 | u64 freq_stamp; | ||
376 | #endif | ||
377 | }; | ||
378 | |||
379 | struct perf_counter; | ||
380 | |||
381 | /** | ||
382 | * struct pmu - generic performance monitoring unit | ||
383 | */ | ||
384 | struct pmu { | ||
385 | int (*enable) (struct perf_counter *counter); | ||
386 | void (*disable) (struct perf_counter *counter); | ||
387 | void (*read) (struct perf_counter *counter); | ||
388 | void (*unthrottle) (struct perf_counter *counter); | ||
389 | }; | ||
390 | |||
391 | /** | ||
392 | * enum perf_counter_active_state - the states of a counter | ||
393 | */ | ||
394 | enum perf_counter_active_state { | ||
395 | PERF_COUNTER_STATE_ERROR = -2, | ||
396 | PERF_COUNTER_STATE_OFF = -1, | ||
397 | PERF_COUNTER_STATE_INACTIVE = 0, | ||
398 | PERF_COUNTER_STATE_ACTIVE = 1, | ||
399 | }; | ||
400 | |||
401 | struct file; | ||
402 | |||
403 | struct perf_mmap_data { | ||
404 | struct rcu_head rcu_head; | ||
405 | int nr_pages; /* nr of data pages */ | ||
406 | int nr_locked; /* nr pages mlocked */ | ||
407 | |||
408 | atomic_t poll; /* POLL_ for wakeups */ | ||
409 | atomic_t events; /* event limit */ | ||
410 | |||
411 | atomic_long_t head; /* write position */ | ||
412 | atomic_long_t done_head; /* completed head */ | ||
413 | |||
414 | atomic_t lock; /* concurrent writes */ | ||
415 | |||
416 | atomic_t wakeup; /* needs a wakeup */ | ||
417 | |||
418 | struct perf_counter_mmap_page *user_page; | ||
419 | void *data_pages[0]; | ||
420 | }; | ||
421 | |||
422 | struct perf_pending_entry { | ||
423 | struct perf_pending_entry *next; | ||
424 | void (*func)(struct perf_pending_entry *); | ||
425 | }; | ||
426 | |||
427 | /** | ||
428 | * struct perf_counter - performance counter kernel representation: | ||
429 | */ | ||
430 | struct perf_counter { | ||
431 | #ifdef CONFIG_PERF_COUNTERS | ||
432 | struct list_head list_entry; | ||
433 | struct list_head event_entry; | ||
434 | struct list_head sibling_list; | ||
435 | int nr_siblings; | ||
436 | struct perf_counter *group_leader; | ||
437 | const struct pmu *pmu; | ||
438 | |||
439 | enum perf_counter_active_state state; | ||
440 | atomic64_t count; | ||
441 | |||
442 | /* | ||
443 | * These are the total time in nanoseconds that the counter | ||
444 | * has been enabled (i.e. eligible to run, and the task has | ||
445 | * been scheduled in, if this is a per-task counter) | ||
446 | * and running (scheduled onto the CPU), respectively. | ||
447 | * | ||
448 | * They are computed from tstamp_enabled, tstamp_running and | ||
449 | * tstamp_stopped when the counter is in INACTIVE or ACTIVE state. | ||
450 | */ | ||
451 | u64 total_time_enabled; | ||
452 | u64 total_time_running; | ||
453 | |||
454 | /* | ||
455 | * These are timestamps used for computing total_time_enabled | ||
456 | * and total_time_running when the counter is in INACTIVE or | ||
457 | * ACTIVE state, measured in nanoseconds from an arbitrary point | ||
458 | * in time. | ||
459 | * tstamp_enabled: the notional time when the counter was enabled | ||
460 | * tstamp_running: the notional time when the counter was scheduled on | ||
461 | * tstamp_stopped: in INACTIVE state, the notional time when the | ||
462 | * counter was scheduled off. | ||
463 | */ | ||
464 | u64 tstamp_enabled; | ||
465 | u64 tstamp_running; | ||
466 | u64 tstamp_stopped; | ||
467 | |||
468 | struct perf_counter_attr attr; | ||
469 | struct hw_perf_counter hw; | ||
470 | |||
471 | struct perf_counter_context *ctx; | ||
472 | struct file *filp; | ||
473 | |||
474 | /* | ||
475 | * These accumulate total time (in nanoseconds) that children | ||
476 | * counters have been enabled and running, respectively. | ||
477 | */ | ||
478 | atomic64_t child_total_time_enabled; | ||
479 | atomic64_t child_total_time_running; | ||
480 | |||
481 | /* | ||
482 | * Protect attach/detach and child_list: | ||
483 | */ | ||
484 | struct mutex child_mutex; | ||
485 | struct list_head child_list; | ||
486 | struct perf_counter *parent; | ||
487 | |||
488 | int oncpu; | ||
489 | int cpu; | ||
490 | |||
491 | struct list_head owner_entry; | ||
492 | struct task_struct *owner; | ||
493 | |||
494 | /* mmap bits */ | ||
495 | struct mutex mmap_mutex; | ||
496 | atomic_t mmap_count; | ||
497 | struct perf_mmap_data *data; | ||
498 | |||
499 | /* poll related */ | ||
500 | wait_queue_head_t waitq; | ||
501 | struct fasync_struct *fasync; | ||
502 | |||
503 | /* delayed work for NMIs and such */ | ||
504 | int pending_wakeup; | ||
505 | int pending_kill; | ||
506 | int pending_disable; | ||
507 | struct perf_pending_entry pending; | ||
508 | |||
509 | atomic_t event_limit; | ||
510 | |||
511 | void (*destroy)(struct perf_counter *); | ||
512 | struct rcu_head rcu_head; | ||
513 | |||
514 | struct pid_namespace *ns; | ||
515 | u64 id; | ||
516 | #endif | ||
517 | }; | ||
518 | |||
519 | /** | ||
520 | * struct perf_counter_context - counter context structure | ||
521 | * | ||
522 | * Used as a container for task counters and CPU counters as well: | ||
523 | */ | ||
524 | struct perf_counter_context { | ||
525 | /* | ||
526 | * Protect the states of the counters in the list, | ||
527 | * nr_active, and the list: | ||
528 | */ | ||
529 | spinlock_t lock; | ||
530 | /* | ||
531 | * Protect the list of counters. Locking either mutex or lock | ||
532 | * is sufficient to ensure the list doesn't change; to change | ||
533 | * the list you need to lock both the mutex and the spinlock. | ||
534 | */ | ||
535 | struct mutex mutex; | ||
536 | |||
537 | struct list_head counter_list; | ||
538 | struct list_head event_list; | ||
539 | int nr_counters; | ||
540 | int nr_active; | ||
541 | int is_active; | ||
542 | atomic_t refcount; | ||
543 | struct task_struct *task; | ||
544 | |||
545 | /* | ||
546 | * Context clock, runs when context enabled. | ||
547 | */ | ||
548 | u64 time; | ||
549 | u64 timestamp; | ||
550 | |||
551 | /* | ||
552 | * These fields let us detect when two contexts have both | ||
553 | * been cloned (inherited) from a common ancestor. | ||
554 | */ | ||
555 | struct perf_counter_context *parent_ctx; | ||
556 | u64 parent_gen; | ||
557 | u64 generation; | ||
558 | int pin_count; | ||
559 | struct rcu_head rcu_head; | ||
560 | }; | ||
561 | |||
562 | /** | ||
563 | * struct perf_counter_cpu_context - per cpu counter context structure | ||
564 | */ | ||
565 | struct perf_cpu_context { | ||
566 | struct perf_counter_context ctx; | ||
567 | struct perf_counter_context *task_ctx; | ||
568 | int active_oncpu; | ||
569 | int max_pertask; | ||
570 | int exclusive; | ||
571 | |||
572 | /* | ||
573 | * Recursion avoidance: | ||
574 | * | ||
575 | * task, softirq, irq, nmi context | ||
576 | */ | ||
577 | int recursion[4]; | ||
578 | }; | ||
579 | |||
580 | #ifdef CONFIG_PERF_COUNTERS | ||
581 | |||
582 | /* | ||
583 | * Set by architecture code: | ||
584 | */ | ||
585 | extern int perf_max_counters; | ||
586 | |||
587 | extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); | ||
588 | |||
589 | extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); | ||
590 | extern void perf_counter_task_sched_out(struct task_struct *task, | ||
591 | struct task_struct *next, int cpu); | ||
592 | extern void perf_counter_task_tick(struct task_struct *task, int cpu); | ||
593 | extern int perf_counter_init_task(struct task_struct *child); | ||
594 | extern void perf_counter_exit_task(struct task_struct *child); | ||
595 | extern void perf_counter_free_task(struct task_struct *task); | ||
596 | extern void perf_counter_do_pending(void); | ||
597 | extern void perf_counter_print_debug(void); | ||
598 | extern void __perf_disable(void); | ||
599 | extern bool __perf_enable(void); | ||
600 | extern void perf_disable(void); | ||
601 | extern void perf_enable(void); | ||
602 | extern int perf_counter_task_disable(void); | ||
603 | extern int perf_counter_task_enable(void); | ||
604 | extern int hw_perf_group_sched_in(struct perf_counter *group_leader, | ||
605 | struct perf_cpu_context *cpuctx, | ||
606 | struct perf_counter_context *ctx, int cpu); | ||
607 | extern void perf_counter_update_userpage(struct perf_counter *counter); | ||
608 | |||
609 | struct perf_sample_data { | ||
610 | struct pt_regs *regs; | ||
611 | u64 addr; | ||
612 | u64 period; | ||
613 | }; | ||
614 | |||
615 | extern int perf_counter_overflow(struct perf_counter *counter, int nmi, | ||
616 | struct perf_sample_data *data); | ||
617 | |||
618 | /* | ||
619 | * Return 1 for a software counter, 0 for a hardware counter | ||
620 | */ | ||
621 | static inline int is_software_counter(struct perf_counter *counter) | ||
622 | { | ||
623 | return (counter->attr.type != PERF_TYPE_RAW) && | ||
624 | (counter->attr.type != PERF_TYPE_HARDWARE); | ||
625 | } | ||
626 | |||
627 | extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); | ||
628 | |||
629 | extern void __perf_counter_mmap(struct vm_area_struct *vma); | ||
630 | |||
631 | static inline void perf_counter_mmap(struct vm_area_struct *vma) | ||
632 | { | ||
633 | if (vma->vm_flags & VM_EXEC) | ||
634 | __perf_counter_mmap(vma); | ||
635 | } | ||
636 | |||
637 | extern void perf_counter_comm(struct task_struct *tsk); | ||
638 | extern void perf_counter_fork(struct task_struct *tsk); | ||
639 | |||
640 | extern void perf_counter_task_migration(struct task_struct *task, int cpu); | ||
641 | |||
642 | #define MAX_STACK_DEPTH 255 | ||
643 | |||
644 | struct perf_callchain_entry { | ||
645 | u16 nr; | ||
646 | u16 hv; | ||
647 | u16 kernel; | ||
648 | u16 user; | ||
649 | u64 ip[MAX_STACK_DEPTH]; | ||
650 | }; | ||
651 | |||
652 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | ||
653 | |||
654 | extern int sysctl_perf_counter_paranoid; | ||
655 | extern int sysctl_perf_counter_mlock; | ||
656 | extern int sysctl_perf_counter_sample_rate; | ||
657 | |||
658 | extern void perf_counter_init(void); | ||
659 | |||
660 | #ifndef perf_misc_flags | ||
661 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ | ||
662 | PERF_EVENT_MISC_KERNEL) | ||
663 | #define perf_instruction_pointer(regs) instruction_pointer(regs) | ||
664 | #endif | ||
665 | |||
666 | #else | ||
667 | static inline void | ||
668 | perf_counter_task_sched_in(struct task_struct *task, int cpu) { } | ||
669 | static inline void | ||
670 | perf_counter_task_sched_out(struct task_struct *task, | ||
671 | struct task_struct *next, int cpu) { } | ||
672 | static inline void | ||
673 | perf_counter_task_tick(struct task_struct *task, int cpu) { } | ||
674 | static inline int perf_counter_init_task(struct task_struct *child) { return 0; } | ||
675 | static inline void perf_counter_exit_task(struct task_struct *child) { } | ||
676 | static inline void perf_counter_free_task(struct task_struct *task) { } | ||
677 | static inline void perf_counter_do_pending(void) { } | ||
678 | static inline void perf_counter_print_debug(void) { } | ||
679 | static inline void perf_disable(void) { } | ||
680 | static inline void perf_enable(void) { } | ||
681 | static inline int perf_counter_task_disable(void) { return -EINVAL; } | ||
682 | static inline int perf_counter_task_enable(void) { return -EINVAL; } | ||
683 | |||
684 | static inline void | ||
685 | perf_swcounter_event(u32 event, u64 nr, int nmi, | ||
686 | struct pt_regs *regs, u64 addr) { } | ||
687 | |||
688 | static inline void perf_counter_mmap(struct vm_area_struct *vma) { } | ||
689 | static inline void perf_counter_comm(struct task_struct *tsk) { } | ||
690 | static inline void perf_counter_fork(struct task_struct *tsk) { } | ||
691 | static inline void perf_counter_init(void) { } | ||
692 | static inline void perf_counter_task_migration(struct task_struct *task, | ||
693 | int cpu) { } | ||
694 | #endif | ||
695 | |||
696 | #endif /* __KERNEL__ */ | ||
697 | #endif /* _LINUX_PERF_COUNTER_H */ | ||
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index c8f038554e80..b43a9e039059 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h | |||
@@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void | |||
152 | void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); | 152 | void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); |
153 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); | 153 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); |
154 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); | 154 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); |
155 | void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); | ||
155 | 156 | ||
156 | #endif | 157 | #endif |
diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 48d887e3c6e7..b00df4c79c63 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h | |||
@@ -85,4 +85,7 @@ | |||
85 | #define PR_SET_TIMERSLACK 29 | 85 | #define PR_SET_TIMERSLACK 29 |
86 | #define PR_GET_TIMERSLACK 30 | 86 | #define PR_GET_TIMERSLACK 30 |
87 | 87 | ||
88 | #define PR_TASK_PERF_COUNTERS_DISABLE 31 | ||
89 | #define PR_TASK_PERF_COUNTERS_ENABLE 32 | ||
90 | |||
88 | #endif /* _LINUX_PRCTL_H */ | 91 | #endif /* _LINUX_PRCTL_H */ |
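A user-space sketch of the two new requests; they act on all perf counters attached to the calling task. The wrapper name is made up.

#include <sys/prctl.h>
#include <linux/prctl.h>

static void set_task_counters(int enable)
{
	prctl(enable ? PR_TASK_PERF_COUNTERS_ENABLE
		     : PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0);
}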
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index fbfa3d44d33d..e6e77d31c418 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -93,20 +93,9 @@ struct vmcore { | |||
93 | 93 | ||
94 | #ifdef CONFIG_PROC_FS | 94 | #ifdef CONFIG_PROC_FS |
95 | 95 | ||
96 | extern spinlock_t proc_subdir_lock; | ||
97 | |||
98 | extern void proc_root_init(void); | 96 | extern void proc_root_init(void); |
99 | 97 | ||
100 | void proc_flush_task(struct task_struct *task); | 98 | void proc_flush_task(struct task_struct *task); |
101 | struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *); | ||
102 | int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir); | ||
103 | unsigned long task_vsize(struct mm_struct *); | ||
104 | int task_statm(struct mm_struct *, int *, int *, int *, int *); | ||
105 | void task_mem(struct seq_file *, struct mm_struct *); | ||
106 | void clear_refs_smap(struct mm_struct *mm); | ||
107 | |||
108 | struct proc_dir_entry *de_get(struct proc_dir_entry *de); | ||
109 | void de_put(struct proc_dir_entry *de); | ||
110 | 99 | ||
111 | extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, | 100 | extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, |
112 | struct proc_dir_entry *parent); | 101 | struct proc_dir_entry *parent); |
@@ -116,20 +105,7 @@ struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, | |||
116 | void *data); | 105 | void *data); |
117 | extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); | 106 | extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); |
118 | 107 | ||
119 | extern struct vfsmount *proc_mnt; | ||
120 | struct pid_namespace; | 108 | struct pid_namespace; |
121 | extern int proc_fill_super(struct super_block *); | ||
122 | extern struct inode *proc_get_inode(struct super_block *, unsigned int, struct proc_dir_entry *); | ||
123 | |||
124 | /* | ||
125 | * These are generic /proc routines that use the internal | ||
126 | * "struct proc_dir_entry" tree to traverse the filesystem. | ||
127 | * | ||
128 | * The /proc root directory has extended versions to take care | ||
129 | * of the /proc/<pid> subdirectories. | ||
130 | */ | ||
131 | extern int proc_readdir(struct file *, void *, filldir_t); | ||
132 | extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); | ||
133 | 109 | ||
134 | extern int pid_ns_prepare_proc(struct pid_namespace *ns); | 110 | extern int pid_ns_prepare_proc(struct pid_namespace *ns); |
135 | extern void pid_ns_release_proc(struct pid_namespace *ns); | 111 | extern void pid_ns_release_proc(struct pid_namespace *ns); |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 67c15653fc23..59e133d39d50 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -95,7 +95,6 @@ extern void __ptrace_link(struct task_struct *child, | |||
95 | struct task_struct *new_parent); | 95 | struct task_struct *new_parent); |
96 | extern void __ptrace_unlink(struct task_struct *child); | 96 | extern void __ptrace_unlink(struct task_struct *child); |
97 | extern void exit_ptrace(struct task_struct *tracer); | 97 | extern void exit_ptrace(struct task_struct *tracer); |
98 | extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); | ||
99 | #define PTRACE_MODE_READ 1 | 98 | #define PTRACE_MODE_READ 1 |
100 | #define PTRACE_MODE_ATTACH 2 | 99 | #define PTRACE_MODE_ATTACH 2 |
101 | /* Returns 0 on success, -errno on denial. */ | 100 | /* Returns 0 on success, -errno on denial. */ |
@@ -327,15 +326,6 @@ static inline void user_enable_block_step(struct task_struct *task) | |||
327 | #define arch_ptrace_untrace(task) do { } while (0) | 326 | #define arch_ptrace_untrace(task) do { } while (0) |
328 | #endif | 327 | #endif |
329 | 328 | ||
330 | #ifndef arch_ptrace_fork | ||
331 | /* | ||
332 | * Do machine-specific work to initialize a new task. | ||
333 | * | ||
334 | * This is called from copy_process(). | ||
335 | */ | ||
336 | #define arch_ptrace_fork(child, clone_flags) do { } while (0) | ||
337 | #endif | ||
338 | |||
339 | extern int task_current_syscall(struct task_struct *target, long *callno, | 329 | extern int task_current_syscall(struct task_struct *target, long *callno, |
340 | unsigned long args[6], unsigned int maxargs, | 330 | unsigned long args[6], unsigned int maxargs, |
341 | unsigned long *sp, unsigned long *pc); | 331 | unsigned long *sp, unsigned long *pc); |
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h index 787d19ea9f46..8b9aee1a9ce3 100644 --- a/include/linux/qnx4_fs.h +++ b/include/linux/qnx4_fs.h | |||
@@ -85,65 +85,4 @@ struct qnx4_super_block { | |||
85 | struct qnx4_inode_entry AltBoot; | 85 | struct qnx4_inode_entry AltBoot; |
86 | }; | 86 | }; |
87 | 87 | ||
88 | #ifdef __KERNEL__ | ||
89 | |||
90 | #define QNX4_DEBUG 0 | ||
91 | |||
92 | #if QNX4_DEBUG | ||
93 | #define QNX4DEBUG(X) printk X | ||
94 | #else | ||
95 | #define QNX4DEBUG(X) (void) 0 | ||
96 | #endif | ||
97 | |||
98 | struct qnx4_sb_info { | ||
99 | struct buffer_head *sb_buf; /* superblock buffer */ | ||
100 | struct qnx4_super_block *sb; /* our superblock */ | ||
101 | unsigned int Version; /* may be useful */ | ||
102 | struct qnx4_inode_entry *BitMap; /* useful */ | ||
103 | }; | ||
104 | |||
105 | struct qnx4_inode_info { | ||
106 | struct qnx4_inode_entry raw; | ||
107 | loff_t mmu_private; | ||
108 | struct inode vfs_inode; | ||
109 | }; | ||
110 | |||
111 | extern struct inode *qnx4_iget(struct super_block *, unsigned long); | ||
112 | extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd); | ||
113 | extern unsigned long qnx4_count_free_blocks(struct super_block *sb); | ||
114 | extern unsigned long qnx4_block_map(struct inode *inode, long iblock); | ||
115 | |||
116 | extern struct buffer_head *qnx4_bread(struct inode *, int, int); | ||
117 | |||
118 | extern const struct inode_operations qnx4_file_inode_operations; | ||
119 | extern const struct inode_operations qnx4_dir_inode_operations; | ||
120 | extern const struct file_operations qnx4_file_operations; | ||
121 | extern const struct file_operations qnx4_dir_operations; | ||
122 | extern int qnx4_is_free(struct super_block *sb, long block); | ||
123 | extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy); | ||
124 | extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd); | ||
125 | extern void qnx4_truncate(struct inode *inode); | ||
126 | extern void qnx4_free_inode(struct inode *inode); | ||
127 | extern int qnx4_unlink(struct inode *dir, struct dentry *dentry); | ||
128 | extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry); | ||
129 | extern int qnx4_sync_file(struct file *file, struct dentry *dentry, int); | ||
130 | extern int qnx4_sync_inode(struct inode *inode); | ||
131 | |||
132 | static inline struct qnx4_sb_info *qnx4_sb(struct super_block *sb) | ||
133 | { | ||
134 | return sb->s_fs_info; | ||
135 | } | ||
136 | |||
137 | static inline struct qnx4_inode_info *qnx4_i(struct inode *inode) | ||
138 | { | ||
139 | return container_of(inode, struct qnx4_inode_info, vfs_inode); | ||
140 | } | ||
141 | |||
142 | static inline struct qnx4_inode_entry *qnx4_raw_inode(struct inode *inode) | ||
143 | { | ||
144 | return &qnx4_i(inode)->raw; | ||
145 | } | ||
146 | |||
147 | #endif /* __KERNEL__ */ | ||
148 | |||
149 | #endif | 88 | #endif |
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 36353d95c8db..7bc457593684 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h | |||
@@ -20,7 +20,12 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) | |||
20 | /* | 20 | /* |
21 | * declaration of quota_function calls in kernel. | 21 | * declaration of quota_function calls in kernel. |
22 | */ | 22 | */ |
23 | void sync_dquots(struct super_block *sb, int type); | 23 | void sync_quota_sb(struct super_block *sb, int type); |
24 | static inline void writeout_quota_sb(struct super_block *sb, int type) | ||
25 | { | ||
26 | if (sb->s_qcop->quota_sync) | ||
27 | sb->s_qcop->quota_sync(sb, type); | ||
28 | } | ||
24 | 29 | ||
25 | int dquot_initialize(struct inode *inode, int type); | 30 | int dquot_initialize(struct inode *inode, int type); |
26 | int dquot_drop(struct inode *inode); | 31 | int dquot_drop(struct inode *inode); |
@@ -253,12 +258,7 @@ static inline void vfs_dq_free_inode(struct inode *inode) | |||
253 | inode->i_sb->dq_op->free_inode(inode, 1); | 258 | inode->i_sb->dq_op->free_inode(inode, 1); |
254 | } | 259 | } |
255 | 260 | ||
256 | /* The following two functions cannot be called inside a transaction */ | 261 | /* Cannot be called inside a transaction */ |
257 | static inline void vfs_dq_sync(struct super_block *sb) | ||
258 | { | ||
259 | sync_dquots(sb, -1); | ||
260 | } | ||
261 | |||
262 | static inline int vfs_dq_off(struct super_block *sb, int remount) | 262 | static inline int vfs_dq_off(struct super_block *sb, int remount) |
263 | { | 263 | { |
264 | int ret = -ENOSYS; | 264 | int ret = -ENOSYS; |
@@ -334,7 +334,11 @@ static inline void vfs_dq_free_inode(struct inode *inode) | |||
334 | { | 334 | { |
335 | } | 335 | } |
336 | 336 | ||
337 | static inline void vfs_dq_sync(struct super_block *sb) | 337 | static inline void sync_quota_sb(struct super_block *sb, int type) |
338 | { | ||
339 | } | ||
340 | |||
341 | static inline void writeout_quota_sb(struct super_block *sb, int type) | ||
338 | { | 342 | { |
339 | } | 343 | } |
340 | 344 | ||
diff --git a/include/linux/rational.h b/include/linux/rational.h new file mode 100644 index 000000000000..4f532fcd9eea --- /dev/null +++ b/include/linux/rational.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * rational fractions | ||
3 | * | ||
4 | * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com> | ||
5 | * | ||
6 | * helper functions for handling rational numbers, | ||
7 | * e.g. when calculating optimum numerator/denominator pairs for | ||
8 | * PLL configuration, taking restricted register sizes into account | ||
9 | */ | ||
10 | |||
11 | #ifndef _LINUX_RATIONAL_H | ||
12 | #define _LINUX_RATIONAL_H | ||
13 | |||
14 | void rational_best_approximation( | ||
15 | unsigned long given_numerator, unsigned long given_denominator, | ||
16 | unsigned long max_numerator, unsigned long max_denominator, | ||
17 | unsigned long *best_numerator, unsigned long *best_denominator); | ||
18 | |||
19 | #endif /* _LINUX_RATIONAL_H */ | ||
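A usage sketch for the new helper, assuming a hypothetical PLL whose numerator and denominator registers are 8 bits wide:

#include <linux/rational.h>

static void example_pll_dividers(unsigned long rate, unsigned long parent_rate,
				 unsigned long *n, unsigned long *m)
{
	/* best n/m for rate/parent_rate with both values fitting an 8-bit register */
	rational_best_approximation(rate, parent_rate,
				    (1 << 8) - 1, (1 << 8) - 1, n, m);
}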
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index e649bd3f2c97..5710f43bbc9e 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -198,6 +198,32 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
198 | at->prev = last; | 198 | at->prev = last; |
199 | } | 199 | } |
200 | 200 | ||
201 | /** | ||
202 | * list_entry_rcu - get the struct for this entry | ||
203 | * @ptr: the &struct list_head pointer. | ||
204 | * @type: the type of the struct this is embedded in. | ||
205 | * @member: the name of the list_struct within the struct. | ||
206 | * | ||
207 | * This primitive may safely run concurrently with the _rcu list-mutation | ||
208 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | ||
209 | */ | ||
210 | #define list_entry_rcu(ptr, type, member) \ | ||
211 | container_of(rcu_dereference(ptr), type, member) | ||
212 | |||
213 | /** | ||
214 | * list_first_entry_rcu - get the first element from a list | ||
215 | * @ptr: the list head to take the element from. | ||
216 | * @type: the type of the struct this is embedded in. | ||
217 | * @member: the name of the list_struct within the struct. | ||
218 | * | ||
219 | * Note that the list is expected to be non-empty. | ||
220 | * | ||
221 | * This primitive may safely run concurrently with the _rcu list-mutation | ||
222 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | ||
223 | */ | ||
224 | #define list_first_entry_rcu(ptr, type, member) \ | ||
225 | list_entry_rcu((ptr)->next, type, member) | ||
226 | |||
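A minimal usage sketch for the new helpers, with a hypothetical element type; the read side must still run under rcu_read_lock():

#include <linux/rculist.h>

struct foo {
	int			value;
	struct list_head	list;
};

static int first_foo_value(struct list_head *head)
{
	struct foo *f;
	int val = -1;

	rcu_read_lock();
	f = list_first_entry_rcu(head, struct foo, list);
	if (&f->list != head)		/* list was non-empty */
		val = f->value;
	rcu_read_unlock();
	return val;
}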
201 | #define __list_for_each_rcu(pos, head) \ | 227 | #define __list_for_each_rcu(pos, head) \ |
202 | for (pos = rcu_dereference((head)->next); \ | 228 | for (pos = rcu_dereference((head)->next); \ |
203 | pos != (head); \ | 229 | pos != (head); \ |
@@ -214,9 +240,9 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
214 | * as long as the traversal is guarded by rcu_read_lock(). | 240 | * as long as the traversal is guarded by rcu_read_lock(). |
215 | */ | 241 | */ |
216 | #define list_for_each_entry_rcu(pos, head, member) \ | 242 | #define list_for_each_entry_rcu(pos, head, member) \ |
217 | for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \ | 243 | for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ |
218 | prefetch(pos->member.next), &pos->member != (head); \ | 244 | prefetch(pos->member.next), &pos->member != (head); \ |
219 | pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member)) | 245 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
220 | 246 | ||
221 | 247 | ||
222 | /** | 248 | /** |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 58b2aa5312b9..5a5153806c42 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -161,8 +161,15 @@ struct rcu_data { | |||
161 | unsigned long offline_fqs; /* Kicked due to being offline. */ | 161 | unsigned long offline_fqs; /* Kicked due to being offline. */ |
162 | unsigned long resched_ipi; /* Sent a resched IPI. */ | 162 | unsigned long resched_ipi; /* Sent a resched IPI. */ |
163 | 163 | ||
164 | /* 5) For future __rcu_pending statistics. */ | 164 | /* 5) __rcu_pending() statistics. */ |
165 | long n_rcu_pending; /* rcu_pending() calls since boot. */ | 165 | long n_rcu_pending; /* rcu_pending() calls since boot. */ |
166 | long n_rp_qs_pending; | ||
167 | long n_rp_cb_ready; | ||
168 | long n_rp_cpu_needs_gp; | ||
169 | long n_rp_gp_completed; | ||
170 | long n_rp_gp_started; | ||
171 | long n_rp_need_fqs; | ||
172 | long n_rp_need_nothing; | ||
166 | 173 | ||
167 | int cpu; | 174 | int cpu; |
168 | }; | 175 | }; |
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 6473650c28f1..dab68bbed675 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h | |||
@@ -453,6 +453,7 @@ enum reiserfs_mount_options { | |||
453 | REISERFS_ATTRS, | 453 | REISERFS_ATTRS, |
454 | REISERFS_XATTRS_USER, | 454 | REISERFS_XATTRS_USER, |
455 | REISERFS_POSIXACL, | 455 | REISERFS_POSIXACL, |
456 | REISERFS_EXPOSE_PRIVROOT, | ||
456 | REISERFS_BARRIER_NONE, | 457 | REISERFS_BARRIER_NONE, |
457 | REISERFS_BARRIER_FLUSH, | 458 | REISERFS_BARRIER_FLUSH, |
458 | 459 | ||
@@ -490,6 +491,7 @@ enum reiserfs_mount_options { | |||
490 | #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK)) | 491 | #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK)) |
491 | #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER)) | 492 | #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER)) |
492 | #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL)) | 493 | #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL)) |
494 | #define reiserfs_expose_privroot(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_EXPOSE_PRIVROOT)) | ||
493 | #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s)) | 495 | #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s)) |
494 | #define reiserfs_barrier_none(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_NONE)) | 496 | #define reiserfs_barrier_none(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_NONE)) |
495 | #define reiserfs_barrier_flush(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_FLUSH)) | 497 | #define reiserfs_barrier_flush(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_FLUSH)) |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index e1b7b2173885..8670f1575fe1 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -11,7 +11,7 @@ struct ring_buffer_iter; | |||
11 | * Don't refer to this struct directly, use functions below. | 11 | * Don't refer to this struct directly, use functions below. |
12 | */ | 12 | */ |
13 | struct ring_buffer_event { | 13 | struct ring_buffer_event { |
14 | u32 type:2, len:3, time_delta:27; | 14 | u32 type_len:5, time_delta:27; |
15 | u32 array[]; | 15 | u32 array[]; |
16 | }; | 16 | }; |
17 | 17 | ||
@@ -24,7 +24,8 @@ struct ring_buffer_event { | |||
24 | * size is variable depending on how much | 24 | * size is variable depending on how much |
25 | * padding is needed | 25 | * padding is needed |
26 | * If time_delta is non zero: | 26 | * If time_delta is non zero: |
27 | * everything else same as RINGBUF_TYPE_DATA | 27 | * array[0] holds the actual length |
28 | * size = 4 + length (bytes) | ||
28 | * | 29 | * |
29 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta | 30 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta |
30 | * array[0] = time delta (28 .. 59) | 31 | * array[0] = time delta (28 .. 59) |
@@ -35,22 +36,23 @@ struct ring_buffer_event { | |||
35 | * array[1..2] = tv_sec | 36 | * array[1..2] = tv_sec |
36 | * size = 16 bytes | 37 | * size = 16 bytes |
37 | * | 38 | * |
38 | * @RINGBUF_TYPE_DATA: Data record | 39 | * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX: |
39 | * If len is zero: | 40 | * Data record |
41 | * If type_len is zero: | ||
40 | * array[0] holds the actual length | 42 | * array[0] holds the actual length |
41 | * array[1..(length+3)/4] holds data | 43 | * array[1..(length+3)/4] holds data |
42 | * size = 4 + 4 + length (bytes) | 44 | * size = 4 + length (bytes) |
43 | * else | 45 | * else |
44 | * length = len << 2 | 46 | * length = type_len << 2 |
45 | * array[0..(length+3)/4-1] holds data | 47 | * array[0..(length+3)/4-1] holds data |
46 | * size = 4 + length (bytes) | 48 | * size = 4 + length (bytes) |
47 | */ | 49 | */ |
48 | enum ring_buffer_type { | 50 | enum ring_buffer_type { |
51 | RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, | ||
49 | RINGBUF_TYPE_PADDING, | 52 | RINGBUF_TYPE_PADDING, |
50 | RINGBUF_TYPE_TIME_EXTEND, | 53 | RINGBUF_TYPE_TIME_EXTEND, |
51 | /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ | 54 | /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ |
52 | RINGBUF_TYPE_TIME_STAMP, | 55 | RINGBUF_TYPE_TIME_STAMP, |
53 | RINGBUF_TYPE_DATA, | ||
54 | }; | 56 | }; |
55 | 57 | ||
56 | unsigned ring_buffer_event_length(struct ring_buffer_event *event); | 58 | unsigned ring_buffer_event_length(struct ring_buffer_event *event); |
@@ -68,13 +70,54 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) | |||
68 | return event->time_delta; | 70 | return event->time_delta; |
69 | } | 71 | } |
70 | 72 | ||
73 | /* | ||
74 | * ring_buffer_event_discard can discard any event in the ring buffer. | ||
75 | * It is up to the caller to protect against a reader | ||
76 | * consuming it or a writer wrapping and replacing it. | ||
77 | * | ||
78 | * No external protection is needed if this is called before | ||
79 | * the event is committed. But in that case it would be better to | ||
80 | * use ring_buffer_discard_commit. | ||
81 | * | ||
82 | * Note, if an event that has not been committed is discarded | ||
83 | * with ring_buffer_event_discard, it must still be committed. | ||
84 | */ | ||
71 | void ring_buffer_event_discard(struct ring_buffer_event *event); | 85 | void ring_buffer_event_discard(struct ring_buffer_event *event); |
72 | 86 | ||
73 | /* | 87 | /* |
88 | * ring_buffer_discard_commit will remove an event that has not | ||
89 | * been committed yet. If this is used, then ring_buffer_unlock_commit | ||
90 | * must not be called on the discarded event. This function | ||
91 | * will try to remove the event from the ring buffer completely | ||
92 | * if another event has not been written after it. | ||
93 | * | ||
94 | * Example use: | ||
95 | * | ||
96 | * if (some_condition) | ||
97 | * ring_buffer_discard_commit(buffer, event); | ||
98 | * else | ||
99 | * ring_buffer_unlock_commit(buffer, event); | ||
100 | */ | ||
101 | void ring_buffer_discard_commit(struct ring_buffer *buffer, | ||
102 | struct ring_buffer_event *event); | ||
103 | |||
104 | /* | ||
74 | * size is in bytes for each per CPU buffer. | 105 | * size is in bytes for each per CPU buffer. |
75 | */ | 106 | */ |
76 | struct ring_buffer * | 107 | struct ring_buffer * |
77 | ring_buffer_alloc(unsigned long size, unsigned flags); | 108 | __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); |
109 | |||
110 | /* | ||
111 | * Because the ring buffer is generic, if other users of the ring buffer get | ||
112 | * traced by ftrace, it can produce lockdep warnings. We need to keep each | ||
113 | * ring buffer's lock class separate. | ||
114 | */ | ||
115 | #define ring_buffer_alloc(size, flags) \ | ||
116 | ({ \ | ||
117 | static struct lock_class_key __key; \ | ||
118 | __ring_buffer_alloc((size), (flags), &__key); \ | ||
119 | }) | ||
120 | |||
78 | void ring_buffer_free(struct ring_buffer *buffer); | 121 | void ring_buffer_free(struct ring_buffer *buffer); |
79 | 122 | ||
80 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); | 123 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); |
@@ -122,6 +165,8 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer); | |||
122 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer); | 165 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer); |
123 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); | 166 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); |
124 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); | 167 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); |
168 | unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); | ||
169 | unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu); | ||
125 | 170 | ||
126 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); | 171 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); |
127 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, | 172 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, |
@@ -137,6 +182,11 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); | |||
137 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, | 182 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, |
138 | size_t len, int cpu, int full); | 183 | size_t len, int cpu, int full); |
139 | 184 | ||
185 | struct trace_seq; | ||
186 | |||
187 | int ring_buffer_print_entry_header(struct trace_seq *s); | ||
188 | int ring_buffer_print_page_header(struct trace_seq *s); | ||
189 | |||
140 | enum ring_buffer_flags { | 190 | enum ring_buffer_flags { |
141 | RB_FL_OVERWRITE = 1 << 0, | 191 | RB_FL_OVERWRITE = 1 << 0, |
142 | }; | 192 | }; |
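The new ring_buffer_alloc() wrapper exists so that every call site gets its own static lock_class_key, keeping lockdep classes per buffer; ring_buffer_discard_commit() then pairs with a reservation that turned out to be unwanted. A rough sketch of both, assuming the reserve/commit helpers from this kernel's ring buffer API (ring_buffer_lock_reserve(), ring_buffer_event_data(), ring_buffer_unlock_commit()); the payload struct and buffer size are invented for illustration:

	struct my_sample {
		u32 value;
	};

	static struct ring_buffer *my_buffer;

	static int my_buffer_init(void)
	{
		/* The macro plants a private static lock_class_key at this call site. */
		my_buffer = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
		return my_buffer ? 0 : -ENOMEM;
	}

	static void my_record(u32 value, int unwanted)
	{
		struct ring_buffer_event *event;
		struct my_sample *entry;

		event = ring_buffer_lock_reserve(my_buffer, sizeof(*entry));
		if (!event)
			return;

		entry = ring_buffer_event_data(event);
		entry->value = value;

		if (unwanted)
			/* Reserved but not needed: never unlock_commit after this. */
			ring_buffer_discard_commit(my_buffer, event);
		else
			ring_buffer_unlock_commit(my_buffer, event);
	}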
diff --git a/include/linux/sched.h b/include/linux/sched.h index b4c38bc8049c..4896fdfec913 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -77,6 +77,7 @@ struct sched_param { | |||
77 | #include <linux/proportions.h> | 77 | #include <linux/proportions.h> |
78 | #include <linux/seccomp.h> | 78 | #include <linux/seccomp.h> |
79 | #include <linux/rcupdate.h> | 79 | #include <linux/rcupdate.h> |
80 | #include <linux/rculist.h> | ||
80 | #include <linux/rtmutex.h> | 81 | #include <linux/rtmutex.h> |
81 | 82 | ||
82 | #include <linux/time.h> | 83 | #include <linux/time.h> |
@@ -96,8 +97,9 @@ struct exec_domain; | |||
96 | struct futex_pi_state; | 97 | struct futex_pi_state; |
97 | struct robust_list_head; | 98 | struct robust_list_head; |
98 | struct bio; | 99 | struct bio; |
99 | struct bts_tracer; | ||
100 | struct fs_struct; | 100 | struct fs_struct; |
101 | struct bts_context; | ||
102 | struct perf_counter_context; | ||
101 | 103 | ||
102 | /* | 104 | /* |
103 | * List of flags we want to share for kernel threads, | 105 | * List of flags we want to share for kernel threads, |
@@ -116,6 +118,7 @@ struct fs_struct; | |||
116 | * 11 bit fractions. | 118 | * 11 bit fractions. |
117 | */ | 119 | */ |
118 | extern unsigned long avenrun[]; /* Load averages */ | 120 | extern unsigned long avenrun[]; /* Load averages */ |
121 | extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); | ||
119 | 122 | ||
120 | #define FSHIFT 11 /* nr of bits of precision */ | 123 | #define FSHIFT 11 /* nr of bits of precision */ |
121 | #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ | 124 | #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ |
@@ -135,8 +138,9 @@ DECLARE_PER_CPU(unsigned long, process_counts); | |||
135 | extern int nr_processes(void); | 138 | extern int nr_processes(void); |
136 | extern unsigned long nr_running(void); | 139 | extern unsigned long nr_running(void); |
137 | extern unsigned long nr_uninterruptible(void); | 140 | extern unsigned long nr_uninterruptible(void); |
138 | extern unsigned long nr_active(void); | ||
139 | extern unsigned long nr_iowait(void); | 141 | extern unsigned long nr_iowait(void); |
142 | extern void calc_global_load(void); | ||
143 | extern u64 cpu_nr_migrations(int cpu); | ||
140 | 144 | ||
141 | extern unsigned long get_parent_ip(unsigned long addr); | 145 | extern unsigned long get_parent_ip(unsigned long addr); |
142 | 146 | ||
@@ -672,6 +676,10 @@ struct user_struct { | |||
672 | struct work_struct work; | 676 | struct work_struct work; |
673 | #endif | 677 | #endif |
674 | #endif | 678 | #endif |
679 | |||
680 | #ifdef CONFIG_PERF_COUNTERS | ||
681 | atomic_long_t locked_vm; | ||
682 | #endif | ||
675 | }; | 683 | }; |
676 | 684 | ||
677 | extern int uids_sysfs_init(void); | 685 | extern int uids_sysfs_init(void); |
@@ -838,7 +846,17 @@ struct sched_group { | |||
838 | */ | 846 | */ |
839 | u32 reciprocal_cpu_power; | 847 | u32 reciprocal_cpu_power; |
840 | 848 | ||
841 | unsigned long cpumask[]; | 849 | /* |
850 | * The CPUs this group covers. | ||
851 | * | ||
852 | * NOTE: this field is variable length. (Allocated dynamically | ||
853 | * by attaching extra space to the end of the structure, | ||
854 | * depending on how many CPUs the kernel has booted up with) | ||
855 | * | ||
856 | * It can also be embedded into static data structures at build | ||
857 | * time. (See 'struct static_sched_group' in kernel/sched.c) | ||
858 | */ | ||
859 | unsigned long cpumask[0]; | ||
842 | }; | 860 | }; |
843 | 861 | ||
844 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | 862 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) |
@@ -924,8 +942,17 @@ struct sched_domain { | |||
924 | char *name; | 942 | char *name; |
925 | #endif | 943 | #endif |
926 | 944 | ||
927 | /* span of all CPUs in this domain */ | 945 | /* |
928 | unsigned long span[]; | 946 | * Span of all CPUs in this domain. |
947 | * | ||
948 | * NOTE: this field is variable length. (Allocated dynamically | ||
949 | * by attaching extra space to the end of the structure, | ||
950 | * depending on how many CPUs the kernel has booted up with) | ||
951 | * | ||
952 | * It can also be embedded into static data structures at build | ||
953 | * time. (See 'struct static_sched_domain' in kernel/sched.c) | ||
954 | */ | ||
955 | unsigned long span[0]; | ||
929 | }; | 956 | }; |
930 | 957 | ||
931 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) | 958 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) |
@@ -1052,9 +1079,10 @@ struct sched_entity { | |||
1052 | u64 last_wakeup; | 1079 | u64 last_wakeup; |
1053 | u64 avg_overlap; | 1080 | u64 avg_overlap; |
1054 | 1081 | ||
1082 | u64 nr_migrations; | ||
1083 | |||
1055 | u64 start_runtime; | 1084 | u64 start_runtime; |
1056 | u64 avg_wakeup; | 1085 | u64 avg_wakeup; |
1057 | u64 nr_migrations; | ||
1058 | 1086 | ||
1059 | #ifdef CONFIG_SCHEDSTATS | 1087 | #ifdef CONFIG_SCHEDSTATS |
1060 | u64 wait_start; | 1088 | u64 wait_start; |
@@ -1209,18 +1237,11 @@ struct task_struct { | |||
1209 | struct list_head ptraced; | 1237 | struct list_head ptraced; |
1210 | struct list_head ptrace_entry; | 1238 | struct list_head ptrace_entry; |
1211 | 1239 | ||
1212 | #ifdef CONFIG_X86_PTRACE_BTS | ||
1213 | /* | 1240 | /* |
1214 | * This is the tracer handle for the ptrace BTS extension. | 1241 | * This is the tracer handle for the ptrace BTS extension. |
1215 | * This field actually belongs to the ptracer task. | 1242 | * This field actually belongs to the ptracer task. |
1216 | */ | 1243 | */ |
1217 | struct bts_tracer *bts; | 1244 | struct bts_context *bts; |
1218 | /* | ||
1219 | * The buffer to hold the BTS data. | ||
1220 | */ | ||
1221 | void *bts_buffer; | ||
1222 | size_t bts_size; | ||
1223 | #endif /* CONFIG_X86_PTRACE_BTS */ | ||
1224 | 1245 | ||
1225 | /* PID/PID hash table linkage. */ | 1246 | /* PID/PID hash table linkage. */ |
1226 | struct pid_link pids[PIDTYPE_MAX]; | 1247 | struct pid_link pids[PIDTYPE_MAX]; |
@@ -1247,7 +1268,9 @@ struct task_struct { | |||
1247 | * credentials (COW) */ | 1268 | * credentials (COW) */ |
1248 | const struct cred *cred; /* effective (overridable) subjective task | 1269 | const struct cred *cred; /* effective (overridable) subjective task |
1249 | * credentials (COW) */ | 1270 | * credentials (COW) */ |
1250 | struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */ | 1271 | struct mutex cred_guard_mutex; /* guard against foreign influences on |
1272 | * credential calculations | ||
1273 | * (notably ptrace) */ | ||
1251 | 1274 | ||
1252 | char comm[TASK_COMM_LEN]; /* executable name excluding path | 1275 | char comm[TASK_COMM_LEN]; /* executable name excluding path |
1253 | - access with [gs]et_task_comm (which lock | 1276 | - access with [gs]et_task_comm (which lock |
@@ -1380,6 +1403,11 @@ struct task_struct { | |||
1380 | struct list_head pi_state_list; | 1403 | struct list_head pi_state_list; |
1381 | struct futex_pi_state *pi_state_cache; | 1404 | struct futex_pi_state *pi_state_cache; |
1382 | #endif | 1405 | #endif |
1406 | #ifdef CONFIG_PERF_COUNTERS | ||
1407 | struct perf_counter_context *perf_counter_ctxp; | ||
1408 | struct mutex perf_counter_mutex; | ||
1409 | struct list_head perf_counter_list; | ||
1410 | #endif | ||
1383 | #ifdef CONFIG_NUMA | 1411 | #ifdef CONFIG_NUMA |
1384 | struct mempolicy *mempolicy; | 1412 | struct mempolicy *mempolicy; |
1385 | short il_next; | 1413 | short il_next; |
@@ -1428,7 +1456,9 @@ struct task_struct { | |||
1428 | #ifdef CONFIG_TRACING | 1456 | #ifdef CONFIG_TRACING |
1429 | /* state flags for use by tracers */ | 1457 | /* state flags for use by tracers */ |
1430 | unsigned long trace; | 1458 | unsigned long trace; |
1431 | #endif | 1459 | /* bitmask of trace recursion */ |
1460 | unsigned long trace_recursion; | ||
1461 | #endif /* CONFIG_TRACING */ | ||
1432 | }; | 1462 | }; |
1433 | 1463 | ||
1434 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1464 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
@@ -1885,6 +1915,7 @@ extern void sched_dead(struct task_struct *p); | |||
1885 | 1915 | ||
1886 | extern void proc_caches_init(void); | 1916 | extern void proc_caches_init(void); |
1887 | extern void flush_signals(struct task_struct *); | 1917 | extern void flush_signals(struct task_struct *); |
1918 | extern void __flush_signals(struct task_struct *); | ||
1888 | extern void ignore_signals(struct task_struct *); | 1919 | extern void ignore_signals(struct task_struct *); |
1889 | extern void flush_signal_handlers(struct task_struct *, int force_default); | 1920 | extern void flush_signal_handlers(struct task_struct *, int force_default); |
1890 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); | 1921 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); |
@@ -2001,8 +2032,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from); | |||
2001 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 2032 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
2002 | 2033 | ||
2003 | #ifdef CONFIG_SMP | 2034 | #ifdef CONFIG_SMP |
2035 | extern void wait_task_context_switch(struct task_struct *p); | ||
2004 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); | 2036 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
2005 | #else | 2037 | #else |
2038 | static inline void wait_task_context_switch(struct task_struct *p) {} | ||
2006 | static inline unsigned long wait_task_inactive(struct task_struct *p, | 2039 | static inline unsigned long wait_task_inactive(struct task_struct *p, |
2007 | long match_state) | 2040 | long match_state) |
2008 | { | 2041 | { |
@@ -2010,7 +2043,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p, | |||
2010 | } | 2043 | } |
2011 | #endif | 2044 | #endif |
2012 | 2045 | ||
2013 | #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) | 2046 | #define next_task(p) \ |
2047 | list_entry_rcu((p)->tasks.next, struct task_struct, tasks) | ||
2014 | 2048 | ||
2015 | #define for_each_process(p) \ | 2049 | #define for_each_process(p) \ |
2016 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) | 2050 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
@@ -2049,8 +2083,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2) | |||
2049 | 2083 | ||
2050 | static inline struct task_struct *next_thread(const struct task_struct *p) | 2084 | static inline struct task_struct *next_thread(const struct task_struct *p) |
2051 | { | 2085 | { |
2052 | return list_entry(rcu_dereference(p->thread_group.next), | 2086 | return list_entry_rcu(p->thread_group.next, |
2053 | struct task_struct, thread_group); | 2087 | struct task_struct, thread_group); |
2054 | } | 2088 | } |
2055 | 2089 | ||
2056 | static inline int thread_group_empty(struct task_struct *p) | 2090 | static inline int thread_group_empty(struct task_struct *p) |
@@ -2388,6 +2422,13 @@ static inline void inc_syscw(struct task_struct *tsk) | |||
2388 | #define TASK_SIZE_OF(tsk) TASK_SIZE | 2422 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
2389 | #endif | 2423 | #endif |
2390 | 2424 | ||
2425 | /* | ||
2426 | * Call the function if the target task is executing on a CPU right now: | ||
2427 | */ | ||
2428 | extern void task_oncpu_function_call(struct task_struct *p, | ||
2429 | void (*func) (void *info), void *info); | ||
2430 | |||
2431 | |||
2391 | #ifdef CONFIG_MM_OWNER | 2432 | #ifdef CONFIG_MM_OWNER |
2392 | extern void mm_update_next_owner(struct mm_struct *mm); | 2433 | extern void mm_update_next_owner(struct mm_struct *mm); |
2393 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); | 2434 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); |
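The cpumask[0]/span[0] comments added above describe the usual trailing-array idiom: the structure is allocated with extra room sized for the booted CPU count, or embedded in a static wrapper that reserves that room at build time. A minimal allocation sketch; the real setup lives in kernel/sched.c and is more involved, so alloc_group() here only illustrates the sizing with cpumask_size():

	static struct sched_group *alloc_group(gfp_t gfp)
	{
		struct sched_group *sg;

		/* Trailing cpumask[] storage depends on how many CPUs were booted. */
		sg = kzalloc(sizeof(*sg) + cpumask_size(), gfp);
		if (!sg)
			return NULL;

		cpumask_clear(sched_group_cpus(sg));
		return sg;
	}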
diff --git a/include/linux/security.h b/include/linux/security.h index d5fd6163606f..5eff459b3833 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -2197,6 +2197,8 @@ static inline int security_file_mmap(struct file *file, unsigned long reqprot, | |||
2197 | unsigned long addr, | 2197 | unsigned long addr, |
2198 | unsigned long addr_only) | 2198 | unsigned long addr_only) |
2199 | { | 2199 | { |
2200 | if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO)) | ||
2201 | return -EACCES; | ||
2200 | return 0; | 2202 | return 0; |
2201 | } | 2203 | } |
2202 | 2204 | ||
diff --git a/include/linux/serial.h b/include/linux/serial.h index 9136cc5608c3..e5bb75a63802 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h | |||
@@ -96,54 +96,76 @@ struct serial_uart_config { | |||
96 | 96 | ||
97 | /* | 97 | /* |
98 | * Definitions for async_struct (and serial_struct) flags field | 98 | * Definitions for async_struct (and serial_struct) flags field |
99 | * | ||
100 | * Define ASYNCB_* for convenient use with {test,set,clear}_bit. | ||
99 | */ | 101 | */ |
100 | #define ASYNC_HUP_NOTIFY 0x0001 /* Notify getty on hangups and closes | 102 | #define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes |
101 | on the callout port */ | 103 | * on the callout port */ |
102 | #define ASYNC_FOURPORT 0x0002 /* Set OU1, OUT2 per AST Fourport settings */ | 104 | #define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */ |
103 | #define ASYNC_SAK 0x0004 /* Secure Attention Key (Orange book) */ | 105 | #define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */ |
104 | #define ASYNC_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */ | 106 | #define ASYNCB_SPLIT_TERMIOS 3 /* Separate termios for dialin/callout */ |
105 | 107 | #define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */ | |
106 | #define ASYNC_SPD_MASK 0x1030 | 108 | #define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */ |
107 | #define ASYNC_SPD_HI 0x0010 /* Use 56000 instead of 38400 bps */ | 109 | #define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */ |
108 | 110 | #define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during | |
109 | #define ASYNC_SPD_VHI 0x0020 /* Use 115200 instead of 38400 bps */ | 111 | * autoconfiguration */ |
110 | #define ASYNC_SPD_CUST 0x0030 /* Use user-specified divisor */ | 112 | #define ASYNCB_SESSION_LOCKOUT 8 /* Lock out cua opens based on session */ |
111 | 113 | #define ASYNCB_PGRP_LOCKOUT 9 /* Lock out cua opens based on pgrp */ | |
112 | #define ASYNC_SKIP_TEST 0x0040 /* Skip UART test during autoconfiguration */ | 114 | #define ASYNCB_CALLOUT_NOHUP 10 /* Don't do hangups for cua device */ |
113 | #define ASYNC_AUTO_IRQ 0x0080 /* Do automatic IRQ during autoconfiguration */ | 115 | #define ASYNCB_HARDPPS_CD 11 /* Call hardpps when CD goes high */ |
114 | #define ASYNC_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */ | 116 | #define ASYNCB_SPD_SHI 12 /* Use 230400 instead of 38400 bps */ |
115 | #define ASYNC_PGRP_LOCKOUT 0x0200 /* Lock out cua opens based on pgrp */ | 117 | #define ASYNCB_LOW_LATENCY 13 /* Request low latency behaviour */ |
116 | #define ASYNC_CALLOUT_NOHUP 0x0400 /* Don't do hangups for cua device */ | 118 | #define ASYNCB_BUGGY_UART 14 /* This is a buggy UART, skip some safety |
117 | 119 | * checks. Note: can be dangerous! */ | |
118 | #define ASYNC_HARDPPS_CD 0x0800 /* Call hardpps when CD goes high */ | 120 | #define ASYNCB_AUTOPROBE 15 /* Port was autoprobed by PCI or PNP code */ |
119 | 121 | #define ASYNCB_LAST_USER 15 | |
120 | #define ASYNC_SPD_SHI 0x1000 /* Use 230400 instead of 38400 bps */ | 122 | |
121 | #define ASYNC_SPD_WARP 0x1010 /* Use 460800 instead of 38400 bps */ | 123 | /* Internal flags used only by kernel */ |
122 | 124 | #define ASYNCB_INITIALIZED 31 /* Serial port was initialized */ | |
123 | #define ASYNC_LOW_LATENCY 0x2000 /* Request low latency behaviour */ | 125 | #define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */ |
124 | 126 | #define ASYNCB_BOOT_AUTOCONF 28 /* Autoconfigure port on bootup */ | |
125 | #define ASYNC_BUGGY_UART 0x4000 /* This is a buggy UART, skip some safety | 127 | #define ASYNCB_CLOSING 27 /* Serial port is closing */ |
126 | * checks. Note: can be dangerous! */ | 128 | #define ASYNCB_CTS_FLOW 26 /* Do CTS flow control */ |
127 | 129 | #define ASYNCB_CHECK_CD 25 /* i.e., CLOCAL */ | |
128 | #define ASYNC_AUTOPROBE 0x8000 /* Port was autoprobed by PCI or PNP code */ | 130 | #define ASYNCB_SHARE_IRQ 24 /* for multifunction cards, no longer used */ |
129 | 131 | #define ASYNCB_CONS_FLOW 23 /* flow control for console */ | |
130 | #define ASYNC_FLAGS 0x7FFF /* Possible legal async flags */ | 132 | #define ASYNCB_BOOT_ONLYMCA 22 /* Probe only if MCA bus */ |
131 | #define ASYNC_USR_MASK 0x3430 /* Legal flags that non-privileged | 133 | #define ASYNCB_FIRST_KERNEL 22 |
132 | * users can set or reset */ | 134 | |
133 | 135 | #define ASYNC_HUP_NOTIFY (1U << ASYNCB_HUP_NOTIFY) | |
134 | /* Internal flags used only by kernel/chr_drv/serial.c */ | 136 | #define ASYNC_FOURPORT (1U << ASYNCB_FOURPORT) |
135 | #define ASYNC_INITIALIZED 0x80000000 /* Serial port was initialized */ | 137 | #define ASYNC_SAK (1U << ASYNCB_SAK) |
136 | #define ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device is active */ | 138 | #define ASYNC_SPLIT_TERMIOS (1U << ASYNCB_SPLIT_TERMIOS) |
137 | #define ASYNC_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */ | 139 | #define ASYNC_SPD_HI (1U << ASYNCB_SPD_HI) |
138 | #define ASYNC_CLOSING 0x08000000 /* Serial port is closing */ | 140 | #define ASYNC_SPD_VHI (1U << ASYNCB_SPD_VHI) |
139 | #define ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */ | 141 | #define ASYNC_SKIP_TEST (1U << ASYNCB_SKIP_TEST) |
140 | #define ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */ | 142 | #define ASYNC_AUTO_IRQ (1U << ASYNCB_AUTO_IRQ) |
141 | #define ASYNC_SHARE_IRQ 0x01000000 /* for multifunction cards | 143 | #define ASYNC_SESSION_LOCKOUT (1U << ASYNCB_SESSION_LOCKOUT) |
142 | --- no longer used */ | 144 | #define ASYNC_PGRP_LOCKOUT (1U << ASYNCB_PGRP_LOCKOUT) |
143 | #define ASYNC_CONS_FLOW 0x00800000 /* flow control for console */ | 145 | #define ASYNC_CALLOUT_NOHUP (1U << ASYNCB_CALLOUT_NOHUP) |
144 | 146 | #define ASYNC_HARDPPS_CD (1U << ASYNCB_HARDPPS_CD) | |
145 | #define ASYNC_BOOT_ONLYMCA 0x00400000 /* Probe only if MCA bus */ | 147 | #define ASYNC_SPD_SHI (1U << ASYNCB_SPD_SHI) |
146 | #define ASYNC_INTERNAL_FLAGS 0xFFC00000 /* Internal flags */ | 148 | #define ASYNC_LOW_LATENCY (1U << ASYNCB_LOW_LATENCY) |
149 | #define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART) | ||
150 | #define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE) | ||
151 | |||
152 | #define ASYNC_FLAGS ((1U << ASYNCB_LAST_USER) - 1) | ||
153 | #define ASYNC_USR_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI| \ | ||
154 | ASYNC_CALLOUT_NOHUP|ASYNC_SPD_SHI|ASYNC_LOW_LATENCY) | ||
155 | #define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI) | ||
156 | #define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI) | ||
157 | #define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI) | ||
158 | |||
159 | #define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED) | ||
160 | #define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE) | ||
161 | #define ASYNC_BOOT_AUTOCONF (1U << ASYNCB_BOOT_AUTOCONF) | ||
162 | #define ASYNC_CLOSING (1U << ASYNCB_CLOSING) | ||
163 | #define ASYNC_CTS_FLOW (1U << ASYNCB_CTS_FLOW) | ||
164 | #define ASYNC_CHECK_CD (1U << ASYNCB_CHECK_CD) | ||
165 | #define ASYNC_SHARE_IRQ (1U << ASYNCB_SHARE_IRQ) | ||
166 | #define ASYNC_CONS_FLOW (1U << ASYNCB_CONS_FLOW) | ||
167 | #define ASYNC_BOOT_ONLYMCA (1U << ASYNCB_BOOT_ONLYMCA) | ||
168 | #define ASYNC_INTERNAL_FLAGS (~((1U << ASYNCB_FIRST_KERNEL) - 1)) | ||
147 | 169 | ||
148 | /* | 170 | /* |
149 | * Multiport serial configuration structure --- external structure | 171 | * Multiport serial configuration structure --- external structure |
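With the ASYNCB_* values being bit numbers and the ASYNC_* values the corresponding masks, the same flag can now be tested either with the atomic bitops or with a plain mask, as the new comment intends. Two hypothetical helpers as a sketch:

	/* Bit-number form, for an unsigned long flags word: */
	static void mark_port_initialized(unsigned long *flags)
	{
		if (!test_and_set_bit(ASYNCB_INITIALIZED, flags))
			pr_debug("port initialized for the first time\n");
	}

	/* Mask form, e.g. when vetting user-supplied serial_struct flags: */
	static int user_flags_valid(unsigned int flags)
	{
		return (flags & ~ASYNC_USR_MASK) == 0;
	}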
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 57a97e52e58d..6fd80c4243f1 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -41,7 +41,8 @@ | |||
41 | #define PORT_XSCALE 15 | 41 | #define PORT_XSCALE 15 |
42 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ | 42 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ |
43 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ | 43 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ |
44 | #define PORT_MAX_8250 17 /* max port ID */ | 44 | #define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ |
45 | #define PORT_MAX_8250 18 /* max port ID */ | ||
45 | 46 | ||
46 | /* | 47 | /* |
47 | * ARM specific type numbers. These are not currently guaranteed | 48 | * ARM specific type numbers. These are not currently guaranteed |
@@ -167,6 +168,9 @@ | |||
167 | /* MAX3100 */ | 168 | /* MAX3100 */ |
168 | #define PORT_MAX3100 86 | 169 | #define PORT_MAX3100 86 |
169 | 170 | ||
171 | /* Timberdale UART */ | ||
172 | #define PORT_TIMBUART 87 | ||
173 | |||
170 | #ifdef __KERNEL__ | 174 | #ifdef __KERNEL__ |
171 | 175 | ||
172 | #include <linux/compiler.h> | 176 | #include <linux/compiler.h> |
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 893cc53486bc..1c297ddc9d5a 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h | |||
@@ -25,8 +25,7 @@ struct plat_sci_port { | |||
25 | unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */ | 25 | unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */ |
26 | unsigned int type; /* SCI / SCIF / IRDA */ | 26 | unsigned int type; /* SCI / SCIF / IRDA */ |
27 | upf_t flags; /* UPF_* flags */ | 27 | upf_t flags; /* UPF_* flags */ |
28 | char *clk; /* clock string */ | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | int early_sci_setup(struct uart_port *port); | ||
31 | |||
32 | #endif /* __LINUX_SERIAL_SCI_H */ | 31 | #endif /* __LINUX_SERIAL_SCI_H */ |
diff --git a/include/linux/sh_cmt.h b/include/linux/sh_cmt.h deleted file mode 100644 index 68cacde5954f..000000000000 --- a/include/linux/sh_cmt.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #ifndef __SH_CMT_H__ | ||
2 | #define __SH_CMT_H__ | ||
3 | |||
4 | struct sh_cmt_config { | ||
5 | char *name; | ||
6 | unsigned long channel_offset; | ||
7 | int timer_bit; | ||
8 | char *clk; | ||
9 | unsigned long clockevent_rating; | ||
10 | unsigned long clocksource_rating; | ||
11 | }; | ||
12 | |||
13 | #endif /* __SH_CMT_H__ */ | ||
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h new file mode 100644 index 000000000000..864bd56bd3b0 --- /dev/null +++ b/include/linux/sh_timer.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __SH_TIMER_H__ | ||
2 | #define __SH_TIMER_H__ | ||
3 | |||
4 | struct sh_timer_config { | ||
5 | char *name; | ||
6 | long channel_offset; | ||
7 | int timer_bit; | ||
8 | char *clk; | ||
9 | unsigned long clockevent_rating; | ||
10 | unsigned long clocksource_rating; | ||
11 | }; | ||
12 | |||
13 | #endif /* __SH_TIMER_H__ */ | ||
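sh_timer_config replaces the CMT-only sh_cmt_config deleted above, so one platform-data structure now serves the SuperH CMT/TMU/MTU2 timer drivers. A hedged sketch of a board file filling it in; the device name "sh_cmt" matches the existing CMT driver, but the channel offset, timer bit, clock string and ratings below are illustrative values only:

	static struct sh_timer_config cmt0_platform_data = {
		.name			= "CMT0",
		.channel_offset		= 0x60,
		.timer_bit		= 5,
		.clk			= "cmt0",
		.clockevent_rating	= 125,
		.clocksource_rating	= 125,
	};

	static struct platform_device cmt0_device = {
		.name		= "sh_cmt",
		.id		= 0,
		.dev		= {
			.platform_data	= &cmt0_platform_data,
		},
	};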
diff --git a/include/linux/signal.h b/include/linux/signal.h index 84f997f8aa53..c7552836bd95 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -235,6 +235,8 @@ static inline int valid_signal(unsigned long sig) | |||
235 | extern int next_signal(struct sigpending *pending, sigset_t *mask); | 235 | extern int next_signal(struct sigpending *pending, sigset_t *mask); |
236 | extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); | 236 | extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); |
237 | extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); | 237 | extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); |
238 | extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, | ||
239 | siginfo_t *info); | ||
238 | extern long do_sigpending(void __user *, unsigned long); | 240 | extern long do_sigpending(void __user *, unsigned long); |
239 | extern int sigprocmask(int, sigset_t *, sigset_t *); | 241 | extern int sigprocmask(int, sigset_t *, sigset_t *); |
240 | extern int show_unhandled_signals; | 242 | extern int show_unhandled_signals; |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 24c5602bee99..219b8fb4651d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -62,6 +62,8 @@ | |||
62 | # define SLAB_DEBUG_OBJECTS 0x00000000UL | 62 | # define SLAB_DEBUG_OBJECTS 0x00000000UL |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ | ||
66 | |||
65 | /* The following flags affect the page allocator grouping pages by mobility */ | 67 | /* The following flags affect the page allocator grouping pages by mobility */ |
66 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ | 68 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ |
67 | #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ | 69 | #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ |
@@ -317,4 +319,6 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) | |||
317 | return kmalloc_node(size, flags | __GFP_ZERO, node); | 319 | return kmalloc_node(size, flags | __GFP_ZERO, node); |
318 | } | 320 | } |
319 | 321 | ||
322 | void __init kmem_cache_init_late(void); | ||
323 | |||
320 | #endif /* _LINUX_SLAB_H */ | 324 | #endif /* _LINUX_SLAB_H */ |
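SLAB_NOLEAKTRACE lets a cache opt out of kmemleak tracking, which matters mainly for caches that would otherwise recurse into kmemleak's own metadata handling. A short hedged example of passing it at cache creation; the cache name and object type are made up:

	struct my_meta {
		unsigned long cookie;
		void *ptr;
	};

	static struct kmem_cache *my_meta_cache;

	static int __init my_meta_cache_init(void)
	{
		/* Objects from this cache are not tracked or scanned by kmemleak. */
		my_meta_cache = kmem_cache_create("my_meta", sizeof(struct my_meta),
						  0, SLAB_NOLEAKTRACE, NULL);
		return my_meta_cache ? 0 : -ENOMEM;
	}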
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 5ac9b0bcaf9a..713f841ecaa9 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ | 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ |
15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ | 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ |
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <trace/kmemtrace.h> | 17 | #include <linux/kmemtrace.h> |
18 | 18 | ||
19 | /* Size description struct for general caches. */ | 19 | /* Size description struct for general caches. */ |
20 | struct cache_sizes { | 20 | struct cache_sizes { |
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index 0ec00b39d006..bb5368df4be8 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h | |||
@@ -34,4 +34,9 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags) | |||
34 | return kmalloc(size, flags); | 34 | return kmalloc(size, flags); |
35 | } | 35 | } |
36 | 36 | ||
37 | static inline void kmem_cache_init_late(void) | ||
38 | { | ||
39 | /* Nothing to do */ | ||
40 | } | ||
41 | |||
37 | #endif /* __LINUX_SLOB_DEF_H */ | 42 | #endif /* __LINUX_SLOB_DEF_H */ |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 5046f90c1171..4dcbc2c71491 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | #include <linux/kobject.h> | 12 | #include <linux/kobject.h> |
13 | #include <trace/kmemtrace.h> | 13 | #include <linux/kmemtrace.h> |
14 | 14 | ||
15 | enum stat_item { | 15 | enum stat_item { |
16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ | 16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
@@ -302,4 +302,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
302 | } | 302 | } |
303 | #endif | 303 | #endif |
304 | 304 | ||
305 | void __init kmem_cache_init_late(void); | ||
306 | |||
305 | #endif /* _LINUX_SLUB_DEF_H */ | 307 | #endif /* _LINUX_SLUB_DEF_H */ |
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 938234c4a996..d4841ed8215b 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h | |||
@@ -60,6 +60,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
60 | #define __raw_spin_is_locked(lock) ((void)(lock), 0) | 60 | #define __raw_spin_is_locked(lock) ((void)(lock), 0) |
61 | /* for sched.c and kernel_lock.c: */ | 61 | /* for sched.c and kernel_lock.c: */ |
62 | # define __raw_spin_lock(lock) do { (void)(lock); } while (0) | 62 | # define __raw_spin_lock(lock) do { (void)(lock); } while (0) |
63 | # define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) | ||
63 | # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) | 64 | # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) |
64 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) | 65 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) |
65 | #endif /* DEBUG_SPINLOCK */ | 66 | #endif /* DEBUG_SPINLOCK */ |
diff --git a/include/linux/splice.h b/include/linux/splice.h index 5f3faa9d15ae..18e7c7c0cae6 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h | |||
@@ -11,8 +11,7 @@ | |||
11 | #include <linux/pipe_fs_i.h> | 11 | #include <linux/pipe_fs_i.h> |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * splice is tied to pipes as a transport (at least for now), so we'll just | 14 | * Flags passed in from splice/tee/vmsplice |
15 | * add the splice flags here. | ||
16 | */ | 15 | */ |
17 | #define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ | 16 | #define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ |
18 | #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ | 17 | #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index ac9ff54f7cb3..cb1a6631b8f4 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
@@ -29,7 +29,8 @@ extern void *swiotlb_alloc(unsigned order, unsigned long nslabs); | |||
29 | 29 | ||
30 | extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, | 30 | extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, |
31 | phys_addr_t address); | 31 | phys_addr_t address); |
32 | extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); | 32 | extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, |
33 | dma_addr_t address); | ||
33 | 34 | ||
34 | extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size); | 35 | extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size); |
35 | 36 | ||
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 30520844b8da..c6c84ad8bd71 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -55,6 +55,7 @@ struct compat_timeval; | |||
55 | struct robust_list_head; | 55 | struct robust_list_head; |
56 | struct getcpu_cache; | 56 | struct getcpu_cache; |
57 | struct old_linux_dirent; | 57 | struct old_linux_dirent; |
58 | struct perf_counter_attr; | ||
58 | 59 | ||
59 | #include <linux/types.h> | 60 | #include <linux/types.h> |
60 | #include <linux/aio_abi.h> | 61 | #include <linux/aio_abi.h> |
@@ -755,4 +756,8 @@ asmlinkage long sys_pipe(int __user *); | |||
755 | 756 | ||
756 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); | 757 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); |
757 | 758 | ||
759 | |||
760 | asmlinkage long sys_perf_counter_open( | ||
761 | const struct perf_counter_attr __user *attr_uptr, | ||
762 | pid_t pid, int cpu, int group_fd, unsigned long flags); | ||
758 | #endif | 763 | #endif |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index e6b820f8b56b..a8cc4e13434c 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -21,13 +21,14 @@ struct restart_block { | |||
21 | struct { | 21 | struct { |
22 | unsigned long arg0, arg1, arg2, arg3; | 22 | unsigned long arg0, arg1, arg2, arg3; |
23 | }; | 23 | }; |
24 | /* For futex_wait */ | 24 | /* For futex_wait and futex_wait_requeue_pi */ |
25 | struct { | 25 | struct { |
26 | u32 *uaddr; | 26 | u32 *uaddr; |
27 | u32 val; | 27 | u32 val; |
28 | u32 flags; | 28 | u32 flags; |
29 | u32 bitset; | 29 | u32 bitset; |
30 | u64 time; | 30 | u64 time; |
31 | u32 *uaddr2; | ||
31 | } futex; | 32 | } futex; |
32 | /* For nanosleep */ | 33 | /* For nanosleep */ |
33 | struct { | 34 | struct { |
diff --git a/include/linux/time.h b/include/linux/time.h index 242f62499bb7..ea16c1a01d51 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -113,6 +113,21 @@ struct timespec current_kernel_time(void); | |||
113 | #define CURRENT_TIME (current_kernel_time()) | 113 | #define CURRENT_TIME (current_kernel_time()) |
114 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) | 114 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
115 | 115 | ||
116 | /* Some architectures do not supply their own clocksource. | ||
117 | * This is mainly the case in architectures that get their | ||
118 | * inter-tick times by reading the counter on their interval | ||
119 | * timer. Since these timers wrap every tick, they're not really | ||
120 | * useful as clocksources. Wrapping them to act like one is possible | ||
121 | * but not very efficient. So we provide a callout these arches | ||
122 | * can implement for use with the jiffies clocksource to provide | ||
123 | * finer than tick granular time. | ||
124 | */ | ||
125 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET | ||
126 | extern u32 arch_gettimeoffset(void); | ||
127 | #else | ||
128 | static inline u32 arch_gettimeoffset(void) { return 0; } | ||
129 | #endif | ||
130 | |||
116 | extern void do_gettimeofday(struct timeval *tv); | 131 | extern void do_gettimeofday(struct timeval *tv); |
117 | extern int do_settimeofday(struct timespec *tv); | 132 | extern int do_settimeofday(struct timespec *tv); |
118 | extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); | 133 | extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); |
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h new file mode 100644 index 000000000000..c68bccba2074 --- /dev/null +++ b/include/linux/trace_seq.h | |||
@@ -0,0 +1,92 @@ | |||
1 | #ifndef _LINUX_TRACE_SEQ_H | ||
2 | #define _LINUX_TRACE_SEQ_H | ||
3 | |||
4 | #include <linux/fs.h> | ||
5 | |||
6 | /* | ||
7 | * Trace sequences are used to allow a function to call several other functions | ||
8 | * to create a string of data to use (up to a max of PAGE_SIZE). | ||
9 | */ | ||
10 | |||
11 | struct trace_seq { | ||
12 | unsigned char buffer[PAGE_SIZE]; | ||
13 | unsigned int len; | ||
14 | unsigned int readpos; | ||
15 | }; | ||
16 | |||
17 | static inline void | ||
18 | trace_seq_init(struct trace_seq *s) | ||
19 | { | ||
20 | s->len = 0; | ||
21 | s->readpos = 0; | ||
22 | } | ||
23 | |||
24 | /* | ||
25 | * Currently only defined when tracing is enabled. | ||
26 | */ | ||
27 | #ifdef CONFIG_TRACING | ||
28 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
29 | __attribute__ ((format (printf, 2, 3))); | ||
30 | extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) | ||
31 | __attribute__ ((format (printf, 2, 0))); | ||
32 | extern int | ||
33 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); | ||
34 | extern void trace_print_seq(struct seq_file *m, struct trace_seq *s); | ||
35 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | ||
36 | size_t cnt); | ||
37 | extern int trace_seq_puts(struct trace_seq *s, const char *str); | ||
38 | extern int trace_seq_putc(struct trace_seq *s, unsigned char c); | ||
39 | extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len); | ||
40 | extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | ||
41 | size_t len); | ||
42 | extern void *trace_seq_reserve(struct trace_seq *s, size_t len); | ||
43 | extern int trace_seq_path(struct trace_seq *s, struct path *path); | ||
44 | |||
45 | #else /* CONFIG_TRACING */ | ||
46 | static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
47 | { | ||
48 | return 0; | ||
49 | } | ||
50 | static inline int | ||
51 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | ||
52 | { | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s) | ||
57 | { | ||
58 | } | ||
59 | static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | ||
60 | size_t cnt) | ||
61 | { | ||
62 | return 0; | ||
63 | } | ||
64 | static inline int trace_seq_puts(struct trace_seq *s, const char *str) | ||
65 | { | ||
66 | return 0; | ||
67 | } | ||
68 | static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) | ||
69 | { | ||
70 | return 0; | ||
71 | } | ||
72 | static inline int | ||
73 | trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) | ||
74 | { | ||
75 | return 0; | ||
76 | } | ||
77 | static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | ||
78 | size_t len) | ||
79 | { | ||
80 | return 0; | ||
81 | } | ||
82 | static inline void *trace_seq_reserve(struct trace_seq *s, size_t len) | ||
83 | { | ||
84 | return NULL; | ||
85 | } | ||
86 | static inline int trace_seq_path(struct trace_seq *s, struct path *path) | ||
87 | { | ||
88 | return 0; | ||
89 | } | ||
90 | #endif /* CONFIG_TRACING */ | ||
91 | |||
92 | #endif /* _LINUX_TRACE_SEQ_H */ | ||
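trace_seq is a fixed PAGE_SIZE staging buffer for composing one output line piecewise before it is handed to a seq_file or copied to user space. A small sketch using only calls declared in this header; treating a zero return from trace_seq_printf() as "buffer full" is an assumption about the CONFIG_TRACING implementation (the !CONFIG_TRACING stubs above always return 0):

	static int print_my_event(struct trace_seq *s, unsigned int cpu, u64 ts)
	{
		trace_seq_init(s);

		if (!trace_seq_printf(s, "cpu=%u ts=%llu ",
				      cpu, (unsigned long long)ts))
			return 0;		/* assumed: out of buffer space */

		return trace_seq_puts(s, "my_event\n");
	}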
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index d35a7ee7611f..14df7e635d43 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -31,6 +31,8 @@ struct tracepoint { | |||
31 | * Keep in sync with vmlinux.lds.h. | 31 | * Keep in sync with vmlinux.lds.h. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #ifndef DECLARE_TRACE | ||
35 | |||
34 | #define TP_PROTO(args...) args | 36 | #define TP_PROTO(args...) args |
35 | #define TP_ARGS(args...) args | 37 | #define TP_ARGS(args...) args |
36 | 38 | ||
@@ -114,6 +116,7 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, | |||
114 | struct tracepoint *end) | 116 | struct tracepoint *end) |
115 | { } | 117 | { } |
116 | #endif /* CONFIG_TRACEPOINTS */ | 118 | #endif /* CONFIG_TRACEPOINTS */ |
119 | #endif /* DECLARE_TRACE */ | ||
117 | 120 | ||
118 | /* | 121 | /* |
119 | * Connect a probe to a tracepoint. | 122 | * Connect a probe to a tracepoint. |
@@ -154,10 +157,8 @@ static inline void tracepoint_synchronize_unregister(void) | |||
154 | } | 157 | } |
155 | 158 | ||
156 | #define PARAMS(args...) args | 159 | #define PARAMS(args...) args |
157 | #define TRACE_FORMAT(name, proto, args, fmt) \ | ||
158 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | ||
159 | |||
160 | 160 | ||
161 | #ifndef TRACE_EVENT | ||
161 | /* | 162 | /* |
162 | * For use with the TRACE_EVENT macro: | 163 | * For use with the TRACE_EVENT macro: |
163 | * | 164 | * |
@@ -262,5 +263,6 @@ static inline void tracepoint_synchronize_unregister(void) | |||
262 | 263 | ||
263 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ | 264 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ |
264 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | 265 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) |
266 | #endif | ||
265 | 267 | ||
266 | #endif | 268 | #endif |
diff --git a/include/linux/tty.h b/include/linux/tty.h index fc39db95499f..1488d8c81aac 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -185,7 +185,7 @@ struct tty_port; | |||
185 | struct tty_port_operations { | 185 | struct tty_port_operations { |
186 | /* Return 1 if the carrier is raised */ | 186 | /* Return 1 if the carrier is raised */ |
187 | int (*carrier_raised)(struct tty_port *port); | 187 | int (*carrier_raised)(struct tty_port *port); |
188 | void (*raise_dtr_rts)(struct tty_port *port); | 188 | void (*dtr_rts)(struct tty_port *port, int raise); |
189 | }; | 189 | }; |
190 | 190 | ||
191 | struct tty_port { | 191 | struct tty_port { |
@@ -201,6 +201,9 @@ struct tty_port { | |||
201 | unsigned char *xmit_buf; /* Optional buffer */ | 201 | unsigned char *xmit_buf; /* Optional buffer */ |
202 | int close_delay; /* Close port delay */ | 202 | int close_delay; /* Close port delay */ |
203 | int closing_wait; /* Delay for output */ | 203 | int closing_wait; /* Delay for output */ |
204 | int drain_delay; /* Set to zero if no pure time- | ||
205 | based drain is needed, else | ||
206 | set to size of fifo */ | ||
204 | }; | 207 | }; |
205 | 208 | ||
206 | /* | 209 | /* |
@@ -223,8 +226,11 @@ struct tty_struct { | |||
223 | struct tty_driver *driver; | 226 | struct tty_driver *driver; |
224 | const struct tty_operations *ops; | 227 | const struct tty_operations *ops; |
225 | int index; | 228 | int index; |
226 | /* The ldisc objects are protected by tty_ldisc_lock at the moment */ | 229 | |
227 | struct tty_ldisc ldisc; | 230 | /* Protects ldisc changes: Lock tty not pty */ |
231 | struct mutex ldisc_mutex; | ||
232 | struct tty_ldisc *ldisc; | ||
233 | |||
228 | struct mutex termios_mutex; | 234 | struct mutex termios_mutex; |
229 | spinlock_t ctrl_lock; | 235 | spinlock_t ctrl_lock; |
230 | /* Termios values are protected by the termios mutex */ | 236 | /* Termios values are protected by the termios mutex */ |
@@ -311,6 +317,7 @@ struct tty_struct { | |||
311 | #define TTY_CLOSING 7 /* ->close() in progress */ | 317 | #define TTY_CLOSING 7 /* ->close() in progress */ |
312 | #define TTY_LDISC 9 /* Line discipline attached */ | 318 | #define TTY_LDISC 9 /* Line discipline attached */ |
313 | #define TTY_LDISC_CHANGING 10 /* Line discipline changing */ | 319 | #define TTY_LDISC_CHANGING 10 /* Line discipline changing */ |
320 | #define TTY_LDISC_OPEN 11 /* Line discipline is open */ | ||
314 | #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ | 321 | #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ |
315 | #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ | 322 | #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ |
316 | #define TTY_PTY_LOCK 16 /* pty private */ | 323 | #define TTY_PTY_LOCK 16 /* pty private */ |
@@ -403,6 +410,7 @@ extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); | |||
403 | extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); | 410 | extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); |
404 | extern void tty_ldisc_deref(struct tty_ldisc *); | 411 | extern void tty_ldisc_deref(struct tty_ldisc *); |
405 | extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); | 412 | extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); |
413 | extern void tty_ldisc_hangup(struct tty_struct *tty); | ||
406 | extern const struct file_operations tty_ldiscs_proc_fops; | 414 | extern const struct file_operations tty_ldiscs_proc_fops; |
407 | 415 | ||
408 | extern void tty_wakeup(struct tty_struct *tty); | 416 | extern void tty_wakeup(struct tty_struct *tty); |
@@ -425,6 +433,9 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx, | |||
425 | extern void tty_release_dev(struct file *filp); | 433 | extern void tty_release_dev(struct file *filp); |
426 | extern int tty_init_termios(struct tty_struct *tty); | 434 | extern int tty_init_termios(struct tty_struct *tty); |
427 | 435 | ||
436 | extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); | ||
437 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); | ||
438 | |||
428 | extern struct mutex tty_mutex; | 439 | extern struct mutex tty_mutex; |
429 | 440 | ||
430 | extern void tty_write_unlock(struct tty_struct *tty); | 441 | extern void tty_write_unlock(struct tty_struct *tty); |
@@ -438,6 +449,7 @@ extern struct tty_struct *tty_port_tty_get(struct tty_port *port); | |||
438 | extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); | 449 | extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); |
439 | extern int tty_port_carrier_raised(struct tty_port *port); | 450 | extern int tty_port_carrier_raised(struct tty_port *port); |
440 | extern void tty_port_raise_dtr_rts(struct tty_port *port); | 451 | extern void tty_port_raise_dtr_rts(struct tty_port *port); |
452 | extern void tty_port_lower_dtr_rts(struct tty_port *port); | ||
441 | extern void tty_port_hangup(struct tty_port *port); | 453 | extern void tty_port_hangup(struct tty_port *port); |
442 | extern int tty_port_block_til_ready(struct tty_port *port, | 454 | extern int tty_port_block_til_ready(struct tty_port *port, |
443 | struct tty_struct *tty, struct file *filp); | 455 | struct tty_struct *tty, struct file *filp); |
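The tty_port_operations change folds the old raise_dtr_rts() hook into a single dtr_rts(port, raise) callback used for both raising and dropping the lines, driven by tty_port_raise_dtr_rts()/tty_port_lower_dtr_rts() above. A hedged driver-side sketch; struct mydrv_port (which embeds a struct tty_port named port) and the mydrv_* register helpers are invented:

	static void mydrv_dtr_rts(struct tty_port *port, int raise)
	{
		struct mydrv_port *mp = container_of(port, struct mydrv_port, port);

		if (raise)
			mydrv_set_mctrl(mp, TIOCM_DTR | TIOCM_RTS);
		else
			mydrv_clear_mctrl(mp, TIOCM_DTR | TIOCM_RTS);
	}

	static int mydrv_carrier_raised(struct tty_port *port)
	{
		struct mydrv_port *mp = container_of(port, struct mydrv_port, port);

		return (mydrv_read_msr(mp) & TIOCM_CAR) ? 1 : 0;
	}

	static const struct tty_port_operations mydrv_port_ops = {
		.carrier_raised	= mydrv_carrier_raised,
		.dtr_rts	= mydrv_dtr_rts,
	};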
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index bcba84ea2d86..3566129384a4 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
@@ -127,7 +127,8 @@ | |||
127 | * the line discipline are close to full, and it should somehow | 127 | * the line discipline are close to full, and it should somehow |
128 | * signal that no more characters should be sent to the tty. | 128 | * signal that no more characters should be sent to the tty. |
129 | * | 129 | * |
130 | * Optional: Always invoke via tty_throttle(); | 130 | * Optional: Always invoke via tty_throttle(), called under the |
131 | * termios lock. | ||
131 | * | 132 | * |
132 | * void (*unthrottle)(struct tty_struct * tty); | 133 | * void (*unthrottle)(struct tty_struct * tty); |
133 | * | 134 | * |
@@ -135,7 +136,8 @@ | |||
135 | * that characters can now be sent to the tty without fear of | 136 | * that characters can now be sent to the tty without fear of |
136 | * overrunning the input buffers of the line disciplines. | 137 | * overrunning the input buffers of the line disciplines. |
137 | * | 138 | * |
138 | * Optional: Always invoke via tty_unthrottle(); | 139 | * Optional: Always invoke via tty_unthrottle(), called under the |
140 | * termios lock. | ||
139 | * | 141 | * |
140 | * void (*stop)(struct tty_struct *tty); | 142 | * void (*stop)(struct tty_struct *tty); |
141 | * | 143 | * |
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 625e9e4639c6..8cdfed738fe4 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
@@ -224,8 +224,7 @@ struct usb_serial_driver { | |||
224 | /* Called by console with tty = NULL and by tty */ | 224 | /* Called by console with tty = NULL and by tty */ |
225 | int (*open)(struct tty_struct *tty, | 225 | int (*open)(struct tty_struct *tty, |
226 | struct usb_serial_port *port, struct file *filp); | 226 | struct usb_serial_port *port, struct file *filp); |
227 | void (*close)(struct tty_struct *tty, | 227 | void (*close)(struct usb_serial_port *port); |
228 | struct usb_serial_port *port, struct file *filp); | ||
229 | int (*write)(struct tty_struct *tty, struct usb_serial_port *port, | 228 | int (*write)(struct tty_struct *tty, struct usb_serial_port *port, |
230 | const unsigned char *buf, int count); | 229 | const unsigned char *buf, int count); |
231 | /* Called only by the tty layer */ | 230 | /* Called only by the tty layer */ |
@@ -241,6 +240,10 @@ struct usb_serial_driver { | |||
241 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 240 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
242 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 241 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
243 | unsigned int set, unsigned int clear); | 242 | unsigned int set, unsigned int clear); |
243 | /* Called by the tty layer for port level work. There may or may not | ||
244 | be an attached tty at this point */ | ||
245 | void (*dtr_rts)(struct usb_serial_port *port, int on); | ||
246 | int (*carrier_raised)(struct usb_serial_port *port); | ||
244 | /* USB events */ | 247 | /* USB events */ |
245 | void (*read_int_callback)(struct urb *urb); | 248 | void (*read_int_callback)(struct urb *urb); |
246 | void (*write_int_callback)(struct urb *urb); | 249 | void (*write_int_callback)(struct urb *urb); |
@@ -283,8 +286,7 @@ extern int usb_serial_generic_open(struct tty_struct *tty, | |||
283 | struct usb_serial_port *port, struct file *filp); | 286 | struct usb_serial_port *port, struct file *filp); |
284 | extern int usb_serial_generic_write(struct tty_struct *tty, | 287 | extern int usb_serial_generic_write(struct tty_struct *tty, |
285 | struct usb_serial_port *port, const unsigned char *buf, int count); | 288 | struct usb_serial_port *port, const unsigned char *buf, int count); |
286 | extern void usb_serial_generic_close(struct tty_struct *tty, | 289 | extern void usb_serial_generic_close(struct usb_serial_port *port); |
287 | struct usb_serial_port *port, struct file *filp); | ||
288 | extern int usb_serial_generic_resume(struct usb_serial *serial); | 290 | extern int usb_serial_generic_resume(struct usb_serial *serial); |
289 | extern int usb_serial_generic_write_room(struct tty_struct *tty); | 291 | extern int usb_serial_generic_write_room(struct tty_struct *tty); |
290 | extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); | 292 | extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); |
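With close() reduced to a single usb_serial_port argument and the new port-level dtr_rts()/carrier_raised() hooks, shutdown and modem-control work no longer assume an attached tty. A hedged sketch of a driver filling in the new prototypes (all example_* names are hypothetical; the real conversion details vary per device):

#include <linux/usb.h>
#include <linux/usb/serial.h>

static void example_close(struct usb_serial_port *port)
{
	/* No tty or filp any more: only port-level teardown belongs here. */
	usb_kill_urb(port->read_urb);
	usb_kill_urb(port->write_urb);
}

static void example_dtr_rts(struct usb_serial_port *port, int on)
{
	/* Raise or drop DTR/RTS; may run with no tty attached. */
}

static int example_carrier_raised(struct usb_serial_port *port)
{
	return 1;	/* report carrier present unconditionally */
}

static struct usb_serial_driver example_driver = {
	.close		= example_close,
	.dtr_rts	= example_dtr_rts,
	.carrier_raised	= example_carrier_raised,
	/* ... open, write, id_table, etc. ... */
};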
diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 06005fa9e982..4fca4f5440ba 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h | |||
@@ -10,14 +10,17 @@ | |||
10 | 10 | ||
11 | /** | 11 | /** |
12 | * virtqueue - a queue to register buffers for sending or receiving. | 12 | * virtqueue - a queue to register buffers for sending or receiving. |
13 | * @list: the chain of virtqueues for this device | ||
13 | * @callback: the function to call when buffers are consumed (can be NULL). | 14 | * @callback: the function to call when buffers are consumed (can be NULL). |
15 | * @name: the name of this virtqueue (mainly for debugging) | ||
14 | * @vdev: the virtio device this queue was created for. | 16 | * @vdev: the virtio device this queue was created for. |
15 | * @vq_ops: the operations for this virtqueue (see below). | 17 | * @vq_ops: the operations for this virtqueue (see below). |
16 | * @priv: a pointer for the virtqueue implementation to use. | 18 | * @priv: a pointer for the virtqueue implementation to use. |
17 | */ | 19 | */ |
18 | struct virtqueue | 20 | struct virtqueue { |
19 | { | 21 | struct list_head list; |
20 | void (*callback)(struct virtqueue *vq); | 22 | void (*callback)(struct virtqueue *vq); |
23 | const char *name; | ||
21 | struct virtio_device *vdev; | 24 | struct virtio_device *vdev; |
22 | struct virtqueue_ops *vq_ops; | 25 | struct virtqueue_ops *vq_ops; |
23 | void *priv; | 26 | void *priv; |
@@ -76,15 +79,16 @@ struct virtqueue_ops { | |||
76 | * @dev: underlying device. | 79 | * @dev: underlying device. |
77 | * @id: the device type identification (used to match it with a driver). | 80 | * @id: the device type identification (used to match it with a driver). |
78 | * @config: the configuration ops for this device. | 81 | * @config: the configuration ops for this device. |
82 | * @vqs: the list of virtqueues for this device. | ||
79 | * @features: the features supported by both driver and device. | 83 | * @features: the features supported by both driver and device. |
80 | * @priv: private pointer for the driver's use. | 84 | * @priv: private pointer for the driver's use. |
81 | */ | 85 | */ |
82 | struct virtio_device | 86 | struct virtio_device { |
83 | { | ||
84 | int index; | 87 | int index; |
85 | struct device dev; | 88 | struct device dev; |
86 | struct virtio_device_id id; | 89 | struct virtio_device_id id; |
87 | struct virtio_config_ops *config; | 90 | struct virtio_config_ops *config; |
91 | struct list_head vqs; | ||
88 | /* Note that this is a Linux set_bit-style bitmap. */ | 92 | /* Note that this is a Linux set_bit-style bitmap. */ |
89 | unsigned long features[1]; | 93 | unsigned long features[1]; |
90 | void *priv; | 94 | void *priv; |
@@ -99,8 +103,7 @@ void unregister_virtio_device(struct virtio_device *dev); | |||
99 | * @id_table: the ids serviced by this driver. | 103 | * @id_table: the ids serviced by this driver. |
100 | * @feature_table: an array of feature numbers supported by this device. | 104 | * @feature_table: an array of feature numbers supported by this device. |
101 | * @feature_table_size: number of entries in the feature table array. | 105 | * @feature_table_size: number of entries in the feature table array. |
102 | * @probe: the function to call when a device is found. Returns a token for | 106 | * @probe: the function to call when a device is found. Returns 0 or -errno. |
103 | * remove, or PTR_ERR(). | ||
104 | * @remove: the function when a device is removed. | 107 | * @remove: the function when a device is removed. |
105 | * @config_changed: optional function to call when the device configuration | 108 | * @config_changed: optional function to call when the device configuration |
106 | * changes; may be called in interrupt context. | 109 | * changes; may be called in interrupt context. |
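Since probe() now returns 0 or -errno instead of a token, driver error handling becomes the usual kernel pattern. A minimal sketch, assuming a hypothetical example_* driver and using the virtio_find_single_vq() helper introduced in virtio_config.h below:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

struct example_priv {
	struct virtqueue *vq;
};

static void example_done(struct virtqueue *vq)
{
	/* host consumed buffers on our single queue */
}

static int example_probe(struct virtio_device *vdev)
{
	struct example_priv *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return -ENOMEM;

	p->vq = virtio_find_single_vq(vdev, example_done, "example");
	if (IS_ERR(p->vq)) {
		kfree(p);
		return PTR_ERR(p->vq);	/* plain -errno, not a token */
	}

	vdev->priv = p;
	return 0;		/* success is 0, as the comment now states */
}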
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index 94c56d29869d..be7d255fc7cf 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h | |||
@@ -15,6 +15,10 @@ | |||
15 | #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ | 15 | #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ |
16 | #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ | 16 | #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ |
17 | #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ | 17 | #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ |
18 | #define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ | ||
19 | #define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */ | ||
20 | |||
21 | #define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ | ||
18 | 22 | ||
19 | struct virtio_blk_config | 23 | struct virtio_blk_config |
20 | { | 24 | { |
@@ -32,6 +36,7 @@ struct virtio_blk_config | |||
32 | } geometry; | 36 | } geometry; |
33 | /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ | 37 | /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ |
34 | __u32 blk_size; | 38 | __u32 blk_size; |
39 | __u8 identify[VIRTIO_BLK_ID_BYTES]; | ||
35 | } __attribute__((packed)); | 40 | } __attribute__((packed)); |
36 | 41 | ||
37 | /* These two define direction. */ | 42 | /* These two define direction. */ |
@@ -55,6 +60,13 @@ struct virtio_blk_outhdr | |||
55 | __u64 sector; | 60 | __u64 sector; |
56 | }; | 61 | }; |
57 | 62 | ||
63 | struct virtio_scsi_inhdr { | ||
64 | __u32 errors; | ||
65 | __u32 data_len; | ||
66 | __u32 sense_len; | ||
67 | __u32 residual; | ||
68 | }; | ||
69 | |||
58 | /* And this is the final byte of the write scatter-gather list. */ | 70 | /* And this is the final byte of the write scatter-gather list. */ |
59 | #define VIRTIO_BLK_S_OK 0 | 71 | #define VIRTIO_BLK_S_OK 0 |
60 | #define VIRTIO_BLK_S_IOERR 1 | 72 | #define VIRTIO_BLK_S_IOERR 1 |
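The new identify field only carries valid data when VIRTIO_BLK_F_IDENTIFY has been negotiated, so guests should treat it as optional. A hedged sketch of reading it through the virtio_config_buf() helper (example_read_identify is hypothetical; buf must hold VIRTIO_BLK_ID_BYTES):

#include <linux/stddef.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/virtio_config.h>

static int example_read_identify(struct virtio_device *vdev, void *buf)
{
	/* Returns an error when the feature was not offered/negotiated. */
	return virtio_config_buf(vdev, VIRTIO_BLK_F_IDENTIFY,
				 offsetof(struct virtio_blk_config, identify),
				 buf, VIRTIO_BLK_ID_BYTES);
}

VIRTIO_BLK_F_SCSI is gated the same way: requests carrying a struct virtio_scsi_inhdr are only meaningful once that feature bit has been negotiated.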
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index bf8ec283b232..99f514575f6a 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #define VIRTIO_F_NOTIFY_ON_EMPTY 24 | 29 | #define VIRTIO_F_NOTIFY_ON_EMPTY 24 |
30 | 30 | ||
31 | #ifdef __KERNEL__ | 31 | #ifdef __KERNEL__ |
32 | #include <linux/err.h> | ||
32 | #include <linux/virtio.h> | 33 | #include <linux/virtio.h> |
33 | 34 | ||
34 | /** | 35 | /** |
@@ -49,15 +50,26 @@ | |||
49 | * @set_status: write the status byte | 50 | * @set_status: write the status byte |
50 | * vdev: the virtio_device | 51 | * vdev: the virtio_device |
51 | * status: the new status byte | 52 | * status: the new status byte |
53 | * @request_vqs: request the specified number of virtqueues | ||
54 | * vdev: the virtio_device | ||
55 | * max_vqs: the max number of virtqueues we want | ||
56 | * If supplied, must call before any virtqueues are instantiated. | ||
57 | * To modify the max number of virtqueues after request_vqs has been | ||
58 | * called, call free_vqs and then request_vqs with a new value. | ||
59 | * @free_vqs: cleanup resources allocated by request_vqs | ||
60 | * vdev: the virtio_device | ||
61 | * If supplied, must call after all virtqueues have been deleted. | ||
52 | * @reset: reset the device | 62 | * @reset: reset the device |
53 | * vdev: the virtio device | 63 | * vdev: the virtio device |
54 | * After this, status and feature negotiation must be done again | 64 | * After this, status and feature negotiation must be done again |
55 | * @find_vq: find a virtqueue and instantiate it. | 65 | * @find_vqs: find virtqueues and instantiate them. |
56 | * vdev: the virtio_device | 66 | * vdev: the virtio_device |
57 | * index: the 0-based virtqueue number in case there's more than one. | 67 | * nvqs: the number of virtqueues to find |
58 | * callback: the virqtueue callback | 68 | * vqs: on success, includes new virtqueues |
59 | * Returns the new virtqueue or ERR_PTR() (eg. -ENOENT). | 69 | * callbacks: array of callbacks, for each virtqueue |
60 | * @del_vq: free a virtqueue found by find_vq(). | 70 | * names: array of virtqueue names (mainly for debugging) |
71 | * Returns 0 on success or error status | ||
72 | * @del_vqs: free virtqueues found by find_vqs(). | ||
61 | * @get_features: get the array of feature bits for this device. | 73 | * @get_features: get the array of feature bits for this device. |
62 | * vdev: the virtio_device | 74 | * vdev: the virtio_device |
63 | * Returns the first 32 feature bits (all we currently need). | 75 | * Returns the first 32 feature bits (all we currently need). |
@@ -66,6 +78,7 @@ | |||
66 | * This gives the final feature bits for the device: it can change | 78 | * This gives the final feature bits for the device: it can change |
67 | * the dev->feature bits if it wants. | 79 | * the dev->feature bits if it wants. |
68 | */ | 80 | */ |
81 | typedef void vq_callback_t(struct virtqueue *); | ||
69 | struct virtio_config_ops | 82 | struct virtio_config_ops |
70 | { | 83 | { |
71 | void (*get)(struct virtio_device *vdev, unsigned offset, | 84 | void (*get)(struct virtio_device *vdev, unsigned offset, |
@@ -75,10 +88,11 @@ struct virtio_config_ops | |||
75 | u8 (*get_status)(struct virtio_device *vdev); | 88 | u8 (*get_status)(struct virtio_device *vdev); |
76 | void (*set_status)(struct virtio_device *vdev, u8 status); | 89 | void (*set_status)(struct virtio_device *vdev, u8 status); |
77 | void (*reset)(struct virtio_device *vdev); | 90 | void (*reset)(struct virtio_device *vdev); |
78 | struct virtqueue *(*find_vq)(struct virtio_device *vdev, | 91 | int (*find_vqs)(struct virtio_device *, unsigned nvqs, |
79 | unsigned index, | 92 | struct virtqueue *vqs[], |
80 | void (*callback)(struct virtqueue *)); | 93 | vq_callback_t *callbacks[], |
81 | void (*del_vq)(struct virtqueue *vq); | 94 | const char *names[]); |
95 | void (*del_vqs)(struct virtio_device *); | ||
82 | u32 (*get_features)(struct virtio_device *vdev); | 96 | u32 (*get_features)(struct virtio_device *vdev); |
83 | void (*finalize_features)(struct virtio_device *vdev); | 97 | void (*finalize_features)(struct virtio_device *vdev); |
84 | }; | 98 | }; |
@@ -99,7 +113,9 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, | |||
99 | if (__builtin_constant_p(fbit)) | 113 | if (__builtin_constant_p(fbit)) |
100 | BUILD_BUG_ON(fbit >= 32); | 114 | BUILD_BUG_ON(fbit >= 32); |
101 | 115 | ||
102 | virtio_check_driver_offered_feature(vdev, fbit); | 116 | if (fbit < VIRTIO_TRANSPORT_F_START) |
117 | virtio_check_driver_offered_feature(vdev, fbit); | ||
118 | |||
103 | return test_bit(fbit, vdev->features); | 119 | return test_bit(fbit, vdev->features); |
104 | } | 120 | } |
105 | 121 | ||
@@ -126,5 +142,18 @@ static inline int virtio_config_buf(struct virtio_device *vdev, | |||
126 | vdev->config->get(vdev, offset, buf, len); | 142 | vdev->config->get(vdev, offset, buf, len); |
127 | return 0; | 143 | return 0; |
128 | } | 144 | } |
145 | |||
146 | static inline | ||
147 | struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, | ||
148 | vq_callback_t *c, const char *n) | ||
149 | { | ||
150 | vq_callback_t *callbacks[] = { c }; | ||
151 | const char *names[] = { n }; | ||
152 | struct virtqueue *vq; | ||
153 | int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); | ||
154 | if (err < 0) | ||
155 | return ERR_PTR(err); | ||
156 | return vq; | ||
157 | } | ||
129 | #endif /* __KERNEL__ */ | 158 | #endif /* __KERNEL__ */ |
130 | #endif /* _LINUX_VIRTIO_CONFIG_H */ | 159 | #endif /* _LINUX_VIRTIO_CONFIG_H */ |
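find_vqs() replaces per-queue find_vq()/del_vq() calls so a transport can set up all queues (and, on PCI, their MSI-X vectors) in one pass. A hedged sketch of a device that wants a receive and a transmit queue (example_* names are hypothetical):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void example_rx_done(struct virtqueue *vq) { /* ... */ }
static void example_tx_done(struct virtqueue *vq) { /* ... */ }

static int example_setup_vqs(struct virtio_device *vdev,
			     struct virtqueue **rx, struct virtqueue **tx)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { example_rx_done, example_tx_done };
	const char *names[] = { "example-rx", "example-tx" };
	int err;

	/* One call instantiates both queues, or fails as a whole. */
	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
	if (err)
		return err;

	*rx = vqs[0];
	*tx = vqs[1];
	return 0;
}

Teardown is symmetric: stop using the queues, then call vdev->config->del_vqs(vdev). Single-queue devices can use the virtio_find_single_vq() wrapper above instead of open-coding the arrays.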
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h index cd0fd5d181a6..9a3d7c48c622 100644 --- a/include/linux/virtio_pci.h +++ b/include/linux/virtio_pci.h | |||
@@ -47,9 +47,17 @@ | |||
47 | /* The bit of the ISR which indicates a device configuration change. */ | 47 | /* The bit of the ISR which indicates a device configuration change. */ |
48 | #define VIRTIO_PCI_ISR_CONFIG 0x2 | 48 | #define VIRTIO_PCI_ISR_CONFIG 0x2 |
49 | 49 | ||
50 | /* MSI-X registers: only enabled if MSI-X is enabled. */ | ||
51 | /* A 16-bit vector for configuration changes. */ | ||
52 | #define VIRTIO_MSI_CONFIG_VECTOR 20 | ||
53 | /* A 16-bit vector for selected queue notifications. */ | ||
54 | #define VIRTIO_MSI_QUEUE_VECTOR 22 | ||
55 | /* Vector value used to disable MSI for queue */ | ||
56 | #define VIRTIO_MSI_NO_VECTOR 0xffff | ||
57 | |||
50 | /* The remaining space is defined by each driver as the per-driver | 58 | /* The remaining space is defined by each driver as the per-driver |
51 | * configuration space */ | 59 | * configuration space */ |
52 | #define VIRTIO_PCI_CONFIG 20 | 60 | #define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20) |
53 | 61 | ||
54 | /* Virtio ABI version, this must match exactly */ | 62 | /* Virtio ABI version, this must match exactly */ |
55 | #define VIRTIO_PCI_ABI_VERSION 0 | 63 | #define VIRTIO_PCI_ABI_VERSION 0 |
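Because the MSI-X vector registers occupy offsets 20 and 22, the device-specific configuration window moves from 20 to 24 whenever MSI-X is enabled, which is why VIRTIO_PCI_CONFIG is now a macro over the device. A hedged sketch of how a transport might compute the window's base; struct example_pci_dev stands in for the transport's private structure, which is what actually carries the msix_enabled flag the macro relies on:

#include <linux/compiler.h>
#include <linux/virtio_pci.h>

struct example_pci_dev {
	void __iomem *ioaddr;	/* legacy I/O window mapped with pci_iomap() */
	int msix_enabled;	/* set once MSI-X has been enabled */
};

static void __iomem *example_device_config(struct example_pci_dev *dev)
{
	/* 24 with MSI-X enabled, 20 otherwise */
	return dev->ioaddr + VIRTIO_PCI_CONFIG(dev);
}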
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 71e03722fb59..693e0ec5afa6 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h | |||
@@ -14,6 +14,8 @@ | |||
14 | #define VRING_DESC_F_NEXT 1 | 14 | #define VRING_DESC_F_NEXT 1 |
15 | /* This marks a buffer as write-only (otherwise read-only). */ | 15 | /* This marks a buffer as write-only (otherwise read-only). */ |
16 | #define VRING_DESC_F_WRITE 2 | 16 | #define VRING_DESC_F_WRITE 2 |
17 | /* This means the buffer contains a list of buffer descriptors. */ | ||
18 | #define VRING_DESC_F_INDIRECT 4 | ||
17 | 19 | ||
18 | /* The Host uses this in used->flags to advise the Guest: don't kick me when | 20 | /* The Host uses this in used->flags to advise the Guest: don't kick me when |
19 | * you add a buffer. It's unreliable, so it's simply an optimization. Guest | 21 | * you add a buffer. It's unreliable, so it's simply an optimization. Guest |
@@ -24,6 +26,9 @@ | |||
24 | * optimization. */ | 26 | * optimization. */ |
25 | #define VRING_AVAIL_F_NO_INTERRUPT 1 | 27 | #define VRING_AVAIL_F_NO_INTERRUPT 1 |
26 | 28 | ||
29 | /* We support indirect buffer descriptors */ | ||
30 | #define VIRTIO_RING_F_INDIRECT_DESC 28 | ||
31 | |||
27 | /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ | 32 | /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ |
28 | struct vring_desc | 33 | struct vring_desc |
29 | { | 34 | { |
@@ -119,7 +124,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, | |||
119 | struct virtio_device *vdev, | 124 | struct virtio_device *vdev, |
120 | void *pages, | 125 | void *pages, |
121 | void (*notify)(struct virtqueue *vq), | 126 | void (*notify)(struct virtqueue *vq), |
122 | void (*callback)(struct virtqueue *vq)); | 127 | void (*callback)(struct virtqueue *vq), |
128 | const char *name); | ||
123 | void vring_del_virtqueue(struct virtqueue *vq); | 129 | void vring_del_virtqueue(struct virtqueue *vq); |
124 | /* Filter out transport-specific feature bits. */ | 130 | /* Filter out transport-specific feature bits. */ |
125 | void vring_transport_features(struct virtio_device *vdev); | 131 | void vring_transport_features(struct virtio_device *vdev); |
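An indirect descriptor is an ordinary ring slot whose addr/len point at a separately allocated table of struct vring_desc entries, so a request with many scatter-gather elements consumes a single slot; the guest may only emit them after negotiating VIRTIO_RING_F_INDIRECT_DESC. A hedged sketch of filling such a slot (example_fill_indirect and its arguments are hypothetical; the table entries' addr/len and any VRING_DESC_F_WRITE flags are assumed to be filled in already):

#include <linux/types.h>
#include <linux/virtio_ring.h>

static void example_fill_indirect(struct vring_desc *ring_desc,
				  struct vring_desc *table,
				  unsigned int count, u64 table_phys)
{
	unsigned int i;

	if (!count)
		return;

	/* chain the table entries together via 'next' */
	for (i = 0; i < count - 1; i++) {
		table[i].flags |= VRING_DESC_F_NEXT;
		table[i].next = i + 1;
	}
	table[count - 1].flags &= ~VRING_DESC_F_NEXT;
	table[count - 1].next = 0;

	/* the in-ring descriptor carries no data, only the table */
	ring_desc->addr = table_phys;
	ring_desc->len = count * sizeof(struct vring_desc);
	ring_desc->flags = VRING_DESC_F_INDIRECT;
	ring_desc->next = 0;
}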
diff --git a/include/linux/wait.h b/include/linux/wait.h index bc024632f365..6788e1a4d4ca 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -132,8 +132,6 @@ static inline void __remove_wait_queue(wait_queue_head_t *head, | |||
132 | list_del(&old->task_list); | 132 | list_del(&old->task_list); |
133 | } | 133 | } |
134 | 134 | ||
135 | void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | ||
136 | int nr_exclusive, int sync, void *key); | ||
137 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 135 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
138 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); | 136 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
139 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, | 137 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 93445477f86a..3224820c8514 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -79,7 +79,6 @@ struct writeback_control { | |||
79 | void writeback_inodes(struct writeback_control *wbc); | 79 | void writeback_inodes(struct writeback_control *wbc); |
80 | int inode_wait(void *); | 80 | int inode_wait(void *); |
81 | void sync_inodes_sb(struct super_block *, int wait); | 81 | void sync_inodes_sb(struct super_block *, int wait); |
82 | void sync_inodes(int wait); | ||
83 | 82 | ||
84 | /* writeback.h requires fs.h; it, too, is not included from here. */ | 83 | /* writeback.h requires fs.h; it, too, is not included from here. */ |
85 | static inline void wait_on_inode(struct inode *inode) | 84 | static inline void wait_on_inode(struct inode *inode) |