diff options
| author | Ingo Molnar <mingo@elte.hu> | 2009-06-11 17:31:52 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-06-11 17:31:52 -0400 |
| commit | 0d5959723e1db3fd7323c198a50c16cecf96c7a9 (patch) | |
| tree | 802b623fff261ebcbbddadf84af5524398364a18 /include/linux | |
| parent | 62fdac5913f71f8f200bd2c9bd59a02e9a1498e9 (diff) | |
| parent | 512626a04e72aca60effe111fa0333ed0b195d21 (diff) | |
Merge branch 'linus' into x86/mce3
Conflicts:
arch/x86/kernel/cpu/mcheck/mce_64.c
arch/x86/kernel/irq.c
Merge reason: Resolve the conflicts above.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
71 files changed, 1983 insertions, 508 deletions
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index 48ee32a18ac5..64a982ea5d5f 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h | |||
| @@ -159,6 +159,7 @@ | |||
| 159 | #define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) | 159 | #define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) |
| 160 | 160 | ||
| 161 | #ifndef __ASSEMBLY__ | 161 | #ifndef __ASSEMBLY__ |
| 162 | struct amba_device; /* in uncompress this is included but amba/bus.h is not */ | ||
| 162 | struct amba_pl010_data { | 163 | struct amba_pl010_data { |
| 163 | void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl); | 164 | void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl); |
| 164 | }; | 165 | }; |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 7b214fd672a2..12737be58601 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -218,12 +218,12 @@ struct bio { | |||
| 218 | #define bio_sectors(bio) ((bio)->bi_size >> 9) | 218 | #define bio_sectors(bio) ((bio)->bi_size >> 9) |
| 219 | #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) | 219 | #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) |
| 220 | 220 | ||
| 221 | static inline unsigned int bio_cur_sectors(struct bio *bio) | 221 | static inline unsigned int bio_cur_bytes(struct bio *bio) |
| 222 | { | 222 | { |
| 223 | if (bio->bi_vcnt) | 223 | if (bio->bi_vcnt) |
| 224 | return bio_iovec(bio)->bv_len >> 9; | 224 | return bio_iovec(bio)->bv_len; |
| 225 | else /* dataless requests such as discard */ | 225 | else /* dataless requests such as discard */ |
| 226 | return bio->bi_size >> 9; | 226 | return bio->bi_size; |
| 227 | } | 227 | } |
| 228 | 228 | ||
| 229 | static inline void *bio_data(struct bio *bio) | 229 | static inline void *bio_data(struct bio *bio) |
| @@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio) | |||
| 279 | #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ | 279 | #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ |
| 280 | (((addr1) | (mask)) == (((addr2) - 1) | (mask))) | 280 | (((addr1) | (mask)) == (((addr2) - 1) | (mask))) |
| 281 | #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ | 281 | #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ |
| 282 | __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask) | 282 | __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) |
| 283 | #define BIO_SEG_BOUNDARY(q, b1, b2) \ | 283 | #define BIO_SEG_BOUNDARY(q, b1, b2) \ |
| 284 | BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2))) | 284 | BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2))) |
| 285 | 285 | ||
| @@ -506,7 +506,7 @@ static inline int bio_has_data(struct bio *bio) | |||
| 506 | } | 506 | } |
| 507 | 507 | ||
| 508 | /* | 508 | /* |
| 509 | * BIO list managment for use by remapping drivers (e.g. DM or MD). | 509 | * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. |
| 510 | * | 510 | * |
| 511 | * A bio_list anchors a singly-linked list of bios chained through the bi_next | 511 | * A bio_list anchors a singly-linked list of bios chained through the bi_next |
| 512 | * member of the bio. The bio_list also caches the last list member to allow | 512 | * member of the bio. The bio_list also caches the last list member to allow |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b4f71f1a4af7..ebdfde8fe556 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -166,19 +166,9 @@ struct request { | |||
| 166 | enum rq_cmd_type_bits cmd_type; | 166 | enum rq_cmd_type_bits cmd_type; |
| 167 | unsigned long atomic_flags; | 167 | unsigned long atomic_flags; |
| 168 | 168 | ||
| 169 | /* Maintain bio traversal state for part by part I/O submission. | 169 | /* the following two fields are internal, NEVER access directly */ |
| 170 | * hard_* are block layer internals, no driver should touch them! | 170 | sector_t __sector; /* sector cursor */ |
| 171 | */ | 171 | unsigned int __data_len; /* total data len */ |
| 172 | |||
| 173 | sector_t sector; /* next sector to submit */ | ||
| 174 | sector_t hard_sector; /* next sector to complete */ | ||
| 175 | unsigned long nr_sectors; /* no. of sectors left to submit */ | ||
| 176 | unsigned long hard_nr_sectors; /* no. of sectors left to complete */ | ||
| 177 | /* no. of sectors left to submit in the current segment */ | ||
| 178 | unsigned int current_nr_sectors; | ||
| 179 | |||
| 180 | /* no. of sectors left to complete in the current segment */ | ||
| 181 | unsigned int hard_cur_sectors; | ||
| 182 | 172 | ||
| 183 | struct bio *bio; | 173 | struct bio *bio; |
| 184 | struct bio *biotail; | 174 | struct bio *biotail; |
| @@ -211,8 +201,8 @@ struct request { | |||
| 211 | 201 | ||
| 212 | unsigned short ioprio; | 202 | unsigned short ioprio; |
| 213 | 203 | ||
| 214 | void *special; | 204 | void *special; /* opaque pointer available for LLD use */ |
| 215 | char *buffer; | 205 | char *buffer; /* kaddr of the current segment if available */ |
| 216 | 206 | ||
| 217 | int tag; | 207 | int tag; |
| 218 | int errors; | 208 | int errors; |
| @@ -226,10 +216,9 @@ struct request { | |||
| 226 | unsigned char __cmd[BLK_MAX_CDB]; | 216 | unsigned char __cmd[BLK_MAX_CDB]; |
| 227 | unsigned char *cmd; | 217 | unsigned char *cmd; |
| 228 | 218 | ||
| 229 | unsigned int data_len; | ||
| 230 | unsigned int extra_len; /* length of alignment and padding */ | 219 | unsigned int extra_len; /* length of alignment and padding */ |
| 231 | unsigned int sense_len; | 220 | unsigned int sense_len; |
| 232 | void *data; | 221 | unsigned int resid_len; /* residual count */ |
| 233 | void *sense; | 222 | void *sense; |
| 234 | 223 | ||
| 235 | unsigned long deadline; | 224 | unsigned long deadline; |
| @@ -318,6 +307,26 @@ struct blk_cmd_filter { | |||
| 318 | struct kobject kobj; | 307 | struct kobject kobj; |
| 319 | }; | 308 | }; |
| 320 | 309 | ||
| 310 | struct queue_limits { | ||
| 311 | unsigned long bounce_pfn; | ||
| 312 | unsigned long seg_boundary_mask; | ||
| 313 | |||
| 314 | unsigned int max_hw_sectors; | ||
| 315 | unsigned int max_sectors; | ||
| 316 | unsigned int max_segment_size; | ||
| 317 | unsigned int physical_block_size; | ||
| 318 | unsigned int alignment_offset; | ||
| 319 | unsigned int io_min; | ||
| 320 | unsigned int io_opt; | ||
| 321 | |||
| 322 | unsigned short logical_block_size; | ||
| 323 | unsigned short max_hw_segments; | ||
| 324 | unsigned short max_phys_segments; | ||
| 325 | |||
| 326 | unsigned char misaligned; | ||
| 327 | unsigned char no_cluster; | ||
| 328 | }; | ||
| 329 | |||
| 321 | struct request_queue | 330 | struct request_queue |
| 322 | { | 331 | { |
| 323 | /* | 332 | /* |
| @@ -369,7 +378,6 @@ struct request_queue | |||
| 369 | /* | 378 | /* |
| 370 | * queue needs bounce pages for pages above this limit | 379 | * queue needs bounce pages for pages above this limit |
| 371 | */ | 380 | */ |
| 372 | unsigned long bounce_pfn; | ||
| 373 | gfp_t bounce_gfp; | 381 | gfp_t bounce_gfp; |
| 374 | 382 | ||
| 375 | /* | 383 | /* |
| @@ -398,14 +406,6 @@ struct request_queue | |||
| 398 | unsigned int nr_congestion_off; | 406 | unsigned int nr_congestion_off; |
| 399 | unsigned int nr_batching; | 407 | unsigned int nr_batching; |
| 400 | 408 | ||
| 401 | unsigned int max_sectors; | ||
| 402 | unsigned int max_hw_sectors; | ||
| 403 | unsigned short max_phys_segments; | ||
| 404 | unsigned short max_hw_segments; | ||
| 405 | unsigned short hardsect_size; | ||
| 406 | unsigned int max_segment_size; | ||
| 407 | |||
| 408 | unsigned long seg_boundary_mask; | ||
| 409 | void *dma_drain_buffer; | 409 | void *dma_drain_buffer; |
| 410 | unsigned int dma_drain_size; | 410 | unsigned int dma_drain_size; |
| 411 | unsigned int dma_pad_mask; | 411 | unsigned int dma_pad_mask; |
| @@ -415,12 +415,14 @@ struct request_queue | |||
| 415 | struct list_head tag_busy_list; | 415 | struct list_head tag_busy_list; |
| 416 | 416 | ||
| 417 | unsigned int nr_sorted; | 417 | unsigned int nr_sorted; |
| 418 | unsigned int in_flight; | 418 | unsigned int in_flight[2]; |
| 419 | 419 | ||
| 420 | unsigned int rq_timeout; | 420 | unsigned int rq_timeout; |
| 421 | struct timer_list timeout; | 421 | struct timer_list timeout; |
| 422 | struct list_head timeout_list; | 422 | struct list_head timeout_list; |
| 423 | 423 | ||
| 424 | struct queue_limits limits; | ||
| 425 | |||
| 424 | /* | 426 | /* |
| 425 | * sg stuff | 427 | * sg stuff |
| 426 | */ | 428 | */ |
| @@ -522,6 +524,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag, | |||
| 522 | __clear_bit(flag, &q->queue_flags); | 524 | __clear_bit(flag, &q->queue_flags); |
| 523 | } | 525 | } |
| 524 | 526 | ||
| 527 | static inline int queue_in_flight(struct request_queue *q) | ||
| 528 | { | ||
| 529 | return q->in_flight[0] + q->in_flight[1]; | ||
| 530 | } | ||
| 531 | |||
| 525 | static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | 532 | static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) |
| 526 | { | 533 | { |
| 527 | WARN_ON_ONCE(!queue_is_locked(q)); | 534 | WARN_ON_ONCE(!queue_is_locked(q)); |
| @@ -752,10 +759,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq); | |||
| 752 | extern void blk_put_request(struct request *); | 759 | extern void blk_put_request(struct request *); |
| 753 | extern void __blk_put_request(struct request_queue *, struct request *); | 760 | extern void __blk_put_request(struct request_queue *, struct request *); |
| 754 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); | 761 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); |
| 762 | extern struct request *blk_make_request(struct request_queue *, struct bio *, | ||
| 763 | gfp_t); | ||
| 755 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); | 764 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); |
| 756 | extern void blk_requeue_request(struct request_queue *, struct request *); | 765 | extern void blk_requeue_request(struct request_queue *, struct request *); |
| 757 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); | 766 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); |
| 758 | extern int blk_lld_busy(struct request_queue *q); | 767 | extern int blk_lld_busy(struct request_queue *q); |
| 768 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | ||
| 769 | struct bio_set *bs, gfp_t gfp_mask, | ||
| 770 | int (*bio_ctr)(struct bio *, struct bio *, void *), | ||
| 771 | void *data); | ||
| 772 | extern void blk_rq_unprep_clone(struct request *rq); | ||
| 759 | extern int blk_insert_cloned_request(struct request_queue *q, | 773 | extern int blk_insert_cloned_request(struct request_queue *q, |
| 760 | struct request *rq); | 774 | struct request *rq); |
| 761 | extern void blk_plug_device(struct request_queue *); | 775 | extern void blk_plug_device(struct request_queue *); |
| @@ -768,12 +782,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, | |||
| 768 | struct scsi_ioctl_command __user *); | 782 | struct scsi_ioctl_command __user *); |
| 769 | 783 | ||
| 770 | /* | 784 | /* |
| 771 | * Temporary export, until SCSI gets fixed up. | ||
| 772 | */ | ||
| 773 | extern int blk_rq_append_bio(struct request_queue *q, struct request *rq, | ||
| 774 | struct bio *bio); | ||
| 775 | |||
| 776 | /* | ||
| 777 | * A queue has just exitted congestion. Note this in the global counter of | 785 | * A queue has just exitted congestion. Note this in the global counter of |
| 778 | * congested queues, and wake up anyone who was waiting for requests to be | 786 | * congested queues, and wake up anyone who was waiting for requests to be |
| 779 | * put back. | 787 | * put back. |
| @@ -798,7 +806,6 @@ extern void blk_sync_queue(struct request_queue *q); | |||
| 798 | extern void __blk_stop_queue(struct request_queue *q); | 806 | extern void __blk_stop_queue(struct request_queue *q); |
| 799 | extern void __blk_run_queue(struct request_queue *); | 807 | extern void __blk_run_queue(struct request_queue *); |
| 800 | extern void blk_run_queue(struct request_queue *); | 808 | extern void blk_run_queue(struct request_queue *); |
| 801 | extern void blk_start_queueing(struct request_queue *); | ||
| 802 | extern int blk_rq_map_user(struct request_queue *, struct request *, | 809 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
| 803 | struct rq_map_data *, void __user *, unsigned long, | 810 | struct rq_map_data *, void __user *, unsigned long, |
| 804 | gfp_t); | 811 | gfp_t); |
| @@ -831,41 +838,73 @@ static inline void blk_run_address_space(struct address_space *mapping) | |||
| 831 | blk_run_backing_dev(mapping->backing_dev_info, NULL); | 838 | blk_run_backing_dev(mapping->backing_dev_info, NULL); |
| 832 | } | 839 | } |
| 833 | 840 | ||
| 834 | extern void blkdev_dequeue_request(struct request *req); | 841 | /* |
| 842 | * blk_rq_pos() : the current sector | ||
| 843 | * blk_rq_bytes() : bytes left in the entire request | ||
| 844 | * blk_rq_cur_bytes() : bytes left in the current segment | ||
| 845 | * blk_rq_sectors() : sectors left in the entire request | ||
| 846 | * blk_rq_cur_sectors() : sectors left in the current segment | ||
| 847 | */ | ||
| 848 | static inline sector_t blk_rq_pos(const struct request *rq) | ||
| 849 | { | ||
| 850 | return rq->__sector; | ||
| 851 | } | ||
| 852 | |||
| 853 | static inline unsigned int blk_rq_bytes(const struct request *rq) | ||
| 854 | { | ||
| 855 | return rq->__data_len; | ||
| 856 | } | ||
| 857 | |||
| 858 | static inline int blk_rq_cur_bytes(const struct request *rq) | ||
| 859 | { | ||
| 860 | return rq->bio ? bio_cur_bytes(rq->bio) : 0; | ||
| 861 | } | ||
| 862 | |||
| 863 | static inline unsigned int blk_rq_sectors(const struct request *rq) | ||
| 864 | { | ||
| 865 | return blk_rq_bytes(rq) >> 9; | ||
| 866 | } | ||
| 867 | |||
| 868 | static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | ||
| 869 | { | ||
| 870 | return blk_rq_cur_bytes(rq) >> 9; | ||
| 871 | } | ||
| 872 | |||
| 873 | /* | ||
| 874 | * Request issue related functions. | ||
| 875 | */ | ||
| 876 | extern struct request *blk_peek_request(struct request_queue *q); | ||
| 877 | extern void blk_start_request(struct request *rq); | ||
| 878 | extern struct request *blk_fetch_request(struct request_queue *q); | ||
| 835 | 879 | ||
| 836 | /* | 880 | /* |
| 837 | * blk_end_request() and friends. | 881 | * Request completion related functions. |
| 838 | * __blk_end_request() and end_request() must be called with | 882 | * |
| 839 | * the request queue spinlock acquired. | 883 | * blk_update_request() completes given number of bytes and updates |
| 884 | * the request without completing it. | ||
| 885 | * | ||
| 886 | * blk_end_request() and friends. __blk_end_request() must be called | ||
| 887 | * with the request queue spinlock acquired. | ||
| 840 | * | 888 | * |
| 841 | * Several drivers define their own end_request and call | 889 | * Several drivers define their own end_request and call |
| 842 | * blk_end_request() for parts of the original function. | 890 | * blk_end_request() for parts of the original function. |
| 843 | * This prevents code duplication in drivers. | 891 | * This prevents code duplication in drivers. |
| 844 | */ | 892 | */ |
| 845 | extern int blk_end_request(struct request *rq, int error, | 893 | extern bool blk_update_request(struct request *rq, int error, |
| 846 | unsigned int nr_bytes); | 894 | unsigned int nr_bytes); |
| 847 | extern int __blk_end_request(struct request *rq, int error, | 895 | extern bool blk_end_request(struct request *rq, int error, |
| 848 | unsigned int nr_bytes); | 896 | unsigned int nr_bytes); |
| 849 | extern int blk_end_bidi_request(struct request *rq, int error, | 897 | extern void blk_end_request_all(struct request *rq, int error); |
| 850 | unsigned int nr_bytes, unsigned int bidi_bytes); | 898 | extern bool blk_end_request_cur(struct request *rq, int error); |
| 851 | extern void end_request(struct request *, int); | 899 | extern bool __blk_end_request(struct request *rq, int error, |
| 852 | extern int blk_end_request_callback(struct request *rq, int error, | 900 | unsigned int nr_bytes); |
| 853 | unsigned int nr_bytes, | 901 | extern void __blk_end_request_all(struct request *rq, int error); |
| 854 | int (drv_callback)(struct request *)); | 902 | extern bool __blk_end_request_cur(struct request *rq, int error); |
| 903 | |||
| 855 | extern void blk_complete_request(struct request *); | 904 | extern void blk_complete_request(struct request *); |
| 856 | extern void __blk_complete_request(struct request *); | 905 | extern void __blk_complete_request(struct request *); |
| 857 | extern void blk_abort_request(struct request *); | 906 | extern void blk_abort_request(struct request *); |
| 858 | extern void blk_abort_queue(struct request_queue *); | 907 | extern void blk_abort_queue(struct request_queue *); |
| 859 | extern void blk_update_request(struct request *rq, int error, | ||
| 860 | unsigned int nr_bytes); | ||
| 861 | |||
| 862 | /* | ||
| 863 | * blk_end_request() takes bytes instead of sectors as a complete size. | ||
| 864 | * blk_rq_bytes() returns bytes left to complete in the entire request. | ||
| 865 | * blk_rq_cur_bytes() returns bytes left to complete in the current segment. | ||
| 866 | */ | ||
| 867 | extern unsigned int blk_rq_bytes(struct request *rq); | ||
| 868 | extern unsigned int blk_rq_cur_bytes(struct request *rq); | ||
| 869 | 908 | ||
| 870 | /* | 909 | /* |
| 871 | * Access functions for manipulating queue properties | 910 | * Access functions for manipulating queue properties |
| @@ -877,10 +916,20 @@ extern void blk_cleanup_queue(struct request_queue *); | |||
| 877 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | 916 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); |
| 878 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 917 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
| 879 | extern void blk_queue_max_sectors(struct request_queue *, unsigned int); | 918 | extern void blk_queue_max_sectors(struct request_queue *, unsigned int); |
| 919 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | ||
| 880 | extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); | 920 | extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); |
| 881 | extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); | 921 | extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); |
| 882 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 922 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
| 883 | extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); | 923 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
| 924 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); | ||
| 925 | extern void blk_queue_alignment_offset(struct request_queue *q, | ||
| 926 | unsigned int alignment); | ||
| 927 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); | ||
| 928 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); | ||
| 929 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | ||
| 930 | sector_t offset); | ||
| 931 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, | ||
| 932 | sector_t offset); | ||
| 884 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); | 933 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); |
| 885 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); | 934 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); |
| 886 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); | 935 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); |
| @@ -967,19 +1016,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); | |||
| 967 | 1016 | ||
| 968 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) | 1017 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) |
| 969 | 1018 | ||
| 970 | static inline int queue_hardsect_size(struct request_queue *q) | 1019 | static inline unsigned long queue_bounce_pfn(struct request_queue *q) |
| 1020 | { | ||
| 1021 | return q->limits.bounce_pfn; | ||
| 1022 | } | ||
| 1023 | |||
| 1024 | static inline unsigned long queue_segment_boundary(struct request_queue *q) | ||
| 1025 | { | ||
| 1026 | return q->limits.seg_boundary_mask; | ||
| 1027 | } | ||
| 1028 | |||
| 1029 | static inline unsigned int queue_max_sectors(struct request_queue *q) | ||
| 1030 | { | ||
| 1031 | return q->limits.max_sectors; | ||
| 1032 | } | ||
| 1033 | |||
| 1034 | static inline unsigned int queue_max_hw_sectors(struct request_queue *q) | ||
| 1035 | { | ||
| 1036 | return q->limits.max_hw_sectors; | ||
| 1037 | } | ||
| 1038 | |||
| 1039 | static inline unsigned short queue_max_hw_segments(struct request_queue *q) | ||
| 1040 | { | ||
| 1041 | return q->limits.max_hw_segments; | ||
| 1042 | } | ||
| 1043 | |||
| 1044 | static inline unsigned short queue_max_phys_segments(struct request_queue *q) | ||
| 1045 | { | ||
| 1046 | return q->limits.max_phys_segments; | ||
| 1047 | } | ||
| 1048 | |||
| 1049 | static inline unsigned int queue_max_segment_size(struct request_queue *q) | ||
| 1050 | { | ||
| 1051 | return q->limits.max_segment_size; | ||
| 1052 | } | ||
| 1053 | |||
| 1054 | static inline unsigned short queue_logical_block_size(struct request_queue *q) | ||
| 971 | { | 1055 | { |
| 972 | int retval = 512; | 1056 | int retval = 512; |
| 973 | 1057 | ||
| 974 | if (q && q->hardsect_size) | 1058 | if (q && q->limits.logical_block_size) |
| 975 | retval = q->hardsect_size; | 1059 | retval = q->limits.logical_block_size; |
| 976 | 1060 | ||
| 977 | return retval; | 1061 | return retval; |
| 978 | } | 1062 | } |
| 979 | 1063 | ||
| 980 | static inline int bdev_hardsect_size(struct block_device *bdev) | 1064 | static inline unsigned short bdev_logical_block_size(struct block_device *bdev) |
| 1065 | { | ||
| 1066 | return queue_logical_block_size(bdev_get_queue(bdev)); | ||
| 1067 | } | ||
| 1068 | |||
| 1069 | static inline unsigned int queue_physical_block_size(struct request_queue *q) | ||
| 1070 | { | ||
| 1071 | return q->limits.physical_block_size; | ||
| 1072 | } | ||
| 1073 | |||
| 1074 | static inline unsigned int queue_io_min(struct request_queue *q) | ||
| 1075 | { | ||
| 1076 | return q->limits.io_min; | ||
| 1077 | } | ||
| 1078 | |||
| 1079 | static inline unsigned int queue_io_opt(struct request_queue *q) | ||
| 1080 | { | ||
| 1081 | return q->limits.io_opt; | ||
| 1082 | } | ||
| 1083 | |||
| 1084 | static inline int queue_alignment_offset(struct request_queue *q) | ||
| 1085 | { | ||
| 1086 | if (q && q->limits.misaligned) | ||
| 1087 | return -1; | ||
| 1088 | |||
| 1089 | if (q && q->limits.alignment_offset) | ||
| 1090 | return q->limits.alignment_offset; | ||
| 1091 | |||
| 1092 | return 0; | ||
| 1093 | } | ||
| 1094 | |||
| 1095 | static inline int queue_sector_alignment_offset(struct request_queue *q, | ||
| 1096 | sector_t sector) | ||
| 981 | { | 1097 | { |
| 982 | return queue_hardsect_size(bdev_get_queue(bdev)); | 1098 | return ((sector << 9) - q->limits.alignment_offset) |
| 1099 | & (q->limits.io_min - 1); | ||
| 983 | } | 1100 | } |
| 984 | 1101 | ||
| 985 | static inline int queue_dma_alignment(struct request_queue *q) | 1102 | static inline int queue_dma_alignment(struct request_queue *q) |
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index d960889e92ef..7e4350ece0f8 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
| @@ -116,9 +116,9 @@ struct blk_io_trace { | |||
| 116 | * The remap event | 116 | * The remap event |
| 117 | */ | 117 | */ |
| 118 | struct blk_io_trace_remap { | 118 | struct blk_io_trace_remap { |
| 119 | __be32 device; | ||
| 120 | __be32 device_from; | 119 | __be32 device_from; |
| 121 | __be64 sector; | 120 | __be32 device_to; |
| 121 | __be64 sector_from; | ||
| 122 | }; | 122 | }; |
| 123 | 123 | ||
| 124 | enum { | 124 | enum { |
| @@ -165,8 +165,9 @@ struct blk_trace { | |||
| 165 | 165 | ||
| 166 | extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); | 166 | extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); |
| 167 | extern void blk_trace_shutdown(struct request_queue *); | 167 | extern void blk_trace_shutdown(struct request_queue *); |
| 168 | extern int do_blk_trace_setup(struct request_queue *q, | 168 | extern int do_blk_trace_setup(struct request_queue *q, char *name, |
| 169 | char *name, dev_t dev, struct blk_user_trace_setup *buts); | 169 | dev_t dev, struct block_device *bdev, |
| 170 | struct blk_user_trace_setup *buts); | ||
| 170 | extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); | 171 | extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); |
| 171 | 172 | ||
| 172 | /** | 173 | /** |
| @@ -193,22 +194,42 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); | |||
| 193 | extern void blk_add_driver_data(struct request_queue *q, struct request *rq, | 194 | extern void blk_add_driver_data(struct request_queue *q, struct request *rq, |
| 194 | void *data, size_t len); | 195 | void *data, size_t len); |
| 195 | extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | 196 | extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, |
| 197 | struct block_device *bdev, | ||
| 196 | char __user *arg); | 198 | char __user *arg); |
| 197 | extern int blk_trace_startstop(struct request_queue *q, int start); | 199 | extern int blk_trace_startstop(struct request_queue *q, int start); |
| 198 | extern int blk_trace_remove(struct request_queue *q); | 200 | extern int blk_trace_remove(struct request_queue *q); |
| 201 | extern int blk_trace_init_sysfs(struct device *dev); | ||
| 199 | 202 | ||
| 200 | extern struct attribute_group blk_trace_attr_group; | 203 | extern struct attribute_group blk_trace_attr_group; |
| 201 | 204 | ||
| 202 | #else /* !CONFIG_BLK_DEV_IO_TRACE */ | 205 | #else /* !CONFIG_BLK_DEV_IO_TRACE */ |
| 203 | #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) | 206 | # define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) |
| 204 | #define blk_trace_shutdown(q) do { } while (0) | 207 | # define blk_trace_shutdown(q) do { } while (0) |
| 205 | #define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY) | 208 | # define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY) |
| 206 | #define blk_add_driver_data(q, rq, data, len) do {} while (0) | 209 | # define blk_add_driver_data(q, rq, data, len) do {} while (0) |
| 207 | #define blk_trace_setup(q, name, dev, arg) (-ENOTTY) | 210 | # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) |
| 208 | #define blk_trace_startstop(q, start) (-ENOTTY) | 211 | # define blk_trace_startstop(q, start) (-ENOTTY) |
| 209 | #define blk_trace_remove(q) (-ENOTTY) | 212 | # define blk_trace_remove(q) (-ENOTTY) |
| 210 | #define blk_add_trace_msg(q, fmt, ...) do { } while (0) | 213 | # define blk_add_trace_msg(q, fmt, ...) do { } while (0) |
| 214 | static inline int blk_trace_init_sysfs(struct device *dev) | ||
| 215 | { | ||
| 216 | return 0; | ||
| 217 | } | ||
| 211 | 218 | ||
| 212 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ | 219 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ |
| 220 | |||
| 221 | #if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) | ||
| 222 | |||
| 223 | static inline int blk_cmd_buf_len(struct request *rq) | ||
| 224 | { | ||
| 225 | return blk_pc_request(rq) ? rq->cmd_len * 3 : 1; | ||
| 226 | } | ||
| 227 | |||
| 228 | extern void blk_dump_cmd(char *buf, struct request *rq); | ||
| 229 | extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); | ||
| 230 | extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq); | ||
| 231 | |||
| 232 | #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ | ||
| 233 | |||
| 213 | #endif /* __KERNEL__ */ | 234 | #endif /* __KERNEL__ */ |
| 214 | #endif | 235 | #endif |
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 5a40d14daa9f..c56457c8334e 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
| @@ -288,7 +288,15 @@ static inline cycle_t clocksource_read(struct clocksource *cs) | |||
| 288 | */ | 288 | */ |
| 289 | static inline int clocksource_enable(struct clocksource *cs) | 289 | static inline int clocksource_enable(struct clocksource *cs) |
| 290 | { | 290 | { |
| 291 | return cs->enable ? cs->enable(cs) : 0; | 291 | int ret = 0; |
| 292 | |||
| 293 | if (cs->enable) | ||
| 294 | ret = cs->enable(cs); | ||
| 295 | |||
| 296 | /* save mult_orig on enable */ | ||
| 297 | cs->mult_orig = cs->mult; | ||
| 298 | |||
| 299 | return ret; | ||
| 292 | } | 300 | } |
| 293 | 301 | ||
| 294 | /** | 302 | /** |
diff --git a/include/linux/compat.h b/include/linux/compat.h index f2ded21f9a3c..af931ee43dd8 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
| @@ -222,6 +222,8 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); | |||
| 222 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from); | 222 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from); |
| 223 | int get_compat_sigevent(struct sigevent *event, | 223 | int get_compat_sigevent(struct sigevent *event, |
| 224 | const struct compat_sigevent __user *u_event); | 224 | const struct compat_sigevent __user *u_event); |
| 225 | long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, | ||
| 226 | struct compat_siginfo __user *uinfo); | ||
| 225 | 227 | ||
| 226 | static inline int compat_timeval_compare(struct compat_timeval *lhs, | 228 | static inline int compat_timeval_compare(struct compat_timeval *lhs, |
| 227 | struct compat_timeval *rhs) | 229 | struct compat_timeval *rhs) |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 9f315382610b..c5ac87ca7bc6 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
| @@ -1022,6 +1022,8 @@ typedef struct cpumask *cpumask_var_t; | |||
| 1022 | 1022 | ||
| 1023 | bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); | 1023 | bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); |
| 1024 | bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); | 1024 | bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); |
| 1025 | bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); | ||
| 1026 | bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); | ||
| 1025 | void alloc_bootmem_cpumask_var(cpumask_var_t *mask); | 1027 | void alloc_bootmem_cpumask_var(cpumask_var_t *mask); |
| 1026 | void free_cpumask_var(cpumask_var_t mask); | 1028 | void free_cpumask_var(cpumask_var_t mask); |
| 1027 | void free_bootmem_cpumask_var(cpumask_var_t mask); | 1029 | void free_bootmem_cpumask_var(cpumask_var_t mask); |
| @@ -1040,6 +1042,19 @@ static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, | |||
| 1040 | return true; | 1042 | return true; |
| 1041 | } | 1043 | } |
| 1042 | 1044 | ||
| 1045 | static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) | ||
| 1046 | { | ||
| 1047 | cpumask_clear(*mask); | ||
| 1048 | return true; | ||
| 1049 | } | ||
| 1050 | |||
| 1051 | static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, | ||
| 1052 | int node) | ||
| 1053 | { | ||
| 1054 | cpumask_clear(*mask); | ||
| 1055 | return true; | ||
| 1056 | } | ||
| 1057 | |||
| 1043 | static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) | 1058 | static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) |
| 1044 | { | 1059 | { |
| 1045 | } | 1060 | } |
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h index 788850ba4e75..1fbdea4f08eb 100644 --- a/include/linux/cyclades.h +++ b/include/linux/cyclades.h | |||
| @@ -142,19 +142,6 @@ struct CYZ_BOOT_CTRL { | |||
| 142 | 142 | ||
| 143 | 143 | ||
| 144 | #ifndef DP_WINDOW_SIZE | 144 | #ifndef DP_WINDOW_SIZE |
| 145 | /* #include "cyclomz.h" */ | ||
| 146 | /****************** ****************** *******************/ | ||
| 147 | /* | ||
| 148 | * The data types defined below are used in all ZFIRM interface | ||
| 149 | * data structures. They accomodate differences between HW | ||
| 150 | * architectures and compilers. | ||
| 151 | */ | ||
| 152 | |||
| 153 | typedef __u64 ucdouble; /* 64 bits, unsigned */ | ||
| 154 | typedef __u32 uclong; /* 32 bits, unsigned */ | ||
| 155 | typedef __u16 ucshort; /* 16 bits, unsigned */ | ||
| 156 | typedef __u8 ucchar; /* 8 bits, unsigned */ | ||
| 157 | |||
| 158 | /* | 145 | /* |
| 159 | * Memory Window Sizes | 146 | * Memory Window Sizes |
| 160 | */ | 147 | */ |
| @@ -507,16 +494,20 @@ struct ZFW_CTRL { | |||
| 507 | 494 | ||
| 508 | /* Per card data structure */ | 495 | /* Per card data structure */ |
| 509 | struct cyclades_card { | 496 | struct cyclades_card { |
| 510 | void __iomem *base_addr; | 497 | void __iomem *base_addr; |
| 511 | void __iomem *ctl_addr; | 498 | union { |
| 512 | int irq; | 499 | void __iomem *p9050; |
| 513 | unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */ | 500 | struct RUNTIME_9060 __iomem *p9060; |
| 514 | unsigned int first_line; /* minor number of first channel on card */ | 501 | } ctl_addr; |
| 515 | unsigned int nports; /* Number of ports in the card */ | 502 | int irq; |
| 516 | int bus_index; /* address shift - 0 for ISA, 1 for PCI */ | 503 | unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */ |
| 517 | int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ | 504 | unsigned int first_line; /* minor number of first channel on card */ |
| 518 | spinlock_t card_lock; | 505 | unsigned int nports; /* Number of ports in the card */ |
| 519 | struct cyclades_port *ports; | 506 | int bus_index; /* address shift - 0 for ISA, 1 for PCI */ |
| 507 | int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ | ||
| 508 | u32 hw_ver; | ||
| 509 | spinlock_t card_lock; | ||
| 510 | struct cyclades_port *ports; | ||
| 520 | }; | 511 | }; |
| 521 | 512 | ||
| 522 | /*************************************** | 513 | /*************************************** |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index ded2d7c42668..49c2362977fd 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
| @@ -149,7 +149,7 @@ struct io_restrictions { | |||
| 149 | unsigned max_hw_sectors; | 149 | unsigned max_hw_sectors; |
| 150 | unsigned max_sectors; | 150 | unsigned max_sectors; |
| 151 | unsigned max_segment_size; | 151 | unsigned max_segment_size; |
| 152 | unsigned short hardsect_size; | 152 | unsigned short logical_block_size; |
| 153 | unsigned short max_hw_segments; | 153 | unsigned short max_hw_segments; |
| 154 | unsigned short max_phys_segments; | 154 | unsigned short max_phys_segments; |
| 155 | unsigned char no_cluster; /* inverted so that 0 is default */ | 155 | unsigned char no_cluster; /* inverted so that 0 is default */ |
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h index 28d53cb7b5a2..171ad8aedc83 100644 --- a/include/linux/dma-debug.h +++ b/include/linux/dma-debug.h | |||
| @@ -32,6 +32,8 @@ extern void dma_debug_add_bus(struct bus_type *bus); | |||
| 32 | 32 | ||
| 33 | extern void dma_debug_init(u32 num_entries); | 33 | extern void dma_debug_init(u32 num_entries); |
| 34 | 34 | ||
| 35 | extern int dma_debug_resize_entries(u32 num_entries); | ||
| 36 | |||
| 35 | extern void debug_dma_map_page(struct device *dev, struct page *page, | 37 | extern void debug_dma_map_page(struct device *dev, struct page *page, |
| 36 | size_t offset, size_t size, | 38 | size_t offset, size_t size, |
| 37 | int direction, dma_addr_t dma_addr, | 39 | int direction, dma_addr_t dma_addr, |
| @@ -91,6 +93,11 @@ static inline void dma_debug_init(u32 num_entries) | |||
| 91 | { | 93 | { |
| 92 | } | 94 | } |
| 93 | 95 | ||
| 96 | static inline int dma_debug_resize_entries(u32 num_entries) | ||
| 97 | { | ||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | |||
| 94 | static inline void debug_dma_map_page(struct device *dev, struct page *page, | 101 | static inline void debug_dma_map_page(struct device *dev, struct page *page, |
| 95 | size_t offset, size_t size, | 102 | size_t offset, size_t size, |
| 96 | int direction, dma_addr_t dma_addr, | 103 | int direction, dma_addr_t dma_addr, |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index c59b769f62b0..1cb3372e65d8 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *); | |||
| 103 | extern void elv_merge_requests(struct request_queue *, struct request *, | 103 | extern void elv_merge_requests(struct request_queue *, struct request *, |
| 104 | struct request *); | 104 | struct request *); |
| 105 | extern void elv_merged_request(struct request_queue *, struct request *, int); | 105 | extern void elv_merged_request(struct request_queue *, struct request *, int); |
| 106 | extern void elv_dequeue_request(struct request_queue *, struct request *); | ||
| 107 | extern void elv_requeue_request(struct request_queue *, struct request *); | 106 | extern void elv_requeue_request(struct request_queue *, struct request *); |
| 108 | extern int elv_queue_empty(struct request_queue *); | 107 | extern int elv_queue_empty(struct request_queue *); |
| 109 | extern struct request *elv_next_request(struct request_queue *q); | ||
| 110 | extern struct request *elv_former_request(struct request_queue *, struct request *); | 108 | extern struct request *elv_former_request(struct request_queue *, struct request *); |
| 111 | extern struct request *elv_latter_request(struct request_queue *, struct request *); | 109 | extern struct request *elv_latter_request(struct request_queue *, struct request *); |
| 112 | extern int elv_register_queue(struct request_queue *q); | 110 | extern int elv_register_queue(struct request_queue *q); |
| @@ -171,7 +169,7 @@ enum { | |||
| 171 | ELV_MQUEUE_MUST, | 169 | ELV_MQUEUE_MUST, |
| 172 | }; | 170 | }; |
| 173 | 171 | ||
| 174 | #define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) | 172 | #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) |
| 175 | #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) | 173 | #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) |
| 176 | 174 | ||
| 177 | /* | 175 | /* |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 3b534e527e09..83d6b4397245 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -2205,6 +2205,8 @@ extern int generic_segment_checks(const struct iovec *iov, | |||
| 2205 | /* fs/splice.c */ | 2205 | /* fs/splice.c */ |
| 2206 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, | 2206 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, |
| 2207 | struct pipe_inode_info *, size_t, unsigned int); | 2207 | struct pipe_inode_info *, size_t, unsigned int); |
| 2208 | extern ssize_t default_file_splice_read(struct file *, loff_t *, | ||
| 2209 | struct pipe_inode_info *, size_t, unsigned int); | ||
| 2208 | extern ssize_t generic_file_splice_write(struct pipe_inode_info *, | 2210 | extern ssize_t generic_file_splice_write(struct pipe_inode_info *, |
| 2209 | struct file *, loff_t *, size_t, unsigned int); | 2211 | struct file *, loff_t *, size_t, unsigned int); |
| 2210 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, | 2212 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, |
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 8a0c2f221e6b..39b95c56587e 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
| @@ -233,8 +233,6 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size); | |||
| 233 | 233 | ||
| 234 | extern int skip_trace(unsigned long ip); | 234 | extern int skip_trace(unsigned long ip); |
| 235 | 235 | ||
| 236 | extern void ftrace_release(void *start, unsigned long size); | ||
| 237 | |||
| 238 | extern void ftrace_disable_daemon(void); | 236 | extern void ftrace_disable_daemon(void); |
| 239 | extern void ftrace_enable_daemon(void); | 237 | extern void ftrace_enable_daemon(void); |
| 240 | #else | 238 | #else |
| @@ -325,13 +323,8 @@ static inline void __ftrace_enabled_restore(int enabled) | |||
| 325 | 323 | ||
| 326 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 324 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
| 327 | extern void ftrace_init(void); | 325 | extern void ftrace_init(void); |
| 328 | extern void ftrace_init_module(struct module *mod, | ||
| 329 | unsigned long *start, unsigned long *end); | ||
| 330 | #else | 326 | #else |
| 331 | static inline void ftrace_init(void) { } | 327 | static inline void ftrace_init(void) { } |
| 332 | static inline void | ||
| 333 | ftrace_init_module(struct module *mod, | ||
| 334 | unsigned long *start, unsigned long *end) { } | ||
| 335 | #endif | 328 | #endif |
| 336 | 329 | ||
| 337 | /* | 330 | /* |
| @@ -368,6 +361,7 @@ struct ftrace_ret_stack { | |||
| 368 | unsigned long ret; | 361 | unsigned long ret; |
| 369 | unsigned long func; | 362 | unsigned long func; |
| 370 | unsigned long long calltime; | 363 | unsigned long long calltime; |
| 364 | unsigned long long subtime; | ||
| 371 | }; | 365 | }; |
| 372 | 366 | ||
| 373 | /* | 367 | /* |
| @@ -379,8 +373,6 @@ extern void return_to_handler(void); | |||
| 379 | 373 | ||
| 380 | extern int | 374 | extern int |
| 381 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); | 375 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); |
| 382 | extern void | ||
| 383 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); | ||
| 384 | 376 | ||
| 385 | /* | 377 | /* |
| 386 | * Sometimes we don't want to trace a function with the function | 378 | * Sometimes we don't want to trace a function with the function |
| @@ -496,8 +488,15 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk) | |||
| 496 | 488 | ||
| 497 | extern int ftrace_dump_on_oops; | 489 | extern int ftrace_dump_on_oops; |
| 498 | 490 | ||
| 491 | #ifdef CONFIG_PREEMPT | ||
| 492 | #define INIT_TRACE_RECURSION .trace_recursion = 0, | ||
| 493 | #endif | ||
| 494 | |||
| 499 | #endif /* CONFIG_TRACING */ | 495 | #endif /* CONFIG_TRACING */ |
| 500 | 496 | ||
| 497 | #ifndef INIT_TRACE_RECURSION | ||
| 498 | #define INIT_TRACE_RECURSION | ||
| 499 | #endif | ||
| 501 | 500 | ||
| 502 | #ifdef CONFIG_HW_BRANCH_TRACER | 501 | #ifdef CONFIG_HW_BRANCH_TRACER |
| 503 | 502 | ||
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h new file mode 100644 index 000000000000..5c093ffc655b --- /dev/null +++ b/include/linux/ftrace_event.h | |||
| @@ -0,0 +1,172 @@ | |||
| 1 | #ifndef _LINUX_FTRACE_EVENT_H | ||
| 2 | #define _LINUX_FTRACE_EVENT_H | ||
| 3 | |||
| 4 | #include <linux/trace_seq.h> | ||
| 5 | #include <linux/ring_buffer.h> | ||
| 6 | #include <linux/percpu.h> | ||
| 7 | |||
| 8 | struct trace_array; | ||
| 9 | struct tracer; | ||
| 10 | struct dentry; | ||
| 11 | |||
| 12 | DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq); | ||
| 13 | |||
| 14 | struct trace_print_flags { | ||
| 15 | unsigned long mask; | ||
| 16 | const char *name; | ||
| 17 | }; | ||
| 18 | |||
| 19 | const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim, | ||
| 20 | unsigned long flags, | ||
| 21 | const struct trace_print_flags *flag_array); | ||
| 22 | |||
| 23 | const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, | ||
| 24 | const struct trace_print_flags *symbol_array); | ||
| 25 | |||
| 26 | /* | ||
| 27 | * The trace entry - the most basic unit of tracing. This is what | ||
| 28 | * is printed in the end as a single line in the trace output, such as: | ||
| 29 | * | ||
| 30 | * bash-15816 [01] 235.197585: idle_cpu <- irq_enter | ||
| 31 | */ | ||
| 32 | struct trace_entry { | ||
| 33 | unsigned short type; | ||
| 34 | unsigned char flags; | ||
| 35 | unsigned char preempt_count; | ||
| 36 | int pid; | ||
| 37 | int tgid; | ||
| 38 | }; | ||
| 39 | |||
| 40 | #define FTRACE_MAX_EVENT \ | ||
| 41 | ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) | ||
| 42 | |||
| 43 | /* | ||
| 44 | * Trace iterator - used by printout routines who present trace | ||
| 45 | * results to users and which routines might sleep, etc: | ||
| 46 | */ | ||
| 47 | struct trace_iterator { | ||
| 48 | struct trace_array *tr; | ||
| 49 | struct tracer *trace; | ||
| 50 | void *private; | ||
| 51 | int cpu_file; | ||
| 52 | struct mutex mutex; | ||
| 53 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; | ||
| 54 | unsigned long iter_flags; | ||
| 55 | |||
| 56 | /* The below is zeroed out in pipe_read */ | ||
| 57 | struct trace_seq seq; | ||
| 58 | struct trace_entry *ent; | ||
| 59 | int cpu; | ||
| 60 | u64 ts; | ||
| 61 | |||
| 62 | loff_t pos; | ||
| 63 | long idx; | ||
| 64 | |||
| 65 | cpumask_var_t started; | ||
| 66 | }; | ||
| 67 | |||
| 68 | |||
| 69 | typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, | ||
| 70 | int flags); | ||
| 71 | struct trace_event { | ||
| 72 | struct hlist_node node; | ||
| 73 | struct list_head list; | ||
| 74 | int type; | ||
| 75 | trace_print_func trace; | ||
| 76 | trace_print_func raw; | ||
| 77 | trace_print_func hex; | ||
| 78 | trace_print_func binary; | ||
| 79 | }; | ||
| 80 | |||
| 81 | extern int register_ftrace_event(struct trace_event *event); | ||
| 82 | extern int unregister_ftrace_event(struct trace_event *event); | ||
| 83 | |||
| 84 | /* Return values for print_line callback */ | ||
| 85 | enum print_line_t { | ||
| 86 | TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ | ||
| 87 | TRACE_TYPE_HANDLED = 1, | ||
| 88 | TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */ | ||
| 89 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ | ||
| 90 | }; | ||
| 91 | |||
| 92 | |||
| 93 | struct ring_buffer_event * | ||
| 94 | trace_current_buffer_lock_reserve(int type, unsigned long len, | ||
| 95 | unsigned long flags, int pc); | ||
| 96 | void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, | ||
| 97 | unsigned long flags, int pc); | ||
| 98 | void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, | ||
| 99 | unsigned long flags, int pc); | ||
| 100 | void trace_current_buffer_discard_commit(struct ring_buffer_event *event); | ||
| 101 | |||
| 102 | void tracing_record_cmdline(struct task_struct *tsk); | ||
| 103 | |||
| 104 | struct ftrace_event_call { | ||
| 105 | struct list_head list; | ||
| 106 | char *name; | ||
| 107 | char *system; | ||
| 108 | struct dentry *dir; | ||
| 109 | struct trace_event *event; | ||
| 110 | int enabled; | ||
| 111 | int (*regfunc)(void); | ||
| 112 | void (*unregfunc)(void); | ||
| 113 | int id; | ||
| 114 | int (*raw_init)(void); | ||
| 115 | int (*show_format)(struct trace_seq *s); | ||
| 116 | int (*define_fields)(void); | ||
| 117 | struct list_head fields; | ||
| 118 | int filter_active; | ||
| 119 | void *filter; | ||
| 120 | void *mod; | ||
| 121 | |||
| 122 | #ifdef CONFIG_EVENT_PROFILE | ||
| 123 | atomic_t profile_count; | ||
| 124 | int (*profile_enable)(struct ftrace_event_call *); | ||
| 125 | void (*profile_disable)(struct ftrace_event_call *); | ||
| 126 | #endif | ||
| 127 | }; | ||
| 128 | |||
| 129 | #define MAX_FILTER_PRED 32 | ||
| 130 | #define MAX_FILTER_STR_VAL 128 | ||
| 131 | |||
| 132 | extern int init_preds(struct ftrace_event_call *call); | ||
| 133 | extern void destroy_preds(struct ftrace_event_call *call); | ||
| 134 | extern int filter_match_preds(struct ftrace_event_call *call, void *rec); | ||
| 135 | extern int filter_current_check_discard(struct ftrace_event_call *call, | ||
| 136 | void *rec, | ||
| 137 | struct ring_buffer_event *event); | ||
| 138 | |||
| 139 | extern int trace_define_field(struct ftrace_event_call *call, char *type, | ||
| 140 | char *name, int offset, int size, int is_signed); | ||
| 141 | |||
| 142 | #define is_signed_type(type) (((type)(-1)) < 0) | ||
| 143 | |||
| 144 | int trace_set_clr_event(const char *system, const char *event, int set); | ||
| 145 | |||
| 146 | /* | ||
| 147 | * The double __builtin_constant_p is because gcc will give us an error | ||
| 148 | * if we try to allocate the static variable to fmt if it is not a | ||
| 149 | * constant. Even with the outer if statement optimizing out. | ||
| 150 | */ | ||
| 151 | #define event_trace_printk(ip, fmt, args...) \ | ||
| 152 | do { \ | ||
| 153 | __trace_printk_check_format(fmt, ##args); \ | ||
| 154 | tracing_record_cmdline(current); \ | ||
| 155 | if (__builtin_constant_p(fmt)) { \ | ||
| 156 | static const char *trace_printk_fmt \ | ||
| 157 | __attribute__((section("__trace_printk_fmt"))) = \ | ||
| 158 | __builtin_constant_p(fmt) ? fmt : NULL; \ | ||
| 159 | \ | ||
| 160 | __trace_bprintk(ip, trace_printk_fmt, ##args); \ | ||
| 161 | } else \ | ||
| 162 | __trace_printk(ip, fmt, ##args); \ | ||
| 163 | } while (0) | ||
| 164 | |||
| 165 | #define __common_field(type, item, is_signed) \ | ||
| 166 | ret = trace_define_field(event_call, #type, "common_" #item, \ | ||
| 167 | offsetof(typeof(field.ent), item), \ | ||
| 168 | sizeof(field.ent.item), is_signed); \ | ||
| 169 | if (ret) \ | ||
| 170 | return ret; | ||
| 171 | |||
| 172 | #endif /* _LINUX_FTRACE_EVENT_H */ | ||
diff --git a/include/linux/futex.h b/include/linux/futex.h index 3bf5bb5a34f9..34956c8fdebf 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h | |||
| @@ -23,6 +23,8 @@ union ktime; | |||
| 23 | #define FUTEX_TRYLOCK_PI 8 | 23 | #define FUTEX_TRYLOCK_PI 8 |
| 24 | #define FUTEX_WAIT_BITSET 9 | 24 | #define FUTEX_WAIT_BITSET 9 |
| 25 | #define FUTEX_WAKE_BITSET 10 | 25 | #define FUTEX_WAKE_BITSET 10 |
| 26 | #define FUTEX_WAIT_REQUEUE_PI 11 | ||
| 27 | #define FUTEX_CMP_REQUEUE_PI 12 | ||
| 26 | 28 | ||
| 27 | #define FUTEX_PRIVATE_FLAG 128 | 29 | #define FUTEX_PRIVATE_FLAG 128 |
| 28 | #define FUTEX_CLOCK_REALTIME 256 | 30 | #define FUTEX_CLOCK_REALTIME 256 |
| @@ -38,6 +40,10 @@ union ktime; | |||
| 38 | #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) | 40 | #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) |
| 39 | #define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG) | 41 | #define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG) |
| 40 | #define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG) | 42 | #define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG) |
| 43 | #define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \ | ||
| 44 | FUTEX_PRIVATE_FLAG) | ||
| 45 | #define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \ | ||
| 46 | FUTEX_PRIVATE_FLAG) | ||
| 41 | 47 | ||
| 42 | /* | 48 | /* |
| 43 | * Support for robust futexes: the kernel cleans up held futexes at | 49 | * Support for robust futexes: the kernel cleans up held futexes at |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index a1a28caed23d..149fda264c86 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
| @@ -90,6 +90,7 @@ struct disk_stats { | |||
| 90 | struct hd_struct { | 90 | struct hd_struct { |
| 91 | sector_t start_sect; | 91 | sector_t start_sect; |
| 92 | sector_t nr_sects; | 92 | sector_t nr_sects; |
| 93 | sector_t alignment_offset; | ||
| 93 | struct device __dev; | 94 | struct device __dev; |
| 94 | struct kobject *holder_dir; | 95 | struct kobject *holder_dir; |
| 95 | int policy, partno; | 96 | int policy, partno; |
diff --git a/include/linux/ide.h b/include/linux/ide.h index 9fed365a598b..867cb68d8461 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
| @@ -26,6 +26,9 @@ | |||
| 26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
| 27 | #include <asm/mutex.h> | 27 | #include <asm/mutex.h> |
| 28 | 28 | ||
| 29 | /* for request_sense */ | ||
| 30 | #include <linux/cdrom.h> | ||
| 31 | |||
| 29 | #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) | 32 | #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) |
| 30 | # define SUPPORT_VLB_SYNC 0 | 33 | # define SUPPORT_VLB_SYNC 0 |
| 31 | #else | 34 | #else |
| @@ -324,7 +327,6 @@ struct ide_cmd { | |||
| 324 | unsigned int cursg_ofs; | 327 | unsigned int cursg_ofs; |
| 325 | 328 | ||
| 326 | struct request *rq; /* copy of request */ | 329 | struct request *rq; /* copy of request */ |
| 327 | void *special; /* valid_t generally */ | ||
| 328 | }; | 330 | }; |
| 329 | 331 | ||
| 330 | /* ATAPI packet command flags */ | 332 | /* ATAPI packet command flags */ |
| @@ -360,11 +362,7 @@ struct ide_atapi_pc { | |||
| 360 | 362 | ||
| 361 | /* data buffer */ | 363 | /* data buffer */ |
| 362 | u8 *buf; | 364 | u8 *buf; |
| 363 | /* current buffer position */ | ||
| 364 | u8 *cur_pos; | ||
| 365 | int buf_size; | 365 | int buf_size; |
| 366 | /* missing/available data on the current buffer */ | ||
| 367 | int b_count; | ||
| 368 | 366 | ||
| 369 | /* the corresponding request */ | 367 | /* the corresponding request */ |
| 370 | struct request *rq; | 368 | struct request *rq; |
| @@ -377,10 +375,6 @@ struct ide_atapi_pc { | |||
| 377 | */ | 375 | */ |
| 378 | u8 pc_buf[IDE_PC_BUFFER_SIZE]; | 376 | u8 pc_buf[IDE_PC_BUFFER_SIZE]; |
| 379 | 377 | ||
| 380 | /* idetape only */ | ||
| 381 | struct idetape_bh *bh; | ||
| 382 | char *b_data; | ||
| 383 | |||
| 384 | unsigned long timeout; | 378 | unsigned long timeout; |
| 385 | }; | 379 | }; |
| 386 | 380 | ||
| @@ -593,16 +587,16 @@ struct ide_drive_s { | |||
| 593 | /* callback for packet commands */ | 587 | /* callback for packet commands */ |
| 594 | int (*pc_callback)(struct ide_drive_s *, int); | 588 | int (*pc_callback)(struct ide_drive_s *, int); |
| 595 | 589 | ||
| 596 | void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *); | ||
| 597 | int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *, | ||
| 598 | unsigned int, int); | ||
| 599 | |||
| 600 | ide_startstop_t (*irq_handler)(struct ide_drive_s *); | 590 | ide_startstop_t (*irq_handler)(struct ide_drive_s *); |
| 601 | 591 | ||
| 602 | unsigned long atapi_flags; | 592 | unsigned long atapi_flags; |
| 603 | 593 | ||
| 604 | struct ide_atapi_pc request_sense_pc; | 594 | struct ide_atapi_pc request_sense_pc; |
| 605 | struct request request_sense_rq; | 595 | |
| 596 | /* current sense rq and buffer */ | ||
| 597 | bool sense_rq_armed; | ||
| 598 | struct request sense_rq; | ||
| 599 | struct request_sense sense_data; | ||
| 606 | }; | 600 | }; |
| 607 | 601 | ||
| 608 | typedef struct ide_drive_s ide_drive_t; | 602 | typedef struct ide_drive_s ide_drive_t; |
| @@ -1174,7 +1168,10 @@ int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *); | |||
| 1174 | int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); | 1168 | int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); |
| 1175 | int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); | 1169 | int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); |
| 1176 | void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); | 1170 | void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); |
| 1177 | void ide_retry_pc(ide_drive_t *, struct gendisk *); | 1171 | void ide_retry_pc(ide_drive_t *drive); |
| 1172 | |||
| 1173 | void ide_prep_sense(ide_drive_t *drive, struct request *rq); | ||
| 1174 | int ide_queue_sense_rq(ide_drive_t *drive, void *special); | ||
| 1178 | 1175 | ||
| 1179 | int ide_cd_expiry(ide_drive_t *); | 1176 | int ide_cd_expiry(ide_drive_t *); |
| 1180 | 1177 | ||
diff --git a/include/linux/ima.h b/include/linux/ima.h index 0e2aa45cb0ce..b1b827d091a9 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h | |||
| @@ -13,14 +13,17 @@ | |||
| 13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
| 14 | struct linux_binprm; | 14 | struct linux_binprm; |
| 15 | 15 | ||
| 16 | #define IMA_COUNT_UPDATE 1 | ||
| 17 | #define IMA_COUNT_LEAVE 0 | ||
| 18 | |||
| 16 | #ifdef CONFIG_IMA | 19 | #ifdef CONFIG_IMA |
| 17 | extern int ima_bprm_check(struct linux_binprm *bprm); | 20 | extern int ima_bprm_check(struct linux_binprm *bprm); |
| 18 | extern int ima_inode_alloc(struct inode *inode); | 21 | extern int ima_inode_alloc(struct inode *inode); |
| 19 | extern void ima_inode_free(struct inode *inode); | 22 | extern void ima_inode_free(struct inode *inode); |
| 20 | extern int ima_path_check(struct path *path, int mask); | 23 | extern int ima_path_check(struct path *path, int mask, int update_counts); |
| 21 | extern void ima_file_free(struct file *file); | 24 | extern void ima_file_free(struct file *file); |
| 22 | extern int ima_file_mmap(struct file *file, unsigned long prot); | 25 | extern int ima_file_mmap(struct file *file, unsigned long prot); |
| 23 | extern void ima_shm_check(struct file *file); | 26 | extern void ima_counts_get(struct file *file); |
| 24 | 27 | ||
| 25 | #else | 28 | #else |
| 26 | static inline int ima_bprm_check(struct linux_binprm *bprm) | 29 | static inline int ima_bprm_check(struct linux_binprm *bprm) |
| @@ -38,7 +41,7 @@ static inline void ima_inode_free(struct inode *inode) | |||
| 38 | return; | 41 | return; |
| 39 | } | 42 | } |
| 40 | 43 | ||
| 41 | static inline int ima_path_check(struct path *path, int mask) | 44 | static inline int ima_path_check(struct path *path, int mask, int update_counts) |
| 42 | { | 45 | { |
| 43 | return 0; | 46 | return 0; |
| 44 | } | 47 | } |
| @@ -53,7 +56,7 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot) | |||
| 53 | return 0; | 56 | return 0; |
| 54 | } | 57 | } |
| 55 | 58 | ||
| 56 | static inline void ima_shm_check(struct file *file) | 59 | static inline void ima_counts_get(struct file *file) |
| 57 | { | 60 | { |
| 58 | return; | 61 | return; |
| 59 | } | 62 | } |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d87247d2641f..28b1f30601b5 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -108,6 +108,15 @@ extern struct group_info init_groups; | |||
| 108 | 108 | ||
| 109 | extern struct cred init_cred; | 109 | extern struct cred init_cred; |
| 110 | 110 | ||
| 111 | #ifdef CONFIG_PERF_COUNTERS | ||
| 112 | # define INIT_PERF_COUNTERS(tsk) \ | ||
| 113 | .perf_counter_mutex = \ | ||
| 114 | __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \ | ||
| 115 | .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list), | ||
| 116 | #else | ||
| 117 | # define INIT_PERF_COUNTERS(tsk) | ||
| 118 | #endif | ||
| 119 | |||
| 111 | /* | 120 | /* |
| 112 | * INIT_TASK is used to set up the first task table, touch at | 121 | * INIT_TASK is used to set up the first task table, touch at |
| 113 | * your own risk!. Base=0, limit=0x1fffff (=2MB) | 122 | * your own risk!. Base=0, limit=0x1fffff (=2MB) |
| @@ -145,8 +154,8 @@ extern struct cred init_cred; | |||
| 145 | .group_leader = &tsk, \ | 154 | .group_leader = &tsk, \ |
| 146 | .real_cred = &init_cred, \ | 155 | .real_cred = &init_cred, \ |
| 147 | .cred = &init_cred, \ | 156 | .cred = &init_cred, \ |
| 148 | .cred_exec_mutex = \ | 157 | .cred_guard_mutex = \ |
| 149 | __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \ | 158 | __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \ |
| 150 | .comm = "swapper", \ | 159 | .comm = "swapper", \ |
| 151 | .thread = INIT_THREAD, \ | 160 | .thread = INIT_THREAD, \ |
| 152 | .fs = &init_fs, \ | 161 | .fs = &init_fs, \ |
| @@ -171,9 +180,11 @@ extern struct cred init_cred; | |||
| 171 | }, \ | 180 | }, \ |
| 172 | .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ | 181 | .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ |
| 173 | INIT_IDS \ | 182 | INIT_IDS \ |
| 183 | INIT_PERF_COUNTERS(tsk) \ | ||
| 174 | INIT_TRACE_IRQFLAGS \ | 184 | INIT_TRACE_IRQFLAGS \ |
| 175 | INIT_LOCKDEP \ | 185 | INIT_LOCKDEP \ |
| 176 | INIT_FTRACE_GRAPH \ | 186 | INIT_FTRACE_GRAPH \ |
| 187 | INIT_TRACE_RECURSION \ | ||
| 177 | } | 188 | } |
| 178 | 189 | ||
| 179 | 190 | ||
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 08b987bccf89..dd05434fa45f 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h | |||
| @@ -64,7 +64,7 @@ struct cfq_io_context { | |||
| 64 | * and kmalloc'ed. These could be shared between processes. | 64 | * and kmalloc'ed. These could be shared between processes. |
| 65 | */ | 65 | */ |
| 66 | struct io_context { | 66 | struct io_context { |
| 67 | atomic_t refcount; | 67 | atomic_long_t refcount; |
| 68 | atomic_t nr_tasks; | 68 | atomic_t nr_tasks; |
| 69 | 69 | ||
| 70 | /* all the fields below are protected by this lock */ | 70 | /* all the fields below are protected by this lock */ |
| @@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) | |||
| 91 | * if ref count is zero, don't allow sharing (ioc is going away, it's | 91 | * if ref count is zero, don't allow sharing (ioc is going away, it's |
| 92 | * a race). | 92 | * a race). |
| 93 | */ | 93 | */ |
| 94 | if (ioc && atomic_inc_not_zero(&ioc->refcount)) { | 94 | if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) { |
| 95 | atomic_inc(&ioc->nr_tasks); | 95 | atomic_long_inc(&ioc->refcount); |
| 96 | return ioc; | 96 | return ioc; |
| 97 | } | 97 | } |
| 98 | 98 | ||
diff --git a/include/linux/irq.h b/include/linux/irq.h index eedbb8e5e0cc..1e50c34f0062 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -430,23 +430,19 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); | |||
| 430 | * Returns true if successful (or not required). | 430 | * Returns true if successful (or not required). |
| 431 | */ | 431 | */ |
| 432 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, | 432 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, |
| 433 | bool boot) | 433 | bool boot) |
| 434 | { | 434 | { |
| 435 | #ifdef CONFIG_CPUMASK_OFFSTACK | 435 | gfp_t gfp = GFP_ATOMIC; |
| 436 | if (boot) { | ||
| 437 | alloc_bootmem_cpumask_var(&desc->affinity); | ||
| 438 | 436 | ||
| 439 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 437 | if (boot) |
| 440 | alloc_bootmem_cpumask_var(&desc->pending_mask); | 438 | gfp = GFP_NOWAIT; |
| 441 | #endif | ||
| 442 | return true; | ||
| 443 | } | ||
| 444 | 439 | ||
| 445 | if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node)) | 440 | #ifdef CONFIG_CPUMASK_OFFSTACK |
| 441 | if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) | ||
| 446 | return false; | 442 | return false; |
| 447 | 443 | ||
| 448 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 444 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 449 | if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) { | 445 | if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { |
| 450 | free_cpumask_var(desc->affinity); | 446 | free_cpumask_var(desc->affinity); |
| 451 | return false; | 447 | return false; |
| 452 | } | 448 | } |
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 0c8b89f28a95..a77c6007dc99 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
| @@ -81,7 +81,12 @@ static inline unsigned int kstat_irqs(unsigned int irq) | |||
| 81 | return sum; | 81 | return sum; |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Lock/unlock the current runqueue - to extract task statistics: | ||
| 87 | */ | ||
| 84 | extern unsigned long long task_delta_exec(struct task_struct *); | 88 | extern unsigned long long task_delta_exec(struct task_struct *); |
| 89 | |||
| 85 | extern void account_user_time(struct task_struct *, cputime_t, cputime_t); | 90 | extern void account_user_time(struct task_struct *, cputime_t, cputime_t); |
| 86 | extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); | 91 | extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); |
| 87 | extern void account_steal_time(cputime_t); | 92 | extern void account_steal_time(cputime_t); |
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h new file mode 100644 index 000000000000..7796aed6cdd5 --- /dev/null +++ b/include/linux/kmemleak.h | |||
| @@ -0,0 +1,96 @@ | |||
| 1 | /* | ||
| 2 | * include/linux/kmemleak.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 ARM Limited | ||
| 5 | * Written by Catalin Marinas <catalin.marinas@arm.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 19 | */ | ||
| 20 | |||
| 21 | #ifndef __KMEMLEAK_H | ||
| 22 | #define __KMEMLEAK_H | ||
| 23 | |||
| 24 | #ifdef CONFIG_DEBUG_KMEMLEAK | ||
| 25 | |||
| 26 | extern void kmemleak_init(void); | ||
| 27 | extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, | ||
| 28 | gfp_t gfp); | ||
| 29 | extern void kmemleak_free(const void *ptr); | ||
| 30 | extern void kmemleak_padding(const void *ptr, unsigned long offset, | ||
| 31 | size_t size); | ||
| 32 | extern void kmemleak_not_leak(const void *ptr); | ||
| 33 | extern void kmemleak_ignore(const void *ptr); | ||
| 34 | extern void kmemleak_scan_area(const void *ptr, unsigned long offset, | ||
| 35 | size_t length, gfp_t gfp); | ||
| 36 | extern void kmemleak_no_scan(const void *ptr); | ||
| 37 | |||
| 38 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | ||
| 39 | int min_count, unsigned long flags, | ||
| 40 | gfp_t gfp) | ||
| 41 | { | ||
| 42 | if (!(flags & SLAB_NOLEAKTRACE)) | ||
| 43 | kmemleak_alloc(ptr, size, min_count, gfp); | ||
| 44 | } | ||
| 45 | |||
| 46 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) | ||
| 47 | { | ||
| 48 | if (!(flags & SLAB_NOLEAKTRACE)) | ||
| 49 | kmemleak_free(ptr); | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline void kmemleak_erase(void **ptr) | ||
| 53 | { | ||
| 54 | *ptr = NULL; | ||
| 55 | } | ||
| 56 | |||
| 57 | #else | ||
| 58 | |||
| 59 | static inline void kmemleak_init(void) | ||
| 60 | { | ||
| 61 | } | ||
| 62 | static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count, | ||
| 63 | gfp_t gfp) | ||
| 64 | { | ||
| 65 | } | ||
| 66 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | ||
| 67 | int min_count, unsigned long flags, | ||
| 68 | gfp_t gfp) | ||
| 69 | { | ||
| 70 | } | ||
| 71 | static inline void kmemleak_free(const void *ptr) | ||
| 72 | { | ||
| 73 | } | ||
| 74 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) | ||
| 75 | { | ||
| 76 | } | ||
| 77 | static inline void kmemleak_not_leak(const void *ptr) | ||
| 78 | { | ||
| 79 | } | ||
| 80 | static inline void kmemleak_ignore(const void *ptr) | ||
| 81 | { | ||
| 82 | } | ||
| 83 | static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, | ||
| 84 | size_t length, gfp_t gfp) | ||
| 85 | { | ||
| 86 | } | ||
| 87 | static inline void kmemleak_erase(void **ptr) | ||
| 88 | { | ||
| 89 | } | ||
| 90 | static inline void kmemleak_no_scan(const void *ptr) | ||
| 91 | { | ||
| 92 | } | ||
| 93 | |||
| 94 | #endif /* CONFIG_DEBUG_KMEMLEAK */ | ||
| 95 | |||
| 96 | #endif /* __KMEMLEAK_H */ | ||
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h new file mode 100644 index 000000000000..b616d3930c3b --- /dev/null +++ b/include/linux/kmemtrace.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
| 3 | * | ||
| 4 | * This file is released under GPL version 2. | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef _LINUX_KMEMTRACE_H | ||
| 8 | #define _LINUX_KMEMTRACE_H | ||
| 9 | |||
| 10 | #ifdef __KERNEL__ | ||
| 11 | |||
| 12 | #include <trace/events/kmem.h> | ||
| 13 | |||
| 14 | #ifdef CONFIG_KMEMTRACE | ||
| 15 | extern void kmemtrace_init(void); | ||
| 16 | #else | ||
| 17 | static inline void kmemtrace_init(void) | ||
| 18 | { | ||
| 19 | } | ||
| 20 | #endif | ||
| 21 | |||
| 22 | #endif /* __KERNEL__ */ | ||
| 23 | |||
| 24 | #endif /* _LINUX_KMEMTRACE_H */ | ||
| 25 | |||
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 8cc137911b34..3db5d8d37485 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
| @@ -119,7 +119,7 @@ struct kvm_run { | |||
| 119 | __u32 error_code; | 119 | __u32 error_code; |
| 120 | } ex; | 120 | } ex; |
| 121 | /* KVM_EXIT_IO */ | 121 | /* KVM_EXIT_IO */ |
| 122 | struct kvm_io { | 122 | struct { |
| 123 | #define KVM_EXIT_IO_IN 0 | 123 | #define KVM_EXIT_IO_IN 0 |
| 124 | #define KVM_EXIT_IO_OUT 1 | 124 | #define KVM_EXIT_IO_OUT 1 |
| 125 | __u8 direction; | 125 | __u8 direction; |
| @@ -224,10 +224,10 @@ struct kvm_interrupt { | |||
| 224 | /* for KVM_GET_DIRTY_LOG */ | 224 | /* for KVM_GET_DIRTY_LOG */ |
| 225 | struct kvm_dirty_log { | 225 | struct kvm_dirty_log { |
| 226 | __u32 slot; | 226 | __u32 slot; |
| 227 | __u32 padding; | 227 | __u32 padding1; |
| 228 | union { | 228 | union { |
| 229 | void __user *dirty_bitmap; /* one bit per page */ | 229 | void __user *dirty_bitmap; /* one bit per page */ |
| 230 | __u64 padding; | 230 | __u64 padding2; |
| 231 | }; | 231 | }; |
| 232 | }; | 232 | }; |
| 233 | 233 | ||
| @@ -409,6 +409,10 @@ struct kvm_trace_rec { | |||
| 409 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT | 409 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT |
| 410 | #define KVM_CAP_DEVICE_DEASSIGNMENT 27 | 410 | #define KVM_CAP_DEVICE_DEASSIGNMENT 27 |
| 411 | #endif | 411 | #endif |
| 412 | #ifdef __KVM_HAVE_MSIX | ||
| 413 | #define KVM_CAP_DEVICE_MSIX 28 | ||
| 414 | #endif | ||
| 415 | #define KVM_CAP_ASSIGN_DEV_IRQ 29 | ||
| 412 | /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ | 416 | /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ |
| 413 | #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 | 417 | #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 |
| 414 | 418 | ||
| @@ -482,11 +486,18 @@ struct kvm_irq_routing { | |||
| 482 | #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ | 486 | #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ |
| 483 | struct kvm_assigned_pci_dev) | 487 | struct kvm_assigned_pci_dev) |
| 484 | #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) | 488 | #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) |
| 489 | /* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ | ||
| 485 | #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \ | 490 | #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \ |
| 486 | struct kvm_assigned_irq) | 491 | struct kvm_assigned_irq) |
| 492 | #define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) | ||
| 487 | #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) | 493 | #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) |
| 488 | #define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ | 494 | #define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ |
| 489 | struct kvm_assigned_pci_dev) | 495 | struct kvm_assigned_pci_dev) |
| 496 | #define KVM_ASSIGN_SET_MSIX_NR \ | ||
| 497 | _IOW(KVMIO, 0x73, struct kvm_assigned_msix_nr) | ||
| 498 | #define KVM_ASSIGN_SET_MSIX_ENTRY \ | ||
| 499 | _IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry) | ||
| 500 | #define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) | ||
| 490 | 501 | ||
| 491 | /* | 502 | /* |
| 492 | * ioctls for vcpu fds | 503 | * ioctls for vcpu fds |
| @@ -577,6 +588,8 @@ struct kvm_debug_guest { | |||
| 577 | #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) | 588 | #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) |
| 578 | #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) | 589 | #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) |
| 579 | 590 | ||
| 591 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) | ||
| 592 | |||
| 580 | struct kvm_assigned_pci_dev { | 593 | struct kvm_assigned_pci_dev { |
| 581 | __u32 assigned_dev_id; | 594 | __u32 assigned_dev_id; |
| 582 | __u32 busnr; | 595 | __u32 busnr; |
| @@ -587,6 +600,17 @@ struct kvm_assigned_pci_dev { | |||
| 587 | }; | 600 | }; |
| 588 | }; | 601 | }; |
| 589 | 602 | ||
| 603 | #define KVM_DEV_IRQ_HOST_INTX (1 << 0) | ||
| 604 | #define KVM_DEV_IRQ_HOST_MSI (1 << 1) | ||
| 605 | #define KVM_DEV_IRQ_HOST_MSIX (1 << 2) | ||
| 606 | |||
| 607 | #define KVM_DEV_IRQ_GUEST_INTX (1 << 8) | ||
| 608 | #define KVM_DEV_IRQ_GUEST_MSI (1 << 9) | ||
| 609 | #define KVM_DEV_IRQ_GUEST_MSIX (1 << 10) | ||
| 610 | |||
| 611 | #define KVM_DEV_IRQ_HOST_MASK 0x00ff | ||
| 612 | #define KVM_DEV_IRQ_GUEST_MASK 0xff00 | ||
| 613 | |||
| 590 | struct kvm_assigned_irq { | 614 | struct kvm_assigned_irq { |
| 591 | __u32 assigned_dev_id; | 615 | __u32 assigned_dev_id; |
| 592 | __u32 host_irq; | 616 | __u32 host_irq; |
| @@ -602,9 +626,19 @@ struct kvm_assigned_irq { | |||
| 602 | }; | 626 | }; |
| 603 | }; | 627 | }; |
| 604 | 628 | ||
| 605 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) | ||
| 606 | 629 | ||
| 607 | #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION KVM_DEV_IRQ_ASSIGN_ENABLE_MSI | 630 | struct kvm_assigned_msix_nr { |
| 608 | #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI (1 << 0) | 631 | __u32 assigned_dev_id; |
| 632 | __u16 entry_nr; | ||
| 633 | __u16 padding; | ||
| 634 | }; | ||
| 635 | |||
| 636 | #define KVM_MAX_MSIX_PER_DEV 512 | ||
| 637 | struct kvm_assigned_msix_entry { | ||
| 638 | __u32 assigned_dev_id; | ||
| 639 | __u32 gsi; | ||
| 640 | __u16 entry; /* The index of entry in the MSI-X table */ | ||
| 641 | __u16 padding[3]; | ||
| 642 | }; | ||
| 609 | 643 | ||
| 610 | #endif | 644 | #endif |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 894a56e365e8..aacc5449f586 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -38,6 +38,7 @@ | |||
| 38 | #define KVM_REQ_UNHALT 6 | 38 | #define KVM_REQ_UNHALT 6 |
| 39 | #define KVM_REQ_MMU_SYNC 7 | 39 | #define KVM_REQ_MMU_SYNC 7 |
| 40 | #define KVM_REQ_KVMCLOCK_UPDATE 8 | 40 | #define KVM_REQ_KVMCLOCK_UPDATE 8 |
| 41 | #define KVM_REQ_KICK 9 | ||
| 41 | 42 | ||
| 42 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 | 43 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
| 43 | 44 | ||
| @@ -72,7 +73,6 @@ struct kvm_vcpu { | |||
| 72 | struct mutex mutex; | 73 | struct mutex mutex; |
| 73 | int cpu; | 74 | int cpu; |
| 74 | struct kvm_run *run; | 75 | struct kvm_run *run; |
| 75 | int guest_mode; | ||
| 76 | unsigned long requests; | 76 | unsigned long requests; |
| 77 | unsigned long guest_debug; | 77 | unsigned long guest_debug; |
| 78 | int fpu_active; | 78 | int fpu_active; |
| @@ -298,6 +298,7 @@ int kvm_arch_hardware_setup(void); | |||
| 298 | void kvm_arch_hardware_unsetup(void); | 298 | void kvm_arch_hardware_unsetup(void); |
| 299 | void kvm_arch_check_processor_compat(void *rtn); | 299 | void kvm_arch_check_processor_compat(void *rtn); |
| 300 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); | 300 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); |
| 301 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | ||
| 301 | 302 | ||
| 302 | void kvm_free_physmem(struct kvm *kvm); | 303 | void kvm_free_physmem(struct kvm *kvm); |
| 303 | 304 | ||
| @@ -319,6 +320,13 @@ struct kvm_irq_ack_notifier { | |||
| 319 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); | 320 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); |
| 320 | }; | 321 | }; |
| 321 | 322 | ||
| 323 | #define KVM_ASSIGNED_MSIX_PENDING 0x1 | ||
| 324 | struct kvm_guest_msix_entry { | ||
| 325 | u32 vector; | ||
| 326 | u16 entry; | ||
| 327 | u16 flags; | ||
| 328 | }; | ||
| 329 | |||
| 322 | struct kvm_assigned_dev_kernel { | 330 | struct kvm_assigned_dev_kernel { |
| 323 | struct kvm_irq_ack_notifier ack_notifier; | 331 | struct kvm_irq_ack_notifier ack_notifier; |
| 324 | struct work_struct interrupt_work; | 332 | struct work_struct interrupt_work; |
| @@ -326,18 +334,18 @@ struct kvm_assigned_dev_kernel { | |||
| 326 | int assigned_dev_id; | 334 | int assigned_dev_id; |
| 327 | int host_busnr; | 335 | int host_busnr; |
| 328 | int host_devfn; | 336 | int host_devfn; |
| 337 | unsigned int entries_nr; | ||
| 329 | int host_irq; | 338 | int host_irq; |
| 330 | bool host_irq_disabled; | 339 | bool host_irq_disabled; |
| 340 | struct msix_entry *host_msix_entries; | ||
| 331 | int guest_irq; | 341 | int guest_irq; |
| 332 | #define KVM_ASSIGNED_DEV_GUEST_INTX (1 << 0) | 342 | struct kvm_guest_msix_entry *guest_msix_entries; |
| 333 | #define KVM_ASSIGNED_DEV_GUEST_MSI (1 << 1) | ||
| 334 | #define KVM_ASSIGNED_DEV_HOST_INTX (1 << 8) | ||
| 335 | #define KVM_ASSIGNED_DEV_HOST_MSI (1 << 9) | ||
| 336 | unsigned long irq_requested_type; | 343 | unsigned long irq_requested_type; |
| 337 | int irq_source_id; | 344 | int irq_source_id; |
| 338 | int flags; | 345 | int flags; |
| 339 | struct pci_dev *dev; | 346 | struct pci_dev *dev; |
| 340 | struct kvm *kvm; | 347 | struct kvm *kvm; |
| 348 | spinlock_t assigned_dev_lock; | ||
| 341 | }; | 349 | }; |
| 342 | 350 | ||
| 343 | struct kvm_irq_mask_notifier { | 351 | struct kvm_irq_mask_notifier { |
| @@ -360,6 +368,9 @@ void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian); | |||
| 360 | int kvm_request_irq_source_id(struct kvm *kvm); | 368 | int kvm_request_irq_source_id(struct kvm *kvm); |
| 361 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); | 369 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); |
| 362 | 370 | ||
| 371 | /* For vcpu->arch.iommu_flags */ | ||
| 372 | #define KVM_IOMMU_CACHE_COHERENCY 0x1 | ||
| 373 | |||
| 363 | #ifdef CONFIG_IOMMU_API | 374 | #ifdef CONFIG_IOMMU_API |
| 364 | int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, | 375 | int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, |
| 365 | unsigned long npages); | 376 | unsigned long npages); |
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 2b8318c83e53..fb46efbeabec 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h | |||
| @@ -40,4 +40,31 @@ typedef unsigned long hfn_t; | |||
| 40 | 40 | ||
| 41 | typedef hfn_t pfn_t; | 41 | typedef hfn_t pfn_t; |
| 42 | 42 | ||
| 43 | union kvm_ioapic_redirect_entry { | ||
| 44 | u64 bits; | ||
| 45 | struct { | ||
| 46 | u8 vector; | ||
| 47 | u8 delivery_mode:3; | ||
| 48 | u8 dest_mode:1; | ||
| 49 | u8 delivery_status:1; | ||
| 50 | u8 polarity:1; | ||
| 51 | u8 remote_irr:1; | ||
| 52 | u8 trig_mode:1; | ||
| 53 | u8 mask:1; | ||
| 54 | u8 reserve:7; | ||
| 55 | u8 reserved[4]; | ||
| 56 | u8 dest_id; | ||
| 57 | } fields; | ||
| 58 | }; | ||
| 59 | |||
| 60 | struct kvm_lapic_irq { | ||
| 61 | u32 vector; | ||
| 62 | u32 delivery_mode; | ||
| 63 | u32 dest_mode; | ||
| 64 | u32 level; | ||
| 65 | u32 trig_mode; | ||
| 66 | u32 shorthand; | ||
| 67 | u32 dest_id; | ||
| 68 | }; | ||
| 69 | |||
| 43 | #endif /* __KVM_TYPES_H__ */ | 70 | #endif /* __KVM_TYPES_H__ */ |
diff --git a/include/linux/loop.h b/include/linux/loop.h index 40725447f5e0..66c194e2d9b9 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h | |||
| @@ -56,8 +56,7 @@ struct loop_device { | |||
| 56 | gfp_t old_gfp_mask; | 56 | gfp_t old_gfp_mask; |
| 57 | 57 | ||
| 58 | spinlock_t lo_lock; | 58 | spinlock_t lo_lock; |
| 59 | struct bio *lo_bio; | 59 | struct bio_list lo_bio_list; |
| 60 | struct bio *lo_biotail; | ||
| 61 | int lo_state; | 60 | int lo_state; |
| 62 | struct mutex lo_ctl_mutex; | 61 | struct mutex lo_ctl_mutex; |
| 63 | struct task_struct *lo_thread; | 62 | struct task_struct *lo_thread; |
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h new file mode 100644 index 000000000000..e461b2c3d711 --- /dev/null +++ b/include/linux/lsm_audit.h | |||
| @@ -0,0 +1,111 @@ | |||
| 1 | /* | ||
| 2 | * Common LSM logging functions | ||
| 3 | * Heavily borrowed from selinux/avc.h | ||
| 4 | * | ||
| 5 | * Author : Etienne BASSET <etienne.basset@ensta.org> | ||
| 6 | * | ||
| 7 | * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil> | ||
| 8 | * All BUGS to : Etienne BASSET <etienne.basset@ensta.org> | ||
| 9 | */ | ||
| 10 | #ifndef _LSM_COMMON_LOGGING_ | ||
| 11 | #define _LSM_COMMON_LOGGING_ | ||
| 12 | |||
| 13 | #include <linux/stddef.h> | ||
| 14 | #include <linux/errno.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/kdev_t.h> | ||
| 17 | #include <linux/spinlock.h> | ||
| 18 | #include <linux/init.h> | ||
| 19 | #include <linux/audit.h> | ||
| 20 | #include <linux/in6.h> | ||
| 21 | #include <linux/path.h> | ||
| 22 | #include <linux/key.h> | ||
| 23 | #include <linux/skbuff.h> | ||
| 24 | #include <asm/system.h> | ||
| 25 | |||
| 26 | |||
| 27 | /* Auxiliary data to use in generating the audit record. */ | ||
| 28 | struct common_audit_data { | ||
| 29 | char type; | ||
| 30 | #define LSM_AUDIT_DATA_FS 1 | ||
| 31 | #define LSM_AUDIT_DATA_NET 2 | ||
| 32 | #define LSM_AUDIT_DATA_CAP 3 | ||
| 33 | #define LSM_AUDIT_DATA_IPC 4 | ||
| 34 | #define LSM_AUDIT_DATA_TASK 5 | ||
| 35 | #define LSM_AUDIT_DATA_KEY 6 | ||
| 36 | struct task_struct *tsk; | ||
| 37 | union { | ||
| 38 | struct { | ||
| 39 | struct path path; | ||
| 40 | struct inode *inode; | ||
| 41 | } fs; | ||
| 42 | struct { | ||
| 43 | int netif; | ||
| 44 | struct sock *sk; | ||
| 45 | u16 family; | ||
| 46 | __be16 dport; | ||
| 47 | __be16 sport; | ||
| 48 | union { | ||
| 49 | struct { | ||
| 50 | __be32 daddr; | ||
| 51 | __be32 saddr; | ||
| 52 | } v4; | ||
| 53 | struct { | ||
| 54 | struct in6_addr daddr; | ||
| 55 | struct in6_addr saddr; | ||
| 56 | } v6; | ||
| 57 | } fam; | ||
| 58 | } net; | ||
| 59 | int cap; | ||
| 60 | int ipc_id; | ||
| 61 | struct task_struct *tsk; | ||
| 62 | #ifdef CONFIG_KEYS | ||
| 63 | struct { | ||
| 64 | key_serial_t key; | ||
| 65 | char *key_desc; | ||
| 66 | } key_struct; | ||
| 67 | #endif | ||
| 68 | } u; | ||
| 69 | const char *function; | ||
| 70 | /* this union contains LSM specific data */ | ||
| 71 | union { | ||
| 72 | /* SMACK data */ | ||
| 73 | struct smack_audit_data { | ||
| 74 | char *subject; | ||
| 75 | char *object; | ||
| 76 | char *request; | ||
| 77 | int result; | ||
| 78 | } smack_audit_data; | ||
| 79 | /* SELinux data */ | ||
| 80 | struct { | ||
| 81 | u32 ssid; | ||
| 82 | u32 tsid; | ||
| 83 | u16 tclass; | ||
| 84 | u32 requested; | ||
| 85 | u32 audited; | ||
| 86 | struct av_decision *avd; | ||
| 87 | int result; | ||
| 88 | } selinux_audit_data; | ||
| 89 | } lsm_priv; | ||
| 90 | /* these callback will be implemented by a specific LSM */ | ||
| 91 | void (*lsm_pre_audit)(struct audit_buffer *, void *); | ||
| 92 | void (*lsm_post_audit)(struct audit_buffer *, void *); | ||
| 93 | }; | ||
| 94 | |||
| 95 | #define v4info fam.v4 | ||
| 96 | #define v6info fam.v6 | ||
| 97 | |||
| 98 | int ipv4_skb_to_auditdata(struct sk_buff *skb, | ||
| 99 | struct common_audit_data *ad, u8 *proto); | ||
| 100 | |||
| 101 | int ipv6_skb_to_auditdata(struct sk_buff *skb, | ||
| 102 | struct common_audit_data *ad, u8 *proto); | ||
| 103 | |||
| 104 | /* Initialize an LSM audit data structure. */ | ||
| 105 | #define COMMON_AUDIT_DATA_INIT(_d, _t) \ | ||
| 106 | { memset((_d), 0, sizeof(struct common_audit_data)); \ | ||
| 107 | (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; } | ||
| 108 | |||
| 109 | void common_lsm_audit(struct common_audit_data *a); | ||
| 110 | |||
| 111 | #endif | ||
diff --git a/include/linux/magic.h b/include/linux/magic.h index 5b4e28bcb788..927138cf3050 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #define DEBUGFS_MAGIC 0x64626720 | 9 | #define DEBUGFS_MAGIC 0x64626720 |
| 10 | #define SYSFS_MAGIC 0x62656572 | 10 | #define SYSFS_MAGIC 0x62656572 |
| 11 | #define SECURITYFS_MAGIC 0x73636673 | 11 | #define SECURITYFS_MAGIC 0x73636673 |
| 12 | #define SELINUX_MAGIC 0xf97cff8c | ||
| 12 | #define TMPFS_MAGIC 0x01021994 | 13 | #define TMPFS_MAGIC 0x01021994 |
| 13 | #define SQUASHFS_MAGIC 0x73717368 | 14 | #define SQUASHFS_MAGIC 0x73717368 |
| 14 | #define EFS_SUPER_MAGIC 0x414A53 | 15 | #define EFS_SUPER_MAGIC 0x414A53 |
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h deleted file mode 100644 index 1f76b1ebf627..000000000000 --- a/include/linux/mg_disk.h +++ /dev/null | |||
| @@ -1,206 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * include/linux/mg_disk.c | ||
| 3 | * | ||
| 4 | * Support for the mGine m[g]flash IO mode. | ||
| 5 | * Based on legacy hd.c | ||
| 6 | * | ||
| 7 | * (c) 2008 mGine Co.,LTD | ||
| 8 | * (c) 2008 unsik Kim <donari75@gmail.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License version 2 as | ||
| 12 | * published by the Free Software Foundation. | ||
| 13 | */ | ||
| 14 | |||
| 15 | #ifndef __MG_DISK_H__ | ||
| 16 | #define __MG_DISK_H__ | ||
| 17 | |||
| 18 | #include <linux/blkdev.h> | ||
| 19 | #include <linux/ata.h> | ||
| 20 | |||
| 21 | /* name for block device */ | ||
| 22 | #define MG_DISK_NAME "mgd" | ||
| 23 | /* name for platform device */ | ||
| 24 | #define MG_DEV_NAME "mg_disk" | ||
| 25 | |||
| 26 | #define MG_DISK_MAJ 0 | ||
| 27 | #define MG_DISK_MAX_PART 16 | ||
| 28 | #define MG_SECTOR_SIZE 512 | ||
| 29 | #define MG_MAX_SECTS 256 | ||
| 30 | |||
| 31 | /* Register offsets */ | ||
| 32 | #define MG_BUFF_OFFSET 0x8000 | ||
| 33 | #define MG_STORAGE_BUFFER_SIZE 0x200 | ||
| 34 | #define MG_REG_OFFSET 0xC000 | ||
| 35 | #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ | ||
| 36 | #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ | ||
| 37 | #define MG_REG_SECT_CNT (MG_REG_OFFSET + 4) | ||
| 38 | #define MG_REG_SECT_NUM (MG_REG_OFFSET + 6) | ||
| 39 | #define MG_REG_CYL_LOW (MG_REG_OFFSET + 8) | ||
| 40 | #define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA) | ||
| 41 | #define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC) | ||
| 42 | #define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */ | ||
| 43 | #define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */ | ||
| 44 | #define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10) | ||
| 45 | #define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12) | ||
| 46 | |||
| 47 | /* "Drive Select/Head Register" bit values */ | ||
| 48 | #define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */ | ||
| 49 | #define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON) | ||
| 50 | #define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON) | ||
| 51 | #define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON) | ||
| 52 | |||
| 53 | |||
| 54 | /* "Device Control Register" bit values */ | ||
| 55 | #define MG_REG_CTRL_INTR_ENABLE 0x0 | ||
| 56 | #define MG_REG_CTRL_INTR_DISABLE (0x1<<1) | ||
| 57 | #define MG_REG_CTRL_RESET (0x1<<2) | ||
| 58 | #define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0 | ||
| 59 | #define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4) | ||
| 60 | #define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0 | ||
| 61 | #define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5) | ||
| 62 | #define MG_REG_CTRL_DPD_DISABLE 0x0 | ||
| 63 | #define MG_REG_CTRL_DPD_ENABLE (0x1<<6) | ||
| 64 | |||
| 65 | /* Status register bit */ | ||
| 66 | /* error bit in status register */ | ||
| 67 | #define MG_REG_STATUS_BIT_ERROR 0x01 | ||
| 68 | /* corrected error in status register */ | ||
| 69 | #define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04 | ||
| 70 | /* data request bit in status register */ | ||
| 71 | #define MG_REG_STATUS_BIT_DATA_REQ 0x08 | ||
| 72 | /* DSC - Drive Seek Complete */ | ||
| 73 | #define MG_REG_STATUS_BIT_SEEK_DONE 0x10 | ||
| 74 | /* DWF - Drive Write Fault */ | ||
| 75 | #define MG_REG_STATUS_BIT_WRITE_FAULT 0x20 | ||
| 76 | #define MG_REG_STATUS_BIT_READY 0x40 | ||
| 77 | #define MG_REG_STATUS_BIT_BUSY 0x80 | ||
| 78 | |||
| 79 | /* handy status */ | ||
| 80 | #define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE) | ||
| 81 | #define MG_READY_OK(s) (((s) & (MG_STAT_READY | \ | ||
| 82 | (MG_REG_STATUS_BIT_BUSY | \ | ||
| 83 | MG_REG_STATUS_BIT_WRITE_FAULT | \ | ||
| 84 | MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY) | ||
| 85 | |||
| 86 | /* Error register */ | ||
| 87 | #define MG_REG_ERR_AMNF 0x01 | ||
| 88 | #define MG_REG_ERR_ABRT 0x04 | ||
| 89 | #define MG_REG_ERR_IDNF 0x10 | ||
| 90 | #define MG_REG_ERR_UNC 0x40 | ||
| 91 | #define MG_REG_ERR_BBK 0x80 | ||
| 92 | |||
| 93 | /* error code for others */ | ||
| 94 | #define MG_ERR_NONE 0 | ||
| 95 | #define MG_ERR_TIMEOUT 0x100 | ||
| 96 | #define MG_ERR_INIT_STAT 0x101 | ||
| 97 | #define MG_ERR_TRANSLATION 0x102 | ||
| 98 | #define MG_ERR_CTRL_RST 0x103 | ||
| 99 | #define MG_ERR_INV_STAT 0x104 | ||
| 100 | #define MG_ERR_RSTOUT 0x105 | ||
| 101 | |||
| 102 | #define MG_MAX_ERRORS 6 /* Max read/write errors */ | ||
| 103 | |||
| 104 | /* command */ | ||
| 105 | #define MG_CMD_RD 0x20 | ||
| 106 | #define MG_CMD_WR 0x30 | ||
| 107 | #define MG_CMD_SLEEP 0x99 | ||
| 108 | #define MG_CMD_WAKEUP 0xC3 | ||
| 109 | #define MG_CMD_ID 0xEC | ||
| 110 | #define MG_CMD_WR_CONF 0x3C | ||
| 111 | #define MG_CMD_RD_CONF 0x40 | ||
| 112 | |||
| 113 | /* operation mode */ | ||
| 114 | #define MG_OP_CASCADE (1 << 0) | ||
| 115 | #define MG_OP_CASCADE_SYNC_RD (1 << 1) | ||
| 116 | #define MG_OP_CASCADE_SYNC_WR (1 << 2) | ||
| 117 | #define MG_OP_INTERLEAVE (1 << 3) | ||
| 118 | |||
| 119 | /* synchronous */ | ||
| 120 | #define MG_BURST_LAT_4 (3 << 4) | ||
| 121 | #define MG_BURST_LAT_5 (4 << 4) | ||
| 122 | #define MG_BURST_LAT_6 (5 << 4) | ||
| 123 | #define MG_BURST_LAT_7 (6 << 4) | ||
| 124 | #define MG_BURST_LAT_8 (7 << 4) | ||
| 125 | #define MG_BURST_LEN_4 (1 << 1) | ||
| 126 | #define MG_BURST_LEN_8 (2 << 1) | ||
| 127 | #define MG_BURST_LEN_16 (3 << 1) | ||
| 128 | #define MG_BURST_LEN_32 (4 << 1) | ||
| 129 | #define MG_BURST_LEN_CONT (0 << 1) | ||
| 130 | |||
| 131 | /* timeout value (unit: ms) */ | ||
| 132 | #define MG_TMAX_CONF_TO_CMD 1 | ||
| 133 | #define MG_TMAX_WAIT_RD_DRQ 10 | ||
| 134 | #define MG_TMAX_WAIT_WR_DRQ 500 | ||
| 135 | #define MG_TMAX_RST_TO_BUSY 10 | ||
| 136 | #define MG_TMAX_HDRST_TO_RDY 500 | ||
| 137 | #define MG_TMAX_SWRST_TO_RDY 500 | ||
| 138 | #define MG_TMAX_RSTOUT 3000 | ||
| 139 | |||
| 140 | /* device attribution */ | ||
| 141 | /* use mflash as boot device */ | ||
| 142 | #define MG_BOOT_DEV (1 << 0) | ||
| 143 | /* use mflash as storage device */ | ||
| 144 | #define MG_STORAGE_DEV (1 << 1) | ||
| 145 | /* same as MG_STORAGE_DEV, but bootloader already done reset sequence */ | ||
| 146 | #define MG_STORAGE_DEV_SKIP_RST (1 << 2) | ||
| 147 | |||
| 148 | #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST) | ||
| 149 | |||
| 150 | /* names of GPIO resource */ | ||
| 151 | #define MG_RST_PIN "mg_rst" | ||
| 152 | /* except MG_BOOT_DEV, reset-out pin should be assigned */ | ||
| 153 | #define MG_RSTOUT_PIN "mg_rstout" | ||
| 154 | |||
| 155 | /* private driver data */ | ||
| 156 | struct mg_drv_data { | ||
| 157 | /* disk resource */ | ||
| 158 | u32 use_polling; | ||
| 159 | |||
| 160 | /* device attribution */ | ||
| 161 | u32 dev_attr; | ||
| 162 | |||
| 163 | /* internally used */ | ||
| 164 | struct mg_host *host; | ||
| 165 | }; | ||
| 166 | |||
| 167 | /* main structure for mflash driver */ | ||
| 168 | struct mg_host { | ||
| 169 | struct device *dev; | ||
| 170 | |||
| 171 | struct request_queue *breq; | ||
| 172 | spinlock_t lock; | ||
| 173 | struct gendisk *gd; | ||
| 174 | |||
| 175 | struct timer_list timer; | ||
| 176 | void (*mg_do_intr) (struct mg_host *); | ||
| 177 | |||
| 178 | u16 id[ATA_ID_WORDS]; | ||
| 179 | |||
| 180 | u16 cyls; | ||
| 181 | u16 heads; | ||
| 182 | u16 sectors; | ||
| 183 | u32 n_sectors; | ||
| 184 | u32 nres_sectors; | ||
| 185 | |||
| 186 | void __iomem *dev_base; | ||
| 187 | unsigned int irq; | ||
| 188 | unsigned int rst; | ||
| 189 | unsigned int rstout; | ||
| 190 | |||
| 191 | u32 major; | ||
| 192 | u32 error; | ||
| 193 | }; | ||
| 194 | |||
| 195 | /* | ||
| 196 | * Debugging macro and defines | ||
| 197 | */ | ||
| 198 | #undef DO_MG_DEBUG | ||
| 199 | #ifdef DO_MG_DEBUG | ||
| 200 | # define MG_DBG(fmt, args...) \ | ||
| 201 | printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args) | ||
| 202 | #else /* CONFIG_MG_DEBUG */ | ||
| 203 | # define MG_DBG(fmt, args...) do { } while (0) | ||
| 204 | #endif /* CONFIG_MG_DEBUG */ | ||
| 205 | |||
| 206 | #endif | ||
diff --git a/include/linux/mm.h b/include/linux/mm.h index bff1f0d475c7..ad613ed66ab0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -19,6 +19,7 @@ struct anon_vma; | |||
| 19 | struct file_ra_state; | 19 | struct file_ra_state; |
| 20 | struct user_struct; | 20 | struct user_struct; |
| 21 | struct writeback_control; | 21 | struct writeback_control; |
| 22 | struct rlimit; | ||
| 22 | 23 | ||
| 23 | #ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */ | 24 | #ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */ |
| 24 | extern unsigned long max_mapnr; | 25 | extern unsigned long max_mapnr; |
| @@ -580,12 +581,10 @@ static inline void set_page_links(struct page *page, enum zone_type zone, | |||
| 580 | */ | 581 | */ |
| 581 | static inline unsigned long round_hint_to_min(unsigned long hint) | 582 | static inline unsigned long round_hint_to_min(unsigned long hint) |
| 582 | { | 583 | { |
| 583 | #ifdef CONFIG_SECURITY | ||
| 584 | hint &= PAGE_MASK; | 584 | hint &= PAGE_MASK; |
| 585 | if (((void *)hint != NULL) && | 585 | if (((void *)hint != NULL) && |
| 586 | (hint < mmap_min_addr)) | 586 | (hint < mmap_min_addr)) |
| 587 | return PAGE_ALIGN(mmap_min_addr); | 587 | return PAGE_ALIGN(mmap_min_addr); |
| 588 | #endif | ||
| 589 | return hint; | 588 | return hint; |
| 590 | } | 589 | } |
| 591 | 590 | ||
| @@ -1031,8 +1030,6 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn, | |||
| 1031 | unsigned long end_pfn); | 1030 | unsigned long end_pfn); |
| 1032 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, | 1031 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, |
| 1033 | unsigned long end_pfn); | 1032 | unsigned long end_pfn); |
| 1034 | extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn, | ||
| 1035 | unsigned long end_pfn); | ||
| 1036 | extern void remove_all_active_ranges(void); | 1033 | extern void remove_all_active_ranges(void); |
| 1037 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, | 1034 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, |
| 1038 | unsigned long end_pfn); | 1035 | unsigned long end_pfn); |
| @@ -1319,8 +1316,8 @@ int vmemmap_populate_basepages(struct page *start_page, | |||
| 1319 | int vmemmap_populate(struct page *start_page, unsigned long pages, int node); | 1316 | int vmemmap_populate(struct page *start_page, unsigned long pages, int node); |
| 1320 | void vmemmap_populate_print_last(void); | 1317 | void vmemmap_populate_print_last(void); |
| 1321 | 1318 | ||
| 1322 | extern void *alloc_locked_buffer(size_t size); | 1319 | extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim, |
| 1323 | extern void free_locked_buffer(void *buffer, size_t size); | 1320 | size_t size); |
| 1324 | extern void release_locked_buffer(void *buffer, size_t size); | 1321 | extern void refund_locked_memory(struct mm_struct *mm, size_t size); |
| 1325 | #endif /* __KERNEL__ */ | 1322 | #endif /* __KERNEL__ */ |
| 1326 | #endif /* _LINUX_MM_H */ | 1323 | #endif /* _LINUX_MM_H */ |
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h index 3d1b7bde1283..97491f78b08c 100644 --- a/include/linux/mmiotrace.h +++ b/include/linux/mmiotrace.h | |||
| @@ -30,6 +30,8 @@ extern unsigned int kmmio_count; | |||
| 30 | 30 | ||
| 31 | extern int register_kmmio_probe(struct kmmio_probe *p); | 31 | extern int register_kmmio_probe(struct kmmio_probe *p); |
| 32 | extern void unregister_kmmio_probe(struct kmmio_probe *p); | 32 | extern void unregister_kmmio_probe(struct kmmio_probe *p); |
| 33 | extern int kmmio_init(void); | ||
| 34 | extern void kmmio_cleanup(void); | ||
| 33 | 35 | ||
| 34 | #ifdef CONFIG_MMIOTRACE | 36 | #ifdef CONFIG_MMIOTRACE |
| 35 | /* kmmio is active by some kmmio_probes? */ | 37 | /* kmmio is active by some kmmio_probes? */ |
diff --git a/include/linux/module.h b/include/linux/module.h index 627ac082e2a6..a8f2c0aa4c32 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
| @@ -337,6 +337,14 @@ struct module | |||
| 337 | const char **trace_bprintk_fmt_start; | 337 | const char **trace_bprintk_fmt_start; |
| 338 | unsigned int num_trace_bprintk_fmt; | 338 | unsigned int num_trace_bprintk_fmt; |
| 339 | #endif | 339 | #endif |
| 340 | #ifdef CONFIG_EVENT_TRACING | ||
| 341 | struct ftrace_event_call *trace_events; | ||
| 342 | unsigned int num_trace_events; | ||
| 343 | #endif | ||
| 344 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | ||
| 345 | unsigned long *ftrace_callsites; | ||
| 346 | unsigned int num_ftrace_callsites; | ||
| 347 | #endif | ||
| 340 | 348 | ||
| 341 | #ifdef CONFIG_MODULE_UNLOAD | 349 | #ifdef CONFIG_MODULE_UNLOAD |
| 342 | /* What modules depend on me? */ | 350 | /* What modules depend on me? */ |
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 3069ec7e0ab8..878cab4f5fcc 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
| @@ -150,5 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); | |||
| 150 | */ | 150 | */ |
| 151 | extern int mutex_trylock(struct mutex *lock); | 151 | extern int mutex_trylock(struct mutex *lock); |
| 152 | extern void mutex_unlock(struct mutex *lock); | 152 | extern void mutex_unlock(struct mutex *lock); |
| 153 | extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); | ||
| 153 | 154 | ||
| 154 | #endif | 155 | #endif |
diff --git a/include/linux/parport.h b/include/linux/parport.h index e1f83c5065c5..38a423ed3c01 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h | |||
| @@ -324,6 +324,10 @@ struct parport { | |||
| 324 | int spintime; | 324 | int spintime; |
| 325 | atomic_t ref_count; | 325 | atomic_t ref_count; |
| 326 | 326 | ||
| 327 | unsigned long devflags; | ||
| 328 | #define PARPORT_DEVPROC_REGISTERED 0 | ||
| 329 | struct pardevice *proc_device; /* Currently register proc device */ | ||
| 330 | |||
| 327 | struct list_head full_list; | 331 | struct list_head full_list; |
| 328 | struct parport *slaves[3]; | 332 | struct parport *slaves[3]; |
| 329 | }; | 333 | }; |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0f71812d67d3..d7d1c41a0b17 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
| @@ -1996,10 +1996,12 @@ | |||
| 1996 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 | 1996 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 |
| 1997 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C | 1997 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C |
| 1998 | #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 | 1998 | #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 |
| 1999 | #define PCI_DEVICE_ID_OXSEMI_C950 0x950B | ||
| 1999 | #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 | 2000 | #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 |
| 2000 | #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 | 2001 | #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 |
| 2001 | #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 | 2002 | #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 |
| 2002 | #define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 | 2003 | #define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 |
| 2004 | #define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001 | ||
| 2003 | 2005 | ||
| 2004 | #define PCI_VENDOR_ID_CHELSIO 0x1425 | 2006 | #define PCI_VENDOR_ID_CHELSIO 0x1425 |
| 2005 | 2007 | ||
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 1581ff235c7e..26fd9d12f050 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
| @@ -86,7 +86,12 @@ struct percpu_data { | |||
| 86 | void *ptrs[1]; | 86 | void *ptrs[1]; |
| 87 | }; | 87 | }; |
| 88 | 88 | ||
| 89 | /* pointer disguising messes up the kmemleak objects tracking */ | ||
| 90 | #ifndef CONFIG_DEBUG_KMEMLEAK | ||
| 89 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) | 91 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) |
| 92 | #else | ||
| 93 | #define __percpu_disguise(pdata) (struct percpu_data *)(pdata) | ||
| 94 | #endif | ||
| 90 | 95 | ||
| 91 | #define per_cpu_ptr(ptr, cpu) \ | 96 | #define per_cpu_ptr(ptr, cpu) \ |
| 92 | ({ \ | 97 | ({ \ |
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h new file mode 100644 index 000000000000..6e133954e2e4 --- /dev/null +++ b/include/linux/perf_counter.h | |||
| @@ -0,0 +1,697 @@ | |||
| 1 | /* | ||
| 2 | * Performance counters: | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | ||
| 5 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | ||
| 6 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra | ||
| 7 | * | ||
| 8 | * Data type definitions, declarations, prototypes. | ||
| 9 | * | ||
| 10 | * Started by: Thomas Gleixner and Ingo Molnar | ||
| 11 | * | ||
| 12 | * For licencing details see kernel-base/COPYING | ||
| 13 | */ | ||
| 14 | #ifndef _LINUX_PERF_COUNTER_H | ||
| 15 | #define _LINUX_PERF_COUNTER_H | ||
| 16 | |||
| 17 | #include <linux/types.h> | ||
| 18 | #include <linux/ioctl.h> | ||
| 19 | #include <asm/byteorder.h> | ||
| 20 | |||
| 21 | /* | ||
| 22 | * User-space ABI bits: | ||
| 23 | */ | ||
| 24 | |||
| 25 | /* | ||
| 26 | * attr.type | ||
| 27 | */ | ||
| 28 | enum perf_type_id { | ||
| 29 | PERF_TYPE_HARDWARE = 0, | ||
| 30 | PERF_TYPE_SOFTWARE = 1, | ||
| 31 | PERF_TYPE_TRACEPOINT = 2, | ||
| 32 | PERF_TYPE_HW_CACHE = 3, | ||
| 33 | PERF_TYPE_RAW = 4, | ||
| 34 | |||
| 35 | PERF_TYPE_MAX, /* non-ABI */ | ||
| 36 | }; | ||
| 37 | |||
| 38 | /* | ||
| 39 | * Generalized performance counter event types, used by the | ||
| 40 | * attr.event_id parameter of the sys_perf_counter_open() | ||
| 41 | * syscall: | ||
| 42 | */ | ||
| 43 | enum perf_hw_id { | ||
| 44 | /* | ||
| 45 | * Common hardware events, generalized by the kernel: | ||
| 46 | */ | ||
| 47 | PERF_COUNT_HW_CPU_CYCLES = 0, | ||
| 48 | PERF_COUNT_HW_INSTRUCTIONS = 1, | ||
| 49 | PERF_COUNT_HW_CACHE_REFERENCES = 2, | ||
| 50 | PERF_COUNT_HW_CACHE_MISSES = 3, | ||
| 51 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | ||
| 52 | PERF_COUNT_HW_BRANCH_MISSES = 5, | ||
| 53 | PERF_COUNT_HW_BUS_CYCLES = 6, | ||
| 54 | |||
| 55 | PERF_COUNT_HW_MAX, /* non-ABI */ | ||
| 56 | }; | ||
| 57 | |||
| 58 | /* | ||
| 59 | * Generalized hardware cache counters: | ||
| 60 | * | ||
| 61 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x | ||
| 62 | * { read, write, prefetch } x | ||
| 63 | * { accesses, misses } | ||
| 64 | */ | ||
| 65 | enum perf_hw_cache_id { | ||
| 66 | PERF_COUNT_HW_CACHE_L1D = 0, | ||
| 67 | PERF_COUNT_HW_CACHE_L1I = 1, | ||
| 68 | PERF_COUNT_HW_CACHE_LL = 2, | ||
| 69 | PERF_COUNT_HW_CACHE_DTLB = 3, | ||
| 70 | PERF_COUNT_HW_CACHE_ITLB = 4, | ||
| 71 | PERF_COUNT_HW_CACHE_BPU = 5, | ||
| 72 | |||
| 73 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ | ||
| 74 | }; | ||
| 75 | |||
| 76 | enum perf_hw_cache_op_id { | ||
| 77 | PERF_COUNT_HW_CACHE_OP_READ = 0, | ||
| 78 | PERF_COUNT_HW_CACHE_OP_WRITE = 1, | ||
| 79 | PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, | ||
| 80 | |||
| 81 | PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ | ||
| 82 | }; | ||
| 83 | |||
| 84 | enum perf_hw_cache_op_result_id { | ||
| 85 | PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, | ||
| 86 | PERF_COUNT_HW_CACHE_RESULT_MISS = 1, | ||
| 87 | |||
| 88 | PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ | ||
| 89 | }; | ||
| 90 | |||
| 91 | /* | ||
| 92 | * Special "software" counters provided by the kernel, even if the hardware | ||
| 93 | * does not support performance counters. These counters measure various | ||
| 94 | * physical and sw events of the kernel (and allow the profiling of them as | ||
| 95 | * well): | ||
| 96 | */ | ||
| 97 | enum perf_sw_ids { | ||
| 98 | PERF_COUNT_SW_CPU_CLOCK = 0, | ||
| 99 | PERF_COUNT_SW_TASK_CLOCK = 1, | ||
| 100 | PERF_COUNT_SW_PAGE_FAULTS = 2, | ||
| 101 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, | ||
| 102 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | ||
| 103 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | ||
| 104 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | ||
| 105 | |||
| 106 | PERF_COUNT_SW_MAX, /* non-ABI */ | ||
| 107 | }; | ||
| 108 | |||
| 109 | /* | ||
| 110 | * Bits that can be set in attr.sample_type to request information | ||
| 111 | * in the overflow packets. | ||
| 112 | */ | ||
| 113 | enum perf_counter_sample_format { | ||
| 114 | PERF_SAMPLE_IP = 1U << 0, | ||
| 115 | PERF_SAMPLE_TID = 1U << 1, | ||
| 116 | PERF_SAMPLE_TIME = 1U << 2, | ||
| 117 | PERF_SAMPLE_ADDR = 1U << 3, | ||
| 118 | PERF_SAMPLE_GROUP = 1U << 4, | ||
| 119 | PERF_SAMPLE_CALLCHAIN = 1U << 5, | ||
| 120 | PERF_SAMPLE_ID = 1U << 6, | ||
| 121 | PERF_SAMPLE_CPU = 1U << 7, | ||
| 122 | PERF_SAMPLE_PERIOD = 1U << 8, | ||
| 123 | }; | ||
| 124 | |||
| 125 | /* | ||
| 126 | * Bits that can be set in attr.read_format to request that | ||
| 127 | * reads on the counter should return the indicated quantities, | ||
| 128 | * in increasing order of bit value, after the counter value. | ||
| 129 | */ | ||
| 130 | enum perf_counter_read_format { | ||
| 131 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, | ||
| 132 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, | ||
| 133 | PERF_FORMAT_ID = 1U << 2, | ||
| 134 | }; | ||
| 135 | |||
| 136 | /* | ||
| 137 | * Hardware event to monitor via a performance monitoring counter: | ||
| 138 | */ | ||
| 139 | struct perf_counter_attr { | ||
| 140 | /* | ||
| 141 | * Major type: hardware/software/tracepoint/etc. | ||
| 142 | */ | ||
| 143 | __u32 type; | ||
| 144 | __u32 __reserved_1; | ||
| 145 | |||
| 146 | /* | ||
| 147 | * Type specific configuration information. | ||
| 148 | */ | ||
| 149 | __u64 config; | ||
| 150 | |||
| 151 | union { | ||
| 152 | __u64 sample_period; | ||
| 153 | __u64 sample_freq; | ||
| 154 | }; | ||
| 155 | |||
| 156 | __u64 sample_type; | ||
| 157 | __u64 read_format; | ||
| 158 | |||
| 159 | __u64 disabled : 1, /* off by default */ | ||
| 160 | inherit : 1, /* children inherit it */ | ||
| 161 | pinned : 1, /* must always be on PMU */ | ||
| 162 | exclusive : 1, /* only group on PMU */ | ||
| 163 | exclude_user : 1, /* don't count user */ | ||
| 164 | exclude_kernel : 1, /* ditto kernel */ | ||
| 165 | exclude_hv : 1, /* ditto hypervisor */ | ||
| 166 | exclude_idle : 1, /* don't count when idle */ | ||
| 167 | mmap : 1, /* include mmap data */ | ||
| 168 | comm : 1, /* include comm data */ | ||
| 169 | freq : 1, /* use freq, not period */ | ||
| 170 | |||
| 171 | __reserved_2 : 53; | ||
| 172 | |||
| 173 | __u32 wakeup_events; /* wakeup every n events */ | ||
| 174 | __u32 __reserved_3; | ||
| 175 | |||
| 176 | __u64 __reserved_4; | ||
| 177 | }; | ||
| 178 | |||
| 179 | /* | ||
| 180 | * Ioctls that can be done on a perf counter fd: | ||
| 181 | */ | ||
| 182 | #define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) | ||
| 183 | #define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) | ||
| 184 | #define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) | ||
| 185 | #define PERF_COUNTER_IOC_RESET _IO ('$', 3) | ||
| 186 | #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) | ||
| 187 | |||
| 188 | enum perf_counter_ioc_flags { | ||
| 189 | PERF_IOC_FLAG_GROUP = 1U << 0, | ||
| 190 | }; | ||
| 191 | |||
| 192 | /* | ||
| 193 | * Structure of the page that can be mapped via mmap | ||
| 194 | */ | ||
| 195 | struct perf_counter_mmap_page { | ||
| 196 | __u32 version; /* version number of this structure */ | ||
| 197 | __u32 compat_version; /* lowest version this is compat with */ | ||
| 198 | |||
| 199 | /* | ||
| 200 | * Bits needed to read the hw counters in user-space. | ||
| 201 | * | ||
| 202 | * u32 seq; | ||
| 203 | * s64 count; | ||
| 204 | * | ||
| 205 | * do { | ||
| 206 | * seq = pc->lock; | ||
| 207 | * | ||
| 208 | * barrier() | ||
| 209 | * if (pc->index) { | ||
| 210 | * count = pmc_read(pc->index - 1); | ||
| 211 | * count += pc->offset; | ||
| 212 | * } else | ||
| 213 | * goto regular_read; | ||
| 214 | * | ||
| 215 | * barrier(); | ||
| 216 | * } while (pc->lock != seq); | ||
| 217 | * | ||
| 218 | * NOTE: for obvious reason this only works on self-monitoring | ||
| 219 | * processes. | ||
| 220 | */ | ||
| 221 | __u32 lock; /* seqlock for synchronization */ | ||
| 222 | __u32 index; /* hardware counter identifier */ | ||
| 223 | __s64 offset; /* add to hardware counter value */ | ||
| 224 | |||
| 225 | /* | ||
| 226 | * Control data for the mmap() data buffer. | ||
| 227 | * | ||
| 228 | * User-space reading this value should issue an rmb(), on SMP capable | ||
| 229 | * platforms, after reading this value -- see perf_counter_wakeup(). | ||
| 230 | */ | ||
| 231 | __u64 data_head; /* head in the data section */ | ||
| 232 | }; | ||
| 233 | |||
| 234 | #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) | ||
| 235 | #define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) | ||
| 236 | #define PERF_EVENT_MISC_KERNEL (1 << 0) | ||
| 237 | #define PERF_EVENT_MISC_USER (2 << 0) | ||
| 238 | #define PERF_EVENT_MISC_HYPERVISOR (3 << 0) | ||
| 239 | #define PERF_EVENT_MISC_OVERFLOW (1 << 2) | ||
| 240 | |||
| 241 | struct perf_event_header { | ||
| 242 | __u32 type; | ||
| 243 | __u16 misc; | ||
| 244 | __u16 size; | ||
| 245 | }; | ||
| 246 | |||
| 247 | enum perf_event_type { | ||
| 248 | |||
| 249 | /* | ||
| 250 | * The MMAP events record the PROT_EXEC mappings so that we can | ||
| 251 | * correlate userspace IPs to code. They have the following structure: | ||
| 252 | * | ||
| 253 | * struct { | ||
| 254 | * struct perf_event_header header; | ||
| 255 | * | ||
| 256 | * u32 pid, tid; | ||
| 257 | * u64 addr; | ||
| 258 | * u64 len; | ||
| 259 | * u64 pgoff; | ||
| 260 | * char filename[]; | ||
| 261 | * }; | ||
| 262 | */ | ||
| 263 | PERF_EVENT_MMAP = 1, | ||
| 264 | |||
| 265 | /* | ||
| 266 | * struct { | ||
| 267 | * struct perf_event_header header; | ||
| 268 | * | ||
| 269 | * u32 pid, tid; | ||
| 270 | * char comm[]; | ||
| 271 | * }; | ||
| 272 | */ | ||
| 273 | PERF_EVENT_COMM = 3, | ||
| 274 | |||
| 275 | /* | ||
| 276 | * struct { | ||
| 277 | * struct perf_event_header header; | ||
| 278 | * u64 time; | ||
| 279 | * u64 id; | ||
| 280 | * u64 sample_period; | ||
| 281 | * }; | ||
| 282 | */ | ||
| 283 | PERF_EVENT_PERIOD = 4, | ||
| 284 | |||
| 285 | /* | ||
| 286 | * struct { | ||
| 287 | * struct perf_event_header header; | ||
| 288 | * u64 time; | ||
| 289 | * u64 id; | ||
| 290 | * }; | ||
| 291 | */ | ||
| 292 | PERF_EVENT_THROTTLE = 5, | ||
| 293 | PERF_EVENT_UNTHROTTLE = 6, | ||
| 294 | |||
| 295 | /* | ||
| 296 | * struct { | ||
| 297 | * struct perf_event_header header; | ||
| 298 | * u32 pid, ppid; | ||
| 299 | * }; | ||
| 300 | */ | ||
| 301 | PERF_EVENT_FORK = 7, | ||
| 302 | |||
| 303 | /* | ||
| 304 | * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field | ||
| 305 | * will be PERF_RECORD_* | ||
| 306 | * | ||
| 307 | * struct { | ||
| 308 | * struct perf_event_header header; | ||
| 309 | * | ||
| 310 | * { u64 ip; } && PERF_RECORD_IP | ||
| 311 | * { u32 pid, tid; } && PERF_RECORD_TID | ||
| 312 | * { u64 time; } && PERF_RECORD_TIME | ||
| 313 | * { u64 addr; } && PERF_RECORD_ADDR | ||
| 314 | * { u64 config; } && PERF_RECORD_CONFIG | ||
| 315 | * { u32 cpu, res; } && PERF_RECORD_CPU | ||
| 316 | * | ||
| 317 | * { u64 nr; | ||
| 318 | * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP | ||
| 319 | * | ||
| 320 | * { u16 nr, | ||
| 321 | * hv, | ||
| 322 | * kernel, | ||
| 323 | * user; | ||
| 324 | * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN | ||
| 325 | * }; | ||
| 326 | */ | ||
| 327 | }; | ||
| 328 | |||
| 329 | #ifdef __KERNEL__ | ||
| 330 | /* | ||
| 331 | * Kernel-internal data types and definitions: | ||
| 332 | */ | ||
| 333 | |||
| 334 | #ifdef CONFIG_PERF_COUNTERS | ||
| 335 | # include <asm/perf_counter.h> | ||
| 336 | #endif | ||
| 337 | |||
| 338 | #include <linux/list.h> | ||
| 339 | #include <linux/mutex.h> | ||
| 340 | #include <linux/rculist.h> | ||
| 341 | #include <linux/rcupdate.h> | ||
| 342 | #include <linux/spinlock.h> | ||
| 343 | #include <linux/hrtimer.h> | ||
| 344 | #include <linux/fs.h> | ||
| 345 | #include <linux/pid_namespace.h> | ||
| 346 | #include <asm/atomic.h> | ||
| 347 | |||
| 348 | struct task_struct; | ||
| 349 | |||
| 350 | /** | ||
| 351 | * struct hw_perf_counter - performance counter hardware details: | ||
| 352 | */ | ||
| 353 | struct hw_perf_counter { | ||
| 354 | #ifdef CONFIG_PERF_COUNTERS | ||
| 355 | union { | ||
| 356 | struct { /* hardware */ | ||
| 357 | u64 config; | ||
| 358 | unsigned long config_base; | ||
| 359 | unsigned long counter_base; | ||
| 360 | int idx; | ||
| 361 | }; | ||
| 362 | union { /* software */ | ||
| 363 | atomic64_t count; | ||
| 364 | struct hrtimer hrtimer; | ||
| 365 | }; | ||
| 366 | }; | ||
| 367 | atomic64_t prev_count; | ||
| 368 | u64 sample_period; | ||
| 369 | u64 last_period; | ||
| 370 | atomic64_t period_left; | ||
| 371 | u64 interrupts; | ||
| 372 | |||
| 373 | u64 freq_count; | ||
| 374 | u64 freq_interrupts; | ||
| 375 | u64 freq_stamp; | ||
| 376 | #endif | ||
| 377 | }; | ||
| 378 | |||
| 379 | struct perf_counter; | ||
| 380 | |||
| 381 | /** | ||
| 382 | * struct pmu - generic performance monitoring unit | ||
| 383 | */ | ||
| 384 | struct pmu { | ||
| 385 | int (*enable) (struct perf_counter *counter); | ||
| 386 | void (*disable) (struct perf_counter *counter); | ||
| 387 | void (*read) (struct perf_counter *counter); | ||
| 388 | void (*unthrottle) (struct perf_counter *counter); | ||
| 389 | }; | ||
| 390 | |||
| 391 | /** | ||
| 392 | * enum perf_counter_active_state - the states of a counter | ||
| 393 | */ | ||
| 394 | enum perf_counter_active_state { | ||
| 395 | PERF_COUNTER_STATE_ERROR = -2, | ||
| 396 | PERF_COUNTER_STATE_OFF = -1, | ||
| 397 | PERF_COUNTER_STATE_INACTIVE = 0, | ||
| 398 | PERF_COUNTER_STATE_ACTIVE = 1, | ||
| 399 | }; | ||
| 400 | |||
| 401 | struct file; | ||
| 402 | |||
| 403 | struct perf_mmap_data { | ||
| 404 | struct rcu_head rcu_head; | ||
| 405 | int nr_pages; /* nr of data pages */ | ||
| 406 | int nr_locked; /* nr pages mlocked */ | ||
| 407 | |||
| 408 | atomic_t poll; /* POLL_ for wakeups */ | ||
| 409 | atomic_t events; /* event limit */ | ||
| 410 | |||
| 411 | atomic_long_t head; /* write position */ | ||
| 412 | atomic_long_t done_head; /* completed head */ | ||
| 413 | |||
| 414 | atomic_t lock; /* concurrent writes */ | ||
| 415 | |||
| 416 | atomic_t wakeup; /* needs a wakeup */ | ||
| 417 | |||
| 418 | struct perf_counter_mmap_page *user_page; | ||
| 419 | void *data_pages[0]; | ||
| 420 | }; | ||
| 421 | |||
| 422 | struct perf_pending_entry { | ||
| 423 | struct perf_pending_entry *next; | ||
| 424 | void (*func)(struct perf_pending_entry *); | ||
| 425 | }; | ||
| 426 | |||
| 427 | /** | ||
| 428 | * struct perf_counter - performance counter kernel representation: | ||
| 429 | */ | ||
| 430 | struct perf_counter { | ||
| 431 | #ifdef CONFIG_PERF_COUNTERS | ||
| 432 | struct list_head list_entry; | ||
| 433 | struct list_head event_entry; | ||
| 434 | struct list_head sibling_list; | ||
| 435 | int nr_siblings; | ||
| 436 | struct perf_counter *group_leader; | ||
| 437 | const struct pmu *pmu; | ||
| 438 | |||
| 439 | enum perf_counter_active_state state; | ||
| 440 | atomic64_t count; | ||
| 441 | |||
| 442 | /* | ||
| 443 | * These are the total time in nanoseconds that the counter | ||
| 444 | * has been enabled (i.e. eligible to run, and the task has | ||
| 445 | * been scheduled in, if this is a per-task counter) | ||
| 446 | * and running (scheduled onto the CPU), respectively. | ||
| 447 | * | ||
| 448 | * They are computed from tstamp_enabled, tstamp_running and | ||
| 449 | * tstamp_stopped when the counter is in INACTIVE or ACTIVE state. | ||
| 450 | */ | ||
| 451 | u64 total_time_enabled; | ||
| 452 | u64 total_time_running; | ||
| 453 | |||
| 454 | /* | ||
| 455 | * These are timestamps used for computing total_time_enabled | ||
| 456 | * and total_time_running when the counter is in INACTIVE or | ||
| 457 | * ACTIVE state, measured in nanoseconds from an arbitrary point | ||
| 458 | * in time. | ||
| 459 | * tstamp_enabled: the notional time when the counter was enabled | ||
| 460 | * tstamp_running: the notional time when the counter was scheduled on | ||
| 461 | * tstamp_stopped: in INACTIVE state, the notional time when the | ||
| 462 | * counter was scheduled off. | ||
| 463 | */ | ||
| 464 | u64 tstamp_enabled; | ||
| 465 | u64 tstamp_running; | ||
| 466 | u64 tstamp_stopped; | ||
| 467 | |||
| 468 | struct perf_counter_attr attr; | ||
| 469 | struct hw_perf_counter hw; | ||
| 470 | |||
| 471 | struct perf_counter_context *ctx; | ||
| 472 | struct file *filp; | ||
| 473 | |||
| 474 | /* | ||
| 475 | * These accumulate total time (in nanoseconds) that children | ||
| 476 | * counters have been enabled and running, respectively. | ||
| 477 | */ | ||
| 478 | atomic64_t child_total_time_enabled; | ||
| 479 | atomic64_t child_total_time_running; | ||
| 480 | |||
| 481 | /* | ||
| 482 | * Protect attach/detach and child_list: | ||
| 483 | */ | ||
| 484 | struct mutex child_mutex; | ||
| 485 | struct list_head child_list; | ||
| 486 | struct perf_counter *parent; | ||
| 487 | |||
| 488 | int oncpu; | ||
| 489 | int cpu; | ||
| 490 | |||
| 491 | struct list_head owner_entry; | ||
| 492 | struct task_struct *owner; | ||
| 493 | |||
| 494 | /* mmap bits */ | ||
| 495 | struct mutex mmap_mutex; | ||
| 496 | atomic_t mmap_count; | ||
| 497 | struct perf_mmap_data *data; | ||
| 498 | |||
| 499 | /* poll related */ | ||
| 500 | wait_queue_head_t waitq; | ||
| 501 | struct fasync_struct *fasync; | ||
| 502 | |||
| 503 | /* delayed work for NMIs and such */ | ||
| 504 | int pending_wakeup; | ||
| 505 | int pending_kill; | ||
| 506 | int pending_disable; | ||
| 507 | struct perf_pending_entry pending; | ||
| 508 | |||
| 509 | atomic_t event_limit; | ||
| 510 | |||
| 511 | void (*destroy)(struct perf_counter *); | ||
| 512 | struct rcu_head rcu_head; | ||
| 513 | |||
| 514 | struct pid_namespace *ns; | ||
| 515 | u64 id; | ||
| 516 | #endif | ||
| 517 | }; | ||
| 518 | |||
| 519 | /** | ||
| 520 | * struct perf_counter_context - counter context structure | ||
| 521 | * | ||
| 522 | * Used as a container for task counters and CPU counters as well: | ||
| 523 | */ | ||
| 524 | struct perf_counter_context { | ||
| 525 | /* | ||
| 526 | * Protect the states of the counters in the list, | ||
| 527 | * nr_active, and the list: | ||
| 528 | */ | ||
| 529 | spinlock_t lock; | ||
| 530 | /* | ||
| 531 | * Protect the list of counters. Locking either mutex or lock | ||
| 532 | * is sufficient to ensure the list doesn't change; to change | ||
| 533 | * the list you need to lock both the mutex and the spinlock. | ||
| 534 | */ | ||
| 535 | struct mutex mutex; | ||
| 536 | |||
| 537 | struct list_head counter_list; | ||
| 538 | struct list_head event_list; | ||
| 539 | int nr_counters; | ||
| 540 | int nr_active; | ||
| 541 | int is_active; | ||
| 542 | atomic_t refcount; | ||
| 543 | struct task_struct *task; | ||
| 544 | |||
| 545 | /* | ||
| 546 | * Context clock, runs when context enabled. | ||
| 547 | */ | ||
| 548 | u64 time; | ||
| 549 | u64 timestamp; | ||
| 550 | |||
| 551 | /* | ||
| 552 | * These fields let us detect when two contexts have both | ||
| 553 | * been cloned (inherited) from a common ancestor. | ||
| 554 | */ | ||
| 555 | struct perf_counter_context *parent_ctx; | ||
| 556 | u64 parent_gen; | ||
| 557 | u64 generation; | ||
| 558 | int pin_count; | ||
| 559 | struct rcu_head rcu_head; | ||
| 560 | }; | ||
| 561 | |||
| 562 | /** | ||
| 563 | * struct perf_counter_cpu_context - per cpu counter context structure | ||
| 564 | */ | ||
| 565 | struct perf_cpu_context { | ||
| 566 | struct perf_counter_context ctx; | ||
| 567 | struct perf_counter_context *task_ctx; | ||
| 568 | int active_oncpu; | ||
| 569 | int max_pertask; | ||
| 570 | int exclusive; | ||
| 571 | |||
| 572 | /* | ||
| 573 | * Recursion avoidance: | ||
| 574 | * | ||
| 575 | * task, softirq, irq, nmi context | ||
| 576 | */ | ||
| 577 | int recursion[4]; | ||
| 578 | }; | ||
| 579 | |||
| 580 | #ifdef CONFIG_PERF_COUNTERS | ||
| 581 | |||
| 582 | /* | ||
| 583 | * Set by architecture code: | ||
| 584 | */ | ||
| 585 | extern int perf_max_counters; | ||
| 586 | |||
| 587 | extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); | ||
| 588 | |||
| 589 | extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); | ||
| 590 | extern void perf_counter_task_sched_out(struct task_struct *task, | ||
| 591 | struct task_struct *next, int cpu); | ||
| 592 | extern void perf_counter_task_tick(struct task_struct *task, int cpu); | ||
| 593 | extern int perf_counter_init_task(struct task_struct *child); | ||
| 594 | extern void perf_counter_exit_task(struct task_struct *child); | ||
| 595 | extern void perf_counter_free_task(struct task_struct *task); | ||
| 596 | extern void perf_counter_do_pending(void); | ||
| 597 | extern void perf_counter_print_debug(void); | ||
| 598 | extern void __perf_disable(void); | ||
| 599 | extern bool __perf_enable(void); | ||
| 600 | extern void perf_disable(void); | ||
| 601 | extern void perf_enable(void); | ||
| 602 | extern int perf_counter_task_disable(void); | ||
| 603 | extern int perf_counter_task_enable(void); | ||
| 604 | extern int hw_perf_group_sched_in(struct perf_counter *group_leader, | ||
| 605 | struct perf_cpu_context *cpuctx, | ||
| 606 | struct perf_counter_context *ctx, int cpu); | ||
| 607 | extern void perf_counter_update_userpage(struct perf_counter *counter); | ||
| 608 | |||
| 609 | struct perf_sample_data { | ||
| 610 | struct pt_regs *regs; | ||
| 611 | u64 addr; | ||
| 612 | u64 period; | ||
| 613 | }; | ||
| 614 | |||
| 615 | extern int perf_counter_overflow(struct perf_counter *counter, int nmi, | ||
| 616 | struct perf_sample_data *data); | ||
| 617 | |||
| 618 | /* | ||
| 619 | * Return 1 for a software counter, 0 for a hardware counter | ||
| 620 | */ | ||
| 621 | static inline int is_software_counter(struct perf_counter *counter) | ||
| 622 | { | ||
| 623 | return (counter->attr.type != PERF_TYPE_RAW) && | ||
| 624 | (counter->attr.type != PERF_TYPE_HARDWARE); | ||
| 625 | } | ||
| 626 | |||
| 627 | extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); | ||
| 628 | |||
| 629 | extern void __perf_counter_mmap(struct vm_area_struct *vma); | ||
| 630 | |||
| 631 | static inline void perf_counter_mmap(struct vm_area_struct *vma) | ||
| 632 | { | ||
| 633 | if (vma->vm_flags & VM_EXEC) | ||
| 634 | __perf_counter_mmap(vma); | ||
| 635 | } | ||
| 636 | |||
| 637 | extern void perf_counter_comm(struct task_struct *tsk); | ||
| 638 | extern void perf_counter_fork(struct task_struct *tsk); | ||
| 639 | |||
| 640 | extern void perf_counter_task_migration(struct task_struct *task, int cpu); | ||
| 641 | |||
| 642 | #define MAX_STACK_DEPTH 255 | ||
| 643 | |||
| 644 | struct perf_callchain_entry { | ||
| 645 | u16 nr; | ||
| 646 | u16 hv; | ||
| 647 | u16 kernel; | ||
| 648 | u16 user; | ||
| 649 | u64 ip[MAX_STACK_DEPTH]; | ||
| 650 | }; | ||
| 651 | |||
| 652 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | ||
| 653 | |||
| 654 | extern int sysctl_perf_counter_paranoid; | ||
| 655 | extern int sysctl_perf_counter_mlock; | ||
| 656 | extern int sysctl_perf_counter_sample_rate; | ||
| 657 | |||
| 658 | extern void perf_counter_init(void); | ||
| 659 | |||
| 660 | #ifndef perf_misc_flags | ||
| 661 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ | ||
| 662 | PERF_EVENT_MISC_KERNEL) | ||
| 663 | #define perf_instruction_pointer(regs) instruction_pointer(regs) | ||
| 664 | #endif | ||
| 665 | |||
| 666 | #else | ||
| 667 | static inline void | ||
| 668 | perf_counter_task_sched_in(struct task_struct *task, int cpu) { } | ||
| 669 | static inline void | ||
| 670 | perf_counter_task_sched_out(struct task_struct *task, | ||
| 671 | struct task_struct *next, int cpu) { } | ||
| 672 | static inline void | ||
| 673 | perf_counter_task_tick(struct task_struct *task, int cpu) { } | ||
| 674 | static inline int perf_counter_init_task(struct task_struct *child) { return 0; } | ||
| 675 | static inline void perf_counter_exit_task(struct task_struct *child) { } | ||
| 676 | static inline void perf_counter_free_task(struct task_struct *task) { } | ||
| 677 | static inline void perf_counter_do_pending(void) { } | ||
| 678 | static inline void perf_counter_print_debug(void) { } | ||
| 679 | static inline void perf_disable(void) { } | ||
| 680 | static inline void perf_enable(void) { } | ||
| 681 | static inline int perf_counter_task_disable(void) { return -EINVAL; } | ||
| 682 | static inline int perf_counter_task_enable(void) { return -EINVAL; } | ||
| 683 | |||
| 684 | static inline void | ||
| 685 | perf_swcounter_event(u32 event, u64 nr, int nmi, | ||
| 686 | struct pt_regs *regs, u64 addr) { } | ||
| 687 | |||
| 688 | static inline void perf_counter_mmap(struct vm_area_struct *vma) { } | ||
| 689 | static inline void perf_counter_comm(struct task_struct *tsk) { } | ||
| 690 | static inline void perf_counter_fork(struct task_struct *tsk) { } | ||
| 691 | static inline void perf_counter_init(void) { } | ||
| 692 | static inline void perf_counter_task_migration(struct task_struct *task, | ||
| 693 | int cpu) { } | ||
| 694 | #endif | ||
| 695 | |||
| 696 | #endif /* __KERNEL__ */ | ||
| 697 | #endif /* _LINUX_PERF_COUNTER_H */ | ||
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index c8f038554e80..b43a9e039059 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h | |||
| @@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void | |||
| 152 | void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); | 152 | void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); |
| 153 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); | 153 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); |
| 154 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); | 154 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); |
| 155 | void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); | ||
| 155 | 156 | ||
| 156 | #endif | 157 | #endif |
diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 48d887e3c6e7..b00df4c79c63 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h | |||
| @@ -85,4 +85,7 @@ | |||
| 85 | #define PR_SET_TIMERSLACK 29 | 85 | #define PR_SET_TIMERSLACK 29 |
| 86 | #define PR_GET_TIMERSLACK 30 | 86 | #define PR_GET_TIMERSLACK 30 |
| 87 | 87 | ||
| 88 | #define PR_TASK_PERF_COUNTERS_DISABLE 31 | ||
| 89 | #define PR_TASK_PERF_COUNTERS_ENABLE 32 | ||
| 90 | |||
| 88 | #endif /* _LINUX_PRCTL_H */ | 91 | #endif /* _LINUX_PRCTL_H */ |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 67c15653fc23..59e133d39d50 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
| @@ -95,7 +95,6 @@ extern void __ptrace_link(struct task_struct *child, | |||
| 95 | struct task_struct *new_parent); | 95 | struct task_struct *new_parent); |
| 96 | extern void __ptrace_unlink(struct task_struct *child); | 96 | extern void __ptrace_unlink(struct task_struct *child); |
| 97 | extern void exit_ptrace(struct task_struct *tracer); | 97 | extern void exit_ptrace(struct task_struct *tracer); |
| 98 | extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); | ||
| 99 | #define PTRACE_MODE_READ 1 | 98 | #define PTRACE_MODE_READ 1 |
| 100 | #define PTRACE_MODE_ATTACH 2 | 99 | #define PTRACE_MODE_ATTACH 2 |
| 101 | /* Returns 0 on success, -errno on denial. */ | 100 | /* Returns 0 on success, -errno on denial. */ |
| @@ -327,15 +326,6 @@ static inline void user_enable_block_step(struct task_struct *task) | |||
| 327 | #define arch_ptrace_untrace(task) do { } while (0) | 326 | #define arch_ptrace_untrace(task) do { } while (0) |
| 328 | #endif | 327 | #endif |
| 329 | 328 | ||
| 330 | #ifndef arch_ptrace_fork | ||
| 331 | /* | ||
| 332 | * Do machine-specific work to initialize a new task. | ||
| 333 | * | ||
| 334 | * This is called from copy_process(). | ||
| 335 | */ | ||
| 336 | #define arch_ptrace_fork(child, clone_flags) do { } while (0) | ||
| 337 | #endif | ||
| 338 | |||
| 339 | extern int task_current_syscall(struct task_struct *target, long *callno, | 329 | extern int task_current_syscall(struct task_struct *target, long *callno, |
| 340 | unsigned long args[6], unsigned int maxargs, | 330 | unsigned long args[6], unsigned int maxargs, |
| 341 | unsigned long *sp, unsigned long *pc); | 331 | unsigned long *sp, unsigned long *pc); |
diff --git a/include/linux/rational.h b/include/linux/rational.h new file mode 100644 index 000000000000..4f532fcd9eea --- /dev/null +++ b/include/linux/rational.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | /* | ||
| 2 | * rational fractions | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com> | ||
| 5 | * | ||
| 6 | * helper functions when coping with rational numbers, | ||
| 7 | * e.g. when calculating optimum numerator/denominator pairs for | ||
| 8 | * pll configuration taking into account restricted register size | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef _LINUX_RATIONAL_H | ||
| 12 | #define _LINUX_RATIONAL_H | ||
| 13 | |||
| 14 | void rational_best_approximation( | ||
| 15 | unsigned long given_numerator, unsigned long given_denominator, | ||
| 16 | unsigned long max_numerator, unsigned long max_denominator, | ||
| 17 | unsigned long *best_numerator, unsigned long *best_denominator); | ||
| 18 | |||
| 19 | #endif /* _LINUX_RATIONAL_H */ | ||
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index e649bd3f2c97..5710f43bbc9e 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
| @@ -198,6 +198,32 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
| 198 | at->prev = last; | 198 | at->prev = last; |
| 199 | } | 199 | } |
| 200 | 200 | ||
| 201 | /** | ||
| 202 | * list_entry_rcu - get the struct for this entry | ||
| 203 | * @ptr: the &struct list_head pointer. | ||
| 204 | * @type: the type of the struct this is embedded in. | ||
| 205 | * @member: the name of the list_struct within the struct. | ||
| 206 | * | ||
| 207 | * This primitive may safely run concurrently with the _rcu list-mutation | ||
| 208 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | ||
| 209 | */ | ||
| 210 | #define list_entry_rcu(ptr, type, member) \ | ||
| 211 | container_of(rcu_dereference(ptr), type, member) | ||
| 212 | |||
| 213 | /** | ||
| 214 | * list_first_entry_rcu - get the first element from a list | ||
| 215 | * @ptr: the list head to take the element from. | ||
| 216 | * @type: the type of the struct this is embedded in. | ||
| 217 | * @member: the name of the list_struct within the struct. | ||
| 218 | * | ||
| 219 | * Note that the list is expected to be non-empty. | ||
| 220 | * | ||
| 221 | * This primitive may safely run concurrently with the _rcu list-mutation | ||
| 222 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | ||
| 223 | */ | ||
| 224 | #define list_first_entry_rcu(ptr, type, member) \ | ||
| 225 | list_entry_rcu((ptr)->next, type, member) | ||
| 226 | |||
| 201 | #define __list_for_each_rcu(pos, head) \ | 227 | #define __list_for_each_rcu(pos, head) \ |
| 202 | for (pos = rcu_dereference((head)->next); \ | 228 | for (pos = rcu_dereference((head)->next); \ |
| 203 | pos != (head); \ | 229 | pos != (head); \ |
| @@ -214,9 +240,9 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
| 214 | * as long as the traversal is guarded by rcu_read_lock(). | 240 | * as long as the traversal is guarded by rcu_read_lock(). |
| 215 | */ | 241 | */ |
| 216 | #define list_for_each_entry_rcu(pos, head, member) \ | 242 | #define list_for_each_entry_rcu(pos, head, member) \ |
| 217 | for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \ | 243 | for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ |
| 218 | prefetch(pos->member.next), &pos->member != (head); \ | 244 | prefetch(pos->member.next), &pos->member != (head); \ |
| 219 | pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member)) | 245 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
| 220 | 246 | ||
| 221 | 247 | ||
| 222 | /** | 248 | /** |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 58b2aa5312b9..5a5153806c42 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
| @@ -161,8 +161,15 @@ struct rcu_data { | |||
| 161 | unsigned long offline_fqs; /* Kicked due to being offline. */ | 161 | unsigned long offline_fqs; /* Kicked due to being offline. */ |
| 162 | unsigned long resched_ipi; /* Sent a resched IPI. */ | 162 | unsigned long resched_ipi; /* Sent a resched IPI. */ |
| 163 | 163 | ||
| 164 | /* 5) For future __rcu_pending statistics. */ | 164 | /* 5) __rcu_pending() statistics. */ |
| 165 | long n_rcu_pending; /* rcu_pending() calls since boot. */ | 165 | long n_rcu_pending; /* rcu_pending() calls since boot. */ |
| 166 | long n_rp_qs_pending; | ||
| 167 | long n_rp_cb_ready; | ||
| 168 | long n_rp_cpu_needs_gp; | ||
| 169 | long n_rp_gp_completed; | ||
| 170 | long n_rp_gp_started; | ||
| 171 | long n_rp_need_fqs; | ||
| 172 | long n_rp_need_nothing; | ||
| 166 | 173 | ||
| 167 | int cpu; | 174 | int cpu; |
| 168 | }; | 175 | }; |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index e1b7b2173885..8670f1575fe1 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
| @@ -11,7 +11,7 @@ struct ring_buffer_iter; | |||
| 11 | * Don't refer to this struct directly, use functions below. | 11 | * Don't refer to this struct directly, use functions below. |
| 12 | */ | 12 | */ |
| 13 | struct ring_buffer_event { | 13 | struct ring_buffer_event { |
| 14 | u32 type:2, len:3, time_delta:27; | 14 | u32 type_len:5, time_delta:27; |
| 15 | u32 array[]; | 15 | u32 array[]; |
| 16 | }; | 16 | }; |
| 17 | 17 | ||
| @@ -24,7 +24,8 @@ struct ring_buffer_event { | |||
| 24 | * size is variable depending on how much | 24 | * size is variable depending on how much |
| 25 | * padding is needed | 25 | * padding is needed |
| 26 | * If time_delta is non zero: | 26 | * If time_delta is non zero: |
| 27 | * everything else same as RINGBUF_TYPE_DATA | 27 | * array[0] holds the actual length |
| 28 | * size = 4 + length (bytes) | ||
| 28 | * | 29 | * |
| 29 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta | 30 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta |
| 30 | * array[0] = time delta (28 .. 59) | 31 | * array[0] = time delta (28 .. 59) |
| @@ -35,22 +36,23 @@ struct ring_buffer_event { | |||
| 35 | * array[1..2] = tv_sec | 36 | * array[1..2] = tv_sec |
| 36 | * size = 16 bytes | 37 | * size = 16 bytes |
| 37 | * | 38 | * |
| 38 | * @RINGBUF_TYPE_DATA: Data record | 39 | * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX: |
| 39 | * If len is zero: | 40 | * Data record |
| 41 | * If type_len is zero: | ||
| 40 | * array[0] holds the actual length | 42 | * array[0] holds the actual length |
| 41 | * array[1..(length+3)/4] holds data | 43 | * array[1..(length+3)/4] holds data |
| 42 | * size = 4 + 4 + length (bytes) | 44 | * size = 4 + length (bytes) |
| 43 | * else | 45 | * else |
| 44 | * length = len << 2 | 46 | * length = type_len << 2 |
| 45 | * array[0..(length+3)/4-1] holds data | 47 | * array[0..(length+3)/4-1] holds data |
| 46 | * size = 4 + length (bytes) | 48 | * size = 4 + length (bytes) |
| 47 | */ | 49 | */ |
| 48 | enum ring_buffer_type { | 50 | enum ring_buffer_type { |
| 51 | RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, | ||
| 49 | RINGBUF_TYPE_PADDING, | 52 | RINGBUF_TYPE_PADDING, |
| 50 | RINGBUF_TYPE_TIME_EXTEND, | 53 | RINGBUF_TYPE_TIME_EXTEND, |
| 51 | /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ | 54 | /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ |
| 52 | RINGBUF_TYPE_TIME_STAMP, | 55 | RINGBUF_TYPE_TIME_STAMP, |
| 53 | RINGBUF_TYPE_DATA, | ||
| 54 | }; | 56 | }; |
| 55 | 57 | ||
| 56 | unsigned ring_buffer_event_length(struct ring_buffer_event *event); | 58 | unsigned ring_buffer_event_length(struct ring_buffer_event *event); |
| @@ -68,13 +70,54 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) | |||
| 68 | return event->time_delta; | 70 | return event->time_delta; |
| 69 | } | 71 | } |
| 70 | 72 | ||
| 73 | /* | ||
| 74 | * ring_buffer_event_discard can discard any event in the ring buffer. | ||
| 75 | * it is up to the caller to protect against a reader from | ||
| 76 | * consuming it or a writer from wrapping and replacing it. | ||
| 77 | * | ||
| 78 | * No external protection is needed if this is called before | ||
| 79 | * the event is committed. But in that case it would be better to | ||
| 80 | * use ring_buffer_discard_commit. | ||
| 81 | * | ||
| 82 | * Note, if an event that has not been committed is discarded | ||
| 83 | * with ring_buffer_event_discard, it must still be committed. | ||
| 84 | */ | ||
| 71 | void ring_buffer_event_discard(struct ring_buffer_event *event); | 85 | void ring_buffer_event_discard(struct ring_buffer_event *event); |
| 72 | 86 | ||
| 73 | /* | 87 | /* |
| 88 | * ring_buffer_discard_commit will remove an event that has not | ||
| 89 | * been committed yet. If this is used, then ring_buffer_unlock_commit | ||
| 90 | * must not be called on the discarded event. This function | ||
| 91 | * will try to remove the event from the ring buffer completely | ||
| 92 | * if another event has not been written after it. | ||
| 93 | * | ||
| 94 | * Example use: | ||
| 95 | * | ||
| 96 | * if (some_condition) | ||
| 97 | * ring_buffer_discard_commit(buffer, event); | ||
| 98 | * else | ||
| 99 | * ring_buffer_unlock_commit(buffer, event); | ||
| 100 | */ | ||
| 101 | void ring_buffer_discard_commit(struct ring_buffer *buffer, | ||
| 102 | struct ring_buffer_event *event); | ||
| 103 | |||
| 104 | /* | ||
| 74 | * size is in bytes for each per CPU buffer. | 105 | * size is in bytes for each per CPU buffer. |
| 75 | */ | 106 | */ |
| 76 | struct ring_buffer * | 107 | struct ring_buffer * |
| 77 | ring_buffer_alloc(unsigned long size, unsigned flags); | 108 | __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); |
| 109 | |||
| 110 | /* | ||
| 111 | * Because the ring buffer is generic, if other users of the ring buffer get | ||
| 112 | * traced by ftrace, it can produce lockdep warnings. We need to keep each | ||
| 113 | * ring buffer's lock class separate. | ||
| 114 | */ | ||
| 115 | #define ring_buffer_alloc(size, flags) \ | ||
| 116 | ({ \ | ||
| 117 | static struct lock_class_key __key; \ | ||
| 118 | __ring_buffer_alloc((size), (flags), &__key); \ | ||
| 119 | }) | ||
| 120 | |||
| 78 | void ring_buffer_free(struct ring_buffer *buffer); | 121 | void ring_buffer_free(struct ring_buffer *buffer); |
| 79 | 122 | ||
| 80 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); | 123 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); |
| @@ -122,6 +165,8 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer); | |||
| 122 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer); | 165 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer); |
| 123 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); | 166 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); |
| 124 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); | 167 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); |
| 168 | unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); | ||
| 169 | unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu); | ||
| 125 | 170 | ||
| 126 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); | 171 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); |
| 127 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, | 172 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, |
| @@ -137,6 +182,11 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); | |||
| 137 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, | 182 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, |
| 138 | size_t len, int cpu, int full); | 183 | size_t len, int cpu, int full); |
| 139 | 184 | ||
| 185 | struct trace_seq; | ||
| 186 | |||
| 187 | int ring_buffer_print_entry_header(struct trace_seq *s); | ||
| 188 | int ring_buffer_print_page_header(struct trace_seq *s); | ||
| 189 | |||
| 140 | enum ring_buffer_flags { | 190 | enum ring_buffer_flags { |
| 141 | RB_FL_OVERWRITE = 1 << 0, | 191 | RB_FL_OVERWRITE = 1 << 0, |
| 142 | }; | 192 | }; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index b4c38bc8049c..4896fdfec913 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -77,6 +77,7 @@ struct sched_param { | |||
| 77 | #include <linux/proportions.h> | 77 | #include <linux/proportions.h> |
| 78 | #include <linux/seccomp.h> | 78 | #include <linux/seccomp.h> |
| 79 | #include <linux/rcupdate.h> | 79 | #include <linux/rcupdate.h> |
| 80 | #include <linux/rculist.h> | ||
| 80 | #include <linux/rtmutex.h> | 81 | #include <linux/rtmutex.h> |
| 81 | 82 | ||
| 82 | #include <linux/time.h> | 83 | #include <linux/time.h> |
| @@ -96,8 +97,9 @@ struct exec_domain; | |||
| 96 | struct futex_pi_state; | 97 | struct futex_pi_state; |
| 97 | struct robust_list_head; | 98 | struct robust_list_head; |
| 98 | struct bio; | 99 | struct bio; |
| 99 | struct bts_tracer; | ||
| 100 | struct fs_struct; | 100 | struct fs_struct; |
| 101 | struct bts_context; | ||
| 102 | struct perf_counter_context; | ||
| 101 | 103 | ||
| 102 | /* | 104 | /* |
| 103 | * List of flags we want to share for kernel threads, | 105 | * List of flags we want to share for kernel threads, |
| @@ -116,6 +118,7 @@ struct fs_struct; | |||
| 116 | * 11 bit fractions. | 118 | * 11 bit fractions. |
| 117 | */ | 119 | */ |
| 118 | extern unsigned long avenrun[]; /* Load averages */ | 120 | extern unsigned long avenrun[]; /* Load averages */ |
| 121 | extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); | ||
| 119 | 122 | ||
| 120 | #define FSHIFT 11 /* nr of bits of precision */ | 123 | #define FSHIFT 11 /* nr of bits of precision */ |
| 121 | #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ | 124 | #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ |
| @@ -135,8 +138,9 @@ DECLARE_PER_CPU(unsigned long, process_counts); | |||
| 135 | extern int nr_processes(void); | 138 | extern int nr_processes(void); |
| 136 | extern unsigned long nr_running(void); | 139 | extern unsigned long nr_running(void); |
| 137 | extern unsigned long nr_uninterruptible(void); | 140 | extern unsigned long nr_uninterruptible(void); |
| 138 | extern unsigned long nr_active(void); | ||
| 139 | extern unsigned long nr_iowait(void); | 141 | extern unsigned long nr_iowait(void); |
| 142 | extern void calc_global_load(void); | ||
| 143 | extern u64 cpu_nr_migrations(int cpu); | ||
| 140 | 144 | ||
| 141 | extern unsigned long get_parent_ip(unsigned long addr); | 145 | extern unsigned long get_parent_ip(unsigned long addr); |
| 142 | 146 | ||
| @@ -672,6 +676,10 @@ struct user_struct { | |||
| 672 | struct work_struct work; | 676 | struct work_struct work; |
| 673 | #endif | 677 | #endif |
| 674 | #endif | 678 | #endif |
| 679 | |||
| 680 | #ifdef CONFIG_PERF_COUNTERS | ||
| 681 | atomic_long_t locked_vm; | ||
| 682 | #endif | ||
| 675 | }; | 683 | }; |
| 676 | 684 | ||
| 677 | extern int uids_sysfs_init(void); | 685 | extern int uids_sysfs_init(void); |
| @@ -838,7 +846,17 @@ struct sched_group { | |||
| 838 | */ | 846 | */ |
| 839 | u32 reciprocal_cpu_power; | 847 | u32 reciprocal_cpu_power; |
| 840 | 848 | ||
| 841 | unsigned long cpumask[]; | 849 | /* |
| 850 | * The CPUs this group covers. | ||
| 851 | * | ||
| 852 | * NOTE: this field is variable length. (Allocated dynamically | ||
| 853 | * by attaching extra space to the end of the structure, | ||
| 854 | * depending on how many CPUs the kernel has booted up with) | ||
| 855 | * | ||
| 856 | * It can also be embedded into static data structures at build | ||
| 857 | * time. (See 'struct static_sched_group' in kernel/sched.c) | ||
| 858 | */ | ||
| 859 | unsigned long cpumask[0]; | ||
| 842 | }; | 860 | }; |
| 843 | 861 | ||
| 844 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | 862 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) |
| @@ -924,8 +942,17 @@ struct sched_domain { | |||
| 924 | char *name; | 942 | char *name; |
| 925 | #endif | 943 | #endif |
| 926 | 944 | ||
| 927 | /* span of all CPUs in this domain */ | 945 | /* |
| 928 | unsigned long span[]; | 946 | * Span of all CPUs in this domain. |
| 947 | * | ||
| 948 | * NOTE: this field is variable length. (Allocated dynamically | ||
| 949 | * by attaching extra space to the end of the structure, | ||
| 950 | * depending on how many CPUs the kernel has booted up with) | ||
| 951 | * | ||
| 952 | * It can also be embedded into static data structures at build | ||
| 953 | * time. (See 'struct static_sched_domain' in kernel/sched.c) | ||
| 954 | */ | ||
| 955 | unsigned long span[0]; | ||
| 929 | }; | 956 | }; |
| 930 | 957 | ||
| 931 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) | 958 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) |
| @@ -1052,9 +1079,10 @@ struct sched_entity { | |||
| 1052 | u64 last_wakeup; | 1079 | u64 last_wakeup; |
| 1053 | u64 avg_overlap; | 1080 | u64 avg_overlap; |
| 1054 | 1081 | ||
| 1082 | u64 nr_migrations; | ||
| 1083 | |||
| 1055 | u64 start_runtime; | 1084 | u64 start_runtime; |
| 1056 | u64 avg_wakeup; | 1085 | u64 avg_wakeup; |
| 1057 | u64 nr_migrations; | ||
| 1058 | 1086 | ||
| 1059 | #ifdef CONFIG_SCHEDSTATS | 1087 | #ifdef CONFIG_SCHEDSTATS |
| 1060 | u64 wait_start; | 1088 | u64 wait_start; |
| @@ -1209,18 +1237,11 @@ struct task_struct { | |||
| 1209 | struct list_head ptraced; | 1237 | struct list_head ptraced; |
| 1210 | struct list_head ptrace_entry; | 1238 | struct list_head ptrace_entry; |
| 1211 | 1239 | ||
| 1212 | #ifdef CONFIG_X86_PTRACE_BTS | ||
| 1213 | /* | 1240 | /* |
| 1214 | * This is the tracer handle for the ptrace BTS extension. | 1241 | * This is the tracer handle for the ptrace BTS extension. |
| 1215 | * This field actually belongs to the ptracer task. | 1242 | * This field actually belongs to the ptracer task. |
| 1216 | */ | 1243 | */ |
| 1217 | struct bts_tracer *bts; | 1244 | struct bts_context *bts; |
| 1218 | /* | ||
| 1219 | * The buffer to hold the BTS data. | ||
| 1220 | */ | ||
| 1221 | void *bts_buffer; | ||
| 1222 | size_t bts_size; | ||
| 1223 | #endif /* CONFIG_X86_PTRACE_BTS */ | ||
| 1224 | 1245 | ||
| 1225 | /* PID/PID hash table linkage. */ | 1246 | /* PID/PID hash table linkage. */ |
| 1226 | struct pid_link pids[PIDTYPE_MAX]; | 1247 | struct pid_link pids[PIDTYPE_MAX]; |
| @@ -1247,7 +1268,9 @@ struct task_struct { | |||
| 1247 | * credentials (COW) */ | 1268 | * credentials (COW) */ |
| 1248 | const struct cred *cred; /* effective (overridable) subjective task | 1269 | const struct cred *cred; /* effective (overridable) subjective task |
| 1249 | * credentials (COW) */ | 1270 | * credentials (COW) */ |
| 1250 | struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */ | 1271 | struct mutex cred_guard_mutex; /* guard against foreign influences on |
| 1272 | * credential calculations | ||
| 1273 | * (notably. ptrace) */ | ||
| 1251 | 1274 | ||
| 1252 | char comm[TASK_COMM_LEN]; /* executable name excluding path | 1275 | char comm[TASK_COMM_LEN]; /* executable name excluding path |
| 1253 | - access with [gs]et_task_comm (which lock | 1276 | - access with [gs]et_task_comm (which lock |
| @@ -1380,6 +1403,11 @@ struct task_struct { | |||
| 1380 | struct list_head pi_state_list; | 1403 | struct list_head pi_state_list; |
| 1381 | struct futex_pi_state *pi_state_cache; | 1404 | struct futex_pi_state *pi_state_cache; |
| 1382 | #endif | 1405 | #endif |
| 1406 | #ifdef CONFIG_PERF_COUNTERS | ||
| 1407 | struct perf_counter_context *perf_counter_ctxp; | ||
| 1408 | struct mutex perf_counter_mutex; | ||
| 1409 | struct list_head perf_counter_list; | ||
| 1410 | #endif | ||
| 1383 | #ifdef CONFIG_NUMA | 1411 | #ifdef CONFIG_NUMA |
| 1384 | struct mempolicy *mempolicy; | 1412 | struct mempolicy *mempolicy; |
| 1385 | short il_next; | 1413 | short il_next; |
| @@ -1428,7 +1456,9 @@ struct task_struct { | |||
| 1428 | #ifdef CONFIG_TRACING | 1456 | #ifdef CONFIG_TRACING |
| 1429 | /* state flags for use by tracers */ | 1457 | /* state flags for use by tracers */ |
| 1430 | unsigned long trace; | 1458 | unsigned long trace; |
| 1431 | #endif | 1459 | /* bitmask of trace recursion */ |
| 1460 | unsigned long trace_recursion; | ||
| 1461 | #endif /* CONFIG_TRACING */ | ||
| 1432 | }; | 1462 | }; |
| 1433 | 1463 | ||
| 1434 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1464 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
| @@ -1885,6 +1915,7 @@ extern void sched_dead(struct task_struct *p); | |||
| 1885 | 1915 | ||
| 1886 | extern void proc_caches_init(void); | 1916 | extern void proc_caches_init(void); |
| 1887 | extern void flush_signals(struct task_struct *); | 1917 | extern void flush_signals(struct task_struct *); |
| 1918 | extern void __flush_signals(struct task_struct *); | ||
| 1888 | extern void ignore_signals(struct task_struct *); | 1919 | extern void ignore_signals(struct task_struct *); |
| 1889 | extern void flush_signal_handlers(struct task_struct *, int force_default); | 1920 | extern void flush_signal_handlers(struct task_struct *, int force_default); |
| 1890 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); | 1921 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); |
| @@ -2001,8 +2032,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from); | |||
| 2001 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 2032 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
| 2002 | 2033 | ||
| 2003 | #ifdef CONFIG_SMP | 2034 | #ifdef CONFIG_SMP |
| 2035 | extern void wait_task_context_switch(struct task_struct *p); | ||
| 2004 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); | 2036 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
| 2005 | #else | 2037 | #else |
| 2038 | static inline void wait_task_context_switch(struct task_struct *p) {} | ||
| 2006 | static inline unsigned long wait_task_inactive(struct task_struct *p, | 2039 | static inline unsigned long wait_task_inactive(struct task_struct *p, |
| 2007 | long match_state) | 2040 | long match_state) |
| 2008 | { | 2041 | { |
| @@ -2010,7 +2043,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p, | |||
| 2010 | } | 2043 | } |
| 2011 | #endif | 2044 | #endif |
| 2012 | 2045 | ||
| 2013 | #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) | 2046 | #define next_task(p) \ |
| 2047 | list_entry_rcu((p)->tasks.next, struct task_struct, tasks) | ||
| 2014 | 2048 | ||
| 2015 | #define for_each_process(p) \ | 2049 | #define for_each_process(p) \ |
| 2016 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) | 2050 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
| @@ -2049,8 +2083,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2) | |||
| 2049 | 2083 | ||
| 2050 | static inline struct task_struct *next_thread(const struct task_struct *p) | 2084 | static inline struct task_struct *next_thread(const struct task_struct *p) |
| 2051 | { | 2085 | { |
| 2052 | return list_entry(rcu_dereference(p->thread_group.next), | 2086 | return list_entry_rcu(p->thread_group.next, |
| 2053 | struct task_struct, thread_group); | 2087 | struct task_struct, thread_group); |
| 2054 | } | 2088 | } |
| 2055 | 2089 | ||
| 2056 | static inline int thread_group_empty(struct task_struct *p) | 2090 | static inline int thread_group_empty(struct task_struct *p) |
| @@ -2388,6 +2422,13 @@ static inline void inc_syscw(struct task_struct *tsk) | |||
| 2388 | #define TASK_SIZE_OF(tsk) TASK_SIZE | 2422 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
| 2389 | #endif | 2423 | #endif |
| 2390 | 2424 | ||
| 2425 | /* | ||
| 2426 | * Call the function if the target task is executing on a CPU right now: | ||
| 2427 | */ | ||
| 2428 | extern void task_oncpu_function_call(struct task_struct *p, | ||
| 2429 | void (*func) (void *info), void *info); | ||
| 2430 | |||
| 2431 | |||
| 2391 | #ifdef CONFIG_MM_OWNER | 2432 | #ifdef CONFIG_MM_OWNER |
| 2392 | extern void mm_update_next_owner(struct mm_struct *mm); | 2433 | extern void mm_update_next_owner(struct mm_struct *mm); |
| 2393 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); | 2434 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); |
diff --git a/include/linux/security.h b/include/linux/security.h index d5fd6163606f..5eff459b3833 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -2197,6 +2197,8 @@ static inline int security_file_mmap(struct file *file, unsigned long reqprot, | |||
| 2197 | unsigned long addr, | 2197 | unsigned long addr, |
| 2198 | unsigned long addr_only) | 2198 | unsigned long addr_only) |
| 2199 | { | 2199 | { |
| 2200 | if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO)) | ||
| 2201 | return -EACCES; | ||
| 2200 | return 0; | 2202 | return 0; |
| 2201 | } | 2203 | } |
| 2202 | 2204 | ||
diff --git a/include/linux/serial.h b/include/linux/serial.h index 9136cc5608c3..e5bb75a63802 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h | |||
| @@ -96,54 +96,76 @@ struct serial_uart_config { | |||
| 96 | 96 | ||
| 97 | /* | 97 | /* |
| 98 | * Definitions for async_struct (and serial_struct) flags field | 98 | * Definitions for async_struct (and serial_struct) flags field |
| 99 | * | ||
| 100 | * Define ASYNCB_* for convenient use with {test,set,clear}_bit. | ||
| 99 | */ | 101 | */ |
| 100 | #define ASYNC_HUP_NOTIFY 0x0001 /* Notify getty on hangups and closes | 102 | #define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes |
| 101 | on the callout port */ | 103 | * on the callout port */ |
| 102 | #define ASYNC_FOURPORT 0x0002 /* Set OU1, OUT2 per AST Fourport settings */ | 104 | #define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */ |
| 103 | #define ASYNC_SAK 0x0004 /* Secure Attention Key (Orange book) */ | 105 | #define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */ |
| 104 | #define ASYNC_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */ | 106 | #define ASYNCB_SPLIT_TERMIOS 3 /* Separate termios for dialin/callout */ |
| 105 | 107 | #define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */ | |
| 106 | #define ASYNC_SPD_MASK 0x1030 | 108 | #define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */ |
| 107 | #define ASYNC_SPD_HI 0x0010 /* Use 56000 instead of 38400 bps */ | 109 | #define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */ |
| 108 | 110 | #define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during | |
| 109 | #define ASYNC_SPD_VHI 0x0020 /* Use 115200 instead of 38400 bps */ | 111 | * autoconfiguration */ |
| 110 | #define ASYNC_SPD_CUST 0x0030 /* Use user-specified divisor */ | 112 | #define ASYNCB_SESSION_LOCKOUT 8 /* Lock out cua opens based on session */ |
| 111 | 113 | #define ASYNCB_PGRP_LOCKOUT 9 /* Lock out cua opens based on pgrp */ | |
| 112 | #define ASYNC_SKIP_TEST 0x0040 /* Skip UART test during autoconfiguration */ | 114 | #define ASYNCB_CALLOUT_NOHUP 10 /* Don't do hangups for cua device */ |
| 113 | #define ASYNC_AUTO_IRQ 0x0080 /* Do automatic IRQ during autoconfiguration */ | 115 | #define ASYNCB_HARDPPS_CD 11 /* Call hardpps when CD goes high */ |
| 114 | #define ASYNC_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */ | 116 | #define ASYNCB_SPD_SHI 12 /* Use 230400 instead of 38400 bps */ |
| 115 | #define ASYNC_PGRP_LOCKOUT 0x0200 /* Lock out cua opens based on pgrp */ | 117 | #define ASYNCB_LOW_LATENCY 13 /* Request low latency behaviour */ |
| 116 | #define ASYNC_CALLOUT_NOHUP 0x0400 /* Don't do hangups for cua device */ | 118 | #define ASYNCB_BUGGY_UART 14 /* This is a buggy UART, skip some safety |
| 117 | 119 | * checks. Note: can be dangerous! */ | |
| 118 | #define ASYNC_HARDPPS_CD 0x0800 /* Call hardpps when CD goes high */ | 120 | #define ASYNCB_AUTOPROBE 15 /* Port was autoprobed by PCI or PNP code */ |
| 119 | 121 | #define ASYNCB_LAST_USER 15 | |
| 120 | #define ASYNC_SPD_SHI 0x1000 /* Use 230400 instead of 38400 bps */ | 122 | |
| 121 | #define ASYNC_SPD_WARP 0x1010 /* Use 460800 instead of 38400 bps */ | 123 | /* Internal flags used only by kernel */ |
| 122 | 124 | #define ASYNCB_INITIALIZED 31 /* Serial port was initialized */ | |
| 123 | #define ASYNC_LOW_LATENCY 0x2000 /* Request low latency behaviour */ | 125 | #define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */ |
| 124 | 126 | #define ASYNCB_BOOT_AUTOCONF 28 /* Autoconfigure port on bootup */ | |
| 125 | #define ASYNC_BUGGY_UART 0x4000 /* This is a buggy UART, skip some safety | 127 | #define ASYNCB_CLOSING 27 /* Serial port is closing */ |
| 126 | * checks. Note: can be dangerous! */ | 128 | #define ASYNCB_CTS_FLOW 26 /* Do CTS flow control */ |
| 127 | 129 | #define ASYNCB_CHECK_CD 25 /* i.e., CLOCAL */ | |
| 128 | #define ASYNC_AUTOPROBE 0x8000 /* Port was autoprobed by PCI or PNP code */ | 130 | #define ASYNCB_SHARE_IRQ 24 /* for multifunction cards, no longer used */ |
| 129 | 131 | #define ASYNCB_CONS_FLOW 23 /* flow control for console */ | |
| 130 | #define ASYNC_FLAGS 0x7FFF /* Possible legal async flags */ | 132 | #define ASYNCB_BOOT_ONLYMCA 22 /* Probe only if MCA bus */ |
| 131 | #define ASYNC_USR_MASK 0x3430 /* Legal flags that non-privileged | 133 | #define ASYNCB_FIRST_KERNEL 22 |
| 132 | * users can set or reset */ | 134 | |
| 133 | 135 | #define ASYNC_HUP_NOTIFY (1U << ASYNCB_HUP_NOTIFY) | |
| 134 | /* Internal flags used only by kernel/chr_drv/serial.c */ | 136 | #define ASYNC_FOURPORT (1U << ASYNCB_FOURPORT) |
| 135 | #define ASYNC_INITIALIZED 0x80000000 /* Serial port was initialized */ | 137 | #define ASYNC_SAK (1U << ASYNCB_SAK) |
| 136 | #define ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device is active */ | 138 | #define ASYNC_SPLIT_TERMIOS (1U << ASYNCB_SPLIT_TERMIOS) |
| 137 | #define ASYNC_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */ | 139 | #define ASYNC_SPD_HI (1U << ASYNCB_SPD_HI) |
| 138 | #define ASYNC_CLOSING 0x08000000 /* Serial port is closing */ | 140 | #define ASYNC_SPD_VHI (1U << ASYNCB_SPD_VHI) |
| 139 | #define ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */ | 141 | #define ASYNC_SKIP_TEST (1U << ASYNCB_SKIP_TEST) |
| 140 | #define ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */ | 142 | #define ASYNC_AUTO_IRQ (1U << ASYNCB_AUTO_IRQ) |
| 141 | #define ASYNC_SHARE_IRQ 0x01000000 /* for multifunction cards | 143 | #define ASYNC_SESSION_LOCKOUT (1U << ASYNCB_SESSION_LOCKOUT) |
| 142 | --- no longer used */ | 144 | #define ASYNC_PGRP_LOCKOUT (1U << ASYNCB_PGRP_LOCKOUT) |
| 143 | #define ASYNC_CONS_FLOW 0x00800000 /* flow control for console */ | 145 | #define ASYNC_CALLOUT_NOHUP (1U << ASYNCB_CALLOUT_NOHUP) |
| 144 | 146 | #define ASYNC_HARDPPS_CD (1U << ASYNCB_HARDPPS_CD) | |
| 145 | #define ASYNC_BOOT_ONLYMCA 0x00400000 /* Probe only if MCA bus */ | 147 | #define ASYNC_SPD_SHI (1U << ASYNCB_SPD_SHI) |
| 146 | #define ASYNC_INTERNAL_FLAGS 0xFFC00000 /* Internal flags */ | 148 | #define ASYNC_LOW_LATENCY (1U << ASYNCB_LOW_LATENCY) |
| 149 | #define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART) | ||
| 150 | #define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE) | ||
| 151 | |||
| 152 | #define ASYNC_FLAGS ((1U << ASYNCB_LAST_USER) - 1) | ||
| 153 | #define ASYNC_USR_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI| \ | ||
| 154 | ASYNC_CALLOUT_NOHUP|ASYNC_SPD_SHI|ASYNC_LOW_LATENCY) | ||
| 155 | #define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI) | ||
| 156 | #define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI) | ||
| 157 | #define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI) | ||
| 158 | |||
| 159 | #define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED) | ||
| 160 | #define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE) | ||
| 161 | #define ASYNC_BOOT_AUTOCONF (1U << ASYNCB_BOOT_AUTOCONF) | ||
| 162 | #define ASYNC_CLOSING (1U << ASYNCB_CLOSING) | ||
| 163 | #define ASYNC_CTS_FLOW (1U << ASYNCB_CTS_FLOW) | ||
| 164 | #define ASYNC_CHECK_CD (1U << ASYNCB_CHECK_CD) | ||
| 165 | #define ASYNC_SHARE_IRQ (1U << ASYNCB_SHARE_IRQ) | ||
| 166 | #define ASYNC_CONS_FLOW (1U << ASYNCB_CONS_FLOW) | ||
| 167 | #define ASYNC_BOOT_ONLYMCA (1U << ASYNCB_BOOT_ONLYMCA) | ||
| 168 | #define ASYNC_INTERNAL_FLAGS (~((1U << ASYNCB_FIRST_KERNEL) - 1)) | ||
| 147 | 169 | ||
| 148 | /* | 170 | /* |
| 149 | * Multiport serial configuration structure --- external structure | 171 | * Multiport serial configuration structure --- external structure |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 57a97e52e58d..6fd80c4243f1 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
| @@ -41,7 +41,8 @@ | |||
| 41 | #define PORT_XSCALE 15 | 41 | #define PORT_XSCALE 15 |
| 42 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ | 42 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ |
| 43 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ | 43 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ |
| 44 | #define PORT_MAX_8250 17 /* max port ID */ | 44 | #define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ |
| 45 | #define PORT_MAX_8250 18 /* max port ID */ | ||
| 45 | 46 | ||
| 46 | /* | 47 | /* |
| 47 | * ARM specific type numbers. These are not currently guaranteed | 48 | * ARM specific type numbers. These are not currently guaranteed |
| @@ -167,6 +168,9 @@ | |||
| 167 | /* MAX3100 */ | 168 | /* MAX3100 */ |
| 168 | #define PORT_MAX3100 86 | 169 | #define PORT_MAX3100 86 |
| 169 | 170 | ||
| 171 | /* Timberdale UART */ | ||
| 172 | #define PORT_TIMBUART 87 | ||
| 173 | |||
| 170 | #ifdef __KERNEL__ | 174 | #ifdef __KERNEL__ |
| 171 | 175 | ||
| 172 | #include <linux/compiler.h> | 176 | #include <linux/compiler.h> |
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 893cc53486bc..1c297ddc9d5a 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h | |||
| @@ -25,8 +25,7 @@ struct plat_sci_port { | |||
| 25 | unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */ | 25 | unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */ |
| 26 | unsigned int type; /* SCI / SCIF / IRDA */ | 26 | unsigned int type; /* SCI / SCIF / IRDA */ |
| 27 | upf_t flags; /* UPF_* flags */ | 27 | upf_t flags; /* UPF_* flags */ |
| 28 | char *clk; /* clock string */ | ||
| 28 | }; | 29 | }; |
| 29 | 30 | ||
| 30 | int early_sci_setup(struct uart_port *port); | ||
| 31 | |||
| 32 | #endif /* __LINUX_SERIAL_SCI_H */ | 31 | #endif /* __LINUX_SERIAL_SCI_H */ |
diff --git a/include/linux/sh_cmt.h b/include/linux/sh_cmt.h deleted file mode 100644 index 68cacde5954f..000000000000 --- a/include/linux/sh_cmt.h +++ /dev/null | |||
| @@ -1,13 +0,0 @@ | |||
| 1 | #ifndef __SH_CMT_H__ | ||
| 2 | #define __SH_CMT_H__ | ||
| 3 | |||
| 4 | struct sh_cmt_config { | ||
| 5 | char *name; | ||
| 6 | unsigned long channel_offset; | ||
| 7 | int timer_bit; | ||
| 8 | char *clk; | ||
| 9 | unsigned long clockevent_rating; | ||
| 10 | unsigned long clocksource_rating; | ||
| 11 | }; | ||
| 12 | |||
| 13 | #endif /* __SH_CMT_H__ */ | ||
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h new file mode 100644 index 000000000000..864bd56bd3b0 --- /dev/null +++ b/include/linux/sh_timer.h | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | #ifndef __SH_TIMER_H__ | ||
| 2 | #define __SH_TIMER_H__ | ||
| 3 | |||
| 4 | struct sh_timer_config { | ||
| 5 | char *name; | ||
| 6 | long channel_offset; | ||
| 7 | int timer_bit; | ||
| 8 | char *clk; | ||
| 9 | unsigned long clockevent_rating; | ||
| 10 | unsigned long clocksource_rating; | ||
| 11 | }; | ||
| 12 | |||
| 13 | #endif /* __SH_TIMER_H__ */ | ||
diff --git a/include/linux/signal.h b/include/linux/signal.h index 84f997f8aa53..c7552836bd95 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
| @@ -235,6 +235,8 @@ static inline int valid_signal(unsigned long sig) | |||
| 235 | extern int next_signal(struct sigpending *pending, sigset_t *mask); | 235 | extern int next_signal(struct sigpending *pending, sigset_t *mask); |
| 236 | extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); | 236 | extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); |
| 237 | extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); | 237 | extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); |
| 238 | extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, | ||
| 239 | siginfo_t *info); | ||
| 238 | extern long do_sigpending(void __user *, unsigned long); | 240 | extern long do_sigpending(void __user *, unsigned long); |
| 239 | extern int sigprocmask(int, sigset_t *, sigset_t *); | 241 | extern int sigprocmask(int, sigset_t *, sigset_t *); |
| 240 | extern int show_unhandled_signals; | 242 | extern int show_unhandled_signals; |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 24c5602bee99..48803064cedf 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -62,6 +62,8 @@ | |||
| 62 | # define SLAB_DEBUG_OBJECTS 0x00000000UL | 62 | # define SLAB_DEBUG_OBJECTS 0x00000000UL |
| 63 | #endif | 63 | #endif |
| 64 | 64 | ||
| 65 | #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ | ||
| 66 | |||
| 65 | /* The following flags affect the page allocator grouping pages by mobility */ | 67 | /* The following flags affect the page allocator grouping pages by mobility */ |
| 66 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ | 68 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ |
| 67 | #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ | 69 | #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ |
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 5ac9b0bcaf9a..713f841ecaa9 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ | 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ |
| 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ | 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ |
| 16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
| 17 | #include <trace/kmemtrace.h> | 17 | #include <linux/kmemtrace.h> |
| 18 | 18 | ||
| 19 | /* Size description struct for general caches. */ | 19 | /* Size description struct for general caches. */ |
| 20 | struct cache_sizes { | 20 | struct cache_sizes { |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 5046f90c1171..be5d40c43bd2 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
| 11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
| 12 | #include <linux/kobject.h> | 12 | #include <linux/kobject.h> |
| 13 | #include <trace/kmemtrace.h> | 13 | #include <linux/kmemtrace.h> |
| 14 | 14 | ||
| 15 | enum stat_item { | 15 | enum stat_item { |
| 16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ | 16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 938234c4a996..d4841ed8215b 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h | |||
| @@ -60,6 +60,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
| 60 | #define __raw_spin_is_locked(lock) ((void)(lock), 0) | 60 | #define __raw_spin_is_locked(lock) ((void)(lock), 0) |
| 61 | /* for sched.c and kernel_lock.c: */ | 61 | /* for sched.c and kernel_lock.c: */ |
| 62 | # define __raw_spin_lock(lock) do { (void)(lock); } while (0) | 62 | # define __raw_spin_lock(lock) do { (void)(lock); } while (0) |
| 63 | # define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) | ||
| 63 | # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) | 64 | # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) |
| 64 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) | 65 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) |
| 65 | #endif /* DEBUG_SPINLOCK */ | 66 | #endif /* DEBUG_SPINLOCK */ |
diff --git a/include/linux/splice.h b/include/linux/splice.h index 5f3faa9d15ae..18e7c7c0cae6 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h | |||
| @@ -11,8 +11,7 @@ | |||
| 11 | #include <linux/pipe_fs_i.h> | 11 | #include <linux/pipe_fs_i.h> |
| 12 | 12 | ||
| 13 | /* | 13 | /* |
| 14 | * splice is tied to pipes as a transport (at least for now), so we'll just | 14 | * Flags passed in from splice/tee/vmsplice |
| 15 | * add the splice flags here. | ||
| 16 | */ | 15 | */ |
| 17 | #define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ | 16 | #define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ |
| 18 | #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ | 17 | #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index ac9ff54f7cb3..cb1a6631b8f4 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
| @@ -29,7 +29,8 @@ extern void *swiotlb_alloc(unsigned order, unsigned long nslabs); | |||
| 29 | 29 | ||
| 30 | extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, | 30 | extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, |
| 31 | phys_addr_t address); | 31 | phys_addr_t address); |
| 32 | extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); | 32 | extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, |
| 33 | dma_addr_t address); | ||
| 33 | 34 | ||
| 34 | extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size); | 35 | extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size); |
| 35 | 36 | ||
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 30520844b8da..c6c84ad8bd71 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -55,6 +55,7 @@ struct compat_timeval; | |||
| 55 | struct robust_list_head; | 55 | struct robust_list_head; |
| 56 | struct getcpu_cache; | 56 | struct getcpu_cache; |
| 57 | struct old_linux_dirent; | 57 | struct old_linux_dirent; |
| 58 | struct perf_counter_attr; | ||
| 58 | 59 | ||
| 59 | #include <linux/types.h> | 60 | #include <linux/types.h> |
| 60 | #include <linux/aio_abi.h> | 61 | #include <linux/aio_abi.h> |
| @@ -755,4 +756,8 @@ asmlinkage long sys_pipe(int __user *); | |||
| 755 | 756 | ||
| 756 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); | 757 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); |
| 757 | 758 | ||
| 759 | |||
| 760 | asmlinkage long sys_perf_counter_open( | ||
| 761 | const struct perf_counter_attr __user *attr_uptr, | ||
| 762 | pid_t pid, int cpu, int group_fd, unsigned long flags); | ||
| 758 | #endif | 763 | #endif |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index e6b820f8b56b..a8cc4e13434c 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
| @@ -21,13 +21,14 @@ struct restart_block { | |||
| 21 | struct { | 21 | struct { |
| 22 | unsigned long arg0, arg1, arg2, arg3; | 22 | unsigned long arg0, arg1, arg2, arg3; |
| 23 | }; | 23 | }; |
| 24 | /* For futex_wait */ | 24 | /* For futex_wait and futex_wait_requeue_pi */ |
| 25 | struct { | 25 | struct { |
| 26 | u32 *uaddr; | 26 | u32 *uaddr; |
| 27 | u32 val; | 27 | u32 val; |
| 28 | u32 flags; | 28 | u32 flags; |
| 29 | u32 bitset; | 29 | u32 bitset; |
| 30 | u64 time; | 30 | u64 time; |
| 31 | u32 *uaddr2; | ||
| 31 | } futex; | 32 | } futex; |
| 32 | /* For nanosleep */ | 33 | /* For nanosleep */ |
| 33 | struct { | 34 | struct { |
diff --git a/include/linux/time.h b/include/linux/time.h index 242f62499bb7..ea16c1a01d51 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
| @@ -113,6 +113,21 @@ struct timespec current_kernel_time(void); | |||
| 113 | #define CURRENT_TIME (current_kernel_time()) | 113 | #define CURRENT_TIME (current_kernel_time()) |
| 114 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) | 114 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
| 115 | 115 | ||
| 116 | /* Some architectures do not supply their own clocksource. | ||
| 117 | * This is mainly the case in architectures that get their | ||
| 118 | * inter-tick times by reading the counter on their interval | ||
| 119 | * timer. Since these timers wrap every tick, they're not really | ||
| 120 | * useful as clocksources. Wrapping them to act like one is possible | ||
| 121 | * but not very efficient. So we provide a callout these arches | ||
| 122 | * can implement for use with the jiffies clocksource to provide | ||
| 123 | * finer then tick granular time. | ||
| 124 | */ | ||
| 125 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET | ||
| 126 | extern u32 arch_gettimeoffset(void); | ||
| 127 | #else | ||
| 128 | static inline u32 arch_gettimeoffset(void) { return 0; } | ||
| 129 | #endif | ||
| 130 | |||
| 116 | extern void do_gettimeofday(struct timeval *tv); | 131 | extern void do_gettimeofday(struct timeval *tv); |
| 117 | extern int do_settimeofday(struct timespec *tv); | 132 | extern int do_settimeofday(struct timespec *tv); |
| 118 | extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); | 133 | extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); |
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h new file mode 100644 index 000000000000..c68bccba2074 --- /dev/null +++ b/include/linux/trace_seq.h | |||
| @@ -0,0 +1,92 @@ | |||
| 1 | #ifndef _LINUX_TRACE_SEQ_H | ||
| 2 | #define _LINUX_TRACE_SEQ_H | ||
| 3 | |||
| 4 | #include <linux/fs.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * Trace sequences are used to allow a function to call several other functions | ||
| 8 | * to create a string of data to use (up to a max of PAGE_SIZE. | ||
| 9 | */ | ||
| 10 | |||
| 11 | struct trace_seq { | ||
| 12 | unsigned char buffer[PAGE_SIZE]; | ||
| 13 | unsigned int len; | ||
| 14 | unsigned int readpos; | ||
| 15 | }; | ||
| 16 | |||
| 17 | static inline void | ||
| 18 | trace_seq_init(struct trace_seq *s) | ||
| 19 | { | ||
| 20 | s->len = 0; | ||
| 21 | s->readpos = 0; | ||
| 22 | } | ||
| 23 | |||
| 24 | /* | ||
| 25 | * Currently only defined when tracing is enabled. | ||
| 26 | */ | ||
| 27 | #ifdef CONFIG_TRACING | ||
| 28 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
| 29 | __attribute__ ((format (printf, 2, 3))); | ||
| 30 | extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) | ||
| 31 | __attribute__ ((format (printf, 2, 0))); | ||
| 32 | extern int | ||
| 33 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); | ||
| 34 | extern void trace_print_seq(struct seq_file *m, struct trace_seq *s); | ||
| 35 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | ||
| 36 | size_t cnt); | ||
| 37 | extern int trace_seq_puts(struct trace_seq *s, const char *str); | ||
| 38 | extern int trace_seq_putc(struct trace_seq *s, unsigned char c); | ||
| 39 | extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len); | ||
| 40 | extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | ||
| 41 | size_t len); | ||
| 42 | extern void *trace_seq_reserve(struct trace_seq *s, size_t len); | ||
| 43 | extern int trace_seq_path(struct trace_seq *s, struct path *path); | ||
| 44 | |||
| 45 | #else /* CONFIG_TRACING */ | ||
| 46 | static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
| 47 | { | ||
| 48 | return 0; | ||
| 49 | } | ||
| 50 | static inline int | ||
| 51 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | ||
| 52 | { | ||
| 53 | return 0; | ||
| 54 | } | ||
| 55 | |||
| 56 | static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s) | ||
| 57 | { | ||
| 58 | } | ||
| 59 | static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | ||
| 60 | size_t cnt) | ||
| 61 | { | ||
| 62 | return 0; | ||
| 63 | } | ||
| 64 | static inline int trace_seq_puts(struct trace_seq *s, const char *str) | ||
| 65 | { | ||
| 66 | return 0; | ||
| 67 | } | ||
| 68 | static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) | ||
| 69 | { | ||
| 70 | return 0; | ||
| 71 | } | ||
| 72 | static inline int | ||
| 73 | trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) | ||
| 74 | { | ||
| 75 | return 0; | ||
| 76 | } | ||
| 77 | static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | ||
| 78 | size_t len) | ||
| 79 | { | ||
| 80 | return 0; | ||
| 81 | } | ||
| 82 | static inline void *trace_seq_reserve(struct trace_seq *s, size_t len) | ||
| 83 | { | ||
| 84 | return NULL; | ||
| 85 | } | ||
| 86 | static inline int trace_seq_path(struct trace_seq *s, struct path *path) | ||
| 87 | { | ||
| 88 | return 0; | ||
| 89 | } | ||
| 90 | #endif /* CONFIG_TRACING */ | ||
| 91 | |||
| 92 | #endif /* _LINUX_TRACE_SEQ_H */ | ||
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index c7aa154f4bfc..eb96603d92db 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h | |||
| @@ -259,14 +259,12 @@ static inline void tracehook_finish_clone(struct task_struct *child, | |||
| 259 | 259 | ||
| 260 | /** | 260 | /** |
| 261 | * tracehook_report_clone - in parent, new child is about to start running | 261 | * tracehook_report_clone - in parent, new child is about to start running |
| 262 | * @trace: return value from tracehook_prepare_clone() | ||
| 263 | * @regs: parent's user register state | 262 | * @regs: parent's user register state |
| 264 | * @clone_flags: flags from parent's system call | 263 | * @clone_flags: flags from parent's system call |
| 265 | * @pid: new child's PID in the parent's namespace | 264 | * @pid: new child's PID in the parent's namespace |
| 266 | * @child: new child task | 265 | * @child: new child task |
| 267 | * | 266 | * |
| 268 | * Called after a child is set up, but before it has been started | 267 | * Called after a child is set up, but before it has been started running. |
| 269 | * running. @trace is the value returned by tracehook_prepare_clone(). | ||
| 270 | * This is not a good place to block, because the child has not started | 268 | * This is not a good place to block, because the child has not started |
| 271 | * yet. Suspend the child here if desired, and then block in | 269 | * yet. Suspend the child here if desired, and then block in |
| 272 | * tracehook_report_clone_complete(). This must prevent the child from | 270 | * tracehook_report_clone_complete(). This must prevent the child from |
| @@ -276,13 +274,14 @@ static inline void tracehook_finish_clone(struct task_struct *child, | |||
| 276 | * | 274 | * |
| 277 | * Called with no locks held, but the child cannot run until this returns. | 275 | * Called with no locks held, but the child cannot run until this returns. |
| 278 | */ | 276 | */ |
| 279 | static inline void tracehook_report_clone(int trace, struct pt_regs *regs, | 277 | static inline void tracehook_report_clone(struct pt_regs *regs, |
| 280 | unsigned long clone_flags, | 278 | unsigned long clone_flags, |
| 281 | pid_t pid, struct task_struct *child) | 279 | pid_t pid, struct task_struct *child) |
| 282 | { | 280 | { |
| 283 | if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) { | 281 | if (unlikely(task_ptrace(child))) { |
| 284 | /* | 282 | /* |
| 285 | * The child starts up with an immediate SIGSTOP. | 283 | * It doesn't matter who attached/attaching to this |
| 284 | * task, the pending SIGSTOP is right in any case. | ||
| 286 | */ | 285 | */ |
| 287 | sigaddset(&child->pending.signal, SIGSTOP); | 286 | sigaddset(&child->pending.signal, SIGSTOP); |
| 288 | set_tsk_thread_flag(child, TIF_SIGPENDING); | 287 | set_tsk_thread_flag(child, TIF_SIGPENDING); |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index d35a7ee7611f..14df7e635d43 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
| @@ -31,6 +31,8 @@ struct tracepoint { | |||
| 31 | * Keep in sync with vmlinux.lds.h. | 31 | * Keep in sync with vmlinux.lds.h. |
| 32 | */ | 32 | */ |
| 33 | 33 | ||
| 34 | #ifndef DECLARE_TRACE | ||
| 35 | |||
| 34 | #define TP_PROTO(args...) args | 36 | #define TP_PROTO(args...) args |
| 35 | #define TP_ARGS(args...) args | 37 | #define TP_ARGS(args...) args |
| 36 | 38 | ||
| @@ -114,6 +116,7 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, | |||
| 114 | struct tracepoint *end) | 116 | struct tracepoint *end) |
| 115 | { } | 117 | { } |
| 116 | #endif /* CONFIG_TRACEPOINTS */ | 118 | #endif /* CONFIG_TRACEPOINTS */ |
| 119 | #endif /* DECLARE_TRACE */ | ||
| 117 | 120 | ||
| 118 | /* | 121 | /* |
| 119 | * Connect a probe to a tracepoint. | 122 | * Connect a probe to a tracepoint. |
| @@ -154,10 +157,8 @@ static inline void tracepoint_synchronize_unregister(void) | |||
| 154 | } | 157 | } |
| 155 | 158 | ||
| 156 | #define PARAMS(args...) args | 159 | #define PARAMS(args...) args |
| 157 | #define TRACE_FORMAT(name, proto, args, fmt) \ | ||
| 158 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | ||
| 159 | |||
| 160 | 160 | ||
| 161 | #ifndef TRACE_EVENT | ||
| 161 | /* | 162 | /* |
| 162 | * For use with the TRACE_EVENT macro: | 163 | * For use with the TRACE_EVENT macro: |
| 163 | * | 164 | * |
| @@ -262,5 +263,6 @@ static inline void tracepoint_synchronize_unregister(void) | |||
| 262 | 263 | ||
| 263 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ | 264 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ |
| 264 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | 265 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) |
| 266 | #endif | ||
| 265 | 267 | ||
| 266 | #endif | 268 | #endif |
diff --git a/include/linux/tty.h b/include/linux/tty.h index fc39db95499f..1488d8c81aac 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
| @@ -185,7 +185,7 @@ struct tty_port; | |||
| 185 | struct tty_port_operations { | 185 | struct tty_port_operations { |
| 186 | /* Return 1 if the carrier is raised */ | 186 | /* Return 1 if the carrier is raised */ |
| 187 | int (*carrier_raised)(struct tty_port *port); | 187 | int (*carrier_raised)(struct tty_port *port); |
| 188 | void (*raise_dtr_rts)(struct tty_port *port); | 188 | void (*dtr_rts)(struct tty_port *port, int raise); |
| 189 | }; | 189 | }; |
| 190 | 190 | ||
| 191 | struct tty_port { | 191 | struct tty_port { |
| @@ -201,6 +201,9 @@ struct tty_port { | |||
| 201 | unsigned char *xmit_buf; /* Optional buffer */ | 201 | unsigned char *xmit_buf; /* Optional buffer */ |
| 202 | int close_delay; /* Close port delay */ | 202 | int close_delay; /* Close port delay */ |
| 203 | int closing_wait; /* Delay for output */ | 203 | int closing_wait; /* Delay for output */ |
| 204 | int drain_delay; /* Set to zero if no pure time | ||
| 205 | based drain is needed else | ||
| 206 | set to size of fifo */ | ||
| 204 | }; | 207 | }; |
| 205 | 208 | ||
| 206 | /* | 209 | /* |
| @@ -223,8 +226,11 @@ struct tty_struct { | |||
| 223 | struct tty_driver *driver; | 226 | struct tty_driver *driver; |
| 224 | const struct tty_operations *ops; | 227 | const struct tty_operations *ops; |
| 225 | int index; | 228 | int index; |
| 226 | /* The ldisc objects are protected by tty_ldisc_lock at the moment */ | 229 | |
| 227 | struct tty_ldisc ldisc; | 230 | /* Protects ldisc changes: Lock tty not pty */ |
| 231 | struct mutex ldisc_mutex; | ||
| 232 | struct tty_ldisc *ldisc; | ||
| 233 | |||
| 228 | struct mutex termios_mutex; | 234 | struct mutex termios_mutex; |
| 229 | spinlock_t ctrl_lock; | 235 | spinlock_t ctrl_lock; |
| 230 | /* Termios values are protected by the termios mutex */ | 236 | /* Termios values are protected by the termios mutex */ |
| @@ -311,6 +317,7 @@ struct tty_struct { | |||
| 311 | #define TTY_CLOSING 7 /* ->close() in progress */ | 317 | #define TTY_CLOSING 7 /* ->close() in progress */ |
| 312 | #define TTY_LDISC 9 /* Line discipline attached */ | 318 | #define TTY_LDISC 9 /* Line discipline attached */ |
| 313 | #define TTY_LDISC_CHANGING 10 /* Line discipline changing */ | 319 | #define TTY_LDISC_CHANGING 10 /* Line discipline changing */ |
| 320 | #define TTY_LDISC_OPEN 11 /* Line discipline is open */ | ||
| 314 | #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ | 321 | #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ |
| 315 | #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ | 322 | #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ |
| 316 | #define TTY_PTY_LOCK 16 /* pty private */ | 323 | #define TTY_PTY_LOCK 16 /* pty private */ |
| @@ -403,6 +410,7 @@ extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); | |||
| 403 | extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); | 410 | extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); |
| 404 | extern void tty_ldisc_deref(struct tty_ldisc *); | 411 | extern void tty_ldisc_deref(struct tty_ldisc *); |
| 405 | extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); | 412 | extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); |
| 413 | extern void tty_ldisc_hangup(struct tty_struct *tty); | ||
| 406 | extern const struct file_operations tty_ldiscs_proc_fops; | 414 | extern const struct file_operations tty_ldiscs_proc_fops; |
| 407 | 415 | ||
| 408 | extern void tty_wakeup(struct tty_struct *tty); | 416 | extern void tty_wakeup(struct tty_struct *tty); |
| @@ -425,6 +433,9 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx, | |||
| 425 | extern void tty_release_dev(struct file *filp); | 433 | extern void tty_release_dev(struct file *filp); |
| 426 | extern int tty_init_termios(struct tty_struct *tty); | 434 | extern int tty_init_termios(struct tty_struct *tty); |
| 427 | 435 | ||
| 436 | extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); | ||
| 437 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); | ||
| 438 | |||
| 428 | extern struct mutex tty_mutex; | 439 | extern struct mutex tty_mutex; |
| 429 | 440 | ||
| 430 | extern void tty_write_unlock(struct tty_struct *tty); | 441 | extern void tty_write_unlock(struct tty_struct *tty); |
| @@ -438,6 +449,7 @@ extern struct tty_struct *tty_port_tty_get(struct tty_port *port); | |||
| 438 | extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); | 449 | extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); |
| 439 | extern int tty_port_carrier_raised(struct tty_port *port); | 450 | extern int tty_port_carrier_raised(struct tty_port *port); |
| 440 | extern void tty_port_raise_dtr_rts(struct tty_port *port); | 451 | extern void tty_port_raise_dtr_rts(struct tty_port *port); |
| 452 | extern void tty_port_lower_dtr_rts(struct tty_port *port); | ||
| 441 | extern void tty_port_hangup(struct tty_port *port); | 453 | extern void tty_port_hangup(struct tty_port *port); |
| 442 | extern int tty_port_block_til_ready(struct tty_port *port, | 454 | extern int tty_port_block_til_ready(struct tty_port *port, |
| 443 | struct tty_struct *tty, struct file *filp); | 455 | struct tty_struct *tty, struct file *filp); |
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index bcba84ea2d86..3566129384a4 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
| @@ -127,7 +127,8 @@ | |||
| 127 | * the line discipline are close to full, and it should somehow | 127 | * the line discipline are close to full, and it should somehow |
| 128 | * signal that no more characters should be sent to the tty. | 128 | * signal that no more characters should be sent to the tty. |
| 129 | * | 129 | * |
| 130 | * Optional: Always invoke via tty_throttle(); | 130 | * Optional: Always invoke via tty_throttle(), called under the |
| 131 | * termios lock. | ||
| 131 | * | 132 | * |
| 132 | * void (*unthrottle)(struct tty_struct * tty); | 133 | * void (*unthrottle)(struct tty_struct * tty); |
| 133 | * | 134 | * |
| @@ -135,7 +136,8 @@ | |||
| 135 | * that characters can now be sent to the tty without fear of | 136 | * that characters can now be sent to the tty without fear of |
| 136 | * overrunning the input buffers of the line disciplines. | 137 | * overrunning the input buffers of the line disciplines. |
| 137 | * | 138 | * |
| 138 | * Optional: Always invoke via tty_unthrottle(); | 139 | * Optional: Always invoke via tty_unthrottle(), called under the |
| 140 | * termios lock. | ||
| 139 | * | 141 | * |
| 140 | * void (*stop)(struct tty_struct *tty); | 142 | * void (*stop)(struct tty_struct *tty); |
| 141 | * | 143 | * |
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 625e9e4639c6..8cdfed738fe4 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
| @@ -224,8 +224,7 @@ struct usb_serial_driver { | |||
| 224 | /* Called by console with tty = NULL and by tty */ | 224 | /* Called by console with tty = NULL and by tty */ |
| 225 | int (*open)(struct tty_struct *tty, | 225 | int (*open)(struct tty_struct *tty, |
| 226 | struct usb_serial_port *port, struct file *filp); | 226 | struct usb_serial_port *port, struct file *filp); |
| 227 | void (*close)(struct tty_struct *tty, | 227 | void (*close)(struct usb_serial_port *port); |
| 228 | struct usb_serial_port *port, struct file *filp); | ||
| 229 | int (*write)(struct tty_struct *tty, struct usb_serial_port *port, | 228 | int (*write)(struct tty_struct *tty, struct usb_serial_port *port, |
| 230 | const unsigned char *buf, int count); | 229 | const unsigned char *buf, int count); |
| 231 | /* Called only by the tty layer */ | 230 | /* Called only by the tty layer */ |
| @@ -241,6 +240,10 @@ struct usb_serial_driver { | |||
| 241 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 240 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
| 242 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 241 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
| 243 | unsigned int set, unsigned int clear); | 242 | unsigned int set, unsigned int clear); |
| 243 | /* Called by the tty layer for port level work. There may or may not | ||
| 244 | be an attached tty at this point */ | ||
| 245 | void (*dtr_rts)(struct usb_serial_port *port, int on); | ||
| 246 | int (*carrier_raised)(struct usb_serial_port *port); | ||
| 244 | /* USB events */ | 247 | /* USB events */ |
| 245 | void (*read_int_callback)(struct urb *urb); | 248 | void (*read_int_callback)(struct urb *urb); |
| 246 | void (*write_int_callback)(struct urb *urb); | 249 | void (*write_int_callback)(struct urb *urb); |
| @@ -283,8 +286,7 @@ extern int usb_serial_generic_open(struct tty_struct *tty, | |||
| 283 | struct usb_serial_port *port, struct file *filp); | 286 | struct usb_serial_port *port, struct file *filp); |
| 284 | extern int usb_serial_generic_write(struct tty_struct *tty, | 287 | extern int usb_serial_generic_write(struct tty_struct *tty, |
| 285 | struct usb_serial_port *port, const unsigned char *buf, int count); | 288 | struct usb_serial_port *port, const unsigned char *buf, int count); |
| 286 | extern void usb_serial_generic_close(struct tty_struct *tty, | 289 | extern void usb_serial_generic_close(struct usb_serial_port *port); |
| 287 | struct usb_serial_port *port, struct file *filp); | ||
| 288 | extern int usb_serial_generic_resume(struct usb_serial *serial); | 290 | extern int usb_serial_generic_resume(struct usb_serial *serial); |
| 289 | extern int usb_serial_generic_write_room(struct tty_struct *tty); | 291 | extern int usb_serial_generic_write_room(struct tty_struct *tty); |
| 290 | extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); | 292 | extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); |
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index 94c56d29869d..be7d255fc7cf 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h | |||
| @@ -15,6 +15,10 @@ | |||
| 15 | #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ | 15 | #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ |
| 16 | #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ | 16 | #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ |
| 17 | #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ | 17 | #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ |
| 18 | #define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ | ||
| 19 | #define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */ | ||
| 20 | |||
| 21 | #define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ | ||
| 18 | 22 | ||
| 19 | struct virtio_blk_config | 23 | struct virtio_blk_config |
| 20 | { | 24 | { |
| @@ -32,6 +36,7 @@ struct virtio_blk_config | |||
| 32 | } geometry; | 36 | } geometry; |
| 33 | /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ | 37 | /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ |
| 34 | __u32 blk_size; | 38 | __u32 blk_size; |
| 39 | __u8 identify[VIRTIO_BLK_ID_BYTES]; | ||
| 35 | } __attribute__((packed)); | 40 | } __attribute__((packed)); |
| 36 | 41 | ||
| 37 | /* These two define direction. */ | 42 | /* These two define direction. */ |
| @@ -55,6 +60,13 @@ struct virtio_blk_outhdr | |||
| 55 | __u64 sector; | 60 | __u64 sector; |
| 56 | }; | 61 | }; |
| 57 | 62 | ||
| 63 | struct virtio_scsi_inhdr { | ||
| 64 | __u32 errors; | ||
| 65 | __u32 data_len; | ||
| 66 | __u32 sense_len; | ||
| 67 | __u32 residual; | ||
| 68 | }; | ||
| 69 | |||
| 58 | /* And this is the final byte of the write scatter-gather list. */ | 70 | /* And this is the final byte of the write scatter-gather list. */ |
| 59 | #define VIRTIO_BLK_S_OK 0 | 71 | #define VIRTIO_BLK_S_OK 0 |
| 60 | #define VIRTIO_BLK_S_IOERR 1 | 72 | #define VIRTIO_BLK_S_IOERR 1 |
diff --git a/include/linux/wait.h b/include/linux/wait.h index bc024632f365..6788e1a4d4ca 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -132,8 +132,6 @@ static inline void __remove_wait_queue(wait_queue_head_t *head, | |||
| 132 | list_del(&old->task_list); | 132 | list_del(&old->task_list); |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | ||
| 136 | int nr_exclusive, int sync, void *key); | ||
| 137 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 135 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
| 138 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); | 136 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
| 139 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, | 137 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, |
