Diffstat (limited to 'include')
153 files changed, 4973 insertions, 1310 deletions
diff --git a/include/Kbuild b/include/Kbuild
index d8c3e3cbf416..fe36accd4328 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -8,3 +8,4 @@ header-y += mtd/ | |||
8 | header-y += rdma/ | 8 | header-y += rdma/ |
9 | header-y += video/ | 9 | header-y += video/ |
10 | header-y += drm/ | 10 | header-y += drm/ |
11 | header-y += xen/ | ||
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 3673a13b6703..81d3be459efb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -134,7 +134,7 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u) | |||
134 | #define atomic_long_cmpxchg(l, old, new) \ | 134 | #define atomic_long_cmpxchg(l, old, new) \ |
135 | (atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) | 135 | (atomic64_cmpxchg((atomic64_t *)(l), (old), (new))) |
136 | #define atomic_long_xchg(v, new) \ | 136 | #define atomic_long_xchg(v, new) \ |
137 | (atomic64_xchg((atomic64_t *)(l), (new))) | 137 | (atomic64_xchg((atomic64_t *)(v), (new))) |
138 | 138 | ||
139 | #else /* BITS_PER_LONG == 64 */ | 139 | #else /* BITS_PER_LONG == 64 */ |
140 | 140 | ||
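For illustration only (not part of the patch): the old macro body referenced 'l', which is not one of its parameters, so any caller that did not happen to have a variable named 'l' in scope failed to compile. With the body now using its own parameter 'v', a plain call works as expected; example_swap below is a hypothetical caller assuming kernel context.

static long example_swap(atomic_long_t *counter)
{
	/* atomically store 1 and return the previous value */
	return atomic_long_xchg(counter, 1UL);
}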
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8e6d0ca70aba..e410f602cab1 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -280,17 +280,18 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, | |||
280 | #endif | 280 | #endif |
281 | 281 | ||
282 | /* | 282 | /* |
283 | * A facility to provide batching of the reload of page tables with the | 283 | * A facility to provide batching of the reload of page tables and |
284 | * actual context switch code for paravirtualized guests. By convention, | 284 | * other process state with the actual context switch code for |
285 | * only one of the lazy modes (CPU, MMU) should be active at any given | 285 | * paravirtualized guests. By convention, only one of the batched |
286 | * time, entry should never be nested, and entry and exits should always | 286 | * update (lazy) modes (CPU, MMU) should be active at any given time, |
287 | * be paired. This is for sanity of maintaining and reasoning about the | 287 | * entry should never be nested, and entry and exits should always be |
288 | * kernel code. | 288 | * paired. This is for sanity of maintaining and reasoning about the |
289 | * kernel code. In this case, the exit (end of the context switch) is | ||
290 | * in architecture-specific code, and so doesn't need a generic | ||
291 | * definition. | ||
289 | */ | 292 | */ |
290 | #ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE | 293 | #ifndef __HAVE_ARCH_START_CONTEXT_SWITCH |
291 | #define arch_enter_lazy_cpu_mode() do {} while (0) | 294 | #define arch_start_context_switch(prev) do {} while (0) |
292 | #define arch_leave_lazy_cpu_mode() do {} while (0) | ||
293 | #define arch_flush_lazy_cpu_mode() do {} while (0) | ||
294 | #endif | 295 | #endif |
295 | 296 | ||
296 | #ifndef __HAVE_PFNMAP_TRACKING | 297 | #ifndef __HAVE_PFNMAP_TRACKING |
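A hedged sketch of the intended call site (the real caller is the scheduler's context_switch() path, which is not part of this hunk): the architecture hook marks the start of the switch, and because the matching end lives in architecture-specific code, no generic end hook is defined above. Names other than arch_start_context_switch() are illustrative.

static inline void example_context_switch(struct task_struct *prev,
					  struct task_struct *next)
{
	arch_start_context_switch(prev);	/* no-op unless the arch overrides it */
	/* ... switch_mm() and switch_to() follow here ... */
}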
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 89853bcd27a6..f1736ca7922c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -63,7 +63,7 @@ | |||
63 | #define BRANCH_PROFILE() | 63 | #define BRANCH_PROFILE() |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | #ifdef CONFIG_EVENT_TRACER | 66 | #ifdef CONFIG_EVENT_TRACING |
67 | #define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \ | 67 | #define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \ |
68 | *(_ftrace_events) \ | 68 | *(_ftrace_events) \ |
69 | VMLINUX_SYMBOL(__stop_ftrace_events) = .; | 69 | VMLINUX_SYMBOL(__stop_ftrace_events) = .; |
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3c1924c010e8..7300fb866767 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -471,6 +471,9 @@ struct drm_connector { | |||
471 | u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; | 471 | u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY]; |
472 | uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; | 472 | uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY]; |
473 | 473 | ||
474 | /* requested DPMS state */ | ||
475 | int dpms; | ||
476 | |||
474 | void *helper_private; | 477 | void *helper_private; |
475 | 478 | ||
476 | uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; | 479 | uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; |
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index ec073d8288d9..6769ff6c1bc0 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -99,6 +99,8 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, | |||
99 | struct drm_framebuffer *old_fb); | 99 | struct drm_framebuffer *old_fb); |
100 | extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); | 100 | extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); |
101 | 101 | ||
102 | extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); | ||
103 | |||
102 | extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, | 104 | extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, |
103 | struct drm_mode_fb_cmd *mode_cmd); | 105 | struct drm_mode_fb_cmd *mode_cmd); |
104 | 106 | ||
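A minimal sketch, assuming a KMS driver that relies on the helper layer: the driver points its connector's .dpms hook at the new drm_helper_connector_dpms(), which records the requested state in the connector->dpms field added to struct drm_connector above. The surrounding callbacks are typical choices, not something mandated by this patch.

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms		= drm_helper_connector_dpms,	/* tracks connector->dpms */
	.fill_modes	= drm_helper_probe_single_connector_modes,
	.destroy	= drm_connector_cleanup,
};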
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 3f0eaa397ef5..b3afd2219ad2 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -135,6 +135,7 @@ header-y += posix_types.h | |||
135 | header-y += ppdev.h | 135 | header-y += ppdev.h |
136 | header-y += prctl.h | 136 | header-y += prctl.h |
137 | header-y += qnxtypes.h | 137 | header-y += qnxtypes.h |
138 | header-y += qnx4_fs.h | ||
138 | header-y += radeonfb.h | 139 | header-y += radeonfb.h |
139 | header-y += raw.h | 140 | header-y += raw.h |
140 | header-y += resource.h | 141 | header-y += resource.h |
@@ -308,7 +309,6 @@ unifdef-y += poll.h | |||
308 | unifdef-y += ppp_defs.h | 309 | unifdef-y += ppp_defs.h |
309 | unifdef-y += ppp-comp.h | 310 | unifdef-y += ppp-comp.h |
310 | unifdef-y += ptrace.h | 311 | unifdef-y += ptrace.h |
311 | unifdef-y += qnx4_fs.h | ||
312 | unifdef-y += quota.h | 312 | unifdef-y += quota.h |
313 | unifdef-y += random.h | 313 | unifdef-y += random.h |
314 | unifdef-y += irqnr.h | 314 | unifdef-y += irqnr.h |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 88be890ee3c7..51b4b0a5ce8c 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -119,7 +119,7 @@ extern int pci_mmcfg_config_num; | |||
119 | extern int sbf_port; | 119 | extern int sbf_port; |
120 | extern unsigned long acpi_realmode_flags; | 120 | extern unsigned long acpi_realmode_flags; |
121 | 121 | ||
122 | int acpi_register_gsi (u32 gsi, int triggering, int polarity); | 122 | int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity); |
123 | int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); | 123 | int acpi_gsi_to_irq (u32 gsi, unsigned int *irq); |
124 | 124 | ||
125 | #ifdef CONFIG_X86_IO_APIC | 125 | #ifdef CONFIG_X86_IO_APIC |
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 48ee32a18ac5..64a982ea5d5f 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -159,6 +159,7 @@ | |||
159 | #define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) | 159 | #define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS) |
160 | 160 | ||
161 | #ifndef __ASSEMBLY__ | 161 | #ifndef __ASSEMBLY__ |
162 | struct amba_device; /* in uncompress this is included but amba/bus.h is not */ | ||
162 | struct amba_pl010_data { | 163 | struct amba_pl010_data { |
163 | void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl); | 164 | void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl); |
164 | }; | 165 | }; |
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b214fd672a2..12737be58601 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,12 +218,12 @@ struct bio { | |||
218 | #define bio_sectors(bio) ((bio)->bi_size >> 9) | 218 | #define bio_sectors(bio) ((bio)->bi_size >> 9) |
219 | #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) | 219 | #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) |
220 | 220 | ||
221 | static inline unsigned int bio_cur_sectors(struct bio *bio) | 221 | static inline unsigned int bio_cur_bytes(struct bio *bio) |
222 | { | 222 | { |
223 | if (bio->bi_vcnt) | 223 | if (bio->bi_vcnt) |
224 | return bio_iovec(bio)->bv_len >> 9; | 224 | return bio_iovec(bio)->bv_len; |
225 | else /* dataless requests such as discard */ | 225 | else /* dataless requests such as discard */ |
226 | return bio->bi_size >> 9; | 226 | return bio->bi_size; |
227 | } | 227 | } |
228 | 228 | ||
229 | static inline void *bio_data(struct bio *bio) | 229 | static inline void *bio_data(struct bio *bio) |
@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio) | |||
279 | #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ | 279 | #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ |
280 | (((addr1) | (mask)) == (((addr2) - 1) | (mask))) | 280 | (((addr1) | (mask)) == (((addr2) - 1) | (mask))) |
281 | #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ | 281 | #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ |
282 | __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask) | 282 | __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) |
283 | #define BIO_SEG_BOUNDARY(q, b1, b2) \ | 283 | #define BIO_SEG_BOUNDARY(q, b1, b2) \ |
284 | BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2))) | 284 | BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2))) |
285 | 285 | ||
@@ -506,7 +506,7 @@ static inline int bio_has_data(struct bio *bio) | |||
506 | } | 506 | } |
507 | 507 | ||
508 | /* | 508 | /* |
509 | * BIO list managment for use by remapping drivers (e.g. DM or MD). | 509 | * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. |
510 | * | 510 | * |
511 | * A bio_list anchors a singly-linked list of bios chained through the bi_next | 511 | * A bio_list anchors a singly-linked list of bios chained through the bi_next |
512 | * member of the bio. The bio_list also caches the last list member to allow | 512 | * member of the bio. The bio_list also caches the last list member to allow |
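Since bio_cur_sectors() is now bio_cur_bytes() and reports bytes, a caller that still wants 512-byte sectors converts explicitly; a minimal sketch (example_cur_sectors is a hypothetical name):

static unsigned int example_cur_sectors(struct bio *bio)
{
	return bio_cur_bytes(bio) >> 9;		/* bytes -> 512-byte sectors */
}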
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4f71f1a4af7..0b1a6cae9de1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -166,19 +166,9 @@ struct request { | |||
166 | enum rq_cmd_type_bits cmd_type; | 166 | enum rq_cmd_type_bits cmd_type; |
167 | unsigned long atomic_flags; | 167 | unsigned long atomic_flags; |
168 | 168 | ||
169 | /* Maintain bio traversal state for part by part I/O submission. | 169 | /* the following two fields are internal, NEVER access directly */ |
170 | * hard_* are block layer internals, no driver should touch them! | 170 | sector_t __sector; /* sector cursor */ |
171 | */ | 171 | unsigned int __data_len; /* total data len */ |
172 | |||
173 | sector_t sector; /* next sector to submit */ | ||
174 | sector_t hard_sector; /* next sector to complete */ | ||
175 | unsigned long nr_sectors; /* no. of sectors left to submit */ | ||
176 | unsigned long hard_nr_sectors; /* no. of sectors left to complete */ | ||
177 | /* no. of sectors left to submit in the current segment */ | ||
178 | unsigned int current_nr_sectors; | ||
179 | |||
180 | /* no. of sectors left to complete in the current segment */ | ||
181 | unsigned int hard_cur_sectors; | ||
182 | 172 | ||
183 | struct bio *bio; | 173 | struct bio *bio; |
184 | struct bio *biotail; | 174 | struct bio *biotail; |
@@ -211,8 +201,8 @@ struct request { | |||
211 | 201 | ||
212 | unsigned short ioprio; | 202 | unsigned short ioprio; |
213 | 203 | ||
214 | void *special; | 204 | void *special; /* opaque pointer available for LLD use */ |
215 | char *buffer; | 205 | char *buffer; /* kaddr of the current segment if available */ |
216 | 206 | ||
217 | int tag; | 207 | int tag; |
218 | int errors; | 208 | int errors; |
@@ -226,10 +216,9 @@ struct request { | |||
226 | unsigned char __cmd[BLK_MAX_CDB]; | 216 | unsigned char __cmd[BLK_MAX_CDB]; |
227 | unsigned char *cmd; | 217 | unsigned char *cmd; |
228 | 218 | ||
229 | unsigned int data_len; | ||
230 | unsigned int extra_len; /* length of alignment and padding */ | 219 | unsigned int extra_len; /* length of alignment and padding */ |
231 | unsigned int sense_len; | 220 | unsigned int sense_len; |
232 | void *data; | 221 | unsigned int resid_len; /* residual count */ |
233 | void *sense; | 222 | void *sense; |
234 | 223 | ||
235 | unsigned long deadline; | 224 | unsigned long deadline; |
@@ -318,6 +307,26 @@ struct blk_cmd_filter { | |||
318 | struct kobject kobj; | 307 | struct kobject kobj; |
319 | }; | 308 | }; |
320 | 309 | ||
310 | struct queue_limits { | ||
311 | unsigned long bounce_pfn; | ||
312 | unsigned long seg_boundary_mask; | ||
313 | |||
314 | unsigned int max_hw_sectors; | ||
315 | unsigned int max_sectors; | ||
316 | unsigned int max_segment_size; | ||
317 | unsigned int physical_block_size; | ||
318 | unsigned int alignment_offset; | ||
319 | unsigned int io_min; | ||
320 | unsigned int io_opt; | ||
321 | |||
322 | unsigned short logical_block_size; | ||
323 | unsigned short max_hw_segments; | ||
324 | unsigned short max_phys_segments; | ||
325 | |||
326 | unsigned char misaligned; | ||
327 | unsigned char no_cluster; | ||
328 | }; | ||
329 | |||
321 | struct request_queue | 330 | struct request_queue |
322 | { | 331 | { |
323 | /* | 332 | /* |
@@ -369,7 +378,6 @@ struct request_queue | |||
369 | /* | 378 | /* |
370 | * queue needs bounce pages for pages above this limit | 379 | * queue needs bounce pages for pages above this limit |
371 | */ | 380 | */ |
372 | unsigned long bounce_pfn; | ||
373 | gfp_t bounce_gfp; | 381 | gfp_t bounce_gfp; |
374 | 382 | ||
375 | /* | 383 | /* |
@@ -398,14 +406,6 @@ struct request_queue | |||
398 | unsigned int nr_congestion_off; | 406 | unsigned int nr_congestion_off; |
399 | unsigned int nr_batching; | 407 | unsigned int nr_batching; |
400 | 408 | ||
401 | unsigned int max_sectors; | ||
402 | unsigned int max_hw_sectors; | ||
403 | unsigned short max_phys_segments; | ||
404 | unsigned short max_hw_segments; | ||
405 | unsigned short hardsect_size; | ||
406 | unsigned int max_segment_size; | ||
407 | |||
408 | unsigned long seg_boundary_mask; | ||
409 | void *dma_drain_buffer; | 409 | void *dma_drain_buffer; |
410 | unsigned int dma_drain_size; | 410 | unsigned int dma_drain_size; |
411 | unsigned int dma_pad_mask; | 411 | unsigned int dma_pad_mask; |
@@ -415,12 +415,14 @@ struct request_queue | |||
415 | struct list_head tag_busy_list; | 415 | struct list_head tag_busy_list; |
416 | 416 | ||
417 | unsigned int nr_sorted; | 417 | unsigned int nr_sorted; |
418 | unsigned int in_flight; | 418 | unsigned int in_flight[2]; |
419 | 419 | ||
420 | unsigned int rq_timeout; | 420 | unsigned int rq_timeout; |
421 | struct timer_list timeout; | 421 | struct timer_list timeout; |
422 | struct list_head timeout_list; | 422 | struct list_head timeout_list; |
423 | 423 | ||
424 | struct queue_limits limits; | ||
425 | |||
424 | /* | 426 | /* |
425 | * sg stuff | 427 | * sg stuff |
426 | */ | 428 | */ |
@@ -522,6 +524,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag, | |||
522 | __clear_bit(flag, &q->queue_flags); | 524 | __clear_bit(flag, &q->queue_flags); |
523 | } | 525 | } |
524 | 526 | ||
527 | static inline int queue_in_flight(struct request_queue *q) | ||
528 | { | ||
529 | return q->in_flight[0] + q->in_flight[1]; | ||
530 | } | ||
531 | |||
525 | static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | 532 | static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) |
526 | { | 533 | { |
527 | WARN_ON_ONCE(!queue_is_locked(q)); | 534 | WARN_ON_ONCE(!queue_is_locked(q)); |
@@ -752,10 +759,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq); | |||
752 | extern void blk_put_request(struct request *); | 759 | extern void blk_put_request(struct request *); |
753 | extern void __blk_put_request(struct request_queue *, struct request *); | 760 | extern void __blk_put_request(struct request_queue *, struct request *); |
754 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); | 761 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); |
762 | extern struct request *blk_make_request(struct request_queue *, struct bio *, | ||
763 | gfp_t); | ||
755 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); | 764 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); |
756 | extern void blk_requeue_request(struct request_queue *, struct request *); | 765 | extern void blk_requeue_request(struct request_queue *, struct request *); |
757 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); | 766 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); |
758 | extern int blk_lld_busy(struct request_queue *q); | 767 | extern int blk_lld_busy(struct request_queue *q); |
768 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | ||
769 | struct bio_set *bs, gfp_t gfp_mask, | ||
770 | int (*bio_ctr)(struct bio *, struct bio *, void *), | ||
771 | void *data); | ||
772 | extern void blk_rq_unprep_clone(struct request *rq); | ||
759 | extern int blk_insert_cloned_request(struct request_queue *q, | 773 | extern int blk_insert_cloned_request(struct request_queue *q, |
760 | struct request *rq); | 774 | struct request *rq); |
761 | extern void blk_plug_device(struct request_queue *); | 775 | extern void blk_plug_device(struct request_queue *); |
@@ -768,12 +782,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, | |||
768 | struct scsi_ioctl_command __user *); | 782 | struct scsi_ioctl_command __user *); |
769 | 783 | ||
770 | /* | 784 | /* |
771 | * Temporary export, until SCSI gets fixed up. | ||
772 | */ | ||
773 | extern int blk_rq_append_bio(struct request_queue *q, struct request *rq, | ||
774 | struct bio *bio); | ||
775 | |||
776 | /* | ||
777 | * A queue has just exitted congestion. Note this in the global counter of | 785 | * A queue has just exitted congestion. Note this in the global counter of |
778 | * congested queues, and wake up anyone who was waiting for requests to be | 786 | * congested queues, and wake up anyone who was waiting for requests to be |
779 | * put back. | 787 | * put back. |
@@ -798,7 +806,6 @@ extern void blk_sync_queue(struct request_queue *q); | |||
798 | extern void __blk_stop_queue(struct request_queue *q); | 806 | extern void __blk_stop_queue(struct request_queue *q); |
799 | extern void __blk_run_queue(struct request_queue *); | 807 | extern void __blk_run_queue(struct request_queue *); |
800 | extern void blk_run_queue(struct request_queue *); | 808 | extern void blk_run_queue(struct request_queue *); |
801 | extern void blk_start_queueing(struct request_queue *); | ||
802 | extern int blk_rq_map_user(struct request_queue *, struct request *, | 809 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
803 | struct rq_map_data *, void __user *, unsigned long, | 810 | struct rq_map_data *, void __user *, unsigned long, |
804 | gfp_t); | 811 | gfp_t); |
@@ -831,41 +838,73 @@ static inline void blk_run_address_space(struct address_space *mapping) | |||
831 | blk_run_backing_dev(mapping->backing_dev_info, NULL); | 838 | blk_run_backing_dev(mapping->backing_dev_info, NULL); |
832 | } | 839 | } |
833 | 840 | ||
834 | extern void blkdev_dequeue_request(struct request *req); | 841 | /* |
842 | * blk_rq_pos() : the current sector | ||
843 | * blk_rq_bytes() : bytes left in the entire request | ||
844 | * blk_rq_cur_bytes() : bytes left in the current segment | ||
845 | * blk_rq_sectors() : sectors left in the entire request | ||
846 | * blk_rq_cur_sectors() : sectors left in the current segment | ||
847 | */ | ||
848 | static inline sector_t blk_rq_pos(const struct request *rq) | ||
849 | { | ||
850 | return rq->__sector; | ||
851 | } | ||
852 | |||
853 | static inline unsigned int blk_rq_bytes(const struct request *rq) | ||
854 | { | ||
855 | return rq->__data_len; | ||
856 | } | ||
857 | |||
858 | static inline int blk_rq_cur_bytes(const struct request *rq) | ||
859 | { | ||
860 | return rq->bio ? bio_cur_bytes(rq->bio) : 0; | ||
861 | } | ||
862 | |||
863 | static inline unsigned int blk_rq_sectors(const struct request *rq) | ||
864 | { | ||
865 | return blk_rq_bytes(rq) >> 9; | ||
866 | } | ||
867 | |||
868 | static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | ||
869 | { | ||
870 | return blk_rq_cur_bytes(rq) >> 9; | ||
871 | } | ||
872 | |||
873 | /* | ||
874 | * Request issue related functions. | ||
875 | */ | ||
876 | extern struct request *blk_peek_request(struct request_queue *q); | ||
877 | extern void blk_start_request(struct request *rq); | ||
878 | extern struct request *blk_fetch_request(struct request_queue *q); | ||
835 | 879 | ||
836 | /* | 880 | /* |
837 | * blk_end_request() and friends. | 881 | * Request completion related functions. |
838 | * __blk_end_request() and end_request() must be called with | 882 | * |
839 | * the request queue spinlock acquired. | 883 | * blk_update_request() completes given number of bytes and updates |
884 | * the request without completing it. | ||
885 | * | ||
886 | * blk_end_request() and friends. __blk_end_request() must be called | ||
887 | * with the request queue spinlock acquired. | ||
840 | * | 888 | * |
841 | * Several drivers define their own end_request and call | 889 | * Several drivers define their own end_request and call |
842 | * blk_end_request() for parts of the original function. | 890 | * blk_end_request() for parts of the original function. |
843 | * This prevents code duplication in drivers. | 891 | * This prevents code duplication in drivers. |
844 | */ | 892 | */ |
845 | extern int blk_end_request(struct request *rq, int error, | 893 | extern bool blk_update_request(struct request *rq, int error, |
846 | unsigned int nr_bytes); | 894 | unsigned int nr_bytes); |
847 | extern int __blk_end_request(struct request *rq, int error, | 895 | extern bool blk_end_request(struct request *rq, int error, |
848 | unsigned int nr_bytes); | 896 | unsigned int nr_bytes); |
849 | extern int blk_end_bidi_request(struct request *rq, int error, | 897 | extern void blk_end_request_all(struct request *rq, int error); |
850 | unsigned int nr_bytes, unsigned int bidi_bytes); | 898 | extern bool blk_end_request_cur(struct request *rq, int error); |
851 | extern void end_request(struct request *, int); | 899 | extern bool __blk_end_request(struct request *rq, int error, |
852 | extern int blk_end_request_callback(struct request *rq, int error, | 900 | unsigned int nr_bytes); |
853 | unsigned int nr_bytes, | 901 | extern void __blk_end_request_all(struct request *rq, int error); |
854 | int (drv_callback)(struct request *)); | 902 | extern bool __blk_end_request_cur(struct request *rq, int error); |
903 | |||
855 | extern void blk_complete_request(struct request *); | 904 | extern void blk_complete_request(struct request *); |
856 | extern void __blk_complete_request(struct request *); | 905 | extern void __blk_complete_request(struct request *); |
857 | extern void blk_abort_request(struct request *); | 906 | extern void blk_abort_request(struct request *); |
858 | extern void blk_abort_queue(struct request_queue *); | 907 | extern void blk_abort_queue(struct request_queue *); |
859 | extern void blk_update_request(struct request *rq, int error, | ||
860 | unsigned int nr_bytes); | ||
861 | |||
862 | /* | ||
863 | * blk_end_request() takes bytes instead of sectors as a complete size. | ||
864 | * blk_rq_bytes() returns bytes left to complete in the entire request. | ||
865 | * blk_rq_cur_bytes() returns bytes left to complete in the current segment. | ||
866 | */ | ||
867 | extern unsigned int blk_rq_bytes(struct request *rq); | ||
868 | extern unsigned int blk_rq_cur_bytes(struct request *rq); | ||
869 | 908 | ||
870 | /* | 909 | /* |
871 | * Access functions for manipulating queue properties | 910 | * Access functions for manipulating queue properties |
@@ -877,10 +916,20 @@ extern void blk_cleanup_queue(struct request_queue *); | |||
877 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | 916 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); |
878 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 917 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
879 | extern void blk_queue_max_sectors(struct request_queue *, unsigned int); | 918 | extern void blk_queue_max_sectors(struct request_queue *, unsigned int); |
919 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | ||
880 | extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); | 920 | extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); |
881 | extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); | 921 | extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); |
882 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 922 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
883 | extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); | 923 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
924 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); | ||
925 | extern void blk_queue_alignment_offset(struct request_queue *q, | ||
926 | unsigned int alignment); | ||
927 | extern void blk_queue_io_min(struct request_queue *q, unsigned int min); | ||
928 | extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); | ||
929 | extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, | ||
930 | sector_t offset); | ||
931 | extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, | ||
932 | sector_t offset); | ||
884 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); | 933 | extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); |
885 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); | 934 | extern void blk_queue_dma_pad(struct request_queue *, unsigned int); |
886 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); | 935 | extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); |
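A short sketch of a driver advertising I/O topology with the new setters, e.g. for a disk with 512-byte logical and 4KiB physical sectors (values illustrative, example_set_topology is hypothetical):

static void example_set_topology(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);	/* unit of addressing */
	blk_queue_physical_block_size(q, 4096);	/* smallest unit the device writes internally */
	blk_queue_alignment_offset(q, 0);	/* LBA 0 is aligned to a physical sector */
	blk_queue_io_min(q, 4096);		/* minimum I/O that avoids read-modify-write */
	blk_queue_io_opt(q, 64 * 1024);		/* preferred I/O granularity */
}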
@@ -967,19 +1016,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter); | |||
967 | 1016 | ||
968 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) | 1017 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) |
969 | 1018 | ||
970 | static inline int queue_hardsect_size(struct request_queue *q) | 1019 | static inline unsigned long queue_bounce_pfn(struct request_queue *q) |
1020 | { | ||
1021 | return q->limits.bounce_pfn; | ||
1022 | } | ||
1023 | |||
1024 | static inline unsigned long queue_segment_boundary(struct request_queue *q) | ||
1025 | { | ||
1026 | return q->limits.seg_boundary_mask; | ||
1027 | } | ||
1028 | |||
1029 | static inline unsigned int queue_max_sectors(struct request_queue *q) | ||
1030 | { | ||
1031 | return q->limits.max_sectors; | ||
1032 | } | ||
1033 | |||
1034 | static inline unsigned int queue_max_hw_sectors(struct request_queue *q) | ||
1035 | { | ||
1036 | return q->limits.max_hw_sectors; | ||
1037 | } | ||
1038 | |||
1039 | static inline unsigned short queue_max_hw_segments(struct request_queue *q) | ||
1040 | { | ||
1041 | return q->limits.max_hw_segments; | ||
1042 | } | ||
1043 | |||
1044 | static inline unsigned short queue_max_phys_segments(struct request_queue *q) | ||
1045 | { | ||
1046 | return q->limits.max_phys_segments; | ||
1047 | } | ||
1048 | |||
1049 | static inline unsigned int queue_max_segment_size(struct request_queue *q) | ||
1050 | { | ||
1051 | return q->limits.max_segment_size; | ||
1052 | } | ||
1053 | |||
1054 | static inline unsigned short queue_logical_block_size(struct request_queue *q) | ||
971 | { | 1055 | { |
972 | int retval = 512; | 1056 | int retval = 512; |
973 | 1057 | ||
974 | if (q && q->hardsect_size) | 1058 | if (q && q->limits.logical_block_size) |
975 | retval = q->hardsect_size; | 1059 | retval = q->limits.logical_block_size; |
976 | 1060 | ||
977 | return retval; | 1061 | return retval; |
978 | } | 1062 | } |
979 | 1063 | ||
980 | static inline int bdev_hardsect_size(struct block_device *bdev) | 1064 | static inline unsigned short bdev_logical_block_size(struct block_device *bdev) |
1065 | { | ||
1066 | return queue_logical_block_size(bdev_get_queue(bdev)); | ||
1067 | } | ||
1068 | |||
1069 | static inline unsigned int queue_physical_block_size(struct request_queue *q) | ||
1070 | { | ||
1071 | return q->limits.physical_block_size; | ||
1072 | } | ||
1073 | |||
1074 | static inline unsigned int queue_io_min(struct request_queue *q) | ||
1075 | { | ||
1076 | return q->limits.io_min; | ||
1077 | } | ||
1078 | |||
1079 | static inline unsigned int queue_io_opt(struct request_queue *q) | ||
1080 | { | ||
1081 | return q->limits.io_opt; | ||
1082 | } | ||
1083 | |||
1084 | static inline int queue_alignment_offset(struct request_queue *q) | ||
1085 | { | ||
1086 | if (q && q->limits.misaligned) | ||
1087 | return -1; | ||
1088 | |||
1089 | if (q && q->limits.alignment_offset) | ||
1090 | return q->limits.alignment_offset; | ||
1091 | |||
1092 | return 0; | ||
1093 | } | ||
1094 | |||
1095 | static inline int queue_sector_alignment_offset(struct request_queue *q, | ||
1096 | sector_t sector) | ||
981 | { | 1097 | { |
982 | return queue_hardsect_size(bdev_get_queue(bdev)); | 1098 | return ((sector << 9) - q->limits.alignment_offset) |
1099 | & (q->limits.io_min - 1); | ||
983 | } | 1100 | } |
984 | 1101 | ||
985 | static inline int queue_dma_alignment(struct request_queue *q) | 1102 | static inline int queue_dma_alignment(struct request_queue *q) |
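Callers that used queue_hardsect_size()/bdev_hardsect_size() switch to the *_logical_block_size() helpers; a minimal sketch of a filesystem-style caller (hypothetical name):

static int example_min_blocksize(struct block_device *bdev)
{
	int size = bdev_logical_block_size(bdev);

	return size < 512 ? 512 : size;	/* never go below 512 bytes */
}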
@@ -1109,6 +1226,8 @@ struct block_device_operations { | |||
1109 | int (*direct_access) (struct block_device *, sector_t, | 1226 | int (*direct_access) (struct block_device *, sector_t, |
1110 | void **, unsigned long *); | 1227 | void **, unsigned long *); |
1111 | int (*media_changed) (struct gendisk *); | 1228 | int (*media_changed) (struct gendisk *); |
1229 | unsigned long long (*set_capacity) (struct gendisk *, | ||
1230 | unsigned long long); | ||
1112 | int (*revalidate_disk) (struct gendisk *); | 1231 | int (*revalidate_disk) (struct gendisk *); |
1113 | int (*getgeo)(struct block_device *, struct hd_geometry *); | 1232 | int (*getgeo)(struct block_device *, struct hd_geometry *); |
1114 | struct module *owner; | 1233 | struct module *owner; |
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index d960889e92ef..7e4350ece0f8 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -116,9 +116,9 @@ struct blk_io_trace { | |||
116 | * The remap event | 116 | * The remap event |
117 | */ | 117 | */ |
118 | struct blk_io_trace_remap { | 118 | struct blk_io_trace_remap { |
119 | __be32 device; | ||
120 | __be32 device_from; | 119 | __be32 device_from; |
121 | __be64 sector; | 120 | __be32 device_to; |
121 | __be64 sector_from; | ||
122 | }; | 122 | }; |
123 | 123 | ||
124 | enum { | 124 | enum { |
@@ -165,8 +165,9 @@ struct blk_trace { | |||
165 | 165 | ||
166 | extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); | 166 | extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *); |
167 | extern void blk_trace_shutdown(struct request_queue *); | 167 | extern void blk_trace_shutdown(struct request_queue *); |
168 | extern int do_blk_trace_setup(struct request_queue *q, | 168 | extern int do_blk_trace_setup(struct request_queue *q, char *name, |
169 | char *name, dev_t dev, struct blk_user_trace_setup *buts); | 169 | dev_t dev, struct block_device *bdev, |
170 | struct blk_user_trace_setup *buts); | ||
170 | extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); | 171 | extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); |
171 | 172 | ||
172 | /** | 173 | /** |
@@ -193,22 +194,42 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); | |||
193 | extern void blk_add_driver_data(struct request_queue *q, struct request *rq, | 194 | extern void blk_add_driver_data(struct request_queue *q, struct request *rq, |
194 | void *data, size_t len); | 195 | void *data, size_t len); |
195 | extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | 196 | extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, |
197 | struct block_device *bdev, | ||
196 | char __user *arg); | 198 | char __user *arg); |
197 | extern int blk_trace_startstop(struct request_queue *q, int start); | 199 | extern int blk_trace_startstop(struct request_queue *q, int start); |
198 | extern int blk_trace_remove(struct request_queue *q); | 200 | extern int blk_trace_remove(struct request_queue *q); |
201 | extern int blk_trace_init_sysfs(struct device *dev); | ||
199 | 202 | ||
200 | extern struct attribute_group blk_trace_attr_group; | 203 | extern struct attribute_group blk_trace_attr_group; |
201 | 204 | ||
202 | #else /* !CONFIG_BLK_DEV_IO_TRACE */ | 205 | #else /* !CONFIG_BLK_DEV_IO_TRACE */ |
203 | #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) | 206 | # define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) |
204 | #define blk_trace_shutdown(q) do { } while (0) | 207 | # define blk_trace_shutdown(q) do { } while (0) |
205 | #define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY) | 208 | # define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY) |
206 | #define blk_add_driver_data(q, rq, data, len) do {} while (0) | 209 | # define blk_add_driver_data(q, rq, data, len) do {} while (0) |
207 | #define blk_trace_setup(q, name, dev, arg) (-ENOTTY) | 210 | # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) |
208 | #define blk_trace_startstop(q, start) (-ENOTTY) | 211 | # define blk_trace_startstop(q, start) (-ENOTTY) |
209 | #define blk_trace_remove(q) (-ENOTTY) | 212 | # define blk_trace_remove(q) (-ENOTTY) |
210 | #define blk_add_trace_msg(q, fmt, ...) do { } while (0) | 213 | # define blk_add_trace_msg(q, fmt, ...) do { } while (0) |
214 | static inline int blk_trace_init_sysfs(struct device *dev) | ||
215 | { | ||
216 | return 0; | ||
217 | } | ||
211 | 218 | ||
212 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ | 219 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ |
220 | |||
221 | #if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) | ||
222 | |||
223 | static inline int blk_cmd_buf_len(struct request *rq) | ||
224 | { | ||
225 | return blk_pc_request(rq) ? rq->cmd_len * 3 : 1; | ||
226 | } | ||
227 | |||
228 | extern void blk_dump_cmd(char *buf, struct request *rq); | ||
229 | extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); | ||
230 | extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq); | ||
231 | |||
232 | #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ | ||
233 | |||
213 | #endif /* __KERNEL__ */ | 234 | #endif /* __KERNEL__ */ |
214 | #endif | 235 | #endif |
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index fb4591977b03..f389e319a454 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -28,6 +28,8 @@ int cdev_add(struct cdev *, dev_t, unsigned); | |||
28 | 28 | ||
29 | void cdev_del(struct cdev *); | 29 | void cdev_del(struct cdev *); |
30 | 30 | ||
31 | int cdev_index(struct inode *inode); | ||
32 | |||
31 | void cd_forget(struct inode *); | 33 | void cd_forget(struct inode *); |
32 | 34 | ||
33 | extern struct backing_dev_info directly_mappable_cdev_bdi; | 35 | extern struct backing_dev_info directly_mappable_cdev_bdi; |
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 5a40d14daa9f..c56457c8334e 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -288,7 +288,15 @@ static inline cycle_t clocksource_read(struct clocksource *cs) | |||
288 | */ | 288 | */ |
289 | static inline int clocksource_enable(struct clocksource *cs) | 289 | static inline int clocksource_enable(struct clocksource *cs) |
290 | { | 290 | { |
291 | return cs->enable ? cs->enable(cs) : 0; | 291 | int ret = 0; |
292 | |||
293 | if (cs->enable) | ||
294 | ret = cs->enable(cs); | ||
295 | |||
296 | /* save mult_orig on enable */ | ||
297 | cs->mult_orig = cs->mult; | ||
298 | |||
299 | return ret; | ||
292 | } | 300 | } |
293 | 301 | ||
294 | /** | 302 | /** |
diff --git a/include/linux/compat.h b/include/linux/compat.h
index f2ded21f9a3c..af931ee43dd8 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -222,6 +222,8 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); | |||
222 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from); | 222 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from); |
223 | int get_compat_sigevent(struct sigevent *event, | 223 | int get_compat_sigevent(struct sigevent *event, |
224 | const struct compat_sigevent __user *u_event); | 224 | const struct compat_sigevent __user *u_event); |
225 | long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, | ||
226 | struct compat_siginfo __user *uinfo); | ||
225 | 227 | ||
226 | static inline int compat_timeval_compare(struct compat_timeval *lhs, | 228 | static inline int compat_timeval_compare(struct compat_timeval *lhs, |
227 | struct compat_timeval *rhs) | 229 | struct compat_timeval *rhs) |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 37bcb50a4d7c..04fb5135b4e1 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -261,6 +261,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
261 | # define __section(S) __attribute__ ((__section__(#S))) | 261 | # define __section(S) __attribute__ ((__section__(#S))) |
262 | #endif | 262 | #endif |
263 | 263 | ||
264 | /* Are two types/vars the same type (ignoring qualifiers)? */ | ||
265 | #ifndef __same_type | ||
266 | # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) | ||
267 | #endif | ||
268 | |||
264 | /* | 269 | /* |
265 | * Prevent the compiler from merging or refetching accesses. The compiler | 270 | * Prevent the compiler from merging or refetching accesses. The compiler |
266 | * is also forbidden from reordering successive instances of ACCESS_ONCE(), | 271 | * is also forbidden from reordering successive instances of ACCESS_ONCE(), |
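One thing __same_type() enables is a compile-time check that a macro argument is a true array rather than a pointer; the sketch below uses BUILD_BUG_ON_ZERO() from <linux/kernel.h> and hypothetical macro names (an array and a pointer to its first element have different types, whereas a pointer argument matches itself and trips the check):

#define example_must_be_array(a) \
	BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))

#define example_array_size(arr) \
	(sizeof(arr) / sizeof((arr)[0]) + example_must_be_array(arr))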
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 9f315382610b..c5ac87ca7bc6 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1022,6 +1022,8 @@ typedef struct cpumask *cpumask_var_t; | |||
1022 | 1022 | ||
1023 | bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); | 1023 | bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); |
1024 | bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); | 1024 | bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); |
1025 | bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node); | ||
1026 | bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags); | ||
1025 | void alloc_bootmem_cpumask_var(cpumask_var_t *mask); | 1027 | void alloc_bootmem_cpumask_var(cpumask_var_t *mask); |
1026 | void free_cpumask_var(cpumask_var_t mask); | 1028 | void free_cpumask_var(cpumask_var_t mask); |
1027 | void free_bootmem_cpumask_var(cpumask_var_t mask); | 1029 | void free_bootmem_cpumask_var(cpumask_var_t mask); |
@@ -1040,6 +1042,19 @@ static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, | |||
1040 | return true; | 1042 | return true; |
1041 | } | 1043 | } |
1042 | 1044 | ||
1045 | static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) | ||
1046 | { | ||
1047 | cpumask_clear(*mask); | ||
1048 | return true; | ||
1049 | } | ||
1050 | |||
1051 | static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, | ||
1052 | int node) | ||
1053 | { | ||
1054 | cpumask_clear(*mask); | ||
1055 | return true; | ||
1056 | } | ||
1057 | |||
1043 | static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) | 1058 | static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask) |
1044 | { | 1059 | { |
1045 | } | 1060 | } |
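Usage sketch for the new helpers, which replace the common alloc_cpumask_var() + cpumask_clear() pair (example_build_mask is hypothetical; kernel context with GFP_KERNEL assumed):

static int example_build_mask(void)
{
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(0, mask);	/* mask starts out empty */
	/* ... use mask ... */
	free_cpumask_var(mask);
	return 0;
}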
diff --git a/include/linux/cramfs_fs.h b/include/linux/cramfs_fs.h
index 3be4e5a27d82..6fc2bed368b8 100644
--- a/include/linux/cramfs_fs.h
+++ b/include/linux/cramfs_fs.h
@@ -2,9 +2,8 @@ | |||
2 | #define __CRAMFS_H | 2 | #define __CRAMFS_H |
3 | 3 | ||
4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
5 | #include <linux/magic.h> | ||
5 | 6 | ||
6 | #define CRAMFS_MAGIC 0x28cd3d45 /* some random number */ | ||
7 | #define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */ | ||
8 | #define CRAMFS_SIGNATURE "Compressed ROMFS" | 7 | #define CRAMFS_SIGNATURE "Compressed ROMFS" |
9 | 8 | ||
10 | /* | 9 | /* |
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 788850ba4e75..1fbdea4f08eb 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -142,19 +142,6 @@ struct CYZ_BOOT_CTRL { | |||
142 | 142 | ||
143 | 143 | ||
144 | #ifndef DP_WINDOW_SIZE | 144 | #ifndef DP_WINDOW_SIZE |
145 | /* #include "cyclomz.h" */ | ||
146 | /****************** ****************** *******************/ | ||
147 | /* | ||
148 | * The data types defined below are used in all ZFIRM interface | ||
149 | * data structures. They accomodate differences between HW | ||
150 | * architectures and compilers. | ||
151 | */ | ||
152 | |||
153 | typedef __u64 ucdouble; /* 64 bits, unsigned */ | ||
154 | typedef __u32 uclong; /* 32 bits, unsigned */ | ||
155 | typedef __u16 ucshort; /* 16 bits, unsigned */ | ||
156 | typedef __u8 ucchar; /* 8 bits, unsigned */ | ||
157 | |||
158 | /* | 145 | /* |
159 | * Memory Window Sizes | 146 | * Memory Window Sizes |
160 | */ | 147 | */ |
@@ -507,16 +494,20 @@ struct ZFW_CTRL { | |||
507 | 494 | ||
508 | /* Per card data structure */ | 495 | /* Per card data structure */ |
509 | struct cyclades_card { | 496 | struct cyclades_card { |
510 | void __iomem *base_addr; | 497 | void __iomem *base_addr; |
511 | void __iomem *ctl_addr; | 498 | union { |
512 | int irq; | 499 | void __iomem *p9050; |
513 | unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */ | 500 | struct RUNTIME_9060 __iomem *p9060; |
514 | unsigned int first_line; /* minor number of first channel on card */ | 501 | } ctl_addr; |
515 | unsigned int nports; /* Number of ports in the card */ | 502 | int irq; |
516 | int bus_index; /* address shift - 0 for ISA, 1 for PCI */ | 503 | unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */ |
517 | int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ | 504 | unsigned int first_line; /* minor number of first channel on card */ |
518 | spinlock_t card_lock; | 505 | unsigned int nports; /* Number of ports in the card */ |
519 | struct cyclades_port *ports; | 506 | int bus_index; /* address shift - 0 for ISA, 1 for PCI */ |
507 | int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */ | ||
508 | u32 hw_ver; | ||
509 | spinlock_t card_lock; | ||
510 | struct cyclades_port *ports; | ||
520 | }; | 511 | }; |
521 | 512 | ||
522 | /*************************************** | 513 | /*************************************** |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 15156364d196..30b93b2a01a4 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -180,10 +180,12 @@ d_iput: no no no yes | |||
180 | #define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */ | 180 | #define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */ |
181 | #define DCACHE_UNHASHED 0x0010 | 181 | #define DCACHE_UNHASHED 0x0010 |
182 | 182 | ||
183 | #define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched */ | 183 | #define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched by inotify */ |
184 | 184 | ||
185 | #define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */ | 185 | #define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */ |
186 | 186 | ||
187 | #define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */ | ||
188 | |||
187 | extern spinlock_t dcache_lock; | 189 | extern spinlock_t dcache_lock; |
188 | extern seqlock_t rename_lock; | 190 | extern seqlock_t rename_lock; |
189 | 191 | ||
@@ -351,6 +353,11 @@ static inline int d_unhashed(struct dentry *dentry) | |||
351 | return (dentry->d_flags & DCACHE_UNHASHED); | 353 | return (dentry->d_flags & DCACHE_UNHASHED); |
352 | } | 354 | } |
353 | 355 | ||
356 | static inline int d_unlinked(struct dentry *dentry) | ||
357 | { | ||
358 | return d_unhashed(dentry) && !IS_ROOT(dentry); | ||
359 | } | ||
360 | |||
354 | static inline struct dentry *dget_parent(struct dentry *dentry) | 361 | static inline struct dentry *dget_parent(struct dentry *dentry) |
355 | { | 362 | { |
356 | struct dentry *ret; | 363 | struct dentry *ret; |
@@ -368,7 +375,7 @@ static inline int d_mountpoint(struct dentry *dentry) | |||
368 | return dentry->d_mounted; | 375 | return dentry->d_mounted; |
369 | } | 376 | } |
370 | 377 | ||
371 | extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *); | 378 | extern struct vfsmount *lookup_mnt(struct path *); |
372 | extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); | 379 | extern struct dentry *lookup_create(struct nameidata *nd, int is_dir); |
373 | 380 | ||
374 | extern int sysctl_vfs_cache_pressure; | 381 | extern int sysctl_vfs_cache_pressure; |
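d_unlinked() packages a test that callers previously open-coded; a minimal illustration (hypothetical function name):

static int example_is_deleted(struct dentry *dentry)
{
	/* formerly written as: d_unhashed(dentry) && !IS_ROOT(dentry) */
	return d_unlinked(dentry);
}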
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ded2d7c42668..49c2362977fd 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -149,7 +149,7 @@ struct io_restrictions { | |||
149 | unsigned max_hw_sectors; | 149 | unsigned max_hw_sectors; |
150 | unsigned max_sectors; | 150 | unsigned max_sectors; |
151 | unsigned max_segment_size; | 151 | unsigned max_segment_size; |
152 | unsigned short hardsect_size; | 152 | unsigned short logical_block_size; |
153 | unsigned short max_hw_segments; | 153 | unsigned short max_hw_segments; |
154 | unsigned short max_phys_segments; | 154 | unsigned short max_phys_segments; |
155 | unsigned char no_cluster; /* inverted so that 0 is default */ | 155 | unsigned char no_cluster; /* inverted so that 0 is default */ |
diff --git a/include/linux/device.h b/include/linux/device.h
index 5d5c197bad45..a4a7b10aaa48 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -62,8 +62,6 @@ struct bus_type { | |||
62 | void (*shutdown)(struct device *dev); | 62 | void (*shutdown)(struct device *dev); |
63 | 63 | ||
64 | int (*suspend)(struct device *dev, pm_message_t state); | 64 | int (*suspend)(struct device *dev, pm_message_t state); |
65 | int (*suspend_late)(struct device *dev, pm_message_t state); | ||
66 | int (*resume_early)(struct device *dev); | ||
67 | int (*resume)(struct device *dev); | 65 | int (*resume)(struct device *dev); |
68 | 66 | ||
69 | struct dev_pm_ops *pm; | 67 | struct dev_pm_ops *pm; |
@@ -291,9 +289,6 @@ struct device_type { | |||
291 | int (*uevent)(struct device *dev, struct kobj_uevent_env *env); | 289 | int (*uevent)(struct device *dev, struct kobj_uevent_env *env); |
292 | void (*release)(struct device *dev); | 290 | void (*release)(struct device *dev); |
293 | 291 | ||
294 | int (*suspend)(struct device *dev, pm_message_t state); | ||
295 | int (*resume)(struct device *dev); | ||
296 | |||
297 | struct dev_pm_ops *pm; | 292 | struct dev_pm_ops *pm; |
298 | }; | 293 | }; |
299 | 294 | ||
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index b9cd38603fd8..0b3518c42356 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -81,8 +81,8 @@ struct dlm_lksb { | |||
81 | * the cluster, the calling node joins it. | 81 | * the cluster, the calling node joins it. |
82 | */ | 82 | */ |
83 | 83 | ||
84 | int dlm_new_lockspace(char *name, int namelen, dlm_lockspace_t **lockspace, | 84 | int dlm_new_lockspace(const char *name, int namelen, |
85 | uint32_t flags, int lvblen); | 85 | dlm_lockspace_t **lockspace, uint32_t flags, int lvblen); |
86 | 86 | ||
87 | /* | 87 | /* |
88 | * dlm_release_lockspace | 88 | * dlm_release_lockspace |
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index 28d53cb7b5a2..171ad8aedc83 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -32,6 +32,8 @@ extern void dma_debug_add_bus(struct bus_type *bus); | |||
32 | 32 | ||
33 | extern void dma_debug_init(u32 num_entries); | 33 | extern void dma_debug_init(u32 num_entries); |
34 | 34 | ||
35 | extern int dma_debug_resize_entries(u32 num_entries); | ||
36 | |||
35 | extern void debug_dma_map_page(struct device *dev, struct page *page, | 37 | extern void debug_dma_map_page(struct device *dev, struct page *page, |
36 | size_t offset, size_t size, | 38 | size_t offset, size_t size, |
37 | int direction, dma_addr_t dma_addr, | 39 | int direction, dma_addr_t dma_addr, |
@@ -91,6 +93,11 @@ static inline void dma_debug_init(u32 num_entries) | |||
91 | { | 93 | { |
92 | } | 94 | } |
93 | 95 | ||
96 | static inline int dma_debug_resize_entries(u32 num_entries) | ||
97 | { | ||
98 | return 0; | ||
99 | } | ||
100 | |||
94 | static inline void debug_dma_map_page(struct device *dev, struct page *page, | 101 | static inline void debug_dma_map_page(struct device *dev, struct page *page, |
95 | size_t offset, size_t size, | 102 | size_t offset, size_t size, |
96 | int direction, dma_addr_t dma_addr, | 103 | int direction, dma_addr_t dma_addr, |
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e397dc342cda..10ff5c498824 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -108,6 +108,7 @@ struct irte { | |||
108 | }; | 108 | }; |
109 | #ifdef CONFIG_INTR_REMAP | 109 | #ifdef CONFIG_INTR_REMAP |
110 | extern int intr_remapping_enabled; | 110 | extern int intr_remapping_enabled; |
111 | extern int intr_remapping_supported(void); | ||
111 | extern int enable_intr_remapping(int); | 112 | extern int enable_intr_remapping(int); |
112 | extern void disable_intr_remapping(void); | 113 | extern void disable_intr_remapping(void); |
113 | extern int reenable_intr_remapping(int); | 114 | extern int reenable_intr_remapping(int); |
@@ -157,6 +158,8 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic) | |||
157 | } | 158 | } |
158 | #define irq_remapped(irq) (0) | 159 | #define irq_remapped(irq) (0) |
159 | #define enable_intr_remapping(mode) (-1) | 160 | #define enable_intr_remapping(mode) (-1) |
161 | #define disable_intr_remapping() (0) | ||
162 | #define reenable_intr_remapping(mode) (0) | ||
160 | #define intr_remapping_enabled (0) | 163 | #define intr_remapping_enabled (0) |
161 | #endif | 164 | #endif |
162 | 165 | ||
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index 102a902b4396..ecc06286226d 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | struct dnotify_struct { | 11 | struct dnotify_struct { |
12 | struct dnotify_struct * dn_next; | 12 | struct dnotify_struct * dn_next; |
13 | unsigned long dn_mask; | 13 | __u32 dn_mask; |
14 | int dn_fd; | 14 | int dn_fd; |
15 | struct file * dn_filp; | 15 | struct file * dn_filp; |
16 | fl_owner_t dn_owner; | 16 | fl_owner_t dn_owner; |
@@ -21,23 +21,18 @@ struct dnotify_struct { | |||
21 | 21 | ||
22 | #ifdef CONFIG_DNOTIFY | 22 | #ifdef CONFIG_DNOTIFY |
23 | 23 | ||
24 | extern void __inode_dir_notify(struct inode *, unsigned long); | 24 | #define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\ |
25 | FS_MODIFY | FS_MODIFY_CHILD |\ | ||
26 | FS_ACCESS | FS_ACCESS_CHILD |\ | ||
27 | FS_ATTRIB | FS_ATTRIB_CHILD |\ | ||
28 | FS_CREATE | FS_DN_RENAME |\ | ||
29 | FS_MOVED_FROM | FS_MOVED_TO) | ||
30 | |||
25 | extern void dnotify_flush(struct file *, fl_owner_t); | 31 | extern void dnotify_flush(struct file *, fl_owner_t); |
26 | extern int fcntl_dirnotify(int, struct file *, unsigned long); | 32 | extern int fcntl_dirnotify(int, struct file *, unsigned long); |
27 | extern void dnotify_parent(struct dentry *, unsigned long); | ||
28 | |||
29 | static inline void inode_dir_notify(struct inode *inode, unsigned long event) | ||
30 | { | ||
31 | if (inode->i_dnotify_mask & (event)) | ||
32 | __inode_dir_notify(inode, event); | ||
33 | } | ||
34 | 33 | ||
35 | #else | 34 | #else |
36 | 35 | ||
37 | static inline void __inode_dir_notify(struct inode *inode, unsigned long event) | ||
38 | { | ||
39 | } | ||
40 | |||
41 | static inline void dnotify_flush(struct file *filp, fl_owner_t id) | 36 | static inline void dnotify_flush(struct file *filp, fl_owner_t id) |
42 | { | 37 | { |
43 | } | 38 | } |
@@ -47,14 +42,6 @@ static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg) | |||
47 | return -EINVAL; | 42 | return -EINVAL; |
48 | } | 43 | } |
49 | 44 | ||
50 | static inline void dnotify_parent(struct dentry *dentry, unsigned long event) | ||
51 | { | ||
52 | } | ||
53 | |||
54 | static inline void inode_dir_notify(struct inode *inode, unsigned long event) | ||
55 | { | ||
56 | } | ||
57 | |||
58 | #endif /* CONFIG_DNOTIFY */ | 45 | #endif /* CONFIG_DNOTIFY */ |
59 | 46 | ||
60 | #endif /* __KERNEL __ */ | 47 | #endif /* __KERNEL __ */ |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c59b769f62b0..1cb3372e65d8 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *); | |||
103 | extern void elv_merge_requests(struct request_queue *, struct request *, | 103 | extern void elv_merge_requests(struct request_queue *, struct request *, |
104 | struct request *); | 104 | struct request *); |
105 | extern void elv_merged_request(struct request_queue *, struct request *, int); | 105 | extern void elv_merged_request(struct request_queue *, struct request *, int); |
106 | extern void elv_dequeue_request(struct request_queue *, struct request *); | ||
107 | extern void elv_requeue_request(struct request_queue *, struct request *); | 106 | extern void elv_requeue_request(struct request_queue *, struct request *); |
108 | extern int elv_queue_empty(struct request_queue *); | 107 | extern int elv_queue_empty(struct request_queue *); |
109 | extern struct request *elv_next_request(struct request_queue *q); | ||
110 | extern struct request *elv_former_request(struct request_queue *, struct request *); | 108 | extern struct request *elv_former_request(struct request_queue *, struct request *); |
111 | extern struct request *elv_latter_request(struct request_queue *, struct request *); | 109 | extern struct request *elv_latter_request(struct request_queue *, struct request *); |
112 | extern int elv_register_queue(struct request_queue *q); | 110 | extern int elv_register_queue(struct request_queue *q); |
@@ -171,7 +169,7 @@ enum { | |||
171 | ELV_MQUEUE_MUST, | 169 | ELV_MQUEUE_MUST, |
172 | }; | 170 | }; |
173 | 171 | ||
174 | #define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) | 172 | #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) |
175 | #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) | 173 | #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) |
176 | 174 | ||
177 | /* | 175 | /* |
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3b534e527e09..ede84fa7da5d 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -729,8 +729,8 @@ struct inode { | |||
729 | struct timespec i_atime; | 729 | struct timespec i_atime; |
730 | struct timespec i_mtime; | 730 | struct timespec i_mtime; |
731 | struct timespec i_ctime; | 731 | struct timespec i_ctime; |
732 | unsigned int i_blkbits; | ||
733 | blkcnt_t i_blocks; | 732 | blkcnt_t i_blocks; |
733 | unsigned int i_blkbits; | ||
734 | unsigned short i_bytes; | 734 | unsigned short i_bytes; |
735 | umode_t i_mode; | 735 | umode_t i_mode; |
736 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ | 736 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ |
@@ -751,13 +751,12 @@ struct inode { | |||
751 | struct block_device *i_bdev; | 751 | struct block_device *i_bdev; |
752 | struct cdev *i_cdev; | 752 | struct cdev *i_cdev; |
753 | }; | 753 | }; |
754 | int i_cindex; | ||
755 | 754 | ||
756 | __u32 i_generation; | 755 | __u32 i_generation; |
757 | 756 | ||
758 | #ifdef CONFIG_DNOTIFY | 757 | #ifdef CONFIG_FSNOTIFY |
759 | unsigned long i_dnotify_mask; /* Directory notify events */ | 758 | __u32 i_fsnotify_mask; /* all events this inode cares about */ |
760 | struct dnotify_struct *i_dnotify; /* for directory notifications */ | 759 | struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */ |
761 | #endif | 760 | #endif |
762 | 761 | ||
763 | #ifdef CONFIG_INOTIFY | 762 | #ifdef CONFIG_INOTIFY |
@@ -1321,7 +1320,7 @@ struct super_block { | |||
1321 | struct rw_semaphore s_umount; | 1320 | struct rw_semaphore s_umount; |
1322 | struct mutex s_lock; | 1321 | struct mutex s_lock; |
1323 | int s_count; | 1322 | int s_count; |
1324 | int s_need_sync_fs; | 1323 | int s_need_sync; |
1325 | atomic_t s_active; | 1324 | atomic_t s_active; |
1326 | #ifdef CONFIG_SECURITY | 1325 | #ifdef CONFIG_SECURITY |
1327 | void *s_security; | 1326 | void *s_security; |
@@ -1372,11 +1371,6 @@ struct super_block { | |||
1372 | * generic_show_options() | 1371 | * generic_show_options() |
1373 | */ | 1372 | */ |
1374 | char *s_options; | 1373 | char *s_options; |
1375 | |||
1376 | /* | ||
1377 | * storage for asynchronous operations | ||
1378 | */ | ||
1379 | struct list_head s_async_list; | ||
1380 | }; | 1374 | }; |
1381 | 1375 | ||
1382 | extern struct timespec current_fs_time(struct super_block *sb); | 1376 | extern struct timespec current_fs_time(struct super_block *sb); |
@@ -1800,7 +1794,7 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); | |||
1800 | extern int may_umount_tree(struct vfsmount *); | 1794 | extern int may_umount_tree(struct vfsmount *); |
1801 | extern int may_umount(struct vfsmount *); | 1795 | extern int may_umount(struct vfsmount *); |
1802 | extern long do_mount(char *, char *, char *, unsigned long, void *); | 1796 | extern long do_mount(char *, char *, char *, unsigned long, void *); |
1803 | extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *); | 1797 | extern struct vfsmount *collect_mounts(struct path *); |
1804 | extern void drop_collected_mounts(struct vfsmount *); | 1798 | extern void drop_collected_mounts(struct vfsmount *); |
1805 | 1799 | ||
1806 | extern int vfs_statfs(struct dentry *, struct kstatfs *); | 1800 | extern int vfs_statfs(struct dentry *, struct kstatfs *); |
@@ -1947,8 +1941,6 @@ extern struct super_block *freeze_bdev(struct block_device *); | |||
1947 | extern void emergency_thaw_all(void); | 1941 | extern void emergency_thaw_all(void); |
1948 | extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); | 1942 | extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); |
1949 | extern int fsync_bdev(struct block_device *); | 1943 | extern int fsync_bdev(struct block_device *); |
1950 | extern int fsync_super(struct super_block *); | ||
1951 | extern int fsync_no_super(struct block_device *); | ||
1952 | #else | 1944 | #else |
1953 | static inline void bd_forget(struct inode *inode) {} | 1945 | static inline void bd_forget(struct inode *inode) {} |
1954 | static inline int sync_blockdev(struct block_device *bdev) { return 0; } | 1946 | static inline int sync_blockdev(struct block_device *bdev) { return 0; } |
@@ -1964,6 +1956,7 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) | |||
1964 | return 0; | 1956 | return 0; |
1965 | } | 1957 | } |
1966 | #endif | 1958 | #endif |
1959 | extern int sync_filesystem(struct super_block *); | ||
1967 | extern const struct file_operations def_blk_fops; | 1960 | extern const struct file_operations def_blk_fops; |
1968 | extern const struct file_operations def_chr_fops; | 1961 | extern const struct file_operations def_chr_fops; |
1969 | extern const struct file_operations bad_sock_fops; | 1962 | extern const struct file_operations bad_sock_fops; |
@@ -2082,12 +2075,8 @@ extern int filemap_fdatawrite_range(struct address_space *mapping, | |||
2082 | 2075 | ||
2083 | extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync); | 2076 | extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync); |
2084 | extern void sync_supers(void); | 2077 | extern void sync_supers(void); |
2085 | extern void sync_filesystems(int wait); | ||
2086 | extern void __fsync_super(struct super_block *sb); | ||
2087 | extern void emergency_sync(void); | 2078 | extern void emergency_sync(void); |
2088 | extern void emergency_remount(void); | 2079 | extern void emergency_remount(void); |
2089 | extern int do_remount_sb(struct super_block *sb, int flags, | ||
2090 | void *data, int force); | ||
2091 | #ifdef CONFIG_BLOCK | 2080 | #ifdef CONFIG_BLOCK |
2092 | extern sector_t bmap(struct inode *, sector_t); | 2081 | extern sector_t bmap(struct inode *, sector_t); |
2093 | #endif | 2082 | #endif |
@@ -2205,6 +2194,8 @@ extern int generic_segment_checks(const struct iovec *iov, | |||
2205 | /* fs/splice.c */ | 2194 | /* fs/splice.c */ |
2206 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, | 2195 | extern ssize_t generic_file_splice_read(struct file *, loff_t *, |
2207 | struct pipe_inode_info *, size_t, unsigned int); | 2196 | struct pipe_inode_info *, size_t, unsigned int); |
2197 | extern ssize_t default_file_splice_read(struct file *, loff_t *, | ||
2198 | struct pipe_inode_info *, size_t, unsigned int); | ||
2208 | extern ssize_t generic_file_splice_write(struct pipe_inode_info *, | 2199 | extern ssize_t generic_file_splice_write(struct pipe_inode_info *, |
2209 | struct file *, loff_t *, size_t, unsigned int); | 2200 | struct file *, loff_t *, size_t, unsigned int); |
2210 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, | 2201 | extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, |
@@ -2354,6 +2345,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count); | |||
2354 | extern ssize_t simple_read_from_buffer(void __user *to, size_t count, | 2345 | extern ssize_t simple_read_from_buffer(void __user *to, size_t count, |
2355 | loff_t *ppos, const void *from, size_t available); | 2346 | loff_t *ppos, const void *from, size_t available); |
2356 | 2347 | ||
2348 | extern int simple_fsync(struct file *, struct dentry *, int); | ||
2349 | |||
2357 | #ifdef CONFIG_MIGRATION | 2350 | #ifdef CONFIG_MIGRATION |
2358 | extern int buffer_migrate_page(struct address_space *, | 2351 | extern int buffer_migrate_page(struct address_space *, |
2359 | struct page *, struct page *); | 2352 | struct page *, struct page *); |
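For context on the simple_fsync() declaration added above: it is presumably aimed at simple filesystems that previously open-coded their ->fsync method. A minimal sketch of how such a filesystem might adopt it follows; the examplefs name is illustrative and not part of this patch.

#include <linux/fs.h>

/* hypothetical file_operations for a simple filesystem picking up the helper */
static const struct file_operations examplefs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write,
	.mmap		= generic_file_mmap,
	.fsync		= simple_fsync,		/* the helper declared above */
	.splice_read	= generic_file_splice_read,
};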
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 00fbd5b245c9..936f9aa8bb97 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/dnotify.h> | 14 | #include <linux/dnotify.h> |
15 | #include <linux/inotify.h> | 15 | #include <linux/inotify.h> |
16 | #include <linux/fsnotify_backend.h> | ||
16 | #include <linux/audit.h> | 17 | #include <linux/audit.h> |
17 | 18 | ||
18 | /* | 19 | /* |
@@ -22,19 +23,45 @@ | |||
22 | static inline void fsnotify_d_instantiate(struct dentry *entry, | 23 | static inline void fsnotify_d_instantiate(struct dentry *entry, |
23 | struct inode *inode) | 24 | struct inode *inode) |
24 | { | 25 | { |
26 | __fsnotify_d_instantiate(entry, inode); | ||
27 | |||
25 | inotify_d_instantiate(entry, inode); | 28 | inotify_d_instantiate(entry, inode); |
26 | } | 29 | } |
27 | 30 | ||
31 | /* Notify this dentry's parent about a child's events. */ | ||
32 | static inline void fsnotify_parent(struct dentry *dentry, __u32 mask) | ||
33 | { | ||
34 | __fsnotify_parent(dentry, mask); | ||
35 | |||
36 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | ||
37 | } | ||
38 | |||
28 | /* | 39 | /* |
29 | * fsnotify_d_move - entry has been moved | 40 | * fsnotify_d_move - entry has been moved |
30 | * Called with dcache_lock and entry->d_lock held. | 41 | * Called with dcache_lock and entry->d_lock held. |
31 | */ | 42 | */ |
32 | static inline void fsnotify_d_move(struct dentry *entry) | 43 | static inline void fsnotify_d_move(struct dentry *entry) |
33 | { | 44 | { |
45 | /* | ||
46 | * On move we need to update entry->d_flags to indicate if the new parent | ||
47 | * cares about events from this entry. | ||
48 | */ | ||
49 | __fsnotify_update_dcache_flags(entry); | ||
50 | |||
34 | inotify_d_move(entry); | 51 | inotify_d_move(entry); |
35 | } | 52 | } |
36 | 53 | ||
37 | /* | 54 | /* |
55 | * fsnotify_link_count - inode's link count changed | ||
56 | */ | ||
57 | static inline void fsnotify_link_count(struct inode *inode) | ||
58 | { | ||
59 | inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL); | ||
60 | |||
61 | fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
62 | } | ||
63 | |||
64 | /* | ||
38 | * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir | 65 | * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir |
39 | */ | 66 | */ |
40 | static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, | 67 | static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, |
@@ -42,42 +69,62 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, | |||
42 | int isdir, struct inode *target, struct dentry *moved) | 69 | int isdir, struct inode *target, struct dentry *moved) |
43 | { | 70 | { |
44 | struct inode *source = moved->d_inode; | 71 | struct inode *source = moved->d_inode; |
45 | u32 cookie = inotify_get_cookie(); | 72 | u32 in_cookie = inotify_get_cookie(); |
73 | u32 fs_cookie = fsnotify_get_cookie(); | ||
74 | __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM); | ||
75 | __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO); | ||
46 | 76 | ||
47 | if (old_dir == new_dir) | 77 | if (old_dir == new_dir) |
48 | inode_dir_notify(old_dir, DN_RENAME); | 78 | old_dir_mask |= FS_DN_RENAME; |
49 | else { | ||
50 | inode_dir_notify(old_dir, DN_DELETE); | ||
51 | inode_dir_notify(new_dir, DN_CREATE); | ||
52 | } | ||
53 | 79 | ||
54 | if (isdir) | 80 | if (isdir) { |
55 | isdir = IN_ISDIR; | 81 | isdir = IN_ISDIR; |
56 | inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name, | 82 | old_dir_mask |= FS_IN_ISDIR; |
83 | new_dir_mask |= FS_IN_ISDIR; | ||
84 | } | ||
85 | |||
86 | inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name, | ||
57 | source); | 87 | source); |
58 | inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name, | 88 | inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name, |
59 | source); | 89 | source); |
60 | 90 | ||
91 | fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie); | ||
92 | fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie); | ||
93 | |||
61 | if (target) { | 94 | if (target) { |
62 | inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL); | 95 | inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL); |
63 | inotify_inode_is_dead(target); | 96 | inotify_inode_is_dead(target); |
97 | |||
98 | /* this is really a link_count change not a removal */ | ||
99 | fsnotify_link_count(target); | ||
64 | } | 100 | } |
65 | 101 | ||
66 | if (source) { | 102 | if (source) { |
67 | inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); | 103 | inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); |
104 | fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
68 | } | 105 | } |
69 | audit_inode_child(new_name, moved, new_dir); | 106 | audit_inode_child(new_name, moved, new_dir); |
70 | } | 107 | } |
71 | 108 | ||
72 | /* | 109 | /* |
110 | * fsnotify_inode_delete - an inode is being evicted from cache, clean up is needed | ||
111 | */ | ||
112 | static inline void fsnotify_inode_delete(struct inode *inode) | ||
113 | { | ||
114 | __fsnotify_inode_delete(inode); | ||
115 | } | ||
116 | |||
117 | /* | ||
73 | * fsnotify_nameremove - a filename was removed from a directory | 118 | * fsnotify_nameremove - a filename was removed from a directory |
74 | */ | 119 | */ |
75 | static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) | 120 | static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) |
76 | { | 121 | { |
122 | __u32 mask = FS_DELETE; | ||
123 | |||
77 | if (isdir) | 124 | if (isdir) |
78 | isdir = IN_ISDIR; | 125 | mask |= FS_IN_ISDIR; |
79 | dnotify_parent(dentry, DN_DELETE); | 126 | |
80 | inotify_dentry_parent_queue_event(dentry, IN_DELETE|isdir, 0, dentry->d_name.name); | 127 | fsnotify_parent(dentry, mask); |
81 | } | 128 | } |
82 | 129 | ||
83 | /* | 130 | /* |
@@ -87,14 +134,9 @@ static inline void fsnotify_inoderemove(struct inode *inode) | |||
87 | { | 134 | { |
88 | inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL); | 135 | inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL); |
89 | inotify_inode_is_dead(inode); | 136 | inotify_inode_is_dead(inode); |
90 | } | ||
91 | 137 | ||
92 | /* | 138 | fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0); |
93 | * fsnotify_link_count - inode's link count changed | 139 | __fsnotify_inode_delete(inode); |
94 | */ | ||
95 | static inline void fsnotify_link_count(struct inode *inode) | ||
96 | { | ||
97 | inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL); | ||
98 | } | 140 | } |
99 | 141 | ||
100 | /* | 142 | /* |
@@ -102,10 +144,11 @@ static inline void fsnotify_link_count(struct inode *inode) | |||
102 | */ | 144 | */ |
103 | static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) | 145 | static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) |
104 | { | 146 | { |
105 | inode_dir_notify(inode, DN_CREATE); | ||
106 | inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, | 147 | inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, |
107 | dentry->d_inode); | 148 | dentry->d_inode); |
108 | audit_inode_child(dentry->d_name.name, dentry, inode); | 149 | audit_inode_child(dentry->d_name.name, dentry, inode); |
150 | |||
151 | fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); | ||
109 | } | 152 | } |
110 | 153 | ||
111 | /* | 154 | /* |
@@ -115,11 +158,12 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) | |||
115 | */ | 158 | */ |
116 | static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) | 159 | static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) |
117 | { | 160 | { |
118 | inode_dir_notify(dir, DN_CREATE); | ||
119 | inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name, | 161 | inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name, |
120 | inode); | 162 | inode); |
121 | fsnotify_link_count(inode); | 163 | fsnotify_link_count(inode); |
122 | audit_inode_child(new_dentry->d_name.name, new_dentry, dir); | 164 | audit_inode_child(new_dentry->d_name.name, new_dentry, dir); |
165 | |||
166 | fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0); | ||
123 | } | 167 | } |
124 | 168 | ||
125 | /* | 169 | /* |
@@ -127,10 +171,13 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct | |||
127 | */ | 171 | */ |
128 | static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) | 172 | static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) |
129 | { | 173 | { |
130 | inode_dir_notify(inode, DN_CREATE); | 174 | __u32 mask = (FS_CREATE | FS_IN_ISDIR); |
131 | inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0, | 175 | struct inode *d_inode = dentry->d_inode; |
132 | dentry->d_name.name, dentry->d_inode); | 176 | |
177 | inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode); | ||
133 | audit_inode_child(dentry->d_name.name, dentry, inode); | 178 | audit_inode_child(dentry->d_name.name, dentry, inode); |
179 | |||
180 | fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); | ||
134 | } | 181 | } |
135 | 182 | ||
136 | /* | 183 | /* |
@@ -139,14 +186,15 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) | |||
139 | static inline void fsnotify_access(struct dentry *dentry) | 186 | static inline void fsnotify_access(struct dentry *dentry) |
140 | { | 187 | { |
141 | struct inode *inode = dentry->d_inode; | 188 | struct inode *inode = dentry->d_inode; |
142 | u32 mask = IN_ACCESS; | 189 | __u32 mask = FS_ACCESS; |
143 | 190 | ||
144 | if (S_ISDIR(inode->i_mode)) | 191 | if (S_ISDIR(inode->i_mode)) |
145 | mask |= IN_ISDIR; | 192 | mask |= FS_IN_ISDIR; |
146 | 193 | ||
147 | dnotify_parent(dentry, DN_ACCESS); | ||
148 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | ||
149 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 194 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
195 | |||
196 | fsnotify_parent(dentry, mask); | ||
197 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
150 | } | 198 | } |
151 | 199 | ||
152 | /* | 200 | /* |
@@ -155,14 +203,15 @@ static inline void fsnotify_access(struct dentry *dentry) | |||
155 | static inline void fsnotify_modify(struct dentry *dentry) | 203 | static inline void fsnotify_modify(struct dentry *dentry) |
156 | { | 204 | { |
157 | struct inode *inode = dentry->d_inode; | 205 | struct inode *inode = dentry->d_inode; |
158 | u32 mask = IN_MODIFY; | 206 | __u32 mask = FS_MODIFY; |
159 | 207 | ||
160 | if (S_ISDIR(inode->i_mode)) | 208 | if (S_ISDIR(inode->i_mode)) |
161 | mask |= IN_ISDIR; | 209 | mask |= FS_IN_ISDIR; |
162 | 210 | ||
163 | dnotify_parent(dentry, DN_MODIFY); | ||
164 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | ||
165 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 211 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
212 | |||
213 | fsnotify_parent(dentry, mask); | ||
214 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
166 | } | 215 | } |
167 | 216 | ||
168 | /* | 217 | /* |
@@ -171,13 +220,15 @@ static inline void fsnotify_modify(struct dentry *dentry) | |||
171 | static inline void fsnotify_open(struct dentry *dentry) | 220 | static inline void fsnotify_open(struct dentry *dentry) |
172 | { | 221 | { |
173 | struct inode *inode = dentry->d_inode; | 222 | struct inode *inode = dentry->d_inode; |
174 | u32 mask = IN_OPEN; | 223 | __u32 mask = FS_OPEN; |
175 | 224 | ||
176 | if (S_ISDIR(inode->i_mode)) | 225 | if (S_ISDIR(inode->i_mode)) |
177 | mask |= IN_ISDIR; | 226 | mask |= FS_IN_ISDIR; |
178 | 227 | ||
179 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | ||
180 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 228 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
229 | |||
230 | fsnotify_parent(dentry, mask); | ||
231 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
181 | } | 232 | } |
182 | 233 | ||
183 | /* | 234 | /* |
@@ -187,15 +238,16 @@ static inline void fsnotify_close(struct file *file) | |||
187 | { | 238 | { |
188 | struct dentry *dentry = file->f_path.dentry; | 239 | struct dentry *dentry = file->f_path.dentry; |
189 | struct inode *inode = dentry->d_inode; | 240 | struct inode *inode = dentry->d_inode; |
190 | const char *name = dentry->d_name.name; | ||
191 | fmode_t mode = file->f_mode; | 241 | fmode_t mode = file->f_mode; |
192 | u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE; | 242 | __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; |
193 | 243 | ||
194 | if (S_ISDIR(inode->i_mode)) | 244 | if (S_ISDIR(inode->i_mode)) |
195 | mask |= IN_ISDIR; | 245 | mask |= FS_IN_ISDIR; |
196 | 246 | ||
197 | inotify_dentry_parent_queue_event(dentry, mask, 0, name); | ||
198 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 247 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
248 | |||
249 | fsnotify_parent(dentry, mask); | ||
250 | fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0); | ||
199 | } | 251 | } |
200 | 252 | ||
201 | /* | 253 | /* |
@@ -204,13 +256,15 @@ static inline void fsnotify_close(struct file *file) | |||
204 | static inline void fsnotify_xattr(struct dentry *dentry) | 256 | static inline void fsnotify_xattr(struct dentry *dentry) |
205 | { | 257 | { |
206 | struct inode *inode = dentry->d_inode; | 258 | struct inode *inode = dentry->d_inode; |
207 | u32 mask = IN_ATTRIB; | 259 | __u32 mask = FS_ATTRIB; |
208 | 260 | ||
209 | if (S_ISDIR(inode->i_mode)) | 261 | if (S_ISDIR(inode->i_mode)) |
210 | mask |= IN_ISDIR; | 262 | mask |= FS_IN_ISDIR; |
211 | 263 | ||
212 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | ||
213 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 264 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
265 | |||
266 | fsnotify_parent(dentry, mask); | ||
267 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
214 | } | 268 | } |
215 | 269 | ||
216 | /* | 270 | /* |
@@ -220,50 +274,37 @@ static inline void fsnotify_xattr(struct dentry *dentry) | |||
220 | static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) | 274 | static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) |
221 | { | 275 | { |
222 | struct inode *inode = dentry->d_inode; | 276 | struct inode *inode = dentry->d_inode; |
223 | int dn_mask = 0; | 277 | __u32 mask = 0; |
224 | u32 in_mask = 0; | 278 | |
279 | if (ia_valid & ATTR_UID) | ||
280 | mask |= FS_ATTRIB; | ||
281 | if (ia_valid & ATTR_GID) | ||
282 | mask |= FS_ATTRIB; | ||
283 | if (ia_valid & ATTR_SIZE) | ||
284 | mask |= FS_MODIFY; | ||
225 | 285 | ||
226 | if (ia_valid & ATTR_UID) { | ||
227 | in_mask |= IN_ATTRIB; | ||
228 | dn_mask |= DN_ATTRIB; | ||
229 | } | ||
230 | if (ia_valid & ATTR_GID) { | ||
231 | in_mask |= IN_ATTRIB; | ||
232 | dn_mask |= DN_ATTRIB; | ||
233 | } | ||
234 | if (ia_valid & ATTR_SIZE) { | ||
235 | in_mask |= IN_MODIFY; | ||
236 | dn_mask |= DN_MODIFY; | ||
237 | } | ||
238 | /* both times implies a utime(s) call */ | 286 | /* both times implies a utime(s) call */ |
239 | if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) | 287 | if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME)) |
240 | { | 288 | mask |= FS_ATTRIB; |
241 | in_mask |= IN_ATTRIB; | 289 | else if (ia_valid & ATTR_ATIME) |
242 | dn_mask |= DN_ATTRIB; | 290 | mask |= FS_ACCESS; |
243 | } else if (ia_valid & ATTR_ATIME) { | 291 | else if (ia_valid & ATTR_MTIME) |
244 | in_mask |= IN_ACCESS; | 292 | mask |= FS_MODIFY; |
245 | dn_mask |= DN_ACCESS; | 293 | |
246 | } else if (ia_valid & ATTR_MTIME) { | 294 | if (ia_valid & ATTR_MODE) |
247 | in_mask |= IN_MODIFY; | 295 | mask |= FS_ATTRIB; |
248 | dn_mask |= DN_MODIFY; | ||
249 | } | ||
250 | if (ia_valid & ATTR_MODE) { | ||
251 | in_mask |= IN_ATTRIB; | ||
252 | dn_mask |= DN_ATTRIB; | ||
253 | } | ||
254 | 296 | ||
255 | if (dn_mask) | 297 | if (mask) { |
256 | dnotify_parent(dentry, dn_mask); | ||
257 | if (in_mask) { | ||
258 | if (S_ISDIR(inode->i_mode)) | 298 | if (S_ISDIR(inode->i_mode)) |
259 | in_mask |= IN_ISDIR; | 299 | mask |= FS_IN_ISDIR; |
260 | inotify_inode_queue_event(inode, in_mask, 0, NULL, NULL); | 300 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); |
261 | inotify_dentry_parent_queue_event(dentry, in_mask, 0, | 301 | |
262 | dentry->d_name.name); | 302 | fsnotify_parent(dentry, mask); |
303 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
263 | } | 304 | } |
264 | } | 305 | } |
265 | 306 | ||
266 | #ifdef CONFIG_INOTIFY /* inotify helpers */ | 307 | #if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY) /* notify helpers */ |
267 | 308 | ||
268 | /* | 309 | /* |
269 | * fsnotify_oldname_init - save off the old filename before we change it | 310 | * fsnotify_oldname_init - save off the old filename before we change it |
@@ -281,7 +322,7 @@ static inline void fsnotify_oldname_free(const char *old_name) | |||
281 | kfree(old_name); | 322 | kfree(old_name); |
282 | } | 323 | } |
283 | 324 | ||
284 | #else /* CONFIG_INOTIFY */ | 325 | #else /* CONFIG_INOTIFY || CONFIG_FSNOTIFY */ |
285 | 326 | ||
286 | static inline const char *fsnotify_oldname_init(const char *name) | 327 | static inline const char *fsnotify_oldname_init(const char *name) |
287 | { | 328 | { |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h new file mode 100644 index 000000000000..44848aa830dc --- /dev/null +++ b/include/linux/fsnotify_backend.h | |||
@@ -0,0 +1,387 @@ | |||
1 | /* | ||
2 | * Filesystem access notification for Linux | ||
3 | * | ||
4 | * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef __LINUX_FSNOTIFY_BACKEND_H | ||
8 | #define __LINUX_FSNOTIFY_BACKEND_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <linux/idr.h> /* inotify uses this */ | ||
13 | #include <linux/fs.h> /* struct inode */ | ||
14 | #include <linux/list.h> | ||
15 | #include <linux/path.h> /* struct path */ | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/types.h> | ||
18 | |||
19 | #include <asm/atomic.h> | ||
20 | |||
21 | /* | ||
22 | * IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily | ||
23 | * convert between them. dnotify only needs conversion at watch creation | ||
24 | * so no perf loss there. fanotify isn't defined yet, so it can use the | ||
25 | * holes if it needs more events. | ||
26 | */ | ||
27 | #define FS_ACCESS 0x00000001 /* File was accessed */ | ||
28 | #define FS_MODIFY 0x00000002 /* File was modified */ | ||
29 | #define FS_ATTRIB 0x00000004 /* Metadata changed */ | ||
30 | #define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */ | ||
31 | #define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */ | ||
32 | #define FS_OPEN 0x00000020 /* File was opened */ | ||
33 | #define FS_MOVED_FROM 0x00000040 /* File was moved from X */ | ||
34 | #define FS_MOVED_TO 0x00000080 /* File was moved to Y */ | ||
35 | #define FS_CREATE 0x00000100 /* Subfile was created */ | ||
36 | #define FS_DELETE 0x00000200 /* Subfile was deleted */ | ||
37 | #define FS_DELETE_SELF 0x00000400 /* Self was deleted */ | ||
38 | #define FS_MOVE_SELF 0x00000800 /* Self was moved */ | ||
39 | |||
40 | #define FS_UNMOUNT 0x00002000 /* inode on umount fs */ | ||
41 | #define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ | ||
42 | #define FS_IN_IGNORED 0x00008000 /* last inotify event here */ | ||
43 | |||
44 | #define FS_IN_ISDIR 0x40000000 /* event occurred against dir */ | ||
45 | #define FS_IN_ONESHOT 0x80000000 /* only send event once */ | ||
46 | |||
47 | #define FS_DN_RENAME 0x10000000 /* file renamed */ | ||
48 | #define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ | ||
49 | |||
50 | /* This inode cares about things that happen to its children. Always set for | ||
51 | * dnotify and inotify. */ | ||
52 | #define FS_EVENT_ON_CHILD 0x08000000 | ||
53 | |||
54 | /* This is a list of all events that may get sent to a parent based on fs events | ||
55 | * happening to inodes inside that directory */ | ||
56 | #define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ | ||
57 | FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ | ||
58 | FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ | ||
59 | FS_DELETE) | ||
60 | |||
61 | /* listeners that hard code group numbers near the top */ | ||
62 | #define DNOTIFY_GROUP_NUM UINT_MAX | ||
63 | #define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1) | ||
64 | |||
65 | struct fsnotify_group; | ||
66 | struct fsnotify_event; | ||
67 | struct fsnotify_mark_entry; | ||
68 | struct fsnotify_event_private_data; | ||
69 | |||
70 | /* | ||
71 | * Each group must define these ops. The fsnotify infrastructure will call | ||
72 | * these operations for each relevant group. | ||
73 | * | ||
74 | * should_send_event - given a group, inode, and mask this function determines | ||
75 | * if the group is interested in this event. | ||
76 | * handle_event - main call for a group to handle an fs event | ||
77 | * free_group_priv - called when a group refcnt hits 0 to clean up the private union | ||
78 | * freeing_mark - this means that a mark has been flagged to die when everything | ||
79 | * finishes using it. The function is supplied with what must be a | ||
80 | * valid group and inode to use to clean up. | ||
81 | */ | ||
82 | struct fsnotify_ops { | ||
83 | bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask); | ||
84 | int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event); | ||
85 | void (*free_group_priv)(struct fsnotify_group *group); | ||
86 | void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group); | ||
87 | void (*free_event_priv)(struct fsnotify_event_private_data *priv); | ||
88 | }; | ||
89 | |||
90 | /* | ||
91 | * A group is a "thing" that wants to receive notification about filesystem | ||
92 | * events. The mask holds the subset of event types this group cares about. | ||
93 | * refcnt on a group is up to the implementor and at any moment if it goes 0 | ||
94 | * everything will be cleaned up. | ||
95 | */ | ||
96 | struct fsnotify_group { | ||
97 | /* | ||
98 | * global list of all groups receiving events from fsnotify. | ||
99 | * anchored by fsnotify_groups and protected by either fsnotify_grp_mutex | ||
100 | * or fsnotify_grp_srcu depending on write vs read. | ||
101 | */ | ||
102 | struct list_head group_list; | ||
103 | |||
104 | /* | ||
105 | * Defines all of the event types in which this group is interested. | ||
106 | * This mask is a bitwise OR of the FS_* events from above. Each time | ||
107 | * this mask changes for a group (if it changes) the correct functions | ||
108 | * must be called to update the global structures which indicate global | ||
109 | * interest in event types. | ||
110 | */ | ||
111 | __u32 mask; | ||
112 | |||
113 | /* | ||
114 | * How the refcnt is used is up to each group. When the refcnt hits 0 | ||
115 | * fsnotify will clean up all of the resources associated with this group. | ||
116 | * As an example, the dnotify group will always have a refcnt=1 and that | ||
117 | * will never change. Inotify, on the other hand, has a group per | ||
118 | * inotify_init() and the refcnt will hit 0 only when that fd has been | ||
119 | * closed. | ||
120 | */ | ||
121 | atomic_t refcnt; /* things with interest in this group */ | ||
122 | unsigned int group_num; /* simply prevents accidental group collision */ | ||
123 | |||
124 | const struct fsnotify_ops *ops; /* how this group handles things */ | ||
125 | |||
126 | /* needed to send notification to userspace */ | ||
127 | struct mutex notification_mutex; /* protect the notification_list */ | ||
128 | struct list_head notification_list; /* list of event_holder this group needs to send to userspace */ | ||
129 | wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */ | ||
130 | unsigned int q_len; /* events on the queue */ | ||
131 | unsigned int max_events; /* maximum events allowed on the list */ | ||
132 | |||
133 | /* stores all fastpath entries associated with this group so they can be cleaned on unregister */ | ||
134 | spinlock_t mark_lock; /* protect mark_entries list */ | ||
135 | atomic_t num_marks; /* 1 for each mark entry and 1 for not being | ||
136 | * past the point of no return when freeing | ||
137 | * a group */ | ||
138 | struct list_head mark_entries; /* all inode mark entries for this group */ | ||
139 | |||
140 | /* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */ | ||
141 | bool on_group_list; | ||
142 | |||
143 | /* groups can define private fields here or use the void *private */ | ||
144 | union { | ||
145 | void *private; | ||
146 | #ifdef CONFIG_INOTIFY_USER | ||
147 | struct inotify_group_private_data { | ||
148 | spinlock_t idr_lock; | ||
149 | struct idr idr; | ||
150 | u32 last_wd; | ||
151 | struct fasync_struct *fa; /* async notification */ | ||
152 | struct user_struct *user; | ||
153 | } inotify_data; | ||
154 | #endif | ||
155 | }; | ||
156 | }; | ||
157 | |||
158 | /* | ||
159 | * A single event can be queued in multiple group->notification_lists. | ||
160 | * | ||
161 | * each group->notification_list will point to an event_holder which in turn points | ||
162 | * to the actual event that needs to be sent to userspace. | ||
163 | * | ||
164 | * Seemed cheaper to create a refcnt'd event and a small holder for every group | ||
165 | * than create a different event for every group | ||
166 | * | ||
167 | */ | ||
168 | struct fsnotify_event_holder { | ||
169 | struct fsnotify_event *event; | ||
170 | struct list_head event_list; | ||
171 | }; | ||
172 | |||
173 | /* | ||
174 | * Inotify needs to tack data onto an event. This struct lets us later find the | ||
175 | * correct private data of the correct group. | ||
176 | */ | ||
177 | struct fsnotify_event_private_data { | ||
178 | struct fsnotify_group *group; | ||
179 | struct list_head event_list; | ||
180 | }; | ||
181 | |||
182 | /* | ||
183 | * all of the information about the original object we want to now send to | ||
184 | * a group. If you want to carry more info from the accessing task to the | ||
185 | * listener this structure is where you need to be adding fields. | ||
186 | */ | ||
187 | struct fsnotify_event { | ||
188 | /* | ||
189 | * If we create an event we are also likely going to need a holder | ||
190 | * to link to a group. So embed one holder in the event. Means only | ||
191 | * one allocation for the common case where we only have one group | ||
192 | */ | ||
193 | struct fsnotify_event_holder holder; | ||
194 | spinlock_t lock; /* protection for the associated event_holder and private_list */ | ||
195 | /* to_tell may ONLY be dereferenced during handle_event(). */ | ||
196 | struct inode *to_tell; /* either the inode the event happened to or its parent */ | ||
197 | /* | ||
198 | * depending on the event type we should have either a path or inode | ||
199 | * We hold a reference on path, but NOT on inode. Since we have the ref on | ||
200 | * the path, it may be dereferenced at any point during this object's | ||
201 | * lifetime. That reference is dropped when this object's refcnt hits | ||
202 | * 0. If this event contains an inode instead of a path, the inode may | ||
203 | * ONLY be used during handle_event(). | ||
204 | */ | ||
205 | union { | ||
206 | struct path path; | ||
207 | struct inode *inode; | ||
208 | }; | ||
209 | /* when calling fsnotify tell it if the data is a path or inode */ | ||
210 | #define FSNOTIFY_EVENT_NONE 0 | ||
211 | #define FSNOTIFY_EVENT_PATH 1 | ||
212 | #define FSNOTIFY_EVENT_INODE 2 | ||
213 | #define FSNOTIFY_EVENT_FILE 3 | ||
214 | int data_type; /* which of the above union we have */ | ||
215 | atomic_t refcnt; /* how many groups still are using/need to send this event */ | ||
216 | __u32 mask; /* the type of access, bitwise OR for FS_* event types */ | ||
217 | |||
218 | u32 sync_cookie; /* used to correlate events, namely inotify mv events */ | ||
219 | char *file_name; | ||
220 | size_t name_len; | ||
221 | |||
222 | struct list_head private_data_list; /* groups can store private data here */ | ||
223 | }; | ||
224 | |||
225 | /* | ||
226 | * a mark is simply an entry attached to an in-core inode which allows an | ||
227 | * fsnotify listener to indicate it is either no longer interested in events | ||
228 | * of a type matching the mask or only interested in those events. | ||
229 | * | ||
230 | * these are flushed when an inode is evicted from core and may be flushed | ||
231 | * when the inode is modified (as seen by fsnotify_access). Some fsnotify users | ||
232 | * (such as dnotify) will flush these when the open fd is closed and not at | ||
233 | * inode eviction or modification. | ||
234 | */ | ||
235 | struct fsnotify_mark_entry { | ||
236 | __u32 mask; /* mask this mark entry is for */ | ||
237 | /* we hold ref for each i_list and g_list. also one ref for each 'thing' | ||
238 | * in kernel that found and may be using this mark. */ | ||
239 | atomic_t refcnt; /* active things looking at this mark */ | ||
240 | struct inode *inode; /* inode this entry is associated with */ | ||
241 | struct fsnotify_group *group; /* group this mark entry is for */ | ||
242 | struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */ | ||
243 | struct list_head g_list; /* list of mark_entries by group->mark_entries */ | ||
244 | spinlock_t lock; /* protect group and inode */ | ||
245 | struct list_head free_i_list; /* tmp list used when freeing this mark */ | ||
246 | struct list_head free_g_list; /* tmp list used when freeing this mark */ | ||
247 | void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */ | ||
248 | }; | ||
249 | |||
250 | #ifdef CONFIG_FSNOTIFY | ||
251 | |||
252 | /* called from the vfs helpers */ | ||
253 | |||
254 | /* main fsnotify call to send events */ | ||
255 | extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, | ||
256 | const char *name, u32 cookie); | ||
257 | extern void __fsnotify_parent(struct dentry *dentry, __u32 mask); | ||
258 | extern void __fsnotify_inode_delete(struct inode *inode); | ||
259 | extern u32 fsnotify_get_cookie(void); | ||
260 | |||
261 | static inline int fsnotify_inode_watches_children(struct inode *inode) | ||
262 | { | ||
263 | /* FS_EVENT_ON_CHILD is set if the inode may care */ | ||
264 | if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD)) | ||
265 | return 0; | ||
266 | /* this inode might care about child events, does it care about the | ||
267 | * specific set of events that can happen on a child? */ | ||
268 | return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD; | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * Update the dentry with a flag indicating the interest of its parent in receiving | ||
273 | * filesystem events when those events happen to this dentry->d_inode. | ||
274 | */ | ||
275 | static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) | ||
276 | { | ||
277 | struct dentry *parent; | ||
278 | |||
279 | assert_spin_locked(&dcache_lock); | ||
280 | assert_spin_locked(&dentry->d_lock); | ||
281 | |||
282 | parent = dentry->d_parent; | ||
283 | if (fsnotify_inode_watches_children(parent->d_inode)) | ||
284 | dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; | ||
285 | else | ||
286 | dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; | ||
287 | } | ||
288 | |||
289 | /* | ||
290 | * __fsnotify_d_instantiate - instantiate a dentry for inode | ||
291 | * Called with dcache_lock held. | ||
292 | */ | ||
293 | static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) | ||
294 | { | ||
295 | if (!inode) | ||
296 | return; | ||
297 | |||
298 | assert_spin_locked(&dcache_lock); | ||
299 | |||
300 | spin_lock(&dentry->d_lock); | ||
301 | __fsnotify_update_dcache_flags(dentry); | ||
302 | spin_unlock(&dentry->d_lock); | ||
303 | } | ||
304 | |||
305 | /* called from fsnotify listeners, such as fanotify or dnotify */ | ||
306 | |||
307 | /* must call when a group changes its ->mask */ | ||
308 | extern void fsnotify_recalc_global_mask(void); | ||
309 | /* get a reference to an existing or create a new group */ | ||
310 | extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, | ||
311 | __u32 mask, | ||
312 | const struct fsnotify_ops *ops); | ||
313 | /* run all marks associated with this group and update group->mask */ | ||
314 | extern void fsnotify_recalc_group_mask(struct fsnotify_group *group); | ||
315 | /* drop reference on a group from fsnotify_obtain_group */ | ||
316 | extern void fsnotify_put_group(struct fsnotify_group *group); | ||
317 | |||
318 | /* take a reference to an event */ | ||
319 | extern void fsnotify_get_event(struct fsnotify_event *event); | ||
320 | extern void fsnotify_put_event(struct fsnotify_event *event); | ||
321 | /* find private data previously attached to an event and unlink it */ | ||
322 | extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, | ||
323 | struct fsnotify_event *event); | ||
324 | |||
325 | /* attach the event to the group notification queue */ | ||
326 | extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event, | ||
327 | struct fsnotify_event_private_data *priv); | ||
328 | /* true if the group notification queue is empty */ | ||
329 | extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); | ||
330 | /* return, but do not dequeue the first event on the notification queue */ | ||
331 | extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group); | ||
332 | /* return AND dequeue the first event on the notification queue */ | ||
333 | extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group); | ||
334 | |||
335 | /* functions used to manipulate the marks attached to inodes */ | ||
336 | |||
337 | /* run all marks associated with an inode and update inode->i_fsnotify_mask */ | ||
338 | extern void fsnotify_recalc_inode_mask(struct inode *inode); | ||
339 | extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry)); | ||
340 | /* find (and take a reference) to a mark associated with group and inode */ | ||
341 | extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode); | ||
342 | /* attach the mark to both the group and the inode */ | ||
343 | extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode); | ||
344 | /* given a mark, flag it to be freed when all references are dropped */ | ||
345 | extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry); | ||
346 | /* run all the marks in a group, and flag them to be freed */ | ||
347 | extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); | ||
348 | extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry); | ||
349 | extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry); | ||
350 | extern void fsnotify_unmount_inodes(struct list_head *list); | ||
351 | |||
352 | /* put here because inotify does some weird stuff when destroying watches */ | ||
353 | extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, | ||
354 | void *data, int data_is, const char *name, | ||
355 | u32 cookie); | ||
356 | |||
357 | #else | ||
358 | |||
359 | static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, | ||
360 | const char *name, u32 cookie) | ||
361 | {} | ||
362 | |||
363 | static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask) | ||
364 | {} | ||
365 | |||
366 | static inline void __fsnotify_inode_delete(struct inode *inode) | ||
367 | {} | ||
368 | |||
369 | static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) | ||
370 | {} | ||
371 | |||
372 | static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) | ||
373 | {} | ||
374 | |||
375 | static inline u32 fsnotify_get_cookie(void) | ||
376 | { | ||
377 | return 0; | ||
378 | } | ||
379 | |||
380 | static inline void fsnotify_unmount_inodes(struct list_head *list) | ||
381 | {} | ||
382 | |||
383 | #endif /* CONFIG_FSNOTIFY */ | ||
384 | |||
385 | #endif /* __KERNEL __ */ | ||
386 | |||
387 | #endif /* __LINUX_FSNOTIFY_BACKEND_H */ | ||
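To make the group/ops split above concrete, here is a minimal sketch of an in-kernel listener built only on the declarations in this header. The group number (42), the event mask, and the printk body are illustrative assumptions, and the error handling assumes fsnotify_obtain_group() reports failure via the usual ERR_PTR convention.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/fsnotify_backend.h>

static bool example_should_send_event(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask)
{
	return true;	/* take every event that already matched group->mask */
}

static int example_handle_event(struct fsnotify_group *group,
				struct fsnotify_event *event)
{
	/* to_tell may only be used here, inside handle_event() */
	printk(KERN_DEBUG "fsnotify: mask=0x%x inode=%p\n",
	       event->mask, event->to_tell);
	return 0;
}

static const struct fsnotify_ops example_fsnotify_ops = {
	.should_send_event	= example_should_send_event,
	.handle_event		= example_handle_event,
};

static struct fsnotify_group *example_group;

static int __init example_fsnotify_init(void)
{
	example_group = fsnotify_obtain_group(42, FS_CREATE | FS_DELETE,
					      &example_fsnotify_ops);
	if (IS_ERR(example_group))
		return PTR_ERR(example_group);
	return 0;
}

static void __exit example_fsnotify_exit(void)
{
	fsnotify_put_group(example_group);
}

module_init(example_fsnotify_init);
module_exit(example_fsnotify_exit);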
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 8a0c2f221e6b..39b95c56587e 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -233,8 +233,6 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size); | |||
233 | 233 | ||
234 | extern int skip_trace(unsigned long ip); | 234 | extern int skip_trace(unsigned long ip); |
235 | 235 | ||
236 | extern void ftrace_release(void *start, unsigned long size); | ||
237 | |||
238 | extern void ftrace_disable_daemon(void); | 236 | extern void ftrace_disable_daemon(void); |
239 | extern void ftrace_enable_daemon(void); | 237 | extern void ftrace_enable_daemon(void); |
240 | #else | 238 | #else |
@@ -325,13 +323,8 @@ static inline void __ftrace_enabled_restore(int enabled) | |||
325 | 323 | ||
326 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | 324 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD |
327 | extern void ftrace_init(void); | 325 | extern void ftrace_init(void); |
328 | extern void ftrace_init_module(struct module *mod, | ||
329 | unsigned long *start, unsigned long *end); | ||
330 | #else | 326 | #else |
331 | static inline void ftrace_init(void) { } | 327 | static inline void ftrace_init(void) { } |
332 | static inline void | ||
333 | ftrace_init_module(struct module *mod, | ||
334 | unsigned long *start, unsigned long *end) { } | ||
335 | #endif | 328 | #endif |
336 | 329 | ||
337 | /* | 330 | /* |
@@ -368,6 +361,7 @@ struct ftrace_ret_stack { | |||
368 | unsigned long ret; | 361 | unsigned long ret; |
369 | unsigned long func; | 362 | unsigned long func; |
370 | unsigned long long calltime; | 363 | unsigned long long calltime; |
364 | unsigned long long subtime; | ||
371 | }; | 365 | }; |
372 | 366 | ||
373 | /* | 367 | /* |
@@ -379,8 +373,6 @@ extern void return_to_handler(void); | |||
379 | 373 | ||
380 | extern int | 374 | extern int |
381 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); | 375 | ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); |
382 | extern void | ||
383 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret); | ||
384 | 376 | ||
385 | /* | 377 | /* |
386 | * Sometimes we don't want to trace a function with the function | 378 | * Sometimes we don't want to trace a function with the function |
@@ -496,8 +488,15 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk) | |||
496 | 488 | ||
497 | extern int ftrace_dump_on_oops; | 489 | extern int ftrace_dump_on_oops; |
498 | 490 | ||
491 | #ifdef CONFIG_PREEMPT | ||
492 | #define INIT_TRACE_RECURSION .trace_recursion = 0, | ||
493 | #endif | ||
494 | |||
499 | #endif /* CONFIG_TRACING */ | 495 | #endif /* CONFIG_TRACING */ |
500 | 496 | ||
497 | #ifndef INIT_TRACE_RECURSION | ||
498 | #define INIT_TRACE_RECURSION | ||
499 | #endif | ||
501 | 500 | ||
502 | #ifdef CONFIG_HW_BRANCH_TRACER | 501 | #ifdef CONFIG_HW_BRANCH_TRACER |
503 | 502 | ||
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h new file mode 100644 index 000000000000..5c093ffc655b --- /dev/null +++ b/include/linux/ftrace_event.h | |||
@@ -0,0 +1,172 @@ | |||
1 | #ifndef _LINUX_FTRACE_EVENT_H | ||
2 | #define _LINUX_FTRACE_EVENT_H | ||
3 | |||
4 | #include <linux/trace_seq.h> | ||
5 | #include <linux/ring_buffer.h> | ||
6 | #include <linux/percpu.h> | ||
7 | |||
8 | struct trace_array; | ||
9 | struct tracer; | ||
10 | struct dentry; | ||
11 | |||
12 | DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq); | ||
13 | |||
14 | struct trace_print_flags { | ||
15 | unsigned long mask; | ||
16 | const char *name; | ||
17 | }; | ||
18 | |||
19 | const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim, | ||
20 | unsigned long flags, | ||
21 | const struct trace_print_flags *flag_array); | ||
22 | |||
23 | const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, | ||
24 | const struct trace_print_flags *symbol_array); | ||
25 | |||
26 | /* | ||
27 | * The trace entry - the most basic unit of tracing. This is what | ||
28 | * is printed in the end as a single line in the trace output, such as: | ||
29 | * | ||
30 | * bash-15816 [01] 235.197585: idle_cpu <- irq_enter | ||
31 | */ | ||
32 | struct trace_entry { | ||
33 | unsigned short type; | ||
34 | unsigned char flags; | ||
35 | unsigned char preempt_count; | ||
36 | int pid; | ||
37 | int tgid; | ||
38 | }; | ||
39 | |||
40 | #define FTRACE_MAX_EVENT \ | ||
41 | ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1) | ||
42 | |||
43 | /* | ||
44 | * Trace iterator - used by printout routines that present trace | ||
45 | * results to users and which might sleep, etc: | ||
46 | */ | ||
47 | struct trace_iterator { | ||
48 | struct trace_array *tr; | ||
49 | struct tracer *trace; | ||
50 | void *private; | ||
51 | int cpu_file; | ||
52 | struct mutex mutex; | ||
53 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; | ||
54 | unsigned long iter_flags; | ||
55 | |||
56 | /* The below is zeroed out in pipe_read */ | ||
57 | struct trace_seq seq; | ||
58 | struct trace_entry *ent; | ||
59 | int cpu; | ||
60 | u64 ts; | ||
61 | |||
62 | loff_t pos; | ||
63 | long idx; | ||
64 | |||
65 | cpumask_var_t started; | ||
66 | }; | ||
67 | |||
68 | |||
69 | typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, | ||
70 | int flags); | ||
71 | struct trace_event { | ||
72 | struct hlist_node node; | ||
73 | struct list_head list; | ||
74 | int type; | ||
75 | trace_print_func trace; | ||
76 | trace_print_func raw; | ||
77 | trace_print_func hex; | ||
78 | trace_print_func binary; | ||
79 | }; | ||
80 | |||
81 | extern int register_ftrace_event(struct trace_event *event); | ||
82 | extern int unregister_ftrace_event(struct trace_event *event); | ||
83 | |||
84 | /* Return values for print_line callback */ | ||
85 | enum print_line_t { | ||
86 | TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ | ||
87 | TRACE_TYPE_HANDLED = 1, | ||
88 | TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */ | ||
89 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ | ||
90 | }; | ||
91 | |||
92 | |||
93 | struct ring_buffer_event * | ||
94 | trace_current_buffer_lock_reserve(int type, unsigned long len, | ||
95 | unsigned long flags, int pc); | ||
96 | void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, | ||
97 | unsigned long flags, int pc); | ||
98 | void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event, | ||
99 | unsigned long flags, int pc); | ||
100 | void trace_current_buffer_discard_commit(struct ring_buffer_event *event); | ||
101 | |||
102 | void tracing_record_cmdline(struct task_struct *tsk); | ||
103 | |||
104 | struct ftrace_event_call { | ||
105 | struct list_head list; | ||
106 | char *name; | ||
107 | char *system; | ||
108 | struct dentry *dir; | ||
109 | struct trace_event *event; | ||
110 | int enabled; | ||
111 | int (*regfunc)(void); | ||
112 | void (*unregfunc)(void); | ||
113 | int id; | ||
114 | int (*raw_init)(void); | ||
115 | int (*show_format)(struct trace_seq *s); | ||
116 | int (*define_fields)(void); | ||
117 | struct list_head fields; | ||
118 | int filter_active; | ||
119 | void *filter; | ||
120 | void *mod; | ||
121 | |||
122 | #ifdef CONFIG_EVENT_PROFILE | ||
123 | atomic_t profile_count; | ||
124 | int (*profile_enable)(struct ftrace_event_call *); | ||
125 | void (*profile_disable)(struct ftrace_event_call *); | ||
126 | #endif | ||
127 | }; | ||
128 | |||
129 | #define MAX_FILTER_PRED 32 | ||
130 | #define MAX_FILTER_STR_VAL 128 | ||
131 | |||
132 | extern int init_preds(struct ftrace_event_call *call); | ||
133 | extern void destroy_preds(struct ftrace_event_call *call); | ||
134 | extern int filter_match_preds(struct ftrace_event_call *call, void *rec); | ||
135 | extern int filter_current_check_discard(struct ftrace_event_call *call, | ||
136 | void *rec, | ||
137 | struct ring_buffer_event *event); | ||
138 | |||
139 | extern int trace_define_field(struct ftrace_event_call *call, char *type, | ||
140 | char *name, int offset, int size, int is_signed); | ||
141 | |||
142 | #define is_signed_type(type) (((type)(-1)) < 0) | ||
143 | |||
144 | int trace_set_clr_event(const char *system, const char *event, int set); | ||
145 | |||
146 | /* | ||
147 | * The double __builtin_constant_p is because gcc will give us an error | ||
148 | * if we try to initialize the static variable with fmt when it is not a | ||
149 | * constant, even though the outer if statement would be optimized out. | ||
150 | */ | ||
151 | #define event_trace_printk(ip, fmt, args...) \ | ||
152 | do { \ | ||
153 | __trace_printk_check_format(fmt, ##args); \ | ||
154 | tracing_record_cmdline(current); \ | ||
155 | if (__builtin_constant_p(fmt)) { \ | ||
156 | static const char *trace_printk_fmt \ | ||
157 | __attribute__((section("__trace_printk_fmt"))) = \ | ||
158 | __builtin_constant_p(fmt) ? fmt : NULL; \ | ||
159 | \ | ||
160 | __trace_bprintk(ip, trace_printk_fmt, ##args); \ | ||
161 | } else \ | ||
162 | __trace_printk(ip, fmt, ##args); \ | ||
163 | } while (0) | ||
164 | |||
165 | #define __common_field(type, item, is_signed) \ | ||
166 | ret = trace_define_field(event_call, #type, "common_" #item, \ | ||
167 | offsetof(typeof(field.ent), item), \ | ||
168 | sizeof(field.ent.item), is_signed); \ | ||
169 | if (ret) \ | ||
170 | return ret; | ||
171 | |||
172 | #endif /* _LINUX_FTRACE_EVENT_H */ | ||
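As a quick illustration of the registry declared above, a tracer plugin might install a text formatter roughly as sketched below. Leaving .type at 0 is assumed to ask register_ftrace_event() to assign an id; the format string and error value are illustrative.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/ftrace_event.h>
#include <linux/trace_seq.h>

static enum print_line_t example_trace_print(struct trace_iterator *iter,
					     int flags)
{
	struct trace_entry *entry = iter->ent;

	/* trace_seq_printf() returns 0 when the seq buffer is full */
	if (!trace_seq_printf(&iter->seq, "example: pid=%d\n", entry->pid))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

static struct trace_event example_trace_event = {
	.type	= 0,			/* assumed: 0 lets register_ftrace_event() pick an id */
	.trace	= example_trace_print,	/* default text output callback */
};

static int __init example_event_init(void)
{
	if (!register_ftrace_event(&example_trace_event))
		return -EBUSY;
	return 0;
}
module_init(example_event_init);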
diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 162e5defe683..d41ed593f79f 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h | |||
@@ -121,6 +121,13 @@ struct fuse_file_lock { | |||
121 | #define FUSE_BIG_WRITES (1 << 5) | 121 | #define FUSE_BIG_WRITES (1 << 5) |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * CUSE INIT request/reply flags | ||
125 | * | ||
126 | * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl | ||
127 | */ | ||
128 | #define CUSE_UNRESTRICTED_IOCTL (1 << 0) | ||
129 | |||
130 | /** | ||
124 | * Release flags | 131 | * Release flags |
125 | */ | 132 | */ |
126 | #define FUSE_RELEASE_FLUSH (1 << 0) | 133 | #define FUSE_RELEASE_FLUSH (1 << 0) |
@@ -210,6 +217,9 @@ enum fuse_opcode { | |||
210 | FUSE_DESTROY = 38, | 217 | FUSE_DESTROY = 38, |
211 | FUSE_IOCTL = 39, | 218 | FUSE_IOCTL = 39, |
212 | FUSE_POLL = 40, | 219 | FUSE_POLL = 40, |
220 | |||
221 | /* CUSE specific operations */ | ||
222 | CUSE_INIT = 4096, | ||
213 | }; | 223 | }; |
214 | 224 | ||
215 | enum fuse_notify_code { | 225 | enum fuse_notify_code { |
@@ -401,6 +411,27 @@ struct fuse_init_out { | |||
401 | __u32 max_write; | 411 | __u32 max_write; |
402 | }; | 412 | }; |
403 | 413 | ||
414 | #define CUSE_INIT_INFO_MAX 4096 | ||
415 | |||
416 | struct cuse_init_in { | ||
417 | __u32 major; | ||
418 | __u32 minor; | ||
419 | __u32 unused; | ||
420 | __u32 flags; | ||
421 | }; | ||
422 | |||
423 | struct cuse_init_out { | ||
424 | __u32 major; | ||
425 | __u32 minor; | ||
426 | __u32 unused; | ||
427 | __u32 flags; | ||
428 | __u32 max_read; | ||
429 | __u32 max_write; | ||
430 | __u32 dev_major; /* chardev major */ | ||
431 | __u32 dev_minor; /* chardev minor */ | ||
432 | __u32 spare[10]; | ||
433 | }; | ||
434 | |||
404 | struct fuse_interrupt_in { | 435 | struct fuse_interrupt_in { |
405 | __u64 unique; | 436 | __u64 unique; |
406 | }; | 437 | }; |
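The cuse_init_in/cuse_init_out pair above defines the CUSE handshake: a userspace CUSE server reads a CUSE_INIT request and answers with a cuse_init_out. A rough sketch of the reply body follows; the numeric values are illustrative, and using FUSE_KERNEL_VERSION/FUSE_KERNEL_MINOR_VERSION for the protocol fields, as well as reading CUSE_INIT_INFO_MAX as the bound on the variable-length device info that follows the struct, are assumptions not spelled out by this hunk.

#include <linux/fuse.h>

/* illustrative reply a CUSE server might send back for CUSE_INIT */
struct cuse_init_out cuse_reply = {
	.major		= FUSE_KERNEL_VERSION,		/* protocol version spoken */
	.minor		= FUSE_KERNEL_MINOR_VERSION,
	.flags		= CUSE_UNRESTRICTED_IOCTL,	/* the new flag defined above */
	.max_read	= 65536,
	.max_write	= 65536,
	.dev_major	= 240,	/* requested chardev major (illustrative) */
	.dev_minor	= 0,
};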
diff --git a/include/linux/futex.h b/include/linux/futex.h index 3bf5bb5a34f9..34956c8fdebf 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h | |||
@@ -23,6 +23,8 @@ union ktime; | |||
23 | #define FUTEX_TRYLOCK_PI 8 | 23 | #define FUTEX_TRYLOCK_PI 8 |
24 | #define FUTEX_WAIT_BITSET 9 | 24 | #define FUTEX_WAIT_BITSET 9 |
25 | #define FUTEX_WAKE_BITSET 10 | 25 | #define FUTEX_WAKE_BITSET 10 |
26 | #define FUTEX_WAIT_REQUEUE_PI 11 | ||
27 | #define FUTEX_CMP_REQUEUE_PI 12 | ||
26 | 28 | ||
27 | #define FUTEX_PRIVATE_FLAG 128 | 29 | #define FUTEX_PRIVATE_FLAG 128 |
28 | #define FUTEX_CLOCK_REALTIME 256 | 30 | #define FUTEX_CLOCK_REALTIME 256 |
@@ -38,6 +40,10 @@ union ktime; | |||
38 | #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) | 40 | #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG) |
39 | #define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG) | 41 | #define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG) |
40 | #define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG) | 42 | #define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG) |
43 | #define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \ | ||
44 | FUTEX_PRIVATE_FLAG) | ||
45 | #define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \ | ||
46 | FUTEX_PRIVATE_FLAG) | ||
41 | 47 | ||
42 | /* | 48 | /* |
43 | * Support for robust futexes: the kernel cleans up held futexes at | 49 | * Support for robust futexes: the kernel cleans up held futexes at |
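The new requeue-PI opcodes compose with the existing modifier bits exactly like the older commands: FUTEX_PRIVATE_FLAG and FUTEX_CLOCK_REALTIME are ORed into the op word and masked back out with FUTEX_CMD_MASK inside the kernel. The sketch below only shows that composition; a real caller would go through syscall(SYS_futex, ...) with properly initialised PI futex words, which is omitted here.

/* Sketch: opcode/flag composition for the new requeue-PI commands.
 * Constants are copied from the hunk above so this builds standalone. */
#include <stdio.h>

#define FUTEX_WAIT_REQUEUE_PI   11
#define FUTEX_CMP_REQUEUE_PI    12
#define FUTEX_PRIVATE_FLAG      128
#define FUTEX_CLOCK_REALTIME    256
#define FUTEX_CMD_MASK          ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

int main(void)
{
        int op = FUTEX_WAIT_REQUEUE_PI | FUTEX_PRIVATE_FLAG;

        printf("op word passed to sys_futex: %d\n", op);
        printf("command after FUTEX_CMD_MASK: %d\n", op & FUTEX_CMD_MASK);
        return 0;
}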
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index a1a28caed23d..7cbd38d363a2 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -90,6 +90,7 @@ struct disk_stats { | |||
90 | struct hd_struct { | 90 | struct hd_struct { |
91 | sector_t start_sect; | 91 | sector_t start_sect; |
92 | sector_t nr_sects; | 92 | sector_t nr_sects; |
93 | sector_t alignment_offset; | ||
93 | struct device __dev; | 94 | struct device __dev; |
94 | struct kobject *holder_dir; | 95 | struct kobject *holder_dir; |
95 | int policy, partno; | 96 | int policy, partno; |
@@ -113,6 +114,7 @@ struct hd_struct { | |||
113 | #define GENHD_FL_UP 16 | 114 | #define GENHD_FL_UP 16 |
114 | #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 | 115 | #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 |
115 | #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ | 116 | #define GENHD_FL_EXT_DEVT 64 /* allow extended devt */ |
117 | #define GENHD_FL_NATIVE_CAPACITY 128 | ||
116 | 118 | ||
117 | #define BLK_SCSI_MAX_CMDS (256) | 119 | #define BLK_SCSI_MAX_CMDS (256) |
118 | #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) | 120 | #define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0bbc15f54536..3760e7c5de02 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -85,6 +85,9 @@ struct vm_area_struct; | |||
85 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ | 85 | __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\ |
86 | __GFP_NORETRY|__GFP_NOMEMALLOC) | 86 | __GFP_NORETRY|__GFP_NOMEMALLOC) |
87 | 87 | ||
88 | /* Control slab gfp mask during early boot */ | ||
89 | #define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) | ||
90 | |||
88 | /* Control allocation constraints */ | 91 | /* Control allocation constraints */ |
89 | #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) | 92 | #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) |
90 | 93 | ||
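SLAB_GFP_BOOT_MASK clamps whatever gfp mask a caller passes during early boot by stripping the bits that could sleep or start I/O. The standalone sketch below shows the effect of that AND; the bit values are illustrative placeholders, not the kernel's real __GFP_* encodings.

/* Sketch of the early-boot clamp; bit values are placeholders. */
#include <stdio.h>

#define GFP_WAIT  (1u << 0)
#define GFP_IO    (1u << 1)
#define GFP_FS    (1u << 2)
#define GFP_HIGH  (1u << 3)
#define GFP_BITS_MASK 0xfu

#define SLAB_GFP_BOOT_MASK (GFP_BITS_MASK & ~(GFP_WAIT | GFP_IO | GFP_FS))

int main(void)
{
        unsigned int request = GFP_WAIT | GFP_IO | GFP_FS | GFP_HIGH;

        /* Sleeping/IO/FS bits are stripped; the rest pass through. */
        printf("requested 0x%x, clamped to 0x%x\n",
               request, request & SLAB_GFP_BOOT_MASK);
        return 0;
}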
diff --git a/include/linux/ide.h b/include/linux/ide.h index 9fed365a598b..a6c6a2fad7c8 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
@@ -26,6 +26,9 @@ | |||
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | #include <asm/mutex.h> | 27 | #include <asm/mutex.h> |
28 | 28 | ||
29 | /* for request_sense */ | ||
30 | #include <linux/cdrom.h> | ||
31 | |||
29 | #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) | 32 | #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) |
30 | # define SUPPORT_VLB_SYNC 0 | 33 | # define SUPPORT_VLB_SYNC 0 |
31 | #else | 34 | #else |
@@ -175,7 +178,7 @@ typedef u8 hwif_chipset_t; | |||
175 | /* | 178 | /* |
176 | * Structure to hold all information about the location of this port | 179 | * Structure to hold all information about the location of this port |
177 | */ | 180 | */ |
178 | typedef struct hw_regs_s { | 181 | struct ide_hw { |
179 | union { | 182 | union { |
180 | struct ide_io_ports io_ports; | 183 | struct ide_io_ports io_ports; |
181 | unsigned long io_ports_array[IDE_NR_PORTS]; | 184 | unsigned long io_ports_array[IDE_NR_PORTS]; |
@@ -183,12 +186,11 @@ typedef struct hw_regs_s { | |||
183 | 186 | ||
184 | int irq; /* our irq number */ | 187 | int irq; /* our irq number */ |
185 | ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ | 188 | ide_ack_intr_t *ack_intr; /* acknowledge interrupt */ |
186 | hwif_chipset_t chipset; | ||
187 | struct device *dev, *parent; | 189 | struct device *dev, *parent; |
188 | unsigned long config; | 190 | unsigned long config; |
189 | } hw_regs_t; | 191 | }; |
190 | 192 | ||
191 | static inline void ide_std_init_ports(hw_regs_t *hw, | 193 | static inline void ide_std_init_ports(struct ide_hw *hw, |
192 | unsigned long io_addr, | 194 | unsigned long io_addr, |
193 | unsigned long ctl_addr) | 195 | unsigned long ctl_addr) |
194 | { | 196 | { |
@@ -215,21 +217,12 @@ static inline void ide_std_init_ports(hw_regs_t *hw, | |||
215 | 217 | ||
216 | /* | 218 | /* |
217 | * Special Driver Flags | 219 | * Special Driver Flags |
218 | * | ||
219 | * set_geometry : respecify drive geometry | ||
220 | * recalibrate : seek to cyl 0 | ||
221 | * set_multmode : set multmode count | ||
222 | * reserved : unused | ||
223 | */ | 220 | */ |
224 | typedef union { | 221 | enum { |
225 | unsigned all : 8; | 222 | IDE_SFLAG_SET_GEOMETRY = (1 << 0), |
226 | struct { | 223 | IDE_SFLAG_RECALIBRATE = (1 << 1), |
227 | unsigned set_geometry : 1; | 224 | IDE_SFLAG_SET_MULTMODE = (1 << 2), |
228 | unsigned recalibrate : 1; | 225 | }; |
229 | unsigned set_multmode : 1; | ||
230 | unsigned reserved : 5; | ||
231 | } b; | ||
232 | } special_t; | ||
233 | 226 | ||
234 | /* | 227 | /* |
235 | * Status returned from various ide_ functions | 228 | * Status returned from various ide_ functions |
@@ -324,7 +317,6 @@ struct ide_cmd { | |||
324 | unsigned int cursg_ofs; | 317 | unsigned int cursg_ofs; |
325 | 318 | ||
326 | struct request *rq; /* copy of request */ | 319 | struct request *rq; /* copy of request */ |
327 | void *special; /* valid_t generally */ | ||
328 | }; | 320 | }; |
329 | 321 | ||
330 | /* ATAPI packet command flags */ | 322 | /* ATAPI packet command flags */ |
@@ -360,11 +352,7 @@ struct ide_atapi_pc { | |||
360 | 352 | ||
361 | /* data buffer */ | 353 | /* data buffer */ |
362 | u8 *buf; | 354 | u8 *buf; |
363 | /* current buffer position */ | ||
364 | u8 *cur_pos; | ||
365 | int buf_size; | 355 | int buf_size; |
366 | /* missing/available data on the current buffer */ | ||
367 | int b_count; | ||
368 | 356 | ||
369 | /* the corresponding request */ | 357 | /* the corresponding request */ |
370 | struct request *rq; | 358 | struct request *rq; |
@@ -377,10 +365,6 @@ struct ide_atapi_pc { | |||
377 | */ | 365 | */ |
378 | u8 pc_buf[IDE_PC_BUFFER_SIZE]; | 366 | u8 pc_buf[IDE_PC_BUFFER_SIZE]; |
379 | 367 | ||
380 | /* idetape only */ | ||
381 | struct idetape_bh *bh; | ||
382 | char *b_data; | ||
383 | |||
384 | unsigned long timeout; | 368 | unsigned long timeout; |
385 | }; | 369 | }; |
386 | 370 | ||
@@ -397,6 +381,7 @@ struct ide_drive_s; | |||
397 | struct ide_disk_ops { | 381 | struct ide_disk_ops { |
398 | int (*check)(struct ide_drive_s *, const char *); | 382 | int (*check)(struct ide_drive_s *, const char *); |
399 | int (*get_capacity)(struct ide_drive_s *); | 383 | int (*get_capacity)(struct ide_drive_s *); |
384 | u64 (*set_capacity)(struct ide_drive_s *, u64); | ||
400 | void (*setup)(struct ide_drive_s *); | 385 | void (*setup)(struct ide_drive_s *); |
401 | void (*flush)(struct ide_drive_s *); | 386 | void (*flush)(struct ide_drive_s *); |
402 | int (*init_media)(struct ide_drive_s *, struct gendisk *); | 387 | int (*init_media)(struct ide_drive_s *, struct gendisk *); |
@@ -474,6 +459,8 @@ enum { | |||
474 | IDE_DFLAG_NICE1 = (1 << 5), | 459 | IDE_DFLAG_NICE1 = (1 << 5), |
475 | /* device is physically present */ | 460 | /* device is physically present */ |
476 | IDE_DFLAG_PRESENT = (1 << 6), | 461 | IDE_DFLAG_PRESENT = (1 << 6), |
462 | /* disable Host Protected Area */ | ||
463 | IDE_DFLAG_NOHPA = (1 << 7), | ||
477 | /* id read from device (synthetic if not set) */ | 464 | /* id read from device (synthetic if not set) */ |
478 | IDE_DFLAG_ID_READ = (1 << 8), | 465 | IDE_DFLAG_ID_READ = (1 << 8), |
479 | IDE_DFLAG_NOPROBE = (1 << 9), | 466 | IDE_DFLAG_NOPROBE = (1 << 9), |
@@ -512,6 +499,7 @@ enum { | |||
512 | /* write protect */ | 499 | /* write protect */ |
513 | IDE_DFLAG_WP = (1 << 29), | 500 | IDE_DFLAG_WP = (1 << 29), |
514 | IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), | 501 | IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30), |
502 | IDE_DFLAG_NIEN_QUIRK = (1 << 31), | ||
515 | }; | 503 | }; |
516 | 504 | ||
517 | struct ide_drive_s { | 505 | struct ide_drive_s { |
@@ -536,14 +524,13 @@ struct ide_drive_s { | |||
536 | unsigned long sleep; /* sleep until this time */ | 524 | unsigned long sleep; /* sleep until this time */ |
537 | unsigned long timeout; /* max time to wait for irq */ | 525 | unsigned long timeout; /* max time to wait for irq */ |
538 | 526 | ||
539 | special_t special; /* special action flags */ | 527 | u8 special_flags; /* special action flags */ |
540 | 528 | ||
541 | u8 select; /* basic drive/head select reg value */ | 529 | u8 select; /* basic drive/head select reg value */ |
542 | u8 retry_pio; /* retrying dma capable host in pio */ | 530 | u8 retry_pio; /* retrying dma capable host in pio */ |
543 | u8 waiting_for_dma; /* dma currently in progress */ | 531 | u8 waiting_for_dma; /* dma currently in progress */ |
544 | u8 dma; /* atapi dma flag */ | 532 | u8 dma; /* atapi dma flag */ |
545 | 533 | ||
546 | u8 quirk_list; /* considered quirky, set for a specific host */ | ||
547 | u8 init_speed; /* transfer rate set at boot */ | 534 | u8 init_speed; /* transfer rate set at boot */ |
548 | u8 current_speed; /* current transfer rate set */ | 535 | u8 current_speed; /* current transfer rate set */ |
549 | u8 desired_speed; /* desired transfer rate set */ | 536 | u8 desired_speed; /* desired transfer rate set */ |
@@ -568,8 +555,7 @@ struct ide_drive_s { | |||
568 | unsigned int drive_data; /* used by set_pio_mode/dev_select() */ | 555 | unsigned int drive_data; /* used by set_pio_mode/dev_select() */ |
569 | unsigned int failures; /* current failure count */ | 556 | unsigned int failures; /* current failure count */ |
570 | unsigned int max_failures; /* maximum allowed failure count */ | 557 | unsigned int max_failures; /* maximum allowed failure count */ |
571 | u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */ | 558 | u64 probed_capacity;/* initial/native media capacity */ |
572 | |||
573 | u64 capacity64; /* total number of sectors */ | 559 | u64 capacity64; /* total number of sectors */ |
574 | 560 | ||
575 | int lun; /* logical unit */ | 561 | int lun; /* logical unit */ |
@@ -593,16 +579,16 @@ struct ide_drive_s { | |||
593 | /* callback for packet commands */ | 579 | /* callback for packet commands */ |
594 | int (*pc_callback)(struct ide_drive_s *, int); | 580 | int (*pc_callback)(struct ide_drive_s *, int); |
595 | 581 | ||
596 | void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *); | ||
597 | int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *, | ||
598 | unsigned int, int); | ||
599 | |||
600 | ide_startstop_t (*irq_handler)(struct ide_drive_s *); | 582 | ide_startstop_t (*irq_handler)(struct ide_drive_s *); |
601 | 583 | ||
602 | unsigned long atapi_flags; | 584 | unsigned long atapi_flags; |
603 | 585 | ||
604 | struct ide_atapi_pc request_sense_pc; | 586 | struct ide_atapi_pc request_sense_pc; |
605 | struct request request_sense_rq; | 587 | |
588 | /* current sense rq and buffer */ | ||
589 | bool sense_rq_armed; | ||
590 | struct request sense_rq; | ||
591 | struct request_sense sense_data; | ||
606 | }; | 592 | }; |
607 | 593 | ||
608 | typedef struct ide_drive_s ide_drive_t; | 594 | typedef struct ide_drive_s ide_drive_t; |
@@ -1174,7 +1160,10 @@ int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *); | |||
1174 | int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); | 1160 | int ide_do_start_stop(ide_drive_t *, struct gendisk *, int); |
1175 | int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); | 1161 | int ide_set_media_lock(ide_drive_t *, struct gendisk *, int); |
1176 | void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); | 1162 | void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *); |
1177 | void ide_retry_pc(ide_drive_t *, struct gendisk *); | 1163 | void ide_retry_pc(ide_drive_t *drive); |
1164 | |||
1165 | void ide_prep_sense(ide_drive_t *drive, struct request *rq); | ||
1166 | int ide_queue_sense_rq(ide_drive_t *drive, void *special); | ||
1178 | 1167 | ||
1179 | int ide_cd_expiry(ide_drive_t *); | 1168 | int ide_cd_expiry(ide_drive_t *); |
1180 | 1169 | ||
@@ -1225,7 +1214,7 @@ static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev) | |||
1225 | } | 1214 | } |
1226 | 1215 | ||
1227 | void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, | 1216 | void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, |
1228 | hw_regs_t *, hw_regs_t **); | 1217 | struct ide_hw *, struct ide_hw **); |
1229 | void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); | 1218 | void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); |
1230 | 1219 | ||
1231 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI | 1220 | #ifdef CONFIG_BLK_DEV_IDEDMA_PCI |
@@ -1464,16 +1453,18 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} | |||
1464 | void ide_register_region(struct gendisk *); | 1453 | void ide_register_region(struct gendisk *); |
1465 | void ide_unregister_region(struct gendisk *); | 1454 | void ide_unregister_region(struct gendisk *); |
1466 | 1455 | ||
1456 | void ide_check_nien_quirk_list(ide_drive_t *); | ||
1467 | void ide_undecoded_slave(ide_drive_t *); | 1457 | void ide_undecoded_slave(ide_drive_t *); |
1468 | 1458 | ||
1469 | void ide_port_apply_params(ide_hwif_t *); | 1459 | void ide_port_apply_params(ide_hwif_t *); |
1470 | int ide_sysfs_register_port(ide_hwif_t *); | 1460 | int ide_sysfs_register_port(ide_hwif_t *); |
1471 | 1461 | ||
1472 | struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **); | 1462 | struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **, |
1463 | unsigned int); | ||
1473 | void ide_host_free(struct ide_host *); | 1464 | void ide_host_free(struct ide_host *); |
1474 | int ide_host_register(struct ide_host *, const struct ide_port_info *, | 1465 | int ide_host_register(struct ide_host *, const struct ide_port_info *, |
1475 | hw_regs_t **); | 1466 | struct ide_hw **); |
1476 | int ide_host_add(const struct ide_port_info *, hw_regs_t **, | 1467 | int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int, |
1477 | struct ide_host **); | 1468 | struct ide_host **); |
1478 | void ide_host_remove(struct ide_host *); | 1469 | void ide_host_remove(struct ide_host *); |
1479 | int ide_legacy_device_add(const struct ide_port_info *, unsigned long); | 1470 | int ide_legacy_device_add(const struct ide_port_info *, unsigned long); |
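Among the many ide.h changes above, the special_t bitfield union is replaced by plain IDE_SFLAG_* bits stored in drive->special_flags. A standalone sketch of the equivalent set/test/clear operations follows; the fake drive structure here only stands in for ide_drive_t for illustration.

/* Sketch: bit-flag replacement for the old special_t bitfields. */
#include <stdio.h>

enum {
        IDE_SFLAG_SET_GEOMETRY = (1 << 0),
        IDE_SFLAG_RECALIBRATE  = (1 << 1),
        IDE_SFLAG_SET_MULTMODE = (1 << 2),
};

struct fake_drive {
        unsigned char special_flags;    /* was: special_t special */
};

int main(void)
{
        struct fake_drive drive = { 0 };

        /* old: drive->special.b.recalibrate = 1; */
        drive.special_flags |= IDE_SFLAG_RECALIBRATE;

        /* old: if (drive->special.all) ... */
        if (drive.special_flags & IDE_SFLAG_RECALIBRATE)
                printf("recalibrate pending\n");

        drive.special_flags &= ~IDE_SFLAG_RECALIBRATE;
        return 0;
}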
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index cfe4fe1b7132..60e8934d10b5 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
@@ -79,6 +79,7 @@ | |||
79 | #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ | 79 | #define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ |
80 | #define ETH_P_TIPC 0x88CA /* TIPC */ | 80 | #define ETH_P_TIPC 0x88CA /* TIPC */ |
81 | #define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ | 81 | #define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ |
82 | #define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ | ||
82 | #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ | 83 | #define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ |
83 | 84 | ||
84 | /* | 85 | /* |
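ETH_P_FIP is the FCoE Initialization Protocol ethertype. The short user-space sketch below shows the usual big-endian comparison against a frame header; the local eth_hdr struct is illustrative and not the kernel's struct ethhdr.

/* Sketch: matching the new FIP ethertype on a raw frame. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define ETH_P_FIP 0x8914

struct eth_hdr {
        uint8_t  dst[6];
        uint8_t  src[6];
        uint16_t proto;         /* big-endian on the wire */
};

static int is_fip(const struct eth_hdr *h)
{
        return ntohs(h->proto) == ETH_P_FIP;
}

int main(void)
{
        struct eth_hdr h = { .proto = htons(ETH_P_FIP) };

        printf("FIP frame: %s\n", is_fip(&h) ? "yes" : "no");
        return 0;
}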
diff --git a/include/linux/ima.h b/include/linux/ima.h index 0e2aa45cb0ce..b1b827d091a9 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h | |||
@@ -13,14 +13,17 @@ | |||
13 | #include <linux/fs.h> | 13 | #include <linux/fs.h> |
14 | struct linux_binprm; | 14 | struct linux_binprm; |
15 | 15 | ||
16 | #define IMA_COUNT_UPDATE 1 | ||
17 | #define IMA_COUNT_LEAVE 0 | ||
18 | |||
16 | #ifdef CONFIG_IMA | 19 | #ifdef CONFIG_IMA |
17 | extern int ima_bprm_check(struct linux_binprm *bprm); | 20 | extern int ima_bprm_check(struct linux_binprm *bprm); |
18 | extern int ima_inode_alloc(struct inode *inode); | 21 | extern int ima_inode_alloc(struct inode *inode); |
19 | extern void ima_inode_free(struct inode *inode); | 22 | extern void ima_inode_free(struct inode *inode); |
20 | extern int ima_path_check(struct path *path, int mask); | 23 | extern int ima_path_check(struct path *path, int mask, int update_counts); |
21 | extern void ima_file_free(struct file *file); | 24 | extern void ima_file_free(struct file *file); |
22 | extern int ima_file_mmap(struct file *file, unsigned long prot); | 25 | extern int ima_file_mmap(struct file *file, unsigned long prot); |
23 | extern void ima_shm_check(struct file *file); | 26 | extern void ima_counts_get(struct file *file); |
24 | 27 | ||
25 | #else | 28 | #else |
26 | static inline int ima_bprm_check(struct linux_binprm *bprm) | 29 | static inline int ima_bprm_check(struct linux_binprm *bprm) |
@@ -38,7 +41,7 @@ static inline void ima_inode_free(struct inode *inode) | |||
38 | return; | 41 | return; |
39 | } | 42 | } |
40 | 43 | ||
41 | static inline int ima_path_check(struct path *path, int mask) | 44 | static inline int ima_path_check(struct path *path, int mask, int update_counts) |
42 | { | 45 | { |
43 | return 0; | 46 | return 0; |
44 | } | 47 | } |
@@ -53,7 +56,7 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot) | |||
53 | return 0; | 56 | return 0; |
54 | } | 57 | } |
55 | 58 | ||
56 | static inline void ima_shm_check(struct file *file) | 59 | static inline void ima_counts_get(struct file *file) |
57 | { | 60 | { |
58 | return; | 61 | return; |
59 | } | 62 | } |
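The ima_path_check() prototype gains an update_counts argument, and IMA_COUNT_UPDATE/IMA_COUNT_LEAVE name the two choices. The sketch below only illustrates the new calling convention; the stub types and the mask value are placeholders, not the kernel implementation.

/* Sketch of the new calling convention for ima_path_check(). */
#include <stdio.h>

#define IMA_COUNT_UPDATE 1
#define IMA_COUNT_LEAVE  0

struct path { int dummy; };

static int ima_path_check(struct path *path, int mask, int update_counts)
{
        printf("mask=%d update_counts=%d\n", mask, update_counts);
        return 0;
}

int main(void)
{
        struct path p = { 0 };

        /* an open() path that wants the open counters maintained ... */
        ima_path_check(&p, 4 /* MAY_READ-style mask, illustrative */, IMA_COUNT_UPDATE);
        /* ... versus a check that should leave the counters alone */
        ima_path_check(&p, 4, IMA_COUNT_LEAVE);
        return 0;
}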
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index d87247d2641f..28b1f30601b5 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -108,6 +108,15 @@ extern struct group_info init_groups; | |||
108 | 108 | ||
109 | extern struct cred init_cred; | 109 | extern struct cred init_cred; |
110 | 110 | ||
111 | #ifdef CONFIG_PERF_COUNTERS | ||
112 | # define INIT_PERF_COUNTERS(tsk) \ | ||
113 | .perf_counter_mutex = \ | ||
114 | __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \ | ||
115 | .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list), | ||
116 | #else | ||
117 | # define INIT_PERF_COUNTERS(tsk) | ||
118 | #endif | ||
119 | |||
111 | /* | 120 | /* |
112 | * INIT_TASK is used to set up the first task table, touch at | 121 | * INIT_TASK is used to set up the first task table, touch at |
113 | * your own risk!. Base=0, limit=0x1fffff (=2MB) | 122 | * your own risk!. Base=0, limit=0x1fffff (=2MB) |
@@ -145,8 +154,8 @@ extern struct cred init_cred; | |||
145 | .group_leader = &tsk, \ | 154 | .group_leader = &tsk, \ |
146 | .real_cred = &init_cred, \ | 155 | .real_cred = &init_cred, \ |
147 | .cred = &init_cred, \ | 156 | .cred = &init_cred, \ |
148 | .cred_exec_mutex = \ | 157 | .cred_guard_mutex = \ |
149 | __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \ | 158 | __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \ |
150 | .comm = "swapper", \ | 159 | .comm = "swapper", \ |
151 | .thread = INIT_THREAD, \ | 160 | .thread = INIT_THREAD, \ |
152 | .fs = &init_fs, \ | 161 | .fs = &init_fs, \ |
@@ -171,9 +180,11 @@ extern struct cred init_cred; | |||
171 | }, \ | 180 | }, \ |
172 | .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ | 181 | .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \ |
173 | INIT_IDS \ | 182 | INIT_IDS \ |
183 | INIT_PERF_COUNTERS(tsk) \ | ||
174 | INIT_TRACE_IRQFLAGS \ | 184 | INIT_TRACE_IRQFLAGS \ |
175 | INIT_LOCKDEP \ | 185 | INIT_LOCKDEP \ |
176 | INIT_FTRACE_GRAPH \ | 186 | INIT_FTRACE_GRAPH \ |
187 | INIT_TRACE_RECURSION \ | ||
177 | } | 188 | } |
178 | 189 | ||
179 | 190 | ||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 91bb76f44f14..c41e812e9d5e 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -183,6 +183,7 @@ extern void disable_irq(unsigned int irq); | |||
183 | extern void enable_irq(unsigned int irq); | 183 | extern void enable_irq(unsigned int irq); |
184 | 184 | ||
185 | /* The following three functions are for the core kernel use only. */ | 185 | /* The following three functions are for the core kernel use only. */ |
186 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
186 | extern void suspend_device_irqs(void); | 187 | extern void suspend_device_irqs(void); |
187 | extern void resume_device_irqs(void); | 188 | extern void resume_device_irqs(void); |
188 | #ifdef CONFIG_PM_SLEEP | 189 | #ifdef CONFIG_PM_SLEEP |
@@ -190,6 +191,11 @@ extern int check_wakeup_irqs(void); | |||
190 | #else | 191 | #else |
191 | static inline int check_wakeup_irqs(void) { return 0; } | 192 | static inline int check_wakeup_irqs(void) { return 0; } |
192 | #endif | 193 | #endif |
194 | #else | ||
195 | static inline void suspend_device_irqs(void) { }; | ||
196 | static inline void resume_device_irqs(void) { }; | ||
197 | static inline int check_wakeup_irqs(void) { return 0; } | ||
198 | #endif | ||
193 | 199 | ||
194 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) | 200 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS) |
195 | 201 | ||
@@ -566,6 +572,6 @@ struct irq_desc; | |||
566 | extern int early_irq_init(void); | 572 | extern int early_irq_init(void); |
567 | extern int arch_probe_nr_irqs(void); | 573 | extern int arch_probe_nr_irqs(void); |
568 | extern int arch_early_irq_init(void); | 574 | extern int arch_early_irq_init(void); |
569 | extern int arch_init_chip_data(struct irq_desc *desc, int cpu); | 575 | extern int arch_init_chip_data(struct irq_desc *desc, int node); |
570 | 576 | ||
571 | #endif | 577 | #endif |
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 08b987bccf89..dd05434fa45f 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h | |||
@@ -64,7 +64,7 @@ struct cfq_io_context { | |||
64 | * and kmalloc'ed. These could be shared between processes. | 64 | * and kmalloc'ed. These could be shared between processes. |
65 | */ | 65 | */ |
66 | struct io_context { | 66 | struct io_context { |
67 | atomic_t refcount; | 67 | atomic_long_t refcount; |
68 | atomic_t nr_tasks; | 68 | atomic_t nr_tasks; |
69 | 69 | ||
70 | /* all the fields below are protected by this lock */ | 70 | /* all the fields below are protected by this lock */ |
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc) | |||
91 | * if ref count is zero, don't allow sharing (ioc is going away, it's | 91 | * if ref count is zero, don't allow sharing (ioc is going away, it's |
92 | * a race). | 92 | * a race). |
93 | */ | 93 | */ |
94 | if (ioc && atomic_inc_not_zero(&ioc->refcount)) { | 94 | if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) { |
95 | atomic_inc(&ioc->nr_tasks); | 95 | atomic_long_inc(&ioc->refcount); |
96 | return ioc; | 96 | return ioc; |
97 | } | 97 | } |
98 | 98 | ||
diff --git a/include/linux/irq.h b/include/linux/irq.h index b7cbeed972e4..1e50c34f0062 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -117,7 +117,7 @@ struct irq_chip { | |||
117 | void (*eoi)(unsigned int irq); | 117 | void (*eoi)(unsigned int irq); |
118 | 118 | ||
119 | void (*end)(unsigned int irq); | 119 | void (*end)(unsigned int irq); |
120 | void (*set_affinity)(unsigned int irq, | 120 | int (*set_affinity)(unsigned int irq, |
121 | const struct cpumask *dest); | 121 | const struct cpumask *dest); |
122 | int (*retrigger)(unsigned int irq); | 122 | int (*retrigger)(unsigned int irq); |
123 | int (*set_type)(unsigned int irq, unsigned int flow_type); | 123 | int (*set_type)(unsigned int irq, unsigned int flow_type); |
@@ -187,7 +187,7 @@ struct irq_desc { | |||
187 | spinlock_t lock; | 187 | spinlock_t lock; |
188 | #ifdef CONFIG_SMP | 188 | #ifdef CONFIG_SMP |
189 | cpumask_var_t affinity; | 189 | cpumask_var_t affinity; |
190 | unsigned int cpu; | 190 | unsigned int node; |
191 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 191 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
192 | cpumask_var_t pending_mask; | 192 | cpumask_var_t pending_mask; |
193 | #endif | 193 | #endif |
@@ -201,26 +201,23 @@ struct irq_desc { | |||
201 | } ____cacheline_internodealigned_in_smp; | 201 | } ____cacheline_internodealigned_in_smp; |
202 | 202 | ||
203 | extern void arch_init_copy_chip_data(struct irq_desc *old_desc, | 203 | extern void arch_init_copy_chip_data(struct irq_desc *old_desc, |
204 | struct irq_desc *desc, int cpu); | 204 | struct irq_desc *desc, int node); |
205 | extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); | 205 | extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); |
206 | 206 | ||
207 | #ifndef CONFIG_SPARSE_IRQ | 207 | #ifndef CONFIG_SPARSE_IRQ |
208 | extern struct irq_desc irq_desc[NR_IRQS]; | 208 | extern struct irq_desc irq_desc[NR_IRQS]; |
209 | #else /* CONFIG_SPARSE_IRQ */ | 209 | #endif |
210 | extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu); | ||
211 | #endif /* CONFIG_SPARSE_IRQ */ | ||
212 | |||
213 | extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu); | ||
214 | 210 | ||
215 | static inline struct irq_desc * | 211 | #ifdef CONFIG_NUMA_IRQ_DESC |
216 | irq_remap_to_desc(unsigned int irq, struct irq_desc *desc) | 212 | extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); |
217 | { | ||
218 | #ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC | ||
219 | return irq_to_desc(irq); | ||
220 | #else | 213 | #else |
214 | static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | ||
215 | { | ||
221 | return desc; | 216 | return desc; |
222 | #endif | ||
223 | } | 217 | } |
218 | #endif | ||
219 | |||
220 | extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); | ||
224 | 221 | ||
225 | /* | 222 | /* |
226 | * Migration helpers for obsolete names, they will go away: | 223 | * Migration helpers for obsolete names, they will go away: |
@@ -386,7 +383,7 @@ extern void set_irq_noprobe(unsigned int irq); | |||
386 | extern void set_irq_probe(unsigned int irq); | 383 | extern void set_irq_probe(unsigned int irq); |
387 | 384 | ||
388 | /* Handle dynamic irq creation and destruction */ | 385 | /* Handle dynamic irq creation and destruction */ |
389 | extern unsigned int create_irq_nr(unsigned int irq_want); | 386 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); |
390 | extern int create_irq(void); | 387 | extern int create_irq(void); |
391 | extern void destroy_irq(unsigned int irq); | 388 | extern void destroy_irq(unsigned int irq); |
392 | 389 | ||
@@ -424,47 +421,44 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); | |||
424 | 421 | ||
425 | #ifdef CONFIG_SMP | 422 | #ifdef CONFIG_SMP |
426 | /** | 423 | /** |
427 | * init_alloc_desc_masks - allocate cpumasks for irq_desc | 424 | * alloc_desc_masks - allocate cpumasks for irq_desc |
428 | * @desc: pointer to irq_desc struct | 425 | * @desc: pointer to irq_desc struct |
429 | * @cpu: cpu which will be handling the cpumasks | 426 | * @cpu: cpu which will be handling the cpumasks |
430 | * @boot: true if need bootmem | 427 | * @boot: true if need bootmem |
431 | * | 428 | * |
432 | * Allocates affinity and pending_mask cpumask if required. | 429 | * Allocates affinity and pending_mask cpumask if required. |
433 | * Returns true if successful (or not required). | 430 | * Returns true if successful (or not required). |
434 | * Side effect: affinity has all bits set, pending_mask has all bits clear. | ||
435 | */ | 431 | */ |
436 | static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | 432 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, |
437 | bool boot) | 433 | bool boot) |
438 | { | 434 | { |
439 | int node; | 435 | gfp_t gfp = GFP_ATOMIC; |
440 | |||
441 | if (boot) { | ||
442 | alloc_bootmem_cpumask_var(&desc->affinity); | ||
443 | cpumask_setall(desc->affinity); | ||
444 | |||
445 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
446 | alloc_bootmem_cpumask_var(&desc->pending_mask); | ||
447 | cpumask_clear(desc->pending_mask); | ||
448 | #endif | ||
449 | return true; | ||
450 | } | ||
451 | 436 | ||
452 | node = cpu_to_node(cpu); | 437 | if (boot) |
438 | gfp = GFP_NOWAIT; | ||
453 | 439 | ||
454 | if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node)) | 440 | #ifdef CONFIG_CPUMASK_OFFSTACK |
441 | if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) | ||
455 | return false; | 442 | return false; |
456 | cpumask_setall(desc->affinity); | ||
457 | 443 | ||
458 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 444 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
459 | if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) { | 445 | if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { |
460 | free_cpumask_var(desc->affinity); | 446 | free_cpumask_var(desc->affinity); |
461 | return false; | 447 | return false; |
462 | } | 448 | } |
463 | cpumask_clear(desc->pending_mask); | 449 | #endif |
464 | #endif | 450 | #endif |
465 | return true; | 451 | return true; |
466 | } | 452 | } |
467 | 453 | ||
454 | static inline void init_desc_masks(struct irq_desc *desc) | ||
455 | { | ||
456 | cpumask_setall(desc->affinity); | ||
457 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
458 | cpumask_clear(desc->pending_mask); | ||
459 | #endif | ||
460 | } | ||
461 | |||
468 | /** | 462 | /** |
469 | * init_copy_desc_masks - copy cpumasks for irq_desc | 463 | * init_copy_desc_masks - copy cpumasks for irq_desc |
470 | * @old_desc: pointer to old irq_desc struct | 464 | * @old_desc: pointer to old irq_desc struct |
@@ -478,7 +472,7 @@ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | |||
478 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 472 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, |
479 | struct irq_desc *new_desc) | 473 | struct irq_desc *new_desc) |
480 | { | 474 | { |
481 | #ifdef CONFIG_CPUMASKS_OFFSTACK | 475 | #ifdef CONFIG_CPUMASK_OFFSTACK |
482 | cpumask_copy(new_desc->affinity, old_desc->affinity); | 476 | cpumask_copy(new_desc->affinity, old_desc->affinity); |
483 | 477 | ||
484 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 478 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
@@ -499,12 +493,16 @@ static inline void free_desc_masks(struct irq_desc *old_desc, | |||
499 | 493 | ||
500 | #else /* !CONFIG_SMP */ | 494 | #else /* !CONFIG_SMP */ |
501 | 495 | ||
502 | static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu, | 496 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, |
503 | bool boot) | 497 | bool boot) |
504 | { | 498 | { |
505 | return true; | 499 | return true; |
506 | } | 500 | } |
507 | 501 | ||
502 | static inline void init_desc_masks(struct irq_desc *desc) | ||
503 | { | ||
504 | } | ||
505 | |||
508 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 506 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, |
509 | struct irq_desc *new_desc) | 507 | struct irq_desc *new_desc) |
510 | { | 508 | { |
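The reworked alloc_desc_masks() picks GFP_NOWAIT during boot, only allocates when the cpumasks are off-stack, and frees the affinity mask again if the pending_mask allocation fails. The standalone sketch below mirrors just that allocate-with-rollback control flow, with malloc/free standing in for the cpumask allocators.

/* Sketch of the allocate-two-then-roll-back pattern. */
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_desc {
        unsigned long *affinity;
        unsigned long *pending_mask;
};

static bool alloc_desc_masks(struct fake_desc *desc)
{
        desc->affinity = malloc(sizeof(unsigned long));
        if (!desc->affinity)
                return false;

        desc->pending_mask = malloc(sizeof(unsigned long));
        if (!desc->pending_mask) {
                free(desc->affinity);   /* roll back the first allocation */
                return false;
        }
        return true;
}

int main(void)
{
        struct fake_desc d;

        printf("alloc %s\n", alloc_desc_masks(&d) ? "ok" : "failed");
        return 0;
}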
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 0c8b89f28a95..a77c6007dc99 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
@@ -81,7 +81,12 @@ static inline unsigned int kstat_irqs(unsigned int irq) | |||
81 | return sum; | 81 | return sum; |
82 | } | 82 | } |
83 | 83 | ||
84 | |||
85 | /* | ||
86 | * Lock/unlock the current runqueue - to extract task statistics: | ||
87 | */ | ||
84 | extern unsigned long long task_delta_exec(struct task_struct *); | 88 | extern unsigned long long task_delta_exec(struct task_struct *); |
89 | |||
85 | extern void account_user_time(struct task_struct *, cputime_t, cputime_t); | 90 | extern void account_user_time(struct task_struct *, cputime_t, cputime_t); |
86 | extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); | 91 | extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); |
87 | extern void account_steal_time(cputime_t); | 92 | extern void account_steal_time(cputime_t); |
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h new file mode 100644 index 000000000000..7796aed6cdd5 --- /dev/null +++ b/include/linux/kmemleak.h | |||
@@ -0,0 +1,96 @@ | |||
1 | /* | ||
2 | * include/linux/kmemleak.h | ||
3 | * | ||
4 | * Copyright (C) 2008 ARM Limited | ||
5 | * Written by Catalin Marinas <catalin.marinas@arm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #ifndef __KMEMLEAK_H | ||
22 | #define __KMEMLEAK_H | ||
23 | |||
24 | #ifdef CONFIG_DEBUG_KMEMLEAK | ||
25 | |||
26 | extern void kmemleak_init(void); | ||
27 | extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, | ||
28 | gfp_t gfp); | ||
29 | extern void kmemleak_free(const void *ptr); | ||
30 | extern void kmemleak_padding(const void *ptr, unsigned long offset, | ||
31 | size_t size); | ||
32 | extern void kmemleak_not_leak(const void *ptr); | ||
33 | extern void kmemleak_ignore(const void *ptr); | ||
34 | extern void kmemleak_scan_area(const void *ptr, unsigned long offset, | ||
35 | size_t length, gfp_t gfp); | ||
36 | extern void kmemleak_no_scan(const void *ptr); | ||
37 | |||
38 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | ||
39 | int min_count, unsigned long flags, | ||
40 | gfp_t gfp) | ||
41 | { | ||
42 | if (!(flags & SLAB_NOLEAKTRACE)) | ||
43 | kmemleak_alloc(ptr, size, min_count, gfp); | ||
44 | } | ||
45 | |||
46 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) | ||
47 | { | ||
48 | if (!(flags & SLAB_NOLEAKTRACE)) | ||
49 | kmemleak_free(ptr); | ||
50 | } | ||
51 | |||
52 | static inline void kmemleak_erase(void **ptr) | ||
53 | { | ||
54 | *ptr = NULL; | ||
55 | } | ||
56 | |||
57 | #else | ||
58 | |||
59 | static inline void kmemleak_init(void) | ||
60 | { | ||
61 | } | ||
62 | static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count, | ||
63 | gfp_t gfp) | ||
64 | { | ||
65 | } | ||
66 | static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, | ||
67 | int min_count, unsigned long flags, | ||
68 | gfp_t gfp) | ||
69 | { | ||
70 | } | ||
71 | static inline void kmemleak_free(const void *ptr) | ||
72 | { | ||
73 | } | ||
74 | static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) | ||
75 | { | ||
76 | } | ||
77 | static inline void kmemleak_not_leak(const void *ptr) | ||
78 | { | ||
79 | } | ||
80 | static inline void kmemleak_ignore(const void *ptr) | ||
81 | { | ||
82 | } | ||
83 | static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, | ||
84 | size_t length, gfp_t gfp) | ||
85 | { | ||
86 | } | ||
87 | static inline void kmemleak_erase(void **ptr) | ||
88 | { | ||
89 | } | ||
90 | static inline void kmemleak_no_scan(const void *ptr) | ||
91 | { | ||
92 | } | ||
93 | |||
94 | #endif /* CONFIG_DEBUG_KMEMLEAK */ | ||
95 | |||
96 | #endif /* __KMEMLEAK_H */ | ||
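The new kmemleak header gives allocators hooks to register, unregister and whitelist their objects. The sketch below shows how a custom pool allocator would be expected to annotate its allocations; the kmemleak_* stubs mirror the CONFIG_DEBUG_KMEMLEAK=n no-ops so the example builds standalone, whereas in-kernel code simply includes <linux/kmemleak.h>.

/* Sketch: annotating a pool allocator for the leak scanner. */
#include <stdlib.h>
#include <stddef.h>

typedef unsigned gfp_t;
#define GFP_KERNEL 0

static inline void kmemleak_alloc(const void *ptr, size_t size,
                                  int min_count, gfp_t gfp) { }
static inline void kmemleak_free(const void *ptr) { }
static inline void kmemleak_not_leak(const void *ptr) { }

static void *pool_alloc(size_t size)
{
        void *obj = malloc(size);

        if (obj)        /* min_count=1: report if the scanner finds no reference */
                kmemleak_alloc(obj, size, 1, GFP_KERNEL);
        return obj;
}

static void pool_free(void *obj)
{
        kmemleak_free(obj);
        free(obj);
}

int main(void)
{
        void *o = pool_alloc(64);

        kmemleak_not_leak(o);   /* object deliberately kept without a reference */
        pool_free(o);
        return 0;
}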
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h new file mode 100644 index 000000000000..b616d3930c3b --- /dev/null +++ b/include/linux/kmemtrace.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
3 | * | ||
4 | * This file is released under GPL version 2. | ||
5 | */ | ||
6 | |||
7 | #ifndef _LINUX_KMEMTRACE_H | ||
8 | #define _LINUX_KMEMTRACE_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <trace/events/kmem.h> | ||
13 | |||
14 | #ifdef CONFIG_KMEMTRACE | ||
15 | extern void kmemtrace_init(void); | ||
16 | #else | ||
17 | static inline void kmemtrace_init(void) | ||
18 | { | ||
19 | } | ||
20 | #endif | ||
21 | |||
22 | #endif /* __KERNEL__ */ | ||
23 | |||
24 | #endif /* _LINUX_KMEMTRACE_H */ | ||
25 | |||
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 8cc137911b34..3db5d8d37485 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -119,7 +119,7 @@ struct kvm_run { | |||
119 | __u32 error_code; | 119 | __u32 error_code; |
120 | } ex; | 120 | } ex; |
121 | /* KVM_EXIT_IO */ | 121 | /* KVM_EXIT_IO */ |
122 | struct kvm_io { | 122 | struct { |
123 | #define KVM_EXIT_IO_IN 0 | 123 | #define KVM_EXIT_IO_IN 0 |
124 | #define KVM_EXIT_IO_OUT 1 | 124 | #define KVM_EXIT_IO_OUT 1 |
125 | __u8 direction; | 125 | __u8 direction; |
@@ -224,10 +224,10 @@ struct kvm_interrupt { | |||
224 | /* for KVM_GET_DIRTY_LOG */ | 224 | /* for KVM_GET_DIRTY_LOG */ |
225 | struct kvm_dirty_log { | 225 | struct kvm_dirty_log { |
226 | __u32 slot; | 226 | __u32 slot; |
227 | __u32 padding; | 227 | __u32 padding1; |
228 | union { | 228 | union { |
229 | void __user *dirty_bitmap; /* one bit per page */ | 229 | void __user *dirty_bitmap; /* one bit per page */ |
230 | __u64 padding; | 230 | __u64 padding2; |
231 | }; | 231 | }; |
232 | }; | 232 | }; |
233 | 233 | ||
@@ -409,6 +409,10 @@ struct kvm_trace_rec { | |||
409 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT | 409 | #ifdef __KVM_HAVE_DEVICE_ASSIGNMENT |
410 | #define KVM_CAP_DEVICE_DEASSIGNMENT 27 | 410 | #define KVM_CAP_DEVICE_DEASSIGNMENT 27 |
411 | #endif | 411 | #endif |
412 | #ifdef __KVM_HAVE_MSIX | ||
413 | #define KVM_CAP_DEVICE_MSIX 28 | ||
414 | #endif | ||
415 | #define KVM_CAP_ASSIGN_DEV_IRQ 29 | ||
412 | /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ | 416 | /* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */ |
413 | #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 | 417 | #define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30 |
414 | 418 | ||
@@ -482,11 +486,18 @@ struct kvm_irq_routing { | |||
482 | #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ | 486 | #define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \ |
483 | struct kvm_assigned_pci_dev) | 487 | struct kvm_assigned_pci_dev) |
484 | #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) | 488 | #define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing) |
489 | /* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */ | ||
485 | #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \ | 490 | #define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \ |
486 | struct kvm_assigned_irq) | 491 | struct kvm_assigned_irq) |
492 | #define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq) | ||
487 | #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) | 493 | #define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71) |
488 | #define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ | 494 | #define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \ |
489 | struct kvm_assigned_pci_dev) | 495 | struct kvm_assigned_pci_dev) |
496 | #define KVM_ASSIGN_SET_MSIX_NR \ | ||
497 | _IOW(KVMIO, 0x73, struct kvm_assigned_msix_nr) | ||
498 | #define KVM_ASSIGN_SET_MSIX_ENTRY \ | ||
499 | _IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry) | ||
500 | #define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq) | ||
490 | 501 | ||
491 | /* | 502 | /* |
492 | * ioctls for vcpu fds | 503 | * ioctls for vcpu fds |
@@ -577,6 +588,8 @@ struct kvm_debug_guest { | |||
577 | #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) | 588 | #define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18) |
578 | #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) | 589 | #define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19) |
579 | 590 | ||
591 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) | ||
592 | |||
580 | struct kvm_assigned_pci_dev { | 593 | struct kvm_assigned_pci_dev { |
581 | __u32 assigned_dev_id; | 594 | __u32 assigned_dev_id; |
582 | __u32 busnr; | 595 | __u32 busnr; |
@@ -587,6 +600,17 @@ struct kvm_assigned_pci_dev { | |||
587 | }; | 600 | }; |
588 | }; | 601 | }; |
589 | 602 | ||
603 | #define KVM_DEV_IRQ_HOST_INTX (1 << 0) | ||
604 | #define KVM_DEV_IRQ_HOST_MSI (1 << 1) | ||
605 | #define KVM_DEV_IRQ_HOST_MSIX (1 << 2) | ||
606 | |||
607 | #define KVM_DEV_IRQ_GUEST_INTX (1 << 8) | ||
608 | #define KVM_DEV_IRQ_GUEST_MSI (1 << 9) | ||
609 | #define KVM_DEV_IRQ_GUEST_MSIX (1 << 10) | ||
610 | |||
611 | #define KVM_DEV_IRQ_HOST_MASK 0x00ff | ||
612 | #define KVM_DEV_IRQ_GUEST_MASK 0xff00 | ||
613 | |||
590 | struct kvm_assigned_irq { | 614 | struct kvm_assigned_irq { |
591 | __u32 assigned_dev_id; | 615 | __u32 assigned_dev_id; |
592 | __u32 host_irq; | 616 | __u32 host_irq; |
@@ -602,9 +626,19 @@ struct kvm_assigned_irq { | |||
602 | }; | 626 | }; |
603 | }; | 627 | }; |
604 | 628 | ||
605 | #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) | ||
606 | 629 | ||
607 | #define KVM_DEV_IRQ_ASSIGN_MSI_ACTION KVM_DEV_IRQ_ASSIGN_ENABLE_MSI | 630 | struct kvm_assigned_msix_nr { |
608 | #define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI (1 << 0) | 631 | __u32 assigned_dev_id; |
632 | __u16 entry_nr; | ||
633 | __u16 padding; | ||
634 | }; | ||
635 | |||
636 | #define KVM_MAX_MSIX_PER_DEV 512 | ||
637 | struct kvm_assigned_msix_entry { | ||
638 | __u32 assigned_dev_id; | ||
639 | __u32 gsi; | ||
640 | __u16 entry; /* The index of entry in the MSI-X table */ | ||
641 | __u16 padding[3]; | ||
642 | }; | ||
609 | 643 | ||
610 | #endif | 644 | #endif |
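The new MSI-X assignment ioctls are used in two steps: KVM_ASSIGN_SET_MSIX_NR declares how many MSI-X table entries the assigned device has, then KVM_ASSIGN_SET_MSIX_ENTRY binds each entry to a guest GSI. The hedged user-space sketch below re-declares the structs and ioctl numbers from the hunk above so it builds standalone; without a previously assigned device the calls are expected to fail, and the errors are simply printed.

/* Hedged sketch: issuing the MSI-X assignment ioctls against a fresh VM. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define KVMIO 0xAE

struct kvm_assigned_msix_nr {
        uint32_t assigned_dev_id;
        uint16_t entry_nr;
        uint16_t padding;
};

struct kvm_assigned_msix_entry {
        uint32_t assigned_dev_id;
        uint32_t gsi;
        uint16_t entry;
        uint16_t padding[3];
};

#define KVM_CREATE_VM             _IO(KVMIO, 0x01)
#define KVM_ASSIGN_SET_MSIX_NR    _IOW(KVMIO, 0x73, struct kvm_assigned_msix_nr)
#define KVM_ASSIGN_SET_MSIX_ENTRY _IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry)

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        int vm = kvm >= 0 ? ioctl(kvm, KVM_CREATE_VM, 0) : -1;
        struct kvm_assigned_msix_nr nr = { .assigned_dev_id = 1, .entry_nr = 3 };
        struct kvm_assigned_msix_entry ent = { .assigned_dev_id = 1, .gsi = 24, .entry = 0 };

        if (vm < 0) {
                fprintf(stderr, "no usable /dev/kvm: %s\n", strerror(errno));
                return 1;
        }
        if (ioctl(vm, KVM_ASSIGN_SET_MSIX_NR, &nr) < 0)
                perror("KVM_ASSIGN_SET_MSIX_NR (expected to fail without an assigned device)");
        if (ioctl(vm, KVM_ASSIGN_SET_MSIX_ENTRY, &ent) < 0)
                perror("KVM_ASSIGN_SET_MSIX_ENTRY (expected to fail without an assigned device)");

        close(vm);
        close(kvm);
        return 0;
}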
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 894a56e365e8..aacc5449f586 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -38,6 +38,7 @@ | |||
38 | #define KVM_REQ_UNHALT 6 | 38 | #define KVM_REQ_UNHALT 6 |
39 | #define KVM_REQ_MMU_SYNC 7 | 39 | #define KVM_REQ_MMU_SYNC 7 |
40 | #define KVM_REQ_KVMCLOCK_UPDATE 8 | 40 | #define KVM_REQ_KVMCLOCK_UPDATE 8 |
41 | #define KVM_REQ_KICK 9 | ||
41 | 42 | ||
42 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 | 43 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
43 | 44 | ||
@@ -72,7 +73,6 @@ struct kvm_vcpu { | |||
72 | struct mutex mutex; | 73 | struct mutex mutex; |
73 | int cpu; | 74 | int cpu; |
74 | struct kvm_run *run; | 75 | struct kvm_run *run; |
75 | int guest_mode; | ||
76 | unsigned long requests; | 76 | unsigned long requests; |
77 | unsigned long guest_debug; | 77 | unsigned long guest_debug; |
78 | int fpu_active; | 78 | int fpu_active; |
@@ -298,6 +298,7 @@ int kvm_arch_hardware_setup(void); | |||
298 | void kvm_arch_hardware_unsetup(void); | 298 | void kvm_arch_hardware_unsetup(void); |
299 | void kvm_arch_check_processor_compat(void *rtn); | 299 | void kvm_arch_check_processor_compat(void *rtn); |
300 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); | 300 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); |
301 | int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); | ||
301 | 302 | ||
302 | void kvm_free_physmem(struct kvm *kvm); | 303 | void kvm_free_physmem(struct kvm *kvm); |
303 | 304 | ||
@@ -319,6 +320,13 @@ struct kvm_irq_ack_notifier { | |||
319 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); | 320 | void (*irq_acked)(struct kvm_irq_ack_notifier *kian); |
320 | }; | 321 | }; |
321 | 322 | ||
323 | #define KVM_ASSIGNED_MSIX_PENDING 0x1 | ||
324 | struct kvm_guest_msix_entry { | ||
325 | u32 vector; | ||
326 | u16 entry; | ||
327 | u16 flags; | ||
328 | }; | ||
329 | |||
322 | struct kvm_assigned_dev_kernel { | 330 | struct kvm_assigned_dev_kernel { |
323 | struct kvm_irq_ack_notifier ack_notifier; | 331 | struct kvm_irq_ack_notifier ack_notifier; |
324 | struct work_struct interrupt_work; | 332 | struct work_struct interrupt_work; |
@@ -326,18 +334,18 @@ struct kvm_assigned_dev_kernel { | |||
326 | int assigned_dev_id; | 334 | int assigned_dev_id; |
327 | int host_busnr; | 335 | int host_busnr; |
328 | int host_devfn; | 336 | int host_devfn; |
337 | unsigned int entries_nr; | ||
329 | int host_irq; | 338 | int host_irq; |
330 | bool host_irq_disabled; | 339 | bool host_irq_disabled; |
340 | struct msix_entry *host_msix_entries; | ||
331 | int guest_irq; | 341 | int guest_irq; |
332 | #define KVM_ASSIGNED_DEV_GUEST_INTX (1 << 0) | 342 | struct kvm_guest_msix_entry *guest_msix_entries; |
333 | #define KVM_ASSIGNED_DEV_GUEST_MSI (1 << 1) | ||
334 | #define KVM_ASSIGNED_DEV_HOST_INTX (1 << 8) | ||
335 | #define KVM_ASSIGNED_DEV_HOST_MSI (1 << 9) | ||
336 | unsigned long irq_requested_type; | 343 | unsigned long irq_requested_type; |
337 | int irq_source_id; | 344 | int irq_source_id; |
338 | int flags; | 345 | int flags; |
339 | struct pci_dev *dev; | 346 | struct pci_dev *dev; |
340 | struct kvm *kvm; | 347 | struct kvm *kvm; |
348 | spinlock_t assigned_dev_lock; | ||
341 | }; | 349 | }; |
342 | 350 | ||
343 | struct kvm_irq_mask_notifier { | 351 | struct kvm_irq_mask_notifier { |
@@ -360,6 +368,9 @@ void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian); | |||
360 | int kvm_request_irq_source_id(struct kvm *kvm); | 368 | int kvm_request_irq_source_id(struct kvm *kvm); |
361 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); | 369 | void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); |
362 | 370 | ||
371 | /* For vcpu->arch.iommu_flags */ | ||
372 | #define KVM_IOMMU_CACHE_COHERENCY 0x1 | ||
373 | |||
363 | #ifdef CONFIG_IOMMU_API | 374 | #ifdef CONFIG_IOMMU_API |
364 | int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, | 375 | int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn, |
365 | unsigned long npages); | 376 | unsigned long npages); |
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index 2b8318c83e53..fb46efbeabec 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h | |||
@@ -40,4 +40,31 @@ typedef unsigned long hfn_t; | |||
40 | 40 | ||
41 | typedef hfn_t pfn_t; | 41 | typedef hfn_t pfn_t; |
42 | 42 | ||
43 | union kvm_ioapic_redirect_entry { | ||
44 | u64 bits; | ||
45 | struct { | ||
46 | u8 vector; | ||
47 | u8 delivery_mode:3; | ||
48 | u8 dest_mode:1; | ||
49 | u8 delivery_status:1; | ||
50 | u8 polarity:1; | ||
51 | u8 remote_irr:1; | ||
52 | u8 trig_mode:1; | ||
53 | u8 mask:1; | ||
54 | u8 reserve:7; | ||
55 | u8 reserved[4]; | ||
56 | u8 dest_id; | ||
57 | } fields; | ||
58 | }; | ||
59 | |||
60 | struct kvm_lapic_irq { | ||
61 | u32 vector; | ||
62 | u32 delivery_mode; | ||
63 | u32 dest_mode; | ||
64 | u32 level; | ||
65 | u32 trig_mode; | ||
66 | u32 shorthand; | ||
67 | u32 dest_id; | ||
68 | }; | ||
69 | |||
43 | #endif /* __KVM_TYPES_H__ */ | 70 | #endif /* __KVM_TYPES_H__ */ |
diff --git a/include/linux/lguest.h b/include/linux/lguest.h index 175e63f4a8c0..7bc1440fc473 100644 --- a/include/linux/lguest.h +++ b/include/linux/lguest.h | |||
@@ -30,6 +30,10 @@ struct lguest_data | |||
30 | /* Wallclock time set by the Host. */ | 30 | /* Wallclock time set by the Host. */ |
31 | struct timespec time; | 31 | struct timespec time; |
32 | 32 | ||
33 | /* Interrupt pending set by the Host. The Guest should do a hypercall | ||
34 | * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */ | ||
35 | int irq_pending; | ||
36 | |||
33 | /* Async hypercall ring. Instead of directly making hypercalls, we can | 37 | /* Async hypercall ring. Instead of directly making hypercalls, we can |
34 | * place them in here for processing the next time the Host wants. | 38 | * place them in here for processing the next time the Host wants. |
35 | * This batching can be quite efficient. */ | 39 | * This batching can be quite efficient. */ |
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index a53407a4165c..bfefbdf7498a 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h | |||
@@ -57,7 +57,8 @@ enum lguest_req | |||
57 | LHREQ_INITIALIZE, /* + base, pfnlimit, start */ | 57 | LHREQ_INITIALIZE, /* + base, pfnlimit, start */ |
58 | LHREQ_GETDMA, /* No longer used */ | 58 | LHREQ_GETDMA, /* No longer used */ |
59 | LHREQ_IRQ, /* + irq */ | 59 | LHREQ_IRQ, /* + irq */ |
60 | LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */ | 60 | LHREQ_BREAK, /* No longer used */ |
61 | LHREQ_EVENTFD, /* + address, fd. */ | ||
61 | }; | 62 | }; |
62 | 63 | ||
63 | /* The alignment to use between consumer and producer parts of vring. | 64 | /* The alignment to use between consumer and producer parts of vring. |
diff --git a/include/linux/loop.h b/include/linux/loop.h index 40725447f5e0..66c194e2d9b9 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h | |||
@@ -56,8 +56,7 @@ struct loop_device { | |||
56 | gfp_t old_gfp_mask; | 56 | gfp_t old_gfp_mask; |
57 | 57 | ||
58 | spinlock_t lo_lock; | 58 | spinlock_t lo_lock; |
59 | struct bio *lo_bio; | 59 | struct bio_list lo_bio_list; |
60 | struct bio *lo_biotail; | ||
61 | int lo_state; | 60 | int lo_state; |
62 | struct mutex lo_ctl_mutex; | 61 | struct mutex lo_ctl_mutex; |
63 | struct task_struct *lo_thread; | 62 | struct task_struct *lo_thread; |
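The loop driver's hand-rolled lo_bio/lo_biotail head and tail pointers are replaced by struct bio_list, whose bio_list_add()/bio_list_pop() helpers encapsulate exactly this add-to-tail, pop-from-head queue. A minimal standalone equivalent of that pattern:

/* Sketch: the singly linked tail-add / head-pop queue that bio_list wraps. */
#include <stdio.h>
#include <stddef.h>

struct node { struct node *next; int id; };
struct list { struct node *head, *tail; };

static void list_add_tail(struct list *l, struct node *n)
{
        n->next = NULL;
        if (l->tail)
                l->tail->next = n;
        else
                l->head = n;
        l->tail = n;
}

static struct node *list_pop(struct list *l)
{
        struct node *n = l->head;

        if (n) {
                l->head = n->next;
                if (!l->head)
                        l->tail = NULL;
        }
        return n;
}

int main(void)
{
        struct list q = { NULL, NULL };
        struct node a = { .id = 1 }, b = { .id = 2 };
        struct node *n;

        list_add_tail(&q, &a);
        list_add_tail(&q, &b);
        while ((n = list_pop(&q)))
                printf("popped %d\n", n->id);
        return 0;
}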
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h new file mode 100644 index 000000000000..e461b2c3d711 --- /dev/null +++ b/include/linux/lsm_audit.h | |||
@@ -0,0 +1,111 @@ | |||
1 | /* | ||
2 | * Common LSM logging functions | ||
3 | * Heavily borrowed from selinux/avc.h | ||
4 | * | ||
5 | * Author : Etienne BASSET <etienne.basset@ensta.org> | ||
6 | * | ||
7 | * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil> | ||
8 | * All BUGS to : Etienne BASSET <etienne.basset@ensta.org> | ||
9 | */ | ||
10 | #ifndef _LSM_COMMON_LOGGING_ | ||
11 | #define _LSM_COMMON_LOGGING_ | ||
12 | |||
13 | #include <linux/stddef.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/kdev_t.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/audit.h> | ||
20 | #include <linux/in6.h> | ||
21 | #include <linux/path.h> | ||
22 | #include <linux/key.h> | ||
23 | #include <linux/skbuff.h> | ||
24 | #include <asm/system.h> | ||
25 | |||
26 | |||
27 | /* Auxiliary data to use in generating the audit record. */ | ||
28 | struct common_audit_data { | ||
29 | char type; | ||
30 | #define LSM_AUDIT_DATA_FS 1 | ||
31 | #define LSM_AUDIT_DATA_NET 2 | ||
32 | #define LSM_AUDIT_DATA_CAP 3 | ||
33 | #define LSM_AUDIT_DATA_IPC 4 | ||
34 | #define LSM_AUDIT_DATA_TASK 5 | ||
35 | #define LSM_AUDIT_DATA_KEY 6 | ||
36 | struct task_struct *tsk; | ||
37 | union { | ||
38 | struct { | ||
39 | struct path path; | ||
40 | struct inode *inode; | ||
41 | } fs; | ||
42 | struct { | ||
43 | int netif; | ||
44 | struct sock *sk; | ||
45 | u16 family; | ||
46 | __be16 dport; | ||
47 | __be16 sport; | ||
48 | union { | ||
49 | struct { | ||
50 | __be32 daddr; | ||
51 | __be32 saddr; | ||
52 | } v4; | ||
53 | struct { | ||
54 | struct in6_addr daddr; | ||
55 | struct in6_addr saddr; | ||
56 | } v6; | ||
57 | } fam; | ||
58 | } net; | ||
59 | int cap; | ||
60 | int ipc_id; | ||
61 | struct task_struct *tsk; | ||
62 | #ifdef CONFIG_KEYS | ||
63 | struct { | ||
64 | key_serial_t key; | ||
65 | char *key_desc; | ||
66 | } key_struct; | ||
67 | #endif | ||
68 | } u; | ||
69 | const char *function; | ||
70 | /* this union contains LSM specific data */ | ||
71 | union { | ||
72 | /* SMACK data */ | ||
73 | struct smack_audit_data { | ||
74 | char *subject; | ||
75 | char *object; | ||
76 | char *request; | ||
77 | int result; | ||
78 | } smack_audit_data; | ||
79 | /* SELinux data */ | ||
80 | struct { | ||
81 | u32 ssid; | ||
82 | u32 tsid; | ||
83 | u16 tclass; | ||
84 | u32 requested; | ||
85 | u32 audited; | ||
86 | struct av_decision *avd; | ||
87 | int result; | ||
88 | } selinux_audit_data; | ||
89 | } lsm_priv; | ||
90 | /* these callback will be implemented by a specific LSM */ | ||
91 | void (*lsm_pre_audit)(struct audit_buffer *, void *); | ||
92 | void (*lsm_post_audit)(struct audit_buffer *, void *); | ||
93 | }; | ||
94 | |||
95 | #define v4info fam.v4 | ||
96 | #define v6info fam.v6 | ||
97 | |||
98 | int ipv4_skb_to_auditdata(struct sk_buff *skb, | ||
99 | struct common_audit_data *ad, u8 *proto); | ||
100 | |||
101 | int ipv6_skb_to_auditdata(struct sk_buff *skb, | ||
102 | struct common_audit_data *ad, u8 *proto); | ||
103 | |||
104 | /* Initialize an LSM audit data structure. */ | ||
105 | #define COMMON_AUDIT_DATA_INIT(_d, _t) \ | ||
106 | { memset((_d), 0, sizeof(struct common_audit_data)); \ | ||
107 | (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; } | ||
108 | |||
109 | void common_lsm_audit(struct common_audit_data *a); | ||
110 | |||
111 | #endif | ||
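An LSM using this new header zero-initialises a common_audit_data with COMMON_AUDIT_DATA_INIT, fills in the union member matching the chosen type, and hands it to common_lsm_audit(). The sketch below uses a reduced standalone copy of the structure purely for illustration; in-kernel users include <linux/lsm_audit.h> and typically also set the lsm_pre_audit/lsm_post_audit callbacks.

/* Sketch: auditing a capability check with the common LSM helpers. */
#include <stdio.h>
#include <string.h>

#define LSM_AUDIT_DATA_CAP 3

struct common_audit_data {
        char type;
        union {
                int cap;
                int ipc_id;
        } u;
        const char *function;
};

#define COMMON_AUDIT_DATA_INIT(_d, _t) \
        { memset((_d), 0, sizeof(struct common_audit_data)); \
          (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; }

static void common_lsm_audit(struct common_audit_data *a)
{
        printf("audit: type=%d cap=%d from %s\n", a->type, a->u.cap, a->function);
}

static int demo_capable_check(int cap)
{
        struct common_audit_data ad;

        COMMON_AUDIT_DATA_INIT(&ad, CAP);
        ad.u.cap = cap;
        common_lsm_audit(&ad);
        return 0;
}

int main(void)
{
        return demo_capable_check(21 /* e.g. CAP_SYS_ADMIN */);
}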
diff --git a/include/linux/magic.h b/include/linux/magic.h index 5b4e28bcb788..1923327b9869 100644 --- a/include/linux/magic.h +++ b/include/linux/magic.h | |||
@@ -6,9 +6,12 @@ | |||
6 | #define AFS_SUPER_MAGIC 0x5346414F | 6 | #define AFS_SUPER_MAGIC 0x5346414F |
7 | #define AUTOFS_SUPER_MAGIC 0x0187 | 7 | #define AUTOFS_SUPER_MAGIC 0x0187 |
8 | #define CODA_SUPER_MAGIC 0x73757245 | 8 | #define CODA_SUPER_MAGIC 0x73757245 |
9 | #define CRAMFS_MAGIC 0x28cd3d45 /* some random number */ | ||
10 | #define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */ | ||
9 | #define DEBUGFS_MAGIC 0x64626720 | 11 | #define DEBUGFS_MAGIC 0x64626720 |
10 | #define SYSFS_MAGIC 0x62656572 | 12 | #define SYSFS_MAGIC 0x62656572 |
11 | #define SECURITYFS_MAGIC 0x73636673 | 13 | #define SECURITYFS_MAGIC 0x73636673 |
14 | #define SELINUX_MAGIC 0xf97cff8c | ||
12 | #define TMPFS_MAGIC 0x01021994 | 15 | #define TMPFS_MAGIC 0x01021994 |
13 | #define SQUASHFS_MAGIC 0x73717368 | 16 | #define SQUASHFS_MAGIC 0x73717368 |
14 | #define EFS_SUPER_MAGIC 0x414A53 | 17 | #define EFS_SUPER_MAGIC 0x414A53 |
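These magic numbers (including the CRAMFS and SELINUX values added above) are what statfs() reports in f_type, so they are easy to see from user space. A small runnable check against sysfs, using the SYSFS_MAGIC value from the surrounding context lines:

/* Sketch: compare a mounted filesystem's statfs magic against a constant. */
#include <stdio.h>
#include <sys/vfs.h>

#define SYSFS_MAGIC 0x62656572

int main(void)
{
        struct statfs st;

        if (statfs("/sys", &st) != 0) {
                perror("statfs /sys");
                return 1;
        }
        printf("/sys magic 0x%lx (%s sysfs)\n",
               (unsigned long)st.f_type,
               (unsigned long)st.f_type == SYSFS_MAGIC ? "is" : "is not");
        return 0;
}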
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h deleted file mode 100644 index 1f76b1ebf627..000000000000 --- a/include/linux/mg_disk.h +++ /dev/null | |||
@@ -1,206 +0,0 @@ | |||
1 | /* | ||
2 | * include/linux/mg_disk.c | ||
3 | * | ||
4 | * Support for the mGine m[g]flash IO mode. | ||
5 | * Based on legacy hd.c | ||
6 | * | ||
7 | * (c) 2008 mGine Co.,LTD | ||
8 | * (c) 2008 unsik Kim <donari75@gmail.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | ||
14 | |||
15 | #ifndef __MG_DISK_H__ | ||
16 | #define __MG_DISK_H__ | ||
17 | |||
18 | #include <linux/blkdev.h> | ||
19 | #include <linux/ata.h> | ||
20 | |||
21 | /* name for block device */ | ||
22 | #define MG_DISK_NAME "mgd" | ||
23 | /* name for platform device */ | ||
24 | #define MG_DEV_NAME "mg_disk" | ||
25 | |||
26 | #define MG_DISK_MAJ 0 | ||
27 | #define MG_DISK_MAX_PART 16 | ||
28 | #define MG_SECTOR_SIZE 512 | ||
29 | #define MG_MAX_SECTS 256 | ||
30 | |||
31 | /* Register offsets */ | ||
32 | #define MG_BUFF_OFFSET 0x8000 | ||
33 | #define MG_STORAGE_BUFFER_SIZE 0x200 | ||
34 | #define MG_REG_OFFSET 0xC000 | ||
35 | #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ | ||
36 | #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ | ||
37 | #define MG_REG_SECT_CNT (MG_REG_OFFSET + 4) | ||
38 | #define MG_REG_SECT_NUM (MG_REG_OFFSET + 6) | ||
39 | #define MG_REG_CYL_LOW (MG_REG_OFFSET + 8) | ||
40 | #define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA) | ||
41 | #define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC) | ||
42 | #define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */ | ||
43 | #define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */ | ||
44 | #define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10) | ||
45 | #define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12) | ||
46 | |||
47 | /* "Drive Select/Head Register" bit values */ | ||
48 | #define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */ | ||
49 | #define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON) | ||
50 | #define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON) | ||
51 | #define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON) | ||
52 | |||
53 | |||
54 | /* "Device Control Register" bit values */ | ||
55 | #define MG_REG_CTRL_INTR_ENABLE 0x0 | ||
56 | #define MG_REG_CTRL_INTR_DISABLE (0x1<<1) | ||
57 | #define MG_REG_CTRL_RESET (0x1<<2) | ||
58 | #define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0 | ||
59 | #define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4) | ||
60 | #define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0 | ||
61 | #define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5) | ||
62 | #define MG_REG_CTRL_DPD_DISABLE 0x0 | ||
63 | #define MG_REG_CTRL_DPD_ENABLE (0x1<<6) | ||
64 | |||
65 | /* Status register bit */ | ||
66 | /* error bit in status register */ | ||
67 | #define MG_REG_STATUS_BIT_ERROR 0x01 | ||
68 | /* corrected error in status register */ | ||
69 | #define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04 | ||
70 | /* data request bit in status register */ | ||
71 | #define MG_REG_STATUS_BIT_DATA_REQ 0x08 | ||
72 | /* DSC - Drive Seek Complete */ | ||
73 | #define MG_REG_STATUS_BIT_SEEK_DONE 0x10 | ||
74 | /* DWF - Drive Write Fault */ | ||
75 | #define MG_REG_STATUS_BIT_WRITE_FAULT 0x20 | ||
76 | #define MG_REG_STATUS_BIT_READY 0x40 | ||
77 | #define MG_REG_STATUS_BIT_BUSY 0x80 | ||
78 | |||
79 | /* handy status */ | ||
80 | #define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE) | ||
81 | #define MG_READY_OK(s) (((s) & (MG_STAT_READY | \ | ||
82 | (MG_REG_STATUS_BIT_BUSY | \ | ||
83 | MG_REG_STATUS_BIT_WRITE_FAULT | \ | ||
84 | MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY) | ||
85 | |||
86 | /* Error register */ | ||
87 | #define MG_REG_ERR_AMNF 0x01 | ||
88 | #define MG_REG_ERR_ABRT 0x04 | ||
89 | #define MG_REG_ERR_IDNF 0x10 | ||
90 | #define MG_REG_ERR_UNC 0x40 | ||
91 | #define MG_REG_ERR_BBK 0x80 | ||
92 | |||
93 | /* error code for others */ | ||
94 | #define MG_ERR_NONE 0 | ||
95 | #define MG_ERR_TIMEOUT 0x100 | ||
96 | #define MG_ERR_INIT_STAT 0x101 | ||
97 | #define MG_ERR_TRANSLATION 0x102 | ||
98 | #define MG_ERR_CTRL_RST 0x103 | ||
99 | #define MG_ERR_INV_STAT 0x104 | ||
100 | #define MG_ERR_RSTOUT 0x105 | ||
101 | |||
102 | #define MG_MAX_ERRORS 6 /* Max read/write errors */ | ||
103 | |||
104 | /* command */ | ||
105 | #define MG_CMD_RD 0x20 | ||
106 | #define MG_CMD_WR 0x30 | ||
107 | #define MG_CMD_SLEEP 0x99 | ||
108 | #define MG_CMD_WAKEUP 0xC3 | ||
109 | #define MG_CMD_ID 0xEC | ||
110 | #define MG_CMD_WR_CONF 0x3C | ||
111 | #define MG_CMD_RD_CONF 0x40 | ||
112 | |||
113 | /* operation mode */ | ||
114 | #define MG_OP_CASCADE (1 << 0) | ||
115 | #define MG_OP_CASCADE_SYNC_RD (1 << 1) | ||
116 | #define MG_OP_CASCADE_SYNC_WR (1 << 2) | ||
117 | #define MG_OP_INTERLEAVE (1 << 3) | ||
118 | |||
119 | /* synchronous */ | ||
120 | #define MG_BURST_LAT_4 (3 << 4) | ||
121 | #define MG_BURST_LAT_5 (4 << 4) | ||
122 | #define MG_BURST_LAT_6 (5 << 4) | ||
123 | #define MG_BURST_LAT_7 (6 << 4) | ||
124 | #define MG_BURST_LAT_8 (7 << 4) | ||
125 | #define MG_BURST_LEN_4 (1 << 1) | ||
126 | #define MG_BURST_LEN_8 (2 << 1) | ||
127 | #define MG_BURST_LEN_16 (3 << 1) | ||
128 | #define MG_BURST_LEN_32 (4 << 1) | ||
129 | #define MG_BURST_LEN_CONT (0 << 1) | ||
130 | |||
131 | /* timeout value (unit: ms) */ | ||
132 | #define MG_TMAX_CONF_TO_CMD 1 | ||
133 | #define MG_TMAX_WAIT_RD_DRQ 10 | ||
134 | #define MG_TMAX_WAIT_WR_DRQ 500 | ||
135 | #define MG_TMAX_RST_TO_BUSY 10 | ||
136 | #define MG_TMAX_HDRST_TO_RDY 500 | ||
137 | #define MG_TMAX_SWRST_TO_RDY 500 | ||
138 | #define MG_TMAX_RSTOUT 3000 | ||
139 | |||
140 | /* device attribution */ | ||
141 | /* use mflash as boot device */ | ||
142 | #define MG_BOOT_DEV (1 << 0) | ||
143 | /* use mflash as storage device */ | ||
144 | #define MG_STORAGE_DEV (1 << 1) | ||
145 | /* same as MG_STORAGE_DEV, but bootloader already done reset sequence */ | ||
146 | #define MG_STORAGE_DEV_SKIP_RST (1 << 2) | ||
147 | |||
148 | #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST) | ||
149 | |||
150 | /* names of GPIO resource */ | ||
151 | #define MG_RST_PIN "mg_rst" | ||
152 | /* except MG_BOOT_DEV, reset-out pin should be assigned */ | ||
153 | #define MG_RSTOUT_PIN "mg_rstout" | ||
154 | |||
155 | /* private driver data */ | ||
156 | struct mg_drv_data { | ||
157 | /* disk resource */ | ||
158 | u32 use_polling; | ||
159 | |||
160 | /* device attribution */ | ||
161 | u32 dev_attr; | ||
162 | |||
163 | /* internally used */ | ||
164 | struct mg_host *host; | ||
165 | }; | ||
166 | |||
167 | /* main structure for mflash driver */ | ||
168 | struct mg_host { | ||
169 | struct device *dev; | ||
170 | |||
171 | struct request_queue *breq; | ||
172 | spinlock_t lock; | ||
173 | struct gendisk *gd; | ||
174 | |||
175 | struct timer_list timer; | ||
176 | void (*mg_do_intr) (struct mg_host *); | ||
177 | |||
178 | u16 id[ATA_ID_WORDS]; | ||
179 | |||
180 | u16 cyls; | ||
181 | u16 heads; | ||
182 | u16 sectors; | ||
183 | u32 n_sectors; | ||
184 | u32 nres_sectors; | ||
185 | |||
186 | void __iomem *dev_base; | ||
187 | unsigned int irq; | ||
188 | unsigned int rst; | ||
189 | unsigned int rstout; | ||
190 | |||
191 | u32 major; | ||
192 | u32 error; | ||
193 | }; | ||
194 | |||
195 | /* | ||
196 | * Debugging macro and defines | ||
197 | */ | ||
198 | #undef DO_MG_DEBUG | ||
199 | #ifdef DO_MG_DEBUG | ||
200 | # define MG_DBG(fmt, args...) \ | ||
201 | printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args) | ||
202 | #else /* CONFIG_MG_DEBUG */ | ||
203 | # define MG_DBG(fmt, args...) do { } while (0) | ||
204 | #endif /* CONFIG_MG_DEBUG */ | ||
205 | |||
206 | #endif | ||
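Editor's note: the header is removed wholesale (its definitions presumably live on inside the mflash driver itself). The one non-obvious piece is the MG_READY_OK() test, which is a mask-and-compare: the drive counts as ready only when READY and SEEK_DONE are set while BUSY, WRITE_FAULT and ERROR are all clear. A small stand-alone sketch under made-up bit names, purely to illustrate the logic:

    #include <stdio.h>

    #define BIT_ERROR       0x01
    #define BIT_SEEK_DONE   0x10
    #define BIT_WRITE_FAULT 0x20
    #define BIT_READY       0x40
    #define BIT_BUSY        0x80

    #define STAT_READY   (BIT_READY | BIT_SEEK_DONE)
    #define READY_OK(s)  (((s) & (STAT_READY | BIT_BUSY | \
                           BIT_WRITE_FAULT | BIT_ERROR)) == STAT_READY)

    int main(void)
    {
            unsigned int samples[] = { 0xd0, 0x58, 0x50, 0x51 };
            unsigned int i;

            /* BUSY or ERROR veto readiness even when READY/SEEK_DONE are set */
            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("status 0x%02x -> %s\n", samples[i],
                           READY_OK(samples[i]) ? "ready" : "not ready");
            return 0;
    }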
diff --git a/include/linux/mm.h b/include/linux/mm.h index bff1f0d475c7..ad613ed66ab0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -19,6 +19,7 @@ struct anon_vma; | |||
19 | struct file_ra_state; | 19 | struct file_ra_state; |
20 | struct user_struct; | 20 | struct user_struct; |
21 | struct writeback_control; | 21 | struct writeback_control; |
22 | struct rlimit; | ||
22 | 23 | ||
23 | #ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */ | 24 | #ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */ |
24 | extern unsigned long max_mapnr; | 25 | extern unsigned long max_mapnr; |
@@ -580,12 +581,10 @@ static inline void set_page_links(struct page *page, enum zone_type zone, | |||
580 | */ | 581 | */ |
581 | static inline unsigned long round_hint_to_min(unsigned long hint) | 582 | static inline unsigned long round_hint_to_min(unsigned long hint) |
582 | { | 583 | { |
583 | #ifdef CONFIG_SECURITY | ||
584 | hint &= PAGE_MASK; | 584 | hint &= PAGE_MASK; |
585 | if (((void *)hint != NULL) && | 585 | if (((void *)hint != NULL) && |
586 | (hint < mmap_min_addr)) | 586 | (hint < mmap_min_addr)) |
587 | return PAGE_ALIGN(mmap_min_addr); | 587 | return PAGE_ALIGN(mmap_min_addr); |
588 | #endif | ||
589 | return hint; | 588 | return hint; |
590 | } | 589 | } |
591 | 590 | ||
@@ -1031,8 +1030,6 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn, | |||
1031 | unsigned long end_pfn); | 1030 | unsigned long end_pfn); |
1032 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, | 1031 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, |
1033 | unsigned long end_pfn); | 1032 | unsigned long end_pfn); |
1034 | extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn, | ||
1035 | unsigned long end_pfn); | ||
1036 | extern void remove_all_active_ranges(void); | 1033 | extern void remove_all_active_ranges(void); |
1037 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, | 1034 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, |
1038 | unsigned long end_pfn); | 1035 | unsigned long end_pfn); |
@@ -1319,8 +1316,8 @@ int vmemmap_populate_basepages(struct page *start_page, | |||
1319 | int vmemmap_populate(struct page *start_page, unsigned long pages, int node); | 1316 | int vmemmap_populate(struct page *start_page, unsigned long pages, int node); |
1320 | void vmemmap_populate_print_last(void); | 1317 | void vmemmap_populate_print_last(void); |
1321 | 1318 | ||
1322 | extern void *alloc_locked_buffer(size_t size); | 1319 | extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim, |
1323 | extern void free_locked_buffer(void *buffer, size_t size); | 1320 | size_t size); |
1324 | extern void release_locked_buffer(void *buffer, size_t size); | 1321 | extern void refund_locked_memory(struct mm_struct *mm, size_t size); |
1325 | #endif /* __KERNEL__ */ | 1322 | #endif /* __KERNEL__ */ |
1326 | #endif /* _LINUX_MM_H */ | 1323 | #endif /* _LINUX_MM_H */ |
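Editor's note: the ad-hoc locked-buffer allocator is replaced by a plain accounting pair. A hedged sketch of the shape account_locked_memory() plausibly takes (the real implementation lives in mm/ and may differ in which rlimits it checks, its locking and its error codes):

    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <linux/capability.h>

    /* Charge 'size' bytes of pinned memory against 'mm', honouring 'rlim'. */
    static int account_locked_memory_sketch(struct mm_struct *mm,
                                            struct rlimit *rlim, size_t size)
    {
            unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;
            unsigned long locked, lock_limit;
            int ret = -ENOMEM;

            down_write(&mm->mmap_sem);

            locked     = mm->locked_vm + pgsz;
            lock_limit = rlim->rlim_cur >> PAGE_SHIFT;

            if (locked <= lock_limit || capable(CAP_IPC_LOCK)) {
                    mm->locked_vm = locked;         /* charge the pages */
                    ret = 0;
            }

            up_write(&mm->mmap_sem);
            return ret;
    }

refund_locked_memory() would then subtract the same page count when the pinned region goes away.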
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h index 3d1b7bde1283..97491f78b08c 100644 --- a/include/linux/mmiotrace.h +++ b/include/linux/mmiotrace.h | |||
@@ -30,6 +30,8 @@ extern unsigned int kmmio_count; | |||
30 | 30 | ||
31 | extern int register_kmmio_probe(struct kmmio_probe *p); | 31 | extern int register_kmmio_probe(struct kmmio_probe *p); |
32 | extern void unregister_kmmio_probe(struct kmmio_probe *p); | 32 | extern void unregister_kmmio_probe(struct kmmio_probe *p); |
33 | extern int kmmio_init(void); | ||
34 | extern void kmmio_cleanup(void); | ||
33 | 35 | ||
34 | #ifdef CONFIG_MMIOTRACE | 36 | #ifdef CONFIG_MMIOTRACE |
35 | /* kmmio is active by some kmmio_probes? */ | 37 | /* kmmio is active by some kmmio_probes? */ |
diff --git a/include/linux/module.h b/include/linux/module.h index 627ac082e2a6..a7bc6e7b43a7 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -77,6 +77,7 @@ search_extable(const struct exception_table_entry *first, | |||
77 | void sort_extable(struct exception_table_entry *start, | 77 | void sort_extable(struct exception_table_entry *start, |
78 | struct exception_table_entry *finish); | 78 | struct exception_table_entry *finish); |
79 | void sort_main_extable(void); | 79 | void sort_main_extable(void); |
80 | void trim_init_extable(struct module *m); | ||
80 | 81 | ||
81 | #ifdef MODULE | 82 | #ifdef MODULE |
82 | #define MODULE_GENERIC_TABLE(gtype,name) \ | 83 | #define MODULE_GENERIC_TABLE(gtype,name) \ |
@@ -337,6 +338,14 @@ struct module | |||
337 | const char **trace_bprintk_fmt_start; | 338 | const char **trace_bprintk_fmt_start; |
338 | unsigned int num_trace_bprintk_fmt; | 339 | unsigned int num_trace_bprintk_fmt; |
339 | #endif | 340 | #endif |
341 | #ifdef CONFIG_EVENT_TRACING | ||
342 | struct ftrace_event_call *trace_events; | ||
343 | unsigned int num_trace_events; | ||
344 | #endif | ||
345 | #ifdef CONFIG_FTRACE_MCOUNT_RECORD | ||
346 | unsigned long *ftrace_callsites; | ||
347 | unsigned int num_ftrace_callsites; | ||
348 | #endif | ||
340 | 349 | ||
341 | #ifdef CONFIG_MODULE_UNLOAD | 350 | #ifdef CONFIG_MODULE_UNLOAD |
342 | /* What modules depend on me? */ | 351 | /* What modules depend on me? */ |
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index a4f0b931846c..6547c3cdbc4c 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h | |||
@@ -36,9 +36,14 @@ typedef int (*param_set_fn)(const char *val, struct kernel_param *kp); | |||
36 | /* Returns length written or -errno. Buffer is 4k (ie. be short!) */ | 36 | /* Returns length written or -errno. Buffer is 4k (ie. be short!) */ |
37 | typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp); | 37 | typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp); |
38 | 38 | ||
39 | /* Flag bits for kernel_param.flags */ | ||
40 | #define KPARAM_KMALLOCED 1 | ||
41 | #define KPARAM_ISBOOL 2 | ||
42 | |||
39 | struct kernel_param { | 43 | struct kernel_param { |
40 | const char *name; | 44 | const char *name; |
41 | unsigned int perm; | 45 | u16 perm; |
46 | u16 flags; | ||
42 | param_set_fn set; | 47 | param_set_fn set; |
43 | param_get_fn get; | 48 | param_get_fn get; |
44 | union { | 49 | union { |
@@ -79,7 +84,7 @@ struct kparam_array | |||
79 | parameters. perm sets the visibility in sysfs: 000 means it's | 84 | parameters. perm sets the visibility in sysfs: 000 means it's |
80 | not there, read bits mean it's readable, write bits mean it's | 85 | not there, read bits mean it's readable, write bits mean it's |
81 | writable. */ | 86 | writable. */ |
82 | #define __module_param_call(prefix, name, set, get, arg, perm) \ | 87 | #define __module_param_call(prefix, name, set, get, arg, isbool, perm) \ |
83 | /* Default value instead of permissions? */ \ | 88 | /* Default value instead of permissions? */ \ |
84 | static int __param_perm_check_##name __attribute__((unused)) = \ | 89 | static int __param_perm_check_##name __attribute__((unused)) = \ |
85 | BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \ | 90 | BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \ |
@@ -88,10 +93,13 @@ struct kparam_array | |||
88 | static struct kernel_param __moduleparam_const __param_##name \ | 93 | static struct kernel_param __moduleparam_const __param_##name \ |
89 | __used \ | 94 | __used \ |
90 | __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ | 95 | __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ |
91 | = { __param_str_##name, perm, set, get, { arg } } | 96 | = { __param_str_##name, perm, isbool ? KPARAM_ISBOOL : 0, \ |
97 | set, get, { arg } } | ||
92 | 98 | ||
93 | #define module_param_call(name, set, get, arg, perm) \ | 99 | #define module_param_call(name, set, get, arg, perm) \ |
94 | __module_param_call(MODULE_PARAM_PREFIX, name, set, get, arg, perm) | 100 | __module_param_call(MODULE_PARAM_PREFIX, \ |
101 | name, set, get, arg, \ | ||
102 | __same_type(*(arg), bool), perm) | ||
95 | 103 | ||
96 | /* Helper functions: type is byte, short, ushort, int, uint, long, | 104 | /* Helper functions: type is byte, short, ushort, int, uint, long, |
97 | ulong, charp, bool or invbool, or XXX if you define param_get_XXX, | 105 | ulong, charp, bool or invbool, or XXX if you define param_get_XXX, |
@@ -120,15 +128,16 @@ struct kparam_array | |||
120 | #define core_param(name, var, type, perm) \ | 128 | #define core_param(name, var, type, perm) \ |
121 | param_check_##type(name, &(var)); \ | 129 | param_check_##type(name, &(var)); \ |
122 | __module_param_call("", name, param_set_##type, param_get_##type, \ | 130 | __module_param_call("", name, param_set_##type, param_get_##type, \ |
123 | &var, perm) | 131 | &var, __same_type(var, bool), perm) |
124 | #endif /* !MODULE */ | 132 | #endif /* !MODULE */ |
125 | 133 | ||
126 | /* Actually copy string: maxlen param is usually sizeof(string). */ | 134 | /* Actually copy string: maxlen param is usually sizeof(string). */ |
127 | #define module_param_string(name, string, len, perm) \ | 135 | #define module_param_string(name, string, len, perm) \ |
128 | static const struct kparam_string __param_string_##name \ | 136 | static const struct kparam_string __param_string_##name \ |
129 | = { len, string }; \ | 137 | = { len, string }; \ |
130 | module_param_call(name, param_set_copystring, param_get_string, \ | 138 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
131 | .str = &__param_string_##name, perm); \ | 139 | param_set_copystring, param_get_string, \ |
140 | .str = &__param_string_##name, 0, perm); \ | ||
132 | __MODULE_PARM_TYPE(name, "string") | 141 | __MODULE_PARM_TYPE(name, "string") |
133 | 142 | ||
134 | /* Called on module insert or kernel boot */ | 143 | /* Called on module insert or kernel boot */ |
@@ -186,21 +195,30 @@ extern int param_set_charp(const char *val, struct kernel_param *kp); | |||
186 | extern int param_get_charp(char *buffer, struct kernel_param *kp); | 195 | extern int param_get_charp(char *buffer, struct kernel_param *kp); |
187 | #define param_check_charp(name, p) __param_check(name, p, char *) | 196 | #define param_check_charp(name, p) __param_check(name, p, char *) |
188 | 197 | ||
198 | /* For historical reasons "bool" parameters can be (unsigned) "int". */ | ||
189 | extern int param_set_bool(const char *val, struct kernel_param *kp); | 199 | extern int param_set_bool(const char *val, struct kernel_param *kp); |
190 | extern int param_get_bool(char *buffer, struct kernel_param *kp); | 200 | extern int param_get_bool(char *buffer, struct kernel_param *kp); |
191 | #define param_check_bool(name, p) __param_check(name, p, int) | 201 | #define param_check_bool(name, p) \ |
202 | static inline void __check_##name(void) \ | ||
203 | { \ | ||
204 | BUILD_BUG_ON(!__same_type(*(p), bool) && \ | ||
205 | !__same_type(*(p), unsigned int) && \ | ||
206 | !__same_type(*(p), int)); \ | ||
207 | } | ||
192 | 208 | ||
193 | extern int param_set_invbool(const char *val, struct kernel_param *kp); | 209 | extern int param_set_invbool(const char *val, struct kernel_param *kp); |
194 | extern int param_get_invbool(char *buffer, struct kernel_param *kp); | 210 | extern int param_get_invbool(char *buffer, struct kernel_param *kp); |
195 | #define param_check_invbool(name, p) __param_check(name, p, int) | 211 | #define param_check_invbool(name, p) __param_check(name, p, bool) |
196 | 212 | ||
197 | /* Comma-separated array: *nump is set to number they actually specified. */ | 213 | /* Comma-separated array: *nump is set to number they actually specified. */ |
198 | #define module_param_array_named(name, array, type, nump, perm) \ | 214 | #define module_param_array_named(name, array, type, nump, perm) \ |
199 | static const struct kparam_array __param_arr_##name \ | 215 | static const struct kparam_array __param_arr_##name \ |
200 | = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\ | 216 | = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\ |
201 | sizeof(array[0]), array }; \ | 217 | sizeof(array[0]), array }; \ |
202 | module_param_call(name, param_array_set, param_array_get, \ | 218 | __module_param_call(MODULE_PARAM_PREFIX, name, \ |
203 | .arr = &__param_arr_##name, perm); \ | 219 | param_array_set, param_array_get, \ |
220 | .arr = &__param_arr_##name, \ | ||
221 | __same_type(array[0], bool), perm); \ | ||
204 | __MODULE_PARM_TYPE(name, "array of " #type) | 222 | __MODULE_PARM_TYPE(name, "array of " #type) |
205 | 223 | ||
206 | #define module_param_array(name, type, nump, perm) \ | 224 | #define module_param_array(name, type, nump, perm) \ |
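Editor's note: the isbool plumbing lets the core distinguish boolean parameters at runtime, while the widened param_check_bool() still tolerates historical int-backed booleans but rejects anything else at compile time. A usage sketch from a module's point of view (names are illustrative):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static bool verbose;                /* __same_type(verbose, bool): KPARAM_ISBOOL set */
    module_param(verbose, bool, 0644);
    MODULE_PARM_DESC(verbose, "enable chatty logging");

    static unsigned int legacy_flag;    /* still accepted for historical reasons */
    module_param(legacy_flag, bool, 0444);

    /* static char *oops;
     * module_param(oops, bool, 0444);
     *   -> BUILD_BUG_ON in param_check_bool() rejects this at compile time. */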
diff --git a/include/linux/mount.h b/include/linux/mount.h index 51f55f903aff..5d5275364867 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h | |||
@@ -30,7 +30,7 @@ struct mnt_namespace; | |||
30 | #define MNT_STRICTATIME 0x80 | 30 | #define MNT_STRICTATIME 0x80 |
31 | 31 | ||
32 | #define MNT_SHRINKABLE 0x100 | 32 | #define MNT_SHRINKABLE 0x100 |
33 | #define MNT_IMBALANCED_WRITE_COUNT 0x200 /* just for debugging */ | 33 | #define MNT_WRITE_HOLD 0x200 |
34 | 34 | ||
35 | #define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ | 35 | #define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */ |
36 | #define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */ | 36 | #define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */ |
@@ -65,13 +65,22 @@ struct vfsmount { | |||
65 | int mnt_expiry_mark; /* true if marked for expiry */ | 65 | int mnt_expiry_mark; /* true if marked for expiry */ |
66 | int mnt_pinned; | 66 | int mnt_pinned; |
67 | int mnt_ghosts; | 67 | int mnt_ghosts; |
68 | /* | 68 | #ifdef CONFIG_SMP |
69 | * This value is not stable unless all of the mnt_writers[] spinlocks | 69 | int *mnt_writers; |
70 | * are held, and all mnt_writer[]s on this mount have 0 as their ->count | 70 | #else |
71 | */ | 71 | int mnt_writers; |
72 | atomic_t __mnt_writers; | 72 | #endif |
73 | }; | 73 | }; |
74 | 74 | ||
75 | static inline int *get_mnt_writers_ptr(struct vfsmount *mnt) | ||
76 | { | ||
77 | #ifdef CONFIG_SMP | ||
78 | return mnt->mnt_writers; | ||
79 | #else | ||
80 | return &mnt->mnt_writers; | ||
81 | #endif | ||
82 | } | ||
83 | |||
75 | static inline struct vfsmount *mntget(struct vfsmount *mnt) | 84 | static inline struct vfsmount *mntget(struct vfsmount *mnt) |
76 | { | 85 | { |
77 | if (mnt) | 86 | if (mnt) |
@@ -79,7 +88,11 @@ static inline struct vfsmount *mntget(struct vfsmount *mnt) | |||
79 | return mnt; | 88 | return mnt; |
80 | } | 89 | } |
81 | 90 | ||
91 | struct file; /* forward dec */ | ||
92 | |||
82 | extern int mnt_want_write(struct vfsmount *mnt); | 93 | extern int mnt_want_write(struct vfsmount *mnt); |
94 | extern int mnt_want_write_file(struct file *file); | ||
95 | extern int mnt_clone_write(struct vfsmount *mnt); | ||
83 | extern void mnt_drop_write(struct vfsmount *mnt); | 96 | extern void mnt_drop_write(struct vfsmount *mnt); |
84 | extern void mntput_no_expire(struct vfsmount *mnt); | 97 | extern void mntput_no_expire(struct vfsmount *mnt); |
85 | extern void mnt_pin(struct vfsmount *mnt); | 98 | extern void mnt_pin(struct vfsmount *mnt); |
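Editor's note: the single shared __mnt_writers atomic becomes a per-CPU count on SMP, with get_mnt_writers_ptr() hiding the difference, so mnt_want_write() no longer bounces one cache line between CPUs. A hedged sketch of the fast path this enables (the real helpers live in fs/namespace.c and also spin on MNT_WRITE_HOLD while a read-only remount is in flight; the caller is assumed to have preemption disabled):

    #include <linux/mount.h>
    #include <linux/percpu.h>

    static inline void inc_mnt_writers_sketch(struct vfsmount *mnt)
    {
    #ifdef CONFIG_SMP
            (*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))++;
    #else
            mnt->mnt_writers++;
    #endif
    }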
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 3069ec7e0ab8..878cab4f5fcc 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -150,5 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); | |||
150 | */ | 150 | */ |
151 | extern int mutex_trylock(struct mutex *lock); | 151 | extern int mutex_trylock(struct mutex *lock); |
152 | extern void mutex_unlock(struct mutex *lock); | 152 | extern void mutex_unlock(struct mutex *lock); |
153 | extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); | ||
153 | 154 | ||
154 | #endif | 155 | #endif |
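Editor's note: atomic_dec_and_mutex_lock() is the mutex counterpart of atomic_dec_and_lock(): it drops a reference and, only when the count hits zero, returns nonzero with the mutex held. Typical teardown usage, sketched with illustrative names:

    #include <linux/mutex.h>
    #include <linux/list.h>
    #include <asm/atomic.h>

    static DEFINE_MUTEX(registry_lock);
    static atomic_t registry_users = ATOMIC_INIT(1);
    static LIST_HEAD(registry);

    /* Drop one user; only the final put returns with registry_lock held,
     * so the teardown below runs exactly once and under the lock. */
    static void registry_put_sketch(void)
    {
            if (!atomic_dec_and_mutex_lock(&registry_users, &registry_lock))
                    return;                 /* other users remain */

            INIT_LIST_HEAD(&registry);      /* illustrative teardown work */
            mutex_unlock(&registry_lock);
    }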
diff --git a/include/linux/namei.h b/include/linux/namei.h index 518098fe63af..d870ae2faedc 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
@@ -18,6 +18,7 @@ enum { MAX_NESTED_LINKS = 8 }; | |||
18 | struct nameidata { | 18 | struct nameidata { |
19 | struct path path; | 19 | struct path path; |
20 | struct qstr last; | 20 | struct qstr last; |
21 | struct path root; | ||
21 | unsigned int flags; | 22 | unsigned int flags; |
22 | int last_type; | 23 | int last_type; |
23 | unsigned depth; | 24 | unsigned depth; |
@@ -77,8 +78,8 @@ extern void release_open_intent(struct nameidata *); | |||
77 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); | 78 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); |
78 | extern struct dentry *lookup_one_noperm(const char *, struct dentry *); | 79 | extern struct dentry *lookup_one_noperm(const char *, struct dentry *); |
79 | 80 | ||
80 | extern int follow_down(struct vfsmount **, struct dentry **); | 81 | extern int follow_down(struct path *); |
81 | extern int follow_up(struct vfsmount **, struct dentry **); | 82 | extern int follow_up(struct path *); |
82 | 83 | ||
83 | extern struct dentry *lock_rename(struct dentry *, struct dentry *); | 84 | extern struct dentry *lock_rename(struct dentry *, struct dentry *); |
84 | extern void unlock_rename(struct dentry *, struct dentry *); | 85 | extern void unlock_rename(struct dentry *, struct dentry *); |
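Editor's note: follow_down() and follow_up() now take a single struct path instead of separate vfsmount/dentry pointers, and nameidata records the root it was resolved against. A hedged caller-side sketch of the conversion:

    #include <linux/namei.h>
    #include <linux/dcache.h>

    /* old calling convention, removed above:
     *      while (d_mountpoint(dentry) && follow_down(&mnt, &dentry))
     *              ;
     * new convention, the two pointers travel together in a struct path: */
    static void skip_mounts_sketch(struct path *path)
    {
            while (d_mountpoint(path->dentry) && follow_down(path))
                    ;       /* path->mnt and path->dentry advance in place */
    }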
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index bcd0201589f8..a6d9ef2bb34a 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h | |||
@@ -125,11 +125,9 @@ void nfsd_export_flush(void); | |||
125 | void exp_readlock(void); | 125 | void exp_readlock(void); |
126 | void exp_readunlock(void); | 126 | void exp_readunlock(void); |
127 | struct svc_export * rqst_exp_get_by_name(struct svc_rqst *, | 127 | struct svc_export * rqst_exp_get_by_name(struct svc_rqst *, |
128 | struct vfsmount *, | 128 | struct path *); |
129 | struct dentry *); | ||
130 | struct svc_export * rqst_exp_parent(struct svc_rqst *, | 129 | struct svc_export * rqst_exp_parent(struct svc_rqst *, |
131 | struct vfsmount *mnt, | 130 | struct path *); |
132 | struct dentry *dentry); | ||
133 | int exp_rootfh(struct auth_domain *, | 131 | int exp_rootfh(struct auth_domain *, |
134 | char *path, struct knfsd_fh *, int maxsize); | 132 | char *path, struct knfsd_fh *, int maxsize); |
135 | __be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *); | 133 | __be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *); |
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 7339c7bf7331..13f126c89ae8 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h | |||
@@ -18,7 +18,19 @@ struct page_cgroup { | |||
18 | }; | 18 | }; |
19 | 19 | ||
20 | void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat); | 20 | void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat); |
21 | void __init page_cgroup_init(void); | 21 | |
22 | #ifdef CONFIG_SPARSEMEM | ||
23 | static inline void __init page_cgroup_init_flatmem(void) | ||
24 | { | ||
25 | } | ||
26 | extern void __init page_cgroup_init(void); | ||
27 | #else | ||
28 | void __init page_cgroup_init_flatmem(void); | ||
29 | static inline void __init page_cgroup_init(void) | ||
30 | { | ||
31 | } | ||
32 | #endif | ||
33 | |||
22 | struct page_cgroup *lookup_page_cgroup(struct page *page); | 34 | struct page_cgroup *lookup_page_cgroup(struct page *page); |
23 | 35 | ||
24 | enum { | 36 | enum { |
@@ -87,6 +99,10 @@ static inline void page_cgroup_init(void) | |||
87 | { | 99 | { |
88 | } | 100 | } |
89 | 101 | ||
102 | static inline void __init page_cgroup_init_flatmem(void) | ||
103 | { | ||
104 | } | ||
105 | |||
90 | #endif | 106 | #endif |
91 | 107 | ||
92 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 108 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0f71812d67d3..19f8e6d1a4d2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -1005,6 +1005,7 @@ | |||
1005 | #define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196 | 1005 | #define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196 |
1006 | #define PCI_DEVICE_ID_PLX_9030 0x9030 | 1006 | #define PCI_DEVICE_ID_PLX_9030 0x9030 |
1007 | #define PCI_DEVICE_ID_PLX_9050 0x9050 | 1007 | #define PCI_DEVICE_ID_PLX_9050 0x9050 |
1008 | #define PCI_DEVICE_ID_PLX_9056 0x9056 | ||
1008 | #define PCI_DEVICE_ID_PLX_9080 0x9080 | 1009 | #define PCI_DEVICE_ID_PLX_9080 0x9080 |
1009 | #define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 | 1010 | #define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001 |
1010 | 1011 | ||
@@ -1314,6 +1315,13 @@ | |||
1314 | 1315 | ||
1315 | #define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */ | 1316 | #define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */ |
1316 | #define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002 | 1317 | #define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002 |
1318 | #define PCI_DEVICE_ID_CREATIVE_20K1 0x0005 | ||
1319 | #define PCI_DEVICE_ID_CREATIVE_20K2 0x000b | ||
1320 | #define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024 | ||
1321 | #define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041 | ||
1322 | #define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042 | ||
1323 | #define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043 | ||
1324 | #define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000 | ||
1317 | 1325 | ||
1318 | #define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */ | 1326 | #define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */ |
1319 | #define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938 | 1327 | #define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938 |
@@ -1847,6 +1855,10 @@ | |||
1847 | #define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107 | 1855 | #define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107 |
1848 | #define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108 | 1856 | #define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108 |
1849 | 1857 | ||
1858 | #define PCI_VENDOR_ID_DIGIGRAM 0x1369 | ||
1859 | #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001 | ||
1860 | #define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002 | ||
1861 | |||
1850 | #define PCI_VENDOR_ID_KAWASAKI 0x136b | 1862 | #define PCI_VENDOR_ID_KAWASAKI 0x136b |
1851 | #define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 | 1863 | #define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01 |
1852 | 1864 | ||
@@ -1996,10 +2008,12 @@ | |||
1996 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 | 2008 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118 |
1997 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C | 2009 | #define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C |
1998 | #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 | 2010 | #define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501 |
2011 | #define PCI_DEVICE_ID_OXSEMI_C950 0x950B | ||
1999 | #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 | 2012 | #define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511 |
2000 | #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 | 2013 | #define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513 |
2001 | #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 | 2014 | #define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521 |
2002 | #define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 | 2015 | #define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523 |
2016 | #define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001 | ||
2003 | 2017 | ||
2004 | #define PCI_VENDOR_ID_CHELSIO 0x1425 | 2018 | #define PCI_VENDOR_ID_CHELSIO 0x1425 |
2005 | 2019 | ||
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 1581ff235c7e..26fd9d12f050 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -86,7 +86,12 @@ struct percpu_data { | |||
86 | void *ptrs[1]; | 86 | void *ptrs[1]; |
87 | }; | 87 | }; |
88 | 88 | ||
89 | /* pointer disguising messes up the kmemleak objects tracking */ | ||
90 | #ifndef CONFIG_DEBUG_KMEMLEAK | ||
89 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) | 91 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) |
92 | #else | ||
93 | #define __percpu_disguise(pdata) (struct percpu_data *)(pdata) | ||
94 | #endif | ||
90 | 95 | ||
91 | #define per_cpu_ptr(ptr, cpu) \ | 96 | #define per_cpu_ptr(ptr, cpu) \ |
92 | ({ \ | 97 | ({ \ |
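Editor's note: kmemleak finds live objects by scanning memory for values that look like pointers into tracked allocations; the complemented value stored by __percpu_disguise() never matches, so the per-CPU chunks would be reported as leaks, hence the disguise is dropped under CONFIG_DEBUG_KMEMLEAK. A trivial user-space illustration of the round trip that per_cpu_ptr() undoes (purely illustrative):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int data = 42;
            uintptr_t stored = ~(uintptr_t)&data;   /* disguised value in the struct */
            int *recovered = (int *)~stored;        /* what dereferencing undoes */

            printf("real %p, stored 0x%lx, recovered %p -> %d\n",
                   (void *)&data, (unsigned long)stored, (void *)recovered, *recovered);
            return 0;
    }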
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h new file mode 100644 index 000000000000..1b3118a1023a --- /dev/null +++ b/include/linux/perf_counter.h | |||
@@ -0,0 +1,709 @@ | |||
1 | /* | ||
2 | * Performance counters: | ||
3 | * | ||
4 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | ||
5 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | ||
6 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra | ||
7 | * | ||
8 | * Data type definitions, declarations, prototypes. | ||
9 | * | ||
10 | * Started by: Thomas Gleixner and Ingo Molnar | ||
11 | * | ||
12 | * For licencing details see kernel-base/COPYING | ||
13 | */ | ||
14 | #ifndef _LINUX_PERF_COUNTER_H | ||
15 | #define _LINUX_PERF_COUNTER_H | ||
16 | |||
17 | #include <linux/types.h> | ||
18 | #include <linux/ioctl.h> | ||
19 | #include <asm/byteorder.h> | ||
20 | |||
21 | /* | ||
22 | * User-space ABI bits: | ||
23 | */ | ||
24 | |||
25 | /* | ||
26 | * attr.type | ||
27 | */ | ||
28 | enum perf_type_id { | ||
29 | PERF_TYPE_HARDWARE = 0, | ||
30 | PERF_TYPE_SOFTWARE = 1, | ||
31 | PERF_TYPE_TRACEPOINT = 2, | ||
32 | PERF_TYPE_HW_CACHE = 3, | ||
33 | PERF_TYPE_RAW = 4, | ||
34 | |||
35 | PERF_TYPE_MAX, /* non-ABI */ | ||
36 | }; | ||
37 | |||
38 | /* | ||
39 | * Generalized performance counter event types, used by the | ||
40 | * attr.event_id parameter of the sys_perf_counter_open() | ||
41 | * syscall: | ||
42 | */ | ||
43 | enum perf_hw_id { | ||
44 | /* | ||
45 | * Common hardware events, generalized by the kernel: | ||
46 | */ | ||
47 | PERF_COUNT_HW_CPU_CYCLES = 0, | ||
48 | PERF_COUNT_HW_INSTRUCTIONS = 1, | ||
49 | PERF_COUNT_HW_CACHE_REFERENCES = 2, | ||
50 | PERF_COUNT_HW_CACHE_MISSES = 3, | ||
51 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | ||
52 | PERF_COUNT_HW_BRANCH_MISSES = 5, | ||
53 | PERF_COUNT_HW_BUS_CYCLES = 6, | ||
54 | |||
55 | PERF_COUNT_HW_MAX, /* non-ABI */ | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * Generalized hardware cache counters: | ||
60 | * | ||
61 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x | ||
62 | * { read, write, prefetch } x | ||
63 | * { accesses, misses } | ||
64 | */ | ||
65 | enum perf_hw_cache_id { | ||
66 | PERF_COUNT_HW_CACHE_L1D = 0, | ||
67 | PERF_COUNT_HW_CACHE_L1I = 1, | ||
68 | PERF_COUNT_HW_CACHE_LL = 2, | ||
69 | PERF_COUNT_HW_CACHE_DTLB = 3, | ||
70 | PERF_COUNT_HW_CACHE_ITLB = 4, | ||
71 | PERF_COUNT_HW_CACHE_BPU = 5, | ||
72 | |||
73 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ | ||
74 | }; | ||
75 | |||
76 | enum perf_hw_cache_op_id { | ||
77 | PERF_COUNT_HW_CACHE_OP_READ = 0, | ||
78 | PERF_COUNT_HW_CACHE_OP_WRITE = 1, | ||
79 | PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, | ||
80 | |||
81 | PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ | ||
82 | }; | ||
83 | |||
84 | enum perf_hw_cache_op_result_id { | ||
85 | PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, | ||
86 | PERF_COUNT_HW_CACHE_RESULT_MISS = 1, | ||
87 | |||
88 | PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ | ||
89 | }; | ||
90 | |||
91 | /* | ||
92 | * Special "software" counters provided by the kernel, even if the hardware | ||
93 | * does not support performance counters. These counters measure various | ||
94 | * physical and sw events of the kernel (and allow the profiling of them as | ||
95 | * well): | ||
96 | */ | ||
97 | enum perf_sw_ids { | ||
98 | PERF_COUNT_SW_CPU_CLOCK = 0, | ||
99 | PERF_COUNT_SW_TASK_CLOCK = 1, | ||
100 | PERF_COUNT_SW_PAGE_FAULTS = 2, | ||
101 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, | ||
102 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | ||
103 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | ||
104 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | ||
105 | |||
106 | PERF_COUNT_SW_MAX, /* non-ABI */ | ||
107 | }; | ||
108 | |||
109 | /* | ||
110 | * Bits that can be set in attr.sample_type to request information | ||
111 | * in the overflow packets. | ||
112 | */ | ||
113 | enum perf_counter_sample_format { | ||
114 | PERF_SAMPLE_IP = 1U << 0, | ||
115 | PERF_SAMPLE_TID = 1U << 1, | ||
116 | PERF_SAMPLE_TIME = 1U << 2, | ||
117 | PERF_SAMPLE_ADDR = 1U << 3, | ||
118 | PERF_SAMPLE_GROUP = 1U << 4, | ||
119 | PERF_SAMPLE_CALLCHAIN = 1U << 5, | ||
120 | PERF_SAMPLE_ID = 1U << 6, | ||
121 | PERF_SAMPLE_CPU = 1U << 7, | ||
122 | PERF_SAMPLE_PERIOD = 1U << 8, | ||
123 | |||
124 | PERF_SAMPLE_MAX = 1U << 9, /* non-ABI */ | ||
125 | }; | ||
126 | |||
127 | /* | ||
128 | * Bits that can be set in attr.read_format to request that | ||
129 | * reads on the counter should return the indicated quantities, | ||
130 | * in increasing order of bit value, after the counter value. | ||
131 | */ | ||
132 | enum perf_counter_read_format { | ||
133 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, | ||
134 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, | ||
135 | PERF_FORMAT_ID = 1U << 2, | ||
136 | |||
137 | PERF_FORMAT_MAX = 1U << 3, /* non-ABI */ | ||
138 | }; | ||
139 | |||
140 | #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ | ||
141 | |||
142 | /* | ||
143 | * Hardware event to monitor via a performance monitoring counter: | ||
144 | */ | ||
145 | struct perf_counter_attr { | ||
146 | |||
147 | /* | ||
148 | * Major type: hardware/software/tracepoint/etc. | ||
149 | */ | ||
150 | __u32 type; | ||
151 | |||
152 | /* | ||
153 | * Size of the attr structure, for fwd/bwd compat. | ||
154 | */ | ||
155 | __u32 size; | ||
156 | |||
157 | /* | ||
158 | * Type specific configuration information. | ||
159 | */ | ||
160 | __u64 config; | ||
161 | |||
162 | union { | ||
163 | __u64 sample_period; | ||
164 | __u64 sample_freq; | ||
165 | }; | ||
166 | |||
167 | __u64 sample_type; | ||
168 | __u64 read_format; | ||
169 | |||
170 | __u64 disabled : 1, /* off by default */ | ||
171 | inherit : 1, /* children inherit it */ | ||
172 | pinned : 1, /* must always be on PMU */ | ||
173 | exclusive : 1, /* only group on PMU */ | ||
174 | exclude_user : 1, /* don't count user */ | ||
175 | exclude_kernel : 1, /* ditto kernel */ | ||
176 | exclude_hv : 1, /* ditto hypervisor */ | ||
177 | exclude_idle : 1, /* don't count when idle */ | ||
178 | mmap : 1, /* include mmap data */ | ||
179 | comm : 1, /* include comm data */ | ||
180 | freq : 1, /* use freq, not period */ | ||
181 | |||
182 | __reserved_1 : 53; | ||
183 | |||
184 | __u32 wakeup_events; /* wakeup every n events */ | ||
185 | __u32 __reserved_2; | ||
186 | |||
187 | __u64 __reserved_3; | ||
188 | }; | ||
189 | |||
190 | /* | ||
191 | * Ioctls that can be done on a perf counter fd: | ||
192 | */ | ||
193 | #define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) | ||
194 | #define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) | ||
195 | #define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) | ||
196 | #define PERF_COUNTER_IOC_RESET _IO ('$', 3) | ||
197 | #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) | ||
198 | |||
199 | enum perf_counter_ioc_flags { | ||
200 | PERF_IOC_FLAG_GROUP = 1U << 0, | ||
201 | }; | ||
202 | |||
203 | /* | ||
204 | * Structure of the page that can be mapped via mmap | ||
205 | */ | ||
206 | struct perf_counter_mmap_page { | ||
207 | __u32 version; /* version number of this structure */ | ||
208 | __u32 compat_version; /* lowest version this is compat with */ | ||
209 | |||
210 | /* | ||
211 | * Bits needed to read the hw counters in user-space. | ||
212 | * | ||
213 | * u32 seq; | ||
214 | * s64 count; | ||
215 | * | ||
216 | * do { | ||
217 | * seq = pc->lock; | ||
218 | * | ||
219 | * barrier() | ||
220 | * if (pc->index) { | ||
221 | * count = pmc_read(pc->index - 1); | ||
222 | * count += pc->offset; | ||
223 | * } else | ||
224 | * goto regular_read; | ||
225 | * | ||
226 | * barrier(); | ||
227 | * } while (pc->lock != seq); | ||
228 | * | ||
229 | * NOTE: for obvious reason this only works on self-monitoring | ||
230 | * processes. | ||
231 | */ | ||
232 | __u32 lock; /* seqlock for synchronization */ | ||
233 | __u32 index; /* hardware counter identifier */ | ||
234 | __s64 offset; /* add to hardware counter value */ | ||
235 | |||
236 | /* | ||
237 | * Control data for the mmap() data buffer. | ||
238 | * | ||
239 | * User-space reading this value should issue an rmb(), on SMP capable | ||
240 | * platforms, after reading this value -- see perf_counter_wakeup(). | ||
241 | */ | ||
242 | __u64 data_head; /* head in the data section */ | ||
243 | }; | ||
244 | |||
245 | #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) | ||
246 | #define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) | ||
247 | #define PERF_EVENT_MISC_KERNEL (1 << 0) | ||
248 | #define PERF_EVENT_MISC_USER (2 << 0) | ||
249 | #define PERF_EVENT_MISC_HYPERVISOR (3 << 0) | ||
250 | #define PERF_EVENT_MISC_OVERFLOW (1 << 2) | ||
251 | |||
252 | struct perf_event_header { | ||
253 | __u32 type; | ||
254 | __u16 misc; | ||
255 | __u16 size; | ||
256 | }; | ||
257 | |||
258 | enum perf_event_type { | ||
259 | |||
260 | /* | ||
261 | * The MMAP events record the PROT_EXEC mappings so that we can | ||
262 | * correlate userspace IPs to code. They have the following structure: | ||
263 | * | ||
264 | * struct { | ||
265 | * struct perf_event_header header; | ||
266 | * | ||
267 | * u32 pid, tid; | ||
268 | * u64 addr; | ||
269 | * u64 len; | ||
270 | * u64 pgoff; | ||
271 | * char filename[]; | ||
272 | * }; | ||
273 | */ | ||
274 | PERF_EVENT_MMAP = 1, | ||
275 | |||
276 | /* | ||
277 | * struct { | ||
278 | * struct perf_event_header header; | ||
279 | * | ||
280 | * u32 pid, tid; | ||
281 | * char comm[]; | ||
282 | * }; | ||
283 | */ | ||
284 | PERF_EVENT_COMM = 3, | ||
285 | |||
286 | /* | ||
287 | * struct { | ||
288 | * struct perf_event_header header; | ||
289 | * u64 time; | ||
290 | * u64 id; | ||
291 | * u64 sample_period; | ||
292 | * }; | ||
293 | */ | ||
294 | PERF_EVENT_PERIOD = 4, | ||
295 | |||
296 | /* | ||
297 | * struct { | ||
298 | * struct perf_event_header header; | ||
299 | * u64 time; | ||
300 | * u64 id; | ||
301 | * }; | ||
302 | */ | ||
303 | PERF_EVENT_THROTTLE = 5, | ||
304 | PERF_EVENT_UNTHROTTLE = 6, | ||
305 | |||
306 | /* | ||
307 | * struct { | ||
308 | * struct perf_event_header header; | ||
309 | * u32 pid, ppid; | ||
310 | * }; | ||
311 | */ | ||
312 | PERF_EVENT_FORK = 7, | ||
313 | |||
314 | /* | ||
315 | * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field | ||
316 | * will be PERF_RECORD_* | ||
317 | * | ||
318 | * struct { | ||
319 | * struct perf_event_header header; | ||
320 | * | ||
321 | * { u64 ip; } && PERF_RECORD_IP | ||
322 | * { u32 pid, tid; } && PERF_RECORD_TID | ||
323 | * { u64 time; } && PERF_RECORD_TIME | ||
324 | * { u64 addr; } && PERF_RECORD_ADDR | ||
325 | * { u64 config; } && PERF_RECORD_CONFIG | ||
326 | * { u32 cpu, res; } && PERF_RECORD_CPU | ||
327 | * | ||
328 | * { u64 nr; | ||
329 | * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP | ||
330 | * | ||
331 | * { u16 nr, | ||
332 | * hv, | ||
333 | * kernel, | ||
334 | * user; | ||
335 | * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN | ||
336 | * }; | ||
337 | */ | ||
338 | }; | ||
339 | |||
340 | #ifdef __KERNEL__ | ||
341 | /* | ||
342 | * Kernel-internal data types and definitions: | ||
343 | */ | ||
344 | |||
345 | #ifdef CONFIG_PERF_COUNTERS | ||
346 | # include <asm/perf_counter.h> | ||
347 | #endif | ||
348 | |||
349 | #include <linux/list.h> | ||
350 | #include <linux/mutex.h> | ||
351 | #include <linux/rculist.h> | ||
352 | #include <linux/rcupdate.h> | ||
353 | #include <linux/spinlock.h> | ||
354 | #include <linux/hrtimer.h> | ||
355 | #include <linux/fs.h> | ||
356 | #include <linux/pid_namespace.h> | ||
357 | #include <asm/atomic.h> | ||
358 | |||
359 | struct task_struct; | ||
360 | |||
361 | /** | ||
362 | * struct hw_perf_counter - performance counter hardware details: | ||
363 | */ | ||
364 | struct hw_perf_counter { | ||
365 | #ifdef CONFIG_PERF_COUNTERS | ||
366 | union { | ||
367 | struct { /* hardware */ | ||
368 | u64 config; | ||
369 | unsigned long config_base; | ||
370 | unsigned long counter_base; | ||
371 | int idx; | ||
372 | }; | ||
373 | union { /* software */ | ||
374 | atomic64_t count; | ||
375 | struct hrtimer hrtimer; | ||
376 | }; | ||
377 | }; | ||
378 | atomic64_t prev_count; | ||
379 | u64 sample_period; | ||
380 | u64 last_period; | ||
381 | atomic64_t period_left; | ||
382 | u64 interrupts; | ||
383 | |||
384 | u64 freq_count; | ||
385 | u64 freq_interrupts; | ||
386 | u64 freq_stamp; | ||
387 | #endif | ||
388 | }; | ||
389 | |||
390 | struct perf_counter; | ||
391 | |||
392 | /** | ||
393 | * struct pmu - generic performance monitoring unit | ||
394 | */ | ||
395 | struct pmu { | ||
396 | int (*enable) (struct perf_counter *counter); | ||
397 | void (*disable) (struct perf_counter *counter); | ||
398 | void (*read) (struct perf_counter *counter); | ||
399 | void (*unthrottle) (struct perf_counter *counter); | ||
400 | }; | ||
401 | |||
402 | /** | ||
403 | * enum perf_counter_active_state - the states of a counter | ||
404 | */ | ||
405 | enum perf_counter_active_state { | ||
406 | PERF_COUNTER_STATE_ERROR = -2, | ||
407 | PERF_COUNTER_STATE_OFF = -1, | ||
408 | PERF_COUNTER_STATE_INACTIVE = 0, | ||
409 | PERF_COUNTER_STATE_ACTIVE = 1, | ||
410 | }; | ||
411 | |||
412 | struct file; | ||
413 | |||
414 | struct perf_mmap_data { | ||
415 | struct rcu_head rcu_head; | ||
416 | int nr_pages; /* nr of data pages */ | ||
417 | int nr_locked; /* nr pages mlocked */ | ||
418 | |||
419 | atomic_t poll; /* POLL_ for wakeups */ | ||
420 | atomic_t events; /* event limit */ | ||
421 | |||
422 | atomic_long_t head; /* write position */ | ||
423 | atomic_long_t done_head; /* completed head */ | ||
424 | |||
425 | atomic_t lock; /* concurrent writes */ | ||
426 | |||
427 | atomic_t wakeup; /* needs a wakeup */ | ||
428 | |||
429 | struct perf_counter_mmap_page *user_page; | ||
430 | void *data_pages[0]; | ||
431 | }; | ||
432 | |||
433 | struct perf_pending_entry { | ||
434 | struct perf_pending_entry *next; | ||
435 | void (*func)(struct perf_pending_entry *); | ||
436 | }; | ||
437 | |||
438 | /** | ||
439 | * struct perf_counter - performance counter kernel representation: | ||
440 | */ | ||
441 | struct perf_counter { | ||
442 | #ifdef CONFIG_PERF_COUNTERS | ||
443 | struct list_head list_entry; | ||
444 | struct list_head event_entry; | ||
445 | struct list_head sibling_list; | ||
446 | int nr_siblings; | ||
447 | struct perf_counter *group_leader; | ||
448 | const struct pmu *pmu; | ||
449 | |||
450 | enum perf_counter_active_state state; | ||
451 | atomic64_t count; | ||
452 | |||
453 | /* | ||
454 | * These are the total time in nanoseconds that the counter | ||
455 | * has been enabled (i.e. eligible to run, and the task has | ||
456 | * been scheduled in, if this is a per-task counter) | ||
457 | * and running (scheduled onto the CPU), respectively. | ||
458 | * | ||
459 | * They are computed from tstamp_enabled, tstamp_running and | ||
460 | * tstamp_stopped when the counter is in INACTIVE or ACTIVE state. | ||
461 | */ | ||
462 | u64 total_time_enabled; | ||
463 | u64 total_time_running; | ||
464 | |||
465 | /* | ||
466 | * These are timestamps used for computing total_time_enabled | ||
467 | * and total_time_running when the counter is in INACTIVE or | ||
468 | * ACTIVE state, measured in nanoseconds from an arbitrary point | ||
469 | * in time. | ||
470 | * tstamp_enabled: the notional time when the counter was enabled | ||
471 | * tstamp_running: the notional time when the counter was scheduled on | ||
472 | * tstamp_stopped: in INACTIVE state, the notional time when the | ||
473 | * counter was scheduled off. | ||
474 | */ | ||
475 | u64 tstamp_enabled; | ||
476 | u64 tstamp_running; | ||
477 | u64 tstamp_stopped; | ||
478 | |||
479 | struct perf_counter_attr attr; | ||
480 | struct hw_perf_counter hw; | ||
481 | |||
482 | struct perf_counter_context *ctx; | ||
483 | struct file *filp; | ||
484 | |||
485 | /* | ||
486 | * These accumulate total time (in nanoseconds) that children | ||
487 | * counters have been enabled and running, respectively. | ||
488 | */ | ||
489 | atomic64_t child_total_time_enabled; | ||
490 | atomic64_t child_total_time_running; | ||
491 | |||
492 | /* | ||
493 | * Protect attach/detach and child_list: | ||
494 | */ | ||
495 | struct mutex child_mutex; | ||
496 | struct list_head child_list; | ||
497 | struct perf_counter *parent; | ||
498 | |||
499 | int oncpu; | ||
500 | int cpu; | ||
501 | |||
502 | struct list_head owner_entry; | ||
503 | struct task_struct *owner; | ||
504 | |||
505 | /* mmap bits */ | ||
506 | struct mutex mmap_mutex; | ||
507 | atomic_t mmap_count; | ||
508 | struct perf_mmap_data *data; | ||
509 | |||
510 | /* poll related */ | ||
511 | wait_queue_head_t waitq; | ||
512 | struct fasync_struct *fasync; | ||
513 | |||
514 | /* delayed work for NMIs and such */ | ||
515 | int pending_wakeup; | ||
516 | int pending_kill; | ||
517 | int pending_disable; | ||
518 | struct perf_pending_entry pending; | ||
519 | |||
520 | atomic_t event_limit; | ||
521 | |||
522 | void (*destroy)(struct perf_counter *); | ||
523 | struct rcu_head rcu_head; | ||
524 | |||
525 | struct pid_namespace *ns; | ||
526 | u64 id; | ||
527 | #endif | ||
528 | }; | ||
529 | |||
530 | /** | ||
531 | * struct perf_counter_context - counter context structure | ||
532 | * | ||
533 | * Used as a container for task counters and CPU counters as well: | ||
534 | */ | ||
535 | struct perf_counter_context { | ||
536 | /* | ||
537 | * Protect the states of the counters in the list, | ||
538 | * nr_active, and the list: | ||
539 | */ | ||
540 | spinlock_t lock; | ||
541 | /* | ||
542 | * Protect the list of counters. Locking either mutex or lock | ||
543 | * is sufficient to ensure the list doesn't change; to change | ||
544 | * the list you need to lock both the mutex and the spinlock. | ||
545 | */ | ||
546 | struct mutex mutex; | ||
547 | |||
548 | struct list_head counter_list; | ||
549 | struct list_head event_list; | ||
550 | int nr_counters; | ||
551 | int nr_active; | ||
552 | int is_active; | ||
553 | atomic_t refcount; | ||
554 | struct task_struct *task; | ||
555 | |||
556 | /* | ||
557 | * Context clock, runs when context enabled. | ||
558 | */ | ||
559 | u64 time; | ||
560 | u64 timestamp; | ||
561 | |||
562 | /* | ||
563 | * These fields let us detect when two contexts have both | ||
564 | * been cloned (inherited) from a common ancestor. | ||
565 | */ | ||
566 | struct perf_counter_context *parent_ctx; | ||
567 | u64 parent_gen; | ||
568 | u64 generation; | ||
569 | int pin_count; | ||
570 | struct rcu_head rcu_head; | ||
571 | }; | ||
572 | |||
573 | /** | ||
574 | * struct perf_counter_cpu_context - per cpu counter context structure | ||
575 | */ | ||
576 | struct perf_cpu_context { | ||
577 | struct perf_counter_context ctx; | ||
578 | struct perf_counter_context *task_ctx; | ||
579 | int active_oncpu; | ||
580 | int max_pertask; | ||
581 | int exclusive; | ||
582 | |||
583 | /* | ||
584 | * Recursion avoidance: | ||
585 | * | ||
586 | * task, softirq, irq, nmi context | ||
587 | */ | ||
588 | int recursion[4]; | ||
589 | }; | ||
590 | |||
591 | #ifdef CONFIG_PERF_COUNTERS | ||
592 | |||
593 | /* | ||
594 | * Set by architecture code: | ||
595 | */ | ||
596 | extern int perf_max_counters; | ||
597 | |||
598 | extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter); | ||
599 | |||
600 | extern void perf_counter_task_sched_in(struct task_struct *task, int cpu); | ||
601 | extern void perf_counter_task_sched_out(struct task_struct *task, | ||
602 | struct task_struct *next, int cpu); | ||
603 | extern void perf_counter_task_tick(struct task_struct *task, int cpu); | ||
604 | extern int perf_counter_init_task(struct task_struct *child); | ||
605 | extern void perf_counter_exit_task(struct task_struct *child); | ||
606 | extern void perf_counter_free_task(struct task_struct *task); | ||
607 | extern void perf_counter_do_pending(void); | ||
608 | extern void perf_counter_print_debug(void); | ||
609 | extern void __perf_disable(void); | ||
610 | extern bool __perf_enable(void); | ||
611 | extern void perf_disable(void); | ||
612 | extern void perf_enable(void); | ||
613 | extern int perf_counter_task_disable(void); | ||
614 | extern int perf_counter_task_enable(void); | ||
615 | extern int hw_perf_group_sched_in(struct perf_counter *group_leader, | ||
616 | struct perf_cpu_context *cpuctx, | ||
617 | struct perf_counter_context *ctx, int cpu); | ||
618 | extern void perf_counter_update_userpage(struct perf_counter *counter); | ||
619 | |||
620 | struct perf_sample_data { | ||
621 | struct pt_regs *regs; | ||
622 | u64 addr; | ||
623 | u64 period; | ||
624 | }; | ||
625 | |||
626 | extern int perf_counter_overflow(struct perf_counter *counter, int nmi, | ||
627 | struct perf_sample_data *data); | ||
628 | |||
629 | /* | ||
630 | * Return 1 for a software counter, 0 for a hardware counter | ||
631 | */ | ||
632 | static inline int is_software_counter(struct perf_counter *counter) | ||
633 | { | ||
634 | return (counter->attr.type != PERF_TYPE_RAW) && | ||
635 | (counter->attr.type != PERF_TYPE_HARDWARE) && | ||
636 | (counter->attr.type != PERF_TYPE_HW_CACHE); | ||
637 | } | ||
638 | |||
639 | extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64); | ||
640 | |||
641 | extern void __perf_counter_mmap(struct vm_area_struct *vma); | ||
642 | |||
643 | static inline void perf_counter_mmap(struct vm_area_struct *vma) | ||
644 | { | ||
645 | if (vma->vm_flags & VM_EXEC) | ||
646 | __perf_counter_mmap(vma); | ||
647 | } | ||
648 | |||
649 | extern void perf_counter_comm(struct task_struct *tsk); | ||
650 | extern void perf_counter_fork(struct task_struct *tsk); | ||
651 | |||
652 | extern void perf_counter_task_migration(struct task_struct *task, int cpu); | ||
653 | |||
654 | #define MAX_STACK_DEPTH 255 | ||
655 | |||
656 | struct perf_callchain_entry { | ||
657 | u16 nr; | ||
658 | u16 hv; | ||
659 | u16 kernel; | ||
660 | u16 user; | ||
661 | u64 ip[MAX_STACK_DEPTH]; | ||
662 | }; | ||
663 | |||
664 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | ||
665 | |||
666 | extern int sysctl_perf_counter_paranoid; | ||
667 | extern int sysctl_perf_counter_mlock; | ||
668 | extern int sysctl_perf_counter_sample_rate; | ||
669 | |||
670 | extern void perf_counter_init(void); | ||
671 | |||
672 | #ifndef perf_misc_flags | ||
673 | #define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \ | ||
674 | PERF_EVENT_MISC_KERNEL) | ||
675 | #define perf_instruction_pointer(regs) instruction_pointer(regs) | ||
676 | #endif | ||
677 | |||
678 | #else | ||
679 | static inline void | ||
680 | perf_counter_task_sched_in(struct task_struct *task, int cpu) { } | ||
681 | static inline void | ||
682 | perf_counter_task_sched_out(struct task_struct *task, | ||
683 | struct task_struct *next, int cpu) { } | ||
684 | static inline void | ||
685 | perf_counter_task_tick(struct task_struct *task, int cpu) { } | ||
686 | static inline int perf_counter_init_task(struct task_struct *child) { return 0; } | ||
687 | static inline void perf_counter_exit_task(struct task_struct *child) { } | ||
688 | static inline void perf_counter_free_task(struct task_struct *task) { } | ||
689 | static inline void perf_counter_do_pending(void) { } | ||
690 | static inline void perf_counter_print_debug(void) { } | ||
691 | static inline void perf_disable(void) { } | ||
692 | static inline void perf_enable(void) { } | ||
693 | static inline int perf_counter_task_disable(void) { return -EINVAL; } | ||
694 | static inline int perf_counter_task_enable(void) { return -EINVAL; } | ||
695 | |||
696 | static inline void | ||
697 | perf_swcounter_event(u32 event, u64 nr, int nmi, | ||
698 | struct pt_regs *regs, u64 addr) { } | ||
699 | |||
700 | static inline void perf_counter_mmap(struct vm_area_struct *vma) { } | ||
701 | static inline void perf_counter_comm(struct task_struct *tsk) { } | ||
702 | static inline void perf_counter_fork(struct task_struct *tsk) { } | ||
703 | static inline void perf_counter_init(void) { } | ||
704 | static inline void perf_counter_task_migration(struct task_struct *task, | ||
705 | int cpu) { } | ||
706 | #endif | ||
707 | |||
708 | #endif /* __KERNEL__ */ | ||
709 | #endif /* _LINUX_PERF_COUNTER_H */ | ||
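Editor's note: the first half of this new header is the user-space ABI, so a small self-monitoring program exercises most of it: fill a perf_counter_attr, open a counter on the current task, enable it around a region, and read back the 64-bit count. A hedged sketch; there is no libc wrapper, so the counter is opened with syscall(), and __NR_perf_counter_open is architecture-specific and assumed to come from the kernel headers in use:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/perf_counter.h>

    static long perf_counter_open(struct perf_counter_attr *attr,
                                  pid_t pid, int cpu, int group_fd,
                                  unsigned long flags)
    {
            return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_counter_attr attr;
            unsigned long long count;
            volatile unsigned long spin = 0;
            unsigned long i;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.type           = PERF_TYPE_HARDWARE;
            attr.size           = sizeof(attr);          /* PERF_ATTR_SIZE_VER0 */
            attr.config         = PERF_COUNT_HW_CPU_CYCLES;
            attr.disabled       = 1;                     /* start it explicitly below */
            attr.exclude_kernel = 1;

            /* pid 0 = current task, cpu -1 = any CPU, no group, no flags */
            fd = perf_counter_open(&attr, 0, -1, -1, 0);
            if (fd < 0) {
                    perror("perf_counter_open");
                    return 1;
            }

            ioctl(fd, PERF_COUNTER_IOC_ENABLE);
            for (i = 0; i < 10 * 1000 * 1000; i++)
                    spin += i;
            ioctl(fd, PERF_COUNTER_IOC_DISABLE);

            /* with read_format == 0 a read returns just the u64 counter value */
            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("cycles: %llu\n", count);
            close(fd);
            return 0;
    }

The mmap()-based self-monitoring path described in the perf_counter_mmap_page comment is the lock-free alternative to this read(): user space rereads the seqlock word around pmc_read() until it observes a stable index/offset pair.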
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index c8f038554e80..b43a9e039059 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h | |||
@@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void | |||
152 | void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); | 152 | void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); |
153 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); | 153 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); |
154 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); | 154 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); |
155 | void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); | ||
155 | 156 | ||
156 | #endif | 157 | #endif |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 1d4e2d289821..b3f74764a586 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -382,14 +382,13 @@ struct dev_pm_info { | |||
382 | #ifdef CONFIG_PM_SLEEP | 382 | #ifdef CONFIG_PM_SLEEP |
383 | extern void device_pm_lock(void); | 383 | extern void device_pm_lock(void); |
384 | extern int sysdev_resume(void); | 384 | extern int sysdev_resume(void); |
385 | extern void device_power_up(pm_message_t state); | 385 | extern void dpm_resume_noirq(pm_message_t state); |
386 | extern void device_resume(pm_message_t state); | 386 | extern void dpm_resume_end(pm_message_t state); |
387 | 387 | ||
388 | extern void device_pm_unlock(void); | 388 | extern void device_pm_unlock(void); |
389 | extern int sysdev_suspend(pm_message_t state); | 389 | extern int sysdev_suspend(pm_message_t state); |
390 | extern int device_power_down(pm_message_t state); | 390 | extern int dpm_suspend_noirq(pm_message_t state); |
391 | extern int device_suspend(pm_message_t state); | 391 | extern int dpm_suspend_start(pm_message_t state); |
392 | extern int device_prepare_suspend(pm_message_t state); | ||
393 | 392 | ||
394 | extern void __suspend_report_result(const char *function, void *fn, int ret); | 393 | extern void __suspend_report_result(const char *function, void *fn, int ret); |
395 | 394 | ||
@@ -403,7 +402,7 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); | |||
403 | #define device_pm_lock() do {} while (0) | 402 | #define device_pm_lock() do {} while (0) |
404 | #define device_pm_unlock() do {} while (0) | 403 | #define device_pm_unlock() do {} while (0) |
405 | 404 | ||
406 | static inline int device_suspend(pm_message_t state) | 405 | static inline int dpm_suspend_start(pm_message_t state) |
407 | { | 406 | { |
408 | return 0; | 407 | return 0; |
409 | } | 408 | } |
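Editor's note: the renames make the pairing explicit: dpm_suspend_start()/dpm_resume_end() bracket the whole device list, dpm_suspend_noirq()/dpm_resume_noirq() bracket the late, interrupts-off phase. A hedged caller-side sketch of the ordering (the real sequence in kernel/power/ also handles freezing, platform hooks and finer-grained error unwinding):

    #include <linux/pm.h>

    static int suspend_sequence_sketch(void)
    {
            int error;

            error = dpm_suspend_start(PMSG_SUSPEND);        /* was device_suspend() */
            if (error)
                    return error;

            error = dpm_suspend_noirq(PMSG_SUSPEND);        /* was device_power_down() */
            if (error)
                    goto resume_devices;

            error = sysdev_suspend(PMSG_SUSPEND);
            if (!error) {
                    /* enter the sleep state here */
                    sysdev_resume();
            }

            dpm_resume_noirq(PMSG_RESUME);                  /* was device_power_up() */
    resume_devices:
            dpm_resume_end(PMSG_RESUME);                    /* was device_resume() */
            return error;
    }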
diff --git a/include/linux/prctl.h b/include/linux/prctl.h index 48d887e3c6e7..b00df4c79c63 100644 --- a/include/linux/prctl.h +++ b/include/linux/prctl.h | |||
@@ -85,4 +85,7 @@ | |||
85 | #define PR_SET_TIMERSLACK 29 | 85 | #define PR_SET_TIMERSLACK 29 |
86 | #define PR_GET_TIMERSLACK 30 | 86 | #define PR_GET_TIMERSLACK 30 |
87 | 87 | ||
88 | #define PR_TASK_PERF_COUNTERS_DISABLE 31 | ||
89 | #define PR_TASK_PERF_COUNTERS_ENABLE 32 | ||
90 | |||
88 | #endif /* _LINUX_PRCTL_H */ | 91 | #endif /* _LINUX_PRCTL_H */ |
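Editor's note: the two new prctl commands let a task freeze and unfreeze its own counters without touching the counter fds. A hedged user-space sketch (the fallback defines simply mirror the values added above for builds against older headers):

    #include <sys/prctl.h>

    #ifndef PR_TASK_PERF_COUNTERS_DISABLE
    #define PR_TASK_PERF_COUNTERS_DISABLE 31
    #define PR_TASK_PERF_COUNTERS_ENABLE  32
    #endif

    /* Run fn() with this task's performance counters masked. */
    static void run_unprofiled(void (*fn)(void))
    {
            prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0);
            fn();                           /* counters stay frozen here */
            prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0);
    }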
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index fbfa3d44d33d..e6e77d31c418 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -93,20 +93,9 @@ struct vmcore { | |||
93 | 93 | ||
94 | #ifdef CONFIG_PROC_FS | 94 | #ifdef CONFIG_PROC_FS |
95 | 95 | ||
96 | extern spinlock_t proc_subdir_lock; | ||
97 | |||
98 | extern void proc_root_init(void); | 96 | extern void proc_root_init(void); |
99 | 97 | ||
100 | void proc_flush_task(struct task_struct *task); | 98 | void proc_flush_task(struct task_struct *task); |
101 | struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *); | ||
102 | int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir); | ||
103 | unsigned long task_vsize(struct mm_struct *); | ||
104 | int task_statm(struct mm_struct *, int *, int *, int *, int *); | ||
105 | void task_mem(struct seq_file *, struct mm_struct *); | ||
106 | void clear_refs_smap(struct mm_struct *mm); | ||
107 | |||
108 | struct proc_dir_entry *de_get(struct proc_dir_entry *de); | ||
109 | void de_put(struct proc_dir_entry *de); | ||
110 | 99 | ||
111 | extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, | 100 | extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, |
112 | struct proc_dir_entry *parent); | 101 | struct proc_dir_entry *parent); |
@@ -116,20 +105,7 @@ struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, | |||
116 | void *data); | 105 | void *data); |
117 | extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); | 106 | extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); |
118 | 107 | ||
119 | extern struct vfsmount *proc_mnt; | ||
120 | struct pid_namespace; | 108 | struct pid_namespace; |
121 | extern int proc_fill_super(struct super_block *); | ||
122 | extern struct inode *proc_get_inode(struct super_block *, unsigned int, struct proc_dir_entry *); | ||
123 | |||
124 | /* | ||
125 | * These are generic /proc routines that use the internal | ||
126 | * "struct proc_dir_entry" tree to traverse the filesystem. | ||
127 | * | ||
128 | * The /proc root directory has extended versions to take care | ||
129 | * of the /proc/<pid> subdirectories. | ||
130 | */ | ||
131 | extern int proc_readdir(struct file *, void *, filldir_t); | ||
132 | extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *); | ||
133 | 109 | ||
134 | extern int pid_ns_prepare_proc(struct pid_namespace *ns); | 110 | extern int pid_ns_prepare_proc(struct pid_namespace *ns); |
135 | extern void pid_ns_release_proc(struct pid_namespace *ns); | 111 | extern void pid_ns_release_proc(struct pid_namespace *ns); |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 67c15653fc23..59e133d39d50 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -95,7 +95,6 @@ extern void __ptrace_link(struct task_struct *child, | |||
95 | struct task_struct *new_parent); | 95 | struct task_struct *new_parent); |
96 | extern void __ptrace_unlink(struct task_struct *child); | 96 | extern void __ptrace_unlink(struct task_struct *child); |
97 | extern void exit_ptrace(struct task_struct *tracer); | 97 | extern void exit_ptrace(struct task_struct *tracer); |
98 | extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags); | ||
99 | #define PTRACE_MODE_READ 1 | 98 | #define PTRACE_MODE_READ 1 |
100 | #define PTRACE_MODE_ATTACH 2 | 99 | #define PTRACE_MODE_ATTACH 2 |
101 | /* Returns 0 on success, -errno on denial. */ | 100 | /* Returns 0 on success, -errno on denial. */ |
@@ -327,15 +326,6 @@ static inline void user_enable_block_step(struct task_struct *task) | |||
327 | #define arch_ptrace_untrace(task) do { } while (0) | 326 | #define arch_ptrace_untrace(task) do { } while (0) |
328 | #endif | 327 | #endif |
329 | 328 | ||
330 | #ifndef arch_ptrace_fork | ||
331 | /* | ||
332 | * Do machine-specific work to initialize a new task. | ||
333 | * | ||
334 | * This is called from copy_process(). | ||
335 | */ | ||
336 | #define arch_ptrace_fork(child, clone_flags) do { } while (0) | ||
337 | #endif | ||
338 | |||
339 | extern int task_current_syscall(struct task_struct *target, long *callno, | 329 | extern int task_current_syscall(struct task_struct *target, long *callno, |
340 | unsigned long args[6], unsigned int maxargs, | 330 | unsigned long args[6], unsigned int maxargs, |
341 | unsigned long *sp, unsigned long *pc); | 331 | unsigned long *sp, unsigned long *pc); |
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h index 787d19ea9f46..8b9aee1a9ce3 100644 --- a/include/linux/qnx4_fs.h +++ b/include/linux/qnx4_fs.h | |||
@@ -85,65 +85,4 @@ struct qnx4_super_block { | |||
85 | struct qnx4_inode_entry AltBoot; | 85 | struct qnx4_inode_entry AltBoot; |
86 | }; | 86 | }; |
87 | 87 | ||
88 | #ifdef __KERNEL__ | ||
89 | |||
90 | #define QNX4_DEBUG 0 | ||
91 | |||
92 | #if QNX4_DEBUG | ||
93 | #define QNX4DEBUG(X) printk X | ||
94 | #else | ||
95 | #define QNX4DEBUG(X) (void) 0 | ||
96 | #endif | ||
97 | |||
98 | struct qnx4_sb_info { | ||
99 | struct buffer_head *sb_buf; /* superblock buffer */ | ||
100 | struct qnx4_super_block *sb; /* our superblock */ | ||
101 | unsigned int Version; /* may be useful */ | ||
102 | struct qnx4_inode_entry *BitMap; /* useful */ | ||
103 | }; | ||
104 | |||
105 | struct qnx4_inode_info { | ||
106 | struct qnx4_inode_entry raw; | ||
107 | loff_t mmu_private; | ||
108 | struct inode vfs_inode; | ||
109 | }; | ||
110 | |||
111 | extern struct inode *qnx4_iget(struct super_block *, unsigned long); | ||
112 | extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd); | ||
113 | extern unsigned long qnx4_count_free_blocks(struct super_block *sb); | ||
114 | extern unsigned long qnx4_block_map(struct inode *inode, long iblock); | ||
115 | |||
116 | extern struct buffer_head *qnx4_bread(struct inode *, int, int); | ||
117 | |||
118 | extern const struct inode_operations qnx4_file_inode_operations; | ||
119 | extern const struct inode_operations qnx4_dir_inode_operations; | ||
120 | extern const struct file_operations qnx4_file_operations; | ||
121 | extern const struct file_operations qnx4_dir_operations; | ||
122 | extern int qnx4_is_free(struct super_block *sb, long block); | ||
123 | extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy); | ||
124 | extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd); | ||
125 | extern void qnx4_truncate(struct inode *inode); | ||
126 | extern void qnx4_free_inode(struct inode *inode); | ||
127 | extern int qnx4_unlink(struct inode *dir, struct dentry *dentry); | ||
128 | extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry); | ||
129 | extern int qnx4_sync_file(struct file *file, struct dentry *dentry, int); | ||
130 | extern int qnx4_sync_inode(struct inode *inode); | ||
131 | |||
132 | static inline struct qnx4_sb_info *qnx4_sb(struct super_block *sb) | ||
133 | { | ||
134 | return sb->s_fs_info; | ||
135 | } | ||
136 | |||
137 | static inline struct qnx4_inode_info *qnx4_i(struct inode *inode) | ||
138 | { | ||
139 | return container_of(inode, struct qnx4_inode_info, vfs_inode); | ||
140 | } | ||
141 | |||
142 | static inline struct qnx4_inode_entry *qnx4_raw_inode(struct inode *inode) | ||
143 | { | ||
144 | return &qnx4_i(inode)->raw; | ||
145 | } | ||
146 | |||
147 | #endif /* __KERNEL__ */ | ||
148 | |||
149 | #endif | 88 | #endif |
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index 36353d95c8db..7bc457593684 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h | |||
@@ -20,7 +20,12 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb) | |||
20 | /* | 20 | /* |
21 | * declaration of quota_function calls in kernel. | 21 | * declaration of quota_function calls in kernel. |
22 | */ | 22 | */ |
23 | void sync_dquots(struct super_block *sb, int type); | 23 | void sync_quota_sb(struct super_block *sb, int type); |
24 | static inline void writeout_quota_sb(struct super_block *sb, int type) | ||
25 | { | ||
26 | if (sb->s_qcop->quota_sync) | ||
27 | sb->s_qcop->quota_sync(sb, type); | ||
28 | } | ||
24 | 29 | ||
25 | int dquot_initialize(struct inode *inode, int type); | 30 | int dquot_initialize(struct inode *inode, int type); |
26 | int dquot_drop(struct inode *inode); | 31 | int dquot_drop(struct inode *inode); |
@@ -253,12 +258,7 @@ static inline void vfs_dq_free_inode(struct inode *inode) | |||
253 | inode->i_sb->dq_op->free_inode(inode, 1); | 258 | inode->i_sb->dq_op->free_inode(inode, 1); |
254 | } | 259 | } |
255 | 260 | ||
256 | /* The following two functions cannot be called inside a transaction */ | 261 | /* Cannot be called inside a transaction */ |
257 | static inline void vfs_dq_sync(struct super_block *sb) | ||
258 | { | ||
259 | sync_dquots(sb, -1); | ||
260 | } | ||
261 | |||
262 | static inline int vfs_dq_off(struct super_block *sb, int remount) | 262 | static inline int vfs_dq_off(struct super_block *sb, int remount) |
263 | { | 263 | { |
264 | int ret = -ENOSYS; | 264 | int ret = -ENOSYS; |
@@ -334,7 +334,11 @@ static inline void vfs_dq_free_inode(struct inode *inode) | |||
334 | { | 334 | { |
335 | } | 335 | } |
336 | 336 | ||
337 | static inline void vfs_dq_sync(struct super_block *sb) | 337 | static inline void sync_quota_sb(struct super_block *sb, int type) |
338 | { | ||
339 | } | ||
340 | |||
341 | static inline void writeout_quota_sb(struct super_block *sb, int type) | ||
338 | { | 342 | { |
339 | } | 343 | } |
340 | 344 | ||
diff --git a/include/linux/rational.h b/include/linux/rational.h new file mode 100644 index 000000000000..4f532fcd9eea --- /dev/null +++ b/include/linux/rational.h | |||
@@ -0,0 +1,19 @@ | |||
1 | /* | ||
2 | * rational fractions | ||
3 | * | ||
4 | * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com> | ||
5 | * | ||
6 | * helper functions for working with rational numbers, | ||
7 | * e.g. when calculating optimum numerator/denominator pairs for | ||
8 | * PLL configuration taking into account restricted register size | ||
9 | */ | ||
10 | |||
11 | #ifndef _LINUX_RATIONAL_H | ||
12 | #define _LINUX_RATIONAL_H | ||
13 | |||
14 | void rational_best_approximation( | ||
15 | unsigned long given_numerator, unsigned long given_denominator, | ||
16 | unsigned long max_numerator, unsigned long max_denominator, | ||
17 | unsigned long *best_numerator, unsigned long *best_denominator); | ||
18 | |||
19 | #endif /* _LINUX_RATIONAL_H */ | ||
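The helper's contract is captured by the prototype above: given a target fraction and the largest numerator/denominator the hardware can hold, it returns the closest representable fraction. A hedged sketch of a caller follows; the clock figures and 12-bit register width are made up purely for illustration.

#include <linux/rational.h>

/* Hypothetical: approximate 33.333333 MHz / 25 MHz with 12-bit divider fields. */
static void pll_pick_dividers(void)
{
	unsigned long best_num, best_den;

	rational_best_approximation(33333333, 25000000,
				    (1 << 12) - 1, (1 << 12) - 1,
				    &best_num, &best_den);
	/* best_num/best_den now hold the closest ratio that fits in 12 bits each */
}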
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index e649bd3f2c97..5710f43bbc9e 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -198,6 +198,32 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
198 | at->prev = last; | 198 | at->prev = last; |
199 | } | 199 | } |
200 | 200 | ||
201 | /** | ||
202 | * list_entry_rcu - get the struct for this entry | ||
203 | * @ptr: the &struct list_head pointer. | ||
204 | * @type: the type of the struct this is embedded in. | ||
205 | * @member: the name of the list_struct within the struct. | ||
206 | * | ||
207 | * This primitive may safely run concurrently with the _rcu list-mutation | ||
208 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | ||
209 | */ | ||
210 | #define list_entry_rcu(ptr, type, member) \ | ||
211 | container_of(rcu_dereference(ptr), type, member) | ||
212 | |||
213 | /** | ||
214 | * list_first_entry_rcu - get the first element from a list | ||
215 | * @ptr: the list head to take the element from. | ||
216 | * @type: the type of the struct this is embedded in. | ||
217 | * @member: the name of the list_struct within the struct. | ||
218 | * | ||
219 | * Note that the list is expected to be non-empty. | ||
220 | * | ||
221 | * This primitive may safely run concurrently with the _rcu list-mutation | ||
222 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | ||
223 | */ | ||
224 | #define list_first_entry_rcu(ptr, type, member) \ | ||
225 | list_entry_rcu((ptr)->next, type, member) | ||
226 | |||
201 | #define __list_for_each_rcu(pos, head) \ | 227 | #define __list_for_each_rcu(pos, head) \ |
202 | for (pos = rcu_dereference((head)->next); \ | 228 | for (pos = rcu_dereference((head)->next); \ |
203 | pos != (head); \ | 229 | pos != (head); \ |
@@ -214,9 +240,9 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
214 | * as long as the traversal is guarded by rcu_read_lock(). | 240 | * as long as the traversal is guarded by rcu_read_lock(). |
215 | */ | 241 | */ |
216 | #define list_for_each_entry_rcu(pos, head, member) \ | 242 | #define list_for_each_entry_rcu(pos, head, member) \ |
217 | for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \ | 243 | for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \ |
218 | prefetch(pos->member.next), &pos->member != (head); \ | 244 | prefetch(pos->member.next), &pos->member != (head); \ |
219 | pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member)) | 245 | pos = list_entry_rcu(pos->member.next, typeof(*pos), member)) |
220 | 246 | ||
221 | 247 | ||
222 | /** | 248 | /** |
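The two new macros fold the rcu_dereference() call into the familiar list_entry() pattern, and list_for_each_entry_rcu() is now expressed in terms of list_entry_rcu(). A rough reader-side sketch, using a made-up struct item purely for illustration:

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct item {
	int value;
	struct list_head node;
};

/* Read the first element's value under RCU protection (sketch only). */
static int first_value(struct list_head *head)
{
	struct item *it;
	int v = -1;

	rcu_read_lock();
	if (!list_empty(head)) {
		it = list_first_entry_rcu(head, struct item, node);
		v = it->value;
	}
	rcu_read_unlock();
	return v;
}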
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 58b2aa5312b9..5a5153806c42 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -161,8 +161,15 @@ struct rcu_data { | |||
161 | unsigned long offline_fqs; /* Kicked due to being offline. */ | 161 | unsigned long offline_fqs; /* Kicked due to being offline. */ |
162 | unsigned long resched_ipi; /* Sent a resched IPI. */ | 162 | unsigned long resched_ipi; /* Sent a resched IPI. */ |
163 | 163 | ||
164 | /* 5) For future __rcu_pending statistics. */ | 164 | /* 5) __rcu_pending() statistics. */ |
165 | long n_rcu_pending; /* rcu_pending() calls since boot. */ | 165 | long n_rcu_pending; /* rcu_pending() calls since boot. */ |
166 | long n_rp_qs_pending; | ||
167 | long n_rp_cb_ready; | ||
168 | long n_rp_cpu_needs_gp; | ||
169 | long n_rp_gp_completed; | ||
170 | long n_rp_gp_started; | ||
171 | long n_rp_need_fqs; | ||
172 | long n_rp_need_nothing; | ||
166 | 173 | ||
167 | int cpu; | 174 | int cpu; |
168 | }; | 175 | }; |
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h index 6473650c28f1..dab68bbed675 100644 --- a/include/linux/reiserfs_fs_sb.h +++ b/include/linux/reiserfs_fs_sb.h | |||
@@ -453,6 +453,7 @@ enum reiserfs_mount_options { | |||
453 | REISERFS_ATTRS, | 453 | REISERFS_ATTRS, |
454 | REISERFS_XATTRS_USER, | 454 | REISERFS_XATTRS_USER, |
455 | REISERFS_POSIXACL, | 455 | REISERFS_POSIXACL, |
456 | REISERFS_EXPOSE_PRIVROOT, | ||
456 | REISERFS_BARRIER_NONE, | 457 | REISERFS_BARRIER_NONE, |
457 | REISERFS_BARRIER_FLUSH, | 458 | REISERFS_BARRIER_FLUSH, |
458 | 459 | ||
@@ -490,6 +491,7 @@ enum reiserfs_mount_options { | |||
490 | #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK)) | 491 | #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK)) |
491 | #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER)) | 492 | #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER)) |
492 | #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL)) | 493 | #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL)) |
494 | #define reiserfs_expose_privroot(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_EXPOSE_PRIVROOT)) | ||
493 | #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s)) | 495 | #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s)) |
494 | #define reiserfs_barrier_none(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_NONE)) | 496 | #define reiserfs_barrier_none(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_NONE)) |
495 | #define reiserfs_barrier_flush(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_FLUSH)) | 497 | #define reiserfs_barrier_flush(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_FLUSH)) |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index e1b7b2173885..8670f1575fe1 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -11,7 +11,7 @@ struct ring_buffer_iter; | |||
11 | * Don't refer to this struct directly, use functions below. | 11 | * Don't refer to this struct directly, use functions below. |
12 | */ | 12 | */ |
13 | struct ring_buffer_event { | 13 | struct ring_buffer_event { |
14 | u32 type:2, len:3, time_delta:27; | 14 | u32 type_len:5, time_delta:27; |
15 | u32 array[]; | 15 | u32 array[]; |
16 | }; | 16 | }; |
17 | 17 | ||
@@ -24,7 +24,8 @@ struct ring_buffer_event { | |||
24 | * size is variable depending on how much | 24 | * size is variable depending on how much |
25 | * padding is needed | 25 | * padding is needed |
26 | * If time_delta is non zero: | 26 | * If time_delta is non zero: |
27 | * everything else same as RINGBUF_TYPE_DATA | 27 | * array[0] holds the actual length |
28 | * size = 4 + length (bytes) | ||
28 | * | 29 | * |
29 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta | 30 | * @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta |
30 | * array[0] = time delta (28 .. 59) | 31 | * array[0] = time delta (28 .. 59) |
@@ -35,22 +36,23 @@ struct ring_buffer_event { | |||
35 | * array[1..2] = tv_sec | 36 | * array[1..2] = tv_sec |
36 | * size = 16 bytes | 37 | * size = 16 bytes |
37 | * | 38 | * |
38 | * @RINGBUF_TYPE_DATA: Data record | 39 | * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX: |
39 | * If len is zero: | 40 | * Data record |
41 | * If type_len is zero: | ||
40 | * array[0] holds the actual length | 42 | * array[0] holds the actual length |
41 | * array[1..(length+3)/4] holds data | 43 | * array[1..(length+3)/4] holds data |
42 | * size = 4 + 4 + length (bytes) | 44 | * size = 4 + length (bytes) |
43 | * else | 45 | * else |
44 | * length = len << 2 | 46 | * length = type_len << 2 |
45 | * array[0..(length+3)/4-1] holds data | 47 | * array[0..(length+3)/4-1] holds data |
46 | * size = 4 + length (bytes) | 48 | * size = 4 + length (bytes) |
47 | */ | 49 | */ |
48 | enum ring_buffer_type { | 50 | enum ring_buffer_type { |
51 | RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, | ||
49 | RINGBUF_TYPE_PADDING, | 52 | RINGBUF_TYPE_PADDING, |
50 | RINGBUF_TYPE_TIME_EXTEND, | 53 | RINGBUF_TYPE_TIME_EXTEND, |
51 | /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ | 54 | /* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */ |
52 | RINGBUF_TYPE_TIME_STAMP, | 55 | RINGBUF_TYPE_TIME_STAMP, |
53 | RINGBUF_TYPE_DATA, | ||
54 | }; | 56 | }; |
55 | 57 | ||
56 | unsigned ring_buffer_event_length(struct ring_buffer_event *event); | 58 | unsigned ring_buffer_event_length(struct ring_buffer_event *event); |
@@ -68,13 +70,54 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event) | |||
68 | return event->time_delta; | 70 | return event->time_delta; |
69 | } | 71 | } |
70 | 72 | ||
73 | /* | ||
74 | * ring_buffer_event_discard can discard any event in the ring buffer. | ||
75 | * It is up to the caller to protect against a reader | ||
76 | * consuming it or a writer wrapping and replacing it. | ||
77 | * | ||
78 | * No external protection is needed if this is called before | ||
79 | * the event is committed. But in that case it would be better to | ||
80 | * use ring_buffer_discard_commit. | ||
81 | * | ||
82 | * Note, if an event that has not been committed is discarded | ||
83 | * with ring_buffer_event_discard, it must still be committed. | ||
84 | */ | ||
71 | void ring_buffer_event_discard(struct ring_buffer_event *event); | 85 | void ring_buffer_event_discard(struct ring_buffer_event *event); |
72 | 86 | ||
73 | /* | 87 | /* |
88 | * ring_buffer_discard_commit will remove an event that has not | ||
89 | * been committed yet. If this is used, then ring_buffer_unlock_commit | ||
90 | * must not be called on the discarded event. This function | ||
91 | * will try to remove the event from the ring buffer completely | ||
92 | * if another event has not been written after it. | ||
93 | * | ||
94 | * Example use: | ||
95 | * | ||
96 | * if (some_condition) | ||
97 | * ring_buffer_discard_commit(buffer, event); | ||
98 | * else | ||
99 | * ring_buffer_unlock_commit(buffer, event); | ||
100 | */ | ||
101 | void ring_buffer_discard_commit(struct ring_buffer *buffer, | ||
102 | struct ring_buffer_event *event); | ||
103 | |||
104 | /* | ||
74 | * size is in bytes for each per CPU buffer. | 105 | * size is in bytes for each per CPU buffer. |
75 | */ | 106 | */ |
76 | struct ring_buffer * | 107 | struct ring_buffer * |
77 | ring_buffer_alloc(unsigned long size, unsigned flags); | 108 | __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); |
109 | |||
110 | /* | ||
111 | * Because the ring buffer is generic, if other users of the ring buffer get | ||
112 | * traced by ftrace, it can produce lockdep warnings. We need to keep each | ||
113 | * ring buffer's lock class separate. | ||
114 | */ | ||
115 | #define ring_buffer_alloc(size, flags) \ | ||
116 | ({ \ | ||
117 | static struct lock_class_key __key; \ | ||
118 | __ring_buffer_alloc((size), (flags), &__key); \ | ||
119 | }) | ||
120 | |||
78 | void ring_buffer_free(struct ring_buffer *buffer); | 121 | void ring_buffer_free(struct ring_buffer *buffer); |
79 | 122 | ||
80 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); | 123 | int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); |
@@ -122,6 +165,8 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer); | |||
122 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer); | 165 | unsigned long ring_buffer_overruns(struct ring_buffer *buffer); |
123 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); | 166 | unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); |
124 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); | 167 | unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); |
168 | unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); | ||
169 | unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu); | ||
125 | 170 | ||
126 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); | 171 | u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); |
127 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, | 172 | void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, |
@@ -137,6 +182,11 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); | |||
137 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, | 182 | int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, |
138 | size_t len, int cpu, int full); | 183 | size_t len, int cpu, int full); |
139 | 184 | ||
185 | struct trace_seq; | ||
186 | |||
187 | int ring_buffer_print_entry_header(struct trace_seq *s); | ||
188 | int ring_buffer_print_page_header(struct trace_seq *s); | ||
189 | |||
140 | enum ring_buffer_flags { | 190 | enum ring_buffer_flags { |
141 | RB_FL_OVERWRITE = 1 << 0, | 191 | RB_FL_OVERWRITE = 1 << 0, |
142 | }; | 192 | }; |
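Since ring_buffer_alloc() is now a macro that plants a static lock_class_key at each call site, existing callers compile unchanged while lockdep sees a distinct class per buffer. The discard comments above translate into roughly the pattern sketched below; the reserve/commit signatures, the per-call allocation, and drop_this_record are assumptions for illustration, not taken from this patch.

#include <linux/ring_buffer.h>

static void log_sample(int value, bool drop_this_record)
{
	struct ring_buffer *rb = ring_buffer_alloc(4096, RB_FL_OVERWRITE);
	struct ring_buffer_event *event;
	u32 *payload;

	event = ring_buffer_lock_reserve(rb, sizeof(*payload));
	if (event) {
		payload = ring_buffer_event_data(event);
		*payload = value;
		if (drop_this_record)
			ring_buffer_discard_commit(rb, event); /* no unlock_commit afterwards */
		else
			ring_buffer_unlock_commit(rb, event);
	}
	ring_buffer_free(rb);
}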
diff --git a/include/linux/sched.h b/include/linux/sched.h index b4c38bc8049c..4896fdfec913 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -77,6 +77,7 @@ struct sched_param { | |||
77 | #include <linux/proportions.h> | 77 | #include <linux/proportions.h> |
78 | #include <linux/seccomp.h> | 78 | #include <linux/seccomp.h> |
79 | #include <linux/rcupdate.h> | 79 | #include <linux/rcupdate.h> |
80 | #include <linux/rculist.h> | ||
80 | #include <linux/rtmutex.h> | 81 | #include <linux/rtmutex.h> |
81 | 82 | ||
82 | #include <linux/time.h> | 83 | #include <linux/time.h> |
@@ -96,8 +97,9 @@ struct exec_domain; | |||
96 | struct futex_pi_state; | 97 | struct futex_pi_state; |
97 | struct robust_list_head; | 98 | struct robust_list_head; |
98 | struct bio; | 99 | struct bio; |
99 | struct bts_tracer; | ||
100 | struct fs_struct; | 100 | struct fs_struct; |
101 | struct bts_context; | ||
102 | struct perf_counter_context; | ||
101 | 103 | ||
102 | /* | 104 | /* |
103 | * List of flags we want to share for kernel threads, | 105 | * List of flags we want to share for kernel threads, |
@@ -116,6 +118,7 @@ struct fs_struct; | |||
116 | * 11 bit fractions. | 118 | * 11 bit fractions. |
117 | */ | 119 | */ |
118 | extern unsigned long avenrun[]; /* Load averages */ | 120 | extern unsigned long avenrun[]; /* Load averages */ |
121 | extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); | ||
119 | 122 | ||
120 | #define FSHIFT 11 /* nr of bits of precision */ | 123 | #define FSHIFT 11 /* nr of bits of precision */ |
121 | #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ | 124 | #define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */ |
@@ -135,8 +138,9 @@ DECLARE_PER_CPU(unsigned long, process_counts); | |||
135 | extern int nr_processes(void); | 138 | extern int nr_processes(void); |
136 | extern unsigned long nr_running(void); | 139 | extern unsigned long nr_running(void); |
137 | extern unsigned long nr_uninterruptible(void); | 140 | extern unsigned long nr_uninterruptible(void); |
138 | extern unsigned long nr_active(void); | ||
139 | extern unsigned long nr_iowait(void); | 141 | extern unsigned long nr_iowait(void); |
142 | extern void calc_global_load(void); | ||
143 | extern u64 cpu_nr_migrations(int cpu); | ||
140 | 144 | ||
141 | extern unsigned long get_parent_ip(unsigned long addr); | 145 | extern unsigned long get_parent_ip(unsigned long addr); |
142 | 146 | ||
@@ -672,6 +676,10 @@ struct user_struct { | |||
672 | struct work_struct work; | 676 | struct work_struct work; |
673 | #endif | 677 | #endif |
674 | #endif | 678 | #endif |
679 | |||
680 | #ifdef CONFIG_PERF_COUNTERS | ||
681 | atomic_long_t locked_vm; | ||
682 | #endif | ||
675 | }; | 683 | }; |
676 | 684 | ||
677 | extern int uids_sysfs_init(void); | 685 | extern int uids_sysfs_init(void); |
@@ -838,7 +846,17 @@ struct sched_group { | |||
838 | */ | 846 | */ |
839 | u32 reciprocal_cpu_power; | 847 | u32 reciprocal_cpu_power; |
840 | 848 | ||
841 | unsigned long cpumask[]; | 849 | /* |
850 | * The CPUs this group covers. | ||
851 | * | ||
852 | * NOTE: this field is variable length. (Allocated dynamically | ||
853 | * by attaching extra space to the end of the structure, | ||
854 | * depending on how many CPUs the kernel has booted up with) | ||
855 | * | ||
856 | * It can also be embedded into static data structures at build | ||
857 | * time. (See 'struct static_sched_group' in kernel/sched.c) | ||
858 | */ | ||
859 | unsigned long cpumask[0]; | ||
842 | }; | 860 | }; |
843 | 861 | ||
844 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) | 862 | static inline struct cpumask *sched_group_cpus(struct sched_group *sg) |
@@ -924,8 +942,17 @@ struct sched_domain { | |||
924 | char *name; | 942 | char *name; |
925 | #endif | 943 | #endif |
926 | 944 | ||
927 | /* span of all CPUs in this domain */ | 945 | /* |
928 | unsigned long span[]; | 946 | * Span of all CPUs in this domain. |
947 | * | ||
948 | * NOTE: this field is variable length. (Allocated dynamically | ||
949 | * by attaching extra space to the end of the structure, | ||
950 | * depending on how many CPUs the kernel has booted up with) | ||
951 | * | ||
952 | * It can also be embedded into static data structures at build | ||
953 | * time. (See 'struct static_sched_domain' in kernel/sched.c) | ||
954 | */ | ||
955 | unsigned long span[0]; | ||
929 | }; | 956 | }; |
930 | 957 | ||
931 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) | 958 | static inline struct cpumask *sched_domain_span(struct sched_domain *sd) |
@@ -1052,9 +1079,10 @@ struct sched_entity { | |||
1052 | u64 last_wakeup; | 1079 | u64 last_wakeup; |
1053 | u64 avg_overlap; | 1080 | u64 avg_overlap; |
1054 | 1081 | ||
1082 | u64 nr_migrations; | ||
1083 | |||
1055 | u64 start_runtime; | 1084 | u64 start_runtime; |
1056 | u64 avg_wakeup; | 1085 | u64 avg_wakeup; |
1057 | u64 nr_migrations; | ||
1058 | 1086 | ||
1059 | #ifdef CONFIG_SCHEDSTATS | 1087 | #ifdef CONFIG_SCHEDSTATS |
1060 | u64 wait_start; | 1088 | u64 wait_start; |
@@ -1209,18 +1237,11 @@ struct task_struct { | |||
1209 | struct list_head ptraced; | 1237 | struct list_head ptraced; |
1210 | struct list_head ptrace_entry; | 1238 | struct list_head ptrace_entry; |
1211 | 1239 | ||
1212 | #ifdef CONFIG_X86_PTRACE_BTS | ||
1213 | /* | 1240 | /* |
1214 | * This is the tracer handle for the ptrace BTS extension. | 1241 | * This is the tracer handle for the ptrace BTS extension. |
1215 | * This field actually belongs to the ptracer task. | 1242 | * This field actually belongs to the ptracer task. |
1216 | */ | 1243 | */ |
1217 | struct bts_tracer *bts; | 1244 | struct bts_context *bts; |
1218 | /* | ||
1219 | * The buffer to hold the BTS data. | ||
1220 | */ | ||
1221 | void *bts_buffer; | ||
1222 | size_t bts_size; | ||
1223 | #endif /* CONFIG_X86_PTRACE_BTS */ | ||
1224 | 1245 | ||
1225 | /* PID/PID hash table linkage. */ | 1246 | /* PID/PID hash table linkage. */ |
1226 | struct pid_link pids[PIDTYPE_MAX]; | 1247 | struct pid_link pids[PIDTYPE_MAX]; |
@@ -1247,7 +1268,9 @@ struct task_struct { | |||
1247 | * credentials (COW) */ | 1268 | * credentials (COW) */ |
1248 | const struct cred *cred; /* effective (overridable) subjective task | 1269 | const struct cred *cred; /* effective (overridable) subjective task |
1249 | * credentials (COW) */ | 1270 | * credentials (COW) */ |
1250 | struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */ | 1271 | struct mutex cred_guard_mutex; /* guard against foreign influences on |
1272 | * credential calculations | ||
1273 | * (notably ptrace) */ | ||
1251 | 1274 | ||
1252 | char comm[TASK_COMM_LEN]; /* executable name excluding path | 1275 | char comm[TASK_COMM_LEN]; /* executable name excluding path |
1253 | - access with [gs]et_task_comm (which lock | 1276 | - access with [gs]et_task_comm (which lock |
@@ -1380,6 +1403,11 @@ struct task_struct { | |||
1380 | struct list_head pi_state_list; | 1403 | struct list_head pi_state_list; |
1381 | struct futex_pi_state *pi_state_cache; | 1404 | struct futex_pi_state *pi_state_cache; |
1382 | #endif | 1405 | #endif |
1406 | #ifdef CONFIG_PERF_COUNTERS | ||
1407 | struct perf_counter_context *perf_counter_ctxp; | ||
1408 | struct mutex perf_counter_mutex; | ||
1409 | struct list_head perf_counter_list; | ||
1410 | #endif | ||
1383 | #ifdef CONFIG_NUMA | 1411 | #ifdef CONFIG_NUMA |
1384 | struct mempolicy *mempolicy; | 1412 | struct mempolicy *mempolicy; |
1385 | short il_next; | 1413 | short il_next; |
@@ -1428,7 +1456,9 @@ struct task_struct { | |||
1428 | #ifdef CONFIG_TRACING | 1456 | #ifdef CONFIG_TRACING |
1429 | /* state flags for use by tracers */ | 1457 | /* state flags for use by tracers */ |
1430 | unsigned long trace; | 1458 | unsigned long trace; |
1431 | #endif | 1459 | /* bitmask of trace recursion */ |
1460 | unsigned long trace_recursion; | ||
1461 | #endif /* CONFIG_TRACING */ | ||
1432 | }; | 1462 | }; |
1433 | 1463 | ||
1434 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ | 1464 | /* Future-safe accessor for struct task_struct's cpus_allowed. */ |
@@ -1885,6 +1915,7 @@ extern void sched_dead(struct task_struct *p); | |||
1885 | 1915 | ||
1886 | extern void proc_caches_init(void); | 1916 | extern void proc_caches_init(void); |
1887 | extern void flush_signals(struct task_struct *); | 1917 | extern void flush_signals(struct task_struct *); |
1918 | extern void __flush_signals(struct task_struct *); | ||
1888 | extern void ignore_signals(struct task_struct *); | 1919 | extern void ignore_signals(struct task_struct *); |
1889 | extern void flush_signal_handlers(struct task_struct *, int force_default); | 1920 | extern void flush_signal_handlers(struct task_struct *, int force_default); |
1890 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); | 1921 | extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); |
@@ -2001,8 +2032,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from); | |||
2001 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 2032 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
2002 | 2033 | ||
2003 | #ifdef CONFIG_SMP | 2034 | #ifdef CONFIG_SMP |
2035 | extern void wait_task_context_switch(struct task_struct *p); | ||
2004 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); | 2036 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
2005 | #else | 2037 | #else |
2038 | static inline void wait_task_context_switch(struct task_struct *p) {} | ||
2006 | static inline unsigned long wait_task_inactive(struct task_struct *p, | 2039 | static inline unsigned long wait_task_inactive(struct task_struct *p, |
2007 | long match_state) | 2040 | long match_state) |
2008 | { | 2041 | { |
@@ -2010,7 +2043,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p, | |||
2010 | } | 2043 | } |
2011 | #endif | 2044 | #endif |
2012 | 2045 | ||
2013 | #define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks) | 2046 | #define next_task(p) \ |
2047 | list_entry_rcu((p)->tasks.next, struct task_struct, tasks) | ||
2014 | 2048 | ||
2015 | #define for_each_process(p) \ | 2049 | #define for_each_process(p) \ |
2016 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) | 2050 | for (p = &init_task ; (p = next_task(p)) != &init_task ; ) |
@@ -2049,8 +2083,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2) | |||
2049 | 2083 | ||
2050 | static inline struct task_struct *next_thread(const struct task_struct *p) | 2084 | static inline struct task_struct *next_thread(const struct task_struct *p) |
2051 | { | 2085 | { |
2052 | return list_entry(rcu_dereference(p->thread_group.next), | 2086 | return list_entry_rcu(p->thread_group.next, |
2053 | struct task_struct, thread_group); | 2087 | struct task_struct, thread_group); |
2054 | } | 2088 | } |
2055 | 2089 | ||
2056 | static inline int thread_group_empty(struct task_struct *p) | 2090 | static inline int thread_group_empty(struct task_struct *p) |
@@ -2388,6 +2422,13 @@ static inline void inc_syscw(struct task_struct *tsk) | |||
2388 | #define TASK_SIZE_OF(tsk) TASK_SIZE | 2422 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
2389 | #endif | 2423 | #endif |
2390 | 2424 | ||
2425 | /* | ||
2426 | * Call the function if the target task is executing on a CPU right now: | ||
2427 | */ | ||
2428 | extern void task_oncpu_function_call(struct task_struct *p, | ||
2429 | void (*func) (void *info), void *info); | ||
2430 | |||
2431 | |||
2391 | #ifdef CONFIG_MM_OWNER | 2432 | #ifdef CONFIG_MM_OWNER |
2392 | extern void mm_update_next_owner(struct mm_struct *mm); | 2433 | extern void mm_update_next_owner(struct mm_struct *mm); |
2393 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); | 2434 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); |
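The NOTE blocks added to struct sched_group and struct sched_domain describe a convention rather than an API: the zero-length cpumask[]/span[] arrays are backed either by static storage or by over-allocating the structure. A hedged illustration of the dynamic case; the helper name is illustrative, not the scheduler's own code.

#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/slab.h>

static struct sched_group *alloc_group(void)
{
	struct sched_group *sg;

	/* One allocation covers the struct plus room for the boot-time cpumask. */
	sg = kzalloc(sizeof(*sg) + cpumask_size(), GFP_KERNEL);
	if (sg)
		cpumask_copy(sched_group_cpus(sg), cpu_possible_mask);
	return sg;
}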
diff --git a/include/linux/security.h b/include/linux/security.h index d5fd6163606f..5eff459b3833 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -2197,6 +2197,8 @@ static inline int security_file_mmap(struct file *file, unsigned long reqprot, | |||
2197 | unsigned long addr, | 2197 | unsigned long addr, |
2198 | unsigned long addr_only) | 2198 | unsigned long addr_only) |
2199 | { | 2199 | { |
2200 | if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO)) | ||
2201 | return -EACCES; | ||
2200 | return 0; | 2202 | return 0; |
2201 | } | 2203 | } |
2202 | 2204 | ||
diff --git a/include/linux/serial.h b/include/linux/serial.h index 9136cc5608c3..e5bb75a63802 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h | |||
@@ -96,54 +96,76 @@ struct serial_uart_config { | |||
96 | 96 | ||
97 | /* | 97 | /* |
98 | * Definitions for async_struct (and serial_struct) flags field | 98 | * Definitions for async_struct (and serial_struct) flags field |
99 | * | ||
100 | * Define ASYNCB_* for convenient use with {test,set,clear}_bit. | ||
99 | */ | 101 | */ |
100 | #define ASYNC_HUP_NOTIFY 0x0001 /* Notify getty on hangups and closes | 102 | #define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes |
101 | on the callout port */ | 103 | * on the callout port */ |
102 | #define ASYNC_FOURPORT 0x0002 /* Set OU1, OUT2 per AST Fourport settings */ | 104 | #define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */ |
103 | #define ASYNC_SAK 0x0004 /* Secure Attention Key (Orange book) */ | 105 | #define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */ |
104 | #define ASYNC_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */ | 106 | #define ASYNCB_SPLIT_TERMIOS 3 /* Separate termios for dialin/callout */ |
105 | 107 | #define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */ | |
106 | #define ASYNC_SPD_MASK 0x1030 | 108 | #define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */ |
107 | #define ASYNC_SPD_HI 0x0010 /* Use 56000 instead of 38400 bps */ | 109 | #define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */ |
108 | 110 | #define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during | |
109 | #define ASYNC_SPD_VHI 0x0020 /* Use 115200 instead of 38400 bps */ | 111 | * autoconfiguration */ |
110 | #define ASYNC_SPD_CUST 0x0030 /* Use user-specified divisor */ | 112 | #define ASYNCB_SESSION_LOCKOUT 8 /* Lock out cua opens based on session */ |
111 | 113 | #define ASYNCB_PGRP_LOCKOUT 9 /* Lock out cua opens based on pgrp */ | |
112 | #define ASYNC_SKIP_TEST 0x0040 /* Skip UART test during autoconfiguration */ | 114 | #define ASYNCB_CALLOUT_NOHUP 10 /* Don't do hangups for cua device */ |
113 | #define ASYNC_AUTO_IRQ 0x0080 /* Do automatic IRQ during autoconfiguration */ | 115 | #define ASYNCB_HARDPPS_CD 11 /* Call hardpps when CD goes high */ |
114 | #define ASYNC_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */ | 116 | #define ASYNCB_SPD_SHI 12 /* Use 230400 instead of 38400 bps */ |
115 | #define ASYNC_PGRP_LOCKOUT 0x0200 /* Lock out cua opens based on pgrp */ | 117 | #define ASYNCB_LOW_LATENCY 13 /* Request low latency behaviour */ |
116 | #define ASYNC_CALLOUT_NOHUP 0x0400 /* Don't do hangups for cua device */ | 118 | #define ASYNCB_BUGGY_UART 14 /* This is a buggy UART, skip some safety |
117 | 119 | * checks. Note: can be dangerous! */ | |
118 | #define ASYNC_HARDPPS_CD 0x0800 /* Call hardpps when CD goes high */ | 120 | #define ASYNCB_AUTOPROBE 15 /* Port was autoprobed by PCI or PNP code */ |
119 | 121 | #define ASYNCB_LAST_USER 15 | |
120 | #define ASYNC_SPD_SHI 0x1000 /* Use 230400 instead of 38400 bps */ | 122 | |
121 | #define ASYNC_SPD_WARP 0x1010 /* Use 460800 instead of 38400 bps */ | 123 | /* Internal flags used only by kernel */ |
122 | 124 | #define ASYNCB_INITIALIZED 31 /* Serial port was initialized */ | |
123 | #define ASYNC_LOW_LATENCY 0x2000 /* Request low latency behaviour */ | 125 | #define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */ |
124 | 126 | #define ASYNCB_BOOT_AUTOCONF 28 /* Autoconfigure port on bootup */ | |
125 | #define ASYNC_BUGGY_UART 0x4000 /* This is a buggy UART, skip some safety | 127 | #define ASYNCB_CLOSING 27 /* Serial port is closing */ |
126 | * checks. Note: can be dangerous! */ | 128 | #define ASYNCB_CTS_FLOW 26 /* Do CTS flow control */ |
127 | 129 | #define ASYNCB_CHECK_CD 25 /* i.e., CLOCAL */ | |
128 | #define ASYNC_AUTOPROBE 0x8000 /* Port was autoprobed by PCI or PNP code */ | 130 | #define ASYNCB_SHARE_IRQ 24 /* for multifunction cards, no longer used */ |
129 | 131 | #define ASYNCB_CONS_FLOW 23 /* flow control for console */ | |
130 | #define ASYNC_FLAGS 0x7FFF /* Possible legal async flags */ | 132 | #define ASYNCB_BOOT_ONLYMCA 22 /* Probe only if MCA bus */ |
131 | #define ASYNC_USR_MASK 0x3430 /* Legal flags that non-privileged | 133 | #define ASYNCB_FIRST_KERNEL 22 |
132 | * users can set or reset */ | 134 | |
133 | 135 | #define ASYNC_HUP_NOTIFY (1U << ASYNCB_HUP_NOTIFY) | |
134 | /* Internal flags used only by kernel/chr_drv/serial.c */ | 136 | #define ASYNC_FOURPORT (1U << ASYNCB_FOURPORT) |
135 | #define ASYNC_INITIALIZED 0x80000000 /* Serial port was initialized */ | 137 | #define ASYNC_SAK (1U << ASYNCB_SAK) |
136 | #define ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device is active */ | 138 | #define ASYNC_SPLIT_TERMIOS (1U << ASYNCB_SPLIT_TERMIOS) |
137 | #define ASYNC_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */ | 139 | #define ASYNC_SPD_HI (1U << ASYNCB_SPD_HI) |
138 | #define ASYNC_CLOSING 0x08000000 /* Serial port is closing */ | 140 | #define ASYNC_SPD_VHI (1U << ASYNCB_SPD_VHI) |
139 | #define ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */ | 141 | #define ASYNC_SKIP_TEST (1U << ASYNCB_SKIP_TEST) |
140 | #define ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */ | 142 | #define ASYNC_AUTO_IRQ (1U << ASYNCB_AUTO_IRQ) |
141 | #define ASYNC_SHARE_IRQ 0x01000000 /* for multifunction cards | 143 | #define ASYNC_SESSION_LOCKOUT (1U << ASYNCB_SESSION_LOCKOUT) |
142 | --- no longer used */ | 144 | #define ASYNC_PGRP_LOCKOUT (1U << ASYNCB_PGRP_LOCKOUT) |
143 | #define ASYNC_CONS_FLOW 0x00800000 /* flow control for console */ | 145 | #define ASYNC_CALLOUT_NOHUP (1U << ASYNCB_CALLOUT_NOHUP) |
144 | 146 | #define ASYNC_HARDPPS_CD (1U << ASYNCB_HARDPPS_CD) | |
145 | #define ASYNC_BOOT_ONLYMCA 0x00400000 /* Probe only if MCA bus */ | 147 | #define ASYNC_SPD_SHI (1U << ASYNCB_SPD_SHI) |
146 | #define ASYNC_INTERNAL_FLAGS 0xFFC00000 /* Internal flags */ | 148 | #define ASYNC_LOW_LATENCY (1U << ASYNCB_LOW_LATENCY) |
149 | #define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART) | ||
150 | #define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE) | ||
151 | |||
152 | #define ASYNC_FLAGS ((1U << ASYNCB_LAST_USER) - 1) | ||
153 | #define ASYNC_USR_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI| \ | ||
154 | ASYNC_CALLOUT_NOHUP|ASYNC_SPD_SHI|ASYNC_LOW_LATENCY) | ||
155 | #define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI) | ||
156 | #define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI) | ||
157 | #define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI) | ||
158 | |||
159 | #define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED) | ||
160 | #define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE) | ||
161 | #define ASYNC_BOOT_AUTOCONF (1U << ASYNCB_BOOT_AUTOCONF) | ||
162 | #define ASYNC_CLOSING (1U << ASYNCB_CLOSING) | ||
163 | #define ASYNC_CTS_FLOW (1U << ASYNCB_CTS_FLOW) | ||
164 | #define ASYNC_CHECK_CD (1U << ASYNCB_CHECK_CD) | ||
165 | #define ASYNC_SHARE_IRQ (1U << ASYNCB_SHARE_IRQ) | ||
166 | #define ASYNC_CONS_FLOW (1U << ASYNCB_CONS_FLOW) | ||
167 | #define ASYNC_BOOT_ONLYMCA (1U << ASYNCB_BOOT_ONLYMCA) | ||
168 | #define ASYNC_INTERNAL_FLAGS (~((1U << ASYNCB_FIRST_KERNEL) - 1)) | ||
147 | 169 | ||
148 | /* | 170 | /* |
149 | * Multiport serial configuration structure --- external structure | 171 | * Multiport serial configuration structure --- external structure |
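Keeping both forms lets existing mask-based code continue to use the ASYNC_* values, while new code can treat the flag word as a bitmap via the ASYNCB_* bit numbers. A brief hedged sketch; port_flags and do_low_latency_setup() stand in for whatever state and helpers a real driver keeps.

#include <linux/bitops.h>
#include <linux/serial.h>

static void port_flag_demo(void)
{
	unsigned long port_flags = 0;

	set_bit(ASYNCB_INITIALIZED, &port_flags);	/* port is now set up */
	if (test_bit(ASYNCB_LOW_LATENCY, &port_flags))
		do_low_latency_setup();			/* hypothetical helper */
	clear_bit(ASYNCB_INITIALIZED, &port_flags);	/* port torn down again */
}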
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 57a97e52e58d..6fd80c4243f1 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -41,7 +41,8 @@ | |||
41 | #define PORT_XSCALE 15 | 41 | #define PORT_XSCALE 15 |
42 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ | 42 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ |
43 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ | 43 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ |
44 | #define PORT_MAX_8250 17 /* max port ID */ | 44 | #define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ |
45 | #define PORT_MAX_8250 18 /* max port ID */ | ||
45 | 46 | ||
46 | /* | 47 | /* |
47 | * ARM specific type numbers. These are not currently guaranteed | 48 | * ARM specific type numbers. These are not currently guaranteed |
@@ -167,6 +168,9 @@ | |||
167 | /* MAX3100 */ | 168 | /* MAX3100 */ |
168 | #define PORT_MAX3100 86 | 169 | #define PORT_MAX3100 86 |
169 | 170 | ||
171 | /* Timberdale UART */ | ||
172 | #define PORT_TIMBUART 87 | ||
173 | |||
170 | #ifdef __KERNEL__ | 174 | #ifdef __KERNEL__ |
171 | 175 | ||
172 | #include <linux/compiler.h> | 176 | #include <linux/compiler.h> |
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h index 893cc53486bc..1c297ddc9d5a 100644 --- a/include/linux/serial_sci.h +++ b/include/linux/serial_sci.h | |||
@@ -25,8 +25,7 @@ struct plat_sci_port { | |||
25 | unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */ | 25 | unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */ |
26 | unsigned int type; /* SCI / SCIF / IRDA */ | 26 | unsigned int type; /* SCI / SCIF / IRDA */ |
27 | upf_t flags; /* UPF_* flags */ | 27 | upf_t flags; /* UPF_* flags */ |
28 | char *clk; /* clock string */ | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | int early_sci_setup(struct uart_port *port); | ||
31 | |||
32 | #endif /* __LINUX_SERIAL_SCI_H */ | 31 | #endif /* __LINUX_SERIAL_SCI_H */ |
diff --git a/include/linux/sh_cmt.h b/include/linux/sh_cmt.h deleted file mode 100644 index 68cacde5954f..000000000000 --- a/include/linux/sh_cmt.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #ifndef __SH_CMT_H__ | ||
2 | #define __SH_CMT_H__ | ||
3 | |||
4 | struct sh_cmt_config { | ||
5 | char *name; | ||
6 | unsigned long channel_offset; | ||
7 | int timer_bit; | ||
8 | char *clk; | ||
9 | unsigned long clockevent_rating; | ||
10 | unsigned long clocksource_rating; | ||
11 | }; | ||
12 | |||
13 | #endif /* __SH_CMT_H__ */ | ||
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h new file mode 100644 index 000000000000..864bd56bd3b0 --- /dev/null +++ b/include/linux/sh_timer.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __SH_TIMER_H__ | ||
2 | #define __SH_TIMER_H__ | ||
3 | |||
4 | struct sh_timer_config { | ||
5 | char *name; | ||
6 | long channel_offset; | ||
7 | int timer_bit; | ||
8 | char *clk; | ||
9 | unsigned long clockevent_rating; | ||
10 | unsigned long clocksource_rating; | ||
11 | }; | ||
12 | |||
13 | #endif /* __SH_TIMER_H__ */ | ||
diff --git a/include/linux/signal.h b/include/linux/signal.h index 84f997f8aa53..c7552836bd95 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -235,6 +235,8 @@ static inline int valid_signal(unsigned long sig) | |||
235 | extern int next_signal(struct sigpending *pending, sigset_t *mask); | 235 | extern int next_signal(struct sigpending *pending, sigset_t *mask); |
236 | extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); | 236 | extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); |
237 | extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); | 237 | extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); |
238 | extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, | ||
239 | siginfo_t *info); | ||
238 | extern long do_sigpending(void __user *, unsigned long); | 240 | extern long do_sigpending(void __user *, unsigned long); |
239 | extern int sigprocmask(int, sigset_t *, sigset_t *); | 241 | extern int sigprocmask(int, sigset_t *, sigset_t *); |
240 | extern int show_unhandled_signals; | 242 | extern int show_unhandled_signals; |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 24c5602bee99..219b8fb4651d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -62,6 +62,8 @@ | |||
62 | # define SLAB_DEBUG_OBJECTS 0x00000000UL | 62 | # define SLAB_DEBUG_OBJECTS 0x00000000UL |
63 | #endif | 63 | #endif |
64 | 64 | ||
65 | #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ | ||
66 | |||
65 | /* The following flags affect the page allocator grouping pages by mobility */ | 67 | /* The following flags affect the page allocator grouping pages by mobility */ |
66 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ | 68 | #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ |
67 | #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ | 69 | #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ |
@@ -317,4 +319,6 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) | |||
317 | return kmalloc_node(size, flags | __GFP_ZERO, node); | 319 | return kmalloc_node(size, flags | __GFP_ZERO, node); |
318 | } | 320 | } |
319 | 321 | ||
322 | void __init kmem_cache_init_late(void); | ||
323 | |||
320 | #endif /* _LINUX_SLAB_H */ | 324 | #endif /* _LINUX_SLAB_H */ |
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 5ac9b0bcaf9a..713f841ecaa9 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ | 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ |
15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ | 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ |
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <trace/kmemtrace.h> | 17 | #include <linux/kmemtrace.h> |
18 | 18 | ||
19 | /* Size description struct for general caches. */ | 19 | /* Size description struct for general caches. */ |
20 | struct cache_sizes { | 20 | struct cache_sizes { |
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index 0ec00b39d006..bb5368df4be8 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h | |||
@@ -34,4 +34,9 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags) | |||
34 | return kmalloc(size, flags); | 34 | return kmalloc(size, flags); |
35 | } | 35 | } |
36 | 36 | ||
37 | static inline void kmem_cache_init_late(void) | ||
38 | { | ||
39 | /* Nothing to do */ | ||
40 | } | ||
41 | |||
37 | #endif /* __LINUX_SLOB_DEF_H */ | 42 | #endif /* __LINUX_SLOB_DEF_H */ |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 5046f90c1171..4dcbc2c71491 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -10,7 +10,7 @@ | |||
10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
12 | #include <linux/kobject.h> | 12 | #include <linux/kobject.h> |
13 | #include <trace/kmemtrace.h> | 13 | #include <linux/kmemtrace.h> |
14 | 14 | ||
15 | enum stat_item { | 15 | enum stat_item { |
16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ | 16 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
@@ -302,4 +302,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | |||
302 | } | 302 | } |
303 | #endif | 303 | #endif |
304 | 304 | ||
305 | void __init kmem_cache_init_late(void); | ||
306 | |||
305 | #endif /* _LINUX_SLUB_DEF_H */ | 307 | #endif /* _LINUX_SLUB_DEF_H */ |
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index 938234c4a996..d4841ed8215b 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h | |||
@@ -60,6 +60,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
60 | #define __raw_spin_is_locked(lock) ((void)(lock), 0) | 60 | #define __raw_spin_is_locked(lock) ((void)(lock), 0) |
61 | /* for sched.c and kernel_lock.c: */ | 61 | /* for sched.c and kernel_lock.c: */ |
62 | # define __raw_spin_lock(lock) do { (void)(lock); } while (0) | 62 | # define __raw_spin_lock(lock) do { (void)(lock); } while (0) |
63 | # define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) | ||
63 | # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) | 64 | # define __raw_spin_unlock(lock) do { (void)(lock); } while (0) |
64 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) | 65 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) |
65 | #endif /* DEBUG_SPINLOCK */ | 66 | #endif /* DEBUG_SPINLOCK */ |
diff --git a/include/linux/splice.h b/include/linux/splice.h index 5f3faa9d15ae..18e7c7c0cae6 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h | |||
@@ -11,8 +11,7 @@ | |||
11 | #include <linux/pipe_fs_i.h> | 11 | #include <linux/pipe_fs_i.h> |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * splice is tied to pipes as a transport (at least for now), so we'll just | 14 | * Flags passed in from splice/tee/vmsplice |
15 | * add the splice flags here. | ||
16 | */ | 15 | */ |
17 | #define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ | 16 | #define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ |
18 | #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ | 17 | #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */ |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 795032edfc46..cd15df6c63cd 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -245,11 +245,6 @@ extern unsigned long get_safe_page(gfp_t gfp_mask); | |||
245 | 245 | ||
246 | extern void hibernation_set_ops(struct platform_hibernation_ops *ops); | 246 | extern void hibernation_set_ops(struct platform_hibernation_ops *ops); |
247 | extern int hibernate(void); | 247 | extern int hibernate(void); |
248 | extern int hibernate_nvs_register(unsigned long start, unsigned long size); | ||
249 | extern int hibernate_nvs_alloc(void); | ||
250 | extern void hibernate_nvs_free(void); | ||
251 | extern void hibernate_nvs_save(void); | ||
252 | extern void hibernate_nvs_restore(void); | ||
253 | extern bool system_entering_hibernation(void); | 248 | extern bool system_entering_hibernation(void); |
254 | #else /* CONFIG_HIBERNATION */ | 249 | #else /* CONFIG_HIBERNATION */ |
255 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } | 250 | static inline int swsusp_page_is_forbidden(struct page *p) { return 0; } |
@@ -258,6 +253,16 @@ static inline void swsusp_unset_page_free(struct page *p) {} | |||
258 | 253 | ||
259 | static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} | 254 | static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} |
260 | static inline int hibernate(void) { return -ENOSYS; } | 255 | static inline int hibernate(void) { return -ENOSYS; } |
256 | static inline bool system_entering_hibernation(void) { return false; } | ||
257 | #endif /* CONFIG_HIBERNATION */ | ||
258 | |||
259 | #ifdef CONFIG_HIBERNATION_NVS | ||
260 | extern int hibernate_nvs_register(unsigned long start, unsigned long size); | ||
261 | extern int hibernate_nvs_alloc(void); | ||
262 | extern void hibernate_nvs_free(void); | ||
263 | extern void hibernate_nvs_save(void); | ||
264 | extern void hibernate_nvs_restore(void); | ||
265 | #else /* CONFIG_HIBERNATION_NVS */ | ||
261 | static inline int hibernate_nvs_register(unsigned long a, unsigned long b) | 266 | static inline int hibernate_nvs_register(unsigned long a, unsigned long b) |
262 | { | 267 | { |
263 | return 0; | 268 | return 0; |
@@ -266,8 +271,7 @@ static inline int hibernate_nvs_alloc(void) { return 0; } | |||
266 | static inline void hibernate_nvs_free(void) {} | 271 | static inline void hibernate_nvs_free(void) {} |
267 | static inline void hibernate_nvs_save(void) {} | 272 | static inline void hibernate_nvs_save(void) {} |
268 | static inline void hibernate_nvs_restore(void) {} | 273 | static inline void hibernate_nvs_restore(void) {} |
269 | static inline bool system_entering_hibernation(void) { return false; } | 274 | #endif /* CONFIG_HIBERNATION_NVS */ |
270 | #endif /* CONFIG_HIBERNATION */ | ||
271 | 275 | ||
272 | #ifdef CONFIG_PM_SLEEP | 276 | #ifdef CONFIG_PM_SLEEP |
273 | void save_processor_state(void); | 277 | void save_processor_state(void); |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index ac9ff54f7cb3..cb1a6631b8f4 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
@@ -29,7 +29,8 @@ extern void *swiotlb_alloc(unsigned order, unsigned long nslabs); | |||
29 | 29 | ||
30 | extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, | 30 | extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, |
31 | phys_addr_t address); | 31 | phys_addr_t address); |
32 | extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); | 32 | extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, |
33 | dma_addr_t address); | ||
33 | 34 | ||
34 | extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size); | 35 | extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size); |
35 | 36 | ||
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 30520844b8da..418d90f5effe 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -55,6 +55,7 @@ struct compat_timeval; | |||
55 | struct robust_list_head; | 55 | struct robust_list_head; |
56 | struct getcpu_cache; | 56 | struct getcpu_cache; |
57 | struct old_linux_dirent; | 57 | struct old_linux_dirent; |
58 | struct perf_counter_attr; | ||
58 | 59 | ||
59 | #include <linux/types.h> | 60 | #include <linux/types.h> |
60 | #include <linux/aio_abi.h> | 61 | #include <linux/aio_abi.h> |
@@ -755,4 +756,8 @@ asmlinkage long sys_pipe(int __user *); | |||
755 | 756 | ||
756 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); | 757 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); |
757 | 758 | ||
759 | |||
760 | asmlinkage long sys_perf_counter_open( | ||
761 | struct perf_counter_attr __user *attr_uptr, | ||
762 | pid_t pid, int cpu, int group_fd, unsigned long flags); | ||
758 | #endif | 763 | #endif |
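Note: the new sys_perf_counter_open() declaration matches a five-argument syscall. A hedged userspace sketch of a wrapper, assuming the architecture defines __NR_perf_counter_open and that struct perf_counter_attr comes from the perf_counter headers introduced elsewhere in this series (neither is part of this hunk):

	/* Hypothetical userspace wrapper for the new syscall. */
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/perf_counter.h>	/* assumed to provide perf_counter_attr */

	static long perf_counter_open(struct perf_counter_attr *attr,
				      pid_t pid, int cpu, int group_fd,
				      unsigned long flags)
	{
		/* __NR_perf_counter_open is assumed to be defined for this arch. */
		return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
	}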
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index e6b820f8b56b..a8cc4e13434c 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -21,13 +21,14 @@ struct restart_block { | |||
21 | struct { | 21 | struct { |
22 | unsigned long arg0, arg1, arg2, arg3; | 22 | unsigned long arg0, arg1, arg2, arg3; |
23 | }; | 23 | }; |
24 | /* For futex_wait */ | 24 | /* For futex_wait and futex_wait_requeue_pi */ |
25 | struct { | 25 | struct { |
26 | u32 *uaddr; | 26 | u32 *uaddr; |
27 | u32 val; | 27 | u32 val; |
28 | u32 flags; | 28 | u32 flags; |
29 | u32 bitset; | 29 | u32 bitset; |
30 | u64 time; | 30 | u64 time; |
31 | u32 *uaddr2; | ||
31 | } futex; | 32 | } futex; |
32 | /* For nanosleep */ | 33 | /* For nanosleep */ |
33 | struct { | 34 | struct { |
diff --git a/include/linux/time.h b/include/linux/time.h index 242f62499bb7..ea16c1a01d51 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -113,6 +113,21 @@ struct timespec current_kernel_time(void); | |||
113 | #define CURRENT_TIME (current_kernel_time()) | 113 | #define CURRENT_TIME (current_kernel_time()) |
114 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) | 114 | #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) |
115 | 115 | ||
116 | /* Some architectures do not supply their own clocksource. | ||
117 | * This is mainly the case in architectures that get their | ||
118 | * inter-tick times by reading the counter on their interval | ||
119 | * timer. Since these timers wrap every tick, they're not really | ||
120 | * useful as clocksources. Wrapping them to act like one is possible | ||
121 | * but not very efficient. So we provide a callout these arches | ||
122 | * can implement for use with the jiffies clocksource to provide | ||
123 | * finer than tick granular time. | ||
124 | */ | ||
125 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET | ||
126 | extern u32 arch_gettimeoffset(void); | ||
127 | #else | ||
128 | static inline u32 arch_gettimeoffset(void) { return 0; } | ||
129 | #endif | ||
130 | |||
116 | extern void do_gettimeofday(struct timeval *tv); | 131 | extern void do_gettimeofday(struct timeval *tv); |
117 | extern int do_settimeofday(struct timespec *tv); | 132 | extern int do_settimeofday(struct timespec *tv); |
118 | extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); | 133 | extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz); |
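Note: an architecture selecting CONFIG_ARCH_USES_GETTIMEOFFSET supplies the callout itself. A hedged sketch of what such an implementation might look like; read_timer_count(), TIMER_RELOAD and NSEC_PER_COUNT are made-up names, and the assumption that the generic timekeeping code consumes the return value as a nanosecond offset on top of the jiffies clocksource is not stated in this hunk:

	/* Hypothetical arch-side implementation: derive a sub-tick offset from
	 * a down-counting interval timer that reloads every tick. */
	u32 arch_gettimeoffset(void)
	{
		u32 elapsed = TIMER_RELOAD - read_timer_count();	/* counts since last tick */

		return elapsed * NSEC_PER_COUNT;	/* scaled for the timekeeping core */
	}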
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h new file mode 100644 index 000000000000..c68bccba2074 --- /dev/null +++ b/include/linux/trace_seq.h | |||
@@ -0,0 +1,92 @@ | |||
1 | #ifndef _LINUX_TRACE_SEQ_H | ||
2 | #define _LINUX_TRACE_SEQ_H | ||
3 | |||
4 | #include <linux/fs.h> | ||
5 | |||
6 | /* | ||
7 | * Trace sequences are used to allow a function to call several other functions | ||
8 | * to create a string of data to use (up to a max of PAGE_SIZE). | ||
9 | */ | ||
10 | |||
11 | struct trace_seq { | ||
12 | unsigned char buffer[PAGE_SIZE]; | ||
13 | unsigned int len; | ||
14 | unsigned int readpos; | ||
15 | }; | ||
16 | |||
17 | static inline void | ||
18 | trace_seq_init(struct trace_seq *s) | ||
19 | { | ||
20 | s->len = 0; | ||
21 | s->readpos = 0; | ||
22 | } | ||
23 | |||
24 | /* | ||
25 | * Currently only defined when tracing is enabled. | ||
26 | */ | ||
27 | #ifdef CONFIG_TRACING | ||
28 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
29 | __attribute__ ((format (printf, 2, 3))); | ||
30 | extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) | ||
31 | __attribute__ ((format (printf, 2, 0))); | ||
32 | extern int | ||
33 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); | ||
34 | extern void trace_print_seq(struct seq_file *m, struct trace_seq *s); | ||
35 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | ||
36 | size_t cnt); | ||
37 | extern int trace_seq_puts(struct trace_seq *s, const char *str); | ||
38 | extern int trace_seq_putc(struct trace_seq *s, unsigned char c); | ||
39 | extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len); | ||
40 | extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | ||
41 | size_t len); | ||
42 | extern void *trace_seq_reserve(struct trace_seq *s, size_t len); | ||
43 | extern int trace_seq_path(struct trace_seq *s, struct path *path); | ||
44 | |||
45 | #else /* CONFIG_TRACING */ | ||
46 | static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
47 | { | ||
48 | return 0; | ||
49 | } | ||
50 | static inline int | ||
51 | trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | ||
52 | { | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s) | ||
57 | { | ||
58 | } | ||
59 | static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | ||
60 | size_t cnt) | ||
61 | { | ||
62 | return 0; | ||
63 | } | ||
64 | static inline int trace_seq_puts(struct trace_seq *s, const char *str) | ||
65 | { | ||
66 | return 0; | ||
67 | } | ||
68 | static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) | ||
69 | { | ||
70 | return 0; | ||
71 | } | ||
72 | static inline int | ||
73 | trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) | ||
74 | { | ||
75 | return 0; | ||
76 | } | ||
77 | static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | ||
78 | size_t len) | ||
79 | { | ||
80 | return 0; | ||
81 | } | ||
82 | static inline void *trace_seq_reserve(struct trace_seq *s, size_t len) | ||
83 | { | ||
84 | return NULL; | ||
85 | } | ||
86 | static inline int trace_seq_path(struct trace_seq *s, struct path *path) | ||
87 | { | ||
88 | return 0; | ||
89 | } | ||
90 | #endif /* CONFIG_TRACING */ | ||
91 | |||
92 | #endif /* _LINUX_TRACE_SEQ_H */ | ||
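Note: a short usage sketch of the trace_seq API declared above, as a tracer's output path might use it. The surrounding function and struct my_record are hypothetical; only the trace_seq calls come from this header. Real tracers embed the trace_seq in their iterator state, so the static buffer here is a non-reentrant simplification:

	/* Hedged sketch: build up one line of output and hand it to a seq_file. */
	static void example_print_record(struct seq_file *m, const struct my_record *r)
	{
		static struct trace_seq s;	/* PAGE_SIZE buffer: keep it off the stack */

		trace_seq_init(&s);
		trace_seq_printf(&s, "cpu=%d ", r->cpu);
		trace_seq_puts(&s, r->comm);
		trace_seq_putc(&s, '\n');
		trace_print_seq(m, &s);
	}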
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h index c7aa154f4bfc..eb96603d92db 100644 --- a/include/linux/tracehook.h +++ b/include/linux/tracehook.h | |||
@@ -259,14 +259,12 @@ static inline void tracehook_finish_clone(struct task_struct *child, | |||
259 | 259 | ||
260 | /** | 260 | /** |
261 | * tracehook_report_clone - in parent, new child is about to start running | 261 | * tracehook_report_clone - in parent, new child is about to start running |
262 | * @trace: return value from tracehook_prepare_clone() | ||
263 | * @regs: parent's user register state | 262 | * @regs: parent's user register state |
264 | * @clone_flags: flags from parent's system call | 263 | * @clone_flags: flags from parent's system call |
265 | * @pid: new child's PID in the parent's namespace | 264 | * @pid: new child's PID in the parent's namespace |
266 | * @child: new child task | 265 | * @child: new child task |
267 | * | 266 | * |
268 | * Called after a child is set up, but before it has been started | 267 | * Called after a child is set up, but before it has been started running. |
269 | * running. @trace is the value returned by tracehook_prepare_clone(). | ||
270 | * This is not a good place to block, because the child has not started | 268 | * This is not a good place to block, because the child has not started |
271 | * yet. Suspend the child here if desired, and then block in | 269 | * yet. Suspend the child here if desired, and then block in |
272 | * tracehook_report_clone_complete(). This must prevent the child from | 270 | * tracehook_report_clone_complete(). This must prevent the child from |
@@ -276,13 +274,14 @@ static inline void tracehook_finish_clone(struct task_struct *child, | |||
276 | * | 274 | * |
277 | * Called with no locks held, but the child cannot run until this returns. | 275 | * Called with no locks held, but the child cannot run until this returns. |
278 | */ | 276 | */ |
279 | static inline void tracehook_report_clone(int trace, struct pt_regs *regs, | 277 | static inline void tracehook_report_clone(struct pt_regs *regs, |
280 | unsigned long clone_flags, | 278 | unsigned long clone_flags, |
281 | pid_t pid, struct task_struct *child) | 279 | pid_t pid, struct task_struct *child) |
282 | { | 280 | { |
283 | if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) { | 281 | if (unlikely(task_ptrace(child))) { |
284 | /* | 282 | /* |
285 | * The child starts up with an immediate SIGSTOP. | 283 | * It doesn't matter who attached or is attaching to this
284 | * task; the pending SIGSTOP is right in any case. | ||
286 | */ | 285 | */ |
287 | sigaddset(&child->pending.signal, SIGSTOP); | 286 | sigaddset(&child->pending.signal, SIGSTOP); |
288 | set_tsk_thread_flag(child, TIF_SIGPENDING); | 287 | set_tsk_thread_flag(child, TIF_SIGPENDING); |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index d35a7ee7611f..14df7e635d43 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -31,6 +31,8 @@ struct tracepoint { | |||
31 | * Keep in sync with vmlinux.lds.h. | 31 | * Keep in sync with vmlinux.lds.h. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #ifndef DECLARE_TRACE | ||
35 | |||
34 | #define TP_PROTO(args...) args | 36 | #define TP_PROTO(args...) args |
35 | #define TP_ARGS(args...) args | 37 | #define TP_ARGS(args...) args |
36 | 38 | ||
@@ -114,6 +116,7 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, | |||
114 | struct tracepoint *end) | 116 | struct tracepoint *end) |
115 | { } | 117 | { } |
116 | #endif /* CONFIG_TRACEPOINTS */ | 118 | #endif /* CONFIG_TRACEPOINTS */ |
119 | #endif /* DECLARE_TRACE */ | ||
117 | 120 | ||
118 | /* | 121 | /* |
119 | * Connect a probe to a tracepoint. | 122 | * Connect a probe to a tracepoint. |
@@ -154,10 +157,8 @@ static inline void tracepoint_synchronize_unregister(void) | |||
154 | } | 157 | } |
155 | 158 | ||
156 | #define PARAMS(args...) args | 159 | #define PARAMS(args...) args |
157 | #define TRACE_FORMAT(name, proto, args, fmt) \ | ||
158 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | ||
159 | |||
160 | 160 | ||
161 | #ifndef TRACE_EVENT | ||
161 | /* | 162 | /* |
162 | * For use with the TRACE_EVENT macro: | 163 | * For use with the TRACE_EVENT macro: |
163 | * | 164 | * |
@@ -262,5 +263,6 @@ static inline void tracepoint_synchronize_unregister(void) | |||
262 | 263 | ||
263 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ | 264 | #define TRACE_EVENT(name, proto, args, struct, assign, print) \ |
264 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) | 265 | DECLARE_TRACE(name, PARAMS(proto), PARAMS(args)) |
266 | #endif | ||
265 | 267 | ||
266 | #endif | 268 | #endif |
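Note: with TRACE_FORMAT removed and TRACE_EVENT now only a fallback definition, the basic pattern this header still provides is DECLARE_TRACE plus a probe. A hedged sketch with illustrative names (DEFINE_TRACE and register/unregister_trace_##name are declared elsewhere in this header, not in this hunk):

	/* In a shared header: */
	DECLARE_TRACE(example_done,
		TP_PROTO(int unit, unsigned long count),
		TP_ARGS(unit, count));

	/* In exactly one compilation unit: */
	DEFINE_TRACE(example_done);

	/* At the instrumentation site: */
	trace_example_done(unit, count);

	/* A probe with a matching prototype, attached and detached via
	 * register_trace_example_done() / unregister_trace_example_done(): */
	static void example_probe(int unit, unsigned long count)
	{
		/* handle the event */
	}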
diff --git a/include/linux/tty.h b/include/linux/tty.h index fc39db95499f..1488d8c81aac 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -185,7 +185,7 @@ struct tty_port; | |||
185 | struct tty_port_operations { | 185 | struct tty_port_operations { |
186 | /* Return 1 if the carrier is raised */ | 186 | /* Return 1 if the carrier is raised */ |
187 | int (*carrier_raised)(struct tty_port *port); | 187 | int (*carrier_raised)(struct tty_port *port); |
188 | void (*raise_dtr_rts)(struct tty_port *port); | 188 | void (*dtr_rts)(struct tty_port *port, int raise); |
189 | }; | 189 | }; |
190 | 190 | ||
191 | struct tty_port { | 191 | struct tty_port { |
@@ -201,6 +201,9 @@ struct tty_port { | |||
201 | unsigned char *xmit_buf; /* Optional buffer */ | 201 | unsigned char *xmit_buf; /* Optional buffer */ |
202 | int close_delay; /* Close port delay */ | 202 | int close_delay; /* Close port delay */ |
203 | int closing_wait; /* Delay for output */ | 203 | int closing_wait; /* Delay for output */ |
204 | int drain_delay; /* Set to zero if no pure time | ||
205 | based drain is needed else | ||
206 | set to size of fifo */ | ||
204 | }; | 207 | }; |
205 | 208 | ||
206 | /* | 209 | /* |
@@ -223,8 +226,11 @@ struct tty_struct { | |||
223 | struct tty_driver *driver; | 226 | struct tty_driver *driver; |
224 | const struct tty_operations *ops; | 227 | const struct tty_operations *ops; |
225 | int index; | 228 | int index; |
226 | /* The ldisc objects are protected by tty_ldisc_lock at the moment */ | 229 | |
227 | struct tty_ldisc ldisc; | 230 | /* Protects ldisc changes: Lock tty not pty */ |
231 | struct mutex ldisc_mutex; | ||
232 | struct tty_ldisc *ldisc; | ||
233 | |||
228 | struct mutex termios_mutex; | 234 | struct mutex termios_mutex; |
229 | spinlock_t ctrl_lock; | 235 | spinlock_t ctrl_lock; |
230 | /* Termios values are protected by the termios mutex */ | 236 | /* Termios values are protected by the termios mutex */ |
@@ -311,6 +317,7 @@ struct tty_struct { | |||
311 | #define TTY_CLOSING 7 /* ->close() in progress */ | 317 | #define TTY_CLOSING 7 /* ->close() in progress */ |
312 | #define TTY_LDISC 9 /* Line discipline attached */ | 318 | #define TTY_LDISC 9 /* Line discipline attached */ |
313 | #define TTY_LDISC_CHANGING 10 /* Line discipline changing */ | 319 | #define TTY_LDISC_CHANGING 10 /* Line discipline changing */ |
320 | #define TTY_LDISC_OPEN 11 /* Line discipline is open */ | ||
314 | #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ | 321 | #define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */ |
315 | #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ | 322 | #define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */ |
316 | #define TTY_PTY_LOCK 16 /* pty private */ | 323 | #define TTY_PTY_LOCK 16 /* pty private */ |
@@ -403,6 +410,7 @@ extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b); | |||
403 | extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); | 410 | extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); |
404 | extern void tty_ldisc_deref(struct tty_ldisc *); | 411 | extern void tty_ldisc_deref(struct tty_ldisc *); |
405 | extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); | 412 | extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); |
413 | extern void tty_ldisc_hangup(struct tty_struct *tty); | ||
406 | extern const struct file_operations tty_ldiscs_proc_fops; | 414 | extern const struct file_operations tty_ldiscs_proc_fops; |
407 | 415 | ||
408 | extern void tty_wakeup(struct tty_struct *tty); | 416 | extern void tty_wakeup(struct tty_struct *tty); |
@@ -425,6 +433,9 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx, | |||
425 | extern void tty_release_dev(struct file *filp); | 433 | extern void tty_release_dev(struct file *filp); |
426 | extern int tty_init_termios(struct tty_struct *tty); | 434 | extern int tty_init_termios(struct tty_struct *tty); |
427 | 435 | ||
436 | extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); | ||
437 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); | ||
438 | |||
428 | extern struct mutex tty_mutex; | 439 | extern struct mutex tty_mutex; |
429 | 440 | ||
430 | extern void tty_write_unlock(struct tty_struct *tty); | 441 | extern void tty_write_unlock(struct tty_struct *tty); |
@@ -438,6 +449,7 @@ extern struct tty_struct *tty_port_tty_get(struct tty_port *port); | |||
438 | extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); | 449 | extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); |
439 | extern int tty_port_carrier_raised(struct tty_port *port); | 450 | extern int tty_port_carrier_raised(struct tty_port *port); |
440 | extern void tty_port_raise_dtr_rts(struct tty_port *port); | 451 | extern void tty_port_raise_dtr_rts(struct tty_port *port); |
452 | extern void tty_port_lower_dtr_rts(struct tty_port *port); | ||
441 | extern void tty_port_hangup(struct tty_port *port); | 453 | extern void tty_port_hangup(struct tty_port *port); |
442 | extern int tty_port_block_til_ready(struct tty_port *port, | 454 | extern int tty_port_block_til_ready(struct tty_port *port, |
443 | struct tty_struct *tty, struct file *filp); | 455 | struct tty_struct *tty, struct file *filp); |
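Note: the single raise_dtr_rts() hook is replaced by dtr_rts(port, raise), so one operation now covers both raising and dropping the lines (tty_port_lower_dtr_rts() above is the new drop-side caller). A hedged driver-side sketch; example_priv, to_example_priv() and the example_set/clear_mctrl() helpers are hypothetical:

	static void example_dtr_rts(struct tty_port *port, int raise)
	{
		struct example_priv *priv = to_example_priv(port);

		if (raise)
			example_set_mctrl(priv, TIOCM_DTR | TIOCM_RTS);
		else
			example_clear_mctrl(priv, TIOCM_DTR | TIOCM_RTS);
	}

	static const struct tty_port_operations example_port_ops = {
		.dtr_rts = example_dtr_rts,
	};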
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index bcba84ea2d86..3566129384a4 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
@@ -127,7 +127,8 @@ | |||
127 | * the line discipline are close to full, and it should somehow | 127 | * the line discipline are close to full, and it should somehow |
128 | * signal that no more characters should be sent to the tty. | 128 | * signal that no more characters should be sent to the tty. |
129 | * | 129 | * |
130 | * Optional: Always invoke via tty_throttle(); | 130 | * Optional: Always invoke via tty_throttle(), called under the |
131 | * termios lock. | ||
131 | * | 132 | * |
132 | * void (*unthrottle)(struct tty_struct * tty); | 133 | * void (*unthrottle)(struct tty_struct * tty); |
133 | * | 134 | * |
@@ -135,7 +136,8 @@ | |||
135 | * that characters can now be sent to the tty without fear of | 136 | * that characters can now be sent to the tty without fear of |
136 | * overrunning the input buffers of the line disciplines. | 137 | * overrunning the input buffers of the line disciplines. |
137 | * | 138 | * |
138 | * Optional: Always invoke via tty_unthrottle(); | 139 | * Optional: Always invoke via tty_unthrottle(), called under the |
140 | * termios lock. | ||
139 | * | 141 | * |
140 | * void (*stop)(struct tty_struct *tty); | 142 | * void (*stop)(struct tty_struct *tty); |
141 | * | 143 | * |
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 625e9e4639c6..8cdfed738fe4 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
@@ -224,8 +224,7 @@ struct usb_serial_driver { | |||
224 | /* Called by console with tty = NULL and by tty */ | 224 | /* Called by console with tty = NULL and by tty */ |
225 | int (*open)(struct tty_struct *tty, | 225 | int (*open)(struct tty_struct *tty, |
226 | struct usb_serial_port *port, struct file *filp); | 226 | struct usb_serial_port *port, struct file *filp); |
227 | void (*close)(struct tty_struct *tty, | 227 | void (*close)(struct usb_serial_port *port); |
228 | struct usb_serial_port *port, struct file *filp); | ||
229 | int (*write)(struct tty_struct *tty, struct usb_serial_port *port, | 228 | int (*write)(struct tty_struct *tty, struct usb_serial_port *port, |
230 | const unsigned char *buf, int count); | 229 | const unsigned char *buf, int count); |
231 | /* Called only by the tty layer */ | 230 | /* Called only by the tty layer */ |
@@ -241,6 +240,10 @@ struct usb_serial_driver { | |||
241 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 240 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
242 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 241 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
243 | unsigned int set, unsigned int clear); | 242 | unsigned int set, unsigned int clear); |
243 | /* Called by the tty layer for port level work. There may or may not | ||
244 | be an attached tty at this point */ | ||
245 | void (*dtr_rts)(struct usb_serial_port *port, int on); | ||
246 | int (*carrier_raised)(struct usb_serial_port *port); | ||
244 | /* USB events */ | 247 | /* USB events */ |
245 | void (*read_int_callback)(struct urb *urb); | 248 | void (*read_int_callback)(struct urb *urb); |
246 | void (*write_int_callback)(struct urb *urb); | 249 | void (*write_int_callback)(struct urb *urb); |
@@ -283,8 +286,7 @@ extern int usb_serial_generic_open(struct tty_struct *tty, | |||
283 | struct usb_serial_port *port, struct file *filp); | 286 | struct usb_serial_port *port, struct file *filp); |
284 | extern int usb_serial_generic_write(struct tty_struct *tty, | 287 | extern int usb_serial_generic_write(struct tty_struct *tty, |
285 | struct usb_serial_port *port, const unsigned char *buf, int count); | 288 | struct usb_serial_port *port, const unsigned char *buf, int count); |
286 | extern void usb_serial_generic_close(struct tty_struct *tty, | 289 | extern void usb_serial_generic_close(struct usb_serial_port *port); |
287 | struct usb_serial_port *port, struct file *filp); | ||
288 | extern int usb_serial_generic_resume(struct usb_serial *serial); | 290 | extern int usb_serial_generic_resume(struct usb_serial *serial); |
289 | extern int usb_serial_generic_write_room(struct tty_struct *tty); | 291 | extern int usb_serial_generic_write_room(struct tty_struct *tty); |
290 | extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); | 292 | extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); |
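Note: the usb-serial close() method drops its tty and file arguments, and port-level dtr_rts()/carrier_raised() hooks are added, mirroring the tty_port change. A hedged sketch of a driver adapting to the new prototypes; the example_* helpers are hypothetical:

	static void example_close(struct usb_serial_port *port)
	{
		usb_kill_urb(port->read_urb);	/* tty/file no longer passed in */
	}

	static void example_dtr_rts(struct usb_serial_port *port, int on)
	{
		example_set_modem_lines(port, on);	/* hypothetical helper */
	}

	static int example_carrier_raised(struct usb_serial_port *port)
	{
		return example_read_dcd(port);		/* hypothetical register read */
	}

	static struct usb_serial_driver example_device = {
		.close		= example_close,
		.dtr_rts	= example_dtr_rts,
		.carrier_raised	= example_carrier_raised,
	};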
diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 06005fa9e982..4fca4f5440ba 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h | |||
@@ -10,14 +10,17 @@ | |||
10 | 10 | ||
11 | /** | 11 | /** |
12 | * virtqueue - a queue to register buffers for sending or receiving. | 12 | * virtqueue - a queue to register buffers for sending or receiving. |
13 | * @list: the chain of virtqueues for this device | ||
13 | * @callback: the function to call when buffers are consumed (can be NULL). | 14 | * @callback: the function to call when buffers are consumed (can be NULL). |
15 | * @name: the name of this virtqueue (mainly for debugging) | ||
14 | * @vdev: the virtio device this queue was created for. | 16 | * @vdev: the virtio device this queue was created for. |
15 | * @vq_ops: the operations for this virtqueue (see below). | 17 | * @vq_ops: the operations for this virtqueue (see below). |
16 | * @priv: a pointer for the virtqueue implementation to use. | 18 | * @priv: a pointer for the virtqueue implementation to use. |
17 | */ | 19 | */ |
18 | struct virtqueue | 20 | struct virtqueue { |
19 | { | 21 | struct list_head list; |
20 | void (*callback)(struct virtqueue *vq); | 22 | void (*callback)(struct virtqueue *vq); |
23 | const char *name; | ||
21 | struct virtio_device *vdev; | 24 | struct virtio_device *vdev; |
22 | struct virtqueue_ops *vq_ops; | 25 | struct virtqueue_ops *vq_ops; |
23 | void *priv; | 26 | void *priv; |
@@ -76,15 +79,16 @@ struct virtqueue_ops { | |||
76 | * @dev: underlying device. | 79 | * @dev: underlying device. |
77 | * @id: the device type identification (used to match it with a driver). | 80 | * @id: the device type identification (used to match it with a driver). |
78 | * @config: the configuration ops for this device. | 81 | * @config: the configuration ops for this device. |
82 | * @vqs: the list of virtqueues for this device. | ||
79 | * @features: the features supported by both driver and device. | 83 | * @features: the features supported by both driver and device. |
80 | * @priv: private pointer for the driver's use. | 84 | * @priv: private pointer for the driver's use. |
81 | */ | 85 | */ |
82 | struct virtio_device | 86 | struct virtio_device { |
83 | { | ||
84 | int index; | 87 | int index; |
85 | struct device dev; | 88 | struct device dev; |
86 | struct virtio_device_id id; | 89 | struct virtio_device_id id; |
87 | struct virtio_config_ops *config; | 90 | struct virtio_config_ops *config; |
91 | struct list_head vqs; | ||
88 | /* Note that this is a Linux set_bit-style bitmap. */ | 92 | /* Note that this is a Linux set_bit-style bitmap. */ |
89 | unsigned long features[1]; | 93 | unsigned long features[1]; |
90 | void *priv; | 94 | void *priv; |
@@ -99,8 +103,7 @@ void unregister_virtio_device(struct virtio_device *dev); | |||
99 | * @id_table: the ids serviced by this driver. | 103 | * @id_table: the ids serviced by this driver. |
100 | * @feature_table: an array of feature numbers supported by this device. | 104 | * @feature_table: an array of feature numbers supported by this device. |
101 | * @feature_table_size: number of entries in the feature table array. | 105 | * @feature_table_size: number of entries in the feature table array. |
102 | * @probe: the function to call when a device is found. Returns a token for | 106 | * @probe: the function to call when a device is found. Returns 0 or -errno. |
103 | * remove, or PTR_ERR(). | ||
104 | * @remove: the function when a device is removed. | 107 | * @remove: the function when a device is removed. |
105 | * @config_changed: optional function to call when the device configuration | 108 | * @config_changed: optional function to call when the device configuration |
106 | * changes; may be called in interrupt context. | 109 | * changes; may be called in interrupt context. |
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h index 94c56d29869d..be7d255fc7cf 100644 --- a/include/linux/virtio_blk.h +++ b/include/linux/virtio_blk.h | |||
@@ -15,6 +15,10 @@ | |||
15 | #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ | 15 | #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ |
16 | #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ | 16 | #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ |
17 | #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ | 17 | #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ |
18 | #define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ | ||
19 | #define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */ | ||
20 | |||
21 | #define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ | ||
18 | 22 | ||
19 | struct virtio_blk_config | 23 | struct virtio_blk_config |
20 | { | 24 | { |
@@ -32,6 +36,7 @@ struct virtio_blk_config | |||
32 | } geometry; | 36 | } geometry; |
33 | /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ | 37 | /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ |
34 | __u32 blk_size; | 38 | __u32 blk_size; |
39 | __u8 identify[VIRTIO_BLK_ID_BYTES]; | ||
35 | } __attribute__((packed)); | 40 | } __attribute__((packed)); |
36 | 41 | ||
37 | /* These two define direction. */ | 42 | /* These two define direction. */ |
@@ -55,6 +60,13 @@ struct virtio_blk_outhdr | |||
55 | __u64 sector; | 60 | __u64 sector; |
56 | }; | 61 | }; |
57 | 62 | ||
63 | struct virtio_scsi_inhdr { | ||
64 | __u32 errors; | ||
65 | __u32 data_len; | ||
66 | __u32 sense_len; | ||
67 | __u32 residual; | ||
68 | }; | ||
69 | |||
58 | /* And this is the final byte of the write scatter-gather list. */ | 70 | /* And this is the final byte of the write scatter-gather list. */ |
59 | #define VIRTIO_BLK_S_OK 0 | 71 | #define VIRTIO_BLK_S_OK 0 |
60 | #define VIRTIO_BLK_S_IOERR 1 | 72 | #define VIRTIO_BLK_S_IOERR 1 |
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index bf8ec283b232..99f514575f6a 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
@@ -29,6 +29,7 @@ | |||
29 | #define VIRTIO_F_NOTIFY_ON_EMPTY 24 | 29 | #define VIRTIO_F_NOTIFY_ON_EMPTY 24 |
30 | 30 | ||
31 | #ifdef __KERNEL__ | 31 | #ifdef __KERNEL__ |
32 | #include <linux/err.h> | ||
32 | #include <linux/virtio.h> | 33 | #include <linux/virtio.h> |
33 | 34 | ||
34 | /** | 35 | /** |
@@ -49,15 +50,26 @@ | |||
49 | * @set_status: write the status byte | 50 | * @set_status: write the status byte |
50 | * vdev: the virtio_device | 51 | * vdev: the virtio_device |
51 | * status: the new status byte | 52 | * status: the new status byte |
53 | * @request_vqs: request the specified number of virtqueues | ||
54 | * vdev: the virtio_device | ||
55 | * max_vqs: the max number of virtqueues we want | ||
56 | * If supplied, must call before any virtqueues are instantiated. | ||
57 | * To modify the max number of virtqueues after request_vqs has been | ||
58 | * called, call free_vqs and then request_vqs with a new value. | ||
59 | * @free_vqs: cleanup resources allocated by request_vqs | ||
60 | * vdev: the virtio_device | ||
61 | * If supplied, must call after all virtqueues have been deleted. | ||
52 | * @reset: reset the device | 62 | * @reset: reset the device |
53 | * vdev: the virtio device | 63 | * vdev: the virtio device |
54 | * After this, status and feature negotiation must be done again | 64 | * After this, status and feature negotiation must be done again |
55 | * @find_vq: find a virtqueue and instantiate it. | 65 | * @find_vqs: find virtqueues and instantiate them. |
56 | * vdev: the virtio_device | 66 | * vdev: the virtio_device |
57 | * index: the 0-based virtqueue number in case there's more than one. | 67 | * nvqs: the number of virtqueues to find |
58 | * callback: the virqtueue callback | 68 | * vqs: on success, includes new virtqueues |
59 | * Returns the new virtqueue or ERR_PTR() (eg. -ENOENT). | 69 | * callbacks: array of callbacks, for each virtqueue |
60 | * @del_vq: free a virtqueue found by find_vq(). | 70 | * names: array of virtqueue names (mainly for debugging) |
71 | * Returns 0 on success or error status | ||
72 | * @del_vqs: free virtqueues found by find_vqs(). | ||
61 | * @get_features: get the array of feature bits for this device. | 73 | * @get_features: get the array of feature bits for this device. |
62 | * vdev: the virtio_device | 74 | * vdev: the virtio_device |
63 | * Returns the first 32 feature bits (all we currently need). | 75 | * Returns the first 32 feature bits (all we currently need). |
@@ -66,6 +78,7 @@ | |||
66 | * This gives the final feature bits for the device: it can change | 78 | * This gives the final feature bits for the device: it can change |
67 | * the dev->feature bits if it wants. | 79 | * the dev->feature bits if it wants. |
68 | */ | 80 | */ |
81 | typedef void vq_callback_t(struct virtqueue *); | ||
69 | struct virtio_config_ops | 82 | struct virtio_config_ops |
70 | { | 83 | { |
71 | void (*get)(struct virtio_device *vdev, unsigned offset, | 84 | void (*get)(struct virtio_device *vdev, unsigned offset, |
@@ -75,10 +88,11 @@ struct virtio_config_ops | |||
75 | u8 (*get_status)(struct virtio_device *vdev); | 88 | u8 (*get_status)(struct virtio_device *vdev); |
76 | void (*set_status)(struct virtio_device *vdev, u8 status); | 89 | void (*set_status)(struct virtio_device *vdev, u8 status); |
77 | void (*reset)(struct virtio_device *vdev); | 90 | void (*reset)(struct virtio_device *vdev); |
78 | struct virtqueue *(*find_vq)(struct virtio_device *vdev, | 91 | int (*find_vqs)(struct virtio_device *, unsigned nvqs, |
79 | unsigned index, | 92 | struct virtqueue *vqs[], |
80 | void (*callback)(struct virtqueue *)); | 93 | vq_callback_t *callbacks[], |
81 | void (*del_vq)(struct virtqueue *vq); | 94 | const char *names[]); |
95 | void (*del_vqs)(struct virtio_device *); | ||
82 | u32 (*get_features)(struct virtio_device *vdev); | 96 | u32 (*get_features)(struct virtio_device *vdev); |
83 | void (*finalize_features)(struct virtio_device *vdev); | 97 | void (*finalize_features)(struct virtio_device *vdev); |
84 | }; | 98 | }; |
@@ -99,7 +113,9 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev, | |||
99 | if (__builtin_constant_p(fbit)) | 113 | if (__builtin_constant_p(fbit)) |
100 | BUILD_BUG_ON(fbit >= 32); | 114 | BUILD_BUG_ON(fbit >= 32); |
101 | 115 | ||
102 | virtio_check_driver_offered_feature(vdev, fbit); | 116 | if (fbit < VIRTIO_TRANSPORT_F_START) |
117 | virtio_check_driver_offered_feature(vdev, fbit); | ||
118 | |||
103 | return test_bit(fbit, vdev->features); | 119 | return test_bit(fbit, vdev->features); |
104 | } | 120 | } |
105 | 121 | ||
@@ -126,5 +142,18 @@ static inline int virtio_config_buf(struct virtio_device *vdev, | |||
126 | vdev->config->get(vdev, offset, buf, len); | 142 | vdev->config->get(vdev, offset, buf, len); |
127 | return 0; | 143 | return 0; |
128 | } | 144 | } |
145 | |||
146 | static inline | ||
147 | struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev, | ||
148 | vq_callback_t *c, const char *n) | ||
149 | { | ||
150 | vq_callback_t *callbacks[] = { c }; | ||
151 | const char *names[] = { n }; | ||
152 | struct virtqueue *vq; | ||
153 | int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names); | ||
154 | if (err < 0) | ||
155 | return ERR_PTR(err); | ||
156 | return vq; | ||
157 | } | ||
129 | #endif /* __KERNEL__ */ | 158 | #endif /* __KERNEL__ */ |
130 | #endif /* _LINUX_VIRTIO_CONFIG_H */ | 159 | #endif /* _LINUX_VIRTIO_CONFIG_H */ |
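Note: find_vq()/del_vq() become find_vqs()/del_vqs(), and virtio_find_single_vq() above covers the single-queue case. A hedged sketch of a multi-queue caller using only the signatures declared in this header; the example_* names are illustrative:

	static void example_rx_done(struct virtqueue *vq) { /* ... */ }
	static void example_tx_done(struct virtqueue *vq) { /* ... */ }

	static int example_find_queues(struct virtio_device *vdev,
				       struct virtqueue **rx, struct virtqueue **tx)
	{
		struct virtqueue *vqs[2];
		vq_callback_t *callbacks[] = { example_rx_done, example_tx_done };
		const char *names[] = { "rx", "tx" };
		int err;

		err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
		if (err)
			return err;

		*rx = vqs[0];
		*tx = vqs[1];
		return 0;
	}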
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h index cd0fd5d181a6..9a3d7c48c622 100644 --- a/include/linux/virtio_pci.h +++ b/include/linux/virtio_pci.h | |||
@@ -47,9 +47,17 @@ | |||
47 | /* The bit of the ISR which indicates a device configuration change. */ | 47 | /* The bit of the ISR which indicates a device configuration change. */ |
48 | #define VIRTIO_PCI_ISR_CONFIG 0x2 | 48 | #define VIRTIO_PCI_ISR_CONFIG 0x2 |
49 | 49 | ||
50 | /* MSI-X registers: only enabled if MSI-X is enabled. */ | ||
51 | /* A 16-bit vector for configuration changes. */ | ||
52 | #define VIRTIO_MSI_CONFIG_VECTOR 20 | ||
53 | /* A 16-bit vector for selected queue notifications. */ | ||
54 | #define VIRTIO_MSI_QUEUE_VECTOR 22 | ||
55 | /* Vector value used to disable MSI for queue */ | ||
56 | #define VIRTIO_MSI_NO_VECTOR 0xffff | ||
57 | |||
50 | /* The remaining space is defined by each driver as the per-driver | 58 | /* The remaining space is defined by each driver as the per-driver |
51 | * configuration space */ | 59 | * configuration space */ |
52 | #define VIRTIO_PCI_CONFIG 20 | 60 | #define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20) |
53 | 61 | ||
54 | /* Virtio ABI version, this must match exactly */ | 62 | /* Virtio ABI version, this must match exactly */ |
55 | #define VIRTIO_PCI_ABI_VERSION 0 | 63 | #define VIRTIO_PCI_ABI_VERSION 0 |
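Note: VIRTIO_PCI_CONFIG is now a function-like macro because the device-specific config space starts 4 bytes later when the MSI-X registers are present. A hedged accessor sketch; struct example_pci_dev with ioaddr and msix_enabled fields is an assumption standing in for the transport's private structure:

	static u8 example_config_readb(struct example_pci_dev *dev, unsigned offset)
	{
		/* The macro picks offset 24 or 20 based on dev->msix_enabled. */
		return ioread8(dev->ioaddr + VIRTIO_PCI_CONFIG(dev) + offset);
	}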
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index 71e03722fb59..693e0ec5afa6 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h | |||
@@ -14,6 +14,8 @@ | |||
14 | #define VRING_DESC_F_NEXT 1 | 14 | #define VRING_DESC_F_NEXT 1 |
15 | /* This marks a buffer as write-only (otherwise read-only). */ | 15 | /* This marks a buffer as write-only (otherwise read-only). */ |
16 | #define VRING_DESC_F_WRITE 2 | 16 | #define VRING_DESC_F_WRITE 2 |
17 | /* This means the buffer contains a list of buffer descriptors. */ | ||
18 | #define VRING_DESC_F_INDIRECT 4 | ||
17 | 19 | ||
18 | /* The Host uses this in used->flags to advise the Guest: don't kick me when | 20 | /* The Host uses this in used->flags to advise the Guest: don't kick me when |
19 | * you add a buffer. It's unreliable, so it's simply an optimization. Guest | 21 | * you add a buffer. It's unreliable, so it's simply an optimization. Guest |
@@ -24,6 +26,9 @@ | |||
24 | * optimization. */ | 26 | * optimization. */ |
25 | #define VRING_AVAIL_F_NO_INTERRUPT 1 | 27 | #define VRING_AVAIL_F_NO_INTERRUPT 1 |
26 | 28 | ||
29 | /* We support indirect buffer descriptors */ | ||
30 | #define VIRTIO_RING_F_INDIRECT_DESC 28 | ||
31 | |||
27 | /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ | 32 | /* Virtio ring descriptors: 16 bytes. These can chain together via "next". */ |
28 | struct vring_desc | 33 | struct vring_desc |
29 | { | 34 | { |
@@ -119,7 +124,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num, | |||
119 | struct virtio_device *vdev, | 124 | struct virtio_device *vdev, |
120 | void *pages, | 125 | void *pages, |
121 | void (*notify)(struct virtqueue *vq), | 126 | void (*notify)(struct virtqueue *vq), |
122 | void (*callback)(struct virtqueue *vq)); | 127 | void (*callback)(struct virtqueue *vq), |
128 | const char *name); | ||
123 | void vring_del_virtqueue(struct virtqueue *vq); | 129 | void vring_del_virtqueue(struct virtqueue *vq); |
124 | /* Filter out transport-specific feature bits. */ | 130 | /* Filter out transport-specific feature bits. */ |
125 | void vring_transport_features(struct virtio_device *vdev); | 131 | void vring_transport_features(struct virtio_device *vdev); |
diff --git a/include/linux/wait.h b/include/linux/wait.h index bc024632f365..6788e1a4d4ca 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -132,8 +132,6 @@ static inline void __remove_wait_queue(wait_queue_head_t *head, | |||
132 | list_del(&old->task_list); | 132 | list_del(&old->task_list); |
133 | } | 133 | } |
134 | 134 | ||
135 | void __wake_up_common(wait_queue_head_t *q, unsigned int mode, | ||
136 | int nr_exclusive, int sync, void *key); | ||
137 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); | 135 | void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); |
138 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); | 136 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); |
139 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, | 137 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 93445477f86a..3224820c8514 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
@@ -79,7 +79,6 @@ struct writeback_control { | |||
79 | void writeback_inodes(struct writeback_control *wbc); | 79 | void writeback_inodes(struct writeback_control *wbc); |
80 | int inode_wait(void *); | 80 | int inode_wait(void *); |
81 | void sync_inodes_sb(struct super_block *, int wait); | 81 | void sync_inodes_sb(struct super_block *, int wait); |
82 | void sync_inodes(int wait); | ||
83 | 82 | ||
84 | /* writeback.h requires fs.h; it, too, is not included from here. */ | 83 | /* writeback.h requires fs.h; it, too, is not included from here. */ |
85 | static inline void wait_on_inode(struct inode *inode) | 84 | static inline void wait_on_inode(struct inode *inode) |
diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h index 0627a9ae6347..3d138c1fcf8a 100644 --- a/include/scsi/fc/fc_fip.h +++ b/include/scsi/fc/fc_fip.h | |||
@@ -22,13 +22,6 @@ | |||
22 | * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf | 22 | * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf |
23 | */ | 23 | */ |
24 | 24 | ||
25 | /* | ||
26 | * The FIP ethertype eventually goes in net/if_ether.h. | ||
27 | */ | ||
28 | #ifndef ETH_P_FIP | ||
29 | #define ETH_P_FIP 0x8914 /* FIP Ethertype */ | ||
30 | #endif | ||
31 | |||
32 | #define FIP_DEF_PRI 128 /* default selection priority */ | 25 | #define FIP_DEF_PRI 128 /* default selection priority */ |
33 | #define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */ | 26 | #define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */ |
34 | #define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */ | 27 | #define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */ |
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h index d0ed5226f8c4..4426f00da5ff 100644 --- a/include/scsi/iscsi_if.h +++ b/include/scsi/iscsi_if.h | |||
@@ -22,6 +22,11 @@ | |||
22 | #define ISCSI_IF_H | 22 | #define ISCSI_IF_H |
23 | 23 | ||
24 | #include <scsi/iscsi_proto.h> | 24 | #include <scsi/iscsi_proto.h> |
25 | #include <linux/in.h> | ||
26 | #include <linux/in6.h> | ||
27 | |||
28 | #define ISCSI_NL_GRP_ISCSID 1 | ||
29 | #define ISCSI_NL_GRP_UIP 2 | ||
25 | 30 | ||
26 | #define UEVENT_BASE 10 | 31 | #define UEVENT_BASE 10 |
27 | #define KEVENT_BASE 100 | 32 | #define KEVENT_BASE 100 |
@@ -50,7 +55,10 @@ enum iscsi_uevent_e { | |||
50 | ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15, | 55 | ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15, |
51 | ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16, | 56 | ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16, |
52 | ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17, | 57 | ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17, |
53 | ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18, | 58 | ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18, |
59 | ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST = UEVENT_BASE + 19, | ||
60 | |||
61 | ISCSI_UEVENT_PATH_UPDATE = UEVENT_BASE + 20, | ||
54 | 62 | ||
55 | /* up events */ | 63 | /* up events */ |
56 | ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, | 64 | ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1, |
@@ -59,6 +67,9 @@ enum iscsi_uevent_e { | |||
59 | ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4, | 67 | ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4, |
60 | ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5, | 68 | ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5, |
61 | ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6, | 69 | ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6, |
70 | |||
71 | ISCSI_KEVENT_PATH_REQ = KEVENT_BASE + 7, | ||
72 | ISCSI_KEVENT_IF_DOWN = KEVENT_BASE + 8, | ||
62 | }; | 73 | }; |
63 | 74 | ||
64 | enum iscsi_tgt_dscvr { | 75 | enum iscsi_tgt_dscvr { |
@@ -131,6 +142,10 @@ struct iscsi_uevent { | |||
131 | struct msg_transport_connect { | 142 | struct msg_transport_connect { |
132 | uint32_t non_blocking; | 143 | uint32_t non_blocking; |
133 | } ep_connect; | 144 | } ep_connect; |
145 | struct msg_transport_connect_through_host { | ||
146 | uint32_t host_no; | ||
147 | uint32_t non_blocking; | ||
148 | } ep_connect_through_host; | ||
134 | struct msg_transport_poll { | 149 | struct msg_transport_poll { |
135 | uint64_t ep_handle; | 150 | uint64_t ep_handle; |
136 | uint32_t timeout_ms; | 151 | uint32_t timeout_ms; |
@@ -154,6 +169,9 @@ struct iscsi_uevent { | |||
154 | uint32_t param; /* enum iscsi_host_param */ | 169 | uint32_t param; /* enum iscsi_host_param */ |
155 | uint32_t len; | 170 | uint32_t len; |
156 | } set_host_param; | 171 | } set_host_param; |
172 | struct msg_set_path { | ||
173 | uint32_t host_no; | ||
174 | } set_path; | ||
157 | } u; | 175 | } u; |
158 | union { | 176 | union { |
159 | /* messages k -> u */ | 177 | /* messages k -> u */ |
@@ -187,10 +205,39 @@ struct iscsi_uevent { | |||
187 | struct msg_transport_connect_ret { | 205 | struct msg_transport_connect_ret { |
188 | uint64_t handle; | 206 | uint64_t handle; |
189 | } ep_connect_ret; | 207 | } ep_connect_ret; |
208 | struct msg_req_path { | ||
209 | uint32_t host_no; | ||
210 | } req_path; | ||
211 | struct msg_notify_if_down { | ||
212 | uint32_t host_no; | ||
213 | } notify_if_down; | ||
190 | } r; | 214 | } r; |
191 | } __attribute__ ((aligned (sizeof(uint64_t)))); | 215 | } __attribute__ ((aligned (sizeof(uint64_t)))); |
192 | 216 | ||
193 | /* | 217 | /* |
218 | * To keep the struct iscsi_uevent size the same for userspace code | ||
219 | * compatibility, the main structure for ISCSI_UEVENT_PATH_UPDATE and | ||
220 | * ISCSI_KEVENT_PATH_REQ is defined separately and comes after the | ||
221 | * struct iscsi_uevent in the NETLINK_ISCSI message. | ||
222 | */ | ||
223 | struct iscsi_path { | ||
224 | uint64_t handle; | ||
225 | uint8_t mac_addr[6]; | ||
226 | uint8_t mac_addr_old[6]; | ||
227 | uint32_t ip_addr_len; /* 4 or 16 */ | ||
228 | union { | ||
229 | struct in_addr v4_addr; | ||
230 | struct in6_addr v6_addr; | ||
231 | } src; | ||
232 | union { | ||
233 | struct in_addr v4_addr; | ||
234 | struct in6_addr v6_addr; | ||
235 | } dst; | ||
236 | uint16_t vlan_id; | ||
237 | uint16_t pmtu; | ||
238 | } __attribute__ ((aligned (sizeof(uint64_t)))); | ||
239 | |||
240 | /* | ||
194 | * Common error codes | 241 | * Common error codes |
195 | */ | 242 | */ |
196 | enum iscsi_err { | 243 | enum iscsi_err { |
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h index 45f9cc642c46..ebdd9f4cf070 100644 --- a/include/scsi/libfc.h +++ b/include/scsi/libfc.h | |||
@@ -679,6 +679,7 @@ struct fc_lport { | |||
679 | unsigned int e_d_tov; | 679 | unsigned int e_d_tov; |
680 | unsigned int r_a_tov; | 680 | unsigned int r_a_tov; |
681 | u8 max_retry_count; | 681 | u8 max_retry_count; |
682 | u8 max_rport_retry_count; | ||
682 | u16 link_speed; | 683 | u16 link_speed; |
683 | u16 link_supported_speeds; | 684 | u16 link_supported_speeds; |
684 | u16 lro_xid; /* max xid for fcoe lro */ | 685 | u16 lro_xid; /* max xid for fcoe lro */ |
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 0289f5745fb9..196525cd402f 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h | |||
@@ -82,9 +82,12 @@ enum { | |||
82 | 82 | ||
83 | 83 | ||
84 | enum { | 84 | enum { |
85 | ISCSI_TASK_FREE, | ||
85 | ISCSI_TASK_COMPLETED, | 86 | ISCSI_TASK_COMPLETED, |
86 | ISCSI_TASK_PENDING, | 87 | ISCSI_TASK_PENDING, |
87 | ISCSI_TASK_RUNNING, | 88 | ISCSI_TASK_RUNNING, |
89 | ISCSI_TASK_ABRT_TMF, /* aborted due to TMF */ | ||
90 | ISCSI_TASK_ABRT_SESS_RECOV, /* aborted due to session recovery */ | ||
88 | }; | 91 | }; |
89 | 92 | ||
90 | struct iscsi_r2t_info { | 93 | struct iscsi_r2t_info { |
@@ -181,9 +184,7 @@ struct iscsi_conn { | |||
181 | 184 | ||
182 | /* xmit */ | 185 | /* xmit */ |
183 | struct list_head mgmtqueue; /* mgmt (control) xmit queue */ | 186 | struct list_head mgmtqueue; /* mgmt (control) xmit queue */ |
184 | struct list_head mgmt_run_list; /* list of control tasks */ | 187 | struct list_head cmdqueue; /* data-path cmd queue */ |
185 | struct list_head xmitqueue; /* data-path cmd queue */ | ||
186 | struct list_head run_list; /* list of cmds in progress */ | ||
187 | struct list_head requeue; /* tasks needing another run */ | 188 | struct list_head requeue; /* tasks needing another run */ |
188 | struct work_struct xmitwork; /* per-conn. xmit workqueue */ | 189 | struct work_struct xmitwork; /* per-conn. xmit workqueue */ |
189 | unsigned long suspend_tx; /* suspend Tx */ | 190 | unsigned long suspend_tx; /* suspend Tx */ |
@@ -406,6 +407,7 @@ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *, | |||
406 | char *, int); | 407 | char *, int); |
407 | extern int iscsi_verify_itt(struct iscsi_conn *, itt_t); | 408 | extern int iscsi_verify_itt(struct iscsi_conn *, itt_t); |
408 | extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t); | 409 | extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t); |
410 | extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t); | ||
409 | extern void iscsi_requeue_task(struct iscsi_task *task); | 411 | extern void iscsi_requeue_task(struct iscsi_task *task); |
410 | extern void iscsi_put_task(struct iscsi_task *task); | 412 | extern void iscsi_put_task(struct iscsi_task *task); |
411 | extern void __iscsi_get_task(struct iscsi_task *task); | 413 | extern void __iscsi_get_task(struct iscsi_task *task); |
diff --git a/include/scsi/osd_attributes.h b/include/scsi/osd_attributes.h index f888a6fda073..56e920ade326 100644 --- a/include/scsi/osd_attributes.h +++ b/include/scsi/osd_attributes.h | |||
@@ -29,6 +29,7 @@ enum { | |||
29 | OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1, | 29 | OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1, |
30 | OSD_APAGE_PARTITION_QUOTAS = OSD_APAGE_PARTITION_FIRST + 2, | 30 | OSD_APAGE_PARTITION_QUOTAS = OSD_APAGE_PARTITION_FIRST + 2, |
31 | OSD_APAGE_PARTITION_TIMESTAMP = OSD_APAGE_PARTITION_FIRST + 3, | 31 | OSD_APAGE_PARTITION_TIMESTAMP = OSD_APAGE_PARTITION_FIRST + 3, |
32 | OSD_APAGE_PARTITION_ATTR_ACCESS = OSD_APAGE_PARTITION_FIRST + 4, | ||
32 | OSD_APAGE_PARTITION_SECURITY = OSD_APAGE_PARTITION_FIRST + 5, | 33 | OSD_APAGE_PARTITION_SECURITY = OSD_APAGE_PARTITION_FIRST + 5, |
33 | OSD_APAGE_PARTITION_LAST = 0x5FFFFFFF, | 34 | OSD_APAGE_PARTITION_LAST = 0x5FFFFFFF, |
34 | 35 | ||
@@ -51,7 +52,9 @@ enum { | |||
51 | OSD_APAGE_RESERVED_TYPE_LAST = 0xEFFFFFFF, | 52 | OSD_APAGE_RESERVED_TYPE_LAST = 0xEFFFFFFF, |
52 | 53 | ||
53 | OSD_APAGE_COMMON_FIRST = 0xF0000000, | 54 | OSD_APAGE_COMMON_FIRST = 0xF0000000, |
54 | OSD_APAGE_COMMON_LAST = 0xFFFFFFFE, | 55 | OSD_APAGE_COMMON_LAST = 0xFFFFFFFD, |
56 | |||
57 | OSD_APAGE_CURRENT_COMMAND = 0xFFFFFFFE, | ||
55 | 58 | ||
56 | OSD_APAGE_REQUEST_ALL = 0xFFFFFFFF, | 59 | OSD_APAGE_REQUEST_ALL = 0xFFFFFFFF, |
57 | }; | 60 | }; |
@@ -106,10 +109,30 @@ enum { | |||
106 | OSD_ATTR_RI_PRODUCT_REVISION_LEVEL = 0x7, /* 4 */ | 109 | OSD_ATTR_RI_PRODUCT_REVISION_LEVEL = 0x7, /* 4 */ |
107 | OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER = 0x8, /* variable */ | 110 | OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER = 0x8, /* variable */ |
108 | OSD_ATTR_RI_OSD_NAME = 0x9, /* variable */ | 111 | OSD_ATTR_RI_OSD_NAME = 0x9, /* variable */ |
112 | OSD_ATTR_RI_MAX_CDB_CONTINUATION_LEN = 0xA, /* 4 */ | ||
109 | OSD_ATTR_RI_TOTAL_CAPACITY = 0x80, /* 8 */ | 113 | OSD_ATTR_RI_TOTAL_CAPACITY = 0x80, /* 8 */ |
110 | OSD_ATTR_RI_USED_CAPACITY = 0x81, /* 8 */ | 114 | OSD_ATTR_RI_USED_CAPACITY = 0x81, /* 8 */ |
111 | OSD_ATTR_RI_NUMBER_OF_PARTITIONS = 0xC0, /* 8 */ | 115 | OSD_ATTR_RI_NUMBER_OF_PARTITIONS = 0xC0, /* 8 */ |
112 | OSD_ATTR_RI_CLOCK = 0x100, /* 6 */ | 116 | OSD_ATTR_RI_CLOCK = 0x100, /* 6 */ |
117 | OARI_DEFAULT_ISOLATION_METHOD = 0X110, /* 1 */ | ||
118 | OARI_SUPPORTED_ISOLATION_METHODS = 0X111, /* 32 */ | ||
119 | |||
120 | OARI_DATA_ATOMICITY_GUARANTEE = 0X120, /* 8 */ | ||
121 | OARI_DATA_ATOMICITY_ALIGNMENT = 0X121, /* 8 */ | ||
122 | OARI_ATTRIBUTES_ATOMICITY_GUARANTEE = 0X122, /* 8 */ | ||
123 | OARI_DATA_ATTRIBUTES_ATOMICITY_MULTIPLIER = 0X123, /* 1 */ | ||
124 | |||
125 | OARI_MAXIMUM_SNAPSHOTS_COUNT = 0X1C1, /* 0 or 4 */ | ||
126 | OARI_MAXIMUM_CLONES_COUNT = 0X1C2, /* 0 or 4 */ | ||
127 | OARI_MAXIMUM_BRANCH_DEPTH = 0X1CC, /* 0 or 4 */ | ||
128 | OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_FIRST = 0X200, /* 0 or 4 */ | ||
129 | OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_LAST = 0X2ff, /* 0 or 4 */ | ||
130 | OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_FIRST = 0X300, /* 0 or 4 */ | ||
131 | OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_LAST = 0X30F, /* 0 or 4 */ | ||
132 | OARI_SUPPORT_FOR_DUPLICATED_OBJECT_FREEZING = 0X310, /* 0 or 4 */ | ||
133 | OARI_SUPPORT_FOR_SNAPSHOT_REFRESHING = 0X311, /* 0 or 1 */ | ||
134 | OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_FIRST = 0X7000001,/* 0 or 4 */ | ||
135 | OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_LAST = 0X700FFFF,/* 0 or 4 */ | ||
113 | }; | 136 | }; |
114 | /* Root_Information_attributes_page does not have a get_page structure */ | 137 | /* Root_Information_attributes_page does not have a get_page structure */ |
115 | 138 | ||
@@ -120,7 +143,15 @@ enum { | |||
120 | OSD_ATTR_PI_PARTITION_ID = 0x1, /* 8 */ | 143 | OSD_ATTR_PI_PARTITION_ID = 0x1, /* 8 */ |
121 | OSD_ATTR_PI_USERNAME = 0x9, /* variable */ | 144 | OSD_ATTR_PI_USERNAME = 0x9, /* variable */ |
122 | OSD_ATTR_PI_USED_CAPACITY = 0x81, /* 8 */ | 145 | OSD_ATTR_PI_USED_CAPACITY = 0x81, /* 8 */ |
146 | OSD_ATTR_PI_USED_CAPACITY_INCREMENT = 0x84, /* 0 or 8 */ | ||
123 | OSD_ATTR_PI_NUMBER_OF_OBJECTS = 0xC1, /* 8 */ | 147 | OSD_ATTR_PI_NUMBER_OF_OBJECTS = 0xC1, /* 8 */ |
148 | |||
149 | OSD_ATTR_PI_ACTUAL_DATA_SPACE = 0xD1, /* 0 or 8 */ | ||
150 | OSD_ATTR_PI_RESERVED_DATA_SPACE = 0xD2, /* 0 or 8 */ | ||
151 | OSD_ATTR_PI_DEFAULT_SNAPSHOT_DUPLICATION_METHOD = 0x200,/* 0 or 4 */ | ||
152 | OSD_ATTR_PI_DEFAULT_CLONE_DUPLICATION_METHOD = 0x201,/* 0 or 4 */ | ||
153 | OSD_ATTR_PI_DEFAULT_SP_TIME_OF_DUPLICATION = 0x300,/* 0 or 4 */ | ||
154 | OSD_ATTR_PI_DEFAULT_CLONE_TIME_OF_DUPLICATION = 0x301,/* 0 or 4 */ | ||
124 | }; | 155 | }; |
125 | /* Partition Information attributes page does not have a get_page structure */ | 156 | /* Partition Information attributes page does not have a get_page structure */ |
126 | 157 | ||
@@ -131,6 +162,7 @@ enum { | |||
131 | OSD_ATTR_CI_PARTITION_ID = 0x1, /* 8 */ | 162 | OSD_ATTR_CI_PARTITION_ID = 0x1, /* 8 */ |
132 | OSD_ATTR_CI_COLLECTION_OBJECT_ID = 0x2, /* 8 */ | 163 | OSD_ATTR_CI_COLLECTION_OBJECT_ID = 0x2, /* 8 */ |
133 | OSD_ATTR_CI_USERNAME = 0x9, /* variable */ | 164 | OSD_ATTR_CI_USERNAME = 0x9, /* variable */ |
165 | OSD_ATTR_CI_COLLECTION_TYPE = 0xA, /* 1 */ | ||
134 | OSD_ATTR_CI_USED_CAPACITY = 0x81, /* 8 */ | 166 | OSD_ATTR_CI_USED_CAPACITY = 0x81, /* 8 */ |
135 | }; | 167 | }; |
136 | /* Collection Information attributes page does not have a get_page structure */ | 168 | /* Collection Information attributes page does not have a get_page structure */ |
@@ -144,6 +176,8 @@ enum { | |||
144 | OSD_ATTR_OI_USERNAME = 0x9, /* variable */ | 176 | OSD_ATTR_OI_USERNAME = 0x9, /* variable */ |
145 | OSD_ATTR_OI_USED_CAPACITY = 0x81, /* 8 */ | 177 | OSD_ATTR_OI_USED_CAPACITY = 0x81, /* 8 */ |
146 | OSD_ATTR_OI_LOGICAL_LENGTH = 0x82, /* 8 */ | 178 | OSD_ATTR_OI_LOGICAL_LENGTH = 0x82, /* 8 */ |
179 | SD_ATTR_OI_ACTUAL_DATA_SPACE = 0XD1, /* 0 OR 8 */ | ||
180 | SD_ATTR_OI_RESERVED_DATA_SPACE = 0XD2, /* 0 OR 8 */ | ||
147 | }; | 181 | }; |
148 | /* Object Information attributes page does not have a get_page structure */ | 182 | /* Object Information attributes page does not have a get_page structure */ |
149 | 183 | ||
@@ -248,7 +282,18 @@ struct object_timestamps_attributes_page { | |||
248 | struct osd_timestamp data_modified_time; | 282 | struct osd_timestamp data_modified_time; |
249 | } __packed; | 283 | } __packed; |
250 | 284 | ||
251 | /* 7.1.2.19 Collections attributes page */ | 285 | /* OSD2r05: 7.1.3.19 Attributes Access attributes page |
286 | * (OSD_APAGE_PARTITION_ATTR_ACCESS) | ||
287 | * | ||
288 | * each attribute is of the form below. Total array length is deduced | ||
289 | * from the attribute's length | ||
290 | * (See allowed_attributes_access of the struct osd_cap_object_descriptor) | ||
291 | */ | ||
292 | struct attributes_access_attr { | ||
293 | struct osd_attributes_list_attrid attr_list[0]; | ||
294 | } __packed; | ||
295 | |||
296 | /* OSD2r05: 7.1.2.21 Collections attributes page */ | ||
252 | /* TBD */ | 297 | /* TBD */ |
253 | 298 | ||
254 | /* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */ | 299 | /* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */ |
@@ -324,4 +369,29 @@ struct object_security_attributes_page { | |||
324 | __be32 policy_access_tag; | 369 | __be32 policy_access_tag; |
325 | } __packed; | 370 | } __packed; |
326 | 371 | ||
372 | /* OSD2r05: 7.1.3.31 Current Command attributes page | ||
373 | * (OSD_APAGE_CURRENT_COMMAND) | ||
374 | */ | ||
375 | enum { | ||
376 | OSD_ATTR_CC_RESPONSE_INTEGRITY_CHECK_VALUE = 0x1, /* 32 */ | ||
377 | OSD_ATTR_CC_OBJECT_TYPE = 0x2, /* 1 */ | ||
378 | OSD_ATTR_CC_PARTITION_ID = 0x3, /* 8 */ | ||
379 | OSD_ATTR_CC_OBJECT_ID = 0x4, /* 8 */ | ||
380 | OSD_ATTR_CC_STARTING_BYTE_ADDRESS_OF_APPEND = 0x5, /* 8 */ | ||
381 | OSD_ATTR_CC_CHANGE_IN_USED_CAPACITY = 0x6, /* 8 */ | ||
382 | }; | ||
383 | |||
384 | /*TBD: osdv1_current_command_attributes_page */ | ||
385 | |||
386 | struct osdv2_current_command_attributes_page { | ||
387 | struct osd_attr_page_header hdr; /* id=0xFFFFFFFE, size=0x44 */ | ||
388 | u8 response_integrity_check_value[OSD_CRYPTO_KEYID_SIZE]; | ||
389 | u8 object_type; | ||
390 | u8 reserved[3]; | ||
391 | __be64 partition_id; | ||
392 | __be64 object_id; | ||
393 | __be64 starting_byte_address_of_append; | ||
394 | __be64 change_in_used_capacity; | ||
395 | }; | ||
396 | |||
327 | #endif /*ndef __OSD_ATTRIBUTES_H__*/ | 397 | #endif /*ndef __OSD_ATTRIBUTES_H__*/ |
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h index b24d9616eb46..02bd9f716357 100644 --- a/include/scsi/osd_initiator.h +++ b/include/scsi/osd_initiator.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include "osd_types.h" | 18 | #include "osd_types.h" |
19 | 19 | ||
20 | #include <linux/blkdev.h> | 20 | #include <linux/blkdev.h> |
21 | #include <scsi/scsi_device.h> | ||
21 | 22 | ||
22 | /* Note: "NI" in comments below means "Not Implemented yet" */ | 23 | /* Note: "NI" in comments below means "Not Implemented yet" */ |
23 | 24 | ||
@@ -47,6 +48,7 @@ enum osd_std_version { | |||
47 | */ | 48 | */ |
48 | struct osd_dev { | 49 | struct osd_dev { |
49 | struct scsi_device *scsi_device; | 50 | struct scsi_device *scsi_device; |
51 | struct file *file; | ||
50 | unsigned def_timeout; | 52 | unsigned def_timeout; |
51 | 53 | ||
52 | #ifdef OSD_VER1_SUPPORT | 54 | #ifdef OSD_VER1_SUPPORT |
@@ -69,6 +71,10 @@ void osd_dev_fini(struct osd_dev *od); | |||
69 | 71 | ||
70 | /* some hi level device operations */ | 72 | /* some hi level device operations */ |
71 | int osd_auto_detect_ver(struct osd_dev *od, void *caps); /* GFP_KERNEL */ | 73 | int osd_auto_detect_ver(struct osd_dev *od, void *caps); /* GFP_KERNEL */ |
74 | static inline struct request_queue *osd_request_queue(struct osd_dev *od) | ||
75 | { | ||
76 | return od->scsi_device->request_queue; | ||
77 | } | ||
72 | 78 | ||
73 | /* we might want to use function vector in the future */ | 79 | /* we might want to use function vector in the future */ |
74 | static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v) | 80 | static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v) |
@@ -363,7 +369,9 @@ void osd_req_create_object(struct osd_request *or, struct osd_obj_id *); | |||
363 | void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *); | 369 | void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *); |
364 | 370 | ||
365 | void osd_req_write(struct osd_request *or, | 371 | void osd_req_write(struct osd_request *or, |
366 | const struct osd_obj_id *, struct bio *data_out, u64 offset); | 372 | const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len); |
373 | int osd_req_write_kern(struct osd_request *or, | ||
374 | const struct osd_obj_id *obj, u64 offset, void *buff, u64 len); | ||
367 | void osd_req_append(struct osd_request *or, | 375 | void osd_req_append(struct osd_request *or, |
368 | const struct osd_obj_id *, struct bio *data_out);/* NI */ | 376 | const struct osd_obj_id *, struct bio *data_out);/* NI */ |
369 | void osd_req_create_write(struct osd_request *or, | 377 | void osd_req_create_write(struct osd_request *or, |
@@ -378,7 +386,9 @@ void osd_req_flush_object(struct osd_request *or, | |||
378 | /*V2*/ u64 offset, /*V2*/ u64 len); | 386 | /*V2*/ u64 offset, /*V2*/ u64 len); |
379 | 387 | ||
380 | void osd_req_read(struct osd_request *or, | 388 | void osd_req_read(struct osd_request *or, |
381 | const struct osd_obj_id *, struct bio *data_in, u64 offset); | 389 | const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len); |
390 | int osd_req_read_kern(struct osd_request *or, | ||
391 | const struct osd_obj_id *obj, u64 offset, void *buff, u64 len); | ||
382 | 392 | ||
383 | /* | 393 | /* |
384 | * Root/Partition/Collection/Object Attributes commands | 394 | * Root/Partition/Collection/Object Attributes commands |
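With the reworked signatures above, osd_req_write()/osd_req_read() now take the object, an offset, the bio and an explicit length, and the new *_kern variants accept a plain kernel buffer instead of a bio. A small sketch of the kernel-buffer variants (the requests are assumed to have been allocated and finalized elsewhere; all names are illustrative):

    #include <scsi/osd_initiator.h>

    /* Queue a write of 'len' bytes from 'buf' and a read back into it,
     * using the new kernel-buffer helpers. Error handling is minimal. */
    static int example_rw_kern(struct osd_request *wr, struct osd_request *rd,
                               const struct osd_obj_id *obj, void *buf, u64 len)
    {
            int ret = osd_req_write_kern(wr, obj, 0 /* offset */, buf, len);

            if (ret)
                    return ret;
            return osd_req_read_kern(rd, obj, 0 /* offset */, buf, len);
    }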
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h index 62b2ab8c69d4..2cc8e8b1cc19 100644 --- a/include/scsi/osd_protocol.h +++ b/include/scsi/osd_protocol.h | |||
@@ -303,7 +303,15 @@ enum osd_service_actions { | |||
303 | OSD_ACT_V2(REMOVE_MEMBER_OBJECTS, 0x21) | 303 | OSD_ACT_V2(REMOVE_MEMBER_OBJECTS, 0x21) |
304 | OSD_ACT_V2(GET_MEMBER_ATTRIBUTES, 0x22) | 304 | OSD_ACT_V2(GET_MEMBER_ATTRIBUTES, 0x22) |
305 | OSD_ACT_V2(SET_MEMBER_ATTRIBUTES, 0x23) | 305 | OSD_ACT_V2(SET_MEMBER_ATTRIBUTES, 0x23) |
306 | |||
307 | OSD_ACT_V2(CREATE_CLONE, 0x28) | ||
308 | OSD_ACT_V2(CREATE_SNAPSHOT, 0x29) | ||
309 | OSD_ACT_V2(DETACH_CLONE, 0x2A) | ||
310 | OSD_ACT_V2(REFRESH_SNAPSHOT_CLONE, 0x2B) | ||
311 | OSD_ACT_V2(RESTORE_PARTITION_FROM_SNAPSHOT, 0x2C) | ||
312 | |||
306 | OSD_ACT_V2(READ_MAP, 0x31) | 313 | OSD_ACT_V2(READ_MAP, 0x31) |
314 | OSD_ACT_V2(READ_MAPS_COMPARE, 0x32) | ||
307 | 315 | ||
308 | OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND, 0x8F7E, 0x8F7C) | 316 | OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND, 0x8F7E, 0x8F7C) |
309 | OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT, 0x8F7F, 0x8F7D) | 317 | OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT, 0x8F7F, 0x8F7D) |
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h index 43b50d36925c..3878d1dc7f59 100644 --- a/include/scsi/scsi_cmnd.h +++ b/include/scsi/scsi_cmnd.h | |||
@@ -270,7 +270,7 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd) | |||
270 | 270 | ||
271 | static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd) | 271 | static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd) |
272 | { | 272 | { |
273 | return scmd->request->sector; | 273 | return blk_rq_pos(scmd->request); |
274 | } | 274 | } |
275 | 275 | ||
276 | static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd) | 276 | static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd) |
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 457588e1119b..349c7f30720d 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h | |||
@@ -126,12 +126,14 @@ struct iscsi_transport { | |||
126 | int *index, int *age); | 126 | int *index, int *age); |
127 | 127 | ||
128 | void (*session_recovery_timedout) (struct iscsi_cls_session *session); | 128 | void (*session_recovery_timedout) (struct iscsi_cls_session *session); |
129 | struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr, | 129 | struct iscsi_endpoint *(*ep_connect) (struct Scsi_Host *shost, |
130 | struct sockaddr *dst_addr, | ||
130 | int non_blocking); | 131 | int non_blocking); |
131 | int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms); | 132 | int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms); |
132 | void (*ep_disconnect) (struct iscsi_endpoint *ep); | 133 | void (*ep_disconnect) (struct iscsi_endpoint *ep); |
133 | int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type, | 134 | int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type, |
134 | uint32_t enable, struct sockaddr *dst_addr); | 135 | uint32_t enable, struct sockaddr *dst_addr); |
136 | int (*set_path) (struct Scsi_Host *shost, struct iscsi_path *params); | ||
135 | }; | 137 | }; |
136 | 138 | ||
137 | /* | 139 | /* |
@@ -148,6 +150,10 @@ extern void iscsi_conn_error_event(struct iscsi_cls_conn *conn, | |||
148 | extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, | 150 | extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr, |
149 | char *data, uint32_t data_size); | 151 | char *data, uint32_t data_size); |
150 | 152 | ||
153 | extern int iscsi_offload_mesg(struct Scsi_Host *shost, | ||
154 | struct iscsi_transport *transport, uint32_t type, | ||
155 | char *data, uint16_t data_size); | ||
156 | |||
151 | struct iscsi_cls_conn { | 157 | struct iscsi_cls_conn { |
152 | struct list_head conn_list; /* item in connlist */ | 158 | struct list_head conn_list; /* item in connlist */ |
153 | void *dd_data; /* LLD private data */ | 159 | void *dd_data; /* LLD private data */ |
diff --git a/include/sound/asound.h b/include/sound/asound.h index 6add80fc2512..82aed3f47534 100644 --- a/include/sound/asound.h +++ b/include/sound/asound.h | |||
@@ -255,6 +255,7 @@ typedef int __bitwise snd_pcm_subformat_t; | |||
255 | #define SNDRV_PCM_INFO_HALF_DUPLEX 0x00100000 /* only half duplex */ | 255 | #define SNDRV_PCM_INFO_HALF_DUPLEX 0x00100000 /* only half duplex */ |
256 | #define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */ | 256 | #define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */ |
257 | #define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */ | 257 | #define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */ |
258 | #define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */ | ||
258 | 259 | ||
259 | typedef int __bitwise snd_pcm_state_t; | 260 | typedef int __bitwise snd_pcm_state_t; |
260 | #define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */ | 261 | #define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */ |
diff --git a/include/sound/core.h b/include/sound/core.h index 3dea79829acc..309cb9659a05 100644 --- a/include/sound/core.h +++ b/include/sound/core.h | |||
@@ -300,19 +300,10 @@ int snd_card_create(int idx, const char *id, | |||
300 | struct module *module, int extra_size, | 300 | struct module *module, int extra_size, |
301 | struct snd_card **card_ret); | 301 | struct snd_card **card_ret); |
302 | 302 | ||
303 | static inline __deprecated | ||
304 | struct snd_card *snd_card_new(int idx, const char *id, | ||
305 | struct module *module, int extra_size) | ||
306 | { | ||
307 | struct snd_card *card; | ||
308 | if (snd_card_create(idx, id, module, extra_size, &card) < 0) | ||
309 | return NULL; | ||
310 | return card; | ||
311 | } | ||
312 | |||
313 | int snd_card_disconnect(struct snd_card *card); | 303 | int snd_card_disconnect(struct snd_card *card); |
314 | int snd_card_free(struct snd_card *card); | 304 | int snd_card_free(struct snd_card *card); |
315 | int snd_card_free_when_closed(struct snd_card *card); | 305 | int snd_card_free_when_closed(struct snd_card *card); |
306 | void snd_card_set_id(struct snd_card *card, const char *id); | ||
316 | int snd_card_register(struct snd_card *card); | 307 | int snd_card_register(struct snd_card *card); |
317 | int snd_card_info_init(void); | 308 | int snd_card_info_init(void); |
318 | int snd_card_info_done(void); | 309 | int snd_card_info_done(void); |
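With the deprecated snd_card_new() wrapper removed above, drivers allocate cards through snd_card_create() directly. A minimal sketch of the replacement pattern (index, id and extra_size values are illustrative):

    #include <linux/module.h>
    #include <sound/core.h>

    static int example_card_setup(struct snd_card **card_ret)
    {
            struct snd_card *card;
            int err;

            err = snd_card_create(-1 /* first free index */, "Example",
                                  THIS_MODULE, 0 /* extra_size */, &card);
            if (err < 0)
                    return err;
            /* ... create PCMs/controls here ... */
            err = snd_card_register(card);
            if (err < 0) {
                    snd_card_free(card);
                    return err;
            }
            *card_ret = card;
            return 0;
    }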
diff --git a/include/sound/driver.h b/include/sound/driver.h deleted file mode 100644 index f0359437d01a..000000000000 --- a/include/sound/driver.h +++ /dev/null | |||
@@ -1 +0,0 @@ | |||
1 | #warning "This file is deprecated" | ||
diff --git a/include/sound/pcm.h b/include/sound/pcm.h index c17296891617..23893523dc8c 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h | |||
@@ -98,6 +98,7 @@ struct snd_pcm_ops { | |||
98 | #define SNDRV_PCM_IOCTL1_INFO 1 | 98 | #define SNDRV_PCM_IOCTL1_INFO 1 |
99 | #define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2 | 99 | #define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2 |
100 | #define SNDRV_PCM_IOCTL1_GSTATE 3 | 100 | #define SNDRV_PCM_IOCTL1_GSTATE 3 |
101 | #define SNDRV_PCM_IOCTL1_FIFO_SIZE 4 | ||
101 | 102 | ||
102 | #define SNDRV_PCM_TRIGGER_STOP 0 | 103 | #define SNDRV_PCM_TRIGGER_STOP 0 |
103 | #define SNDRV_PCM_TRIGGER_START 1 | 104 | #define SNDRV_PCM_TRIGGER_START 1 |
@@ -270,6 +271,7 @@ struct snd_pcm_runtime { | |||
270 | snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */ | 271 | snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */ |
271 | snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time */ | 272 | snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time */ |
272 | unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */ | 273 | unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */ |
274 | snd_pcm_sframes_t delay; /* extra delay; typically FIFO size */ | ||
273 | 275 | ||
274 | /* -- HW params -- */ | 276 | /* -- HW params -- */ |
275 | snd_pcm_access_t access; /* access mode */ | 277 | snd_pcm_access_t access; /* access mode */ |
@@ -486,80 +488,6 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream); | |||
486 | void snd_pcm_vma_notify_data(void *client, void *data); | 488 | void snd_pcm_vma_notify_data(void *client, void *data); |
487 | int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area); | 489 | int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area); |
488 | 490 | ||
489 | #if BITS_PER_LONG >= 64 | ||
490 | |||
491 | static inline void div64_32(u_int64_t *n, u_int32_t div, u_int32_t *rem) | ||
492 | { | ||
493 | *rem = *n % div; | ||
494 | *n /= div; | ||
495 | } | ||
496 | |||
497 | #elif defined(i386) | ||
498 | |||
499 | static inline void div64_32(u_int64_t *n, u_int32_t div, u_int32_t *rem) | ||
500 | { | ||
501 | u_int32_t low, high; | ||
502 | low = *n & 0xffffffff; | ||
503 | high = *n >> 32; | ||
504 | if (high) { | ||
505 | u_int32_t high1 = high % div; | ||
506 | high /= div; | ||
507 | asm("divl %2":"=a" (low), "=d" (*rem):"rm" (div), "a" (low), "d" (high1)); | ||
508 | *n = (u_int64_t)high << 32 | low; | ||
509 | } else { | ||
510 | *n = low / div; | ||
511 | *rem = low % div; | ||
512 | } | ||
513 | } | ||
514 | #else | ||
515 | |||
516 | static inline void divl(u_int32_t high, u_int32_t low, | ||
517 | u_int32_t div, | ||
518 | u_int32_t *q, u_int32_t *r) | ||
519 | { | ||
520 | u_int64_t n = (u_int64_t)high << 32 | low; | ||
521 | u_int64_t d = (u_int64_t)div << 31; | ||
522 | u_int32_t q1 = 0; | ||
523 | int c = 32; | ||
524 | while (n > 0xffffffffU) { | ||
525 | q1 <<= 1; | ||
526 | if (n >= d) { | ||
527 | n -= d; | ||
528 | q1 |= 1; | ||
529 | } | ||
530 | d >>= 1; | ||
531 | c--; | ||
532 | } | ||
533 | q1 <<= c; | ||
534 | if (n) { | ||
535 | low = n; | ||
536 | *q = q1 | (low / div); | ||
537 | *r = low % div; | ||
538 | } else { | ||
539 | *r = 0; | ||
540 | *q = q1; | ||
541 | } | ||
542 | return; | ||
543 | } | ||
544 | |||
545 | static inline void div64_32(u_int64_t *n, u_int32_t div, u_int32_t *rem) | ||
546 | { | ||
547 | u_int32_t low, high; | ||
548 | low = *n & 0xffffffff; | ||
549 | high = *n >> 32; | ||
550 | if (high) { | ||
551 | u_int32_t high1 = high % div; | ||
552 | u_int32_t low1 = low; | ||
553 | high /= div; | ||
554 | divl(high1, low1, div, &low, rem); | ||
555 | *n = (u_int64_t)high << 32 | low; | ||
556 | } else { | ||
557 | *n = low / div; | ||
558 | *rem = low % div; | ||
559 | } | ||
560 | } | ||
561 | #endif | ||
562 | |||
563 | /* | 491 | /* |
564 | * PCM library | 492 | * PCM library |
565 | */ | 493 | */ |
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 13676472ddfc..352d7eee9b6d 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h | |||
@@ -45,24 +45,6 @@ struct snd_pcm_substream; | |||
45 | #define SND_SOC_DAIFMT_GATED (1 << 4) /* clock is gated */ | 45 | #define SND_SOC_DAIFMT_GATED (1 << 4) /* clock is gated */ |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * DAI Left/Right Clocks. | ||
49 | * | ||
50 | * Specifies whether the DAI can support different samples for simultaneous | ||
51 | * playback and capture. This usually requires a separate physical frame | ||
52 | * clock for playback and capture. | ||
53 | */ | ||
54 | #define SND_SOC_DAIFMT_SYNC (0 << 5) /* Tx FRM = Rx FRM */ | ||
55 | #define SND_SOC_DAIFMT_ASYNC (1 << 5) /* Tx FRM ~ Rx FRM */ | ||
56 | |||
57 | /* | ||
58 | * TDM | ||
59 | * | ||
60 | * Time Division Multiplexing. Allows PCM data to be multiplexed with other | ||
61 | * data on the DAI. | ||
62 | */ | ||
63 | #define SND_SOC_DAIFMT_TDM (1 << 6) | ||
64 | |||
65 | /* | ||
66 | * DAI hardware signal inversions. | 48 | * DAI hardware signal inversions. |
67 | * | 49 | * |
68 | * Specifies whether the DAI can also support inverted clocks for the specified | 50 | * Specifies whether the DAI can also support inverted clocks for the specified |
@@ -96,6 +78,10 @@ struct snd_pcm_substream; | |||
96 | #define SND_SOC_CLOCK_IN 0 | 78 | #define SND_SOC_CLOCK_IN 0 |
97 | #define SND_SOC_CLOCK_OUT 1 | 79 | #define SND_SOC_CLOCK_OUT 1 |
98 | 80 | ||
81 | #define SND_SOC_STD_AC97_FMTS (SNDRV_PCM_FMTBIT_S16_LE |\ | ||
82 | SNDRV_PCM_FMTBIT_S32_LE |\ | ||
83 | SNDRV_PCM_FMTBIT_S32_BE) | ||
84 | |||
99 | struct snd_soc_dai_ops; | 85 | struct snd_soc_dai_ops; |
100 | struct snd_soc_dai; | 86 | struct snd_soc_dai; |
101 | struct snd_ac97_bus_ops; | 87 | struct snd_ac97_bus_ops; |
@@ -208,6 +194,7 @@ struct snd_soc_dai { | |||
208 | /* DAI capabilities */ | 194 | /* DAI capabilities */ |
209 | struct snd_soc_pcm_stream capture; | 195 | struct snd_soc_pcm_stream capture; |
210 | struct snd_soc_pcm_stream playback; | 196 | struct snd_soc_pcm_stream playback; |
197 | unsigned int symmetric_rates:1; | ||
211 | 198 | ||
212 | /* DAI runtime info */ | 199 | /* DAI runtime info */ |
213 | struct snd_pcm_runtime *runtime; | 200 | struct snd_pcm_runtime *runtime; |
@@ -219,11 +206,8 @@ struct snd_soc_dai { | |||
219 | /* DAI private data */ | 206 | /* DAI private data */ |
220 | void *private_data; | 207 | void *private_data; |
221 | 208 | ||
222 | /* parent codec/platform */ | 209 | /* parent platform */ |
223 | union { | 210 | struct snd_soc_platform *platform; |
224 | struct snd_soc_codec *codec; | ||
225 | struct snd_soc_platform *platform; | ||
226 | }; | ||
227 | 211 | ||
228 | struct list_head list; | 212 | struct list_head list; |
229 | }; | 213 | }; |
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h index a7def6a9a030..ec8a45f9a069 100644 --- a/include/sound/soc-dapm.h +++ b/include/sound/soc-dapm.h | |||
@@ -140,16 +140,30 @@ | |||
140 | #define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \ | 140 | #define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \ |
141 | { .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \ | 141 | { .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \ |
142 | .shift = wshift, .invert = winvert} | 142 | .shift = wshift, .invert = winvert} |
143 | #define SND_SOC_DAPM_DAC_E(wname, stname, wreg, wshift, winvert, \ | ||
144 | wevent, wflags) \ | ||
145 | { .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \ | ||
146 | .shift = wshift, .invert = winvert, \ | ||
147 | .event = wevent, .event_flags = wflags} | ||
143 | #define SND_SOC_DAPM_ADC(wname, stname, wreg, wshift, winvert) \ | 148 | #define SND_SOC_DAPM_ADC(wname, stname, wreg, wshift, winvert) \ |
144 | { .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \ | 149 | { .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \ |
145 | .shift = wshift, .invert = winvert} | 150 | .shift = wshift, .invert = winvert} |
151 | #define SND_SOC_DAPM_ADC_E(wname, stname, wreg, wshift, winvert, \ | ||
152 | wevent, wflags) \ | ||
153 | { .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \ | ||
154 | .shift = wshift, .invert = winvert, \ | ||
155 | .event = wevent, .event_flags = wflags} | ||
146 | 156 | ||
147 | /* generic register modifier widget */ | 157 | /* generic widgets */ |
148 | #define SND_SOC_DAPM_REG(wid, wname, wreg, wshift, wmask, won_val, woff_val) \ | 158 | #define SND_SOC_DAPM_REG(wid, wname, wreg, wshift, wmask, won_val, woff_val) \ |
149 | { .id = wid, .name = wname, .kcontrols = NULL, .num_kcontrols = 0, \ | 159 | { .id = wid, .name = wname, .kcontrols = NULL, .num_kcontrols = 0, \ |
150 | .reg = -((wreg) + 1), .shift = wshift, .mask = wmask, \ | 160 | .reg = -((wreg) + 1), .shift = wshift, .mask = wmask, \ |
151 | .on_val = won_val, .off_val = woff_val, .event = dapm_reg_event, \ | 161 | .on_val = won_val, .off_val = woff_val, .event = dapm_reg_event, \ |
152 | .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD} | 162 | .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD} |
163 | #define SND_SOC_DAPM_SUPPLY(wname, wreg, wshift, winvert, wevent, wflags) \ | ||
164 | { .id = snd_soc_dapm_supply, .name = wname, .reg = wreg, \ | ||
165 | .shift = wshift, .invert = winvert, .event = wevent, \ | ||
166 | .event_flags = wflags} | ||
153 | 167 | ||
154 | /* dapm kcontrol types */ | 168 | /* dapm kcontrol types */ |
155 | #define SOC_DAPM_SINGLE(xname, reg, shift, max, invert) \ | 169 | #define SOC_DAPM_SINGLE(xname, reg, shift, max, invert) \ |
@@ -265,8 +279,6 @@ int snd_soc_dapm_add_routes(struct snd_soc_codec *codec, | |||
265 | /* dapm events */ | 279 | /* dapm events */ |
266 | int snd_soc_dapm_stream_event(struct snd_soc_codec *codec, char *stream, | 280 | int snd_soc_dapm_stream_event(struct snd_soc_codec *codec, char *stream, |
267 | int event); | 281 | int event); |
268 | int snd_soc_dapm_set_bias_level(struct snd_soc_device *socdev, | ||
269 | enum snd_soc_bias_level level); | ||
270 | 282 | ||
271 | /* dapm sys fs - used by the core */ | 283 | /* dapm sys fs - used by the core */ |
272 | int snd_soc_dapm_sys_add(struct device *dev); | 284 | int snd_soc_dapm_sys_add(struct device *dev); |
@@ -298,6 +310,7 @@ enum snd_soc_dapm_type { | |||
298 | snd_soc_dapm_vmid, /* codec bias/vmid - to minimise pops */ | 310 | snd_soc_dapm_vmid, /* codec bias/vmid - to minimise pops */ |
299 | snd_soc_dapm_pre, /* machine specific pre widget - exec first */ | 311 | snd_soc_dapm_pre, /* machine specific pre widget - exec first */ |
300 | snd_soc_dapm_post, /* machine specific post widget - exec last */ | 312 | snd_soc_dapm_post, /* machine specific post widget - exec last */ |
313 | snd_soc_dapm_supply, /* power/clock supply */ | ||
301 | }; | 314 | }; |
302 | 315 | ||
303 | /* | 316 | /* |
@@ -357,6 +370,8 @@ struct snd_soc_dapm_widget { | |||
357 | unsigned char suspend:1; /* was active before suspend */ | 370 | unsigned char suspend:1; /* was active before suspend */ |
358 | unsigned char pmdown:1; /* waiting for timeout */ | 371 | unsigned char pmdown:1; /* waiting for timeout */ |
359 | 372 | ||
373 | int (*power_check)(struct snd_soc_dapm_widget *w); | ||
374 | |||
360 | /* external events */ | 375 | /* external events */ |
361 | unsigned short event_flags; /* flags to specify event types */ | 376 | unsigned short event_flags; /* flags to specify event types */ |
362 | int (*event)(struct snd_soc_dapm_widget*, struct snd_kcontrol *, int); | 377 | int (*event)(struct snd_soc_dapm_widget*, struct snd_kcontrol *, int); |
@@ -368,6 +383,9 @@ struct snd_soc_dapm_widget { | |||
368 | /* widget input and outputs */ | 383 | /* widget input and outputs */ |
369 | struct list_head sources; | 384 | struct list_head sources; |
370 | struct list_head sinks; | 385 | struct list_head sinks; |
386 | |||
387 | /* used during DAPM updates */ | ||
388 | struct list_head power_list; | ||
371 | }; | 389 | }; |
372 | 390 | ||
373 | #endif | 391 | #endif |
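The new snd_soc_dapm_supply widget type and the _E converter macros above let a codec driver attach event callbacks to supplies, DACs and ADCs. A sketch of how a widget table might use them (register addresses, shifts and the event handler are placeholders, not taken from any real codec):

    #include <sound/soc-dapm.h>

    static int example_clk_event(struct snd_soc_dapm_widget *w,
                                 struct snd_kcontrol *kcontrol, int event)
    {
            /* enable the clock on SND_SOC_DAPM_PRE_PMU, gate it on POST_PMD */
            return 0;
    }

    static const struct snd_soc_dapm_widget example_widgets[] = {
            SND_SOC_DAPM_SUPPLY("CLK", 0x04, 0, 0, example_clk_event,
                                SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
            SND_SOC_DAPM_DAC_E("DAC", "Playback", 0x08, 1, 0, NULL, 0),
    };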
diff --git a/include/sound/soc.h b/include/sound/soc.h index a40bc6f316fc..cf6111d72b17 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h | |||
@@ -118,6 +118,14 @@ | |||
118 | .info = snd_soc_info_volsw, \ | 118 | .info = snd_soc_info_volsw, \ |
119 | .get = xhandler_get, .put = xhandler_put, \ | 119 | .get = xhandler_get, .put = xhandler_put, \ |
120 | .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) } | 120 | .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) } |
121 | #define SOC_DOUBLE_EXT(xname, xreg, shift_left, shift_right, xmax, xinvert,\ | ||
122 | xhandler_get, xhandler_put) \ | ||
123 | { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\ | ||
124 | .info = snd_soc_info_volsw, \ | ||
125 | .get = xhandler_get, .put = xhandler_put, \ | ||
126 | .private_value = (unsigned long)&(struct soc_mixer_control) \ | ||
127 | {.reg = xreg, .shift = shift_left, .rshift = shift_right, \ | ||
128 | .max = xmax, .invert = xinvert} } | ||
121 | #define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\ | 129 | #define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\ |
122 | xhandler_get, xhandler_put, tlv_array) \ | 130 | xhandler_get, xhandler_put, tlv_array) \ |
123 | { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ | 131 | { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \ |
@@ -206,10 +214,6 @@ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count, | |||
206 | struct snd_soc_jack_gpio *gpios); | 214 | struct snd_soc_jack_gpio *gpios); |
207 | #endif | 215 | #endif |
208 | 216 | ||
209 | /* codec IO */ | ||
210 | #define snd_soc_read(codec, reg) codec->read(codec, reg) | ||
211 | #define snd_soc_write(codec, reg, value) codec->write(codec, reg, value) | ||
212 | |||
213 | /* codec register bit access */ | 217 | /* codec register bit access */ |
214 | int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg, | 218 | int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg, |
215 | unsigned short mask, unsigned short value); | 219 | unsigned short mask, unsigned short value); |
@@ -331,6 +335,7 @@ struct snd_soc_codec { | |||
331 | struct module *owner; | 335 | struct module *owner; |
332 | struct mutex mutex; | 336 | struct mutex mutex; |
333 | struct device *dev; | 337 | struct device *dev; |
338 | struct snd_soc_device *socdev; | ||
334 | 339 | ||
335 | struct list_head list; | 340 | struct list_head list; |
336 | 341 | ||
@@ -364,6 +369,8 @@ struct snd_soc_codec { | |||
364 | enum snd_soc_bias_level bias_level; | 369 | enum snd_soc_bias_level bias_level; |
365 | enum snd_soc_bias_level suspend_bias_level; | 370 | enum snd_soc_bias_level suspend_bias_level; |
366 | struct delayed_work delayed_work; | 371 | struct delayed_work delayed_work; |
372 | struct list_head up_list; | ||
373 | struct list_head down_list; | ||
367 | 374 | ||
368 | /* codec DAI's */ | 375 | /* codec DAI's */ |
369 | struct snd_soc_dai *dai; | 376 | struct snd_soc_dai *dai; |
@@ -417,6 +424,12 @@ struct snd_soc_dai_link { | |||
417 | /* codec/machine specific init - e.g. add machine controls */ | 424 | /* codec/machine specific init - e.g. add machine controls */ |
418 | int (*init)(struct snd_soc_codec *codec); | 425 | int (*init)(struct snd_soc_codec *codec); |
419 | 426 | ||
427 | /* Symmetry requirements */ | ||
428 | unsigned int symmetric_rates:1; | ||
429 | |||
430 | /* Symmetry data - only valid if symmetry is being enforced */ | ||
431 | unsigned int rate; | ||
432 | |||
420 | /* DAI pcm */ | 433 | /* DAI pcm */ |
421 | struct snd_pcm *pcm; | 434 | struct snd_pcm *pcm; |
422 | }; | 435 | }; |
@@ -490,6 +503,19 @@ struct soc_enum { | |||
490 | void *dapm; | 503 | void *dapm; |
491 | }; | 504 | }; |
492 | 505 | ||
506 | /* codec IO */ | ||
507 | static inline unsigned int snd_soc_read(struct snd_soc_codec *codec, | ||
508 | unsigned int reg) | ||
509 | { | ||
510 | return codec->read(codec, reg); | ||
511 | } | ||
512 | |||
513 | static inline unsigned int snd_soc_write(struct snd_soc_codec *codec, | ||
514 | unsigned int reg, unsigned int val) | ||
515 | { | ||
516 | return codec->write(codec, reg, val); | ||
517 | } | ||
518 | |||
493 | #include <sound/soc-dai.h> | 519 | #include <sound/soc-dai.h> |
494 | 520 | ||
495 | #endif | 521 | #endif |
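snd_soc_read()/snd_soc_write() are now proper inline functions at the end of the header rather than macros, so the codec argument and return values are type-checked. A sketch of the usual read-modify-write pattern built on them (register and bit values are illustrative; snd_soc_update_bits(), declared earlier in this header, covers the same pattern for drivers):

    #include <sound/soc.h>

    static void example_set_mute(struct snd_soc_codec *codec)
    {
            unsigned int val = snd_soc_read(codec, 0x02);

            snd_soc_write(codec, 0x02, val | 0x0001);   /* set a mute bit */
    }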
diff --git a/include/sound/wm9081.h b/include/sound/wm9081.h new file mode 100644 index 000000000000..e173ddbf6bd4 --- /dev/null +++ b/include/sound/wm9081.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * linux/sound/wm9081.h -- Platform data for WM9081 | ||
3 | * | ||
4 | * Copyright 2009 Wolfson Microelectronics. PLC. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #ifndef __LINUX_SND_WM_9081_H | ||
12 | #define __LINUX_SND_WM_9081_H | ||
13 | |||
14 | struct wm9081_retune_mobile_setting { | ||
15 | const char *name; | ||
16 | unsigned int rate; | ||
17 | u16 config[20]; | ||
18 | }; | ||
19 | |||
20 | struct wm9081_retune_mobile_config { | ||
21 | struct wm9081_retune_mobile_setting *configs; | ||
22 | int num_configs; | ||
23 | }; | ||
24 | |||
25 | #endif | ||
diff --git a/include/trace/block.h b/include/trace/block.h deleted file mode 100644 index 25b7068b819e..000000000000 --- a/include/trace/block.h +++ /dev/null | |||
@@ -1,76 +0,0 @@ | |||
1 | #ifndef _TRACE_BLOCK_H | ||
2 | #define _TRACE_BLOCK_H | ||
3 | |||
4 | #include <linux/blkdev.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | DECLARE_TRACE(block_rq_abort, | ||
8 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
9 | TP_ARGS(q, rq)); | ||
10 | |||
11 | DECLARE_TRACE(block_rq_insert, | ||
12 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
13 | TP_ARGS(q, rq)); | ||
14 | |||
15 | DECLARE_TRACE(block_rq_issue, | ||
16 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
17 | TP_ARGS(q, rq)); | ||
18 | |||
19 | DECLARE_TRACE(block_rq_requeue, | ||
20 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
21 | TP_ARGS(q, rq)); | ||
22 | |||
23 | DECLARE_TRACE(block_rq_complete, | ||
24 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
25 | TP_ARGS(q, rq)); | ||
26 | |||
27 | DECLARE_TRACE(block_bio_bounce, | ||
28 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
29 | TP_ARGS(q, bio)); | ||
30 | |||
31 | DECLARE_TRACE(block_bio_complete, | ||
32 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
33 | TP_ARGS(q, bio)); | ||
34 | |||
35 | DECLARE_TRACE(block_bio_backmerge, | ||
36 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
37 | TP_ARGS(q, bio)); | ||
38 | |||
39 | DECLARE_TRACE(block_bio_frontmerge, | ||
40 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
41 | TP_ARGS(q, bio)); | ||
42 | |||
43 | DECLARE_TRACE(block_bio_queue, | ||
44 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
45 | TP_ARGS(q, bio)); | ||
46 | |||
47 | DECLARE_TRACE(block_getrq, | ||
48 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | ||
49 | TP_ARGS(q, bio, rw)); | ||
50 | |||
51 | DECLARE_TRACE(block_sleeprq, | ||
52 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | ||
53 | TP_ARGS(q, bio, rw)); | ||
54 | |||
55 | DECLARE_TRACE(block_plug, | ||
56 | TP_PROTO(struct request_queue *q), | ||
57 | TP_ARGS(q)); | ||
58 | |||
59 | DECLARE_TRACE(block_unplug_timer, | ||
60 | TP_PROTO(struct request_queue *q), | ||
61 | TP_ARGS(q)); | ||
62 | |||
63 | DECLARE_TRACE(block_unplug_io, | ||
64 | TP_PROTO(struct request_queue *q), | ||
65 | TP_ARGS(q)); | ||
66 | |||
67 | DECLARE_TRACE(block_split, | ||
68 | TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu), | ||
69 | TP_ARGS(q, bio, pdu)); | ||
70 | |||
71 | DECLARE_TRACE(block_remap, | ||
72 | TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, | ||
73 | sector_t from, sector_t to), | ||
74 | TP_ARGS(q, bio, dev, from, to)); | ||
75 | |||
76 | #endif | ||
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h new file mode 100644 index 000000000000..f7a7ae1e8f90 --- /dev/null +++ b/include/trace/define_trace.h | |||
@@ -0,0 +1,75 @@ | |||
1 | /* | ||
2 | * Trace files that want to automate creation of all tracepoints defined | ||
3 | * in their file should include this file. The following are macros that the | ||
4 | * trace file may define: | ||
5 | * | ||
6 | * TRACE_SYSTEM defines the system the tracepoint is for | ||
7 | * | ||
8 | * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h | ||
9 | * This macro may be defined to tell define_trace.h what file to include. | ||
10 | * Note, leave off the ".h". | ||
11 | * | ||
12 | * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace | ||
13 | * then this macro can define the path to use. Note, the path is relative to | ||
14 | * define_trace.h, not the file including it. Full path names for out of tree | ||
15 | * modules must be used. | ||
16 | */ | ||
17 | |||
18 | #ifdef CREATE_TRACE_POINTS | ||
19 | |||
20 | /* Prevent recursion */ | ||
21 | #undef CREATE_TRACE_POINTS | ||
22 | |||
23 | #include <linux/stringify.h> | ||
24 | |||
25 | #undef TRACE_EVENT | ||
26 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | ||
27 | DEFINE_TRACE(name) | ||
28 | |||
29 | #undef DECLARE_TRACE | ||
30 | #define DECLARE_TRACE(name, proto, args) \ | ||
31 | DEFINE_TRACE(name) | ||
32 | |||
33 | #undef TRACE_INCLUDE | ||
34 | #undef __TRACE_INCLUDE | ||
35 | |||
36 | #ifndef TRACE_INCLUDE_FILE | ||
37 | # define TRACE_INCLUDE_FILE TRACE_SYSTEM | ||
38 | # define UNDEF_TRACE_INCLUDE_FILE | ||
39 | #endif | ||
40 | |||
41 | #ifndef TRACE_INCLUDE_PATH | ||
42 | # define __TRACE_INCLUDE(system) <trace/events/system.h> | ||
43 | # define UNDEF_TRACE_INCLUDE_PATH | ||
44 | #else | ||
45 | # define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h) | ||
46 | #endif | ||
47 | |||
48 | # define TRACE_INCLUDE(system) __TRACE_INCLUDE(system) | ||
49 | |||
50 | /* Let the trace headers be reread */ | ||
51 | #define TRACE_HEADER_MULTI_READ | ||
52 | |||
53 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
54 | |||
55 | #ifdef CONFIG_EVENT_TRACING | ||
56 | #include <trace/ftrace.h> | ||
57 | #endif | ||
58 | |||
59 | #undef TRACE_HEADER_MULTI_READ | ||
60 | |||
61 | /* Only undef what we defined in this file */ | ||
62 | #ifdef UNDEF_TRACE_INCLUDE_FILE | ||
63 | # undef TRACE_INCLUDE_FILE | ||
64 | # undef UNDEF_TRACE_INCLUDE_FILE | ||
65 | #endif | ||
66 | |||
67 | #ifdef UNDEF_TRACE_INCLUDE_PATH | ||
68 | # undef TRACE_INCLUDE_PATH | ||
69 | # undef UNDEF_TRACE_INCLUDE_PATH | ||
70 | #endif | ||
71 | |||
72 | /* We may be processing more files */ | ||
73 | #define CREATE_TRACE_POINTS | ||
74 | |||
75 | #endif /* CREATE_TRACE_POINTS */ | ||
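As the header comment above describes, exactly one compilation unit per subsystem instantiates the tracepoints by defining CREATE_TRACE_POINTS before including its event header; every other user includes the header normally. A minimal sketch (using the block events added below as the example system):

    /* In one .c file of the subsystem only: */
    #define CREATE_TRACE_POINTS
    #include <trace/events/block.h>     /* expands to DEFINE_TRACE() for each event */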
diff --git a/include/trace/events/block.h b/include/trace/events/block.h new file mode 100644 index 000000000000..d6b05f42dd44 --- /dev/null +++ b/include/trace/events/block.h | |||
@@ -0,0 +1,493 @@ | |||
1 | #if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_BLOCK_H | ||
3 | |||
4 | #include <linux/blktrace_api.h> | ||
5 | #include <linux/blkdev.h> | ||
6 | #include <linux/tracepoint.h> | ||
7 | |||
8 | #undef TRACE_SYSTEM | ||
9 | #define TRACE_SYSTEM block | ||
10 | |||
11 | TRACE_EVENT(block_rq_abort, | ||
12 | |||
13 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
14 | |||
15 | TP_ARGS(q, rq), | ||
16 | |||
17 | TP_STRUCT__entry( | ||
18 | __field( dev_t, dev ) | ||
19 | __field( sector_t, sector ) | ||
20 | __field( unsigned int, nr_sector ) | ||
21 | __field( int, errors ) | ||
22 | __array( char, rwbs, 6 ) | ||
23 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
24 | ), | ||
25 | |||
26 | TP_fast_assign( | ||
27 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
28 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
29 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
30 | __entry->errors = rq->errors; | ||
31 | |||
32 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
33 | blk_dump_cmd(__get_str(cmd), rq); | ||
34 | ), | ||
35 | |||
36 | TP_printk("%d,%d %s (%s) %llu + %u [%d]", | ||
37 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
38 | __entry->rwbs, __get_str(cmd), | ||
39 | (unsigned long long)__entry->sector, | ||
40 | __entry->nr_sector, __entry->errors) | ||
41 | ); | ||
42 | |||
43 | TRACE_EVENT(block_rq_insert, | ||
44 | |||
45 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
46 | |||
47 | TP_ARGS(q, rq), | ||
48 | |||
49 | TP_STRUCT__entry( | ||
50 | __field( dev_t, dev ) | ||
51 | __field( sector_t, sector ) | ||
52 | __field( unsigned int, nr_sector ) | ||
53 | __field( unsigned int, bytes ) | ||
54 | __array( char, rwbs, 6 ) | ||
55 | __array( char, comm, TASK_COMM_LEN ) | ||
56 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
57 | ), | ||
58 | |||
59 | TP_fast_assign( | ||
60 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
61 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
62 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
63 | __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; | ||
64 | |||
65 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
66 | blk_dump_cmd(__get_str(cmd), rq); | ||
67 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
68 | ), | ||
69 | |||
70 | TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", | ||
71 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
72 | __entry->rwbs, __entry->bytes, __get_str(cmd), | ||
73 | (unsigned long long)__entry->sector, | ||
74 | __entry->nr_sector, __entry->comm) | ||
75 | ); | ||
76 | |||
77 | TRACE_EVENT(block_rq_issue, | ||
78 | |||
79 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
80 | |||
81 | TP_ARGS(q, rq), | ||
82 | |||
83 | TP_STRUCT__entry( | ||
84 | __field( dev_t, dev ) | ||
85 | __field( sector_t, sector ) | ||
86 | __field( unsigned int, nr_sector ) | ||
87 | __field( unsigned int, bytes ) | ||
88 | __array( char, rwbs, 6 ) | ||
89 | __array( char, comm, TASK_COMM_LEN ) | ||
90 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
91 | ), | ||
92 | |||
93 | TP_fast_assign( | ||
94 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
95 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
96 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
97 | __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; | ||
98 | |||
99 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
100 | blk_dump_cmd(__get_str(cmd), rq); | ||
101 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
102 | ), | ||
103 | |||
104 | TP_printk("%d,%d %s %u (%s) %llu + %u [%s]", | ||
105 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
106 | __entry->rwbs, __entry->bytes, __get_str(cmd), | ||
107 | (unsigned long long)__entry->sector, | ||
108 | __entry->nr_sector, __entry->comm) | ||
109 | ); | ||
110 | |||
111 | TRACE_EVENT(block_rq_requeue, | ||
112 | |||
113 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
114 | |||
115 | TP_ARGS(q, rq), | ||
116 | |||
117 | TP_STRUCT__entry( | ||
118 | __field( dev_t, dev ) | ||
119 | __field( sector_t, sector ) | ||
120 | __field( unsigned int, nr_sector ) | ||
121 | __field( int, errors ) | ||
122 | __array( char, rwbs, 6 ) | ||
123 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
124 | ), | ||
125 | |||
126 | TP_fast_assign( | ||
127 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
128 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
129 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
130 | __entry->errors = rq->errors; | ||
131 | |||
132 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
133 | blk_dump_cmd(__get_str(cmd), rq); | ||
134 | ), | ||
135 | |||
136 | TP_printk("%d,%d %s (%s) %llu + %u [%d]", | ||
137 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
138 | __entry->rwbs, __get_str(cmd), | ||
139 | (unsigned long long)__entry->sector, | ||
140 | __entry->nr_sector, __entry->errors) | ||
141 | ); | ||
142 | |||
143 | TRACE_EVENT(block_rq_complete, | ||
144 | |||
145 | TP_PROTO(struct request_queue *q, struct request *rq), | ||
146 | |||
147 | TP_ARGS(q, rq), | ||
148 | |||
149 | TP_STRUCT__entry( | ||
150 | __field( dev_t, dev ) | ||
151 | __field( sector_t, sector ) | ||
152 | __field( unsigned int, nr_sector ) | ||
153 | __field( int, errors ) | ||
154 | __array( char, rwbs, 6 ) | ||
155 | __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) | ||
156 | ), | ||
157 | |||
158 | TP_fast_assign( | ||
159 | __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; | ||
160 | __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); | ||
161 | __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); | ||
162 | __entry->errors = rq->errors; | ||
163 | |||
164 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
165 | blk_dump_cmd(__get_str(cmd), rq); | ||
166 | ), | ||
167 | |||
168 | TP_printk("%d,%d %s (%s) %llu + %u [%d]", | ||
169 | MAJOR(__entry->dev), MINOR(__entry->dev), | ||
170 | __entry->rwbs, __get_str(cmd), | ||
171 | (unsigned long long)__entry->sector, | ||
172 | __entry->nr_sector, __entry->errors) | ||
173 | ); | ||
174 | TRACE_EVENT(block_bio_bounce, | ||
175 | |||
176 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
177 | |||
178 | TP_ARGS(q, bio), | ||
179 | |||
180 | TP_STRUCT__entry( | ||
181 | __field( dev_t, dev ) | ||
182 | __field( sector_t, sector ) | ||
183 | __field( unsigned int, nr_sector ) | ||
184 | __array( char, rwbs, 6 ) | ||
185 | __array( char, comm, TASK_COMM_LEN ) | ||
186 | ), | ||
187 | |||
188 | TP_fast_assign( | ||
189 | __entry->dev = bio->bi_bdev->bd_dev; | ||
190 | __entry->sector = bio->bi_sector; | ||
191 | __entry->nr_sector = bio->bi_size >> 9; | ||
192 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
193 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
194 | ), | ||
195 | |||
196 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
197 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
198 | (unsigned long long)__entry->sector, | ||
199 | __entry->nr_sector, __entry->comm) | ||
200 | ); | ||
201 | |||
202 | TRACE_EVENT(block_bio_complete, | ||
203 | |||
204 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
205 | |||
206 | TP_ARGS(q, bio), | ||
207 | |||
208 | TP_STRUCT__entry( | ||
209 | __field( dev_t, dev ) | ||
210 | __field( sector_t, sector ) | ||
211 | __field( unsigned, nr_sector ) | ||
212 | __field( int, error ) | ||
213 | __array( char, rwbs, 6 ) | ||
214 | ), | ||
215 | |||
216 | TP_fast_assign( | ||
217 | __entry->dev = bio->bi_bdev->bd_dev; | ||
218 | __entry->sector = bio->bi_sector; | ||
219 | __entry->nr_sector = bio->bi_size >> 9; | ||
220 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
221 | ), | ||
222 | |||
223 | TP_printk("%d,%d %s %llu + %u [%d]", | ||
224 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
225 | (unsigned long long)__entry->sector, | ||
226 | __entry->nr_sector, __entry->error) | ||
227 | ); | ||
228 | |||
229 | TRACE_EVENT(block_bio_backmerge, | ||
230 | |||
231 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
232 | |||
233 | TP_ARGS(q, bio), | ||
234 | |||
235 | TP_STRUCT__entry( | ||
236 | __field( dev_t, dev ) | ||
237 | __field( sector_t, sector ) | ||
238 | __field( unsigned int, nr_sector ) | ||
239 | __array( char, rwbs, 6 ) | ||
240 | __array( char, comm, TASK_COMM_LEN ) | ||
241 | ), | ||
242 | |||
243 | TP_fast_assign( | ||
244 | __entry->dev = bio->bi_bdev->bd_dev; | ||
245 | __entry->sector = bio->bi_sector; | ||
246 | __entry->nr_sector = bio->bi_size >> 9; | ||
247 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
248 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
249 | ), | ||
250 | |||
251 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
252 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
253 | (unsigned long long)__entry->sector, | ||
254 | __entry->nr_sector, __entry->comm) | ||
255 | ); | ||
256 | |||
257 | TRACE_EVENT(block_bio_frontmerge, | ||
258 | |||
259 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
260 | |||
261 | TP_ARGS(q, bio), | ||
262 | |||
263 | TP_STRUCT__entry( | ||
264 | __field( dev_t, dev ) | ||
265 | __field( sector_t, sector ) | ||
266 | __field( unsigned, nr_sector ) | ||
267 | __array( char, rwbs, 6 ) | ||
268 | __array( char, comm, TASK_COMM_LEN ) | ||
269 | ), | ||
270 | |||
271 | TP_fast_assign( | ||
272 | __entry->dev = bio->bi_bdev->bd_dev; | ||
273 | __entry->sector = bio->bi_sector; | ||
274 | __entry->nr_sector = bio->bi_size >> 9; | ||
275 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
276 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
277 | ), | ||
278 | |||
279 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
280 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
281 | (unsigned long long)__entry->sector, | ||
282 | __entry->nr_sector, __entry->comm) | ||
283 | ); | ||
284 | |||
285 | TRACE_EVENT(block_bio_queue, | ||
286 | |||
287 | TP_PROTO(struct request_queue *q, struct bio *bio), | ||
288 | |||
289 | TP_ARGS(q, bio), | ||
290 | |||
291 | TP_STRUCT__entry( | ||
292 | __field( dev_t, dev ) | ||
293 | __field( sector_t, sector ) | ||
294 | __field( unsigned int, nr_sector ) | ||
295 | __array( char, rwbs, 6 ) | ||
296 | __array( char, comm, TASK_COMM_LEN ) | ||
297 | ), | ||
298 | |||
299 | TP_fast_assign( | ||
300 | __entry->dev = bio->bi_bdev->bd_dev; | ||
301 | __entry->sector = bio->bi_sector; | ||
302 | __entry->nr_sector = bio->bi_size >> 9; | ||
303 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
304 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
305 | ), | ||
306 | |||
307 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
308 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
309 | (unsigned long long)__entry->sector, | ||
310 | __entry->nr_sector, __entry->comm) | ||
311 | ); | ||
312 | |||
313 | TRACE_EVENT(block_getrq, | ||
314 | |||
315 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | ||
316 | |||
317 | TP_ARGS(q, bio, rw), | ||
318 | |||
319 | TP_STRUCT__entry( | ||
320 | __field( dev_t, dev ) | ||
321 | __field( sector_t, sector ) | ||
322 | __field( unsigned int, nr_sector ) | ||
323 | __array( char, rwbs, 6 ) | ||
324 | __array( char, comm, TASK_COMM_LEN ) | ||
325 | ), | ||
326 | |||
327 | TP_fast_assign( | ||
328 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; | ||
329 | __entry->sector = bio ? bio->bi_sector : 0; | ||
330 | __entry->nr_sector = bio ? bio->bi_size >> 9 : 0; | ||
331 | blk_fill_rwbs(__entry->rwbs, | ||
332 | bio ? bio->bi_rw : 0, __entry->nr_sector); | ||
333 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
334 | ), | ||
335 | |||
336 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
337 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
338 | (unsigned long long)__entry->sector, | ||
339 | __entry->nr_sector, __entry->comm) | ||
340 | ); | ||
341 | |||
342 | TRACE_EVENT(block_sleeprq, | ||
343 | |||
344 | TP_PROTO(struct request_queue *q, struct bio *bio, int rw), | ||
345 | |||
346 | TP_ARGS(q, bio, rw), | ||
347 | |||
348 | TP_STRUCT__entry( | ||
349 | __field( dev_t, dev ) | ||
350 | __field( sector_t, sector ) | ||
351 | __field( unsigned int, nr_sector ) | ||
352 | __array( char, rwbs, 6 ) | ||
353 | __array( char, comm, TASK_COMM_LEN ) | ||
354 | ), | ||
355 | |||
356 | TP_fast_assign( | ||
357 | __entry->dev = bio ? bio->bi_bdev->bd_dev : 0; | ||
358 | __entry->sector = bio ? bio->bi_sector : 0; | ||
359 | __entry->nr_sector = bio ? bio->bi_size >> 9 : 0; | ||
360 | blk_fill_rwbs(__entry->rwbs, | ||
361 | bio ? bio->bi_rw : 0, __entry->nr_sector); | ||
362 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
363 | ), | ||
364 | |||
365 | TP_printk("%d,%d %s %llu + %u [%s]", | ||
366 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
367 | (unsigned long long)__entry->sector, | ||
368 | __entry->nr_sector, __entry->comm) | ||
369 | ); | ||
370 | |||
371 | TRACE_EVENT(block_plug, | ||
372 | |||
373 | TP_PROTO(struct request_queue *q), | ||
374 | |||
375 | TP_ARGS(q), | ||
376 | |||
377 | TP_STRUCT__entry( | ||
378 | __array( char, comm, TASK_COMM_LEN ) | ||
379 | ), | ||
380 | |||
381 | TP_fast_assign( | ||
382 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
383 | ), | ||
384 | |||
385 | TP_printk("[%s]", __entry->comm) | ||
386 | ); | ||
387 | |||
388 | TRACE_EVENT(block_unplug_timer, | ||
389 | |||
390 | TP_PROTO(struct request_queue *q), | ||
391 | |||
392 | TP_ARGS(q), | ||
393 | |||
394 | TP_STRUCT__entry( | ||
395 | __field( int, nr_rq ) | ||
396 | __array( char, comm, TASK_COMM_LEN ) | ||
397 | ), | ||
398 | |||
399 | TP_fast_assign( | ||
400 | __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; | ||
401 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
402 | ), | ||
403 | |||
404 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) | ||
405 | ); | ||
406 | |||
407 | TRACE_EVENT(block_unplug_io, | ||
408 | |||
409 | TP_PROTO(struct request_queue *q), | ||
410 | |||
411 | TP_ARGS(q), | ||
412 | |||
413 | TP_STRUCT__entry( | ||
414 | __field( int, nr_rq ) | ||
415 | __array( char, comm, TASK_COMM_LEN ) | ||
416 | ), | ||
417 | |||
418 | TP_fast_assign( | ||
419 | __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE]; | ||
420 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
421 | ), | ||
422 | |||
423 | TP_printk("[%s] %d", __entry->comm, __entry->nr_rq) | ||
424 | ); | ||
425 | |||
426 | TRACE_EVENT(block_split, | ||
427 | |||
428 | TP_PROTO(struct request_queue *q, struct bio *bio, | ||
429 | unsigned int new_sector), | ||
430 | |||
431 | TP_ARGS(q, bio, new_sector), | ||
432 | |||
433 | TP_STRUCT__entry( | ||
434 | __field( dev_t, dev ) | ||
435 | __field( sector_t, sector ) | ||
436 | __field( sector_t, new_sector ) | ||
437 | __array( char, rwbs, 6 ) | ||
438 | __array( char, comm, TASK_COMM_LEN ) | ||
439 | ), | ||
440 | |||
441 | TP_fast_assign( | ||
442 | __entry->dev = bio->bi_bdev->bd_dev; | ||
443 | __entry->sector = bio->bi_sector; | ||
444 | __entry->new_sector = new_sector; | ||
445 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
446 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | ||
447 | ), | ||
448 | |||
449 | TP_printk("%d,%d %s %llu / %llu [%s]", | ||
450 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
451 | (unsigned long long)__entry->sector, | ||
452 | (unsigned long long)__entry->new_sector, | ||
453 | __entry->comm) | ||
454 | ); | ||
455 | |||
456 | TRACE_EVENT(block_remap, | ||
457 | |||
458 | TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, | ||
459 | sector_t from), | ||
460 | |||
461 | TP_ARGS(q, bio, dev, from), | ||
462 | |||
463 | TP_STRUCT__entry( | ||
464 | __field( dev_t, dev ) | ||
465 | __field( sector_t, sector ) | ||
466 | __field( unsigned int, nr_sector ) | ||
467 | __field( dev_t, old_dev ) | ||
468 | __field( sector_t, old_sector ) | ||
469 | __array( char, rwbs, 6 ) | ||
470 | ), | ||
471 | |||
472 | TP_fast_assign( | ||
473 | __entry->dev = bio->bi_bdev->bd_dev; | ||
474 | __entry->sector = bio->bi_sector; | ||
475 | __entry->nr_sector = bio->bi_size >> 9; | ||
476 | __entry->old_dev = dev; | ||
477 | __entry->old_sector = from; | ||
478 | blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size); | ||
479 | ), | ||
480 | |||
481 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", | ||
482 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
483 | (unsigned long long)__entry->sector, | ||
484 | __entry->nr_sector, | ||
485 | MAJOR(__entry->old_dev), MINOR(__entry->old_dev), | ||
486 | (unsigned long long)__entry->old_sector) | ||
487 | ); | ||
488 | |||
489 | #endif /* _TRACE_BLOCK_H */ | ||
490 | |||
491 | /* This part must be outside protection */ | ||
492 | #include <trace/define_trace.h> | ||
493 | |||
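Each TRACE_EVENT(name, ...) above also generates a trace_name() inline that call sites invoke with the TP_PROTO arguments. A sketch of how the block layer would fire one of these events (the surrounding function is illustrative only):

    #include <trace/events/block.h>

    static void example_queue_bio(struct request_queue *q, struct bio *bio)
    {
            trace_block_bio_queue(q, bio);
            /* ... pass the bio on to the elevator/driver ... */
    }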
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h new file mode 100644 index 000000000000..b0c7ede55eb1 --- /dev/null +++ b/include/trace/events/irq.h | |||
@@ -0,0 +1,145 @@ | |||
1 | #if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_IRQ_H | ||
3 | |||
4 | #include <linux/tracepoint.h> | ||
5 | #include <linux/interrupt.h> | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM irq | ||
9 | |||
10 | #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq } | ||
11 | #define show_softirq_name(val) \ | ||
12 | __print_symbolic(val, \ | ||
13 | softirq_name(HI), \ | ||
14 | softirq_name(TIMER), \ | ||
15 | softirq_name(NET_TX), \ | ||
16 | softirq_name(NET_RX), \ | ||
17 | softirq_name(BLOCK), \ | ||
18 | softirq_name(TASKLET), \ | ||
19 | softirq_name(SCHED), \ | ||
20 | softirq_name(HRTIMER), \ | ||
21 | softirq_name(RCU)) | ||
22 | |||
23 | /** | ||
24 | * irq_handler_entry - called immediately before the irq action handler | ||
25 | * @irq: irq number | ||
26 | * @action: pointer to struct irqaction | ||
27 | * | ||
28 | * The struct irqaction pointed to by @action contains various | ||
29 | * information about the handler, including the device name, | ||
30 | * @action->name, and the device id, @action->dev_id. When used in | ||
31 | * conjunction with the irq_handler_exit tracepoint, we can figure | ||
32 | * out irq handler latencies. | ||
33 | */ | ||
34 | TRACE_EVENT(irq_handler_entry, | ||
35 | |||
36 | TP_PROTO(int irq, struct irqaction *action), | ||
37 | |||
38 | TP_ARGS(irq, action), | ||
39 | |||
40 | TP_STRUCT__entry( | ||
41 | __field( int, irq ) | ||
42 | __string( name, action->name ) | ||
43 | ), | ||
44 | |||
45 | TP_fast_assign( | ||
46 | __entry->irq = irq; | ||
47 | __assign_str(name, action->name); | ||
48 | ), | ||
49 | |||
50 | TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name)) | ||
51 | ); | ||
52 | |||
53 | /** | ||
54 | * irq_handler_exit - called immediately after the irq action handler returns | ||
55 | * @irq: irq number | ||
56 | * @action: pointer to struct irqaction | ||
57 | * @ret: return value | ||
58 | * | ||
59 | * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding | ||
60 | * @action->handler successfully handled this irq. Otherwise, the irq might be | ||
61 | * a shared irq line, or the irq was not handled successfully. Can be used in | ||
62 | * conjunction with the irq_handler_entry to understand irq handler latencies. | ||
63 | */ | ||
64 | TRACE_EVENT(irq_handler_exit, | ||
65 | |||
66 | TP_PROTO(int irq, struct irqaction *action, int ret), | ||
67 | |||
68 | TP_ARGS(irq, action, ret), | ||
69 | |||
70 | TP_STRUCT__entry( | ||
71 | __field( int, irq ) | ||
72 | __field( int, ret ) | ||
73 | ), | ||
74 | |||
75 | TP_fast_assign( | ||
76 | __entry->irq = irq; | ||
77 | __entry->ret = ret; | ||
78 | ), | ||
79 | |||
80 | TP_printk("irq=%d return=%s", | ||
81 | __entry->irq, __entry->ret ? "handled" : "unhandled") | ||
82 | ); | ||
83 | |||
84 | /** | ||
85 | * softirq_entry - called immediately before the softirq handler | ||
86 | * @h: pointer to struct softirq_action | ||
87 | * @vec: pointer to first struct softirq_action in softirq_vec array | ||
88 | * | ||
89 | * The @h parameter contains a pointer to the struct softirq_action | ||
90 | * which has a pointer to the action handler that is called. By subtracting | ||
91 | * the @vec pointer from the @h pointer, we can determine the softirq | ||
92 | * number. Also, when used in combination with the softirq_exit tracepoint | ||
93 | * we can determine the softirq latency. | ||
94 | */ | ||
95 | TRACE_EVENT(softirq_entry, | ||
96 | |||
97 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | ||
98 | |||
99 | TP_ARGS(h, vec), | ||
100 | |||
101 | TP_STRUCT__entry( | ||
102 | __field( int, vec ) | ||
103 | ), | ||
104 | |||
105 | TP_fast_assign( | ||
106 | __entry->vec = (int)(h - vec); | ||
107 | ), | ||
108 | |||
109 | TP_printk("softirq=%d action=%s", __entry->vec, | ||
110 | show_softirq_name(__entry->vec)) | ||
111 | ); | ||
112 | |||
113 | /** | ||
114 | * softirq_exit - called immediately after the softirq handler returns | ||
115 | * @h: pointer to struct softirq_action | ||
116 | * @vec: pointer to first struct softirq_action in softirq_vec array | ||
117 | * | ||
118 | * The @h parameter contains a pointer to the struct softirq_action | ||
119 | * that has handled the softirq. By subtracting the @vec pointer from | ||
120 | * the @h pointer, we can determine the softirq number. Also, when used in | ||
121 | * combination with the softirq_entry tracepoint we can determine the softirq | ||
122 | * latency. | ||
123 | */ | ||
124 | TRACE_EVENT(softirq_exit, | ||
125 | |||
126 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | ||
127 | |||
128 | TP_ARGS(h, vec), | ||
129 | |||
130 | TP_STRUCT__entry( | ||
131 | __field( int, vec ) | ||
132 | ), | ||
133 | |||
134 | TP_fast_assign( | ||
135 | __entry->vec = (int)(h - vec); | ||
136 | ), | ||
137 | |||
138 | TP_printk("softirq=%d action=%s", __entry->vec, | ||
139 | show_softirq_name(__entry->vec)) | ||
140 | ); | ||
141 | |||
142 | #endif /* _TRACE_IRQ_H */ | ||
143 | |||
144 | /* This part must be outside protection */ | ||
145 | #include <trace/define_trace.h> | ||
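As the kerneldoc above suggests, handler latency is measured by pairing the entry and exit events around the handler invocation. A sketch of that pairing (illustrative only; the real call sites live in the generic IRQ and softirq code, not in this header):

    #include <trace/events/irq.h>

    static irqreturn_t example_invoke(int irq, struct irqaction *action)
    {
            irqreturn_t ret;

            trace_irq_handler_entry(irq, action);
            ret = action->handler(irq, action->dev_id);
            trace_irq_handler_exit(irq, action, ret);
            return ret;
    }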
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h new file mode 100644 index 000000000000..9baba50d6512 --- /dev/null +++ b/include/trace/events/kmem.h | |||
@@ -0,0 +1,231 @@ | |||
1 | #if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_KMEM_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM kmem | ||
9 | |||
10 | /* | ||
11 | * The order of these masks is important. Matching masks will be seen | ||
12 | * first and the left over flags will end up showing by themselves. | ||
13 | * | ||
14 | * For example, if we have GFP_KERNEL before GFP_USER we will get: | ||
15 | * | ||
16 | * GFP_KERNEL|GFP_HARDWALL | ||
17 | * | ||
18 | * Thus the masks with the most bits set go first. | ||
19 | */ | ||
20 | #define show_gfp_flags(flags) \ | ||
21 | (flags) ? __print_flags(flags, "|", \ | ||
22 | {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \ | ||
23 | {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \ | ||
24 | {(unsigned long)GFP_USER, "GFP_USER"}, \ | ||
25 | {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \ | ||
26 | {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \ | ||
27 | {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \ | ||
28 | {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \ | ||
29 | {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \ | ||
30 | {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \ | ||
31 | {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \ | ||
32 | {(unsigned long)__GFP_IO, "GFP_IO"}, \ | ||
33 | {(unsigned long)__GFP_COLD, "GFP_COLD"}, \ | ||
34 | {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \ | ||
35 | {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \ | ||
36 | {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \ | ||
37 | {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \ | ||
38 | {(unsigned long)__GFP_COMP, "GFP_COMP"}, \ | ||
39 | {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \ | ||
40 | {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \ | ||
41 | {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \ | ||
42 | {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \ | ||
43 | {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \ | ||
44 | {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \ | ||
45 | ) : "GFP_NOWAIT" | ||
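The ordering rule described in the comment above is a greedy match: composite masks are tested before their component bits, so only bits not already covered by a named composite are printed on their own. A small self-contained sketch of that decomposition (the flag values are illustrative, not the kernel's gfp bit layout):

    #include <stdio.h>
    #include <stddef.h>

    struct flag_name { unsigned long mask; const char *name; };

    /* most bits set go first, exactly as the comment requires */
    static const struct flag_name names[] = {
        { 0x7, "GFP_KERNEL"   },    /* composite of the three bits below */
        { 0x1, "GFP_WAIT"     },
        { 0x2, "GFP_IO"       },
        { 0x4, "GFP_FS"       },
        { 0x8, "GFP_HARDWALL" },
    };

    static void show_flags(unsigned long flags)
    {
        const char *sep = "";
        for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
            if ((flags & names[i].mask) == names[i].mask) {
                printf("%s%s", sep, names[i].name);
                flags &= ~names[i].mask;
                sep = "|";
            }
        }
        printf("\n");
    }

    int main(void)
    {
        show_flags(0x7 | 0x8);      /* prints: GFP_KERNEL|GFP_HARDWALL */
        return 0;
    }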
46 | |||
47 | TRACE_EVENT(kmalloc, | ||
48 | |||
49 | TP_PROTO(unsigned long call_site, | ||
50 | const void *ptr, | ||
51 | size_t bytes_req, | ||
52 | size_t bytes_alloc, | ||
53 | gfp_t gfp_flags), | ||
54 | |||
55 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), | ||
56 | |||
57 | TP_STRUCT__entry( | ||
58 | __field( unsigned long, call_site ) | ||
59 | __field( const void *, ptr ) | ||
60 | __field( size_t, bytes_req ) | ||
61 | __field( size_t, bytes_alloc ) | ||
62 | __field( gfp_t, gfp_flags ) | ||
63 | ), | ||
64 | |||
65 | TP_fast_assign( | ||
66 | __entry->call_site = call_site; | ||
67 | __entry->ptr = ptr; | ||
68 | __entry->bytes_req = bytes_req; | ||
69 | __entry->bytes_alloc = bytes_alloc; | ||
70 | __entry->gfp_flags = gfp_flags; | ||
71 | ), | ||
72 | |||
73 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", | ||
74 | __entry->call_site, | ||
75 | __entry->ptr, | ||
76 | __entry->bytes_req, | ||
77 | __entry->bytes_alloc, | ||
78 | show_gfp_flags(__entry->gfp_flags)) | ||
79 | ); | ||
80 | |||
81 | TRACE_EVENT(kmem_cache_alloc, | ||
82 | |||
83 | TP_PROTO(unsigned long call_site, | ||
84 | const void *ptr, | ||
85 | size_t bytes_req, | ||
86 | size_t bytes_alloc, | ||
87 | gfp_t gfp_flags), | ||
88 | |||
89 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags), | ||
90 | |||
91 | TP_STRUCT__entry( | ||
92 | __field( unsigned long, call_site ) | ||
93 | __field( const void *, ptr ) | ||
94 | __field( size_t, bytes_req ) | ||
95 | __field( size_t, bytes_alloc ) | ||
96 | __field( gfp_t, gfp_flags ) | ||
97 | ), | ||
98 | |||
99 | TP_fast_assign( | ||
100 | __entry->call_site = call_site; | ||
101 | __entry->ptr = ptr; | ||
102 | __entry->bytes_req = bytes_req; | ||
103 | __entry->bytes_alloc = bytes_alloc; | ||
104 | __entry->gfp_flags = gfp_flags; | ||
105 | ), | ||
106 | |||
107 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s", | ||
108 | __entry->call_site, | ||
109 | __entry->ptr, | ||
110 | __entry->bytes_req, | ||
111 | __entry->bytes_alloc, | ||
112 | show_gfp_flags(__entry->gfp_flags)) | ||
113 | ); | ||
114 | |||
115 | TRACE_EVENT(kmalloc_node, | ||
116 | |||
117 | TP_PROTO(unsigned long call_site, | ||
118 | const void *ptr, | ||
119 | size_t bytes_req, | ||
120 | size_t bytes_alloc, | ||
121 | gfp_t gfp_flags, | ||
122 | int node), | ||
123 | |||
124 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), | ||
125 | |||
126 | TP_STRUCT__entry( | ||
127 | __field( unsigned long, call_site ) | ||
128 | __field( const void *, ptr ) | ||
129 | __field( size_t, bytes_req ) | ||
130 | __field( size_t, bytes_alloc ) | ||
131 | __field( gfp_t, gfp_flags ) | ||
132 | __field( int, node ) | ||
133 | ), | ||
134 | |||
135 | TP_fast_assign( | ||
136 | __entry->call_site = call_site; | ||
137 | __entry->ptr = ptr; | ||
138 | __entry->bytes_req = bytes_req; | ||
139 | __entry->bytes_alloc = bytes_alloc; | ||
140 | __entry->gfp_flags = gfp_flags; | ||
141 | __entry->node = node; | ||
142 | ), | ||
143 | |||
144 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", | ||
145 | __entry->call_site, | ||
146 | __entry->ptr, | ||
147 | __entry->bytes_req, | ||
148 | __entry->bytes_alloc, | ||
149 | show_gfp_flags(__entry->gfp_flags), | ||
150 | __entry->node) | ||
151 | ); | ||
152 | |||
153 | TRACE_EVENT(kmem_cache_alloc_node, | ||
154 | |||
155 | TP_PROTO(unsigned long call_site, | ||
156 | const void *ptr, | ||
157 | size_t bytes_req, | ||
158 | size_t bytes_alloc, | ||
159 | gfp_t gfp_flags, | ||
160 | int node), | ||
161 | |||
162 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node), | ||
163 | |||
164 | TP_STRUCT__entry( | ||
165 | __field( unsigned long, call_site ) | ||
166 | __field( const void *, ptr ) | ||
167 | __field( size_t, bytes_req ) | ||
168 | __field( size_t, bytes_alloc ) | ||
169 | __field( gfp_t, gfp_flags ) | ||
170 | __field( int, node ) | ||
171 | ), | ||
172 | |||
173 | TP_fast_assign( | ||
174 | __entry->call_site = call_site; | ||
175 | __entry->ptr = ptr; | ||
176 | __entry->bytes_req = bytes_req; | ||
177 | __entry->bytes_alloc = bytes_alloc; | ||
178 | __entry->gfp_flags = gfp_flags; | ||
179 | __entry->node = node; | ||
180 | ), | ||
181 | |||
182 | TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d", | ||
183 | __entry->call_site, | ||
184 | __entry->ptr, | ||
185 | __entry->bytes_req, | ||
186 | __entry->bytes_alloc, | ||
187 | show_gfp_flags(__entry->gfp_flags), | ||
188 | __entry->node) | ||
189 | ); | ||
190 | |||
191 | TRACE_EVENT(kfree, | ||
192 | |||
193 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
194 | |||
195 | TP_ARGS(call_site, ptr), | ||
196 | |||
197 | TP_STRUCT__entry( | ||
198 | __field( unsigned long, call_site ) | ||
199 | __field( const void *, ptr ) | ||
200 | ), | ||
201 | |||
202 | TP_fast_assign( | ||
203 | __entry->call_site = call_site; | ||
204 | __entry->ptr = ptr; | ||
205 | ), | ||
206 | |||
207 | TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) | ||
208 | ); | ||
209 | |||
210 | TRACE_EVENT(kmem_cache_free, | ||
211 | |||
212 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
213 | |||
214 | TP_ARGS(call_site, ptr), | ||
215 | |||
216 | TP_STRUCT__entry( | ||
217 | __field( unsigned long, call_site ) | ||
218 | __field( const void *, ptr ) | ||
219 | ), | ||
220 | |||
221 | TP_fast_assign( | ||
222 | __entry->call_site = call_site; | ||
223 | __entry->ptr = ptr; | ||
224 | ), | ||
225 | |||
226 | TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr) | ||
227 | ); | ||
228 | #endif /* _TRACE_KMEM_H */ | ||
229 | |||
230 | /* This part must be outside protection */ | ||
231 | #include <trace/define_trace.h> | ||
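Because TRACE_SYSTEM is kmem, these events surface under the kmem group of the tracing debugfs directory once this series is applied; assuming the usual event-tracing layout of this era, enabling and reading them would look roughly like:

    # echo 1 > /sys/kernel/debug/tracing/events/kmem/kmalloc/enable
    # cat /sys/kernel/debug/tracing/trace

A resulting record follows the TP_printk format above; the values below are purely illustrative, and bytes_alloc is typically larger than bytes_req because the slab allocator rounds requests up to its size class:

    kmalloc: call_site=ffffffff812a1b30 ptr=ffff88003c4f2e00 bytes_req=100 bytes_alloc=128 gfp_flags=GFP_KERNEL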
diff --git a/include/trace/events/lockdep.h b/include/trace/events/lockdep.h new file mode 100644 index 000000000000..0e956c9dfd7e --- /dev/null +++ b/include/trace/events/lockdep.h | |||
@@ -0,0 +1,96 @@ | |||
1 | #if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_LOCKDEP_H | ||
3 | |||
4 | #include <linux/lockdep.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM lockdep | ||
9 | |||
10 | #ifdef CONFIG_LOCKDEP | ||
11 | |||
12 | TRACE_EVENT(lock_acquire, | ||
13 | |||
14 | TP_PROTO(struct lockdep_map *lock, unsigned int subclass, | ||
15 | int trylock, int read, int check, | ||
16 | struct lockdep_map *next_lock, unsigned long ip), | ||
17 | |||
18 | TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip), | ||
19 | |||
20 | TP_STRUCT__entry( | ||
21 | __field(unsigned int, flags) | ||
22 | __string(name, lock->name) | ||
23 | ), | ||
24 | |||
25 | TP_fast_assign( | ||
26 | __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0); | ||
27 | __assign_str(name, lock->name); | ||
28 | ), | ||
29 | |||
30 | TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "", | ||
31 | (__entry->flags & 2) ? "read " : "", | ||
32 | __get_str(name)) | ||
33 | ); | ||
34 | |||
35 | TRACE_EVENT(lock_release, | ||
36 | |||
37 | TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip), | ||
38 | |||
39 | TP_ARGS(lock, nested, ip), | ||
40 | |||
41 | TP_STRUCT__entry( | ||
42 | __string(name, lock->name) | ||
43 | ), | ||
44 | |||
45 | TP_fast_assign( | ||
46 | __assign_str(name, lock->name); | ||
47 | ), | ||
48 | |||
49 | TP_printk("%s", __get_str(name)) | ||
50 | ); | ||
51 | |||
52 | #ifdef CONFIG_LOCK_STAT | ||
53 | |||
54 | TRACE_EVENT(lock_contended, | ||
55 | |||
56 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | ||
57 | |||
58 | TP_ARGS(lock, ip), | ||
59 | |||
60 | TP_STRUCT__entry( | ||
61 | __string(name, lock->name) | ||
62 | ), | ||
63 | |||
64 | TP_fast_assign( | ||
65 | __assign_str(name, lock->name); | ||
66 | ), | ||
67 | |||
68 | TP_printk("%s", __get_str(name)) | ||
69 | ); | ||
70 | |||
71 | TRACE_EVENT(lock_acquired, | ||
72 | TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime), | ||
73 | |||
74 | TP_ARGS(lock, ip, waittime), | ||
75 | |||
76 | TP_STRUCT__entry( | ||
77 | __string(name, lock->name) | ||
78 | __field(unsigned long, wait_usec) | ||
79 | __field(unsigned long, wait_nsec_rem) | ||
80 | ), | ||
81 | TP_fast_assign( | ||
82 | __assign_str(name, lock->name); | ||
83 | __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC); | ||
84 | __entry->wait_usec = (unsigned long) waittime; | ||
85 | ), | ||
86 | TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec, | ||
87 | __entry->wait_nsec_rem) | ||
88 | ); | ||
89 | |||
90 | #endif | ||
91 | #endif | ||
92 | |||
93 | #endif /* _TRACE_LOCKDEP_H */ | ||
94 | |||
95 | /* This part must be outside protection */ | ||
96 | #include <trace/define_trace.h> | ||
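lock_acquired stores the wait time as whole microseconds plus a nanosecond remainder; the do_div() call above is a 64-bit division that leaves the quotient in its argument and returns the remainder. A minimal model of the same split, with ordinary C division standing in for do_div():

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_USEC 1000ULL

    int main(void)
    {
        uint64_t waittime = 1234567;                     /* ns, illustrative value    */
        unsigned long rem  = waittime % NSEC_PER_USEC;   /* do_div()'s return value   */
        unsigned long usec = waittime / NSEC_PER_USEC;   /* quotient left in waittime */
        printf("%lu.%03lu us\n", usec, rem);             /* prints: 1234.567 us       */
        return 0;
    }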
diff --git a/include/trace/sched_event_types.h b/include/trace/events/sched.h index 63547dc1125f..24ab5bcff7b2 100644 --- a/include/trace/sched_event_types.h +++ b/include/trace/events/sched.h | |||
@@ -1,9 +1,8 @@ | |||
1 | #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_SCHED_H | ||
1 | 3 | ||
2 | /* use <trace/sched.h> instead */ | 4 | #include <linux/sched.h> |
3 | #ifndef TRACE_EVENT | 5 | #include <linux/tracepoint.h> |
4 | # error Do not include this file directly. | ||
5 | # error Unless you know what you are doing. | ||
6 | #endif | ||
7 | 6 | ||
8 | #undef TRACE_SYSTEM | 7 | #undef TRACE_SYSTEM |
9 | #define TRACE_SYSTEM sched | 8 | #define TRACE_SYSTEM sched |
@@ -157,6 +156,7 @@ TRACE_EVENT(sched_switch, | |||
157 | __array( char, prev_comm, TASK_COMM_LEN ) | 156 | __array( char, prev_comm, TASK_COMM_LEN ) |
158 | __field( pid_t, prev_pid ) | 157 | __field( pid_t, prev_pid ) |
159 | __field( int, prev_prio ) | 158 | __field( int, prev_prio ) |
159 | __field( long, prev_state ) | ||
160 | __array( char, next_comm, TASK_COMM_LEN ) | 160 | __array( char, next_comm, TASK_COMM_LEN ) |
161 | __field( pid_t, next_pid ) | 161 | __field( pid_t, next_pid ) |
162 | __field( int, next_prio ) | 162 | __field( int, next_prio ) |
@@ -166,13 +166,19 @@ TRACE_EVENT(sched_switch, | |||
166 | memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); | 166 | memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN); |
167 | __entry->prev_pid = prev->pid; | 167 | __entry->prev_pid = prev->pid; |
168 | __entry->prev_prio = prev->prio; | 168 | __entry->prev_prio = prev->prio; |
169 | __entry->prev_state = prev->state; | ||
169 | memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); | 170 | memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); |
170 | __entry->next_pid = next->pid; | 171 | __entry->next_pid = next->pid; |
171 | __entry->next_prio = next->prio; | 172 | __entry->next_prio = next->prio; |
172 | ), | 173 | ), |
173 | 174 | ||
174 | TP_printk("task %s:%d [%d] ==> %s:%d [%d]", | 175 | TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]", |
175 | __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, | 176 | __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, |
177 | __entry->prev_state ? | ||
178 | __print_flags(__entry->prev_state, "|", | ||
179 | { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, | ||
180 | { 16, "Z" }, { 32, "X" }, { 64, "x" }, | ||
181 | { 128, "W" }) : "R", | ||
176 | __entry->next_comm, __entry->next_pid, __entry->next_prio) | 182 | __entry->next_comm, __entry->next_pid, __entry->next_prio) |
177 | ); | 183 | ); |
178 | 184 | ||
@@ -181,9 +187,9 @@ TRACE_EVENT(sched_switch, | |||
181 | */ | 187 | */ |
182 | TRACE_EVENT(sched_migrate_task, | 188 | TRACE_EVENT(sched_migrate_task, |
183 | 189 | ||
184 | TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu), | 190 | TP_PROTO(struct task_struct *p, int dest_cpu), |
185 | 191 | ||
186 | TP_ARGS(p, orig_cpu, dest_cpu), | 192 | TP_ARGS(p, dest_cpu), |
187 | 193 | ||
188 | TP_STRUCT__entry( | 194 | TP_STRUCT__entry( |
189 | __array( char, comm, TASK_COMM_LEN ) | 195 | __array( char, comm, TASK_COMM_LEN ) |
@@ -197,7 +203,7 @@ TRACE_EVENT(sched_migrate_task, | |||
197 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); | 203 | memcpy(__entry->comm, p->comm, TASK_COMM_LEN); |
198 | __entry->pid = p->pid; | 204 | __entry->pid = p->pid; |
199 | __entry->prio = p->prio; | 205 | __entry->prio = p->prio; |
200 | __entry->orig_cpu = orig_cpu; | 206 | __entry->orig_cpu = task_cpu(p); |
201 | __entry->dest_cpu = dest_cpu; | 207 | __entry->dest_cpu = dest_cpu; |
202 | ), | 208 | ), |
203 | 209 | ||
@@ -334,4 +340,7 @@ TRACE_EVENT(sched_signal_send, | |||
334 | __entry->sig, __entry->comm, __entry->pid) | 340 | __entry->sig, __entry->comm, __entry->pid) |
335 | ); | 341 | ); |
336 | 342 | ||
337 | #undef TRACE_SYSTEM | 343 | #endif /* _TRACE_SCHED_H */ |
344 | |||
345 | /* This part must be outside protection */ | ||
346 | #include <trace/define_trace.h> | ||
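The { 1, "S" } ... { 128, "W" } table added to sched_switch maps task-state bits to the single-letter codes familiar from ps, with a prev_state of 0 printed as "R" (running); the bit meanings assumed here follow the kernel's task-state flags of this era (1 interruptible sleep, 2 uninterruptible, and so on). A tiny sketch of the same lookup:

    #include <stdio.h>

    static const struct { long bit; const char *code; } state_codes[] = {
        {   1, "S" }, {   2, "D" }, {  4, "T" }, {   8, "t" },
        {  16, "Z" }, {  32, "X" }, { 64, "x" }, { 128, "W" },
    };

    static void print_state(long state)
    {
        if (!state) {                       /* 0 == TASK_RUNNING */
            printf("R\n");
            return;
        }
        const char *sep = "";
        for (int i = 0; i < 8; i++) {
            if (state & state_codes[i].bit) {
                printf("%s%s", sep, state_codes[i].code);
                sep = "|";
            }
        }
        printf("\n");
    }

    int main(void)
    {
        print_state(0);     /* prints: R                             */
        print_state(2);     /* prints: D (uninterruptible sleep)     */
        return 0;
    }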
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h new file mode 100644 index 000000000000..1e8fabb57c06 --- /dev/null +++ b/include/trace/events/skb.h | |||
@@ -0,0 +1,40 @@ | |||
1 | #if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_SKB_H | ||
3 | |||
4 | #include <linux/skbuff.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM skb | ||
9 | |||
10 | /* | ||
11 | * Tracepoint for freeing an sk_buff: | ||
12 | */ | ||
13 | TRACE_EVENT(kfree_skb, | ||
14 | |||
15 | TP_PROTO(struct sk_buff *skb, void *location), | ||
16 | |||
17 | TP_ARGS(skb, location), | ||
18 | |||
19 | TP_STRUCT__entry( | ||
20 | __field( void *, skbaddr ) | ||
21 | __field( unsigned short, protocol ) | ||
22 | __field( void *, location ) | ||
23 | ), | ||
24 | |||
25 | TP_fast_assign( | ||
26 | __entry->skbaddr = skb; | ||
27 | if (skb) { | ||
28 | __entry->protocol = ntohs(skb->protocol); | ||
29 | } | ||
30 | __entry->location = location; | ||
31 | ), | ||
32 | |||
33 | TP_printk("skbaddr=%p protocol=%u location=%p", | ||
34 | __entry->skbaddr, __entry->protocol, __entry->location) | ||
35 | ); | ||
36 | |||
37 | #endif /* _TRACE_SKB_H */ | ||
38 | |||
39 | /* This part must be outside protection */ | ||
40 | #include <trace/define_trace.h> | ||
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h new file mode 100644 index 000000000000..035f1bff288e --- /dev/null +++ b/include/trace/events/workqueue.h | |||
@@ -0,0 +1,100 @@ | |||
1 | #if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
2 | #define _TRACE_WORKQUEUE_H | ||
3 | |||
4 | #include <linux/workqueue.h> | ||
5 | #include <linux/sched.h> | ||
6 | #include <linux/tracepoint.h> | ||
7 | |||
8 | #undef TRACE_SYSTEM | ||
9 | #define TRACE_SYSTEM workqueue | ||
10 | |||
11 | TRACE_EVENT(workqueue_insertion, | ||
12 | |||
13 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), | ||
14 | |||
15 | TP_ARGS(wq_thread, work), | ||
16 | |||
17 | TP_STRUCT__entry( | ||
18 | __array(char, thread_comm, TASK_COMM_LEN) | ||
19 | __field(pid_t, thread_pid) | ||
20 | __field(work_func_t, func) | ||
21 | ), | ||
22 | |||
23 | TP_fast_assign( | ||
24 | memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); | ||
25 | __entry->thread_pid = wq_thread->pid; | ||
26 | __entry->func = work->func; | ||
27 | ), | ||
28 | |||
29 | TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, | ||
30 | __entry->thread_pid, __entry->func) | ||
31 | ); | ||
32 | |||
33 | TRACE_EVENT(workqueue_execution, | ||
34 | |||
35 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), | ||
36 | |||
37 | TP_ARGS(wq_thread, work), | ||
38 | |||
39 | TP_STRUCT__entry( | ||
40 | __array(char, thread_comm, TASK_COMM_LEN) | ||
41 | __field(pid_t, thread_pid) | ||
42 | __field(work_func_t, func) | ||
43 | ), | ||
44 | |||
45 | TP_fast_assign( | ||
46 | memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); | ||
47 | __entry->thread_pid = wq_thread->pid; | ||
48 | __entry->func = work->func; | ||
49 | ), | ||
50 | |||
51 | TP_printk("thread=%s:%d func=%pF", __entry->thread_comm, | ||
52 | __entry->thread_pid, __entry->func) | ||
53 | ); | ||
54 | |||
55 | /* Trace the creation of one workqueue thread on a cpu */ | ||
56 | TRACE_EVENT(workqueue_creation, | ||
57 | |||
58 | TP_PROTO(struct task_struct *wq_thread, int cpu), | ||
59 | |||
60 | TP_ARGS(wq_thread, cpu), | ||
61 | |||
62 | TP_STRUCT__entry( | ||
63 | __array(char, thread_comm, TASK_COMM_LEN) | ||
64 | __field(pid_t, thread_pid) | ||
65 | __field(int, cpu) | ||
66 | ), | ||
67 | |||
68 | TP_fast_assign( | ||
69 | memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); | ||
70 | __entry->thread_pid = wq_thread->pid; | ||
71 | __entry->cpu = cpu; | ||
72 | ), | ||
73 | |||
74 | TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm, | ||
75 | __entry->thread_pid, __entry->cpu) | ||
76 | ); | ||
77 | |||
78 | TRACE_EVENT(workqueue_destruction, | ||
79 | |||
80 | TP_PROTO(struct task_struct *wq_thread), | ||
81 | |||
82 | TP_ARGS(wq_thread), | ||
83 | |||
84 | TP_STRUCT__entry( | ||
85 | __array(char, thread_comm, TASK_COMM_LEN) | ||
86 | __field(pid_t, thread_pid) | ||
87 | ), | ||
88 | |||
89 | TP_fast_assign( | ||
90 | memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN); | ||
91 | __entry->thread_pid = wq_thread->pid; | ||
92 | ), | ||
93 | |||
94 | TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid) | ||
95 | ); | ||
96 | |||
97 | #endif /* _TRACE_WORKQUEUE_H */ | ||
98 | |||
99 | /* This part must be outside protection */ | ||
100 | #include <trace/define_trace.h> | ||
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h new file mode 100644 index 000000000000..1867553c61e5 --- /dev/null +++ b/include/trace/ftrace.h | |||
@@ -0,0 +1,591 @@ | |||
1 | /* | ||
2 | * Stage 1 of the trace events. | ||
3 | * | ||
4 | * Override the macros in <trace/trace_events.h> to include the following: | ||
5 | * | ||
6 | * struct ftrace_raw_<call> { | ||
7 | * struct trace_entry ent; | ||
8 | * <type> <item>; | ||
9 | * <type2> <item2>[<len>]; | ||
10 | * [...] | ||
11 | * }; | ||
12 | * | ||
13 | * The <type> <item> is created by the __field(type, item) macro or | ||
14 | * the __array(type2, item2, len) macro. | ||
15 | * We simply do "type item;", and that will create the fields | ||
16 | * in the structure. | ||
17 | */ | ||
18 | |||
19 | #include <linux/ftrace_event.h> | ||
20 | |||
21 | #undef __field | ||
22 | #define __field(type, item) type item; | ||
23 | |||
24 | #undef __array | ||
25 | #define __array(type, item, len) type item[len]; | ||
26 | |||
27 | #undef __dynamic_array | ||
28 | #define __dynamic_array(type, item, len) unsigned short __data_loc_##item; | ||
29 | |||
30 | #undef __string | ||
31 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
32 | |||
33 | #undef TP_STRUCT__entry | ||
34 | #define TP_STRUCT__entry(args...) args | ||
35 | |||
36 | #undef TRACE_EVENT | ||
37 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | ||
38 | struct ftrace_raw_##name { \ | ||
39 | struct trace_entry ent; \ | ||
40 | tstruct \ | ||
41 | char __data[0]; \ | ||
42 | }; \ | ||
43 | static struct ftrace_event_call event_##name | ||
44 | |||
45 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
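As a concrete illustration of this first pass, running the kfree event from events/kmem.h above through these definitions would expand to roughly the following (a sketch of the expansion, not literal preprocessor output):

    struct ftrace_raw_kfree {
            struct trace_entry      ent;
            unsigned long           call_site;
            const void              *ptr;
            char                    __data[0];
    };
    static struct ftrace_event_call event_kfree;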
46 | |||
47 | |||
48 | /* | ||
49 | * Stage 2 of the trace events. | ||
50 | * | ||
51 | * Include the following: | ||
52 | * | ||
53 | * struct ftrace_data_offsets_<call> { | ||
54 | * int <item1>; | ||
55 | * int <item2>; | ||
56 | * [...] | ||
57 | * }; | ||
58 | * | ||
59 | * The __dynamic_array() macro creates one int <item> per entry; these | ||
60 | * ints record the offset of each array from the beginning of the event. | ||
61 | */ | ||
62 | |||
63 | #undef __field | ||
64 | #define __field(type, item); | ||
65 | |||
66 | #undef __array | ||
67 | #define __array(type, item, len) | ||
68 | |||
69 | #undef __dynamic_array | ||
70 | #define __dynamic_array(type, item, len) int item; | ||
71 | |||
72 | #undef __string | ||
73 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
74 | |||
75 | #undef TRACE_EVENT | ||
76 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
77 | struct ftrace_data_offsets_##call { \ | ||
78 | tstruct; \ | ||
79 | }; | ||
80 | |||
81 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
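In this pass only the dynamically sized members survive: plain __field() and __array() entries expand to effectively nothing, while each __dynamic_array()/__string() leaves behind one int. For the lock_release event above, with its single __string(name, ...), the result is roughly:

    struct ftrace_data_offsets_lock_release {
            int name;
    };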
82 | |||
83 | /* | ||
84 | * Set up the format that is shown for the trace point. | ||
85 | * | ||
86 | * int | ||
87 | * ftrace_format_##call(struct trace_seq *s) | ||
88 | * { | ||
89 | * struct ftrace_raw_##call field; | ||
90 | * int ret; | ||
91 | * | ||
92 | * ret = trace_seq_printf(s, #type " " #item ";" | ||
93 | * " offset:%u; size:%u;\n", | ||
94 | * offsetof(struct ftrace_raw_##call, item), | ||
95 | * sizeof(field.type)); | ||
96 | * | ||
97 | * } | ||
98 | */ | ||
99 | |||
100 | #undef TP_STRUCT__entry | ||
101 | #define TP_STRUCT__entry(args...) args | ||
102 | |||
103 | #undef __field | ||
104 | #define __field(type, item) \ | ||
105 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | ||
106 | "offset:%u;\tsize:%u;\n", \ | ||
107 | (unsigned int)offsetof(typeof(field), item), \ | ||
108 | (unsigned int)sizeof(field.item)); \ | ||
109 | if (!ret) \ | ||
110 | return 0; | ||
111 | |||
112 | #undef __array | ||
113 | #define __array(type, item, len) \ | ||
114 | ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \ | ||
115 | "offset:%u;\tsize:%u;\n", \ | ||
116 | (unsigned int)offsetof(typeof(field), item), \ | ||
117 | (unsigned int)sizeof(field.item)); \ | ||
118 | if (!ret) \ | ||
119 | return 0; | ||
120 | |||
121 | #undef __dynamic_array | ||
122 | #define __dynamic_array(type, item, len) \ | ||
123 | ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \ | ||
124 | "offset:%u;\tsize:%u;\n", \ | ||
125 | (unsigned int)offsetof(typeof(field), \ | ||
126 | __data_loc_##item), \ | ||
127 | (unsigned int)sizeof(field.__data_loc_##item)); \ | ||
128 | if (!ret) \ | ||
129 | return 0; | ||
130 | |||
131 | #undef __string | ||
132 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
133 | |||
134 | #undef __entry | ||
135 | #define __entry REC | ||
136 | |||
137 | #undef __print_symbolic | ||
138 | #undef __get_dynamic_array | ||
139 | #undef __get_str | ||
140 | |||
141 | #undef TP_printk | ||
142 | #define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args) | ||
143 | |||
144 | #undef TP_fast_assign | ||
145 | #define TP_fast_assign(args...) args | ||
146 | |||
147 | #undef TRACE_EVENT | ||
148 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | ||
149 | static int \ | ||
150 | ftrace_format_##call(struct trace_seq *s) \ | ||
151 | { \ | ||
152 | struct ftrace_raw_##call field __attribute__((unused)); \ | ||
153 | int ret = 0; \ | ||
154 | \ | ||
155 | tstruct; \ | ||
156 | \ | ||
157 | trace_seq_printf(s, "\nprint fmt: " print); \ | ||
158 | \ | ||
159 | return ret; \ | ||
160 | } | ||
161 | |||
162 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
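The ftrace_format_<call> functions generated here back each event's format file under the tracing debugfs directory. For the kfree event the per-field output would look roughly like the following; the offset and size values are purely illustrative, since they depend on the architecture and on the common trace_entry header:

    field:unsigned long call_site;  offset:16;  size:8;
    field:const void * ptr;         offset:24;  size:8;

    print fmt: "call_site=%lx ptr=%p", REC->call_site, REC->ptr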
163 | |||
164 | /* | ||
165 | * Stage 3 of the trace events. | ||
166 | * | ||
167 | * Override the macros in <trace/trace_events.h> to include the following: | ||
168 | * | ||
169 | * enum print_line_t | ||
170 | * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags) | ||
171 | * { | ||
172 | * struct trace_seq *s = &iter->seq; | ||
173 | * struct ftrace_raw_<call> *field; <-- defined in stage 1 | ||
174 | * struct trace_entry *entry; | ||
175 | * struct trace_seq *p; | ||
176 | * int ret; | ||
177 | * | ||
178 | * entry = iter->ent; | ||
179 | * | ||
180 | * if (entry->type != event_<call>.id) { | ||
181 | * WARN_ON_ONCE(1); | ||
182 | * return TRACE_TYPE_UNHANDLED; | ||
183 | * } | ||
184 | * | ||
185 | * field = (typeof(field))entry; | ||
186 | * | ||
187 | * p = get_cpu_var(ftrace_event_seq); | ||
188 | * trace_seq_init(p); | ||
189 | * ret = trace_seq_printf(s, <TP_printk> "\n"); | ||
190 | * put_cpu(); | ||
191 | * if (!ret) | ||
192 | * return TRACE_TYPE_PARTIAL_LINE; | ||
193 | * | ||
194 | * return TRACE_TYPE_HANDLED; | ||
195 | * } | ||
196 | * | ||
197 | * This is the method used to print the raw event to the trace | ||
198 | * output format. Note, this is not needed if the data is read | ||
199 | * in binary. | ||
200 | */ | ||
201 | |||
202 | #undef __entry | ||
203 | #define __entry field | ||
204 | |||
205 | #undef TP_printk | ||
206 | #define TP_printk(fmt, args...) fmt "\n", args | ||
207 | |||
208 | #undef __get_dynamic_array | ||
209 | #define __get_dynamic_array(field) \ | ||
210 | ((void *)__entry + __entry->__data_loc_##field) | ||
211 | |||
212 | #undef __get_str | ||
213 | #define __get_str(field) (char *)__get_dynamic_array(field) | ||
214 | |||
215 | #undef __print_flags | ||
216 | #define __print_flags(flag, delim, flag_array...) \ | ||
217 | ({ \ | ||
218 | static const struct trace_print_flags flags[] = \ | ||
219 | { flag_array, { -1, NULL }}; \ | ||
220 | ftrace_print_flags_seq(p, delim, flag, flags); \ | ||
221 | }) | ||
222 | |||
223 | #undef __print_symbolic | ||
224 | #define __print_symbolic(value, symbol_array...) \ | ||
225 | ({ \ | ||
226 | static const struct trace_print_flags symbols[] = \ | ||
227 | { symbol_array, { -1, NULL }}; \ | ||
228 | ftrace_print_symbols_seq(p, value, symbols); \ | ||
229 | }) | ||
230 | |||
231 | #undef TRACE_EVENT | ||
232 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
233 | enum print_line_t \ | ||
234 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | ||
235 | { \ | ||
236 | struct trace_seq *s = &iter->seq; \ | ||
237 | struct ftrace_raw_##call *field; \ | ||
238 | struct trace_entry *entry; \ | ||
239 | struct trace_seq *p; \ | ||
240 | int ret; \ | ||
241 | \ | ||
242 | entry = iter->ent; \ | ||
243 | \ | ||
244 | if (entry->type != event_##call.id) { \ | ||
245 | WARN_ON_ONCE(1); \ | ||
246 | return TRACE_TYPE_UNHANDLED; \ | ||
247 | } \ | ||
248 | \ | ||
249 | field = (typeof(field))entry; \ | ||
250 | \ | ||
251 | p = &get_cpu_var(ftrace_event_seq); \ | ||
252 | trace_seq_init(p); \ | ||
253 | ret = trace_seq_printf(s, #call ": " print); \ | ||
254 | put_cpu(); \ | ||
255 | if (!ret) \ | ||
256 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
257 | \ | ||
258 | return TRACE_TYPE_HANDLED; \ | ||
259 | } | ||
260 | |||
261 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
262 | |||
263 | #undef __field | ||
264 | #define __field(type, item) \ | ||
265 | ret = trace_define_field(event_call, #type, #item, \ | ||
266 | offsetof(typeof(field), item), \ | ||
267 | sizeof(field.item), is_signed_type(type)); \ | ||
268 | if (ret) \ | ||
269 | return ret; | ||
270 | |||
271 | #undef __array | ||
272 | #define __array(type, item, len) \ | ||
273 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | ||
274 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | ||
275 | offsetof(typeof(field), item), \ | ||
276 | sizeof(field.item), 0); \ | ||
277 | if (ret) \ | ||
278 | return ret; | ||
279 | |||
280 | #undef __dynamic_array | ||
281 | #define __dynamic_array(type, item, len) \ | ||
282 | ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\ | ||
283 | offsetof(typeof(field), __data_loc_##item), \ | ||
284 | sizeof(field.__data_loc_##item), 0); | ||
285 | |||
286 | #undef __string | ||
287 | #define __string(item, src) __dynamic_array(char, item, -1) | ||
288 | |||
289 | #undef TRACE_EVENT | ||
290 | #define TRACE_EVENT(call, proto, args, tstruct, func, print) \ | ||
291 | int \ | ||
292 | ftrace_define_fields_##call(void) \ | ||
293 | { \ | ||
294 | struct ftrace_raw_##call field; \ | ||
295 | struct ftrace_event_call *event_call = &event_##call; \ | ||
296 | int ret; \ | ||
297 | \ | ||
298 | __common_field(int, type, 1); \ | ||
299 | __common_field(unsigned char, flags, 0); \ | ||
300 | __common_field(unsigned char, preempt_count, 0); \ | ||
301 | __common_field(int, pid, 1); \ | ||
302 | __common_field(int, tgid, 1); \ | ||
303 | \ | ||
304 | tstruct; \ | ||
305 | \ | ||
306 | return ret; \ | ||
307 | } | ||
308 | |||
309 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
310 | |||
311 | /* | ||
312 | * remember the offset of each array from the beginning of the event. | ||
313 | */ | ||
314 | |||
315 | #undef __entry | ||
316 | #define __entry entry | ||
317 | |||
318 | #undef __field | ||
319 | #define __field(type, item) | ||
320 | |||
321 | #undef __array | ||
322 | #define __array(type, item, len) | ||
323 | |||
324 | #undef __dynamic_array | ||
325 | #define __dynamic_array(type, item, len) \ | ||
326 | __data_offsets->item = __data_size + \ | ||
327 | offsetof(typeof(*entry), __data); \ | ||
328 | __data_size += (len) * sizeof(type); | ||
329 | |||
330 | #undef __string | ||
331 | #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \ | ||
332 | |||
333 | #undef TRACE_EVENT | ||
334 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
335 | static inline int ftrace_get_offsets_##call( \ | ||
336 | struct ftrace_data_offsets_##call *__data_offsets, proto) \ | ||
337 | { \ | ||
338 | int __data_size = 0; \ | ||
339 | struct ftrace_raw_##call __maybe_unused *entry; \ | ||
340 | \ | ||
341 | tstruct; \ | ||
342 | \ | ||
343 | return __data_size; \ | ||
344 | } | ||
345 | |||
346 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
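ftrace_get_offsets_<call> thus returns how many extra bytes the variable-length fields need and records, for each one, where it will start within the record. A small self-contained sketch of that bookkeeping for a single __string() field (the struct names are stand-ins for the generated ones):

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct raw_event    { int ent; char __data[0]; };   /* stage-1 stand-in */
    struct data_offsets { int name; };                  /* stage-2 stand-in */

    static int get_offsets(struct data_offsets *off, const char *src)
    {
        int __data_size = 0;
        /* __string(name, src) == __dynamic_array(char, name, strlen(src) + 1) */
        off->name    = __data_size + (int)offsetof(struct raw_event, __data);
        __data_size += (int)((strlen(src) + 1) * sizeof(char));
        return __data_size;     /* extra bytes to reserve in the ring buffer */
    }

    int main(void)
    {
        struct data_offsets off;
        int extra = get_offsets(&off, "rcu_read_lock");
        printf("offset=%d extra=%d\n", off.name, extra);
        return 0;
    }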
347 | |||
348 | /* | ||
349 | * Stage 4 of the trace events. | ||
350 | * | ||
351 | * Override the macros in <trace/trace_events.h> to include the following: | ||
352 | * | ||
353 | * static void ftrace_event_<call>(proto) | ||
354 | * { | ||
355 | * event_trace_printk(_RET_IP_, "<call>: " <fmt>); | ||
356 | * } | ||
357 | * | ||
358 | * static int ftrace_reg_event_<call>(void) | ||
359 | * { | ||
360 | * int ret; | ||
361 | * | ||
362 | * ret = register_trace_<call>(ftrace_event_<call>); | ||
363 | * if (ret) | ||
364 | * pr_info("event trace: Could not activate trace point " | ||
365 | * "probe to <call>"); | ||
366 | * return ret; | ||
367 | * } | ||
368 | * | ||
369 | * static void ftrace_unreg_event_<call>(void) | ||
370 | * { | ||
371 | * unregister_trace_<call>(ftrace_event_<call>); | ||
372 | * } | ||
373 | * | ||
374 | * | ||
375 | * For those macros defined with TRACE_EVENT: | ||
376 | * | ||
377 | * static struct ftrace_event_call event_<call>; | ||
378 | * | ||
379 | * static void ftrace_raw_event_<call>(proto) | ||
380 | * { | ||
381 | * struct ring_buffer_event *event; | ||
382 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | ||
383 | * unsigned long irq_flags; | ||
384 | * int pc; | ||
385 | * | ||
386 | * local_save_flags(irq_flags); | ||
387 | * pc = preempt_count(); | ||
388 | * | ||
389 | * event = trace_current_buffer_lock_reserve(event_<call>.id, | ||
390 | * sizeof(struct ftrace_raw_<call>), | ||
391 | * irq_flags, pc); | ||
392 | * if (!event) | ||
393 | * return; | ||
394 | * entry = ring_buffer_event_data(event); | ||
395 | * | ||
396 | * <assign>; <-- Here we assign the entries by the __field and | ||
397 | * __array macros. | ||
398 | * | ||
399 | * trace_current_buffer_unlock_commit(event, irq_flags, pc); | ||
400 | * } | ||
401 | * | ||
402 | * static int ftrace_raw_reg_event_<call>(void) | ||
403 | * { | ||
404 | * int ret; | ||
405 | * | ||
406 | * ret = register_trace_<call>(ftrace_raw_event_<call>); | ||
407 | * if (ret) | ||
408 | * pr_info("event trace: Could not activate trace point " | ||
409 | * "probe to <call>"); | ||
410 | * return ret; | ||
411 | * } | ||
412 | * | ||
413 | * static void ftrace_unreg_event_<call>(void) | ||
414 | * { | ||
415 | * unregister_trace_<call>(ftrace_raw_event_<call>); | ||
416 | * } | ||
417 | * | ||
418 | * static struct trace_event ftrace_event_type_<call> = { | ||
419 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | ||
420 | * }; | ||
421 | * | ||
422 | * static int ftrace_raw_init_event_<call>(void) | ||
423 | * { | ||
424 | * int id; | ||
425 | * | ||
426 | * id = register_ftrace_event(&ftrace_event_type_<call>); | ||
427 | * if (!id) | ||
428 | * return -ENODEV; | ||
429 | * event_<call>.id = id; | ||
430 | * return 0; | ||
431 | * } | ||
432 | * | ||
433 | * static struct ftrace_event_call __used | ||
434 | * __attribute__((__aligned__(4))) | ||
435 | * __attribute__((section("_ftrace_events"))) event_<call> = { | ||
436 | * .name = "<call>", | ||
437 | * .system = "<system>", | ||
438 | * .raw_init = ftrace_raw_init_event_<call>, | ||
439 | * .regfunc = ftrace_reg_event_<call>, | ||
440 | * .unregfunc = ftrace_unreg_event_<call>, | ||
441 | * .show_format = ftrace_format_<call>, | ||
442 | * } | ||
443 | * | ||
444 | */ | ||
445 | |||
446 | #undef TP_FMT | ||
447 | #define TP_FMT(fmt, args...) fmt "\n", ##args | ||
448 | |||
449 | #ifdef CONFIG_EVENT_PROFILE | ||
450 | #define _TRACE_PROFILE(call, proto, args) \ | ||
451 | static void ftrace_profile_##call(proto) \ | ||
452 | { \ | ||
453 | extern void perf_tpcounter_event(int); \ | ||
454 | perf_tpcounter_event(event_##call.id); \ | ||
455 | } \ | ||
456 | \ | ||
457 | static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \ | ||
458 | { \ | ||
459 | int ret = 0; \ | ||
460 | \ | ||
461 | if (!atomic_inc_return(&event_call->profile_count)) \ | ||
462 | ret = register_trace_##call(ftrace_profile_##call); \ | ||
463 | \ | ||
464 | return ret; \ | ||
465 | } \ | ||
466 | \ | ||
467 | static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\ | ||
468 | { \ | ||
469 | if (atomic_add_negative(-1, &event_call->profile_count)) \ | ||
470 | unregister_trace_##call(ftrace_profile_##call); \ | ||
471 | } | ||
472 | |||
473 | #define _TRACE_PROFILE_INIT(call) \ | ||
474 | .profile_count = ATOMIC_INIT(-1), \ | ||
475 | .profile_enable = ftrace_profile_enable_##call, \ | ||
476 | .profile_disable = ftrace_profile_disable_##call, | ||
477 | |||
478 | #else | ||
479 | #define _TRACE_PROFILE(call, proto, args) | ||
480 | #define _TRACE_PROFILE_INIT(call) | ||
481 | #endif | ||
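profile_count starts at -1 so that the first enable (which increments it to 0) registers the probe and the last disable (which takes it back below zero) unregisters it; intermediate enables and disables only move the counter. A minimal model of that refcount convention, with plain ints standing in for the atomic operations:

    #include <stdio.h>

    static int profile_count = -1;      /* mirrors ATOMIC_INIT(-1)              */
    static int registered;

    static void profile_enable(void)
    {
        if (++profile_count == 0)       /* !atomic_inc_return(&profile_count)   */
            registered = 1;             /* first user: register the probe       */
    }

    static void profile_disable(void)
    {
        if (--profile_count < 0)        /* atomic_add_negative(-1, ...)         */
            registered = 0;             /* last user gone: unregister the probe */
    }

    int main(void)
    {
        profile_enable();
        profile_enable();
        profile_disable();
        printf("registered=%d\n", registered);   /* prints: registered=1 */
        profile_disable();
        printf("registered=%d\n", registered);   /* prints: registered=0 */
        return 0;
    }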
482 | |||
483 | #undef __entry | ||
484 | #define __entry entry | ||
485 | |||
486 | #undef __field | ||
487 | #define __field(type, item) | ||
488 | |||
489 | #undef __array | ||
490 | #define __array(type, item, len) | ||
491 | |||
492 | #undef __dynamic_array | ||
493 | #define __dynamic_array(type, item, len) \ | ||
494 | __entry->__data_loc_##item = __data_offsets.item; | ||
495 | |||
496 | #undef __string | ||
497 | #define __string(item, src) __dynamic_array(char, item, -1) \ | ||
498 | |||
499 | #undef __assign_str | ||
500 | #define __assign_str(dst, src) \ | ||
501 | strcpy(__get_str(dst), src); | ||
502 | |||
503 | #undef TRACE_EVENT | ||
504 | #define TRACE_EVENT(call, proto, args, tstruct, assign, print) \ | ||
505 | _TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \ | ||
506 | \ | ||
507 | static struct ftrace_event_call event_##call; \ | ||
508 | \ | ||
509 | static void ftrace_raw_event_##call(proto) \ | ||
510 | { \ | ||
511 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | ||
512 | struct ftrace_event_call *event_call = &event_##call; \ | ||
513 | struct ring_buffer_event *event; \ | ||
514 | struct ftrace_raw_##call *entry; \ | ||
515 | unsigned long irq_flags; \ | ||
516 | int __data_size; \ | ||
517 | int pc; \ | ||
518 | \ | ||
519 | local_save_flags(irq_flags); \ | ||
520 | pc = preempt_count(); \ | ||
521 | \ | ||
522 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | ||
523 | \ | ||
524 | event = trace_current_buffer_lock_reserve(event_##call.id, \ | ||
525 | sizeof(*entry) + __data_size, \ | ||
526 | irq_flags, pc); \ | ||
527 | if (!event) \ | ||
528 | return; \ | ||
529 | entry = ring_buffer_event_data(event); \ | ||
530 | \ | ||
531 | \ | ||
532 | tstruct \ | ||
533 | \ | ||
534 | { assign; } \ | ||
535 | \ | ||
536 | if (!filter_current_check_discard(event_call, entry, event)) \ | ||
537 | trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \ | ||
538 | } \ | ||
539 | \ | ||
540 | static int ftrace_raw_reg_event_##call(void) \ | ||
541 | { \ | ||
542 | int ret; \ | ||
543 | \ | ||
544 | ret = register_trace_##call(ftrace_raw_event_##call); \ | ||
545 | if (ret) \ | ||
546 | pr_info("event trace: Could not activate trace point " \ | ||
547 | "probe to " #call "\n"); \ | ||
548 | return ret; \ | ||
549 | } \ | ||
550 | \ | ||
551 | static void ftrace_raw_unreg_event_##call(void) \ | ||
552 | { \ | ||
553 | unregister_trace_##call(ftrace_raw_event_##call); \ | ||
554 | } \ | ||
555 | \ | ||
556 | static struct trace_event ftrace_event_type_##call = { \ | ||
557 | .trace = ftrace_raw_output_##call, \ | ||
558 | }; \ | ||
559 | \ | ||
560 | static int ftrace_raw_init_event_##call(void) \ | ||
561 | { \ | ||
562 | int id; \ | ||
563 | \ | ||
564 | id = register_ftrace_event(&ftrace_event_type_##call); \ | ||
565 | if (!id) \ | ||
566 | return -ENODEV; \ | ||
567 | event_##call.id = id; \ | ||
568 | INIT_LIST_HEAD(&event_##call.fields); \ | ||
569 | init_preds(&event_##call); \ | ||
570 | return 0; \ | ||
571 | } \ | ||
572 | \ | ||
573 | static struct ftrace_event_call __used \ | ||
574 | __attribute__((__aligned__(4))) \ | ||
575 | __attribute__((section("_ftrace_events"))) event_##call = { \ | ||
576 | .name = #call, \ | ||
577 | .system = __stringify(TRACE_SYSTEM), \ | ||
578 | .event = &ftrace_event_type_##call, \ | ||
579 | .raw_init = ftrace_raw_init_event_##call, \ | ||
580 | .regfunc = ftrace_raw_reg_event_##call, \ | ||
581 | .unregfunc = ftrace_raw_unreg_event_##call, \ | ||
582 | .show_format = ftrace_format_##call, \ | ||
583 | .define_fields = ftrace_define_fields_##call, \ | ||
584 | _TRACE_PROFILE_INIT(call) \ | ||
585 | } | ||
586 | |||
587 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | ||
588 | |||
589 | #undef _TRACE_PROFILE | ||
590 | #undef _TRACE_PROFILE_INIT | ||
591 | |||
diff --git a/include/trace/irq.h b/include/trace/irq.h deleted file mode 100644 index ff5d4495dc37..000000000000 --- a/include/trace/irq.h +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | #ifndef _TRACE_IRQ_H | ||
2 | #define _TRACE_IRQ_H | ||
3 | |||
4 | #include <linux/interrupt.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #include <trace/irq_event_types.h> | ||
8 | |||
9 | #endif | ||
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h deleted file mode 100644 index 85964ebd47ec..000000000000 --- a/include/trace/irq_event_types.h +++ /dev/null | |||
@@ -1,55 +0,0 @@ | |||
1 | |||
2 | /* use <trace/irq.h> instead */ | ||
3 | #ifndef TRACE_FORMAT | ||
4 | # error Do not include this file directly. | ||
5 | # error Unless you know what you are doing. | ||
6 | #endif | ||
7 | |||
8 | #undef TRACE_SYSTEM | ||
9 | #define TRACE_SYSTEM irq | ||
10 | |||
11 | /* | ||
12 | * Tracepoint for entry of interrupt handler: | ||
13 | */ | ||
14 | TRACE_FORMAT(irq_handler_entry, | ||
15 | TP_PROTO(int irq, struct irqaction *action), | ||
16 | TP_ARGS(irq, action), | ||
17 | TP_FMT("irq=%d handler=%s", irq, action->name) | ||
18 | ); | ||
19 | |||
20 | /* | ||
21 | * Tracepoint for return of an interrupt handler: | ||
22 | */ | ||
23 | TRACE_EVENT(irq_handler_exit, | ||
24 | |||
25 | TP_PROTO(int irq, struct irqaction *action, int ret), | ||
26 | |||
27 | TP_ARGS(irq, action, ret), | ||
28 | |||
29 | TP_STRUCT__entry( | ||
30 | __field( int, irq ) | ||
31 | __field( int, ret ) | ||
32 | ), | ||
33 | |||
34 | TP_fast_assign( | ||
35 | __entry->irq = irq; | ||
36 | __entry->ret = ret; | ||
37 | ), | ||
38 | |||
39 | TP_printk("irq=%d return=%s", | ||
40 | __entry->irq, __entry->ret ? "handled" : "unhandled") | ||
41 | ); | ||
42 | |||
43 | TRACE_FORMAT(softirq_entry, | ||
44 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | ||
45 | TP_ARGS(h, vec), | ||
46 | TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec]) | ||
47 | ); | ||
48 | |||
49 | TRACE_FORMAT(softirq_exit, | ||
50 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | ||
51 | TP_ARGS(h, vec), | ||
52 | TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec]) | ||
53 | ); | ||
54 | |||
55 | #undef TRACE_SYSTEM | ||
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h deleted file mode 100644 index 28ee69f9cd46..000000000000 --- a/include/trace/kmemtrace.h +++ /dev/null | |||
@@ -1,63 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
3 | * | ||
4 | * This file is released under GPL version 2. | ||
5 | */ | ||
6 | |||
7 | #ifndef _LINUX_KMEMTRACE_H | ||
8 | #define _LINUX_KMEMTRACE_H | ||
9 | |||
10 | #ifdef __KERNEL__ | ||
11 | |||
12 | #include <linux/tracepoint.h> | ||
13 | #include <linux/types.h> | ||
14 | |||
15 | #ifdef CONFIG_KMEMTRACE | ||
16 | extern void kmemtrace_init(void); | ||
17 | #else | ||
18 | static inline void kmemtrace_init(void) | ||
19 | { | ||
20 | } | ||
21 | #endif | ||
22 | |||
23 | DECLARE_TRACE(kmalloc, | ||
24 | TP_PROTO(unsigned long call_site, | ||
25 | const void *ptr, | ||
26 | size_t bytes_req, | ||
27 | size_t bytes_alloc, | ||
28 | gfp_t gfp_flags), | ||
29 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)); | ||
30 | DECLARE_TRACE(kmem_cache_alloc, | ||
31 | TP_PROTO(unsigned long call_site, | ||
32 | const void *ptr, | ||
33 | size_t bytes_req, | ||
34 | size_t bytes_alloc, | ||
35 | gfp_t gfp_flags), | ||
36 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)); | ||
37 | DECLARE_TRACE(kmalloc_node, | ||
38 | TP_PROTO(unsigned long call_site, | ||
39 | const void *ptr, | ||
40 | size_t bytes_req, | ||
41 | size_t bytes_alloc, | ||
42 | gfp_t gfp_flags, | ||
43 | int node), | ||
44 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)); | ||
45 | DECLARE_TRACE(kmem_cache_alloc_node, | ||
46 | TP_PROTO(unsigned long call_site, | ||
47 | const void *ptr, | ||
48 | size_t bytes_req, | ||
49 | size_t bytes_alloc, | ||
50 | gfp_t gfp_flags, | ||
51 | int node), | ||
52 | TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)); | ||
53 | DECLARE_TRACE(kfree, | ||
54 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
55 | TP_ARGS(call_site, ptr)); | ||
56 | DECLARE_TRACE(kmem_cache_free, | ||
57 | TP_PROTO(unsigned long call_site, const void *ptr), | ||
58 | TP_ARGS(call_site, ptr)); | ||
59 | |||
60 | #endif /* __KERNEL__ */ | ||
61 | |||
62 | #endif /* _LINUX_KMEMTRACE_H */ | ||
63 | |||
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h deleted file mode 100644 index 5ca67df87f2a..000000000000 --- a/include/trace/lockdep.h +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | #ifndef _TRACE_LOCKDEP_H | ||
2 | #define _TRACE_LOCKDEP_H | ||
3 | |||
4 | #include <linux/lockdep.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #include <trace/lockdep_event_types.h> | ||
8 | |||
9 | #endif | ||
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h deleted file mode 100644 index adccfcd2ec8f..000000000000 --- a/include/trace/lockdep_event_types.h +++ /dev/null | |||
@@ -1,44 +0,0 @@ | |||
1 | |||
2 | #ifndef TRACE_FORMAT | ||
3 | # error Do not include this file directly. | ||
4 | # error Unless you know what you are doing. | ||
5 | #endif | ||
6 | |||
7 | #undef TRACE_SYSTEM | ||
8 | #define TRACE_SYSTEM lock | ||
9 | |||
10 | #ifdef CONFIG_LOCKDEP | ||
11 | |||
12 | TRACE_FORMAT(lock_acquire, | ||
13 | TP_PROTO(struct lockdep_map *lock, unsigned int subclass, | ||
14 | int trylock, int read, int check, | ||
15 | struct lockdep_map *next_lock, unsigned long ip), | ||
16 | TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip), | ||
17 | TP_FMT("%s%s%s", trylock ? "try " : "", | ||
18 | read ? "read " : "", lock->name) | ||
19 | ); | ||
20 | |||
21 | TRACE_FORMAT(lock_release, | ||
22 | TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip), | ||
23 | TP_ARGS(lock, nested, ip), | ||
24 | TP_FMT("%s", lock->name) | ||
25 | ); | ||
26 | |||
27 | #ifdef CONFIG_LOCK_STAT | ||
28 | |||
29 | TRACE_FORMAT(lock_contended, | ||
30 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | ||
31 | TP_ARGS(lock, ip), | ||
32 | TP_FMT("%s", lock->name) | ||
33 | ); | ||
34 | |||
35 | TRACE_FORMAT(lock_acquired, | ||
36 | TP_PROTO(struct lockdep_map *lock, unsigned long ip), | ||
37 | TP_ARGS(lock, ip), | ||
38 | TP_FMT("%s", lock->name) | ||
39 | ); | ||
40 | |||
41 | #endif | ||
42 | #endif | ||
43 | |||
44 | #undef TRACE_SYSTEM | ||
diff --git a/include/trace/sched.h b/include/trace/sched.h deleted file mode 100644 index 4e372a1a29bf..000000000000 --- a/include/trace/sched.h +++ /dev/null | |||
@@ -1,9 +0,0 @@ | |||
1 | #ifndef _TRACE_SCHED_H | ||
2 | #define _TRACE_SCHED_H | ||
3 | |||
4 | #include <linux/sched.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | #include <trace/sched_event_types.h> | ||
8 | |||
9 | #endif | ||
diff --git a/include/trace/skb.h b/include/trace/skb.h deleted file mode 100644 index b66206d9be72..000000000000 --- a/include/trace/skb.h +++ /dev/null | |||
@@ -1,11 +0,0 @@ | |||
1 | #ifndef _TRACE_SKB_H_ | ||
2 | #define _TRACE_SKB_H_ | ||
3 | |||
4 | #include <linux/skbuff.h> | ||
5 | #include <linux/tracepoint.h> | ||
6 | |||
7 | DECLARE_TRACE(kfree_skb, | ||
8 | TP_PROTO(struct sk_buff *skb, void *location), | ||
9 | TP_ARGS(skb, location)); | ||
10 | |||
11 | #endif | ||
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h deleted file mode 100644 index df56f5694be6..000000000000 --- a/include/trace/trace_event_types.h +++ /dev/null | |||
@@ -1,5 +0,0 @@ | |||
1 | /* trace/<type>_event_types.h here */ | ||
2 | |||
3 | #include <trace/sched_event_types.h> | ||
4 | #include <trace/irq_event_types.h> | ||
5 | #include <trace/lockdep_event_types.h> | ||
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h deleted file mode 100644 index fd13750ca4ba..000000000000 --- a/include/trace/trace_events.h +++ /dev/null | |||
@@ -1,5 +0,0 @@ | |||
1 | /* trace/<type>.h here */ | ||
2 | |||
3 | #include <trace/sched.h> | ||
4 | #include <trace/irq.h> | ||
5 | #include <trace/lockdep.h> | ||
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h deleted file mode 100644 index 7626523deeba..000000000000 --- a/include/trace/workqueue.h +++ /dev/null | |||
@@ -1,25 +0,0 @@ | |||
1 | #ifndef __TRACE_WORKQUEUE_H | ||
2 | #define __TRACE_WORKQUEUE_H | ||
3 | |||
4 | #include <linux/tracepoint.h> | ||
5 | #include <linux/workqueue.h> | ||
6 | #include <linux/sched.h> | ||
7 | |||
8 | DECLARE_TRACE(workqueue_insertion, | ||
9 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), | ||
10 | TP_ARGS(wq_thread, work)); | ||
11 | |||
12 | DECLARE_TRACE(workqueue_execution, | ||
13 | TP_PROTO(struct task_struct *wq_thread, struct work_struct *work), | ||
14 | TP_ARGS(wq_thread, work)); | ||
15 | |||
16 | /* Trace the creation of one workqueue thread on a cpu */ | ||
17 | DECLARE_TRACE(workqueue_creation, | ||
18 | TP_PROTO(struct task_struct *wq_thread, int cpu), | ||
19 | TP_ARGS(wq_thread, cpu)); | ||
20 | |||
21 | DECLARE_TRACE(workqueue_destruction, | ||
22 | TP_PROTO(struct task_struct *wq_thread), | ||
23 | TP_ARGS(wq_thread)); | ||
24 | |||
25 | #endif /* __TRACE_WORKQUEUE_H */ | ||
diff --git a/include/xen/Kbuild b/include/xen/Kbuild new file mode 100644 index 000000000000..4e65c16a445b --- /dev/null +++ b/include/xen/Kbuild | |||
@@ -0,0 +1 @@ | |||
header-y += evtchn.h | |||
diff --git a/include/xen/events.h b/include/xen/events.h index 0d5f1adc0363..e68d59a90ca8 100644 --- a/include/xen/events.h +++ b/include/xen/events.h | |||
@@ -53,4 +53,7 @@ bool xen_test_irq_pending(int irq); | |||
53 | irq will be disabled so it won't deliver an interrupt. */ | 53 | irq will be disabled so it won't deliver an interrupt. */ |
54 | void xen_poll_irq(int irq); | 54 | void xen_poll_irq(int irq); |
55 | 55 | ||
56 | /* Determine the IRQ which is bound to an event channel */ | ||
57 | unsigned irq_from_evtchn(unsigned int evtchn); | ||
58 | |||
56 | #endif /* _XEN_EVENTS_H */ | 59 | #endif /* _XEN_EVENTS_H */ |
diff --git a/include/xen/evtchn.h b/include/xen/evtchn.h new file mode 100644 index 000000000000..14e833ee4e0b --- /dev/null +++ b/include/xen/evtchn.h | |||
@@ -0,0 +1,88 @@ | |||
1 | /****************************************************************************** | ||
2 | * evtchn.h | ||
3 | * | ||
4 | * Interface to /dev/xen/evtchn. | ||
5 | * | ||
6 | * Copyright (c) 2003-2005, K A Fraser | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License version 2 | ||
10 | * as published by the Free Software Foundation; or, when distributed | ||
11 | * separately from the Linux kernel or incorporated into other | ||
12 | * software packages, subject to the following license: | ||
13 | * | ||
14 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
15 | * of this source file (the "Software"), to deal in the Software without | ||
16 | * restriction, including without limitation the rights to use, copy, modify, | ||
17 | * merge, publish, distribute, sublicense, and/or sell copies of the Software, | ||
18 | * and to permit persons to whom the Software is furnished to do so, subject to | ||
19 | * the following conditions: | ||
20 | * | ||
21 | * The above copyright notice and this permission notice shall be included in | ||
22 | * all copies or substantial portions of the Software. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
25 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
26 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||
27 | * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
28 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
29 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
30 | * IN THE SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef __LINUX_PUBLIC_EVTCHN_H__ | ||
34 | #define __LINUX_PUBLIC_EVTCHN_H__ | ||
35 | |||
36 | /* | ||
37 | * Bind a fresh port to VIRQ @virq. | ||
38 | * Return allocated port. | ||
39 | */ | ||
40 | #define IOCTL_EVTCHN_BIND_VIRQ \ | ||
41 | _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq)) | ||
42 | struct ioctl_evtchn_bind_virq { | ||
43 | unsigned int virq; | ||
44 | }; | ||
45 | |||
46 | /* | ||
47 | * Bind a fresh port to remote <@remote_domain, @remote_port>. | ||
48 | * Return allocated port. | ||
49 | */ | ||
50 | #define IOCTL_EVTCHN_BIND_INTERDOMAIN \ | ||
51 | _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain)) | ||
52 | struct ioctl_evtchn_bind_interdomain { | ||
53 | unsigned int remote_domain, remote_port; | ||
54 | }; | ||
55 | |||
56 | /* | ||
57 | * Allocate a fresh port for binding to @remote_domain. | ||
58 | * Return allocated port. | ||
59 | */ | ||
60 | #define IOCTL_EVTCHN_BIND_UNBOUND_PORT \ | ||
61 | _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port)) | ||
62 | struct ioctl_evtchn_bind_unbound_port { | ||
63 | unsigned int remote_domain; | ||
64 | }; | ||
65 | |||
66 | /* | ||
67 | * Unbind previously allocated @port. | ||
68 | */ | ||
69 | #define IOCTL_EVTCHN_UNBIND \ | ||
70 | _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind)) | ||
71 | struct ioctl_evtchn_unbind { | ||
72 | unsigned int port; | ||
73 | }; | ||
74 | |||
75 | /* | ||
76 | * Unbind previously allocated @port. | ||
77 | */ | ||
78 | #define IOCTL_EVTCHN_NOTIFY \ | ||
79 | _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify)) | ||
80 | struct ioctl_evtchn_notify { | ||
81 | unsigned int port; | ||
82 | }; | ||
83 | |||
84 | /* Clear and reinitialise the event buffer. Clear error condition. */ | ||
85 | #define IOCTL_EVTCHN_RESET \ | ||
86 | _IOC(_IOC_NONE, 'E', 5, 0) | ||
87 | |||
88 | #endif /* __LINUX_PUBLIC_EVTCHN_H__ */ | ||
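A userspace consumer of this header opens /dev/xen/evtchn (the device named in the header comment above) and issues these ioctls against the file descriptor. A minimal sketch of binding to and unbinding from a VIRQ; error handling is trimmed and the VIRQ number is illustrative:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <xen/evtchn.h>

    int main(void)
    {
        int fd = open("/dev/xen/evtchn", O_RDWR);
        if (fd < 0) {
            perror("open /dev/xen/evtchn");
            return 1;
        }

        struct ioctl_evtchn_bind_virq bind = { .virq = 1 };   /* illustrative VIRQ */
        int port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);  /* allocated port    */
        if (port < 0) {
            perror("IOCTL_EVTCHN_BIND_VIRQ");
            close(fd);
            return 1;
        }
        printf("bound to event-channel port %d\n", port);

        struct ioctl_evtchn_unbind unbind = { .port = port };
        ioctl(fd, IOCTL_EVTCHN_UNBIND, &unbind);
        close(fd);
        return 0;
    }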
diff --git a/include/xen/interface/version.h b/include/xen/interface/version.h index 453235e923f0..e8b6519d47e9 100644 --- a/include/xen/interface/version.h +++ b/include/xen/interface/version.h | |||
@@ -57,4 +57,7 @@ struct xen_feature_info { | |||
57 | /* Declares the features reported by XENVER_get_features. */ | 57 | /* Declares the features reported by XENVER_get_features. */ |
58 | #include "features.h" | 58 | #include "features.h" |
59 | 59 | ||
60 | /* arg == NULL; returns host memory page size. */ | ||
61 | #define XENVER_pagesize 7 | ||
62 | |||
60 | #endif /* __XEN_PUBLIC_VERSION_H__ */ | 63 | #endif /* __XEN_PUBLIC_VERSION_H__ */ |
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index f87f9614844d..b9763badbd77 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h | |||
@@ -91,8 +91,7 @@ struct xenbus_driver { | |||
91 | void (*otherend_changed)(struct xenbus_device *dev, | 91 | void (*otherend_changed)(struct xenbus_device *dev, |
92 | enum xenbus_state backend_state); | 92 | enum xenbus_state backend_state); |
93 | int (*remove)(struct xenbus_device *dev); | 93 | int (*remove)(struct xenbus_device *dev); |
94 | int (*suspend)(struct xenbus_device *dev); | 94 | int (*suspend)(struct xenbus_device *dev, pm_message_t state); |
95 | int (*suspend_cancel)(struct xenbus_device *dev); | ||
96 | int (*resume)(struct xenbus_device *dev); | 95 | int (*resume)(struct xenbus_device *dev); |
97 | int (*uevent)(struct xenbus_device *, char **, int, char *, int); | 96 | int (*uevent)(struct xenbus_device *, char **, int, char *, int); |
98 | struct device_driver driver; | 97 | struct device_driver driver; |