Diffstat (limited to 'include')
-rw-r--r--   include/drm/drm_pciids.h             |   1
-rw-r--r--   include/linux/blkdev.h               |  35
-rw-r--r--   include/linux/drbd.h                 |   2
-rw-r--r--   include/linux/drbd_nl.h              |   3
-rw-r--r--   include/linux/firewire-cdev.h        |  78
-rw-r--r--   include/linux/firewire-constants.h   |  29
-rw-r--r--   include/linux/genhd.h                |   2
-rw-r--r--   include/linux/i2o.h                  |   1
-rw-r--r--   include/linux/input/matrix_keypad.h  |   2
-rw-r--r--   include/linux/kvm_host.h             |   7
-rw-r--r--   include/linux/lcm.h                  |   8
-rw-r--r--   include/linux/nfs_fs_sb.h            |   1
-rw-r--r--   include/linux/radix-tree.h           |   7
-rw-r--r--   include/linux/rcupdate.h             |  65
-rw-r--r--   include/linux/regulator/consumer.h   |   8
-rw-r--r--   include/linux/slab.h                 |   1
-rw-r--r--   include/linux/writeback.h            |   3
-rw-r--r--   include/net/x25.h                    |   4
-rw-r--r--   include/trace/events/block.h         | 164
19 files changed, 351 insertions, 70 deletions
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 04a6ebc27b96..2d428b088cc8 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -6,6 +6,7 @@
 	{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x3154, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x3155, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x3E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x3E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x4136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP}, \
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ebd22dbed861..6690e8bae7bb 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -158,7 +158,6 @@ enum rq_flag_bits {
 struct request {
 	struct list_head queuelist;
 	struct call_single_data csd;
-	int cpu;
 
 	struct request_queue *q;
 
@@ -166,9 +165,11 @@ struct request {
 	enum rq_cmd_type_bits cmd_type;
 	unsigned long atomic_flags;
 
+	int cpu;
+
 	/* the following two fields are internal, NEVER access directly */
-	sector_t __sector;		/* sector cursor */
 	unsigned int __data_len;	/* total data len */
+	sector_t __sector;		/* sector cursor */
 
 	struct bio *bio;
 	struct bio *biotail;
@@ -201,20 +202,20 @@ struct request {
 
 	unsigned short ioprio;
 
+	int ref_count;
+
 	void *special;		/* opaque pointer available for LLD use */
 	char *buffer;		/* kaddr of the current segment if available */
 
 	int tag;
 	int errors;
 
-	int ref_count;
-
 	/*
 	 * when request is used as a packet command carrier
 	 */
-	unsigned short cmd_len;
 	unsigned char __cmd[BLK_MAX_CDB];
 	unsigned char *cmd;
+	unsigned short cmd_len;
 
 	unsigned int extra_len;	/* length of alignment and padding */
 	unsigned int sense_len;
@@ -921,26 +922,7 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
-
-/* Temporary compatibility wrapper */
-static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
-{
-	blk_queue_max_hw_sectors(q, max);
-}
-
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
-
-static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
-{
-	blk_queue_max_segments(q, max);
-}
-
-static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
-{
-	blk_queue_max_segments(q, max);
-}
-
-
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
@@ -1030,11 +1012,6 @@ static inline int sb_issue_discard(struct super_block *sb,
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 
-#define MAX_PHYS_SEGMENTS 128
-#define MAX_HW_SEGMENTS 128
-#define SAFE_MAX_SECTORS 255
-#define MAX_SEGMENT_SIZE 65536
-
 enum blk_default_limits {
 	BLK_MAX_SEGMENTS	= 128,
 	BLK_SAFE_MAX_SECTORS	= 255,
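Note on the blkdev.h hunks above: each deleted compatibility wrapper forwarded to a single remaining helper, so converting a caller is mechanical. A hedged sketch (assumes a driver with a struct request_queue *q in scope; the limit values shown are the old defaults from the removed macros):

	blk_queue_max_hw_sectors(q, 255);	/* was blk_queue_max_sectors(q, 255) */
	blk_queue_max_segments(q, 128);		/* was blk_queue_max_{phys,hw}_segments(q, 128) */
	blk_queue_max_segment_size(q, 65536);	/* helper unchanged by this diff */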
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 78962272338a..4341b1a97a34 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -56,7 +56,7 @@ extern const char *drbd_buildtag(void);
 #define REL_VERSION "8.3.7"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
-#define PRO_VERSION_MAX 91
+#define PRO_VERSION_MAX 92
 
 
 enum drbd_io_error_p {
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index a4d82f895994..f7431a4ca608 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -12,7 +12,7 @@
 #endif
 
 NL_PACKET(primary, 1,
-	NL_BIT(		1,	T_MAY_IGNORE,	overwrite_peer)
+	NL_BIT(		1,	T_MAY_IGNORE,	primary_force)
 )
 
 NL_PACKET(secondary, 2, )
@@ -63,6 +63,7 @@ NL_PACKET(net_conf, 5,
 	NL_BIT(		41,	T_MAY_IGNORE,	always_asbp)
 	NL_BIT(		61,	T_MAY_IGNORE,	no_cork)
 	NL_BIT(		62,	T_MANDATORY,	auto_sndbuf_size)
+	NL_BIT(		70,	T_MANDATORY,	dry_run)
 )
 
 NL_PACKET(disconnect, 6, )
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 40b11013408e..81f3b14d5d76 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -1,21 +1,26 @@
 /*
  * Char device interface.
  *
- * Copyright (C) 2005-2006  Kristian Hoegsberg <krh@bitplanet.net>
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
  */
 
 #ifndef _LINUX_FIREWIRE_CDEV_H
@@ -438,7 +443,7 @@ struct fw_cdev_remove_descriptor {
  * @type:        %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE
  * @header_size: Header size to strip for receive contexts
  * @channel:     Channel to bind to
- * @speed:       Speed to transmit at
+ * @speed:       Speed for transmit contexts
  * @closure:     To be returned in &fw_cdev_event_iso_interrupt
  * @handle:      Handle to context, written back by kernel
  *
@@ -451,6 +456,9 @@ struct fw_cdev_remove_descriptor {
  * If a context was successfully created, the kernel writes back a handle to the
  * context, which must be passed in for subsequent operations on that context.
  *
+ * For receive contexts, @header_size must be at least 4 and must be a multiple
+ * of 4.
+ *
  * Note that the effect of a @header_size > 4 depends on
  * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt.
  */
@@ -481,10 +489,34 @@ struct fw_cdev_create_iso_context {
  *
  * &struct fw_cdev_iso_packet is used to describe isochronous packet queues.
  *
- * Use the FW_CDEV_ISO_ macros to fill in @control.  The sy and tag fields are
- * specified by IEEE 1394a and IEC 61883.
- *
- * FIXME - finish this documentation
+ * Use the FW_CDEV_ISO_ macros to fill in @control.
+ *
+ * For transmit packets, the header length must be a multiple of 4 and specifies
+ * the numbers of bytes in @header that will be prepended to the packet's
+ * payload; these bytes are copied into the kernel and will not be accessed
+ * after the ioctl has returned.  The sy and tag fields are copied to the iso
+ * packet header (these fields are specified by IEEE 1394a and IEC 61883-1).
+ * The skip flag specifies that no packet is to be sent in a frame; when using
+ * this, all other fields except the interrupt flag must be zero.
+ *
+ * For receive packets, the header length must be a multiple of the context's
+ * header size; if the header length is larger than the context's header size,
+ * multiple packets are queued for this entry.  The sy and tag fields are
+ * ignored.  If the sync flag is set, the context drops all packets until
+ * a packet with a matching sy field is received (the sync value to wait for is
+ * specified in the &fw_cdev_start_iso structure).  The payload length defines
+ * how many payload bytes can be received for one packet (in addition to payload
+ * quadlets that have been defined as headers and are stripped and returned in
+ * the &fw_cdev_event_iso_interrupt structure).  If more bytes are received, the
+ * additional bytes are dropped.  If less bytes are received, the remaining
+ * bytes in this part of the payload buffer will not be written to, not even by
+ * the next packet, i.e., packets received in consecutive frames will not
+ * necessarily be consecutive in memory.  If an entry has queued multiple
+ * packets, the payload length is divided equally among them.
+ *
+ * When a packet with the interrupt flag set has been completed, the
+ * &fw_cdev_event_iso_interrupt event will be sent.  An entry that has queued
+ * multiple receive packets is completed when its last packet is completed.
  */
 struct fw_cdev_iso_packet {
 	__u32 control;
@@ -501,7 +533,7 @@ struct fw_cdev_iso_packet {
  * Queue a number of isochronous packets for reception or transmission.
  * This ioctl takes a pointer to an array of &fw_cdev_iso_packet structs,
  * which describe how to transmit from or receive into a contiguous region
- * of a mmap()'ed payload buffer.  As part of the packet descriptors,
+ * of a mmap()'ed payload buffer.  As part of transmit packet descriptors,
  * a series of headers can be supplied, which will be prepended to the
  * payload during DMA.
  *
@@ -620,8 +652,8 @@ struct fw_cdev_get_cycle_timer2 {
  * instead of allocated.
  * An %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED event concludes this operation.
  *
- * To summarize, %FW_CDEV_IOC_DEALLOCATE_ISO_RESOURCE allocates iso resources
- * for the lifetime of the fd or handle.
+ * To summarize, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE allocates iso resources
+ * for the lifetime of the fd or @handle.
 * In contrast, %FW_CDEV_IOC_ALLOCATE_ISO_RESOURCE_ONCE allocates iso resources
 * for the duration of a bus generation.
 *
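For context, queueing one descriptor per the rewritten fw_cdev_iso_packet documentation might look like the following hedged sketch (a receive context with header_size 4 is assumed; the FW_CDEV_ISO_* control macros come from this header, but the sizes chosen here are arbitrary):

	struct fw_cdev_iso_packet packet = {
		/* strip one 4-byte context header per packet, accept up to
		 * 4096 payload bytes, and raise an fw_cdev_event_iso_interrupt
		 * when this entry completes */
		.control = FW_CDEV_ISO_HEADER_LENGTH(4) |
			   FW_CDEV_ISO_PAYLOAD_LENGTH(4096) |
			   FW_CDEV_ISO_INTERRUPT,
	};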
diff --git a/include/linux/firewire-constants.h b/include/linux/firewire-constants.h
index b316770a43fd..9c63f06e67f2 100644
--- a/include/linux/firewire-constants.h
+++ b/include/linux/firewire-constants.h
@@ -1,3 +1,28 @@
+/*
+ * IEEE 1394 constants.
+ *
+ * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
 #ifndef _LINUX_FIREWIRE_CONSTANTS_H
 #define _LINUX_FIREWIRE_CONSTANTS_H
 
@@ -21,7 +46,7 @@
 #define EXTCODE_WRAP_ADD	0x6
 #define EXTCODE_VENDOR_DEPENDENT	0x7
 
-/* Juju specific tcodes */
+/* Linux firewire-core (Juju) specific tcodes */
 #define TCODE_LOCK_MASK_SWAP	(0x10 | EXTCODE_MASK_SWAP)
 #define TCODE_LOCK_COMPARE_SWAP	(0x10 | EXTCODE_COMPARE_SWAP)
 #define TCODE_LOCK_FETCH_ADD	(0x10 | EXTCODE_FETCH_ADD)
@@ -36,7 +61,7 @@
 #define RCODE_TYPE_ERROR	0x6
 #define RCODE_ADDRESS_ERROR	0x7
 
-/* Juju specific rcodes */
+/* Linux firewire-core (Juju) specific rcodes */
 #define RCODE_SEND_ERROR	0x10
 #define RCODE_CANCELLED		0x11
 #define RCODE_BUSY		0x12
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 56b50514ab25..5f2f4c4d8fb0 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -109,7 +109,7 @@ struct hd_struct {
 };
 
 #define GENHD_FL_REMOVABLE			1
-#define GENHD_FL_DRIVERFS			2
+/* 2 is unused */
 #define GENHD_FL_MEDIA_CHANGE_NOTIFY		4
 #define GENHD_FL_CD				8
 #define GENHD_FL_UP				16
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index 87018dc5527d..9e7a12d6385d 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -782,7 +782,6 @@ extern int i2o_exec_lct_get(struct i2o_controller *);
 #define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver)
 #define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
 #define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
-#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
 
 /**
  * i2o_out_to_virt - Turn an I2O message to a virtual address
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 3bd018baae20..c964cd7f436a 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -44,6 +44,7 @@ struct matrix_keymap_data {
  * @active_low:	gpio polarity
  * @wakeup:	controls whether the device should be set up as wakeup
  *	source
+ * @no_autorepeat:	disable key autorepeat
  *
  * This structure represents platform-specific data that use used by
  * matrix_keypad driver to perform proper initialization.
@@ -64,6 +65,7 @@ struct matrix_keypad_platform_data {
 
 	bool		active_low;
 	bool		wakeup;
+	bool		no_autorepeat;
 };
 
 /**
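A hedged board-file sketch using the new flag (all identifiers on the right-hand side are hypothetical; keymap_data is assumed to point at a struct matrix_keymap_data like the one whose kernel-doc is extended above):

	static struct matrix_keypad_platform_data board_keypad_pdata = {
		.keymap_data	= &board_keymap_data,	/* defined elsewhere */
		.active_low	= true,
		.wakeup		= true,
		.no_autorepeat	= true,			/* the field added here */
	};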
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a3fd0f91d943..169d07758ee5 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -54,7 +54,7 @@ extern struct kmem_cache *kvm_vcpu_cache;
  */
 struct kvm_io_bus {
 	int dev_count;
-#define NR_IOBUS_DEVS 6
+#define NR_IOBUS_DEVS 200
 	struct kvm_io_device *devs[NR_IOBUS_DEVS];
 };
 
@@ -119,6 +119,11 @@ struct kvm_memory_slot {
 	int user_alloc;
 };
 
+static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
+{
+	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+}
+
 struct kvm_kernel_irq_routing_entry {
 	u32 gsi;
 	u32 type;
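The new helper rounds the slot's page count up to a whole number of longs before converting bits to bytes, so the last word of the dirty bitmap is always fully allocated. A worked example (assumes a 64-bit host, i.e. BITS_PER_LONG == 64):

	struct kvm_memory_slot slot = { .npages = 1000 };

	/* ALIGN(1000, 64) = 1024 bits; 1024 / 8 = 128 bytes */
	unsigned long bytes = kvm_dirty_bitmap_bytes(&slot);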
diff --git a/include/linux/lcm.h b/include/linux/lcm.h
new file mode 100644
index 000000000000..7bf01d779b45
--- /dev/null
+++ b/include/linux/lcm.h
@@ -0,0 +1,8 @@
+#ifndef _LCM_H
+#define _LCM_H
+
+#include <linux/compiler.h>
+
+unsigned long lcm(unsigned long a, unsigned long b) __attribute_const__;
+
+#endif /* _LCM_H */
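The header only declares the function; the body lives elsewhere (lib/lcm.c in the corresponding tree). A minimal sketch of the conventional reduction lcm(a, b) = a / gcd(a, b) * b, dividing first to limit overflow; the in-tree implementation may treat zero operands differently:

	#include <linux/gcd.h>

	unsigned long lcm(unsigned long a, unsigned long b)
	{
		if (a && b)
			return (a / gcd(a, b)) * b;
		else if (b)
			return b;
		return a;	/* lcm with a zero operand taken as the other operand */
	}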
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 717a5e54eb1d..e82957acea56 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -176,6 +176,7 @@ struct nfs_server {
 #define NFS_CAP_ATIME		(1U << 11)
 #define NFS_CAP_CTIME		(1U << 12)
 #define NFS_CAP_MTIME		(1U << 13)
+#define NFS_CAP_POSIX_LOCK	(1U << 14)
 
 
 /* maximum number of slots to use */
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index c5da74918096..55ca73cf25e5 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -121,6 +121,13 @@ do { \
  * (Note, rcu_assign_pointer and rcu_dereference are not needed to control
  * access to data items when inserting into or looking up from the radix tree)
  *
+ * Note that the value returned by radix_tree_tag_get() may not be relied upon
+ * if only the RCU read lock is held.  Functions to set/clear tags and to
+ * delete nodes running concurrently with it may affect its result such that
+ * two consecutive reads in the same locked section may return different
+ * values.  If reliability is required, modification functions must also be
+ * excluded from concurrency.
+ *
  * radix_tree_tagged is able to be called without locking or RCU.
  */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 872a98e13d6a..07db2feb8572 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -101,10 +101,7 @@ extern struct lockdep_map rcu_sched_lock_map;
 # define rcu_read_release_sched() \
 		lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
 
-static inline int debug_lockdep_rcu_enabled(void)
-{
-	return likely(rcu_scheduler_active && debug_locks);
-}
+extern int debug_lockdep_rcu_enabled(void);
 
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
@@ -195,12 +192,30 @@ static inline int rcu_read_lock_sched_held(void)
 
 /**
  * rcu_dereference_check - rcu_dereference with debug checking
+ * @p: The pointer to read, prior to dereferencing
+ * @c: The conditions under which the dereference will take place
+ *
+ * Do an rcu_dereference(), but check that the conditions under which the
+ * dereference will take place are correct.  Typically the conditions indicate
+ * the various locking conditions that should be held at that point.  The check
+ * should return true if the conditions are satisfied.
+ *
+ * For example:
+ *
+ *	bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
+ *					      lockdep_is_held(&foo->lock));
  *
- * Do an rcu_dereference(), but check that the context is correct.
- * For example, rcu_dereference_check(gp, rcu_read_lock_held()) to
- * ensure that the rcu_dereference_check() executes within an RCU
- * read-side critical section.  It is also possible to check for
- * locks being held, for example, by using lockdep_is_held().
+ * could be used to indicate to lockdep that foo->bar may only be dereferenced
+ * if either the RCU read lock is held, or that the lock required to replace
+ * the bar struct at foo->bar is held.
+ *
+ * Note that the list of conditions may also include indications of when a lock
+ * need not be held, for example during initialisation or destruction of the
+ * target struct:
+ *
+ *	bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() ||
+ *					      lockdep_is_held(&foo->lock) ||
+ *					      atomic_read(&foo->usage) == 0);
  */
 #define rcu_dereference_check(p, c) \
 ({ \
@@ -209,13 +224,45 @@ static inline int rcu_read_lock_sched_held(void)
 	rcu_dereference_raw(p); \
 })
 
+/**
+ * rcu_dereference_protected - fetch RCU pointer when updates prevented
+ *
+ * Return the value of the specified RCU-protected pointer, but omit
+ * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
+ * is useful in cases where update-side locks prevent the value of the
+ * pointer from changing.  Please note that this primitive does -not-
+ * prevent the compiler from repeating this reference or combining it
+ * with other references, so it should not be used without protection
+ * of appropriate locks.
+ */
+#define rcu_dereference_protected(p, c) \
+({ \
+	if (debug_lockdep_rcu_enabled() && !(c)) \
+		lockdep_rcu_dereference(__FILE__, __LINE__); \
+	(p); \
+})
+
 #else /* #ifdef CONFIG_PROVE_RCU */
 
 #define rcu_dereference_check(p, c)	rcu_dereference_raw(p)
+#define rcu_dereference_protected(p, c) (p)
 
 #endif /* #else #ifdef CONFIG_PROVE_RCU */
 
 /**
+ * rcu_access_pointer - fetch RCU pointer with no dereferencing
+ *
+ * Return the value of the specified RCU-protected pointer, but omit the
+ * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
+ * when the value of this pointer is accessed, but the pointer is not
+ * dereferenced, for example, when testing an RCU-protected pointer against
+ * NULL.  This may also be used in cases where update-side locks prevent
+ * the value of the pointer from changing, but rcu_dereference_protected()
+ * is a lighter-weight primitive for this use case.
+ */
+#define rcu_access_pointer(p)	ACCESS_ONCE(p)
+
+/**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
  * When synchronize_rcu() is invoked on one CPU while other CPUs
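Taken together, the new accessors split by use case: rcu_dereference_check() for readers, rcu_dereference_protected() for update-side code that already holds the relevant lock, and rcu_access_pointer() when the pointer is only tested, never dereferenced. A hedged sketch (gp, gp_lock and struct foo are hypothetical):

	struct foo *gp;			/* RCU-protected global */
	DEFINE_SPINLOCK(gp_lock);	/* serializes updates to gp */

	void reader(void)
	{
		struct foo *p;

		rcu_read_lock();
		p = rcu_dereference_check(gp, rcu_read_lock_held());
		/* ... read *p ... */
		rcu_read_unlock();
	}

	void updater(struct foo *newp)
	{
		struct foo *old;

		spin_lock(&gp_lock);
		old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
		rcu_assign_pointer(gp, newp);
		spin_unlock(&gp_lock);
		/* free old only after a grace period, e.g. synchronize_rcu() */
	}

	int configured(void)
	{
		return rcu_access_pointer(gp) != NULL;	/* tested, not dereferenced */
	}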
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 28c9fd020d39..ebd747265294 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -183,9 +183,13 @@ static inline struct regulator *__must_check regulator_get(struct device *dev,
 {
 	/* Nothing except the stubbed out regulator API should be
 	 * looking at the value except to check if it is an error
-	 * value so the actual return value doesn't matter.
+	 * value.  Drivers are free to handle NULL specifically by
+	 * skipping all regulator API calls, but they don't have to.
+	 * Drivers which don't, should make sure they properly handle
+	 * corner cases of the API, such as regulator_get_voltage()
+	 * returning 0.
 	 */
-	return (struct regulator *)id;
+	return NULL;
 }
 static inline void regulator_put(struct regulator *regulator)
 {
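A hedged sketch of a consumer that takes advantage of the new NULL return from the stub (the supply name "vcc" is illustrative; with CONFIG_REGULATOR enabled, regulator_get() returns a valid pointer or an ERR_PTR() value rather than NULL, so the NULL branch only fires against the stub):

	struct regulator *reg = regulator_get(dev, "vcc");

	if (IS_ERR(reg))
		return PTR_ERR(reg);	/* real API reported an error */
	if (reg)
		regulator_enable(reg);	/* NULL from the stub: calls skipped */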
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 488446289cab..49d1247cd6d9 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -106,6 +106,7 @@ int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);
 unsigned int kmem_cache_size(struct kmem_cache *);
 const char *kmem_cache_name(struct kmem_cache *);
+int kern_ptr_validate(const void *ptr, unsigned long size);
 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);
 
 /*
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 76e8903cd204..36520ded3e06 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -34,6 +34,9 @@ struct writeback_control {
 	enum writeback_sync_modes sync_mode;
 	unsigned long *older_than_this;	/* If !NULL, only write back inodes
 					   older than this */
+	unsigned long wb_start;		/* Time writeback_inodes_wb was
+					   called. This is needed to avoid
+					   extra jobs and livelock */
 	long nr_to_write;		/* Write this many pages, and decrement
 					   this for each page written */
 	long pages_skipped;		/* Pages which were not written */
diff --git a/include/net/x25.h b/include/net/x25.h
index 15ef9624ab75..468551ea4f1d 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -183,6 +183,10 @@ extern int sysctl_x25_clear_request_timeout;
 extern int sysctl_x25_ack_holdback_timeout;
 extern int sysctl_x25_forward;
 
+extern int x25_parse_address_block(struct sk_buff *skb,
+				   struct x25_address *called_addr,
+				   struct x25_address *calling_addr);
+
 extern int x25_addr_ntoa(unsigned char *, struct x25_address *,
 			 struct x25_address *);
 extern int x25_addr_aton(unsigned char *, struct x25_address *,
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 5fb72733331e..d870a918559c 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -40,6 +40,16 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
 		  __entry->nr_sector, __entry->errors)
 );
 
+/**
+ * block_rq_abort - abort block operation request
+ * @q: queue containing the block operation request
+ * @rq: block IO operation request
+ *
+ * Called immediately after pending block IO operation request @rq in
+ * queue @q is aborted. The fields in the operation request @rq
+ * can be examined to determine which device and sectors the pending
+ * operation would access.
+ */
 DEFINE_EVENT(block_rq_with_error, block_rq_abort,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -47,6 +57,15 @@ DEFINE_EVENT(block_rq_with_error, block_rq_abort,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_rq_requeue - place block IO request back on a queue
+ * @q: queue holding operation
+ * @rq: block IO operation request
+ *
+ * The block operation request @rq is being placed back into queue
+ * @q.  For some reason the request was not completed and needs to be
+ * put back in the queue.
+ */
 DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -54,6 +73,17 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_rq_complete - block IO operation completed by device driver
+ * @q: queue containing the block operation request
+ * @rq: block operations request
+ *
+ * The block_rq_complete tracepoint event indicates that some portion
+ * of operation request has been completed by the device driver.  If
+ * the @rq->bio is %NULL, then there is absolutely no additional work to
+ * do for the request. If @rq->bio is non-NULL then there is
+ * additional work required to complete the request.
+ */
 DEFINE_EVENT(block_rq_with_error, block_rq_complete,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -95,6 +125,16 @@ DECLARE_EVENT_CLASS(block_rq,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_rq_insert - insert block operation request into queue
+ * @q: target queue
+ * @rq: block IO operation request
+ *
+ * Called immediately before block operation request @rq is inserted
+ * into queue @q.  The fields in the operation request @rq struct can
+ * be examined to determine which device and sectors the pending
+ * operation would access.
+ */
 DEFINE_EVENT(block_rq, block_rq_insert,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -102,6 +142,14 @@ DEFINE_EVENT(block_rq, block_rq_insert,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_rq_issue - issue pending block IO request operation to device driver
+ * @q: queue holding operation
+ * @rq: block IO operation operation request
+ *
+ * Called when block operation request @rq from queue @q is sent to a
+ * device driver for processing.
+ */
 DEFINE_EVENT(block_rq, block_rq_issue,
 
 	TP_PROTO(struct request_queue *q, struct request *rq),
@@ -109,6 +157,17 @@ DEFINE_EVENT(block_rq, block_rq_issue,
 	TP_ARGS(q, rq)
 );
 
+/**
+ * block_bio_bounce - used bounce buffer when processing block operation
+ * @q: queue holding the block operation
+ * @bio: block operation
+ *
+ * A bounce buffer was used to handle the block operation @bio in @q.
+ * This occurs when hardware limitations prevent a direct transfer of
+ * data between the @bio data memory area and the IO device.  Use of a
+ * bounce buffer requires extra copying of data and decreases
+ * performance.
+ */
 TRACE_EVENT(block_bio_bounce,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -138,6 +197,14 @@ TRACE_EVENT(block_bio_bounce,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_bio_complete - completed all work on the block operation
+ * @q: queue holding the block operation
+ * @bio: block operation completed
+ *
+ * This tracepoint indicates there is no further work to do on this
+ * block IO operation @bio.
+ */
 TRACE_EVENT(block_bio_complete,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -193,6 +260,14 @@ DECLARE_EVENT_CLASS(block_bio,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_bio_backmerge - merging block operation to the end of an existing operation
+ * @q: queue holding operation
+ * @bio: new block operation to merge
+ *
+ * Merging block request @bio to the end of an existing block request
+ * in queue @q.
+ */
 DEFINE_EVENT(block_bio, block_bio_backmerge,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -200,6 +275,14 @@ DEFINE_EVENT(block_bio, block_bio_backmerge,
 	TP_ARGS(q, bio)
 );
 
+/**
+ * block_bio_frontmerge - merging block operation to the beginning of an existing operation
+ * @q: queue holding operation
+ * @bio: new block operation to merge
+ *
+ * Merging block IO operation @bio to the beginning of an existing block
+ * operation in queue @q.
+ */
 DEFINE_EVENT(block_bio, block_bio_frontmerge,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -207,6 +290,13 @@ DEFINE_EVENT(block_bio, block_bio_frontmerge,
 	TP_ARGS(q, bio)
 );
 
+/**
+ * block_bio_queue - putting new block IO operation in queue
+ * @q: queue holding operation
+ * @bio: new block operation
+ *
+ * About to place the block IO operation @bio into queue @q.
+ */
 DEFINE_EVENT(block_bio, block_bio_queue,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio),
@@ -243,6 +333,15 @@ DECLARE_EVENT_CLASS(block_get_rq,
 		  __entry->nr_sector, __entry->comm)
 );
 
+/**
+ * block_getrq - get a free request entry in queue for block IO operations
+ * @q: queue for operations
+ * @bio: pending block IO operation
+ * @rw: low bit indicates a read (%0) or a write (%1)
+ *
+ * A request struct for queue @q has been allocated to handle the
+ * block IO operation @bio.
+ */
 DEFINE_EVENT(block_get_rq, block_getrq,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
@@ -250,6 +349,17 @@ DEFINE_EVENT(block_get_rq, block_getrq,
 	TP_ARGS(q, bio, rw)
 );
 
+/**
+ * block_sleeprq - waiting to get a free request entry in queue for block IO operation
+ * @q: queue for operation
+ * @bio: pending block IO operation
+ * @rw: low bit indicates a read (%0) or a write (%1)
+ *
+ * In the case where a request struct cannot be provided for queue @q
+ * the process needs to wait for an request struct to become
+ * available.  This tracepoint event is generated each time the
+ * process goes to sleep waiting for request struct become available.
+ */
 DEFINE_EVENT(block_get_rq, block_sleeprq,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
@@ -257,6 +367,14 @@ DEFINE_EVENT(block_get_rq, block_sleeprq,
 	TP_ARGS(q, bio, rw)
 );
 
+/**
+ * block_plug - keep operations requests in request queue
+ * @q: request queue to plug
+ *
+ * Plug the request queue @q.  Do not allow block operation requests
+ * to be sent to the device driver. Instead, accumulate requests in
+ * the queue to improve throughput performance of the block device.
+ */
 TRACE_EVENT(block_plug,
 
 	TP_PROTO(struct request_queue *q),
@@ -293,6 +411,13 @@ DECLARE_EVENT_CLASS(block_unplug,
 	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
 );
 
+/**
+ * block_unplug_timer - timed release of operations requests in queue to device driver
+ * @q: request queue to unplug
+ *
+ * Unplug the request queue @q because a timer expired and allow block
+ * operation requests to be sent to the device driver.
+ */
 DEFINE_EVENT(block_unplug, block_unplug_timer,
 
 	TP_PROTO(struct request_queue *q),
@@ -300,6 +425,13 @@ DEFINE_EVENT(block_unplug, block_unplug_timer,
 	TP_ARGS(q)
 );
 
+/**
+ * block_unplug_io - release of operations requests in request queue
+ * @q: request queue to unplug
+ *
+ * Unplug request queue @q because device driver is scheduled to work
+ * on elements in the request queue.
+ */
 DEFINE_EVENT(block_unplug, block_unplug_io,
 
 	TP_PROTO(struct request_queue *q),
@@ -307,6 +439,17 @@ DEFINE_EVENT(block_unplug, block_unplug_io,
 	TP_ARGS(q)
 );
 
+/**
+ * block_split - split a single bio struct into two bio structs
+ * @q: queue containing the bio
+ * @bio: block operation being split
+ * @new_sector: The starting sector for the new bio
+ *
+ * The bio request @bio in request queue @q needs to be split into two
+ * bio requests.  The newly created @bio request starts at
+ * @new_sector. This split may be required due to hardware limitation
+ * such as operation crossing device boundaries in a RAID system.
+ */
 TRACE_EVENT(block_split,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio,
@@ -337,6 +480,16 @@ TRACE_EVENT(block_split,
 		  __entry->comm)
 );
 
+/**
+ * block_remap - map request for a partition to the raw device
+ * @q: queue holding the operation
+ * @bio: revised operation
+ * @dev: device for the operation
+ * @from: original sector for the operation
+ *
+ * An operation for a partition on a block device has been mapped to the
+ * raw block device.
+ */
 TRACE_EVENT(block_remap,
 
 	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
@@ -370,6 +523,17 @@ TRACE_EVENT(block_remap,
 		  (unsigned long long)__entry->old_sector)
 );
 
+/**
+ * block_rq_remap - map request for a block operation request
+ * @q: queue holding the operation
+ * @rq: block IO operation request
+ * @dev: device for the operation
+ * @from: original sector for the operation
+ *
+ * The block operation request @rq in @q has been remapped.  The block
+ * operation request @rq holds the current information and @from hold
+ * the original sector.
+ */
 TRACE_EVENT(block_rq_remap,
 
 	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,