diff options
Diffstat (limited to 'include/linux')
170 files changed, 3866 insertions, 2030 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 9aa9bcadf869..9d65d4d0bd9c 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
| @@ -39,6 +39,7 @@ header-y += ax25.h | |||
| 39 | header-y += b1lli.h | 39 | header-y += b1lli.h |
| 40 | header-y += baycom.h | 40 | header-y += baycom.h |
| 41 | header-y += bfs_fs.h | 41 | header-y += bfs_fs.h |
| 42 | header-y += blk_types.h | ||
| 42 | header-y += blkpg.h | 43 | header-y += blkpg.h |
| 43 | header-y += bpqether.h | 44 | header-y += bpqether.h |
| 44 | header-y += bsg.h | 45 | header-y += bsg.h |
| @@ -210,6 +211,7 @@ unifdef-y += ethtool.h | |||
| 210 | unifdef-y += eventpoll.h | 211 | unifdef-y += eventpoll.h |
| 211 | unifdef-y += signalfd.h | 212 | unifdef-y += signalfd.h |
| 212 | unifdef-y += ext2_fs.h | 213 | unifdef-y += ext2_fs.h |
| 214 | unifdef-y += fanotify.h | ||
| 213 | unifdef-y += fb.h | 215 | unifdef-y += fb.h |
| 214 | unifdef-y += fcntl.h | 216 | unifdef-y += fcntl.h |
| 215 | unifdef-y += filter.h | 217 | unifdef-y += filter.h |
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h index f7dd576dd5a4..be3d9a77d6ed 100644 --- a/include/linux/ahci_platform.h +++ b/include/linux/ahci_platform.h | |||
| @@ -15,11 +15,13 @@ | |||
| 15 | #ifndef _AHCI_PLATFORM_H | 15 | #ifndef _AHCI_PLATFORM_H |
| 16 | #define _AHCI_PLATFORM_H | 16 | #define _AHCI_PLATFORM_H |
| 17 | 17 | ||
| 18 | #include <linux/compiler.h> | ||
| 19 | |||
| 18 | struct device; | 20 | struct device; |
| 19 | struct ata_port_info; | 21 | struct ata_port_info; |
| 20 | 22 | ||
| 21 | struct ahci_platform_data { | 23 | struct ahci_platform_data { |
| 22 | int (*init)(struct device *dev); | 24 | int (*init)(struct device *dev, void __iomem *addr); |
| 23 | void (*exit)(struct device *dev); | 25 | void (*exit)(struct device *dev); |
| 24 | const struct ata_port_info *ata_port_info; | 26 | const struct ata_port_info *ata_port_info; |
| 25 | unsigned int force_port_map; | 27 | unsigned int force_port_map; |
diff --git a/include/linux/audit.h b/include/linux/audit.h index f391d45c8aea..e24afabc548f 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
| @@ -544,7 +544,7 @@ extern int audit_signals; | |||
| 544 | #define audit_putname(n) do { ; } while (0) | 544 | #define audit_putname(n) do { ; } while (0) |
| 545 | #define __audit_inode(n,d) do { ; } while (0) | 545 | #define __audit_inode(n,d) do { ; } while (0) |
| 546 | #define __audit_inode_child(i,p) do { ; } while (0) | 546 | #define __audit_inode_child(i,p) do { ; } while (0) |
| 547 | #define audit_inode(n,d) do { ; } while (0) | 547 | #define audit_inode(n,d) do { (void)(d); } while (0) |
| 548 | #define audit_inode_child(i,p) do { ; } while (0) | 548 | #define audit_inode_child(i,p) do { ; } while (0) |
| 549 | #define audit_core_dumps(i) do { ; } while (0) | 549 | #define audit_core_dumps(i) do { ; } while (0) |
| 550 | #define auditsc_get_stamp(c,t,s) (0) | 550 | #define auditsc_get_stamp(c,t,s) (0) |
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h index 7b09c8348fd3..da64e15004b6 100644 --- a/include/linux/auto_fs.h +++ b/include/linux/auto_fs.h | |||
| @@ -79,6 +79,7 @@ struct autofs_packet_expire { | |||
| 79 | #define AUTOFS_IOC_FAIL _IO(0x93,0x61) | 79 | #define AUTOFS_IOC_FAIL _IO(0x93,0x61) |
| 80 | #define AUTOFS_IOC_CATATONIC _IO(0x93,0x62) | 80 | #define AUTOFS_IOC_CATATONIC _IO(0x93,0x62) |
| 81 | #define AUTOFS_IOC_PROTOVER _IOR(0x93,0x63,int) | 81 | #define AUTOFS_IOC_PROTOVER _IOR(0x93,0x63,int) |
| 82 | #define AUTOFS_IOC_SETTIMEOUT32 _IOWR(0x93,0x64,compat_ulong_t) | ||
| 82 | #define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93,0x64,unsigned long) | 83 | #define AUTOFS_IOC_SETTIMEOUT _IOWR(0x93,0x64,unsigned long) |
| 83 | #define AUTOFS_IOC_EXPIRE _IOR(0x93,0x65,struct autofs_packet_expire) | 84 | #define AUTOFS_IOC_EXPIRE _IOR(0x93,0x65,struct autofs_packet_expire) |
| 84 | 85 | ||
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index e9aec0d099df..35b00746c712 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
| @@ -31,6 +31,7 @@ enum bdi_state { | |||
| 31 | BDI_async_congested, /* The async (write) queue is getting full */ | 31 | BDI_async_congested, /* The async (write) queue is getting full */ |
| 32 | BDI_sync_congested, /* The sync queue is getting full */ | 32 | BDI_sync_congested, /* The sync queue is getting full */ |
| 33 | BDI_registered, /* bdi_register() was done */ | 33 | BDI_registered, /* bdi_register() was done */ |
| 34 | BDI_writeback_running, /* Writeback is in progress */ | ||
| 34 | BDI_unused, /* Available bits start here */ | 35 | BDI_unused, /* Available bits start here */ |
| 35 | }; | 36 | }; |
| 36 | 37 | ||
| @@ -45,22 +46,21 @@ enum bdi_stat_item { | |||
| 45 | #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) | 46 | #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) |
| 46 | 47 | ||
| 47 | struct bdi_writeback { | 48 | struct bdi_writeback { |
| 48 | struct list_head list; /* hangs off the bdi */ | 49 | struct backing_dev_info *bdi; /* our parent bdi */ |
| 49 | |||
| 50 | struct backing_dev_info *bdi; /* our parent bdi */ | ||
| 51 | unsigned int nr; | 50 | unsigned int nr; |
| 52 | 51 | ||
| 53 | unsigned long last_old_flush; /* last old data flush */ | 52 | unsigned long last_old_flush; /* last old data flush */ |
| 53 | unsigned long last_active; /* last time bdi thread was active */ | ||
| 54 | 54 | ||
| 55 | struct task_struct *task; /* writeback task */ | 55 | struct task_struct *task; /* writeback thread */ |
| 56 | struct list_head b_dirty; /* dirty inodes */ | 56 | struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */ |
| 57 | struct list_head b_io; /* parked for writeback */ | 57 | struct list_head b_dirty; /* dirty inodes */ |
| 58 | struct list_head b_more_io; /* parked for more writeback */ | 58 | struct list_head b_io; /* parked for writeback */ |
| 59 | struct list_head b_more_io; /* parked for more writeback */ | ||
| 59 | }; | 60 | }; |
| 60 | 61 | ||
| 61 | struct backing_dev_info { | 62 | struct backing_dev_info { |
| 62 | struct list_head bdi_list; | 63 | struct list_head bdi_list; |
| 63 | struct rcu_head rcu_head; | ||
| 64 | unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ | 64 | unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ |
| 65 | unsigned long state; /* Always use atomic bitops on this */ | 65 | unsigned long state; /* Always use atomic bitops on this */ |
| 66 | unsigned int capabilities; /* Device capabilities */ | 66 | unsigned int capabilities; /* Device capabilities */ |
| @@ -80,8 +80,7 @@ struct backing_dev_info { | |||
| 80 | unsigned int max_ratio, max_prop_frac; | 80 | unsigned int max_ratio, max_prop_frac; |
| 81 | 81 | ||
| 82 | struct bdi_writeback wb; /* default writeback info for this bdi */ | 82 | struct bdi_writeback wb; /* default writeback info for this bdi */ |
| 83 | spinlock_t wb_lock; /* protects update side of wb_list */ | 83 | spinlock_t wb_lock; /* protects work_list */ |
| 84 | struct list_head wb_list; /* the flusher threads hanging off this bdi */ | ||
| 85 | 84 | ||
| 86 | struct list_head work_list; | 85 | struct list_head work_list; |
| 87 | 86 | ||
| @@ -105,9 +104,10 @@ void bdi_unregister(struct backing_dev_info *bdi); | |||
| 105 | int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); | 104 | int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); |
| 106 | void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages); | 105 | void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages); |
| 107 | void bdi_start_background_writeback(struct backing_dev_info *bdi); | 106 | void bdi_start_background_writeback(struct backing_dev_info *bdi); |
| 108 | int bdi_writeback_task(struct bdi_writeback *wb); | 107 | int bdi_writeback_thread(void *data); |
| 109 | int bdi_has_dirty_io(struct backing_dev_info *bdi); | 108 | int bdi_has_dirty_io(struct backing_dev_info *bdi); |
| 110 | void bdi_arm_supers_timer(void); | 109 | void bdi_arm_supers_timer(void); |
| 110 | void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); | ||
| 111 | 111 | ||
| 112 | extern spinlock_t bdi_lock; | 112 | extern spinlock_t bdi_lock; |
| 113 | extern struct list_head bdi_list; | 113 | extern struct list_head bdi_list; |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 7fc5606e6ea5..5274103434ad 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | * | 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | 12 | * | |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
| 15 | * | 15 | * |
| @@ -28,6 +28,9 @@ | |||
| 28 | 28 | ||
| 29 | #include <asm/io.h> | 29 | #include <asm/io.h> |
| 30 | 30 | ||
| 31 | /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ | ||
| 32 | #include <linux/blk_types.h> | ||
| 33 | |||
| 31 | #define BIO_DEBUG | 34 | #define BIO_DEBUG |
| 32 | 35 | ||
| 33 | #ifdef BIO_DEBUG | 36 | #ifdef BIO_DEBUG |
| @@ -41,154 +44,6 @@ | |||
| 41 | #define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) | 44 | #define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) |
| 42 | 45 | ||
| 43 | /* | 46 | /* |
| 44 | * was unsigned short, but we might as well be ready for > 64kB I/O pages | ||
| 45 | */ | ||
| 46 | struct bio_vec { | ||
| 47 | struct page *bv_page; | ||
| 48 | unsigned int bv_len; | ||
| 49 | unsigned int bv_offset; | ||
| 50 | }; | ||
| 51 | |||
| 52 | struct bio_set; | ||
| 53 | struct bio; | ||
| 54 | struct bio_integrity_payload; | ||
| 55 | typedef void (bio_end_io_t) (struct bio *, int); | ||
| 56 | typedef void (bio_destructor_t) (struct bio *); | ||
| 57 | |||
| 58 | /* | ||
| 59 | * main unit of I/O for the block layer and lower layers (ie drivers and | ||
| 60 | * stacking drivers) | ||
| 61 | */ | ||
| 62 | struct bio { | ||
| 63 | sector_t bi_sector; /* device address in 512 byte | ||
| 64 | sectors */ | ||
| 65 | struct bio *bi_next; /* request queue link */ | ||
| 66 | struct block_device *bi_bdev; | ||
| 67 | unsigned long bi_flags; /* status, command, etc */ | ||
| 68 | unsigned long bi_rw; /* bottom bits READ/WRITE, | ||
| 69 | * top bits priority | ||
| 70 | */ | ||
| 71 | |||
| 72 | unsigned short bi_vcnt; /* how many bio_vec's */ | ||
| 73 | unsigned short bi_idx; /* current index into bvl_vec */ | ||
| 74 | |||
| 75 | /* Number of segments in this BIO after | ||
| 76 | * physical address coalescing is performed. | ||
| 77 | */ | ||
| 78 | unsigned int bi_phys_segments; | ||
| 79 | |||
| 80 | unsigned int bi_size; /* residual I/O count */ | ||
| 81 | |||
| 82 | /* | ||
| 83 | * To keep track of the max segment size, we account for the | ||
| 84 | * sizes of the first and last mergeable segments in this bio. | ||
| 85 | */ | ||
| 86 | unsigned int bi_seg_front_size; | ||
| 87 | unsigned int bi_seg_back_size; | ||
| 88 | |||
| 89 | unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ | ||
| 90 | |||
| 91 | unsigned int bi_comp_cpu; /* completion CPU */ | ||
| 92 | |||
| 93 | atomic_t bi_cnt; /* pin count */ | ||
| 94 | |||
| 95 | struct bio_vec *bi_io_vec; /* the actual vec list */ | ||
| 96 | |||
| 97 | bio_end_io_t *bi_end_io; | ||
| 98 | |||
| 99 | void *bi_private; | ||
| 100 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | ||
| 101 | struct bio_integrity_payload *bi_integrity; /* data integrity */ | ||
| 102 | #endif | ||
| 103 | |||
| 104 | bio_destructor_t *bi_destructor; /* destructor */ | ||
| 105 | |||
| 106 | /* | ||
| 107 | * We can inline a number of vecs at the end of the bio, to avoid | ||
| 108 | * double allocations for a small number of bio_vecs. This member | ||
| 109 | * MUST obviously be kept at the very end of the bio. | ||
| 110 | */ | ||
| 111 | struct bio_vec bi_inline_vecs[0]; | ||
| 112 | }; | ||
| 113 | |||
| 114 | /* | ||
| 115 | * bio flags | ||
| 116 | */ | ||
| 117 | #define BIO_UPTODATE 0 /* ok after I/O completion */ | ||
| 118 | #define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ | ||
| 119 | #define BIO_EOF 2 /* out-out-bounds error */ | ||
| 120 | #define BIO_SEG_VALID 3 /* bi_phys_segments valid */ | ||
| 121 | #define BIO_CLONED 4 /* doesn't own data */ | ||
| 122 | #define BIO_BOUNCED 5 /* bio is a bounce bio */ | ||
| 123 | #define BIO_USER_MAPPED 6 /* contains user pages */ | ||
| 124 | #define BIO_EOPNOTSUPP 7 /* not supported */ | ||
| 125 | #define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ | ||
| 126 | #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ | ||
| 127 | #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ | ||
| 128 | #define BIO_QUIET 11 /* Make BIO Quiet */ | ||
| 129 | #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) | ||
| 130 | |||
| 131 | /* | ||
| 132 | * top 4 bits of bio flags indicate the pool this bio came from | ||
| 133 | */ | ||
| 134 | #define BIO_POOL_BITS (4) | ||
| 135 | #define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) | ||
| 136 | #define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS) | ||
| 137 | #define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) | ||
| 138 | #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) | ||
| 139 | |||
| 140 | /* | ||
| 141 | * bio bi_rw flags | ||
| 142 | * | ||
| 143 | * bit 0 -- data direction | ||
| 144 | * If not set, bio is a read from device. If set, it's a write to device. | ||
| 145 | * bit 1 -- fail fast device errors | ||
| 146 | * bit 2 -- fail fast transport errors | ||
| 147 | * bit 3 -- fail fast driver errors | ||
| 148 | * bit 4 -- rw-ahead when set | ||
| 149 | * bit 5 -- barrier | ||
| 150 | * Insert a serialization point in the IO queue, forcing previously | ||
| 151 | * submitted IO to be completed before this one is issued. | ||
| 152 | * bit 6 -- synchronous I/O hint. | ||
| 153 | * bit 7 -- Unplug the device immediately after submitting this bio. | ||
| 154 | * bit 8 -- metadata request | ||
| 155 | * Used for tracing to differentiate metadata and data IO. May also | ||
| 156 | * get some preferential treatment in the IO scheduler | ||
| 157 | * bit 9 -- discard sectors | ||
| 158 | * Informs the lower level device that this range of sectors is no longer | ||
| 159 | * used by the file system and may thus be freed by the device. Used | ||
| 160 | * for flash based storage. | ||
| 161 | * Don't want driver retries for any fast fail whatever the reason. | ||
| 162 | * bit 10 -- Tell the IO scheduler not to wait for more requests after this | ||
| 163 | one has been submitted, even if it is a SYNC request. | ||
| 164 | */ | ||
| 165 | enum bio_rw_flags { | ||
| 166 | BIO_RW, | ||
| 167 | BIO_RW_FAILFAST_DEV, | ||
| 168 | BIO_RW_FAILFAST_TRANSPORT, | ||
| 169 | BIO_RW_FAILFAST_DRIVER, | ||
| 170 | /* above flags must match REQ_* */ | ||
| 171 | BIO_RW_AHEAD, | ||
| 172 | BIO_RW_BARRIER, | ||
| 173 | BIO_RW_SYNCIO, | ||
| 174 | BIO_RW_UNPLUG, | ||
| 175 | BIO_RW_META, | ||
| 176 | BIO_RW_DISCARD, | ||
| 177 | BIO_RW_NOIDLE, | ||
| 178 | }; | ||
| 179 | |||
| 180 | /* | ||
| 181 | * First four bits must match between bio->bi_rw and rq->cmd_flags, make | ||
| 182 | * that explicit here. | ||
| 183 | */ | ||
| 184 | #define BIO_RW_RQ_MASK 0xf | ||
| 185 | |||
| 186 | static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag) | ||
| 187 | { | ||
| 188 | return (bio->bi_rw & (1 << flag)) != 0; | ||
| 189 | } | ||
| 190 | |||
| 191 | /* | ||
| 192 | * upper 16 bits of bi_rw define the io priority of this bio | 47 | * upper 16 bits of bi_rw define the io priority of this bio |
| 193 | */ | 48 | */ |
| 194 | #define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS) | 49 | #define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS) |
| @@ -211,7 +66,10 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag) | |||
| 211 | #define bio_offset(bio) bio_iovec((bio))->bv_offset | 66 | #define bio_offset(bio) bio_iovec((bio))->bv_offset |
| 212 | #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) | 67 | #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) |
| 213 | #define bio_sectors(bio) ((bio)->bi_size >> 9) | 68 | #define bio_sectors(bio) ((bio)->bi_size >> 9) |
| 214 | #define bio_empty_barrier(bio) (bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD)) | 69 | #define bio_empty_barrier(bio) \ |
| 70 | ((bio->bi_rw & REQ_HARDBARRIER) && \ | ||
| 71 | !bio_has_data(bio) && \ | ||
| 72 | !(bio->bi_rw & REQ_DISCARD)) | ||
| 215 | 73 | ||
| 216 | static inline unsigned int bio_cur_bytes(struct bio *bio) | 74 | static inline unsigned int bio_cur_bytes(struct bio *bio) |
| 217 | { | 75 | { |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h new file mode 100644 index 000000000000..ca83a97c9715 --- /dev/null +++ b/include/linux/blk_types.h | |||
| @@ -0,0 +1,196 @@ | |||
| 1 | /* | ||
| 2 | * Block data types and constants. Directly include this file only to | ||
| 3 | * break include dependency loop. | ||
| 4 | */ | ||
| 5 | #ifndef __LINUX_BLK_TYPES_H | ||
| 6 | #define __LINUX_BLK_TYPES_H | ||
| 7 | |||
| 8 | #ifdef CONFIG_BLOCK | ||
| 9 | |||
| 10 | #include <linux/types.h> | ||
| 11 | |||
| 12 | struct bio_set; | ||
| 13 | struct bio; | ||
| 14 | struct bio_integrity_payload; | ||
| 15 | struct page; | ||
| 16 | struct block_device; | ||
| 17 | typedef void (bio_end_io_t) (struct bio *, int); | ||
| 18 | typedef void (bio_destructor_t) (struct bio *); | ||
| 19 | |||
| 20 | /* | ||
| 21 | * was unsigned short, but we might as well be ready for > 64kB I/O pages | ||
| 22 | */ | ||
| 23 | struct bio_vec { | ||
| 24 | struct page *bv_page; | ||
| 25 | unsigned int bv_len; | ||
| 26 | unsigned int bv_offset; | ||
| 27 | }; | ||
| 28 | |||
| 29 | /* | ||
| 30 | * main unit of I/O for the block layer and lower layers (ie drivers and | ||
| 31 | * stacking drivers) | ||
| 32 | */ | ||
| 33 | struct bio { | ||
| 34 | sector_t bi_sector; /* device address in 512 byte | ||
| 35 | sectors */ | ||
| 36 | struct bio *bi_next; /* request queue link */ | ||
| 37 | struct block_device *bi_bdev; | ||
| 38 | unsigned long bi_flags; /* status, command, etc */ | ||
| 39 | unsigned long bi_rw; /* bottom bits READ/WRITE, | ||
| 40 | * top bits priority | ||
| 41 | */ | ||
| 42 | |||
| 43 | unsigned short bi_vcnt; /* how many bio_vec's */ | ||
| 44 | unsigned short bi_idx; /* current index into bvl_vec */ | ||
| 45 | |||
| 46 | /* Number of segments in this BIO after | ||
| 47 | * physical address coalescing is performed. | ||
| 48 | */ | ||
| 49 | unsigned int bi_phys_segments; | ||
| 50 | |||
| 51 | unsigned int bi_size; /* residual I/O count */ | ||
| 52 | |||
| 53 | /* | ||
| 54 | * To keep track of the max segment size, we account for the | ||
| 55 | * sizes of the first and last mergeable segments in this bio. | ||
| 56 | */ | ||
| 57 | unsigned int bi_seg_front_size; | ||
| 58 | unsigned int bi_seg_back_size; | ||
| 59 | |||
| 60 | unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ | ||
| 61 | |||
| 62 | unsigned int bi_comp_cpu; /* completion CPU */ | ||
| 63 | |||
| 64 | atomic_t bi_cnt; /* pin count */ | ||
| 65 | |||
| 66 | struct bio_vec *bi_io_vec; /* the actual vec list */ | ||
| 67 | |||
| 68 | bio_end_io_t *bi_end_io; | ||
| 69 | |||
| 70 | void *bi_private; | ||
| 71 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | ||
| 72 | struct bio_integrity_payload *bi_integrity; /* data integrity */ | ||
| 73 | #endif | ||
| 74 | |||
| 75 | bio_destructor_t *bi_destructor; /* destructor */ | ||
| 76 | |||
| 77 | /* | ||
| 78 | * We can inline a number of vecs at the end of the bio, to avoid | ||
| 79 | * double allocations for a small number of bio_vecs. This member | ||
| 80 | * MUST obviously be kept at the very end of the bio. | ||
| 81 | */ | ||
| 82 | struct bio_vec bi_inline_vecs[0]; | ||
| 83 | }; | ||
| 84 | |||
| 85 | /* | ||
| 86 | * bio flags | ||
| 87 | */ | ||
| 88 | #define BIO_UPTODATE 0 /* ok after I/O completion */ | ||
| 89 | #define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ | ||
| 90 | #define BIO_EOF 2 /* out-out-bounds error */ | ||
| 91 | #define BIO_SEG_VALID 3 /* bi_phys_segments valid */ | ||
| 92 | #define BIO_CLONED 4 /* doesn't own data */ | ||
| 93 | #define BIO_BOUNCED 5 /* bio is a bounce bio */ | ||
| 94 | #define BIO_USER_MAPPED 6 /* contains user pages */ | ||
| 95 | #define BIO_EOPNOTSUPP 7 /* not supported */ | ||
| 96 | #define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ | ||
| 97 | #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ | ||
| 98 | #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ | ||
| 99 | #define BIO_QUIET 11 /* Make BIO Quiet */ | ||
| 100 | #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) | ||
| 101 | |||
| 102 | /* | ||
| 103 | * top 4 bits of bio flags indicate the pool this bio came from | ||
| 104 | */ | ||
| 105 | #define BIO_POOL_BITS (4) | ||
| 106 | #define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) | ||
| 107 | #define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS) | ||
| 108 | #define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) | ||
| 109 | #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) | ||
| 110 | |||
| 111 | #endif /* CONFIG_BLOCK */ | ||
| 112 | |||
| 113 | /* | ||
| 114 | * Request flags. For use in the cmd_flags field of struct request, and in | ||
| 115 | * bi_rw of struct bio. Note that some flags are only valid in either one. | ||
| 116 | */ | ||
| 117 | enum rq_flag_bits { | ||
| 118 | /* common flags */ | ||
| 119 | __REQ_WRITE, /* not set, read. set, write */ | ||
| 120 | __REQ_FAILFAST_DEV, /* no driver retries of device errors */ | ||
| 121 | __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ | ||
| 122 | __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ | ||
| 123 | |||
| 124 | __REQ_HARDBARRIER, /* may not be passed by drive either */ | ||
| 125 | __REQ_SYNC, /* request is sync (sync write or read) */ | ||
| 126 | __REQ_META, /* metadata io request */ | ||
| 127 | __REQ_DISCARD, /* request to discard sectors */ | ||
| 128 | __REQ_NOIDLE, /* don't anticipate more IO after this one */ | ||
| 129 | |||
| 130 | /* bio only flags */ | ||
| 131 | __REQ_UNPLUG, /* unplug the immediately after submission */ | ||
| 132 | __REQ_RAHEAD, /* read ahead, can fail anytime */ | ||
| 133 | |||
| 134 | /* request only flags */ | ||
| 135 | __REQ_SORTED, /* elevator knows about this request */ | ||
| 136 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ | ||
| 137 | __REQ_FUA, /* forced unit access */ | ||
| 138 | __REQ_NOMERGE, /* don't touch this for merging */ | ||
| 139 | __REQ_STARTED, /* drive already may have started this one */ | ||
| 140 | __REQ_DONTPREP, /* don't call prep for this one */ | ||
| 141 | __REQ_QUEUED, /* uses queueing */ | ||
| 142 | __REQ_ELVPRIV, /* elevator private data attached */ | ||
| 143 | __REQ_FAILED, /* set if the request failed */ | ||
| 144 | __REQ_QUIET, /* don't worry about errors */ | ||
| 145 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | ||
| 146 | __REQ_ORDERED_COLOR, /* is before or after barrier */ | ||
| 147 | __REQ_ALLOCED, /* request came from our alloc pool */ | ||
| 148 | __REQ_COPY_USER, /* contains copies of user pages */ | ||
| 149 | __REQ_INTEGRITY, /* integrity metadata has been remapped */ | ||
| 150 | __REQ_FLUSH, /* request for cache flush */ | ||
| 151 | __REQ_IO_STAT, /* account I/O stat */ | ||
| 152 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ | ||
| 153 | __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */ | ||
| 154 | __REQ_NR_BITS, /* stops here */ | ||
| 155 | }; | ||
| 156 | |||
| 157 | #define REQ_WRITE (1 << __REQ_WRITE) | ||
| 158 | #define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) | ||
| 159 | #define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) | ||
| 160 | #define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) | ||
| 161 | #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) | ||
| 162 | #define REQ_SYNC (1 << __REQ_SYNC) | ||
| 163 | #define REQ_META (1 << __REQ_META) | ||
| 164 | #define REQ_DISCARD (1 << __REQ_DISCARD) | ||
| 165 | #define REQ_NOIDLE (1 << __REQ_NOIDLE) | ||
| 166 | |||
| 167 | #define REQ_FAILFAST_MASK \ | ||
| 168 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) | ||
| 169 | #define REQ_COMMON_MASK \ | ||
| 170 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ | ||
| 171 | REQ_META| REQ_DISCARD | REQ_NOIDLE) | ||
| 172 | |||
| 173 | #define REQ_UNPLUG (1 << __REQ_UNPLUG) | ||
| 174 | #define REQ_RAHEAD (1 << __REQ_RAHEAD) | ||
| 175 | |||
| 176 | #define REQ_SORTED (1 << __REQ_SORTED) | ||
| 177 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) | ||
| 178 | #define REQ_FUA (1 << __REQ_FUA) | ||
| 179 | #define REQ_NOMERGE (1 << __REQ_NOMERGE) | ||
| 180 | #define REQ_STARTED (1 << __REQ_STARTED) | ||
| 181 | #define REQ_DONTPREP (1 << __REQ_DONTPREP) | ||
| 182 | #define REQ_QUEUED (1 << __REQ_QUEUED) | ||
| 183 | #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) | ||
| 184 | #define REQ_FAILED (1 << __REQ_FAILED) | ||
| 185 | #define REQ_QUIET (1 << __REQ_QUIET) | ||
| 186 | #define REQ_PREEMPT (1 << __REQ_PREEMPT) | ||
| 187 | #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) | ||
| 188 | #define REQ_ALLOCED (1 << __REQ_ALLOCED) | ||
| 189 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) | ||
| 190 | #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) | ||
| 191 | #define REQ_FLUSH (1 << __REQ_FLUSH) | ||
| 192 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) | ||
| 193 | #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) | ||
| 194 | #define REQ_SECURE (1 << __REQ_SECURE) | ||
| 195 | |||
| 196 | #endif /* __LINUX_BLK_TYPES_H */ | ||
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 09a840264d6f..2c54906f678f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -60,7 +60,6 @@ enum rq_cmd_type_bits { | |||
| 60 | REQ_TYPE_PM_RESUME, /* resume request */ | 60 | REQ_TYPE_PM_RESUME, /* resume request */ |
| 61 | REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ | 61 | REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ |
| 62 | REQ_TYPE_SPECIAL, /* driver defined type */ | 62 | REQ_TYPE_SPECIAL, /* driver defined type */ |
| 63 | REQ_TYPE_LINUX_BLOCK, /* generic block layer message */ | ||
| 64 | /* | 63 | /* |
| 65 | * for ATA/ATAPI devices. this really doesn't belong here, ide should | 64 | * for ATA/ATAPI devices. this really doesn't belong here, ide should |
| 66 | * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver | 65 | * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver |
| @@ -70,84 +69,6 @@ enum rq_cmd_type_bits { | |||
| 70 | REQ_TYPE_ATA_PC, | 69 | REQ_TYPE_ATA_PC, |
| 71 | }; | 70 | }; |
| 72 | 71 | ||
| 73 | /* | ||
| 74 | * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being | ||
| 75 | * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a | ||
| 76 | * SCSI cdb. | ||
| 77 | * | ||
| 78 | * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need, | ||
| 79 | * typically to differentiate REQ_TYPE_SPECIAL requests. | ||
| 80 | * | ||
| 81 | */ | ||
| 82 | enum { | ||
| 83 | REQ_LB_OP_EJECT = 0x40, /* eject request */ | ||
| 84 | REQ_LB_OP_FLUSH = 0x41, /* flush request */ | ||
| 85 | }; | ||
| 86 | |||
| 87 | /* | ||
| 88 | * request type modified bits. first four bits match BIO_RW* bits, important | ||
| 89 | */ | ||
| 90 | enum rq_flag_bits { | ||
| 91 | __REQ_RW, /* not set, read. set, write */ | ||
| 92 | __REQ_FAILFAST_DEV, /* no driver retries of device errors */ | ||
| 93 | __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ | ||
| 94 | __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ | ||
| 95 | /* above flags must match BIO_RW_* */ | ||
| 96 | __REQ_DISCARD, /* request to discard sectors */ | ||
| 97 | __REQ_SORTED, /* elevator knows about this request */ | ||
| 98 | __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ | ||
| 99 | __REQ_HARDBARRIER, /* may not be passed by drive either */ | ||
| 100 | __REQ_FUA, /* forced unit access */ | ||
| 101 | __REQ_NOMERGE, /* don't touch this for merging */ | ||
| 102 | __REQ_STARTED, /* drive already may have started this one */ | ||
| 103 | __REQ_DONTPREP, /* don't call prep for this one */ | ||
| 104 | __REQ_QUEUED, /* uses queueing */ | ||
| 105 | __REQ_ELVPRIV, /* elevator private data attached */ | ||
| 106 | __REQ_FAILED, /* set if the request failed */ | ||
| 107 | __REQ_QUIET, /* don't worry about errors */ | ||
| 108 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | ||
| 109 | __REQ_ORDERED_COLOR, /* is before or after barrier */ | ||
| 110 | __REQ_RW_SYNC, /* request is sync (sync write or read) */ | ||
| 111 | __REQ_ALLOCED, /* request came from our alloc pool */ | ||
| 112 | __REQ_RW_META, /* metadata io request */ | ||
| 113 | __REQ_COPY_USER, /* contains copies of user pages */ | ||
| 114 | __REQ_INTEGRITY, /* integrity metadata has been remapped */ | ||
| 115 | __REQ_NOIDLE, /* Don't anticipate more IO after this one */ | ||
| 116 | __REQ_IO_STAT, /* account I/O stat */ | ||
| 117 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ | ||
| 118 | __REQ_NR_BITS, /* stops here */ | ||
| 119 | }; | ||
| 120 | |||
| 121 | #define REQ_RW (1 << __REQ_RW) | ||
| 122 | #define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) | ||
| 123 | #define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) | ||
| 124 | #define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) | ||
| 125 | #define REQ_DISCARD (1 << __REQ_DISCARD) | ||
| 126 | #define REQ_SORTED (1 << __REQ_SORTED) | ||
| 127 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) | ||
| 128 | #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) | ||
| 129 | #define REQ_FUA (1 << __REQ_FUA) | ||
| 130 | #define REQ_NOMERGE (1 << __REQ_NOMERGE) | ||
| 131 | #define REQ_STARTED (1 << __REQ_STARTED) | ||
| 132 | #define REQ_DONTPREP (1 << __REQ_DONTPREP) | ||
| 133 | #define REQ_QUEUED (1 << __REQ_QUEUED) | ||
| 134 | #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) | ||
| 135 | #define REQ_FAILED (1 << __REQ_FAILED) | ||
| 136 | #define REQ_QUIET (1 << __REQ_QUIET) | ||
| 137 | #define REQ_PREEMPT (1 << __REQ_PREEMPT) | ||
| 138 | #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) | ||
| 139 | #define REQ_RW_SYNC (1 << __REQ_RW_SYNC) | ||
| 140 | #define REQ_ALLOCED (1 << __REQ_ALLOCED) | ||
| 141 | #define REQ_RW_META (1 << __REQ_RW_META) | ||
| 142 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) | ||
| 143 | #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) | ||
| 144 | #define REQ_NOIDLE (1 << __REQ_NOIDLE) | ||
| 145 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) | ||
| 146 | #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) | ||
| 147 | |||
| 148 | #define REQ_FAILFAST_MASK (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \ | ||
| 149 | REQ_FAILFAST_DRIVER) | ||
| 150 | |||
| 151 | #define BLK_MAX_CDB 16 | 72 | #define BLK_MAX_CDB 16 |
| 152 | 73 | ||
| 153 | /* | 74 | /* |
| @@ -264,6 +185,7 @@ struct request_pm_state | |||
| 264 | typedef void (request_fn_proc) (struct request_queue *q); | 185 | typedef void (request_fn_proc) (struct request_queue *q); |
| 265 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); | 186 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); |
| 266 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 187 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
| 188 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); | ||
| 267 | typedef void (unplug_fn) (struct request_queue *); | 189 | typedef void (unplug_fn) (struct request_queue *); |
| 268 | 190 | ||
| 269 | struct bio_vec; | 191 | struct bio_vec; |
| @@ -275,7 +197,6 @@ struct bvec_merge_data { | |||
| 275 | }; | 197 | }; |
| 276 | typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, | 198 | typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, |
| 277 | struct bio_vec *); | 199 | struct bio_vec *); |
| 278 | typedef void (prepare_flush_fn) (struct request_queue *, struct request *); | ||
| 279 | typedef void (softirq_done_fn)(struct request *); | 200 | typedef void (softirq_done_fn)(struct request *); |
| 280 | typedef int (dma_drain_needed_fn)(struct request *); | 201 | typedef int (dma_drain_needed_fn)(struct request *); |
| 281 | typedef int (lld_busy_fn) (struct request_queue *q); | 202 | typedef int (lld_busy_fn) (struct request_queue *q); |
| @@ -346,9 +267,9 @@ struct request_queue | |||
| 346 | request_fn_proc *request_fn; | 267 | request_fn_proc *request_fn; |
| 347 | make_request_fn *make_request_fn; | 268 | make_request_fn *make_request_fn; |
| 348 | prep_rq_fn *prep_rq_fn; | 269 | prep_rq_fn *prep_rq_fn; |
| 270 | unprep_rq_fn *unprep_rq_fn; | ||
| 349 | unplug_fn *unplug_fn; | 271 | unplug_fn *unplug_fn; |
| 350 | merge_bvec_fn *merge_bvec_fn; | 272 | merge_bvec_fn *merge_bvec_fn; |
| 351 | prepare_flush_fn *prepare_flush_fn; | ||
| 352 | softirq_done_fn *softirq_done_fn; | 273 | softirq_done_fn *softirq_done_fn; |
| 353 | rq_timed_out_fn *rq_timed_out_fn; | 274 | rq_timed_out_fn *rq_timed_out_fn; |
| 354 | dma_drain_needed_fn *dma_drain_needed; | 275 | dma_drain_needed_fn *dma_drain_needed; |
| @@ -467,11 +388,14 @@ struct request_queue | |||
| 467 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ | 388 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ |
| 468 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ | 389 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ |
| 469 | #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ | 390 | #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ |
| 391 | #define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */ | ||
| 392 | #define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */ | ||
| 470 | 393 | ||
| 471 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 394 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
| 472 | (1 << QUEUE_FLAG_CLUSTER) | \ | 395 | (1 << QUEUE_FLAG_CLUSTER) | \ |
| 473 | (1 << QUEUE_FLAG_STACKABLE) | \ | 396 | (1 << QUEUE_FLAG_STACKABLE) | \ |
| 474 | (1 << QUEUE_FLAG_SAME_COMP)) | 397 | (1 << QUEUE_FLAG_SAME_COMP) | \ |
| 398 | (1 << QUEUE_FLAG_ADD_RANDOM)) | ||
| 475 | 399 | ||
| 476 | static inline int queue_is_locked(struct request_queue *q) | 400 | static inline int queue_is_locked(struct request_queue *q) |
| 477 | { | 401 | { |
| @@ -596,38 +520,28 @@ enum { | |||
| 596 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) | 520 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) |
| 597 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) | 521 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) |
| 598 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) | 522 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) |
| 523 | #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) | ||
| 599 | #define blk_queue_flushing(q) ((q)->ordseq) | 524 | #define blk_queue_flushing(q) ((q)->ordseq) |
| 600 | #define blk_queue_stackable(q) \ | 525 | #define blk_queue_stackable(q) \ |
| 601 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) | 526 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) |
| 602 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) | 527 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) |
| 528 | #define blk_queue_secdiscard(q) (blk_queue_discard(q) && \ | ||
| 529 | test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags)) | ||
| 530 | |||
| 531 | #define blk_noretry_request(rq) \ | ||
| 532 | ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ | ||
| 533 | REQ_FAILFAST_DRIVER)) | ||
| 534 | |||
| 535 | #define blk_account_rq(rq) \ | ||
| 536 | (((rq)->cmd_flags & REQ_STARTED) && \ | ||
| 537 | ((rq)->cmd_type == REQ_TYPE_FS || \ | ||
| 538 | ((rq)->cmd_flags & REQ_DISCARD))) | ||
| 603 | 539 | ||
| 604 | #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) | ||
| 605 | #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) | ||
| 606 | #define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL) | ||
| 607 | #define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE) | ||
| 608 | |||
| 609 | #define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV) | ||
| 610 | #define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT) | ||
| 611 | #define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER) | ||
| 612 | #define blk_noretry_request(rq) (blk_failfast_dev(rq) || \ | ||
| 613 | blk_failfast_transport(rq) || \ | ||
| 614 | blk_failfast_driver(rq)) | ||
| 615 | #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) | ||
| 616 | #define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT) | ||
| 617 | #define blk_rq_quiet(rq) ((rq)->cmd_flags & REQ_QUIET) | ||
| 618 | |||
| 619 | #define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) | ||
| 620 | |||
| 621 | #define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) | ||
| 622 | #define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) | ||
| 623 | #define blk_pm_request(rq) \ | 540 | #define blk_pm_request(rq) \ |
| 624 | (blk_pm_suspend_request(rq) || blk_pm_resume_request(rq)) | 541 | ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ |
| 542 | (rq)->cmd_type == REQ_TYPE_PM_RESUME) | ||
| 625 | 543 | ||
| 626 | #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) | 544 | #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) |
| 627 | #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) | ||
| 628 | #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) | ||
| 629 | #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) | ||
| 630 | #define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) | ||
| 631 | #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) | 545 | #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) |
| 632 | /* rq->queuelist of dequeued request must be list_empty() */ | 546 | /* rq->queuelist of dequeued request must be list_empty() */ |
| 633 | #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) | 547 | #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) |
| @@ -641,7 +555,7 @@ enum { | |||
| 641 | */ | 555 | */ |
| 642 | static inline bool rw_is_sync(unsigned int rw_flags) | 556 | static inline bool rw_is_sync(unsigned int rw_flags) |
| 643 | { | 557 | { |
| 644 | return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC); | 558 | return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); |
| 645 | } | 559 | } |
| 646 | 560 | ||
| 647 | static inline bool rq_is_sync(struct request *rq) | 561 | static inline bool rq_is_sync(struct request *rq) |
| @@ -649,9 +563,6 @@ static inline bool rq_is_sync(struct request *rq) | |||
| 649 | return rw_is_sync(rq->cmd_flags); | 563 | return rw_is_sync(rq->cmd_flags); |
| 650 | } | 564 | } |
| 651 | 565 | ||
| 652 | #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) | ||
| 653 | #define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE) | ||
| 654 | |||
| 655 | static inline int blk_queue_full(struct request_queue *q, int sync) | 566 | static inline int blk_queue_full(struct request_queue *q, int sync) |
| 656 | { | 567 | { |
| 657 | if (sync) | 568 | if (sync) |
| @@ -684,7 +595,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync) | |||
| 684 | (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) | 595 | (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) |
| 685 | #define rq_mergeable(rq) \ | 596 | #define rq_mergeable(rq) \ |
| 686 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ | 597 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ |
| 687 | (blk_discard_rq(rq) || blk_fs_request((rq)))) | 598 | (((rq)->cmd_flags & REQ_DISCARD) || \ |
| 599 | (rq)->cmd_type == REQ_TYPE_FS)) | ||
| 688 | 600 | ||
| 689 | /* | 601 | /* |
| 690 | * q->prep_rq_fn return values | 602 | * q->prep_rq_fn return values |
| @@ -709,7 +621,7 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn; | |||
| 709 | #define BLK_BOUNCE_HIGH -1ULL | 621 | #define BLK_BOUNCE_HIGH -1ULL |
| 710 | #endif | 622 | #endif |
| 711 | #define BLK_BOUNCE_ANY (-1ULL) | 623 | #define BLK_BOUNCE_ANY (-1ULL) |
| 712 | #define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD) | 624 | #define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) |
| 713 | 625 | ||
| 714 | /* | 626 | /* |
| 715 | * default timeout for SG_IO if none specified | 627 | * default timeout for SG_IO if none specified |
| @@ -781,6 +693,8 @@ extern struct request *blk_make_request(struct request_queue *, struct bio *, | |||
| 781 | gfp_t); | 693 | gfp_t); |
| 782 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); | 694 | extern void blk_insert_request(struct request_queue *, struct request *, int, void *); |
| 783 | extern void blk_requeue_request(struct request_queue *, struct request *); | 695 | extern void blk_requeue_request(struct request_queue *, struct request *); |
| 696 | extern void blk_add_request_payload(struct request *rq, struct page *page, | ||
| 697 | unsigned int len); | ||
| 784 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); | 698 | extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); |
| 785 | extern int blk_lld_busy(struct request_queue *q); | 699 | extern int blk_lld_busy(struct request_queue *q); |
| 786 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, | 700 | extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, |
| @@ -915,6 +829,7 @@ extern void blk_complete_request(struct request *); | |||
| 915 | extern void __blk_complete_request(struct request *); | 829 | extern void __blk_complete_request(struct request *); |
| 916 | extern void blk_abort_request(struct request *); | 830 | extern void blk_abort_request(struct request *); |
| 917 | extern void blk_abort_queue(struct request_queue *); | 831 | extern void blk_abort_queue(struct request_queue *); |
| 832 | extern void blk_unprep_request(struct request *); | ||
| 918 | 833 | ||
| 919 | /* | 834 | /* |
| 920 | * Access functions for manipulating queue properties | 835 | * Access functions for manipulating queue properties |
| @@ -959,6 +874,7 @@ extern int blk_queue_dma_drain(struct request_queue *q, | |||
| 959 | extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); | 874 | extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); |
| 960 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); | 875 | extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); |
| 961 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); | 876 | extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); |
| 877 | extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); | ||
| 962 | extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); | 878 | extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); |
| 963 | extern void blk_queue_dma_alignment(struct request_queue *, int); | 879 | extern void blk_queue_dma_alignment(struct request_queue *, int); |
| 964 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 880 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
| @@ -966,7 +882,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | |||
| 966 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | 882 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); |
| 967 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 883 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
| 968 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 884 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
| 969 | extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); | 885 | extern int blk_queue_ordered(struct request_queue *, unsigned); |
| 970 | extern bool blk_do_ordered(struct request_queue *, struct request **); | 886 | extern bool blk_do_ordered(struct request_queue *, struct request **); |
| 971 | extern unsigned blk_ordered_cur_seq(struct request_queue *); | 887 | extern unsigned blk_ordered_cur_seq(struct request_queue *); |
| 972 | extern unsigned blk_ordered_req_seq(struct request *); | 888 | extern unsigned blk_ordered_req_seq(struct request *); |
| @@ -1005,10 +921,12 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, | |||
| 1005 | } | 921 | } |
| 1006 | enum{ | 922 | enum{ |
| 1007 | BLKDEV_WAIT, /* wait for completion */ | 923 | BLKDEV_WAIT, /* wait for completion */ |
| 1008 | BLKDEV_BARRIER, /*issue request with barrier */ | 924 | BLKDEV_BARRIER, /* issue request with barrier */ |
| 925 | BLKDEV_SECURE, /* secure discard */ | ||
| 1009 | }; | 926 | }; |
| 1010 | #define BLKDEV_IFL_WAIT (1 << BLKDEV_WAIT) | 927 | #define BLKDEV_IFL_WAIT (1 << BLKDEV_WAIT) |
| 1011 | #define BLKDEV_IFL_BARRIER (1 << BLKDEV_BARRIER) | 928 | #define BLKDEV_IFL_BARRIER (1 << BLKDEV_BARRIER) |
| 929 | #define BLKDEV_IFL_SECURE (1 << BLKDEV_SECURE) | ||
| 1012 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *, | 930 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *, |
| 1013 | unsigned long); | 931 | unsigned long); |
| 1014 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 932 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
| @@ -1020,7 +938,7 @@ static inline int sb_issue_discard(struct super_block *sb, | |||
| 1020 | { | 938 | { |
| 1021 | block <<= (sb->s_blocksize_bits - 9); | 939 | block <<= (sb->s_blocksize_bits - 9); |
| 1022 | nr_blocks <<= (sb->s_blocksize_bits - 9); | 940 | nr_blocks <<= (sb->s_blocksize_bits - 9); |
| 1023 | return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL, | 941 | return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS, |
| 1024 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); | 942 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); |
| 1025 | } | 943 | } |
| 1026 | 944 | ||
| @@ -1333,7 +1251,6 @@ static inline int blk_integrity_rq(struct request *rq) | |||
| 1333 | struct block_device_operations { | 1251 | struct block_device_operations { |
| 1334 | int (*open) (struct block_device *, fmode_t); | 1252 | int (*open) (struct block_device *, fmode_t); |
| 1335 | int (*release) (struct gendisk *, fmode_t); | 1253 | int (*release) (struct gendisk *, fmode_t); |
| 1336 | int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | ||
| 1337 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1254 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
| 1338 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1255 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
| 1339 | int (*direct_access) (struct block_device *, sector_t, | 1256 | int (*direct_access) (struct block_device *, sector_t, |
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 416bf62d6d46..3395cf7130f5 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #ifdef __KERNEL__ | 5 | #ifdef __KERNEL__ |
| 6 | #include <linux/blkdev.h> | 6 | #include <linux/blkdev.h> |
| 7 | #include <linux/relay.h> | 7 | #include <linux/relay.h> |
| 8 | #include <linux/compat.h> | ||
| 8 | #endif | 9 | #endif |
| 9 | 10 | ||
| 10 | /* | 11 | /* |
| @@ -220,11 +221,26 @@ static inline int blk_trace_init_sysfs(struct device *dev) | |||
| 220 | 221 | ||
| 221 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ | 222 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ |
| 222 | 223 | ||
| 224 | #ifdef CONFIG_COMPAT | ||
| 225 | |||
| 226 | struct compat_blk_user_trace_setup { | ||
| 227 | char name[32]; | ||
| 228 | u16 act_mask; | ||
| 229 | u32 buf_size; | ||
| 230 | u32 buf_nr; | ||
| 231 | compat_u64 start_lba; | ||
| 232 | compat_u64 end_lba; | ||
| 233 | u32 pid; | ||
| 234 | }; | ||
| 235 | #define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup) | ||
| 236 | |||
| 237 | #endif | ||
| 238 | |||
| 223 | #if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) | 239 | #if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) |
| 224 | 240 | ||
| 225 | static inline int blk_cmd_buf_len(struct request *rq) | 241 | static inline int blk_cmd_buf_len(struct request *rq) |
| 226 | { | 242 | { |
| 227 | return blk_pc_request(rq) ? rq->cmd_len * 3 : 1; | 243 | return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1; |
| 228 | } | 244 | } |
| 229 | 245 | ||
| 230 | extern void blk_dump_cmd(char *buf, struct request *rq); | 246 | extern void blk_dump_cmd(char *buf, struct request *rq); |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 1b9ba193b789..43e649a72529 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
| @@ -203,12 +203,10 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block, | |||
| 203 | int block_read_full_page(struct page*, get_block_t*); | 203 | int block_read_full_page(struct page*, get_block_t*); |
| 204 | int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, | 204 | int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc, |
| 205 | unsigned long from); | 205 | unsigned long from); |
| 206 | int block_write_begin_newtrunc(struct file *, struct address_space *, | 206 | int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, |
| 207 | loff_t, unsigned, unsigned, | 207 | unsigned flags, struct page **pagep, get_block_t *get_block); |
| 208 | struct page **, void **, get_block_t*); | 208 | int __block_write_begin(struct page *page, loff_t pos, unsigned len, |
| 209 | int block_write_begin(struct file *, struct address_space *, | 209 | get_block_t *get_block); |
| 210 | loff_t, unsigned, unsigned, | ||
| 211 | struct page **, void **, get_block_t*); | ||
| 212 | int block_write_end(struct file *, struct address_space *, | 210 | int block_write_end(struct file *, struct address_space *, |
| 213 | loff_t, unsigned, unsigned, | 211 | loff_t, unsigned, unsigned, |
| 214 | struct page *, void *); | 212 | struct page *, void *); |
| @@ -217,9 +215,6 @@ int generic_write_end(struct file *, struct address_space *, | |||
| 217 | struct page *, void *); | 215 | struct page *, void *); |
| 218 | void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); | 216 | void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); |
| 219 | int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*); | 217 | int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*); |
| 220 | int cont_write_begin_newtrunc(struct file *, struct address_space *, loff_t, | ||
| 221 | unsigned, unsigned, struct page **, void **, | ||
| 222 | get_block_t *, loff_t *); | ||
| 223 | int cont_write_begin(struct file *, struct address_space *, loff_t, | 218 | int cont_write_begin(struct file *, struct address_space *, loff_t, |
| 224 | unsigned, unsigned, struct page **, void **, | 219 | unsigned, unsigned, struct page **, void **, |
| 225 | get_block_t *, loff_t *); | 220 | get_block_t *, loff_t *); |
| @@ -230,12 +225,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
| 230 | void block_sync_page(struct page *); | 225 | void block_sync_page(struct page *); |
| 231 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); | 226 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); |
| 232 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); | 227 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); |
| 233 | int file_fsync(struct file *, int); | 228 | int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, |
| 234 | int nobh_write_begin_newtrunc(struct file *, struct address_space *, | ||
| 235 | loff_t, unsigned, unsigned, | ||
| 236 | struct page **, void **, get_block_t*); | ||
| 237 | int nobh_write_begin(struct file *, struct address_space *, | ||
| 238 | loff_t, unsigned, unsigned, | ||
| 239 | struct page **, void **, get_block_t*); | 229 | struct page **, void **, get_block_t*); |
| 240 | int nobh_write_end(struct file *, struct address_space *, | 230 | int nobh_write_end(struct file *, struct address_space *, |
| 241 | loff_t, unsigned, unsigned, | 231 | loff_t, unsigned, unsigned, |
| @@ -314,15 +304,10 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) | |||
| 314 | bh->b_size = sb->s_blocksize; | 304 | bh->b_size = sb->s_blocksize; |
| 315 | } | 305 | } |
| 316 | 306 | ||
| 317 | /* | ||
| 318 | * Calling wait_on_buffer() for a zero-ref buffer is illegal, so we call into | ||
| 319 | * __wait_on_buffer() just to trip a debug check. Because debug code in inline | ||
| 320 | * functions is bloaty. | ||
| 321 | */ | ||
| 322 | static inline void wait_on_buffer(struct buffer_head *bh) | 307 | static inline void wait_on_buffer(struct buffer_head *bh) |
| 323 | { | 308 | { |
| 324 | might_sleep(); | 309 | might_sleep(); |
| 325 | if (buffer_locked(bh) || atomic_read(&bh->b_count) == 0) | 310 | if (buffer_locked(bh)) |
| 326 | __wait_on_buffer(bh); | 311 | __wait_on_buffer(bh); |
| 327 | } | 312 | } |
| 328 | 313 | ||
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index 5ea3c60c160c..c37b21ad5a3b 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h | |||
| @@ -292,6 +292,8 @@ clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec); | |||
| 292 | */ | 292 | */ |
| 293 | extern int | 293 | extern int |
| 294 | __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq); | 294 | __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq); |
| 295 | extern void | ||
| 296 | __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq); | ||
| 295 | 297 | ||
| 296 | static inline int clocksource_register_hz(struct clocksource *cs, u32 hz) | 298 | static inline int clocksource_register_hz(struct clocksource *cs, u32 hz) |
| 297 | { | 299 | { |
| @@ -303,6 +305,15 @@ static inline int clocksource_register_khz(struct clocksource *cs, u32 khz) | |||
| 303 | return __clocksource_register_scale(cs, 1000, khz); | 305 | return __clocksource_register_scale(cs, 1000, khz); |
| 304 | } | 306 | } |
| 305 | 307 | ||
| 308 | static inline void __clocksource_updatefreq_hz(struct clocksource *cs, u32 hz) | ||
| 309 | { | ||
| 310 | __clocksource_updatefreq_scale(cs, 1, hz); | ||
| 311 | } | ||
| 312 | |||
| 313 | static inline void __clocksource_updatefreq_khz(struct clocksource *cs, u32 khz) | ||
| 314 | { | ||
| 315 | __clocksource_updatefreq_scale(cs, 1000, khz); | ||
| 316 | } | ||
| 306 | 317 | ||
| 307 | static inline void | 318 | static inline void |
| 308 | clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec) | 319 | clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec) |
| @@ -313,11 +324,13 @@ clocksource_calc_mult_shift(struct clocksource *cs, u32 freq, u32 minsec) | |||
| 313 | 324 | ||
| 314 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL | 325 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL |
| 315 | extern void | 326 | extern void |
| 316 | update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult); | 327 | update_vsyscall(struct timespec *ts, struct timespec *wtm, |
| 328 | struct clocksource *c, u32 mult); | ||
| 317 | extern void update_vsyscall_tz(void); | 329 | extern void update_vsyscall_tz(void); |
| 318 | #else | 330 | #else |
| 319 | static inline void | 331 | static inline void |
| 320 | update_vsyscall(struct timespec *ts, struct clocksource *c, u32 mult) | 332 | update_vsyscall(struct timespec *ts, struct timespec *wtm, |
| 333 | struct clocksource *c, u32 mult) | ||
| 321 | { | 334 | { |
| 322 | } | 335 | } |
| 323 | 336 | ||
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h index 8859e2ede9fe..284b520934a0 100644 --- a/include/linux/coda_psdev.h +++ b/include/linux/coda_psdev.h | |||
| @@ -86,9 +86,9 @@ struct upc_req { | |||
| 86 | wait_queue_head_t uc_sleep; /* process' wait queue */ | 86 | wait_queue_head_t uc_sleep; /* process' wait queue */ |
| 87 | }; | 87 | }; |
| 88 | 88 | ||
| 89 | #define REQ_ASYNC 0x1 | 89 | #define CODA_REQ_ASYNC 0x1 |
| 90 | #define REQ_READ 0x2 | 90 | #define CODA_REQ_READ 0x2 |
| 91 | #define REQ_WRITE 0x4 | 91 | #define CODA_REQ_WRITE 0x4 |
| 92 | #define REQ_ABORT 0x8 | 92 | #define CODA_REQ_ABORT 0x8 |
| 93 | 93 | ||
| 94 | #endif | 94 | #endif |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 0da5b187f124..16508bcddacc 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
| @@ -35,8 +35,7 @@ | |||
| 35 | (typeof(ptr)) (__ptr + (off)); }) | 35 | (typeof(ptr)) (__ptr + (off)); }) |
| 36 | 36 | ||
| 37 | /* &a[0] degrades to a pointer: a different type from an array */ | 37 | /* &a[0] degrades to a pointer: a different type from an array */ |
| 38 | #define __must_be_array(a) \ | 38 | #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) |
| 39 | BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0]))) | ||
| 40 | 39 | ||
| 41 | /* | 40 | /* |
| 42 | * Force always-inline if the user requests it so via the .config, | 41 | * Force always-inline if the user requests it so via the .config, |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index a5a472b10746..c1a62c56a660 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | # define __release(x) __context__(x,-1) | 16 | # define __release(x) __context__(x,-1) |
| 17 | # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) | 17 | # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) |
| 18 | # define __percpu __attribute__((noderef, address_space(3))) | 18 | # define __percpu __attribute__((noderef, address_space(3))) |
| 19 | # define __rcu | ||
| 19 | extern void __chk_user_ptr(const volatile void __user *); | 20 | extern void __chk_user_ptr(const volatile void __user *); |
| 20 | extern void __chk_io_ptr(const volatile void __iomem *); | 21 | extern void __chk_io_ptr(const volatile void __iomem *); |
| 21 | #else | 22 | #else |
| @@ -34,6 +35,7 @@ extern void __chk_io_ptr(const volatile void __iomem *); | |||
| 34 | # define __release(x) (void)0 | 35 | # define __release(x) (void)0 |
| 35 | # define __cond_lock(x,c) (c) | 36 | # define __cond_lock(x,c) (c) |
| 36 | # define __percpu | 37 | # define __percpu |
| 38 | # define __rcu | ||
| 37 | #endif | 39 | #endif |
| 38 | 40 | ||
| 39 | #ifdef __KERNEL__ | 41 | #ifdef __KERNEL__ |
diff --git a/include/linux/console.h b/include/linux/console.h index f76fc297322d..95cf6f08a59d 100644 --- a/include/linux/console.h +++ b/include/linux/console.h | |||
| @@ -79,8 +79,13 @@ int register_con_driver(const struct consw *csw, int first, int last); | |||
| 79 | int unregister_con_driver(const struct consw *csw); | 79 | int unregister_con_driver(const struct consw *csw); |
| 80 | int take_over_console(const struct consw *sw, int first, int last, int deflt); | 80 | int take_over_console(const struct consw *sw, int first, int last, int deflt); |
| 81 | void give_up_console(const struct consw *sw); | 81 | void give_up_console(const struct consw *sw); |
| 82 | #ifdef CONFIG_HW_CONSOLE | ||
| 82 | int con_debug_enter(struct vc_data *vc); | 83 | int con_debug_enter(struct vc_data *vc); |
| 83 | int con_debug_leave(void); | 84 | int con_debug_leave(void); |
| 85 | #else | ||
| 86 | #define con_debug_enter(vc) (0) | ||
| 87 | #define con_debug_leave() (0) | ||
| 88 | #endif | ||
| 84 | 89 | ||
| 85 | /* scroll */ | 90 | /* scroll */ |
| 86 | #define SM_UP (1) | 91 | #define SM_UP (1) |
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index 38fe59dc89ae..7f0c32908568 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h | |||
| @@ -21,6 +21,8 @@ struct vt_struct; | |||
| 21 | #define NPAR 16 | 21 | #define NPAR 16 |
| 22 | 22 | ||
| 23 | struct vc_data { | 23 | struct vc_data { |
| 24 | struct tty_port port; /* Upper level data */ | ||
| 25 | |||
| 24 | unsigned short vc_num; /* Console number */ | 26 | unsigned short vc_num; /* Console number */ |
| 25 | unsigned int vc_cols; /* [#] Console size */ | 27 | unsigned int vc_cols; /* [#] Console size */ |
| 26 | unsigned int vc_rows; | 28 | unsigned int vc_rows; |
| @@ -56,7 +58,6 @@ struct vc_data { | |||
| 56 | /* VT terminal data */ | 58 | /* VT terminal data */ |
| 57 | unsigned int vc_state; /* Escape sequence parser state */ | 59 | unsigned int vc_state; /* Escape sequence parser state */ |
| 58 | unsigned int vc_npar,vc_par[NPAR]; /* Parameters of current escape sequence */ | 60 | unsigned int vc_npar,vc_par[NPAR]; /* Parameters of current escape sequence */ |
| 59 | struct tty_struct *vc_tty; /* TTY we are attached to */ | ||
| 60 | /* data for manual vt switching */ | 61 | /* data for manual vt switching */ |
| 61 | struct vt_mode vt_mode; | 62 | struct vt_mode vt_mode; |
| 62 | struct pid *vt_pid; | 63 | struct pid *vt_pid; |
| @@ -105,6 +106,7 @@ struct vc_data { | |||
| 105 | struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ | 106 | struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */ |
| 106 | unsigned long vc_uni_pagedir; | 107 | unsigned long vc_uni_pagedir; |
| 107 | unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ | 108 | unsigned long *vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */ |
| 109 | bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */ | ||
| 108 | /* additional information is in vt_kern.h */ | 110 | /* additional information is in vt_kern.h */ |
| 109 | }; | 111 | }; |
| 110 | 112 | ||
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index e287863ac053..4823af64e9db 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
| @@ -48,6 +48,33 @@ extern ssize_t arch_cpu_release(const char *, size_t); | |||
| 48 | #endif | 48 | #endif |
| 49 | struct notifier_block; | 49 | struct notifier_block; |
| 50 | 50 | ||
| 51 | /* | ||
| 52 | * CPU notifier priorities. | ||
| 53 | */ | ||
| 54 | enum { | ||
| 55 | /* | ||
| 56 | * SCHED_ACTIVE marks a cpu which is coming up active during | ||
| 57 | * CPU_ONLINE and CPU_DOWN_FAILED and must be the first | ||
| 58 | * notifier. CPUSET_ACTIVE adjusts cpuset according to | ||
| 59 | * cpu_active mask right after SCHED_ACTIVE. During | ||
| 60 | * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are | ||
| 61 | * ordered in the similar way. | ||
| 62 | * | ||
| 63 | * This ordering guarantees consistent cpu_active mask and | ||
| 64 | * migration behavior to all cpu notifiers. | ||
| 65 | */ | ||
| 66 | CPU_PRI_SCHED_ACTIVE = INT_MAX, | ||
| 67 | CPU_PRI_CPUSET_ACTIVE = INT_MAX - 1, | ||
| 68 | CPU_PRI_SCHED_INACTIVE = INT_MIN + 1, | ||
| 69 | CPU_PRI_CPUSET_INACTIVE = INT_MIN, | ||
| 70 | |||
| 71 | /* migration should happen before other stuff but after perf */ | ||
| 72 | CPU_PRI_PERF = 20, | ||
| 73 | CPU_PRI_MIGRATION = 10, | ||
| 74 | /* prepare workqueues for other notifiers */ | ||
| 75 | CPU_PRI_WORKQUEUE = 5, | ||
| 76 | }; | ||
| 77 | |||
| 51 | #ifdef CONFIG_SMP | 78 | #ifdef CONFIG_SMP |
| 52 | /* Need to know about CPUs going up/down? */ | 79 | /* Need to know about CPUs going up/down? */ |
| 53 | #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) | 80 | #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 55215cce5005..36ca9721a0c2 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -52,6 +52,7 @@ struct cpuidle_state { | |||
| 52 | #define CPUIDLE_FLAG_SHALLOW (0x20) /* low latency, minimal savings */ | 52 | #define CPUIDLE_FLAG_SHALLOW (0x20) /* low latency, minimal savings */ |
| 53 | #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ | 53 | #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ |
| 54 | #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ | 54 | #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ |
| 55 | #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ | ||
| 55 | 56 | ||
| 56 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) | 57 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) |
| 57 | 58 | ||
| @@ -84,6 +85,7 @@ struct cpuidle_state_kobj { | |||
| 84 | struct cpuidle_device { | 85 | struct cpuidle_device { |
| 85 | unsigned int registered:1; | 86 | unsigned int registered:1; |
| 86 | unsigned int enabled:1; | 87 | unsigned int enabled:1; |
| 88 | unsigned int power_specified:1; | ||
| 87 | unsigned int cpu; | 89 | unsigned int cpu; |
| 88 | 90 | ||
| 89 | int last_residency; | 91 | int last_residency; |
| @@ -97,6 +99,8 @@ struct cpuidle_device { | |||
| 97 | struct completion kobj_unregister; | 99 | struct completion kobj_unregister; |
| 98 | void *governor_data; | 100 | void *governor_data; |
| 99 | struct cpuidle_state *safe_state; | 101 | struct cpuidle_state *safe_state; |
| 102 | |||
| 103 | int (*prepare) (struct cpuidle_device *dev); | ||
| 100 | }; | 104 | }; |
| 101 | 105 | ||
| 102 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); | 106 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); |
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 457ed765a116..f20eb8f16025 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
| @@ -20,6 +20,7 @@ extern int number_of_cpusets; /* How many cpusets are defined in system? */ | |||
| 20 | 20 | ||
| 21 | extern int cpuset_init(void); | 21 | extern int cpuset_init(void); |
| 22 | extern void cpuset_init_smp(void); | 22 | extern void cpuset_init_smp(void); |
| 23 | extern void cpuset_update_active_cpus(void); | ||
| 23 | extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); | 24 | extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); |
| 24 | extern int cpuset_cpus_allowed_fallback(struct task_struct *p); | 25 | extern int cpuset_cpus_allowed_fallback(struct task_struct *p); |
| 25 | extern nodemask_t cpuset_mems_allowed(struct task_struct *p); | 26 | extern nodemask_t cpuset_mems_allowed(struct task_struct *p); |
| @@ -132,6 +133,11 @@ static inline void set_mems_allowed(nodemask_t nodemask) | |||
| 132 | static inline int cpuset_init(void) { return 0; } | 133 | static inline int cpuset_init(void) { return 0; } |
| 133 | static inline void cpuset_init_smp(void) {} | 134 | static inline void cpuset_init_smp(void) {} |
| 134 | 135 | ||
| 136 | static inline void cpuset_update_active_cpus(void) | ||
| 137 | { | ||
| 138 | partition_sched_domains(1, NULL, NULL); | ||
| 139 | } | ||
| 140 | |||
| 135 | static inline void cpuset_cpus_allowed(struct task_struct *p, | 141 | static inline void cpuset_cpus_allowed(struct task_struct *p, |
| 136 | struct cpumask *mask) | 142 | struct cpumask *mask) |
| 137 | { | 143 | { |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index eebb617c17d8..6a4aea30aa09 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
| @@ -315,6 +315,8 @@ extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); | |||
| 315 | 315 | ||
| 316 | extern char *__d_path(const struct path *path, struct path *root, char *, int); | 316 | extern char *__d_path(const struct path *path, struct path *root, char *, int); |
| 317 | extern char *d_path(const struct path *, char *, int); | 317 | extern char *d_path(const struct path *, char *, int); |
| 318 | extern char *d_path_with_unreachable(const struct path *, char *, int); | ||
| 319 | extern char *__dentry_path(struct dentry *, char *, int); | ||
| 318 | extern char *dentry_path(struct dentry *, char *, int); | 320 | extern char *dentry_path(struct dentry *, char *, int); |
| 319 | 321 | ||
| 320 | /* Allocation counts.. */ | 322 | /* Allocation counts.. */ |
diff --git a/include/linux/delay.h b/include/linux/delay.h index fd832c6d419e..a6ecb34cf547 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h | |||
| @@ -45,6 +45,7 @@ extern unsigned long lpj_fine; | |||
| 45 | void calibrate_delay(void); | 45 | void calibrate_delay(void); |
| 46 | void msleep(unsigned int msecs); | 46 | void msleep(unsigned int msecs); |
| 47 | unsigned long msleep_interruptible(unsigned int msecs); | 47 | unsigned long msleep_interruptible(unsigned int msecs); |
| 48 | void usleep_range(unsigned long min, unsigned long max); | ||
| 48 | 49 | ||
| 49 | static inline void ssleep(unsigned int seconds) | 50 | static inline void ssleep(unsigned int seconds) |
| 50 | { | 51 | { |
diff --git a/include/linux/device.h b/include/linux/device.h index 6a8276f683b6..516fecacf27b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
| @@ -84,9 +84,8 @@ struct device *bus_find_device_by_name(struct bus_type *bus, | |||
| 84 | struct device *start, | 84 | struct device *start, |
| 85 | const char *name); | 85 | const char *name); |
| 86 | 86 | ||
| 87 | int __must_check bus_for_each_drv(struct bus_type *bus, | 87 | int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, |
| 88 | struct device_driver *start, void *data, | 88 | void *data, int (*fn)(struct device_driver *, void *)); |
| 89 | int (*fn)(struct device_driver *, void *)); | ||
| 90 | 89 | ||
| 91 | void bus_sort_breadthfirst(struct bus_type *bus, | 90 | void bus_sort_breadthfirst(struct bus_type *bus, |
| 92 | int (*compare)(const struct device *a, | 91 | int (*compare)(const struct device *a, |
| @@ -110,10 +109,12 @@ extern int bus_unregister_notifier(struct bus_type *bus, | |||
| 110 | */ | 109 | */ |
| 111 | #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ | 110 | #define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */ |
| 112 | #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ | 111 | #define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */ |
| 113 | #define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */ | 112 | #define BUS_NOTIFY_BIND_DRIVER 0x00000003 /* driver about to be |
| 114 | #define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be | 113 | bound */ |
| 114 | #define BUS_NOTIFY_BOUND_DRIVER 0x00000004 /* driver bound to device */ | ||
| 115 | #define BUS_NOTIFY_UNBIND_DRIVER 0x00000005 /* driver about to be | ||
| 115 | unbound */ | 116 | unbound */ |
| 116 | #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000005 /* driver is unbound | 117 | #define BUS_NOTIFY_UNBOUND_DRIVER 0x00000006 /* driver is unbound |
| 117 | from the device */ | 118 | from the device */ |
| 118 | 119 | ||
| 119 | extern struct kset *bus_get_kset(struct bus_type *bus); | 120 | extern struct kset *bus_get_kset(struct bus_type *bus); |
| @@ -551,7 +552,7 @@ extern int device_for_each_child(struct device *dev, void *data, | |||
| 551 | int (*fn)(struct device *dev, void *data)); | 552 | int (*fn)(struct device *dev, void *data)); |
| 552 | extern struct device *device_find_child(struct device *dev, void *data, | 553 | extern struct device *device_find_child(struct device *dev, void *data, |
| 553 | int (*match)(struct device *dev, void *data)); | 554 | int (*match)(struct device *dev, void *data)); |
| 554 | extern int device_rename(struct device *dev, char *new_name); | 555 | extern int device_rename(struct device *dev, const char *new_name); |
| 555 | extern int device_move(struct device *dev, struct device *new_parent, | 556 | extern int device_move(struct device *dev, struct device *new_parent, |
| 556 | enum dpm_order dpm_order); | 557 | enum dpm_order dpm_order); |
| 557 | extern const char *device_get_devnode(struct device *dev, | 558 | extern const char *device_get_devnode(struct device *dev, |
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 89b7e1a605b8..e0670a512056 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
| @@ -142,6 +142,14 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) | |||
| 142 | return -EIO; | 142 | return -EIO; |
| 143 | } | 143 | } |
| 144 | 144 | ||
| 145 | static inline int dma_get_cache_alignment(void) | ||
| 146 | { | ||
| 147 | #ifdef ARCH_DMA_MINALIGN | ||
| 148 | return ARCH_DMA_MINALIGN; | ||
| 149 | #endif | ||
| 150 | return 1; | ||
| 151 | } | ||
| 152 | |||
| 145 | /* flags for the coherent memory api */ | 153 | /* flags for the coherent memory api */ |
| 146 | #define DMA_MEMORY_MAP 0x01 | 154 | #define DMA_MEMORY_MAP 0x01 |
| 147 | #define DMA_MEMORY_IO 0x02 | 155 | #define DMA_MEMORY_IO 0x02 |
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 5204f018931b..c61d4ca27bcc 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
| @@ -114,11 +114,17 @@ enum dma_ctrl_flags { | |||
| 114 | * @DMA_TERMINATE_ALL: terminate all ongoing transfers | 114 | * @DMA_TERMINATE_ALL: terminate all ongoing transfers |
| 115 | * @DMA_PAUSE: pause ongoing transfers | 115 | * @DMA_PAUSE: pause ongoing transfers |
| 116 | * @DMA_RESUME: resume paused transfer | 116 | * @DMA_RESUME: resume paused transfer |
| 117 | * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers | ||
| 118 | * that need to runtime reconfigure the slave channels (as opposed to passing | ||
| 119 | * configuration data in statically from the platform). An additional | ||
| 120 | * argument of struct dma_slave_config must be passed in with this | ||
| 121 | * command. | ||
| 117 | */ | 122 | */ |
| 118 | enum dma_ctrl_cmd { | 123 | enum dma_ctrl_cmd { |
| 119 | DMA_TERMINATE_ALL, | 124 | DMA_TERMINATE_ALL, |
| 120 | DMA_PAUSE, | 125 | DMA_PAUSE, |
| 121 | DMA_RESUME, | 126 | DMA_RESUME, |
| 127 | DMA_SLAVE_CONFIG, | ||
| 122 | }; | 128 | }; |
| 123 | 129 | ||
| 124 | /** | 130 | /** |
| @@ -199,6 +205,71 @@ struct dma_chan_dev { | |||
| 199 | atomic_t *idr_ref; | 205 | atomic_t *idr_ref; |
| 200 | }; | 206 | }; |
| 201 | 207 | ||
| 208 | /** | ||
| 209 | * enum dma_slave_buswidth - defines bus with of the DMA slave | ||
| 210 | * device, source or target buses | ||
| 211 | */ | ||
| 212 | enum dma_slave_buswidth { | ||
| 213 | DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, | ||
| 214 | DMA_SLAVE_BUSWIDTH_1_BYTE = 1, | ||
| 215 | DMA_SLAVE_BUSWIDTH_2_BYTES = 2, | ||
| 216 | DMA_SLAVE_BUSWIDTH_4_BYTES = 4, | ||
| 217 | DMA_SLAVE_BUSWIDTH_8_BYTES = 8, | ||
| 218 | }; | ||
| 219 | |||
| 220 | /** | ||
| 221 | * struct dma_slave_config - dma slave channel runtime config | ||
| 222 | * @direction: whether the data shall go in or out on this slave | ||
| 223 | * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are | ||
| 224 | * legal values, DMA_BIDIRECTIONAL is not acceptable since we | ||
| 225 | * need to differentiate source and target addresses. | ||
| 226 | * @src_addr: this is the physical address where DMA slave data | ||
| 227 | * should be read (RX), if the source is memory this argument is | ||
| 228 | * ignored. | ||
| 229 | * @dst_addr: this is the physical address where DMA slave data | ||
| 230 | * should be written (TX), if the source is memory this argument | ||
| 231 | * is ignored. | ||
| 232 | * @src_addr_width: this is the width in bytes of the source (RX) | ||
| 233 | * register where DMA data shall be read. If the source | ||
| 234 | * is memory this may be ignored depending on architecture. | ||
| 235 | * Legal values: 1, 2, 4, 8. | ||
| 236 | * @dst_addr_width: same as src_addr_width but for destination | ||
| 237 | * target (TX) mutatis mutandis. | ||
| 238 | * @src_maxburst: the maximum number of words (note: words, as in | ||
| 239 | * units of the src_addr_width member, not bytes) that can be sent | ||
| 240 | * in one burst to the device. Typically something like half the | ||
| 241 | * FIFO depth on I/O peripherals so you don't overflow it. This | ||
| 242 | * may or may not be applicable on memory sources. | ||
| 243 | * @dst_maxburst: same as src_maxburst but for destination target | ||
| 244 | * mutatis mutandis. | ||
| 245 | * | ||
| 246 | * This struct is passed in as configuration data to a DMA engine | ||
| 247 | * in order to set up a certain channel for DMA transport at runtime. | ||
| 248 | * The DMA device/engine has to provide support for an additional | ||
| 249 | * command in the channel config interface, DMA_SLAVE_CONFIG | ||
| 250 | * and this struct will then be passed in as an argument to the | ||
| 251 | * DMA engine device_control() function. | ||
| 252 | * | ||
| 253 | * The rationale for adding configuration information to this struct | ||
| 254 | * is as follows: if it is likely that most DMA slave controllers in | ||
| 255 | * the world will support the configuration option, then make it | ||
| 256 | * generic. If not: if it is fixed so that it be sent in static from | ||
| 257 | * the platform data, then prefer to do that. Else, if it is neither | ||
| 258 | * fixed at runtime, nor generic enough (such as bus mastership on | ||
| 259 | * some CPU family and whatnot) then create a custom slave config | ||
| 260 | * struct and pass that, then make this config a member of that | ||
| 261 | * struct, if applicable. | ||
| 262 | */ | ||
| 263 | struct dma_slave_config { | ||
| 264 | enum dma_data_direction direction; | ||
| 265 | dma_addr_t src_addr; | ||
| 266 | dma_addr_t dst_addr; | ||
| 267 | enum dma_slave_buswidth src_addr_width; | ||
| 268 | enum dma_slave_buswidth dst_addr_width; | ||
| 269 | u32 src_maxburst; | ||
| 270 | u32 dst_maxburst; | ||
| 271 | }; | ||
| 272 | |||
| 202 | static inline const char *dma_chan_name(struct dma_chan *chan) | 273 | static inline const char *dma_chan_name(struct dma_chan *chan) |
| 203 | { | 274 | { |
| 204 | return dev_name(&chan->dev->device); | 275 | return dev_name(&chan->dev->device); |
diff --git a/include/linux/dmi.h b/include/linux/dmi.h index a8a3e1ac281d..90e087f8d951 100644 --- a/include/linux/dmi.h +++ b/include/linux/dmi.h | |||
| @@ -20,6 +20,7 @@ enum dmi_device_type { | |||
| 20 | DMI_DEV_TYPE_SAS, | 20 | DMI_DEV_TYPE_SAS, |
| 21 | DMI_DEV_TYPE_IPMI = -1, | 21 | DMI_DEV_TYPE_IPMI = -1, |
| 22 | DMI_DEV_TYPE_OEM_STRING = -2, | 22 | DMI_DEV_TYPE_OEM_STRING = -2, |
| 23 | DMI_DEV_TYPE_DEV_ONBOARD = -3, | ||
| 23 | }; | 24 | }; |
| 24 | 25 | ||
| 25 | struct dmi_header { | 26 | struct dmi_header { |
| @@ -37,6 +38,14 @@ struct dmi_device { | |||
| 37 | 38 | ||
| 38 | #ifdef CONFIG_DMI | 39 | #ifdef CONFIG_DMI |
| 39 | 40 | ||
| 41 | struct dmi_dev_onboard { | ||
| 42 | struct dmi_device dev; | ||
| 43 | int instance; | ||
| 44 | int segment; | ||
| 45 | int bus; | ||
| 46 | int devfn; | ||
| 47 | }; | ||
| 48 | |||
| 40 | extern int dmi_check_system(const struct dmi_system_id *list); | 49 | extern int dmi_check_system(const struct dmi_system_id *list); |
| 41 | const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); | 50 | const struct dmi_system_id *dmi_first_match(const struct dmi_system_id *list); |
| 42 | extern const char * dmi_get_system_info(int field); | 51 | extern const char * dmi_get_system_info(int field); |
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h index ecc06286226d..3290555a52ee 100644 --- a/include/linux/dnotify.h +++ b/include/linux/dnotify.h | |||
| @@ -28,6 +28,7 @@ struct dnotify_struct { | |||
| 28 | FS_CREATE | FS_DN_RENAME |\ | 28 | FS_CREATE | FS_DN_RENAME |\ |
| 29 | FS_MOVED_FROM | FS_MOVED_TO) | 29 | FS_MOVED_FROM | FS_MOVED_TO) |
| 30 | 30 | ||
| 31 | extern int dir_notify_enable; | ||
| 31 | extern void dnotify_flush(struct file *, fl_owner_t); | 32 | extern void dnotify_flush(struct file *, fl_owner_t); |
| 32 | extern int fcntl_dirnotify(int, struct file *, unsigned long); | 33 | extern int fcntl_dirnotify(int, struct file *, unsigned long); |
| 33 | 34 | ||
diff --git a/include/linux/dns_resolver.h b/include/linux/dns_resolver.h new file mode 100644 index 000000000000..cc92268af89a --- /dev/null +++ b/include/linux/dns_resolver.h | |||
| @@ -0,0 +1,34 @@ | |||
| 1 | /* | ||
| 2 | * DNS Resolver upcall management for CIFS DFS and AFS | ||
| 3 | * Handles host name to IP address resolution and DNS query for AFSDB RR. | ||
| 4 | * | ||
| 5 | * Copyright (c) International Business Machines Corp., 2008 | ||
| 6 | * Author(s): Steve French (sfrench@us.ibm.com) | ||
| 7 | * Wang Lei (wang840925@gmail.com) | ||
| 8 | * | ||
| 9 | * This library is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU Lesser General Public License as published | ||
| 11 | * by the Free Software Foundation; either version 2.1 of the License, or | ||
| 12 | * (at your option) any later version. | ||
| 13 | * | ||
| 14 | * This library is distributed in the hope that it will be useful, | ||
| 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
| 17 | * the GNU Lesser General Public License for more details. | ||
| 18 | * | ||
| 19 | * You should have received a copy of the GNU Lesser General Public License | ||
| 20 | * along with this library; if not, write to the Free Software | ||
| 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 22 | */ | ||
| 23 | |||
| 24 | #ifndef _LINUX_DNS_RESOLVER_H | ||
| 25 | #define _LINUX_DNS_RESOLVER_H | ||
| 26 | |||
| 27 | #ifdef __KERNEL__ | ||
| 28 | |||
| 29 | extern int dns_query(const char *type, const char *name, size_t namelen, | ||
| 30 | const char *options, char **_result, time_t *_expiry); | ||
| 31 | |||
| 32 | #endif /* KERNEL */ | ||
| 33 | |||
| 34 | #endif /* _LINUX_DNS_RESOLVER_H */ | ||
diff --git a/include/linux/dqblk_xfs.h b/include/linux/dqblk_xfs.h index 4389ae72024e..86552807aed9 100644 --- a/include/linux/dqblk_xfs.h +++ b/include/linux/dqblk_xfs.h | |||
| @@ -49,7 +49,7 @@ | |||
| 49 | #define FS_DQUOT_VERSION 1 /* fs_disk_quota.d_version */ | 49 | #define FS_DQUOT_VERSION 1 /* fs_disk_quota.d_version */ |
| 50 | typedef struct fs_disk_quota { | 50 | typedef struct fs_disk_quota { |
| 51 | __s8 d_version; /* version of this structure */ | 51 | __s8 d_version; /* version of this structure */ |
| 52 | __s8 d_flags; /* XFS_{USER,PROJ,GROUP}_QUOTA */ | 52 | __s8 d_flags; /* FS_{USER,PROJ,GROUP}_QUOTA */ |
| 53 | __u16 d_fieldmask; /* field specifier */ | 53 | __u16 d_fieldmask; /* field specifier */ |
| 54 | __u32 d_id; /* user, project, or group ID */ | 54 | __u32 d_id; /* user, project, or group ID */ |
| 55 | __u64 d_blk_hardlimit;/* absolute limit on disk blks */ | 55 | __u64 d_blk_hardlimit;/* absolute limit on disk blks */ |
| @@ -119,18 +119,18 @@ typedef struct fs_disk_quota { | |||
| 119 | #define FS_DQ_ACCT_MASK (FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT) | 119 | #define FS_DQ_ACCT_MASK (FS_DQ_BCOUNT | FS_DQ_ICOUNT | FS_DQ_RTBCOUNT) |
| 120 | 120 | ||
| 121 | /* | 121 | /* |
| 122 | * Various flags related to quotactl(2). Only relevant to XFS filesystems. | 122 | * Various flags related to quotactl(2). |
| 123 | */ | 123 | */ |
| 124 | #define XFS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */ | 124 | #define FS_QUOTA_UDQ_ACCT (1<<0) /* user quota accounting */ |
| 125 | #define XFS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */ | 125 | #define FS_QUOTA_UDQ_ENFD (1<<1) /* user quota limits enforcement */ |
| 126 | #define XFS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */ | 126 | #define FS_QUOTA_GDQ_ACCT (1<<2) /* group quota accounting */ |
| 127 | #define XFS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */ | 127 | #define FS_QUOTA_GDQ_ENFD (1<<3) /* group quota limits enforcement */ |
| 128 | #define XFS_QUOTA_PDQ_ACCT (1<<4) /* project quota accounting */ | 128 | #define FS_QUOTA_PDQ_ACCT (1<<4) /* project quota accounting */ |
| 129 | #define XFS_QUOTA_PDQ_ENFD (1<<5) /* project quota limits enforcement */ | 129 | #define FS_QUOTA_PDQ_ENFD (1<<5) /* project quota limits enforcement */ |
| 130 | 130 | ||
| 131 | #define XFS_USER_QUOTA (1<<0) /* user quota type */ | 131 | #define FS_USER_QUOTA (1<<0) /* user quota type */ |
| 132 | #define XFS_PROJ_QUOTA (1<<1) /* project quota type */ | 132 | #define FS_PROJ_QUOTA (1<<1) /* project quota type */ |
| 133 | #define XFS_GROUP_QUOTA (1<<2) /* group quota type */ | 133 | #define FS_GROUP_QUOTA (1<<2) /* group quota type */ |
| 134 | 134 | ||
| 135 | /* | 135 | /* |
| 136 | * fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system. | 136 | * fs_quota_stat is the struct returned in Q_XGETQSTAT for a given file system. |
| @@ -151,7 +151,7 @@ typedef struct fs_qfilestat { | |||
| 151 | 151 | ||
| 152 | typedef struct fs_quota_stat { | 152 | typedef struct fs_quota_stat { |
| 153 | __s8 qs_version; /* version number for future changes */ | 153 | __s8 qs_version; /* version number for future changes */ |
| 154 | __u16 qs_flags; /* XFS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */ | 154 | __u16 qs_flags; /* FS_QUOTA_{U,P,G}DQ_{ACCT,ENFD} */ |
| 155 | __s8 qs_pad; /* unused */ | 155 | __s8 qs_pad; /* unused */ |
| 156 | fs_qfilestat_t qs_uquota; /* user quota storage information */ | 156 | fs_qfilestat_t qs_uquota; /* user quota storage information */ |
| 157 | fs_qfilestat_t qs_gquota; /* group quota storage information */ | 157 | fs_qfilestat_t qs_gquota; /* group quota storage information */ |
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index b8d2516668aa..479ee3a1d901 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h | |||
| @@ -53,7 +53,7 @@ | |||
| 53 | 53 | ||
| 54 | 54 | ||
| 55 | extern const char *drbd_buildtag(void); | 55 | extern const char *drbd_buildtag(void); |
| 56 | #define REL_VERSION "8.3.8" | 56 | #define REL_VERSION "8.3.8.1" |
| 57 | #define API_VERSION 88 | 57 | #define API_VERSION 88 |
| 58 | #define PRO_VERSION_MIN 86 | 58 | #define PRO_VERSION_MIN 86 |
| 59 | #define PRO_VERSION_MAX 94 | 59 | #define PRO_VERSION_MAX 94 |
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h index ce77a746fc9d..5f042810a56c 100644 --- a/include/linux/drbd_nl.h +++ b/include/linux/drbd_nl.h | |||
| @@ -78,10 +78,11 @@ NL_PACKET(syncer_conf, 8, | |||
| 78 | NL_INTEGER( 30, T_MAY_IGNORE, rate) | 78 | NL_INTEGER( 30, T_MAY_IGNORE, rate) |
| 79 | NL_INTEGER( 31, T_MAY_IGNORE, after) | 79 | NL_INTEGER( 31, T_MAY_IGNORE, after) |
| 80 | NL_INTEGER( 32, T_MAY_IGNORE, al_extents) | 80 | NL_INTEGER( 32, T_MAY_IGNORE, al_extents) |
| 81 | NL_INTEGER( 71, T_MAY_IGNORE, dp_volume) | 81 | /* NL_INTEGER( 71, T_MAY_IGNORE, dp_volume) |
| 82 | NL_INTEGER( 72, T_MAY_IGNORE, dp_interval) | 82 | * NL_INTEGER( 72, T_MAY_IGNORE, dp_interval) |
| 83 | NL_INTEGER( 73, T_MAY_IGNORE, throttle_th) | 83 | * NL_INTEGER( 73, T_MAY_IGNORE, throttle_th) |
| 84 | NL_INTEGER( 74, T_MAY_IGNORE, hold_off_th) | 84 | * NL_INTEGER( 74, T_MAY_IGNORE, hold_off_th) |
| 85 | * feature will be reimplemented differently with 8.3.9 */ | ||
| 85 | NL_STRING( 52, T_MAY_IGNORE, verify_alg, SHARED_SECRET_MAX) | 86 | NL_STRING( 52, T_MAY_IGNORE, verify_alg, SHARED_SECRET_MAX) |
| 86 | NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32) | 87 | NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32) |
| 87 | NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX) | 88 | NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX) |
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index 7fc62d4550b2..6ce1bca01724 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h | |||
| @@ -400,7 +400,6 @@ struct ext3_inode { | |||
| 400 | #define EXT3_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */ | 400 | #define EXT3_MOUNT_POSIX_ACL 0x08000 /* POSIX Access Control Lists */ |
| 401 | #define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */ | 401 | #define EXT3_MOUNT_RESERVATION 0x10000 /* Preallocation */ |
| 402 | #define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */ | 402 | #define EXT3_MOUNT_BARRIER 0x20000 /* Use block barriers */ |
| 403 | #define EXT3_MOUNT_NOBH 0x40000 /* No bufferheads */ | ||
| 404 | #define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */ | 403 | #define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */ |
| 405 | #define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ | 404 | #define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */ |
| 406 | #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ | 405 | #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */ |
| @@ -896,7 +895,7 @@ int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |||
| 896 | extern struct inode *ext3_iget(struct super_block *, unsigned long); | 895 | extern struct inode *ext3_iget(struct super_block *, unsigned long); |
| 897 | extern int ext3_write_inode (struct inode *, struct writeback_control *); | 896 | extern int ext3_write_inode (struct inode *, struct writeback_control *); |
| 898 | extern int ext3_setattr (struct dentry *, struct iattr *); | 897 | extern int ext3_setattr (struct dentry *, struct iattr *); |
| 899 | extern void ext3_delete_inode (struct inode *); | 898 | extern void ext3_evict_inode (struct inode *); |
| 900 | extern int ext3_sync_inode (handle_t *, struct inode *); | 899 | extern int ext3_sync_inode (handle_t *, struct inode *); |
| 901 | extern void ext3_discard_reservation (struct inode *); | 900 | extern void ext3_discard_reservation (struct inode *); |
| 902 | extern void ext3_dirty_inode(struct inode *); | 901 | extern void ext3_dirty_inode(struct inode *); |
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h new file mode 100644 index 000000000000..f0949a57ca9d --- /dev/null +++ b/include/linux/fanotify.h | |||
| @@ -0,0 +1,105 @@ | |||
| 1 | #ifndef _LINUX_FANOTIFY_H | ||
| 2 | #define _LINUX_FANOTIFY_H | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | |||
| 6 | /* the following events that user-space can register for */ | ||
| 7 | #define FAN_ACCESS 0x00000001 /* File was accessed */ | ||
| 8 | #define FAN_MODIFY 0x00000002 /* File was modified */ | ||
| 9 | #define FAN_CLOSE_WRITE 0x00000008 /* Unwrittable file closed */ | ||
| 10 | #define FAN_CLOSE_NOWRITE 0x00000010 /* Writtable file closed */ | ||
| 11 | #define FAN_OPEN 0x00000020 /* File was opened */ | ||
| 12 | |||
| 13 | #define FAN_EVENT_ON_CHILD 0x08000000 /* interested in child events */ | ||
| 14 | |||
| 15 | /* FIXME currently Q's have no limit.... */ | ||
| 16 | #define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ | ||
| 17 | |||
| 18 | #define FAN_OPEN_PERM 0x00010000 /* File open in perm check */ | ||
| 19 | #define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */ | ||
| 20 | |||
| 21 | /* helper events */ | ||
| 22 | #define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */ | ||
| 23 | |||
| 24 | /* flags used for fanotify_init() */ | ||
| 25 | #define FAN_CLOEXEC 0x00000001 | ||
| 26 | #define FAN_NONBLOCK 0x00000002 | ||
| 27 | |||
| 28 | #define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK) | ||
| 29 | |||
| 30 | /* flags used for fanotify_modify_mark() */ | ||
| 31 | #define FAN_MARK_ADD 0x00000001 | ||
| 32 | #define FAN_MARK_REMOVE 0x00000002 | ||
| 33 | #define FAN_MARK_DONT_FOLLOW 0x00000004 | ||
| 34 | #define FAN_MARK_ONLYDIR 0x00000008 | ||
| 35 | #define FAN_MARK_MOUNT 0x00000010 | ||
| 36 | #define FAN_MARK_IGNORED_MASK 0x00000020 | ||
| 37 | #define FAN_MARK_IGNORED_SURV_MODIFY 0x00000040 | ||
| 38 | #define FAN_MARK_FLUSH 0x00000080 | ||
| 39 | |||
| 40 | #define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\ | ||
| 41 | FAN_MARK_REMOVE |\ | ||
| 42 | FAN_MARK_DONT_FOLLOW |\ | ||
| 43 | FAN_MARK_ONLYDIR |\ | ||
| 44 | FAN_MARK_MOUNT |\ | ||
| 45 | FAN_MARK_IGNORED_MASK |\ | ||
| 46 | FAN_MARK_IGNORED_SURV_MODIFY) | ||
| 47 | |||
| 48 | /* | ||
| 49 | * All of the events - we build the list by hand so that we can add flags in | ||
| 50 | * the future and not break backward compatibility. Apps will get only the | ||
| 51 | * events that they originally wanted. Be sure to add new events here! | ||
| 52 | */ | ||
| 53 | #define FAN_ALL_EVENTS (FAN_ACCESS |\ | ||
| 54 | FAN_MODIFY |\ | ||
| 55 | FAN_CLOSE |\ | ||
| 56 | FAN_OPEN) | ||
| 57 | |||
| 58 | /* | ||
| 59 | * All events which require a permission response from userspace | ||
| 60 | */ | ||
| 61 | #define FAN_ALL_PERM_EVENTS (FAN_OPEN_PERM |\ | ||
| 62 | FAN_ACCESS_PERM) | ||
| 63 | |||
| 64 | #define FAN_ALL_OUTGOING_EVENTS (FAN_ALL_EVENTS |\ | ||
| 65 | FAN_ALL_PERM_EVENTS |\ | ||
| 66 | FAN_Q_OVERFLOW) | ||
| 67 | |||
| 68 | #define FANOTIFY_METADATA_VERSION 1 | ||
| 69 | |||
| 70 | struct fanotify_event_metadata { | ||
| 71 | __u32 event_len; | ||
| 72 | __u32 vers; | ||
| 73 | __s32 fd; | ||
| 74 | __u64 mask; | ||
| 75 | __s64 pid; | ||
| 76 | } __attribute__ ((packed)); | ||
| 77 | |||
| 78 | struct fanotify_response { | ||
| 79 | __s32 fd; | ||
| 80 | __u32 response; | ||
| 81 | } __attribute__ ((packed)); | ||
| 82 | |||
| 83 | /* Legit userspace responses to a _PERM event */ | ||
| 84 | #define FAN_ALLOW 0x01 | ||
| 85 | #define FAN_DENY 0x02 | ||
| 86 | |||
| 87 | /* Helper functions to deal with fanotify_event_metadata buffers */ | ||
| 88 | #define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata)) | ||
| 89 | |||
| 90 | #define FAN_EVENT_NEXT(meta, len) ((len) -= (meta)->event_len, \ | ||
| 91 | (struct fanotify_event_metadata*)(((char *)(meta)) + \ | ||
| 92 | (meta)->event_len)) | ||
| 93 | |||
| 94 | #define FAN_EVENT_OK(meta, len) ((long)(len) >= (long)FAN_EVENT_METADATA_LEN && \ | ||
| 95 | (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \ | ||
| 96 | (long)(meta)->event_len <= (long)(len)) | ||
| 97 | |||
| 98 | #ifdef __KERNEL__ | ||
| 99 | |||
| 100 | struct fanotify_wait { | ||
| 101 | struct fsnotify_event *event; | ||
| 102 | __s32 fd; | ||
| 103 | }; | ||
| 104 | #endif /* __KERNEL__ */ | ||
| 105 | #endif /* _LINUX_FANOTIFY_H */ | ||
diff --git a/include/linux/fb.h b/include/linux/fb.h index 0c5659c41b01..f0268deca658 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
| @@ -825,6 +825,10 @@ struct fb_tile_ops { | |||
| 825 | */ | 825 | */ |
| 826 | #define FBINFO_BE_MATH 0x100000 | 826 | #define FBINFO_BE_MATH 0x100000 |
| 827 | 827 | ||
| 828 | /* report to the VT layer that this fb driver can accept forced console | ||
| 829 | output like oopses */ | ||
| 830 | #define FBINFO_CAN_FORCE_OUTPUT 0x200000 | ||
| 831 | |||
| 828 | struct fb_info { | 832 | struct fb_info { |
| 829 | int node; | 833 | int node; |
| 830 | int flags; | 834 | int flags; |
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index d147461bc271..f59ed297b661 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/rcupdate.h> | 11 | #include <linux/rcupdate.h> |
| 12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
| 13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
| 14 | #include <linux/fs.h> | ||
| 14 | 15 | ||
| 15 | #include <asm/atomic.h> | 16 | #include <asm/atomic.h> |
| 16 | 17 | ||
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h index 68f883b30a53..68c642d8843d 100644 --- a/include/linux/firewire-cdev.h +++ b/include/linux/firewire-cdev.h | |||
| @@ -30,12 +30,18 @@ | |||
| 30 | #include <linux/types.h> | 30 | #include <linux/types.h> |
| 31 | #include <linux/firewire-constants.h> | 31 | #include <linux/firewire-constants.h> |
| 32 | 32 | ||
| 33 | #define FW_CDEV_EVENT_BUS_RESET 0x00 | 33 | #define FW_CDEV_EVENT_BUS_RESET 0x00 |
| 34 | #define FW_CDEV_EVENT_RESPONSE 0x01 | 34 | #define FW_CDEV_EVENT_RESPONSE 0x01 |
| 35 | #define FW_CDEV_EVENT_REQUEST 0x02 | 35 | #define FW_CDEV_EVENT_REQUEST 0x02 |
| 36 | #define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 | 36 | #define FW_CDEV_EVENT_ISO_INTERRUPT 0x03 |
| 37 | #define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 | 37 | #define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04 |
| 38 | #define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 | 38 | #define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05 |
| 39 | |||
| 40 | /* available since kernel version 2.6.36 */ | ||
| 41 | #define FW_CDEV_EVENT_REQUEST2 0x06 | ||
| 42 | #define FW_CDEV_EVENT_PHY_PACKET_SENT 0x07 | ||
| 43 | #define FW_CDEV_EVENT_PHY_PACKET_RECEIVED 0x08 | ||
| 44 | #define FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL 0x09 | ||
| 39 | 45 | ||
| 40 | /** | 46 | /** |
| 41 | * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types | 47 | * struct fw_cdev_event_common - Common part of all fw_cdev_event_ types |
| @@ -68,6 +74,10 @@ struct fw_cdev_event_common { | |||
| 68 | * This event is sent when the bus the device belongs to goes through a bus | 74 | * This event is sent when the bus the device belongs to goes through a bus |
| 69 | * reset. It provides information about the new bus configuration, such as | 75 | * reset. It provides information about the new bus configuration, such as |
| 70 | * new node ID for this device, new root ID, and others. | 76 | * new node ID for this device, new root ID, and others. |
| 77 | * | ||
| 78 | * If @bm_node_id is 0xffff right after bus reset it can be reread by an | ||
| 79 | * %FW_CDEV_IOC_GET_INFO ioctl after bus manager selection was finished. | ||
| 80 | * Kernels with ABI version < 4 do not set @bm_node_id. | ||
| 71 | */ | 81 | */ |
| 72 | struct fw_cdev_event_bus_reset { | 82 | struct fw_cdev_event_bus_reset { |
| 73 | __u64 closure; | 83 | __u64 closure; |
| @@ -82,8 +92,9 @@ struct fw_cdev_event_bus_reset { | |||
| 82 | 92 | ||
| 83 | /** | 93 | /** |
| 84 | * struct fw_cdev_event_response - Sent when a response packet was received | 94 | * struct fw_cdev_event_response - Sent when a response packet was received |
| 85 | * @closure: See &fw_cdev_event_common; | 95 | * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_REQUEST |
| 86 | * set by %FW_CDEV_IOC_SEND_REQUEST ioctl | 96 | * or %FW_CDEV_IOC_SEND_BROADCAST_REQUEST |
| 97 | * or %FW_CDEV_IOC_SEND_STREAM_PACKET ioctl | ||
| 87 | * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE | 98 | * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_RESPONSE |
| 88 | * @rcode: Response code returned by the remote node | 99 | * @rcode: Response code returned by the remote node |
| 89 | * @length: Data length, i.e. the response's payload size in bytes | 100 | * @length: Data length, i.e. the response's payload size in bytes |
| @@ -93,6 +104,11 @@ struct fw_cdev_event_bus_reset { | |||
| 93 | * sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses | 104 | * sent by %FW_CDEV_IOC_SEND_REQUEST ioctl. The payload data for responses |
| 94 | * carrying data (read and lock responses) follows immediately and can be | 105 | * carrying data (read and lock responses) follows immediately and can be |
| 95 | * accessed through the @data field. | 106 | * accessed through the @data field. |
| 107 | * | ||
| 108 | * The event is also generated after conclusions of transactions that do not | ||
| 109 | * involve response packets. This includes unified write transactions, | ||
| 110 | * broadcast write transactions, and transmission of asynchronous stream | ||
| 111 | * packets. @rcode indicates success or failure of such transmissions. | ||
| 96 | */ | 112 | */ |
| 97 | struct fw_cdev_event_response { | 113 | struct fw_cdev_event_response { |
| 98 | __u64 closure; | 114 | __u64 closure; |
| @@ -103,11 +119,46 @@ struct fw_cdev_event_response { | |||
| 103 | }; | 119 | }; |
| 104 | 120 | ||
| 105 | /** | 121 | /** |
| 106 | * struct fw_cdev_event_request - Sent on incoming request to an address region | 122 | * struct fw_cdev_event_request - Old version of &fw_cdev_event_request2 |
| 107 | * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl | 123 | * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl |
| 108 | * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST | 124 | * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST |
| 125 | * @tcode: See &fw_cdev_event_request2 | ||
| 126 | * @offset: See &fw_cdev_event_request2 | ||
| 127 | * @handle: See &fw_cdev_event_request2 | ||
| 128 | * @length: See &fw_cdev_event_request2 | ||
| 129 | * @data: See &fw_cdev_event_request2 | ||
| 130 | * | ||
| 131 | * This event is sent instead of &fw_cdev_event_request2 if the kernel or | ||
| 132 | * the client implements ABI version <= 3. | ||
| 133 | * | ||
| 134 | * Unlike &fw_cdev_event_request2, the sender identity cannot be established, | ||
| 135 | * broadcast write requests cannot be distinguished from unicast writes, and | ||
| 136 | * @tcode of lock requests is %TCODE_LOCK_REQUEST. | ||
| 137 | * | ||
| 138 | * Requests to the FCP_REQUEST or FCP_RESPONSE register are responded to as | ||
| 139 | * with &fw_cdev_event_request2, except in kernel 2.6.32 and older which send | ||
| 140 | * the response packet of the client's %FW_CDEV_IOC_SEND_RESPONSE ioctl. | ||
| 141 | */ | ||
| 142 | struct fw_cdev_event_request { | ||
| 143 | __u64 closure; | ||
| 144 | __u32 type; | ||
| 145 | __u32 tcode; | ||
| 146 | __u64 offset; | ||
| 147 | __u32 handle; | ||
| 148 | __u32 length; | ||
| 149 | __u32 data[0]; | ||
| 150 | }; | ||
| 151 | |||
| 152 | /** | ||
| 153 | * struct fw_cdev_event_request2 - Sent on incoming request to an address region | ||
| 154 | * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl | ||
| 155 | * @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST2 | ||
| 109 | * @tcode: Transaction code of the incoming request | 156 | * @tcode: Transaction code of the incoming request |
| 110 | * @offset: The offset into the 48-bit per-node address space | 157 | * @offset: The offset into the 48-bit per-node address space |
| 158 | * @source_node_id: Sender node ID | ||
| 159 | * @destination_node_id: Destination node ID | ||
| 160 | * @card: The index of the card from which the request came | ||
| 161 | * @generation: Bus generation in which the request is valid | ||
| 111 | * @handle: Reference to the kernel-side pending request | 162 | * @handle: Reference to the kernel-side pending request |
| 112 | * @length: Data length, i.e. the request's payload size in bytes | 163 | * @length: Data length, i.e. the request's payload size in bytes |
| 113 | * @data: Incoming data, if any | 164 | * @data: Incoming data, if any |
| @@ -120,12 +171,42 @@ struct fw_cdev_event_response { | |||
| 120 | * | 171 | * |
| 121 | * The payload data for requests carrying data (write and lock requests) | 172 | * The payload data for requests carrying data (write and lock requests) |
| 122 | * follows immediately and can be accessed through the @data field. | 173 | * follows immediately and can be accessed through the @data field. |
| 174 | * | ||
| 175 | * Unlike &fw_cdev_event_request, @tcode of lock requests is one of the | ||
| 176 | * firewire-core specific %TCODE_LOCK_MASK_SWAP...%TCODE_LOCK_VENDOR_DEPENDENT, | ||
| 177 | * i.e. encodes the extended transaction code. | ||
| 178 | * | ||
| 179 | * @card may differ from &fw_cdev_get_info.card because requests are received | ||
| 180 | * from all cards of the Linux host. @source_node_id, @destination_node_id, and | ||
| 181 | * @generation pertain to that card. Destination node ID and bus generation may | ||
| 182 | * therefore differ from the corresponding fields of the last | ||
| 183 | * &fw_cdev_event_bus_reset. | ||
| 184 | * | ||
| 185 | * @destination_node_id may also differ from the current node ID because of a | ||
| 186 | * non-local bus ID part or in case of a broadcast write request. Note, a | ||
| 187 | * client must call an %FW_CDEV_IOC_SEND_RESPONSE ioctl even in case of a | ||
| 188 | * broadcast write request; the kernel will then release the kernel-side pending | ||
| 189 | * request but will not actually send a response packet. | ||
| 190 | * | ||
| 191 | * In case of a write request to FCP_REQUEST or FCP_RESPONSE, the kernel already | ||
| 192 | * sent a write response immediately after the request was received; in this | ||
| 193 | * case the client must still call an %FW_CDEV_IOC_SEND_RESPONSE ioctl to | ||
| 194 | * release the kernel-side pending request, though another response won't be | ||
| 195 | * sent. | ||
| 196 | * | ||
| 197 | * If the client subsequently needs to initiate requests to the sender node of | ||
| 198 | * an &fw_cdev_event_request2, it needs to use a device file with matching | ||
| 199 | * card index, node ID, and generation for outbound requests. | ||
| 123 | */ | 200 | */ |
| 124 | struct fw_cdev_event_request { | 201 | struct fw_cdev_event_request2 { |
| 125 | __u64 closure; | 202 | __u64 closure; |
| 126 | __u32 type; | 203 | __u32 type; |
| 127 | __u32 tcode; | 204 | __u32 tcode; |
| 128 | __u64 offset; | 205 | __u64 offset; |
| 206 | __u32 source_node_id; | ||
| 207 | __u32 destination_node_id; | ||
| 208 | __u32 card; | ||
| 209 | __u32 generation; | ||
| 129 | __u32 handle; | 210 | __u32 handle; |
| 130 | __u32 length; | 211 | __u32 length; |
| 131 | __u32 data[0]; | 212 | __u32 data[0]; |
| @@ -141,26 +222,43 @@ struct fw_cdev_event_request { | |||
| 141 | * @header: Stripped headers, if any | 222 | * @header: Stripped headers, if any |
| 142 | * | 223 | * |
| 143 | * This event is sent when the controller has completed an &fw_cdev_iso_packet | 224 | * This event is sent when the controller has completed an &fw_cdev_iso_packet |
| 144 | * with the %FW_CDEV_ISO_INTERRUPT bit set. In the receive case, the headers | 225 | * with the %FW_CDEV_ISO_INTERRUPT bit set. |
| 145 | * stripped of all packets up until and including the interrupt packet are | ||
| 146 | * returned in the @header field. The amount of header data per packet is as | ||
| 147 | * specified at iso context creation by &fw_cdev_create_iso_context.header_size. | ||
| 148 | * | 226 | * |
| 149 | * In version 1 of this ABI, header data consisted of the 1394 isochronous | 227 | * Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT): |
| 150 | * packet header, followed by quadlets from the packet payload if | ||
| 151 | * &fw_cdev_create_iso_context.header_size > 4. | ||
| 152 | * | 228 | * |
| 153 | * In version 2 of this ABI, header data consist of the 1394 isochronous | 229 | * In version 3 and some implementations of version 2 of the ABI, &header_length |
| 154 | * packet header, followed by a timestamp quadlet if | 230 | * is a multiple of 4 and &header contains timestamps of all packets up until |
| 155 | * &fw_cdev_create_iso_context.header_size > 4, followed by quadlets from the | 231 | * the interrupt packet. The format of the timestamps is as described below for |
| 156 | * packet payload if &fw_cdev_create_iso_context.header_size > 8. | 232 | * isochronous reception. In version 1 of the ABI, &header_length was 0. |
| 157 | * | 233 | * |
| 158 | * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. | 234 | * Isochronous receive events (context type %FW_CDEV_ISO_CONTEXT_RECEIVE): |
| 235 | * | ||
| 236 | * The headers stripped of all packets up until and including the interrupt | ||
| 237 | * packet are returned in the @header field. The amount of header data per | ||
| 238 | * packet is as specified at iso context creation by | ||
| 239 | * &fw_cdev_create_iso_context.header_size. | ||
| 240 | * | ||
| 241 | * Hence, _interrupt.header_length / _context.header_size is the number of | ||
| 242 | * packets received in this interrupt event. The client can now iterate | ||
| 243 | * through the mmap()'ed DMA buffer according to this number of packets and | ||
| 244 | * to the buffer sizes as the client specified in &fw_cdev_queue_iso. | ||
| 245 | * | ||
| 246 | * Since version 2 of this ABI, the portion for each packet in _interrupt.header | ||
| 247 | * consists of the 1394 isochronous packet header, followed by a timestamp | ||
| 248 | * quadlet if &fw_cdev_create_iso_context.header_size > 4, followed by quadlets | ||
| 249 | * from the packet payload if &fw_cdev_create_iso_context.header_size > 8. | ||
| 159 | * | 250 | * |
| 160 | * Format of 1394 iso packet header: 16 bits len, 2 bits tag, 6 bits channel, | 251 | * Format of 1394 iso packet header: 16 bits data_length, 2 bits tag, 6 bits |
| 161 | * 4 bits tcode, 4 bits sy, in big endian byte order. Format of timestamp: | 252 | * channel, 4 bits tcode, 4 bits sy, in big endian byte order. |
| 162 | * 16 bits invalid, 3 bits cycleSeconds, 13 bits cycleCount, in big endian byte | 253 | * data_length is the actual received size of the packet without the four |
| 163 | * order. | 254 | * 1394 iso packet header bytes. |
| 255 | * | ||
| 256 | * Format of timestamp: 16 bits invalid, 3 bits cycleSeconds, 13 bits | ||
| 257 | * cycleCount, in big endian byte order. | ||
| 258 | * | ||
| 259 | * In version 1 of the ABI, no timestamp quadlet was inserted; instead, payload | ||
| 260 | * data followed directly after the 1394 is header if header_size > 4. | ||
| 261 | * Behaviour of ver. 1 of this ABI is no longer available since ABI ver. 2. | ||
| 164 | */ | 262 | */ |
| 165 | struct fw_cdev_event_iso_interrupt { | 263 | struct fw_cdev_event_iso_interrupt { |
| 166 | __u64 closure; | 264 | __u64 closure; |
| @@ -171,6 +269,43 @@ struct fw_cdev_event_iso_interrupt { | |||
| 171 | }; | 269 | }; |
| 172 | 270 | ||
| 173 | /** | 271 | /** |
| 272 | * struct fw_cdev_event_iso_interrupt_mc - An iso buffer chunk was completed | ||
| 273 | * @closure: See &fw_cdev_event_common; | ||
| 274 | * set by %FW_CDEV_CREATE_ISO_CONTEXT ioctl | ||
| 275 | * @type: %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL | ||
| 276 | * @completed: Offset into the receive buffer; data before this offest is valid | ||
| 277 | * | ||
| 278 | * This event is sent in multichannel contexts (context type | ||
| 279 | * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL) for &fw_cdev_iso_packet buffer | ||
| 280 | * chunks that have the %FW_CDEV_ISO_INTERRUPT bit set. Whether this happens | ||
| 281 | * when a packet is completed and/or when a buffer chunk is completed depends | ||
| 282 | * on the hardware implementation. | ||
| 283 | * | ||
| 284 | * The buffer is continuously filled with the following data, per packet: | ||
| 285 | * - the 1394 iso packet header as described at &fw_cdev_event_iso_interrupt, | ||
| 286 | * but in little endian byte order, | ||
| 287 | * - packet payload (as many bytes as specified in the data_length field of | ||
| 288 | * the 1394 iso packet header) in big endian byte order, | ||
| 289 | * - 0...3 padding bytes as needed to align the following trailer quadlet, | ||
| 290 | * - trailer quadlet, containing the reception timestamp as described at | ||
| 291 | * &fw_cdev_event_iso_interrupt, but in little endian byte order. | ||
| 292 | * | ||
| 293 | * Hence the per-packet size is data_length (rounded up to a multiple of 4) + 8. | ||
| 294 | * When processing the data, stop before a packet that would cross the | ||
| 295 | * @completed offset. | ||
| 296 | * | ||
| 297 | * A packet near the end of a buffer chunk will typically spill over into the | ||
| 298 | * next queued buffer chunk. It is the responsibility of the client to check | ||
| 299 | * for this condition, assemble a broken-up packet from its parts, and not to | ||
| 300 | * re-queue any buffer chunks in which as yet unread packet parts reside. | ||
| 301 | */ | ||
| 302 | struct fw_cdev_event_iso_interrupt_mc { | ||
| 303 | __u64 closure; | ||
| 304 | __u32 type; | ||
| 305 | __u32 completed; | ||
| 306 | }; | ||
| 307 | |||
| 308 | /** | ||
| 174 | * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed | 309 | * struct fw_cdev_event_iso_resource - Iso resources were allocated or freed |
| 175 | * @closure: See &fw_cdev_event_common; | 310 | * @closure: See &fw_cdev_event_common; |
| 176 | * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl | 311 | * set by %FW_CDEV_IOC_(DE)ALLOCATE_ISO_RESOURCE(_ONCE) ioctl |
| @@ -200,15 +335,45 @@ struct fw_cdev_event_iso_resource { | |||
| 200 | }; | 335 | }; |
| 201 | 336 | ||
| 202 | /** | 337 | /** |
| 338 | * struct fw_cdev_event_phy_packet - A PHY packet was transmitted or received | ||
| 339 | * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_SEND_PHY_PACKET | ||
| 340 | * or %FW_CDEV_IOC_RECEIVE_PHY_PACKETS ioctl | ||
| 341 | * @type: %FW_CDEV_EVENT_PHY_PACKET_SENT or %..._RECEIVED | ||
| 342 | * @rcode: %RCODE_..., indicates success or failure of transmission | ||
| 343 | * @length: Data length in bytes | ||
| 344 | * @data: Incoming data | ||
| 345 | * | ||
| 346 | * If @type is %FW_CDEV_EVENT_PHY_PACKET_SENT, @length is 0 and @data empty, | ||
| 347 | * except in case of a ping packet: Then, @length is 4, and @data[0] is the | ||
| 348 | * ping time in 49.152MHz clocks if @rcode is %RCODE_COMPLETE. | ||
| 349 | * | ||
| 350 | * If @type is %FW_CDEV_EVENT_PHY_PACKET_RECEIVED, @length is 8 and @data | ||
| 351 | * consists of the two PHY packet quadlets, in host byte order. | ||
| 352 | */ | ||
| 353 | struct fw_cdev_event_phy_packet { | ||
| 354 | __u64 closure; | ||
| 355 | __u32 type; | ||
| 356 | __u32 rcode; | ||
| 357 | __u32 length; | ||
| 358 | __u32 data[0]; | ||
| 359 | }; | ||
| 360 | |||
| 361 | /** | ||
| 203 | * union fw_cdev_event - Convenience union of fw_cdev_event_ types | 362 | * union fw_cdev_event - Convenience union of fw_cdev_event_ types |
| 204 | * @common: Valid for all types | 363 | * @common: Valid for all types |
| 205 | * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET | 364 | * @bus_reset: Valid if @common.type == %FW_CDEV_EVENT_BUS_RESET |
| 206 | * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE | 365 | * @response: Valid if @common.type == %FW_CDEV_EVENT_RESPONSE |
| 207 | * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST | 366 | * @request: Valid if @common.type == %FW_CDEV_EVENT_REQUEST |
| 208 | * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT | 367 | * @request2: Valid if @common.type == %FW_CDEV_EVENT_REQUEST2 |
| 209 | * @iso_resource: Valid if @common.type == | 368 | * @iso_interrupt: Valid if @common.type == %FW_CDEV_EVENT_ISO_INTERRUPT |
| 369 | * @iso_interrupt_mc: Valid if @common.type == | ||
| 370 | * %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL | ||
| 371 | * @iso_resource: Valid if @common.type == | ||
| 210 | * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or | 372 | * %FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED or |
| 211 | * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED | 373 | * %FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED |
| 374 | * @phy_packet: Valid if @common.type == | ||
| 375 | * %FW_CDEV_EVENT_PHY_PACKET_SENT or | ||
| 376 | * %FW_CDEV_EVENT_PHY_PACKET_RECEIVED | ||
| 212 | * | 377 | * |
| 213 | * Convenience union for userspace use. Events could be read(2) into an | 378 | * Convenience union for userspace use. Events could be read(2) into an |
| 214 | * appropriately aligned char buffer and then cast to this union for further | 379 | * appropriately aligned char buffer and then cast to this union for further |
| @@ -223,8 +388,11 @@ union fw_cdev_event { | |||
| 223 | struct fw_cdev_event_bus_reset bus_reset; | 388 | struct fw_cdev_event_bus_reset bus_reset; |
| 224 | struct fw_cdev_event_response response; | 389 | struct fw_cdev_event_response response; |
| 225 | struct fw_cdev_event_request request; | 390 | struct fw_cdev_event_request request; |
| 391 | struct fw_cdev_event_request2 request2; /* added in 2.6.36 */ | ||
| 226 | struct fw_cdev_event_iso_interrupt iso_interrupt; | 392 | struct fw_cdev_event_iso_interrupt iso_interrupt; |
| 227 | struct fw_cdev_event_iso_resource iso_resource; | 393 | struct fw_cdev_event_iso_interrupt_mc iso_interrupt_mc; /* added in 2.6.36 */ |
| 394 | struct fw_cdev_event_iso_resource iso_resource; /* added in 2.6.30 */ | ||
| 395 | struct fw_cdev_event_phy_packet phy_packet; /* added in 2.6.36 */ | ||
| 228 | }; | 396 | }; |
| 229 | 397 | ||
| 230 | /* available since kernel version 2.6.22 */ | 398 | /* available since kernel version 2.6.22 */ |
| @@ -256,23 +424,46 @@ union fw_cdev_event { | |||
| 256 | /* available since kernel version 2.6.34 */ | 424 | /* available since kernel version 2.6.34 */ |
| 257 | #define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2) | 425 | #define FW_CDEV_IOC_GET_CYCLE_TIMER2 _IOWR('#', 0x14, struct fw_cdev_get_cycle_timer2) |
| 258 | 426 | ||
| 427 | /* available since kernel version 2.6.36 */ | ||
| 428 | #define FW_CDEV_IOC_SEND_PHY_PACKET _IOWR('#', 0x15, struct fw_cdev_send_phy_packet) | ||
| 429 | #define FW_CDEV_IOC_RECEIVE_PHY_PACKETS _IOW('#', 0x16, struct fw_cdev_receive_phy_packets) | ||
| 430 | #define FW_CDEV_IOC_SET_ISO_CHANNELS _IOW('#', 0x17, struct fw_cdev_set_iso_channels) | ||
| 431 | |||
| 259 | /* | 432 | /* |
| 260 | * FW_CDEV_VERSION History | 433 | * ABI version history |
| 261 | * 1 (2.6.22) - initial version | 434 | * 1 (2.6.22) - initial version |
| 435 | * (2.6.24) - added %FW_CDEV_IOC_GET_CYCLE_TIMER | ||
| 262 | * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if | 436 | * 2 (2.6.30) - changed &fw_cdev_event_iso_interrupt.header if |
| 263 | * &fw_cdev_create_iso_context.header_size is 8 or more | 437 | * &fw_cdev_create_iso_context.header_size is 8 or more |
| 438 | * - added %FW_CDEV_IOC_*_ISO_RESOURCE*, | ||
| 439 | * %FW_CDEV_IOC_GET_SPEED, %FW_CDEV_IOC_SEND_BROADCAST_REQUEST, | ||
| 440 | * %FW_CDEV_IOC_SEND_STREAM_PACKET | ||
| 264 | * (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt | 441 | * (2.6.32) - added time stamp to xmit &fw_cdev_event_iso_interrupt |
| 265 | * (2.6.33) - IR has always packet-per-buffer semantics now, not one of | 442 | * (2.6.33) - IR has always packet-per-buffer semantics now, not one of |
| 266 | * dual-buffer or packet-per-buffer depending on hardware | 443 | * dual-buffer or packet-per-buffer depending on hardware |
| 444 | * - shared use and auto-response for FCP registers | ||
| 267 | * 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable | 445 | * 3 (2.6.34) - made &fw_cdev_get_cycle_timer reliable |
| 446 | * - added %FW_CDEV_IOC_GET_CYCLE_TIMER2 | ||
| 447 | * 4 (2.6.36) - added %FW_CDEV_EVENT_REQUEST2, %FW_CDEV_EVENT_PHY_PACKET_*, | ||
| 448 | * and &fw_cdev_allocate.region_end | ||
| 449 | * - implemented &fw_cdev_event_bus_reset.bm_node_id | ||
| 450 | * - added %FW_CDEV_IOC_SEND_PHY_PACKET, _RECEIVE_PHY_PACKETS | ||
| 451 | * - added %FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL, | ||
| 452 | * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and | ||
| 453 | * %FW_CDEV_IOC_SET_ISO_CHANNELS | ||
| 268 | */ | 454 | */ |
| 269 | #define FW_CDEV_VERSION 3 | 455 | #define FW_CDEV_VERSION 3 /* Meaningless; don't use this macro. */ |
| 270 | 456 | ||
| 271 | /** | 457 | /** |
| 272 | * struct fw_cdev_get_info - General purpose information ioctl | 458 | * struct fw_cdev_get_info - General purpose information ioctl |
| 273 | * @version: The version field is just a running serial number. | 459 | * @version: The version field is just a running serial number. Both an |
| 274 | * We never break backwards compatibility, but may add more | 460 | * input parameter (ABI version implemented by the client) and |
| 275 | * structs and ioctls in later revisions. | 461 | * output parameter (ABI version implemented by the kernel). |
| 462 | * A client must not fill in an %FW_CDEV_VERSION defined from an | ||
| 463 | * included kernel header file but the actual version for which | ||
| 464 | * the client was implemented. This is necessary for forward | ||
| 465 | * compatibility. We never break backwards compatibility, but | ||
| 466 | * may add more structs, events, and ioctls in later revisions. | ||
| 276 | * @rom_length: If @rom is non-zero, at most rom_length bytes of configuration | 467 | * @rom_length: If @rom is non-zero, at most rom_length bytes of configuration |
| 277 | * ROM will be copied into that user space address. In either | 468 | * ROM will be copied into that user space address. In either |
| 278 | * case, @rom_length is updated with the actual length of the | 469 | * case, @rom_length is updated with the actual length of the |
| @@ -339,28 +530,48 @@ struct fw_cdev_send_response { | |||
| 339 | }; | 530 | }; |
| 340 | 531 | ||
| 341 | /** | 532 | /** |
| 342 | * struct fw_cdev_allocate - Allocate a CSR address range | 533 | * struct fw_cdev_allocate - Allocate a CSR in an address range |
| 343 | * @offset: Start offset of the address range | 534 | * @offset: Start offset of the address range |
| 344 | * @closure: To be passed back to userspace in request events | 535 | * @closure: To be passed back to userspace in request events |
| 345 | * @length: Length of the address range, in bytes | 536 | * @length: Length of the CSR, in bytes |
| 346 | * @handle: Handle to the allocation, written by the kernel | 537 | * @handle: Handle to the allocation, written by the kernel |
| 538 | * @region_end: First address above the address range (added in ABI v4, 2.6.36) | ||
| 347 | * | 539 | * |
| 348 | * Allocate an address range in the 48-bit address space on the local node | 540 | * Allocate an address range in the 48-bit address space on the local node |
| 349 | * (the controller). This allows userspace to listen for requests with an | 541 | * (the controller). This allows userspace to listen for requests with an |
| 350 | * offset within that address range. When the kernel receives a request | 542 | * offset within that address range. Every time when the kernel receives a |
| 351 | * within the range, an &fw_cdev_event_request event will be written back. | 543 | * request within the range, an &fw_cdev_event_request2 event will be emitted. |
| 352 | * The @closure field is passed back to userspace in the response event. | 544 | * (If the kernel or the client implements ABI version <= 3, an |
| 545 | * &fw_cdev_event_request will be generated instead.) | ||
| 546 | * | ||
| 547 | * The @closure field is passed back to userspace in these request events. | ||
| 353 | * The @handle field is an out parameter, returning a handle to the allocated | 548 | * The @handle field is an out parameter, returning a handle to the allocated |
| 354 | * range to be used for later deallocation of the range. | 549 | * range to be used for later deallocation of the range. |
| 355 | * | 550 | * |
| 356 | * The address range is allocated on all local nodes. The address allocation | 551 | * The address range is allocated on all local nodes. The address allocation |
| 357 | * is exclusive except for the FCP command and response registers. | 552 | * is exclusive except for the FCP command and response registers. If an |
| 553 | * exclusive address region is already in use, the ioctl fails with errno set | ||
| 554 | * to %EBUSY. | ||
| 555 | * | ||
| 556 | * If kernel and client implement ABI version >= 4, the kernel looks up a free | ||
| 557 | * spot of size @length inside [@offset..@region_end) and, if found, writes | ||
| 558 | * the start address of the new CSR back in @offset. I.e. @offset is an | ||
| 559 | * in and out parameter. If this automatic placement of a CSR in a bigger | ||
| 560 | * address range is not desired, the client simply needs to set @region_end | ||
| 561 | * = @offset + @length. | ||
| 562 | * | ||
| 563 | * If the kernel or the client implements ABI version <= 3, @region_end is | ||
| 564 | * ignored and effectively assumed to be @offset + @length. | ||
| 565 | * | ||
| 566 | * @region_end is only present in a kernel header >= 2.6.36. If necessary, | ||
| 567 | * this can for example be tested by #ifdef FW_CDEV_EVENT_REQUEST2. | ||
| 358 | */ | 568 | */ |
| 359 | struct fw_cdev_allocate { | 569 | struct fw_cdev_allocate { |
| 360 | __u64 offset; | 570 | __u64 offset; |
| 361 | __u64 closure; | 571 | __u64 closure; |
| 362 | __u32 length; | 572 | __u32 length; |
| 363 | __u32 handle; | 573 | __u32 handle; |
| 574 | __u64 region_end; /* available since kernel version 2.6.36 */ | ||
| 364 | }; | 575 | }; |
| 365 | 576 | ||
| 366 | /** | 577 | /** |
| @@ -382,9 +593,14 @@ struct fw_cdev_deallocate { | |||
| 382 | * Initiate a bus reset for the bus this device is on. The bus reset can be | 593 | * Initiate a bus reset for the bus this device is on. The bus reset can be |
| 383 | * either the original (long) bus reset or the arbitrated (short) bus reset | 594 | * either the original (long) bus reset or the arbitrated (short) bus reset |
| 384 | * introduced in 1394a-2000. | 595 | * introduced in 1394a-2000. |
| 596 | * | ||
| 597 | * The ioctl returns immediately. A subsequent &fw_cdev_event_bus_reset | ||
| 598 | * indicates when the reset actually happened. Since ABI v4, this may be | ||
| 599 | * considerably later than the ioctl because the kernel ensures a grace period | ||
| 600 | * between subsequent bus resets as per IEEE 1394 bus management specification. | ||
| 385 | */ | 601 | */ |
| 386 | struct fw_cdev_initiate_bus_reset { | 602 | struct fw_cdev_initiate_bus_reset { |
| 387 | __u32 type; /* FW_CDEV_SHORT_RESET or FW_CDEV_LONG_RESET */ | 603 | __u32 type; |
| 388 | }; | 604 | }; |
| 389 | 605 | ||
| 390 | /** | 606 | /** |
| @@ -408,9 +624,10 @@ struct fw_cdev_initiate_bus_reset { | |||
| 408 | * | 624 | * |
| 409 | * @immediate, @key, and @data array elements are CPU-endian quadlets. | 625 | * @immediate, @key, and @data array elements are CPU-endian quadlets. |
| 410 | * | 626 | * |
| 411 | * If successful, the kernel adds the descriptor and writes back a handle to the | 627 | * If successful, the kernel adds the descriptor and writes back a @handle to |
| 412 | * kernel-side object to be used for later removal of the descriptor block and | 628 | * the kernel-side object to be used for later removal of the descriptor block |
| 413 | * immediate key. | 629 | * and immediate key. The kernel will also generate a bus reset to signal the |
| 630 | * change of the configuration ROM to other nodes. | ||
| 414 | * | 631 | * |
| 415 | * This ioctl affects the configuration ROMs of all local nodes. | 632 | * This ioctl affects the configuration ROMs of all local nodes. |
| 416 | * The ioctl only succeeds on device files which represent a local node. | 633 | * The ioctl only succeeds on device files which represent a local node. |
| @@ -429,38 +646,50 @@ struct fw_cdev_add_descriptor { | |||
| 429 | * descriptor was added | 646 | * descriptor was added |
| 430 | * | 647 | * |
| 431 | * Remove a descriptor block and accompanying immediate key from the local | 648 | * Remove a descriptor block and accompanying immediate key from the local |
| 432 | * nodes' configuration ROMs. | 649 | * nodes' configuration ROMs. The kernel will also generate a bus reset to |
| 650 | * signal the change of the configuration ROM to other nodes. | ||
| 433 | */ | 651 | */ |
| 434 | struct fw_cdev_remove_descriptor { | 652 | struct fw_cdev_remove_descriptor { |
| 435 | __u32 handle; | 653 | __u32 handle; |
| 436 | }; | 654 | }; |
| 437 | 655 | ||
| 438 | #define FW_CDEV_ISO_CONTEXT_TRANSMIT 0 | 656 | #define FW_CDEV_ISO_CONTEXT_TRANSMIT 0 |
| 439 | #define FW_CDEV_ISO_CONTEXT_RECEIVE 1 | 657 | #define FW_CDEV_ISO_CONTEXT_RECEIVE 1 |
| 658 | #define FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 /* added in 2.6.36 */ | ||
| 440 | 659 | ||
| 441 | /** | 660 | /** |
| 442 | * struct fw_cdev_create_iso_context - Create a context for isochronous IO | 661 | * struct fw_cdev_create_iso_context - Create a context for isochronous I/O |
| 443 | * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE | 662 | * @type: %FW_CDEV_ISO_CONTEXT_TRANSMIT or %FW_CDEV_ISO_CONTEXT_RECEIVE or |
| 444 | * @header_size: Header size to strip for receive contexts | 663 | * %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL |
| 445 | * @channel: Channel to bind to | 664 | * @header_size: Header size to strip in single-channel reception |
| 446 | * @speed: Speed for transmit contexts | 665 | * @channel: Channel to bind to in single-channel reception or transmission |
| 447 | * @closure: To be returned in &fw_cdev_event_iso_interrupt | 666 | * @speed: Transmission speed |
| 667 | * @closure: To be returned in &fw_cdev_event_iso_interrupt or | ||
| 668 | * &fw_cdev_event_iso_interrupt_multichannel | ||
| 448 | * @handle: Handle to context, written back by kernel | 669 | * @handle: Handle to context, written back by kernel |
| 449 | * | 670 | * |
| 450 | * Prior to sending or receiving isochronous I/O, a context must be created. | 671 | * Prior to sending or receiving isochronous I/O, a context must be created. |
| 451 | * The context records information about the transmit or receive configuration | 672 | * The context records information about the transmit or receive configuration |
| 452 | * and typically maps to an underlying hardware resource. A context is set up | 673 | * and typically maps to an underlying hardware resource. A context is set up |
| 453 | * for either sending or receiving. It is bound to a specific isochronous | 674 | * for either sending or receiving. It is bound to a specific isochronous |
| 454 | * channel. | 675 | * @channel. |
| 676 | * | ||
| 677 | * In case of multichannel reception, @header_size and @channel are ignored | ||
| 678 | * and the channels are selected by %FW_CDEV_IOC_SET_ISO_CHANNELS. | ||
| 679 | * | ||
| 680 | * For %FW_CDEV_ISO_CONTEXT_RECEIVE contexts, @header_size must be at least 4 | ||
| 681 | * and must be a multiple of 4. It is ignored in other context types. | ||
| 682 | * | ||
| 683 | * @speed is ignored in receive context types. | ||
| 455 | * | 684 | * |
| 456 | * If a context was successfully created, the kernel writes back a handle to the | 685 | * If a context was successfully created, the kernel writes back a handle to the |
| 457 | * context, which must be passed in for subsequent operations on that context. | 686 | * context, which must be passed in for subsequent operations on that context. |
| 458 | * | 687 | * |
| 459 | * For receive contexts, @header_size must be at least 4 and must be a multiple | 688 | * Limitations: |
| 460 | * of 4. | 689 | * No more than one iso context can be created per fd. |
| 461 | * | 690 | * The total number of contexts that all userspace and kernelspace drivers can |
| 462 | * Note that the effect of a @header_size > 4 depends on | 691 | * create on a card at a time is a hardware limit, typically 4 or 8 contexts per |
| 463 | * &fw_cdev_get_info.version, as documented at &fw_cdev_event_iso_interrupt. | 692 | * direction, and of them at most one multichannel receive context. |
| 464 | */ | 693 | */ |
| 465 | struct fw_cdev_create_iso_context { | 694 | struct fw_cdev_create_iso_context { |
| 466 | __u32 type; | 695 | __u32 type; |
| @@ -471,6 +700,22 @@ struct fw_cdev_create_iso_context { | |||
| 471 | __u32 handle; | 700 | __u32 handle; |
| 472 | }; | 701 | }; |
| 473 | 702 | ||
| 703 | /** | ||
| 704 | * struct fw_cdev_set_iso_channels - Select channels in multichannel reception | ||
| 705 | * @channels: Bitmask of channels to listen to | ||
| 706 | * @handle: Handle of the mutichannel receive context | ||
| 707 | * | ||
| 708 | * @channels is the bitwise or of 1ULL << n for each channel n to listen to. | ||
| 709 | * | ||
| 710 | * The ioctl fails with errno %EBUSY if there is already another receive context | ||
| 711 | * on a channel in @channels. In that case, the bitmask of all unoccupied | ||
| 712 | * channels is returned in @channels. | ||
| 713 | */ | ||
| 714 | struct fw_cdev_set_iso_channels { | ||
| 715 | __u64 channels; | ||
| 716 | __u32 handle; | ||
| 717 | }; | ||
| 718 | |||
| 474 | #define FW_CDEV_ISO_PAYLOAD_LENGTH(v) (v) | 719 | #define FW_CDEV_ISO_PAYLOAD_LENGTH(v) (v) |
| 475 | #define FW_CDEV_ISO_INTERRUPT (1 << 16) | 720 | #define FW_CDEV_ISO_INTERRUPT (1 << 16) |
| 476 | #define FW_CDEV_ISO_SKIP (1 << 17) | 721 | #define FW_CDEV_ISO_SKIP (1 << 17) |
| @@ -481,42 +726,72 @@ struct fw_cdev_create_iso_context { | |||
| 481 | 726 | ||
| 482 | /** | 727 | /** |
| 483 | * struct fw_cdev_iso_packet - Isochronous packet | 728 | * struct fw_cdev_iso_packet - Isochronous packet |
| 484 | * @control: Contains the header length (8 uppermost bits), the sy field | 729 | * @control: Contains the header length (8 uppermost bits), |
| 485 | * (4 bits), the tag field (2 bits), a sync flag (1 bit), | 730 | * the sy field (4 bits), the tag field (2 bits), a sync flag |
| 486 | * a skip flag (1 bit), an interrupt flag (1 bit), and the | 731 | * or a skip flag (1 bit), an interrupt flag (1 bit), and the |
| 487 | * payload length (16 lowermost bits) | 732 | * payload length (16 lowermost bits) |
| 488 | * @header: Header and payload | 733 | * @header: Header and payload in case of a transmit context. |
| 489 | * | 734 | * |
| 490 | * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. | 735 | * &struct fw_cdev_iso_packet is used to describe isochronous packet queues. |
| 491 | * | ||
| 492 | * Use the FW_CDEV_ISO_ macros to fill in @control. | 736 | * Use the FW_CDEV_ISO_ macros to fill in @control. |
| 737 | * The @header array is empty in case of receive contexts. | ||
| 738 | * | ||
| 739 | * Context type %FW_CDEV_ISO_CONTEXT_TRANSMIT: | ||
| 740 | * | ||
| 741 | * @control.HEADER_LENGTH must be a multiple of 4. It specifies the numbers of | ||
| 742 | * bytes in @header that will be prepended to the packet's payload. These bytes | ||
| 743 | * are copied into the kernel and will not be accessed after the ioctl has | ||
| 744 | * returned. | ||
| 745 | * | ||
| 746 | * The @control.SY and TAG fields are copied to the iso packet header. These | ||
| 747 | * fields are specified by IEEE 1394a and IEC 61883-1. | ||
| 748 | * | ||
| 749 | * The @control.SKIP flag specifies that no packet is to be sent in a frame. | ||
| 750 | * When using this, all other fields except @control.INTERRUPT must be zero. | ||
| 751 | * | ||
| 752 | * When a packet with the @control.INTERRUPT flag set has been completed, an | ||
| 753 | * &fw_cdev_event_iso_interrupt event will be sent. | ||
| 754 | * | ||
| 755 | * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE: | ||
| 756 | * | ||
| 757 | * @control.HEADER_LENGTH must be a multiple of the context's header_size. | ||
| 758 | * If the HEADER_LENGTH is larger than the context's header_size, multiple | ||
| 759 | * packets are queued for this entry. | ||
| 760 | * | ||
| 761 | * The @control.SY and TAG fields are ignored. | ||
| 762 | * | ||
| 763 | * If the @control.SYNC flag is set, the context drops all packets until a | ||
| 764 | * packet with a sy field is received which matches &fw_cdev_start_iso.sync. | ||
| 765 | * | ||
| 766 | * @control.PAYLOAD_LENGTH defines how many payload bytes can be received for | ||
| 767 | * one packet (in addition to payload quadlets that have been defined as headers | ||
| 768 | * and are stripped and returned in the &fw_cdev_event_iso_interrupt structure). | ||
| 769 | * If more bytes are received, the additional bytes are dropped. If less bytes | ||
| 770 | * are received, the remaining bytes in this part of the payload buffer will not | ||
| 771 | * be written to, not even by the next packet. I.e., packets received in | ||
| 772 | * consecutive frames will not necessarily be consecutive in memory. If an | ||
| 773 | * entry has queued multiple packets, the PAYLOAD_LENGTH is divided equally | ||
| 774 | * among them. | ||
| 493 | * | 775 | * |
| 494 | * For transmit packets, the header length must be a multiple of 4 and specifies | 776 | * When a packet with the @control.INTERRUPT flag set has been completed, an |
| 495 | * the numbers of bytes in @header that will be prepended to the packet's | ||
| 496 | * payload; these bytes are copied into the kernel and will not be accessed | ||
| 497 | * after the ioctl has returned. The sy and tag fields are copied to the iso | ||
| 498 | * packet header (these fields are specified by IEEE 1394a and IEC 61883-1). | ||
| 499 | * The skip flag specifies that no packet is to be sent in a frame; when using | ||
| 500 | * this, all other fields except the interrupt flag must be zero. | ||
| 501 | * | ||
| 502 | * For receive packets, the header length must be a multiple of the context's | ||
| 503 | * header size; if the header length is larger than the context's header size, | ||
| 504 | * multiple packets are queued for this entry. The sy and tag fields are | ||
| 505 | * ignored. If the sync flag is set, the context drops all packets until | ||
| 506 | * a packet with a matching sy field is received (the sync value to wait for is | ||
| 507 | * specified in the &fw_cdev_start_iso structure). The payload length defines | ||
| 508 | * how many payload bytes can be received for one packet (in addition to payload | ||
| 509 | * quadlets that have been defined as headers and are stripped and returned in | ||
| 510 | * the &fw_cdev_event_iso_interrupt structure). If more bytes are received, the | ||
| 511 | * additional bytes are dropped. If less bytes are received, the remaining | ||
| 512 | * bytes in this part of the payload buffer will not be written to, not even by | ||
| 513 | * the next packet, i.e., packets received in consecutive frames will not | ||
| 514 | * necessarily be consecutive in memory. If an entry has queued multiple | ||
| 515 | * packets, the payload length is divided equally among them. | ||
| 516 | * | ||
| 517 | * When a packet with the interrupt flag set has been completed, the | ||
| 518 | * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued | 777 | * &fw_cdev_event_iso_interrupt event will be sent. An entry that has queued |
| 519 | * multiple receive packets is completed when its last packet is completed. | 778 | * multiple receive packets is completed when its last packet is completed. |
| 779 | * | ||
| 780 | * Context type %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL: | ||
| 781 | * | ||
| 782 | * Here, &fw_cdev_iso_packet would be more aptly named _iso_buffer_chunk since | ||
| 783 | * it specifies a chunk of the mmap()'ed buffer, while the number and alignment | ||
| 784 | * of packets to be placed into the buffer chunk is not known beforehand. | ||
| 785 | * | ||
| 786 | * @control.PAYLOAD_LENGTH is the size of the buffer chunk and specifies room | ||
| 787 | * for header, payload, padding, and trailer bytes of one or more packets. | ||
| 788 | * It must be a multiple of 4. | ||
| 789 | * | ||
| 790 | * @control.HEADER_LENGTH, TAG and SY are ignored. SYNC is treated as described | ||
| 791 | * for single-channel reception. | ||
| 792 | * | ||
| 793 | * When a buffer chunk with the @control.INTERRUPT flag set has been filled | ||
| 794 | * entirely, an &fw_cdev_event_iso_interrupt_mc event will be sent. | ||
| 520 | */ | 795 | */ |
| 521 | struct fw_cdev_iso_packet { | 796 | struct fw_cdev_iso_packet { |
| 522 | __u32 control; | 797 | __u32 control; |
| @@ -525,9 +800,9 @@ struct fw_cdev_iso_packet { | |||
| 525 | 800 | ||
| 526 | /** | 801 | /** |
| 527 | * struct fw_cdev_queue_iso - Queue isochronous packets for I/O | 802 | * struct fw_cdev_queue_iso - Queue isochronous packets for I/O |
| 528 | * @packets: Userspace pointer to packet data | 803 | * @packets: Userspace pointer to an array of &fw_cdev_iso_packet |
| 529 | * @data: Pointer into mmap()'ed payload buffer | 804 | * @data: Pointer into mmap()'ed payload buffer |
| 530 | * @size: Size of packet data in bytes | 805 | * @size: Size of the @packets array, in bytes |
| 531 | * @handle: Isochronous context handle | 806 | * @handle: Isochronous context handle |
| 532 | * | 807 | * |
| 533 | * Queue a number of isochronous packets for reception or transmission. | 808 | * Queue a number of isochronous packets for reception or transmission. |
| @@ -540,6 +815,9 @@ struct fw_cdev_iso_packet { | |||
| 540 | * The kernel may or may not queue all packets, but will write back updated | 815 | * The kernel may or may not queue all packets, but will write back updated |
| 541 | * values of the @packets, @data and @size fields, so the ioctl can be | 816 | * values of the @packets, @data and @size fields, so the ioctl can be |
| 542 | * resubmitted easily. | 817 | * resubmitted easily. |
| 818 | * | ||
| 819 | * In case of a multichannel receive context, @data must be quadlet-aligned | ||
| 820 | * relative to the buffer start. | ||
| 543 | */ | 821 | */ |
| 544 | struct fw_cdev_queue_iso { | 822 | struct fw_cdev_queue_iso { |
| 545 | __u64 packets; | 823 | __u64 packets; |
| @@ -698,4 +976,39 @@ struct fw_cdev_send_stream_packet { | |||
| 698 | __u32 speed; | 976 | __u32 speed; |
| 699 | }; | 977 | }; |
| 700 | 978 | ||
| 979 | /** | ||
| 980 | * struct fw_cdev_send_phy_packet - send a PHY packet | ||
| 981 | * @closure: Passed back to userspace in the PHY-packet-sent event | ||
| 982 | * @data: First and second quadlet of the PHY packet | ||
| 983 | * @generation: The bus generation where packet is valid | ||
| 984 | * | ||
| 985 | * The %FW_CDEV_IOC_SEND_PHY_PACKET ioctl sends a PHY packet to all nodes | ||
| 986 | * on the same card as this device. After transmission, an | ||
| 987 | * %FW_CDEV_EVENT_PHY_PACKET_SENT event is generated. | ||
| 988 | * | ||
| 989 | * The payload @data[] shall be specified in host byte order. Usually, | ||
| 990 | * @data[1] needs to be the bitwise inverse of @data[0]. VersaPHY packets | ||
| 991 | * are an exception to this rule. | ||
| 992 | * | ||
| 993 | * The ioctl is only permitted on device files which represent a local node. | ||
| 994 | */ | ||
| 995 | struct fw_cdev_send_phy_packet { | ||
| 996 | __u64 closure; | ||
| 997 | __u32 data[2]; | ||
| 998 | __u32 generation; | ||
| 999 | }; | ||
| 1000 | |||
| 1001 | /** | ||
| 1002 | * struct fw_cdev_receive_phy_packets - start reception of PHY packets | ||
| 1003 | * @closure: Passed back to userspace in phy packet events | ||
| 1004 | * | ||
| 1005 | * This ioctl activates issuing of %FW_CDEV_EVENT_PHY_PACKET_RECEIVED due to | ||
| 1006 | * incoming PHY packets from any node on the same bus as the device. | ||
| 1007 | * | ||
| 1008 | * The ioctl is only permitted on device files which represent a local node. | ||
| 1009 | */ | ||
| 1010 | struct fw_cdev_receive_phy_packets { | ||
| 1011 | __u64 closure; | ||
| 1012 | }; | ||
| 1013 | |||
| 701 | #endif /* _LINUX_FIREWIRE_CDEV_H */ | 1014 | #endif /* _LINUX_FIREWIRE_CDEV_H */ |
diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 72e2b8ac2a5a..1cd637ef62d2 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h | |||
| @@ -32,11 +32,13 @@ | |||
| 32 | #define CSR_CYCLE_TIME 0x200 | 32 | #define CSR_CYCLE_TIME 0x200 |
| 33 | #define CSR_BUS_TIME 0x204 | 33 | #define CSR_BUS_TIME 0x204 |
| 34 | #define CSR_BUSY_TIMEOUT 0x210 | 34 | #define CSR_BUSY_TIMEOUT 0x210 |
| 35 | #define CSR_PRIORITY_BUDGET 0x218 | ||
| 35 | #define CSR_BUS_MANAGER_ID 0x21c | 36 | #define CSR_BUS_MANAGER_ID 0x21c |
| 36 | #define CSR_BANDWIDTH_AVAILABLE 0x220 | 37 | #define CSR_BANDWIDTH_AVAILABLE 0x220 |
| 37 | #define CSR_CHANNELS_AVAILABLE 0x224 | 38 | #define CSR_CHANNELS_AVAILABLE 0x224 |
| 38 | #define CSR_CHANNELS_AVAILABLE_HI 0x224 | 39 | #define CSR_CHANNELS_AVAILABLE_HI 0x224 |
| 39 | #define CSR_CHANNELS_AVAILABLE_LO 0x228 | 40 | #define CSR_CHANNELS_AVAILABLE_LO 0x228 |
| 41 | #define CSR_MAINT_UTILITY 0x230 | ||
| 40 | #define CSR_BROADCAST_CHANNEL 0x234 | 42 | #define CSR_BROADCAST_CHANNEL 0x234 |
| 41 | #define CSR_CONFIG_ROM 0x400 | 43 | #define CSR_CONFIG_ROM 0x400 |
| 42 | #define CSR_CONFIG_ROM_END 0x800 | 44 | #define CSR_CONFIG_ROM_END 0x800 |
| @@ -89,6 +91,11 @@ struct fw_card { | |||
| 89 | struct list_head transaction_list; | 91 | struct list_head transaction_list; |
| 90 | unsigned long reset_jiffies; | 92 | unsigned long reset_jiffies; |
| 91 | 93 | ||
| 94 | u32 split_timeout_hi; | ||
| 95 | u32 split_timeout_lo; | ||
| 96 | unsigned int split_timeout_cycles; | ||
| 97 | unsigned int split_timeout_jiffies; | ||
| 98 | |||
| 92 | unsigned long long guid; | 99 | unsigned long long guid; |
| 93 | unsigned max_receive; | 100 | unsigned max_receive; |
| 94 | int link_speed; | 101 | int link_speed; |
| @@ -104,18 +111,28 @@ struct fw_card { | |||
| 104 | bool beta_repeaters_present; | 111 | bool beta_repeaters_present; |
| 105 | 112 | ||
| 106 | int index; | 113 | int index; |
| 107 | |||
| 108 | struct list_head link; | 114 | struct list_head link; |
| 109 | 115 | ||
| 110 | /* Work struct for BM duties. */ | 116 | struct list_head phy_receiver_list; |
| 111 | struct delayed_work work; | 117 | |
| 118 | struct delayed_work br_work; /* bus reset job */ | ||
| 119 | bool br_short; | ||
| 120 | |||
| 121 | struct delayed_work bm_work; /* bus manager job */ | ||
| 112 | int bm_retries; | 122 | int bm_retries; |
| 113 | int bm_generation; | 123 | int bm_generation; |
| 114 | __be32 bm_transaction_data[2]; | 124 | __be32 bm_transaction_data[2]; |
| 125 | int bm_node_id; | ||
| 126 | bool bm_abdicate; | ||
| 127 | |||
| 128 | bool priority_budget_implemented; /* controller feature */ | ||
| 129 | bool broadcast_channel_auto_allocated; /* controller feature */ | ||
| 115 | 130 | ||
| 116 | bool broadcast_channel_allocated; | 131 | bool broadcast_channel_allocated; |
| 117 | u32 broadcast_channel; | 132 | u32 broadcast_channel; |
| 118 | __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; | 133 | __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; |
| 134 | |||
| 135 | __be32 maint_utility_register; | ||
| 119 | }; | 136 | }; |
| 120 | 137 | ||
| 121 | struct fw_attribute_group { | 138 | struct fw_attribute_group { |
| @@ -252,7 +269,7 @@ typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode, | |||
| 252 | typedef void (*fw_address_callback_t)(struct fw_card *card, | 269 | typedef void (*fw_address_callback_t)(struct fw_card *card, |
| 253 | struct fw_request *request, | 270 | struct fw_request *request, |
| 254 | int tcode, int destination, int source, | 271 | int tcode, int destination, int source, |
| 255 | int generation, int speed, | 272 | int generation, |
| 256 | unsigned long long offset, | 273 | unsigned long long offset, |
| 257 | void *data, size_t length, | 274 | void *data, size_t length, |
| 258 | void *callback_data); | 275 | void *callback_data); |
| @@ -269,10 +286,10 @@ struct fw_packet { | |||
| 269 | u32 timestamp; | 286 | u32 timestamp; |
| 270 | 287 | ||
| 271 | /* | 288 | /* |
| 272 | * This callback is called when the packet transmission has | 289 | * This callback is called when the packet transmission has completed. |
| 273 | * completed; for successful transmission, the status code is | 290 | * For successful transmission, the status code is the ack received |
| 274 | * the ack received from the destination, otherwise it's a | 291 | * from the destination. Otherwise it is one of the juju-specific |
| 275 | * negative errno: ENOMEM, ESTALE, ETIMEDOUT, ENODEV, EIO. | 292 | * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK. |
| 276 | * The callback can be called from tasklet context and thus | 293 | * The callback can be called from tasklet context and thus |
| 277 | * must never block. | 294 | * must never block. |
| 278 | */ | 295 | */ |
| @@ -355,17 +372,19 @@ void fw_core_remove_descriptor(struct fw_descriptor *desc); | |||
| 355 | * scatter-gather streaming (e.g. assembling video frame automatically). | 372 | * scatter-gather streaming (e.g. assembling video frame automatically). |
| 356 | */ | 373 | */ |
| 357 | struct fw_iso_packet { | 374 | struct fw_iso_packet { |
| 358 | u16 payload_length; /* Length of indirect payload. */ | 375 | u16 payload_length; /* Length of indirect payload */ |
| 359 | u32 interrupt:1; /* Generate interrupt on this packet */ | 376 | u32 interrupt:1; /* Generate interrupt on this packet */ |
| 360 | u32 skip:1; /* Set to not send packet at all. */ | 377 | u32 skip:1; /* tx: Set to not send packet at all */ |
| 361 | u32 tag:2; | 378 | /* rx: Sync bit, wait for matching sy */ |
| 362 | u32 sy:4; | 379 | u32 tag:2; /* tx: Tag in packet header */ |
| 363 | u32 header_length:8; /* Length of immediate header. */ | 380 | u32 sy:4; /* tx: Sy in packet header */ |
| 364 | u32 header[0]; | 381 | u32 header_length:8; /* Length of immediate header */ |
| 382 | u32 header[0]; /* tx: Top of 1394 isoch. data_block */ | ||
| 365 | }; | 383 | }; |
| 366 | 384 | ||
| 367 | #define FW_ISO_CONTEXT_TRANSMIT 0 | 385 | #define FW_ISO_CONTEXT_TRANSMIT 0 |
| 368 | #define FW_ISO_CONTEXT_RECEIVE 1 | 386 | #define FW_ISO_CONTEXT_RECEIVE 1 |
| 387 | #define FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL 2 | ||
| 369 | 388 | ||
| 370 | #define FW_ISO_CONTEXT_MATCH_TAG0 1 | 389 | #define FW_ISO_CONTEXT_MATCH_TAG0 1 |
| 371 | #define FW_ISO_CONTEXT_MATCH_TAG1 2 | 390 | #define FW_ISO_CONTEXT_MATCH_TAG1 2 |
| @@ -389,24 +408,31 @@ struct fw_iso_buffer { | |||
| 389 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, | 408 | int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card, |
| 390 | int page_count, enum dma_data_direction direction); | 409 | int page_count, enum dma_data_direction direction); |
| 391 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); | 410 | void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card); |
| 411 | size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed); | ||
| 392 | 412 | ||
| 393 | struct fw_iso_context; | 413 | struct fw_iso_context; |
| 394 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, | 414 | typedef void (*fw_iso_callback_t)(struct fw_iso_context *context, |
| 395 | u32 cycle, size_t header_length, | 415 | u32 cycle, size_t header_length, |
| 396 | void *header, void *data); | 416 | void *header, void *data); |
| 417 | typedef void (*fw_iso_mc_callback_t)(struct fw_iso_context *context, | ||
| 418 | dma_addr_t completed, void *data); | ||
| 397 | struct fw_iso_context { | 419 | struct fw_iso_context { |
| 398 | struct fw_card *card; | 420 | struct fw_card *card; |
| 399 | int type; | 421 | int type; |
| 400 | int channel; | 422 | int channel; |
| 401 | int speed; | 423 | int speed; |
| 402 | size_t header_size; | 424 | size_t header_size; |
| 403 | fw_iso_callback_t callback; | 425 | union { |
| 426 | fw_iso_callback_t sc; | ||
| 427 | fw_iso_mc_callback_t mc; | ||
| 428 | } callback; | ||
| 404 | void *callback_data; | 429 | void *callback_data; |
| 405 | }; | 430 | }; |
| 406 | 431 | ||
| 407 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, | 432 | struct fw_iso_context *fw_iso_context_create(struct fw_card *card, |
| 408 | int type, int channel, int speed, size_t header_size, | 433 | int type, int channel, int speed, size_t header_size, |
| 409 | fw_iso_callback_t callback, void *callback_data); | 434 | fw_iso_callback_t callback, void *callback_data); |
| 435 | int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels); | ||
| 410 | int fw_iso_context_queue(struct fw_iso_context *ctx, | 436 | int fw_iso_context_queue(struct fw_iso_context *ctx, |
| 411 | struct fw_iso_packet *packet, | 437 | struct fw_iso_packet *packet, |
| 412 | struct fw_iso_buffer *buffer, | 438 | struct fw_iso_buffer *buffer, |
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h index 1d747f72298b..631b77f2ac70 100644 --- a/include/linux/flex_array.h +++ b/include/linux/flex_array.h | |||
| @@ -70,4 +70,9 @@ int flex_array_clear(struct flex_array *fa, unsigned int element_nr); | |||
| 70 | void *flex_array_get(struct flex_array *fa, unsigned int element_nr); | 70 | void *flex_array_get(struct flex_array *fa, unsigned int element_nr); |
| 71 | int flex_array_shrink(struct flex_array *fa); | 71 | int flex_array_shrink(struct flex_array *fa); |
| 72 | 72 | ||
| 73 | #define flex_array_put_ptr(fa, nr, src, gfp) \ | ||
| 74 | flex_array_put(fa, nr, &(void *)(src), gfp) | ||
| 75 | |||
| 76 | void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr); | ||
| 77 | |||
| 73 | #endif /* _FLEX_ARRAY_H */ | 78 | #endif /* _FLEX_ARRAY_H */ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index e5106e49bd2c..7a0625e26a39 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | #include <linux/limits.h> | 9 | #include <linux/limits.h> |
| 10 | #include <linux/ioctl.h> | 10 | #include <linux/ioctl.h> |
| 11 | #include <linux/blk_types.h> | ||
| 11 | 12 | ||
| 12 | /* | 13 | /* |
| 13 | * It's silly to have NR_OPEN bigger than NR_FILE, but you can change | 14 | * It's silly to have NR_OPEN bigger than NR_FILE, but you can change |
| @@ -91,6 +92,9 @@ struct inodes_stat_t { | |||
| 91 | /* Expect random access pattern */ | 92 | /* Expect random access pattern */ |
| 92 | #define FMODE_RANDOM ((__force fmode_t)0x1000) | 93 | #define FMODE_RANDOM ((__force fmode_t)0x1000) |
| 93 | 94 | ||
| 95 | /* File was opened by fanotify and shouldn't generate fanotify events */ | ||
| 96 | #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) | ||
| 97 | |||
| 94 | /* | 98 | /* |
| 95 | * The below are the various read and write types that we support. Some of | 99 | * The below are the various read and write types that we support. Some of |
| 96 | * them include behavioral modifiers that send information down to the | 100 | * them include behavioral modifiers that send information down to the |
| @@ -118,7 +122,7 @@ struct inodes_stat_t { | |||
| 118 | * immediately wait on this read without caring about | 122 | * immediately wait on this read without caring about |
| 119 | * unplugging. | 123 | * unplugging. |
| 120 | * READA Used for read-ahead operations. Lower priority, and the | 124 | * READA Used for read-ahead operations. Lower priority, and the |
| 121 | * block layer could (in theory) choose to ignore this | 125 | * block layer could (in theory) choose to ignore this |
| 122 | * request if it runs into resource problems. | 126 | * request if it runs into resource problems. |
| 123 | * WRITE A normal async write. Device will be plugged. | 127 | * WRITE A normal async write. Device will be plugged. |
| 124 | * SWRITE Like WRITE, but a special case for ll_rw_block() that | 128 | * SWRITE Like WRITE, but a special case for ll_rw_block() that |
| @@ -137,7 +141,7 @@ struct inodes_stat_t { | |||
| 137 | * SWRITE_SYNC | 141 | * SWRITE_SYNC |
| 138 | * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer. | 142 | * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer. |
| 139 | * See SWRITE. | 143 | * See SWRITE. |
| 140 | * WRITE_BARRIER Like WRITE, but tells the block layer that all | 144 | * WRITE_BARRIER Like WRITE_SYNC, but tells the block layer that all |
| 141 | * previously submitted writes must be safely on storage | 145 | * previously submitted writes must be safely on storage |
| 142 | * before this one is started. Also guarantees that when | 146 | * before this one is started. Also guarantees that when |
| 143 | * this write is complete, it itself is also safely on | 147 | * this write is complete, it itself is also safely on |
| @@ -145,29 +149,32 @@ struct inodes_stat_t { | |||
| 145 | * of this IO. | 149 | * of this IO. |
| 146 | * | 150 | * |
| 147 | */ | 151 | */ |
| 148 | #define RW_MASK 1 | 152 | #define RW_MASK REQ_WRITE |
| 149 | #define RWA_MASK 2 | 153 | #define RWA_MASK REQ_RAHEAD |
| 150 | #define READ 0 | 154 | |
| 151 | #define WRITE 1 | 155 | #define READ 0 |
| 152 | #define READA 2 /* read-ahead - don't block if no resources */ | 156 | #define WRITE RW_MASK |
| 153 | #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ | 157 | #define READA RWA_MASK |
| 154 | #define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) | 158 | #define SWRITE (WRITE | READA) |
| 155 | #define READ_META (READ | (1 << BIO_RW_META)) | 159 | |
| 156 | #define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) | 160 | #define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) |
| 157 | #define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) | 161 | #define READ_META (READ | REQ_META) |
| 158 | #define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO)) | 162 | #define WRITE_SYNC_PLUG (WRITE | REQ_SYNC | REQ_NOIDLE) |
| 159 | #define WRITE_META (WRITE | (1 << BIO_RW_META)) | 163 | #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) |
| 160 | #define SWRITE_SYNC_PLUG \ | 164 | #define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC) |
| 161 | (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) | 165 | #define WRITE_META (WRITE | REQ_META) |
| 162 | #define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) | 166 | #define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ |
| 163 | #define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) | 167 | REQ_HARDBARRIER) |
| 168 | #define SWRITE_SYNC_PLUG (SWRITE | REQ_SYNC | REQ_NOIDLE) | ||
| 169 | #define SWRITE_SYNC (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) | ||
| 164 | 170 | ||
| 165 | /* | 171 | /* |
| 166 | * These aren't really reads or writes, they pass down information about | 172 | * These aren't really reads or writes, they pass down information about |
| 167 | * parts of device that are now unused by the file system. | 173 | * parts of device that are now unused by the file system. |
| 168 | */ | 174 | */ |
| 169 | #define DISCARD_NOBARRIER (WRITE | (1 << BIO_RW_DISCARD)) | 175 | #define DISCARD_NOBARRIER (WRITE | REQ_DISCARD) |
| 170 | #define DISCARD_BARRIER (DISCARD_NOBARRIER | (1 << BIO_RW_BARRIER)) | 176 | #define DISCARD_BARRIER (WRITE | REQ_DISCARD | REQ_HARDBARRIER) |
| 177 | #define DISCARD_SECURE (DISCARD_NOBARRIER | REQ_SECURE) | ||
| 171 | 178 | ||
| 172 | #define SEL_IN 1 | 179 | #define SEL_IN 1 |
| 173 | #define SEL_OUT 2 | 180 | #define SEL_OUT 2 |
| @@ -210,6 +217,7 @@ struct inodes_stat_t { | |||
| 210 | #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ | 217 | #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ |
| 211 | #define MS_I_VERSION (1<<23) /* Update inode I_version field */ | 218 | #define MS_I_VERSION (1<<23) /* Update inode I_version field */ |
| 212 | #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ | 219 | #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ |
| 220 | #define MS_BORN (1<<29) | ||
| 213 | #define MS_ACTIVE (1<<30) | 221 | #define MS_ACTIVE (1<<30) |
| 214 | #define MS_NOUSER (1<<31) | 222 | #define MS_NOUSER (1<<31) |
| 215 | 223 | ||
| @@ -310,6 +318,7 @@ struct inodes_stat_t { | |||
| 310 | #define BLKALIGNOFF _IO(0x12,122) | 318 | #define BLKALIGNOFF _IO(0x12,122) |
| 311 | #define BLKPBSZGET _IO(0x12,123) | 319 | #define BLKPBSZGET _IO(0x12,123) |
| 312 | #define BLKDISCARDZEROES _IO(0x12,124) | 320 | #define BLKDISCARDZEROES _IO(0x12,124) |
| 321 | #define BLKSECDISCARD _IO(0x12,125) | ||
| 313 | 322 | ||
| 314 | #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ | 323 | #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ |
| 315 | #define FIBMAP _IO(0x00,1) /* bmap access */ | 324 | #define FIBMAP _IO(0x00,1) /* bmap access */ |
| @@ -408,9 +417,6 @@ extern int get_max_files(void); | |||
| 408 | extern int sysctl_nr_open; | 417 | extern int sysctl_nr_open; |
| 409 | extern struct inodes_stat_t inodes_stat; | 418 | extern struct inodes_stat_t inodes_stat; |
| 410 | extern int leases_enable, lease_break_time; | 419 | extern int leases_enable, lease_break_time; |
| 411 | #ifdef CONFIG_DNOTIFY | ||
| 412 | extern int dir_notify_enable; | ||
| 413 | #endif | ||
| 414 | 420 | ||
| 415 | struct buffer_head; | 421 | struct buffer_head; |
| 416 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, | 422 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, |
| @@ -687,6 +693,7 @@ struct block_device { | |||
| 687 | */ | 693 | */ |
| 688 | #define PAGECACHE_TAG_DIRTY 0 | 694 | #define PAGECACHE_TAG_DIRTY 0 |
| 689 | #define PAGECACHE_TAG_WRITEBACK 1 | 695 | #define PAGECACHE_TAG_WRITEBACK 1 |
| 696 | #define PAGECACHE_TAG_TOWRITE 2 | ||
| 690 | 697 | ||
| 691 | int mapping_tagged(struct address_space *mapping, int tag); | 698 | int mapping_tagged(struct address_space *mapping, int tag); |
| 692 | 699 | ||
| @@ -770,12 +777,7 @@ struct inode { | |||
| 770 | 777 | ||
| 771 | #ifdef CONFIG_FSNOTIFY | 778 | #ifdef CONFIG_FSNOTIFY |
| 772 | __u32 i_fsnotify_mask; /* all events this inode cares about */ | 779 | __u32 i_fsnotify_mask; /* all events this inode cares about */ |
| 773 | struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */ | 780 | struct hlist_head i_fsnotify_marks; |
| 774 | #endif | ||
| 775 | |||
| 776 | #ifdef CONFIG_INOTIFY | ||
| 777 | struct list_head inotify_watches; /* watches on this inode */ | ||
| 778 | struct mutex inotify_mutex; /* protects the watches list */ | ||
| 779 | #endif | 781 | #endif |
| 780 | 782 | ||
| 781 | unsigned long i_state; | 783 | unsigned long i_state; |
| @@ -1563,8 +1565,8 @@ struct super_operations { | |||
| 1563 | 1565 | ||
| 1564 | void (*dirty_inode) (struct inode *); | 1566 | void (*dirty_inode) (struct inode *); |
| 1565 | int (*write_inode) (struct inode *, struct writeback_control *wbc); | 1567 | int (*write_inode) (struct inode *, struct writeback_control *wbc); |
| 1566 | void (*drop_inode) (struct inode *); | 1568 | int (*drop_inode) (struct inode *); |
| 1567 | void (*delete_inode) (struct inode *); | 1569 | void (*evict_inode) (struct inode *); |
| 1568 | void (*put_super) (struct super_block *); | 1570 | void (*put_super) (struct super_block *); |
| 1569 | void (*write_super) (struct super_block *); | 1571 | void (*write_super) (struct super_block *); |
| 1570 | int (*sync_fs)(struct super_block *sb, int wait); | 1572 | int (*sync_fs)(struct super_block *sb, int wait); |
| @@ -1572,7 +1574,6 @@ struct super_operations { | |||
| 1572 | int (*unfreeze_fs) (struct super_block *); | 1574 | int (*unfreeze_fs) (struct super_block *); |
| 1573 | int (*statfs) (struct dentry *, struct kstatfs *); | 1575 | int (*statfs) (struct dentry *, struct kstatfs *); |
| 1574 | int (*remount_fs) (struct super_block *, int *, char *); | 1576 | int (*remount_fs) (struct super_block *, int *, char *); |
| 1575 | void (*clear_inode) (struct inode *); | ||
| 1576 | void (*umount_begin) (struct super_block *); | 1577 | void (*umount_begin) (struct super_block *); |
| 1577 | 1578 | ||
| 1578 | int (*show_options)(struct seq_file *, struct vfsmount *); | 1579 | int (*show_options)(struct seq_file *, struct vfsmount *); |
| @@ -1617,8 +1618,8 @@ struct super_operations { | |||
| 1617 | * I_FREEING Set when inode is about to be freed but still has dirty | 1618 | * I_FREEING Set when inode is about to be freed but still has dirty |
| 1618 | * pages or buffers attached or the inode itself is still | 1619 | * pages or buffers attached or the inode itself is still |
| 1619 | * dirty. | 1620 | * dirty. |
| 1620 | * I_CLEAR Set by clear_inode(). In this state the inode is clean | 1621 | * I_CLEAR Added by end_writeback(). In this state the inode is clean |
| 1621 | * and can be destroyed. | 1622 | * and can be destroyed. Inode keeps I_FREEING. |
| 1622 | * | 1623 | * |
| 1623 | * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are | 1624 | * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are |
| 1624 | * prohibited for many purposes. iget() must wait for | 1625 | * prohibited for many purposes. iget() must wait for |
| @@ -1815,7 +1816,8 @@ extern struct vfsmount *collect_mounts(struct path *); | |||
| 1815 | extern void drop_collected_mounts(struct vfsmount *); | 1816 | extern void drop_collected_mounts(struct vfsmount *); |
| 1816 | extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, | 1817 | extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, |
| 1817 | struct vfsmount *); | 1818 | struct vfsmount *); |
| 1818 | extern int vfs_statfs(struct dentry *, struct kstatfs *); | 1819 | extern int vfs_statfs(struct path *, struct kstatfs *); |
| 1820 | extern int statfs_by_dentry(struct dentry *, struct kstatfs *); | ||
| 1819 | extern int freeze_super(struct super_block *super); | 1821 | extern int freeze_super(struct super_block *super); |
| 1820 | extern int thaw_super(struct super_block *super); | 1822 | extern int thaw_super(struct super_block *super); |
| 1821 | 1823 | ||
| @@ -2165,9 +2167,8 @@ extern void iput(struct inode *); | |||
| 2165 | extern struct inode * igrab(struct inode *); | 2167 | extern struct inode * igrab(struct inode *); |
| 2166 | extern ino_t iunique(struct super_block *, ino_t); | 2168 | extern ino_t iunique(struct super_block *, ino_t); |
| 2167 | extern int inode_needs_sync(struct inode *inode); | 2169 | extern int inode_needs_sync(struct inode *inode); |
| 2168 | extern void generic_delete_inode(struct inode *inode); | 2170 | extern int generic_delete_inode(struct inode *inode); |
| 2169 | extern void generic_drop_inode(struct inode *inode); | 2171 | extern int generic_drop_inode(struct inode *inode); |
| 2170 | extern int generic_detach_inode(struct inode *inode); | ||
| 2171 | 2172 | ||
| 2172 | extern struct inode *ilookup5_nowait(struct super_block *sb, | 2173 | extern struct inode *ilookup5_nowait(struct super_block *sb, |
| 2173 | unsigned long hashval, int (*test)(struct inode *, void *), | 2174 | unsigned long hashval, int (*test)(struct inode *, void *), |
| @@ -2184,7 +2185,7 @@ extern void unlock_new_inode(struct inode *); | |||
| 2184 | 2185 | ||
| 2185 | extern void __iget(struct inode * inode); | 2186 | extern void __iget(struct inode * inode); |
| 2186 | extern void iget_failed(struct inode *); | 2187 | extern void iget_failed(struct inode *); |
| 2187 | extern void clear_inode(struct inode *); | 2188 | extern void end_writeback(struct inode *); |
| 2188 | extern void destroy_inode(struct inode *); | 2189 | extern void destroy_inode(struct inode *); |
| 2189 | extern void __destroy_inode(struct inode *); | 2190 | extern void __destroy_inode(struct inode *); |
| 2190 | extern struct inode *new_inode(struct super_block *); | 2191 | extern struct inode *new_inode(struct super_block *); |
| @@ -2200,7 +2201,6 @@ static inline void insert_inode_hash(struct inode *inode) { | |||
| 2200 | extern void file_move(struct file *f, struct list_head *list); | 2201 | extern void file_move(struct file *f, struct list_head *list); |
| 2201 | extern void file_kill(struct file *f); | 2202 | extern void file_kill(struct file *f); |
| 2202 | #ifdef CONFIG_BLOCK | 2203 | #ifdef CONFIG_BLOCK |
| 2203 | struct bio; | ||
| 2204 | extern void submit_bio(int, struct bio *); | 2204 | extern void submit_bio(int, struct bio *); |
| 2205 | extern int bdev_read_only(struct block_device *); | 2205 | extern int bdev_read_only(struct block_device *); |
| 2206 | #endif | 2206 | #endif |
| @@ -2267,19 +2267,8 @@ static inline int xip_truncate_page(struct address_space *mapping, loff_t from) | |||
| 2267 | #endif | 2267 | #endif |
| 2268 | 2268 | ||
| 2269 | #ifdef CONFIG_BLOCK | 2269 | #ifdef CONFIG_BLOCK |
| 2270 | struct bio; | ||
| 2271 | typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, | 2270 | typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, |
| 2272 | loff_t file_offset); | 2271 | loff_t file_offset); |
| 2273 | void dio_end_io(struct bio *bio, int error); | ||
| 2274 | |||
| 2275 | ssize_t __blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, struct inode *inode, | ||
| 2276 | struct block_device *bdev, const struct iovec *iov, loff_t offset, | ||
| 2277 | unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, | ||
| 2278 | dio_submit_t submit_io, int lock_type); | ||
| 2279 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | ||
| 2280 | struct block_device *bdev, const struct iovec *iov, loff_t offset, | ||
| 2281 | unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, | ||
| 2282 | dio_submit_t submit_io, int lock_type); | ||
| 2283 | 2272 | ||
| 2284 | enum { | 2273 | enum { |
| 2285 | /* need locking between buffered and direct access */ | 2274 | /* need locking between buffered and direct access */ |
| @@ -2289,24 +2278,13 @@ enum { | |||
| 2289 | DIO_SKIP_HOLES = 0x02, | 2278 | DIO_SKIP_HOLES = 0x02, |
| 2290 | }; | 2279 | }; |
| 2291 | 2280 | ||
| 2292 | static inline ssize_t blockdev_direct_IO_newtrunc(int rw, struct kiocb *iocb, | 2281 | void dio_end_io(struct bio *bio, int error); |
| 2293 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 2282 | |
| 2294 | loff_t offset, unsigned long nr_segs, get_block_t get_block, | 2283 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, |
| 2295 | dio_iodone_t end_io) | 2284 | struct block_device *bdev, const struct iovec *iov, loff_t offset, |
| 2296 | { | 2285 | unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, |
| 2297 | return __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, offset, | 2286 | dio_submit_t submit_io, int flags); |
| 2298 | nr_segs, get_block, end_io, NULL, | ||
| 2299 | DIO_LOCKING | DIO_SKIP_HOLES); | ||
| 2300 | } | ||
| 2301 | 2287 | ||
| 2302 | static inline ssize_t blockdev_direct_IO_no_locking_newtrunc(int rw, struct kiocb *iocb, | ||
| 2303 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | ||
| 2304 | loff_t offset, unsigned long nr_segs, get_block_t get_block, | ||
| 2305 | dio_iodone_t end_io) | ||
| 2306 | { | ||
| 2307 | return __blockdev_direct_IO_newtrunc(rw, iocb, inode, bdev, iov, offset, | ||
| 2308 | nr_segs, get_block, end_io, NULL, 0); | ||
| 2309 | } | ||
| 2310 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, | 2288 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, |
| 2311 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 2289 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, |
| 2312 | loff_t offset, unsigned long nr_segs, get_block_t get_block, | 2290 | loff_t offset, unsigned long nr_segs, get_block_t get_block, |
| @@ -2316,15 +2294,6 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, | |||
| 2316 | nr_segs, get_block, end_io, NULL, | 2294 | nr_segs, get_block, end_io, NULL, |
| 2317 | DIO_LOCKING | DIO_SKIP_HOLES); | 2295 | DIO_LOCKING | DIO_SKIP_HOLES); |
| 2318 | } | 2296 | } |
| 2319 | |||
| 2320 | static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, | ||
| 2321 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | ||
| 2322 | loff_t offset, unsigned long nr_segs, get_block_t get_block, | ||
| 2323 | dio_iodone_t end_io) | ||
| 2324 | { | ||
| 2325 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | ||
| 2326 | nr_segs, get_block, end_io, NULL, 0); | ||
| 2327 | } | ||
| 2328 | #endif | 2297 | #endif |
| 2329 | 2298 | ||
| 2330 | extern const struct file_operations generic_ro_fops; | 2299 | extern const struct file_operations generic_ro_fops; |
| @@ -2386,7 +2355,6 @@ extern int simple_link(struct dentry *, struct inode *, struct dentry *); | |||
| 2386 | extern int simple_unlink(struct inode *, struct dentry *); | 2355 | extern int simple_unlink(struct inode *, struct dentry *); |
| 2387 | extern int simple_rmdir(struct inode *, struct dentry *); | 2356 | extern int simple_rmdir(struct inode *, struct dentry *); |
| 2388 | extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); | 2357 | extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); |
| 2389 | extern int simple_setsize(struct inode *, loff_t); | ||
| 2390 | extern int noop_fsync(struct file *, int); | 2358 | extern int noop_fsync(struct file *, int); |
| 2391 | extern int simple_empty(struct dentry *); | 2359 | extern int simple_empty(struct dentry *); |
| 2392 | extern int simple_readpage(struct file *file, struct page *page); | 2360 | extern int simple_readpage(struct file *file, struct page *page); |
| @@ -2423,8 +2391,7 @@ extern int buffer_migrate_page(struct address_space *, | |||
| 2423 | 2391 | ||
| 2424 | extern int inode_change_ok(const struct inode *, struct iattr *); | 2392 | extern int inode_change_ok(const struct inode *, struct iattr *); |
| 2425 | extern int inode_newsize_ok(const struct inode *, loff_t offset); | 2393 | extern int inode_newsize_ok(const struct inode *, loff_t offset); |
| 2426 | extern int __must_check inode_setattr(struct inode *, const struct iattr *); | 2394 | extern void setattr_copy(struct inode *inode, const struct iattr *attr); |
| 2427 | extern void generic_setattr(struct inode *inode, const struct iattr *attr); | ||
| 2428 | 2395 | ||
| 2429 | extern void file_update_time(struct file *file); | 2396 | extern void file_update_time(struct file *file); |
| 2430 | 2397 | ||
| @@ -2515,7 +2482,8 @@ int proc_nr_files(struct ctl_table *table, int write, | |||
| 2515 | int __init get_filesystem_list(char *buf); | 2482 | int __init get_filesystem_list(char *buf); |
| 2516 | 2483 | ||
| 2517 | #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) | 2484 | #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) |
| 2518 | #define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE)) | 2485 | #define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \ |
| 2486 | (flag & FMODE_NONOTIFY))) | ||
| 2519 | 2487 | ||
| 2520 | #endif /* __KERNEL__ */ | 2488 | #endif /* __KERNEL__ */ |
| 2521 | #endif /* _LINUX_FS_H */ | 2489 | #endif /* _LINUX_FS_H */ |
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index 78a05bfcd8eb..eca3d5202138 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h | |||
| @@ -21,4 +21,31 @@ extern void free_fs_struct(struct fs_struct *); | |||
| 21 | extern void daemonize_fs_struct(void); | 21 | extern void daemonize_fs_struct(void); |
| 22 | extern int unshare_fs_struct(void); | 22 | extern int unshare_fs_struct(void); |
| 23 | 23 | ||
| 24 | static inline void get_fs_root(struct fs_struct *fs, struct path *root) | ||
| 25 | { | ||
| 26 | read_lock(&fs->lock); | ||
| 27 | *root = fs->root; | ||
| 28 | path_get(root); | ||
| 29 | read_unlock(&fs->lock); | ||
| 30 | } | ||
| 31 | |||
| 32 | static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) | ||
| 33 | { | ||
| 34 | read_lock(&fs->lock); | ||
| 35 | *pwd = fs->pwd; | ||
| 36 | path_get(pwd); | ||
| 37 | read_unlock(&fs->lock); | ||
| 38 | } | ||
| 39 | |||
| 40 | static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, | ||
| 41 | struct path *pwd) | ||
| 42 | { | ||
| 43 | read_lock(&fs->lock); | ||
| 44 | *root = fs->root; | ||
| 45 | path_get(root); | ||
| 46 | *pwd = fs->pwd; | ||
| 47 | path_get(pwd); | ||
| 48 | read_unlock(&fs->lock); | ||
| 49 | } | ||
| 50 | |||
| 24 | #endif /* _LINUX_FS_STRUCT_H */ | 51 | #endif /* _LINUX_FS_STRUCT_H */ |
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index c57db27ac861..b8581c09d19f 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h | |||
| @@ -20,7 +20,7 @@ | |||
| 20 | 20 | ||
| 21 | #include <linux/fscache.h> | 21 | #include <linux/fscache.h> |
| 22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
| 23 | #include <linux/slow-work.h> | 23 | #include <linux/workqueue.h> |
| 24 | 24 | ||
| 25 | #define NR_MAXCACHES BITS_PER_LONG | 25 | #define NR_MAXCACHES BITS_PER_LONG |
| 26 | 26 | ||
| @@ -76,18 +76,14 @@ typedef void (*fscache_operation_release_t)(struct fscache_operation *op); | |||
| 76 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); | 76 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); |
| 77 | 77 | ||
| 78 | struct fscache_operation { | 78 | struct fscache_operation { |
| 79 | union { | 79 | struct work_struct work; /* record for async ops */ |
| 80 | struct work_struct fast_work; /* record for fast ops */ | ||
| 81 | struct slow_work slow_work; /* record for (very) slow ops */ | ||
| 82 | }; | ||
| 83 | struct list_head pend_link; /* link in object->pending_ops */ | 80 | struct list_head pend_link; /* link in object->pending_ops */ |
| 84 | struct fscache_object *object; /* object to be operated upon */ | 81 | struct fscache_object *object; /* object to be operated upon */ |
| 85 | 82 | ||
| 86 | unsigned long flags; | 83 | unsigned long flags; |
| 87 | #define FSCACHE_OP_TYPE 0x000f /* operation type */ | 84 | #define FSCACHE_OP_TYPE 0x000f /* operation type */ |
| 88 | #define FSCACHE_OP_FAST 0x0001 /* - fast op, processor may not sleep for disk */ | 85 | #define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */ |
| 89 | #define FSCACHE_OP_SLOW 0x0002 /* - (very) slow op, processor may sleep for disk */ | 86 | #define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done be issuing thread, not pool */ |
| 90 | #define FSCACHE_OP_MYTHREAD 0x0003 /* - processing is done be issuing thread, not pool */ | ||
| 91 | #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ | 87 | #define FSCACHE_OP_WAITING 4 /* cleared when op is woken */ |
| 92 | #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ | 88 | #define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */ |
| 93 | #define FSCACHE_OP_DEAD 6 /* op is now dead */ | 89 | #define FSCACHE_OP_DEAD 6 /* op is now dead */ |
| @@ -105,7 +101,8 @@ struct fscache_operation { | |||
| 105 | /* operation releaser */ | 101 | /* operation releaser */ |
| 106 | fscache_operation_release_t release; | 102 | fscache_operation_release_t release; |
| 107 | 103 | ||
| 108 | #ifdef CONFIG_SLOW_WORK_DEBUG | 104 | #ifdef CONFIG_WORKQUEUE_DEBUGFS |
| 105 | struct work_struct put_work; /* work to delay operation put */ | ||
| 109 | const char *name; /* operation name */ | 106 | const char *name; /* operation name */ |
| 110 | const char *state; /* operation state */ | 107 | const char *state; /* operation state */ |
| 111 | #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) | 108 | #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0) |
| @@ -117,7 +114,7 @@ struct fscache_operation { | |||
| 117 | }; | 114 | }; |
| 118 | 115 | ||
| 119 | extern atomic_t fscache_op_debug_id; | 116 | extern atomic_t fscache_op_debug_id; |
| 120 | extern const struct slow_work_ops fscache_op_slow_work_ops; | 117 | extern void fscache_op_work_func(struct work_struct *work); |
| 121 | 118 | ||
| 122 | extern void fscache_enqueue_operation(struct fscache_operation *); | 119 | extern void fscache_enqueue_operation(struct fscache_operation *); |
| 123 | extern void fscache_put_operation(struct fscache_operation *); | 120 | extern void fscache_put_operation(struct fscache_operation *); |
| @@ -128,33 +125,21 @@ extern void fscache_put_operation(struct fscache_operation *); | |||
| 128 | * @release: The release function to assign | 125 | * @release: The release function to assign |
| 129 | * | 126 | * |
| 130 | * Do basic initialisation of an operation. The caller must still set flags, | 127 | * Do basic initialisation of an operation. The caller must still set flags, |
| 131 | * object, either fast_work or slow_work if necessary, and processor if needed. | 128 | * object and processor if needed. |
| 132 | */ | 129 | */ |
| 133 | static inline void fscache_operation_init(struct fscache_operation *op, | 130 | static inline void fscache_operation_init(struct fscache_operation *op, |
| 134 | fscache_operation_release_t release) | 131 | fscache_operation_processor_t processor, |
| 132 | fscache_operation_release_t release) | ||
| 135 | { | 133 | { |
| 134 | INIT_WORK(&op->work, fscache_op_work_func); | ||
| 136 | atomic_set(&op->usage, 1); | 135 | atomic_set(&op->usage, 1); |
| 137 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); | 136 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); |
| 137 | op->processor = processor; | ||
| 138 | op->release = release; | 138 | op->release = release; |
| 139 | INIT_LIST_HEAD(&op->pend_link); | 139 | INIT_LIST_HEAD(&op->pend_link); |
| 140 | fscache_set_op_state(op, "Init"); | 140 | fscache_set_op_state(op, "Init"); |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | /** | ||
| 144 | * fscache_operation_init_slow - Do additional initialisation of a slow op | ||
| 145 | * @op: The operation to initialise | ||
| 146 | * @processor: The processor function to assign | ||
| 147 | * | ||
| 148 | * Do additional initialisation of an operation as required for slow work. | ||
| 149 | */ | ||
| 150 | static inline | ||
| 151 | void fscache_operation_init_slow(struct fscache_operation *op, | ||
| 152 | fscache_operation_processor_t processor) | ||
| 153 | { | ||
| 154 | op->processor = processor; | ||
| 155 | slow_work_init(&op->slow_work, &fscache_op_slow_work_ops); | ||
| 156 | } | ||
| 157 | |||
| 158 | /* | 143 | /* |
| 159 | * data read operation | 144 | * data read operation |
| 160 | */ | 145 | */ |
| @@ -389,7 +374,7 @@ struct fscache_object { | |||
| 389 | struct fscache_cache *cache; /* cache that supplied this object */ | 374 | struct fscache_cache *cache; /* cache that supplied this object */ |
| 390 | struct fscache_cookie *cookie; /* netfs's file/index object */ | 375 | struct fscache_cookie *cookie; /* netfs's file/index object */ |
| 391 | struct fscache_object *parent; /* parent object */ | 376 | struct fscache_object *parent; /* parent object */ |
| 392 | struct slow_work work; /* attention scheduling record */ | 377 | struct work_struct work; /* attention scheduling record */ |
| 393 | struct list_head dependents; /* FIFO of dependent objects */ | 378 | struct list_head dependents; /* FIFO of dependent objects */ |
| 394 | struct list_head dep_link; /* link in parent's dependents list */ | 379 | struct list_head dep_link; /* link in parent's dependents list */ |
| 395 | struct list_head pending_ops; /* unstarted operations on this object */ | 380 | struct list_head pending_ops; /* unstarted operations on this object */ |
| @@ -411,7 +396,7 @@ extern const char *fscache_object_states[]; | |||
| 411 | (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ | 396 | (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) && \ |
| 412 | (obj)->state >= FSCACHE_OBJECT_DYING) | 397 | (obj)->state >= FSCACHE_OBJECT_DYING) |
| 413 | 398 | ||
| 414 | extern const struct slow_work_ops fscache_object_slow_work_ops; | 399 | extern void fscache_object_work_func(struct work_struct *work); |
| 415 | 400 | ||
| 416 | /** | 401 | /** |
| 417 | * fscache_object_init - Initialise a cache object description | 402 | * fscache_object_init - Initialise a cache object description |
| @@ -433,7 +418,7 @@ void fscache_object_init(struct fscache_object *object, | |||
| 433 | spin_lock_init(&object->lock); | 418 | spin_lock_init(&object->lock); |
| 434 | INIT_LIST_HEAD(&object->cache_link); | 419 | INIT_LIST_HEAD(&object->cache_link); |
| 435 | INIT_HLIST_NODE(&object->cookie_link); | 420 | INIT_HLIST_NODE(&object->cookie_link); |
| 436 | vslow_work_init(&object->work, &fscache_object_slow_work_ops); | 421 | INIT_WORK(&object->work, fscache_object_work_func); |
| 437 | INIT_LIST_HEAD(&object->dependents); | 422 | INIT_LIST_HEAD(&object->dependents); |
| 438 | INIT_LIST_HEAD(&object->dep_link); | 423 | INIT_LIST_HEAD(&object->dep_link); |
| 439 | INIT_LIST_HEAD(&object->pending_ops); | 424 | INIT_LIST_HEAD(&object->pending_ops); |
| @@ -534,6 +519,8 @@ extern void fscache_io_error(struct fscache_cache *cache); | |||
| 534 | extern void fscache_mark_pages_cached(struct fscache_retrieval *op, | 519 | extern void fscache_mark_pages_cached(struct fscache_retrieval *op, |
| 535 | struct pagevec *pagevec); | 520 | struct pagevec *pagevec); |
| 536 | 521 | ||
| 522 | extern bool fscache_object_sleep_till_congested(signed long *timeoutp); | ||
| 523 | |||
| 537 | extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, | 524 | extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, |
| 538 | const void *data, | 525 | const void *data, |
| 539 | uint16_t datalen); | 526 | uint16_t datalen); |
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 01755909ce81..e4e2204187ee 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h | |||
| @@ -11,8 +11,6 @@ | |||
| 11 | * (C) Copyright 2005 Robert Love | 11 | * (C) Copyright 2005 Robert Love |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #include <linux/dnotify.h> | ||
| 15 | #include <linux/inotify.h> | ||
| 16 | #include <linux/fsnotify_backend.h> | 14 | #include <linux/fsnotify_backend.h> |
| 17 | #include <linux/audit.h> | 15 | #include <linux/audit.h> |
| 18 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
| @@ -21,35 +19,52 @@ | |||
| 21 | * fsnotify_d_instantiate - instantiate a dentry for inode | 19 | * fsnotify_d_instantiate - instantiate a dentry for inode |
| 22 | * Called with dcache_lock held. | 20 | * Called with dcache_lock held. |
| 23 | */ | 21 | */ |
| 24 | static inline void fsnotify_d_instantiate(struct dentry *entry, | 22 | static inline void fsnotify_d_instantiate(struct dentry *dentry, |
| 25 | struct inode *inode) | 23 | struct inode *inode) |
| 26 | { | 24 | { |
| 27 | __fsnotify_d_instantiate(entry, inode); | 25 | __fsnotify_d_instantiate(dentry, inode); |
| 28 | |||
| 29 | inotify_d_instantiate(entry, inode); | ||
| 30 | } | 26 | } |
| 31 | 27 | ||
| 32 | /* Notify this dentry's parent about a child's events. */ | 28 | /* Notify this dentry's parent about a child's events. */ |
| 33 | static inline void fsnotify_parent(struct dentry *dentry, __u32 mask) | 29 | static inline void fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask) |
| 34 | { | 30 | { |
| 35 | __fsnotify_parent(dentry, mask); | 31 | if (!dentry) |
| 32 | dentry = file->f_path.dentry; | ||
| 33 | |||
| 34 | __fsnotify_parent(file, dentry, mask); | ||
| 35 | } | ||
| 36 | 36 | ||
| 37 | inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name); | 37 | /* simple call site for access decisions */ |
| 38 | static inline int fsnotify_perm(struct file *file, int mask) | ||
| 39 | { | ||
| 40 | struct inode *inode = file->f_path.dentry->d_inode; | ||
| 41 | __u32 fsnotify_mask = 0; | ||
| 42 | |||
| 43 | if (file->f_mode & FMODE_NONOTIFY) | ||
| 44 | return 0; | ||
| 45 | if (!(mask & (MAY_READ | MAY_OPEN))) | ||
| 46 | return 0; | ||
| 47 | if (mask & MAY_OPEN) | ||
| 48 | fsnotify_mask = FS_OPEN_PERM; | ||
| 49 | else if (mask & MAY_READ) | ||
| 50 | fsnotify_mask = FS_ACCESS_PERM; | ||
| 51 | else | ||
| 52 | BUG(); | ||
| 53 | |||
| 54 | return fsnotify(inode, fsnotify_mask, file, FSNOTIFY_EVENT_FILE, NULL, 0); | ||
| 38 | } | 55 | } |
| 39 | 56 | ||
| 40 | /* | 57 | /* |
| 41 | * fsnotify_d_move - entry has been moved | 58 | * fsnotify_d_move - dentry has been moved |
| 42 | * Called with dcache_lock and entry->d_lock held. | 59 | * Called with dcache_lock and dentry->d_lock held. |
| 43 | */ | 60 | */ |
| 44 | static inline void fsnotify_d_move(struct dentry *entry) | 61 | static inline void fsnotify_d_move(struct dentry *dentry) |
| 45 | { | 62 | { |
| 46 | /* | 63 | /* |
| 47 | * On move we need to update entry->d_flags to indicate if the new parent | 64 | * On move we need to update dentry->d_flags to indicate if the new parent |
| 48 | * cares about events from this entry. | 65 | * cares about events from this dentry. |
| 49 | */ | 66 | */ |
| 50 | __fsnotify_update_dcache_flags(entry); | 67 | __fsnotify_update_dcache_flags(dentry); |
| 51 | |||
| 52 | inotify_d_move(entry); | ||
| 53 | } | 68 | } |
| 54 | 69 | ||
| 55 | /* | 70 | /* |
| @@ -57,8 +72,6 @@ static inline void fsnotify_d_move(struct dentry *entry) | |||
| 57 | */ | 72 | */ |
| 58 | static inline void fsnotify_link_count(struct inode *inode) | 73 | static inline void fsnotify_link_count(struct inode *inode) |
| 59 | { | 74 | { |
| 60 | inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL); | ||
| 61 | |||
| 62 | fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 75 | fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0); |
| 63 | } | 76 | } |
| 64 | 77 | ||
| @@ -66,45 +79,31 @@ static inline void fsnotify_link_count(struct inode *inode) | |||
| 66 | * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir | 79 | * fsnotify_move - file old_name at old_dir was moved to new_name at new_dir |
| 67 | */ | 80 | */ |
| 68 | static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, | 81 | static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, |
| 69 | const char *old_name, | 82 | const unsigned char *old_name, |
| 70 | int isdir, struct inode *target, struct dentry *moved) | 83 | int isdir, struct inode *target, struct dentry *moved) |
| 71 | { | 84 | { |
| 72 | struct inode *source = moved->d_inode; | 85 | struct inode *source = moved->d_inode; |
| 73 | u32 in_cookie = inotify_get_cookie(); | ||
| 74 | u32 fs_cookie = fsnotify_get_cookie(); | 86 | u32 fs_cookie = fsnotify_get_cookie(); |
| 75 | __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM); | 87 | __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM); |
| 76 | __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO); | 88 | __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO); |
| 77 | const char *new_name = moved->d_name.name; | 89 | const unsigned char *new_name = moved->d_name.name; |
| 78 | 90 | ||
| 79 | if (old_dir == new_dir) | 91 | if (old_dir == new_dir) |
| 80 | old_dir_mask |= FS_DN_RENAME; | 92 | old_dir_mask |= FS_DN_RENAME; |
| 81 | 93 | ||
| 82 | if (isdir) { | 94 | if (isdir) { |
| 83 | isdir = IN_ISDIR; | ||
| 84 | old_dir_mask |= FS_IN_ISDIR; | 95 | old_dir_mask |= FS_IN_ISDIR; |
| 85 | new_dir_mask |= FS_IN_ISDIR; | 96 | new_dir_mask |= FS_IN_ISDIR; |
| 86 | } | 97 | } |
| 87 | 98 | ||
| 88 | inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name, | ||
| 89 | source); | ||
| 90 | inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name, | ||
| 91 | source); | ||
| 92 | |||
| 93 | fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie); | 99 | fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie); |
| 94 | fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie); | 100 | fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie); |
| 95 | 101 | ||
| 96 | if (target) { | 102 | if (target) |
| 97 | inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL); | ||
| 98 | inotify_inode_is_dead(target); | ||
| 99 | |||
| 100 | /* this is really a link_count change not a removal */ | ||
| 101 | fsnotify_link_count(target); | 103 | fsnotify_link_count(target); |
| 102 | } | ||
| 103 | 104 | ||
| 104 | if (source) { | 105 | if (source) |
| 105 | inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL); | ||
| 106 | fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 106 | fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0); |
| 107 | } | ||
| 108 | audit_inode_child(moved, new_dir); | 107 | audit_inode_child(moved, new_dir); |
| 109 | } | 108 | } |
| 110 | 109 | ||
| @@ -117,6 +116,14 @@ static inline void fsnotify_inode_delete(struct inode *inode) | |||
| 117 | } | 116 | } |
| 118 | 117 | ||
| 119 | /* | 118 | /* |
| 119 | * fsnotify_vfsmount_delete - a vfsmount is being destroyed, clean up is needed | ||
| 120 | */ | ||
| 121 | static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) | ||
| 122 | { | ||
| 123 | __fsnotify_vfsmount_delete(mnt); | ||
| 124 | } | ||
| 125 | |||
| 126 | /* | ||
| 120 | * fsnotify_nameremove - a filename was removed from a directory | 127 | * fsnotify_nameremove - a filename was removed from a directory |
| 121 | */ | 128 | */ |
| 122 | static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) | 129 | static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) |
| @@ -126,7 +133,7 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) | |||
| 126 | if (isdir) | 133 | if (isdir) |
| 127 | mask |= FS_IN_ISDIR; | 134 | mask |= FS_IN_ISDIR; |
| 128 | 135 | ||
| 129 | fsnotify_parent(dentry, mask); | 136 | fsnotify_parent(NULL, dentry, mask); |
| 130 | } | 137 | } |
| 131 | 138 | ||
| 132 | /* | 139 | /* |
| @@ -134,9 +141,6 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) | |||
| 134 | */ | 141 | */ |
| 135 | static inline void fsnotify_inoderemove(struct inode *inode) | 142 | static inline void fsnotify_inoderemove(struct inode *inode) |
| 136 | { | 143 | { |
| 137 | inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL); | ||
| 138 | inotify_inode_is_dead(inode); | ||
| 139 | |||
| 140 | fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 144 | fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0); |
| 141 | __fsnotify_inode_delete(inode); | 145 | __fsnotify_inode_delete(inode); |
| 142 | } | 146 | } |
| @@ -146,8 +150,6 @@ static inline void fsnotify_inoderemove(struct inode *inode) | |||
| 146 | */ | 150 | */ |
| 147 | static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) | 151 | static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) |
| 148 | { | 152 | { |
| 149 | inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name, | ||
| 150 | dentry->d_inode); | ||
| 151 | audit_inode_child(dentry, inode); | 153 | audit_inode_child(dentry, inode); |
| 152 | 154 | ||
| 153 | fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); | 155 | fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); |
| @@ -160,8 +162,6 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) | |||
| 160 | */ | 162 | */ |
| 161 | static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) | 163 | static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry) |
| 162 | { | 164 | { |
| 163 | inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name, | ||
| 164 | inode); | ||
| 165 | fsnotify_link_count(inode); | 165 | fsnotify_link_count(inode); |
| 166 | audit_inode_child(new_dentry, dir); | 166 | audit_inode_child(new_dentry, dir); |
| 167 | 167 | ||
| @@ -176,7 +176,6 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) | |||
| 176 | __u32 mask = (FS_CREATE | FS_IN_ISDIR); | 176 | __u32 mask = (FS_CREATE | FS_IN_ISDIR); |
| 177 | struct inode *d_inode = dentry->d_inode; | 177 | struct inode *d_inode = dentry->d_inode; |
| 178 | 178 | ||
| 179 | inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode); | ||
| 180 | audit_inode_child(dentry, inode); | 179 | audit_inode_child(dentry, inode); |
| 181 | 180 | ||
| 182 | fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); | 181 | fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); |
| @@ -185,52 +184,52 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) | |||
| 185 | /* | 184 | /* |
| 186 | * fsnotify_access - file was read | 185 | * fsnotify_access - file was read |
| 187 | */ | 186 | */ |
| 188 | static inline void fsnotify_access(struct dentry *dentry) | 187 | static inline void fsnotify_access(struct file *file) |
| 189 | { | 188 | { |
| 190 | struct inode *inode = dentry->d_inode; | 189 | struct inode *inode = file->f_path.dentry->d_inode; |
| 191 | __u32 mask = FS_ACCESS; | 190 | __u32 mask = FS_ACCESS; |
| 192 | 191 | ||
| 193 | if (S_ISDIR(inode->i_mode)) | 192 | if (S_ISDIR(inode->i_mode)) |
| 194 | mask |= FS_IN_ISDIR; | 193 | mask |= FS_IN_ISDIR; |
| 195 | 194 | ||
| 196 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 195 | if (!(file->f_mode & FMODE_NONOTIFY)) { |
| 197 | 196 | fsnotify_parent(file, NULL, mask); | |
| 198 | fsnotify_parent(dentry, mask); | 197 | fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0); |
| 199 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 198 | } |
| 200 | } | 199 | } |
| 201 | 200 | ||
| 202 | /* | 201 | /* |
| 203 | * fsnotify_modify - file was modified | 202 | * fsnotify_modify - file was modified |
| 204 | */ | 203 | */ |
| 205 | static inline void fsnotify_modify(struct dentry *dentry) | 204 | static inline void fsnotify_modify(struct file *file) |
| 206 | { | 205 | { |
| 207 | struct inode *inode = dentry->d_inode; | 206 | struct inode *inode = file->f_path.dentry->d_inode; |
| 208 | __u32 mask = FS_MODIFY; | 207 | __u32 mask = FS_MODIFY; |
| 209 | 208 | ||
| 210 | if (S_ISDIR(inode->i_mode)) | 209 | if (S_ISDIR(inode->i_mode)) |
| 211 | mask |= FS_IN_ISDIR; | 210 | mask |= FS_IN_ISDIR; |
| 212 | 211 | ||
| 213 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 212 | if (!(file->f_mode & FMODE_NONOTIFY)) { |
| 214 | 213 | fsnotify_parent(file, NULL, mask); | |
| 215 | fsnotify_parent(dentry, mask); | 214 | fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0); |
| 216 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 215 | } |
| 217 | } | 216 | } |
| 218 | 217 | ||
| 219 | /* | 218 | /* |
| 220 | * fsnotify_open - file was opened | 219 | * fsnotify_open - file was opened |
| 221 | */ | 220 | */ |
| 222 | static inline void fsnotify_open(struct dentry *dentry) | 221 | static inline void fsnotify_open(struct file *file) |
| 223 | { | 222 | { |
| 224 | struct inode *inode = dentry->d_inode; | 223 | struct inode *inode = file->f_path.dentry->d_inode; |
| 225 | __u32 mask = FS_OPEN; | 224 | __u32 mask = FS_OPEN; |
| 226 | 225 | ||
| 227 | if (S_ISDIR(inode->i_mode)) | 226 | if (S_ISDIR(inode->i_mode)) |
| 228 | mask |= FS_IN_ISDIR; | 227 | mask |= FS_IN_ISDIR; |
| 229 | 228 | ||
| 230 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 229 | if (!(file->f_mode & FMODE_NONOTIFY)) { |
| 231 | 230 | fsnotify_parent(file, NULL, mask); | |
| 232 | fsnotify_parent(dentry, mask); | 231 | fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0); |
| 233 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 232 | } |
| 234 | } | 233 | } |
| 235 | 234 | ||
| 236 | /* | 235 | /* |
| @@ -238,18 +237,17 @@ static inline void fsnotify_open(struct dentry *dentry) | |||
| 238 | */ | 237 | */ |
| 239 | static inline void fsnotify_close(struct file *file) | 238 | static inline void fsnotify_close(struct file *file) |
| 240 | { | 239 | { |
| 241 | struct dentry *dentry = file->f_path.dentry; | 240 | struct inode *inode = file->f_path.dentry->d_inode; |
| 242 | struct inode *inode = dentry->d_inode; | ||
| 243 | fmode_t mode = file->f_mode; | 241 | fmode_t mode = file->f_mode; |
| 244 | __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; | 242 | __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE; |
| 245 | 243 | ||
| 246 | if (S_ISDIR(inode->i_mode)) | 244 | if (S_ISDIR(inode->i_mode)) |
| 247 | mask |= FS_IN_ISDIR; | 245 | mask |= FS_IN_ISDIR; |
| 248 | 246 | ||
| 249 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 247 | if (!(file->f_mode & FMODE_NONOTIFY)) { |
| 250 | 248 | fsnotify_parent(file, NULL, mask); | |
| 251 | fsnotify_parent(dentry, mask); | 249 | fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0); |
| 252 | fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0); | 250 | } |
| 253 | } | 251 | } |
| 254 | 252 | ||
| 255 | /* | 253 | /* |
| @@ -263,9 +261,7 @@ static inline void fsnotify_xattr(struct dentry *dentry) | |||
| 263 | if (S_ISDIR(inode->i_mode)) | 261 | if (S_ISDIR(inode->i_mode)) |
| 264 | mask |= FS_IN_ISDIR; | 262 | mask |= FS_IN_ISDIR; |
| 265 | 263 | ||
| 266 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | 264 | fsnotify_parent(NULL, dentry, mask); |
| 267 | |||
| 268 | fsnotify_parent(dentry, mask); | ||
| 269 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 265 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); |
| 270 | } | 266 | } |
| 271 | 267 | ||
| @@ -299,19 +295,18 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid) | |||
| 299 | if (mask) { | 295 | if (mask) { |
| 300 | if (S_ISDIR(inode->i_mode)) | 296 | if (S_ISDIR(inode->i_mode)) |
| 301 | mask |= FS_IN_ISDIR; | 297 | mask |= FS_IN_ISDIR; |
| 302 | inotify_inode_queue_event(inode, mask, 0, NULL, NULL); | ||
| 303 | 298 | ||
| 304 | fsnotify_parent(dentry, mask); | 299 | fsnotify_parent(NULL, dentry, mask); |
| 305 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 300 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); |
| 306 | } | 301 | } |
| 307 | } | 302 | } |
| 308 | 303 | ||
| 309 | #if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY) /* notify helpers */ | 304 | #if defined(CONFIG_FSNOTIFY) /* notify helpers */ |
| 310 | 305 | ||
| 311 | /* | 306 | /* |
| 312 | * fsnotify_oldname_init - save off the old filename before we change it | 307 | * fsnotify_oldname_init - save off the old filename before we change it |
| 313 | */ | 308 | */ |
| 314 | static inline const char *fsnotify_oldname_init(const char *name) | 309 | static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name) |
| 315 | { | 310 | { |
| 316 | return kstrdup(name, GFP_KERNEL); | 311 | return kstrdup(name, GFP_KERNEL); |
| 317 | } | 312 | } |
| @@ -319,22 +314,22 @@ static inline const char *fsnotify_oldname_init(const char *name) | |||
| 319 | /* | 314 | /* |
| 320 | * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init | 315 | * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init |
| 321 | */ | 316 | */ |
| 322 | static inline void fsnotify_oldname_free(const char *old_name) | 317 | static inline void fsnotify_oldname_free(const unsigned char *old_name) |
| 323 | { | 318 | { |
| 324 | kfree(old_name); | 319 | kfree(old_name); |
| 325 | } | 320 | } |
| 326 | 321 | ||
| 327 | #else /* CONFIG_INOTIFY || CONFIG_FSNOTIFY */ | 322 | #else /* CONFIG_FSNOTIFY */ |
| 328 | 323 | ||
| 329 | static inline const char *fsnotify_oldname_init(const char *name) | 324 | static inline const char *fsnotify_oldname_init(const unsigned char *name) |
| 330 | { | 325 | { |
| 331 | return NULL; | 326 | return NULL; |
| 332 | } | 327 | } |
| 333 | 328 | ||
| 334 | static inline void fsnotify_oldname_free(const char *old_name) | 329 | static inline void fsnotify_oldname_free(const unsigned char *old_name) |
| 335 | { | 330 | { |
| 336 | } | 331 | } |
| 337 | 332 | ||
| 338 | #endif /* ! CONFIG_INOTIFY */ | 333 | #endif /* CONFIG_FSNOTIFY */ |
| 339 | 334 | ||
| 340 | #endif /* _LINUX_FS_NOTIFY_H */ | 335 | #endif /* _LINUX_FS_NOTIFY_H */ |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 4d6f47b51189..9bbfd7204b04 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
| @@ -41,6 +41,10 @@ | |||
| 41 | #define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ | 41 | #define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */ |
| 42 | #define FS_IN_IGNORED 0x00008000 /* last inotify event here */ | 42 | #define FS_IN_IGNORED 0x00008000 /* last inotify event here */ |
| 43 | 43 | ||
| 44 | #define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */ | ||
| 45 | #define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */ | ||
| 46 | |||
| 47 | #define FS_EXCL_UNLINK 0x04000000 /* do not send events if object is unlinked */ | ||
| 44 | #define FS_IN_ISDIR 0x40000000 /* event occurred against dir */ | 48 | #define FS_IN_ISDIR 0x40000000 /* event occurred against dir */ |
| 45 | #define FS_IN_ONESHOT 0x80000000 /* only send event once */ | 49 | #define FS_IN_ONESHOT 0x80000000 /* only send event once */ |
| 46 | 50 | ||
| @@ -58,13 +62,20 @@ | |||
| 58 | FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ | 62 | FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ |
| 59 | FS_DELETE) | 63 | FS_DELETE) |
| 60 | 64 | ||
| 61 | /* listeners that hard code group numbers near the top */ | 65 | #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) |
| 62 | #define DNOTIFY_GROUP_NUM UINT_MAX | 66 | |
| 63 | #define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1) | 67 | #define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ |
| 68 | FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \ | ||
| 69 | FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ | ||
| 70 | FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ | ||
| 71 | FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ | ||
| 72 | FS_OPEN_PERM | FS_ACCESS_PERM | FS_EXCL_UNLINK | \ | ||
| 73 | FS_IN_ISDIR | FS_IN_ONESHOT | FS_DN_RENAME | \ | ||
| 74 | FS_DN_MULTISHOT | FS_EVENT_ON_CHILD) | ||
| 64 | 75 | ||
| 65 | struct fsnotify_group; | 76 | struct fsnotify_group; |
| 66 | struct fsnotify_event; | 77 | struct fsnotify_event; |
| 67 | struct fsnotify_mark_entry; | 78 | struct fsnotify_mark; |
| 68 | struct fsnotify_event_private_data; | 79 | struct fsnotify_event_private_data; |
| 69 | 80 | ||
| 70 | /* | 81 | /* |
| @@ -80,10 +91,16 @@ struct fsnotify_event_private_data; | |||
| 80 | * valid group and inode to use to clean up. | 91 | * valid group and inode to use to clean up. |
| 81 | */ | 92 | */ |
| 82 | struct fsnotify_ops { | 93 | struct fsnotify_ops { |
| 83 | bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask); | 94 | bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, |
| 84 | int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event); | 95 | struct fsnotify_mark *inode_mark, |
| 96 | struct fsnotify_mark *vfsmount_mark, | ||
| 97 | __u32 mask, void *data, int data_type); | ||
| 98 | int (*handle_event)(struct fsnotify_group *group, | ||
| 99 | struct fsnotify_mark *inode_mark, | ||
| 100 | struct fsnotify_mark *vfsmount_mark, | ||
| 101 | struct fsnotify_event *event); | ||
| 85 | void (*free_group_priv)(struct fsnotify_group *group); | 102 | void (*free_group_priv)(struct fsnotify_group *group); |
| 86 | void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group); | 103 | void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); |
| 87 | void (*free_event_priv)(struct fsnotify_event_private_data *priv); | 104 | void (*free_event_priv)(struct fsnotify_event_private_data *priv); |
| 88 | }; | 105 | }; |
| 89 | 106 | ||
| @@ -95,22 +112,6 @@ struct fsnotify_ops { | |||
| 95 | */ | 112 | */ |
| 96 | struct fsnotify_group { | 113 | struct fsnotify_group { |
| 97 | /* | 114 | /* |
| 98 | * global list of all groups receiving events from fsnotify. | ||
| 99 | * anchored by fsnotify_groups and protected by either fsnotify_grp_mutex | ||
| 100 | * or fsnotify_grp_srcu depending on write vs read. | ||
| 101 | */ | ||
| 102 | struct list_head group_list; | ||
| 103 | |||
| 104 | /* | ||
| 105 | * Defines all of the event types in which this group is interested. | ||
| 106 | * This mask is a bitwise OR of the FS_* events from above. Each time | ||
| 107 | * this mask changes for a group (if it changes) the correct functions | ||
| 108 | * must be called to update the global structures which indicate global | ||
| 109 | * interest in event types. | ||
| 110 | */ | ||
| 111 | __u32 mask; | ||
| 112 | |||
| 113 | /* | ||
| 114 | * How the refcnt is used is up to each group. When the refcnt hits 0 | 115 | * How the refcnt is used is up to each group. When the refcnt hits 0 |
| 115 | * fsnotify will clean up all of the resources associated with this group. | 116 | * fsnotify will clean up all of the resources associated with this group. |
| 116 | * As an example, the dnotify group will always have a refcnt=1 and that | 117 | * As an example, the dnotify group will always have a refcnt=1 and that |
| @@ -119,7 +120,6 @@ struct fsnotify_group { | |||
| 119 | * closed. | 120 | * closed. |
| 120 | */ | 121 | */ |
| 121 | atomic_t refcnt; /* things with interest in this group */ | 122 | atomic_t refcnt; /* things with interest in this group */ |
| 122 | unsigned int group_num; /* simply prevents accidental group collision */ | ||
| 123 | 123 | ||
| 124 | const struct fsnotify_ops *ops; /* how this group handles things */ | 124 | const struct fsnotify_ops *ops; /* how this group handles things */ |
| 125 | 125 | ||
| @@ -130,15 +130,12 @@ struct fsnotify_group { | |||
| 130 | unsigned int q_len; /* events on the queue */ | 130 | unsigned int q_len; /* events on the queue */ |
| 131 | unsigned int max_events; /* maximum events allowed on the list */ | 131 | unsigned int max_events; /* maximum events allowed on the list */ |
| 132 | 132 | ||
| 133 | /* stores all fastapth entries assoc with this group so they can be cleaned on unregister */ | 133 | /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ |
| 134 | spinlock_t mark_lock; /* protect mark_entries list */ | 134 | spinlock_t mark_lock; /* protect marks_list */ |
| 135 | atomic_t num_marks; /* 1 for each mark entry and 1 for not being | 135 | atomic_t num_marks; /* 1 for each mark and 1 for not being |
| 136 | * past the point of no return when freeing | 136 | * past the point of no return when freeing |
| 137 | * a group */ | 137 | * a group */ |
| 138 | struct list_head mark_entries; /* all inode mark entries for this group */ | 138 | struct list_head marks_list; /* all inode marks for this group */ |
| 139 | |||
| 140 | /* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */ | ||
| 141 | bool on_group_list; | ||
| 142 | 139 | ||
| 143 | /* groups can define private fields here or use the void *private */ | 140 | /* groups can define private fields here or use the void *private */ |
| 144 | union { | 141 | union { |
| @@ -152,6 +149,17 @@ struct fsnotify_group { | |||
| 152 | struct user_struct *user; | 149 | struct user_struct *user; |
| 153 | } inotify_data; | 150 | } inotify_data; |
| 154 | #endif | 151 | #endif |
| 152 | #ifdef CONFIG_FANOTIFY | ||
| 153 | struct fanotify_group_private_data { | ||
| 154 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | ||
| 155 | /* allows a group to block waiting for a userspace response */ | ||
| 156 | struct mutex access_mutex; | ||
| 157 | struct list_head access_list; | ||
| 158 | wait_queue_head_t access_waitq; | ||
| 159 | #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ | ||
| 160 | int f_flags; | ||
| 161 | } fanotify_data; | ||
| 162 | #endif /* CONFIG_FANOTIFY */ | ||
| 155 | }; | 163 | }; |
| 156 | }; | 164 | }; |
| 157 | 165 | ||
| @@ -195,35 +203,57 @@ struct fsnotify_event { | |||
| 195 | /* to_tell may ONLY be dereferenced during handle_event(). */ | 203 | /* to_tell may ONLY be dereferenced during handle_event(). */ |
| 196 | struct inode *to_tell; /* either the inode the event happened to or its parent */ | 204 | struct inode *to_tell; /* either the inode the event happened to or its parent */ |
| 197 | /* | 205 | /* |
| 198 | * depending on the event type we should have either a path or inode | 206 | * depending on the event type we should have either a file or inode |
| 199 | * We hold a reference on path, but NOT on inode. Since we have the ref on | 207 | * We hold a reference on file, but NOT on inode. Since we have the ref on |
| 200 | * the path, it may be dereferenced at any point during this object's | 208 | * the file, it may be dereferenced at any point during this object's |
| 201 | * lifetime. That reference is dropped when this object's refcnt hits | 209 | * lifetime. That reference is dropped when this object's refcnt hits |
| 202 | * 0. If this event contains an inode instead of a path, the inode may | 210 | * 0. If this event contains an inode instead of a file, the inode may |
| 203 | * ONLY be used during handle_event(). | 211 | * ONLY be used during handle_event(). |
| 204 | */ | 212 | */ |
| 205 | union { | 213 | union { |
| 206 | struct path path; | 214 | struct file *file; |
| 207 | struct inode *inode; | 215 | struct inode *inode; |
| 208 | }; | 216 | }; |
| 209 | /* when calling fsnotify tell it if the data is a path or inode */ | 217 | /* when calling fsnotify tell it if the data is a path or inode */ |
| 210 | #define FSNOTIFY_EVENT_NONE 0 | 218 | #define FSNOTIFY_EVENT_NONE 0 |
| 211 | #define FSNOTIFY_EVENT_PATH 1 | 219 | #define FSNOTIFY_EVENT_FILE 1 |
| 212 | #define FSNOTIFY_EVENT_INODE 2 | 220 | #define FSNOTIFY_EVENT_INODE 2 |
| 213 | #define FSNOTIFY_EVENT_FILE 3 | ||
| 214 | int data_type; /* which of the above union we have */ | 221 | int data_type; /* which of the above union we have */ |
| 215 | atomic_t refcnt; /* how many groups still are using/need to send this event */ | 222 | atomic_t refcnt; /* how many groups still are using/need to send this event */ |
| 216 | __u32 mask; /* the type of access, bitwise OR for FS_* event types */ | 223 | __u32 mask; /* the type of access, bitwise OR for FS_* event types */ |
| 217 | 224 | ||
| 218 | u32 sync_cookie; /* used to correlate events, namely inotify mv events */ | 225 | u32 sync_cookie; /* used to correlate events, namely inotify mv events */ |
| 219 | char *file_name; | 226 | const unsigned char *file_name; |
| 220 | size_t name_len; | 227 | size_t name_len; |
| 228 | struct pid *tgid; | ||
| 229 | |||
| 230 | #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS | ||
| 231 | __u32 response; /* userspace answer to question */ | ||
| 232 | #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ | ||
| 221 | 233 | ||
| 222 | struct list_head private_data_list; /* groups can store private data here */ | 234 | struct list_head private_data_list; /* groups can store private data here */ |
| 223 | }; | 235 | }; |
| 224 | 236 | ||
| 225 | /* | 237 | /* |
| 226 | * a mark is simply an entry attached to an in core inode which allows an | 238 | * Inode specific fields in an fsnotify_mark |
| 239 | */ | ||
| 240 | struct fsnotify_inode_mark { | ||
| 241 | struct inode *inode; /* inode this mark is associated with */ | ||
| 242 | struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */ | ||
| 243 | struct list_head free_i_list; /* tmp list used when freeing this mark */ | ||
| 244 | }; | ||
| 245 | |||
| 246 | /* | ||
| 247 | * Mount point specific fields in an fsnotify_mark | ||
| 248 | */ | ||
| 249 | struct fsnotify_vfsmount_mark { | ||
| 250 | struct vfsmount *mnt; /* vfsmount this mark is associated with */ | ||
| 251 | struct hlist_node m_list; /* list of marks by mnt->mnt_fsnotify_marks */ | ||
| 252 | struct list_head free_m_list; /* tmp list used when freeing this mark */ | ||
| 253 | }; | ||
| 254 | |||
| 255 | /* | ||
| 256 | * a mark is simply an object attached to an in core inode which allows an | ||
| 227 | * fsnotify listener to indicate they are either no longer interested in events | 257 | * fsnotify listener to indicate they are either no longer interested in events |
| 228 | * of a type matching mask or only interested in those events. | 258 | * of a type matching mask or only interested in those events. |
| 229 | * | 259 | * |
| @@ -232,19 +262,28 @@ struct fsnotify_event { | |||
| 232 | * (such as dnotify) will flush these when the open fd is closed and not at | 262 | * (such as dnotify) will flush these when the open fd is closed and not at |
| 233 | * inode eviction or modification. | 263 | * inode eviction or modification. |
| 234 | */ | 264 | */ |
| 235 | struct fsnotify_mark_entry { | 265 | struct fsnotify_mark { |
| 236 | __u32 mask; /* mask this mark entry is for */ | 266 | __u32 mask; /* mask this mark is for */ |
| 237 | /* we hold ref for each i_list and g_list. also one ref for each 'thing' | 267 | /* we hold ref for each i_list and g_list. also one ref for each 'thing' |
| 238 | * in kernel that found and may be using this mark. */ | 268 | * in kernel that found and may be using this mark. */ |
| 239 | atomic_t refcnt; /* active things looking at this mark */ | 269 | atomic_t refcnt; /* active things looking at this mark */ |
| 240 | struct inode *inode; /* inode this entry is associated with */ | 270 | struct fsnotify_group *group; /* group this mark is for */ |
| 241 | struct fsnotify_group *group; /* group this mark entry is for */ | 271 | struct list_head g_list; /* list of marks by group->i_fsnotify_marks */ |
| 242 | struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */ | 272 | spinlock_t lock; /* protect group and inode */ |
| 243 | struct list_head g_list; /* list of mark_entries by group->i_fsnotify_mark_entries */ | 273 | union { |
| 244 | spinlock_t lock; /* protect group, inode, and killme */ | 274 | struct fsnotify_inode_mark i; |
| 245 | struct list_head free_i_list; /* tmp list used when freeing this mark */ | 275 | struct fsnotify_vfsmount_mark m; |
| 276 | }; | ||
| 277 | __u32 ignored_mask; /* events types to ignore */ | ||
| 246 | struct list_head free_g_list; /* tmp list used when freeing this mark */ | 278 | struct list_head free_g_list; /* tmp list used when freeing this mark */ |
| 247 | void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */ | 279 | #define FSNOTIFY_MARK_FLAG_INODE 0x01 |
| 280 | #define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02 | ||
| 281 | #define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04 | ||
| 282 | #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08 | ||
| 283 | #define FSNOTIFY_MARK_FLAG_ALIVE 0x10 | ||
| 284 | unsigned int flags; /* vfsmount or inode mark? */ | ||
| 285 | struct list_head destroy_list; | ||
| 286 | void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */ | ||
| 248 | }; | 287 | }; |
| 249 | 288 | ||
| 250 | #ifdef CONFIG_FSNOTIFY | 289 | #ifdef CONFIG_FSNOTIFY |
| @@ -252,10 +291,11 @@ struct fsnotify_mark_entry { | |||
| 252 | /* called from the vfs helpers */ | 291 | /* called from the vfs helpers */ |
| 253 | 292 | ||
| 254 | /* main fsnotify call to send events */ | 293 | /* main fsnotify call to send events */ |
| 255 | extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, | 294 | extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, |
| 256 | const char *name, u32 cookie); | 295 | const unsigned char *name, u32 cookie); |
| 257 | extern void __fsnotify_parent(struct dentry *dentry, __u32 mask); | 296 | extern void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask); |
| 258 | extern void __fsnotify_inode_delete(struct inode *inode); | 297 | extern void __fsnotify_inode_delete(struct inode *inode); |
| 298 | extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); | ||
| 259 | extern u32 fsnotify_get_cookie(void); | 299 | extern u32 fsnotify_get_cookie(void); |
| 260 | 300 | ||
| 261 | static inline int fsnotify_inode_watches_children(struct inode *inode) | 301 | static inline int fsnotify_inode_watches_children(struct inode *inode) |
| @@ -304,15 +344,9 @@ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode | |||
| 304 | 344 | ||
| 305 | /* called from fsnotify listeners, such as fanotify or dnotify */ | 345 | /* called from fsnotify listeners, such as fanotify or dnotify */ |
| 306 | 346 | ||
| 307 | /* must call when a group changes its ->mask */ | ||
| 308 | extern void fsnotify_recalc_global_mask(void); | ||
| 309 | /* get a reference to an existing or create a new group */ | 347 | /* get a reference to an existing or create a new group */ |
| 310 | extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, | 348 | extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops); |
| 311 | __u32 mask, | 349 | /* drop reference on a group from fsnotify_alloc_group */ |
| 312 | const struct fsnotify_ops *ops); | ||
| 313 | /* run all marks associated with this group and update group->mask */ | ||
| 314 | extern void fsnotify_recalc_group_mask(struct fsnotify_group *group); | ||
| 315 | /* drop reference on a group from fsnotify_obtain_group */ | ||
| 316 | extern void fsnotify_put_group(struct fsnotify_group *group); | 350 | extern void fsnotify_put_group(struct fsnotify_group *group); |
| 317 | 351 | ||
| 318 | /* take a reference to an event */ | 352 | /* take a reference to an event */ |
| @@ -323,8 +357,11 @@ extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struc | |||
| 323 | struct fsnotify_event *event); | 357 | struct fsnotify_event *event); |
| 324 | 358 | ||
| 325 | /* attach the event to the group notification queue */ | 359 | /* attach the event to the group notification queue */ |
| 326 | extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event, | 360 | extern struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, |
| 327 | struct fsnotify_event_private_data *priv); | 361 | struct fsnotify_event *event, |
| 362 | struct fsnotify_event_private_data *priv, | ||
| 363 | struct fsnotify_event *(*merge)(struct list_head *, | ||
| 364 | struct fsnotify_event *)); | ||
| 328 | /* true if the group notification queue is empty */ | 365 | /* true if the group notification queue is empty */ |
| 329 | extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); | 366 | extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); |
| 330 | /* return, but do not dequeue the first event on the notification queue */ | 367 | /* return, but do not dequeue the first event on the notification queue */ |
| @@ -334,38 +371,66 @@ extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group | |||
| 334 | 371 | ||
| 335 | /* functions used to manipulate the marks attached to inodes */ | 372 | /* functions used to manipulate the marks attached to inodes */ |
| 336 | 373 | ||
| 374 | /* run all marks associated with a vfsmount and update mnt->mnt_fsnotify_mask */ | ||
| 375 | extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt); | ||
| 337 | /* run all marks associated with an inode and update inode->i_fsnotify_mask */ | 376 | /* run all marks associated with an inode and update inode->i_fsnotify_mask */ |
| 338 | extern void fsnotify_recalc_inode_mask(struct inode *inode); | 377 | extern void fsnotify_recalc_inode_mask(struct inode *inode); |
| 339 | extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry)); | 378 | extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark)); |
| 340 | /* find (and take a reference) to a mark associated with group and inode */ | 379 | /* find (and take a reference) to a mark associated with group and inode */ |
| 341 | extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode); | 380 | extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode); |
| 381 | /* find (and take a reference) to a mark associated with group and vfsmount */ | ||
| 382 | extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt); | ||
| 383 | /* copy the values from old into new */ | ||
| 384 | extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old); | ||
| 385 | /* set the ignored_mask of a mark */ | ||
| 386 | extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask); | ||
| 387 | /* set the mask of a mark (might pin the object into memory */ | ||
| 388 | extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask); | ||
| 342 | /* attach the mark to both the group and the inode */ | 389 | /* attach the mark to both the group and the inode */ |
| 343 | extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode); | 390 | extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, |
| 391 | struct inode *inode, struct vfsmount *mnt, int allow_dups); | ||
| 344 | /* given a mark, flag it to be freed when all references are dropped */ | 392 | /* given a mark, flag it to be freed when all references are dropped */ |
| 345 | extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry); | 393 | extern void fsnotify_destroy_mark(struct fsnotify_mark *mark); |
| 394 | /* run all the marks in a group, and clear all of the vfsmount marks */ | ||
| 395 | extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group); | ||
| 396 | /* run all the marks in a group, and clear all of the inode marks */ | ||
| 397 | extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group); | ||
| 398 | /* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/ | ||
| 399 | extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags); | ||
| 346 | /* run all the marks in a group, and flag them to be freed */ | 400 | /* run all the marks in a group, and flag them to be freed */ |
| 347 | extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); | 401 | extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); |
| 348 | extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry); | 402 | extern void fsnotify_get_mark(struct fsnotify_mark *mark); |
| 349 | extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry); | 403 | extern void fsnotify_put_mark(struct fsnotify_mark *mark); |
| 350 | extern void fsnotify_unmount_inodes(struct list_head *list); | 404 | extern void fsnotify_unmount_inodes(struct list_head *list); |
| 351 | 405 | ||
| 352 | /* put here because inotify does some weird stuff when destroying watches */ | 406 | /* put here because inotify does some weird stuff when destroying watches */ |
| 353 | extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, | 407 | extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, |
| 354 | void *data, int data_is, const char *name, | 408 | void *data, int data_is, |
| 409 | const unsigned char *name, | ||
| 355 | u32 cookie, gfp_t gfp); | 410 | u32 cookie, gfp_t gfp); |
| 356 | 411 | ||
| 412 | /* fanotify likes to change events after they are on lists... */ | ||
| 413 | extern struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event); | ||
| 414 | extern int fsnotify_replace_event(struct fsnotify_event_holder *old_holder, | ||
| 415 | struct fsnotify_event *new_event); | ||
| 416 | |||
| 357 | #else | 417 | #else |
| 358 | 418 | ||
| 359 | static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, | 419 | static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, |
| 360 | const char *name, u32 cookie) | 420 | const unsigned char *name, u32 cookie) |
| 361 | {} | 421 | { |
| 422 | return 0; | ||
| 423 | } | ||
| 362 | 424 | ||
| 363 | static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask) | 425 | static inline void __fsnotify_parent(struct file *file, struct dentry *dentry, __u32 mask) |
| 364 | {} | 426 | {} |
| 365 | 427 | ||
| 366 | static inline void __fsnotify_inode_delete(struct inode *inode) | 428 | static inline void __fsnotify_inode_delete(struct inode *inode) |
| 367 | {} | 429 | {} |
| 368 | 430 | ||
| 431 | static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) | ||
| 432 | {} | ||
| 433 | |||
| 369 | static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) | 434 | static inline void __fsnotify_update_dcache_flags(struct dentry *dentry) |
| 370 | {} | 435 | {} |
| 371 | 436 | ||
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 41e46330d9be..dcd6a7c3a435 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
| @@ -1,3 +1,8 @@ | |||
| 1 | /* | ||
| 2 | * Ftrace header. For implementation details beyond the random comments | ||
| 3 | * scattered below, see: Documentation/trace/ftrace-design.txt | ||
| 4 | */ | ||
| 5 | |||
| 1 | #ifndef _LINUX_FTRACE_H | 6 | #ifndef _LINUX_FTRACE_H |
| 2 | #define _LINUX_FTRACE_H | 7 | #define _LINUX_FTRACE_H |
| 3 | 8 | ||
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 3167f2df4126..02b8b24f8f51 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
| @@ -11,8 +11,6 @@ struct trace_array; | |||
| 11 | struct tracer; | 11 | struct tracer; |
| 12 | struct dentry; | 12 | struct dentry; |
| 13 | 13 | ||
| 14 | DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq); | ||
| 15 | |||
| 16 | struct trace_print_flags { | 14 | struct trace_print_flags { |
| 17 | unsigned long mask; | 15 | unsigned long mask; |
| 18 | const char *name; | 16 | const char *name; |
| @@ -58,6 +56,9 @@ struct trace_iterator { | |||
| 58 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; | 56 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; |
| 59 | unsigned long iter_flags; | 57 | unsigned long iter_flags; |
| 60 | 58 | ||
| 59 | /* trace_seq for __print_flags() and __print_symbolic() etc. */ | ||
| 60 | struct trace_seq tmp_seq; | ||
| 61 | |||
| 61 | /* The below is zeroed out in pipe_read */ | 62 | /* The below is zeroed out in pipe_read */ |
| 62 | struct trace_seq seq; | 63 | struct trace_seq seq; |
| 63 | struct trace_entry *ent; | 64 | struct trace_entry *ent; |
| @@ -146,14 +147,19 @@ struct ftrace_event_class { | |||
| 146 | int (*raw_init)(struct ftrace_event_call *); | 147 | int (*raw_init)(struct ftrace_event_call *); |
| 147 | }; | 148 | }; |
| 148 | 149 | ||
| 150 | extern int ftrace_event_reg(struct ftrace_event_call *event, | ||
| 151 | enum trace_reg type); | ||
| 152 | |||
| 149 | enum { | 153 | enum { |
| 150 | TRACE_EVENT_FL_ENABLED_BIT, | 154 | TRACE_EVENT_FL_ENABLED_BIT, |
| 151 | TRACE_EVENT_FL_FILTERED_BIT, | 155 | TRACE_EVENT_FL_FILTERED_BIT, |
| 156 | TRACE_EVENT_FL_RECORDED_CMD_BIT, | ||
| 152 | }; | 157 | }; |
| 153 | 158 | ||
| 154 | enum { | 159 | enum { |
| 155 | TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT), | 160 | TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT), |
| 156 | TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), | 161 | TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), |
| 162 | TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT), | ||
| 157 | }; | 163 | }; |
| 158 | 164 | ||
| 159 | struct ftrace_event_call { | 165 | struct ftrace_event_call { |
| @@ -171,6 +177,7 @@ struct ftrace_event_call { | |||
| 171 | * 32 bit flags: | 177 | * 32 bit flags: |
| 172 | * bit 1: enabled | 178 | * bit 1: enabled |
| 173 | * bit 2: filter_active | 179 | * bit 2: filter_active |
| 180 | * bit 3: enabled cmd record | ||
| 174 | * | 181 | * |
| 175 | * Changes to flags must hold the event_mutex. | 182 | * Changes to flags must hold the event_mutex. |
| 176 | * | 183 | * |
| @@ -257,8 +264,7 @@ static inline void | |||
| 257 | perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, | 264 | perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, |
| 258 | u64 count, struct pt_regs *regs, void *head) | 265 | u64 count, struct pt_regs *regs, void *head) |
| 259 | { | 266 | { |
| 260 | perf_tp_event(addr, count, raw_data, size, regs, head); | 267 | perf_tp_event(addr, count, raw_data, size, regs, head, rctx); |
| 261 | perf_swevent_put_recursion_context(rctx); | ||
| 262 | } | 268 | } |
| 263 | #endif | 269 | #endif |
| 264 | 270 | ||
diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 88e0eb596919..c3c578e09833 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h | |||
| @@ -37,6 +37,10 @@ | |||
| 37 | * | 37 | * |
| 38 | * 7.14 | 38 | * 7.14 |
| 39 | * - add splice support to fuse device | 39 | * - add splice support to fuse device |
| 40 | * | ||
| 41 | * 7.15 | ||
| 42 | * - add store notify | ||
| 43 | * - add retrieve notify | ||
| 40 | */ | 44 | */ |
| 41 | 45 | ||
| 42 | #ifndef _LINUX_FUSE_H | 46 | #ifndef _LINUX_FUSE_H |
| @@ -68,7 +72,7 @@ | |||
| 68 | #define FUSE_KERNEL_VERSION 7 | 72 | #define FUSE_KERNEL_VERSION 7 |
| 69 | 73 | ||
| 70 | /** Minor version number of this interface */ | 74 | /** Minor version number of this interface */ |
| 71 | #define FUSE_KERNEL_MINOR_VERSION 14 | 75 | #define FUSE_KERNEL_MINOR_VERSION 15 |
| 72 | 76 | ||
| 73 | /** The node ID of the root inode */ | 77 | /** The node ID of the root inode */ |
| 74 | #define FUSE_ROOT_ID 1 | 78 | #define FUSE_ROOT_ID 1 |
| @@ -251,6 +255,7 @@ enum fuse_opcode { | |||
| 251 | FUSE_DESTROY = 38, | 255 | FUSE_DESTROY = 38, |
| 252 | FUSE_IOCTL = 39, | 256 | FUSE_IOCTL = 39, |
| 253 | FUSE_POLL = 40, | 257 | FUSE_POLL = 40, |
| 258 | FUSE_NOTIFY_REPLY = 41, | ||
| 254 | 259 | ||
| 255 | /* CUSE specific operations */ | 260 | /* CUSE specific operations */ |
| 256 | CUSE_INIT = 4096, | 261 | CUSE_INIT = 4096, |
| @@ -260,6 +265,8 @@ enum fuse_notify_code { | |||
| 260 | FUSE_NOTIFY_POLL = 1, | 265 | FUSE_NOTIFY_POLL = 1, |
| 261 | FUSE_NOTIFY_INVAL_INODE = 2, | 266 | FUSE_NOTIFY_INVAL_INODE = 2, |
| 262 | FUSE_NOTIFY_INVAL_ENTRY = 3, | 267 | FUSE_NOTIFY_INVAL_ENTRY = 3, |
| 268 | FUSE_NOTIFY_STORE = 4, | ||
| 269 | FUSE_NOTIFY_RETRIEVE = 5, | ||
| 263 | FUSE_NOTIFY_CODE_MAX, | 270 | FUSE_NOTIFY_CODE_MAX, |
| 264 | }; | 271 | }; |
| 265 | 272 | ||
| @@ -568,4 +575,29 @@ struct fuse_notify_inval_entry_out { | |||
| 568 | __u32 padding; | 575 | __u32 padding; |
| 569 | }; | 576 | }; |
| 570 | 577 | ||
| 578 | struct fuse_notify_store_out { | ||
| 579 | __u64 nodeid; | ||
| 580 | __u64 offset; | ||
| 581 | __u32 size; | ||
| 582 | __u32 padding; | ||
| 583 | }; | ||
| 584 | |||
| 585 | struct fuse_notify_retrieve_out { | ||
| 586 | __u64 notify_unique; | ||
| 587 | __u64 nodeid; | ||
| 588 | __u64 offset; | ||
| 589 | __u32 size; | ||
| 590 | __u32 padding; | ||
| 591 | }; | ||
| 592 | |||
| 593 | /* Matches the size of fuse_write_in */ | ||
| 594 | struct fuse_notify_retrieve_in { | ||
| 595 | __u64 dummy1; | ||
| 596 | __u64 offset; | ||
| 597 | __u32 size; | ||
| 598 | __u32 dummy2; | ||
| 599 | __u64 dummy3; | ||
| 600 | __u64 dummy4; | ||
| 601 | }; | ||
| 602 | |||
| 571 | #endif /* _LINUX_FUSE_H */ | 603 | #endif /* _LINUX_FUSE_H */ |
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h index cd0b3f30f48e..ce73a30113b4 100644 --- a/include/linux/gpio_keys.h +++ b/include/linux/gpio_keys.h | |||
| @@ -17,6 +17,8 @@ struct gpio_keys_platform_data { | |||
| 17 | struct gpio_keys_button *buttons; | 17 | struct gpio_keys_button *buttons; |
| 18 | int nbuttons; | 18 | int nbuttons; |
| 19 | unsigned int rep:1; /* enable input subsystem auto repeat */ | 19 | unsigned int rep:1; /* enable input subsystem auto repeat */ |
| 20 | int (*enable)(struct device *dev); | ||
| 21 | void (*disable)(struct device *dev); | ||
| 20 | }; | 22 | }; |
| 21 | 23 | ||
| 22 | #endif | 24 | #endif |
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index caafd0561aa1..e3060ef85b6d 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define _LINUX_HIGHMEM_H | 2 | #define _LINUX_HIGHMEM_H |
| 3 | 3 | ||
| 4 | #include <linux/fs.h> | 4 | #include <linux/fs.h> |
| 5 | #include <linux/kernel.h> | ||
| 5 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
| 6 | #include <linux/uaccess.h> | 7 | #include <linux/uaccess.h> |
| 7 | 8 | ||
| @@ -72,7 +73,11 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx) | |||
| 72 | } | 73 | } |
| 73 | #define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx) | 74 | #define kmap_atomic_prot(page, idx, prot) kmap_atomic(page, idx) |
| 74 | 75 | ||
| 75 | #define kunmap_atomic(addr, idx) do { pagefault_enable(); } while (0) | 76 | static inline void kunmap_atomic_notypecheck(void *addr, enum km_type idx) |
| 77 | { | ||
| 78 | pagefault_enable(); | ||
| 79 | } | ||
| 80 | |||
| 76 | #define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) | 81 | #define kmap_atomic_pfn(pfn, idx) kmap_atomic(pfn_to_page(pfn), (idx)) |
| 77 | #define kmap_atomic_to_page(ptr) virt_to_page(ptr) | 82 | #define kmap_atomic_to_page(ptr) virt_to_page(ptr) |
| 78 | 83 | ||
| @@ -81,6 +86,13 @@ static inline void *kmap_atomic(struct page *page, enum km_type idx) | |||
| 81 | 86 | ||
| 82 | #endif /* CONFIG_HIGHMEM */ | 87 | #endif /* CONFIG_HIGHMEM */ |
| 83 | 88 | ||
| 89 | /* Prevent people trying to call kunmap_atomic() as if it were kunmap() */ | ||
| 90 | /* kunmap_atomic() should get the return value of kmap_atomic, not the page. */ | ||
| 91 | #define kunmap_atomic(addr, idx) do { \ | ||
| 92 | BUILD_BUG_ON(__same_type((addr), struct page *)); \ | ||
| 93 | kunmap_atomic_notypecheck((addr), (idx)); \ | ||
| 94 | } while (0) | ||
| 95 | |||
| 84 | /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ | 96 | /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ |
| 85 | #ifndef clear_user_highpage | 97 | #ifndef clear_user_highpage |
| 86 | static inline void clear_user_highpage(struct page *page, unsigned long vaddr) | 98 | static inline void clear_user_highpage(struct page *page, unsigned long vaddr) |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 21067b418536..38dd4025aa4e 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
| @@ -108,6 +108,7 @@ extern s32 i2c_smbus_write_i2c_block_data(struct i2c_client *client, | |||
| 108 | * @shutdown: Callback for device shutdown | 108 | * @shutdown: Callback for device shutdown |
| 109 | * @suspend: Callback for device suspend | 109 | * @suspend: Callback for device suspend |
| 110 | * @resume: Callback for device resume | 110 | * @resume: Callback for device resume |
| 111 | * @alert: Alert callback, for example for the SMBus alert protocol | ||
| 111 | * @command: Callback for bus-wide signaling (optional) | 112 | * @command: Callback for bus-wide signaling (optional) |
| 112 | * @driver: Device driver model driver | 113 | * @driver: Device driver model driver |
| 113 | * @id_table: List of I2C devices supported by this driver | 114 | * @id_table: List of I2C devices supported by this driver |
| @@ -233,6 +234,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data) | |||
| 233 | * @addr: stored in i2c_client.addr | 234 | * @addr: stored in i2c_client.addr |
| 234 | * @platform_data: stored in i2c_client.dev.platform_data | 235 | * @platform_data: stored in i2c_client.dev.platform_data |
| 235 | * @archdata: copied into i2c_client.dev.archdata | 236 | * @archdata: copied into i2c_client.dev.archdata |
| 237 | * @of_node: pointer to OpenFirmware device node | ||
| 236 | * @irq: stored in i2c_client.irq | 238 | * @irq: stored in i2c_client.irq |
| 237 | * | 239 | * |
| 238 | * I2C doesn't actually support hardware probing, although controllers and | 240 | * I2C doesn't actually support hardware probing, although controllers and |
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h new file mode 100644 index 000000000000..ee3049cb9ba5 --- /dev/null +++ b/include/linux/i2c/sx150x.h | |||
| @@ -0,0 +1,78 @@ | |||
| 1 | /* | ||
| 2 | * Driver for the Semtech SX150x I2C GPIO Expanders | ||
| 3 | * | ||
| 4 | * Copyright (c) 2010, Code Aurora Forum. All rights reserved. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 and | ||
| 8 | * only version 2 as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, write to the Free Software | ||
| 17 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
| 18 | * 02110-1301, USA. | ||
| 19 | */ | ||
| 20 | #ifndef __LINUX_I2C_SX150X_H | ||
| 21 | #define __LINUX_I2C_SX150X_H | ||
| 22 | |||
| 23 | /** | ||
| 24 | * struct sx150x_platform_data - config data for SX150x driver | ||
| 25 | * @gpio_base: The index number of the first GPIO assigned to this | ||
| 26 | * GPIO expander. The expander will create a block of | ||
| 27 | * consecutively numbered gpios beginning at the given base, | ||
| 28 | * with the size of the block depending on the model of the | ||
| 29 | * expander chip. | ||
| 30 | * @oscio_is_gpo: If set to true, the driver will configure OSCIO as a GPO | ||
| 31 | * instead of as an oscillator, increasing the size of the | ||
| 32 | * GP(I)O pool created by this expander by one. The | ||
| 33 | * output-only GPO pin will be added at the end of the block. | ||
| 34 | * @io_pullup_ena: A bit-mask which enables or disables the pull-up resistor | ||
| 35 | * for each IO line in the expander. Setting the bit at | ||
| 36 | * position n will enable the pull-up for the IO at | ||
| 37 | * the corresponding offset. For chips with fewer than | ||
| 38 | * 16 IO pins, high-end bits are ignored. | ||
| 39 | * @io_pulldn_ena: A bit-mask which enables-or disables the pull-down | ||
| 40 | * resistor for each IO line in the expander. Setting the | ||
| 41 | * bit at position n will enable the pull-down for the IO at | ||
| 42 | * the corresponding offset. For chips with fewer than | ||
| 43 | * 16 IO pins, high-end bits are ignored. | ||
| 44 | * @io_open_drain_ena: A bit-mask which enables-or disables open-drain | ||
| 45 | * operation for each IO line in the expander. Setting the | ||
| 46 | * bit at position n enables open-drain operation for | ||
| 47 | * the IO at the corresponding offset. Clearing the bit | ||
| 48 | * enables regular push-pull operation for that IO. | ||
| 49 | * For chips with fewer than 16 IO pins, high-end bits | ||
| 50 | * are ignored. | ||
| 51 | * @io_polarity: A bit-mask which enables polarity inversion for each IO line | ||
| 52 | * in the expander. Setting the bit at position n inverts | ||
| 53 | * the polarity of that IO line, while clearing it results | ||
| 54 | * in normal polarity. For chips with fewer than 16 IO pins, | ||
| 55 | * high-end bits are ignored. | ||
| 56 | * @irq_summary: The 'summary IRQ' line to which the GPIO expander's INT line | ||
| 57 | * is connected, via which it reports interrupt events | ||
| 58 | * across all GPIO lines. This must be a real, | ||
| 59 | * pre-existing IRQ line. | ||
| 60 | * Setting this value < 0 disables the irq_chip functionality | ||
| 61 | * of the driver. | ||
| 62 | * @irq_base: The first 'virtual IRQ' line at which our block of GPIO-based | ||
| 63 | * IRQ lines will appear. Similarly to gpio_base, the expander | ||
| 64 | * will create a block of irqs beginning at this number. | ||
| 65 | * This value is ignored if irq_summary is < 0. | ||
| 66 | */ | ||
| 67 | struct sx150x_platform_data { | ||
| 68 | unsigned gpio_base; | ||
| 69 | bool oscio_is_gpo; | ||
| 70 | u16 io_pullup_ena; | ||
| 71 | u16 io_pulldn_ena; | ||
| 72 | u16 io_open_drain_ena; | ||
| 73 | u16 io_polarity; | ||
| 74 | int irq_summary; | ||
| 75 | unsigned irq_base; | ||
| 76 | }; | ||
| 77 | |||
| 78 | #endif /* __LINUX_I2C_SX150X_H */ | ||
diff --git a/include/linux/inotify.h b/include/linux/inotify.h index 37ea2894b3c0..d33041e2a42a 100644 --- a/include/linux/inotify.h +++ b/include/linux/inotify.h | |||
| @@ -51,6 +51,7 @@ struct inotify_event { | |||
| 51 | /* special flags */ | 51 | /* special flags */ |
| 52 | #define IN_ONLYDIR 0x01000000 /* only watch the path if it is a directory */ | 52 | #define IN_ONLYDIR 0x01000000 /* only watch the path if it is a directory */ |
| 53 | #define IN_DONT_FOLLOW 0x02000000 /* don't follow a sym link */ | 53 | #define IN_DONT_FOLLOW 0x02000000 /* don't follow a sym link */ |
| 54 | #define IN_EXCL_UNLINK 0x04000000 /* exclude events on unlinked objects */ | ||
| 54 | #define IN_MASK_ADD 0x20000000 /* add to the mask of an already existing watch */ | 55 | #define IN_MASK_ADD 0x20000000 /* add to the mask of an already existing watch */ |
| 55 | #define IN_ISDIR 0x40000000 /* event occurred against dir */ | 56 | #define IN_ISDIR 0x40000000 /* event occurred against dir */ |
| 56 | #define IN_ONESHOT 0x80000000 /* only send event once */ | 57 | #define IN_ONESHOT 0x80000000 /* only send event once */ |
| @@ -70,177 +71,17 @@ struct inotify_event { | |||
| 70 | #define IN_NONBLOCK O_NONBLOCK | 71 | #define IN_NONBLOCK O_NONBLOCK |
| 71 | 72 | ||
| 72 | #ifdef __KERNEL__ | 73 | #ifdef __KERNEL__ |
| 73 | 74 | #include <linux/sysctl.h> | |
| 74 | #include <linux/dcache.h> | 75 | extern struct ctl_table inotify_table[]; /* for sysctl */ |
| 75 | #include <linux/fs.h> | 76 | |
| 76 | 77 | #define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \ | |
| 77 | /* | 78 | IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \ |
| 78 | * struct inotify_watch - represents a watch request on a specific inode | 79 | IN_MOVED_TO | IN_CREATE | IN_DELETE | \ |
| 79 | * | 80 | IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | \ |
| 80 | * h_list is protected by ih->mutex of the associated inotify_handle. | 81 | IN_Q_OVERFLOW | IN_IGNORED | IN_ONLYDIR | \ |
| 81 | * i_list, mask are protected by inode->inotify_mutex of the associated inode. | 82 | IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD | \ |
| 82 | * ih, inode, and wd are never written to once the watch is created. | 83 | IN_ISDIR | IN_ONESHOT) |
| 83 | * | 84 | |
| 84 | * Callers must use the established inotify interfaces to access inotify_watch | 85 | #endif |
| 85 | * contents. The content of this structure is private to the inotify | ||
| 86 | * implementation. | ||
| 87 | */ | ||
| 88 | struct inotify_watch { | ||
| 89 | struct list_head h_list; /* entry in inotify_handle's list */ | ||
| 90 | struct list_head i_list; /* entry in inode's list */ | ||
| 91 | atomic_t count; /* reference count */ | ||
| 92 | struct inotify_handle *ih; /* associated inotify handle */ | ||
| 93 | struct inode *inode; /* associated inode */ | ||
| 94 | __s32 wd; /* watch descriptor */ | ||
| 95 | __u32 mask; /* event mask for this watch */ | ||
| 96 | }; | ||
| 97 | |||
| 98 | struct inotify_operations { | ||
| 99 | void (*handle_event)(struct inotify_watch *, u32, u32, u32, | ||
| 100 | const char *, struct inode *); | ||
| 101 | void (*destroy_watch)(struct inotify_watch *); | ||
| 102 | }; | ||
| 103 | |||
| 104 | #ifdef CONFIG_INOTIFY | ||
| 105 | |||
| 106 | /* Kernel API for producing events */ | ||
| 107 | |||
| 108 | extern void inotify_d_instantiate(struct dentry *, struct inode *); | ||
| 109 | extern void inotify_d_move(struct dentry *); | ||
| 110 | extern void inotify_inode_queue_event(struct inode *, __u32, __u32, | ||
| 111 | const char *, struct inode *); | ||
| 112 | extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32, | ||
| 113 | const char *); | ||
| 114 | extern void inotify_unmount_inodes(struct list_head *); | ||
| 115 | extern void inotify_inode_is_dead(struct inode *); | ||
| 116 | extern u32 inotify_get_cookie(void); | ||
| 117 | |||
| 118 | /* Kernel Consumer API */ | ||
| 119 | |||
| 120 | extern struct inotify_handle *inotify_init(const struct inotify_operations *); | ||
| 121 | extern void inotify_init_watch(struct inotify_watch *); | ||
| 122 | extern void inotify_destroy(struct inotify_handle *); | ||
| 123 | extern __s32 inotify_find_watch(struct inotify_handle *, struct inode *, | ||
| 124 | struct inotify_watch **); | ||
| 125 | extern __s32 inotify_find_update_watch(struct inotify_handle *, struct inode *, | ||
| 126 | u32); | ||
| 127 | extern __s32 inotify_add_watch(struct inotify_handle *, struct inotify_watch *, | ||
| 128 | struct inode *, __u32); | ||
| 129 | extern __s32 inotify_clone_watch(struct inotify_watch *, struct inotify_watch *); | ||
| 130 | extern void inotify_evict_watch(struct inotify_watch *); | ||
| 131 | extern int inotify_rm_watch(struct inotify_handle *, struct inotify_watch *); | ||
| 132 | extern int inotify_rm_wd(struct inotify_handle *, __u32); | ||
| 133 | extern void inotify_remove_watch_locked(struct inotify_handle *, | ||
| 134 | struct inotify_watch *); | ||
| 135 | extern void get_inotify_watch(struct inotify_watch *); | ||
| 136 | extern void put_inotify_watch(struct inotify_watch *); | ||
| 137 | extern int pin_inotify_watch(struct inotify_watch *); | ||
| 138 | extern void unpin_inotify_watch(struct inotify_watch *); | ||
| 139 | |||
| 140 | #else | ||
| 141 | |||
| 142 | static inline void inotify_d_instantiate(struct dentry *dentry, | ||
| 143 | struct inode *inode) | ||
| 144 | { | ||
| 145 | } | ||
| 146 | |||
| 147 | static inline void inotify_d_move(struct dentry *dentry) | ||
| 148 | { | ||
| 149 | } | ||
| 150 | |||
| 151 | static inline void inotify_inode_queue_event(struct inode *inode, | ||
| 152 | __u32 mask, __u32 cookie, | ||
| 153 | const char *filename, | ||
| 154 | struct inode *n_inode) | ||
| 155 | { | ||
| 156 | } | ||
| 157 | |||
| 158 | static inline void inotify_dentry_parent_queue_event(struct dentry *dentry, | ||
| 159 | __u32 mask, __u32 cookie, | ||
| 160 | const char *filename) | ||
| 161 | { | ||
| 162 | } | ||
| 163 | |||
| 164 | static inline void inotify_unmount_inodes(struct list_head *list) | ||
| 165 | { | ||
| 166 | } | ||
| 167 | |||
| 168 | static inline void inotify_inode_is_dead(struct inode *inode) | ||
| 169 | { | ||
| 170 | } | ||
| 171 | |||
| 172 | static inline u32 inotify_get_cookie(void) | ||
| 173 | { | ||
| 174 | return 0; | ||
| 175 | } | ||
| 176 | |||
| 177 | static inline struct inotify_handle *inotify_init(const struct inotify_operations *ops) | ||
| 178 | { | ||
| 179 | return ERR_PTR(-EOPNOTSUPP); | ||
| 180 | } | ||
| 181 | |||
| 182 | static inline void inotify_init_watch(struct inotify_watch *watch) | ||
| 183 | { | ||
| 184 | } | ||
| 185 | |||
| 186 | static inline void inotify_destroy(struct inotify_handle *ih) | ||
| 187 | { | ||
| 188 | } | ||
| 189 | |||
| 190 | static inline __s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode, | ||
| 191 | struct inotify_watch **watchp) | ||
| 192 | { | ||
| 193 | return -EOPNOTSUPP; | ||
| 194 | } | ||
| 195 | |||
| 196 | static inline __s32 inotify_find_update_watch(struct inotify_handle *ih, | ||
| 197 | struct inode *inode, u32 mask) | ||
| 198 | { | ||
| 199 | return -EOPNOTSUPP; | ||
| 200 | } | ||
| 201 | |||
| 202 | static inline __s32 inotify_add_watch(struct inotify_handle *ih, | ||
| 203 | struct inotify_watch *watch, | ||
| 204 | struct inode *inode, __u32 mask) | ||
| 205 | { | ||
| 206 | return -EOPNOTSUPP; | ||
| 207 | } | ||
| 208 | |||
| 209 | static inline int inotify_rm_watch(struct inotify_handle *ih, | ||
| 210 | struct inotify_watch *watch) | ||
| 211 | { | ||
| 212 | return -EOPNOTSUPP; | ||
| 213 | } | ||
| 214 | |||
| 215 | static inline int inotify_rm_wd(struct inotify_handle *ih, __u32 wd) | ||
| 216 | { | ||
| 217 | return -EOPNOTSUPP; | ||
| 218 | } | ||
| 219 | |||
| 220 | static inline void inotify_remove_watch_locked(struct inotify_handle *ih, | ||
| 221 | struct inotify_watch *watch) | ||
| 222 | { | ||
| 223 | } | ||
| 224 | |||
| 225 | static inline void get_inotify_watch(struct inotify_watch *watch) | ||
| 226 | { | ||
| 227 | } | ||
| 228 | |||
| 229 | static inline void put_inotify_watch(struct inotify_watch *watch) | ||
| 230 | { | ||
| 231 | } | ||
| 232 | |||
| 233 | extern inline int pin_inotify_watch(struct inotify_watch *watch) | ||
| 234 | { | ||
| 235 | return 0; | ||
| 236 | } | ||
| 237 | |||
| 238 | extern inline void unpin_inotify_watch(struct inotify_watch *watch) | ||
| 239 | { | ||
| 240 | } | ||
| 241 | |||
| 242 | #endif /* CONFIG_INOTIFY */ | ||
| 243 | |||
| 244 | #endif /* __KERNEL __ */ | ||
| 245 | 86 | ||
| 246 | #endif /* _LINUX_INOTIFY_H */ | 87 | #endif /* _LINUX_INOTIFY_H */ |
diff --git a/include/linux/input.h b/include/linux/input.h index 339d043ccb53..896a92227bc4 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
| @@ -776,6 +776,7 @@ struct input_absinfo { | |||
| 776 | #define REP_DELAY 0x00 | 776 | #define REP_DELAY 0x00 |
| 777 | #define REP_PERIOD 0x01 | 777 | #define REP_PERIOD 0x01 |
| 778 | #define REP_MAX 0x01 | 778 | #define REP_MAX 0x01 |
| 779 | #define REP_CNT (REP_MAX+1) | ||
| 779 | 780 | ||
| 780 | /* | 781 | /* |
| 781 | * Sounds | 782 | * Sounds |
| @@ -1099,21 +1100,18 @@ struct input_mt_slot { | |||
| 1099 | * @repeat_key: stores key code of the last key pressed; used to implement | 1100 | * @repeat_key: stores key code of the last key pressed; used to implement |
| 1100 | * software autorepeat | 1101 | * software autorepeat |
| 1101 | * @timer: timer for software autorepeat | 1102 | * @timer: timer for software autorepeat |
| 1102 | * @abs: current values for reports from absolute axes | ||
| 1103 | * @rep: current values for autorepeat parameters (delay, rate) | 1103 | * @rep: current values for autorepeat parameters (delay, rate) |
| 1104 | * @mt: pointer to array of struct input_mt_slot holding current values | 1104 | * @mt: pointer to array of struct input_mt_slot holding current values |
| 1105 | * of tracked contacts | 1105 | * of tracked contacts |
| 1106 | * @mtsize: number of MT slots the device uses | 1106 | * @mtsize: number of MT slots the device uses |
| 1107 | * @slot: MT slot currently being transmitted | 1107 | * @slot: MT slot currently being transmitted |
| 1108 | * @absinfo: array of &struct absinfo elements holding information | ||
| 1109 | * about absolute axes (current value, min, max, flat, fuzz, | ||
| 1110 | * resolution) | ||
| 1108 | * @key: reflects current state of device's keys/buttons | 1111 | * @key: reflects current state of device's keys/buttons |
| 1109 | * @led: reflects current state of device's LEDs | 1112 | * @led: reflects current state of device's LEDs |
| 1110 | * @snd: reflects current state of sound effects | 1113 | * @snd: reflects current state of sound effects |
| 1111 | * @sw: reflects current state of device's switches | 1114 | * @sw: reflects current state of device's switches |
| 1112 | * @absmax: maximum values for events coming from absolute axes | ||
| 1113 | * @absmin: minimum values for events coming from absolute axes | ||
| 1114 | * @absfuzz: describes noisiness for axes | ||
| 1115 | * @absflat: size of the center flat position (used by joydev) | ||
| 1116 | * @absres: resolution used for events coming form absolute axes | ||
| 1117 | * @open: this method is called when the very first user calls | 1115 | * @open: this method is called when the very first user calls |
| 1118 | * input_open_device(). The driver must prepare the device | 1116 | * input_open_device(). The driver must prepare the device |
| 1119 | * to start generating events (start polling thread, | 1117 | * to start generating events (start polling thread, |
| @@ -1180,24 +1178,19 @@ struct input_dev { | |||
| 1180 | unsigned int repeat_key; | 1178 | unsigned int repeat_key; |
| 1181 | struct timer_list timer; | 1179 | struct timer_list timer; |
| 1182 | 1180 | ||
| 1183 | int abs[ABS_CNT]; | 1181 | int rep[REP_CNT]; |
| 1184 | int rep[REP_MAX + 1]; | ||
| 1185 | 1182 | ||
| 1186 | struct input_mt_slot *mt; | 1183 | struct input_mt_slot *mt; |
| 1187 | int mtsize; | 1184 | int mtsize; |
| 1188 | int slot; | 1185 | int slot; |
| 1189 | 1186 | ||
| 1187 | struct input_absinfo *absinfo; | ||
| 1188 | |||
| 1190 | unsigned long key[BITS_TO_LONGS(KEY_CNT)]; | 1189 | unsigned long key[BITS_TO_LONGS(KEY_CNT)]; |
| 1191 | unsigned long led[BITS_TO_LONGS(LED_CNT)]; | 1190 | unsigned long led[BITS_TO_LONGS(LED_CNT)]; |
| 1192 | unsigned long snd[BITS_TO_LONGS(SND_CNT)]; | 1191 | unsigned long snd[BITS_TO_LONGS(SND_CNT)]; |
| 1193 | unsigned long sw[BITS_TO_LONGS(SW_CNT)]; | 1192 | unsigned long sw[BITS_TO_LONGS(SW_CNT)]; |
| 1194 | 1193 | ||
| 1195 | int absmax[ABS_CNT]; | ||
| 1196 | int absmin[ABS_CNT]; | ||
| 1197 | int absfuzz[ABS_CNT]; | ||
| 1198 | int absflat[ABS_CNT]; | ||
| 1199 | int absres[ABS_CNT]; | ||
| 1200 | |||
| 1201 | int (*open)(struct input_dev *dev); | 1194 | int (*open)(struct input_dev *dev); |
| 1202 | void (*close)(struct input_dev *dev); | 1195 | void (*close)(struct input_dev *dev); |
| 1203 | int (*flush)(struct input_dev *dev, struct file *file); | 1196 | int (*flush)(struct input_dev *dev, struct file *file); |
| @@ -1459,16 +1452,32 @@ static inline void input_set_events_per_packet(struct input_dev *dev, int n_even | |||
| 1459 | dev->hint_events_per_packet = n_events; | 1452 | dev->hint_events_per_packet = n_events; |
| 1460 | } | 1453 | } |
| 1461 | 1454 | ||
| 1462 | static inline void input_set_abs_params(struct input_dev *dev, int axis, int min, int max, int fuzz, int flat) | 1455 | void input_alloc_absinfo(struct input_dev *dev); |
| 1463 | { | 1456 | void input_set_abs_params(struct input_dev *dev, unsigned int axis, |
| 1464 | dev->absmin[axis] = min; | 1457 | int min, int max, int fuzz, int flat); |
| 1465 | dev->absmax[axis] = max; | 1458 | |
| 1466 | dev->absfuzz[axis] = fuzz; | 1459 | #define INPUT_GENERATE_ABS_ACCESSORS(_suffix, _item) \ |
| 1467 | dev->absflat[axis] = flat; | 1460 | static inline int input_abs_get_##_suffix(struct input_dev *dev, \ |
| 1468 | 1461 | unsigned int axis) \ | |
| 1469 | dev->absbit[BIT_WORD(axis)] |= BIT_MASK(axis); | 1462 | { \ |
| 1463 | return dev->absinfo ? dev->absinfo[axis]._item : 0; \ | ||
| 1464 | } \ | ||
| 1465 | \ | ||
| 1466 | static inline void input_abs_set_##_suffix(struct input_dev *dev, \ | ||
| 1467 | unsigned int axis, int val) \ | ||
| 1468 | { \ | ||
| 1469 | input_alloc_absinfo(dev); \ | ||
| 1470 | if (dev->absinfo) \ | ||
| 1471 | dev->absinfo[axis]._item = val; \ | ||
| 1470 | } | 1472 | } |
| 1471 | 1473 | ||
| 1474 | INPUT_GENERATE_ABS_ACCESSORS(val, value) | ||
| 1475 | INPUT_GENERATE_ABS_ACCESSORS(min, minimum) | ||
| 1476 | INPUT_GENERATE_ABS_ACCESSORS(max, maximum) | ||
| 1477 | INPUT_GENERATE_ABS_ACCESSORS(fuzz, fuzz) | ||
| 1478 | INPUT_GENERATE_ABS_ACCESSORS(flat, flat) | ||
| 1479 | INPUT_GENERATE_ABS_ACCESSORS(res, resolution) | ||
| 1480 | |||
| 1472 | int input_get_keycode(struct input_dev *dev, | 1481 | int input_get_keycode(struct input_dev *dev, |
| 1473 | unsigned int scancode, unsigned int *keycode); | 1482 | unsigned int scancode, unsigned int *keycode); |
| 1474 | int input_set_keycode(struct input_dev *dev, | 1483 | int input_set_keycode(struct input_dev *dev, |
diff --git a/include/linux/intel_mid_dma.h b/include/linux/intel_mid_dma.h new file mode 100644 index 000000000000..d9d08b6269b6 --- /dev/null +++ b/include/linux/intel_mid_dma.h | |||
| @@ -0,0 +1,86 @@ | |||
| 1 | /* | ||
| 2 | * intel_mid_dma.h - Intel MID DMA Drivers | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008-10 Intel Corp | ||
| 5 | * Author: Vinod Koul <vinod.koul@intel.com> | ||
| 6 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; version 2 of the License. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, but | ||
| 13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 15 | * General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License along | ||
| 18 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
| 19 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | ||
| 20 | * | ||
| 21 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 22 | * | ||
| 23 | * | ||
| 24 | */ | ||
| 25 | #ifndef __INTEL_MID_DMA_H__ | ||
| 26 | #define __INTEL_MID_DMA_H__ | ||
| 27 | |||
| 28 | #include <linux/dmaengine.h> | ||
| 29 | |||
| 30 | /*DMA transaction width, src and dstn width would be same | ||
| 31 | The DMA length must be width aligned, | ||
| 32 | for 32 bit width the length must be 32 bit (4bytes) aligned only*/ | ||
| 33 | enum intel_mid_dma_width { | ||
| 34 | LNW_DMA_WIDTH_8BIT = 0x0, | ||
| 35 | LNW_DMA_WIDTH_16BIT = 0x1, | ||
| 36 | LNW_DMA_WIDTH_32BIT = 0x2, | ||
| 37 | }; | ||
| 38 | |||
| 39 | /*DMA mode configurations*/ | ||
| 40 | enum intel_mid_dma_mode { | ||
| 41 | LNW_DMA_PER_TO_MEM = 0, /*periphral to memory configuration*/ | ||
| 42 | LNW_DMA_MEM_TO_PER, /*memory to periphral configuration*/ | ||
| 43 | LNW_DMA_MEM_TO_MEM, /*mem to mem confg (testing only)*/ | ||
| 44 | }; | ||
| 45 | |||
| 46 | /*DMA handshaking*/ | ||
| 47 | enum intel_mid_dma_hs_mode { | ||
| 48 | LNW_DMA_HW_HS = 0, /*HW Handshaking only*/ | ||
| 49 | LNW_DMA_SW_HS = 1, /*SW Handshaking not recommended*/ | ||
| 50 | }; | ||
| 51 | |||
| 52 | /*Burst size configuration*/ | ||
| 53 | enum intel_mid_dma_msize { | ||
| 54 | LNW_DMA_MSIZE_1 = 0x0, | ||
| 55 | LNW_DMA_MSIZE_4 = 0x1, | ||
| 56 | LNW_DMA_MSIZE_8 = 0x2, | ||
| 57 | LNW_DMA_MSIZE_16 = 0x3, | ||
| 58 | LNW_DMA_MSIZE_32 = 0x4, | ||
| 59 | LNW_DMA_MSIZE_64 = 0x5, | ||
| 60 | }; | ||
| 61 | |||
| 62 | /** | ||
| 63 | * struct intel_mid_dma_slave - DMA slave structure | ||
| 64 | * | ||
| 65 | * @dirn: DMA trf direction | ||
| 66 | * @src_width: tx register width | ||
| 67 | * @dst_width: rx register width | ||
| 68 | * @hs_mode: HW/SW handshaking mode | ||
| 69 | * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem) | ||
| 70 | * @src_msize: Source DMA burst size | ||
| 71 | * @dst_msize: Dst DMA burst size | ||
| 72 | * @device_instance: DMA peripheral device instance, we can have multiple | ||
| 73 | * peripheral device connected to single DMAC | ||
| 74 | */ | ||
| 75 | struct intel_mid_dma_slave { | ||
| 76 | enum dma_data_direction dirn; | ||
| 77 | enum intel_mid_dma_width src_width; /*width of DMA src txn*/ | ||
| 78 | enum intel_mid_dma_width dst_width; /*width of DMA dst txn*/ | ||
| 79 | enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/ | ||
| 80 | enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ | ||
| 81 | enum intel_mid_dma_msize src_msize; /*size if src burst*/ | ||
| 82 | enum intel_mid_dma_msize dst_msize; /*size of dst burst*/ | ||
| 83 | unsigned int device_instance; /*0, 1 for periphral instance*/ | ||
| 84 | }; | ||
| 85 | |||
| 86 | #endif /*__INTEL_MID_DMA_H__*/ | ||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c2331138ca1b..a0384a4d1e6f 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -53,16 +53,21 @@ | |||
| 53 | * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. | 53 | * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished. |
| 54 | * Used by threaded interrupts which need to keep the | 54 | * Used by threaded interrupts which need to keep the |
| 55 | * irq line disabled until the threaded handler has been run. | 55 | * irq line disabled until the threaded handler has been run. |
| 56 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend | ||
| 57 | * | ||
| 56 | */ | 58 | */ |
| 57 | #define IRQF_DISABLED 0x00000020 | 59 | #define IRQF_DISABLED 0x00000020 |
| 58 | #define IRQF_SAMPLE_RANDOM 0x00000040 | 60 | #define IRQF_SAMPLE_RANDOM 0x00000040 |
| 59 | #define IRQF_SHARED 0x00000080 | 61 | #define IRQF_SHARED 0x00000080 |
| 60 | #define IRQF_PROBE_SHARED 0x00000100 | 62 | #define IRQF_PROBE_SHARED 0x00000100 |
| 61 | #define IRQF_TIMER 0x00000200 | 63 | #define __IRQF_TIMER 0x00000200 |
| 62 | #define IRQF_PERCPU 0x00000400 | 64 | #define IRQF_PERCPU 0x00000400 |
| 63 | #define IRQF_NOBALANCING 0x00000800 | 65 | #define IRQF_NOBALANCING 0x00000800 |
| 64 | #define IRQF_IRQPOLL 0x00001000 | 66 | #define IRQF_IRQPOLL 0x00001000 |
| 65 | #define IRQF_ONESHOT 0x00002000 | 67 | #define IRQF_ONESHOT 0x00002000 |
| 68 | #define IRQF_NO_SUSPEND 0x00004000 | ||
| 69 | |||
| 70 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) | ||
| 66 | 71 | ||
| 67 | /* | 72 | /* |
| 68 | * Bits used by threaded handlers: | 73 | * Bits used by threaded handlers: |
diff --git a/include/linux/io.h b/include/linux/io.h index 6c7f0ba0d5fa..7fd2d2138bf3 100644 --- a/include/linux/io.h +++ b/include/linux/io.h | |||
| @@ -29,10 +29,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count); | |||
| 29 | 29 | ||
| 30 | #ifdef CONFIG_MMU | 30 | #ifdef CONFIG_MMU |
| 31 | int ioremap_page_range(unsigned long addr, unsigned long end, | 31 | int ioremap_page_range(unsigned long addr, unsigned long end, |
| 32 | unsigned long phys_addr, pgprot_t prot); | 32 | phys_addr_t phys_addr, pgprot_t prot); |
| 33 | #else | 33 | #else |
| 34 | static inline int ioremap_page_range(unsigned long addr, unsigned long end, | 34 | static inline int ioremap_page_range(unsigned long addr, unsigned long end, |
| 35 | unsigned long phys_addr, pgprot_t prot) | 35 | phys_addr_t phys_addr, pgprot_t prot) |
| 36 | { | 36 | { |
| 37 | return 0; | 37 | return 0; |
| 38 | } | 38 | } |
diff --git a/include/linux/iommu-helper.h b/include/linux/iommu-helper.h index 64d1b638745d..86bdeffe43ad 100644 --- a/include/linux/iommu-helper.h +++ b/include/linux/iommu-helper.h | |||
| @@ -1,6 +1,8 @@ | |||
| 1 | #ifndef _LINUX_IOMMU_HELPER_H | 1 | #ifndef _LINUX_IOMMU_HELPER_H |
| 2 | #define _LINUX_IOMMU_HELPER_H | 2 | #define _LINUX_IOMMU_HELPER_H |
| 3 | 3 | ||
| 4 | #include <linux/kernel.h> | ||
| 5 | |||
| 4 | static inline unsigned long iommu_device_max_index(unsigned long size, | 6 | static inline unsigned long iommu_device_max_index(unsigned long size, |
| 5 | unsigned long offset, | 7 | unsigned long offset, |
| 6 | u64 dma_mask) | 8 | u64 dma_mask) |
| @@ -20,7 +22,13 @@ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, | |||
| 20 | unsigned long boundary_size, | 22 | unsigned long boundary_size, |
| 21 | unsigned long align_mask); | 23 | unsigned long align_mask); |
| 22 | 24 | ||
| 23 | extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len, | 25 | static inline unsigned long iommu_num_pages(unsigned long addr, |
| 24 | unsigned long io_page_size); | 26 | unsigned long len, |
| 27 | unsigned long io_page_size) | ||
| 28 | { | ||
| 29 | unsigned long size = (addr & (io_page_size - 1)) + len; | ||
| 30 | |||
| 31 | return DIV_ROUND_UP(size, io_page_size); | ||
| 32 | } | ||
| 25 | 33 | ||
| 26 | #endif | 34 | #endif |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index be22ad83689c..0a2ba4098996 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
| @@ -30,6 +30,7 @@ struct iommu_domain { | |||
| 30 | }; | 30 | }; |
| 31 | 31 | ||
| 32 | #define IOMMU_CAP_CACHE_COHERENCY 0x1 | 32 | #define IOMMU_CAP_CACHE_COHERENCY 0x1 |
| 33 | #define IOMMU_CAP_INTR_REMAP 0x2 /* isolates device intrs */ | ||
| 33 | 34 | ||
| 34 | struct iommu_ops { | 35 | struct iommu_ops { |
| 35 | int (*domain_init)(struct iommu_domain *domain); | 36 | int (*domain_init)(struct iommu_domain *domain); |
diff --git a/include/linux/istallion.h b/include/linux/istallion.h index 7faca98c7d14..ad700a60c158 100644 --- a/include/linux/istallion.h +++ b/include/linux/istallion.h | |||
| @@ -86,7 +86,7 @@ struct stlibrd { | |||
| 86 | unsigned long magic; | 86 | unsigned long magic; |
| 87 | unsigned int brdnr; | 87 | unsigned int brdnr; |
| 88 | unsigned int brdtype; | 88 | unsigned int brdtype; |
| 89 | unsigned int state; | 89 | unsigned long state; |
| 90 | unsigned int nrpanels; | 90 | unsigned int nrpanels; |
| 91 | unsigned int nrports; | 91 | unsigned int nrports; |
| 92 | unsigned int nrdevs; | 92 | unsigned int nrdevs; |
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index adf832dec3f3..0b52924a0cb6 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h | |||
| @@ -601,13 +601,13 @@ struct transaction_s | |||
| 601 | * Number of outstanding updates running on this transaction | 601 | * Number of outstanding updates running on this transaction |
| 602 | * [t_handle_lock] | 602 | * [t_handle_lock] |
| 603 | */ | 603 | */ |
| 604 | int t_updates; | 604 | atomic_t t_updates; |
| 605 | 605 | ||
| 606 | /* | 606 | /* |
| 607 | * Number of buffers reserved for use by all handles in this transaction | 607 | * Number of buffers reserved for use by all handles in this transaction |
| 608 | * handle but not yet modified. [t_handle_lock] | 608 | * handle but not yet modified. [t_handle_lock] |
| 609 | */ | 609 | */ |
| 610 | int t_outstanding_credits; | 610 | atomic_t t_outstanding_credits; |
| 611 | 611 | ||
| 612 | /* | 612 | /* |
| 613 | * Forward and backward links for the circular list of all transactions | 613 | * Forward and backward links for the circular list of all transactions |
| @@ -629,7 +629,7 @@ struct transaction_s | |||
| 629 | /* | 629 | /* |
| 630 | * How many handles used this transaction? [t_handle_lock] | 630 | * How many handles used this transaction? [t_handle_lock] |
| 631 | */ | 631 | */ |
| 632 | int t_handle_count; | 632 | atomic_t t_handle_count; |
| 633 | 633 | ||
| 634 | /* | 634 | /* |
| 635 | * This transaction is being forced and some process is | 635 | * This transaction is being forced and some process is |
| @@ -764,7 +764,7 @@ struct journal_s | |||
| 764 | /* | 764 | /* |
| 765 | * Protect the various scalars in the journal | 765 | * Protect the various scalars in the journal |
| 766 | */ | 766 | */ |
| 767 | spinlock_t j_state_lock; | 767 | rwlock_t j_state_lock; |
| 768 | 768 | ||
| 769 | /* | 769 | /* |
| 770 | * Number of processes waiting to create a barrier lock [j_state_lock] | 770 | * Number of processes waiting to create a barrier lock [j_state_lock] |
| @@ -1082,7 +1082,9 @@ static inline handle_t *journal_current_handle(void) | |||
| 1082 | */ | 1082 | */ |
| 1083 | 1083 | ||
| 1084 | extern handle_t *jbd2_journal_start(journal_t *, int nblocks); | 1084 | extern handle_t *jbd2_journal_start(journal_t *, int nblocks); |
| 1085 | extern int jbd2_journal_restart (handle_t *, int nblocks); | 1085 | extern handle_t *jbd2__journal_start(journal_t *, int nblocks, int gfp_mask); |
| 1086 | extern int jbd2_journal_restart(handle_t *, int nblocks); | ||
| 1087 | extern int jbd2__journal_restart(handle_t *, int nblocks, int gfp_mask); | ||
| 1086 | extern int jbd2_journal_extend (handle_t *, int nblocks); | 1088 | extern int jbd2_journal_extend (handle_t *, int nblocks); |
| 1087 | extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); | 1089 | extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *); |
| 1088 | extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); | 1090 | extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *); |
| @@ -1257,8 +1259,8 @@ static inline int jbd_space_needed(journal_t *journal) | |||
| 1257 | { | 1259 | { |
| 1258 | int nblocks = journal->j_max_transaction_buffers; | 1260 | int nblocks = journal->j_max_transaction_buffers; |
| 1259 | if (journal->j_committing_transaction) | 1261 | if (journal->j_committing_transaction) |
| 1260 | nblocks += journal->j_committing_transaction-> | 1262 | nblocks += atomic_read(&journal->j_committing_transaction-> |
| 1261 | t_outstanding_credits; | 1263 | t_outstanding_credits); |
| 1262 | return nblocks; | 1264 | return nblocks; |
| 1263 | } | 1265 | } |
| 1264 | 1266 | ||
diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h index edb9231f1898..a18b719f49d4 100644 --- a/include/linux/jffs2.h +++ b/include/linux/jffs2.h | |||
| @@ -1,7 +1,8 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * JFFS2 -- Journalling Flash File System, Version 2. | 2 | * JFFS2 -- Journalling Flash File System, Version 2. |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2001-2003 Red Hat, Inc. | 4 | * Copyright © 2001-2007 Red Hat, Inc. |
| 5 | * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org> | ||
| 5 | * | 6 | * |
| 6 | * Created by David Woodhouse <dwmw2@infradead.org> | 7 | * Created by David Woodhouse <dwmw2@infradead.org> |
| 7 | * | 8 | * |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 5de838b0fc1a..d848cb854655 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -177,11 +177,12 @@ struct va_format { | |||
| 177 | }; | 177 | }; |
| 178 | 178 | ||
| 179 | extern struct atomic_notifier_head panic_notifier_list; | 179 | extern struct atomic_notifier_head panic_notifier_list; |
| 180 | extern long (*panic_blink)(long time); | 180 | extern long (*panic_blink)(int state); |
| 181 | NORET_TYPE void panic(const char * fmt, ...) | 181 | NORET_TYPE void panic(const char * fmt, ...) |
| 182 | __attribute__ ((NORET_AND format (printf, 1, 2))) __cold; | 182 | __attribute__ ((NORET_AND format (printf, 1, 2))) __cold; |
| 183 | extern void oops_enter(void); | 183 | extern void oops_enter(void); |
| 184 | extern void oops_exit(void); | 184 | extern void oops_exit(void); |
| 185 | void print_oops_end_marker(void); | ||
| 185 | extern int oops_may_print(void); | 186 | extern int oops_may_print(void); |
| 186 | NORET_TYPE void do_exit(long error_code) | 187 | NORET_TYPE void do_exit(long error_code) |
| 187 | ATTRIB_NORET; | 188 | ATTRIB_NORET; |
| @@ -252,6 +253,13 @@ extern struct pid *session_of_pgrp(struct pid *pgrp); | |||
| 252 | #define FW_WARN "[Firmware Warn]: " | 253 | #define FW_WARN "[Firmware Warn]: " |
| 253 | #define FW_INFO "[Firmware Info]: " | 254 | #define FW_INFO "[Firmware Info]: " |
| 254 | 255 | ||
| 256 | /* | ||
| 257 | * HW_ERR | ||
| 258 | * Add this to a message for hardware errors, so that user can report | ||
| 259 | * it to hardware vendor instead of LKML or software vendor. | ||
| 260 | */ | ||
| 261 | #define HW_ERR "[Hardware Error]: " | ||
| 262 | |||
| 255 | #ifdef CONFIG_PRINTK | 263 | #ifdef CONFIG_PRINTK |
| 256 | asmlinkage int vprintk(const char *fmt, va_list args) | 264 | asmlinkage int vprintk(const char *fmt, va_list args) |
| 257 | __attribute__ ((format (printf, 1, 0))); | 265 | __attribute__ ((format (printf, 1, 0))); |
| @@ -513,9 +521,6 @@ extern void tracing_start(void); | |||
| 513 | extern void tracing_stop(void); | 521 | extern void tracing_stop(void); |
| 514 | extern void ftrace_off_permanent(void); | 522 | extern void ftrace_off_permanent(void); |
| 515 | 523 | ||
| 516 | extern void | ||
| 517 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3); | ||
| 518 | |||
| 519 | static inline void __attribute__ ((format (printf, 1, 2))) | 524 | static inline void __attribute__ ((format (printf, 1, 2))) |
| 520 | ____trace_printk_check_format(const char *fmt, ...) | 525 | ____trace_printk_check_format(const char *fmt, ...) |
| 521 | { | 526 | { |
| @@ -591,8 +596,6 @@ __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); | |||
| 591 | 596 | ||
| 592 | extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); | 597 | extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); |
| 593 | #else | 598 | #else |
| 594 | static inline void | ||
| 595 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } | ||
| 596 | static inline int | 599 | static inline int |
| 597 | trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); | 600 | trace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); |
| 598 | 601 | ||
| @@ -614,17 +617,6 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 614 | #endif /* CONFIG_TRACING */ | 617 | #endif /* CONFIG_TRACING */ |
| 615 | 618 | ||
| 616 | /* | 619 | /* |
| 617 | * Display an IP address in readable format. | ||
| 618 | */ | ||
| 619 | |||
| 620 | #define NIPQUAD(addr) \ | ||
| 621 | ((unsigned char *)&addr)[0], \ | ||
| 622 | ((unsigned char *)&addr)[1], \ | ||
| 623 | ((unsigned char *)&addr)[2], \ | ||
| 624 | ((unsigned char *)&addr)[3] | ||
| 625 | #define NIPQUAD_FMT "%u.%u.%u.%u" | ||
| 626 | |||
| 627 | /* | ||
| 628 | * min()/max()/clamp() macros that also do | 620 | * min()/max()/clamp() macros that also do |
| 629 | * strict type-checking.. See the | 621 | * strict type-checking.. See the |
| 630 | * "unnecessary" pointer comparison. | 622 | * "unnecessary" pointer comparison. |
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 9fad0527344f..311f8753d713 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
| @@ -1,8 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * A generic kernel FIFO implementation. | 2 | * A generic kernel FIFO implementation |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net> | 4 | * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net> |
| 5 | * Copyright (C) 2004 Stelian Pop <stelian@popies.net> | ||
| 6 | * | 5 | * |
| 7 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
| @@ -20,8 +19,11 @@ | |||
| 20 | * | 19 | * |
| 21 | */ | 20 | */ |
| 22 | 21 | ||
| 22 | #ifndef _LINUX_KFIFO_H | ||
| 23 | #define _LINUX_KFIFO_H | ||
| 24 | |||
| 23 | /* | 25 | /* |
| 24 | * Howto porting drivers to the new generic fifo API: | 26 | * How to porting drivers to the new generic FIFO API: |
| 25 | * | 27 | * |
| 26 | * - Modify the declaration of the "struct kfifo *" object into a | 28 | * - Modify the declaration of the "struct kfifo *" object into a |
| 27 | * in-place "struct kfifo" object | 29 | * in-place "struct kfifo" object |
| @@ -30,586 +32,813 @@ | |||
| 30 | * passed as the first argument to this functions | 32 | * passed as the first argument to this functions |
| 31 | * - Replace the use of __kfifo_put into kfifo_in and __kfifo_get | 33 | * - Replace the use of __kfifo_put into kfifo_in and __kfifo_get |
| 32 | * into kfifo_out | 34 | * into kfifo_out |
| 33 | * - Replace the use of kfifo_put into kfifo_in_locked and kfifo_get | 35 | * - Replace the use of kfifo_put into kfifo_in_spinlocked and kfifo_get |
| 34 | * into kfifo_out_locked | 36 | * into kfifo_out_spinlocked |
| 35 | * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc | 37 | * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc |
| 36 | * must be passed now to the kfifo_in_locked and kfifo_out_locked | 38 | * must be passed now to the kfifo_in_spinlocked and kfifo_out_spinlocked |
| 37 | * as the last parameter. | 39 | * as the last parameter |
| 38 | * - All formerly name __kfifo_* functions has been renamed into kfifo_* | 40 | * - The formerly __kfifo_* functions are renamed into kfifo_* |
| 39 | */ | 41 | */ |
| 40 | 42 | ||
| 41 | #ifndef _LINUX_KFIFO_H | 43 | /* |
| 42 | #define _LINUX_KFIFO_H | 44 | * Note about locking : There is no locking required until only * one reader |
| 45 | * and one writer is using the fifo and no kfifo_reset() will be * called | ||
| 46 | * kfifo_reset_out() can be safely used, until it will be only called | ||
| 47 | * in the reader thread. | ||
| 48 | * For multiple writer and one reader there is only a need to lock the writer. | ||
| 49 | * And vice versa for only one writer and multiple reader there is only a need | ||
| 50 | * to lock the reader. | ||
| 51 | */ | ||
| 43 | 52 | ||
| 44 | #include <linux/kernel.h> | 53 | #include <linux/kernel.h> |
| 45 | #include <linux/spinlock.h> | 54 | #include <linux/spinlock.h> |
| 46 | 55 | #include <linux/stddef.h> | |
| 47 | struct kfifo { | 56 | #include <linux/scatterlist.h> |
| 48 | unsigned char *buffer; /* the buffer holding the data */ | 57 | |
| 49 | unsigned int size; /* the size of the allocated buffer */ | 58 | struct __kfifo { |
| 50 | unsigned int in; /* data is added at offset (in % size) */ | 59 | unsigned int in; |
| 51 | unsigned int out; /* data is extracted from off. (out % size) */ | 60 | unsigned int out; |
| 61 | unsigned int mask; | ||
| 62 | unsigned int esize; | ||
| 63 | void *data; | ||
| 52 | }; | 64 | }; |
| 53 | 65 | ||
| 54 | /* | 66 | #define __STRUCT_KFIFO_COMMON(datatype, recsize, ptrtype) \ |
| 55 | * Macros for declaration and initialization of the kfifo datatype | 67 | union { \ |
| 56 | */ | 68 | struct __kfifo kfifo; \ |
| 57 | 69 | datatype *type; \ | |
| 58 | /* helper macro */ | 70 | char (*rectype)[recsize]; \ |
| 59 | #define __kfifo_initializer(s, b) \ | 71 | ptrtype *ptr; \ |
| 60 | (struct kfifo) { \ | 72 | const ptrtype *ptr_const; \ |
| 61 | .size = s, \ | ||
| 62 | .in = 0, \ | ||
| 63 | .out = 0, \ | ||
| 64 | .buffer = b \ | ||
| 65 | } | 73 | } |
| 66 | 74 | ||
| 67 | /** | 75 | #define __STRUCT_KFIFO(type, size, recsize, ptrtype) \ |
| 68 | * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer | 76 | { \ |
| 69 | * @name: name of the declared kfifo datatype | 77 | __STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \ |
| 70 | * @size: size of the fifo buffer. Must be a power of two. | 78 | type buf[((size < 2) || (size & (size - 1))) ? -1 : size]; \ |
| 71 | * | ||
| 72 | * Note1: the macro can be used inside struct or union declaration | ||
| 73 | * Note2: the macro creates two objects: | ||
| 74 | * A kfifo object with the given name and a buffer for the kfifo | ||
| 75 | * object named name##kfifo_buffer | ||
| 76 | */ | ||
| 77 | #define DECLARE_KFIFO(name, size) \ | ||
| 78 | union { \ | ||
| 79 | struct kfifo name; \ | ||
| 80 | unsigned char name##kfifo_buffer[size + sizeof(struct kfifo)]; \ | ||
| 81 | } | 79 | } |
| 82 | 80 | ||
| 83 | /** | 81 | #define STRUCT_KFIFO(type, size) \ |
| 84 | * INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO | 82 | struct __STRUCT_KFIFO(type, size, 0, type) |
| 85 | * @name: name of the declared kfifo datatype | 83 | |
| 84 | #define __STRUCT_KFIFO_PTR(type, recsize, ptrtype) \ | ||
| 85 | { \ | ||
| 86 | __STRUCT_KFIFO_COMMON(type, recsize, ptrtype); \ | ||
| 87 | type buf[0]; \ | ||
| 88 | } | ||
| 89 | |||
| 90 | #define STRUCT_KFIFO_PTR(type) \ | ||
| 91 | struct __STRUCT_KFIFO_PTR(type, 0, type) | ||
| 92 | |||
| 93 | /* | ||
| 94 | * define compatibility "struct kfifo" for dynamic allocated fifos | ||
| 86 | */ | 95 | */ |
| 87 | #define INIT_KFIFO(name) \ | 96 | struct kfifo __STRUCT_KFIFO_PTR(unsigned char, 0, void); |
| 88 | name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \ | ||
| 89 | sizeof(struct kfifo), \ | ||
| 90 | name##kfifo_buffer + sizeof(struct kfifo)) | ||
| 91 | 97 | ||
| 92 | /** | 98 | #define STRUCT_KFIFO_REC_1(size) \ |
| 93 | * DEFINE_KFIFO - macro to define and initialize a kfifo | 99 | struct __STRUCT_KFIFO(unsigned char, size, 1, void) |
| 94 | * @name: name of the declared kfifo datatype | 100 | |
| 95 | * @size: size of the fifo buffer. Must be a power of two. | 101 | #define STRUCT_KFIFO_REC_2(size) \ |
| 96 | * | 102 | struct __STRUCT_KFIFO(unsigned char, size, 2, void) |
| 97 | * Note1: the macro can be used for global and local kfifo data type variables | 103 | |
| 98 | * Note2: the macro creates two objects: | 104 | /* |
| 99 | * A kfifo object with the given name and a buffer for the kfifo | 105 | * define kfifo_rec types |
| 100 | * object named name##kfifo_buffer | ||
| 101 | */ | 106 | */ |
| 102 | #define DEFINE_KFIFO(name, size) \ | 107 | struct kfifo_rec_ptr_1 __STRUCT_KFIFO_PTR(unsigned char, 1, void); |
| 103 | unsigned char name##kfifo_buffer[size]; \ | 108 | struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void); |
| 104 | struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer) | ||
| 105 | 109 | ||
| 106 | extern void kfifo_init(struct kfifo *fifo, void *buffer, | 110 | /* |
| 107 | unsigned int size); | 111 | * helper macro to distinguish between real in place fifo where the fifo |
| 108 | extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size, | 112 | * array is a part of the structure and the fifo type where the array is |
| 109 | gfp_t gfp_mask); | 113 | * outside of the fifo structure. |
| 110 | extern void kfifo_free(struct kfifo *fifo); | 114 | */ |
| 111 | extern unsigned int kfifo_in(struct kfifo *fifo, | 115 | #define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo)) |
| 112 | const void *from, unsigned int len); | ||
| 113 | extern __must_check unsigned int kfifo_out(struct kfifo *fifo, | ||
| 114 | void *to, unsigned int len); | ||
| 115 | extern __must_check unsigned int kfifo_out_peek(struct kfifo *fifo, | ||
| 116 | void *to, unsigned int len, unsigned offset); | ||
| 117 | 116 | ||
| 118 | /** | 117 | /** |
| 119 | * kfifo_initialized - Check if kfifo is initialized. | 118 | * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object |
| 120 | * @fifo: fifo to check | 119 | * @fifo: name of the declared fifo |
| 121 | * Return %true if FIFO is initialized, otherwise %false. | 120 | * @type: type of the fifo elements |
| 122 | * Assumes the fifo was 0 before. | ||
| 123 | */ | 121 | */ |
| 124 | static inline bool kfifo_initialized(struct kfifo *fifo) | 122 | #define DECLARE_KFIFO_PTR(fifo, type) STRUCT_KFIFO_PTR(type) fifo |
| 125 | { | ||
| 126 | return fifo->buffer != NULL; | ||
| 127 | } | ||
| 128 | 123 | ||
| 129 | /** | 124 | /** |
| 130 | * kfifo_reset - removes the entire FIFO contents | 125 | * DECLARE_KFIFO - macro to declare a fifo object |
| 131 | * @fifo: the fifo to be emptied. | 126 | * @fifo: name of the declared fifo |
| 127 | * @type: type of the fifo elements | ||
| 128 | * @size: the number of elements in the fifo, this must be a power of 2 | ||
| 132 | */ | 129 | */ |
| 133 | static inline void kfifo_reset(struct kfifo *fifo) | 130 | #define DECLARE_KFIFO(fifo, type, size) STRUCT_KFIFO(type, size) fifo |
| 134 | { | ||
| 135 | fifo->in = fifo->out = 0; | ||
| 136 | } | ||
| 137 | 131 | ||
| 138 | /** | 132 | /** |
| 139 | * kfifo_reset_out - skip FIFO contents | 133 | * INIT_KFIFO - Initialize a fifo declared by DECLARE_KFIFO |
| 140 | * @fifo: the fifo to be emptied. | 134 | * @fifo: name of the declared fifo datatype |
| 141 | */ | 135 | */ |
| 142 | static inline void kfifo_reset_out(struct kfifo *fifo) | 136 | #define INIT_KFIFO(fifo) \ |
| 143 | { | 137 | (void)({ \ |
| 144 | smp_mb(); | 138 | typeof(&(fifo)) __tmp = &(fifo); \ |
| 145 | fifo->out = fifo->in; | 139 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| 146 | } | 140 | __kfifo->in = 0; \ |
| 141 | __kfifo->out = 0; \ | ||
| 142 | __kfifo->mask = __is_kfifo_ptr(__tmp) ? 0 : ARRAY_SIZE(__tmp->buf) - 1;\ | ||
| 143 | __kfifo->esize = sizeof(*__tmp->buf); \ | ||
| 144 | __kfifo->data = __is_kfifo_ptr(__tmp) ? NULL : __tmp->buf; \ | ||
| 145 | }) | ||
| 147 | 146 | ||
| 148 | /** | 147 | /** |
| 149 | * kfifo_size - returns the size of the fifo in bytes | 148 | * DEFINE_KFIFO - macro to define and initialize a fifo |
| 150 | * @fifo: the fifo to be used. | 149 | * @fifo: name of the declared fifo datatype |
| 151 | */ | 150 | * @type: type of the fifo elements |
| 152 | static inline __must_check unsigned int kfifo_size(struct kfifo *fifo) | 151 | * @size: the number of elements in the fifo, this must be a power of 2 |
| 152 | * | ||
| 153 | * Note: the macro can be used for global and local fifo data type variables. | ||
| 154 | */ | ||
| 155 | #define DEFINE_KFIFO(fifo, type, size) \ | ||
| 156 | DECLARE_KFIFO(fifo, type, size) = \ | ||
| 157 | (typeof(fifo)) { \ | ||
| 158 | { \ | ||
| 159 | { \ | ||
| 160 | .in = 0, \ | ||
| 161 | .out = 0, \ | ||
| 162 | .mask = __is_kfifo_ptr(&(fifo)) ? \ | ||
| 163 | 0 : \ | ||
| 164 | ARRAY_SIZE((fifo).buf) - 1, \ | ||
| 165 | .esize = sizeof(*(fifo).buf), \ | ||
| 166 | .data = __is_kfifo_ptr(&(fifo)) ? \ | ||
| 167 | NULL : \ | ||
| 168 | (fifo).buf, \ | ||
| 169 | } \ | ||
| 170 | } \ | ||
| 171 | } | ||
| 172 | |||
| 173 | |||
| 174 | static inline unsigned int __must_check | ||
| 175 | __kfifo_must_check_helper(unsigned int val) | ||
| 153 | { | 176 | { |
| 154 | return fifo->size; | 177 | return val; |
| 155 | } | 178 | } |
| 156 | 179 | ||
| 157 | /** | 180 | /** |
| 158 | * kfifo_len - returns the number of used bytes in the FIFO | 181 | * kfifo_initialized - Check if the fifo is initialized |
| 159 | * @fifo: the fifo to be used. | 182 | * @fifo: address of the fifo to check |
| 183 | * | ||
| 184 | * Return %true if fifo is initialized, otherwise %false. | ||
| 185 | * Assumes the fifo was 0 before. | ||
| 160 | */ | 186 | */ |
| 161 | static inline unsigned int kfifo_len(struct kfifo *fifo) | 187 | #define kfifo_initialized(fifo) ((fifo)->kfifo.mask) |
| 162 | { | ||
| 163 | register unsigned int out; | ||
| 164 | |||
| 165 | out = fifo->out; | ||
| 166 | smp_rmb(); | ||
| 167 | return fifo->in - out; | ||
| 168 | } | ||
| 169 | 188 | ||
| 170 | /** | 189 | /** |
| 171 | * kfifo_is_empty - returns true if the fifo is empty | 190 | * kfifo_esize - returns the size of the element managed by the fifo |
| 172 | * @fifo: the fifo to be used. | 191 | * @fifo: address of the fifo to be used |
| 173 | */ | 192 | */ |
| 174 | static inline __must_check int kfifo_is_empty(struct kfifo *fifo) | 193 | #define kfifo_esize(fifo) ((fifo)->kfifo.esize) |
| 175 | { | ||
| 176 | return fifo->in == fifo->out; | ||
| 177 | } | ||
| 178 | 194 | ||
| 179 | /** | 195 | /** |
| 180 | * kfifo_is_full - returns true if the fifo is full | 196 | * kfifo_recsize - returns the size of the record length field |
| 181 | * @fifo: the fifo to be used. | 197 | * @fifo: address of the fifo to be used |
| 182 | */ | 198 | */ |
| 183 | static inline __must_check int kfifo_is_full(struct kfifo *fifo) | 199 | #define kfifo_recsize(fifo) (sizeof(*(fifo)->rectype)) |
| 184 | { | ||
| 185 | return kfifo_len(fifo) == kfifo_size(fifo); | ||
| 186 | } | ||
| 187 | 200 | ||
| 188 | /** | 201 | /** |
| 189 | * kfifo_avail - returns the number of bytes available in the FIFO | 202 | * kfifo_size - returns the size of the fifo in elements |
| 190 | * @fifo: the fifo to be used. | 203 | * @fifo: address of the fifo to be used |
| 191 | */ | 204 | */ |
| 192 | static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo) | 205 | #define kfifo_size(fifo) ((fifo)->kfifo.mask + 1) |
| 193 | { | ||
| 194 | return kfifo_size(fifo) - kfifo_len(fifo); | ||
| 195 | } | ||
| 196 | 206 | ||
| 197 | /** | 207 | /** |
| 198 | * kfifo_in_locked - puts some data into the FIFO using a spinlock for locking | 208 | * kfifo_reset - removes the entire fifo content |
| 199 | * @fifo: the fifo to be used. | 209 | * @fifo: address of the fifo to be used |
| 200 | * @from: the data to be added. | ||
| 201 | * @n: the length of the data to be added. | ||
| 202 | * @lock: pointer to the spinlock to use for locking. | ||
| 203 | * | 210 | * |
| 204 | * This function copies at most @n bytes from the @from buffer into | 211 | * Note: usage of kfifo_reset() is dangerous. It should be only called when the |
| 205 | * the FIFO depending on the free space, and returns the number of | 212 | * fifo is exclusived locked or when it is secured that no other thread is |
| 206 | * bytes copied. | 213 | * accessing the fifo. |
| 207 | */ | 214 | */ |
| 208 | static inline unsigned int kfifo_in_locked(struct kfifo *fifo, | 215 | #define kfifo_reset(fifo) \ |
| 209 | const void *from, unsigned int n, spinlock_t *lock) | 216 | (void)({ \ |
| 210 | { | 217 | typeof(fifo + 1) __tmp = (fifo); \ |
| 211 | unsigned long flags; | 218 | __tmp->kfifo.in = __tmp->kfifo.out = 0; \ |
| 212 | unsigned int ret; | 219 | }) |
| 213 | |||
| 214 | spin_lock_irqsave(lock, flags); | ||
| 215 | |||
| 216 | ret = kfifo_in(fifo, from, n); | ||
| 217 | |||
| 218 | spin_unlock_irqrestore(lock, flags); | ||
| 219 | |||
| 220 | return ret; | ||
| 221 | } | ||
| 222 | 220 | ||
| 223 | /** | 221 | /** |
| 224 | * kfifo_out_locked - gets some data from the FIFO using a spinlock for locking | 222 | * kfifo_reset_out - skip fifo content |
| 225 | * @fifo: the fifo to be used. | 223 | * @fifo: address of the fifo to be used |
| 226 | * @to: where the data must be copied. | ||
| 227 | * @n: the size of the destination buffer. | ||
| 228 | * @lock: pointer to the spinlock to use for locking. | ||
| 229 | * | 224 | * |
| 230 | * This function copies at most @n bytes from the FIFO into the | 225 | * Note: The usage of kfifo_reset_out() is safe until it will be only called |
| 231 | * @to buffer and returns the number of copied bytes. | 226 | * from the reader thread and there is only one concurrent reader. Otherwise |
| 232 | */ | 227 | * it is dangerous and must be handled in the same way as kfifo_reset(). |
| 233 | static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo, | ||
| 234 | void *to, unsigned int n, spinlock_t *lock) | ||
| 235 | { | ||
| 236 | unsigned long flags; | ||
| 237 | unsigned int ret; | ||
| 238 | |||
| 239 | spin_lock_irqsave(lock, flags); | ||
| 240 | |||
| 241 | ret = kfifo_out(fifo, to, n); | ||
| 242 | |||
| 243 | spin_unlock_irqrestore(lock, flags); | ||
| 244 | |||
| 245 | return ret; | ||
| 246 | } | ||
| 247 | |||
| 248 | extern void kfifo_skip(struct kfifo *fifo, unsigned int len); | ||
| 249 | |||
| 250 | extern __must_check int kfifo_from_user(struct kfifo *fifo, | ||
| 251 | const void __user *from, unsigned int n, unsigned *lenout); | ||
| 252 | |||
| 253 | extern __must_check int kfifo_to_user(struct kfifo *fifo, | ||
| 254 | void __user *to, unsigned int n, unsigned *lenout); | ||
| 255 | |||
| 256 | /* | ||
| 257 | * __kfifo_add_out internal helper function for updating the out offset | ||
| 258 | */ | 228 | */ |
| 259 | static inline void __kfifo_add_out(struct kfifo *fifo, | 229 | #define kfifo_reset_out(fifo) \ |
| 260 | unsigned int off) | 230 | (void)({ \ |
| 261 | { | 231 | typeof(fifo + 1) __tmp = (fifo); \ |
| 262 | smp_mb(); | 232 | __tmp->kfifo.out = __tmp->kfifo.in; \ |
| 263 | fifo->out += off; | 233 | }) |
| 264 | } | ||
| 265 | 234 | ||
| 266 | /* | 235 | /** |
| 267 | * __kfifo_add_in internal helper function for updating the in offset | 236 | * kfifo_len - returns the number of used elements in the fifo |
| 237 | * @fifo: address of the fifo to be used | ||
| 268 | */ | 238 | */ |
| 269 | static inline void __kfifo_add_in(struct kfifo *fifo, | 239 | #define kfifo_len(fifo) \ |
| 270 | unsigned int off) | 240 | ({ \ |
| 271 | { | 241 | typeof(fifo + 1) __tmpl = (fifo); \ |
| 272 | smp_wmb(); | 242 | __tmpl->kfifo.in - __tmpl->kfifo.out; \ |
| 273 | fifo->in += off; | 243 | }) |
| 274 | } | ||
| 275 | 244 | ||
| 276 | /* | 245 | /** |
| 277 | * __kfifo_off internal helper function for calculating the index of a | 246 | * kfifo_is_empty - returns true if the fifo is empty |
| 278 | * given offeset | 247 | * @fifo: address of the fifo to be used |
| 279 | */ | 248 | */ |
| 280 | static inline unsigned int __kfifo_off(struct kfifo *fifo, unsigned int off) | 249 | #define kfifo_is_empty(fifo) \ |
| 281 | { | 250 | ({ \ |
| 282 | return off & (fifo->size - 1); | 251 | typeof(fifo + 1) __tmpq = (fifo); \ |
| 283 | } | 252 | __tmpq->kfifo.in == __tmpq->kfifo.out; \ |
| 253 | }) | ||
| 284 | 254 | ||
| 285 | /* | 255 | /** |
| 286 | * __kfifo_peek_n internal helper function for determinate the length of | 256 | * kfifo_is_full - returns true if the fifo is full |
| 287 | * the next record in the fifo | 257 | * @fifo: address of the fifo to be used |
| 288 | */ | 258 | */ |
| 289 | static inline unsigned int __kfifo_peek_n(struct kfifo *fifo, | 259 | #define kfifo_is_full(fifo) \ |
| 290 | unsigned int recsize) | 260 | ({ \ |
| 291 | { | 261 | typeof(fifo + 1) __tmpq = (fifo); \ |
| 292 | #define __KFIFO_GET(fifo, off, shift) \ | 262 | kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ |
| 293 | ((fifo)->buffer[__kfifo_off((fifo), (fifo)->out+(off))] << (shift)) | 263 | }) |
| 294 | 264 | ||
| 295 | unsigned int l; | 265 | /** |
| 266 | * kfifo_avail - returns the number of unused elements in the fifo | ||
| 267 | * @fifo: address of the fifo to be used | ||
| 268 | */ | ||
| 269 | #define kfifo_avail(fifo) \ | ||
| 270 | __kfifo_must_check_helper( \ | ||
| 271 | ({ \ | ||
| 272 | typeof(fifo + 1) __tmpq = (fifo); \ | ||
| 273 | const size_t __recsize = sizeof(*__tmpq->rectype); \ | ||
| 274 | unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ | ||
| 275 | (__recsize) ? ((__avail <= __recsize) ? 0 : \ | ||
| 276 | __kfifo_max_r(__avail - __recsize, __recsize)) : \ | ||
| 277 | __avail; \ | ||
| 278 | }) \ | ||
| 279 | ) | ||
| 296 | 280 | ||
| 297 | l = __KFIFO_GET(fifo, 0, 0); | 281 | /** |
| 282 | * kfifo_skip - skip output data | ||
| 283 | * @fifo: address of the fifo to be used | ||
| 284 | */ | ||
| 285 | #define kfifo_skip(fifo) \ | ||
| 286 | (void)({ \ | ||
| 287 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 288 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 289 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 290 | if (__recsize) \ | ||
| 291 | __kfifo_skip_r(__kfifo, __recsize); \ | ||
| 292 | else \ | ||
| 293 | __kfifo->out++; \ | ||
| 294 | }) | ||
| 298 | 295 | ||
| 299 | if (--recsize) | 296 | /** |
| 300 | l |= __KFIFO_GET(fifo, 1, 8); | 297 | * kfifo_peek_len - gets the size of the next fifo record |
| 298 | * @fifo: address of the fifo to be used | ||
| 299 | * | ||
| 300 | * This function returns the size of the next fifo record in number of bytes. | ||
| 301 | */ | ||
| 302 | #define kfifo_peek_len(fifo) \ | ||
| 303 | __kfifo_must_check_helper( \ | ||
| 304 | ({ \ | ||
| 305 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 306 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 307 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 308 | (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \ | ||
| 309 | __kfifo_len_r(__kfifo, __recsize); \ | ||
| 310 | }) \ | ||
| 311 | ) | ||
| 301 | 312 | ||
| 302 | return l; | 313 | /** |
| 303 | #undef __KFIFO_GET | 314 | * kfifo_alloc - dynamically allocates a new fifo buffer |
| 304 | } | 315 | * @fifo: pointer to the fifo |
| 316 | * @size: the number of elements in the fifo, this must be a power of 2 | ||
| 317 | * @gfp_mask: get_free_pages mask, passed to kmalloc() | ||
| 318 | * | ||
| 319 | * This macro dynamically allocates a new fifo buffer. | ||
| 320 | * | ||
| 321 | * The numer of elements will be rounded-up to a power of 2. | ||
| 322 | * The fifo will be release with kfifo_free(). | ||
| 323 | * Return 0 if no error, otherwise an error code. | ||
| 324 | */ | ||
| 325 | #define kfifo_alloc(fifo, size, gfp_mask) \ | ||
| 326 | __kfifo_must_check_helper( \ | ||
| 327 | ({ \ | ||
| 328 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 329 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 330 | __is_kfifo_ptr(__tmp) ? \ | ||
| 331 | __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ | ||
| 332 | -EINVAL; \ | ||
| 333 | }) \ | ||
| 334 | ) | ||
| 305 | 335 | ||
| 306 | /* | 336 | /** |
| 307 | * __kfifo_poke_n internal helper function for storing the length of | 337 | * kfifo_free - frees the fifo |
| 308 | * the next record into the fifo | 338 | * @fifo: the fifo to be freed |
| 309 | */ | 339 | */ |
| 310 | static inline void __kfifo_poke_n(struct kfifo *fifo, | 340 | #define kfifo_free(fifo) \ |
| 311 | unsigned int recsize, unsigned int n) | 341 | ({ \ |
| 312 | { | 342 | typeof(fifo + 1) __tmp = (fifo); \ |
| 313 | #define __KFIFO_PUT(fifo, off, val, shift) \ | 343 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
| 314 | ( \ | 344 | if (__is_kfifo_ptr(__tmp)) \ |
| 315 | (fifo)->buffer[__kfifo_off((fifo), (fifo)->in+(off))] = \ | 345 | __kfifo_free(__kfifo); \ |
| 316 | (unsigned char)((val) >> (shift)) \ | 346 | }) |
| 317 | ) | ||
| 318 | 347 | ||
| 319 | __KFIFO_PUT(fifo, 0, n, 0); | 348 | /** |
| 349 | * kfifo_init - initialize a fifo using a preallocated buffer | ||
| 350 | * @fifo: the fifo to assign the buffer | ||
| 351 | * @buffer: the preallocated buffer to be used | ||
| 352 | * @size: the size of the internal buffer, this have to be a power of 2 | ||
| 353 | * | ||
| 354 | * This macro initialize a fifo using a preallocated buffer. | ||
| 355 | * | ||
| 356 | * The numer of elements will be rounded-up to a power of 2. | ||
| 357 | * Return 0 if no error, otherwise an error code. | ||
| 358 | */ | ||
| 359 | #define kfifo_init(fifo, buffer, size) \ | ||
| 360 | ({ \ | ||
| 361 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 362 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 363 | __is_kfifo_ptr(__tmp) ? \ | ||
| 364 | __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ | ||
| 365 | -EINVAL; \ | ||
| 366 | }) | ||
| 320 | 367 | ||
| 321 | if (--recsize) | 368 | /** |
| 322 | __KFIFO_PUT(fifo, 1, n, 8); | 369 | * kfifo_put - put data into the fifo |
| 323 | #undef __KFIFO_PUT | 370 | * @fifo: address of the fifo to be used |
| 324 | } | 371 | * @val: the data to be added |
| 372 | * | ||
| 373 | * This macro copies the given value into the fifo. | ||
| 374 | * It returns 0 if the fifo was full. Otherwise it returns the number | ||
| 375 | * processed elements. | ||
| 376 | * | ||
| 377 | * Note that with only one concurrent reader and one concurrent | ||
| 378 | * writer, you don't need extra locking to use these macro. | ||
| 379 | */ | ||
| 380 | #define kfifo_put(fifo, val) \ | ||
| 381 | ({ \ | ||
| 382 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 383 | typeof(val + 1) __val = (val); \ | ||
| 384 | unsigned int __ret; \ | ||
| 385 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 386 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 387 | if (0) { \ | ||
| 388 | typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \ | ||
| 389 | __dummy = (typeof(__val))NULL; \ | ||
| 390 | } \ | ||
| 391 | if (__recsize) \ | ||
| 392 | __ret = __kfifo_in_r(__kfifo, __val, sizeof(*__val), \ | ||
| 393 | __recsize); \ | ||
| 394 | else { \ | ||
| 395 | __ret = !kfifo_is_full(__tmp); \ | ||
| 396 | if (__ret) { \ | ||
| 397 | (__is_kfifo_ptr(__tmp) ? \ | ||
| 398 | ((typeof(__tmp->type))__kfifo->data) : \ | ||
| 399 | (__tmp->buf) \ | ||
| 400 | )[__kfifo->in & __tmp->kfifo.mask] = \ | ||
| 401 | *(typeof(__tmp->type))__val; \ | ||
| 402 | smp_wmb(); \ | ||
| 403 | __kfifo->in++; \ | ||
| 404 | } \ | ||
| 405 | } \ | ||
| 406 | __ret; \ | ||
| 407 | }) | ||
| 325 | 408 | ||
| 326 | /* | 409 | /** |
| 327 | * __kfifo_in_... internal functions for put date into the fifo | 410 | * kfifo_get - get data from the fifo |
| 328 | * do not call it directly, use kfifo_in_rec() instead | 411 | * @fifo: address of the fifo to be used |
| 329 | */ | 412 | * @val: the var where to store the data to be added |
| 330 | extern unsigned int __kfifo_in_n(struct kfifo *fifo, | 413 | * |
| 331 | const void *from, unsigned int n, unsigned int recsize); | 414 | * This macro reads the data from the fifo. |
| 415 | * It returns 0 if the fifo was empty. Otherwise it returns the number | ||
| 416 | * processed elements. | ||
| 417 | * | ||
| 418 | * Note that with only one concurrent reader and one concurrent | ||
| 419 | * writer, you don't need extra locking to use these macro. | ||
| 420 | */ | ||
| 421 | #define kfifo_get(fifo, val) \ | ||
| 422 | __kfifo_must_check_helper( \ | ||
| 423 | ({ \ | ||
| 424 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 425 | typeof(val + 1) __val = (val); \ | ||
| 426 | unsigned int __ret; \ | ||
| 427 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 428 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 429 | if (0) \ | ||
| 430 | __val = (typeof(__tmp->ptr))0; \ | ||
| 431 | if (__recsize) \ | ||
| 432 | __ret = __kfifo_out_r(__kfifo, __val, sizeof(*__val), \ | ||
| 433 | __recsize); \ | ||
| 434 | else { \ | ||
| 435 | __ret = !kfifo_is_empty(__tmp); \ | ||
| 436 | if (__ret) { \ | ||
| 437 | *(typeof(__tmp->type))__val = \ | ||
| 438 | (__is_kfifo_ptr(__tmp) ? \ | ||
| 439 | ((typeof(__tmp->type))__kfifo->data) : \ | ||
| 440 | (__tmp->buf) \ | ||
| 441 | )[__kfifo->out & __tmp->kfifo.mask]; \ | ||
| 442 | smp_wmb(); \ | ||
| 443 | __kfifo->out++; \ | ||
| 444 | } \ | ||
| 445 | } \ | ||
| 446 | __ret; \ | ||
| 447 | }) \ | ||
| 448 | ) | ||
| 332 | 449 | ||
| 333 | extern unsigned int __kfifo_in_generic(struct kfifo *fifo, | 450 | /** |
| 334 | const void *from, unsigned int n, unsigned int recsize); | 451 | * kfifo_peek - get data from the fifo without removing |
| 452 | * @fifo: address of the fifo to be used | ||
| 453 | * @val: the var where to store the data to be added | ||
| 454 | * | ||
| 455 | * This reads the data from the fifo without removing it from the fifo. | ||
| 456 | * It returns 0 if the fifo was empty. Otherwise it returns the number | ||
| 457 | * processed elements. | ||
| 458 | * | ||
| 459 | * Note that with only one concurrent reader and one concurrent | ||
| 460 | * writer, you don't need extra locking to use these macro. | ||
| 461 | */ | ||
| 462 | #define kfifo_peek(fifo, val) \ | ||
| 463 | __kfifo_must_check_helper( \ | ||
| 464 | ({ \ | ||
| 465 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 466 | typeof(val + 1) __val = (val); \ | ||
| 467 | unsigned int __ret; \ | ||
| 468 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 469 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 470 | if (0) \ | ||
| 471 | __val = (typeof(__tmp->ptr))NULL; \ | ||
| 472 | if (__recsize) \ | ||
| 473 | __ret = __kfifo_out_peek_r(__kfifo, __val, sizeof(*__val), \ | ||
| 474 | __recsize); \ | ||
| 475 | else { \ | ||
| 476 | __ret = !kfifo_is_empty(__tmp); \ | ||
| 477 | if (__ret) { \ | ||
| 478 | *(typeof(__tmp->type))__val = \ | ||
| 479 | (__is_kfifo_ptr(__tmp) ? \ | ||
| 480 | ((typeof(__tmp->type))__kfifo->data) : \ | ||
| 481 | (__tmp->buf) \ | ||
| 482 | )[__kfifo->out & __tmp->kfifo.mask]; \ | ||
| 483 | smp_wmb(); \ | ||
| 484 | } \ | ||
| 485 | } \ | ||
| 486 | __ret; \ | ||
| 487 | }) \ | ||
| 488 | ) | ||
| 335 | 489 | ||
| 336 | static inline unsigned int __kfifo_in_rec(struct kfifo *fifo, | 490 | /** |
| 337 | const void *from, unsigned int n, unsigned int recsize) | 491 | * kfifo_in - put data into the fifo |
| 338 | { | 492 | * @fifo: address of the fifo to be used |
| 339 | unsigned int ret; | 493 | * @buf: the data to be added |
| 494 | * @n: number of elements to be added | ||
| 495 | * | ||
| 496 | * This macro copies the given buffer into the fifo and returns the | ||
| 497 | * number of copied elements. | ||
| 498 | * | ||
| 499 | * Note that with only one concurrent reader and one concurrent | ||
| 500 | * writer, you don't need extra locking to use these macro. | ||
| 501 | */ | ||
| 502 | #define kfifo_in(fifo, buf, n) \ | ||
| 503 | ({ \ | ||
| 504 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 505 | typeof(buf + 1) __buf = (buf); \ | ||
| 506 | unsigned long __n = (n); \ | ||
| 507 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 508 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 509 | if (0) { \ | ||
| 510 | typeof(__tmp->ptr_const) __dummy __attribute__ ((unused)); \ | ||
| 511 | __dummy = (typeof(__buf))NULL; \ | ||
| 512 | } \ | ||
| 513 | (__recsize) ?\ | ||
| 514 | __kfifo_in_r(__kfifo, __buf, __n, __recsize) : \ | ||
| 515 | __kfifo_in(__kfifo, __buf, __n); \ | ||
| 516 | }) | ||
| 340 | 517 | ||
| 341 | ret = __kfifo_in_n(fifo, from, n, recsize); | 518 | /** |
| 519 | * kfifo_in_spinlocked - put data into the fifo using a spinlock for locking | ||
| 520 | * @fifo: address of the fifo to be used | ||
| 521 | * @buf: the data to be added | ||
| 522 | * @n: number of elements to be added | ||
| 523 | * @lock: pointer to the spinlock to use for locking | ||
| 524 | * | ||
| 525 | * This macro copies the given values buffer into the fifo and returns the | ||
| 526 | * number of copied elements. | ||
| 527 | */ | ||
| 528 | #define kfifo_in_spinlocked(fifo, buf, n, lock) \ | ||
| 529 | ({ \ | ||
| 530 | unsigned long __flags; \ | ||
| 531 | unsigned int __ret; \ | ||
| 532 | spin_lock_irqsave(lock, __flags); \ | ||
| 533 | __ret = kfifo_in(fifo, buf, n); \ | ||
| 534 | spin_unlock_irqrestore(lock, __flags); \ | ||
| 535 | __ret; \ | ||
| 536 | }) | ||
| 537 | |||
| 538 | /* alias for kfifo_in_spinlocked, will be removed in a future release */ | ||
| 539 | #define kfifo_in_locked(fifo, buf, n, lock) \ | ||
| 540 | kfifo_in_spinlocked(fifo, buf, n, lock) | ||
| 342 | 541 | ||
| 343 | if (likely(ret == 0)) { | 542 | /** |
| 344 | if (recsize) | 543 | * kfifo_out - get data from the fifo |
| 345 | __kfifo_poke_n(fifo, recsize, n); | 544 | * @fifo: address of the fifo to be used |
| 346 | __kfifo_add_in(fifo, n + recsize); | 545 | * @buf: pointer to the storage buffer |
| 347 | } | 546 | * @n: max. number of elements to get |
| 348 | return ret; | 547 | * |
| 349 | } | 548 | * This macro get some data from the fifo and return the numbers of elements |
| 549 | * copied. | ||
| 550 | * | ||
| 551 | * Note that with only one concurrent reader and one concurrent | ||
| 552 | * writer, you don't need extra locking to use these macro. | ||
| 553 | */ | ||
| 554 | #define kfifo_out(fifo, buf, n) \ | ||
| 555 | __kfifo_must_check_helper( \ | ||
| 556 | ({ \ | ||
| 557 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 558 | typeof(buf + 1) __buf = (buf); \ | ||
| 559 | unsigned long __n = (n); \ | ||
| 560 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 561 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 562 | if (0) { \ | ||
| 563 | typeof(__tmp->ptr) __dummy = NULL; \ | ||
| 564 | __buf = __dummy; \ | ||
| 565 | } \ | ||
| 566 | (__recsize) ?\ | ||
| 567 | __kfifo_out_r(__kfifo, __buf, __n, __recsize) : \ | ||
| 568 | __kfifo_out(__kfifo, __buf, __n); \ | ||
| 569 | }) \ | ||
| 570 | ) | ||
| 571 | |||
| 572 | /** | ||
| 573 | * kfifo_out_spinlocked - get data from the fifo using a spinlock for locking | ||
| 574 | * @fifo: address of the fifo to be used | ||
| 575 | * @buf: pointer to the storage buffer | ||
| 576 | * @n: max. number of elements to get | ||
| 577 | * @lock: pointer to the spinlock to use for locking | ||
| 578 | * | ||
| 579 | * This macro get the data from the fifo and return the numbers of elements | ||
| 580 | * copied. | ||
| 581 | */ | ||
| 582 | #define kfifo_out_spinlocked(fifo, buf, n, lock) \ | ||
| 583 | __kfifo_must_check_helper( \ | ||
| 584 | ({ \ | ||
| 585 | unsigned long __flags; \ | ||
| 586 | unsigned int __ret; \ | ||
| 587 | spin_lock_irqsave(lock, __flags); \ | ||
| 588 | __ret = kfifo_out(fifo, buf, n); \ | ||
| 589 | spin_unlock_irqrestore(lock, __flags); \ | ||
| 590 | __ret; \ | ||
| 591 | }) \ | ||
| 592 | ) | ||
| 593 | |||
| 594 | /* alias for kfifo_out_spinlocked, will be removed in a future release */ | ||
| 595 | #define kfifo_out_locked(fifo, buf, n, lock) \ | ||
| 596 | kfifo_out_spinlocked(fifo, buf, n, lock) | ||
| 350 | 597 | ||
| 351 | /** | 598 | /** |
| 352 | * kfifo_in_rec - puts some record data into the FIFO | 599 | * kfifo_from_user - puts some data from user space into the fifo |
| 353 | * @fifo: the fifo to be used. | 600 | * @fifo: address of the fifo to be used |
| 354 | * @from: the data to be added. | 601 | * @from: pointer to the data to be added |
| 355 | * @n: the length of the data to be added. | 602 | * @len: the length of the data to be added |
| 356 | * @recsize: size of record field | 603 | * @copied: pointer to output variable to store the number of copied bytes |
| 357 | * | 604 | * |
| 358 | * This function copies @n bytes from the @from into the FIFO and returns | 605 | * This macro copies at most @len bytes from the @from into the |
| 359 | * the number of bytes which cannot be copied. | 606 | * fifo, depending of the available space and returns -EFAULT/0. |
| 360 | * A returned value greater than the @n value means that the record doesn't | ||
| 361 | * fit into the buffer. | ||
| 362 | * | 607 | * |
| 363 | * Note that with only one concurrent reader and one concurrent | 608 | * Note that with only one concurrent reader and one concurrent |
| 364 | * writer, you don't need extra locking to use these functions. | 609 | * writer, you don't need extra locking to use these macro. |
| 365 | */ | 610 | */ |
| 366 | static inline __must_check unsigned int kfifo_in_rec(struct kfifo *fifo, | 611 | #define kfifo_from_user(fifo, from, len, copied) \ |
| 367 | void *from, unsigned int n, unsigned int recsize) | 612 | __kfifo_must_check_helper( \ |
| 368 | { | 613 | ({ \ |
| 369 | if (!__builtin_constant_p(recsize)) | 614 | typeof(fifo + 1) __tmp = (fifo); \ |
| 370 | return __kfifo_in_generic(fifo, from, n, recsize); | 615 | const void __user *__from = (from); \ |
| 371 | return __kfifo_in_rec(fifo, from, n, recsize); | 616 | unsigned int __len = (len); \ |
| 372 | } | 617 | unsigned int *__copied = (copied); \ |
| 618 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 619 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 620 | (__recsize) ? \ | ||
| 621 | __kfifo_from_user_r(__kfifo, __from, __len, __copied, __recsize) : \ | ||
| 622 | __kfifo_from_user(__kfifo, __from, __len, __copied); \ | ||
| 623 | }) \ | ||
| 624 | ) | ||
| 373 | 625 | ||
| 374 | /* | 626 | /** |
| 375 | * __kfifo_out_... internal functions for get date from the fifo | 627 | * kfifo_to_user - copies data from the fifo into user space |
| 376 | * do not call it directly, use kfifo_out_rec() instead | 628 | * @fifo: address of the fifo to be used |
| 377 | */ | 629 | * @to: where the data must be copied |
| 378 | extern unsigned int __kfifo_out_n(struct kfifo *fifo, | 630 | * @len: the size of the destination buffer |
| 379 | void *to, unsigned int reclen, unsigned int recsize); | 631 | * @copied: pointer to output variable to store the number of copied bytes |
| 632 | * | ||
| 633 | * This macro copies at most @len bytes from the fifo into the | ||
| 634 | * @to buffer and returns -EFAULT/0. | ||
| 635 | * | ||
| 636 | * Note that with only one concurrent reader and one concurrent | ||
| 637 | * writer, you don't need extra locking to use these macro. | ||
| 638 | */ | ||
| 639 | #define kfifo_to_user(fifo, to, len, copied) \ | ||
| 640 | __kfifo_must_check_helper( \ | ||
| 641 | ({ \ | ||
| 642 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 643 | void __user *__to = (to); \ | ||
| 644 | unsigned int __len = (len); \ | ||
| 645 | unsigned int *__copied = (copied); \ | ||
| 646 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 647 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 648 | (__recsize) ? \ | ||
| 649 | __kfifo_to_user_r(__kfifo, __to, __len, __copied, __recsize) : \ | ||
| 650 | __kfifo_to_user(__kfifo, __to, __len, __copied); \ | ||
| 651 | }) \ | ||
| 652 | ) | ||
| 653 | |||
| 654 | /** | ||
| 655 | * kfifo_dma_in_prepare - setup a scatterlist for DMA input | ||
| 656 | * @fifo: address of the fifo to be used | ||
| 657 | * @sgl: pointer to the scatterlist array | ||
| 658 | * @nents: number of entries in the scatterlist array | ||
| 659 | * @len: number of elements to transfer | ||
| 660 | * | ||
| 661 | * This macro fills a scatterlist for DMA input. | ||
| 662 | * It returns the number entries in the scatterlist array. | ||
| 663 | * | ||
| 664 | * Note that with only one concurrent reader and one concurrent | ||
| 665 | * writer, you don't need extra locking to use these macros. | ||
| 666 | */ | ||
| 667 | #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ | ||
| 668 | ({ \ | ||
| 669 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 670 | struct scatterlist *__sgl = (sgl); \ | ||
| 671 | int __nents = (nents); \ | ||
| 672 | unsigned int __len = (len); \ | ||
| 673 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 674 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 675 | (__recsize) ? \ | ||
| 676 | __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ | ||
| 677 | __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len); \ | ||
| 678 | }) | ||
| 380 | 679 | ||
| 381 | extern unsigned int __kfifo_out_generic(struct kfifo *fifo, | 680 | /** |
| 382 | void *to, unsigned int n, | 681 | * kfifo_dma_in_finish - finish a DMA IN operation |
| 383 | unsigned int recsize, unsigned int *total); | 682 | * @fifo: address of the fifo to be used |
| 683 | * @len: number of bytes to received | ||
| 684 | * | ||
| 685 | * This macro finish a DMA IN operation. The in counter will be updated by | ||
| 686 | * the len parameter. No error checking will be done. | ||
| 687 | * | ||
| 688 | * Note that with only one concurrent reader and one concurrent | ||
| 689 | * writer, you don't need extra locking to use these macros. | ||
| 690 | */ | ||
| 691 | #define kfifo_dma_in_finish(fifo, len) \ | ||
| 692 | (void)({ \ | ||
| 693 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 694 | unsigned int __len = (len); \ | ||
| 695 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 696 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 697 | if (__recsize) \ | ||
| 698 | __kfifo_dma_in_finish_r(__kfifo, __len, __recsize); \ | ||
| 699 | else \ | ||
| 700 | __kfifo->in += __len / sizeof(*__tmp->type); \ | ||
| 701 | }) | ||
| 384 | 702 | ||
| 385 | static inline unsigned int __kfifo_out_rec(struct kfifo *fifo, | 703 | /** |
| 386 | void *to, unsigned int n, unsigned int recsize, | 704 | * kfifo_dma_out_prepare - setup a scatterlist for DMA output |
| 387 | unsigned int *total) | 705 | * @fifo: address of the fifo to be used |
| 388 | { | 706 | * @sgl: pointer to the scatterlist array |
| 389 | unsigned int l; | 707 | * @nents: number of entries in the scatterlist array |
| 390 | 708 | * @len: number of elements to transfer | |
| 391 | if (!recsize) { | 709 | * |
| 392 | l = n; | 710 | * This macro fills a scatterlist for DMA output which at most @len bytes |
| 393 | if (total) | 711 | * to transfer. |
| 394 | *total = l; | 712 | * It returns the number entries in the scatterlist array. |
| 395 | } else { | 713 | * A zero means there is no space available and the scatterlist is not filled. |
| 396 | l = __kfifo_peek_n(fifo, recsize); | 714 | * |
| 397 | if (total) | 715 | * Note that with only one concurrent reader and one concurrent |
| 398 | *total = l; | 716 | * writer, you don't need extra locking to use these macros. |
| 399 | if (n < l) | 717 | */ |
| 400 | return l; | 718 | #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ |
| 401 | } | 719 | ({ \ |
| 720 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 721 | struct scatterlist *__sgl = (sgl); \ | ||
| 722 | int __nents = (nents); \ | ||
| 723 | unsigned int __len = (len); \ | ||
| 724 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 725 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 726 | (__recsize) ? \ | ||
| 727 | __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ | ||
| 728 | __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len); \ | ||
| 729 | }) | ||
| 402 | 730 | ||
| 403 | return __kfifo_out_n(fifo, to, l, recsize); | 731 | /** |
| 404 | } | 732 | * kfifo_dma_out_finish - finish a DMA OUT operation |
| 733 | * @fifo: address of the fifo to be used | ||
| 734 | * @len: number of bytes transferd | ||
| 735 | * | ||
| 736 | * This macro finish a DMA OUT operation. The out counter will be updated by | ||
| 737 | * the len parameter. No error checking will be done. | ||
| 738 | * | ||
| 739 | * Note that with only one concurrent reader and one concurrent | ||
| 740 | * writer, you don't need extra locking to use these macros. | ||
| 741 | */ | ||
| 742 | #define kfifo_dma_out_finish(fifo, len) \ | ||
| 743 | (void)({ \ | ||
| 744 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 745 | unsigned int __len = (len); \ | ||
| 746 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 747 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 748 | if (__recsize) \ | ||
| 749 | __kfifo_dma_out_finish_r(__kfifo, __recsize); \ | ||
| 750 | else \ | ||
| 751 | __kfifo->out += __len / sizeof(*__tmp->type); \ | ||
| 752 | }) | ||
| 405 | 753 | ||
| 406 | /** | 754 | /** |
| 407 | * kfifo_out_rec - gets some record data from the FIFO | 755 | * kfifo_out_peek - gets some data from the fifo |
| 408 | * @fifo: the fifo to be used. | 756 | * @fifo: address of the fifo to be used |
| 409 | * @to: where the data must be copied. | 757 | * @buf: pointer to the storage buffer |
| 410 | * @n: the size of the destination buffer. | 758 | * @n: max. number of elements to get |
| 411 | * @recsize: size of record field | ||
| 412 | * @total: pointer where the total number of to copied bytes should stored | ||
| 413 | * | 759 | * |
| 414 | * This function copies at most @n bytes from the FIFO to @to and returns the | 760 | * This macro get the data from the fifo and return the numbers of elements |
| 415 | * number of bytes which cannot be copied. | 761 | * copied. The data is not removed from the fifo. |
| 416 | * A returned value greater than the @n value means that the record doesn't | ||
| 417 | * fit into the @to buffer. | ||
| 418 | * | 762 | * |
| 419 | * Note that with only one concurrent reader and one concurrent | 763 | * Note that with only one concurrent reader and one concurrent |
| 420 | * writer, you don't need extra locking to use these functions. | 764 | * writer, you don't need extra locking to use these macro. |
| 421 | */ | 765 | */ |
| 422 | static inline __must_check unsigned int kfifo_out_rec(struct kfifo *fifo, | 766 | #define kfifo_out_peek(fifo, buf, n) \ |
| 423 | void *to, unsigned int n, unsigned int recsize, | 767 | __kfifo_must_check_helper( \ |
| 424 | unsigned int *total) | 768 | ({ \ |
| 769 | typeof(fifo + 1) __tmp = (fifo); \ | ||
| 770 | typeof(buf + 1) __buf = (buf); \ | ||
| 771 | unsigned long __n = (n); \ | ||
| 772 | const size_t __recsize = sizeof(*__tmp->rectype); \ | ||
| 773 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | ||
| 774 | if (0) { \ | ||
| 775 | typeof(__tmp->ptr) __dummy __attribute__ ((unused)) = NULL; \ | ||
| 776 | __buf = __dummy; \ | ||
| 777 | } \ | ||
| 778 | (__recsize) ? \ | ||
| 779 | __kfifo_out_peek_r(__kfifo, __buf, __n, __recsize) : \ | ||
| 780 | __kfifo_out_peek(__kfifo, __buf, __n); \ | ||
| 781 | }) \ | ||
| 782 | ) | ||
| 425 | 783 | ||
| 426 | { | 784 | extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, |
| 427 | if (!__builtin_constant_p(recsize)) | 785 | size_t esize, gfp_t gfp_mask); |
| 428 | return __kfifo_out_generic(fifo, to, n, recsize, total); | ||
| 429 | return __kfifo_out_rec(fifo, to, n, recsize, total); | ||
| 430 | } | ||
| 431 | 786 | ||
| 432 | /* | 787 | extern void __kfifo_free(struct __kfifo *fifo); |
| 433 | * __kfifo_from_user_... internal functions for transfer from user space into | ||
| 434 | * the fifo. do not call it directly, use kfifo_from_user_rec() instead | ||
| 435 | */ | ||
| 436 | extern unsigned int __kfifo_from_user_n(struct kfifo *fifo, | ||
| 437 | const void __user *from, unsigned int n, unsigned int recsize); | ||
| 438 | 788 | ||
| 439 | extern unsigned int __kfifo_from_user_generic(struct kfifo *fifo, | 789 | extern int __kfifo_init(struct __kfifo *fifo, void *buffer, |
| 440 | const void __user *from, unsigned int n, unsigned int recsize); | 790 | unsigned int size, size_t esize); |
| 441 | 791 | ||
| 442 | static inline unsigned int __kfifo_from_user_rec(struct kfifo *fifo, | 792 | extern unsigned int __kfifo_in(struct __kfifo *fifo, |
| 443 | const void __user *from, unsigned int n, unsigned int recsize) | 793 | const void *buf, unsigned int len); |
| 444 | { | ||
| 445 | unsigned int ret; | ||
| 446 | 794 | ||
| 447 | ret = __kfifo_from_user_n(fifo, from, n, recsize); | 795 | extern unsigned int __kfifo_out(struct __kfifo *fifo, |
| 796 | void *buf, unsigned int len); | ||
| 448 | 797 | ||
| 449 | if (likely(ret == 0)) { | 798 | extern int __kfifo_from_user(struct __kfifo *fifo, |
| 450 | if (recsize) | 799 | const void __user *from, unsigned long len, unsigned int *copied); |
| 451 | __kfifo_poke_n(fifo, recsize, n); | ||
| 452 | __kfifo_add_in(fifo, n + recsize); | ||
| 453 | } | ||
| 454 | return ret; | ||
| 455 | } | ||
| 456 | 800 | ||
| 457 | /** | 801 | extern int __kfifo_to_user(struct __kfifo *fifo, |
| 458 | * kfifo_from_user_rec - puts some data from user space into the FIFO | 802 | void __user *to, unsigned long len, unsigned int *copied); |
| 459 | * @fifo: the fifo to be used. | ||
| 460 | * @from: pointer to the data to be added. | ||
| 461 | * @n: the length of the data to be added. | ||
| 462 | * @recsize: size of record field | ||
| 463 | * | ||
| 464 | * This function copies @n bytes from the @from into the | ||
| 465 | * FIFO and returns the number of bytes which cannot be copied. | ||
| 466 | * | ||
| 467 | * If the returned value is equal or less the @n value, the copy_from_user() | ||
| 468 | * functions has failed. Otherwise the record doesn't fit into the buffer. | ||
| 469 | * | ||
| 470 | * Note that with only one concurrent reader and one concurrent | ||
| 471 | * writer, you don't need extra locking to use these functions. | ||
| 472 | */ | ||
| 473 | static inline __must_check unsigned int kfifo_from_user_rec(struct kfifo *fifo, | ||
| 474 | const void __user *from, unsigned int n, unsigned int recsize) | ||
| 475 | { | ||
| 476 | if (!__builtin_constant_p(recsize)) | ||
| 477 | return __kfifo_from_user_generic(fifo, from, n, recsize); | ||
| 478 | return __kfifo_from_user_rec(fifo, from, n, recsize); | ||
| 479 | } | ||
| 480 | 803 | ||
| 481 | /* | 804 | extern unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, |
| 482 | * __kfifo_to_user_... internal functions for transfer fifo data into user space | 805 | struct scatterlist *sgl, int nents, unsigned int len); |
| 483 | * do not call it directly, use kfifo_to_user_rec() instead | ||
| 484 | */ | ||
| 485 | extern unsigned int __kfifo_to_user_n(struct kfifo *fifo, | ||
| 486 | void __user *to, unsigned int n, unsigned int reclen, | ||
| 487 | unsigned int recsize); | ||
| 488 | 806 | ||
| 489 | extern unsigned int __kfifo_to_user_generic(struct kfifo *fifo, | 807 | extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, |
| 490 | void __user *to, unsigned int n, unsigned int recsize, | 808 | struct scatterlist *sgl, int nents, unsigned int len); |
| 491 | unsigned int *total); | ||
| 492 | 809 | ||
| 493 | static inline unsigned int __kfifo_to_user_rec(struct kfifo *fifo, | 810 | extern unsigned int __kfifo_out_peek(struct __kfifo *fifo, |
| 494 | void __user *to, unsigned int n, | 811 | void *buf, unsigned int len); |
| 495 | unsigned int recsize, unsigned int *total) | ||
| 496 | { | ||
| 497 | unsigned int l; | ||
| 498 | |||
| 499 | if (!recsize) { | ||
| 500 | l = n; | ||
| 501 | if (total) | ||
| 502 | *total = l; | ||
| 503 | } else { | ||
| 504 | l = __kfifo_peek_n(fifo, recsize); | ||
| 505 | if (total) | ||
| 506 | *total = l; | ||
| 507 | if (n < l) | ||
| 508 | return l; | ||
| 509 | } | ||
| 510 | 812 | ||
| 511 | return __kfifo_to_user_n(fifo, to, n, l, recsize); | 813 | extern unsigned int __kfifo_in_r(struct __kfifo *fifo, |
| 512 | } | 814 | const void *buf, unsigned int len, size_t recsize); |
| 513 | 815 | ||
| 514 | /** | 816 | extern unsigned int __kfifo_out_r(struct __kfifo *fifo, |
| 515 | * kfifo_to_user_rec - gets data from the FIFO and write it to user space | 817 | void *buf, unsigned int len, size_t recsize); |
| 516 | * @fifo: the fifo to be used. | ||
| 517 | * @to: where the data must be copied. | ||
| 518 | * @n: the size of the destination buffer. | ||
| 519 | * @recsize: size of record field | ||
| 520 | * @total: pointer where the total number of to copied bytes should stored | ||
| 521 | * | ||
| 522 | * This function copies at most @n bytes from the FIFO to the @to. | ||
| 523 | * In case of an error, the function returns the number of bytes which cannot | ||
| 524 | * be copied. | ||
| 525 | * If the returned value is equal or less the @n value, the copy_to_user() | ||
| 526 | * functions has failed. Otherwise the record doesn't fit into the @to buffer. | ||
| 527 | * | ||
| 528 | * Note that with only one concurrent reader and one concurrent | ||
| 529 | * writer, you don't need extra locking to use these functions. | ||
| 530 | */ | ||
| 531 | static inline __must_check unsigned int kfifo_to_user_rec(struct kfifo *fifo, | ||
| 532 | void __user *to, unsigned int n, unsigned int recsize, | ||
| 533 | unsigned int *total) | ||
| 534 | { | ||
| 535 | if (!__builtin_constant_p(recsize)) | ||
| 536 | return __kfifo_to_user_generic(fifo, to, n, recsize, total); | ||
| 537 | return __kfifo_to_user_rec(fifo, to, n, recsize, total); | ||
| 538 | } | ||
| 539 | 818 | ||
| 540 | /* | 819 | extern int __kfifo_from_user_r(struct __kfifo *fifo, |
| 541 | * __kfifo_peek_... internal functions for peek into the next fifo record | 820 | const void __user *from, unsigned long len, unsigned int *copied, |
| 542 | * do not call it directly, use kfifo_peek_rec() instead | 821 | size_t recsize); |
| 543 | */ | ||
| 544 | extern unsigned int __kfifo_peek_generic(struct kfifo *fifo, | ||
| 545 | unsigned int recsize); | ||
| 546 | 822 | ||
| 547 | /** | 823 | extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, |
| 548 | * kfifo_peek_rec - gets the size of the next FIFO record data | 824 | unsigned long len, unsigned int *copied, size_t recsize); |
| 549 | * @fifo: the fifo to be used. | ||
| 550 | * @recsize: size of record field | ||
| 551 | * | ||
| 552 | * This function returns the size of the next FIFO record in number of bytes | ||
| 553 | */ | ||
| 554 | static inline __must_check unsigned int kfifo_peek_rec(struct kfifo *fifo, | ||
| 555 | unsigned int recsize) | ||
| 556 | { | ||
| 557 | if (!__builtin_constant_p(recsize)) | ||
| 558 | return __kfifo_peek_generic(fifo, recsize); | ||
| 559 | if (!recsize) | ||
| 560 | return kfifo_len(fifo); | ||
| 561 | return __kfifo_peek_n(fifo, recsize); | ||
| 562 | } | ||
| 563 | 825 | ||
| 564 | /* | 826 | extern unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, |
| 565 | * __kfifo_skip_... internal functions for skip the next fifo record | 827 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); |
| 566 | * do not call it directly, use kfifo_skip_rec() instead | ||
| 567 | */ | ||
| 568 | extern void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize); | ||
| 569 | 828 | ||
| 570 | static inline void __kfifo_skip_rec(struct kfifo *fifo, | 829 | extern void __kfifo_dma_in_finish_r(struct __kfifo *fifo, |
| 571 | unsigned int recsize) | 830 | unsigned int len, size_t recsize); |
| 572 | { | ||
| 573 | unsigned int l; | ||
| 574 | 831 | ||
| 575 | if (recsize) { | 832 | extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, |
| 576 | l = __kfifo_peek_n(fifo, recsize); | 833 | struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); |
| 577 | 834 | ||
| 578 | if (l + recsize <= kfifo_len(fifo)) { | 835 | extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize); |
| 579 | __kfifo_add_out(fifo, l + recsize); | ||
| 580 | return; | ||
| 581 | } | ||
| 582 | } | ||
| 583 | kfifo_reset_out(fifo); | ||
| 584 | } | ||
| 585 | 836 | ||
| 586 | /** | 837 | extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); |
| 587 | * kfifo_skip_rec - skip the next fifo out record | ||
| 588 | * @fifo: the fifo to be used. | ||
| 589 | * @recsize: size of record field | ||
| 590 | * | ||
| 591 | * This function skips the next FIFO record | ||
| 592 | */ | ||
| 593 | static inline void kfifo_skip_rec(struct kfifo *fifo, | ||
| 594 | unsigned int recsize) | ||
| 595 | { | ||
| 596 | if (!__builtin_constant_p(recsize)) | ||
| 597 | __kfifo_skip_generic(fifo, recsize); | ||
| 598 | else | ||
| 599 | __kfifo_skip_rec(fifo, recsize); | ||
| 600 | } | ||
| 601 | 838 | ||
| 602 | /** | 839 | extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, |
| 603 | * kfifo_avail_rec - returns the number of bytes available in a record FIFO | 840 | void *buf, unsigned int len, size_t recsize); |
| 604 | * @fifo: the fifo to be used. | ||
| 605 | * @recsize: size of record field | ||
| 606 | */ | ||
| 607 | static inline __must_check unsigned int kfifo_avail_rec(struct kfifo *fifo, | ||
| 608 | unsigned int recsize) | ||
| 609 | { | ||
| 610 | unsigned int l = kfifo_size(fifo) - kfifo_len(fifo); | ||
| 611 | 841 | ||
| 612 | return (l > recsize) ? l - recsize : 0; | 842 | extern unsigned int __kfifo_max_r(unsigned int len, size_t recsize); |
| 613 | } | ||
| 614 | 843 | ||
| 615 | #endif | 844 | #endif |
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h deleted file mode 100644 index b616d3930c3b..000000000000 --- a/include/linux/kmemtrace.h +++ /dev/null | |||
| @@ -1,25 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
| 3 | * | ||
| 4 | * This file is released under GPL version 2. | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef _LINUX_KMEMTRACE_H | ||
| 8 | #define _LINUX_KMEMTRACE_H | ||
| 9 | |||
| 10 | #ifdef __KERNEL__ | ||
| 11 | |||
| 12 | #include <trace/events/kmem.h> | ||
| 13 | |||
| 14 | #ifdef CONFIG_KMEMTRACE | ||
| 15 | extern void kmemtrace_init(void); | ||
| 16 | #else | ||
| 17 | static inline void kmemtrace_init(void) | ||
| 18 | { | ||
| 19 | } | ||
| 20 | #endif | ||
| 21 | |||
| 22 | #endif /* __KERNEL__ */ | ||
| 23 | |||
| 24 | #endif /* _LINUX_KMEMTRACE_H */ | ||
| 25 | |||
diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 43bdab769fc3..74d691ee9121 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h | |||
| @@ -78,7 +78,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page, | |||
| 78 | struct anon_vma *anon_vma = page_anon_vma(page); | 78 | struct anon_vma *anon_vma = page_anon_vma(page); |
| 79 | 79 | ||
| 80 | if (!anon_vma || | 80 | if (!anon_vma || |
| 81 | (anon_vma == vma->anon_vma && | 81 | (anon_vma->root == vma->anon_vma->root && |
| 82 | page->index == linear_page_index(vma, address))) | 82 | page->index == linear_page_index(vma, address))) |
| 83 | return page; | 83 | return page; |
| 84 | 84 | ||
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index aabc8a13ba71..685ea65eb803 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
| @@ -30,8 +30,73 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), | |||
| 30 | void kthread_bind(struct task_struct *k, unsigned int cpu); | 30 | void kthread_bind(struct task_struct *k, unsigned int cpu); |
| 31 | int kthread_stop(struct task_struct *k); | 31 | int kthread_stop(struct task_struct *k); |
| 32 | int kthread_should_stop(void); | 32 | int kthread_should_stop(void); |
| 33 | void *kthread_data(struct task_struct *k); | ||
| 33 | 34 | ||
| 34 | int kthreadd(void *unused); | 35 | int kthreadd(void *unused); |
| 35 | extern struct task_struct *kthreadd_task; | 36 | extern struct task_struct *kthreadd_task; |
| 36 | 37 | ||
| 38 | /* | ||
| 39 | * Simple work processor based on kthread. | ||
| 40 | * | ||
| 41 | * This provides easier way to make use of kthreads. A kthread_work | ||
| 42 | * can be queued and flushed using queue/flush_kthread_work() | ||
| 43 | * respectively. Queued kthread_works are processed by a kthread | ||
| 44 | * running kthread_worker_fn(). | ||
| 45 | * | ||
| 46 | * A kthread_work can't be freed while it is executing. | ||
| 47 | */ | ||
| 48 | struct kthread_work; | ||
| 49 | typedef void (*kthread_work_func_t)(struct kthread_work *work); | ||
| 50 | |||
| 51 | struct kthread_worker { | ||
| 52 | spinlock_t lock; | ||
| 53 | struct list_head work_list; | ||
| 54 | struct task_struct *task; | ||
| 55 | }; | ||
| 56 | |||
| 57 | struct kthread_work { | ||
| 58 | struct list_head node; | ||
| 59 | kthread_work_func_t func; | ||
| 60 | wait_queue_head_t done; | ||
| 61 | atomic_t flushing; | ||
| 62 | int queue_seq; | ||
| 63 | int done_seq; | ||
| 64 | }; | ||
| 65 | |||
| 66 | #define KTHREAD_WORKER_INIT(worker) { \ | ||
| 67 | .lock = SPIN_LOCK_UNLOCKED, \ | ||
| 68 | .work_list = LIST_HEAD_INIT((worker).work_list), \ | ||
| 69 | } | ||
| 70 | |||
| 71 | #define KTHREAD_WORK_INIT(work, fn) { \ | ||
| 72 | .node = LIST_HEAD_INIT((work).node), \ | ||
| 73 | .func = (fn), \ | ||
| 74 | .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \ | ||
| 75 | .flushing = ATOMIC_INIT(0), \ | ||
| 76 | } | ||
| 77 | |||
| 78 | #define DEFINE_KTHREAD_WORKER(worker) \ | ||
| 79 | struct kthread_worker worker = KTHREAD_WORKER_INIT(worker) | ||
| 80 | |||
| 81 | #define DEFINE_KTHREAD_WORK(work, fn) \ | ||
| 82 | struct kthread_work work = KTHREAD_WORK_INIT(work, fn) | ||
| 83 | |||
| 84 | static inline void init_kthread_worker(struct kthread_worker *worker) | ||
| 85 | { | ||
| 86 | *worker = (struct kthread_worker)KTHREAD_WORKER_INIT(*worker); | ||
| 87 | } | ||
| 88 | |||
| 89 | static inline void init_kthread_work(struct kthread_work *work, | ||
| 90 | kthread_work_func_t fn) | ||
| 91 | { | ||
| 92 | *work = (struct kthread_work)KTHREAD_WORK_INIT(*work, fn); | ||
| 93 | } | ||
| 94 | |||
| 95 | int kthread_worker_fn(void *worker_ptr); | ||
| 96 | |||
| 97 | bool queue_kthread_work(struct kthread_worker *worker, | ||
| 98 | struct kthread_work *work); | ||
| 99 | void flush_kthread_work(struct kthread_work *work); | ||
| 100 | void flush_kthread_worker(struct kthread_worker *worker); | ||
| 101 | |||
| 37 | #endif /* _LINUX_KTHREAD_H */ | 102 | #endif /* _LINUX_KTHREAD_H */ |
diff --git a/include/linux/libata.h b/include/linux/libata.h index b85f3ff34d7d..f010f18a0f86 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -751,6 +751,7 @@ struct ata_port { | |||
| 751 | struct ata_host *host; | 751 | struct ata_host *host; |
| 752 | struct device *dev; | 752 | struct device *dev; |
| 753 | 753 | ||
| 754 | struct mutex scsi_scan_mutex; | ||
| 754 | struct delayed_work hotplug_task; | 755 | struct delayed_work hotplug_task; |
| 755 | struct work_struct scsi_rescan_task; | 756 | struct work_struct scsi_rescan_task; |
| 756 | 757 | ||
diff --git a/include/linux/list.h b/include/linux/list.h index 5d57a3a1fa1b..d167b5d7c0ac 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | #ifndef _LINUX_LIST_H | 1 | #ifndef _LINUX_LIST_H |
| 2 | #define _LINUX_LIST_H | 2 | #define _LINUX_LIST_H |
| 3 | 3 | ||
| 4 | #include <linux/types.h> | ||
| 4 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
| 5 | #include <linux/poison.h> | 6 | #include <linux/poison.h> |
| 6 | #include <linux/prefetch.h> | 7 | #include <linux/prefetch.h> |
| @@ -16,10 +17,6 @@ | |||
| 16 | * using the generic single-entry routines. | 17 | * using the generic single-entry routines. |
| 17 | */ | 18 | */ |
| 18 | 19 | ||
| 19 | struct list_head { | ||
| 20 | struct list_head *next, *prev; | ||
| 21 | }; | ||
| 22 | |||
| 23 | #define LIST_HEAD_INIT(name) { &(name), &(name) } | 20 | #define LIST_HEAD_INIT(name) { &(name), &(name) } |
| 24 | 21 | ||
| 25 | #define LIST_HEAD(name) \ | 22 | #define LIST_HEAD(name) \ |
| @@ -566,14 +563,6 @@ static inline void list_splice_tail_init(struct list_head *list, | |||
| 566 | * You lose the ability to access the tail in O(1). | 563 | * You lose the ability to access the tail in O(1). |
| 567 | */ | 564 | */ |
| 568 | 565 | ||
| 569 | struct hlist_head { | ||
| 570 | struct hlist_node *first; | ||
| 571 | }; | ||
| 572 | |||
| 573 | struct hlist_node { | ||
| 574 | struct hlist_node *next, **pprev; | ||
| 575 | }; | ||
| 576 | |||
| 577 | #define HLIST_HEAD_INIT { .first = NULL } | 566 | #define HLIST_HEAD_INIT { .first = NULL } |
| 578 | #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } | 567 | #define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } |
| 579 | #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) | 568 | #define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) |
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h index a09b84e4fdb4..54cbbac1e71d 100644 --- a/include/linux/mbcache.h +++ b/include/linux/mbcache.h | |||
| @@ -4,9 +4,6 @@ | |||
| 4 | (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org> | 4 | (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org> |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | /* Hardwire the number of additional indexes */ | ||
| 8 | #define MB_CACHE_INDEXES_COUNT 1 | ||
| 9 | |||
| 10 | struct mb_cache_entry { | 7 | struct mb_cache_entry { |
| 11 | struct list_head e_lru_list; | 8 | struct list_head e_lru_list; |
| 12 | struct mb_cache *e_cache; | 9 | struct mb_cache *e_cache; |
| @@ -18,17 +15,12 @@ struct mb_cache_entry { | |||
| 18 | struct { | 15 | struct { |
| 19 | struct list_head o_list; | 16 | struct list_head o_list; |
| 20 | unsigned int o_key; | 17 | unsigned int o_key; |
| 21 | } e_indexes[0]; | 18 | } e_index; |
| 22 | }; | ||
| 23 | |||
| 24 | struct mb_cache_op { | ||
| 25 | int (*free)(struct mb_cache_entry *, gfp_t); | ||
| 26 | }; | 19 | }; |
| 27 | 20 | ||
| 28 | /* Functions on caches */ | 21 | /* Functions on caches */ |
| 29 | 22 | ||
| 30 | struct mb_cache * mb_cache_create(const char *, struct mb_cache_op *, size_t, | 23 | struct mb_cache *mb_cache_create(const char *, int); |
| 31 | int, int); | ||
| 32 | void mb_cache_shrink(struct block_device *); | 24 | void mb_cache_shrink(struct block_device *); |
| 33 | void mb_cache_destroy(struct mb_cache *); | 25 | void mb_cache_destroy(struct mb_cache *); |
| 34 | 26 | ||
| @@ -36,17 +28,15 @@ void mb_cache_destroy(struct mb_cache *); | |||
| 36 | 28 | ||
| 37 | struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t); | 29 | struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t); |
| 38 | int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *, | 30 | int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *, |
| 39 | sector_t, unsigned int[]); | 31 | sector_t, unsigned int); |
| 40 | void mb_cache_entry_release(struct mb_cache_entry *); | 32 | void mb_cache_entry_release(struct mb_cache_entry *); |
| 41 | void mb_cache_entry_free(struct mb_cache_entry *); | 33 | void mb_cache_entry_free(struct mb_cache_entry *); |
| 42 | struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *, | 34 | struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *, |
| 43 | struct block_device *, | 35 | struct block_device *, |
| 44 | sector_t); | 36 | sector_t); |
| 45 | #if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) | 37 | struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, |
| 46 | struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache, int, | ||
| 47 | struct block_device *, | 38 | struct block_device *, |
| 48 | unsigned int); | 39 | unsigned int); |
| 49 | struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *, int, | 40 | struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *, |
| 50 | struct block_device *, | 41 | struct block_device *, |
| 51 | unsigned int); | 42 | unsigned int); |
| 52 | #endif | ||
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 9411d32840b0..159a0762aeaf 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -98,11 +98,6 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem, | |||
| 98 | /* | 98 | /* |
| 99 | * For memory reclaim. | 99 | * For memory reclaim. |
| 100 | */ | 100 | */ |
| 101 | extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem); | ||
| 102 | extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, | ||
| 103 | int priority); | ||
| 104 | extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, | ||
| 105 | int priority); | ||
| 106 | int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); | 101 | int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); |
| 107 | int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg); | 102 | int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg); |
| 108 | unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, | 103 | unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, |
| @@ -128,8 +123,9 @@ static inline bool mem_cgroup_disabled(void) | |||
| 128 | 123 | ||
| 129 | void mem_cgroup_update_file_mapped(struct page *page, int val); | 124 | void mem_cgroup_update_file_mapped(struct page *page, int val); |
| 130 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | 125 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, |
| 131 | gfp_t gfp_mask, int nid, | 126 | gfp_t gfp_mask); |
| 132 | int zid); | 127 | u64 mem_cgroup_get_limit(struct mem_cgroup *mem); |
| 128 | |||
| 133 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ | 129 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ |
| 134 | struct mem_cgroup; | 130 | struct mem_cgroup; |
| 135 | 131 | ||
| @@ -304,7 +300,13 @@ static inline void mem_cgroup_update_file_mapped(struct page *page, | |||
| 304 | 300 | ||
| 305 | static inline | 301 | static inline |
| 306 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | 302 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, |
| 307 | gfp_t gfp_mask, int nid, int zid) | 303 | gfp_t gfp_mask) |
| 304 | { | ||
| 305 | return 0; | ||
| 306 | } | ||
| 307 | |||
| 308 | static inline | ||
| 309 | u64 mem_cgroup_get_limit(struct mem_cgroup *mem) | ||
| 308 | { | 310 | { |
| 309 | return 0; | 311 | return 0; |
| 310 | } | 312 | } |
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 7b9ef6bf45aa..31ac26ca4acf 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h | |||
| @@ -210,6 +210,8 @@ extern struct zonelist *huge_zonelist(struct vm_area_struct *vma, | |||
| 210 | unsigned long addr, gfp_t gfp_flags, | 210 | unsigned long addr, gfp_t gfp_flags, |
| 211 | struct mempolicy **mpol, nodemask_t **nodemask); | 211 | struct mempolicy **mpol, nodemask_t **nodemask); |
| 212 | extern bool init_nodemask_of_mempolicy(nodemask_t *mask); | 212 | extern bool init_nodemask_of_mempolicy(nodemask_t *mask); |
| 213 | extern bool mempolicy_nodemask_intersects(struct task_struct *tsk, | ||
| 214 | const nodemask_t *mask); | ||
| 213 | extern unsigned slab_node(struct mempolicy *policy); | 215 | extern unsigned slab_node(struct mempolicy *policy); |
| 214 | 216 | ||
| 215 | extern enum zone_type policy_zone; | 217 | extern enum zone_type policy_zone; |
| @@ -338,7 +340,16 @@ static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma, | |||
| 338 | return node_zonelist(0, gfp_flags); | 340 | return node_zonelist(0, gfp_flags); |
| 339 | } | 341 | } |
| 340 | 342 | ||
| 341 | static inline bool init_nodemask_of_mempolicy(nodemask_t *m) { return false; } | 343 | static inline bool init_nodemask_of_mempolicy(nodemask_t *m) |
| 344 | { | ||
| 345 | return false; | ||
| 346 | } | ||
| 347 | |||
| 348 | static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk, | ||
| 349 | const nodemask_t *mask) | ||
| 350 | { | ||
| 351 | return false; | ||
| 352 | } | ||
| 342 | 353 | ||
| 343 | static inline int do_migrate_pages(struct mm_struct *mm, | 354 | static inline int do_migrate_pages(struct mm_struct *mm, |
| 344 | const nodemask_t *from_nodes, | 355 | const nodemask_t *from_nodes, |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 7a9ab7db1975..709f6728fc90 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -815,6 +815,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, | |||
| 815 | } | 815 | } |
| 816 | 816 | ||
| 817 | extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new); | 817 | extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new); |
| 818 | extern void truncate_setsize(struct inode *inode, loff_t newsize); | ||
| 818 | extern int vmtruncate(struct inode *inode, loff_t offset); | 819 | extern int vmtruncate(struct inode *inode, loff_t offset); |
| 819 | extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end); | 820 | extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end); |
| 820 | 821 | ||
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index d02d2c6e0cfe..6b7525099e56 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
| @@ -24,12 +24,14 @@ struct mmc_cid { | |||
| 24 | }; | 24 | }; |
| 25 | 25 | ||
| 26 | struct mmc_csd { | 26 | struct mmc_csd { |
| 27 | unsigned char structure; | ||
| 27 | unsigned char mmca_vsn; | 28 | unsigned char mmca_vsn; |
| 28 | unsigned short cmdclass; | 29 | unsigned short cmdclass; |
| 29 | unsigned short tacc_clks; | 30 | unsigned short tacc_clks; |
| 30 | unsigned int tacc_ns; | 31 | unsigned int tacc_ns; |
| 31 | unsigned int r2w_factor; | 32 | unsigned int r2w_factor; |
| 32 | unsigned int max_dtr; | 33 | unsigned int max_dtr; |
| 34 | unsigned int erase_size; /* In sectors */ | ||
| 33 | unsigned int read_blkbits; | 35 | unsigned int read_blkbits; |
| 34 | unsigned int write_blkbits; | 36 | unsigned int write_blkbits; |
| 35 | unsigned int capacity; | 37 | unsigned int capacity; |
| @@ -41,9 +43,16 @@ struct mmc_csd { | |||
| 41 | 43 | ||
| 42 | struct mmc_ext_csd { | 44 | struct mmc_ext_csd { |
| 43 | u8 rev; | 45 | u8 rev; |
| 46 | u8 erase_group_def; | ||
| 47 | u8 sec_feature_support; | ||
| 44 | unsigned int sa_timeout; /* Units: 100ns */ | 48 | unsigned int sa_timeout; /* Units: 100ns */ |
| 45 | unsigned int hs_max_dtr; | 49 | unsigned int hs_max_dtr; |
| 46 | unsigned int sectors; | 50 | unsigned int sectors; |
| 51 | unsigned int hc_erase_size; /* In sectors */ | ||
| 52 | unsigned int hc_erase_timeout; /* In milliseconds */ | ||
| 53 | unsigned int sec_trim_mult; /* Secure trim multiplier */ | ||
| 54 | unsigned int sec_erase_mult; /* Secure erase multiplier */ | ||
| 55 | unsigned int trim_timeout; /* In milliseconds */ | ||
| 47 | }; | 56 | }; |
| 48 | 57 | ||
| 49 | struct sd_scr { | 58 | struct sd_scr { |
| @@ -53,6 +62,12 @@ struct sd_scr { | |||
| 53 | #define SD_SCR_BUS_WIDTH_4 (1<<2) | 62 | #define SD_SCR_BUS_WIDTH_4 (1<<2) |
| 54 | }; | 63 | }; |
| 55 | 64 | ||
| 65 | struct sd_ssr { | ||
| 66 | unsigned int au; /* In sectors */ | ||
| 67 | unsigned int erase_timeout; /* In milliseconds */ | ||
| 68 | unsigned int erase_offset; /* In milliseconds */ | ||
| 69 | }; | ||
| 70 | |||
| 56 | struct sd_switch_caps { | 71 | struct sd_switch_caps { |
| 57 | unsigned int hs_max_dtr; | 72 | unsigned int hs_max_dtr; |
| 58 | }; | 73 | }; |
| @@ -92,6 +107,7 @@ struct mmc_card { | |||
| 92 | #define MMC_TYPE_MMC 0 /* MMC card */ | 107 | #define MMC_TYPE_MMC 0 /* MMC card */ |
| 93 | #define MMC_TYPE_SD 1 /* SD card */ | 108 | #define MMC_TYPE_SD 1 /* SD card */ |
| 94 | #define MMC_TYPE_SDIO 2 /* SDIO card */ | 109 | #define MMC_TYPE_SDIO 2 /* SDIO card */ |
| 110 | #define MMC_TYPE_SD_COMBO 3 /* SD combo (IO+mem) card */ | ||
| 95 | unsigned int state; /* (our) card state */ | 111 | unsigned int state; /* (our) card state */ |
| 96 | #define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ | 112 | #define MMC_STATE_PRESENT (1<<0) /* present in sysfs */ |
| 97 | #define MMC_STATE_READONLY (1<<1) /* card is read-only */ | 113 | #define MMC_STATE_READONLY (1<<1) /* card is read-only */ |
| @@ -101,6 +117,13 @@ struct mmc_card { | |||
| 101 | #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ | 117 | #define MMC_QUIRK_LENIENT_FN0 (1<<0) /* allow SDIO FN0 writes outside of the VS CCCR range */ |
| 102 | #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ | 118 | #define MMC_QUIRK_BLKSZ_FOR_BYTE_MODE (1<<1) /* use func->cur_blksize */ |
| 103 | /* for byte mode */ | 119 | /* for byte mode */ |
| 120 | #define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */ | ||
| 121 | /* (missing CIA registers) */ | ||
| 122 | |||
| 123 | unsigned int erase_size; /* erase size in sectors */ | ||
| 124 | unsigned int erase_shift; /* if erase unit is power 2 */ | ||
| 125 | unsigned int pref_erase; /* in sectors */ | ||
| 126 | u8 erased_byte; /* value of erased bytes */ | ||
| 104 | 127 | ||
| 105 | u32 raw_cid[4]; /* raw card CID */ | 128 | u32 raw_cid[4]; /* raw card CID */ |
| 106 | u32 raw_csd[4]; /* raw card CSD */ | 129 | u32 raw_csd[4]; /* raw card CSD */ |
| @@ -109,6 +132,7 @@ struct mmc_card { | |||
| 109 | struct mmc_csd csd; /* card specific */ | 132 | struct mmc_csd csd; /* card specific */ |
| 110 | struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */ | 133 | struct mmc_ext_csd ext_csd; /* mmc v4 extended card specific */ |
| 111 | struct sd_scr scr; /* extra SD information */ | 134 | struct sd_scr scr; /* extra SD information */ |
| 135 | struct sd_ssr ssr; /* yet more SD information */ | ||
| 112 | struct sd_switch_caps sw_caps; /* switch (CMD6) caps */ | 136 | struct sd_switch_caps sw_caps; /* switch (CMD6) caps */ |
| 113 | 137 | ||
| 114 | unsigned int sdio_funcs; /* number of SDIO functions */ | 138 | unsigned int sdio_funcs; /* number of SDIO functions */ |
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index e4898e9eeb59..7429033acb66 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h | |||
| @@ -92,6 +92,8 @@ struct mmc_command { | |||
| 92 | * actively failing requests | 92 | * actively failing requests |
| 93 | */ | 93 | */ |
| 94 | 94 | ||
| 95 | unsigned int erase_timeout; /* in milliseconds */ | ||
| 96 | |||
| 95 | struct mmc_data *data; /* data segment associated with cmd */ | 97 | struct mmc_data *data; /* data segment associated with cmd */ |
| 96 | struct mmc_request *mrq; /* associated request */ | 98 | struct mmc_request *mrq; /* associated request */ |
| 97 | }; | 99 | }; |
| @@ -134,6 +136,23 @@ extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int); | |||
| 134 | extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, | 136 | extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *, |
| 135 | struct mmc_command *, int); | 137 | struct mmc_command *, int); |
| 136 | 138 | ||
| 139 | #define MMC_ERASE_ARG 0x00000000 | ||
| 140 | #define MMC_SECURE_ERASE_ARG 0x80000000 | ||
| 141 | #define MMC_TRIM_ARG 0x00000001 | ||
| 142 | #define MMC_SECURE_TRIM1_ARG 0x80000001 | ||
| 143 | #define MMC_SECURE_TRIM2_ARG 0x80008000 | ||
| 144 | |||
| 145 | #define MMC_SECURE_ARGS 0x80000000 | ||
| 146 | #define MMC_TRIM_ARGS 0x00008001 | ||
| 147 | |||
| 148 | extern int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr, | ||
| 149 | unsigned int arg); | ||
| 150 | extern int mmc_can_erase(struct mmc_card *card); | ||
| 151 | extern int mmc_can_trim(struct mmc_card *card); | ||
| 152 | extern int mmc_can_secure_erase_trim(struct mmc_card *card); | ||
| 153 | extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from, | ||
| 154 | unsigned int nr); | ||
| 155 | |||
| 137 | extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *); | 156 | extern void mmc_set_data_timeout(struct mmc_data *, const struct mmc_card *); |
| 138 | extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); | 157 | extern unsigned int mmc_align_data_size(struct mmc_card *, unsigned int); |
| 139 | 158 | ||
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index f65913c9f5a4..1575b52c3bfa 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
| @@ -124,6 +124,7 @@ struct mmc_host { | |||
| 124 | unsigned int f_min; | 124 | unsigned int f_min; |
| 125 | unsigned int f_max; | 125 | unsigned int f_max; |
| 126 | u32 ocr_avail; | 126 | u32 ocr_avail; |
| 127 | struct notifier_block pm_notify; | ||
| 127 | 128 | ||
| 128 | #define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ | 129 | #define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ |
| 129 | #define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ | 130 | #define MMC_VDD_20_21 0x00000100 /* VDD voltage 2.0 ~ 2.1 */ |
| @@ -155,6 +156,7 @@ struct mmc_host { | |||
| 155 | #define MMC_CAP_DISABLE (1 << 7) /* Can the host be disabled */ | 156 | #define MMC_CAP_DISABLE (1 << 7) /* Can the host be disabled */ |
| 156 | #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ | 157 | #define MMC_CAP_NONREMOVABLE (1 << 8) /* Nonremovable e.g. eMMC */ |
| 157 | #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ | 158 | #define MMC_CAP_WAIT_WHILE_BUSY (1 << 9) /* Waits while card is busy */ |
| 159 | #define MMC_CAP_ERASE (1 << 10) /* Allow erase/trim commands */ | ||
| 158 | 160 | ||
| 159 | mmc_pm_flag_t pm_caps; /* supported pm features */ | 161 | mmc_pm_flag_t pm_caps; /* supported pm features */ |
| 160 | 162 | ||
| @@ -183,6 +185,7 @@ struct mmc_host { | |||
| 183 | 185 | ||
| 184 | /* Only used with MMC_CAP_DISABLE */ | 186 | /* Only used with MMC_CAP_DISABLE */ |
| 185 | int enabled; /* host is enabled */ | 187 | int enabled; /* host is enabled */ |
| 188 | int rescan_disable; /* disable card detection */ | ||
| 186 | int nesting_cnt; /* "enable" nesting count */ | 189 | int nesting_cnt; /* "enable" nesting count */ |
| 187 | int en_dis_recurs; /* detect recursion */ | 190 | int en_dis_recurs; /* detect recursion */ |
| 188 | unsigned int disable_delay; /* disable delay in msecs */ | 191 | unsigned int disable_delay; /* disable delay in msecs */ |
| @@ -257,6 +260,7 @@ int mmc_card_can_sleep(struct mmc_host *host); | |||
| 257 | int mmc_host_enable(struct mmc_host *host); | 260 | int mmc_host_enable(struct mmc_host *host); |
| 258 | int mmc_host_disable(struct mmc_host *host); | 261 | int mmc_host_disable(struct mmc_host *host); |
| 259 | int mmc_host_lazy_disable(struct mmc_host *host); | 262 | int mmc_host_lazy_disable(struct mmc_host *host); |
| 263 | int mmc_pm_notify(struct notifier_block *notify_block, unsigned long, void *); | ||
| 260 | 264 | ||
| 261 | static inline void mmc_set_disable_delay(struct mmc_host *host, | 265 | static inline void mmc_set_disable_delay(struct mmc_host *host, |
| 262 | unsigned int disable_delay) | 266 | unsigned int disable_delay) |
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index 8a49cbf0376d..dd11ae51fb68 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h | |||
| @@ -251,12 +251,21 @@ struct _mmc_csd { | |||
| 251 | * EXT_CSD fields | 251 | * EXT_CSD fields |
| 252 | */ | 252 | */ |
| 253 | 253 | ||
| 254 | #define EXT_CSD_BUS_WIDTH 183 /* R/W */ | 254 | #define EXT_CSD_ERASE_GROUP_DEF 175 /* R/W */ |
| 255 | #define EXT_CSD_HS_TIMING 185 /* R/W */ | 255 | #define EXT_CSD_ERASED_MEM_CONT 181 /* RO */ |
| 256 | #define EXT_CSD_CARD_TYPE 196 /* RO */ | 256 | #define EXT_CSD_BUS_WIDTH 183 /* R/W */ |
| 257 | #define EXT_CSD_REV 192 /* RO */ | 257 | #define EXT_CSD_HS_TIMING 185 /* R/W */ |
| 258 | #define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ | 258 | #define EXT_CSD_REV 192 /* RO */ |
| 259 | #define EXT_CSD_S_A_TIMEOUT 217 | 259 | #define EXT_CSD_STRUCTURE 194 /* RO */ |
| 260 | #define EXT_CSD_CARD_TYPE 196 /* RO */ | ||
| 261 | #define EXT_CSD_SEC_CNT 212 /* RO, 4 bytes */ | ||
| 262 | #define EXT_CSD_S_A_TIMEOUT 217 /* RO */ | ||
| 263 | #define EXT_CSD_ERASE_TIMEOUT_MULT 223 /* RO */ | ||
| 264 | #define EXT_CSD_HC_ERASE_GRP_SIZE 224 /* RO */ | ||
| 265 | #define EXT_CSD_SEC_TRIM_MULT 229 /* RO */ | ||
| 266 | #define EXT_CSD_SEC_ERASE_MULT 230 /* RO */ | ||
| 267 | #define EXT_CSD_SEC_FEATURE_SUPPORT 231 /* RO */ | ||
| 268 | #define EXT_CSD_TRIM_MULT 232 /* RO */ | ||
| 260 | 269 | ||
| 261 | /* | 270 | /* |
| 262 | * EXT_CSD field definitions | 271 | * EXT_CSD field definitions |
| @@ -274,6 +283,10 @@ struct _mmc_csd { | |||
| 274 | #define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ | 283 | #define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ |
| 275 | #define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */ | 284 | #define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */ |
| 276 | 285 | ||
| 286 | #define EXT_CSD_SEC_ER_EN BIT(0) | ||
| 287 | #define EXT_CSD_SEC_BD_BLK_EN BIT(2) | ||
| 288 | #define EXT_CSD_SEC_GB_CL_EN BIT(4) | ||
| 289 | |||
| 277 | /* | 290 | /* |
| 278 | * MMC_SWITCH access modes | 291 | * MMC_SWITCH access modes |
| 279 | */ | 292 | */ |
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h index f310062cffb4..3fd85e088cc3 100644 --- a/include/linux/mmc/sd.h +++ b/include/linux/mmc/sd.h | |||
| @@ -21,8 +21,13 @@ | |||
| 21 | /* class 10 */ | 21 | /* class 10 */ |
| 22 | #define SD_SWITCH 6 /* adtc [31:0] See below R1 */ | 22 | #define SD_SWITCH 6 /* adtc [31:0] See below R1 */ |
| 23 | 23 | ||
| 24 | /* class 5 */ | ||
| 25 | #define SD_ERASE_WR_BLK_START 32 /* ac [31:0] data addr R1 */ | ||
| 26 | #define SD_ERASE_WR_BLK_END 33 /* ac [31:0] data addr R1 */ | ||
| 27 | |||
| 24 | /* Application commands */ | 28 | /* Application commands */ |
| 25 | #define SD_APP_SET_BUS_WIDTH 6 /* ac [1:0] bus width R1 */ | 29 | #define SD_APP_SET_BUS_WIDTH 6 /* ac [1:0] bus width R1 */ |
| 30 | #define SD_APP_SD_STATUS 13 /* adtc R1 */ | ||
| 26 | #define SD_APP_SEND_NUM_WR_BLKS 22 /* adtc R1 */ | 31 | #define SD_APP_SEND_NUM_WR_BLKS 22 /* adtc R1 */ |
| 27 | #define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ | 32 | #define SD_APP_OP_COND 41 /* bcr [31:0] OCR R3 */ |
| 28 | #define SD_APP_SEND_SCR 51 /* adtc R1 */ | 33 | #define SD_APP_SEND_SCR 51 /* adtc R1 */ |
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index ee24ef8ab616..c04ecfe03f7f 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | #ifdef CONFIG_DEBUG_VM | 4 | #ifdef CONFIG_DEBUG_VM |
| 5 | #define VM_BUG_ON(cond) BUG_ON(cond) | 5 | #define VM_BUG_ON(cond) BUG_ON(cond) |
| 6 | #else | 6 | #else |
| 7 | #define VM_BUG_ON(cond) do { } while (0) | 7 | #define VM_BUG_ON(cond) do { (void)(cond); } while (0) |
| 8 | #endif | 8 | #endif |
| 9 | 9 | ||
| 10 | #ifdef CONFIG_DEBUG_VIRTUAL | 10 | #ifdef CONFIG_DEBUG_VIRTUAL |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b4d109e389b8..6e6e62648a4d 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -348,21 +348,6 @@ struct zone { | |||
| 348 | atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; | 348 | atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; |
| 349 | 349 | ||
| 350 | /* | 350 | /* |
| 351 | * prev_priority holds the scanning priority for this zone. It is | ||
| 352 | * defined as the scanning priority at which we achieved our reclaim | ||
| 353 | * target at the previous try_to_free_pages() or balance_pgdat() | ||
| 354 | * invocation. | ||
| 355 | * | ||
| 356 | * We use prev_priority as a measure of how much stress page reclaim is | ||
| 357 | * under - it drives the swappiness decision: whether to unmap mapped | ||
| 358 | * pages. | ||
| 359 | * | ||
| 360 | * Access to both this field is quite racy even on uniprocessor. But | ||
| 361 | * it is expected to average out OK. | ||
| 362 | */ | ||
| 363 | int prev_priority; | ||
| 364 | |||
| 365 | /* | ||
| 366 | * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on | 351 | * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on |
| 367 | * this zone's LRU. Maintained by the pageout code. | 352 | * this zone's LRU. Maintained by the pageout code. |
| 368 | */ | 353 | */ |
| @@ -651,8 +636,6 @@ typedef struct pglist_data { | |||
| 651 | #include <linux/memory_hotplug.h> | 636 | #include <linux/memory_hotplug.h> |
| 652 | 637 | ||
| 653 | extern struct mutex zonelists_mutex; | 638 | extern struct mutex zonelists_mutex; |
| 654 | void get_zone_counts(unsigned long *active, unsigned long *inactive, | ||
| 655 | unsigned long *free); | ||
| 656 | void build_all_zonelists(void *data); | 639 | void build_all_zonelists(void *data); |
| 657 | void wakeup_kswapd(struct zone *zone, int order); | 640 | void wakeup_kswapd(struct zone *zone, int order); |
| 658 | int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | 641 | int zone_watermark_ok(struct zone *z, int order, unsigned long mark, |
diff --git a/include/linux/mount.h b/include/linux/mount.h index 4bd05474d11d..5e7a59408dd4 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h | |||
| @@ -27,7 +27,6 @@ struct mnt_namespace; | |||
| 27 | #define MNT_NODIRATIME 0x10 | 27 | #define MNT_NODIRATIME 0x10 |
| 28 | #define MNT_RELATIME 0x20 | 28 | #define MNT_RELATIME 0x20 |
| 29 | #define MNT_READONLY 0x40 /* does the user want this to be r/o? */ | 29 | #define MNT_READONLY 0x40 /* does the user want this to be r/o? */ |
| 30 | #define MNT_STRICTATIME 0x80 | ||
| 31 | 30 | ||
| 32 | #define MNT_SHRINKABLE 0x100 | 31 | #define MNT_SHRINKABLE 0x100 |
| 33 | #define MNT_WRITE_HOLD 0x200 | 32 | #define MNT_WRITE_HOLD 0x200 |
| @@ -56,7 +55,11 @@ struct vfsmount { | |||
| 56 | struct list_head mnt_mounts; /* list of children, anchored here */ | 55 | struct list_head mnt_mounts; /* list of children, anchored here */ |
| 57 | struct list_head mnt_child; /* and going through their mnt_child */ | 56 | struct list_head mnt_child; /* and going through their mnt_child */ |
| 58 | int mnt_flags; | 57 | int mnt_flags; |
| 59 | /* 4 bytes hole on 64bits arches */ | 58 | /* 4 bytes hole on 64bits arches without fsnotify */ |
| 59 | #ifdef CONFIG_FSNOTIFY | ||
| 60 | __u32 mnt_fsnotify_mask; | ||
| 61 | struct hlist_head mnt_fsnotify_marks; | ||
| 62 | #endif | ||
| 60 | const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */ | 63 | const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */ |
| 61 | struct list_head mnt_list; | 64 | struct list_head mnt_list; |
| 62 | struct list_head mnt_expire; /* link in fs-specific expiry list */ | 65 | struct list_head mnt_expire; /* link in fs-specific expiry list */ |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 6991ab5b24d1..91b05c171854 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
| @@ -14,8 +14,10 @@ struct irq_desc; | |||
| 14 | extern void mask_msi_irq(unsigned int irq); | 14 | extern void mask_msi_irq(unsigned int irq); |
| 15 | extern void unmask_msi_irq(unsigned int irq); | 15 | extern void unmask_msi_irq(unsigned int irq); |
| 16 | extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 16 | extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); |
| 17 | extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | ||
| 17 | extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 18 | extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); |
| 18 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); | 19 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); |
| 20 | extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); | ||
| 19 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); | 21 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); |
| 20 | 22 | ||
| 21 | struct msi_desc { | 23 | struct msi_desc { |
diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h index 9c3757c5759d..7fa20beb2ab9 100644 --- a/include/linux/mtd/bbm.h +++ b/include/linux/mtd/bbm.h | |||
| @@ -4,12 +4,26 @@ | |||
| 4 | * NAND family Bad Block Management (BBM) header file | 4 | * NAND family Bad Block Management (BBM) header file |
| 5 | * - Bad Block Table (BBT) implementation | 5 | * - Bad Block Table (BBT) implementation |
| 6 | * | 6 | * |
| 7 | * Copyright (c) 2005 Samsung Electronics | 7 | * Copyright © 2005 Samsung Electronics |
| 8 | * Kyungmin Park <kyungmin.park@samsung.com> | 8 | * Kyungmin Park <kyungmin.park@samsung.com> |
| 9 | * | 9 | * |
| 10 | * Copyright (c) 2000-2005 | 10 | * Copyright © 2000-2005 |
| 11 | * Thomas Gleixner <tglx@linuxtronix.de> | 11 | * Thomas Gleixner <tglx@linuxtronix.de> |
| 12 | * | 12 | * |
| 13 | * This program is free software; you can redistribute it and/or modify | ||
| 14 | * it under the terms of the GNU General Public License as published by | ||
| 15 | * the Free Software Foundation; either version 2 of the License, or | ||
| 16 | * (at your option) any later version. | ||
| 17 | * | ||
| 18 | * This program is distributed in the hope that it will be useful, | ||
| 19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 21 | * GNU General Public License for more details. | ||
| 22 | * | ||
| 23 | * You should have received a copy of the GNU General Public License | ||
| 24 | * along with this program; if not, write to the Free Software | ||
| 25 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 26 | * | ||
| 13 | */ | 27 | */ |
| 14 | #ifndef __LINUX_MTD_BBM_H | 28 | #ifndef __LINUX_MTD_BBM_H |
| 15 | #define __LINUX_MTD_BBM_H | 29 | #define __LINUX_MTD_BBM_H |
| @@ -82,6 +96,12 @@ struct nand_bbt_descr { | |||
| 82 | #define NAND_BBT_SAVECONTENT 0x00002000 | 96 | #define NAND_BBT_SAVECONTENT 0x00002000 |
| 83 | /* Search good / bad pattern on the first and the second page */ | 97 | /* Search good / bad pattern on the first and the second page */ |
| 84 | #define NAND_BBT_SCAN2NDPAGE 0x00004000 | 98 | #define NAND_BBT_SCAN2NDPAGE 0x00004000 |
| 99 | /* Search good / bad pattern on the last page of the eraseblock */ | ||
| 100 | #define NAND_BBT_SCANLASTPAGE 0x00008000 | ||
| 101 | /* Chip stores bad block marker on BOTH 1st and 6th bytes of OOB */ | ||
| 102 | #define NAND_BBT_SCANBYTE1AND6 0x00100000 | ||
| 103 | /* The nand_bbt_descr was created dynamicaly and must be freed */ | ||
| 104 | #define NAND_BBT_DYNAMICSTRUCT 0x00200000 | ||
| 85 | 105 | ||
| 86 | /* The maximum number of blocks to scan for a bbt */ | 106 | /* The maximum number of blocks to scan for a bbt */ |
| 87 | #define NAND_BBT_SCAN_MAXBLOCKS 4 | 107 | #define NAND_BBT_SCAN_MAXBLOCKS 4 |
diff --git a/include/linux/mtd/blktrans.h b/include/linux/mtd/blktrans.h index b481ccd7ff3c..26529ebd59cc 100644 --- a/include/linux/mtd/blktrans.h +++ b/include/linux/mtd/blktrans.h | |||
| @@ -1,7 +1,19 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * (C) 2003 David Woodhouse <dwmw2@infradead.org> | 2 | * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org> |
| 3 | * | 3 | * |
| 4 | * Interface to Linux block layer for MTD 'translation layers'. | 4 | * This program is free software; you can redistribute it and/or modify |
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 5 | * | 17 | * |
| 6 | */ | 18 | */ |
| 7 | 19 | ||
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h index 574d9ee066f1..d2118b0eac9a 100644 --- a/include/linux/mtd/cfi.h +++ b/include/linux/mtd/cfi.h | |||
| @@ -1,6 +1,20 @@ | |||
| 1 | 1 | /* | |
| 2 | /* Common Flash Interface structures | 2 | * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al. |
| 3 | * See http://support.intel.com/design/flash/technote/index.htm | 3 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 17 | * | ||
| 4 | */ | 18 | */ |
| 5 | 19 | ||
| 6 | #ifndef __MTD_CFI_H__ | 20 | #ifndef __MTD_CFI_H__ |
diff --git a/include/linux/mtd/cfi_endian.h b/include/linux/mtd/cfi_endian.h index d802f7736be3..51cc3f5917a8 100644 --- a/include/linux/mtd/cfi_endian.h +++ b/include/linux/mtd/cfi_endian.h | |||
| @@ -1,3 +1,22 @@ | |||
| 1 | /* | ||
| 2 | * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 17 | * | ||
| 18 | */ | ||
| 19 | |||
| 1 | #include <asm/byteorder.h> | 20 | #include <asm/byteorder.h> |
| 2 | 21 | ||
| 3 | #ifndef CONFIG_MTD_CFI_ADV_OPTIONS | 22 | #ifndef CONFIG_MTD_CFI_ADV_OPTIONS |
diff --git a/include/linux/mtd/compatmac.h b/include/linux/mtd/compatmac.h deleted file mode 100644 index 7d1300d9bd51..000000000000 --- a/include/linux/mtd/compatmac.h +++ /dev/null | |||
| @@ -1,10 +0,0 @@ | |||
| 1 | |||
| 2 | #ifndef __LINUX_MTD_COMPATMAC_H__ | ||
| 3 | #define __LINUX_MTD_COMPATMAC_H__ | ||
| 4 | |||
| 5 | /* Nothing to see here. We write 2.5-compatible code and this | ||
| 6 | file makes it all OK in older kernels, but it's empty in _current_ | ||
| 7 | kernels. Include guard just to make GCC ignore it in future inclusions | ||
| 8 | anyway... */ | ||
| 9 | |||
| 10 | #endif /* __LINUX_MTD_COMPATMAC_H__ */ | ||
diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h index e80c674daeb3..ccdbe93a909c 100644 --- a/include/linux/mtd/concat.h +++ b/include/linux/mtd/concat.h | |||
| @@ -1,9 +1,22 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * MTD device concatenation layer definitions | 2 | * MTD device concatenation layer definitions |
| 3 | * | 3 | * |
| 4 | * (C) 2002 Robert Kaiser <rkaiser@sysgo.de> | 4 | * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de> |
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 5 | * | 19 | * |
| 6 | * This code is GPL | ||
| 7 | */ | 20 | */ |
| 8 | 21 | ||
| 9 | #ifndef MTD_CONCAT_H | 22 | #ifndef MTD_CONCAT_H |
diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h index 0a6d516ab71d..0f6fea73a1f6 100644 --- a/include/linux/mtd/doc2000.h +++ b/include/linux/mtd/doc2000.h | |||
| @@ -1,12 +1,25 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Linux driver for Disk-On-Chip devices | 2 | * Linux driver for Disk-On-Chip devices |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 1999 Machine Vision Holdings, Inc. | 4 | * Copyright © 1999 Machine Vision Holdings, Inc. |
| 5 | * Copyright (C) 2001-2003 David Woodhouse <dwmw2@infradead.org> | 5 | * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> |
| 6 | * Copyright (C) 2002-2003 Greg Ungerer <gerg@snapgear.com> | 6 | * Copyright © 2002-2003 Greg Ungerer <gerg@snapgear.com> |
| 7 | * Copyright (C) 2002-2003 SnapGear Inc | 7 | * Copyright © 2002-2003 SnapGear Inc |
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License as published by | ||
| 11 | * the Free Software Foundation; either version 2 of the License, or | ||
| 12 | * (at your option) any later version. | ||
| 13 | * | ||
| 14 | * This program is distributed in the hope that it will be useful, | ||
| 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 17 | * GNU General Public License for more details. | ||
| 18 | * | ||
| 19 | * You should have received a copy of the GNU General Public License | ||
| 20 | * along with this program; if not, write to the Free Software | ||
| 21 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 8 | * | 22 | * |
| 9 | * Released under GPL | ||
| 10 | */ | 23 | */ |
| 11 | 24 | ||
| 12 | #ifndef __MTD_DOC2000_H__ | 25 | #ifndef __MTD_DOC2000_H__ |
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h index f43e9b49b751..b63fa457febd 100644 --- a/include/linux/mtd/flashchip.h +++ b/include/linux/mtd/flashchip.h | |||
| @@ -1,10 +1,21 @@ | |||
| 1 | |||
| 2 | /* | 1 | /* |
| 3 | * struct flchip definition | 2 | * Copyright © 2000 Red Hat UK Limited |
| 3 | * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License as published by | ||
| 7 | * the Free Software Foundation; either version 2 of the License, or | ||
| 8 | * (at your option) any later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 4 | * | 14 | * |
| 5 | * Contains information about the location and state of a given flash device | 15 | * You should have received a copy of the GNU General Public License |
| 16 | * along with this program; if not, write to the Free Software | ||
| 17 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 6 | * | 18 | * |
| 7 | * (C) 2000 Red Hat. GPLd. | ||
| 8 | */ | 19 | */ |
| 9 | 20 | ||
| 10 | #ifndef __MTD_FLASHCHIP_H__ | 21 | #ifndef __MTD_FLASHCHIP_H__ |
| @@ -92,7 +103,7 @@ struct flchip { | |||
| 92 | /* This is used to handle contention on write/erase operations | 103 | /* This is used to handle contention on write/erase operations |
| 93 | between partitions of the same physical chip. */ | 104 | between partitions of the same physical chip. */ |
| 94 | struct flchip_shared { | 105 | struct flchip_shared { |
| 95 | spinlock_t lock; | 106 | struct mutex lock; |
| 96 | struct flchip *writing; | 107 | struct flchip *writing; |
| 97 | struct flchip *erasing; | 108 | struct flchip *erasing; |
| 98 | }; | 109 | }; |
diff --git a/include/linux/mtd/gen_probe.h b/include/linux/mtd/gen_probe.h index df362ddf2949..2c456054fded 100644 --- a/include/linux/mtd/gen_probe.h +++ b/include/linux/mtd/gen_probe.h | |||
| @@ -1,6 +1,21 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * (C) 2001, 2001 Red Hat, Inc. | 2 | * Copyright © 2001 Red Hat UK Limited |
| 3 | * GPL'd | 3 | * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> |
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License as published by | ||
| 7 | * the Free Software Foundation; either version 2 of the License, or | ||
| 8 | * (at your option) any later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, write to the Free Software | ||
| 17 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 18 | * | ||
| 4 | */ | 19 | */ |
| 5 | 20 | ||
| 6 | #ifndef __LINUX_MTD_GEN_PROBE_H__ | 21 | #ifndef __LINUX_MTD_GEN_PROBE_H__ |
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index de89eca864ce..a9e6ba46865e 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h | |||
| @@ -1,3 +1,21 @@ | |||
| 1 | /* | ||
| 2 | * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al. | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 17 | * | ||
| 18 | */ | ||
| 1 | 19 | ||
| 2 | /* Overhauled routines for dealing with different mmap regions of flash */ | 20 | /* Overhauled routines for dealing with different mmap regions of flash */ |
| 3 | 21 | ||
| @@ -9,7 +27,6 @@ | |||
| 9 | #include <linux/string.h> | 27 | #include <linux/string.h> |
| 10 | #include <linux/bug.h> | 28 | #include <linux/bug.h> |
| 11 | 29 | ||
| 12 | #include <linux/mtd/compatmac.h> | ||
| 13 | 30 | ||
| 14 | #include <asm/unaligned.h> | 31 | #include <asm/unaligned.h> |
| 15 | #include <asm/system.h> | 32 | #include <asm/system.h> |
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 5326435a7571..8485e42a9b09 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h | |||
| @@ -1,7 +1,20 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> et al. | 2 | * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al. |
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 3 | * | 17 | * |
| 4 | * Released under GPL | ||
| 5 | */ | 18 | */ |
| 6 | 19 | ||
| 7 | #ifndef __MTD_MTD_H__ | 20 | #ifndef __MTD_MTD_H__ |
| @@ -13,7 +26,6 @@ | |||
| 13 | #include <linux/notifier.h> | 26 | #include <linux/notifier.h> |
| 14 | #include <linux/device.h> | 27 | #include <linux/device.h> |
| 15 | 28 | ||
| 16 | #include <linux/mtd/compatmac.h> | ||
| 17 | #include <mtd/mtd-abi.h> | 29 | #include <mtd/mtd-abi.h> |
| 18 | 30 | ||
| 19 | #include <asm/div64.h> | 31 | #include <asm/div64.h> |
| @@ -216,6 +228,7 @@ struct mtd_info { | |||
| 216 | /* Chip-supported device locking */ | 228 | /* Chip-supported device locking */ |
| 217 | int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); | 229 | int (*lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); |
| 218 | int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); | 230 | int (*unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); |
| 231 | int (*is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len); | ||
| 219 | 232 | ||
| 220 | /* Power Management functions */ | 233 | /* Power Management functions */ |
| 221 | int (*suspend) (struct mtd_info *mtd); | 234 | int (*suspend) (struct mtd_info *mtd); |
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index a81b185e23a7..102e12c58cb3 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
| @@ -1,9 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * linux/include/linux/mtd/nand.h | 2 | * linux/include/linux/mtd/nand.h |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2000 David Woodhouse <dwmw2@infradead.org> | 4 | * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> |
| 5 | * Steven J. Hill <sjhill@realitydiluted.com> | 5 | * Steven J. Hill <sjhill@realitydiluted.com> |
| 6 | * Thomas Gleixner <tglx@linutronix.de> | 6 | * Thomas Gleixner <tglx@linutronix.de> |
| 7 | * | 7 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
| 9 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
| @@ -181,8 +181,6 @@ typedef enum { | |||
| 181 | #define NAND_NO_READRDY 0x00000100 | 181 | #define NAND_NO_READRDY 0x00000100 |
| 182 | /* Chip does not allow subpage writes */ | 182 | /* Chip does not allow subpage writes */ |
| 183 | #define NAND_NO_SUBPAGE_WRITE 0x00000200 | 183 | #define NAND_NO_SUBPAGE_WRITE 0x00000200 |
| 184 | /* Chip stores bad block marker on the last page of the eraseblock */ | ||
| 185 | #define NAND_BB_LAST_PAGE 0x00000400 | ||
| 186 | 184 | ||
| 187 | /* Device is one of 'new' xD cards that expose fake nand command set */ | 185 | /* Device is one of 'new' xD cards that expose fake nand command set */ |
| 188 | #define NAND_BROKEN_XD 0x00000400 | 186 | #define NAND_BROKEN_XD 0x00000400 |
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h index 41bc013571d0..4d8406c81652 100644 --- a/include/linux/mtd/nand_ecc.h +++ b/include/linux/mtd/nand_ecc.h | |||
| @@ -1,7 +1,9 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * drivers/mtd/nand_ecc.h | 2 | * drivers/mtd/nand_ecc.h |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) | 4 | * Copyright (C) 2000-2010 Steven J. Hill <sjhill@realitydiluted.com> |
| 5 | * David Woodhouse <dwmw2@infradead.org> | ||
| 6 | * Thomas Gleixner <tglx@linutronix.de> | ||
| 5 | * | 7 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as | 9 | * it under the terms of the GNU General Public License version 2 as |
diff --git a/include/linux/mtd/nftl.h b/include/linux/mtd/nftl.h index dcaf611ed748..b059629e22bc 100644 --- a/include/linux/mtd/nftl.h +++ b/include/linux/mtd/nftl.h | |||
| @@ -1,5 +1,20 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org> | 2 | * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> |
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, write to the Free Software | ||
| 16 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 17 | * | ||
| 3 | */ | 18 | */ |
| 4 | 19 | ||
| 5 | #ifndef __MTD_NFTL_H__ | 20 | #ifndef __MTD_NFTL_H__ |
diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h index c26ff86ad08a..0c8815bfae1c 100644 --- a/include/linux/mtd/onenand.h +++ b/include/linux/mtd/onenand.h | |||
| @@ -68,6 +68,7 @@ struct onenand_bufferram { | |||
| 68 | * @write_word: [REPLACEABLE] hardware specific function for write | 68 | * @write_word: [REPLACEABLE] hardware specific function for write |
| 69 | * register of OneNAND | 69 | * register of OneNAND |
| 70 | * @mmcontrol: sync burst read function | 70 | * @mmcontrol: sync burst read function |
| 71 | * @chip_probe: [REPLACEABLE] hardware specific function for chip probe | ||
| 71 | * @block_markbad: function to mark a block as bad | 72 | * @block_markbad: function to mark a block as bad |
| 72 | * @scan_bbt: [REPLACEALBE] hardware specific function for scanning | 73 | * @scan_bbt: [REPLACEALBE] hardware specific function for scanning |
| 73 | * Bad block Table | 74 | * Bad block Table |
| @@ -114,6 +115,7 @@ struct onenand_chip { | |||
| 114 | unsigned short (*read_word)(void __iomem *addr); | 115 | unsigned short (*read_word)(void __iomem *addr); |
| 115 | void (*write_word)(unsigned short value, void __iomem *addr); | 116 | void (*write_word)(unsigned short value, void __iomem *addr); |
| 116 | void (*mmcontrol)(struct mtd_info *mtd, int sync_read); | 117 | void (*mmcontrol)(struct mtd_info *mtd, int sync_read); |
| 118 | int (*chip_probe)(struct mtd_info *mtd); | ||
| 117 | int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); | 119 | int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); |
| 118 | int (*scan_bbt)(struct mtd_info *mtd); | 120 | int (*scan_bbt)(struct mtd_info *mtd); |
| 119 | 121 | ||
diff --git a/include/linux/mtd/physmap.h b/include/linux/mtd/physmap.h index 76f7cabf07d3..bcfd9f777454 100644 --- a/include/linux/mtd/physmap.h +++ b/include/linux/mtd/physmap.h | |||
| @@ -25,6 +25,7 @@ struct physmap_flash_data { | |||
| 25 | void (*set_vpp)(struct map_info *, int); | 25 | void (*set_vpp)(struct map_info *, int); |
| 26 | unsigned int nr_parts; | 26 | unsigned int nr_parts; |
| 27 | unsigned int pfow_base; | 27 | unsigned int pfow_base; |
| 28 | char *probe_type; | ||
| 28 | struct mtd_partition *parts; | 29 | struct mtd_partition *parts; |
| 29 | }; | 30 | }; |
| 30 | 31 | ||
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 9b8299af3741..07e40c625972 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
| @@ -523,6 +523,7 @@ enum { | |||
| 523 | NFSPROC4_CLNT_GETACL, | 523 | NFSPROC4_CLNT_GETACL, |
| 524 | NFSPROC4_CLNT_SETACL, | 524 | NFSPROC4_CLNT_SETACL, |
| 525 | NFSPROC4_CLNT_FS_LOCATIONS, | 525 | NFSPROC4_CLNT_FS_LOCATIONS, |
| 526 | NFSPROC4_CLNT_RELEASE_LOCKOWNER, | ||
| 526 | 527 | ||
| 527 | /* nfs41 */ | 528 | /* nfs41 */ |
| 528 | NFSPROC4_CLNT_EXCHANGE_ID, | 529 | NFSPROC4_CLNT_EXCHANGE_ID, |
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index bad4d121b16e..508f8cf6da37 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
| @@ -72,13 +72,20 @@ struct nfs_access_entry { | |||
| 72 | int mask; | 72 | int mask; |
| 73 | }; | 73 | }; |
| 74 | 74 | ||
| 75 | struct nfs_lock_context { | ||
| 76 | atomic_t count; | ||
| 77 | struct list_head list; | ||
| 78 | struct nfs_open_context *open_context; | ||
| 79 | fl_owner_t lockowner; | ||
| 80 | pid_t pid; | ||
| 81 | }; | ||
| 82 | |||
| 75 | struct nfs4_state; | 83 | struct nfs4_state; |
| 76 | struct nfs_open_context { | 84 | struct nfs_open_context { |
| 77 | atomic_t count; | 85 | struct nfs_lock_context lock_context; |
| 78 | struct path path; | 86 | struct path path; |
| 79 | struct rpc_cred *cred; | 87 | struct rpc_cred *cred; |
| 80 | struct nfs4_state *state; | 88 | struct nfs4_state *state; |
| 81 | fl_owner_t lockowner; | ||
| 82 | fmode_t mode; | 89 | fmode_t mode; |
| 83 | 90 | ||
| 84 | unsigned long flags; | 91 | unsigned long flags; |
| @@ -353,6 +360,8 @@ extern void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr); | |||
| 353 | extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); | 360 | extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx); |
| 354 | extern void put_nfs_open_context(struct nfs_open_context *ctx); | 361 | extern void put_nfs_open_context(struct nfs_open_context *ctx); |
| 355 | extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); | 362 | extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode); |
| 363 | extern struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx); | ||
| 364 | extern void nfs_put_lock_context(struct nfs_lock_context *l_ctx); | ||
| 356 | extern u64 nfs_compat_user_ino64(u64 fileid); | 365 | extern u64 nfs_compat_user_ino64(u64 fileid); |
| 357 | extern void nfs_fattr_init(struct nfs_fattr *fattr); | 366 | extern void nfs_fattr_init(struct nfs_fattr *fattr); |
| 358 | 367 | ||
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index d6e10a4c06e5..c82ee7cd6288 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
| @@ -15,6 +15,7 @@ struct nlm_host; | |||
| 15 | struct nfs4_sequence_args; | 15 | struct nfs4_sequence_args; |
| 16 | struct nfs4_sequence_res; | 16 | struct nfs4_sequence_res; |
| 17 | struct nfs_server; | 17 | struct nfs_server; |
| 18 | struct nfs4_minor_version_ops; | ||
| 18 | 19 | ||
| 19 | /* | 20 | /* |
| 20 | * The nfs_client identifies our client state to the server. | 21 | * The nfs_client identifies our client state to the server. |
| @@ -70,11 +71,7 @@ struct nfs_client { | |||
| 70 | */ | 71 | */ |
| 71 | char cl_ipaddr[48]; | 72 | char cl_ipaddr[48]; |
| 72 | unsigned char cl_id_uniquifier; | 73 | unsigned char cl_id_uniquifier; |
| 73 | int (* cl_call_sync)(struct nfs_server *server, | 74 | const struct nfs4_minor_version_ops *cl_mvops; |
| 74 | struct rpc_message *msg, | ||
| 75 | struct nfs4_sequence_args *args, | ||
| 76 | struct nfs4_sequence_res *res, | ||
| 77 | int cache_reply); | ||
| 78 | #endif /* CONFIG_NFS_V4 */ | 75 | #endif /* CONFIG_NFS_V4 */ |
| 79 | 76 | ||
| 80 | #ifdef CONFIG_NFS_V4_1 | 77 | #ifdef CONFIG_NFS_V4_1 |
diff --git a/include/linux/nfs_mount.h b/include/linux/nfs_mount.h index 4499016e6d0d..5d59ae861aa6 100644 --- a/include/linux/nfs_mount.h +++ b/include/linux/nfs_mount.h | |||
| @@ -69,5 +69,6 @@ struct nfs_mount_data { | |||
| 69 | #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 | 69 | #define NFS_MOUNT_LOOKUP_CACHE_NONEG 0x10000 |
| 70 | #define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000 | 70 | #define NFS_MOUNT_LOOKUP_CACHE_NONE 0x20000 |
| 71 | #define NFS_MOUNT_NORESVPORT 0x40000 | 71 | #define NFS_MOUNT_NORESVPORT 0x40000 |
| 72 | #define NFS_MOUNT_LEGACY_INTERFACE 0x80000 | ||
| 72 | 73 | ||
| 73 | #endif | 74 | #endif |
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 3c60685d972b..f8b60e7f4c44 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h | |||
| @@ -39,6 +39,7 @@ struct nfs_page { | |||
| 39 | struct list_head wb_list; /* Defines state of page: */ | 39 | struct list_head wb_list; /* Defines state of page: */ |
| 40 | struct page *wb_page; /* page to read in/write out */ | 40 | struct page *wb_page; /* page to read in/write out */ |
| 41 | struct nfs_open_context *wb_context; /* File state context info */ | 41 | struct nfs_open_context *wb_context; /* File state context info */ |
| 42 | struct nfs_lock_context *wb_lock_context; /* lock context info */ | ||
| 42 | atomic_t wb_complete; /* i/os we're waiting for */ | 43 | atomic_t wb_complete; /* i/os we're waiting for */ |
| 43 | pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ | 44 | pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ |
| 44 | unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ | 45 | unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 51914d7d6cc4..fc461926c412 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
| @@ -196,8 +196,10 @@ struct nfs_openargs { | |||
| 196 | __u64 clientid; | 196 | __u64 clientid; |
| 197 | __u64 id; | 197 | __u64 id; |
| 198 | union { | 198 | union { |
| 199 | struct iattr * attrs; /* UNCHECKED, GUARDED */ | 199 | struct { |
| 200 | nfs4_verifier verifier; /* EXCLUSIVE */ | 200 | struct iattr * attrs; /* UNCHECKED, GUARDED */ |
| 201 | nfs4_verifier verifier; /* EXCLUSIVE */ | ||
| 202 | }; | ||
| 201 | nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ | 203 | nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */ |
| 202 | fmode_t delegation_type; /* CLAIM_PREVIOUS */ | 204 | fmode_t delegation_type; /* CLAIM_PREVIOUS */ |
| 203 | } u; | 205 | } u; |
| @@ -313,6 +315,10 @@ struct nfs_lockt_res { | |||
| 313 | struct nfs4_sequence_res seq_res; | 315 | struct nfs4_sequence_res seq_res; |
| 314 | }; | 316 | }; |
| 315 | 317 | ||
| 318 | struct nfs_release_lockowner_args { | ||
| 319 | struct nfs_lowner lock_owner; | ||
| 320 | }; | ||
| 321 | |||
| 316 | struct nfs4_delegreturnargs { | 322 | struct nfs4_delegreturnargs { |
| 317 | const struct nfs_fh *fhandle; | 323 | const struct nfs_fh *fhandle; |
| 318 | const nfs4_stateid *stateid; | 324 | const nfs4_stateid *stateid; |
| @@ -332,6 +338,7 @@ struct nfs4_delegreturnres { | |||
| 332 | struct nfs_readargs { | 338 | struct nfs_readargs { |
| 333 | struct nfs_fh * fh; | 339 | struct nfs_fh * fh; |
| 334 | struct nfs_open_context *context; | 340 | struct nfs_open_context *context; |
| 341 | struct nfs_lock_context *lock_context; | ||
| 335 | __u64 offset; | 342 | __u64 offset; |
| 336 | __u32 count; | 343 | __u32 count; |
| 337 | unsigned int pgbase; | 344 | unsigned int pgbase; |
| @@ -352,6 +359,7 @@ struct nfs_readres { | |||
| 352 | struct nfs_writeargs { | 359 | struct nfs_writeargs { |
| 353 | struct nfs_fh * fh; | 360 | struct nfs_fh * fh; |
| 354 | struct nfs_open_context *context; | 361 | struct nfs_open_context *context; |
| 362 | struct nfs_lock_context *lock_context; | ||
| 355 | __u64 offset; | 363 | __u64 offset; |
| 356 | __u32 count; | 364 | __u32 count; |
| 357 | enum nfs3_stable_how stable; | 365 | enum nfs3_stable_how stable; |
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index 8c2c6116e788..f5487b6f91ed 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h | |||
| @@ -160,7 +160,7 @@ struct nilfs_super_root { | |||
| 160 | * struct nilfs_super_block - structure of super block on disk | 160 | * struct nilfs_super_block - structure of super block on disk |
| 161 | */ | 161 | */ |
| 162 | struct nilfs_super_block { | 162 | struct nilfs_super_block { |
| 163 | __le32 s_rev_level; /* Revision level */ | 163 | /*00*/ __le32 s_rev_level; /* Revision level */ |
| 164 | __le16 s_minor_rev_level; /* minor revision level */ | 164 | __le16 s_minor_rev_level; /* minor revision level */ |
| 165 | __le16 s_magic; /* Magic signature */ | 165 | __le16 s_magic; /* Magic signature */ |
| 166 | 166 | ||
| @@ -169,50 +169,53 @@ struct nilfs_super_block { | |||
| 169 | is excluded. */ | 169 | is excluded. */ |
| 170 | __le16 s_flags; /* flags */ | 170 | __le16 s_flags; /* flags */ |
| 171 | __le32 s_crc_seed; /* Seed value of CRC calculation */ | 171 | __le32 s_crc_seed; /* Seed value of CRC calculation */ |
| 172 | __le32 s_sum; /* Check sum of super block */ | 172 | /*10*/ __le32 s_sum; /* Check sum of super block */ |
| 173 | 173 | ||
| 174 | __le32 s_log_block_size; /* Block size represented as follows | 174 | __le32 s_log_block_size; /* Block size represented as follows |
| 175 | blocksize = | 175 | blocksize = |
| 176 | 1 << (s_log_block_size + 10) */ | 176 | 1 << (s_log_block_size + 10) */ |
| 177 | __le64 s_nsegments; /* Number of segments in filesystem */ | 177 | __le64 s_nsegments; /* Number of segments in filesystem */ |
| 178 | __le64 s_dev_size; /* block device size in bytes */ | 178 | /*20*/ __le64 s_dev_size; /* block device size in bytes */ |
| 179 | __le64 s_first_data_block; /* 1st seg disk block number */ | 179 | __le64 s_first_data_block; /* 1st seg disk block number */ |
| 180 | __le32 s_blocks_per_segment; /* number of blocks per full segment */ | 180 | /*30*/ __le32 s_blocks_per_segment; /* number of blocks per full segment */ |
| 181 | __le32 s_r_segments_percentage; /* Reserved segments percentage */ | 181 | __le32 s_r_segments_percentage; /* Reserved segments percentage */ |
| 182 | 182 | ||
| 183 | __le64 s_last_cno; /* Last checkpoint number */ | 183 | __le64 s_last_cno; /* Last checkpoint number */ |
| 184 | __le64 s_last_pseg; /* disk block addr pseg written last */ | 184 | /*40*/ __le64 s_last_pseg; /* disk block addr pseg written last */ |
| 185 | __le64 s_last_seq; /* seq. number of seg written last */ | 185 | __le64 s_last_seq; /* seq. number of seg written last */ |
| 186 | __le64 s_free_blocks_count; /* Free blocks count */ | 186 | /*50*/ __le64 s_free_blocks_count; /* Free blocks count */ |
| 187 | 187 | ||
| 188 | __le64 s_ctime; /* Creation time (execution time of | 188 | __le64 s_ctime; /* Creation time (execution time of |
| 189 | newfs) */ | 189 | newfs) */ |
| 190 | __le64 s_mtime; /* Mount time */ | 190 | /*60*/ __le64 s_mtime; /* Mount time */ |
| 191 | __le64 s_wtime; /* Write time */ | 191 | __le64 s_wtime; /* Write time */ |
| 192 | __le16 s_mnt_count; /* Mount count */ | 192 | /*70*/ __le16 s_mnt_count; /* Mount count */ |
| 193 | __le16 s_max_mnt_count; /* Maximal mount count */ | 193 | __le16 s_max_mnt_count; /* Maximal mount count */ |
| 194 | __le16 s_state; /* File system state */ | 194 | __le16 s_state; /* File system state */ |
| 195 | __le16 s_errors; /* Behaviour when detecting errors */ | 195 | __le16 s_errors; /* Behaviour when detecting errors */ |
| 196 | __le64 s_lastcheck; /* time of last check */ | 196 | __le64 s_lastcheck; /* time of last check */ |
| 197 | 197 | ||
| 198 | __le32 s_checkinterval; /* max. time between checks */ | 198 | /*80*/ __le32 s_checkinterval; /* max. time between checks */ |
| 199 | __le32 s_creator_os; /* OS */ | 199 | __le32 s_creator_os; /* OS */ |
| 200 | __le16 s_def_resuid; /* Default uid for reserved blocks */ | 200 | __le16 s_def_resuid; /* Default uid for reserved blocks */ |
| 201 | __le16 s_def_resgid; /* Default gid for reserved blocks */ | 201 | __le16 s_def_resgid; /* Default gid for reserved blocks */ |
| 202 | __le32 s_first_ino; /* First non-reserved inode */ | 202 | __le32 s_first_ino; /* First non-reserved inode */ |
| 203 | 203 | ||
| 204 | __le16 s_inode_size; /* Size of an inode */ | 204 | /*90*/ __le16 s_inode_size; /* Size of an inode */ |
| 205 | __le16 s_dat_entry_size; /* Size of a dat entry */ | 205 | __le16 s_dat_entry_size; /* Size of a dat entry */ |
| 206 | __le16 s_checkpoint_size; /* Size of a checkpoint */ | 206 | __le16 s_checkpoint_size; /* Size of a checkpoint */ |
| 207 | __le16 s_segment_usage_size; /* Size of a segment usage */ | 207 | __le16 s_segment_usage_size; /* Size of a segment usage */ |
| 208 | 208 | ||
| 209 | __u8 s_uuid[16]; /* 128-bit uuid for volume */ | 209 | /*98*/ __u8 s_uuid[16]; /* 128-bit uuid for volume */ |
| 210 | char s_volume_name[80]; /* volume name */ | 210 | /*A8*/ char s_volume_name[80]; /* volume name */ |
| 211 | 211 | ||
| 212 | __le32 s_c_interval; /* Commit interval of segment */ | 212 | /*F8*/ __le32 s_c_interval; /* Commit interval of segment */ |
| 213 | __le32 s_c_block_max; /* Threshold of data amount for | 213 | __le32 s_c_block_max; /* Threshold of data amount for |
| 214 | the segment construction */ | 214 | the segment construction */ |
| 215 | __u32 s_reserved[192]; /* padding to the end of the block */ | 215 | /*100*/ __le64 s_feature_compat; /* Compatible feature set */ |
| 216 | __le64 s_feature_compat_ro; /* Read-only compatible feature set */ | ||
| 217 | __le64 s_feature_incompat; /* Incompatible feature set */ | ||
| 218 | __u32 s_reserved[186]; /* padding to the end of the block */ | ||
| 216 | }; | 219 | }; |
| 217 | 220 | ||
| 218 | /* | 221 | /* |
| @@ -228,6 +231,16 @@ struct nilfs_super_block { | |||
| 228 | #define NILFS_MINOR_REV 0 /* minor revision */ | 231 | #define NILFS_MINOR_REV 0 /* minor revision */ |
| 229 | 232 | ||
| 230 | /* | 233 | /* |
| 234 | * Feature set definitions | ||
| 235 | * | ||
| 236 | * If there is a bit set in the incompatible feature set that the kernel | ||
| 237 | * doesn't know about, it should refuse to mount the filesystem. | ||
| 238 | */ | ||
| 239 | #define NILFS_FEATURE_COMPAT_SUPP 0ULL | ||
| 240 | #define NILFS_FEATURE_COMPAT_RO_SUPP 0ULL | ||
| 241 | #define NILFS_FEATURE_INCOMPAT_SUPP 0ULL | ||
| 242 | |||
| 243 | /* | ||
| 231 | * Bytes count of super_block for CRC-calculation | 244 | * Bytes count of super_block for CRC-calculation |
| 232 | */ | 245 | */ |
| 233 | #define NILFS_SB_BYTES \ | 246 | #define NILFS_SB_BYTES \ |
| @@ -274,6 +287,12 @@ struct nilfs_super_block { | |||
| 274 | #define NILFS_NAME_LEN 255 | 287 | #define NILFS_NAME_LEN 255 |
| 275 | 288 | ||
| 276 | /* | 289 | /* |
| 290 | * Block size limitations | ||
| 291 | */ | ||
| 292 | #define NILFS_MIN_BLOCK_SIZE 1024 | ||
| 293 | #define NILFS_MAX_BLOCK_SIZE 65536 | ||
| 294 | |||
| 295 | /* | ||
| 277 | * The new version of the directory entry. Since V0 structures are | 296 | * The new version of the directory entry. Since V0 structures are |
| 278 | * stored in intel byte order, and the name_len field could never be | 297 | * stored in intel byte order, and the name_len field could never be |
| 279 | * bigger than 255 chars, it's safe to reclaim the extra byte for the | 298 | * bigger than 255 chars, it's safe to reclaim the extra byte for the |
| @@ -313,7 +332,25 @@ enum { | |||
| 313 | #define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1) | 332 | #define NILFS_DIR_ROUND (NILFS_DIR_PAD - 1) |
| 314 | #define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \ | 333 | #define NILFS_DIR_REC_LEN(name_len) (((name_len) + 12 + NILFS_DIR_ROUND) & \ |
| 315 | ~NILFS_DIR_ROUND) | 334 | ~NILFS_DIR_ROUND) |
| 335 | #define NILFS_MAX_REC_LEN ((1<<16)-1) | ||
| 316 | 336 | ||
| 337 | static inline unsigned nilfs_rec_len_from_disk(__le16 dlen) | ||
| 338 | { | ||
| 339 | unsigned len = le16_to_cpu(dlen); | ||
| 340 | |||
| 341 | if (len == NILFS_MAX_REC_LEN) | ||
| 342 | return 1 << 16; | ||
| 343 | return len; | ||
| 344 | } | ||
| 345 | |||
| 346 | static inline __le16 nilfs_rec_len_to_disk(unsigned len) | ||
| 347 | { | ||
| 348 | if (len == (1 << 16)) | ||
| 349 | return cpu_to_le16(NILFS_MAX_REC_LEN); | ||
| 350 | else if (len > (1 << 16)) | ||
| 351 | BUG(); | ||
| 352 | return cpu_to_le16(len); | ||
| 353 | } | ||
| 317 | 354 | ||
| 318 | /** | 355 | /** |
| 319 | * struct nilfs_finfo - file information | 356 | * struct nilfs_finfo - file information |
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index b752e807adde..06aab5eee134 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
| @@ -20,10 +20,14 @@ extern void touch_nmi_watchdog(void); | |||
| 20 | extern void acpi_nmi_disable(void); | 20 | extern void acpi_nmi_disable(void); |
| 21 | extern void acpi_nmi_enable(void); | 21 | extern void acpi_nmi_enable(void); |
| 22 | #else | 22 | #else |
| 23 | #ifndef CONFIG_HARDLOCKUP_DETECTOR | ||
| 23 | static inline void touch_nmi_watchdog(void) | 24 | static inline void touch_nmi_watchdog(void) |
| 24 | { | 25 | { |
| 25 | touch_softlockup_watchdog(); | 26 | touch_softlockup_watchdog(); |
| 26 | } | 27 | } |
| 28 | #else | ||
| 29 | extern void touch_nmi_watchdog(void); | ||
| 30 | #endif | ||
| 27 | static inline void acpi_nmi_disable(void) { } | 31 | static inline void acpi_nmi_disable(void) { } |
| 28 | static inline void acpi_nmi_enable(void) { } | 32 | static inline void acpi_nmi_enable(void) { } |
| 29 | #endif | 33 | #endif |
| @@ -47,4 +51,13 @@ static inline bool trigger_all_cpu_backtrace(void) | |||
| 47 | } | 51 | } |
| 48 | #endif | 52 | #endif |
| 49 | 53 | ||
| 54 | #ifdef CONFIG_LOCKUP_DETECTOR | ||
| 55 | int hw_nmi_is_cpu_stuck(struct pt_regs *); | ||
| 56 | u64 hw_nmi_get_sample_period(void); | ||
| 57 | extern int watchdog_enabled; | ||
| 58 | struct ctl_table; | ||
| 59 | extern int proc_dowatchdog_enabled(struct ctl_table *, int , | ||
| 60 | void __user *, size_t *, loff_t *); | ||
| 61 | #endif | ||
| 62 | |||
| 50 | #endif | 63 | #endif |
diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h index 7e4cd616bcb5..c0b018790f07 100644 --- a/include/linux/omapfb.h +++ b/include/linux/omapfb.h | |||
| @@ -85,6 +85,9 @@ | |||
| 85 | #define OMAPFB_MEMTYPE_SRAM 1 | 85 | #define OMAPFB_MEMTYPE_SRAM 1 |
| 86 | #define OMAPFB_MEMTYPE_MAX 1 | 86 | #define OMAPFB_MEMTYPE_MAX 1 |
| 87 | 87 | ||
| 88 | #define OMAPFB_MEM_IDX_ENABLED 0x80 | ||
| 89 | #define OMAPFB_MEM_IDX_MASK 0x7f | ||
| 90 | |||
| 88 | enum omapfb_color_format { | 91 | enum omapfb_color_format { |
| 89 | OMAPFB_COLOR_RGB565 = 0, | 92 | OMAPFB_COLOR_RGB565 = 0, |
| 90 | OMAPFB_COLOR_YUV422, | 93 | OMAPFB_COLOR_YUV422, |
| @@ -136,7 +139,7 @@ struct omapfb_plane_info { | |||
| 136 | __u8 enabled; | 139 | __u8 enabled; |
| 137 | __u8 channel_out; | 140 | __u8 channel_out; |
| 138 | __u8 mirror; | 141 | __u8 mirror; |
| 139 | __u8 reserved1; | 142 | __u8 mem_idx; |
| 140 | __u32 out_width; | 143 | __u32 out_width; |
| 141 | __u32 out_height; | 144 | __u32 out_height; |
| 142 | __u32 reserved2[12]; | 145 | __u32 reserved2[12]; |
diff --git a/include/linux/oom.h b/include/linux/oom.h index 537662315627..5e3aa8311c5e 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
| @@ -1,19 +1,34 @@ | |||
| 1 | #ifndef __INCLUDE_LINUX_OOM_H | 1 | #ifndef __INCLUDE_LINUX_OOM_H |
| 2 | #define __INCLUDE_LINUX_OOM_H | 2 | #define __INCLUDE_LINUX_OOM_H |
| 3 | 3 | ||
| 4 | /* /proc/<pid>/oom_adj set to -17 protects from the oom-killer */ | 4 | /* |
| 5 | * /proc/<pid>/oom_adj is deprecated, see | ||
| 6 | * Documentation/feature-removal-schedule.txt. | ||
| 7 | * | ||
| 8 | * /proc/<pid>/oom_adj set to -17 protects from the oom-killer | ||
| 9 | */ | ||
| 5 | #define OOM_DISABLE (-17) | 10 | #define OOM_DISABLE (-17) |
| 6 | /* inclusive */ | 11 | /* inclusive */ |
| 7 | #define OOM_ADJUST_MIN (-16) | 12 | #define OOM_ADJUST_MIN (-16) |
| 8 | #define OOM_ADJUST_MAX 15 | 13 | #define OOM_ADJUST_MAX 15 |
| 9 | 14 | ||
| 15 | /* | ||
| 16 | * /proc/<pid>/oom_score_adj set to OOM_SCORE_ADJ_MIN disables oom killing for | ||
| 17 | * pid. | ||
| 18 | */ | ||
| 19 | #define OOM_SCORE_ADJ_MIN (-1000) | ||
| 20 | #define OOM_SCORE_ADJ_MAX 1000 | ||
| 21 | |||
| 10 | #ifdef __KERNEL__ | 22 | #ifdef __KERNEL__ |
| 11 | 23 | ||
| 24 | #include <linux/sched.h> | ||
| 12 | #include <linux/types.h> | 25 | #include <linux/types.h> |
| 13 | #include <linux/nodemask.h> | 26 | #include <linux/nodemask.h> |
| 14 | 27 | ||
| 15 | struct zonelist; | 28 | struct zonelist; |
| 16 | struct notifier_block; | 29 | struct notifier_block; |
| 30 | struct mem_cgroup; | ||
| 31 | struct task_struct; | ||
| 17 | 32 | ||
| 18 | /* | 33 | /* |
| 19 | * Types of limitations to the nodes from which allocations may occur | 34 | * Types of limitations to the nodes from which allocations may occur |
| @@ -22,9 +37,12 @@ enum oom_constraint { | |||
| 22 | CONSTRAINT_NONE, | 37 | CONSTRAINT_NONE, |
| 23 | CONSTRAINT_CPUSET, | 38 | CONSTRAINT_CPUSET, |
| 24 | CONSTRAINT_MEMORY_POLICY, | 39 | CONSTRAINT_MEMORY_POLICY, |
| 40 | CONSTRAINT_MEMCG, | ||
| 25 | }; | 41 | }; |
| 26 | 42 | ||
| 27 | extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags); | 43 | extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, |
| 44 | const nodemask_t *nodemask, unsigned long totalpages); | ||
| 45 | extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); | ||
| 28 | extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); | 46 | extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); |
| 29 | 47 | ||
| 30 | extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, | 48 | extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, |
| @@ -43,5 +61,16 @@ static inline void oom_killer_enable(void) | |||
| 43 | { | 61 | { |
| 44 | oom_killer_disabled = false; | 62 | oom_killer_disabled = false; |
| 45 | } | 63 | } |
| 64 | |||
| 65 | /* The badness from the OOM killer */ | ||
| 66 | extern unsigned long badness(struct task_struct *p, struct mem_cgroup *mem, | ||
| 67 | const nodemask_t *nodemask, unsigned long uptime); | ||
| 68 | |||
| 69 | extern struct task_struct *find_lock_task_mm(struct task_struct *p); | ||
| 70 | |||
| 71 | /* sysctls */ | ||
| 72 | extern int sysctl_oom_dump_tasks; | ||
| 73 | extern int sysctl_oom_kill_allocating_task; | ||
| 74 | extern int sysctl_panic_on_oom; | ||
| 46 | #endif /* __KERNEL__*/ | 75 | #endif /* __KERNEL__*/ |
| 47 | #endif /* _INCLUDE_LINUX_OOM_H */ | 76 | #endif /* _INCLUDE_LINUX_OOM_H */ |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5b59f35dcb8f..6fa317801e1c 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
| @@ -128,7 +128,6 @@ enum pageflags { | |||
| 128 | 128 | ||
| 129 | /* SLUB */ | 129 | /* SLUB */ |
| 130 | PG_slub_frozen = PG_active, | 130 | PG_slub_frozen = PG_active, |
| 131 | PG_slub_debug = PG_error, | ||
| 132 | }; | 131 | }; |
| 133 | 132 | ||
| 134 | #ifndef __GENERATING_BOUNDS_H | 133 | #ifndef __GENERATING_BOUNDS_H |
| @@ -215,7 +214,6 @@ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) | |||
| 215 | __PAGEFLAG(SlobFree, slob_free) | 214 | __PAGEFLAG(SlobFree, slob_free) |
| 216 | 215 | ||
| 217 | __PAGEFLAG(SlubFrozen, slub_frozen) | 216 | __PAGEFLAG(SlubFrozen, slub_frozen) |
| 218 | __PAGEFLAG(SlubDebug, slub_debug) | ||
| 219 | 217 | ||
| 220 | /* | 218 | /* |
| 221 | * Private page markings that may be used by the filesystem that owns the page | 219 | * Private page markings that may be used by the filesystem that owns the page |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 3c62ed408492..78a702ce4fcb 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
| @@ -423,8 +423,10 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size) | |||
| 423 | const char __user *end = uaddr + size - 1; | 423 | const char __user *end = uaddr + size - 1; |
| 424 | 424 | ||
| 425 | if (((unsigned long)uaddr & PAGE_MASK) != | 425 | if (((unsigned long)uaddr & PAGE_MASK) != |
| 426 | ((unsigned long)end & PAGE_MASK)) | 426 | ((unsigned long)end & PAGE_MASK)) { |
| 427 | ret = __get_user(c, end); | 427 | ret = __get_user(c, end); |
| 428 | (void)c; | ||
| 429 | } | ||
| 428 | } | 430 | } |
| 429 | return ret; | 431 | return ret; |
| 430 | } | 432 | } |
diff --git a/include/linux/path.h b/include/linux/path.h index 915e0c382a51..edc98dec6266 100644 --- a/include/linux/path.h +++ b/include/linux/path.h | |||
| @@ -12,4 +12,9 @@ struct path { | |||
| 12 | extern void path_get(struct path *); | 12 | extern void path_get(struct path *); |
| 13 | extern void path_put(struct path *); | 13 | extern void path_put(struct path *); |
| 14 | 14 | ||
| 15 | static inline int path_equal(const struct path *path1, const struct path *path2) | ||
| 16 | { | ||
| 17 | return path1->mnt == path2->mnt && path1->dentry == path2->dentry; | ||
| 18 | } | ||
| 19 | |||
| 15 | #endif /* _LINUX_PATH_H */ | 20 | #endif /* _LINUX_PATH_H */ |
diff --git a/include/linux/pch_dma.h b/include/linux/pch_dma.h new file mode 100644 index 000000000000..fdafe529ef8a --- /dev/null +++ b/include/linux/pch_dma.h | |||
| @@ -0,0 +1,37 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (c) 2010 Intel Corporation | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License version 2 as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | * | ||
| 13 | * You should have received a copy of the GNU General Public License | ||
| 14 | * along with this program; if not, write to the Free Software | ||
| 15 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef PCH_DMA_H | ||
| 19 | #define PCH_DMA_H | ||
| 20 | |||
| 21 | #include <linux/dmaengine.h> | ||
| 22 | |||
| 23 | enum pch_dma_width { | ||
| 24 | PCH_DMA_WIDTH_1_BYTE, | ||
| 25 | PCH_DMA_WIDTH_2_BYTES, | ||
| 26 | PCH_DMA_WIDTH_4_BYTES, | ||
| 27 | }; | ||
| 28 | |||
| 29 | struct pch_dma_slave { | ||
| 30 | struct device *dma_dev; | ||
| 31 | unsigned int chan_id; | ||
| 32 | dma_addr_t tx_reg; | ||
| 33 | dma_addr_t rx_reg; | ||
| 34 | enum pch_dma_width width; | ||
| 35 | }; | ||
| 36 | |||
| 37 | #endif | ||
diff --git a/include/linux/pci.h b/include/linux/pci.h index f26fda76b87f..b1d17956a153 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -270,6 +270,8 @@ struct pci_dev { | |||
| 270 | unsigned int d1_support:1; /* Low power state D1 is supported */ | 270 | unsigned int d1_support:1; /* Low power state D1 is supported */ |
| 271 | unsigned int d2_support:1; /* Low power state D2 is supported */ | 271 | unsigned int d2_support:1; /* Low power state D2 is supported */ |
| 272 | unsigned int no_d1d2:1; /* Only allow D0 and D3 */ | 272 | unsigned int no_d1d2:1; /* Only allow D0 and D3 */ |
| 273 | unsigned int mmio_always_on:1; /* disallow turning off io/mem | ||
| 274 | decoding during bar sizing */ | ||
| 273 | unsigned int wakeup_prepared:1; | 275 | unsigned int wakeup_prepared:1; |
| 274 | unsigned int d3_delay; /* D3->D0 transition time in ms */ | 276 | unsigned int d3_delay; /* D3->D0 transition time in ms */ |
| 275 | 277 | ||
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 40c804d484ca..f6a3b2d36cad 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
| @@ -2325,9 +2325,11 @@ | |||
| 2325 | #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 | 2325 | #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 |
| 2326 | #define PCI_DEVICE_ID_JMICRON_JMB362 0x2362 | 2326 | #define PCI_DEVICE_ID_JMICRON_JMB362 0x2362 |
| 2327 | #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 | 2327 | #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 |
| 2328 | #define PCI_DEVICE_ID_JMICRON_JMB364 0x2364 | ||
| 2328 | #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 | 2329 | #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 |
| 2329 | #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 | 2330 | #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 |
| 2330 | #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 | 2331 | #define PCI_DEVICE_ID_JMICRON_JMB368 0x2368 |
| 2332 | #define PCI_DEVICE_ID_JMICRON_JMB369 0x2369 | ||
| 2331 | #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 | 2333 | #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 |
| 2332 | #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 | 2334 | #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 |
| 2333 | #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 | 2335 | #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 |
| @@ -2370,6 +2372,9 @@ | |||
| 2370 | #define PCI_VENDOR_ID_AKS 0x416c | 2372 | #define PCI_VENDOR_ID_AKS 0x416c |
| 2371 | #define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 | 2373 | #define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 |
| 2372 | 2374 | ||
| 2375 | #define PCI_VENDOR_ID_ACCESSIO 0x494f | ||
| 2376 | #define PCI_DEVICE_ID_ACCESSIO_WDG_CSM 0x22c0 | ||
| 2377 | |||
| 2373 | #define PCI_VENDOR_ID_S3 0x5333 | 2378 | #define PCI_VENDOR_ID_S3 0x5333 |
| 2374 | #define PCI_DEVICE_ID_S3_TRIO 0x8811 | 2379 | #define PCI_DEVICE_ID_S3_TRIO 0x8811 |
| 2375 | #define PCI_DEVICE_ID_S3_868 0x8880 | 2380 | #define PCI_DEVICE_ID_S3_868 0x8880 |
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index c88d67b59394..8a7d510ffa9c 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h | |||
| @@ -40,6 +40,7 @@ void percpu_counter_destroy(struct percpu_counter *fbc); | |||
| 40 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); | 40 | void percpu_counter_set(struct percpu_counter *fbc, s64 amount); |
| 41 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); | 41 | void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); |
| 42 | s64 __percpu_counter_sum(struct percpu_counter *fbc); | 42 | s64 __percpu_counter_sum(struct percpu_counter *fbc); |
| 43 | int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs); | ||
| 43 | 44 | ||
| 44 | static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) | 45 | static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount) |
| 45 | { | 46 | { |
| @@ -98,6 +99,16 @@ static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount) | |||
| 98 | fbc->count = amount; | 99 | fbc->count = amount; |
| 99 | } | 100 | } |
| 100 | 101 | ||
| 102 | static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs) | ||
| 103 | { | ||
| 104 | if (fbc->count > rhs) | ||
| 105 | return 1; | ||
| 106 | else if (fbc->count < rhs) | ||
| 107 | return -1; | ||
| 108 | else | ||
| 109 | return 0; | ||
| 110 | } | ||
| 111 | |||
| 101 | static inline void | 112 | static inline void |
| 102 | percpu_counter_add(struct percpu_counter *fbc, s64 amount) | 113 | percpu_counter_add(struct percpu_counter *fbc, s64 amount) |
| 103 | { | 114 | { |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 5d0266d94985..716f99b682c1 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -214,8 +214,9 @@ struct perf_event_attr { | |||
| 214 | * See also PERF_RECORD_MISC_EXACT_IP | 214 | * See also PERF_RECORD_MISC_EXACT_IP |
| 215 | */ | 215 | */ |
| 216 | precise_ip : 2, /* skid constraint */ | 216 | precise_ip : 2, /* skid constraint */ |
| 217 | mmap_data : 1, /* non-exec mmap data */ | ||
| 217 | 218 | ||
| 218 | __reserved_1 : 47; | 219 | __reserved_1 : 46; |
| 219 | 220 | ||
| 220 | union { | 221 | union { |
| 221 | __u32 wakeup_events; /* wakeup every n events */ | 222 | __u32 wakeup_events; /* wakeup every n events */ |
| @@ -461,6 +462,7 @@ enum perf_callchain_context { | |||
| 461 | 462 | ||
| 462 | #ifdef CONFIG_PERF_EVENTS | 463 | #ifdef CONFIG_PERF_EVENTS |
| 463 | # include <asm/perf_event.h> | 464 | # include <asm/perf_event.h> |
| 465 | # include <asm/local64.h> | ||
| 464 | #endif | 466 | #endif |
| 465 | 467 | ||
| 466 | struct perf_guest_info_callbacks { | 468 | struct perf_guest_info_callbacks { |
| @@ -531,14 +533,16 @@ struct hw_perf_event { | |||
| 531 | struct hrtimer hrtimer; | 533 | struct hrtimer hrtimer; |
| 532 | }; | 534 | }; |
| 533 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 535 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
| 534 | /* breakpoint */ | 536 | struct { /* breakpoint */ |
| 535 | struct arch_hw_breakpoint info; | 537 | struct arch_hw_breakpoint info; |
| 538 | struct list_head bp_list; | ||
| 539 | }; | ||
| 536 | #endif | 540 | #endif |
| 537 | }; | 541 | }; |
| 538 | atomic64_t prev_count; | 542 | local64_t prev_count; |
| 539 | u64 sample_period; | 543 | u64 sample_period; |
| 540 | u64 last_period; | 544 | u64 last_period; |
| 541 | atomic64_t period_left; | 545 | local64_t period_left; |
| 542 | u64 interrupts; | 546 | u64 interrupts; |
| 543 | 547 | ||
| 544 | u64 freq_time_stamp; | 548 | u64 freq_time_stamp; |
| @@ -548,7 +552,10 @@ struct hw_perf_event { | |||
| 548 | 552 | ||
| 549 | struct perf_event; | 553 | struct perf_event; |
| 550 | 554 | ||
| 551 | #define PERF_EVENT_TXN_STARTED 1 | 555 | /* |
| 556 | * Common implementation detail of pmu::{start,commit,cancel}_txn | ||
| 557 | */ | ||
| 558 | #define PERF_EVENT_TXN 0x1 | ||
| 552 | 559 | ||
| 553 | /** | 560 | /** |
| 554 | * struct pmu - generic performance monitoring unit | 561 | * struct pmu - generic performance monitoring unit |
| @@ -562,14 +569,28 @@ struct pmu { | |||
| 562 | void (*unthrottle) (struct perf_event *event); | 569 | void (*unthrottle) (struct perf_event *event); |
| 563 | 570 | ||
| 564 | /* | 571 | /* |
| 565 | * group events scheduling is treated as a transaction, | 572 | * Group events scheduling is treated as a transaction, add group |
| 566 | * add group events as a whole and perform one schedulability test. | 573 | * events as a whole and perform one schedulability test. If the test |
| 567 | * If test fails, roll back the whole group | 574 | * fails, roll back the whole group |
| 568 | */ | 575 | */ |
| 569 | 576 | ||
| 577 | /* | ||
| 578 | * Start the transaction, after this ->enable() doesn't need | ||
| 579 | * to do schedulability tests. | ||
| 580 | */ | ||
| 570 | void (*start_txn) (const struct pmu *pmu); | 581 | void (*start_txn) (const struct pmu *pmu); |
| 571 | void (*cancel_txn) (const struct pmu *pmu); | 582 | /* |
| 583 | * If ->start_txn() disabled the ->enable() schedulability test | ||
| 584 | * then ->commit_txn() is required to perform one. On success | ||
| 585 | * the transaction is closed. On error the transaction is kept | ||
| 586 | * open until ->cancel_txn() is called. | ||
| 587 | */ | ||
| 572 | int (*commit_txn) (const struct pmu *pmu); | 588 | int (*commit_txn) (const struct pmu *pmu); |
| 589 | /* | ||
| 590 | * Will cancel the transaction, assumes ->disable() is called for | ||
| 591 | * each successfull ->enable() during the transaction. | ||
| 592 | */ | ||
| 593 | void (*cancel_txn) (const struct pmu *pmu); | ||
| 573 | }; | 594 | }; |
| 574 | 595 | ||
| 575 | /** | 596 | /** |
| @@ -584,7 +605,9 @@ enum perf_event_active_state { | |||
| 584 | 605 | ||
| 585 | struct file; | 606 | struct file; |
| 586 | 607 | ||
| 587 | struct perf_mmap_data { | 608 | #define PERF_BUFFER_WRITABLE 0x01 |
| 609 | |||
| 610 | struct perf_buffer { | ||
| 588 | atomic_t refcount; | 611 | atomic_t refcount; |
| 589 | struct rcu_head rcu_head; | 612 | struct rcu_head rcu_head; |
| 590 | #ifdef CONFIG_PERF_USE_VMALLOC | 613 | #ifdef CONFIG_PERF_USE_VMALLOC |
| @@ -650,7 +673,8 @@ struct perf_event { | |||
| 650 | 673 | ||
| 651 | enum perf_event_active_state state; | 674 | enum perf_event_active_state state; |
| 652 | unsigned int attach_state; | 675 | unsigned int attach_state; |
| 653 | atomic64_t count; | 676 | local64_t count; |
| 677 | atomic64_t child_count; | ||
| 654 | 678 | ||
| 655 | /* | 679 | /* |
| 656 | * These are the total time in nanoseconds that the event | 680 | * These are the total time in nanoseconds that the event |
| @@ -709,7 +733,7 @@ struct perf_event { | |||
| 709 | atomic_t mmap_count; | 733 | atomic_t mmap_count; |
| 710 | int mmap_locked; | 734 | int mmap_locked; |
| 711 | struct user_struct *mmap_user; | 735 | struct user_struct *mmap_user; |
| 712 | struct perf_mmap_data *data; | 736 | struct perf_buffer *buffer; |
| 713 | 737 | ||
| 714 | /* poll related */ | 738 | /* poll related */ |
| 715 | wait_queue_head_t waitq; | 739 | wait_queue_head_t waitq; |
| @@ -807,7 +831,7 @@ struct perf_cpu_context { | |||
| 807 | 831 | ||
| 808 | struct perf_output_handle { | 832 | struct perf_output_handle { |
| 809 | struct perf_event *event; | 833 | struct perf_event *event; |
| 810 | struct perf_mmap_data *data; | 834 | struct perf_buffer *buffer; |
| 811 | unsigned long wakeup; | 835 | unsigned long wakeup; |
| 812 | unsigned long size; | 836 | unsigned long size; |
| 813 | void *addr; | 837 | void *addr; |
| @@ -910,8 +934,10 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | |||
| 910 | 934 | ||
| 911 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); | 935 | extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64); |
| 912 | 936 | ||
| 913 | extern void | 937 | #ifndef perf_arch_fetch_caller_regs |
| 914 | perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); | 938 | static inline void |
| 939 | perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } | ||
| 940 | #endif | ||
| 915 | 941 | ||
| 916 | /* | 942 | /* |
| 917 | * Take a snapshot of the regs. Skip ip and frame pointer to | 943 | * Take a snapshot of the regs. Skip ip and frame pointer to |
| @@ -921,31 +947,11 @@ perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip); | |||
| 921 | * - bp for callchains | 947 | * - bp for callchains |
| 922 | * - eflags, for future purposes, just in case | 948 | * - eflags, for future purposes, just in case |
| 923 | */ | 949 | */ |
| 924 | static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip) | 950 | static inline void perf_fetch_caller_regs(struct pt_regs *regs) |
| 925 | { | 951 | { |
| 926 | unsigned long ip; | ||
| 927 | |||
| 928 | memset(regs, 0, sizeof(*regs)); | 952 | memset(regs, 0, sizeof(*regs)); |
| 929 | 953 | ||
| 930 | switch (skip) { | 954 | perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); |
| 931 | case 1 : | ||
| 932 | ip = CALLER_ADDR0; | ||
| 933 | break; | ||
| 934 | case 2 : | ||
| 935 | ip = CALLER_ADDR1; | ||
| 936 | break; | ||
| 937 | case 3 : | ||
| 938 | ip = CALLER_ADDR2; | ||
| 939 | break; | ||
| 940 | case 4: | ||
| 941 | ip = CALLER_ADDR3; | ||
| 942 | break; | ||
| 943 | /* No need to support further for now */ | ||
| 944 | default: | ||
| 945 | ip = 0; | ||
| 946 | } | ||
| 947 | |||
| 948 | return perf_arch_fetch_caller_regs(regs, ip, skip); | ||
| 949 | } | 955 | } |
| 950 | 956 | ||
| 951 | static inline void | 957 | static inline void |
| @@ -955,21 +961,14 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | |||
| 955 | struct pt_regs hot_regs; | 961 | struct pt_regs hot_regs; |
| 956 | 962 | ||
| 957 | if (!regs) { | 963 | if (!regs) { |
| 958 | perf_fetch_caller_regs(&hot_regs, 1); | 964 | perf_fetch_caller_regs(&hot_regs); |
| 959 | regs = &hot_regs; | 965 | regs = &hot_regs; |
| 960 | } | 966 | } |
| 961 | __perf_sw_event(event_id, nr, nmi, regs, addr); | 967 | __perf_sw_event(event_id, nr, nmi, regs, addr); |
| 962 | } | 968 | } |
| 963 | } | 969 | } |
| 964 | 970 | ||
| 965 | extern void __perf_event_mmap(struct vm_area_struct *vma); | 971 | extern void perf_event_mmap(struct vm_area_struct *vma); |
| 966 | |||
| 967 | static inline void perf_event_mmap(struct vm_area_struct *vma) | ||
| 968 | { | ||
| 969 | if (vma->vm_flags & VM_EXEC) | ||
| 970 | __perf_event_mmap(vma); | ||
| 971 | } | ||
| 972 | |||
| 973 | extern struct perf_guest_info_callbacks *perf_guest_cbs; | 972 | extern struct perf_guest_info_callbacks *perf_guest_cbs; |
| 974 | extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); | 973 | extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); |
| 975 | extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); | 974 | extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); |
| @@ -1001,7 +1000,7 @@ static inline bool perf_paranoid_kernel(void) | |||
| 1001 | extern void perf_event_init(void); | 1000 | extern void perf_event_init(void); |
| 1002 | extern void perf_tp_event(u64 addr, u64 count, void *record, | 1001 | extern void perf_tp_event(u64 addr, u64 count, void *record, |
| 1003 | int entry_size, struct pt_regs *regs, | 1002 | int entry_size, struct pt_regs *regs, |
| 1004 | struct hlist_head *head); | 1003 | struct hlist_head *head, int rctx); |
| 1005 | extern void perf_bp_event(struct perf_event *event, void *data); | 1004 | extern void perf_bp_event(struct perf_event *event, void *data); |
| 1006 | 1005 | ||
| 1007 | #ifndef perf_misc_flags | 1006 | #ifndef perf_misc_flags |
| @@ -1068,7 +1067,7 @@ static inline void perf_event_disable(struct perf_event *event) { } | |||
| 1068 | #define perf_cpu_notifier(fn) \ | 1067 | #define perf_cpu_notifier(fn) \ |
| 1069 | do { \ | 1068 | do { \ |
| 1070 | static struct notifier_block fn##_nb __cpuinitdata = \ | 1069 | static struct notifier_block fn##_nb __cpuinitdata = \ |
| 1071 | { .notifier_call = fn, .priority = 20 }; \ | 1070 | { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ |
| 1072 | fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ | 1071 | fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ |
| 1073 | (void *)(unsigned long)smp_processor_id()); \ | 1072 | (void *)(unsigned long)smp_processor_id()); \ |
| 1074 | fn(&fn##_nb, (unsigned long)CPU_STARTING, \ | 1073 | fn(&fn##_nb, (unsigned long)CPU_STARTING, \ |
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 5417944d3687..d7ecad0093bb 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
| @@ -43,10 +43,64 @@ extern struct resource *platform_get_resource_byname(struct platform_device *, u | |||
| 43 | extern int platform_get_irq_byname(struct platform_device *, const char *); | 43 | extern int platform_get_irq_byname(struct platform_device *, const char *); |
| 44 | extern int platform_add_devices(struct platform_device **, int); | 44 | extern int platform_add_devices(struct platform_device **, int); |
| 45 | 45 | ||
| 46 | extern struct platform_device *platform_device_register_simple(const char *, int id, | 46 | extern struct platform_device *platform_device_register_resndata( |
| 47 | const struct resource *, unsigned int); | 47 | struct device *parent, const char *name, int id, |
| 48 | extern struct platform_device *platform_device_register_data(struct device *, | 48 | const struct resource *res, unsigned int num, |
| 49 | const char *, int, const void *, size_t); | 49 | const void *data, size_t size); |
| 50 | |||
| 51 | /** | ||
| 52 | * platform_device_register_simple - add a platform-level device and its resources | ||
| 53 | * @name: base name of the device we're adding | ||
| 54 | * @id: instance id | ||
| 55 | * @res: set of resources that needs to be allocated for the device | ||
| 56 | * @num: number of resources | ||
| 57 | * | ||
| 58 | * This function creates a simple platform device that requires minimal | ||
| 59 | * resource and memory management. Canned release function freeing memory | ||
| 60 | * allocated for the device allows drivers using such devices to be | ||
| 61 | * unloaded without waiting for the last reference to the device to be | ||
| 62 | * dropped. | ||
| 63 | * | ||
| 64 | * This interface is primarily intended for use with legacy drivers which | ||
| 65 | * probe hardware directly. Because such drivers create sysfs device nodes | ||
| 66 | * themselves, rather than letting system infrastructure handle such device | ||
| 67 | * enumeration tasks, they don't fully conform to the Linux driver model. | ||
| 68 | * In particular, when such drivers are built as modules, they can't be | ||
| 69 | * "hotplugged". | ||
| 70 | * | ||
| 71 | * Returns &struct platform_device pointer on success, or ERR_PTR() on error. | ||
| 72 | */ | ||
| 73 | static inline struct platform_device *platform_device_register_simple( | ||
| 74 | const char *name, int id, | ||
| 75 | const struct resource *res, unsigned int num) | ||
| 76 | { | ||
| 77 | return platform_device_register_resndata(NULL, name, id, | ||
| 78 | res, num, NULL, 0); | ||
| 79 | } | ||
| 80 | |||
| 81 | /** | ||
| 82 | * platform_device_register_data - add a platform-level device with platform-specific data | ||
| 83 | * @parent: parent device for the device we're adding | ||
| 84 | * @name: base name of the device we're adding | ||
| 85 | * @id: instance id | ||
| 86 | * @data: platform specific data for this platform device | ||
| 87 | * @size: size of platform specific data | ||
| 88 | * | ||
| 89 | * This function creates a simple platform device that requires minimal | ||
| 90 | * resource and memory management. Canned release function freeing memory | ||
| 91 | * allocated for the device allows drivers using such devices to be | ||
| 92 | * unloaded without waiting for the last reference to the device to be | ||
| 93 | * dropped. | ||
| 94 | * | ||
| 95 | * Returns &struct platform_device pointer on success, or ERR_PTR() on error. | ||
| 96 | */ | ||
| 97 | static inline struct platform_device *platform_device_register_data( | ||
| 98 | struct device *parent, const char *name, int id, | ||
| 99 | const void *data, size_t size) | ||
| 100 | { | ||
| 101 | return platform_device_register_resndata(parent, name, id, | ||
| 102 | NULL, 0, data, size); | ||
| 103 | } | ||
| 50 | 104 | ||
| 51 | extern struct platform_device *platform_device_alloc(const char *name, int id); | 105 | extern struct platform_device *platform_device_alloc(const char *name, int id); |
| 52 | extern int platform_device_add_resources(struct platform_device *pdev, | 106 | extern int platform_device_add_resources(struct platform_device *pdev, |
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 4f71bf4e628c..3e23844a6990 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h | |||
| @@ -117,6 +117,6 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, | |||
| 117 | 117 | ||
| 118 | long clock_nanosleep_restart(struct restart_block *restart_block); | 118 | long clock_nanosleep_restart(struct restart_block *restart_block); |
| 119 | 119 | ||
| 120 | void update_rlimit_cpu(unsigned long rlim_new); | 120 | void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); |
| 121 | 121 | ||
| 122 | #endif | 122 | #endif |
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h index bff98ec1bfed..5d87f810a3b7 100644 --- a/include/linux/ppp_channel.h +++ b/include/linux/ppp_channel.h | |||
| @@ -36,7 +36,7 @@ struct ppp_channel_ops { | |||
| 36 | 36 | ||
| 37 | struct ppp_channel { | 37 | struct ppp_channel { |
| 38 | void *private; /* channel private data */ | 38 | void *private; /* channel private data */ |
| 39 | struct ppp_channel_ops *ops; /* operations for this channel */ | 39 | const struct ppp_channel_ops *ops; /* operations for this channel */ |
| 40 | int mtu; /* max transmit packet size */ | 40 | int mtu; /* max transmit packet size */ |
| 41 | int hdrlen; /* amount of headroom channel needs */ | 41 | int hdrlen; /* amount of headroom channel needs */ |
| 42 | void *ppp; /* opaque to channel */ | 42 | void *ppp; /* opaque to channel */ |
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index aa36793b48bd..d50ba858cfe0 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h | |||
| @@ -28,6 +28,12 @@ static inline bool is_quota_modification(struct inode *inode, struct iattr *ia) | |||
| 28 | 28 | ||
| 29 | #if defined(CONFIG_QUOTA) | 29 | #if defined(CONFIG_QUOTA) |
| 30 | 30 | ||
| 31 | #define quota_error(sb, fmt, args...) \ | ||
| 32 | __quota_error((sb), __func__, fmt , ## args) | ||
| 33 | |||
| 34 | extern void __quota_error(struct super_block *sb, const char *func, | ||
| 35 | const char *fmt, ...); | ||
| 36 | |||
| 31 | /* | 37 | /* |
| 32 | * declaration of quota_function calls in kernel. | 38 | * declaration of quota_function calls in kernel. |
| 33 | */ | 39 | */ |
| @@ -145,11 +151,6 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type) | |||
| 145 | !sb_has_quota_suspended(sb, type); | 151 | !sb_has_quota_suspended(sb, type); |
| 146 | } | 152 | } |
| 147 | 153 | ||
| 148 | static inline unsigned sb_any_quota_active(struct super_block *sb) | ||
| 149 | { | ||
| 150 | return sb_any_quota_loaded(sb) & ~sb_any_quota_suspended(sb); | ||
| 151 | } | ||
| 152 | |||
| 153 | /* | 154 | /* |
| 154 | * Operations supported for diskquotas. | 155 | * Operations supported for diskquotas. |
| 155 | */ | 156 | */ |
| @@ -194,11 +195,6 @@ static inline int sb_has_quota_active(struct super_block *sb, int type) | |||
| 194 | return 0; | 195 | return 0; |
| 195 | } | 196 | } |
| 196 | 197 | ||
| 197 | static inline int sb_any_quota_active(struct super_block *sb) | ||
| 198 | { | ||
| 199 | return 0; | ||
| 200 | } | ||
| 201 | |||
| 202 | static inline void dquot_initialize(struct inode *inode) | 198 | static inline void dquot_initialize(struct inode *inode) |
| 203 | { | 199 | { |
| 204 | } | 200 | } |
| @@ -270,7 +266,7 @@ static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr) | |||
| 270 | static inline void dquot_alloc_space_nofail(struct inode *inode, qsize_t nr) | 266 | static inline void dquot_alloc_space_nofail(struct inode *inode, qsize_t nr) |
| 271 | { | 267 | { |
| 272 | __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN|DQUOT_SPACE_NOFAIL); | 268 | __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN|DQUOT_SPACE_NOFAIL); |
| 273 | mark_inode_dirty(inode); | 269 | mark_inode_dirty_sync(inode); |
| 274 | } | 270 | } |
| 275 | 271 | ||
| 276 | static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) | 272 | static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) |
| @@ -279,7 +275,7 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) | |||
| 279 | 275 | ||
| 280 | ret = dquot_alloc_space_nodirty(inode, nr); | 276 | ret = dquot_alloc_space_nodirty(inode, nr); |
| 281 | if (!ret) | 277 | if (!ret) |
| 282 | mark_inode_dirty(inode); | 278 | mark_inode_dirty_sync(inode); |
| 283 | return ret; | 279 | return ret; |
| 284 | } | 280 | } |
| 285 | 281 | ||
| @@ -309,7 +305,7 @@ static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr) | |||
| 309 | 305 | ||
| 310 | ret = dquot_prealloc_block_nodirty(inode, nr); | 306 | ret = dquot_prealloc_block_nodirty(inode, nr); |
| 311 | if (!ret) | 307 | if (!ret) |
| 312 | mark_inode_dirty(inode); | 308 | mark_inode_dirty_sync(inode); |
| 313 | return ret; | 309 | return ret; |
| 314 | } | 310 | } |
| 315 | 311 | ||
| @@ -325,7 +321,7 @@ static inline int dquot_claim_block(struct inode *inode, qsize_t nr) | |||
| 325 | 321 | ||
| 326 | ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits); | 322 | ret = dquot_claim_space_nodirty(inode, nr << inode->i_blkbits); |
| 327 | if (!ret) | 323 | if (!ret) |
| 328 | mark_inode_dirty(inode); | 324 | mark_inode_dirty_sync(inode); |
| 329 | return ret; | 325 | return ret; |
| 330 | } | 326 | } |
| 331 | 327 | ||
| @@ -337,7 +333,7 @@ static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr) | |||
| 337 | static inline void dquot_free_space(struct inode *inode, qsize_t nr) | 333 | static inline void dquot_free_space(struct inode *inode, qsize_t nr) |
| 338 | { | 334 | { |
| 339 | dquot_free_space_nodirty(inode, nr); | 335 | dquot_free_space_nodirty(inode, nr); |
| 340 | mark_inode_dirty(inode); | 336 | mark_inode_dirty_sync(inode); |
| 341 | } | 337 | } |
| 342 | 338 | ||
| 343 | static inline void dquot_free_block_nodirty(struct inode *inode, qsize_t nr) | 339 | static inline void dquot_free_block_nodirty(struct inode *inode, qsize_t nr) |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 55ca73cf25e5..634b8e674ac5 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
| @@ -55,7 +55,7 @@ static inline int radix_tree_is_indirect_ptr(void *ptr) | |||
| 55 | 55 | ||
| 56 | /*** radix-tree API starts here ***/ | 56 | /*** radix-tree API starts here ***/ |
| 57 | 57 | ||
| 58 | #define RADIX_TREE_MAX_TAGS 2 | 58 | #define RADIX_TREE_MAX_TAGS 3 |
| 59 | 59 | ||
| 60 | /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ | 60 | /* root tags are stored in gfp_mask, shifted by __GFP_BITS_SHIFT */ |
| 61 | struct radix_tree_root { | 61 | struct radix_tree_root { |
| @@ -192,6 +192,10 @@ unsigned int | |||
| 192 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, | 192 | radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, |
| 193 | unsigned long first_index, unsigned int max_items, | 193 | unsigned long first_index, unsigned int max_items, |
| 194 | unsigned int tag); | 194 | unsigned int tag); |
| 195 | unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, | ||
| 196 | unsigned long *first_indexp, unsigned long last_index, | ||
| 197 | unsigned long nr_to_tag, | ||
| 198 | unsigned int fromtag, unsigned int totag); | ||
| 195 | int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); | 199 | int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); |
| 196 | 200 | ||
| 197 | static inline void radix_tree_preload_end(void) | 201 | static inline void radix_tree_preload_end(void) |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index b653b4aaa8a6..9fbc54a2585d 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -40,6 +40,7 @@ | |||
| 40 | #include <linux/seqlock.h> | 40 | #include <linux/seqlock.h> |
| 41 | #include <linux/lockdep.h> | 41 | #include <linux/lockdep.h> |
| 42 | #include <linux/completion.h> | 42 | #include <linux/completion.h> |
| 43 | #include <linux/debugobjects.h> | ||
| 43 | 44 | ||
| 44 | #ifdef CONFIG_RCU_TORTURE_TEST | 45 | #ifdef CONFIG_RCU_TORTURE_TEST |
| 45 | extern int rcutorture_runnable; /* for sysctl */ | 46 | extern int rcutorture_runnable; /* for sysctl */ |
| @@ -79,6 +80,16 @@ extern void rcu_init(void); | |||
| 79 | (ptr)->next = NULL; (ptr)->func = NULL; \ | 80 | (ptr)->next = NULL; (ptr)->func = NULL; \ |
| 80 | } while (0) | 81 | } while (0) |
| 81 | 82 | ||
| 83 | /* | ||
| 84 | * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic | ||
| 85 | * initialization and destruction of rcu_head on the stack. rcu_head structures | ||
| 86 | * allocated dynamically in the heap or defined statically don't need any | ||
| 87 | * initialization. | ||
| 88 | */ | ||
| 89 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD | ||
| 90 | extern void init_rcu_head_on_stack(struct rcu_head *head); | ||
| 91 | extern void destroy_rcu_head_on_stack(struct rcu_head *head); | ||
| 92 | #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | ||
| 82 | static inline void init_rcu_head_on_stack(struct rcu_head *head) | 93 | static inline void init_rcu_head_on_stack(struct rcu_head *head) |
| 83 | { | 94 | { |
| 84 | } | 95 | } |
| @@ -86,6 +97,7 @@ static inline void init_rcu_head_on_stack(struct rcu_head *head) | |||
| 86 | static inline void destroy_rcu_head_on_stack(struct rcu_head *head) | 97 | static inline void destroy_rcu_head_on_stack(struct rcu_head *head) |
| 87 | { | 98 | { |
| 88 | } | 99 | } |
| 100 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | ||
| 89 | 101 | ||
| 90 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 102 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 91 | 103 | ||
| @@ -517,4 +529,74 @@ extern void call_rcu(struct rcu_head *head, | |||
| 517 | extern void call_rcu_bh(struct rcu_head *head, | 529 | extern void call_rcu_bh(struct rcu_head *head, |
| 518 | void (*func)(struct rcu_head *head)); | 530 | void (*func)(struct rcu_head *head)); |
| 519 | 531 | ||
| 532 | /* | ||
| 533 | * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally | ||
| 534 | * by call_rcu() and rcu callback execution, and are therefore not part of the | ||
| 535 | * RCU API. Leaving in rcupdate.h because they are used by all RCU flavors. | ||
| 536 | */ | ||
| 537 | |||
| 538 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD | ||
| 539 | # define STATE_RCU_HEAD_READY 0 | ||
| 540 | # define STATE_RCU_HEAD_QUEUED 1 | ||
| 541 | |||
| 542 | extern struct debug_obj_descr rcuhead_debug_descr; | ||
| 543 | |||
| 544 | static inline void debug_rcu_head_queue(struct rcu_head *head) | ||
| 545 | { | ||
| 546 | debug_object_activate(head, &rcuhead_debug_descr); | ||
| 547 | debug_object_active_state(head, &rcuhead_debug_descr, | ||
| 548 | STATE_RCU_HEAD_READY, | ||
| 549 | STATE_RCU_HEAD_QUEUED); | ||
| 550 | } | ||
| 551 | |||
| 552 | static inline void debug_rcu_head_unqueue(struct rcu_head *head) | ||
| 553 | { | ||
| 554 | debug_object_active_state(head, &rcuhead_debug_descr, | ||
| 555 | STATE_RCU_HEAD_QUEUED, | ||
| 556 | STATE_RCU_HEAD_READY); | ||
| 557 | debug_object_deactivate(head, &rcuhead_debug_descr); | ||
| 558 | } | ||
| 559 | #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | ||
| 560 | static inline void debug_rcu_head_queue(struct rcu_head *head) | ||
| 561 | { | ||
| 562 | } | ||
| 563 | |||
| 564 | static inline void debug_rcu_head_unqueue(struct rcu_head *head) | ||
| 565 | { | ||
| 566 | } | ||
| 567 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | ||
| 568 | |||
| 569 | #ifndef CONFIG_PROVE_RCU | ||
| 570 | #define __do_rcu_dereference_check(c) do { } while (0) | ||
| 571 | #endif /* #ifdef CONFIG_PROVE_RCU */ | ||
| 572 | |||
| 573 | #define __rcu_dereference_index_check(p, c) \ | ||
| 574 | ({ \ | ||
| 575 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
| 576 | __do_rcu_dereference_check(c); \ | ||
| 577 | smp_read_barrier_depends(); \ | ||
| 578 | (_________p1); \ | ||
| 579 | }) | ||
| 580 | |||
| 581 | /** | ||
| 582 | * rcu_dereference_index_check() - rcu_dereference for indices with debug checking | ||
| 583 | * @p: The pointer to read, prior to dereferencing | ||
| 584 | * @c: The conditions under which the dereference will take place | ||
| 585 | * | ||
| 586 | * Similar to rcu_dereference_check(), but omits the sparse checking. | ||
| 587 | * This allows rcu_dereference_index_check() to be used on integers, | ||
| 588 | * which can then be used as array indices. Attempting to use | ||
| 589 | * rcu_dereference_check() on an integer will give compiler warnings | ||
| 590 | * because the sparse address-space mechanism relies on dereferencing | ||
| 591 | * the RCU-protected pointer. Dereferencing integers is not something | ||
| 592 | * that even gcc will put up with. | ||
| 593 | * | ||
| 594 | * Note that this function does not implicitly check for RCU read-side | ||
| 595 | * critical sections. If this function gains lots of uses, it might | ||
| 596 | * make sense to provide versions for each flavor of RCU, but it does | ||
| 597 | * not make sense as of early 2010. | ||
| 598 | */ | ||
| 599 | #define rcu_dereference_index_check(p, c) \ | ||
| 600 | __rcu_dereference_index_check((p), (c)) | ||
| 601 | |||
| 520 | #endif /* __LINUX_RCUPDATE_H */ | 602 | #endif /* __LINUX_RCUPDATE_H */ |
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index ba394163dea1..91a4177e60ce 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h | |||
| @@ -2033,7 +2033,7 @@ void reiserfs_read_locked_inode(struct inode *inode, | |||
| 2033 | struct reiserfs_iget_args *args); | 2033 | struct reiserfs_iget_args *args); |
| 2034 | int reiserfs_find_actor(struct inode *inode, void *p); | 2034 | int reiserfs_find_actor(struct inode *inode, void *p); |
| 2035 | int reiserfs_init_locked_inode(struct inode *inode, void *p); | 2035 | int reiserfs_init_locked_inode(struct inode *inode, void *p); |
| 2036 | void reiserfs_delete_inode(struct inode *inode); | 2036 | void reiserfs_evict_inode(struct inode *inode); |
| 2037 | int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc); | 2037 | int reiserfs_write_inode(struct inode *inode, struct writeback_control *wbc); |
| 2038 | int reiserfs_get_block(struct inode *inode, sector_t block, | 2038 | int reiserfs_get_block(struct inode *inode, sector_t block, |
| 2039 | struct buffer_head *bh_result, int create); | 2039 | struct buffer_head *bh_result, int create); |
diff --git a/include/linux/reiserfs_fs_i.h b/include/linux/reiserfs_fs_i.h index 89f4d3abbf5a..97959bdfe214 100644 --- a/include/linux/reiserfs_fs_i.h +++ b/include/linux/reiserfs_fs_i.h | |||
| @@ -25,7 +25,6 @@ typedef enum { | |||
| 25 | i_link_saved_truncate_mask = 0x0020, | 25 | i_link_saved_truncate_mask = 0x0020, |
| 26 | i_has_xattr_dir = 0x0040, | 26 | i_has_xattr_dir = 0x0040, |
| 27 | i_data_log = 0x0080, | 27 | i_data_log = 0x0080, |
| 28 | i_ever_mapped = 0x0100 | ||
| 29 | } reiserfs_inode_flags; | 28 | } reiserfs_inode_flags; |
| 30 | 29 | ||
| 31 | struct reiserfs_inode_info { | 30 | struct reiserfs_inode_info { |
| @@ -53,7 +52,8 @@ struct reiserfs_inode_info { | |||
| 53 | ** flushed */ | 52 | ** flushed */ |
| 54 | unsigned int i_trans_id; | 53 | unsigned int i_trans_id; |
| 55 | struct reiserfs_journal_list *i_jl; | 54 | struct reiserfs_journal_list *i_jl; |
| 56 | struct mutex i_mmap; | 55 | atomic_t openers; |
| 56 | struct mutex tailpack; | ||
| 57 | #ifdef CONFIG_REISERFS_FS_XATTR | 57 | #ifdef CONFIG_REISERFS_FS_XATTR |
| 58 | struct rw_semaphore i_xattr_sem; | 58 | struct rw_semaphore i_xattr_sem; |
| 59 | #endif | 59 | #endif |
diff --git a/include/linux/resource.h b/include/linux/resource.h index f1e914eefeab..88d36f9145ba 100644 --- a/include/linux/resource.h +++ b/include/linux/resource.h | |||
| @@ -43,6 +43,13 @@ struct rlimit { | |||
| 43 | unsigned long rlim_max; | 43 | unsigned long rlim_max; |
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | #define RLIM64_INFINITY (~0ULL) | ||
| 47 | |||
| 48 | struct rlimit64 { | ||
| 49 | __u64 rlim_cur; | ||
| 50 | __u64 rlim_max; | ||
| 51 | }; | ||
| 52 | |||
| 46 | #define PRIO_MIN (-20) | 53 | #define PRIO_MIN (-20) |
| 47 | #define PRIO_MAX 20 | 54 | #define PRIO_MAX 20 |
| 48 | 55 | ||
| @@ -73,6 +80,8 @@ struct rlimit { | |||
| 73 | struct task_struct; | 80 | struct task_struct; |
| 74 | 81 | ||
| 75 | int getrusage(struct task_struct *p, int who, struct rusage __user *ru); | 82 | int getrusage(struct task_struct *p, int who, struct rusage __user *ru); |
| 83 | int do_prlimit(struct task_struct *tsk, unsigned int resource, | ||
| 84 | struct rlimit *new_rlim, struct rlimit *old_rlim); | ||
| 76 | 85 | ||
| 77 | #endif /* __KERNEL__ */ | 86 | #endif /* __KERNEL__ */ |
| 78 | 87 | ||
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 77216742c178..d6661de56f30 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | */ | 26 | */ |
| 27 | struct anon_vma { | 27 | struct anon_vma { |
| 28 | spinlock_t lock; /* Serialize access to vma list */ | 28 | spinlock_t lock; /* Serialize access to vma list */ |
| 29 | struct anon_vma *root; /* Root of this anon_vma tree */ | ||
| 29 | #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION) | 30 | #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION) |
| 30 | 31 | ||
| 31 | /* | 32 | /* |
| @@ -80,6 +81,13 @@ static inline int anonvma_external_refcount(struct anon_vma *anon_vma) | |||
| 80 | { | 81 | { |
| 81 | return atomic_read(&anon_vma->external_refcount); | 82 | return atomic_read(&anon_vma->external_refcount); |
| 82 | } | 83 | } |
| 84 | |||
| 85 | static inline void get_anon_vma(struct anon_vma *anon_vma) | ||
| 86 | { | ||
| 87 | atomic_inc(&anon_vma->external_refcount); | ||
| 88 | } | ||
| 89 | |||
| 90 | void drop_anon_vma(struct anon_vma *); | ||
| 83 | #else | 91 | #else |
| 84 | static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma) | 92 | static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma) |
| 85 | { | 93 | { |
| @@ -89,6 +97,14 @@ static inline int anonvma_external_refcount(struct anon_vma *anon_vma) | |||
| 89 | { | 97 | { |
| 90 | return 0; | 98 | return 0; |
| 91 | } | 99 | } |
| 100 | |||
| 101 | static inline void get_anon_vma(struct anon_vma *anon_vma) | ||
| 102 | { | ||
| 103 | } | ||
| 104 | |||
| 105 | static inline void drop_anon_vma(struct anon_vma *anon_vma) | ||
| 106 | { | ||
| 107 | } | ||
| 92 | #endif /* CONFIG_KSM */ | 108 | #endif /* CONFIG_KSM */ |
| 93 | 109 | ||
| 94 | static inline struct anon_vma *page_anon_vma(struct page *page) | 110 | static inline struct anon_vma *page_anon_vma(struct page *page) |
| @@ -99,18 +115,28 @@ static inline struct anon_vma *page_anon_vma(struct page *page) | |||
| 99 | return page_rmapping(page); | 115 | return page_rmapping(page); |
| 100 | } | 116 | } |
| 101 | 117 | ||
| 102 | static inline void anon_vma_lock(struct vm_area_struct *vma) | 118 | static inline void vma_lock_anon_vma(struct vm_area_struct *vma) |
| 103 | { | 119 | { |
| 104 | struct anon_vma *anon_vma = vma->anon_vma; | 120 | struct anon_vma *anon_vma = vma->anon_vma; |
| 105 | if (anon_vma) | 121 | if (anon_vma) |
| 106 | spin_lock(&anon_vma->lock); | 122 | spin_lock(&anon_vma->root->lock); |
| 107 | } | 123 | } |
| 108 | 124 | ||
| 109 | static inline void anon_vma_unlock(struct vm_area_struct *vma) | 125 | static inline void vma_unlock_anon_vma(struct vm_area_struct *vma) |
| 110 | { | 126 | { |
| 111 | struct anon_vma *anon_vma = vma->anon_vma; | 127 | struct anon_vma *anon_vma = vma->anon_vma; |
| 112 | if (anon_vma) | 128 | if (anon_vma) |
| 113 | spin_unlock(&anon_vma->lock); | 129 | spin_unlock(&anon_vma->root->lock); |
| 130 | } | ||
| 131 | |||
| 132 | static inline void anon_vma_lock(struct anon_vma *anon_vma) | ||
| 133 | { | ||
| 134 | spin_lock(&anon_vma->root->lock); | ||
| 135 | } | ||
| 136 | |||
| 137 | static inline void anon_vma_unlock(struct anon_vma *anon_vma) | ||
| 138 | { | ||
| 139 | spin_unlock(&anon_vma->root->lock); | ||
| 114 | } | 140 | } |
| 115 | 141 | ||
| 116 | /* | 142 | /* |
| @@ -136,6 +162,8 @@ static inline void anon_vma_merge(struct vm_area_struct *vma, | |||
| 136 | */ | 162 | */ |
| 137 | void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); | 163 | void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); |
| 138 | void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); | 164 | void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); |
| 165 | void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, | ||
| 166 | unsigned long, int); | ||
| 139 | void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); | 167 | void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long); |
| 140 | void page_add_file_rmap(struct page *); | 168 | void page_add_file_rmap(struct page *); |
| 141 | void page_remove_rmap(struct page *); | 169 | void page_remove_rmap(struct page *); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 0478888c6899..ce160d68f5e7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -272,19 +272,10 @@ extern int runqueue_is_locked(int cpu); | |||
| 272 | 272 | ||
| 273 | extern cpumask_var_t nohz_cpu_mask; | 273 | extern cpumask_var_t nohz_cpu_mask; |
| 274 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) | 274 | #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) |
| 275 | extern int select_nohz_load_balancer(int cpu); | 275 | extern void select_nohz_load_balancer(int stop_tick); |
| 276 | extern int get_nohz_load_balancer(void); | 276 | extern int get_nohz_timer_target(void); |
| 277 | extern int nohz_ratelimit(int cpu); | ||
| 278 | #else | 277 | #else |
| 279 | static inline int select_nohz_load_balancer(int cpu) | 278 | static inline void select_nohz_load_balancer(int stop_tick) { } |
| 280 | { | ||
| 281 | return 0; | ||
| 282 | } | ||
| 283 | |||
| 284 | static inline int nohz_ratelimit(int cpu) | ||
| 285 | { | ||
| 286 | return 0; | ||
| 287 | } | ||
| 288 | #endif | 279 | #endif |
| 289 | 280 | ||
| 290 | /* | 281 | /* |
| @@ -316,20 +307,16 @@ extern void scheduler_tick(void); | |||
| 316 | 307 | ||
| 317 | extern void sched_show_task(struct task_struct *p); | 308 | extern void sched_show_task(struct task_struct *p); |
| 318 | 309 | ||
| 319 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 310 | #ifdef CONFIG_LOCKUP_DETECTOR |
| 320 | extern void softlockup_tick(void); | ||
| 321 | extern void touch_softlockup_watchdog(void); | 311 | extern void touch_softlockup_watchdog(void); |
| 322 | extern void touch_softlockup_watchdog_sync(void); | 312 | extern void touch_softlockup_watchdog_sync(void); |
| 323 | extern void touch_all_softlockup_watchdogs(void); | 313 | extern void touch_all_softlockup_watchdogs(void); |
| 324 | extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write, | 314 | extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, |
| 325 | void __user *buffer, | 315 | void __user *buffer, |
| 326 | size_t *lenp, loff_t *ppos); | 316 | size_t *lenp, loff_t *ppos); |
| 327 | extern unsigned int softlockup_panic; | 317 | extern unsigned int softlockup_panic; |
| 328 | extern int softlockup_thresh; | 318 | extern int softlockup_thresh; |
| 329 | #else | 319 | #else |
| 330 | static inline void softlockup_tick(void) | ||
| 331 | { | ||
| 332 | } | ||
| 333 | static inline void touch_softlockup_watchdog(void) | 320 | static inline void touch_softlockup_watchdog(void) |
| 334 | { | 321 | { |
| 335 | } | 322 | } |
| @@ -634,7 +621,8 @@ struct signal_struct { | |||
| 634 | struct tty_audit_buf *tty_audit_buf; | 621 | struct tty_audit_buf *tty_audit_buf; |
| 635 | #endif | 622 | #endif |
| 636 | 623 | ||
| 637 | int oom_adj; /* OOM kill score adjustment (bit shift) */ | 624 | int oom_adj; /* OOM kill score adjustment (bit shift) */ |
| 625 | int oom_score_adj; /* OOM kill score adjustment */ | ||
| 638 | }; | 626 | }; |
| 639 | 627 | ||
| 640 | /* Context switch must be unlocked if interrupts are to be enabled */ | 628 | /* Context switch must be unlocked if interrupts are to be enabled */ |
| @@ -805,7 +793,7 @@ enum cpu_idle_type { | |||
| 805 | #define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ | 793 | #define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ |
| 806 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ | 794 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ |
| 807 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ | 795 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ |
| 808 | 796 | #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ | |
| 809 | #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ | 797 | #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ |
| 810 | 798 | ||
| 811 | enum powersavings_balance_level { | 799 | enum powersavings_balance_level { |
| @@ -840,6 +828,8 @@ static inline int sd_balance_for_package_power(void) | |||
| 840 | return SD_PREFER_SIBLING; | 828 | return SD_PREFER_SIBLING; |
| 841 | } | 829 | } |
| 842 | 830 | ||
| 831 | extern int __weak arch_sd_sibiling_asym_packing(void); | ||
| 832 | |||
| 843 | /* | 833 | /* |
| 844 | * Optimise SD flags for power savings: | 834 | * Optimise SD flags for power savings: |
| 845 | * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings. | 835 | * SD_BALANCE_NEWIDLE helps agressive task consolidation and power savings. |
| @@ -861,7 +851,7 @@ struct sched_group { | |||
| 861 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a | 851 | * CPU power of this group, SCHED_LOAD_SCALE being max power for a |
| 862 | * single CPU. | 852 | * single CPU. |
| 863 | */ | 853 | */ |
| 864 | unsigned int cpu_power; | 854 | unsigned int cpu_power, cpu_power_orig; |
| 865 | 855 | ||
| 866 | /* | 856 | /* |
| 867 | * The CPUs this group covers. | 857 | * The CPUs this group covers. |
| @@ -1697,6 +1687,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
| 1697 | #define PF_EXITING 0x00000004 /* getting shut down */ | 1687 | #define PF_EXITING 0x00000004 /* getting shut down */ |
| 1698 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ | 1688 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
| 1699 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ | 1689 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
| 1690 | #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ | ||
| 1700 | #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ | 1691 | #define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */ |
| 1701 | #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ | 1692 | #define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */ |
| 1702 | #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ | 1693 | #define PF_SUPERPRIV 0x00000100 /* used super-user privileges */ |
| @@ -1791,20 +1782,23 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) | |||
| 1791 | #endif | 1782 | #endif |
| 1792 | 1783 | ||
| 1793 | /* | 1784 | /* |
| 1794 | * Architectures can set this to 1 if they have specified | 1785 | * Do not use outside of architecture code which knows its limitations. |
| 1795 | * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, | 1786 | * |
| 1796 | * but then during bootup it turns out that sched_clock() | 1787 | * sched_clock() has no promise of monotonicity or bounded drift between |
| 1797 | * is reliable after all: | 1788 | * CPUs, use (which you should not) requires disabling IRQs. |
| 1789 | * | ||
| 1790 | * Please use one of the three interfaces below. | ||
| 1798 | */ | 1791 | */ |
| 1799 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | ||
| 1800 | extern int sched_clock_stable; | ||
| 1801 | #endif | ||
| 1802 | |||
| 1803 | /* ftrace calls sched_clock() directly */ | ||
| 1804 | extern unsigned long long notrace sched_clock(void); | 1792 | extern unsigned long long notrace sched_clock(void); |
| 1793 | /* | ||
| 1794 | * See the comment in kernel/sched_clock.c | ||
| 1795 | */ | ||
| 1796 | extern u64 cpu_clock(int cpu); | ||
| 1797 | extern u64 local_clock(void); | ||
| 1798 | extern u64 sched_clock_cpu(int cpu); | ||
| 1799 | |||
| 1805 | 1800 | ||
| 1806 | extern void sched_clock_init(void); | 1801 | extern void sched_clock_init(void); |
| 1807 | extern u64 sched_clock_cpu(int cpu); | ||
| 1808 | 1802 | ||
| 1809 | #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | 1803 | #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK |
| 1810 | static inline void sched_clock_tick(void) | 1804 | static inline void sched_clock_tick(void) |
| @@ -1819,17 +1813,19 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns) | |||
| 1819 | { | 1813 | { |
| 1820 | } | 1814 | } |
| 1821 | #else | 1815 | #else |
| 1816 | /* | ||
| 1817 | * Architectures can set this to 1 if they have specified | ||
| 1818 | * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig, | ||
| 1819 | * but then during bootup it turns out that sched_clock() | ||
| 1820 | * is reliable after all: | ||
| 1821 | */ | ||
| 1822 | extern int sched_clock_stable; | ||
| 1823 | |||
| 1822 | extern void sched_clock_tick(void); | 1824 | extern void sched_clock_tick(void); |
| 1823 | extern void sched_clock_idle_sleep_event(void); | 1825 | extern void sched_clock_idle_sleep_event(void); |
| 1824 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); | 1826 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); |
| 1825 | #endif | 1827 | #endif |
| 1826 | 1828 | ||
| 1827 | /* | ||
| 1828 | * For kernel-internal use: high-speed (but slightly incorrect) per-cpu | ||
| 1829 | * clock constructed from sched_clock(): | ||
| 1830 | */ | ||
| 1831 | extern unsigned long long cpu_clock(int cpu); | ||
| 1832 | |||
| 1833 | extern unsigned long long | 1829 | extern unsigned long long |
| 1834 | task_sched_runtime(struct task_struct *task); | 1830 | task_sched_runtime(struct task_struct *task); |
| 1835 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); | 1831 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); |
| @@ -2435,18 +2431,6 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) | |||
| 2435 | 2431 | ||
| 2436 | #endif /* CONFIG_SMP */ | 2432 | #endif /* CONFIG_SMP */ |
| 2437 | 2433 | ||
| 2438 | #ifdef CONFIG_TRACING | ||
| 2439 | extern void | ||
| 2440 | __trace_special(void *__tr, void *__data, | ||
| 2441 | unsigned long arg1, unsigned long arg2, unsigned long arg3); | ||
| 2442 | #else | ||
| 2443 | static inline void | ||
| 2444 | __trace_special(void *__tr, void *__data, | ||
| 2445 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
| 2446 | { | ||
| 2447 | } | ||
| 2448 | #endif | ||
| 2449 | |||
| 2450 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); | 2434 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
| 2451 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); | 2435 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); |
| 2452 | 2436 | ||
diff --git a/include/linux/security.h b/include/linux/security.h index 723a93df756a..a22219afff09 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #define __LINUX_SECURITY_H | 23 | #define __LINUX_SECURITY_H |
| 24 | 24 | ||
| 25 | #include <linux/fs.h> | 25 | #include <linux/fs.h> |
| 26 | #include <linux/fsnotify.h> | ||
| 26 | #include <linux/binfmts.h> | 27 | #include <linux/binfmts.h> |
| 27 | #include <linux/signal.h> | 28 | #include <linux/signal.h> |
| 28 | #include <linux/resource.h> | 29 | #include <linux/resource.h> |
| @@ -1498,7 +1499,8 @@ struct security_operations { | |||
| 1498 | int (*task_setnice) (struct task_struct *p, int nice); | 1499 | int (*task_setnice) (struct task_struct *p, int nice); |
| 1499 | int (*task_setioprio) (struct task_struct *p, int ioprio); | 1500 | int (*task_setioprio) (struct task_struct *p, int ioprio); |
| 1500 | int (*task_getioprio) (struct task_struct *p); | 1501 | int (*task_getioprio) (struct task_struct *p); |
| 1501 | int (*task_setrlimit) (unsigned int resource, struct rlimit *new_rlim); | 1502 | int (*task_setrlimit) (struct task_struct *p, unsigned int resource, |
| 1503 | struct rlimit *new_rlim); | ||
| 1502 | int (*task_setscheduler) (struct task_struct *p, int policy, | 1504 | int (*task_setscheduler) (struct task_struct *p, int policy, |
| 1503 | struct sched_param *lp); | 1505 | struct sched_param *lp); |
| 1504 | int (*task_getscheduler) (struct task_struct *p); | 1506 | int (*task_getscheduler) (struct task_struct *p); |
| @@ -1748,7 +1750,8 @@ void security_task_getsecid(struct task_struct *p, u32 *secid); | |||
| 1748 | int security_task_setnice(struct task_struct *p, int nice); | 1750 | int security_task_setnice(struct task_struct *p, int nice); |
| 1749 | int security_task_setioprio(struct task_struct *p, int ioprio); | 1751 | int security_task_setioprio(struct task_struct *p, int ioprio); |
| 1750 | int security_task_getioprio(struct task_struct *p); | 1752 | int security_task_getioprio(struct task_struct *p); |
| 1751 | int security_task_setrlimit(unsigned int resource, struct rlimit *new_rlim); | 1753 | int security_task_setrlimit(struct task_struct *p, unsigned int resource, |
| 1754 | struct rlimit *new_rlim); | ||
| 1752 | int security_task_setscheduler(struct task_struct *p, | 1755 | int security_task_setscheduler(struct task_struct *p, |
| 1753 | int policy, struct sched_param *lp); | 1756 | int policy, struct sched_param *lp); |
| 1754 | int security_task_getscheduler(struct task_struct *p); | 1757 | int security_task_getscheduler(struct task_struct *p); |
| @@ -2310,7 +2313,8 @@ static inline int security_task_getioprio(struct task_struct *p) | |||
| 2310 | return 0; | 2313 | return 0; |
| 2311 | } | 2314 | } |
| 2312 | 2315 | ||
| 2313 | static inline int security_task_setrlimit(unsigned int resource, | 2316 | static inline int security_task_setrlimit(struct task_struct *p, |
| 2317 | unsigned int resource, | ||
| 2314 | struct rlimit *new_rlim) | 2318 | struct rlimit *new_rlim) |
| 2315 | { | 2319 | { |
| 2316 | return 0; | 2320 | return 0; |
diff --git a/include/linux/serial.h b/include/linux/serial.h index c8613c3ff9d3..1ebc694a6d52 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h | |||
| @@ -77,7 +77,8 @@ struct serial_struct { | |||
| 77 | #define PORT_16654 11 | 77 | #define PORT_16654 11 |
| 78 | #define PORT_16850 12 | 78 | #define PORT_16850 12 |
| 79 | #define PORT_RSA 13 /* RSA-DV II/S card */ | 79 | #define PORT_RSA 13 /* RSA-DV II/S card */ |
| 80 | #define PORT_MAX 13 | 80 | #define PORT_U6_16550A 14 |
| 81 | #define PORT_MAX 14 | ||
| 81 | 82 | ||
| 82 | #define SERIAL_IO_PORT 0 | 83 | #define SERIAL_IO_PORT 0 |
| 83 | #define SERIAL_IO_HUB6 1 | 84 | #define SERIAL_IO_HUB6 1 |
| @@ -151,7 +152,7 @@ struct serial_uart_config { | |||
| 151 | #define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART) | 152 | #define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART) |
| 152 | #define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE) | 153 | #define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE) |
| 153 | 154 | ||
| 154 | #define ASYNC_FLAGS ((1U << ASYNCB_LAST_USER) - 1) | 155 | #define ASYNC_FLAGS ((1U << (ASYNCB_LAST_USER + 1)) - 1) |
| 155 | #define ASYNC_USR_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI| \ | 156 | #define ASYNC_USR_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI| \ |
| 156 | ASYNC_CALLOUT_NOHUP|ASYNC_SPD_SHI|ASYNC_LOW_LATENCY) | 157 | ASYNC_CALLOUT_NOHUP|ASYNC_SPD_SHI|ASYNC_LOW_LATENCY) |
| 157 | #define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI) | 158 | #define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI) |
| @@ -210,8 +211,10 @@ struct serial_rs485 { | |||
| 210 | #define SER_RS485_ENABLED (1 << 0) | 211 | #define SER_RS485_ENABLED (1 << 0) |
| 211 | #define SER_RS485_RTS_ON_SEND (1 << 1) | 212 | #define SER_RS485_RTS_ON_SEND (1 << 1) |
| 212 | #define SER_RS485_RTS_AFTER_SEND (1 << 2) | 213 | #define SER_RS485_RTS_AFTER_SEND (1 << 2) |
| 214 | #define SER_RS485_RTS_BEFORE_SEND (1 << 3) | ||
| 213 | __u32 delay_rts_before_send; /* Milliseconds */ | 215 | __u32 delay_rts_before_send; /* Milliseconds */ |
| 214 | __u32 padding[6]; /* Memory is cheap, new structs | 216 | __u32 delay_rts_after_send; /* Milliseconds */ |
| 217 | __u32 padding[5]; /* Memory is cheap, new structs | ||
| 215 | are a royal PITA .. */ | 218 | are a royal PITA .. */ |
| 216 | }; | 219 | }; |
| 217 | 220 | ||
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index fb46aba11fb5..7638deaaba65 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h | |||
| @@ -32,6 +32,9 @@ struct plat_serial8250_port { | |||
| 32 | unsigned int type; /* If UPF_FIXED_TYPE */ | 32 | unsigned int type; /* If UPF_FIXED_TYPE */ |
| 33 | unsigned int (*serial_in)(struct uart_port *, int); | 33 | unsigned int (*serial_in)(struct uart_port *, int); |
| 34 | void (*serial_out)(struct uart_port *, int, int); | 34 | void (*serial_out)(struct uart_port *, int, int); |
| 35 | void (*set_termios)(struct uart_port *, | ||
| 36 | struct ktermios *new, | ||
| 37 | struct ktermios *old); | ||
| 35 | }; | 38 | }; |
| 36 | 39 | ||
| 37 | /* | 40 | /* |
| @@ -71,5 +74,7 @@ extern int early_serial_setup(struct uart_port *port); | |||
| 71 | extern int serial8250_find_port(struct uart_port *p); | 74 | extern int serial8250_find_port(struct uart_port *p); |
| 72 | extern int serial8250_find_port_for_earlycon(void); | 75 | extern int serial8250_find_port_for_earlycon(void); |
| 73 | extern int setup_early_serial8250_console(char *cmdline); | 76 | extern int setup_early_serial8250_console(char *cmdline); |
| 77 | extern void serial8250_do_set_termios(struct uart_port *port, | ||
| 78 | struct ktermios *termios, struct ktermios *old); | ||
| 74 | 79 | ||
| 75 | #endif | 80 | #endif |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index f10db6e5f3b5..3c2ad99fed34 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
| @@ -186,6 +186,15 @@ | |||
| 186 | #define PORT_ALTERA_JTAGUART 91 | 186 | #define PORT_ALTERA_JTAGUART 91 |
| 187 | #define PORT_ALTERA_UART 92 | 187 | #define PORT_ALTERA_UART 92 |
| 188 | 188 | ||
| 189 | /* SH-SCI */ | ||
| 190 | #define PORT_SCIFB 93 | ||
| 191 | |||
| 192 | /* MAX3107 */ | ||
| 193 | #define PORT_MAX3107 94 | ||
| 194 | |||
| 195 | /* High Speed UART for Medfield */ | ||
| 196 | #define PORT_MFD 95 | ||
| 197 | |||
| 189 | #ifdef __KERNEL__ | 198 | #ifdef __KERNEL__ |
| 190 | 199 | ||
| 191 | #include <linux/compiler.h> | 200 | #include <linux/compiler.h> |
| @@ -220,7 +229,7 @@ struct uart_ops { | |||
| 220 | void (*flush_buffer)(struct uart_port *); | 229 | void (*flush_buffer)(struct uart_port *); |
| 221 | void (*set_termios)(struct uart_port *, struct ktermios *new, | 230 | void (*set_termios)(struct uart_port *, struct ktermios *new, |
| 222 | struct ktermios *old); | 231 | struct ktermios *old); |
| 223 | void (*set_ldisc)(struct uart_port *); | 232 | void (*set_ldisc)(struct uart_port *, int new); |
| 224 | void (*pm)(struct uart_port *, unsigned int state, | 233 | void (*pm)(struct uart_port *, unsigned int state, |
| 225 | unsigned int oldstate); | 234 | unsigned int oldstate); |
| 226 | int (*set_wake)(struct uart_port *, unsigned int state); | 235 | int (*set_wake)(struct uart_port *, unsigned int state); |
| @@ -276,6 +285,9 @@ struct uart_port { | |||
| 276 | unsigned char __iomem *membase; /* read/write[bwl] */ | 285 | unsigned char __iomem *membase; /* read/write[bwl] */ |
| 277 | unsigned int (*serial_in)(struct uart_port *, int); | 286 | unsigned int (*serial_in)(struct uart_port *, int); |
| 278 | void (*serial_out)(struct uart_port *, int, int); | 287 | void (*serial_out)(struct uart_port *, int, int); |
| 288 | void (*set_termios)(struct uart_port *, | ||
| 289 | struct ktermios *new, | ||
| 290 | struct ktermios *old); | ||
| 279 | unsigned int irq; /* irq number */ | 291 | unsigned int irq; /* irq number */ |
| 280 | unsigned long irqflags; /* irq flags */ | 292 | unsigned long irqflags; /* irq flags */ |
| 281 | unsigned int uartclk; /* base uart clock */ | 293 | unsigned int uartclk; /* base uart clock */ |
diff --git a/include/linux/serial_mfd.h b/include/linux/serial_mfd.h new file mode 100644 index 000000000000..2b071e0b034d --- /dev/null +++ b/include/linux/serial_mfd.h | |||
| @@ -0,0 +1,47 @@ | |||
| 1 | #ifndef _SERIAL_MFD_H_ | ||
| 2 | #define _SERIAL_MFD_H_ | ||
| 3 | |||
| 4 | /* HW register offset definition */ | ||
| 5 | #define UART_FOR 0x08 | ||
| 6 | #define UART_PS 0x0C | ||
| 7 | #define UART_MUL 0x0D | ||
| 8 | #define UART_DIV 0x0E | ||
| 9 | |||
| 10 | #define HSU_GBL_IEN 0x0 | ||
| 11 | #define HSU_GBL_IST 0x4 | ||
| 12 | |||
| 13 | #define HSU_GBL_INT_BIT_PORT0 0x0 | ||
| 14 | #define HSU_GBL_INT_BIT_PORT1 0x1 | ||
| 15 | #define HSU_GBL_INT_BIT_PORT2 0x2 | ||
| 16 | #define HSU_GBL_INT_BIT_IRI 0x3 | ||
| 17 | #define HSU_GBL_INT_BIT_HDLC 0x4 | ||
| 18 | #define HSU_GBL_INT_BIT_DMA 0x5 | ||
| 19 | |||
| 20 | #define HSU_GBL_ISR 0x8 | ||
| 21 | #define HSU_GBL_DMASR 0x400 | ||
| 22 | #define HSU_GBL_DMAISR 0x404 | ||
| 23 | |||
| 24 | #define HSU_PORT_REG_OFFSET 0x80 | ||
| 25 | #define HSU_PORT0_REG_OFFSET 0x80 | ||
| 26 | #define HSU_PORT1_REG_OFFSET 0x100 | ||
| 27 | #define HSU_PORT2_REG_OFFSET 0x180 | ||
| 28 | #define HSU_PORT_REG_LENGTH 0x80 | ||
| 29 | |||
| 30 | #define HSU_DMA_CHANS_REG_OFFSET 0x500 | ||
| 31 | #define HSU_DMA_CHANS_REG_LENGTH 0x40 | ||
| 32 | |||
| 33 | #define HSU_CH_SR 0x0 /* channel status reg */ | ||
| 34 | #define HSU_CH_CR 0x4 /* control reg */ | ||
| 35 | #define HSU_CH_DCR 0x8 /* descriptor control reg */ | ||
| 36 | #define HSU_CH_BSR 0x10 /* max fifo buffer size reg */ | ||
| 37 | #define HSU_CH_MOTSR 0x14 /* minimum ocp transfer size */ | ||
| 38 | #define HSU_CH_D0SAR 0x20 /* desc 0 start addr */ | ||
| 39 | #define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */ | ||
| 40 | #define HSU_CH_D1SAR 0x28 | ||
| 41 | #define HSU_CH_D1TSR 0x2C | ||
| 42 | #define HSU_CH_D2SAR 0x30 | ||
| 43 | #define HSU_CH_D2TSR 0x34 | ||
| 44 | #define HSU_CH_D3SAR 0x38 | ||
| 45 | #define HSU_CH_D3TSR 0x3C | ||
| 46 | |||
| 47 | #endif | ||
diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h index cf9327c051ad..c7a0ce11cd47 100644 --- a/include/linux/serial_reg.h +++ b/include/linux/serial_reg.h | |||
| @@ -221,8 +221,24 @@ | |||
| 221 | #define UART_FCR_PXAR16 0x80 /* receive FIFO threshold = 16 */ | 221 | #define UART_FCR_PXAR16 0x80 /* receive FIFO threshold = 16 */ |
| 222 | #define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */ | 222 | #define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */ |
| 223 | 223 | ||
| 224 | /* | ||
| 225 | * Intel MID on-chip HSU (High Speed UART) defined bits | ||
| 226 | */ | ||
| 227 | #define UART_FCR_HSU_64_1B 0x00 /* receive FIFO treshold = 1 */ | ||
| 228 | #define UART_FCR_HSU_64_16B 0x40 /* receive FIFO treshold = 16 */ | ||
| 229 | #define UART_FCR_HSU_64_32B 0x80 /* receive FIFO treshold = 32 */ | ||
| 230 | #define UART_FCR_HSU_64_56B 0xc0 /* receive FIFO treshold = 56 */ | ||
| 231 | |||
| 232 | #define UART_FCR_HSU_16_1B 0x00 /* receive FIFO treshold = 1 */ | ||
| 233 | #define UART_FCR_HSU_16_4B 0x40 /* receive FIFO treshold = 4 */ | ||
| 234 | #define UART_FCR_HSU_16_8B 0x80 /* receive FIFO treshold = 8 */ | ||
| 235 | #define UART_FCR_HSU_16_14B 0xc0 /* receive FIFO treshold = 14 */ | ||
| 224 | 236 | ||
| 237 | #define UART_FCR_HSU_64B_FIFO 0x20 /* chose 64 bytes FIFO */ | ||
| 238 | #define UART_FCR_HSU_16B_FIFO 0x00 /* chose 16 bytes FIFO */ | ||
| 225 | 239 | ||
| 240 | #define UART_FCR_HALF_EMPT_TXI 0x00 /* trigger TX_EMPT IRQ for half empty */ | ||
| 241 | #define UART_FCR_FULL_EMPT_TXI 0x08 /* trigger TX_EMPT IRQ for full empty */ | ||
| 226 | 242 | ||
| 227 | /* | 243 | /* |
| 228 | * These register definitions are for the 16C950 | 244 | * These register definitions are for the 16C950 |
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h index 1636d1e2a5f1..875ce50719a9 100644 --- a/include/linux/sh_clk.h +++ b/include/linux/sh_clk.h | |||
| @@ -25,6 +25,10 @@ struct clk { | |||
| 25 | int id; | 25 | int id; |
| 26 | 26 | ||
| 27 | struct clk *parent; | 27 | struct clk *parent; |
| 28 | struct clk **parent_table; /* list of parents to */ | ||
| 29 | unsigned short parent_num; /* choose between */ | ||
| 30 | unsigned char src_shift; /* source clock field in the */ | ||
| 31 | unsigned char src_width; /* configuration register */ | ||
| 28 | struct clk_ops *ops; | 32 | struct clk_ops *ops; |
| 29 | 33 | ||
| 30 | struct list_head children; | 34 | struct list_head children; |
| @@ -138,13 +142,22 @@ int sh_clk_div4_enable_register(struct clk *clks, int nr, | |||
| 138 | int sh_clk_div4_reparent_register(struct clk *clks, int nr, | 142 | int sh_clk_div4_reparent_register(struct clk *clks, int nr, |
| 139 | struct clk_div4_table *table); | 143 | struct clk_div4_table *table); |
| 140 | 144 | ||
| 141 | #define SH_CLK_DIV6(_parent, _reg, _flags) \ | 145 | #define SH_CLK_DIV6_EXT(_parent, _reg, _flags, _parents, \ |
| 142 | { \ | 146 | _num_parents, _src_shift, _src_width) \ |
| 143 | .parent = _parent, \ | 147 | { \ |
| 144 | .enable_reg = (void __iomem *)_reg, \ | 148 | .parent = _parent, \ |
| 145 | .flags = _flags, \ | 149 | .enable_reg = (void __iomem *)_reg, \ |
| 150 | .flags = _flags, \ | ||
| 151 | .parent_table = _parents, \ | ||
| 152 | .parent_num = _num_parents, \ | ||
| 153 | .src_shift = _src_shift, \ | ||
| 154 | .src_width = _src_width, \ | ||
| 146 | } | 155 | } |
| 147 | 156 | ||
| 157 | #define SH_CLK_DIV6(_parent, _reg, _flags) \ | ||
| 158 | SH_CLK_DIV6_EXT(_parent, _reg, _flags, NULL, 0, 0, 0) | ||
| 159 | |||
| 148 | int sh_clk_div6_register(struct clk *clks, int nr); | 160 | int sh_clk_div6_register(struct clk *clks, int nr); |
| 161 | int sh_clk_div6_reparent_register(struct clk *clks, int nr); | ||
| 149 | 162 | ||
| 150 | #endif /* __SH_CLOCK_H */ | 163 | #endif /* __SH_CLOCK_H */ |
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index e164291fb3e7..399be5ad2f99 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/swap.h> | 4 | #include <linux/swap.h> |
| 5 | #include <linux/mempolicy.h> | 5 | #include <linux/mempolicy.h> |
| 6 | #include <linux/percpu_counter.h> | ||
| 6 | 7 | ||
| 7 | /* inode in-kernel data */ | 8 | /* inode in-kernel data */ |
| 8 | 9 | ||
| @@ -23,7 +24,7 @@ struct shmem_inode_info { | |||
| 23 | 24 | ||
| 24 | struct shmem_sb_info { | 25 | struct shmem_sb_info { |
| 25 | unsigned long max_blocks; /* How many blocks are allowed */ | 26 | unsigned long max_blocks; /* How many blocks are allowed */ |
| 26 | unsigned long free_blocks; /* How many are left for allocation */ | 27 | struct percpu_counter used_blocks; /* How many are allocated */ |
| 27 | unsigned long max_inodes; /* How many inodes are allowed */ | 28 | unsigned long max_inodes; /* How many inodes are allowed */ |
| 28 | unsigned long free_inodes; /* How many are left for allocation */ | 29 | unsigned long free_inodes; /* How many are left for allocation */ |
| 29 | spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ | 30 | spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index d20d9e7a9bbd..77eb60d2b496 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -1379,6 +1379,11 @@ static inline int skb_network_offset(const struct sk_buff *skb) | |||
| 1379 | return skb_network_header(skb) - skb->data; | 1379 | return skb_network_header(skb) - skb->data; |
| 1380 | } | 1380 | } |
| 1381 | 1381 | ||
| 1382 | static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) | ||
| 1383 | { | ||
| 1384 | return pskb_may_pull(skb, skb_network_offset(skb) + len); | ||
| 1385 | } | ||
| 1386 | |||
| 1382 | /* | 1387 | /* |
| 1383 | * CPUs often take a performance hit when accessing unaligned memory | 1388 | * CPUs often take a performance hit when accessing unaligned memory |
| 1384 | * locations. The actual performance hit varies, it can be small if the | 1389 | * locations. The actual performance hit varies, it can be small if the |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 49d1247cd6d9..59260e21bdf5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -268,7 +268,8 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, | |||
| 268 | * allocator where we care about the real place the memory allocation | 268 | * allocator where we care about the real place the memory allocation |
| 269 | * request comes from. | 269 | * request comes from. |
| 270 | */ | 270 | */ |
| 271 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) | 271 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ |
| 272 | (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) | ||
| 272 | extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); | 273 | extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); |
| 273 | #define kmalloc_track_caller(size, flags) \ | 274 | #define kmalloc_track_caller(size, flags) \ |
| 274 | __kmalloc_track_caller(size, flags, _RET_IP_) | 275 | __kmalloc_track_caller(size, flags, _RET_IP_) |
| @@ -286,7 +287,8 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); | |||
| 286 | * standard allocator where we care about the real place the memory | 287 | * standard allocator where we care about the real place the memory |
| 287 | * allocation request comes from. | 288 | * allocation request comes from. |
| 288 | */ | 289 | */ |
| 289 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) | 290 | #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ |
| 291 | (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) | ||
| 290 | extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); | 292 | extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); |
| 291 | #define kmalloc_node_track_caller(size, flags, node) \ | 293 | #define kmalloc_node_track_caller(size, flags, node) \ |
| 292 | __kmalloc_node_track_caller(size, flags, node, \ | 294 | __kmalloc_node_track_caller(size, flags, node, \ |
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 1812dac8c496..791a502f6906 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
| @@ -14,9 +14,9 @@ | |||
| 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ | 14 | #include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */ |
| 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ | 15 | #include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */ |
| 16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
| 17 | #include <linux/kmemtrace.h> | ||
| 18 | 17 | ||
| 19 | #ifndef ARCH_KMALLOC_MINALIGN | 18 | #include <trace/events/kmem.h> |
| 19 | |||
| 20 | /* | 20 | /* |
| 21 | * Enforce a minimum alignment for the kmalloc caches. | 21 | * Enforce a minimum alignment for the kmalloc caches. |
| 22 | * Usually, the kmalloc caches are cache_line_size() aligned, except when | 22 | * Usually, the kmalloc caches are cache_line_size() aligned, except when |
| @@ -26,6 +26,9 @@ | |||
| 26 | * ARCH_KMALLOC_MINALIGN allows that. | 26 | * ARCH_KMALLOC_MINALIGN allows that. |
| 27 | * Note that increasing this value may disable some debug features. | 27 | * Note that increasing this value may disable some debug features. |
| 28 | */ | 28 | */ |
| 29 | #ifdef ARCH_DMA_MINALIGN | ||
| 30 | #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN | ||
| 31 | #else | ||
| 29 | #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) | 32 | #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) |
| 30 | #endif | 33 | #endif |
| 31 | 34 | ||
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h index 62667f72c2ef..4382db09df4f 100644 --- a/include/linux/slob_def.h +++ b/include/linux/slob_def.h | |||
| @@ -1,7 +1,9 @@ | |||
| 1 | #ifndef __LINUX_SLOB_DEF_H | 1 | #ifndef __LINUX_SLOB_DEF_H |
| 2 | #define __LINUX_SLOB_DEF_H | 2 | #define __LINUX_SLOB_DEF_H |
| 3 | 3 | ||
| 4 | #ifndef ARCH_KMALLOC_MINALIGN | 4 | #ifdef ARCH_DMA_MINALIGN |
| 5 | #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN | ||
| 6 | #else | ||
| 5 | #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long) | 7 | #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long) |
| 6 | #endif | 8 | #endif |
| 7 | 9 | ||
diff --git a/include/linux/slow-work.h b/include/linux/slow-work.h deleted file mode 100644 index 13337bf6c3f5..000000000000 --- a/include/linux/slow-work.h +++ /dev/null | |||
| @@ -1,163 +0,0 @@ | |||
| 1 | /* Worker thread pool for slow items, such as filesystem lookups or mkdirs | ||
| 2 | * | ||
| 3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | ||
| 4 | * Written by David Howells (dhowells@redhat.com) | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU General Public Licence | ||
| 8 | * as published by the Free Software Foundation; either version | ||
| 9 | * 2 of the Licence, or (at your option) any later version. | ||
| 10 | * | ||
| 11 | * See Documentation/slow-work.txt | ||
| 12 | */ | ||
| 13 | |||
| 14 | #ifndef _LINUX_SLOW_WORK_H | ||
| 15 | #define _LINUX_SLOW_WORK_H | ||
| 16 | |||
| 17 | #ifdef CONFIG_SLOW_WORK | ||
| 18 | |||
| 19 | #include <linux/sysctl.h> | ||
| 20 | #include <linux/timer.h> | ||
| 21 | |||
| 22 | struct slow_work; | ||
| 23 | #ifdef CONFIG_SLOW_WORK_DEBUG | ||
| 24 | struct seq_file; | ||
| 25 | #endif | ||
| 26 | |||
| 27 | /* | ||
| 28 | * The operations used to support slow work items | ||
| 29 | */ | ||
| 30 | struct slow_work_ops { | ||
| 31 | /* owner */ | ||
| 32 | struct module *owner; | ||
| 33 | |||
| 34 | /* get a ref on a work item | ||
| 35 | * - return 0 if successful, -ve if not | ||
| 36 | */ | ||
| 37 | int (*get_ref)(struct slow_work *work); | ||
| 38 | |||
| 39 | /* discard a ref to a work item */ | ||
| 40 | void (*put_ref)(struct slow_work *work); | ||
| 41 | |||
| 42 | /* execute a work item */ | ||
| 43 | void (*execute)(struct slow_work *work); | ||
| 44 | |||
| 45 | #ifdef CONFIG_SLOW_WORK_DEBUG | ||
| 46 | /* describe a work item for debugfs */ | ||
| 47 | void (*desc)(struct slow_work *work, struct seq_file *m); | ||
| 48 | #endif | ||
| 49 | }; | ||
| 50 | |||
| 51 | /* | ||
| 52 | * A slow work item | ||
| 53 | * - A reference is held on the parent object by the thread pool when it is | ||
| 54 | * queued | ||
| 55 | */ | ||
| 56 | struct slow_work { | ||
| 57 | struct module *owner; /* the owning module */ | ||
| 58 | unsigned long flags; | ||
| 59 | #define SLOW_WORK_PENDING 0 /* item pending (further) execution */ | ||
| 60 | #define SLOW_WORK_EXECUTING 1 /* item currently executing */ | ||
| 61 | #define SLOW_WORK_ENQ_DEFERRED 2 /* item enqueue deferred */ | ||
| 62 | #define SLOW_WORK_VERY_SLOW 3 /* item is very slow */ | ||
| 63 | #define SLOW_WORK_CANCELLING 4 /* item is being cancelled, don't enqueue */ | ||
| 64 | #define SLOW_WORK_DELAYED 5 /* item is struct delayed_slow_work with active timer */ | ||
| 65 | const struct slow_work_ops *ops; /* operations table for this item */ | ||
| 66 | struct list_head link; /* link in queue */ | ||
| 67 | #ifdef CONFIG_SLOW_WORK_DEBUG | ||
| 68 | struct timespec mark; /* jiffies at which queued or exec begun */ | ||
| 69 | #endif | ||
| 70 | }; | ||
| 71 | |||
| 72 | struct delayed_slow_work { | ||
| 73 | struct slow_work work; | ||
| 74 | struct timer_list timer; | ||
| 75 | }; | ||
| 76 | |||
| 77 | /** | ||
| 78 | * slow_work_init - Initialise a slow work item | ||
| 79 | * @work: The work item to initialise | ||
| 80 | * @ops: The operations to use to handle the slow work item | ||
| 81 | * | ||
| 82 | * Initialise a slow work item. | ||
| 83 | */ | ||
| 84 | static inline void slow_work_init(struct slow_work *work, | ||
| 85 | const struct slow_work_ops *ops) | ||
| 86 | { | ||
| 87 | work->flags = 0; | ||
| 88 | work->ops = ops; | ||
| 89 | INIT_LIST_HEAD(&work->link); | ||
| 90 | } | ||
| 91 | |||
| 92 | /** | ||
| 93 | * slow_work_init - Initialise a delayed slow work item | ||
| 94 | * @work: The work item to initialise | ||
| 95 | * @ops: The operations to use to handle the slow work item | ||
| 96 | * | ||
| 97 | * Initialise a delayed slow work item. | ||
| 98 | */ | ||
| 99 | static inline void delayed_slow_work_init(struct delayed_slow_work *dwork, | ||
| 100 | const struct slow_work_ops *ops) | ||
| 101 | { | ||
| 102 | init_timer(&dwork->timer); | ||
| 103 | slow_work_init(&dwork->work, ops); | ||
| 104 | } | ||
| 105 | |||
| 106 | /** | ||
| 107 | * vslow_work_init - Initialise a very slow work item | ||
| 108 | * @work: The work item to initialise | ||
| 109 | * @ops: The operations to use to handle the slow work item | ||
| 110 | * | ||
| 111 | * Initialise a very slow work item. This item will be restricted such that | ||
| 112 | * only a certain number of the pool threads will be able to execute items of | ||
| 113 | * this type. | ||
| 114 | */ | ||
| 115 | static inline void vslow_work_init(struct slow_work *work, | ||
| 116 | const struct slow_work_ops *ops) | ||
| 117 | { | ||
| 118 | work->flags = 1 << SLOW_WORK_VERY_SLOW; | ||
| 119 | work->ops = ops; | ||
| 120 | INIT_LIST_HEAD(&work->link); | ||
| 121 | } | ||
| 122 | |||
| 123 | /** | ||
| 124 | * slow_work_is_queued - Determine if a slow work item is on the work queue | ||
| 125 | * work: The work item to test | ||
| 126 | * | ||
| 127 | * Determine if the specified slow-work item is on the work queue. This | ||
| 128 | * returns true if it is actually on the queue. | ||
| 129 | * | ||
| 130 | * If the item is executing and has been marked for requeue when execution | ||
| 131 | * finishes, then false will be returned. | ||
| 132 | * | ||
| 133 | * Anyone wishing to wait for completion of execution can wait on the | ||
| 134 | * SLOW_WORK_EXECUTING bit. | ||
| 135 | */ | ||
| 136 | static inline bool slow_work_is_queued(struct slow_work *work) | ||
| 137 | { | ||
| 138 | unsigned long flags = work->flags; | ||
| 139 | return flags & SLOW_WORK_PENDING && !(flags & SLOW_WORK_EXECUTING); | ||
| 140 | } | ||
| 141 | |||
| 142 | extern int slow_work_enqueue(struct slow_work *work); | ||
| 143 | extern void slow_work_cancel(struct slow_work *work); | ||
| 144 | extern int slow_work_register_user(struct module *owner); | ||
| 145 | extern void slow_work_unregister_user(struct module *owner); | ||
| 146 | |||
| 147 | extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork, | ||
| 148 | unsigned long delay); | ||
| 149 | |||
| 150 | static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork) | ||
| 151 | { | ||
| 152 | slow_work_cancel(&dwork->work); | ||
| 153 | } | ||
| 154 | |||
| 155 | extern bool slow_work_sleep_till_thread_needed(struct slow_work *work, | ||
| 156 | signed long *_timeout); | ||
| 157 | |||
| 158 | #ifdef CONFIG_SYSCTL | ||
| 159 | extern ctl_table slow_work_sysctls[]; | ||
| 160 | #endif | ||
| 161 | |||
| 162 | #endif /* CONFIG_SLOW_WORK */ | ||
| 163 | #endif /* _LINUX_SLOW_WORK_H */ | ||
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 4ba59cfc1f75..6d14409c4d9a 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
| @@ -10,9 +10,10 @@ | |||
| 10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
| 11 | #include <linux/workqueue.h> | 11 | #include <linux/workqueue.h> |
| 12 | #include <linux/kobject.h> | 12 | #include <linux/kobject.h> |
| 13 | #include <linux/kmemtrace.h> | ||
| 14 | #include <linux/kmemleak.h> | 13 | #include <linux/kmemleak.h> |
| 15 | 14 | ||
| 15 | #include <trace/events/kmem.h> | ||
| 16 | |||
| 16 | enum stat_item { | 17 | enum stat_item { |
| 17 | ALLOC_FASTPATH, /* Allocation from cpu slab */ | 18 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
| 18 | ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ | 19 | ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ |
| @@ -105,15 +106,17 @@ struct kmem_cache { | |||
| 105 | /* | 106 | /* |
| 106 | * Kmalloc subsystem. | 107 | * Kmalloc subsystem. |
| 107 | */ | 108 | */ |
| 108 | #if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8 | 109 | #if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8 |
| 109 | #define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN | 110 | #define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN |
| 110 | #else | 111 | #else |
| 111 | #define KMALLOC_MIN_SIZE 8 | 112 | #define KMALLOC_MIN_SIZE 8 |
| 112 | #endif | 113 | #endif |
| 113 | 114 | ||
| 114 | #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) | 115 | #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE) |
| 115 | 116 | ||
| 116 | #ifndef ARCH_KMALLOC_MINALIGN | 117 | #ifdef ARCH_DMA_MINALIGN |
| 118 | #define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN | ||
| 119 | #else | ||
| 117 | #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) | 120 | #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) |
| 118 | #endif | 121 | #endif |
| 119 | 122 | ||
diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h index 34af0a3477bf..bcaa2f762cc1 100644 --- a/include/linux/spi/max7301.h +++ b/include/linux/spi/max7301.h | |||
| @@ -11,6 +11,7 @@ struct max7301 { | |||
| 11 | struct mutex lock; | 11 | struct mutex lock; |
| 12 | u8 port_config[8]; /* field 0 is unused */ | 12 | u8 port_config[8]; /* field 0 is unused */ |
| 13 | u32 out_level; /* cached output levels */ | 13 | u32 out_level; /* cached output levels */ |
| 14 | u32 input_pullup_active; | ||
| 14 | struct gpio_chip chip; | 15 | struct gpio_chip chip; |
| 15 | struct device *dev; | 16 | struct device *dev; |
| 16 | int (*write)(struct device *dev, unsigned int reg, unsigned int val); | 17 | int (*write)(struct device *dev, unsigned int reg, unsigned int val); |
| @@ -20,6 +21,13 @@ struct max7301 { | |||
| 20 | struct max7301_platform_data { | 21 | struct max7301_platform_data { |
| 21 | /* number assigned to the first GPIO */ | 22 | /* number assigned to the first GPIO */ |
| 22 | unsigned base; | 23 | unsigned base; |
| 24 | /* | ||
| 25 | * bitmask controlling the pullup configuration, | ||
| 26 | * | ||
| 27 | * _note_ the 4 lowest bits are unused, because the first 4 | ||
| 28 | * ports of the controller are not used, too. | ||
| 29 | */ | ||
| 30 | u32 input_pullup_active; | ||
| 23 | }; | 31 | }; |
| 24 | 32 | ||
| 25 | extern int __max730x_remove(struct device *dev); | 33 | extern int __max730x_remove(struct device *dev); |
diff --git a/include/linux/statfs.h b/include/linux/statfs.h index b34cc829f98d..0166d320a75d 100644 --- a/include/linux/statfs.h +++ b/include/linux/statfs.h | |||
| @@ -2,7 +2,6 @@ | |||
| 2 | #define _LINUX_STATFS_H | 2 | #define _LINUX_STATFS_H |
| 3 | 3 | ||
| 4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
| 5 | |||
| 6 | #include <asm/statfs.h> | 5 | #include <asm/statfs.h> |
| 7 | 6 | ||
| 8 | struct kstatfs { | 7 | struct kstatfs { |
| @@ -16,7 +15,29 @@ struct kstatfs { | |||
| 16 | __kernel_fsid_t f_fsid; | 15 | __kernel_fsid_t f_fsid; |
| 17 | long f_namelen; | 16 | long f_namelen; |
| 18 | long f_frsize; | 17 | long f_frsize; |
| 19 | long f_spare[5]; | 18 | long f_flags; |
| 19 | long f_spare[4]; | ||
| 20 | }; | 20 | }; |
| 21 | 21 | ||
| 22 | /* | ||
| 23 | * Definitions for the flag in f_flag. | ||
| 24 | * | ||
| 25 | * Generally these flags are equivalent to the MS_ flags used in the mount | ||
| 26 | * ABI. The exception is ST_VALID which has the same value as MS_REMOUNT | ||
| 27 | * which doesn't make any sense for statfs. | ||
| 28 | */ | ||
| 29 | #define ST_RDONLY 0x0001 /* mount read-only */ | ||
| 30 | #define ST_NOSUID 0x0002 /* ignore suid and sgid bits */ | ||
| 31 | #define ST_NODEV 0x0004 /* disallow access to device special files */ | ||
| 32 | #define ST_NOEXEC 0x0008 /* disallow program execution */ | ||
| 33 | #define ST_SYNCHRONOUS 0x0010 /* writes are synced at once */ | ||
| 34 | #define ST_VALID 0x0020 /* f_flags support is implemented */ | ||
| 35 | #define ST_MANDLOCK 0x0040 /* allow mandatory locks on an FS */ | ||
| 36 | /* 0x0080 used for ST_WRITE in glibc */ | ||
| 37 | /* 0x0100 used for ST_APPEND in glibc */ | ||
| 38 | /* 0x0200 used for ST_IMMUTABLE in glibc */ | ||
| 39 | #define ST_NOATIME 0x0400 /* do not update access times */ | ||
| 40 | #define ST_NODIRATIME 0x0800 /* do not update directory access times */ | ||
| 41 | #define ST_RELATIME 0x1000 /* update atime relative to mtime/ctime */ | ||
| 42 | |||
| 22 | #endif | 43 | #endif |
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index 87d7ec0bf779..5bbc447175dc 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h | |||
| @@ -61,13 +61,7 @@ struct rpc_cred { | |||
| 61 | /* | 61 | /* |
| 62 | * Client authentication handle | 62 | * Client authentication handle |
| 63 | */ | 63 | */ |
| 64 | #define RPC_CREDCACHE_HASHBITS 4 | 64 | struct rpc_cred_cache; |
| 65 | #define RPC_CREDCACHE_NR (1 << RPC_CREDCACHE_HASHBITS) | ||
| 66 | struct rpc_cred_cache { | ||
| 67 | struct hlist_head hashtable[RPC_CREDCACHE_NR]; | ||
| 68 | spinlock_t lock; | ||
| 69 | }; | ||
| 70 | |||
| 71 | struct rpc_authops; | 65 | struct rpc_authops; |
| 72 | struct rpc_auth { | 66 | struct rpc_auth { |
| 73 | unsigned int au_cslack; /* call cred size estimate */ | 67 | unsigned int au_cslack; /* call cred size estimate */ |
| @@ -112,7 +106,7 @@ struct rpc_credops { | |||
| 112 | void (*crdestroy)(struct rpc_cred *); | 106 | void (*crdestroy)(struct rpc_cred *); |
| 113 | 107 | ||
| 114 | int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); | 108 | int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); |
| 115 | void (*crbind)(struct rpc_task *, struct rpc_cred *, int); | 109 | struct rpc_cred * (*crbind)(struct rpc_task *, struct rpc_cred *, int); |
| 116 | __be32 * (*crmarshal)(struct rpc_task *, __be32 *); | 110 | __be32 * (*crmarshal)(struct rpc_task *, __be32 *); |
| 117 | int (*crrefresh)(struct rpc_task *); | 111 | int (*crrefresh)(struct rpc_task *); |
| 118 | __be32 * (*crvalidate)(struct rpc_task *, __be32 *); | 112 | __be32 * (*crvalidate)(struct rpc_task *, __be32 *); |
| @@ -125,11 +119,12 @@ struct rpc_credops { | |||
| 125 | extern const struct rpc_authops authunix_ops; | 119 | extern const struct rpc_authops authunix_ops; |
| 126 | extern const struct rpc_authops authnull_ops; | 120 | extern const struct rpc_authops authnull_ops; |
| 127 | 121 | ||
| 128 | void __init rpc_init_authunix(void); | 122 | int __init rpc_init_authunix(void); |
| 129 | void __init rpc_init_generic_auth(void); | 123 | int __init rpc_init_generic_auth(void); |
| 130 | void __init rpcauth_init_module(void); | 124 | int __init rpcauth_init_module(void); |
| 131 | void __exit rpcauth_remove_module(void); | 125 | void __exit rpcauth_remove_module(void); |
| 132 | void __exit rpc_destroy_generic_auth(void); | 126 | void __exit rpc_destroy_generic_auth(void); |
| 127 | void rpc_destroy_authunix(void); | ||
| 133 | 128 | ||
| 134 | struct rpc_cred * rpc_lookup_cred(void); | 129 | struct rpc_cred * rpc_lookup_cred(void); |
| 135 | struct rpc_cred * rpc_lookup_machine_cred(void); | 130 | struct rpc_cred * rpc_lookup_machine_cred(void); |
| @@ -140,10 +135,8 @@ void rpcauth_release(struct rpc_auth *); | |||
| 140 | struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int); | 135 | struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int); |
| 141 | void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); | 136 | void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); |
| 142 | struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); | 137 | struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); |
| 143 | void rpcauth_bindcred(struct rpc_task *, struct rpc_cred *, int); | 138 | struct rpc_cred * rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); |
| 144 | void rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *, int); | ||
| 145 | void put_rpccred(struct rpc_cred *); | 139 | void put_rpccred(struct rpc_cred *); |
| 146 | void rpcauth_unbindcred(struct rpc_task *); | ||
| 147 | __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); | 140 | __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); |
| 148 | __be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); | 141 | __be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); |
| 149 | int rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, __be32 *data, void *obj); | 142 | int rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, __be32 *data, void *obj); |
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index 6f52b4d7c447..7bf3e84b92f4 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h | |||
| @@ -192,6 +192,7 @@ extern int cache_check(struct cache_detail *detail, | |||
| 192 | extern void cache_flush(void); | 192 | extern void cache_flush(void); |
| 193 | extern void cache_purge(struct cache_detail *detail); | 193 | extern void cache_purge(struct cache_detail *detail); |
| 194 | #define NEVER (0x7FFFFFFF) | 194 | #define NEVER (0x7FFFFFFF) |
| 195 | extern void __init cache_initialize(void); | ||
| 195 | extern int cache_register(struct cache_detail *cd); | 196 | extern int cache_register(struct cache_detail *cd); |
| 196 | extern void cache_unregister(struct cache_detail *cd); | 197 | extern void cache_unregister(struct cache_detail *cd); |
| 197 | 198 | ||
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 8ed9642a5a76..569dc722a600 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
| @@ -131,6 +131,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, | |||
| 131 | struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); | 131 | struct rpc_clnt *rpc_clone_client(struct rpc_clnt *); |
| 132 | void rpc_shutdown_client(struct rpc_clnt *); | 132 | void rpc_shutdown_client(struct rpc_clnt *); |
| 133 | void rpc_release_client(struct rpc_clnt *); | 133 | void rpc_release_client(struct rpc_clnt *); |
| 134 | void rpc_task_release_client(struct rpc_task *); | ||
| 134 | 135 | ||
| 135 | int rpcb_register(u32, u32, int, unsigned short); | 136 | int rpcb_register(u32, u32, int, unsigned short); |
| 136 | int rpcb_v4_register(const u32 program, const u32 version, | 137 | int rpcb_v4_register(const u32 program, const u32 version, |
| @@ -148,8 +149,8 @@ int rpc_call_sync(struct rpc_clnt *clnt, | |||
| 148 | const struct rpc_message *msg, int flags); | 149 | const struct rpc_message *msg, int flags); |
| 149 | struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, | 150 | struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, |
| 150 | int flags); | 151 | int flags); |
| 151 | void rpc_restart_call_prepare(struct rpc_task *); | 152 | int rpc_restart_call_prepare(struct rpc_task *); |
| 152 | void rpc_restart_call(struct rpc_task *); | 153 | int rpc_restart_call(struct rpc_task *); |
| 153 | void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); | 154 | void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); |
| 154 | size_t rpc_max_payload(struct rpc_clnt *); | 155 | size_t rpc_max_payload(struct rpc_clnt *); |
| 155 | void rpc_force_rebind(struct rpc_clnt *); | 156 | void rpc_force_rebind(struct rpc_clnt *); |
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 7be4f3a6d246..88513fd8e208 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
| @@ -213,6 +213,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, | |||
| 213 | const struct rpc_call_ops *ops); | 213 | const struct rpc_call_ops *ops); |
| 214 | void rpc_put_task(struct rpc_task *); | 214 | void rpc_put_task(struct rpc_task *); |
| 215 | void rpc_exit_task(struct rpc_task *); | 215 | void rpc_exit_task(struct rpc_task *); |
| 216 | void rpc_exit(struct rpc_task *, int); | ||
| 216 | void rpc_release_calldata(const struct rpc_call_ops *, void *); | 217 | void rpc_release_calldata(const struct rpc_call_ops *, void *); |
| 217 | void rpc_killall_tasks(struct rpc_clnt *); | 218 | void rpc_killall_tasks(struct rpc_clnt *); |
| 218 | void rpc_execute(struct rpc_task *); | 219 | void rpc_execute(struct rpc_task *); |
| @@ -241,12 +242,6 @@ void rpc_destroy_mempool(void); | |||
| 241 | extern struct workqueue_struct *rpciod_workqueue; | 242 | extern struct workqueue_struct *rpciod_workqueue; |
| 242 | void rpc_prepare_task(struct rpc_task *task); | 243 | void rpc_prepare_task(struct rpc_task *task); |
| 243 | 244 | ||
| 244 | static inline void rpc_exit(struct rpc_task *task, int status) | ||
| 245 | { | ||
| 246 | task->tk_status = status; | ||
| 247 | task->tk_action = rpc_exit_task; | ||
| 248 | } | ||
| 249 | |||
| 250 | static inline int rpc_wait_for_completion_task(struct rpc_task *task) | 245 | static inline int rpc_wait_for_completion_task(struct rpc_task *task) |
| 251 | { | 246 | { |
| 252 | return __rpc_wait_for_completion_task(task, NULL); | 247 | return __rpc_wait_for_completion_task(task, NULL); |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index b51470302399..ff5a77b28c50 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
| @@ -64,6 +64,7 @@ struct rpc_rqst { | |||
| 64 | * This is the private part | 64 | * This is the private part |
| 65 | */ | 65 | */ |
| 66 | struct rpc_task * rq_task; /* RPC task data */ | 66 | struct rpc_task * rq_task; /* RPC task data */ |
| 67 | struct rpc_cred * rq_cred; /* Bound cred */ | ||
| 67 | __be32 rq_xid; /* request XID */ | 68 | __be32 rq_xid; /* request XID */ |
| 68 | int rq_cong; /* has incremented xprt->cong */ | 69 | int rq_cong; /* has incremented xprt->cong */ |
| 69 | u32 rq_seqno; /* gss seq no. used on req. */ | 70 | u32 rq_seqno; /* gss seq no. used on req. */ |
diff --git a/include/linux/swap.h b/include/linux/swap.h index ff4acea9bbdb..2fee51a11b73 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -244,8 +244,7 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, | |||
| 244 | extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, | 244 | extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, |
| 245 | gfp_t gfp_mask, bool noswap, | 245 | gfp_t gfp_mask, bool noswap, |
| 246 | unsigned int swappiness, | 246 | unsigned int swappiness, |
| 247 | struct zone *zone, | 247 | struct zone *zone); |
| 248 | int nid); | ||
| 249 | extern int __isolate_lru_page(struct page *page, int mode, int file); | 248 | extern int __isolate_lru_page(struct page *page, int mode, int file); |
| 250 | extern unsigned long shrink_all_memory(unsigned long nr_pages); | 249 | extern unsigned long shrink_all_memory(unsigned long nr_pages); |
| 251 | extern int vm_swappiness; | 250 | extern int vm_swappiness; |
| @@ -316,7 +315,6 @@ extern long nr_swap_pages; | |||
| 316 | extern long total_swap_pages; | 315 | extern long total_swap_pages; |
| 317 | extern void si_swapinfo(struct sysinfo *); | 316 | extern void si_swapinfo(struct sysinfo *); |
| 318 | extern swp_entry_t get_swap_page(void); | 317 | extern swp_entry_t get_swap_page(void); |
| 319 | extern swp_entry_t get_swap_page_of_type(int); | ||
| 320 | extern int valid_swaphandles(swp_entry_t, unsigned long *); | 318 | extern int valid_swaphandles(swp_entry_t, unsigned long *); |
| 321 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); | 319 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); |
| 322 | extern void swap_shmem_alloc(swp_entry_t); | 320 | extern void swap_shmem_alloc(swp_entry_t); |
| @@ -333,6 +331,13 @@ extern int reuse_swap_page(struct page *); | |||
| 333 | extern int try_to_free_swap(struct page *); | 331 | extern int try_to_free_swap(struct page *); |
| 334 | struct backing_dev_info; | 332 | struct backing_dev_info; |
| 335 | 333 | ||
| 334 | #ifdef CONFIG_HIBERNATION | ||
| 335 | void hibernation_freeze_swap(void); | ||
| 336 | void hibernation_thaw_swap(void); | ||
| 337 | swp_entry_t get_swap_for_hibernation(int type); | ||
| 338 | void swap_free_for_hibernation(swp_entry_t val); | ||
| 339 | #endif | ||
| 340 | |||
| 336 | /* linux/mm/thrash.c */ | 341 | /* linux/mm/thrash.c */ |
| 337 | extern struct mm_struct *swap_token_mm; | 342 | extern struct mm_struct *swap_token_mm; |
| 338 | extern void grab_swap_token(struct mm_struct *); | 343 | extern void grab_swap_token(struct mm_struct *); |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 13ebb5413a79..1b67bd333b5e 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -35,6 +35,7 @@ struct oldold_utsname; | |||
| 35 | struct old_utsname; | 35 | struct old_utsname; |
| 36 | struct pollfd; | 36 | struct pollfd; |
| 37 | struct rlimit; | 37 | struct rlimit; |
| 38 | struct rlimit64; | ||
| 38 | struct rusage; | 39 | struct rusage; |
| 39 | struct sched_param; | 40 | struct sched_param; |
| 40 | struct sel_arg_struct; | 41 | struct sel_arg_struct; |
| @@ -167,7 +168,6 @@ extern struct trace_event_functions exit_syscall_print_funcs; | |||
| 167 | .enter_event = &event_enter_##sname, \ | 168 | .enter_event = &event_enter_##sname, \ |
| 168 | .exit_event = &event_exit_##sname, \ | 169 | .exit_event = &event_exit_##sname, \ |
| 169 | .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \ | 170 | .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \ |
| 170 | .exit_fields = LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \ | ||
| 171 | }; | 171 | }; |
| 172 | 172 | ||
| 173 | #define SYSCALL_DEFINE0(sname) \ | 173 | #define SYSCALL_DEFINE0(sname) \ |
| @@ -182,7 +182,6 @@ extern struct trace_event_functions exit_syscall_print_funcs; | |||
| 182 | .enter_event = &event_enter__##sname, \ | 182 | .enter_event = &event_enter__##sname, \ |
| 183 | .exit_event = &event_exit__##sname, \ | 183 | .exit_event = &event_exit__##sname, \ |
| 184 | .enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \ | 184 | .enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \ |
| 185 | .exit_fields = LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \ | ||
| 186 | }; \ | 185 | }; \ |
| 187 | asmlinkage long sys_##sname(void) | 186 | asmlinkage long sys_##sname(void) |
| 188 | #else | 187 | #else |
| @@ -646,6 +645,9 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r | |||
| 646 | #endif | 645 | #endif |
| 647 | asmlinkage long sys_setrlimit(unsigned int resource, | 646 | asmlinkage long sys_setrlimit(unsigned int resource, |
| 648 | struct rlimit __user *rlim); | 647 | struct rlimit __user *rlim); |
| 648 | asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource, | ||
| 649 | const struct rlimit64 __user *new_rlim, | ||
| 650 | struct rlimit64 __user *old_rlim); | ||
| 649 | asmlinkage long sys_getrusage(int who, struct rusage __user *ru); | 651 | asmlinkage long sys_getrusage(int who, struct rusage __user *ru); |
| 650 | asmlinkage long sys_umask(int mask); | 652 | asmlinkage long sys_umask(int mask); |
| 651 | 653 | ||
| @@ -813,6 +815,10 @@ asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *, | |||
| 813 | asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, | 815 | asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int, |
| 814 | struct timespec __user *, const sigset_t __user *, | 816 | struct timespec __user *, const sigset_t __user *, |
| 815 | size_t); | 817 | size_t); |
| 818 | asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags); | ||
| 819 | asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, | ||
| 820 | u64 mask, int fd, | ||
| 821 | const char __user *pathname); | ||
| 816 | 822 | ||
| 817 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); | 823 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); |
| 818 | 824 | ||
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index f2694eb4dd3d..3c92121ba9af 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
| @@ -22,14 +22,8 @@ struct kobject; | |||
| 22 | struct module; | 22 | struct module; |
| 23 | enum kobj_ns_type; | 23 | enum kobj_ns_type; |
| 24 | 24 | ||
| 25 | /* FIXME | ||
| 26 | * The *owner field is no longer used. | ||
| 27 | * x86 tree has been cleaned up. The owner | ||
| 28 | * attribute is still left for other arches. | ||
| 29 | */ | ||
| 30 | struct attribute { | 25 | struct attribute { |
| 31 | const char *name; | 26 | const char *name; |
| 32 | struct module *owner; | ||
| 33 | mode_t mode; | 27 | mode_t mode; |
| 34 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 28 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 35 | struct lock_class_key *key; | 29 | struct lock_class_key *key; |
| @@ -136,8 +130,8 @@ int __must_check sysfs_create_file(struct kobject *kobj, | |||
| 136 | const struct attribute *attr); | 130 | const struct attribute *attr); |
| 137 | int __must_check sysfs_create_files(struct kobject *kobj, | 131 | int __must_check sysfs_create_files(struct kobject *kobj, |
| 138 | const struct attribute **attr); | 132 | const struct attribute **attr); |
| 139 | int __must_check sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, | 133 | int __must_check sysfs_chmod_file(struct kobject *kobj, |
| 140 | mode_t mode); | 134 | const struct attribute *attr, mode_t mode); |
| 141 | void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); | 135 | void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); |
| 142 | void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); | 136 | void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); |
| 143 | 137 | ||
| @@ -225,7 +219,7 @@ static inline int sysfs_create_files(struct kobject *kobj, | |||
| 225 | } | 219 | } |
| 226 | 220 | ||
| 227 | static inline int sysfs_chmod_file(struct kobject *kobj, | 221 | static inline int sysfs_chmod_file(struct kobject *kobj, |
| 228 | struct attribute *attr, mode_t mode) | 222 | const struct attribute *attr, mode_t mode) |
| 229 | { | 223 | { |
| 230 | return 0; | 224 | return 0; |
| 231 | } | 225 | } |
diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h index 96411306eec6..e47d6d90023d 100644 --- a/include/linux/sysv_fs.h +++ b/include/linux/sysv_fs.h | |||
| @@ -148,6 +148,17 @@ struct v7_super_block { | |||
| 148 | char s_fname[6]; /* file system name */ | 148 | char s_fname[6]; /* file system name */ |
| 149 | char s_fpack[6]; /* file system pack name */ | 149 | char s_fpack[6]; /* file system pack name */ |
| 150 | }; | 150 | }; |
| 151 | /* Constants to aid sanity checking */ | ||
| 152 | /* This is not a hard limit, nor enforced by v7 kernel. It's actually just | ||
| 153 | * the limit used by Seventh Edition's ls, though is high enough to assume | ||
| 154 | * that no reasonable file system would have that much entries in root | ||
| 155 | * directory. Thus, if we see anything higher, we just probably got the | ||
| 156 | * endiannes wrong. */ | ||
| 157 | #define V7_NFILES 1024 | ||
| 158 | /* The disk addresses are three-byte (despite direct block addresses being | ||
| 159 | * aligned word-wise in inode). If the most significant byte is non-zero, | ||
| 160 | * something is most likely wrong (not a filesystem, bad bytesex). */ | ||
| 161 | #define V7_MAXSIZE 0x00ffffff | ||
| 151 | 162 | ||
| 152 | /* Coherent super-block data on disk */ | 163 | /* Coherent super-block data on disk */ |
| 153 | #define COH_NICINOD 100 /* number of inode cache entries */ | 164 | #define COH_NICINOD 100 /* number of inode cache entries */ |
diff --git a/include/linux/time.h b/include/linux/time.h index ea3559f0b3f2..cb34e35fabac 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
| @@ -76,9 +76,25 @@ extern unsigned long mktime(const unsigned int year, const unsigned int mon, | |||
| 76 | const unsigned int min, const unsigned int sec); | 76 | const unsigned int min, const unsigned int sec); |
| 77 | 77 | ||
| 78 | extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); | 78 | extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); |
| 79 | |||
| 80 | /* | ||
| 81 | * timespec_add_safe assumes both values are positive and checks | ||
| 82 | * for overflow. It will return TIME_T_MAX if the reutrn would be | ||
| 83 | * smaller then either of the arguments. | ||
| 84 | */ | ||
| 79 | extern struct timespec timespec_add_safe(const struct timespec lhs, | 85 | extern struct timespec timespec_add_safe(const struct timespec lhs, |
| 80 | const struct timespec rhs); | 86 | const struct timespec rhs); |
| 81 | 87 | ||
| 88 | |||
| 89 | static inline struct timespec timespec_add(struct timespec lhs, | ||
| 90 | struct timespec rhs) | ||
| 91 | { | ||
| 92 | struct timespec ts_delta; | ||
| 93 | set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, | ||
| 94 | lhs.tv_nsec + rhs.tv_nsec); | ||
| 95 | return ts_delta; | ||
| 96 | } | ||
| 97 | |||
| 82 | /* | 98 | /* |
| 83 | * sub = lhs - rhs, in normalized form | 99 | * sub = lhs - rhs, in normalized form |
| 84 | */ | 100 | */ |
| @@ -97,8 +113,6 @@ static inline struct timespec timespec_sub(struct timespec lhs, | |||
| 97 | #define timespec_valid(ts) \ | 113 | #define timespec_valid(ts) \ |
| 98 | (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) | 114 | (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) |
| 99 | 115 | ||
| 100 | extern struct timespec xtime; | ||
| 101 | extern struct timespec wall_to_monotonic; | ||
| 102 | extern seqlock_t xtime_lock; | 116 | extern seqlock_t xtime_lock; |
| 103 | 117 | ||
| 104 | extern void read_persistent_clock(struct timespec *ts); | 118 | extern void read_persistent_clock(struct timespec *ts); |
| @@ -110,7 +124,8 @@ extern int timekeeping_suspended; | |||
| 110 | 124 | ||
| 111 | unsigned long get_seconds(void); | 125 | unsigned long get_seconds(void); |
| 112 | struct timespec current_kernel_time(void); | 126 | struct timespec current_kernel_time(void); |
| 113 | struct timespec __current_kernel_time(void); /* does not hold xtime_lock */ | 127 | struct timespec __current_kernel_time(void); /* does not take xtime_lock */ |
| 128 | struct timespec __get_wall_to_monotonic(void); /* does not take xtime_lock */ | ||
| 114 | struct timespec get_monotonic_coarse(void); | 129 | struct timespec get_monotonic_coarse(void); |
| 115 | 130 | ||
| 116 | #define CURRENT_TIME (current_kernel_time()) | 131 | #define CURRENT_TIME (current_kernel_time()) |
diff --git a/include/linux/topology.h b/include/linux/topology.h index c44df50a05ab..64e084ff5e5c 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
| @@ -103,6 +103,7 @@ int arch_update_cpu_topology(void); | |||
| 103 | | 1*SD_SHARE_PKG_RESOURCES \ | 103 | | 1*SD_SHARE_PKG_RESOURCES \ |
| 104 | | 0*SD_SERIALIZE \ | 104 | | 0*SD_SERIALIZE \ |
| 105 | | 0*SD_PREFER_SIBLING \ | 105 | | 0*SD_PREFER_SIBLING \ |
| 106 | | arch_sd_sibling_asym_packing() \ | ||
| 106 | , \ | 107 | , \ |
| 107 | .last_balance = jiffies, \ | 108 | .last_balance = jiffies, \ |
| 108 | .balance_interval = 1, \ | 109 | .balance_interval = 1, \ |
| @@ -291,10 +292,6 @@ static inline void set_cpu_numa_mem(int cpu, int node) | |||
| 291 | 292 | ||
| 292 | #else /* !CONFIG_HAVE_MEMORYLESS_NODES */ | 293 | #else /* !CONFIG_HAVE_MEMORYLESS_NODES */ |
| 293 | 294 | ||
| 294 | static inline void set_numa_mem(int node) {} | ||
| 295 | |||
| 296 | static inline void set_cpu_numa_mem(int cpu, int node) {} | ||
| 297 | |||
| 298 | #ifndef numa_mem_id | 295 | #ifndef numa_mem_id |
| 299 | /* Returns the number of the nearest Node with memory */ | 296 | /* Returns the number of the nearest Node with memory */ |
| 300 | static inline int numa_mem_id(void) | 297 | static inline int numa_mem_id(void) |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 931078b73226..1437da3ddc62 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/tty_driver.h> | 13 | #include <linux/tty_driver.h> |
| 14 | #include <linux/tty_ldisc.h> | 14 | #include <linux/tty_ldisc.h> |
| 15 | #include <linux/mutex.h> | 15 | #include <linux/mutex.h> |
| 16 | #include <linux/smp_lock.h> | ||
| 16 | 17 | ||
| 17 | #include <asm/system.h> | 18 | #include <asm/system.h> |
| 18 | 19 | ||
| @@ -179,6 +180,7 @@ struct tty_bufhead { | |||
| 179 | #define L_FLUSHO(tty) _L_FLAG((tty), FLUSHO) | 180 | #define L_FLUSHO(tty) _L_FLAG((tty), FLUSHO) |
| 180 | #define L_PENDIN(tty) _L_FLAG((tty), PENDIN) | 181 | #define L_PENDIN(tty) _L_FLAG((tty), PENDIN) |
| 181 | #define L_IEXTEN(tty) _L_FLAG((tty), IEXTEN) | 182 | #define L_IEXTEN(tty) _L_FLAG((tty), IEXTEN) |
| 183 | #define L_EXTPROC(tty) _L_FLAG((tty), EXTPROC) | ||
| 182 | 184 | ||
| 183 | struct device; | 185 | struct device; |
| 184 | struct signal_struct; | 186 | struct signal_struct; |
| @@ -415,6 +417,7 @@ extern int is_ignored(int sig); | |||
| 415 | extern int tty_signal(int sig, struct tty_struct *tty); | 417 | extern int tty_signal(int sig, struct tty_struct *tty); |
| 416 | extern void tty_hangup(struct tty_struct *tty); | 418 | extern void tty_hangup(struct tty_struct *tty); |
| 417 | extern void tty_vhangup(struct tty_struct *tty); | 419 | extern void tty_vhangup(struct tty_struct *tty); |
| 420 | extern void tty_vhangup_locked(struct tty_struct *tty); | ||
| 418 | extern void tty_vhangup_self(void); | 421 | extern void tty_vhangup_self(void); |
| 419 | extern void tty_unhangup(struct file *filp); | 422 | extern void tty_unhangup(struct file *filp); |
| 420 | extern int tty_hung_up_p(struct file *filp); | 423 | extern int tty_hung_up_p(struct file *filp); |
| @@ -552,6 +555,9 @@ static inline void tty_audit_push_task(struct task_struct *tsk, | |||
| 552 | } | 555 | } |
| 553 | #endif | 556 | #endif |
| 554 | 557 | ||
| 558 | /* tty_io.c */ | ||
| 559 | extern int __init tty_init(void); | ||
| 560 | |||
| 555 | /* tty_ioctl.c */ | 561 | /* tty_ioctl.c */ |
| 556 | extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, | 562 | extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, |
| 557 | unsigned int cmd, unsigned long arg); | 563 | unsigned int cmd, unsigned long arg); |
| @@ -572,5 +578,54 @@ extern int vt_ioctl(struct tty_struct *tty, struct file *file, | |||
| 572 | extern long vt_compat_ioctl(struct tty_struct *tty, struct file * file, | 578 | extern long vt_compat_ioctl(struct tty_struct *tty, struct file * file, |
| 573 | unsigned int cmd, unsigned long arg); | 579 | unsigned int cmd, unsigned long arg); |
| 574 | 580 | ||
| 581 | /* tty_mutex.c */ | ||
| 582 | /* functions for preparation of BKL removal */ | ||
| 583 | extern void __lockfunc tty_lock(void) __acquires(tty_lock); | ||
| 584 | extern void __lockfunc tty_unlock(void) __releases(tty_lock); | ||
| 585 | extern struct task_struct *__big_tty_mutex_owner; | ||
| 586 | #define tty_locked() (current == __big_tty_mutex_owner) | ||
| 587 | |||
| 588 | /* | ||
| 589 | * wait_event_interruptible_tty -- wait for a condition with the tty lock held | ||
| 590 | * | ||
| 591 | * The condition we are waiting for might take a long time to | ||
| 592 | * become true, or might depend on another thread taking the | ||
| 593 | * BTM. In either case, we need to drop the BTM to guarantee | ||
| 594 | * forward progress. This is a leftover from the conversion | ||
| 595 | * from the BKL and should eventually get removed as the BTM | ||
| 596 | * falls out of use. | ||
| 597 | * | ||
| 598 | * Do not use in new code. | ||
| 599 | */ | ||
| 600 | #define wait_event_interruptible_tty(wq, condition) \ | ||
| 601 | ({ \ | ||
| 602 | int __ret = 0; \ | ||
| 603 | if (!(condition)) { \ | ||
| 604 | __wait_event_interruptible_tty(wq, condition, __ret); \ | ||
| 605 | } \ | ||
| 606 | __ret; \ | ||
| 607 | }) | ||
| 608 | |||
| 609 | #define __wait_event_interruptible_tty(wq, condition, ret) \ | ||
| 610 | do { \ | ||
| 611 | DEFINE_WAIT(__wait); \ | ||
| 612 | \ | ||
| 613 | for (;;) { \ | ||
| 614 | prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ | ||
| 615 | if (condition) \ | ||
| 616 | break; \ | ||
| 617 | if (!signal_pending(current)) { \ | ||
| 618 | tty_unlock(); \ | ||
| 619 | schedule(); \ | ||
| 620 | tty_lock(); \ | ||
| 621 | continue; \ | ||
| 622 | } \ | ||
| 623 | ret = -ERESTARTSYS; \ | ||
| 624 | break; \ | ||
| 625 | } \ | ||
| 626 | finish_wait(&wq, &__wait); \ | ||
| 627 | } while (0) | ||
| 628 | |||
| 629 | |||
| 575 | #endif /* __KERNEL__ */ | 630 | #endif /* __KERNEL__ */ |
| 576 | #endif | 631 | #endif |
diff --git a/include/linux/types.h b/include/linux/types.h index 331d8baabcf2..01a082f56ef4 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
| @@ -200,6 +200,18 @@ typedef struct { | |||
| 200 | } atomic64_t; | 200 | } atomic64_t; |
| 201 | #endif | 201 | #endif |
| 202 | 202 | ||
| 203 | struct list_head { | ||
| 204 | struct list_head *next, *prev; | ||
| 205 | }; | ||
| 206 | |||
| 207 | struct hlist_head { | ||
| 208 | struct hlist_node *first; | ||
| 209 | }; | ||
| 210 | |||
| 211 | struct hlist_node { | ||
| 212 | struct hlist_node *next, **pprev; | ||
| 213 | }; | ||
| 214 | |||
| 203 | struct ustat { | 215 | struct ustat { |
| 204 | __kernel_daddr_t f_tfree; | 216 | __kernel_daddr_t f_tfree; |
| 205 | __kernel_ino_t f_tinode; | 217 | __kernel_ino_t f_tinode; |
diff --git a/include/linux/usb.h b/include/linux/usb.h index d5922a877994..35fe6ab222bb 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
| @@ -127,6 +127,8 @@ enum usb_interface_condition { | |||
| 127 | * queued reset so that usb_cancel_queued_reset() doesn't try to | 127 | * queued reset so that usb_cancel_queued_reset() doesn't try to |
| 128 | * remove from the workqueue when running inside the worker | 128 | * remove from the workqueue when running inside the worker |
| 129 | * thread. See __usb_queue_reset_device(). | 129 | * thread. See __usb_queue_reset_device(). |
| 130 | * @resetting_device: USB core reset the device, so use alt setting 0 as | ||
| 131 | * current; needs bandwidth alloc after reset. | ||
| 130 | * | 132 | * |
| 131 | * USB device drivers attach to interfaces on a physical device. Each | 133 | * USB device drivers attach to interfaces on a physical device. Each |
| 132 | * interface encapsulates a single high level function, such as feeding | 134 | * interface encapsulates a single high level function, such as feeding |
| @@ -843,7 +845,7 @@ struct usb_driver { | |||
| 843 | 845 | ||
| 844 | void (*disconnect) (struct usb_interface *intf); | 846 | void (*disconnect) (struct usb_interface *intf); |
| 845 | 847 | ||
| 846 | int (*ioctl) (struct usb_interface *intf, unsigned int code, | 848 | int (*unlocked_ioctl) (struct usb_interface *intf, unsigned int code, |
| 847 | void *buf); | 849 | void *buf); |
| 848 | 850 | ||
| 849 | int (*suspend) (struct usb_interface *intf, pm_message_t message); | 851 | int (*suspend) (struct usb_interface *intf, pm_message_t message); |
| @@ -1015,6 +1017,7 @@ typedef void (*usb_complete_t)(struct urb *); | |||
| 1015 | * is a different endpoint (and pipe) from "out" endpoint two. | 1017 | * is a different endpoint (and pipe) from "out" endpoint two. |
| 1016 | * The current configuration controls the existence, type, and | 1018 | * The current configuration controls the existence, type, and |
| 1017 | * maximum packet size of any given endpoint. | 1019 | * maximum packet size of any given endpoint. |
| 1020 | * @stream_id: the endpoint's stream ID for bulk streams | ||
| 1018 | * @dev: Identifies the USB device to perform the request. | 1021 | * @dev: Identifies the USB device to perform the request. |
| 1019 | * @status: This is read in non-iso completion functions to get the | 1022 | * @status: This is read in non-iso completion functions to get the |
| 1020 | * status of the particular request. ISO requests only use it | 1023 | * status of the particular request. ISO requests only use it |
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index 383b94ba8c20..964cb603f7c7 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h | |||
| @@ -18,6 +18,21 @@ | |||
| 18 | /* v1.0 and v2.0 of this standard have many things in common. For the rest | 18 | /* v1.0 and v2.0 of this standard have many things in common. For the rest |
| 19 | * of the definitions, please refer to audio.h */ | 19 | * of the definitions, please refer to audio.h */ |
| 20 | 20 | ||
| 21 | /* | ||
| 22 | * bmControl field decoders | ||
| 23 | * | ||
| 24 | * From the USB Audio spec v2.0: | ||
| 25 | * | ||
| 26 | * bmaControls() is a (ch+1)-element array of 4-byte bitmaps, | ||
| 27 | * each containing a set of bit pairs. If a Control is present, | ||
| 28 | * it must be Host readable. If a certain Control is not | ||
| 29 | * present then the bit pair must be set to 0b00. | ||
| 30 | * If a Control is present but read-only, the bit pair must be | ||
| 31 | * set to 0b01. If a Control is also Host programmable, the bit | ||
| 32 | * pair must be set to 0b11. The value 0b10 is not allowed. | ||
| 33 | * | ||
| 34 | */ | ||
| 35 | |||
| 21 | static inline bool uac2_control_is_readable(u32 bmControls, u8 control) | 36 | static inline bool uac2_control_is_readable(u32 bmControls, u8 control) |
| 22 | { | 37 | { |
| 23 | return (bmControls >> (control * 2)) & 0x1; | 38 | return (bmControls >> (control * 2)) & 0x1; |
| @@ -121,7 +136,7 @@ struct uac2_feature_unit_descriptor { | |||
| 121 | 136 | ||
| 122 | /* 4.9.2 Class-Specific AS Interface Descriptor */ | 137 | /* 4.9.2 Class-Specific AS Interface Descriptor */ |
| 123 | 138 | ||
| 124 | struct uac_as_header_descriptor_v2 { | 139 | struct uac2_as_header_descriptor { |
| 125 | __u8 bLength; | 140 | __u8 bLength; |
| 126 | __u8 bDescriptorType; | 141 | __u8 bDescriptorType; |
| 127 | __u8 bDescriptorSubtype; | 142 | __u8 bDescriptorSubtype; |
diff --git a/include/linux/usb/audio.h b/include/linux/usb/audio.h index c51200c715e5..a54b8255d75f 100644 --- a/include/linux/usb/audio.h +++ b/include/linux/usb/audio.h | |||
| @@ -39,8 +39,8 @@ | |||
| 39 | #define UAC_MIXER_UNIT 0x04 | 39 | #define UAC_MIXER_UNIT 0x04 |
| 40 | #define UAC_SELECTOR_UNIT 0x05 | 40 | #define UAC_SELECTOR_UNIT 0x05 |
| 41 | #define UAC_FEATURE_UNIT 0x06 | 41 | #define UAC_FEATURE_UNIT 0x06 |
| 42 | #define UAC_PROCESSING_UNIT_V1 0x07 | 42 | #define UAC1_PROCESSING_UNIT 0x07 |
| 43 | #define UAC_EXTENSION_UNIT_V1 0x08 | 43 | #define UAC1_EXTENSION_UNIT 0x08 |
| 44 | 44 | ||
| 45 | /* A.6 Audio Class-Specific AS Interface Descriptor Subtypes */ | 45 | /* A.6 Audio Class-Specific AS Interface Descriptor Subtypes */ |
| 46 | #define UAC_AS_GENERAL 0x01 | 46 | #define UAC_AS_GENERAL 0x01 |
| @@ -151,7 +151,7 @@ | |||
| 151 | 151 | ||
| 152 | /* Terminal Control Selectors */ | 152 | /* Terminal Control Selectors */ |
| 153 | /* 4.3.2 Class-Specific AC Interface Descriptor */ | 153 | /* 4.3.2 Class-Specific AC Interface Descriptor */ |
| 154 | struct uac_ac_header_descriptor_v1 { | 154 | struct uac1_ac_header_descriptor { |
| 155 | __u8 bLength; /* 8 + n */ | 155 | __u8 bLength; /* 8 + n */ |
| 156 | __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ | 156 | __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ |
| 157 | __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ | 157 | __u8 bDescriptorSubtype; /* UAC_MS_HEADER */ |
| @@ -165,7 +165,7 @@ struct uac_ac_header_descriptor_v1 { | |||
| 165 | 165 | ||
| 166 | /* As above, but more useful for defining your own descriptors: */ | 166 | /* As above, but more useful for defining your own descriptors: */ |
| 167 | #define DECLARE_UAC_AC_HEADER_DESCRIPTOR(n) \ | 167 | #define DECLARE_UAC_AC_HEADER_DESCRIPTOR(n) \ |
| 168 | struct uac_ac_header_descriptor_v1_##n { \ | 168 | struct uac1_ac_header_descriptor_##n { \ |
| 169 | __u8 bLength; \ | 169 | __u8 bLength; \ |
| 170 | __u8 bDescriptorType; \ | 170 | __u8 bDescriptorType; \ |
| 171 | __u8 bDescriptorSubtype; \ | 171 | __u8 bDescriptorSubtype; \ |
| @@ -205,7 +205,7 @@ struct uac_input_terminal_descriptor { | |||
| 205 | #define UAC_TERMINAL_CS_COPY_PROTECT_CONTROL 0x01 | 205 | #define UAC_TERMINAL_CS_COPY_PROTECT_CONTROL 0x01 |
| 206 | 206 | ||
| 207 | /* 4.3.2.2 Output Terminal Descriptor */ | 207 | /* 4.3.2.2 Output Terminal Descriptor */ |
| 208 | struct uac_output_terminal_descriptor_v1 { | 208 | struct uac1_output_terminal_descriptor { |
| 209 | __u8 bLength; /* in bytes: 9 */ | 209 | __u8 bLength; /* in bytes: 9 */ |
| 210 | __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ | 210 | __u8 bDescriptorType; /* CS_INTERFACE descriptor type */ |
| 211 | __u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */ | 211 | __u8 bDescriptorSubtype; /* OUTPUT_TERMINAL descriptor subtype */ |
| @@ -395,7 +395,7 @@ static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_desc | |||
| 395 | } | 395 | } |
| 396 | 396 | ||
| 397 | /* 4.5.2 Class-Specific AS Interface Descriptor */ | 397 | /* 4.5.2 Class-Specific AS Interface Descriptor */ |
| 398 | struct uac_as_header_descriptor_v1 { | 398 | struct uac1_as_header_descriptor { |
| 399 | __u8 bLength; /* in bytes: 7 */ | 399 | __u8 bLength; /* in bytes: 7 */ |
| 400 | __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ | 400 | __u8 bDescriptorType; /* USB_DT_CS_INTERFACE */ |
| 401 | __u8 bDescriptorSubtype; /* AS_GENERAL */ | 401 | __u8 bDescriptorSubtype; /* AS_GENERAL */ |
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 139353efad34..890bc1472190 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h | |||
| @@ -276,6 +276,8 @@ struct usb_composite_driver { | |||
| 276 | int (*bind)(struct usb_composite_dev *); | 276 | int (*bind)(struct usb_composite_dev *); |
| 277 | int (*unbind)(struct usb_composite_dev *); | 277 | int (*unbind)(struct usb_composite_dev *); |
| 278 | 278 | ||
| 279 | void (*disconnect)(struct usb_composite_dev *); | ||
| 280 | |||
| 279 | /* global suspend hooks */ | 281 | /* global suspend hooks */ |
| 280 | void (*suspend)(struct usb_composite_dev *); | 282 | void (*suspend)(struct usb_composite_dev *); |
| 281 | void (*resume)(struct usb_composite_dev *); | 283 | void (*resume)(struct usb_composite_dev *); |
| @@ -342,6 +344,10 @@ struct usb_composite_dev { | |||
| 342 | }; | 344 | }; |
| 343 | 345 | ||
| 344 | extern int usb_string_id(struct usb_composite_dev *c); | 346 | extern int usb_string_id(struct usb_composite_dev *c); |
| 347 | extern int usb_string_ids_tab(struct usb_composite_dev *c, | ||
| 348 | struct usb_string *str); | ||
| 349 | extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); | ||
| 350 | |||
| 345 | 351 | ||
| 346 | /* messaging utils */ | 352 | /* messaging utils */ |
| 347 | #define DBG(d, fmt, args...) \ | 353 | #define DBG(d, fmt, args...) \ |
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 80287af2a738..2e262cb15425 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h | |||
| @@ -39,6 +39,12 @@ struct ehci_caps { | |||
| 39 | #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ | 39 | #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ |
| 40 | 40 | ||
| 41 | u32 hcc_params; /* HCCPARAMS - offset 0x8 */ | 41 | u32 hcc_params; /* HCCPARAMS - offset 0x8 */ |
| 42 | /* EHCI 1.1 addendum */ | ||
| 43 | #define HCC_32FRAME_PERIODIC_LIST(p) ((p)&(1 << 19)) | ||
| 44 | #define HCC_PER_PORT_CHANGE_EVENT(p) ((p)&(1 << 18)) | ||
| 45 | #define HCC_LPM(p) ((p)&(1 << 17)) | ||
| 46 | #define HCC_HW_PREFETCH(p) ((p)&(1 << 16)) | ||
| 47 | |||
| 42 | #define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */ | 48 | #define HCC_EXT_CAPS(p) (((p)>>8)&0xff) /* for pci extended caps */ |
| 43 | #define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */ | 49 | #define HCC_ISOC_CACHE(p) ((p)&(1 << 7)) /* true: can cache isoc frame */ |
| 44 | #define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */ | 50 | #define HCC_ISOC_THRES(p) (((p)>>4)&0x7) /* bits 6:4, uframes cached */ |
| @@ -54,6 +60,13 @@ struct ehci_regs { | |||
| 54 | 60 | ||
| 55 | /* USBCMD: offset 0x00 */ | 61 | /* USBCMD: offset 0x00 */ |
| 56 | u32 command; | 62 | u32 command; |
| 63 | |||
| 64 | /* EHCI 1.1 addendum */ | ||
| 65 | #define CMD_HIRD (0xf<<24) /* host initiated resume duration */ | ||
| 66 | #define CMD_PPCEE (1<<15) /* per port change event enable */ | ||
| 67 | #define CMD_FSP (1<<14) /* fully synchronized prefetch */ | ||
| 68 | #define CMD_ASPE (1<<13) /* async schedule prefetch enable */ | ||
| 69 | #define CMD_PSPE (1<<12) /* periodic schedule prefetch enable */ | ||
| 57 | /* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */ | 70 | /* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */ |
| 58 | #define CMD_PARK (1<<11) /* enable "park" on async qh */ | 71 | #define CMD_PARK (1<<11) /* enable "park" on async qh */ |
| 59 | #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */ | 72 | #define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */ |
| @@ -67,6 +80,7 @@ struct ehci_regs { | |||
| 67 | 80 | ||
| 68 | /* USBSTS: offset 0x04 */ | 81 | /* USBSTS: offset 0x04 */ |
| 69 | u32 status; | 82 | u32 status; |
| 83 | #define STS_PPCE_MASK (0xff<<16) /* Per-Port change event 1-16 */ | ||
| 70 | #define STS_ASS (1<<15) /* Async Schedule Status */ | 84 | #define STS_ASS (1<<15) /* Async Schedule Status */ |
| 71 | #define STS_PSS (1<<14) /* Periodic Schedule Status */ | 85 | #define STS_PSS (1<<14) /* Periodic Schedule Status */ |
| 72 | #define STS_RECL (1<<13) /* Reclamation */ | 86 | #define STS_RECL (1<<13) /* Reclamation */ |
| @@ -100,6 +114,14 @@ struct ehci_regs { | |||
| 100 | 114 | ||
| 101 | /* PORTSC: offset 0x44 */ | 115 | /* PORTSC: offset 0x44 */ |
| 102 | u32 port_status[0]; /* up to N_PORTS */ | 116 | u32 port_status[0]; /* up to N_PORTS */ |
| 117 | /* EHCI 1.1 addendum */ | ||
| 118 | #define PORTSC_SUSPEND_STS_ACK 0 | ||
| 119 | #define PORTSC_SUSPEND_STS_NYET 1 | ||
| 120 | #define PORTSC_SUSPEND_STS_STALL 2 | ||
| 121 | #define PORTSC_SUSPEND_STS_ERR 3 | ||
| 122 | |||
| 123 | #define PORT_DEV_ADDR (0x7f<<25) /* device address */ | ||
| 124 | #define PORT_SSTS (0x3<<23) /* suspend status */ | ||
| 103 | /* 31:23 reserved */ | 125 | /* 31:23 reserved */ |
| 104 | #define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */ | 126 | #define PORT_WKOC_E (1<<22) /* wake on overcurrent (enable) */ |
| 105 | #define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */ | 127 | #define PORT_WKDISC_E (1<<21) /* wake on disconnect (enable) */ |
| @@ -115,6 +137,7 @@ struct ehci_regs { | |||
| 115 | #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ | 137 | #define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */ |
| 116 | /* 11:10 for detecting lowspeed devices (reset vs release ownership) */ | 138 | /* 11:10 for detecting lowspeed devices (reset vs release ownership) */ |
| 117 | /* 9 reserved */ | 139 | /* 9 reserved */ |
| 140 | #define PORT_LPM (1<<9) /* LPM transaction */ | ||
| 118 | #define PORT_RESET (1<<8) /* reset port */ | 141 | #define PORT_RESET (1<<8) /* reset port */ |
| 119 | #define PORT_SUSPEND (1<<7) /* suspend port */ | 142 | #define PORT_SUSPEND (1<<7) /* suspend port */ |
| 120 | #define PORT_RESUME (1<<6) /* resume it */ | 143 | #define PORT_RESUME (1<<6) /* resume it */ |
diff --git a/include/linux/usb/functionfs.h b/include/linux/usb/functionfs.h index a34a2a043b21..6f649c13193b 100644 --- a/include/linux/usb/functionfs.h +++ b/include/linux/usb/functionfs.h | |||
| @@ -180,9 +180,9 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev) | |||
| 180 | static void functionfs_unbind(struct ffs_data *ffs) | 180 | static void functionfs_unbind(struct ffs_data *ffs) |
| 181 | __attribute__((nonnull)); | 181 | __attribute__((nonnull)); |
| 182 | 182 | ||
| 183 | static int functionfs_add(struct usb_composite_dev *cdev, | 183 | static int functionfs_bind_config(struct usb_composite_dev *cdev, |
| 184 | struct usb_configuration *c, | 184 | struct usb_configuration *c, |
| 185 | struct ffs_data *ffs) | 185 | struct ffs_data *ffs) |
| 186 | __attribute__((warn_unused_result, nonnull)); | 186 | __attribute__((warn_unused_result, nonnull)); |
| 187 | 187 | ||
| 188 | 188 | ||
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 2e3a4ea1a3da..3b571f1ffbb3 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
| @@ -89,18 +89,33 @@ struct usb_hcd { | |||
| 89 | */ | 89 | */ |
| 90 | const struct hc_driver *driver; /* hw-specific hooks */ | 90 | const struct hc_driver *driver; /* hw-specific hooks */ |
| 91 | 91 | ||
| 92 | /* Flags that need to be manipulated atomically */ | 92 | /* Flags that need to be manipulated atomically because they can |
| 93 | * change while the host controller is running. Always use | ||
| 94 | * set_bit() or clear_bit() to change their values. | ||
| 95 | */ | ||
| 93 | unsigned long flags; | 96 | unsigned long flags; |
| 94 | #define HCD_FLAG_HW_ACCESSIBLE 0x00000001 | 97 | #define HCD_FLAG_HW_ACCESSIBLE 0 /* at full power */ |
| 95 | #define HCD_FLAG_SAW_IRQ 0x00000002 | 98 | #define HCD_FLAG_SAW_IRQ 1 |
| 99 | #define HCD_FLAG_POLL_RH 2 /* poll for rh status? */ | ||
| 100 | #define HCD_FLAG_POLL_PENDING 3 /* status has changed? */ | ||
| 101 | #define HCD_FLAG_WAKEUP_PENDING 4 /* root hub is resuming? */ | ||
| 102 | |||
| 103 | /* The flags can be tested using these macros; they are likely to | ||
| 104 | * be slightly faster than test_bit(). | ||
| 105 | */ | ||
| 106 | #define HCD_HW_ACCESSIBLE(hcd) ((hcd)->flags & (1U << HCD_FLAG_HW_ACCESSIBLE)) | ||
| 107 | #define HCD_SAW_IRQ(hcd) ((hcd)->flags & (1U << HCD_FLAG_SAW_IRQ)) | ||
| 108 | #define HCD_POLL_RH(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_RH)) | ||
| 109 | #define HCD_POLL_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_POLL_PENDING)) | ||
| 110 | #define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING)) | ||
| 96 | 111 | ||
| 112 | /* Flags that get set only during HCD registration or removal. */ | ||
| 97 | unsigned rh_registered:1;/* is root hub registered? */ | 113 | unsigned rh_registered:1;/* is root hub registered? */ |
| 114 | unsigned rh_pollable:1; /* may we poll the root hub? */ | ||
| 98 | 115 | ||
| 99 | /* The next flag is a stopgap, to be removed when all the HCDs | 116 | /* The next flag is a stopgap, to be removed when all the HCDs |
| 100 | * support the new root-hub polling mechanism. */ | 117 | * support the new root-hub polling mechanism. */ |
| 101 | unsigned uses_new_polling:1; | 118 | unsigned uses_new_polling:1; |
| 102 | unsigned poll_rh:1; /* poll for rh status? */ | ||
| 103 | unsigned poll_pending:1; /* status has changed? */ | ||
| 104 | unsigned wireless:1; /* Wireless USB HCD */ | 119 | unsigned wireless:1; /* Wireless USB HCD */ |
| 105 | unsigned authorized_default:1; | 120 | unsigned authorized_default:1; |
| 106 | unsigned has_tt:1; /* Integrated TT in root hub */ | 121 | unsigned has_tt:1; /* Integrated TT in root hub */ |
| @@ -198,7 +213,7 @@ struct hc_driver { | |||
| 198 | * a whole, not just the root hub; they're for PCI bus glue. | 213 | * a whole, not just the root hub; they're for PCI bus glue. |
| 199 | */ | 214 | */ |
| 200 | /* called after suspending the hub, before entering D3 etc */ | 215 | /* called after suspending the hub, before entering D3 etc */ |
| 201 | int (*pci_suspend)(struct usb_hcd *hcd); | 216 | int (*pci_suspend)(struct usb_hcd *hcd, bool do_wakeup); |
| 202 | 217 | ||
| 203 | /* called after entering D0 (etc), before resuming the hub */ | 218 | /* called after entering D0 (etc), before resuming the hub */ |
| 204 | int (*pci_resume)(struct usb_hcd *hcd, bool hibernated); | 219 | int (*pci_resume)(struct usb_hcd *hcd, bool hibernated); |
| @@ -299,6 +314,10 @@ struct hc_driver { | |||
| 299 | int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, | 314 | int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev, |
| 300 | struct usb_tt *tt, gfp_t mem_flags); | 315 | struct usb_tt *tt, gfp_t mem_flags); |
| 301 | int (*reset_device)(struct usb_hcd *, struct usb_device *); | 316 | int (*reset_device)(struct usb_hcd *, struct usb_device *); |
| 317 | /* Notifies the HCD after a device is connected and its | ||
| 318 | * address is set | ||
| 319 | */ | ||
| 320 | int (*update_device)(struct usb_hcd *, struct usb_device *); | ||
| 302 | }; | 321 | }; |
| 303 | 322 | ||
| 304 | extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); | 323 | extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb); |
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index f8302d036a76..545cba73ccaf 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h | |||
| @@ -43,13 +43,6 @@ enum usb_xceiv_events { | |||
| 43 | USB_EVENT_ENUMERATED, /* gadget driver enumerated */ | 43 | USB_EVENT_ENUMERATED, /* gadget driver enumerated */ |
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| 46 | #define USB_OTG_PULLUP_ID (1 << 0) | ||
| 47 | #define USB_OTG_PULLDOWN_DP (1 << 1) | ||
| 48 | #define USB_OTG_PULLDOWN_DM (1 << 2) | ||
| 49 | #define USB_OTG_EXT_VBUS_INDICATOR (1 << 3) | ||
| 50 | #define USB_OTG_DRV_VBUS (1 << 4) | ||
| 51 | #define USB_OTG_DRV_VBUS_EXT (1 << 5) | ||
| 52 | |||
| 53 | struct otg_transceiver; | 46 | struct otg_transceiver; |
| 54 | 47 | ||
| 55 | /* for transceivers connected thru an ULPI interface, the user must | 48 | /* for transceivers connected thru an ULPI interface, the user must |
| @@ -146,10 +139,10 @@ static inline int otg_io_read(struct otg_transceiver *otg, u32 reg) | |||
| 146 | return -EINVAL; | 139 | return -EINVAL; |
| 147 | } | 140 | } |
| 148 | 141 | ||
| 149 | static inline int otg_io_write(struct otg_transceiver *otg, u32 reg, u32 val) | 142 | static inline int otg_io_write(struct otg_transceiver *otg, u32 val, u32 reg) |
| 150 | { | 143 | { |
| 151 | if (otg->io_ops && otg->io_ops->write) | 144 | if (otg->io_ops && otg->io_ops->write) |
| 152 | return otg->io_ops->write(otg, reg, val); | 145 | return otg->io_ops->write(otg, val, reg); |
| 153 | 146 | ||
| 154 | return -EINVAL; | 147 | return -EINVAL; |
| 155 | } | 148 | } |
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 16b7f3347545..3e93de7ecbc3 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h | |||
| @@ -26,4 +26,8 @@ | |||
| 26 | and can't handle talking to these interfaces */ | 26 | and can't handle talking to these interfaces */ |
| 27 | #define USB_QUIRK_HONOR_BNUMINTERFACES 0x00000020 | 27 | #define USB_QUIRK_HONOR_BNUMINTERFACES 0x00000020 |
| 28 | 28 | ||
| 29 | /* device needs a pause during initialization, after we read the device | ||
| 30 | descriptor */ | ||
| 31 | #define USB_QUIRK_DELAY_INIT 0x00000040 | ||
| 32 | |||
| 29 | #endif /* __LINUX_USB_QUIRKS_H */ | 33 | #endif /* __LINUX_USB_QUIRKS_H */ |
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h index 2369d07c3c87..82b1507f4735 100644 --- a/include/linux/usb/ulpi.h +++ b/include/linux/usb/ulpi.h | |||
| @@ -11,6 +11,42 @@ | |||
| 11 | #ifndef __LINUX_USB_ULPI_H | 11 | #ifndef __LINUX_USB_ULPI_H |
| 12 | #define __LINUX_USB_ULPI_H | 12 | #define __LINUX_USB_ULPI_H |
| 13 | 13 | ||
| 14 | #include <linux/usb/otg.h> | ||
| 15 | /*-------------------------------------------------------------------------*/ | ||
| 16 | |||
| 17 | /* | ||
| 18 | * ULPI Flags | ||
| 19 | */ | ||
| 20 | #define ULPI_OTG_ID_PULLUP (1 << 0) | ||
| 21 | #define ULPI_OTG_DP_PULLDOWN_DIS (1 << 1) | ||
| 22 | #define ULPI_OTG_DM_PULLDOWN_DIS (1 << 2) | ||
| 23 | #define ULPI_OTG_DISCHRGVBUS (1 << 3) | ||
| 24 | #define ULPI_OTG_CHRGVBUS (1 << 4) | ||
| 25 | #define ULPI_OTG_DRVVBUS (1 << 5) | ||
| 26 | #define ULPI_OTG_DRVVBUS_EXT (1 << 6) | ||
| 27 | #define ULPI_OTG_EXTVBUSIND (1 << 7) | ||
| 28 | |||
| 29 | #define ULPI_IC_6PIN_SERIAL (1 << 8) | ||
| 30 | #define ULPI_IC_3PIN_SERIAL (1 << 9) | ||
| 31 | #define ULPI_IC_CARKIT (1 << 10) | ||
| 32 | #define ULPI_IC_CLKSUSPM (1 << 11) | ||
| 33 | #define ULPI_IC_AUTORESUME (1 << 12) | ||
| 34 | #define ULPI_IC_EXTVBUS_INDINV (1 << 13) | ||
| 35 | #define ULPI_IC_IND_PASSTHRU (1 << 14) | ||
| 36 | #define ULPI_IC_PROTECT_DIS (1 << 15) | ||
| 37 | |||
| 38 | #define ULPI_FC_HS (1 << 16) | ||
| 39 | #define ULPI_FC_FS (1 << 17) | ||
| 40 | #define ULPI_FC_LS (1 << 18) | ||
| 41 | #define ULPI_FC_FS4LS (1 << 19) | ||
| 42 | #define ULPI_FC_TERMSEL (1 << 20) | ||
| 43 | #define ULPI_FC_OP_NORM (1 << 21) | ||
| 44 | #define ULPI_FC_OP_NODRV (1 << 22) | ||
| 45 | #define ULPI_FC_OP_DIS_NRZI (1 << 23) | ||
| 46 | #define ULPI_FC_OP_NSYNC_NEOP (1 << 24) | ||
| 47 | #define ULPI_FC_RST (1 << 25) | ||
| 48 | #define ULPI_FC_SUSPM (1 << 26) | ||
| 49 | |||
| 14 | /*-------------------------------------------------------------------------*/ | 50 | /*-------------------------------------------------------------------------*/ |
| 15 | 51 | ||
| 16 | /* | 52 | /* |
| @@ -58,6 +94,10 @@ | |||
| 58 | 94 | ||
| 59 | /*-------------------------------------------------------------------------*/ | 95 | /*-------------------------------------------------------------------------*/ |
| 60 | 96 | ||
| 97 | /* | ||
| 98 | * Register Bits | ||
| 99 | */ | ||
| 100 | |||
| 61 | /* Function Control */ | 101 | /* Function Control */ |
| 62 | #define ULPI_FUNC_CTRL_XCVRSEL (1 << 0) | 102 | #define ULPI_FUNC_CTRL_XCVRSEL (1 << 0) |
| 63 | #define ULPI_FUNC_CTRL_XCVRSEL_MASK (3 << 0) | 103 | #define ULPI_FUNC_CTRL_XCVRSEL_MASK (3 << 0) |
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h index 047f7e6edb86..61490c6dcdbd 100644 --- a/include/linux/videodev2.h +++ b/include/linux/videodev2.h | |||
| @@ -277,6 +277,7 @@ struct v4l2_pix_format { | |||
| 277 | #define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ | 277 | #define V4L2_PIX_FMT_RGB565 v4l2_fourcc('R', 'G', 'B', 'P') /* 16 RGB-5-6-5 */ |
| 278 | #define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */ | 278 | #define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16 RGB-5-5-5 BE */ |
| 279 | #define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */ | 279 | #define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16 RGB-5-6-5 BE */ |
| 280 | #define V4L2_PIX_FMT_BGR666 v4l2_fourcc('B', 'G', 'R', 'H') /* 18 BGR-6-6-6 */ | ||
| 280 | #define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ | 281 | #define V4L2_PIX_FMT_BGR24 v4l2_fourcc('B', 'G', 'R', '3') /* 24 BGR-8-8-8 */ |
| 281 | #define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */ | 282 | #define V4L2_PIX_FMT_RGB24 v4l2_fourcc('R', 'G', 'B', '3') /* 24 RGB-8-8-8 */ |
| 282 | #define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */ | 283 | #define V4L2_PIX_FMT_BGR32 v4l2_fourcc('B', 'G', 'R', '4') /* 32 BGR-8-8-8-8 */ |
diff --git a/include/linux/virtio_9p.h b/include/linux/virtio_9p.h index 395c38a47adb..1faa80d92f05 100644 --- a/include/linux/virtio_9p.h +++ b/include/linux/virtio_9p.h | |||
| @@ -2,6 +2,7 @@ | |||
| 2 | #define _LINUX_VIRTIO_9P_H | 2 | #define _LINUX_VIRTIO_9P_H |
| 3 | /* This header is BSD licensed so anyone can use the definitions to implement | 3 | /* This header is BSD licensed so anyone can use the definitions to implement |
| 4 | * compatible drivers/servers. */ | 4 | * compatible drivers/servers. */ |
| 5 | #include <linux/types.h> | ||
| 5 | #include <linux/virtio_ids.h> | 6 | #include <linux/virtio_ids.h> |
| 6 | #include <linux/virtio_config.h> | 7 | #include <linux/virtio_config.h> |
| 7 | #include <linux/types.h> | 8 | #include <linux/types.h> |
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 227c2a585e4f..01c2145118dc 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
| @@ -7,6 +7,8 @@ | |||
| 7 | 7 | ||
| 8 | struct vm_area_struct; /* vma defining user mapping in mm_types.h */ | 8 | struct vm_area_struct; /* vma defining user mapping in mm_types.h */ |
| 9 | 9 | ||
| 10 | extern bool vmap_lazy_unmap; | ||
| 11 | |||
| 10 | /* bits in flags of vmalloc's vm_struct below */ | 12 | /* bits in flags of vmalloc's vm_struct below */ |
| 11 | #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ | 13 | #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ |
| 12 | #define VM_ALLOC 0x00000002 /* vmalloc() */ | 14 | #define VM_ALLOC 0x00000002 /* vmalloc() */ |
| @@ -30,7 +32,7 @@ struct vm_struct { | |||
| 30 | unsigned long flags; | 32 | unsigned long flags; |
| 31 | struct page **pages; | 33 | struct page **pages; |
| 32 | unsigned int nr_pages; | 34 | unsigned int nr_pages; |
| 33 | unsigned long phys_addr; | 35 | phys_addr_t phys_addr; |
| 34 | void *caller; | 36 | void *caller; |
| 35 | }; | 37 | }; |
| 36 | 38 | ||
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 7f56db4a79f0..6625cc1ab758 100644 --- a/include/linux/vt_kern.h +++ b/include/linux/vt_kern.h | |||
| @@ -76,17 +76,52 @@ int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc); | |||
| 76 | #define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ | 76 | #define vc_translate(vc, c) ((vc)->vc_translate[(c) | \ |
| 77 | ((vc)->vc_toggle_meta ? 0x80 : 0)]) | 77 | ((vc)->vc_toggle_meta ? 0x80 : 0)]) |
| 78 | #else | 78 | #else |
| 79 | #define con_set_trans_old(arg) (0) | 79 | static inline int con_set_trans_old(unsigned char __user *table) |
| 80 | #define con_get_trans_old(arg) (-EINVAL) | 80 | { |
| 81 | #define con_set_trans_new(arg) (0) | 81 | return 0; |
| 82 | #define con_get_trans_new(arg) (-EINVAL) | 82 | } |
| 83 | #define con_clear_unimap(vc, ui) (0) | 83 | static inline int con_get_trans_old(unsigned char __user *table) |
| 84 | #define con_set_unimap(vc, ct, list) (0) | 84 | { |
| 85 | #define con_set_default_unimap(vc) (0) | 85 | return -EINVAL; |
| 86 | #define con_copy_unimap(d, s) (0) | 86 | } |
| 87 | #define con_get_unimap(vc, ct, uct, list) (-EINVAL) | 87 | static inline int con_set_trans_new(unsigned short __user *table) |
| 88 | #define con_free_unimap(vc) do { ; } while (0) | 88 | { |
| 89 | #define con_protect_unimap(vc, rdonly) do { ; } while (0) | 89 | return 0; |
| 90 | } | ||
| 91 | static inline int con_get_trans_new(unsigned short __user *table) | ||
| 92 | { | ||
| 93 | return -EINVAL; | ||
| 94 | } | ||
| 95 | static inline int con_clear_unimap(struct vc_data *vc, struct unimapinit *ui) | ||
| 96 | { | ||
| 97 | return 0; | ||
| 98 | } | ||
| 99 | static inline | ||
| 100 | int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) | ||
| 101 | { | ||
| 102 | return 0; | ||
| 103 | } | ||
| 104 | static inline | ||
| 105 | int con_get_unimap(struct vc_data *vc, ushort ct, ushort __user *uct, | ||
| 106 | struct unipair __user *list) | ||
| 107 | { | ||
| 108 | return -EINVAL; | ||
| 109 | } | ||
| 110 | static inline int con_set_default_unimap(struct vc_data *vc) | ||
| 111 | { | ||
| 112 | return 0; | ||
| 113 | } | ||
| 114 | static inline void con_free_unimap(struct vc_data *vc) | ||
| 115 | { | ||
| 116 | } | ||
| 117 | static inline void con_protect_unimap(struct vc_data *vc, int rdonly) | ||
| 118 | { | ||
| 119 | } | ||
| 120 | static inline | ||
| 121 | int con_copy_unimap(struct vc_data *dst_vc, struct vc_data *src_vc) | ||
| 122 | { | ||
| 123 | return 0; | ||
| 124 | } | ||
| 90 | 125 | ||
| 91 | #define vc_translate(vc, c) (c) | 126 | #define vc_translate(vc, c) (c) |
| 92 | #endif | 127 | #endif |
| @@ -100,6 +135,13 @@ extern int unbind_con_driver(const struct consw *csw, int first, int last, | |||
| 100 | int deflt); | 135 | int deflt); |
| 101 | int vty_init(const struct file_operations *console_fops); | 136 | int vty_init(const struct file_operations *console_fops); |
| 102 | 137 | ||
| 138 | static inline bool vt_force_oops_output(struct vc_data *vc) | ||
| 139 | { | ||
| 140 | if (oops_in_progress && vc->vc_panic_force_write) | ||
| 141 | return true; | ||
| 142 | return false; | ||
| 143 | } | ||
| 144 | |||
| 103 | /* | 145 | /* |
| 104 | * vc_screen.c shares this temporary buffer with the console write code so that | 146 | * vc_screen.c shares this temporary buffer with the console write code so that |
| 105 | * we can easily avoid touching user space while holding the console spinlock. | 147 | * we can easily avoid touching user space while holding the console spinlock. |
diff --git a/include/linux/wm97xx_batt.h b/include/linux/wm97xx_batt.h deleted file mode 100644 index a1d6419c2ff8..000000000000 --- a/include/linux/wm97xx_batt.h +++ /dev/null | |||
| @@ -1,16 +0,0 @@ | |||
| 1 | #ifndef _LINUX_WM97XX_BAT_H | ||
| 2 | #define _LINUX_WM97XX_BAT_H | ||
| 3 | |||
| 4 | #include <linux/wm97xx.h> | ||
| 5 | |||
| 6 | #warning This file will be removed soon, use wm97xx.h instead! | ||
| 7 | |||
| 8 | #define wm97xx_batt_info wm97xx_batt_pdata | ||
| 9 | |||
| 10 | #ifdef CONFIG_BATTERY_WM97XX | ||
| 11 | void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data); | ||
| 12 | #else | ||
| 13 | static inline void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) {} | ||
| 14 | #endif | ||
| 15 | |||
| 16 | #endif | ||
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 9466e860d8c2..4f9d277bcd9a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -9,6 +9,7 @@ | |||
| 9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
| 10 | #include <linux/bitops.h> | 10 | #include <linux/bitops.h> |
| 11 | #include <linux/lockdep.h> | 11 | #include <linux/lockdep.h> |
| 12 | #include <linux/threads.h> | ||
| 12 | #include <asm/atomic.h> | 13 | #include <asm/atomic.h> |
| 13 | 14 | ||
| 14 | struct workqueue_struct; | 15 | struct workqueue_struct; |
| @@ -22,12 +23,59 @@ typedef void (*work_func_t)(struct work_struct *work); | |||
| 22 | */ | 23 | */ |
| 23 | #define work_data_bits(work) ((unsigned long *)(&(work)->data)) | 24 | #define work_data_bits(work) ((unsigned long *)(&(work)->data)) |
| 24 | 25 | ||
| 26 | enum { | ||
| 27 | WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ | ||
| 28 | WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */ | ||
| 29 | WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */ | ||
| 30 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | ||
| 31 | WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */ | ||
| 32 | WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ | ||
| 33 | #else | ||
| 34 | WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */ | ||
| 35 | #endif | ||
| 36 | |||
| 37 | WORK_STRUCT_COLOR_BITS = 4, | ||
| 38 | |||
| 39 | WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, | ||
| 40 | WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, | ||
| 41 | WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, | ||
| 42 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | ||
| 43 | WORK_STRUCT_STATIC = 1 << WORK_STRUCT_STATIC_BIT, | ||
| 44 | #else | ||
| 45 | WORK_STRUCT_STATIC = 0, | ||
| 46 | #endif | ||
| 47 | |||
| 48 | /* | ||
| 49 | * The last color is no color used for works which don't | ||
| 50 | * participate in workqueue flushing. | ||
| 51 | */ | ||
| 52 | WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, | ||
| 53 | WORK_NO_COLOR = WORK_NR_COLORS, | ||
| 54 | |||
| 55 | /* special cpu IDs */ | ||
| 56 | WORK_CPU_UNBOUND = NR_CPUS, | ||
| 57 | WORK_CPU_NONE = NR_CPUS + 1, | ||
| 58 | WORK_CPU_LAST = WORK_CPU_NONE, | ||
| 59 | |||
| 60 | /* | ||
| 61 | * Reserve 7 bits off of cwq pointer w/ debugobjects turned | ||
| 62 | * off. This makes cwqs aligned to 128 bytes which isn't too | ||
| 63 | * excessive while allowing 15 workqueue flush colors. | ||
| 64 | */ | ||
| 65 | WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + | ||
| 66 | WORK_STRUCT_COLOR_BITS, | ||
| 67 | |||
| 68 | WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, | ||
| 69 | WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, | ||
| 70 | WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS, | ||
| 71 | |||
| 72 | /* bit mask for work_busy() return values */ | ||
| 73 | WORK_BUSY_PENDING = 1 << 0, | ||
| 74 | WORK_BUSY_RUNNING = 1 << 1, | ||
| 75 | }; | ||
| 76 | |||
| 25 | struct work_struct { | 77 | struct work_struct { |
| 26 | atomic_long_t data; | 78 | atomic_long_t data; |
| 27 | #define WORK_STRUCT_PENDING 0 /* T if work item pending execution */ | ||
| 28 | #define WORK_STRUCT_STATIC 1 /* static initializer (debugobjects) */ | ||
| 29 | #define WORK_STRUCT_FLAG_MASK (3UL) | ||
| 30 | #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK) | ||
| 31 | struct list_head entry; | 79 | struct list_head entry; |
| 32 | work_func_t func; | 80 | work_func_t func; |
| 33 | #ifdef CONFIG_LOCKDEP | 81 | #ifdef CONFIG_LOCKDEP |
| @@ -35,8 +83,9 @@ struct work_struct { | |||
| 35 | #endif | 83 | #endif |
| 36 | }; | 84 | }; |
| 37 | 85 | ||
| 38 | #define WORK_DATA_INIT() ATOMIC_LONG_INIT(0) | 86 | #define WORK_DATA_INIT() ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU) |
| 39 | #define WORK_DATA_STATIC_INIT() ATOMIC_LONG_INIT(2) | 87 | #define WORK_DATA_STATIC_INIT() \ |
| 88 | ATOMIC_LONG_INIT(WORK_STRUCT_NO_CPU | WORK_STRUCT_STATIC) | ||
| 40 | 89 | ||
| 41 | struct delayed_work { | 90 | struct delayed_work { |
| 42 | struct work_struct work; | 91 | struct work_struct work; |
| @@ -96,9 +145,14 @@ struct execute_work { | |||
| 96 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | 145 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
| 97 | extern void __init_work(struct work_struct *work, int onstack); | 146 | extern void __init_work(struct work_struct *work, int onstack); |
| 98 | extern void destroy_work_on_stack(struct work_struct *work); | 147 | extern void destroy_work_on_stack(struct work_struct *work); |
| 148 | static inline unsigned int work_static(struct work_struct *work) | ||
| 149 | { | ||
| 150 | return *work_data_bits(work) & WORK_STRUCT_STATIC; | ||
| 151 | } | ||
| 99 | #else | 152 | #else |
| 100 | static inline void __init_work(struct work_struct *work, int onstack) { } | 153 | static inline void __init_work(struct work_struct *work, int onstack) { } |
| 101 | static inline void destroy_work_on_stack(struct work_struct *work) { } | 154 | static inline void destroy_work_on_stack(struct work_struct *work) { } |
| 155 | static inline unsigned int work_static(struct work_struct *work) { return 0; } | ||
| 102 | #endif | 156 | #endif |
| 103 | 157 | ||
| 104 | /* | 158 | /* |
| @@ -162,7 +216,7 @@ static inline void destroy_work_on_stack(struct work_struct *work) { } | |||
| 162 | * @work: The work item in question | 216 | * @work: The work item in question |
| 163 | */ | 217 | */ |
| 164 | #define work_pending(work) \ | 218 | #define work_pending(work) \ |
| 165 | test_bit(WORK_STRUCT_PENDING, work_data_bits(work)) | 219 | test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) |
| 166 | 220 | ||
| 167 | /** | 221 | /** |
| 168 | * delayed_work_pending - Find out whether a delayable work item is currently | 222 | * delayed_work_pending - Find out whether a delayable work item is currently |
| @@ -177,16 +231,56 @@ static inline void destroy_work_on_stack(struct work_struct *work) { } | |||
| 177 | * @work: The work item in question | 231 | * @work: The work item in question |
| 178 | */ | 232 | */ |
| 179 | #define work_clear_pending(work) \ | 233 | #define work_clear_pending(work) \ |
| 180 | clear_bit(WORK_STRUCT_PENDING, work_data_bits(work)) | 234 | clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) |
| 235 | |||
| 236 | enum { | ||
| 237 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ | ||
| 238 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ | ||
| 239 | WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ | ||
| 240 | WQ_RESCUER = 1 << 3, /* has an rescue worker */ | ||
| 241 | WQ_HIGHPRI = 1 << 4, /* high priority */ | ||
| 242 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ | ||
| 243 | |||
| 244 | WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ | ||
| 245 | WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ | ||
| 246 | WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, | ||
| 247 | }; | ||
| 181 | 248 | ||
| 249 | /* unbound wq's aren't per-cpu, scale max_active according to #cpus */ | ||
| 250 | #define WQ_UNBOUND_MAX_ACTIVE \ | ||
| 251 | max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU) | ||
| 252 | |||
| 253 | /* | ||
| 254 | * System-wide workqueues which are always present. | ||
| 255 | * | ||
| 256 | * system_wq is the one used by schedule[_delayed]_work[_on](). | ||
| 257 | * Multi-CPU multi-threaded. There are users which expect relatively | ||
| 258 | * short queue flush time. Don't queue works which can run for too | ||
| 259 | * long. | ||
| 260 | * | ||
| 261 | * system_long_wq is similar to system_wq but may host long running | ||
| 262 | * works. Queue flushing might take relatively long. | ||
| 263 | * | ||
| 264 | * system_nrt_wq is non-reentrant and guarantees that any given work | ||
| 265 | * item is never executed in parallel by multiple CPUs. Queue | ||
| 266 | * flushing might take relatively long. | ||
| 267 | * | ||
| 268 | * system_unbound_wq is unbound workqueue. Workers are not bound to | ||
| 269 | * any specific CPU, not concurrency managed, and all queued works are | ||
| 270 | * executed immediately as long as max_active limit is not reached and | ||
| 271 | * resources are available. | ||
| 272 | */ | ||
| 273 | extern struct workqueue_struct *system_wq; | ||
| 274 | extern struct workqueue_struct *system_long_wq; | ||
| 275 | extern struct workqueue_struct *system_nrt_wq; | ||
| 276 | extern struct workqueue_struct *system_unbound_wq; | ||
| 182 | 277 | ||
| 183 | extern struct workqueue_struct * | 278 | extern struct workqueue_struct * |
| 184 | __create_workqueue_key(const char *name, int singlethread, | 279 | __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, |
| 185 | int freezeable, int rt, struct lock_class_key *key, | 280 | struct lock_class_key *key, const char *lock_name); |
| 186 | const char *lock_name); | ||
| 187 | 281 | ||
| 188 | #ifdef CONFIG_LOCKDEP | 282 | #ifdef CONFIG_LOCKDEP |
| 189 | #define __create_workqueue(name, singlethread, freezeable, rt) \ | 283 | #define alloc_workqueue(name, flags, max_active) \ |
| 190 | ({ \ | 284 | ({ \ |
| 191 | static struct lock_class_key __key; \ | 285 | static struct lock_class_key __key; \ |
| 192 | const char *__lock_name; \ | 286 | const char *__lock_name; \ |
| @@ -196,20 +290,20 @@ __create_workqueue_key(const char *name, int singlethread, | |||
| 196 | else \ | 290 | else \ |
| 197 | __lock_name = #name; \ | 291 | __lock_name = #name; \ |
| 198 | \ | 292 | \ |
| 199 | __create_workqueue_key((name), (singlethread), \ | 293 | __alloc_workqueue_key((name), (flags), (max_active), \ |
| 200 | (freezeable), (rt), &__key, \ | 294 | &__key, __lock_name); \ |
| 201 | __lock_name); \ | ||
| 202 | }) | 295 | }) |
| 203 | #else | 296 | #else |
| 204 | #define __create_workqueue(name, singlethread, freezeable, rt) \ | 297 | #define alloc_workqueue(name, flags, max_active) \ |
| 205 | __create_workqueue_key((name), (singlethread), (freezeable), (rt), \ | 298 | __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL) |
| 206 | NULL, NULL) | ||
| 207 | #endif | 299 | #endif |
| 208 | 300 | ||
| 209 | #define create_workqueue(name) __create_workqueue((name), 0, 0, 0) | 301 | #define create_workqueue(name) \ |
| 210 | #define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1) | 302 | alloc_workqueue((name), WQ_RESCUER, 1) |
| 211 | #define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0) | 303 | #define create_freezeable_workqueue(name) \ |
| 212 | #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0) | 304 | alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1) |
| 305 | #define create_singlethread_workqueue(name) \ | ||
| 306 | alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1) | ||
| 213 | 307 | ||
| 214 | extern void destroy_workqueue(struct workqueue_struct *wq); | 308 | extern void destroy_workqueue(struct workqueue_struct *wq); |
| 215 | 309 | ||
| @@ -231,16 +325,19 @@ extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay) | |||
| 231 | extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, | 325 | extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, |
| 232 | unsigned long delay); | 326 | unsigned long delay); |
| 233 | extern int schedule_on_each_cpu(work_func_t func); | 327 | extern int schedule_on_each_cpu(work_func_t func); |
| 234 | extern int current_is_keventd(void); | ||
| 235 | extern int keventd_up(void); | 328 | extern int keventd_up(void); |
| 236 | 329 | ||
| 237 | extern void init_workqueues(void); | ||
| 238 | int execute_in_process_context(work_func_t fn, struct execute_work *); | 330 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
| 239 | 331 | ||
| 240 | extern int flush_work(struct work_struct *work); | 332 | extern int flush_work(struct work_struct *work); |
| 241 | |||
| 242 | extern int cancel_work_sync(struct work_struct *work); | 333 | extern int cancel_work_sync(struct work_struct *work); |
| 243 | 334 | ||
| 335 | extern void workqueue_set_max_active(struct workqueue_struct *wq, | ||
| 336 | int max_active); | ||
| 337 | extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq); | ||
| 338 | extern unsigned int work_cpu(struct work_struct *work); | ||
| 339 | extern unsigned int work_busy(struct work_struct *work); | ||
| 340 | |||
| 244 | /* | 341 | /* |
| 245 | * Kill off a pending schedule_delayed_work(). Note that the work callback | 342 | * Kill off a pending schedule_delayed_work(). Note that the work callback |
| 246 | * function may still be running on return from cancel_delayed_work(), unless | 343 | * function may still be running on return from cancel_delayed_work(), unless |
| @@ -297,4 +394,15 @@ static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) | |||
| 297 | #else | 394 | #else |
| 298 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg); | 395 | long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg); |
| 299 | #endif /* CONFIG_SMP */ | 396 | #endif /* CONFIG_SMP */ |
| 397 | |||
| 398 | #ifdef CONFIG_FREEZER | ||
| 399 | extern void freeze_workqueues_begin(void); | ||
| 400 | extern bool freeze_workqueues_busy(void); | ||
| 401 | extern void thaw_workqueues(void); | ||
| 402 | #endif /* CONFIG_FREEZER */ | ||
| 403 | |||
| 404 | #ifdef CONFIG_LOCKDEP | ||
| 405 | int in_workqueue_context(struct workqueue_struct *wq); | ||
| 406 | #endif | ||
| 407 | |||
| 300 | #endif | 408 | #endif |
diff --git a/include/linux/writeback.h b/include/linux/writeback.h index c24eca71e80c..72a5d647a5f2 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h | |||
| @@ -124,8 +124,9 @@ struct ctl_table; | |||
| 124 | int dirty_writeback_centisecs_handler(struct ctl_table *, int, | 124 | int dirty_writeback_centisecs_handler(struct ctl_table *, int, |
| 125 | void __user *, size_t *, loff_t *); | 125 | void __user *, size_t *, loff_t *); |
| 126 | 126 | ||
| 127 | void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty, | 127 | void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty); |
| 128 | unsigned long *pbdi_dirty, struct backing_dev_info *bdi); | 128 | unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, |
| 129 | unsigned long dirty); | ||
| 129 | 130 | ||
| 130 | void page_writeback_init(void); | 131 | void page_writeback_init(void); |
| 131 | void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, | 132 | void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, |
