Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/decompress/mm.h     4
-rw-r--r--  include/linux/device.h           12
-rw-r--r--  include/linux/dst.h             587
-rw-r--r--  include/linux/elf.h               2
-rw-r--r--  include/linux/ext3_fs_sb.h        2
-rw-r--r--  include/linux/ext3_jbd.h          7
-rw-r--r--  include/linux/fs.h                6
-rw-r--r--  include/linux/kfifo.h           554
-rw-r--r--  include/linux/memory.h           27
-rw-r--r--  include/linux/mm.h                3
-rw-r--r--  include/linux/namei.h             2
-rw-r--r--  include/linux/perf_counter.h    444
-rw-r--r--  include/linux/quota.h             5
-rw-r--r--  include/linux/rcutiny.h           5
-rw-r--r--  include/linux/rcutree.h          11
-rw-r--r--  include/linux/sched.h            13
-rw-r--r--  include/linux/sonypi.h            1
-rw-r--r--  include/linux/sysfs.h             9
-rw-r--r--  include/linux/usb/serial.h        3
19 files changed, 592 insertions(+), 1105 deletions(-)
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
index 12ff8c3f1d05..5032b9a31ae7 100644
--- a/include/linux/decompress/mm.h
+++ b/include/linux/decompress/mm.h
| @@ -25,7 +25,7 @@ static void *malloc(int size) | |||
| 25 | void *p; | 25 | void *p; |
| 26 | 26 | ||
| 27 | if (size < 0) | 27 | if (size < 0) |
| 28 | error("Malloc error"); | 28 | return NULL; |
| 29 | if (!malloc_ptr) | 29 | if (!malloc_ptr) |
| 30 | malloc_ptr = free_mem_ptr; | 30 | malloc_ptr = free_mem_ptr; |
| 31 | 31 | ||
| @@ -35,7 +35,7 @@ static void *malloc(int size) | |||
| 35 | malloc_ptr += size; | 35 | malloc_ptr += size; |
| 36 | 36 | ||
| 37 | if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) | 37 | if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr) |
| 38 | error("Out of memory"); | 38 | return NULL; |
| 39 | 39 | ||
| 40 | malloc_count++; | 40 | malloc_count++; |
| 41 | return p; | 41 | return p; |
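With this change the boot decompressor's malloc() signals failure by returning NULL instead of calling error() itself, so each caller is now responsible for checking the result. A minimal, hypothetical caller sketch (the function, buffer name and message are illustrative, not part of the patch):

	static unsigned char *in_buf;

	static int alloc_in_buf(unsigned int len)
	{
		in_buf = malloc(len);	/* may now return NULL on failure */
		if (!in_buf) {
			error("Out of memory while decompressing");
			return -1;
		}
		return 0;
	}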
diff --git a/include/linux/device.h b/include/linux/device.h
index 2a73d9bcbc9c..a62799f2ab00 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
| @@ -166,9 +166,9 @@ struct driver_attribute driver_attr_##_name = \ | |||
| 166 | __ATTR(_name, _mode, _show, _store) | 166 | __ATTR(_name, _mode, _show, _store) |
| 167 | 167 | ||
| 168 | extern int __must_check driver_create_file(struct device_driver *driver, | 168 | extern int __must_check driver_create_file(struct device_driver *driver, |
| 169 | struct driver_attribute *attr); | 169 | const struct driver_attribute *attr); |
| 170 | extern void driver_remove_file(struct device_driver *driver, | 170 | extern void driver_remove_file(struct device_driver *driver, |
| 171 | struct driver_attribute *attr); | 171 | const struct driver_attribute *attr); |
| 172 | 172 | ||
| 173 | extern int __must_check driver_add_kobj(struct device_driver *drv, | 173 | extern int __must_check driver_add_kobj(struct device_driver *drv, |
| 174 | struct kobject *kobj, | 174 | struct kobject *kobj, |
| @@ -319,13 +319,13 @@ struct device_attribute { | |||
| 319 | struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) | 319 | struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) |
| 320 | 320 | ||
| 321 | extern int __must_check device_create_file(struct device *device, | 321 | extern int __must_check device_create_file(struct device *device, |
| 322 | struct device_attribute *entry); | 322 | const struct device_attribute *entry); |
| 323 | extern void device_remove_file(struct device *dev, | 323 | extern void device_remove_file(struct device *dev, |
| 324 | struct device_attribute *attr); | 324 | const struct device_attribute *attr); |
| 325 | extern int __must_check device_create_bin_file(struct device *dev, | 325 | extern int __must_check device_create_bin_file(struct device *dev, |
| 326 | struct bin_attribute *attr); | 326 | const struct bin_attribute *attr); |
| 327 | extern void device_remove_bin_file(struct device *dev, | 327 | extern void device_remove_bin_file(struct device *dev, |
| 328 | struct bin_attribute *attr); | 328 | const struct bin_attribute *attr); |
| 329 | extern int device_schedule_callback_owner(struct device *dev, | 329 | extern int device_schedule_callback_owner(struct device *dev, |
| 330 | void (*func)(struct device *dev), struct module *owner); | 330 | void (*func)(struct device *dev), struct module *owner); |
| 331 | 331 | ||
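With driver_create_file(), device_create_file() and the bin_file helpers now taking pointers to const attributes, drivers can mark their attribute definitions const and still register them. A hedged sketch (the driver and attribute names are made up for illustration):

	static ssize_t version_show(struct device_driver *drv, char *buf)
	{
		return sprintf(buf, "1.0\n");
	}

	static const struct driver_attribute driver_attr_version =
		__ATTR(version, S_IRUGO, version_show, NULL);

	/* in the driver's init path */
	ret = driver_create_file(&example_platform_driver.driver, &driver_attr_version);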
diff --git a/include/linux/dst.h b/include/linux/dst.h
deleted file mode 100644
index e26fed84b1aa..000000000000
--- a/include/linux/dst.h
+++ /dev/null
| @@ -1,587 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> | ||
| 3 | * All rights reserved. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License as published by | ||
| 7 | * the Free Software Foundation; either version 2 of the License, or | ||
| 8 | * (at your option) any later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef __DST_H | ||
| 17 | #define __DST_H | ||
| 18 | |||
| 19 | #include <linux/types.h> | ||
| 20 | #include <linux/connector.h> | ||
| 21 | |||
| 22 | #define DST_NAMELEN 32 | ||
| 23 | #define DST_NAME "dst" | ||
| 24 | |||
| 25 | enum { | ||
| 26 | /* Remove node with given id from storage */ | ||
| 27 | DST_DEL_NODE = 0, | ||
| 28 | /* Add remote node with given id to the storage */ | ||
| 29 | DST_ADD_REMOTE, | ||
| 30 | /* Add local node with given id to the storage to be exported and used by remote peers */ | ||
| 31 | DST_ADD_EXPORT, | ||
| 32 | /* Crypto initialization command (hash/cipher used to protect the connection) */ | ||
| 33 | DST_CRYPTO, | ||
| 34 | /* Security attributes for given connection (permissions for example) */ | ||
| 35 | DST_SECURITY, | ||
| 36 | /* Register given node in the block layer subsystem */ | ||
| 37 | DST_START, | ||
| 38 | DST_CMD_MAX | ||
| 39 | }; | ||
| 40 | |||
| 41 | struct dst_ctl | ||
| 42 | { | ||
| 43 | /* Storage name */ | ||
| 44 | char name[DST_NAMELEN]; | ||
| 45 | /* Command flags */ | ||
| 46 | __u32 flags; | ||
| 47 | /* Command itself (see above) */ | ||
| 48 | __u32 cmd; | ||
| 49 | /* Maximum number of pages per single request in this device */ | ||
| 50 | __u32 max_pages; | ||
| 51 | /* Stale/error transaction scanning timeout in milliseconds */ | ||
| 52 | __u32 trans_scan_timeout; | ||
| 53 | /* Maximum number of retry sends before completing transaction as broken */ | ||
| 54 | __u32 trans_max_retries; | ||
| 55 | /* Storage size */ | ||
| 56 | __u64 size; | ||
| 57 | }; | ||
| 58 | |||
| 59 | /* Reply command carries completion status */ | ||
| 60 | struct dst_ctl_ack | ||
| 61 | { | ||
| 62 | struct cn_msg msg; | ||
| 63 | int error; | ||
| 64 | int unused[3]; | ||
| 65 | }; | ||
| 66 | |||
| 67 | /* | ||
| 68 | * Unfortunaltely socket address structure is not exported to userspace | ||
| 69 | * and is redefined there. | ||
| 70 | */ | ||
| 71 | #define SADDR_MAX_DATA 128 | ||
| 72 | |||
| 73 | struct saddr { | ||
| 74 | /* address family, AF_xxx */ | ||
| 75 | unsigned short sa_family; | ||
| 76 | /* 14 bytes of protocol address */ | ||
| 77 | char sa_data[SADDR_MAX_DATA]; | ||
| 78 | /* Number of bytes used in sa_data */ | ||
| 79 | unsigned short sa_data_len; | ||
| 80 | }; | ||
| 81 | |||
| 82 | /* Address structure */ | ||
| 83 | struct dst_network_ctl | ||
| 84 | { | ||
| 85 | /* Socket type: datagram, stream...*/ | ||
| 86 | unsigned int type; | ||
| 87 | /* Let me guess, is it a Jupiter diameter? */ | ||
| 88 | unsigned int proto; | ||
| 89 | /* Peer's address */ | ||
| 90 | struct saddr addr; | ||
| 91 | }; | ||
| 92 | |||
| 93 | struct dst_crypto_ctl | ||
| 94 | { | ||
| 95 | /* Cipher and hash names */ | ||
| 96 | char cipher_algo[DST_NAMELEN]; | ||
| 97 | char hash_algo[DST_NAMELEN]; | ||
| 98 | |||
| 99 | /* Key sizes. Can be zero for digest for example */ | ||
| 100 | unsigned int cipher_keysize, hash_keysize; | ||
| 101 | /* Alignment. Calculated by the DST itself. */ | ||
| 102 | unsigned int crypto_attached_size; | ||
| 103 | /* Number of threads to perform crypto operations */ | ||
| 104 | int thread_num; | ||
| 105 | }; | ||
| 106 | |||
| 107 | /* Export security attributes have this bits checked in when client connects */ | ||
| 108 | #define DST_PERM_READ (1<<0) | ||
| 109 | #define DST_PERM_WRITE (1<<1) | ||
| 110 | |||
| 111 | /* | ||
| 112 | * Right now it is simple model, where each remote address | ||
| 113 | * is assigned to set of permissions it is allowed to perform. | ||
| 114 | * In real world block device does not know anything but | ||
| 115 | * reading and writing, so it should be more than enough. | ||
| 116 | */ | ||
| 117 | struct dst_secure_user | ||
| 118 | { | ||
| 119 | unsigned int permissions; | ||
| 120 | struct saddr addr; | ||
| 121 | }; | ||
| 122 | |||
| 123 | /* | ||
| 124 | * Export control command: device to export and network address to accept | ||
| 125 | * clients to work with given device | ||
| 126 | */ | ||
| 127 | struct dst_export_ctl | ||
| 128 | { | ||
| 129 | char device[DST_NAMELEN]; | ||
| 130 | struct dst_network_ctl ctl; | ||
| 131 | }; | ||
| 132 | |||
| 133 | enum { | ||
| 134 | DST_CFG = 1, /* Request remote configuration */ | ||
| 135 | DST_IO, /* IO command */ | ||
| 136 | DST_IO_RESPONSE, /* IO response */ | ||
| 137 | DST_PING, /* Keepalive message */ | ||
| 138 | DST_NCMD_MAX, | ||
| 139 | }; | ||
| 140 | |||
| 141 | struct dst_cmd | ||
| 142 | { | ||
| 143 | /* Network command itself, see above */ | ||
| 144 | __u32 cmd; | ||
| 145 | /* | ||
| 146 | * Size of the attached data | ||
| 147 | * (in most cases, for READ command it means how many bytes were requested) | ||
| 148 | */ | ||
| 149 | __u32 size; | ||
| 150 | /* Crypto size: number of attached bytes with digest/hmac */ | ||
| 151 | __u32 csize; | ||
| 152 | /* Here we can carry secret data */ | ||
| 153 | __u32 reserved; | ||
| 154 | /* Read/write bits, see how they are encoded in bio structure */ | ||
| 155 | __u64 rw; | ||
| 156 | /* BIO flags */ | ||
| 157 | __u64 flags; | ||
| 158 | /* Unique command id (like transaction ID) */ | ||
| 159 | __u64 id; | ||
| 160 | /* Sector to start IO from */ | ||
| 161 | __u64 sector; | ||
| 162 | /* Hash data is placed after this header */ | ||
| 163 | __u8 hash[0]; | ||
| 164 | }; | ||
| 165 | |||
| 166 | /* | ||
| 167 | * Convert command to/from network byte order. | ||
| 168 | * We do not use hton*() functions, since there is | ||
| 169 | * no 64-bit implementation. | ||
| 170 | */ | ||
| 171 | static inline void dst_convert_cmd(struct dst_cmd *c) | ||
| 172 | { | ||
| 173 | c->cmd = __cpu_to_be32(c->cmd); | ||
| 174 | c->csize = __cpu_to_be32(c->csize); | ||
| 175 | c->size = __cpu_to_be32(c->size); | ||
| 176 | c->sector = __cpu_to_be64(c->sector); | ||
| 177 | c->id = __cpu_to_be64(c->id); | ||
| 178 | c->flags = __cpu_to_be64(c->flags); | ||
| 179 | c->rw = __cpu_to_be64(c->rw); | ||
| 180 | } | ||
| 181 | |||
| 182 | /* Transaction id */ | ||
| 183 | typedef __u64 dst_gen_t; | ||
| 184 | |||
| 185 | #ifdef __KERNEL__ | ||
| 186 | |||
| 187 | #include <linux/blkdev.h> | ||
| 188 | #include <linux/bio.h> | ||
| 189 | #include <linux/device.h> | ||
| 190 | #include <linux/mempool.h> | ||
| 191 | #include <linux/net.h> | ||
| 192 | #include <linux/poll.h> | ||
| 193 | #include <linux/rbtree.h> | ||
| 194 | |||
| 195 | #ifdef CONFIG_DST_DEBUG | ||
| 196 | #define dprintk(f, a...) printk(KERN_NOTICE f, ##a) | ||
| 197 | #else | ||
| 198 | static inline void __attribute__ ((format (printf, 1, 2))) | ||
| 199 | dprintk(const char *fmt, ...) {} | ||
| 200 | #endif | ||
| 201 | |||
| 202 | struct dst_node; | ||
| 203 | |||
| 204 | struct dst_trans | ||
| 205 | { | ||
| 206 | /* DST node we are working with */ | ||
| 207 | struct dst_node *n; | ||
| 208 | |||
| 209 | /* Entry inside transaction tree */ | ||
| 210 | struct rb_node trans_entry; | ||
| 211 | |||
| 212 | /* Merlin kills this transaction when this memory cell equals zero */ | ||
| 213 | atomic_t refcnt; | ||
| 214 | |||
| 215 | /* How this transaction should be processed by crypto engine */ | ||
| 216 | short enc; | ||
| 217 | /* How many times this transaction was resent */ | ||
| 218 | short retries; | ||
| 219 | /* Completion status */ | ||
| 220 | int error; | ||
| 221 | |||
| 222 | /* When did we send it to the remote peer */ | ||
| 223 | long send_time; | ||
| 224 | |||
| 225 | /* My name is... | ||
| 226 | * Well, computers does not speak, they have unique id instead */ | ||
| 227 | dst_gen_t gen; | ||
| 228 | |||
| 229 | /* Block IO we are working with */ | ||
| 230 | struct bio *bio; | ||
| 231 | |||
| 232 | /* Network command for above block IO request */ | ||
| 233 | struct dst_cmd cmd; | ||
| 234 | }; | ||
| 235 | |||
| 236 | struct dst_crypto_engine | ||
| 237 | { | ||
| 238 | /* What should we do with all block requests */ | ||
| 239 | struct crypto_hash *hash; | ||
| 240 | struct crypto_ablkcipher *cipher; | ||
| 241 | |||
| 242 | /* Pool of pages used to encrypt data into before sending */ | ||
| 243 | int page_num; | ||
| 244 | struct page **pages; | ||
| 245 | |||
| 246 | /* What to do with current request */ | ||
| 247 | int enc; | ||
| 248 | /* Who we are and where do we go */ | ||
| 249 | struct scatterlist *src, *dst; | ||
| 250 | |||
| 251 | /* Maximum timeout waiting for encryption to be completed */ | ||
| 252 | long timeout; | ||
| 253 | /* IV is a 64-bit sequential counter */ | ||
| 254 | u64 iv; | ||
| 255 | |||
| 256 | /* Secret data */ | ||
| 257 | void *private; | ||
| 258 | |||
| 259 | /* Cached temporary data lives here */ | ||
| 260 | int size; | ||
| 261 | void *data; | ||
| 262 | }; | ||
| 263 | |||
| 264 | struct dst_state | ||
| 265 | { | ||
| 266 | /* The main state protection */ | ||
| 267 | struct mutex state_lock; | ||
| 268 | |||
| 269 | /* Polling machinery for sockets */ | ||
| 270 | wait_queue_t wait; | ||
| 271 | wait_queue_head_t *whead; | ||
| 272 | /* Most of events are being waited here */ | ||
| 273 | wait_queue_head_t thread_wait; | ||
| 274 | |||
| 275 | /* Who owns this? */ | ||
| 276 | struct dst_node *node; | ||
| 277 | |||
| 278 | /* Network address for this state */ | ||
| 279 | struct dst_network_ctl ctl; | ||
| 280 | |||
| 281 | /* Permissions to work with: read-only or rw connection */ | ||
| 282 | u32 permissions; | ||
| 283 | |||
| 284 | /* Called when we need to clean private data */ | ||
| 285 | void (* cleanup)(struct dst_state *st); | ||
| 286 | |||
| 287 | /* Used by the server: BIO completion queues BIOs here */ | ||
| 288 | struct list_head request_list; | ||
| 289 | spinlock_t request_lock; | ||
| 290 | |||
| 291 | /* Guess what? No, it is not number of planets */ | ||
| 292 | atomic_t refcnt; | ||
| 293 | |||
| 294 | /* This flags is set when connection should be dropped */ | ||
| 295 | int need_exit; | ||
| 296 | |||
| 297 | /* | ||
| 298 | * Socket to work with. Second pointer is used for | ||
| 299 | * lockless check if socket was changed before performing | ||
| 300 | * next action (like working with cached polling result) | ||
| 301 | */ | ||
| 302 | struct socket *socket, *read_socket; | ||
| 303 | |||
| 304 | /* Cached preallocated data */ | ||
| 305 | void *data; | ||
| 306 | unsigned int size; | ||
| 307 | |||
| 308 | /* Currently processed command */ | ||
| 309 | struct dst_cmd cmd; | ||
| 310 | }; | ||
| 311 | |||
| 312 | struct dst_info | ||
| 313 | { | ||
| 314 | /* Device size */ | ||
| 315 | u64 size; | ||
| 316 | |||
| 317 | /* Local device name for export devices */ | ||
| 318 | char local[DST_NAMELEN]; | ||
| 319 | |||
| 320 | /* Network setup */ | ||
| 321 | struct dst_network_ctl net; | ||
| 322 | |||
| 323 | /* Sysfs bits use this */ | ||
| 324 | struct device device; | ||
| 325 | }; | ||
| 326 | |||
| 327 | struct dst_node | ||
| 328 | { | ||
| 329 | struct list_head node_entry; | ||
| 330 | |||
| 331 | /* Hi, my name is stored here */ | ||
| 332 | char name[DST_NAMELEN]; | ||
| 333 | /* My cache name is stored here */ | ||
| 334 | char cache_name[DST_NAMELEN]; | ||
| 335 | |||
| 336 | /* Block device attached to given node. | ||
| 337 | * Only valid for exporting nodes */ | ||
| 338 | struct block_device *bdev; | ||
| 339 | /* Network state machine for given peer */ | ||
| 340 | struct dst_state *state; | ||
| 341 | |||
| 342 | /* Block IO machinery */ | ||
| 343 | struct request_queue *queue; | ||
| 344 | struct gendisk *disk; | ||
| 345 | |||
| 346 | /* Number of threads in processing pool */ | ||
| 347 | int thread_num; | ||
| 348 | /* Maximum number of pages in single IO */ | ||
| 349 | int max_pages; | ||
| 350 | |||
| 351 | /* I'm that big in bytes */ | ||
| 352 | loff_t size; | ||
| 353 | |||
| 354 | /* Exported to userspace node information */ | ||
| 355 | struct dst_info *info; | ||
| 356 | |||
| 357 | /* | ||
| 358 | * Security attribute list. | ||
| 359 | * Used only by exporting node currently. | ||
| 360 | */ | ||
| 361 | struct list_head security_list; | ||
| 362 | struct mutex security_lock; | ||
| 363 | |||
| 364 | /* | ||
| 365 | * When this unerflows below zero, university collapses. | ||
| 366 | * But this will not happen, since node will be freed, | ||
| 367 | * when reference counter reaches zero. | ||
| 368 | */ | ||
| 369 | atomic_t refcnt; | ||
| 370 | |||
| 371 | /* How precisely should I be started? */ | ||
| 372 | int (*start)(struct dst_node *); | ||
| 373 | |||
| 374 | /* Crypto capabilities */ | ||
| 375 | struct dst_crypto_ctl crypto; | ||
| 376 | u8 *hash_key; | ||
| 377 | u8 *cipher_key; | ||
| 378 | |||
| 379 | /* Pool of processing thread */ | ||
| 380 | struct thread_pool *pool; | ||
| 381 | |||
| 382 | /* Transaction IDs live here */ | ||
| 383 | atomic_long_t gen; | ||
| 384 | |||
| 385 | /* | ||
| 386 | * How frequently and how many times transaction | ||
| 387 | * tree should be scanned to drop stale objects. | ||
| 388 | */ | ||
| 389 | long trans_scan_timeout; | ||
| 390 | int trans_max_retries; | ||
| 391 | |||
| 392 | /* Small gnomes live here */ | ||
| 393 | struct rb_root trans_root; | ||
| 394 | struct mutex trans_lock; | ||
| 395 | |||
| 396 | /* | ||
| 397 | * Transaction cache/memory pool. | ||
| 398 | * It is big enough to contain not only transaction | ||
| 399 | * itself, but additional crypto data (digest/hmac). | ||
| 400 | */ | ||
| 401 | struct kmem_cache *trans_cache; | ||
| 402 | mempool_t *trans_pool; | ||
| 403 | |||
| 404 | /* This entity scans transaction tree */ | ||
| 405 | struct delayed_work trans_work; | ||
| 406 | |||
| 407 | wait_queue_head_t wait; | ||
| 408 | }; | ||
| 409 | |||
| 410 | /* Kernel representation of the security attribute */ | ||
| 411 | struct dst_secure | ||
| 412 | { | ||
| 413 | struct list_head sec_entry; | ||
| 414 | struct dst_secure_user sec; | ||
| 415 | }; | ||
| 416 | |||
| 417 | int dst_process_bio(struct dst_node *n, struct bio *bio); | ||
| 418 | |||
| 419 | int dst_node_init_connected(struct dst_node *n, struct dst_network_ctl *r); | ||
| 420 | int dst_node_init_listened(struct dst_node *n, struct dst_export_ctl *le); | ||
| 421 | |||
| 422 | static inline struct dst_state *dst_state_get(struct dst_state *st) | ||
| 423 | { | ||
| 424 | BUG_ON(atomic_read(&st->refcnt) == 0); | ||
| 425 | atomic_inc(&st->refcnt); | ||
| 426 | return st; | ||
| 427 | } | ||
| 428 | |||
| 429 | void dst_state_put(struct dst_state *st); | ||
| 430 | |||
| 431 | struct dst_state *dst_state_alloc(struct dst_node *n); | ||
| 432 | int dst_state_socket_create(struct dst_state *st); | ||
| 433 | void dst_state_socket_release(struct dst_state *st); | ||
| 434 | |||
| 435 | void dst_state_exit_connected(struct dst_state *st); | ||
| 436 | |||
| 437 | int dst_state_schedule_receiver(struct dst_state *st); | ||
| 438 | |||
| 439 | void dst_dump_addr(struct socket *sk, struct sockaddr *sa, char *str); | ||
| 440 | |||
| 441 | static inline void dst_state_lock(struct dst_state *st) | ||
| 442 | { | ||
| 443 | mutex_lock(&st->state_lock); | ||
| 444 | } | ||
| 445 | |||
| 446 | static inline void dst_state_unlock(struct dst_state *st) | ||
| 447 | { | ||
| 448 | mutex_unlock(&st->state_lock); | ||
| 449 | } | ||
| 450 | |||
| 451 | void dst_poll_exit(struct dst_state *st); | ||
| 452 | int dst_poll_init(struct dst_state *st); | ||
| 453 | |||
| 454 | static inline unsigned int dst_state_poll(struct dst_state *st) | ||
| 455 | { | ||
| 456 | unsigned int revents = POLLHUP | POLLERR; | ||
| 457 | |||
| 458 | dst_state_lock(st); | ||
| 459 | if (st->socket) | ||
| 460 | revents = st->socket->ops->poll(NULL, st->socket, NULL); | ||
| 461 | dst_state_unlock(st); | ||
| 462 | |||
| 463 | return revents; | ||
| 464 | } | ||
| 465 | |||
| 466 | static inline int dst_thread_setup(void *private, void *data) | ||
| 467 | { | ||
| 468 | return 0; | ||
| 469 | } | ||
| 470 | |||
| 471 | void dst_node_put(struct dst_node *n); | ||
| 472 | |||
| 473 | static inline struct dst_node *dst_node_get(struct dst_node *n) | ||
| 474 | { | ||
| 475 | atomic_inc(&n->refcnt); | ||
| 476 | return n; | ||
| 477 | } | ||
| 478 | |||
| 479 | int dst_data_recv(struct dst_state *st, void *data, unsigned int size); | ||
| 480 | int dst_recv_cdata(struct dst_state *st, void *cdata); | ||
| 481 | int dst_data_send_header(struct socket *sock, | ||
| 482 | void *data, unsigned int size, int more); | ||
| 483 | |||
| 484 | int dst_send_bio(struct dst_state *st, struct dst_cmd *cmd, struct bio *bio); | ||
| 485 | |||
| 486 | int dst_process_io(struct dst_state *st); | ||
| 487 | int dst_export_crypto(struct dst_node *n, struct bio *bio); | ||
| 488 | int dst_export_send_bio(struct bio *bio); | ||
| 489 | int dst_start_export(struct dst_node *n); | ||
| 490 | |||
| 491 | int __init dst_export_init(void); | ||
| 492 | void dst_export_exit(void); | ||
| 493 | |||
| 494 | /* Private structure for export block IO requests */ | ||
| 495 | struct dst_export_priv | ||
| 496 | { | ||
| 497 | struct list_head request_entry; | ||
| 498 | struct dst_state *state; | ||
| 499 | struct bio *bio; | ||
| 500 | struct dst_cmd cmd; | ||
| 501 | }; | ||
| 502 | |||
| 503 | static inline void dst_trans_get(struct dst_trans *t) | ||
| 504 | { | ||
| 505 | atomic_inc(&t->refcnt); | ||
| 506 | } | ||
| 507 | |||
| 508 | struct dst_trans *dst_trans_search(struct dst_node *node, dst_gen_t gen); | ||
| 509 | int dst_trans_remove(struct dst_trans *t); | ||
| 510 | int dst_trans_remove_nolock(struct dst_trans *t); | ||
| 511 | void dst_trans_put(struct dst_trans *t); | ||
| 512 | |||
| 513 | /* | ||
| 514 | * Convert bio into network command. | ||
| 515 | */ | ||
| 516 | static inline void dst_bio_to_cmd(struct bio *bio, struct dst_cmd *cmd, | ||
| 517 | u32 command, u64 id) | ||
| 518 | { | ||
| 519 | cmd->cmd = command; | ||
| 520 | cmd->flags = (bio->bi_flags << BIO_POOL_BITS) >> BIO_POOL_BITS; | ||
| 521 | cmd->rw = bio->bi_rw; | ||
| 522 | cmd->size = bio->bi_size; | ||
| 523 | cmd->csize = 0; | ||
| 524 | cmd->id = id; | ||
| 525 | cmd->sector = bio->bi_sector; | ||
| 526 | }; | ||
| 527 | |||
| 528 | int dst_trans_send(struct dst_trans *t); | ||
| 529 | int dst_trans_crypto(struct dst_trans *t); | ||
| 530 | |||
| 531 | int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl); | ||
| 532 | void dst_node_crypto_exit(struct dst_node *n); | ||
| 533 | |||
| 534 | static inline int dst_need_crypto(struct dst_node *n) | ||
| 535 | { | ||
| 536 | struct dst_crypto_ctl *c = &n->crypto; | ||
| 537 | /* | ||
| 538 | * Logical OR is appropriate here, but boolean one produces | ||
| 539 | * more optimal code, so it is used instead. | ||
| 540 | */ | ||
| 541 | return (c->hash_algo[0] | c->cipher_algo[0]); | ||
| 542 | } | ||
| 543 | |||
| 544 | int dst_node_trans_init(struct dst_node *n, unsigned int size); | ||
| 545 | void dst_node_trans_exit(struct dst_node *n); | ||
| 546 | |||
| 547 | /* | ||
| 548 | * Pool of threads. | ||
| 549 | * Ready list contains threads currently free to be used, | ||
| 550 | * active one contains threads with some work scheduled for them. | ||
| 551 | * Caller can wait in given queue when thread is ready. | ||
| 552 | */ | ||
| 553 | struct thread_pool | ||
| 554 | { | ||
| 555 | int thread_num; | ||
| 556 | struct mutex thread_lock; | ||
| 557 | struct list_head ready_list, active_list; | ||
| 558 | |||
| 559 | wait_queue_head_t wait; | ||
| 560 | }; | ||
| 561 | |||
| 562 | void thread_pool_del_worker(struct thread_pool *p); | ||
| 563 | void thread_pool_del_worker_id(struct thread_pool *p, unsigned int id); | ||
| 564 | int thread_pool_add_worker(struct thread_pool *p, | ||
| 565 | char *name, | ||
| 566 | unsigned int id, | ||
| 567 | void *(* init)(void *data), | ||
| 568 | void (* cleanup)(void *data), | ||
| 569 | void *data); | ||
| 570 | |||
| 571 | void thread_pool_destroy(struct thread_pool *p); | ||
| 572 | struct thread_pool *thread_pool_create(int num, char *name, | ||
| 573 | void *(* init)(void *data), | ||
| 574 | void (* cleanup)(void *data), | ||
| 575 | void *data); | ||
| 576 | |||
| 577 | int thread_pool_schedule(struct thread_pool *p, | ||
| 578 | int (* setup)(void *stored_private, void *setup_data), | ||
| 579 | int (* action)(void *stored_private, void *setup_data), | ||
| 580 | void *setup_data, long timeout); | ||
| 581 | int thread_pool_schedule_private(struct thread_pool *p, | ||
| 582 | int (* setup)(void *private, void *data), | ||
| 583 | int (* action)(void *private, void *data), | ||
| 584 | void *data, long timeout, void *id); | ||
| 585 | |||
| 586 | #endif /* __KERNEL__ */ | ||
| 587 | #endif /* __DST_H */ | ||
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 90a4ed0ea0e5..0cc4d55151b7 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
| @@ -361,7 +361,7 @@ typedef struct elf64_shdr { | |||
| 361 | #define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ | 361 | #define NT_PPC_VSX 0x102 /* PowerPC VSX registers */ |
| 362 | #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ | 362 | #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ |
| 363 | #define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ | 363 | #define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */ |
| 364 | #define NT_PRXSTATUS 0x300 /* s390 upper register halves */ | 364 | #define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */ |
| 365 | 365 | ||
| 366 | 366 | ||
| 367 | /* Note header in a PT_NOTE section */ | 367 | /* Note header in a PT_NOTE section */ |
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h
index f07f34de2f0e..258088ab3c6b 100644
--- a/include/linux/ext3_fs_sb.h
+++ b/include/linux/ext3_fs_sb.h
| @@ -72,6 +72,8 @@ struct ext3_sb_info { | |||
| 72 | struct inode * s_journal_inode; | 72 | struct inode * s_journal_inode; |
| 73 | struct journal_s * s_journal; | 73 | struct journal_s * s_journal; |
| 74 | struct list_head s_orphan; | 74 | struct list_head s_orphan; |
| 75 | struct mutex s_orphan_lock; | ||
| 76 | struct mutex s_resize_lock; | ||
| 75 | unsigned long s_commit_interval; | 77 | unsigned long s_commit_interval; |
| 76 | struct block_device *journal_bdev; | 78 | struct block_device *journal_bdev; |
| 77 | #ifdef CONFIG_JBD_DEBUG | 79 | #ifdef CONFIG_JBD_DEBUG |
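The two new mutexes give ext3 dedicated locks for the orphan list and for online resize instead of relying on the global superblock lock. A rough, hypothetical sketch of how orphan-list code would take the new lock (the surrounding statements are illustrative, not part of this patch):

	struct ext3_sb_info *sbi = EXT3_SB(inode->i_sb);

	mutex_lock(&sbi->s_orphan_lock);
	list_add(&EXT3_I(inode)->i_orphan, &sbi->s_orphan);
	mutex_unlock(&sbi->s_orphan_lock);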
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h
index cf82d519be40..d7b5ddca99c2 100644
--- a/include/linux/ext3_jbd.h
+++ b/include/linux/ext3_jbd.h
| @@ -44,13 +44,13 @@ | |||
| 44 | 44 | ||
| 45 | #define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \ | 45 | #define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \ |
| 46 | EXT3_XATTR_TRANS_BLOCKS - 2 + \ | 46 | EXT3_XATTR_TRANS_BLOCKS - 2 + \ |
| 47 | 2*EXT3_QUOTA_TRANS_BLOCKS(sb)) | 47 | EXT3_MAXQUOTAS_TRANS_BLOCKS(sb)) |
| 48 | 48 | ||
| 49 | /* Delete operations potentially hit one directory's namespace plus an | 49 | /* Delete operations potentially hit one directory's namespace plus an |
| 50 | * entire inode, plus arbitrary amounts of bitmap/indirection data. Be | 50 | * entire inode, plus arbitrary amounts of bitmap/indirection data. Be |
| 51 | * generous. We can grow the delete transaction later if necessary. */ | 51 | * generous. We can grow the delete transaction later if necessary. */ |
| 52 | 52 | ||
| 53 | #define EXT3_DELETE_TRANS_BLOCKS(sb) (2 * EXT3_DATA_TRANS_BLOCKS(sb) + 64) | 53 | #define EXT3_DELETE_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64) |
| 54 | 54 | ||
| 55 | /* Define an arbitrary limit for the amount of data we will anticipate | 55 | /* Define an arbitrary limit for the amount of data we will anticipate |
| 56 | * writing to any given transaction. For unbounded transactions such as | 56 | * writing to any given transaction. For unbounded transactions such as |
| @@ -86,6 +86,9 @@ | |||
| 86 | #define EXT3_QUOTA_INIT_BLOCKS(sb) 0 | 86 | #define EXT3_QUOTA_INIT_BLOCKS(sb) 0 |
| 87 | #define EXT3_QUOTA_DEL_BLOCKS(sb) 0 | 87 | #define EXT3_QUOTA_DEL_BLOCKS(sb) 0 |
| 88 | #endif | 88 | #endif |
| 89 | #define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb)) | ||
| 90 | #define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb)) | ||
| 91 | #define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb)) | ||
| 89 | 92 | ||
| 90 | int | 93 | int |
| 91 | ext3_mark_iloc_dirty(handle_t *handle, | 94 | ext3_mark_iloc_dirty(handle_t *handle, |
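The new EXT3_MAXQUOTAS_* helpers simply multiply the per-quota-type cost by MAXQUOTAS, so a single reservation covers every quota type that may be touched, and EXT3_DATA_TRANS_BLOCKS/EXT3_DELETE_TRANS_BLOCKS are rewritten in terms of them. A hedged sketch of reserving credits with the new macros (the call site is illustrative, not taken from this patch):

	handle_t *handle;

	handle = ext3_journal_start(inode,
			EXT3_DELETE_TRANS_BLOCKS(inode->i_sb) +
			EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb));
	if (IS_ERR(handle))
		return PTR_ERR(handle);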
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cca191933ff6..9147ca88f253 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
| @@ -1624,8 +1624,6 @@ struct super_operations { | |||
| 1624 | * on the bit address once it is done. | 1624 | * on the bit address once it is done. |
| 1625 | * | 1625 | * |
| 1626 | * Q: What is the difference between I_WILL_FREE and I_FREEING? | 1626 | * Q: What is the difference between I_WILL_FREE and I_FREEING? |
| 1627 | * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on | ||
| 1628 | * I_CLEAR? If not, why? | ||
| 1629 | */ | 1627 | */ |
| 1630 | #define I_DIRTY_SYNC 1 | 1628 | #define I_DIRTY_SYNC 1 |
| 1631 | #define I_DIRTY_DATASYNC 2 | 1629 | #define I_DIRTY_DATASYNC 2 |
| @@ -2299,6 +2297,7 @@ extern const struct inode_operations page_symlink_inode_operations; | |||
| 2299 | extern int generic_readlink(struct dentry *, char __user *, int); | 2297 | extern int generic_readlink(struct dentry *, char __user *, int); |
| 2300 | extern void generic_fillattr(struct inode *, struct kstat *); | 2298 | extern void generic_fillattr(struct inode *, struct kstat *); |
| 2301 | extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 2299 | extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
| 2300 | void __inode_add_bytes(struct inode *inode, loff_t bytes); | ||
| 2302 | void inode_add_bytes(struct inode *inode, loff_t bytes); | 2301 | void inode_add_bytes(struct inode *inode, loff_t bytes); |
| 2303 | void inode_sub_bytes(struct inode *inode, loff_t bytes); | 2302 | void inode_sub_bytes(struct inode *inode, loff_t bytes); |
| 2304 | loff_t inode_get_bytes(struct inode *inode); | 2303 | loff_t inode_get_bytes(struct inode *inode); |
| @@ -2464,5 +2463,8 @@ int proc_nr_files(struct ctl_table *table, int write, | |||
| 2464 | 2463 | ||
| 2465 | int __init get_filesystem_list(char *buf); | 2464 | int __init get_filesystem_list(char *buf); |
| 2466 | 2465 | ||
| 2466 | #define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE]) | ||
| 2467 | #define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE)) | ||
| 2468 | |||
| 2467 | #endif /* __KERNEL__ */ | 2469 | #endif /* __KERNEL__ */ |
| 2468 | #endif /* _LINUX_FS_H */ | 2470 | #endif /* _LINUX_FS_H */ |
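OPEN_FMODE() turns the O_ACCMODE bits of an open(2) flag word into FMODE_READ/FMODE_WRITE bits, while ACC_MODE() is a lookup table over the same two bits (its leading \000 entry suggests it expects a flag value already adjusted by the open path's usual "flag + 1" trick). A small worked illustration of OPEN_FMODE, assuming the conventional values O_RDONLY=0, O_WRONLY=1, O_RDWR=2, O_ACCMODE=3, FMODE_READ=1 and FMODE_WRITE=2:

	OPEN_FMODE(O_RDONLY) == (0 + 1) & 3 == 1	/* FMODE_READ */
	OPEN_FMODE(O_WRONLY) == (1 + 1) & 3 == 2	/* FMODE_WRITE */
	OPEN_FMODE(O_RDWR)   == (2 + 1) & 3 == 3	/* FMODE_READ | FMODE_WRITE */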
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h
index ad6bdf5a5970..3d44e9c65a8e 100644
--- a/include/linux/kfifo.h
+++ b/include/linux/kfifo.h
| @@ -1,6 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * A simple kernel FIFO implementation. | 2 | * A generic kernel FIFO implementation. |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net> | ||
| 4 | * Copyright (C) 2004 Stelian Pop <stelian@popies.net> | 5 | * Copyright (C) 2004 Stelian Pop <stelian@popies.net> |
| 5 | * | 6 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
| @@ -18,6 +19,25 @@ | |||
| 18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 19 | * | 20 | * |
| 20 | */ | 21 | */ |
| 22 | |||
| 23 | /* | ||
| 24 | * Howto porting drivers to the new generic fifo API: | ||
| 25 | * | ||
| 26 | * - Modify the declaration of the "struct kfifo *" object into an | ||
| 27 | * in-place "struct kfifo" object | ||
| 28 | * - Init the in-place object with kfifo_alloc() or kfifo_init() | ||
| 29 | * Note: The address of the in-place "struct kfifo" object must be | ||
| 30 | * passed as the first argument to these functions | ||
| 31 | * - Replace uses of __kfifo_put with kfifo_in and __kfifo_get | ||
| 32 | * with kfifo_out | ||
| 33 | * - Replace uses of kfifo_put with kfifo_in_locked and kfifo_get | ||
| 34 | * with kfifo_out_locked | ||
| 35 | * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc | ||
| 36 | * must now be passed to kfifo_in_locked and kfifo_out_locked | ||
| 37 | * as the last parameter. | ||
| 38 | * - All formerly named __kfifo_* functions have been renamed to kfifo_* | ||
| 39 | */ | ||
| 40 | |||
| 21 | #ifndef _LINUX_KFIFO_H | 41 | #ifndef _LINUX_KFIFO_H |
| 22 | #define _LINUX_KFIFO_H | 42 | #define _LINUX_KFIFO_H |
| 23 | 43 | ||
| @@ -29,124 +49,562 @@ struct kfifo { | |||
| 29 | unsigned int size; /* the size of the allocated buffer */ | 49 | unsigned int size; /* the size of the allocated buffer */ |
| 30 | unsigned int in; /* data is added at offset (in % size) */ | 50 | unsigned int in; /* data is added at offset (in % size) */ |
| 31 | unsigned int out; /* data is extracted from off. (out % size) */ | 51 | unsigned int out; /* data is extracted from off. (out % size) */ |
| 32 | spinlock_t *lock; /* protects concurrent modifications */ | ||
| 33 | }; | 52 | }; |
| 34 | 53 | ||
| 35 | extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, | 54 | /* |
| 36 | gfp_t gfp_mask, spinlock_t *lock); | 55 | * Macros for declaration and initialization of the kfifo datatype |
| 37 | extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, | 56 | */ |
| 38 | spinlock_t *lock); | 57 | |
| 58 | /* helper macro */ | ||
| 59 | #define __kfifo_initializer(s, b) \ | ||
| 60 | (struct kfifo) { \ | ||
| 61 | .size = s, \ | ||
| 62 | .in = 0, \ | ||
| 63 | .out = 0, \ | ||
| 64 | .buffer = b \ | ||
| 65 | } | ||
| 66 | |||
| 67 | /** | ||
| 68 | * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer | ||
| 69 | * @name: name of the declared kfifo datatype | ||
| 70 | * @size: size of the fifo buffer | ||
| 71 | * | ||
| 72 | * Note1: the macro can be used inside struct or union declaration | ||
| 73 | * Note2: the macro creates two objects: | ||
| 74 | * A kfifo object with the given name and a buffer for the kfifo | ||
| 75 | * object named name##kfifo_buffer | ||
| 76 | */ | ||
| 77 | #define DECLARE_KFIFO(name, size) \ | ||
| 78 | union { \ | ||
| 79 | struct kfifo name; \ | ||
| 80 | unsigned char name##kfifo_buffer[size + sizeof(struct kfifo)]; \ | ||
| 81 | } | ||
| 82 | |||
| 83 | /** | ||
| 84 | * INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO | ||
| 85 | * @name: name of the declared kfifo datatype | ||
| 86 | */ | ||
| 87 | #define INIT_KFIFO(name) \ | ||
| 88 | name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \ | ||
| 89 | sizeof(struct kfifo), name##kfifo_buffer) | ||
| 90 | |||
| 91 | /** | ||
| 92 | * DEFINE_KFIFO - macro to define and initialize a kfifo | ||
| 93 | * @name: name of the declared kfifo datatype | ||
| 94 | * @size: size of the fifo buffer | ||
| 95 | * | ||
| 96 | * Note1: the macro can be used for global and local kfifo data type variables | ||
| 97 | * Note2: the macro creates two objects: | ||
| 98 | * A kfifo object with the given name and a buffer for the kfifo | ||
| 99 | * object named name##kfifo_buffer | ||
| 100 | */ | ||
| 101 | #define DEFINE_KFIFO(name, size) \ | ||
| 102 | unsigned char name##kfifo_buffer[size]; \ | ||
| 103 | struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer) | ||
| 104 | |||
| 105 | #undef __kfifo_initializer | ||
| 106 | |||
| 107 | extern void kfifo_init(struct kfifo *fifo, unsigned char *buffer, | ||
| 108 | unsigned int size); | ||
| 109 | extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size, | ||
| 110 | gfp_t gfp_mask); | ||
| 39 | extern void kfifo_free(struct kfifo *fifo); | 111 | extern void kfifo_free(struct kfifo *fifo); |
| 40 | extern unsigned int __kfifo_put(struct kfifo *fifo, | 112 | extern unsigned int kfifo_in(struct kfifo *fifo, |
| 41 | const unsigned char *buffer, unsigned int len); | 113 | const unsigned char *from, unsigned int len); |
| 42 | extern unsigned int __kfifo_get(struct kfifo *fifo, | 114 | extern __must_check unsigned int kfifo_out(struct kfifo *fifo, |
| 43 | unsigned char *buffer, unsigned int len); | 115 | unsigned char *to, unsigned int len); |
| 44 | 116 | ||
| 45 | /** | 117 | /** |
| 46 | * __kfifo_reset - removes the entire FIFO contents, no locking version | 118 | * kfifo_reset - removes the entire FIFO contents |
| 47 | * @fifo: the fifo to be emptied. | 119 | * @fifo: the fifo to be emptied. |
| 48 | */ | 120 | */ |
| 49 | static inline void __kfifo_reset(struct kfifo *fifo) | 121 | static inline void kfifo_reset(struct kfifo *fifo) |
| 50 | { | 122 | { |
| 51 | fifo->in = fifo->out = 0; | 123 | fifo->in = fifo->out = 0; |
| 52 | } | 124 | } |
| 53 | 125 | ||
| 54 | /** | 126 | /** |
| 55 | * kfifo_reset - removes the entire FIFO contents | 127 | * kfifo_reset_out - skip FIFO contents |
| 56 | * @fifo: the fifo to be emptied. | 128 | * @fifo: the fifo to be emptied. |
| 57 | */ | 129 | */ |
| 58 | static inline void kfifo_reset(struct kfifo *fifo) | 130 | static inline void kfifo_reset_out(struct kfifo *fifo) |
| 59 | { | 131 | { |
| 60 | unsigned long flags; | 132 | smp_mb(); |
| 133 | fifo->out = fifo->in; | ||
| 134 | } | ||
| 61 | 135 | ||
| 62 | spin_lock_irqsave(fifo->lock, flags); | 136 | /** |
| 137 | * kfifo_size - returns the size of the fifo in bytes | ||
| 138 | * @fifo: the fifo to be used. | ||
| 139 | */ | ||
| 140 | static inline __must_check unsigned int kfifo_size(struct kfifo *fifo) | ||
| 141 | { | ||
| 142 | return fifo->size; | ||
| 143 | } | ||
| 63 | 144 | ||
| 64 | __kfifo_reset(fifo); | 145 | /** |
| 146 | * kfifo_len - returns the number of used bytes in the FIFO | ||
| 147 | * @fifo: the fifo to be used. | ||
| 148 | */ | ||
| 149 | static inline unsigned int kfifo_len(struct kfifo *fifo) | ||
| 150 | { | ||
| 151 | register unsigned int out; | ||
| 65 | 152 | ||
| 66 | spin_unlock_irqrestore(fifo->lock, flags); | 153 | out = fifo->out; |
| 154 | smp_rmb(); | ||
| 155 | return fifo->in - out; | ||
| 67 | } | 156 | } |
| 68 | 157 | ||
| 69 | /** | 158 | /** |
| 70 | * kfifo_put - puts some data into the FIFO | 159 | * kfifo_is_empty - returns true if the fifo is empty |
| 71 | * @fifo: the fifo to be used. | 160 | * @fifo: the fifo to be used. |
| 72 | * @buffer: the data to be added. | 161 | */ |
| 73 | * @len: the length of the data to be added. | 162 | static inline __must_check int kfifo_is_empty(struct kfifo *fifo) |
| 163 | { | ||
| 164 | return fifo->in == fifo->out; | ||
| 165 | } | ||
| 166 | |||
| 167 | /** | ||
| 168 | * kfifo_is_full - returns true if the fifo is full | ||
| 169 | * @fifo: the fifo to be used. | ||
| 170 | */ | ||
| 171 | static inline __must_check int kfifo_is_full(struct kfifo *fifo) | ||
| 172 | { | ||
| 173 | return kfifo_len(fifo) == kfifo_size(fifo); | ||
| 174 | } | ||
| 175 | |||
| 176 | /** | ||
| 177 | * kfifo_avail - returns the number of bytes available in the FIFO | ||
| 178 | * @fifo: the fifo to be used. | ||
| 179 | */ | ||
| 180 | static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo) | ||
| 181 | { | ||
| 182 | return kfifo_size(fifo) - kfifo_len(fifo); | ||
| 183 | } | ||
| 184 | |||
| 185 | /** | ||
| 186 | * kfifo_in_locked - puts some data into the FIFO using a spinlock for locking | ||
| 187 | * @fifo: the fifo to be used. | ||
| 188 | * @from: the data to be added. | ||
| 189 | * @n: the length of the data to be added. | ||
| 190 | * @lock: pointer to the spinlock to use for locking. | ||
| 74 | * | 191 | * |
| 75 | * This function copies at most @len bytes from the @buffer into | 192 | * This function copies at most @len bytes from the @from buffer into |
| 76 | * the FIFO depending on the free space, and returns the number of | 193 | * the FIFO depending on the free space, and returns the number of |
| 77 | * bytes copied. | 194 | * bytes copied. |
| 78 | */ | 195 | */ |
| 79 | static inline unsigned int kfifo_put(struct kfifo *fifo, | 196 | static inline unsigned int kfifo_in_locked(struct kfifo *fifo, |
| 80 | const unsigned char *buffer, unsigned int len) | 197 | const unsigned char *from, unsigned int n, spinlock_t *lock) |
| 81 | { | 198 | { |
| 82 | unsigned long flags; | 199 | unsigned long flags; |
| 83 | unsigned int ret; | 200 | unsigned int ret; |
| 84 | 201 | ||
| 85 | spin_lock_irqsave(fifo->lock, flags); | 202 | spin_lock_irqsave(lock, flags); |
| 86 | 203 | ||
| 87 | ret = __kfifo_put(fifo, buffer, len); | 204 | ret = kfifo_in(fifo, from, n); |
| 88 | 205 | ||
| 89 | spin_unlock_irqrestore(fifo->lock, flags); | 206 | spin_unlock_irqrestore(lock, flags); |
| 90 | 207 | ||
| 91 | return ret; | 208 | return ret; |
| 92 | } | 209 | } |
| 93 | 210 | ||
| 94 | /** | 211 | /** |
| 95 | * kfifo_get - gets some data from the FIFO | 212 | * kfifo_out_locked - gets some data from the FIFO using a spinlock for locking |
| 96 | * @fifo: the fifo to be used. | 213 | * @fifo: the fifo to be used. |
| 97 | * @buffer: where the data must be copied. | 214 | * @to: where the data must be copied. |
| 98 | * @len: the size of the destination buffer. | 215 | * @n: the size of the destination buffer. |
| 216 | * @lock: pointer to the spinlock to use for locking. | ||
| 99 | * | 217 | * |
| 100 | * This function copies at most @len bytes from the FIFO into the | 218 | * This function copies at most @len bytes from the FIFO into the |
| 101 | * @buffer and returns the number of copied bytes. | 219 | * @to buffer and returns the number of copied bytes. |
| 102 | */ | 220 | */ |
| 103 | static inline unsigned int kfifo_get(struct kfifo *fifo, | 221 | static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo, |
| 104 | unsigned char *buffer, unsigned int len) | 222 | unsigned char *to, unsigned int n, spinlock_t *lock) |
| 105 | { | 223 | { |
| 106 | unsigned long flags; | 224 | unsigned long flags; |
| 107 | unsigned int ret; | 225 | unsigned int ret; |
| 108 | 226 | ||
| 109 | spin_lock_irqsave(fifo->lock, flags); | 227 | spin_lock_irqsave(lock, flags); |
| 110 | 228 | ||
| 111 | ret = __kfifo_get(fifo, buffer, len); | 229 | ret = kfifo_out(fifo, to, n); |
| 112 | 230 | ||
| 113 | /* | 231 | /* |
| 114 | * optimization: if the FIFO is empty, set the indices to 0 | 232 | * optimization: if the FIFO is empty, set the indices to 0 |
| 115 | * so we don't wrap the next time | 233 | * so we don't wrap the next time |
| 116 | */ | 234 | */ |
| 117 | if (fifo->in == fifo->out) | 235 | if (kfifo_is_empty(fifo)) |
| 118 | fifo->in = fifo->out = 0; | 236 | kfifo_reset(fifo); |
| 237 | |||
| 238 | spin_unlock_irqrestore(lock, flags); | ||
| 239 | |||
| 240 | return ret; | ||
| 241 | } | ||
| 242 | |||
| 243 | extern void kfifo_skip(struct kfifo *fifo, unsigned int len); | ||
| 244 | |||
| 245 | extern __must_check unsigned int kfifo_from_user(struct kfifo *fifo, | ||
| 246 | const void __user *from, unsigned int n); | ||
| 247 | |||
| 248 | extern __must_check unsigned int kfifo_to_user(struct kfifo *fifo, | ||
| 249 | void __user *to, unsigned int n); | ||
| 250 | |||
| 251 | /* | ||
| 252 | * __kfifo_add_out internal helper function for updating the out offset | ||
| 253 | */ | ||
| 254 | static inline void __kfifo_add_out(struct kfifo *fifo, | ||
| 255 | unsigned int off) | ||
| 256 | { | ||
| 257 | smp_mb(); | ||
| 258 | fifo->out += off; | ||
| 259 | } | ||
| 260 | |||
| 261 | /* | ||
| 262 | * __kfifo_add_in internal helper function for updating the in offset | ||
| 263 | */ | ||
| 264 | static inline void __kfifo_add_in(struct kfifo *fifo, | ||
| 265 | unsigned int off) | ||
| 266 | { | ||
| 267 | smp_wmb(); | ||
| 268 | fifo->in += off; | ||
| 269 | } | ||
| 270 | |||
| 271 | /* | ||
| 272 | * __kfifo_off internal helper function for calculating the index of a | ||
| 273 | * given offset | ||
| 274 | */ | ||
| 275 | static inline unsigned int __kfifo_off(struct kfifo *fifo, unsigned int off) | ||
| 276 | { | ||
| 277 | return off & (fifo->size - 1); | ||
| 278 | } | ||
| 279 | |||
| 280 | /* | ||
| 282 | * __kfifo_peek_n internal helper function for determining the length of | ||
| 282 | * the next record in the fifo | ||
| 283 | */ | ||
| 284 | static inline unsigned int __kfifo_peek_n(struct kfifo *fifo, | ||
| 285 | unsigned int recsize) | ||
| 286 | { | ||
| 287 | #define __KFIFO_GET(fifo, off, shift) \ | ||
| 288 | ((fifo)->buffer[__kfifo_off((fifo), (fifo)->out+(off))] << (shift)) | ||
| 289 | |||
| 290 | unsigned int l; | ||
| 291 | |||
| 292 | l = __KFIFO_GET(fifo, 0, 0); | ||
| 293 | |||
| 294 | if (--recsize) | ||
| 295 | l |= __KFIFO_GET(fifo, 1, 8); | ||
| 296 | |||
| 297 | return l; | ||
| 298 | #undef __KFIFO_GET | ||
| 299 | } | ||
| 300 | |||
| 301 | /* | ||
| 302 | * __kfifo_poke_n internal helper function for storing the length of | ||
| 303 | * the next record into the fifo | ||
| 304 | */ | ||
| 305 | static inline void __kfifo_poke_n(struct kfifo *fifo, | ||
| 306 | unsigned int recsize, unsigned int n) | ||
| 307 | { | ||
| 308 | #define __KFIFO_PUT(fifo, off, val, shift) \ | ||
| 309 | ( \ | ||
| 310 | (fifo)->buffer[__kfifo_off((fifo), (fifo)->in+(off))] = \ | ||
| 311 | (unsigned char)((val) >> (shift)) \ | ||
| 312 | ) | ||
| 119 | 313 | ||
| 120 | spin_unlock_irqrestore(fifo->lock, flags); | 314 | __KFIFO_PUT(fifo, 0, n, 0); |
| 121 | 315 | ||
| 316 | if (--recsize) | ||
| 317 | __KFIFO_PUT(fifo, 1, n, 8); | ||
| 318 | #undef __KFIFO_PUT | ||
| 319 | } | ||
| 320 | |||
| 321 | /* | ||
| 322 | * __kfifo_in_... internal functions for putting data into the fifo | ||
| 323 | * do not call it directly, use kfifo_in_rec() instead | ||
| 324 | */ | ||
| 325 | extern unsigned int __kfifo_in_n(struct kfifo *fifo, | ||
| 326 | const void *from, unsigned int n, unsigned int recsize); | ||
| 327 | |||
| 328 | extern unsigned int __kfifo_in_generic(struct kfifo *fifo, | ||
| 329 | const void *from, unsigned int n, unsigned int recsize); | ||
| 330 | |||
| 331 | static inline unsigned int __kfifo_in_rec(struct kfifo *fifo, | ||
| 332 | const void *from, unsigned int n, unsigned int recsize) | ||
| 333 | { | ||
| 334 | unsigned int ret; | ||
| 335 | |||
| 336 | ret = __kfifo_in_n(fifo, from, n, recsize); | ||
| 337 | |||
| 338 | if (likely(ret == 0)) { | ||
| 339 | if (recsize) | ||
| 340 | __kfifo_poke_n(fifo, recsize, n); | ||
| 341 | __kfifo_add_in(fifo, n + recsize); | ||
| 342 | } | ||
| 122 | return ret; | 343 | return ret; |
| 123 | } | 344 | } |
| 124 | 345 | ||
| 125 | /** | 346 | /** |
| 126 | * __kfifo_len - returns the number of bytes available in the FIFO, no locking version | 347 | * kfifo_in_rec - puts some record data into the FIFO |
| 127 | * @fifo: the fifo to be used. | 348 | * @fifo: the fifo to be used. |
| 349 | * @from: the data to be added. | ||
| 350 | * @n: the length of the data to be added. | ||
| 351 | * @recsize: size of record field | ||
| 352 | * | ||
| 353 | * This function copies @n bytes from the @from into the FIFO and returns | ||
| 354 | * the number of bytes which cannot be copied. | ||
| 355 | * A returned value greater than the @n value means that the record doesn't | ||
| 356 | * fit into the buffer. | ||
| 357 | * | ||
| 358 | * Note that with only one concurrent reader and one concurrent | ||
| 359 | * writer, you don't need extra locking to use these functions. | ||
| 128 | */ | 360 | */ |
| 129 | static inline unsigned int __kfifo_len(struct kfifo *fifo) | 361 | static inline __must_check unsigned int kfifo_in_rec(struct kfifo *fifo, |
| 362 | void *from, unsigned int n, unsigned int recsize) | ||
| 130 | { | 363 | { |
| 131 | return fifo->in - fifo->out; | 364 | if (!__builtin_constant_p(recsize)) |
| 365 | return __kfifo_in_generic(fifo, from, n, recsize); | ||
| 366 | return __kfifo_in_rec(fifo, from, n, recsize); | ||
| 367 | } | ||
| 368 | |||
| 369 | /* | ||
| 370 | * __kfifo_out_... internal functions for getting data from the fifo | ||
| 371 | * do not call it directly, use kfifo_out_rec() instead | ||
| 372 | */ | ||
| 373 | extern unsigned int __kfifo_out_n(struct kfifo *fifo, | ||
| 374 | void *to, unsigned int reclen, unsigned int recsize); | ||
| 375 | |||
| 376 | extern unsigned int __kfifo_out_generic(struct kfifo *fifo, | ||
| 377 | void *to, unsigned int n, | ||
| 378 | unsigned int recsize, unsigned int *total); | ||
| 379 | |||
| 380 | static inline unsigned int __kfifo_out_rec(struct kfifo *fifo, | ||
| 381 | void *to, unsigned int n, unsigned int recsize, | ||
| 382 | unsigned int *total) | ||
| 383 | { | ||
| 384 | unsigned int l; | ||
| 385 | |||
| 386 | if (!recsize) { | ||
| 387 | l = n; | ||
| 388 | if (total) | ||
| 389 | *total = l; | ||
| 390 | } else { | ||
| 391 | l = __kfifo_peek_n(fifo, recsize); | ||
| 392 | if (total) | ||
| 393 | *total = l; | ||
| 394 | if (n < l) | ||
| 395 | return l; | ||
| 396 | } | ||
| 397 | |||
| 398 | return __kfifo_out_n(fifo, to, l, recsize); | ||
| 132 | } | 399 | } |
| 133 | 400 | ||
| 134 | /** | 401 | /** |
| 135 | * kfifo_len - returns the number of bytes available in the FIFO | 402 | * kfifo_out_rec - gets some record data from the FIFO |
| 136 | * @fifo: the fifo to be used. | 403 | * @fifo: the fifo to be used. |
| 404 | * @to: where the data must be copied. | ||
| 405 | * @n: the size of the destination buffer. | ||
| 406 | * @recsize: size of record field | ||
| 407 | * @total: pointer where the total number of copied bytes should be stored | ||
| 408 | * | ||
| 409 | * This function copies at most @n bytes from the FIFO to @to and returns the | ||
| 410 | * number of bytes which cannot be copied. | ||
| 411 | * A returned value greater than the @n value means that the record doesn't | ||
| 412 | * fit into the @to buffer. | ||
| 413 | * | ||
| 414 | * Note that with only one concurrent reader and one concurrent | ||
| 415 | * writer, you don't need extra locking to use these functions. | ||
| 137 | */ | 416 | */ |
| 138 | static inline unsigned int kfifo_len(struct kfifo *fifo) | 417 | static inline __must_check unsigned int kfifo_out_rec(struct kfifo *fifo, |
| 418 | void *to, unsigned int n, unsigned int recsize, | ||
| 419 | unsigned int *total) | ||
| 420 | |||
| 139 | { | 421 | { |
| 140 | unsigned long flags; | 422 | if (!__builtin_constant_p(recsize)) |
| 141 | unsigned int ret; | 423 | return __kfifo_out_generic(fifo, to, n, recsize, total); |
| 424 | return __kfifo_out_rec(fifo, to, n, recsize, total); | ||
| 425 | } | ||
| 426 | |||
| 427 | /* | ||
| 428 | * __kfifo_from_user_... internal functions for transfer from user space into | ||
| 429 | * the fifo. do not call it directly, use kfifo_from_user_rec() instead | ||
| 430 | */ | ||
| 431 | extern unsigned int __kfifo_from_user_n(struct kfifo *fifo, | ||
| 432 | const void __user *from, unsigned int n, unsigned int recsize); | ||
| 142 | 433 | ||
| 143 | spin_lock_irqsave(fifo->lock, flags); | 434 | extern unsigned int __kfifo_from_user_generic(struct kfifo *fifo, |
| 435 | const void __user *from, unsigned int n, unsigned int recsize); | ||
| 144 | 436 | ||
| 145 | ret = __kfifo_len(fifo); | 437 | static inline unsigned int __kfifo_from_user_rec(struct kfifo *fifo, |
| 438 | const void __user *from, unsigned int n, unsigned int recsize) | ||
| 439 | { | ||
| 440 | unsigned int ret; | ||
| 146 | 441 | ||
| 147 | spin_unlock_irqrestore(fifo->lock, flags); | 442 | ret = __kfifo_from_user_n(fifo, from, n, recsize); |
| 148 | 443 | ||
| 444 | if (likely(ret == 0)) { | ||
| 445 | if (recsize) | ||
| 446 | __kfifo_poke_n(fifo, recsize, n); | ||
| 447 | __kfifo_add_in(fifo, n + recsize); | ||
| 448 | } | ||
| 149 | return ret; | 449 | return ret; |
| 150 | } | 450 | } |
| 151 | 451 | ||
| 452 | /** | ||
| 453 | * kfifo_from_user_rec - puts some data from user space into the FIFO | ||
| 454 | * @fifo: the fifo to be used. | ||
| 455 | * @from: pointer to the data to be added. | ||
| 456 | * @n: the length of the data to be added. | ||
| 457 | * @recsize: size of record field | ||
| 458 | * | ||
| 459 | * This function copies @n bytes from the @from into the | ||
| 460 | * FIFO and returns the number of bytes which cannot be copied. | ||
| 461 | * | ||
| 462 | * If the returned value is less than or equal to @n, the copy_from_user() | ||
| 463 | * function has failed. Otherwise the record doesn't fit into the buffer. | ||
| 464 | * | ||
| 465 | * Note that with only one concurrent reader and one concurrent | ||
| 466 | * writer, you don't need extra locking to use these functions. | ||
| 467 | */ | ||
| 468 | static inline __must_check unsigned int kfifo_from_user_rec(struct kfifo *fifo, | ||
| 469 | const void __user *from, unsigned int n, unsigned int recsize) | ||
| 470 | { | ||
| 471 | if (!__builtin_constant_p(recsize)) | ||
| 472 | return __kfifo_from_user_generic(fifo, from, n, recsize); | ||
| 473 | return __kfifo_from_user_rec(fifo, from, n, recsize); | ||
| 474 | } | ||
| 475 | |||
| 476 | /* | ||
| 477 | * __kfifo_to_user_... internal functions for transfer fifo data into user space | ||
| 478 | * do not call it directly, use kfifo_to_user_rec() instead | ||
| 479 | */ | ||
| 480 | extern unsigned int __kfifo_to_user_n(struct kfifo *fifo, | ||
| 481 | void __user *to, unsigned int n, unsigned int reclen, | ||
| 482 | unsigned int recsize); | ||
| 483 | |||
| 484 | extern unsigned int __kfifo_to_user_generic(struct kfifo *fifo, | ||
| 485 | void __user *to, unsigned int n, unsigned int recsize, | ||
| 486 | unsigned int *total); | ||
| 487 | |||
| 488 | static inline unsigned int __kfifo_to_user_rec(struct kfifo *fifo, | ||
| 489 | void __user *to, unsigned int n, | ||
| 490 | unsigned int recsize, unsigned int *total) | ||
| 491 | { | ||
| 492 | unsigned int l; | ||
| 493 | |||
| 494 | if (!recsize) { | ||
| 495 | l = n; | ||
| 496 | if (total) | ||
| 497 | *total = l; | ||
| 498 | } else { | ||
| 499 | l = __kfifo_peek_n(fifo, recsize); | ||
| 500 | if (total) | ||
| 501 | *total = l; | ||
| 502 | if (n < l) | ||
| 503 | return l; | ||
| 504 | } | ||
| 505 | |||
| 506 | return __kfifo_to_user_n(fifo, to, n, l, recsize); | ||
| 507 | } | ||
| 508 | |||
| 509 | /** | ||
| 510 | * kfifo_to_user_rec - gets data from the FIFO and write it to user space | ||
| 511 | * @fifo: the fifo to be used. | ||
| 512 | * @to: where the data must be copied. | ||
| 513 | * @n: the size of the destination buffer. | ||
| 514 | * @recsize: size of record field | ||
| 515 | * @total: pointer where the total number of copied bytes should be stored | ||
| 516 | * | ||
| 517 | * This function copies at most @n bytes from the FIFO to the @to. | ||
| 518 | * In case of an error, the function returns the number of bytes which cannot | ||
| 519 | * be copied. | ||
| 520 | * If the returned value is less than or equal to @n, the copy_to_user() | ||
| 521 | * function has failed. Otherwise the record doesn't fit into the @to buffer. | ||
| 522 | * | ||
| 523 | * Note that with only one concurrent reader and one concurrent | ||
| 524 | * writer, you don't need extra locking to use these functions. | ||
| 525 | */ | ||
| 526 | static inline __must_check unsigned int kfifo_to_user_rec(struct kfifo *fifo, | ||
| 527 | void __user *to, unsigned int n, unsigned int recsize, | ||
| 528 | unsigned int *total) | ||
| 529 | { | ||
| 530 | if (!__builtin_constant_p(recsize)) | ||
| 531 | return __kfifo_to_user_generic(fifo, to, n, recsize, total); | ||
| 532 | return __kfifo_to_user_rec(fifo, to, n, recsize, total); | ||
| 533 | } | ||
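A matching, hypothetical read-side caller (same recsize == 2 assumption); @total lets the caller report how large the record really was when the user buffer is too small:

    static ssize_t example_read_rec(struct kfifo *fifo,
                                    char __user *buf, size_t count)
    {
            unsigned int total;
            unsigned int ret = kfifo_to_user_rec(fifo, buf, count, 2, &total);

            if (ret > count)
                    return -EINVAL;         /* record of @total bytes exceeds @buf */
            if (ret)
                    return -EFAULT;         /* copy_to_user() faulted */
            return total;                   /* bytes placed in @buf */
    }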
| 534 | |||
| 535 | /* | ||
| 536 | * __kfifo_peek_... internal functions for peeking into the next fifo record | ||
| 537 | * do not call them directly, use kfifo_peek_rec() instead | ||
| 538 | */ | ||
| 539 | extern unsigned int __kfifo_peek_generic(struct kfifo *fifo, | ||
| 540 | unsigned int recsize); | ||
| 541 | |||
| 542 | /** | ||
| 543 | * kfifo_peek_rec - gets the size of the next FIFO record data | ||
| 544 | * @fifo: the fifo to be used. | ||
| 545 | * @recsize: size of record field | ||
| 546 | * | ||
| 547 | * This function returns the size of the next FIFO record in bytes. | ||
| 548 | */ | ||
| 549 | static inline __must_check unsigned int kfifo_peek_rec(struct kfifo *fifo, | ||
| 550 | unsigned int recsize) | ||
| 551 | { | ||
| 552 | if (!__builtin_constant_p(recsize)) | ||
| 553 | return __kfifo_peek_generic(fifo, recsize); | ||
| 554 | if (!recsize) | ||
| 555 | return kfifo_len(fifo); | ||
| 556 | return __kfifo_peek_n(fifo, recsize); | ||
| 557 | } | ||
| 558 | |||
| 559 | /* | ||
| 560 | * __kfifo_skip_... internal functions for skipping the next fifo record | ||
| 561 | * do not call them directly, use kfifo_skip_rec() instead | ||
| 562 | */ | ||
| 563 | extern void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize); | ||
| 564 | |||
| 565 | static inline void __kfifo_skip_rec(struct kfifo *fifo, | ||
| 566 | unsigned int recsize) | ||
| 567 | { | ||
| 568 | unsigned int l; | ||
| 569 | |||
| 570 | if (recsize) { | ||
| 571 | l = __kfifo_peek_n(fifo, recsize); | ||
| 572 | |||
| 573 | if (l + recsize <= kfifo_len(fifo)) { | ||
| 574 | __kfifo_add_out(fifo, l + recsize); | ||
| 575 | return; | ||
| 576 | } | ||
| 577 | } | ||
| 578 | kfifo_reset_out(fifo); | ||
| 579 | } | ||
| 580 | |||
| 581 | /** | ||
| 582 | * kfifo_skip_rec - skip the next fifo out record | ||
| 583 | * @fifo: the fifo to be used. | ||
| 584 | * @recsize: size of record field | ||
| 585 | * | ||
| 586 | * This function skips the next FIFO record | ||
| 587 | */ | ||
| 588 | static inline void kfifo_skip_rec(struct kfifo *fifo, | ||
| 589 | unsigned int recsize) | ||
| 590 | { | ||
| 591 | if (!__builtin_constant_p(recsize)) | ||
| 592 | __kfifo_skip_generic(fifo, recsize); | ||
| 593 | else | ||
| 594 | __kfifo_skip_rec(fifo, recsize); | ||
| 595 | } | ||
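kfifo_peek_rec() and kfifo_skip_rec() combine naturally when a consumer has to drop records it cannot handle; a small sketch, still assuming recsize == 2:

    /* Discard leading records that are larger than the caller's buffer. */
    static void example_drop_oversized(struct kfifo *fifo, unsigned int max)
    {
            while (kfifo_len(fifo)) {
                    if (kfifo_peek_rec(fifo, 2) <= max)
                            break;                  /* next record is usable */
                    kfifo_skip_rec(fifo, 2);        /* drop the oversized record */
            }
    }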
| 596 | |||
| 597 | /** | ||
| 598 | * kfifo_avail_rec - returns the number of bytes available in a record FIFO | ||
| 599 | * @fifo: the fifo to be used. | ||
| 600 | * @recsize: size of record field | ||
| 601 | */ | ||
| 602 | static inline __must_check unsigned int kfifo_avail_rec(struct kfifo *fifo, | ||
| 603 | unsigned int recsize) | ||
| 604 | { | ||
| 605 | unsigned int l = kfifo_size(fifo) - kfifo_len(fifo); | ||
| 606 | |||
| 607 | return (l > recsize) ? l - recsize : 0; | ||
| 608 | } | ||
| 609 | |||
| 152 | #endif | 610 | #endif |
diff --git a/include/linux/memory.h b/include/linux/memory.h index 37fa19b34ef5..1adfe779eb99 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
| @@ -50,6 +50,19 @@ struct memory_notify { | |||
| 50 | int status_change_nid; | 50 | int status_change_nid; |
| 51 | }; | 51 | }; |
| 52 | 52 | ||
| 53 | /* | ||
| 54 | * During pageblock isolation, count the number of pages within the | ||
| 55 | * range [start_pfn, start_pfn + nr_pages) which are owned by code | ||
| 56 | * in the notifier chain. | ||
| 57 | */ | ||
| 58 | #define MEM_ISOLATE_COUNT (1<<0) | ||
| 59 | |||
| 60 | struct memory_isolate_notify { | ||
| 61 | unsigned long start_pfn; /* Start of range to check */ | ||
| 62 | unsigned int nr_pages; /* # pages in range to check */ | ||
| 63 | unsigned int pages_found; /* # owned pages found by callbacks */ | ||
| 64 | }; | ||
| 65 | |||
| 53 | struct notifier_block; | 66 | struct notifier_block; |
| 54 | struct mem_section; | 67 | struct mem_section; |
| 55 | 68 | ||
| @@ -76,14 +89,28 @@ static inline int memory_notify(unsigned long val, void *v) | |||
| 76 | { | 89 | { |
| 77 | return 0; | 90 | return 0; |
| 78 | } | 91 | } |
| 92 | static inline int register_memory_isolate_notifier(struct notifier_block *nb) | ||
| 93 | { | ||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | static inline void unregister_memory_isolate_notifier(struct notifier_block *nb) | ||
| 97 | { | ||
| 98 | } | ||
| 99 | static inline int memory_isolate_notify(unsigned long val, void *v) | ||
| 100 | { | ||
| 101 | return 0; | ||
| 102 | } | ||
| 79 | #else | 103 | #else |
| 80 | extern int register_memory_notifier(struct notifier_block *nb); | 104 | extern int register_memory_notifier(struct notifier_block *nb); |
| 81 | extern void unregister_memory_notifier(struct notifier_block *nb); | 105 | extern void unregister_memory_notifier(struct notifier_block *nb); |
| 106 | extern int register_memory_isolate_notifier(struct notifier_block *nb); | ||
| 107 | extern void unregister_memory_isolate_notifier(struct notifier_block *nb); | ||
| 82 | extern int register_new_memory(int, struct mem_section *); | 108 | extern int register_new_memory(int, struct mem_section *); |
| 83 | extern int unregister_memory_section(struct mem_section *); | 109 | extern int unregister_memory_section(struct mem_section *); |
| 84 | extern int memory_dev_init(void); | 110 | extern int memory_dev_init(void); |
| 85 | extern int remove_memory_block(unsigned long, struct mem_section *, int); | 111 | extern int remove_memory_block(unsigned long, struct mem_section *, int); |
| 86 | extern int memory_notify(unsigned long val, void *v); | 112 | extern int memory_notify(unsigned long val, void *v); |
| 113 | extern int memory_isolate_notify(unsigned long val, void *v); | ||
| 87 | extern struct memory_block *find_memory_block(struct mem_section *); | 114 | extern struct memory_block *find_memory_block(struct mem_section *); |
| 88 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) | 115 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) |
| 89 | enum mem_add_context { BOOT, HOTPLUG }; | 116 | enum mem_add_context { BOOT, HOTPLUG }; |
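For the new isolation notifier, a callback receives a struct memory_isolate_notify and bumps pages_found for the pages it owns in the range. A hypothetical sketch (my_count_owned_pages() is a placeholder, not a real kernel helper):

    static int my_memory_isolate_cb(struct notifier_block *nb,
                                    unsigned long action, void *data)
    {
            struct memory_isolate_notify *arg = data;

            if (action == MEM_ISOLATE_COUNT)
                    arg->pages_found += my_count_owned_pages(arg->start_pfn,
                                                             arg->nr_pages);
            return NOTIFY_OK;
    }

    static struct notifier_block my_mem_isolate_nb = {
            .notifier_call = my_memory_isolate_cb,
    };

    /* in module init: register_memory_isolate_notifier(&my_mem_isolate_nb); */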
diff --git a/include/linux/mm.h b/include/linux/mm.h index 849b4a61bd8f..2265f28eb47a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -1037,6 +1037,9 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn, | |||
| 1037 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, | 1037 | extern void remove_active_range(unsigned int nid, unsigned long start_pfn, |
| 1038 | unsigned long end_pfn); | 1038 | unsigned long end_pfn); |
| 1039 | extern void remove_all_active_ranges(void); | 1039 | extern void remove_all_active_ranges(void); |
| 1040 | void sort_node_map(void); | ||
| 1041 | unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, | ||
| 1042 | unsigned long end_pfn); | ||
| 1040 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, | 1043 | extern unsigned long absent_pages_in_range(unsigned long start_pfn, |
| 1041 | unsigned long end_pfn); | 1044 | unsigned long end_pfn); |
| 1042 | extern void get_pfn_range_for_nid(unsigned int nid, | 1045 | extern void get_pfn_range_for_nid(unsigned int nid, |
diff --git a/include/linux/namei.h b/include/linux/namei.h index 028946750289..05b441d93642 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
| @@ -72,8 +72,6 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *, | |||
| 72 | 72 | ||
| 73 | extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, | 73 | extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, |
| 74 | int (*open)(struct inode *, struct file *)); | 74 | int (*open)(struct inode *, struct file *)); |
| 75 | extern struct file *nameidata_to_filp(struct nameidata *nd, int flags); | ||
| 76 | extern void release_open_intent(struct nameidata *); | ||
| 77 | 75 | ||
| 78 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); | 76 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); |
| 79 | 77 | ||
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h deleted file mode 100644 index e3fb25606706..000000000000 --- a/include/linux/perf_counter.h +++ /dev/null | |||
| @@ -1,444 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * NOTE: this file will be removed in a future kernel release; it is | ||
| 3 | * provided as a courtesy for user-space code that relies on the | ||
| 4 | * old (pre-rename) symbols and constants. | ||
| 5 | * | ||
| 6 | * Performance events: | ||
| 7 | * | ||
| 8 | * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de> | ||
| 9 | * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar | ||
| 10 | * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra | ||
| 11 | * | ||
| 12 | * Data type definitions, declarations, prototypes. | ||
| 13 | * | ||
| 14 | * Started by: Thomas Gleixner and Ingo Molnar | ||
| 15 | * | ||
| 16 | * For licencing details see kernel-base/COPYING | ||
| 17 | */ | ||
| 18 | #ifndef _LINUX_PERF_COUNTER_H | ||
| 19 | #define _LINUX_PERF_COUNTER_H | ||
| 20 | |||
| 21 | #include <linux/types.h> | ||
| 22 | #include <linux/ioctl.h> | ||
| 23 | #include <asm/byteorder.h> | ||
| 24 | |||
| 25 | /* | ||
| 26 | * User-space ABI bits: | ||
| 27 | */ | ||
| 28 | |||
| 29 | /* | ||
| 30 | * attr.type | ||
| 31 | */ | ||
| 32 | enum perf_type_id { | ||
| 33 | PERF_TYPE_HARDWARE = 0, | ||
| 34 | PERF_TYPE_SOFTWARE = 1, | ||
| 35 | PERF_TYPE_TRACEPOINT = 2, | ||
| 36 | PERF_TYPE_HW_CACHE = 3, | ||
| 37 | PERF_TYPE_RAW = 4, | ||
| 38 | |||
| 39 | PERF_TYPE_MAX, /* non-ABI */ | ||
| 40 | }; | ||
| 41 | |||
| 42 | /* | ||
| 43 | * Generalized performance counter event types, used by the | ||
| 44 | * attr.event_id parameter of the sys_perf_counter_open() | ||
| 45 | * syscall: | ||
| 46 | */ | ||
| 47 | enum perf_hw_id { | ||
| 48 | /* | ||
| 49 | * Common hardware events, generalized by the kernel: | ||
| 50 | */ | ||
| 51 | PERF_COUNT_HW_CPU_CYCLES = 0, | ||
| 52 | PERF_COUNT_HW_INSTRUCTIONS = 1, | ||
| 53 | PERF_COUNT_HW_CACHE_REFERENCES = 2, | ||
| 54 | PERF_COUNT_HW_CACHE_MISSES = 3, | ||
| 55 | PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, | ||
| 56 | PERF_COUNT_HW_BRANCH_MISSES = 5, | ||
| 57 | PERF_COUNT_HW_BUS_CYCLES = 6, | ||
| 58 | |||
| 59 | PERF_COUNT_HW_MAX, /* non-ABI */ | ||
| 60 | }; | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Generalized hardware cache counters: | ||
| 64 | * | ||
| 65 | * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x | ||
| 66 | * { read, write, prefetch } x | ||
| 67 | * { accesses, misses } | ||
| 68 | */ | ||
| 69 | enum perf_hw_cache_id { | ||
| 70 | PERF_COUNT_HW_CACHE_L1D = 0, | ||
| 71 | PERF_COUNT_HW_CACHE_L1I = 1, | ||
| 72 | PERF_COUNT_HW_CACHE_LL = 2, | ||
| 73 | PERF_COUNT_HW_CACHE_DTLB = 3, | ||
| 74 | PERF_COUNT_HW_CACHE_ITLB = 4, | ||
| 75 | PERF_COUNT_HW_CACHE_BPU = 5, | ||
| 76 | |||
| 77 | PERF_COUNT_HW_CACHE_MAX, /* non-ABI */ | ||
| 78 | }; | ||
| 79 | |||
| 80 | enum perf_hw_cache_op_id { | ||
| 81 | PERF_COUNT_HW_CACHE_OP_READ = 0, | ||
| 82 | PERF_COUNT_HW_CACHE_OP_WRITE = 1, | ||
| 83 | PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, | ||
| 84 | |||
| 85 | PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */ | ||
| 86 | }; | ||
| 87 | |||
| 88 | enum perf_hw_cache_op_result_id { | ||
| 89 | PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, | ||
| 90 | PERF_COUNT_HW_CACHE_RESULT_MISS = 1, | ||
| 91 | |||
| 92 | PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */ | ||
| 93 | }; | ||
| 94 | |||
| 95 | /* | ||
| 96 | * Special "software" counters provided by the kernel, even if the hardware | ||
| 97 | * does not support performance counters. These counters measure various | ||
| 98 | * physical and sw events of the kernel (and allow the profiling of them as | ||
| 99 | * well): | ||
| 100 | */ | ||
| 101 | enum perf_sw_ids { | ||
| 102 | PERF_COUNT_SW_CPU_CLOCK = 0, | ||
| 103 | PERF_COUNT_SW_TASK_CLOCK = 1, | ||
| 104 | PERF_COUNT_SW_PAGE_FAULTS = 2, | ||
| 105 | PERF_COUNT_SW_CONTEXT_SWITCHES = 3, | ||
| 106 | PERF_COUNT_SW_CPU_MIGRATIONS = 4, | ||
| 107 | PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, | ||
| 108 | PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, | ||
| 109 | PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, | ||
| 110 | PERF_COUNT_SW_EMULATION_FAULTS = 8, | ||
| 111 | |||
| 112 | PERF_COUNT_SW_MAX, /* non-ABI */ | ||
| 113 | }; | ||
| 114 | |||
| 115 | /* | ||
| 116 | * Bits that can be set in attr.sample_type to request information | ||
| 117 | * in the overflow packets. | ||
| 118 | */ | ||
| 119 | enum perf_counter_sample_format { | ||
| 120 | PERF_SAMPLE_IP = 1U << 0, | ||
| 121 | PERF_SAMPLE_TID = 1U << 1, | ||
| 122 | PERF_SAMPLE_TIME = 1U << 2, | ||
| 123 | PERF_SAMPLE_ADDR = 1U << 3, | ||
| 124 | PERF_SAMPLE_READ = 1U << 4, | ||
| 125 | PERF_SAMPLE_CALLCHAIN = 1U << 5, | ||
| 126 | PERF_SAMPLE_ID = 1U << 6, | ||
| 127 | PERF_SAMPLE_CPU = 1U << 7, | ||
| 128 | PERF_SAMPLE_PERIOD = 1U << 8, | ||
| 129 | PERF_SAMPLE_STREAM_ID = 1U << 9, | ||
| 130 | PERF_SAMPLE_RAW = 1U << 10, | ||
| 131 | |||
| 132 | PERF_SAMPLE_MAX = 1U << 11, /* non-ABI */ | ||
| 133 | }; | ||
| 134 | |||
| 135 | /* | ||
| 136 | * The format of the data returned by read() on a perf counter fd, | ||
| 137 | * as specified by attr.read_format: | ||
| 138 | * | ||
| 139 | * struct read_format { | ||
| 140 | * { u64 value; | ||
| 141 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | ||
| 142 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
| 143 | * { u64 id; } && PERF_FORMAT_ID | ||
| 144 | * } && !PERF_FORMAT_GROUP | ||
| 145 | * | ||
| 146 | * { u64 nr; | ||
| 147 | * { u64 time_enabled; } && PERF_FORMAT_ENABLED | ||
| 148 | * { u64 time_running; } && PERF_FORMAT_RUNNING | ||
| 149 | * { u64 value; | ||
| 150 | * { u64 id; } && PERF_FORMAT_ID | ||
| 151 | * } cntr[nr]; | ||
| 152 | * } && PERF_FORMAT_GROUP | ||
| 153 | * }; | ||
| 154 | */ | ||
| 155 | enum perf_counter_read_format { | ||
| 156 | PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, | ||
| 157 | PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, | ||
| 158 | PERF_FORMAT_ID = 1U << 2, | ||
| 159 | PERF_FORMAT_GROUP = 1U << 3, | ||
| 160 | |||
| 161 | PERF_FORMAT_MAX = 1U << 4, /* non-ABI */ | ||
| 162 | }; | ||
| 163 | |||
| 164 | #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ | ||
| 165 | |||
| 166 | /* | ||
| 167 | * Hardware event to monitor via a performance monitoring counter: | ||
| 168 | */ | ||
| 169 | struct perf_counter_attr { | ||
| 170 | |||
| 171 | /* | ||
| 172 | * Major type: hardware/software/tracepoint/etc. | ||
| 173 | */ | ||
| 174 | __u32 type; | ||
| 175 | |||
| 176 | /* | ||
| 177 | * Size of the attr structure, for fwd/bwd compat. | ||
| 178 | */ | ||
| 179 | __u32 size; | ||
| 180 | |||
| 181 | /* | ||
| 182 | * Type specific configuration information. | ||
| 183 | */ | ||
| 184 | __u64 config; | ||
| 185 | |||
| 186 | union { | ||
| 187 | __u64 sample_period; | ||
| 188 | __u64 sample_freq; | ||
| 189 | }; | ||
| 190 | |||
| 191 | __u64 sample_type; | ||
| 192 | __u64 read_format; | ||
| 193 | |||
| 194 | __u64 disabled : 1, /* off by default */ | ||
| 195 | inherit : 1, /* children inherit it */ | ||
| 196 | pinned : 1, /* must always be on PMU */ | ||
| 197 | exclusive : 1, /* only group on PMU */ | ||
| 198 | exclude_user : 1, /* don't count user */ | ||
| 199 | exclude_kernel : 1, /* ditto kernel */ | ||
| 200 | exclude_hv : 1, /* ditto hypervisor */ | ||
| 201 | exclude_idle : 1, /* don't count when idle */ | ||
| 202 | mmap : 1, /* include mmap data */ | ||
| 203 | comm : 1, /* include comm data */ | ||
| 204 | freq : 1, /* use freq, not period */ | ||
| 205 | inherit_stat : 1, /* per task counts */ | ||
| 206 | enable_on_exec : 1, /* next exec enables */ | ||
| 207 | task : 1, /* trace fork/exit */ | ||
| 208 | watermark : 1, /* wakeup_watermark */ | ||
| 209 | |||
| 210 | __reserved_1 : 49; | ||
| 211 | |||
| 212 | union { | ||
| 213 | __u32 wakeup_events; /* wakeup every n events */ | ||
| 214 | __u32 wakeup_watermark; /* bytes before wakeup */ | ||
| 215 | }; | ||
| 216 | __u32 __reserved_2; | ||
| 217 | |||
| 218 | __u64 __reserved_3; | ||
| 219 | }; | ||
| 220 | |||
| 221 | /* | ||
| 222 | * Ioctls that can be done on a perf counter fd: | ||
| 223 | */ | ||
| 224 | #define PERF_COUNTER_IOC_ENABLE _IO ('$', 0) | ||
| 225 | #define PERF_COUNTER_IOC_DISABLE _IO ('$', 1) | ||
| 226 | #define PERF_COUNTER_IOC_REFRESH _IO ('$', 2) | ||
| 227 | #define PERF_COUNTER_IOC_RESET _IO ('$', 3) | ||
| 228 | #define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64) | ||
| 229 | #define PERF_COUNTER_IOC_SET_OUTPUT _IO ('$', 5) | ||
| 230 | #define PERF_COUNTER_IOC_SET_FILTER _IOW('$', 6, char *) | ||
| 231 | |||
| 232 | enum perf_counter_ioc_flags { | ||
| 233 | PERF_IOC_FLAG_GROUP = 1U << 0, | ||
| 234 | }; | ||
| 235 | |||
| 236 | /* | ||
| 237 | * Structure of the page that can be mapped via mmap | ||
| 238 | */ | ||
| 239 | struct perf_counter_mmap_page { | ||
| 240 | __u32 version; /* version number of this structure */ | ||
| 241 | __u32 compat_version; /* lowest version this is compat with */ | ||
| 242 | |||
| 243 | /* | ||
| 244 | * Bits needed to read the hw counters in user-space. | ||
| 245 | * | ||
| 246 | * u32 seq; | ||
| 247 | * s64 count; | ||
| 248 | * | ||
| 249 | * do { | ||
| 250 | * seq = pc->lock; | ||
| 251 | * | ||
| 252 | * barrier() | ||
| 253 | * if (pc->index) { | ||
| 254 | * count = pmc_read(pc->index - 1); | ||
| 255 | * count += pc->offset; | ||
| 256 | * } else | ||
| 257 | * goto regular_read; | ||
| 258 | * | ||
| 259 | * barrier(); | ||
| 260 | * } while (pc->lock != seq); | ||
| 261 | * | ||
| 262 | * NOTE: for obvious reasons this only works on self-monitoring | ||
| 263 | * processes. | ||
| 264 | */ | ||
| 265 | __u32 lock; /* seqlock for synchronization */ | ||
| 266 | __u32 index; /* hardware counter identifier */ | ||
| 267 | __s64 offset; /* add to hardware counter value */ | ||
| 268 | __u64 time_enabled; /* time counter active */ | ||
| 269 | __u64 time_running; /* time counter on cpu */ | ||
| 270 | |||
| 271 | /* | ||
| 272 | * Hole for extension of the self monitor capabilities | ||
| 273 | */ | ||
| 274 | |||
| 275 | __u64 __reserved[123]; /* align to 1k */ | ||
| 276 | |||
| 277 | /* | ||
| 278 | * Control data for the mmap() data buffer. | ||
| 279 | * | ||
| 280 | * User-space reading the @data_head value should issue an rmb(), on | ||
| 281 | * SMP capable platforms, after reading this value -- see | ||
| 282 | * perf_counter_wakeup(). | ||
| 283 | * | ||
| 284 | * When the mapping is PROT_WRITE the @data_tail value should be | ||
| 285 | * written by userspace to reflect the last read data. In this case | ||
| 286 | * the kernel will not over-write unread data. | ||
| 287 | */ | ||
| 288 | __u64 data_head; /* head in the data section */ | ||
| 289 | __u64 data_tail; /* user-space written tail */ | ||
| 290 | }; | ||
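The read sequence sketched in the comment above can be written out as a small user-space helper; pmc_read() stands for an arch-specific counter read (e.g. RDPMC on x86) and barrier() for a compiler barrier, both assumed rather than provided by this header:

    static __s64 read_self_counter(volatile struct perf_counter_mmap_page *pc)
    {
            __u32 seq;
            __s64 count;

            do {
                    seq = pc->lock;
                    barrier();
                    if (!pc->index)
                            return -1;      /* not active: fall back to read() on the fd */
                    count = pmc_read(pc->index - 1) + pc->offset;
                    barrier();
            } while (pc->lock != seq);

            return count;
    }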
| 291 | |||
| 292 | #define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0) | ||
| 293 | #define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0) | ||
| 294 | #define PERF_EVENT_MISC_KERNEL (1 << 0) | ||
| 295 | #define PERF_EVENT_MISC_USER (2 << 0) | ||
| 296 | #define PERF_EVENT_MISC_HYPERVISOR (3 << 0) | ||
| 297 | |||
| 298 | struct perf_event_header { | ||
| 299 | __u32 type; | ||
| 300 | __u16 misc; | ||
| 301 | __u16 size; | ||
| 302 | }; | ||
| 303 | |||
| 304 | enum perf_event_type { | ||
| 305 | |||
| 306 | /* | ||
| 307 | * The MMAP events record the PROT_EXEC mappings so that we can | ||
| 308 | * correlate userspace IPs to code. They have the following structure: | ||
| 309 | * | ||
| 310 | * struct { | ||
| 311 | * struct perf_event_header header; | ||
| 312 | * | ||
| 313 | * u32 pid, tid; | ||
| 314 | * u64 addr; | ||
| 315 | * u64 len; | ||
| 316 | * u64 pgoff; | ||
| 317 | * char filename[]; | ||
| 318 | * }; | ||
| 319 | */ | ||
| 320 | PERF_EVENT_MMAP = 1, | ||
| 321 | |||
| 322 | /* | ||
| 323 | * struct { | ||
| 324 | * struct perf_event_header header; | ||
| 325 | * u64 id; | ||
| 326 | * u64 lost; | ||
| 327 | * }; | ||
| 328 | */ | ||
| 329 | PERF_EVENT_LOST = 2, | ||
| 330 | |||
| 331 | /* | ||
| 332 | * struct { | ||
| 333 | * struct perf_event_header header; | ||
| 334 | * | ||
| 335 | * u32 pid, tid; | ||
| 336 | * char comm[]; | ||
| 337 | * }; | ||
| 338 | */ | ||
| 339 | PERF_EVENT_COMM = 3, | ||
| 340 | |||
| 341 | /* | ||
| 342 | * struct { | ||
| 343 | * struct perf_event_header header; | ||
| 344 | * u32 pid, ppid; | ||
| 345 | * u32 tid, ptid; | ||
| 346 | * u64 time; | ||
| 347 | * }; | ||
| 348 | */ | ||
| 349 | PERF_EVENT_EXIT = 4, | ||
| 350 | |||
| 351 | /* | ||
| 352 | * struct { | ||
| 353 | * struct perf_event_header header; | ||
| 354 | * u64 time; | ||
| 355 | * u64 id; | ||
| 356 | * u64 stream_id; | ||
| 357 | * }; | ||
| 358 | */ | ||
| 359 | PERF_EVENT_THROTTLE = 5, | ||
| 360 | PERF_EVENT_UNTHROTTLE = 6, | ||
| 361 | |||
| 362 | /* | ||
| 363 | * struct { | ||
| 364 | * struct perf_event_header header; | ||
| 365 | * u32 pid, ppid; | ||
| 366 | * u32 tid, ptid; | ||
| 367 | * u64 time; | ||
| 368 | * }; | ||
| 369 | */ | ||
| 370 | PERF_EVENT_FORK = 7, | ||
| 371 | |||
| 372 | /* | ||
| 373 | * struct { | ||
| 374 | * struct perf_event_header header; | ||
| 375 | * u32 pid, tid; | ||
| 376 | * | ||
| 377 | * struct read_format values; | ||
| 378 | * }; | ||
| 379 | */ | ||
| 380 | PERF_EVENT_READ = 8, | ||
| 381 | |||
| 382 | /* | ||
| 383 | * struct { | ||
| 384 | * struct perf_event_header header; | ||
| 385 | * | ||
| 386 | * { u64 ip; } && PERF_SAMPLE_IP | ||
| 387 | * { u32 pid, tid; } && PERF_SAMPLE_TID | ||
| 388 | * { u64 time; } && PERF_SAMPLE_TIME | ||
| 389 | * { u64 addr; } && PERF_SAMPLE_ADDR | ||
| 390 | * { u64 id; } && PERF_SAMPLE_ID | ||
| 391 | * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID | ||
| 392 | * { u32 cpu, res; } && PERF_SAMPLE_CPU | ||
| 393 | * { u64 period; } && PERF_SAMPLE_PERIOD | ||
| 394 | * | ||
| 395 | * { struct read_format values; } && PERF_SAMPLE_READ | ||
| 396 | * | ||
| 397 | * { u64 nr, | ||
| 398 | * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN | ||
| 399 | * | ||
| 400 | * # | ||
| 401 | * # The RAW record below is opaque data wrt the ABI | ||
| 402 | * # | ||
| 403 | * # That is, the ABI doesn't make any promises wrt | ||
| 404 | * # the stability of its content; it may vary depending | ||
| 405 | * # on event, hardware, kernel version and phase of | ||
| 406 | * # the moon. | ||
| 407 | * # | ||
| 408 | * # In other words, PERF_SAMPLE_RAW contents are not an ABI. | ||
| 409 | * # | ||
| 410 | * | ||
| 411 | * { u32 size; | ||
| 412 | * char data[size];}&& PERF_SAMPLE_RAW | ||
| 413 | * }; | ||
| 414 | */ | ||
| 415 | PERF_EVENT_SAMPLE = 9, | ||
| 416 | |||
| 417 | PERF_EVENT_MAX, /* non-ABI */ | ||
| 418 | }; | ||
| 419 | |||
| 420 | enum perf_callchain_context { | ||
| 421 | PERF_CONTEXT_HV = (__u64)-32, | ||
| 422 | PERF_CONTEXT_KERNEL = (__u64)-128, | ||
| 423 | PERF_CONTEXT_USER = (__u64)-512, | ||
| 424 | |||
| 425 | PERF_CONTEXT_GUEST = (__u64)-2048, | ||
| 426 | PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, | ||
| 427 | PERF_CONTEXT_GUEST_USER = (__u64)-2560, | ||
| 428 | |||
| 429 | PERF_CONTEXT_MAX = (__u64)-4095, | ||
| 430 | }; | ||
| 431 | |||
| 432 | #define PERF_FLAG_FD_NO_GROUP (1U << 0) | ||
| 433 | #define PERF_FLAG_FD_OUTPUT (1U << 1) | ||
| 434 | |||
| 435 | /* | ||
| 436 | * In case some app still references the old symbols: | ||
| 437 | */ | ||
| 438 | |||
| 439 | #define __NR_perf_counter_open __NR_perf_event_open | ||
| 440 | |||
| 441 | #define PR_TASK_PERF_COUNTERS_DISABLE PR_TASK_PERF_EVENTS_DISABLE | ||
| 442 | #define PR_TASK_PERF_COUNTERS_ENABLE PR_TASK_PERF_EVENTS_ENABLE | ||
| 443 | |||
| 444 | #endif /* _LINUX_PERF_COUNTER_H */ | ||
diff --git a/include/linux/quota.h b/include/linux/quota.h index e70e62194243..a6861f117480 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
| @@ -315,8 +315,9 @@ struct dquot_operations { | |||
| 315 | int (*claim_space) (struct inode *, qsize_t); | 315 | int (*claim_space) (struct inode *, qsize_t); |
| 316 | /* release rsved quota for delayed alloc */ | 316 | /* release rsved quota for delayed alloc */ |
| 317 | void (*release_rsv) (struct inode *, qsize_t); | 317 | void (*release_rsv) (struct inode *, qsize_t); |
| 318 | /* get reserved quota for delayed alloc */ | 318 | /* get reserved quota for delayed alloc, value returned is managed by |
| 319 | qsize_t (*get_reserved_space) (struct inode *); | 319 | * quota code only */ |
| 320 | qsize_t *(*get_reserved_space) (struct inode *); | ||
| 320 | }; | 321 | }; |
| 321 | 322 | ||
| 322 | /* Operations handling requests from userspace */ | 323 | /* Operations handling requests from userspace */ |
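With the pointer-returning prototype, a delayed-allocation filesystem hands the quota code the address of a per-inode counter that only the quota code updates. A hypothetical implementation (MYFS_I() and i_reserved_quota are illustrative names):

    static qsize_t *myfs_get_reserved_space(struct inode *inode)
    {
            return &MYFS_I(inode)->i_reserved_quota;
    }

    static struct dquot_operations myfs_quota_operations = {
            /* ... other hooks ... */
            .get_reserved_space = myfs_get_reserved_space,
    };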
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index c4ba9a78721e..96cc307ed9f4 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
| @@ -101,4 +101,9 @@ static inline void exit_rcu(void) | |||
| 101 | { | 101 | { |
| 102 | } | 102 | } |
| 103 | 103 | ||
| 104 | static inline int rcu_preempt_depth(void) | ||
| 105 | { | ||
| 106 | return 0; | ||
| 107 | } | ||
| 108 | |||
| 104 | #endif /* __LINUX_RCUTINY_H */ | 109 | #endif /* __LINUX_RCUTINY_H */ |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index c93eee5911b0..8044b1b94333 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
| @@ -45,6 +45,12 @@ extern void __rcu_read_unlock(void); | |||
| 45 | extern void synchronize_rcu(void); | 45 | extern void synchronize_rcu(void); |
| 46 | extern void exit_rcu(void); | 46 | extern void exit_rcu(void); |
| 47 | 47 | ||
| 48 | /* | ||
| 49 | * Defined as a macro because this is a very low-level header | ||
| 50 | * included from areas that don't even know about current | ||
| 51 | */ | ||
| 52 | #define rcu_preempt_depth() (current->rcu_read_lock_nesting) | ||
| 53 | |||
| 48 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 54 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
| 49 | 55 | ||
| 50 | static inline void __rcu_read_lock(void) | 56 | static inline void __rcu_read_lock(void) |
| @@ -63,6 +69,11 @@ static inline void exit_rcu(void) | |||
| 63 | { | 69 | { |
| 64 | } | 70 | } |
| 65 | 71 | ||
| 72 | static inline int rcu_preempt_depth(void) | ||
| 73 | { | ||
| 74 | return 0; | ||
| 75 | } | ||
| 76 | |||
| 66 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | 77 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
| 67 | 78 | ||
| 68 | static inline void __rcu_read_lock_bh(void) | 79 | static inline void __rcu_read_lock_bh(void) |
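rcu_preempt_depth() lets debug code notice a preemptible-RCU read-side critical section that does not show up in preempt_count(); a hypothetical might-sleep style check:

    static inline void my_might_sleep_check(void)
    {
            WARN_ON_ONCE(rcu_preempt_depth() > 0);  /* sleeping inside rcu_read_lock()? */
    }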
diff --git a/include/linux/sched.h b/include/linux/sched.h index e89857812be6..f2f842db03ce 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -192,6 +192,12 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
| 192 | #define TASK_DEAD 64 | 192 | #define TASK_DEAD 64 |
| 193 | #define TASK_WAKEKILL 128 | 193 | #define TASK_WAKEKILL 128 |
| 194 | #define TASK_WAKING 256 | 194 | #define TASK_WAKING 256 |
| 195 | #define TASK_STATE_MAX 512 | ||
| 196 | |||
| 197 | #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW" | ||
| 198 | |||
| 199 | extern char ___assert_task_state[1 - 2*!!( | ||
| 200 | sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)]; | ||
| 195 | 201 | ||
| 196 | /* Convenience macros for the sake of set_task_state */ | 202 | /* Convenience macros for the sake of set_task_state */ |
| 197 | #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) | 203 | #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) |
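The ___assert_task_state declaration is a build-time check: the array size collapses to -1, and therefore to a compile error, whenever TASK_STATE_TO_CHAR_STR and TASK_STATE_MAX disagree. A stripped-down, hypothetical form of the same trick:

    #define BUILD_CHECK(name, cond)  extern char name[1 - 2*!!(!(cond))]

    /* 10 state characters for 10 state bits: compiles cleanly. */
    BUILD_CHECK(__check_task_state_chars, sizeof("RSDTtZXxKW") - 1 == 10);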
| @@ -1091,7 +1097,8 @@ struct sched_class { | |||
| 1091 | enum cpu_idle_type idle); | 1097 | enum cpu_idle_type idle); |
| 1092 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | 1098 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); |
| 1093 | void (*post_schedule) (struct rq *this_rq); | 1099 | void (*post_schedule) (struct rq *this_rq); |
| 1094 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); | 1100 | void (*task_waking) (struct rq *this_rq, struct task_struct *task); |
| 1101 | void (*task_woken) (struct rq *this_rq, struct task_struct *task); | ||
| 1095 | 1102 | ||
| 1096 | void (*set_cpus_allowed)(struct task_struct *p, | 1103 | void (*set_cpus_allowed)(struct task_struct *p, |
| 1097 | const struct cpumask *newmask); | 1104 | const struct cpumask *newmask); |
| @@ -1115,7 +1122,7 @@ struct sched_class { | |||
| 1115 | struct task_struct *task); | 1122 | struct task_struct *task); |
| 1116 | 1123 | ||
| 1117 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1124 | #ifdef CONFIG_FAIR_GROUP_SCHED |
| 1118 | void (*moved_group) (struct task_struct *p); | 1125 | void (*moved_group) (struct task_struct *p, int on_rq); |
| 1119 | #endif | 1126 | #endif |
| 1120 | }; | 1127 | }; |
| 1121 | 1128 | ||
| @@ -2594,8 +2601,6 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) | |||
| 2594 | } | 2601 | } |
| 2595 | #endif /* CONFIG_MM_OWNER */ | 2602 | #endif /* CONFIG_MM_OWNER */ |
| 2596 | 2603 | ||
| 2597 | #define TASK_STATE_TO_CHAR_STR "RSDTtZX" | ||
| 2598 | |||
| 2599 | #endif /* __KERNEL__ */ | 2604 | #endif /* __KERNEL__ */ |
| 2600 | 2605 | ||
| 2601 | #endif | 2606 | #endif |
diff --git a/include/linux/sonypi.h b/include/linux/sonypi.h index 34c4475ac4a2..4f95c1aac2fd 100644 --- a/include/linux/sonypi.h +++ b/include/linux/sonypi.h | |||
| @@ -111,6 +111,7 @@ | |||
| 111 | #define SONYPI_EVENT_VOLUME_INC_PRESSED 69 | 111 | #define SONYPI_EVENT_VOLUME_INC_PRESSED 69 |
| 112 | #define SONYPI_EVENT_VOLUME_DEC_PRESSED 70 | 112 | #define SONYPI_EVENT_VOLUME_DEC_PRESSED 70 |
| 113 | #define SONYPI_EVENT_BRIGHTNESS_PRESSED 71 | 113 | #define SONYPI_EVENT_BRIGHTNESS_PRESSED 71 |
| 114 | #define SONYPI_EVENT_MEDIA_PRESSED 72 | ||
| 114 | 115 | ||
| 115 | /* get/set brightness */ | 116 | /* get/set brightness */ |
| 116 | #define SONYPI_IOCGBRT _IOR('v', 0, __u8) | 117 | #define SONYPI_IOCGBRT _IOR('v', 0, __u8) |
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 9d68fed50f11..cfa83083a2d4 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
| @@ -99,8 +99,9 @@ int __must_check sysfs_chmod_file(struct kobject *kobj, struct attribute *attr, | |||
| 99 | void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); | 99 | void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); |
| 100 | 100 | ||
| 101 | int __must_check sysfs_create_bin_file(struct kobject *kobj, | 101 | int __must_check sysfs_create_bin_file(struct kobject *kobj, |
| 102 | struct bin_attribute *attr); | 102 | const struct bin_attribute *attr); |
| 103 | void sysfs_remove_bin_file(struct kobject *kobj, struct bin_attribute *attr); | 103 | void sysfs_remove_bin_file(struct kobject *kobj, |
| 104 | const struct bin_attribute *attr); | ||
| 104 | 105 | ||
| 105 | int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target, | 106 | int __must_check sysfs_create_link(struct kobject *kobj, struct kobject *target, |
| 106 | const char *name); | 107 | const char *name); |
| @@ -175,13 +176,13 @@ static inline void sysfs_remove_file(struct kobject *kobj, | |||
| 175 | } | 176 | } |
| 176 | 177 | ||
| 177 | static inline int sysfs_create_bin_file(struct kobject *kobj, | 178 | static inline int sysfs_create_bin_file(struct kobject *kobj, |
| 178 | struct bin_attribute *attr) | 179 | const struct bin_attribute *attr) |
| 179 | { | 180 | { |
| 180 | return 0; | 181 | return 0; |
| 181 | } | 182 | } |
| 182 | 183 | ||
| 183 | static inline void sysfs_remove_bin_file(struct kobject *kobj, | 184 | static inline void sysfs_remove_bin_file(struct kobject *kobj, |
| 184 | struct bin_attribute *attr) | 185 | const struct bin_attribute *attr) |
| 185 | { | 186 | { |
| 186 | } | 187 | } |
| 187 | 188 | ||
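Since sysfs_create_bin_file()/sysfs_remove_bin_file() now take a const pointer, a driver can keep its binary attribute in read-only data. A hypothetical EEPROM dump attribute (read handler body omitted):

    static ssize_t eeprom_read(struct kobject *kobj, struct bin_attribute *attr,
                               char *buf, loff_t off, size_t count)
    {
            /* ... copy at most @count bytes from the device into @buf ... */
            return count;
    }

    static const struct bin_attribute eeprom_attr = {
            .attr = { .name = "eeprom", .mode = S_IRUGO },
            .size = 256,
            .read = eeprom_read,
    };

    /* in probe(): sysfs_create_bin_file(&dev->kobj, &eeprom_attr); */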
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index acf6e457c04b..1819396ed501 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/kref.h> | 16 | #include <linux/kref.h> |
| 17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
| 18 | #include <linux/sysrq.h> | 18 | #include <linux/sysrq.h> |
| 19 | #include <linux/kfifo.h> | ||
| 19 | 20 | ||
| 20 | #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ | 21 | #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ |
| 21 | #define SERIAL_TTY_MINORS 254 /* loads of devices :) */ | 22 | #define SERIAL_TTY_MINORS 254 /* loads of devices :) */ |
| @@ -94,7 +95,7 @@ struct usb_serial_port { | |||
| 94 | unsigned char *bulk_out_buffer; | 95 | unsigned char *bulk_out_buffer; |
| 95 | int bulk_out_size; | 96 | int bulk_out_size; |
| 96 | struct urb *write_urb; | 97 | struct urb *write_urb; |
| 97 | struct kfifo *write_fifo; | 98 | struct kfifo write_fifo; |
| 98 | int write_urb_busy; | 99 | int write_urb_busy; |
| 99 | __u8 bulk_out_endpointAddress; | 100 | __u8 bulk_out_endpointAddress; |
| 100 | 101 | ||
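The write_fifo change from a kfifo pointer to an embedded struct kfifo follows the reworked kfifo API above: the fifo object lives inside usb_serial_port and only its backing buffer is allocated. An illustrative setup/teardown pair (buffer size and function names are examples only):

    static int example_port_setup(struct usb_serial_port *port)
    {
            return kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL);
    }

    static void example_port_teardown(struct usb_serial_port *port)
    {
            kfifo_free(&port->write_fifo);
    }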
