author     Al Viro <viro@zeniv.linux.org.uk>  2012-12-20 18:49:14 -0500
committer  Al Viro <viro@zeniv.linux.org.uk>  2012-12-20 18:49:14 -0500
commit     21e89c0c48bb799beb09181740796fc80c9676e2
tree       bd5aef34a980f189ad41c75e881d225bc854bf44  /include/linux
parent     b911a6bdeef5848c468597d040e3407e0aee04ce
parent     91c7fbbf63f33c77d8d28de624834a21888842bb
Merge branch 'fscache' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs into for-linus
Diffstat (limited to 'include/linux')
 include/linux/dma-buf.h                    | 99
 include/linux/fs.h                         |  1
 include/linux/fscache-cache.h              | 71
 include/linux/fscache.h                    | 50
 include/linux/platform_data/serial-omap.h  | 51
 include/linux/platform_data/usb-omap.h     |  3
 include/linux/virtio.h                     | 25
 include/linux/virtio_scsi.h                | 28
 8 files changed, 206 insertions(+), 122 deletions(-)
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index eb48f3816df9..bd2e52ccc4f2 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -156,7 +156,6 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
 	get_file(dmabuf->file);
 }
 
-#ifdef CONFIG_DMA_SHARED_BUFFER
 struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 					  struct device *dev);
 void dma_buf_detach(struct dma_buf *dmabuf,
@@ -184,103 +183,5 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
 		 unsigned long);
 void *dma_buf_vmap(struct dma_buf *);
 void dma_buf_vunmap(struct dma_buf *, void *vaddr);
-#else
-
-static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
-							struct device *dev)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_detach(struct dma_buf *dmabuf,
-				  struct dma_buf_attachment *dmabuf_attach)
-{
-	return;
-}
-
-static inline struct dma_buf *dma_buf_export(void *priv,
-					     const struct dma_buf_ops *ops,
-					     size_t size, int flags)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline int dma_buf_fd(struct dma_buf *dmabuf, int flags)
-{
-	return -ENODEV;
-}
-
-static inline struct dma_buf *dma_buf_get(int fd)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_put(struct dma_buf *dmabuf)
-{
-	return;
-}
-
-static inline struct sg_table *dma_buf_map_attachment(
-	struct dma_buf_attachment *attach, enum dma_data_direction write)
-{
-	return ERR_PTR(-ENODEV);
-}
-
-static inline void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
-			struct sg_table *sg, enum dma_data_direction dir)
-{
-	return;
-}
-
-static inline int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-					   size_t start, size_t len,
-					   enum dma_data_direction dir)
-{
-	return -ENODEV;
-}
-
-static inline void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-					  size_t start, size_t len,
-					  enum dma_data_direction dir)
-{
-}
-
-static inline void *dma_buf_kmap_atomic(struct dma_buf *dmabuf,
-					unsigned long pnum)
-{
-	return NULL;
-}
-
-static inline void dma_buf_kunmap_atomic(struct dma_buf *dmabuf,
-					 unsigned long pnum, void *vaddr)
-{
-}
-
-static inline void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long pnum)
-{
-	return NULL;
-}
-
-static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
-				  unsigned long pnum, void *vaddr)
-{
-}
-
-static inline int dma_buf_mmap(struct dma_buf *dmabuf,
-			       struct vm_area_struct *vma,
-			       unsigned long pgoff)
-{
-	return -ENODEV;
-}
-
-static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
-{
-	return NULL;
-}
-
-static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
-{
-}
-#endif /* CONFIG_DMA_SHARED_BUFFER */
 
 #endif /* __DMA_BUF_H__ */
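
Usage note (not part of the commit): with the !CONFIG_DMA_SHARED_BUFFER stubs removed, code calling this API is expected to depend on the real implementation and rely on its ERR_PTR-style error returns. The importer below is only an illustrative sketch using the functions declared in this header; the function name and flow are hypothetical.

#include <linux/dma-buf.h>
#include <linux/err.h>

/* Hypothetical importer: map a dma-buf fd for DMA on @dev. */
static struct sg_table *example_import(struct device *dev, int fd)
{
	struct dma_buf *buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	buf = dma_buf_get(fd);
	if (IS_ERR(buf))
		return ERR_CAST(buf);

	attach = dma_buf_attach(buf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(buf);
		return ERR_CAST(attach);
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(buf, attach);
		dma_buf_put(buf);
	}
	return sgt;
}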
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 05cd238ad941..7617ee04f066 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1994,6 +1994,7 @@ struct filename {
 	bool			separate;	/* should "name" be freed? */
 };
 
+extern long vfs_truncate(struct path *, loff_t);
 extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
 		       struct file *filp);
 extern int do_fallocate(struct file *file, int mode, loff_t offset,
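
Usage note (not part of the commit): a minimal sketch of how an in-kernel caller might use the newly exported vfs_truncate() given a pathname; the helper name is hypothetical.

#include <linux/fs.h>
#include <linux/namei.h>

static long example_truncate_by_name(const char *name, loff_t length)
{
	struct path path;
	long err;

	err = kern_path(name, LOOKUP_FOLLOW, &path);
	if (err)
		return err;
	err = vfs_truncate(&path, length);
	path_put(&path);
	return err;
}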
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index ce31408b1e47..5dfa0aa216b6 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -75,6 +75,16 @@ extern wait_queue_head_t fscache_cache_cleared_wq;
 typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
 typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
 
+enum fscache_operation_state {
+	FSCACHE_OP_ST_BLANK,		/* Op is not yet submitted */
+	FSCACHE_OP_ST_INITIALISED,	/* Op is initialised */
+	FSCACHE_OP_ST_PENDING,		/* Op is blocked from running */
+	FSCACHE_OP_ST_IN_PROGRESS,	/* Op is in progress */
+	FSCACHE_OP_ST_COMPLETE,		/* Op is complete */
+	FSCACHE_OP_ST_CANCELLED,	/* Op has been cancelled */
+	FSCACHE_OP_ST_DEAD		/* Op is now dead */
+};
+
 struct fscache_operation {
 	struct work_struct	work;		/* record for async ops */
 	struct list_head	pend_link;	/* link in object->pending_ops */
@@ -86,10 +96,10 @@ struct fscache_operation {
 #define FSCACHE_OP_MYTHREAD	0x0002	/* - processing is done be issuing thread, not pool */
 #define FSCACHE_OP_WAITING	4	/* cleared when op is woken */
 #define FSCACHE_OP_EXCLUSIVE	5	/* exclusive op, other ops must wait */
-#define FSCACHE_OP_DEAD		6	/* op is now dead */
-#define FSCACHE_OP_DEC_READ_CNT	7	/* decrement object->n_reads on destruction */
-#define FSCACHE_OP_KEEP_FLAGS	0xc0	/* flags to keep when repurposing an op */
+#define FSCACHE_OP_DEC_READ_CNT	6	/* decrement object->n_reads on destruction */
+#define FSCACHE_OP_KEEP_FLAGS	0x0070	/* flags to keep when repurposing an op */
 
+	enum fscache_operation_state state;
 	atomic_t		usage;
 	unsigned		debug_id;	/* debugging ID */
 
@@ -106,6 +116,7 @@ extern atomic_t fscache_op_debug_id;
 extern void fscache_op_work_func(struct work_struct *work);
 
 extern void fscache_enqueue_operation(struct fscache_operation *);
+extern void fscache_op_complete(struct fscache_operation *, bool);
 extern void fscache_put_operation(struct fscache_operation *);
 
 /**
@@ -122,6 +133,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
 {
 	INIT_WORK(&op->work, fscache_op_work_func);
 	atomic_set(&op->usage, 1);
+	op->state = FSCACHE_OP_ST_INITIALISED;
 	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
 	op->processor = processor;
 	op->release = release;
@@ -138,6 +150,7 @@ struct fscache_retrieval {
 	void			*context;	/* netfs read context (pinned) */
 	struct list_head	to_do;		/* list of things to be done by the backend */
 	unsigned long		start_time;	/* time at which retrieval started */
+	unsigned		n_pages;	/* number of pages to be retrieved */
 };
 
 typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
@@ -174,8 +187,22 @@ static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
 }
 
 /**
+ * fscache_retrieval_complete - Record (partial) completion of a retrieval
+ * @op: The retrieval operation affected
+ * @n_pages: The number of pages to account for
+ */
+static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
+					      int n_pages)
+{
+	op->n_pages -= n_pages;
+	if (op->n_pages <= 0)
+		fscache_op_complete(&op->op, true);
+}
+
+/**
  * fscache_put_retrieval - Drop a reference to a retrieval operation
  * @op: The retrieval operation affected
+ * @n_pages: The number of pages to account for
  *
  * Drop a reference to a retrieval operation.
  */
@@ -227,6 +254,9 @@ struct fscache_cache_ops {
 	/* store the updated auxiliary data on an object */
 	void (*update_object)(struct fscache_object *object);
 
+	/* Invalidate an object */
+	void (*invalidate_object)(struct fscache_operation *op);
+
 	/* discard the resources pinned by an object and effect retirement if
 	 * necessary */
 	void (*drop_object)(struct fscache_object *object);
@@ -301,11 +331,30 @@ struct fscache_cookie {
 #define FSCACHE_COOKIE_PENDING_FILL	3	/* T if pending initial fill on object */
 #define FSCACHE_COOKIE_FILLING		4	/* T if filling object incrementally */
 #define FSCACHE_COOKIE_UNAVAILABLE	5	/* T if cookie is unavailable (error, etc) */
+#define FSCACHE_COOKIE_WAITING_ON_READS	6	/* T if cookie is waiting on reads */
+#define FSCACHE_COOKIE_INVALIDATING	7	/* T if cookie is being invalidated */
 };
 
 extern struct fscache_cookie fscache_fsdef_index;
 
 /*
+ * Event list for fscache_object::{event_mask,events}
+ */
+enum {
+	FSCACHE_OBJECT_EV_REQUEUE,	/* T if object should be requeued */
+	FSCACHE_OBJECT_EV_UPDATE,	/* T if object should be updated */
+	FSCACHE_OBJECT_EV_INVALIDATE,	/* T if cache requested object invalidation */
+	FSCACHE_OBJECT_EV_CLEARED,	/* T if accessors all gone */
+	FSCACHE_OBJECT_EV_ERROR,	/* T if fatal error occurred during processing */
+	FSCACHE_OBJECT_EV_RELEASE,	/* T if netfs requested object release */
+	FSCACHE_OBJECT_EV_RETIRE,	/* T if netfs requested object retirement */
+	FSCACHE_OBJECT_EV_WITHDRAW,	/* T if cache requested object withdrawal */
+	NR_FSCACHE_OBJECT_EVENTS
+};
+
+#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
+
+/*
  * on-disk cache file or index handle
  */
 struct fscache_object {
@@ -317,6 +366,7 @@ struct fscache_object {
 	/* active states */
 	FSCACHE_OBJECT_AVAILABLE,	/* cleaning up object after creation */
 	FSCACHE_OBJECT_ACTIVE,		/* object is usable */
+	FSCACHE_OBJECT_INVALIDATING,	/* object is invalidating */
 	FSCACHE_OBJECT_UPDATING,	/* object is updating */
 
 	/* terminal states */
@@ -332,10 +382,10 @@ struct fscache_object {
 
 	int			debug_id;	/* debugging ID */
 	int			n_children;	/* number of child objects */
-	int			n_ops;		/* number of ops outstanding on object */
+	int			n_ops;		/* number of extant ops on object */
 	int			n_obj_ops;	/* number of object ops outstanding on object */
 	int			n_in_progress;	/* number of ops in progress */
-	int			n_exclusive;	/* number of exclusive ops queued */
+	int			n_exclusive;	/* number of exclusive ops queued or in progress */
 	atomic_t		n_reads;	/* number of read ops in progress */
 	spinlock_t		lock;		/* state and operations lock */
 
@@ -343,14 +393,6 @@ struct fscache_object {
 	unsigned long		event_mask;	/* events this object is interested in */
 	unsigned long		events;		/* events to be processed by this object
 						 * (order is important - using fls) */
-#define FSCACHE_OBJECT_EV_REQUEUE	0	/* T if object should be requeued */
-#define FSCACHE_OBJECT_EV_UPDATE	1	/* T if object should be updated */
-#define FSCACHE_OBJECT_EV_CLEARED	2	/* T if accessors all gone */
-#define FSCACHE_OBJECT_EV_ERROR		3	/* T if fatal error occurred during processing */
-#define FSCACHE_OBJECT_EV_RELEASE	4	/* T if netfs requested object release */
-#define FSCACHE_OBJECT_EV_RETIRE	5	/* T if netfs requested object retirement */
-#define FSCACHE_OBJECT_EV_WITHDRAW	6	/* T if cache requested object withdrawal */
-#define FSCACHE_OBJECT_EVENTS_MASK	0x7f	/* mask of all events*/
 
 	unsigned long		flags;
 #define FSCACHE_OBJECT_LOCK		0	/* T if object is busy being processed */
@@ -504,6 +546,9 @@ extern void fscache_withdraw_cache(struct fscache_cache *cache);
 
 extern void fscache_io_error(struct fscache_cache *cache);
 
+extern void fscache_mark_page_cached(struct fscache_retrieval *op,
+				     struct page *page);
+
 extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
 				      struct pagevec *pagevec);
 
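
Usage note (not part of the commit): a rough sketch of how a cache backend's per-page completion path might use the helpers declared above, namely fscache_mark_page_cached() and fscache_retrieval_complete(); the function name and surrounding flow are assumptions.

/* Hypothetical backend helper: called once per page as backing I/O finishes. */
static void example_page_read_done(struct fscache_retrieval *op,
				   struct page *page, int error)
{
	if (error == 0)
		fscache_mark_page_cached(op, page);

	/* Each page is accounted against op->n_pages; once it reaches zero,
	 * the operation as a whole is completed via fscache_op_complete(). */
	fscache_retrieval_complete(op, 1);
}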
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 9ec20dec3353..7a086235da4b 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -135,14 +135,14 @@ struct fscache_cookie_def {
 	 */
 	void (*put_context)(void *cookie_netfs_data, void *context);
 
-	/* indicate pages that now have cache metadata retained
-	 * - this function should mark the specified pages as now being cached
-	 * - the pages will have been marked with PG_fscache before this is
+	/* indicate page that now have cache metadata retained
+	 * - this function should mark the specified page as now being cached
+	 * - the page will have been marked with PG_fscache before this is
 	 *   called, so this is optional
 	 */
-	void (*mark_pages_cached)(void *cookie_netfs_data,
-				  struct address_space *mapping,
-				  struct pagevec *cached_pvec);
+	void (*mark_page_cached)(void *cookie_netfs_data,
+				 struct address_space *mapping,
+				 struct page *page);
 
 	/* indicate the cookie is no longer cached
 	 * - this function is called when the backing store currently caching
@@ -185,6 +185,8 @@ extern struct fscache_cookie *__fscache_acquire_cookie(
 extern void __fscache_relinquish_cookie(struct fscache_cookie *, int);
 extern void __fscache_update_cookie(struct fscache_cookie *);
 extern int __fscache_attr_changed(struct fscache_cookie *);
+extern void __fscache_invalidate(struct fscache_cookie *);
+extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
 extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
 					struct page *,
 					fscache_rw_complete_t,
@@ -390,6 +392,42 @@ int fscache_attr_changed(struct fscache_cookie *cookie)
 }
 
 /**
+ * fscache_invalidate - Notify cache that an object needs invalidation
+ * @cookie: The cookie representing the cache object
+ *
+ * Notify the cache that an object is needs to be invalidated and that it
+ * should abort any retrievals or stores it is doing on the cache.  The object
+ * is then marked non-caching until such time as the invalidation is complete.
+ *
+ * This can be called with spinlocks held.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_invalidate(struct fscache_cookie *cookie)
+{
+	if (fscache_cookie_valid(cookie))
+		__fscache_invalidate(cookie);
+}
+
+/**
+ * fscache_wait_on_invalidate - Wait for invalidation to complete
+ * @cookie: The cookie representing the cache object
+ *
+ * Wait for the invalidation of an object to complete.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for a complete
+ * description.
+ */
+static inline
+void fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+{
+	if (fscache_cookie_valid(cookie))
+		__fscache_wait_on_invalidate(cookie);
+}
+
+/**
  * fscache_reserve_space - Reserve data space for a cached object
  * @cookie: The cookie representing the cache object
  * @i_size: The amount of space to be reserved
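
Usage note (not part of the commit): an illustrative netfs-side sketch of the new invalidation calls added above; the function names and triggering conditions are assumptions.

/* Hypothetical netfs usage of the new invalidation API. */
static void example_note_remote_change(struct fscache_cookie *cookie)
{
	/* Safe under spinlocks: marks the object and defers the real work. */
	fscache_invalidate(cookie);
}

static void example_before_cache_read(struct fscache_cookie *cookie)
{
	/* May sleep: block until any pending invalidation has finished. */
	fscache_wait_on_invalidate(cookie);
}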
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h
new file mode 100644
index 000000000000..ff9b0aab5281
--- /dev/null
+++ b/include/linux/platform_data/serial-omap.h
@@ -0,0 +1,51 @@
+/*
+ * Driver for OMAP-UART controller.
+ * Based on drivers/serial/8250.c
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Authors:
+ *	Govindraj R	<govindraj.raja@ti.com>
+ *	Thara Gopinath	<thara@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __OMAP_SERIAL_H__
+#define __OMAP_SERIAL_H__
+
+#include <linux/serial_core.h>
+#include <linux/device.h>
+#include <linux/pm_qos.h>
+
+#define DRIVER_NAME	"omap_uart"
+
+/*
+ * Use tty device name as ttyO, [O -> OMAP]
+ * in bootargs we specify as console=ttyO0 if uart1
+ * is used as console uart.
+ */
+#define OMAP_SERIAL_NAME	"ttyO"
+
+struct omap_uart_port_info {
+	bool			dma_enabled;	/* To specify DMA Mode */
+	unsigned int		uartclk;	/* UART clock rate */
+	upf_t			flags;		/* UPF_* flags */
+	unsigned int		dma_rx_buf_size;
+	unsigned int		dma_rx_timeout;
+	unsigned int		autosuspend_timeout;
+	unsigned int		dma_rx_poll_rate;
+	int			DTR_gpio;
+	int			DTR_inverted;
+	int			DTR_present;
+
+	int (*get_context_loss_count)(struct device *);
+	void (*set_forceidle)(struct device *);
+	void (*set_noidle)(struct device *);
+	void (*enable_wakeup)(struct device *, bool);
+};
+
+#endif /* __OMAP_SERIAL_H__ */
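
Usage note (not part of the commit): a board file would typically populate this structure as platform data for the OMAP UART device. The values below are invented purely for illustration.

#include <linux/platform_data/serial-omap.h>

/* Hypothetical board data for one UART; all values are examples only. */
static struct omap_uart_port_info example_uart1_info = {
	.dma_enabled		= false,
	.uartclk		= 48000000,		/* 48 MHz functional clock */
	.flags			= UPF_BOOT_AUTOCONF,
	.autosuspend_timeout	= 3000,			/* ms before runtime suspend */
	.DTR_gpio		= -1,			/* no DTR line wired up */
	.DTR_present		= 0,
};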
diff --git a/include/linux/platform_data/usb-omap.h b/include/linux/platform_data/usb-omap.h
index 8570bcfe6311..ef65b67c56c3 100644
--- a/include/linux/platform_data/usb-omap.h
+++ b/include/linux/platform_data/usb-omap.h
@@ -59,6 +59,9 @@ struct usbhs_omap_platform_data {
 
 	struct ehci_hcd_omap_platform_data	*ehci_data;
 	struct ohci_hcd_omap_platform_data	*ohci_data;
+
+	/* OMAP3 <= ES2.1 have a single ulpi bypass control bit */
+	unsigned				single_ulpi_bypass:1;
 };
 
 /*-------------------------------------------------------------------------*/
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 533b1157f22e..cf8adb1f5b2c 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -16,12 +16,20 @@
  * @name: the name of this virtqueue (mainly for debugging)
  * @vdev: the virtio device this queue was created for.
  * @priv: a pointer for the virtqueue implementation to use.
+ * @index: the zero-based ordinal number for this queue.
+ * @num_free: number of elements we expect to be able to fit.
+ *
+ * A note on @num_free: with indirect buffers, each buffer needs one
+ * element in the queue, otherwise a buffer will need one element per
+ * sg element.
  */
 struct virtqueue {
 	struct list_head list;
 	void (*callback)(struct virtqueue *vq);
 	const char *name;
 	struct virtio_device *vdev;
+	unsigned int index;
+	unsigned int num_free;
 	void *priv;
 };
 
@@ -50,7 +58,11 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
 
 unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
 
-int virtqueue_get_queue_index(struct virtqueue *vq);
+/* FIXME: Obsolete accessor, but required for virtio_net merge. */
+static inline unsigned int virtqueue_get_queue_index(struct virtqueue *vq)
+{
+	return vq->index;
+}
 
 /**
  * virtio_device - representation of a device using virtio
@@ -73,7 +85,11 @@ struct virtio_device {
 	void *priv;
 };
 
-#define dev_to_virtio(dev) container_of(dev, struct virtio_device, dev)
+static inline struct virtio_device *dev_to_virtio(struct device *_dev)
+{
+	return container_of(_dev, struct virtio_device, dev);
+}
+
 int register_virtio_device(struct virtio_device *dev);
 void unregister_virtio_device(struct virtio_device *dev);
 
@@ -103,6 +119,11 @@ struct virtio_driver {
 #endif
 };
 
+static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
+{
+	return container_of(drv, struct virtio_driver, driver);
+}
+
 int register_virtio_driver(struct virtio_driver *drv);
 void unregister_virtio_driver(struct virtio_driver *drv);
 #endif /* _LINUX_VIRTIO_H */
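
Usage note (not part of the commit): two illustrative sketches of what the header now exposes. A driver can consult @num_free to check for ring space, and bus-level code can use the typed dev_to_virtio()/drv_to_virtio() helpers instead of the old macro. Function names and the matching rule below are assumptions.

/* Hypothetical helpers built on the new fields and accessors. */
static bool example_vq_has_room(struct virtqueue *vq, unsigned int out_sgs)
{
	/* With indirect buffers a request takes one slot, otherwise one per sg. */
	return vq->num_free >= out_sgs;
}

static bool example_driver_matches(struct device *dev, struct device_driver *drv)
{
	struct virtio_device *vdev = dev_to_virtio(dev);
	struct virtio_driver *vdrv = drv_to_virtio(drv);

	return vdrv->id_table && vdrv->id_table[0].device == vdev->id.device;
}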
diff --git a/include/linux/virtio_scsi.h b/include/linux/virtio_scsi.h
index d6b4440387b7..4195b97a3def 100644
--- a/include/linux/virtio_scsi.h
+++ b/include/linux/virtio_scsi.h
@@ -1,7 +1,31 @@
+/*
+ * This header is BSD licensed so anyone can use the definitions to implement
+ * compatible drivers/servers.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
 #ifndef _LINUX_VIRTIO_SCSI_H
 #define _LINUX_VIRTIO_SCSI_H
-/* This header is BSD licensed so anyone can use the definitions to implement
- * compatible drivers/servers. */
 
 #define VIRTIO_SCSI_CDB_SIZE   32
 #define VIRTIO_SCSI_SENSE_SIZE 96
