Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/aio.h      | 1 +
-rw-r--r--  include/linux/blkdev.h   | 1 -
-rw-r--r--  include/linux/input.h    | 1 +
-rw-r--r--  include/linux/kvm_host.h | 4 ++--
-rw-r--r--  include/linux/prctl.h    | 2 ++
-rw-r--r--  include/linux/splice.h   | 8 ++++----
6 files changed, 10 insertions, 7 deletions
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 2314ad8b3c9c..b1a520ec8b59 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -140,6 +140,7 @@ struct kiocb {
 	(x)->ki_dtor = NULL;		\
 	(x)->ki_obj.tsk = tsk;		\
 	(x)->ki_user_data = 0;		\
+	(x)->private = NULL;		\
 	} while (0)
 
 #define AIO_RING_MAGIC			0xa10a10a1
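The hunk above makes the kiocb initializer macro this context belongs to (presumably init_sync_kiocb()) clear ->private as well, so code that stashes per-request state in that pointer can rely on it starting out NULL. A minimal sketch of that pattern, with struct my_req and my_complete() as hypothetical driver names:

#include <linux/aio.h>
#include <linux/slab.h>

/* Hypothetical per-request state a driver might hang off the kiocb. */
struct my_req {
	void *buf;
};

/* Completion path: because the initializer now clears ->private, a
 * request that was never set up simply skips the cleanup. */
static void my_complete(struct kiocb *iocb)
{
	struct my_req *req = iocb->private;

	if (req) {
		kfree(req->buf);
		kfree(req);
		iocb->private = NULL;
	}
}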
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ba43f408baa3..07954b05b86c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -827,7 +827,6 @@ extern bool __blk_end_request_err(struct request *rq, int error);
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
-extern void blk_abort_queue(struct request_queue *);
 extern void blk_unprep_request(struct request *);
 
 /*
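With blk_abort_queue() dropped from the public interface, the per-request blk_abort_request() above remains. A hedged sketch of what a driver-side replacement might look like, with struct my_dev and struct my_cmd as hypothetical driver-private tracking types:

#include <linux/blkdev.h>
#include <linux/list.h>

struct my_cmd {
	struct request *rq;
	struct list_head list;
};

struct my_dev {
	struct list_head inflight;
};

static void my_dev_abort_inflight(struct my_dev *dev)
{
	struct my_cmd *cmd;

	/* Abort each tracked request individually now that the
	 * queue-wide helper no longer exists. */
	list_for_each_entry(cmd, &dev->inflight, list)
		blk_abort_request(cmd->rq);
}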
diff --git a/include/linux/input.h b/include/linux/input.h
index a81671453575..2740d080ec6b 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -116,6 +116,7 @@ struct input_keymap_entry {
 
 /**
  * EVIOCGMTSLOTS(len) - get MT slot values
+ * @len: size of the data buffer in bytes
  *
  * The ioctl buffer argument should be binary equivalent to
  *
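The new @len line documents the buffer size argument of EVIOCGMTSLOTS. A hedged userspace sketch of the call, assuming the buffer layout described by the surrounding comment block (one __u32 code followed by one __s32 value per slot); read_mt_x() is a hypothetical helper and num_slots would normally come from EVIOCGABS(ABS_MT_SLOT):

#include <linux/input.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdio.h>

int read_mt_x(int fd, int num_slots)
{
	struct {
		uint32_t code;
		int32_t values[64];
	} req = { .code = ABS_MT_POSITION_X };

	if (num_slots > 64)
		num_slots = 64;

	/* @len is the total buffer size in bytes, as documented above. */
	if (ioctl(fd, EVIOCGMTSLOTS(sizeof(uint32_t) * (num_slots + 1)), &req) < 0)
		return -1;

	for (int i = 0; i < num_slots; i++)
		printf("slot %d: x=%d\n", i, req.values[i]);
	return 0;
}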
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c4464356b35b..96c158a37d3e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -815,7 +815,7 @@ static inline void kvm_free_irq_routing(struct kvm *kvm) {}
 #ifdef CONFIG_HAVE_KVM_EVENTFD
 
 void kvm_eventfd_init(struct kvm *kvm);
-int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
+int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
 void kvm_irqfd_release(struct kvm *kvm);
 void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
@@ -824,7 +824,7 @@ int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
 
 static inline void kvm_eventfd_init(struct kvm *kvm) {}
 
-static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
+static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
 {
 	return -EINVAL;
 }
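Both hunks switch kvm_irqfd() from separate fd/gsi/flags arguments to a single struct kvm_irqfd, matching what userspace already passes through the KVM_IRQFD ioctl. A minimal sketch of that userspace side, assuming vm_fd is an existing VM descriptor and efd an eventfd:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

int attach_irqfd(int vm_fd, int efd, unsigned int gsi)
{
	struct kvm_irqfd irqfd;

	memset(&irqfd, 0, sizeof(irqfd));
	irqfd.fd  = efd;	/* eventfd to trigger the interrupt */
	irqfd.gsi = gsi;	/* guest interrupt line */

	return ioctl(vm_fd, KVM_IRQFD, &irqfd);
}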
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 3988012255dc..289760f424aa 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -141,6 +141,8 @@
  * Changing LSM security domain is considered a new privilege. So, for example,
  * asking selinux for a specific new context (e.g. with runcon) will result
  * in execve returning -EPERM.
+ *
+ * See Documentation/prctl/no_new_privs.txt for more details.
+ */
 #define PR_SET_NO_NEW_PRIVS	38
 #define PR_GET_NO_NEW_PRIVS	39
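PR_SET_NO_NEW_PRIVS and PR_GET_NO_NEW_PRIVS are driven through prctl(2). A minimal userspace sketch that sets the bit and reads it back; once set, execve() can no longer grant privileges, as the comment block above describes:

#include <sys/prctl.h>
#include <stdio.h>

int main(void)
{
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) != 0) {
		perror("PR_SET_NO_NEW_PRIVS");
		return 1;
	}
	printf("no_new_privs = %d\n", prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0));
	return 0;
}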
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 26e5b613deda..09a545a7dfa3 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -51,7 +51,8 @@ struct partial_page {
 struct splice_pipe_desc {
 	struct page **pages;		/* page map */
 	struct partial_page *partial;	/* pages[] may not be contig */
-	int nr_pages;			/* number of pages in map */
+	int nr_pages;			/* number of populated pages in map */
+	unsigned int nr_pages_max;	/* pages[] & partial[] arrays size */
 	unsigned int flags;		/* splice flags */
 	const struct pipe_buf_operations *ops;/* ops associated with output pipe */
 	void (*spd_release)(struct splice_pipe_desc *, unsigned int);
@@ -85,9 +86,8 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
 /*
  * for dynamic pipe sizing
  */
-extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *);
-extern void splice_shrink_spd(struct pipe_inode_info *,
-			      struct splice_pipe_desc *);
+extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
+extern void splice_shrink_spd(struct splice_pipe_desc *);
 extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
 
 extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
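Taken together, the two hunks mean callers now record the capacity of pages[]/partial[] in nr_pages_max and no longer pass the pipe to splice_shrink_spd(). A hedged sketch of the resulting call pattern, loosely following how in-kernel splice helpers set up an on-stack descriptor; my_fill_pipe() is a hypothetical helper:

#include <linux/pipe_fs_i.h>
#include <linux/splice.h>

static ssize_t my_fill_pipe(struct pipe_inode_info *pipe, unsigned int flags)
{
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages,
		.partial	= partial,
		.nr_pages_max	= PIPE_DEF_BUFFERS,	/* capacity of the arrays */
		.nr_pages	= 0,			/* filled in below */
		.flags		= flags,
		.ops		= &page_cache_pipe_buf_ops,
		.spd_release	= spd_release_page,
	};
	ssize_t ret;

	/* May swap pages[]/partial[] for larger allocations and raise
	 * nr_pages_max accordingly. */
	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* ... populate spd.pages[]/spd.partial[] and set spd.nr_pages ... */

	ret = splice_to_pipe(pipe, &spd);

	/* New signature: only the descriptor is needed for cleanup. */
	splice_shrink_spd(&spd);
	return ret;
}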