diff options
author | Qian Cai <cai@lca.pw> | 2019-07-16 19:27:06 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-16 22:23:22 -0400 |
commit | c296d4dc13aefe96792538a949996b8938f28f13 (patch) | |
tree | 5b2f4f6fd98e6e5286f749e0aa639cf9c35054cd | |
parent | 3a7f0adfe7c27cdaf6dc3456226a430398732e2c (diff) |
asm-generic: fix a compilation warning
Fix this compilation warning on x86 by making flush_cache_vmap() inline.
lib/ioremap.c: In function 'ioremap_page_range':
lib/ioremap.c:214:16: warning: variable 'start' set but not used [-Wunused-but-set-variable]
unsigned long start;
^~~~~
While at it, convert all other similar functions to inline for
consistency.
Link: http://lkml.kernel.org/r/1562594592-15228-1-git-send-email-cai@lca.pw
Signed-off-by: Qian Cai <cai@lca.pw>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/asm-generic/cacheflush.h | 74 |
1 file changed, 60 insertions, 14 deletions
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h index 0dd47a6db2cf..a950a22c4890 100644 --- a/include/asm-generic/cacheflush.h +++ b/include/asm-generic/cacheflush.h | |||
@@ -5,24 +5,70 @@ | |||
5 | /* Keep includes the same across arches. */ | 5 | /* Keep includes the same across arches. */ |
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | 7 | ||
/*
 * flush_dcache_page() below is a no-op, so tell core code that this
 * architecture does not implement it.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
9 | |||
8 | /* | 10 | /* |
9 | * The cache doesn't need to be flushed when TLB entries change when | 11 | * The cache doesn't need to be flushed when TLB entries change when |
10 | * the cache is mapped to physical memory, not virtual memory | 12 | * the cache is mapped to physical memory, not virtual memory |
11 | */ | 13 | */ |
/*
 * No-op: per the comment above, a cache mapped to physical memory needs
 * no flushing when TLB entries change.
 */
static inline void flush_cache_all(void)
{
}
/* No-op: a physically-mapped cache needs no per-mm maintenance. */
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
/* No-op counterpart of flush_cache_mm() for a duplicated mm. */
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
/* No-op: no cache maintenance needed over a virtual address range. */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
}
/* No-op: no cache maintenance needed for a single user page. */
static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
{
}
/*
 * No-op; ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is 0 accordingly in this
 * header.
 */
static inline void flush_dcache_page(struct page *page)
{
}
/* No-op: no dcache-aliasing locking needed in the generic version. */
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
/* No-op counterpart of flush_dcache_mmap_lock(). */
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
/* No-op: instruction cache needs no flushing on this generic arch. */
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
/* No-op per-page variant of flush_icache_range(). */
static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
{
}
/* No-op: icache maintenance for a user-space range is not required. */
static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
{
}
/*
 * No-op. Being a real inline function (rather than an empty macro)
 * keeps callers' otherwise-unused argument variables from triggering
 * -Wunused-but-set-variable.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
/* No-op counterpart of flush_cache_vmap(). */
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
26 | 72 | ||
27 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 73 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
28 | do { \ | 74 | do { \ |