diff options
Diffstat (limited to 'include/asm-x86/cacheflush.h')
-rw-r--r-- | include/asm-x86/cacheflush.h | 80 |
1 files changed, 66 insertions, 14 deletions
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h index 5396c212d8c0..f4c0ab50d2c2 100644 --- a/include/asm-x86/cacheflush.h +++ b/include/asm-x86/cacheflush.h | |||
@@ -14,33 +14,85 @@ | |||
14 | #define flush_dcache_mmap_lock(mapping) do { } while (0) | 14 | #define flush_dcache_mmap_lock(mapping) do { } while (0) |
15 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) | 15 | #define flush_dcache_mmap_unlock(mapping) do { } while (0) |
16 | #define flush_icache_range(start, end) do { } while (0) | 16 | #define flush_icache_range(start, end) do { } while (0) |
17 | #define flush_icache_page(vma,pg) do { } while (0) | 17 | #define flush_icache_page(vma, pg) do { } while (0) |
18 | #define flush_icache_user_range(vma,pg,adr,len) do { } while (0) | 18 | #define flush_icache_user_range(vma, pg, adr, len) do { } while (0) |
19 | #define flush_cache_vmap(start, end) do { } while (0) | 19 | #define flush_cache_vmap(start, end) do { } while (0) |
20 | #define flush_cache_vunmap(start, end) do { } while (0) | 20 | #define flush_cache_vunmap(start, end) do { } while (0) |
21 | 21 | ||
22 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 22 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ |
23 | memcpy(dst, src, len) | 23 | memcpy((dst), (src), (len)) |
24 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | 24 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ |
25 | memcpy(dst, src, len) | 25 | memcpy((dst), (src), (len)) |
26 | 26 | ||
27 | int __deprecated_for_modules change_page_attr(struct page *page, int numpages, | ||
28 | pgprot_t prot); | ||
29 | 27 | ||
30 | int set_pages_uc(struct page *page, int numpages); | 28 | /* |
31 | int set_pages_wb(struct page *page, int numpages); | 29 | * The set_memory_* API can be used to change various attributes of a virtual |
32 | int set_pages_x(struct page *page, int numpages); | 30 | * address range. The attributes include: |
33 | int set_pages_nx(struct page *page, int numpages); | 31 | * Cacheability : UnCached, WriteCombining, WriteBack |
34 | int set_pages_ro(struct page *page, int numpages); | 32 | * Executability : eXecutable, NoteXecutable |
35 | int set_pages_rw(struct page *page, int numpages); | 33 | * Read/Write : ReadOnly, ReadWrite |
34 | * Presence : NotPresent | ||
35 | * | ||
36 | * Within a category, the attributes are mutually exclusive. | ||
37 | * | ||
38 | * The implementation of this API will take care of various aspects that | ||
39 | * are associated with changing such attributes, such as: | ||
40 | * - Flushing TLBs | ||
41 | * - Flushing CPU caches | ||
42 | * - Making sure aliases of the memory behind the mapping don't violate | ||
43 | * coherency rules as defined by the CPU in the system. | ||
44 | * | ||
45 | * What this API does not do: | ||
46 | * - Provide exclusion between various callers - including callers that | ||
47 | * operate on other mappings of the same physical page | ||
48 | * - Restore default attributes when a page is freed | ||
49 | * - Guarantee that mappings other than the requested one are | ||
50 | * in any state, other than that these do not violate rules for | ||
51 | * the CPU you have. Do not depend on any effects on other mappings, | ||
52 | * CPUs other than the one you have may have more relaxed rules. | ||
53 | * The caller is required to take care of these. | ||
54 | */ | ||
36 | 55 | ||
56 | int _set_memory_uc(unsigned long addr, int numpages); | ||
57 | int _set_memory_wc(unsigned long addr, int numpages); | ||
58 | int _set_memory_wb(unsigned long addr, int numpages); | ||
37 | int set_memory_uc(unsigned long addr, int numpages); | 59 | int set_memory_uc(unsigned long addr, int numpages); |
60 | int set_memory_wc(unsigned long addr, int numpages); | ||
38 | int set_memory_wb(unsigned long addr, int numpages); | 61 | int set_memory_wb(unsigned long addr, int numpages); |
39 | int set_memory_x(unsigned long addr, int numpages); | 62 | int set_memory_x(unsigned long addr, int numpages); |
40 | int set_memory_nx(unsigned long addr, int numpages); | 63 | int set_memory_nx(unsigned long addr, int numpages); |
41 | int set_memory_ro(unsigned long addr, int numpages); | 64 | int set_memory_ro(unsigned long addr, int numpages); |
42 | int set_memory_rw(unsigned long addr, int numpages); | 65 | int set_memory_rw(unsigned long addr, int numpages); |
43 | int set_memory_np(unsigned long addr, int numpages); | 66 | int set_memory_np(unsigned long addr, int numpages); |
67 | int set_memory_4k(unsigned long addr, int numpages); | ||
68 | |||
69 | /* | ||
70 | * For legacy compatibility with the old APIs, a few functions | ||
71 | * are provided that work on a "struct page". | ||
72 | * These functions operate ONLY on the 1:1 kernel mapping of the | ||
73 | * memory that the struct page represents, and internally just | ||
74 | * call the set_memory_* function. See the description of the | ||
75 | * set_memory_* function for more details on conventions. | ||
76 | * | ||
77 | * These APIs should be considered *deprecated* and are likely going to | ||
78 | * be removed in the future. | ||
79 | * The reason for this is the implicit operation on the 1:1 mapping only, | ||
80 | * making this not a generally useful API. | ||
81 | * | ||
82 | * Specifically, many users of the old APIs had a virtual address, | ||
83 | * called virt_to_page() or vmalloc_to_page() on that address to | ||
84 | * get a struct page* that the old API required. | ||
85 | * To convert these cases, use set_memory_*() on the original | ||
86 | * virtual address, do not use these functions. | ||
87 | */ | ||
88 | |||
89 | int set_pages_uc(struct page *page, int numpages); | ||
90 | int set_pages_wb(struct page *page, int numpages); | ||
91 | int set_pages_x(struct page *page, int numpages); | ||
92 | int set_pages_nx(struct page *page, int numpages); | ||
93 | int set_pages_ro(struct page *page, int numpages); | ||
94 | int set_pages_rw(struct page *page, int numpages); | ||
95 | |||
44 | 96 | ||
45 | void clflush_cache_range(void *addr, unsigned int size); | 97 | void clflush_cache_range(void *addr, unsigned int size); |
46 | 98 | ||