Diffstat (limited to 'arch/microblaze/mm/consistent.c')
-rw-r--r--	arch/microblaze/mm/consistent.c	190
1 file changed, 99 insertions(+), 91 deletions(-)
diff --git a/arch/microblaze/mm/consistent.c b/arch/microblaze/mm/consistent.c
index f956e24fe49c..5a59dad62bd2 100644
--- a/arch/microblaze/mm/consistent.c
+++ b/arch/microblaze/mm/consistent.c
@@ -42,11 +42,12 @@
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cpuinfo.h>
+#include <asm/tlbflush.h>
 
 #ifndef CONFIG_MMU
-
 /* I have to use dcache values because I can't relate on ram size */
-#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
+# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
+#endif
 
 /*
  * Consistent memory allocators. Used for DMA devices that want to
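
Note on the hunk above: with CONFIG_XILINX_UNCACHED_SHADOW, noMMU Xilinx systems decode RAM a second time at an uncached alias one cache-window size above the cacheable mapping, which is why UNCACHED_SHADOW_MASK is derived from the dcache extents rather than from RAM size. A minimal userspace sketch of the address arithmetic, assuming a hypothetical 64 MB cacheable window at 0x80000000 (the kernel reads the real bounds from cpuinfo at runtime):

	#include <stdio.h>
	#include <stdint.h>

	/* Hypothetical window for illustration; the kernel derives these
	 * from cpuinfo.dcache_base and cpuinfo.dcache_high. */
	#define DCACHE_BASE 0x80000000u
	#define DCACHE_HIGH 0x83ffffffu

	/* Size of the cacheable window == offset of its uncached alias. */
	#define UNCACHED_SHADOW_MASK (DCACHE_HIGH - DCACHE_BASE + 1) /* 0x04000000 */

	int main(void)
	{
		uint32_t cached = 0x80123000u;	/* a page inside the cached window */
		uint32_t uncached = cached | UNCACHED_SHADOW_MASK;

		/* Same physical RAM, reached through the uncached shadow. */
		printf("cached   %#010x\nuncached %#010x\n",
		       (unsigned)cached, (unsigned)uncached);
		return 0;
	}
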
@@ -60,71 +61,16 @@
  */
 void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
 {
-	struct page *page, *end, *free;
-	unsigned long order;
-	void *ret, *virt;
-
-	if (in_interrupt())
-		BUG();
-
-	size = PAGE_ALIGN(size);
-	order = get_order(size);
-
-	page = alloc_pages(gfp, order);
-	if (!page)
-		goto no_page;
-
-	/* We could do with a page_to_phys and page_to_bus here. */
-	virt = page_address(page);
-	ret = ioremap(virt_to_phys(virt), size);
-	if (!ret)
-		goto no_remap;
-
-	/*
-	 * Here's the magic! Note if the uncached shadow is not implemented,
-	 * it's up to the calling code to also test that condition and make
-	 * other arranegments, such as manually flushing the cache and so on.
-	 */
-#ifdef CONFIG_XILINX_UNCACHED_SHADOW
-	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
-#endif
-	/* dma_handle is same as physical (shadowed) address */
-	*dma_handle = (dma_addr_t)ret;
-
-	/*
-	 * free wasted pages. We skip the first page since we know
-	 * that it will have count = 1 and won't require freeing.
-	 * We also mark the pages in use as reserved so that
-	 * remap_page_range works.
-	 */
-	page = virt_to_page(virt);
-	free = page + (size >> PAGE_SHIFT);
-	end = page + (1 << order);
-
-	for (; page < end; page++) {
-		init_page_count(page);
-		if (page >= free)
-			__free_page(page);
-		else
-			SetPageReserved(page);
-	}
-
-	return ret;
-no_remap:
-	__free_pages(page, order);
-no_page:
-	return NULL;
-}
-
-#else
+	unsigned long order, vaddr;
+	void *ret;
+	unsigned int i, err = 0;
+	struct page *page, *end;
 
-void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
-{
-	int order, err, i;
-	unsigned long page, va, flags;
+#ifdef CONFIG_MMU
 	phys_addr_t pa;
 	struct vm_struct *area;
-	void *ret;
+	unsigned long va;
+#endif
 
 	if (in_interrupt())
 		BUG();
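
The rewrite above folds the separate noMMU and MMU consistent_alloc() variants into one body; the shared size/order bookkeeping survives as context at the top of the next hunk. Since get_order() rounds a request up to a power-of-two block of pages, most allocations over-commit, which is what the rewritten trimming loop further down pays back. A standalone sketch of that arithmetic (4 KB pages assumed; get_order() re-implemented here only for illustration):

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* assuming 4 KB pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

	/* Toy re-implementation of the kernel's get_order() for this example. */
	static int get_order(unsigned long size)
	{
		int order = 0;

		size = (size - 1) >> PAGE_SHIFT;
		while (size) {
			order++;
			size >>= 1;
		}
		return order;
	}

	int main(void)
	{
		/* A 20 KB request aligns to 24 KB (6 pages) but still needs an
		 * order-3 (32 KB, 8-page) block: two pages must be handed back. */
		unsigned long size = PAGE_ALIGN(20 * 1024);
		int order = get_order(size);

		printf("size=%lu order=%d block=%lu\n",
		       size, order, PAGE_SIZE << order);
		return 0;
	}
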
@@ -133,71 +79,133 @@ void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	page = __get_free_pages(gfp, order);
-	if (!page) {
-		BUG();
+	vaddr = __get_free_pages(gfp, order);
+	if (!vaddr)
 		return NULL;
-	}
 
 	/*
 	 * we need to ensure that there are no cachelines in use,
 	 * or worse dirty in this area.
 	 */
-	flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size);
+	flush_dcache_range(virt_to_phys((void *)vaddr),
+					virt_to_phys((void *)vaddr) + size);
 
+#ifndef CONFIG_MMU
+	ret = (void *)vaddr;
+	/*
+	 * Here's the magic! Note if the uncached shadow is not implemented,
+	 * it's up to the calling code to also test that condition and make
+	 * other arranegments, such as manually flushing the cache and so on.
+	 */
+# ifdef CONFIG_XILINX_UNCACHED_SHADOW
+	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
+# endif
+	if ((unsigned int)ret > cpuinfo.dcache_base &&
+				(unsigned int)ret < cpuinfo.dcache_high)
+		printk(KERN_WARNING
+			"ERROR: Your cache coherent area is CACHED!!!\n");
+
+	/* dma_handle is same as physical (shadowed) address */
+	*dma_handle = (dma_addr_t)ret;
+#else
 	/* Allocate some common virtual space to map the new pages. */
 	area = get_vm_area(size, VM_ALLOC);
-	if (area == NULL) {
-		free_pages(page, order);
+	if (!area) {
+		free_pages(vaddr, order);
 		return NULL;
 	}
 	va = (unsigned long) area->addr;
 	ret = (void *)va;
 
 	/* This gives us the real physical address of the first page. */
-	*dma_handle = pa = virt_to_bus((void *)page);
-
-	/* MS: This is the whole magic - use cache inhibit pages */
-	flags = _PAGE_KERNEL | _PAGE_NO_CACHE;
+	*dma_handle = pa = virt_to_bus((void *)vaddr);
+#endif
 
 	/*
-	 * Set refcount=1 on all pages in an order>0
-	 * allocation so that vfree() will actually
-	 * free all pages that were allocated.
+	 * free wasted pages. We skip the first page since we know
+	 * that it will have count = 1 and won't require freeing.
+	 * We also mark the pages in use as reserved so that
+	 * remap_page_range works.
 	 */
-	if (order > 0) {
-		struct page *rpage = virt_to_page(page);
-		for (i = 1; i < (1 << order); i++)
-			init_page_count(rpage+i);
+	page = virt_to_page(vaddr);
+	end = page + (1 << order);
+
+	split_page(page, order);
+
+	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
+#ifdef CONFIG_MMU
+		/* MS: This is the whole magic - use cache inhibit pages */
+		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
+#endif
+
+		SetPageReserved(page);
+		page++;
 	}
 
-	err = 0;
-	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
-		err = map_page(va+i, pa+i, flags);
+	/* Free the otherwise unused pages. */
+	while (page < end) {
+		__free_page(page);
+		page++;
+	}
 
 	if (err) {
-		vfree((void *)va);
+		free_pages(vaddr, order);
 		return NULL;
 	}
 
 	return ret;
 }
-#endif /* CONFIG_MMU */
 EXPORT_SYMBOL(consistent_alloc);
 
 /*
  * free page(s) as defined by the above mapping.
  */
-void consistent_free(void *vaddr)
+void consistent_free(size_t size, void *vaddr)
 {
+	struct page *page;
+
 	if (in_interrupt())
 		BUG();
 
+	size = PAGE_ALIGN(size);
+
+#ifndef CONFIG_MMU
 	/* Clear SHADOW_MASK bit in address, and free as per usual */
-#ifdef CONFIG_XILINX_UNCACHED_SHADOW
+# ifdef CONFIG_XILINX_UNCACHED_SHADOW
 	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
+# endif
+	page = virt_to_page(vaddr);
+
+	do {
+		ClearPageReserved(page);
+		__free_page(page);
+		page++;
+	} while (size -= PAGE_SIZE);
+#else
+	do {
+		pte_t *ptep;
+		unsigned long pfn;
+
+		ptep = pte_offset_kernel(pmd_offset(pgd_offset_k(
+						(unsigned int)vaddr),
+					(unsigned int)vaddr),
+					(unsigned int)vaddr);
+		if (!pte_none(*ptep) && pte_present(*ptep)) {
+			pfn = pte_pfn(*ptep);
+			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
+			if (pfn_valid(pfn)) {
+				page = pfn_to_page(pfn);
+
+				ClearPageReserved(page);
+				__free_page(page);
+			}
+		}
+		vaddr += PAGE_SIZE;
+	} while (size -= PAGE_SIZE);
+
+	/* flush tlb */
+	flush_tlb_all();
 #endif
-	vfree(vaddr);
 }
 EXPORT_SYMBOL(consistent_free);
 
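The new MMU branch of consistent_free() open-codes a walk of the kernel page tables for every page of the region before handing it back. The same lookup written as a helper, purely for readability (virt_to_pte() is a made-up name, not part of this commit; MicroBlaze uses a two-level layout, so pmd_offset() takes the pgd entry directly):

	/* Hypothetical helper equivalent to the open-coded chain above. */
	static pte_t *virt_to_pte(unsigned long addr)
	{
		pgd_t *pgd = pgd_offset_k(addr);	/* slot in init_mm's pgd */
		pmd_t *pmd = pmd_offset(pgd, addr);	/* upper level is folded */

		return pte_offset_kernel(pmd, addr);	/* final pte for addr */
	}

With the pte in hand, pte_pfn() recovers the physical frame so ClearPageReserved() and __free_page() can undo what consistent_alloc() set up, and the closing flush_tlb_all() drops the now-stale uncached translations.
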
@@ -221,7 +229,7 @@ void consistent_sync(void *vaddr, size_t size, int direction)
 	case PCI_DMA_NONE:
 		BUG();
 	case PCI_DMA_FROMDEVICE:	/* invalidate only */
-		flush_dcache_range(start, end);
+		invalidate_dcache_range(start, end);
 		break;
 	case PCI_DMA_TODEVICE:		/* writeback only */
 		flush_dcache_range(start, end);
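
The final hunk corrects the PCI_DMA_FROMDEVICE case: for a device-to-memory transfer the CPU's cachelines over the buffer are stale and only need discarding, and writing them back could clobber data the device has just deposited, so a pure invalidate is the right operation. A hedged usage sketch (dev_start_rx_dma() is an invented driver hook, not an API from this file):

	/* Sketch: pairing consistent_sync() with a receive DMA on a buffer
	 * that was allocated cacheable (i.e. not via consistent_alloc()). */
	static void example_rx(void *buf, size_t len)
	{
		/* Discard stale cachelines so the CPU re-reads RAM after the
		 * device has filled the buffer. */
		consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
		dev_start_rx_dma(buf, len);	/* invented hook for illustration */
	}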
