author		Michal Simek <monstr@monstr.eu>		2010-02-08 06:13:10 -0500
committer	Michal Simek <monstr@monstr.eu>		2010-03-11 08:08:33 -0500
commit		d79f3b06a9e40b382bd5d5ae8dea9b3210eda9ce (patch)
tree		d0e49b835c40cde89f8af1b641df6baf0b16dd82
parent		407c1da07d5afa001ed0fdb8f379c00bbd09990a (diff)
microblaze: Preliminary support for dma drivers
I found several problems with the ll_temac driver on systems with a
write-back (WB) cache. This early fix should address them. I will clean
up this patch before adding it to mainline.
Signed-off-by: Michal Simek <monstr@monstr.eu>
-rw-r--r--	arch/microblaze/include/asm/io.h	 3
-rw-r--r--	arch/microblaze/include/asm/page.h	12
-rw-r--r--	arch/microblaze/kernel/dma.c		18
3 files changed, 17 insertions(+), 16 deletions(-)
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index 06d804b15a51..32d621a56aee 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -140,9 +140,6 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
 #define virt_to_phys(addr)	((unsigned long)__virt_to_phys(addr))
 #define virt_to_bus(addr)	((unsigned long)__virt_to_phys(addr))
 
-#define __page_address(page) \
-		(PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
-#define page_to_phys(page)	virt_to_phys((void *)__page_address(page))
 #define page_to_bus(page)	(page_to_phys(page))
 #define bus_to_virt(addr)	(phys_to_virt(addr))
 
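For context, a minimal sketch (not part of the patch) of what the removed
io.h definition and the replacement pfn-based definition of page_to_phys()
each compute; the two agree on a flat memory map where the pfn offset
matches the start of RAM:

    /* Illustrative helpers only; these names do not exist in the tree. */
    static unsigned long old_page_to_phys(struct page *page)
    {
    	/* removed variant: mem_map index -> kernel virtual address,
    	 * then virt_to_phys() */
    	unsigned long vaddr = PAGE_OFFSET +
    			(((page) - mem_map) << PAGE_SHIFT);
    	return virt_to_phys((void *)vaddr);
    }

    static unsigned long new_page_to_phys(struct page *page)
    {
    	/* replacement variant (now in page.h): straight through the pfn */
    	return page_to_pfn(page) << PAGE_SHIFT;
    }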
diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h
index 9b66c0fa9a32..2dd1d04129e0 100644
--- a/arch/microblaze/include/asm/page.h
+++ b/arch/microblaze/include/asm/page.h
@@ -62,12 +62,6 @@ extern unsigned int __page_offset;
 #define PAGE_OFFSET	CONFIG_KERNEL_START
 
 /*
- * MAP_NR -- given an address, calculate the index of the page struct which
- * points to the address's page.
- */
-#define MAP_NR(addr) (((unsigned long)(addr) - PAGE_OFFSET) >> PAGE_SHIFT)
-
-/*
  * The basic type of a PTE - 32 bit physical addressing.
  */
 typedef unsigned long pte_basic_t;
@@ -154,7 +148,11 @@ extern int page_is_ram(unsigned long pfn);
 # define pfn_to_virt(pfn)	__va(pfn_to_phys((pfn)))
 
 # ifdef CONFIG_MMU
-# define virt_to_page(kaddr)	(mem_map + MAP_NR(kaddr))
+
+# define virt_to_page(kaddr)	(pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
+# define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
+# define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+
 # else /* CONFIG_MMU */
 # define virt_to_page(vaddr)	(pfn_to_page(virt_to_pfn(vaddr)))
 # define page_to_virt(page)	(pfn_to_virt(page_to_pfn(page)))
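Taken together, the new CONFIG_MMU macros route every conversion through
the page frame number. A hedged sketch of the intended round trip, assuming
kaddr is a valid kernel lowmem address (check_round_trip() is illustrative,
not patch code):

    static void check_round_trip(void *kaddr)
    {
    	/* pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) */
    	struct page *pg = virt_to_page(kaddr);
    	/* __va(page_to_pfn(pg) << PAGE_SHIFT) */
    	void *back = page_to_virt(pg);
    	/* page_to_pfn(pg) << PAGE_SHIFT */
    	unsigned long pa = page_to_phys(pg);

    	/* both conversions must land back on the same page */
    	BUG_ON(back != (void *)((unsigned long)kaddr & PAGE_MASK));
    	BUG_ON(pa != (__pa(kaddr) & PAGE_MASK));
    }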
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index 64bc39f40ba7..f230a8de0bcd 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -21,10 +21,10 @@
  * default the offset is PCI_DRAM_OFFSET.
  */
 
-static inline void __dma_sync_page(void *vaddr, unsigned long offset,
+static inline void __dma_sync_page(void *paddr, unsigned long offset,
 	size_t size, enum dma_data_direction direction)
 {
-	unsigned long start = virt_to_phys(vaddr);
+	unsigned long start = (unsigned long)paddr;
 
 	switch (direction) {
 	case DMA_TO_DEVICE:
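The helper's contract changes here: __dma_sync_page() now expects a
physical address, so the virt_to_phys() translation moves from its body to
the call sites. The old code could end up feeding a struct page pointer
into a virtual-to-physical conversion, which is the bug the remaining hunks
fix. A sketch of the convention (illustrative call sites, not patch text):

    /* before: the helper assumed a virtual address, even when callers
     * only had a struct page pointer */
    __dma_sync_page(page, offset, size, direction);

    /* after: callers translate to a physical address up front */
    __dma_sync_page(page_to_phys(page), offset, size, direction);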
@@ -79,10 +79,12 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
 	struct scatterlist *sg;
 	int i;
 
+	/* FIXME this part of code is untested */
 	for_each_sg(sgl, sg, nents, i) {
 		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
 		sg->dma_length = sg->length;
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
+						sg->length, direction);
 	}
 
 	return nents;
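From a driver's perspective (the commit message mentions ll_temac), mapping
a scatterlist through this path now also synchronizes each segment's cache
range. A hedged usage sketch, assuming dev, sgl, and nents are already set
up by the driver:

    int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
    if (mapped == 0)
    	return -EIO;	/* mapping failed */
    /* each entry now has sg->dma_address set and its cache range
     * flushed, so the device reads consistent data from memory */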
@@ -107,7 +109,7 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 					     struct dma_attrs *attrs)
 {
 	BUG_ON(direction == DMA_NONE);
-	__dma_sync_page(page, offset, size, direction);
+	__dma_sync_page(page_to_phys(page), offset, size, direction);
 	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
 }
 
@@ -117,8 +119,12 @@ static inline void dma_direct_unmap_page(struct device *dev,
 					 enum dma_data_direction direction,
 					 struct dma_attrs *attrs)
 {
-/* There is not necessary to do cache cleanup */
-/* __dma_sync_page(dma_address, 0 , size, direction); */
+/* There is not necessary to do cache cleanup
+ *
+ * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and
+ * dma_address is physical address
+ */
+	__dma_sync_page((void *)dma_address, 0 , size, direction);
 }
 
 struct dma_map_ops dma_direct_ops = {
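The sync that this last hunk enables on unmap matters most for
device-to-memory transfers on a write-back cache: stale cache lines
covering the buffer must be invalidated before the CPU reads what the
device wrote. A hedged sketch of the sequence (illustrative, not patch
text):

    dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
    /* ... device DMAs into the buffer behind 'handle' ... */
    dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
    /* unmap now calls __dma_sync_page(), so the CPU observes the
     * device-written data rather than stale cache contents */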