author		David Daney <ddaney@caviumnetworks.com>	2010-10-01 16:27:34 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2010-10-29 14:08:32 -0400
commit		b93b2abce497873be97d765b848e0a955d29f200 (patch)
tree		0372a9162b8bbf67f5a5f7367a1da2001ea0292c /arch/mips/cavium-octeon
parent		ee71b7d2f834d5e4b3a43001b2fa88743ed71a2c (diff)
MIPS: Octeon: Rewrite DMA mapping functions.
All Octeon chips can support more than 4GB of RAM.  Also, due to how
Octeon PCI is set up, even some configurations with less than 4GB of
RAM will have portions that are not accessible from 32-bit devices.

Enable the swiotlb code to handle the cases where a device cannot
directly do DMA.  This is a complete rewrite of the Octeon DMA
mapping code.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Patchwork: http://patchwork.linux-mips.org/patch/1639/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
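What this buys is visible to drivers only through the generic DMA API: a device that cannot address a given buffer no longer trips one of the old panic() paths; the transfer is bounced through the swiotlb pool instead. A minimal, hypothetical sketch of the consuming pattern follows (the function and its names are illustrative, not part of this commit; only generic kernel APIs are used):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative only: map one buffer for a DMA write by a 32-bit device. */
static int example_dma_to_device(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t bus;

	/* Declare what the device can address. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;

	/*
	 * The platform code in this patch decides whether 'buf' is
	 * reachable through BAR1/BAR2 (phys_to_dma()) or must be
	 * bounced through the swiotlb pool.
	 */
	bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, bus))
		return -ENOMEM;

	/* ... hand 'bus' to the device and run the transfer ... */

	dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
	return 0;
}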
Diffstat (limited to 'arch/mips/cavium-octeon')
-rw-r--r--	arch/mips/cavium-octeon/Kconfig	12
-rw-r--r--	arch/mips/cavium-octeon/dma-octeon.c	581
2 files changed, 306 insertions, 287 deletions
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 47323ca452d..475156b0c80 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -87,3 +87,15 @@ config ARCH_SPARSEMEM_ENABLE
 config CAVIUM_OCTEON_HELPER
 	def_bool y
 	depends on OCTEON_ETHERNET || PCI
+
+config IOMMU_HELPER
+	bool
+
+config NEED_SG_DMA_LENGTH
+	bool
+
+config SWIOTLB
+	def_bool y
+	depends on CPU_CAVIUM_OCTEON
+	select IOMMU_HELPER
+	select NEED_SG_DMA_LENGTH
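The two selects exist because swiotlb's scatter-gather path uses the IOMMU helper bitmap allocator, and NEED_SG_DMA_LENGTH gives struct scatterlist a dma_length field separate from the CPU-side length, since a bounced entry's DMA address and length no longer mirror the original pages. A hypothetical driver-side sketch, using only the generic accessors (not code from this commit):

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative only, not from this commit. */
static void example_map_sg(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	/* May bounce individual entries; returns 0 on failure. */
	mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	for_each_sg(sgl, sg, mapped, i)
		/*
		 * sg_dma_address()/sg_dma_len() read dma_address and,
		 * with NEED_SG_DMA_LENGTH, dma_length; either may differ
		 * from sg->length if swiotlb substituted a bounce buffer.
		 */
		pr_debug("sg[%d]: %llx+%u\n", i,
			 (unsigned long long)sg_dma_address(sg),
			 sg_dma_len(sg));

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
}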
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index d22b5a2d64f..1abb66caaa1 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -8,335 +8,342 @@
  * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  * IP32 changes by Ilya.
- * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
- * the kernels original.
+ * Copyright (C) 2010 Cavium Networks, Inc.
  */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/string.h>
 #include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
 #include <linux/scatterlist.h>
+#include <linux/bootmem.h>
+#include <linux/swiotlb.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
 
-#include <linux/cache.h>
-#include <linux/io.h>
+#include <asm/bootinfo.h>
 
 #include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_PCI
+#include <asm/octeon/pci-octeon.h>
 #include <asm/octeon/cvmx-npi-defs.h>
 #include <asm/octeon/cvmx-pci-defs.h>
 
-#include <dma-coherence.h>
+static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
+{
+	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
+		return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
+	else
+		return paddr;
+}
 
-#ifdef CONFIG_PCI
-#include <asm/octeon/pci-octeon.h>
-#endif
+static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
+{
+	if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
+		return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
+	else
+		return daddr;
+}
+
+static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+		paddr -= 0x400000000ull;
+	return octeon_hole_phys_to_dma(paddr);
+}
 
-#define BAR2_PCI_ADDRESS 0x8000000000ul
+static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	daddr = octeon_hole_dma_to_phys(daddr);
 
-struct bar1_index_state {
-	int16_t ref_count;	/* Number of PCI mappings using this index */
-	uint16_t address_bits;	/* Upper bits of physical address. This is
-				   shifted 22 bits */
-};
+	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+		daddr += 0x400000000ull;
 
-#ifdef CONFIG_PCI
-static DEFINE_RAW_SPINLOCK(bar1_lock);
-static struct bar1_index_state bar1_state[32];
-#endif
+	return daddr;
+}
 
-dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
+static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-#ifndef CONFIG_PCI
-	/* Without PCI/PCIe this function can be called for Octeon internal
-	   devices such as USB. These devices all support 64bit addressing */
+	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+		paddr -= 0x400000000ull;
+
+	/* Anything in the BAR1 hole or above goes via BAR2 */
+	if (paddr >= 0xf0000000ull)
+		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+	return paddr;
+}
+
+static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+		daddr -= OCTEON_BAR2_PCI_ADDRESS;
+
+	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+		daddr += 0x400000000ull;
+	return daddr;
+}
+
+static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
+					   phys_addr_t paddr)
+{
+	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+		paddr -= 0x400000000ull;
+
+	/* Anything not in the BAR1 range goes via BAR2 */
+	if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
+		paddr = paddr - octeon_bar1_pci_phys;
+	else
+		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+	return paddr;
+}
+
+static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
+					    dma_addr_t daddr)
+{
+	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+		daddr -= OCTEON_BAR2_PCI_ADDRESS;
+	else
+		daddr += octeon_bar1_pci_phys;
+
+	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+		daddr += 0x400000000ull;
+	return daddr;
+}
+
+#endif /* CONFIG_PCI */
+
+static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
+	unsigned long offset, size_t size, enum dma_data_direction direction,
+	struct dma_attrs *attrs)
+{
+	dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
+					    direction, attrs);
 	mb();
-	return virt_to_phys(ptr);
-#else
-	unsigned long flags;
-	uint64_t dma_mask;
-	int64_t start_index;
-	dma_addr_t result = -1;
-	uint64_t physical = virt_to_phys(ptr);
-	int64_t index;
 
+	return daddr;
+}
+
+static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+	int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
 	mb();
-	/*
-	 * Use the DMA masks to determine the allowed memory
-	 * region. For us it doesn't limit the actual memory, just the
-	 * address visible over PCI. Devices with limits need to use
-	 * lower indexed Bar1 entries.
-	 */
-	if (dev) {
-		dma_mask = dev->coherent_dma_mask;
-		if (dev->dma_mask)
-			dma_mask = *dev->dma_mask;
-	} else {
-		dma_mask = 0xfffffffful;
-	}
+	return r;
+}
 
-	/*
-	 * Platform devices, such as the internal USB, skip all
-	 * translation and use Octeon physical addresses directly.
-	 */
-	if (!dev || dev->bus == &platform_bus_type)
-		return physical;
+static void octeon_dma_sync_single_for_device(struct device *dev,
+	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+{
+	swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
+	mb();
+}
 
-	switch (octeon_dma_bar_type) {
-	case OCTEON_DMA_BAR_TYPE_PCIE:
-		if (unlikely(physical < (16ul << 10)))
-			panic("dma_map_single: Not allowed to map first 16KB."
-			      " It interferes with BAR0 special area\n");
-		else if ((physical + size >= (256ul << 20)) &&
-			 (physical < (512ul << 20)))
-			panic("dma_map_single: Not allowed to map bootbus\n");
-		else if ((physical + size >= 0x400000000ull) &&
-			 physical < 0x410000000ull)
-			panic("dma_map_single: "
-			      "Attempt to map illegal memory address 0x%llx\n",
-			      physical);
-		else if (physical >= 0x420000000ull)
-			panic("dma_map_single: "
-			      "Attempt to map illegal memory address 0x%llx\n",
-			      physical);
-		else if (physical >= CVMX_PCIE_BAR1_PHYS_BASE &&
-			 physical + size < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE)) {
-			result = physical - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
-
-			if (((result+size-1) & dma_mask) != result+size-1)
-				panic("dma_map_single: Attempt to map address 0x%llx-0x%llx, which can't be accessed according to the dma mask 0x%llx\n",
-				      physical, physical+size-1, dma_mask);
-			goto done;
-		}
-
-		/* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */
-		if ((physical >= 0x410000000ull) && physical < 0x420000000ull)
-			result = physical - 0x400000000ull;
-		else
-			result = physical;
-		if (((result+size-1) & dma_mask) != result+size-1)
-			panic("dma_map_single: Attempt to map address "
-			      "0x%llx-0x%llx, which can't be accessed "
-			      "according to the dma mask 0x%llx\n",
-			      physical, physical+size-1, dma_mask);
-		goto done;
+static void octeon_dma_sync_sg_for_device(struct device *dev,
+	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+{
+	swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
+	mb();
+}
 
-	case OCTEON_DMA_BAR_TYPE_BIG:
-#ifdef CONFIG_64BIT
-		/* If the device supports 64bit addressing, then use BAR2 */
-		if (dma_mask > BAR2_PCI_ADDRESS) {
-			result = physical + BAR2_PCI_ADDRESS;
-			goto done;
-		}
-#endif
-		if (unlikely(physical < (4ul << 10))) {
-			panic("dma_map_single: Not allowed to map first 4KB. "
-			      "It interferes with BAR0 special area\n");
-		} else if (physical < (256ul << 20)) {
-			if (unlikely(physical + size > (256ul << 20)))
-				panic("dma_map_single: Requested memory spans "
-				      "Bar0 0:256MB and bootbus\n");
-			result = physical;
-			goto done;
-		} else if (unlikely(physical < (512ul << 20))) {
-			panic("dma_map_single: Not allowed to map bootbus\n");
-		} else if (physical < (2ul << 30)) {
-			if (unlikely(physical + size > (2ul << 30)))
-				panic("dma_map_single: Requested memory spans "
-				      "Bar0 512MB:2GB and BAR1\n");
-			result = physical;
-			goto done;
-		} else if (physical < (2ul << 30) + (128 << 20)) {
-			/* Fall through */
-		} else if (physical <
-			   (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) {
-			if (unlikely
-			    (physical + size >
-			     (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)))
-				panic("dma_map_single: Requested memory "
-				      "extends past Bar1 (4GB-%luMB)\n",
-				      OCTEON_PCI_BAR1_HOLE_SIZE);
-			result = physical;
-			goto done;
-		} else if ((physical >= 0x410000000ull) &&
-			   (physical < 0x420000000ull)) {
-			if (unlikely(physical + size > 0x420000000ull))
-				panic("dma_map_single: Requested memory spans "
-				      "non existant memory\n");
-			/* BAR0 fixed mapping 256MB:512MB ->
-			 * 16GB+256MB:16GB+512MB */
-			result = physical - 0x400000000ull;
-			goto done;
-		} else {
-			/* Continued below switch statement */
-		}
-		break;
+static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t gfp)
+{
+	void *ret;
 
-	case OCTEON_DMA_BAR_TYPE_SMALL:
-#ifdef CONFIG_64BIT
-		/* If the device supports 64bit addressing, then use BAR2 */
-		if (dma_mask > BAR2_PCI_ADDRESS) {
-			result = physical + BAR2_PCI_ADDRESS;
-			goto done;
-		}
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+		return ret;
+
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+	if (dev == NULL)
+		gfp |= __GFP_DMA;
+	else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
+		gfp |= __GFP_DMA;
+	else
 #endif
-		/* Continued below switch statement */
-		break;
+#ifdef CONFIG_ZONE_DMA32
+	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
+		gfp |= __GFP_DMA32;
+	else
+#endif
+		;
 
-	default:
-		panic("dma_map_single: Invalid octeon_dma_bar_type\n");
-	}
+	/* Don't invoke OOM killer */
+	gfp |= __GFP_NORETRY;
 
-	/* Don't allow mapping to span multiple Bar entries. The hardware guys
-	   won't guarantee that DMA across boards work */
-	if (unlikely((physical >> 22) != ((physical + size - 1) >> 22)))
-		panic("dma_map_single: "
-		      "Requested memory spans more than one Bar1 entry\n");
+	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
 
-	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
-		start_index = 31;
-	else if (unlikely(dma_mask < (1ul << 27)))
-		start_index = (dma_mask >> 22);
-	else
-		start_index = 31;
-
-	/* Only one processor can access the Bar register at once */
-	raw_spin_lock_irqsave(&bar1_lock, flags);
-
-	/* Look through Bar1 for existing mapping that will work */
-	for (index = start_index; index >= 0; index--) {
-		if ((bar1_state[index].address_bits == physical >> 22) &&
-		    (bar1_state[index].ref_count)) {
-			/* An existing mapping will work, use it */
-			bar1_state[index].ref_count++;
-			if (unlikely(bar1_state[index].ref_count < 0))
-				panic("dma_map_single: "
-				      "Bar1[%d] reference count overflowed\n",
-				      (int) index);
-			result = (index << 22) | (physical & ((1 << 22) - 1));
-			/* Large BAR1 is offset at 2GB */
-			if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
-				result += 2ul << 30;
-			goto done_unlock;
-		}
-	}
+	mb();
 
-	/* No existing mappings, look for a free entry */
-	for (index = start_index; index >= 0; index--) {
-		if (unlikely(bar1_state[index].ref_count == 0)) {
-			union cvmx_pci_bar1_indexx bar1_index;
-			/* We have a free entry, use it */
-			bar1_state[index].ref_count = 1;
-			bar1_state[index].address_bits = physical >> 22;
-			bar1_index.u32 = 0;
-			/* Address bits[35:22] sent to L2C */
-			bar1_index.s.addr_idx = physical >> 22;
-			/* Don't put PCI accesses in L2. */
-			bar1_index.s.ca = 1;
-			/* Endian Swap Mode */
-			bar1_index.s.end_swp = 1;
-			/* Set '1' when the selected address range is valid. */
-			bar1_index.s.addr_v = 1;
-			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
-					   bar1_index.u32);
-			/* An existing mapping will work, use it */
-			result = (index << 22) | (physical & ((1 << 22) - 1));
-			/* Large BAR1 is offset at 2GB */
-			if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
-				result += 2ul << 30;
-			goto done_unlock;
-		}
-	}
+	return ret;
+}
 
-	pr_err("dma_map_single: "
-	       "Can't find empty BAR1 index for physical mapping 0x%llx\n",
-	       (unsigned long long) physical);
+static void octeon_dma_free_coherent(struct device *dev, size_t size,
+	void *vaddr, dma_addr_t dma_handle)
+{
+	int order = get_order(size);
 
-done_unlock:
-	raw_spin_unlock_irqrestore(&bar1_lock, flags);
-done:
-	pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result);
-	return result;
-#endif
+	if (dma_release_from_coherent(dev, order, vaddr))
+		return;
+
+	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 
-void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
+static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-#ifndef CONFIG_PCI
-	/*
-	 * Without PCI/PCIe this function can be called for Octeon internal
-	 * devices such as USB. These devices all support 64bit addressing.
-	 */
-	return;
-#else
-	unsigned long flags;
-	uint64_t index;
+	return paddr;
+}
 
+static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr;
+}
+
+struct octeon_dma_map_ops {
+	struct dma_map_ops dma_map_ops;
+	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
+						      struct octeon_dma_map_ops,
+						      dma_map_ops);
+
+	return ops->phys_to_dma(dev, paddr);
+}
+EXPORT_SYMBOL(phys_to_dma);
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
+						      struct octeon_dma_map_ops,
+						      dma_map_ops);
+
+	return ops->dma_to_phys(dev, daddr);
+}
+EXPORT_SYMBOL(dma_to_phys);
+
+static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
+	.dma_map_ops = {
+		.alloc_coherent = octeon_dma_alloc_coherent,
+		.free_coherent = octeon_dma_free_coherent,
+		.map_page = octeon_dma_map_page,
+		.unmap_page = swiotlb_unmap_page,
+		.map_sg = octeon_dma_map_sg,
+		.unmap_sg = swiotlb_unmap_sg_attrs,
+		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+		.sync_single_for_device = octeon_dma_sync_single_for_device,
+		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
+		.mapping_error = swiotlb_dma_mapping_error,
+		.dma_supported = swiotlb_dma_supported
+	},
+	.phys_to_dma = octeon_unity_phys_to_dma,
+	.dma_to_phys = octeon_unity_dma_to_phys
+};
+
+char *octeon_swiotlb;
+
+void __init plat_swiotlb_setup(void)
+{
+	int i;
+	phys_t max_addr;
+	phys_t addr_size;
+	size_t swiotlbsize;
+	unsigned long swiotlb_nslabs;
+
+	max_addr = 0;
+	addr_size = 0;
+
+	for (i = 0 ; i < boot_mem_map.nr_map; i++) {
+		struct boot_mem_map_entry *e = &boot_mem_map.map[i];
+		if (e->type != BOOT_MEM_RAM)
+			continue;
+
+		/* These addresses map low for PCI. */
+		if (e->addr > 0x410000000ull)
+			continue;
+
+		addr_size += e->size;
+
+		if (max_addr < e->addr + e->size)
+			max_addr = e->addr + e->size;
+
+	}
+
+	swiotlbsize = PAGE_SIZE;
+
+#ifdef CONFIG_PCI
 	/*
-	 * Platform devices, such as the internal USB, skip all
-	 * translation and use Octeon physical addresses directly.
+	 * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
+	 * size to a maximum of 64MB
 	 */
-	if (dev->bus == &platform_bus_type)
-		return;
+	if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+	    || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
+		swiotlbsize = addr_size / 4;
+		if (swiotlbsize > 64 * (1<<20))
+			swiotlbsize = 64 * (1<<20);
+	} else if (max_addr > 0xf0000000ul) {
+		/*
+		 * Otherwise only allocate a big iotlb if there is
+		 * memory past the BAR1 hole.
+		 */
+		swiotlbsize = 64 * (1<<20);
+	}
+#endif
+	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
+	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
+	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
+
+	octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
 
+	swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1);
+
+	mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
+}
+
+#ifdef CONFIG_PCI
+static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
+	.dma_map_ops = {
+		.alloc_coherent = octeon_dma_alloc_coherent,
+		.free_coherent = octeon_dma_free_coherent,
+		.map_page = octeon_dma_map_page,
+		.unmap_page = swiotlb_unmap_page,
+		.map_sg = octeon_dma_map_sg,
+		.unmap_sg = swiotlb_unmap_sg_attrs,
+		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+		.sync_single_for_device = octeon_dma_sync_single_for_device,
+		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
+		.mapping_error = swiotlb_dma_mapping_error,
+		.dma_supported = swiotlb_dma_supported
+	},
+};
+
+struct dma_map_ops *octeon_pci_dma_map_ops;
+
+void __init octeon_pci_dma_init(void)
+{
 	switch (octeon_dma_bar_type) {
 	case OCTEON_DMA_BAR_TYPE_PCIE:
-		/* Nothing to do, all mappings are static */
-		goto done;
-
+		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
+		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
+		break;
 	case OCTEON_DMA_BAR_TYPE_BIG:
-#ifdef CONFIG_64BIT
-		/* Nothing to do for addresses using BAR2 */
-		if (dma_addr >= BAR2_PCI_ADDRESS)
-			goto done;
-#endif
-		if (unlikely(dma_addr < (4ul << 10)))
-			panic("dma_unmap_single: Unexpect DMA address 0x%llx\n",
-			      dma_addr);
-		else if (dma_addr < (2ul << 30))
-			/* Nothing to do for addresses using BAR0 */
-			goto done;
-		else if (dma_addr < (2ul << 30) + (128ul << 20))
-			/* Need to unmap, fall through */
-			index = (dma_addr - (2ul << 30)) >> 22;
-		else if (dma_addr <
-			 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))
-			goto done;	/* Nothing to do for the rest of BAR1 */
-		else
-			panic("dma_unmap_single: Unexpect DMA address 0x%llx\n",
-			      dma_addr);
-		/* Continued below switch statement */
+		_octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
+		_octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
 		break;
-
 	case OCTEON_DMA_BAR_TYPE_SMALL:
-#ifdef CONFIG_64BIT
-		/* Nothing to do for addresses using BAR2 */
-		if (dma_addr >= BAR2_PCI_ADDRESS)
-			goto done;
-#endif
-		index = dma_addr >> 22;
-		/* Continued below switch statement */
+		_octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
+		_octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
 		break;
-
 	default:
-		panic("dma_unmap_single: Invalid octeon_dma_bar_type\n");
+		BUG();
 	}
-
-	if (unlikely(index > 31))
-		panic("dma_unmap_single: "
-		      "Attempt to unmap an invalid address (0x%llx)\n",
-		      dma_addr);
-
-	raw_spin_lock_irqsave(&bar1_lock, flags);
-	bar1_state[index].ref_count--;
-	if (bar1_state[index].ref_count == 0)
-		octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
-	else if (unlikely(bar1_state[index].ref_count < 0))
-		panic("dma_unmap_single: Bar1[%u] reference count < 0\n",
-		      (int) index);
-	raw_spin_unlock_irqrestore(&bar1_lock, flags);
-done:
-	pr_debug("dma_unmap_single 0x%llx\n", dma_addr);
-	return;
-#endif
+	octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
 }
+#endif /* CONFIG_PCI */
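For context: this page's diffstat is limited to arch/mips/cavium-octeon, so the consumer of octeon_pci_dma_map_ops is not shown here. Working from memory of the surrounding series rather than from this page, the PCI init code (arch/mips/pci/pci-octeon.c) attaches the ops to each device along these lines; treat the exact hook and location as an assumption:

/* Sketch of the (not shown) hook-up; details assumed, not quoted. */
int pcibios_plat_dev_init(struct pci_dev *dev)
{
	dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
	return 0;
}

Platform (non-PCI) devices are unaffected by that hook: plat_swiotlb_setup() installs octeon_linear_dma_map_ops as the default mips_dma_map_ops, and its unity phys_to_dma()/dma_to_phys() reflect that on-chip blocks address physical memory directly.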