author	David Daney <ddaney@caviumnetworks.com>	2010-10-01 16:27:34 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2010-10-29 14:08:32 -0400
commit	b93b2abce497873be97d765b848e0a955d29f200 (patch)
tree	0372a9162b8bbf67f5a5f7367a1da2001ea0292c /arch/mips
parent	ee71b7d2f834d5e4b3a43001b2fa88743ed71a2c (diff)
MIPS: Octeon: Rewrite DMA mapping functions.
All Octeon chips can support more than 4GB of RAM.  Also, due to how
Octeon PCI is set up, even some configurations with less than 4GB of
RAM will have portions that are not accessible from 32-bit devices.

Enable the swiotlb code to handle the cases where a device cannot
directly do DMA.  This is a complete rewrite of the Octeon DMA
mapping code.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Patchwork: http://patchwork.linux-mips.org/patch/1639/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
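The rewrite hinges on per-bus phys_to_dma()/dma_to_phys() translations that the swiotlb layer calls through. As a minimal, self-contained sketch of the window arithmetic the PCIe (gen1) path performs — the window constants here are illustrative stand-ins, not the real CVMX_PCIE_BAR1_* values:

#include <stdint.h>
#include <stdio.h>

#define BAR1_PHYS_BASE 0xf0000000ull            /* stand-in, not the CVMX value */
#define BAR1_PHYS_SIZE (128ull << 20)           /* stand-in */
#define BAR1_RC_BASE   (2ull << 30)             /* stand-in */

static uint64_t hole_phys_to_dma(uint64_t paddr)
{
        /* Addresses inside the BAR1 window are rebased to the
         * root-complex view; everything else passes through,
         * mirroring octeon_hole_phys_to_dma() in the patch. */
        if (paddr >= BAR1_PHYS_BASE && paddr < BAR1_PHYS_BASE + BAR1_PHYS_SIZE)
                return paddr - BAR1_PHYS_BASE + BAR1_RC_BASE;
        return paddr;
}

static uint64_t hole_dma_to_phys(uint64_t daddr)
{
        /* Inverse direction, as in octeon_hole_dma_to_phys(). */
        if (daddr >= BAR1_RC_BASE)
                return daddr + BAR1_PHYS_BASE - BAR1_RC_BASE;
        return daddr;
}

int main(void)
{
        uint64_t p = 0xf0100000ull;     /* 1 MB into the window */
        uint64_t d = hole_phys_to_dma(p);

        printf("phys %#llx -> dma %#llx -> phys %#llx\n",
               (unsigned long long)p, (unsigned long long)d,
               (unsigned long long)hole_dma_to_phys(d));
        return 0;
}

The round trip prints phys 0xf0100000 -> dma 0x80100000 -> phys 0xf0100000; the real hooks below do the same rebasing with the chip's actual window bases.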
Diffstat (limited to 'arch/mips')
 arch/mips/cavium-octeon/Kconfig                          |  12
 arch/mips/cavium-octeon/dma-octeon.c                     | 581
 arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h |  22
 arch/mips/include/asm/octeon/pci-octeon.h                |  10
 arch/mips/pci/pci-octeon.c                               |  60
 arch/mips/pci/pcie-octeon.c                              |   5
 6 files changed, 389 insertions(+), 301 deletions(-)
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 47323ca452dc..475156b0c807 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -87,3 +87,15 @@ config ARCH_SPARSEMEM_ENABLE
 config CAVIUM_OCTEON_HELPER
 	def_bool y
 	depends on OCTEON_ETHERNET || PCI
+
+config IOMMU_HELPER
+	bool
+
+config NEED_SG_DMA_LENGTH
+	bool
+
+config SWIOTLB
+	def_bool y
+	depends on CPU_CAVIUM_OCTEON
+	select IOMMU_HELPER
+	select NEED_SG_DMA_LENGTH
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index d22b5a2d64f4..1abb66caaa1d 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -8,335 +8,342 @@
  * Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
  * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
  * IP32 changes by Ilya.
- * Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
- * the kernels original.
+ * Copyright (C) 2010 Cavium Networks, Inc.
  */
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/string.h>
 #include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
 #include <linux/scatterlist.h>
+#include <linux/bootmem.h>
+#include <linux/swiotlb.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
 
-#include <linux/cache.h>
-#include <linux/io.h>
+#include <asm/bootinfo.h>
 
 #include <asm/octeon/octeon.h>
+
+#ifdef CONFIG_PCI
+#include <asm/octeon/pci-octeon.h>
 #include <asm/octeon/cvmx-npi-defs.h>
 #include <asm/octeon/cvmx-pci-defs.h>
 
-#include <dma-coherence.h>
+static dma_addr_t octeon_hole_phys_to_dma(phys_addr_t paddr)
+{
+	if (paddr >= CVMX_PCIE_BAR1_PHYS_BASE && paddr < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE))
+		return paddr - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
+	else
+		return paddr;
+}
 
-#ifdef CONFIG_PCI
-#include <asm/octeon/pci-octeon.h>
-#endif
+static phys_addr_t octeon_hole_dma_to_phys(dma_addr_t daddr)
+{
+	if (daddr >= CVMX_PCIE_BAR1_RC_BASE)
+		return daddr + CVMX_PCIE_BAR1_PHYS_BASE - CVMX_PCIE_BAR1_RC_BASE;
+	else
+		return daddr;
+}
+
+static dma_addr_t octeon_gen1_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+		paddr -= 0x400000000ull;
+	return octeon_hole_phys_to_dma(paddr);
+}
 
-#define BAR2_PCI_ADDRESS 0x8000000000ul
+static phys_addr_t octeon_gen1_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	daddr = octeon_hole_dma_to_phys(daddr);
 
-struct bar1_index_state {
-	int16_t ref_count; /* Number of PCI mappings using this index */
-	uint16_t address_bits; /* Upper bits of physical address. This is
-				  shifted 22 bits */
-};
+	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+		daddr += 0x400000000ull;
 
-#ifdef CONFIG_PCI
-static DEFINE_RAW_SPINLOCK(bar1_lock);
-static struct bar1_index_state bar1_state[32];
-#endif
+	return daddr;
+}
 
-dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
+static dma_addr_t octeon_big_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-#ifndef CONFIG_PCI
-	/* Without PCI/PCIe this function can be called for Octeon internal
-	   devices such as USB. These devices all support 64bit addressing */
+	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+		paddr -= 0x400000000ull;
+
+	/* Anything in the BAR1 hole or above goes via BAR2 */
+	if (paddr >= 0xf0000000ull)
+		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+	return paddr;
+}
+
+static phys_addr_t octeon_big_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+		daddr -= OCTEON_BAR2_PCI_ADDRESS;
+
+	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+		daddr += 0x400000000ull;
+	return daddr;
+}
+
+static dma_addr_t octeon_small_phys_to_dma(struct device *dev,
+					   phys_addr_t paddr)
+{
+	if (paddr >= 0x410000000ull && paddr < 0x420000000ull)
+		paddr -= 0x400000000ull;
+
+	/* Anything not in the BAR1 range goes via BAR2 */
+	if (paddr >= octeon_bar1_pci_phys && paddr < octeon_bar1_pci_phys + 0x8000000ull)
+		paddr = paddr - octeon_bar1_pci_phys;
+	else
+		paddr = OCTEON_BAR2_PCI_ADDRESS + paddr;
+
+	return paddr;
+}
+
+static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
+					    dma_addr_t daddr)
+{
+	if (daddr >= OCTEON_BAR2_PCI_ADDRESS)
+		daddr -= OCTEON_BAR2_PCI_ADDRESS;
+	else
+		daddr += octeon_bar1_pci_phys;
+
+	if (daddr >= 0x10000000ull && daddr < 0x20000000ull)
+		daddr += 0x400000000ull;
+	return daddr;
+}
+
+#endif /* CONFIG_PCI */
+
+static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
+	unsigned long offset, size_t size, enum dma_data_direction direction,
+	struct dma_attrs *attrs)
+{
+	dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
+					    direction, attrs);
 	mb();
-	return virt_to_phys(ptr);
-#else
-	unsigned long flags;
-	uint64_t dma_mask;
-	int64_t start_index;
-	dma_addr_t result = -1;
-	uint64_t physical = virt_to_phys(ptr);
-	int64_t index;
 
+	return daddr;
+}
+
+static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+{
+	int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
 	mb();
-	/*
-	 * Use the DMA masks to determine the allowed memory
-	 * region. For us it doesn't limit the actual memory, just the
-	 * address visible over PCI. Devices with limits need to use
-	 * lower indexed Bar1 entries.
-	 */
-	if (dev) {
-		dma_mask = dev->coherent_dma_mask;
-		if (dev->dma_mask)
-			dma_mask = *dev->dma_mask;
-	} else {
-		dma_mask = 0xfffffffful;
-	}
+	return r;
+}
 
-	/*
-	 * Platform devices, such as the internal USB, skip all
-	 * translation and use Octeon physical addresses directly.
-	 */
-	if (!dev || dev->bus == &platform_bus_type)
-		return physical;
+static void octeon_dma_sync_single_for_device(struct device *dev,
+	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
+{
+	swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
+	mb();
+}
 
-	switch (octeon_dma_bar_type) {
-	case OCTEON_DMA_BAR_TYPE_PCIE:
-		if (unlikely(physical < (16ul << 10)))
-			panic("dma_map_single: Not allowed to map first 16KB."
-			      " It interferes with BAR0 special area\n");
-		else if ((physical + size >= (256ul << 20)) &&
-			 (physical < (512ul << 20)))
-			panic("dma_map_single: Not allowed to map bootbus\n");
-		else if ((physical + size >= 0x400000000ull) &&
-			 physical < 0x410000000ull)
-			panic("dma_map_single: "
-			      "Attempt to map illegal memory address 0x%llx\n",
-			      physical);
-		else if (physical >= 0x420000000ull)
-			panic("dma_map_single: "
-			      "Attempt to map illegal memory address 0x%llx\n",
-			      physical);
-		else if (physical >= CVMX_PCIE_BAR1_PHYS_BASE &&
-			 physical + size < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE)) {
-			result = physical - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
-
-			if (((result+size-1) & dma_mask) != result+size-1)
-				panic("dma_map_single: Attempt to map address 0x%llx-0x%llx, which can't be accessed according to the dma mask 0x%llx\n",
-				      physical, physical+size-1, dma_mask);
-			goto done;
-		}
-
-		/* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */
-		if ((physical >= 0x410000000ull) && physical < 0x420000000ull)
-			result = physical - 0x400000000ull;
-		else
-			result = physical;
-		if (((result+size-1) & dma_mask) != result+size-1)
-			panic("dma_map_single: Attempt to map address "
-			      "0x%llx-0x%llx, which can't be accessed "
-			      "according to the dma mask 0x%llx\n",
-			      physical, physical+size-1, dma_mask);
-		goto done;
-
+static void octeon_dma_sync_sg_for_device(struct device *dev,
+	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
+{
+	swiotlb_sync_sg_for_device(dev, sg, nelems, direction);
+	mb();
+}
 
-	case OCTEON_DMA_BAR_TYPE_BIG:
-#ifdef CONFIG_64BIT
-		/* If the device supports 64bit addressing, then use BAR2 */
-		if (dma_mask > BAR2_PCI_ADDRESS) {
-			result = physical + BAR2_PCI_ADDRESS;
-			goto done;
-		}
-#endif
-		if (unlikely(physical < (4ul << 10))) {
-			panic("dma_map_single: Not allowed to map first 4KB. "
-			      "It interferes with BAR0 special area\n");
-		} else if (physical < (256ul << 20)) {
-			if (unlikely(physical + size > (256ul << 20)))
-				panic("dma_map_single: Requested memory spans "
-				      "Bar0 0:256MB and bootbus\n");
-			result = physical;
-			goto done;
-		} else if (unlikely(physical < (512ul << 20))) {
-			panic("dma_map_single: Not allowed to map bootbus\n");
-		} else if (physical < (2ul << 30)) {
-			if (unlikely(physical + size > (2ul << 30)))
-				panic("dma_map_single: Requested memory spans "
-				      "Bar0 512MB:2GB and BAR1\n");
-			result = physical;
-			goto done;
-		} else if (physical < (2ul << 30) + (128 << 20)) {
-			/* Fall through */
-		} else if (physical <
-			   (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)) {
-			if (unlikely
-			    (physical + size >
-			     (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20)))
-				panic("dma_map_single: Requested memory "
-				      "extends past Bar1 (4GB-%luMB)\n",
-				      OCTEON_PCI_BAR1_HOLE_SIZE);
-			result = physical;
-			goto done;
-		} else if ((physical >= 0x410000000ull) &&
-			   (physical < 0x420000000ull)) {
-			if (unlikely(physical + size > 0x420000000ull))
-				panic("dma_map_single: Requested memory spans "
-				      "non existant memory\n");
-			/* BAR0 fixed mapping 256MB:512MB ->
-			 * 16GB+256MB:16GB+512MB */
-			result = physical - 0x400000000ull;
-			goto done;
-		} else {
-			/* Continued below switch statement */
-		}
-		break;
-
-	case OCTEON_DMA_BAR_TYPE_SMALL:
-#ifdef CONFIG_64BIT
-		/* If the device supports 64bit addressing, then use BAR2 */
-		if (dma_mask > BAR2_PCI_ADDRESS) {
-			result = physical + BAR2_PCI_ADDRESS;
-			goto done;
-		}
-#endif
-		/* Continued below switch statement */
-		break;
-
-	default:
-		panic("dma_map_single: Invalid octeon_dma_bar_type\n");
-	}
-
-	/* Don't allow mapping to span multiple Bar entries. The hardware guys
-	   won't guarantee that DMA across boards work */
-	if (unlikely((physical >> 22) != ((physical + size - 1) >> 22)))
-		panic("dma_map_single: "
-		      "Requested memory spans more than one Bar1 entry\n");
-
-	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
-		start_index = 31;
-	else if (unlikely(dma_mask < (1ul << 27)))
-		start_index = (dma_mask >> 22);
-	else
-		start_index = 31;
-
-	/* Only one processor can access the Bar register at once */
-	raw_spin_lock_irqsave(&bar1_lock, flags);
-
-	/* Look through Bar1 for existing mapping that will work */
-	for (index = start_index; index >= 0; index--) {
-		if ((bar1_state[index].address_bits == physical >> 22) &&
-		    (bar1_state[index].ref_count)) {
-			/* An existing mapping will work, use it */
-			bar1_state[index].ref_count++;
-			if (unlikely(bar1_state[index].ref_count < 0))
-				panic("dma_map_single: "
-				      "Bar1[%d] reference count overflowed\n",
-				      (int) index);
-			result = (index << 22) | (physical & ((1 << 22) - 1));
-			/* Large BAR1 is offset at 2GB */
-			if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
-				result += 2ul << 30;
-			goto done_unlock;
-		}
-	}
-
-	/* No existing mappings, look for a free entry */
-	for (index = start_index; index >= 0; index--) {
-		if (unlikely(bar1_state[index].ref_count == 0)) {
-			union cvmx_pci_bar1_indexx bar1_index;
-			/* We have a free entry, use it */
-			bar1_state[index].ref_count = 1;
-			bar1_state[index].address_bits = physical >> 22;
-			bar1_index.u32 = 0;
-			/* Address bits[35:22] sent to L2C */
-			bar1_index.s.addr_idx = physical >> 22;
-			/* Don't put PCI accesses in L2. */
-			bar1_index.s.ca = 1;
-			/* Endian Swap Mode */
-			bar1_index.s.end_swp = 1;
-			/* Set '1' when the selected address range is valid. */
-			bar1_index.s.addr_v = 1;
-			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
-					   bar1_index.u32);
-			/* An existing mapping will work, use it */
-			result = (index << 22) | (physical & ((1 << 22) - 1));
-			/* Large BAR1 is offset at 2GB */
-			if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG)
-				result += 2ul << 30;
-			goto done_unlock;
-		}
-	}
-
-	pr_err("dma_map_single: "
-	       "Can't find empty BAR1 index for physical mapping 0x%llx\n",
-	       (unsigned long long) physical);
-
-done_unlock:
-	raw_spin_unlock_irqrestore(&bar1_lock, flags);
-done:
-	pr_debug("dma_map_single 0x%llx->0x%llx\n", physical, result);
-	return result;
-#endif
-}
+static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
+	dma_addr_t *dma_handle, gfp_t gfp)
+{
+	void *ret;
+
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+		return ret;
+
+	/* ignore region specifiers */
+	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+	if (dev == NULL)
+		gfp |= __GFP_DMA;
+	else if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
+		gfp |= __GFP_DMA;
+	else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
+		gfp |= __GFP_DMA32;
+	else
+#endif
+		;
+
+	/* Don't invoke OOM killer */
+	gfp |= __GFP_NORETRY;
+
+	ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+
+	mb();
+
+	return ret;
+}
+
+static void octeon_dma_free_coherent(struct device *dev, size_t size,
+	void *vaddr, dma_addr_t dma_handle)
+{
+	int order = get_order(size);
+
+	if (dma_release_from_coherent(dev, order, vaddr))
+		return;
+
+	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+}
 
-void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
+static dma_addr_t octeon_unity_phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
-#ifndef CONFIG_PCI
-	/*
-	 * Without PCI/PCIe this function can be called for Octeon internal
-	 * devices such as USB. These devices all support 64bit addressing.
-	 */
-	return;
-#else
-	unsigned long flags;
-	uint64_t index;
+	return paddr;
+}
 
+static phys_addr_t octeon_unity_dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr;
+}
+
+struct octeon_dma_map_ops {
+	struct dma_map_ops dma_map_ops;
+	dma_addr_t (*phys_to_dma)(struct device *dev, phys_addr_t paddr);
+	phys_addr_t (*dma_to_phys)(struct device *dev, dma_addr_t daddr);
+};
+
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
+						      struct octeon_dma_map_ops,
+						      dma_map_ops);
+
+	return ops->phys_to_dma(dev, paddr);
+}
+EXPORT_SYMBOL(phys_to_dma);
+
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	struct octeon_dma_map_ops *ops = container_of(get_dma_ops(dev),
+						      struct octeon_dma_map_ops,
+						      dma_map_ops);
+
+	return ops->dma_to_phys(dev, daddr);
+}
+EXPORT_SYMBOL(dma_to_phys);
+
+static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
+	.dma_map_ops = {
+		.alloc_coherent = octeon_dma_alloc_coherent,
+		.free_coherent = octeon_dma_free_coherent,
+		.map_page = octeon_dma_map_page,
+		.unmap_page = swiotlb_unmap_page,
+		.map_sg = octeon_dma_map_sg,
+		.unmap_sg = swiotlb_unmap_sg_attrs,
+		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+		.sync_single_for_device = octeon_dma_sync_single_for_device,
+		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
+		.mapping_error = swiotlb_dma_mapping_error,
+		.dma_supported = swiotlb_dma_supported
+	},
+	.phys_to_dma = octeon_unity_phys_to_dma,
+	.dma_to_phys = octeon_unity_dma_to_phys
+};
+
+char *octeon_swiotlb;
+
+void __init plat_swiotlb_setup(void)
+{
+	int i;
+	phys_t max_addr;
+	phys_t addr_size;
+	size_t swiotlbsize;
+	unsigned long swiotlb_nslabs;
+
+	max_addr = 0;
+	addr_size = 0;
+
+	for (i = 0 ; i < boot_mem_map.nr_map; i++) {
+		struct boot_mem_map_entry *e = &boot_mem_map.map[i];
+		if (e->type != BOOT_MEM_RAM)
+			continue;
+
+		/* These addresses map low for PCI. */
+		if (e->addr > 0x410000000ull)
+			continue;
+
+		addr_size += e->size;
+
+		if (max_addr < e->addr + e->size)
+			max_addr = e->addr + e->size;
+
+	}
+
+	swiotlbsize = PAGE_SIZE;
+
+#ifdef CONFIG_PCI
 	/*
-	 * Platform devices, such as the internal USB, skip all
-	 * translation and use Octeon physical addresses directly.
+	 * For OCTEON_DMA_BAR_TYPE_SMALL, size the iotlb at 1/4 memory
+	 * size to a maximum of 64MB
 	 */
-	if (dev->bus == &platform_bus_type)
-		return;
+	if (OCTEON_IS_MODEL(OCTEON_CN31XX)
+	    || OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
+		swiotlbsize = addr_size / 4;
+		if (swiotlbsize > 64 * (1<<20))
+			swiotlbsize = 64 * (1<<20);
+	} else if (max_addr > 0xf0000000ul) {
+		/*
+		 * Otherwise only allocate a big iotlb if there is
+		 * memory past the BAR1 hole.
+		 */
+		swiotlbsize = 64 * (1<<20);
+	}
+#endif
+	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
+	swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
+	swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
+
+	octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
 
+	swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1);
+
+	mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
+}
+
+#ifdef CONFIG_PCI
+static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
+	.dma_map_ops = {
+		.alloc_coherent = octeon_dma_alloc_coherent,
+		.free_coherent = octeon_dma_free_coherent,
+		.map_page = octeon_dma_map_page,
+		.unmap_page = swiotlb_unmap_page,
+		.map_sg = octeon_dma_map_sg,
+		.unmap_sg = swiotlb_unmap_sg_attrs,
+		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+		.sync_single_for_device = octeon_dma_sync_single_for_device,
+		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+		.sync_sg_for_device = octeon_dma_sync_sg_for_device,
+		.mapping_error = swiotlb_dma_mapping_error,
+		.dma_supported = swiotlb_dma_supported
+	},
+};
+
+struct dma_map_ops *octeon_pci_dma_map_ops;
+
+void __init octeon_pci_dma_init(void)
+{
 	switch (octeon_dma_bar_type) {
 	case OCTEON_DMA_BAR_TYPE_PCIE:
-		/* Nothing to do, all mappings are static */
-		goto done;
-
+		_octeon_pci_dma_map_ops.phys_to_dma = octeon_gen1_phys_to_dma;
+		_octeon_pci_dma_map_ops.dma_to_phys = octeon_gen1_dma_to_phys;
+		break;
 	case OCTEON_DMA_BAR_TYPE_BIG:
-#ifdef CONFIG_64BIT
-		/* Nothing to do for addresses using BAR2 */
-		if (dma_addr >= BAR2_PCI_ADDRESS)
-			goto done;
-#endif
-		if (unlikely(dma_addr < (4ul << 10)))
-			panic("dma_unmap_single: Unexpect DMA address 0x%llx\n",
-			      dma_addr);
-		else if (dma_addr < (2ul << 30))
-			/* Nothing to do for addresses using BAR0 */
-			goto done;
-		else if (dma_addr < (2ul << 30) + (128ul << 20))
-			/* Need to unmap, fall through */
-			index = (dma_addr - (2ul << 30)) >> 22;
-		else if (dma_addr <
-			 (4ul << 30) - (OCTEON_PCI_BAR1_HOLE_SIZE << 20))
-			goto done;	/* Nothing to do for the rest of BAR1 */
-		else
-			panic("dma_unmap_single: Unexpect DMA address 0x%llx\n",
-			      dma_addr);
-		/* Continued below switch statement */
+		_octeon_pci_dma_map_ops.phys_to_dma = octeon_big_phys_to_dma;
+		_octeon_pci_dma_map_ops.dma_to_phys = octeon_big_dma_to_phys;
 		break;
-
 	case OCTEON_DMA_BAR_TYPE_SMALL:
-#ifdef CONFIG_64BIT
-		/* Nothing to do for addresses using BAR2 */
-		if (dma_addr >= BAR2_PCI_ADDRESS)
-			goto done;
-#endif
-		index = dma_addr >> 22;
-		/* Continued below switch statement */
+		_octeon_pci_dma_map_ops.phys_to_dma = octeon_small_phys_to_dma;
+		_octeon_pci_dma_map_ops.dma_to_phys = octeon_small_dma_to_phys;
 		break;
-
 	default:
-		panic("dma_unmap_single: Invalid octeon_dma_bar_type\n");
+		BUG();
 	}
-
-	if (unlikely(index > 31))
-		panic("dma_unmap_single: "
-		      "Attempt to unmap an invalid address (0x%llx)\n",
-		      dma_addr);
-
-	raw_spin_lock_irqsave(&bar1_lock, flags);
-	bar1_state[index].ref_count--;
-	if (bar1_state[index].ref_count == 0)
-		octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
-	else if (unlikely(bar1_state[index].ref_count < 0))
-		panic("dma_unmap_single: Bar1[%u] reference count < 0\n",
-		      (int) index);
-	raw_spin_unlock_irqrestore(&bar1_lock, flags);
-done:
-	pr_debug("dma_unmap_single 0x%llx\n", dma_addr);
-	return;
-#endif
+	octeon_pci_dma_map_ops = &_octeon_pci_dma_map_ops.dma_map_ops;
 }
+#endif /* CONFIG_PCI */
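Generic code only ever holds a struct dma_map_ops pointer, so the exported phys_to_dma()/dma_to_phys() above recover the enclosing struct octeon_dma_map_ops with container_of(). A compilable sketch of that embedding pattern, with local stand-ins for the kernel types:

#include <stddef.h>
#include <stdio.h>

/* Same definition the kernel uses: walk back from a member pointer
 * to the structure that embeds it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct dma_map_ops {
        const char *name;       /* stand-in for the real ops table */
};

struct octeon_dma_map_ops {
        struct dma_map_ops dma_map_ops;         /* embedded generic ops */
        unsigned long long (*phys_to_dma)(unsigned long long paddr);
};

static unsigned long long unity(unsigned long long paddr)
{
        return paddr;           /* the "linear" (no translation) case */
}

static struct octeon_dma_map_ops linear_ops = {
        .dma_map_ops = { .name = "octeon-linear" },
        .phys_to_dma = unity,
};

int main(void)
{
        /* The rest of the system hands around only this pointer... */
        struct dma_map_ops *ops = &linear_ops.dma_map_ops;

        /* ...and the translation hook is recovered from it, exactly
         * as the patch does with get_dma_ops(dev). */
        struct octeon_dma_map_ops *wrapper =
                container_of(ops, struct octeon_dma_map_ops, dma_map_ops);

        printf("%s: phys 0x1000 -> dma %#llx\n", wrapper->dma_map_ops.name,
               wrapper->phys_to_dma(0x1000ull));
        return 0;
}

The design choice this buys: one set of swiotlb-backed map/sync callbacks shared by every bus type, with only the address translation varying per wrapper.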
diff --git a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
index f768f6fe712e..be8fb4240cec 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
@@ -15,13 +15,12 @@
 
 struct device;
 
-dma_addr_t octeon_map_dma_mem(struct device *, void *, size_t);
-void octeon_unmap_dma_mem(struct device *, dma_addr_t);
+extern void octeon_pci_dma_init(void);
 
 static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
 	size_t size)
 {
-	return octeon_map_dma_mem(dev, addr, size);
+	BUG();
 }
 
 static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
@@ -33,23 +32,23 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
 static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
 	dma_addr_t dma_addr)
 {
-	return dma_addr;
+	BUG();
 }
 
 static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
 	size_t size, enum dma_data_direction direction)
 {
-	octeon_unmap_dma_mem(dev, dma_addr);
+	BUG();
 }
 
 static inline int plat_dma_supported(struct device *dev, u64 mask)
 {
-	return 1;
+	BUG();
 }
 
 static inline void plat_extra_sync_for_device(struct device *dev)
 {
-	mb();
+	BUG();
 }
 
 static inline int plat_device_is_coherent(struct device *dev)
@@ -60,7 +59,14 @@ static inline int plat_device_is_coherent(struct device *dev)
 static inline int plat_dma_mapping_error(struct device *dev,
 	dma_addr_t dma_addr)
 {
-	return dma_addr == -1;
+	BUG();
 }
 
+dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+struct dma_map_ops;
+extern struct dma_map_ops *octeon_pci_dma_map_ops;
+extern char *octeon_swiotlb;
+
 #endif /* __ASM_MACH_CAVIUM_OCTEON_DMA_COHERENCE_H */
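The octeon_swiotlb bounce buffer declared here is sized by plat_swiotlb_setup() at 1/4 of low memory, capped at 64 MB, then rounded to whole IO_TLB segments. A standalone sketch of that arithmetic, assuming the swiotlb constants of this era (IO_TLB_SHIFT = 11, i.e. 2 KB slabs, and IO_TLB_SEGSIZE = 128):

#include <stdint.h>
#include <stdio.h>

#define IO_TLB_SHIFT   11       /* assumed: 2 KB slabs */
#define IO_TLB_SEGSIZE 128      /* assumed: slabs per segment */

int main(void)
{
        uint64_t addr_size = 512ull << 20;      /* example: 512 MB of RAM */
        uint64_t swiotlbsize = addr_size / 4;   /* 1/4 of memory... */
        uint64_t nslabs;

        if (swiotlbsize > 64ull * (1 << 20))    /* ...capped at 64 MB */
                swiotlbsize = 64ull * (1 << 20);

        /* Round up to whole IO_TLB segments, like ALIGN() in the patch. */
        nslabs = swiotlbsize >> IO_TLB_SHIFT;
        nslabs = (nslabs + IO_TLB_SEGSIZE - 1) / IO_TLB_SEGSIZE * IO_TLB_SEGSIZE;
        swiotlbsize = nslabs << IO_TLB_SHIFT;

        printf("bounce buffer: %llu MB (%llu slabs)\n",
               (unsigned long long)(swiotlbsize >> 20),
               (unsigned long long)nslabs);
        return 0;
}

For the 512 MB example this prints a 64 MB buffer of 32768 slabs, matching the cap branch in plat_swiotlb_setup().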
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index ece78043acf6..fba2ba200f58 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -36,6 +36,16 @@ extern int (*octeon_pcibios_map_irq)(const struct pci_dev *dev,
 				 u8 slot, u8 pin);
 
 /*
+ * For PCI (not PCIe) the BAR2 base address.
+ */
+#define OCTEON_BAR2_PCI_ADDRESS 0x8000000000ull
+
+/*
+ * For PCI (not PCIe) the base of the memory mapped by BAR1
+ */
+extern u64 octeon_bar1_pci_phys;
+
+/*
  * The following defines are used when octeon_dma_bar_type =
  * OCTEON_DMA_BAR_TYPE_BIG
  */
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index d248b707eff3..2d74fc9ae3ba 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/time.h>
 #include <linux/delay.h>
+#include <linux/swiotlb.h>
 
 #include <asm/time.h>
 
@@ -19,6 +20,8 @@
 #include <asm/octeon/cvmx-pci-defs.h>
 #include <asm/octeon/pci-octeon.h>
 
+#include <dma-coherence.h>
+
 #define USE_OCTEON_INTERNAL_ARBITER
 
 /*
@@ -32,6 +35,8 @@
 /* Octeon't PCI controller uses did=3, subdid=3 for PCI memory. */
 #define OCTEON_PCI_MEMSPACE_OFFSET  (0x00011b0000000000ull)
 
+u64 octeon_bar1_pci_phys;
+
 /**
  * This is the bit decoding used for the Octeon PCI controller addresses
  */
@@ -170,6 +175,8 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
 		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, dconfig);
 	}
 
+	dev->dev.archdata.dma_ops = octeon_pci_dma_map_ops;
+
 	return 0;
 }
 
@@ -618,12 +625,10 @@ static int __init octeon_pci_setup(void)
 	 * before the readl()'s below. We don't want BAR2 overlapping
 	 * with BAR0/BAR1 during these reads.
 	 */
-	octeon_npi_write32(CVMX_NPI_PCI_CFG08, 0);
-	octeon_npi_write32(CVMX_NPI_PCI_CFG09, 0x80);
-
-	/* Disable the BAR1 movable mappings */
-	for (index = 0; index < 32; index++)
-		octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index), 0);
+	octeon_npi_write32(CVMX_NPI_PCI_CFG08,
+			   (u32)(OCTEON_BAR2_PCI_ADDRESS & 0xffffffffull));
+	octeon_npi_write32(CVMX_NPI_PCI_CFG09,
+			   (u32)(OCTEON_BAR2_PCI_ADDRESS >> 32));
 
 	if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_BIG) {
 		/* Remap the Octeon BAR 0 to 0-2GB */
@@ -637,6 +642,25 @@ static int __init octeon_pci_setup(void)
 		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 2ul << 30);
 		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
 
+		/* BAR1 movable mappings set for identity mapping */
+		octeon_bar1_pci_phys = 0x80000000ull;
+		for (index = 0; index < 32; index++) {
+			union cvmx_pci_bar1_indexx bar1_index;
+
+			bar1_index.u32 = 0;
+			/* Address bits[35:22] sent to L2C */
+			bar1_index.s.addr_idx =
+				(octeon_bar1_pci_phys >> 22) + index;
+			/* Don't put PCI accesses in L2. */
+			bar1_index.s.ca = 1;
+			/* Endian Swap Mode */
+			bar1_index.s.end_swp = 1;
+			/* Set '1' when the selected address range is valid. */
+			bar1_index.s.addr_v = 1;
+			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
+					   bar1_index.u32);
+		}
+
 		/* Devices go after BAR1 */
 		octeon_pci_mem_resource.start =
 			OCTEON_PCI_MEMSPACE_OFFSET + (4ul << 30) -
@@ -652,6 +676,27 @@ static int __init octeon_pci_setup(void)
 		octeon_npi_write32(CVMX_NPI_PCI_CFG06, 0);
 		octeon_npi_write32(CVMX_NPI_PCI_CFG07, 0);
 
+		/* BAR1 movable regions contiguous to cover the swiotlb */
+		octeon_bar1_pci_phys =
+			virt_to_phys(octeon_swiotlb) & ~((1ull << 22) - 1);
+
+		for (index = 0; index < 32; index++) {
+			union cvmx_pci_bar1_indexx bar1_index;
+
+			bar1_index.u32 = 0;
+			/* Address bits[35:22] sent to L2C */
+			bar1_index.s.addr_idx =
+				(octeon_bar1_pci_phys >> 22) + index;
+			/* Don't put PCI accesses in L2. */
+			bar1_index.s.ca = 1;
+			/* Endian Swap Mode */
+			bar1_index.s.end_swp = 1;
+			/* Set '1' when the selected address range is valid. */
+			bar1_index.s.addr_v = 1;
+			octeon_npi_write32(CVMX_NPI_PCI_BAR1_INDEXX(index),
+					   bar1_index.u32);
+		}
+
 		/* Devices go after BAR0 */
 		octeon_pci_mem_resource.start =
 			OCTEON_PCI_MEMSPACE_OFFSET + (128ul << 20) +
@@ -667,6 +712,9 @@ static int __init octeon_pci_setup(void)
 	 * was setup properly.
 	 */
 	cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, -1);
+
+	octeon_pci_dma_init();
+
 	return 0;
 }
 
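Both loops above program 32 consecutive 4 MB BAR1 windows starting at octeon_bar1_pci_phys, with addr_idx carrying physical address bits [35:22]. A quick sketch of the resulting window map and of locating the entry for a given bus address — the base and bus address are illustrative:

#include <stdint.h>
#include <stdio.h>

#define BAR1_WINDOW_SHIFT 22    /* each entry maps 4 MB */
#define BAR1_NUM_ENTRIES  32

int main(void)
{
        /* Identity-mapped case from the OCTEON_DMA_BAR_TYPE_BIG path. */
        uint64_t bar1_pci_phys = 0x80000000ull;
        uint64_t bus_addr = 0x03d00000ull;      /* example offset into BAR1 */
        unsigned index = (unsigned)(bus_addr >> BAR1_WINDOW_SHIFT);

        /* Entry i covers physical [base + i*4MB, base + (i+1)*4MB). */
        printf("BAR1[%u] -> phys %#llx\n", index,
               (unsigned long long)(bar1_pci_phys +
                                    ((uint64_t)index << BAR1_WINDOW_SHIFT)));

        /* addr_idx as programmed into CVMX_NPI_PCI_BAR1_INDEXX(i): */
        printf("addr_idx = %llu\n",
               (unsigned long long)((bar1_pci_phys >> BAR1_WINDOW_SHIFT) + index));
        return 0;
}

With 32 entries of 4 MB each, BAR1 exposes a contiguous 128 MB aperture — exactly the 0x8000000ull span octeon_small_phys_to_dma() tests against.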
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index 861361e0c9af..385f035b24e4 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -75,6 +75,8 @@ union cvmx_pcie_address {
 	} mem;
 };
 
+#include <dma-coherence.h>
+
 /**
  * Return the Core virtual base address for PCIe IO access. IOs are
  * read/written as an offset from this address.
@@ -1391,6 +1393,9 @@ static int __init octeon_pcie_setup(void)
 			cvmx_pcie_get_io_size(1) - 1;
 		register_pci_controller(&octeon_pcie1_controller);
 	}
+
+	octeon_pci_dma_init();
+
 	return 0;
 }
 