aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/platforms
diff options
context:
space:
mode:
authorGuo Chao <yan@linux.vnet.ibm.com>2014-07-21 00:42:30 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2014-08-05 01:41:47 -0400
commit262af557dd750e94adcee3f450782c743f9a92d6 (patch)
treed060995887f19716ca264ab04980519290411bde /arch/powerpc/platforms
parentbb593c0049fd6b6e420a6f68c5a688e14782dba1 (diff)
powerpc/powernv: Enable M64 aperture for PHB3
This patch enables the M64 aperture for PHB3. We already had a platform hook (ppc_md.pcibios_window_alignment) to affect the PCI resource assignment done in PCI core so that each PE's M32 resource was built on basis of M32 segment size. Similarly, we're using that for M64 assignment on basis of M64 segment size. * We're using last M64 BAR to cover the M64 aperture, and it's shared by all 256 PEs. * We don't support P7IOC yet. However, some function callbacks are added to (struct pnv_phb) so that we can reuse them on P7IOC in future. * PE, corresponding to PCI bus with large M64 BAR device attached, might span multiple M64 segments. We introduce "compound" PE to cover the case. The compound PE is a list of PEs and the master PE is used as before. The slave PEs are just for MMIO isolation. Signed-off-by: Guo Chao <yan@linux.vnet.ibm.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c301
-rw-r--r--arch/powerpc/platforms/powernv/pci.h20
2 files changed, 300 insertions, 21 deletions
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 0701f90ac625..899fe4049b44 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -36,6 +36,7 @@
36#include <asm/tce.h> 36#include <asm/tce.h>
37#include <asm/xics.h> 37#include <asm/xics.h>
38#include <asm/debug.h> 38#include <asm/debug.h>
39#include <asm/firmware.h>
39 40
40#include "powernv.h" 41#include "powernv.h"
41#include "pci.h" 42#include "pci.h"
@@ -82,6 +83,12 @@ static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
82 : : "r" (val), "r" (paddr) : "memory"); 83 : : "r" (val), "r" (paddr) : "memory");
83} 84}
84 85
86static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
87{
88 return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
89 (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
90}
91
85static int pnv_ioda_alloc_pe(struct pnv_phb *phb) 92static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
86{ 93{
87 unsigned long pe; 94 unsigned long pe;
@@ -106,6 +113,240 @@ static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
106 clear_bit(pe, phb->ioda.pe_alloc); 113 clear_bit(pe, phb->ioda.pe_alloc);
107} 114}
108 115
116/* The default M64 BAR is shared by all PEs */
117static int pnv_ioda2_init_m64(struct pnv_phb *phb)
118{
119 const char *desc;
120 struct resource *r;
121 s64 rc;
122
123 /* Configure the default M64 BAR */
124 rc = opal_pci_set_phb_mem_window(phb->opal_id,
125 OPAL_M64_WINDOW_TYPE,
126 phb->ioda.m64_bar_idx,
127 phb->ioda.m64_base,
128 0, /* unused */
129 phb->ioda.m64_size);
130 if (rc != OPAL_SUCCESS) {
131 desc = "configuring";
132 goto fail;
133 }
134
135 /* Enable the default M64 BAR */
136 rc = opal_pci_phb_mmio_enable(phb->opal_id,
137 OPAL_M64_WINDOW_TYPE,
138 phb->ioda.m64_bar_idx,
139 OPAL_ENABLE_M64_SPLIT);
140 if (rc != OPAL_SUCCESS) {
141 desc = "enabling";
142 goto fail;
143 }
144
145 /* Mark the M64 BAR assigned */
146 set_bit(phb->ioda.m64_bar_idx, &phb->ioda.m64_bar_alloc);
147
148 /*
149 * Strip off the segment used by the reserved PE, which is
150 * expected to be 0 or last one of PE capacity.
151 */
152 r = &phb->hose->mem_resources[1];
153 if (phb->ioda.reserved_pe == 0)
154 r->start += phb->ioda.m64_segsize;
155 else if (phb->ioda.reserved_pe == (phb->ioda.total_pe - 1))
156 r->end -= phb->ioda.m64_segsize;
157 else
158 pr_warn(" Cannot strip M64 segment for reserved PE#%d\n",
159 phb->ioda.reserved_pe);
160
161 return 0;
162
163fail:
164 pr_warn(" Failure %lld %s M64 BAR#%d\n",
165 rc, desc, phb->ioda.m64_bar_idx);
166 opal_pci_phb_mmio_enable(phb->opal_id,
167 OPAL_M64_WINDOW_TYPE,
168 phb->ioda.m64_bar_idx,
169 OPAL_DISABLE_M64);
170 return -EIO;
171}
172
173static void pnv_ioda2_alloc_m64_pe(struct pnv_phb *phb)
174{
175 resource_size_t sgsz = phb->ioda.m64_segsize;
176 struct pci_dev *pdev;
177 struct resource *r;
178 int base, step, i;
179
180 /*
181 * Root bus always has full M64 range and root port has
182 * M64 range used in reality. So we're checking root port
183 * instead of root bus.
184 */
185 list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
186 for (i = PCI_BRIDGE_RESOURCES;
187 i <= PCI_BRIDGE_RESOURCE_END; i++) {
188 r = &pdev->resource[i];
189 if (!r->parent ||
190 !pnv_pci_is_mem_pref_64(r->flags))
191 continue;
192
193 base = (r->start - phb->ioda.m64_base) / sgsz;
194 for (step = 0; step < resource_size(r) / sgsz; step++)
195 set_bit(base + step, phb->ioda.pe_alloc);
196 }
197 }
198}
199
200static int pnv_ioda2_pick_m64_pe(struct pnv_phb *phb,
201 struct pci_bus *bus, int all)
202{
203 resource_size_t segsz = phb->ioda.m64_segsize;
204 struct pci_dev *pdev;
205 struct resource *r;
206 struct pnv_ioda_pe *master_pe, *pe;
207 unsigned long size, *pe_alloc;
208 bool found;
209 int start, i, j;
210
211 /* Root bus shouldn't use M64 */
212 if (pci_is_root_bus(bus))
213 return IODA_INVALID_PE;
214
215 /* We support only one M64 window on each bus */
216 found = false;
217 pci_bus_for_each_resource(bus, r, i) {
218 if (r && r->parent &&
219 pnv_pci_is_mem_pref_64(r->flags)) {
220 found = true;
221 break;
222 }
223 }
224
225 /* No M64 window found ? */
226 if (!found)
227 return IODA_INVALID_PE;
228
229 /* Allocate bitmap */
230 size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
231 pe_alloc = kzalloc(size, GFP_KERNEL);
232 if (!pe_alloc) {
233 pr_warn("%s: Out of memory !\n",
234 __func__);
235 return IODA_INVALID_PE;
236 }
237
238 /*
239 * Figure out reserved PE numbers by the PE
240 * and its child PEs.
241 */
242 start = (r->start - phb->ioda.m64_base) / segsz;
243 for (i = 0; i < resource_size(r) / segsz; i++)
244 set_bit(start + i, pe_alloc);
245
246 if (all)
247 goto done;
248
249 /*
250 * If the PE doesn't cover all subordinate buses,
251 * we need subtract from reserved PEs for children.
252 */
253 list_for_each_entry(pdev, &bus->devices, bus_list) {
254 if (!pdev->subordinate)
255 continue;
256
257 pci_bus_for_each_resource(pdev->subordinate, r, i) {
258 if (!r || !r->parent ||
259 !pnv_pci_is_mem_pref_64(r->flags))
260 continue;
261
262 start = (r->start - phb->ioda.m64_base) / segsz;
263 for (j = 0; j < resource_size(r) / segsz ; j++)
264 clear_bit(start + j, pe_alloc);
265 }
266 }
267
268 /*
269 * the current bus might not own M64 window and that's all
270 * contributed by its child buses. For the case, we needn't
271 * pick M64 dependent PE#.
272 */
273 if (bitmap_empty(pe_alloc, phb->ioda.total_pe)) {
274 kfree(pe_alloc);
275 return IODA_INVALID_PE;
276 }
277
278 /*
279 * Figure out the master PE and put all slave PEs to master
280 * PE's list to form compound PE.
281 */
282done:
283 master_pe = NULL;
284 i = -1;
285 while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
286 phb->ioda.total_pe) {
287 pe = &phb->ioda.pe_array[i];
288 pe->phb = phb;
289 pe->pe_number = i;
290
291 if (!master_pe) {
292 pe->flags |= PNV_IODA_PE_MASTER;
293 INIT_LIST_HEAD(&pe->slaves);
294 master_pe = pe;
295 } else {
296 pe->flags |= PNV_IODA_PE_SLAVE;
297 pe->master = master_pe;
298 list_add_tail(&pe->list, &master_pe->slaves);
299 }
300 }
301
302 kfree(pe_alloc);
303 return master_pe->pe_number;
304}
305
306static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
307{
308 struct pci_controller *hose = phb->hose;
309 struct device_node *dn = hose->dn;
310 struct resource *res;
311 const u32 *r;
312 u64 pci_addr;
313
314 if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
315 pr_info(" Firmware too old to support M64 window\n");
316 return;
317 }
318
319 r = of_get_property(dn, "ibm,opal-m64-window", NULL);
320 if (!r) {
321 pr_info(" No <ibm,opal-m64-window> on %s\n",
322 dn->full_name);
323 return;
324 }
325
326 /* FIXME: Support M64 for P7IOC */
327 if (phb->type != PNV_PHB_IODA2) {
328 pr_info(" Not support M64 window\n");
329 return;
330 }
331
332 res = &hose->mem_resources[1];
333 res->start = of_translate_address(dn, r + 2);
334 res->end = res->start + of_read_number(r + 4, 2) - 1;
335 res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
336 pci_addr = of_read_number(r, 2);
337 hose->mem_offset[1] = res->start - pci_addr;
338
339 phb->ioda.m64_size = resource_size(res);
340 phb->ioda.m64_segsize = phb->ioda.m64_size / phb->ioda.total_pe;
341 phb->ioda.m64_base = pci_addr;
342
343 /* Use last M64 BAR to cover M64 window */
344 phb->ioda.m64_bar_idx = 15;
345 phb->init_m64 = pnv_ioda2_init_m64;
346 phb->alloc_m64_pe = pnv_ioda2_alloc_m64_pe;
347 phb->pick_m64_pe = pnv_ioda2_pick_m64_pe;
348}
349
109/* Currently those 2 are only used when MSIs are enabled, this will change 350/* Currently those 2 are only used when MSIs are enabled, this will change
110 * but in the meantime, we need to protect them to avoid warnings 351 * but in the meantime, we need to protect them to avoid warnings
111 */ 352 */
@@ -363,9 +604,16 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
363 struct pci_controller *hose = pci_bus_to_host(bus); 604 struct pci_controller *hose = pci_bus_to_host(bus);
364 struct pnv_phb *phb = hose->private_data; 605 struct pnv_phb *phb = hose->private_data;
365 struct pnv_ioda_pe *pe; 606 struct pnv_ioda_pe *pe;
366 int pe_num; 607 int pe_num = IODA_INVALID_PE;
608
609 /* Check if PE is determined by M64 */
610 if (phb->pick_m64_pe)
611 pe_num = phb->pick_m64_pe(phb, bus, all);
612
613 /* The PE number isn't pinned by M64 */
614 if (pe_num == IODA_INVALID_PE)
615 pe_num = pnv_ioda_alloc_pe(phb);
367 616
368 pe_num = pnv_ioda_alloc_pe(phb);
369 if (pe_num == IODA_INVALID_PE) { 617 if (pe_num == IODA_INVALID_PE) {
370 pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n", 618 pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
371 __func__, pci_domain_nr(bus), bus->number); 619 __func__, pci_domain_nr(bus), bus->number);
@@ -373,7 +621,7 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
373 } 621 }
374 622
375 pe = &phb->ioda.pe_array[pe_num]; 623 pe = &phb->ioda.pe_array[pe_num];
376 pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS); 624 pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
377 pe->pbus = bus; 625 pe->pbus = bus;
378 pe->pdev = NULL; 626 pe->pdev = NULL;
379 pe->tce32_seg = -1; 627 pe->tce32_seg = -1;
@@ -441,8 +689,15 @@ static void pnv_ioda_setup_PEs(struct pci_bus *bus)
441static void pnv_pci_ioda_setup_PEs(void) 689static void pnv_pci_ioda_setup_PEs(void)
442{ 690{
443 struct pci_controller *hose, *tmp; 691 struct pci_controller *hose, *tmp;
692 struct pnv_phb *phb;
444 693
445 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { 694 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
695 phb = hose->private_data;
696
697 /* M64 layout might affect PE allocation */
698 if (phb->alloc_m64_pe)
699 phb->alloc_m64_pe(phb);
700
446 pnv_ioda_setup_PEs(hose->bus); 701 pnv_ioda_setup_PEs(hose->bus);
447 } 702 }
448} 703}
@@ -1071,9 +1326,6 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
1071 index++; 1326 index++;
1072 } 1327 }
1073 } else if (res->flags & IORESOURCE_MEM) { 1328 } else if (res->flags & IORESOURCE_MEM) {
1074 /* WARNING: Assumes M32 is mem region 0 in PHB. We need to
1075 * harden that algorithm when we start supporting M64
1076 */
1077 region.start = res->start - 1329 region.start = res->start -
1078 hose->mem_offset[0] - 1330 hose->mem_offset[0] -
1079 phb->ioda.m32_pci_base; 1331 phb->ioda.m32_pci_base;
@@ -1193,7 +1445,10 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
1193 bridge = bridge->bus->self; 1445 bridge = bridge->bus->self;
1194 } 1446 }
1195 1447
1196 /* We need support prefetchable memory window later */ 1448 /* We fall back to M32 if M64 isn't supported */
1449 if (phb->ioda.m64_segsize &&
1450 pnv_pci_is_mem_pref_64(type))
1451 return phb->ioda.m64_segsize;
1197 if (type & IORESOURCE_MEM) 1452 if (type & IORESOURCE_MEM)
1198 return phb->ioda.m32_segsize; 1453 return phb->ioda.m32_segsize;
1199 1454
@@ -1314,6 +1569,10 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
1314 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL); 1569 prop32 = of_get_property(np, "ibm,opal-reserved-pe", NULL);
1315 if (prop32) 1570 if (prop32)
1316 phb->ioda.reserved_pe = be32_to_cpup(prop32); 1571 phb->ioda.reserved_pe = be32_to_cpup(prop32);
1572
1573 /* Parse 64-bit MMIO range */
1574 pnv_ioda_parse_m64_window(phb);
1575
1317 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]); 1576 phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
1318 /* FW Has already off top 64k of M32 space (MSI space) */ 1577 /* FW Has already off top 64k of M32 space (MSI space) */
1319 phb->ioda.m32_size += 0x10000; 1578 phb->ioda.m32_size += 0x10000;
@@ -1349,14 +1608,6 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
1349 /* Calculate how many 32-bit TCE segments we have */ 1608 /* Calculate how many 32-bit TCE segments we have */
1350 phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28; 1609 phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
1351 1610
1352 /* Clear unusable m64 */
1353 hose->mem_resources[1].flags = 0;
1354 hose->mem_resources[1].start = 0;
1355 hose->mem_resources[1].end = 0;
1356 hose->mem_resources[2].flags = 0;
1357 hose->mem_resources[2].start = 0;
1358 hose->mem_resources[2].end = 0;
1359
1360#if 0 /* We should really do that ... */ 1611#if 0 /* We should really do that ... */
1361 rc = opal_pci_set_phb_mem_window(opal->phb_id, 1612 rc = opal_pci_set_phb_mem_window(opal->phb_id,
1362 window_type, 1613 window_type,
@@ -1366,12 +1617,16 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
1366 segment_size); 1617 segment_size);
1367#endif 1618#endif
1368 1619
1369 pr_info(" %d (%d) PE's M32: 0x%x [segment=0x%x]" 1620 pr_info(" %03d (%03d) PE's M32: 0x%x [segment=0x%x]\n",
1370 " IO: 0x%x [segment=0x%x]\n", 1621 phb->ioda.total_pe, phb->ioda.reserved_pe,
1371 phb->ioda.total_pe, 1622 phb->ioda.m32_size, phb->ioda.m32_segsize);
1372 phb->ioda.reserved_pe, 1623 if (phb->ioda.m64_size)
1373 phb->ioda.m32_size, phb->ioda.m32_segsize, 1624 pr_info(" M64: 0x%lx [segment=0x%lx]\n",
1374 phb->ioda.io_size, phb->ioda.io_segsize); 1625 phb->ioda.m64_size, phb->ioda.m64_segsize);
1626 if (phb->ioda.io_size)
1627 pr_info(" IO: 0x%x [segment=0x%x]\n",
1628 phb->ioda.io_size, phb->ioda.io_segsize);
1629
1375 1630
1376 phb->hose->ops = &pnv_pci_ops; 1631 phb->hose->ops = &pnv_pci_ops;
1377#ifdef CONFIG_EEH 1632#ifdef CONFIG_EEH
@@ -1419,6 +1674,10 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
1419 ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); 1674 ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
1420 ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET); 1675 ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET);
1421 } 1676 }
1677
1678 /* Configure M64 window */
1679 if (phb->init_m64 && phb->init_m64(phb))
1680 hose->mem_resources[1].flags = 0;
1422} 1681}
1423 1682
1424void __init pnv_pci_init_ioda2_phb(struct device_node *np) 1683void __init pnv_pci_init_ioda2_phb(struct device_node *np)
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 6f5ff6921dab..49da9f154950 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -21,6 +21,8 @@ enum pnv_phb_model {
21#define PNV_IODA_PE_DEV (1 << 0) /* PE has single PCI device */ 21#define PNV_IODA_PE_DEV (1 << 0) /* PE has single PCI device */
22#define PNV_IODA_PE_BUS (1 << 1) /* PE has primary PCI bus */ 22#define PNV_IODA_PE_BUS (1 << 1) /* PE has primary PCI bus */
23#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */ 23#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
24#define PNV_IODA_PE_MASTER (1 << 3) /* Master PE in compound case */
25#define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */
24 26
25/* Data associated with a PE, including IOMMU tracking etc.. */ 27/* Data associated with a PE, including IOMMU tracking etc.. */
26struct pnv_phb; 28struct pnv_phb;
@@ -64,6 +66,10 @@ struct pnv_ioda_pe {
64 */ 66 */
65 int mve_number; 67 int mve_number;
66 68
69 /* PEs in compound case */
70 struct pnv_ioda_pe *master;
71 struct list_head slaves;
72
67 /* Link in list of PE#s */ 73 /* Link in list of PE#s */
68 struct list_head dma_link; 74 struct list_head dma_link;
69 struct list_head list; 75 struct list_head list;
@@ -119,6 +125,9 @@ struct pnv_phb {
119 void (*fixup_phb)(struct pci_controller *hose); 125 void (*fixup_phb)(struct pci_controller *hose);
120 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); 126 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
121 void (*shutdown)(struct pnv_phb *phb); 127 void (*shutdown)(struct pnv_phb *phb);
128 int (*init_m64)(struct pnv_phb *phb);
129 void (*alloc_m64_pe)(struct pnv_phb *phb);
130 int (*pick_m64_pe)(struct pnv_phb *phb, struct pci_bus *bus, int all);
122 131
123 union { 132 union {
124 struct { 133 struct {
@@ -129,9 +138,20 @@ struct pnv_phb {
129 /* Global bridge info */ 138 /* Global bridge info */
130 unsigned int total_pe; 139 unsigned int total_pe;
131 unsigned int reserved_pe; 140 unsigned int reserved_pe;
141
142 /* 32-bit MMIO window */
132 unsigned int m32_size; 143 unsigned int m32_size;
133 unsigned int m32_segsize; 144 unsigned int m32_segsize;
134 unsigned int m32_pci_base; 145 unsigned int m32_pci_base;
146
147 /* 64-bit MMIO window */
148 unsigned int m64_bar_idx;
149 unsigned long m64_size;
150 unsigned long m64_segsize;
151 unsigned long m64_base;
152 unsigned long m64_bar_alloc;
153
154 /* IO ports */
135 unsigned int io_size; 155 unsigned int io_size;
136 unsigned int io_segsize; 156 unsigned int io_segsize;
137 unsigned int io_pci_base; 157 unsigned int io_pci_base;