author		Kishon Vijay Abraham I <kishon@ti.com>		2018-10-17 03:41:01 -0400
committer	Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>	2018-10-17 04:44:52 -0400
commit		a1cabd2b42fd7710f75dda4ef504ded9ddae612c (patch)
tree		a1dc927d2429f10a7ac8365bbe8cc196850b8016 /drivers/pci/controller/dwc/pci-keystone.c
parent		c81ab80136727e11ad76a601b179d4dc84b08120 (diff)
PCI: keystone: Use uniform function naming convention
No functional change. Some function names begin with ks_dw_pcie_*
and some begin with ks_pcie_*. Rename them so that all function
names begin with ks_pcie_*.
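For illustration, a few representative renames taken from the diff
below (the remaining renames follow the same pattern):

    ks_dw_app_readl()            -> ks_pcie_app_readl()
    ks_dw_app_writel()           -> ks_pcie_app_writel()
    ks_dw_pcie_handle_msi_irq()  -> ks_pcie_handle_msi_irq()
    quirk_limit_mrrs()           -> ks_pcie_quirk()
    keystone_pcie_fault()        -> ks_pcie_fault()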
Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Diffstat (limited to 'drivers/pci/controller/dwc/pci-keystone.c')
-rw-r--r--	drivers/pci/controller/dwc/pci-keystone.c	221
1 file changed, 111 insertions(+), 110 deletions(-)
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 926e345dc965..e2045b5d2af2 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -118,7 +118,7 @@ static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
 	*bit_pos = offset >> 3;
 }
 
-static phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
+static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -126,17 +126,18 @@ static phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
 	return ks_pcie->app.start + MSI_IRQ;
 }
 
-static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
 {
 	return readl(ks_pcie->va_app_base + offset);
 }
 
-static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
+static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
+			       u32 val)
 {
 	writel(val, ks_pcie->va_app_base + offset);
 }
 
-static void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
 {
 	struct dw_pcie *pci = ks_pcie->pci;
 	struct pcie_port *pp = &pci->pp;
@@ -144,7 +145,7 @@ static void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
 	u32 pending, vector;
 	int src, virq;
 
-	pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
+	pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
 
 	/*
 	 * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
@@ -161,7 +162,7 @@ static void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
 	}
 }
 
-static void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
+static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
 {
 	u32 reg_offset, bit_pos;
 	struct keystone_pcie *ks_pcie;
@@ -171,55 +172,55 @@ static void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
 	ks_pcie = to_keystone_pcie(pci);
 	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
 
-	ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
+	ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
 			 BIT(bit_pos));
-	ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+	ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
 }
 
-static void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq)
 {
 	u32 reg_offset, bit_pos;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 
 	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
-	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+	ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
 			 BIT(bit_pos));
 }
 
-static void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
 {
 	u32 reg_offset, bit_pos;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 
 	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
-	ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+	ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
 			 BIT(bit_pos));
 }
 
-static int ks_dw_pcie_msi_host_init(struct pcie_port *pp)
+static int ks_pcie_msi_host_init(struct pcie_port *pp)
 {
 	return dw_pcie_allocate_domains(pp);
 }
 
-static void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
+static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
 {
 	int i;
 
 	for (i = 0; i < PCI_NUM_INTX; i++)
-		ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
+		ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
 }
 
-static void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
+static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
 					 int offset)
 {
 	struct dw_pcie *pci = ks_pcie->pci;
 	struct device *dev = pci->dev;
 	u32 pending;
 	int virq;
 
-	pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
+	pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
 
 	if (BIT(0) & pending) {
 		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
@@ -228,19 +229,19 @@ static void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
 	}
 
 	/* EOI the INTx interrupt */
-	ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
+	ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
 }
 
-static void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
+static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
 {
-	ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
+	ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
 }
 
-static irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
+static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
 {
 	u32 status;
 
-	status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
+	status = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
 	if (!status)
 		return IRQ_NONE;
 
@@ -249,83 +250,83 @@ static irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
 		status);
 
 	/* Ack the IRQ; status bits are RW1C */
-	ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
+	ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
 	return IRQ_HANDLED;
 }
 
-static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
+static void ks_pcie_ack_legacy_irq(struct irq_data *d)
 {
 }
 
-static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
+static void ks_pcie_mask_legacy_irq(struct irq_data *d)
 {
 }
 
-static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
+static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
 {
 }
 
-static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
+static struct irq_chip ks_pcie_legacy_irq_chip = {
 	.name = "Keystone-PCI-Legacy-IRQ",
-	.irq_ack = ks_dw_pcie_ack_legacy_irq,
-	.irq_mask = ks_dw_pcie_mask_legacy_irq,
-	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
+	.irq_ack = ks_pcie_ack_legacy_irq,
+	.irq_mask = ks_pcie_mask_legacy_irq,
+	.irq_unmask = ks_pcie_unmask_legacy_irq,
 };
 
-static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
+static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
 					  unsigned int irq,
 					  irq_hw_number_t hw_irq)
 {
-	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
+	irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
 				 handle_level_irq);
 	irq_set_chip_data(irq, d->host_data);
 
 	return 0;
 }
 
-static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
-	.map = ks_dw_pcie_init_legacy_irq_map,
+static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+	.map = ks_pcie_init_legacy_irq_map,
 	.xlate = irq_domain_xlate_onetwocell,
 };
 
 /**
- * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
  * registers
  *
  * Since modification of dbi_cs2 involves different clock domain, read the
  * status back to ensure the transition is complete.
  */
-static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
 {
 	u32 val;
 
-	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
-	ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
 
 	do {
-		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 	} while (!(val & DBI_CS2_EN_VAL));
 }
 
 /**
- * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
  *
  * Since modification of dbi_cs2 involves different clock domain, read the
  * status back to ensure the transition is complete.
  */
-static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
 {
 	u32 val;
 
-	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
-	ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
 
 	do {
-		val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+		val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 	} while (val & DBI_CS2_EN_VAL);
 }
 
-static void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 {
 	struct dw_pcie *pci = ks_pcie->pci;
 	struct pcie_port *pp = &pci->pp;
@@ -334,26 +335,26 @@ static void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
 	u32 val;
 
 	/* Disable BARs for inbound access */
-	ks_dw_pcie_set_dbi_mode(ks_pcie);
+	ks_pcie_set_dbi_mode(ks_pcie);
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
-	ks_dw_pcie_clear_dbi_mode(ks_pcie);
+	ks_pcie_clear_dbi_mode(ks_pcie);
 
 	/* Set outbound translation size per window division */
-	ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
+	ks_pcie_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
 
 	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
 
 	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
 	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
-		ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
-		ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
+		ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
+		ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
 		start += tr_size;
 	}
 
 	/* Enable OB translation */
-	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
-	ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
 }
 
 /**
@@ -394,13 +395,13 @@ static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
 	if (bus != 1)
 		regval |= BIT(24);
 
-	ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
+	ks_pcie_app_writel(ks_pcie, CFG_SETUP, regval);
 	return pp->va_cfg0_base;
 }
 
-static int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
 				    unsigned int devfn, int where, int size,
 				    u32 *val)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -412,9 +413,9 @@ static int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
 	return dw_pcie_read(addr + where, size, val);
 }
 
-static int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
 				    unsigned int devfn, int where, int size,
 				    u32 val)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
@@ -427,23 +428,23 @@ static int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
 }
 
 /**
- * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
  *
  * This sets BAR0 to enable inbound access for MSI_IRQ register
 */
-static void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
+static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
 
 	/* Configure and set up BAR0 */
-	ks_dw_pcie_set_dbi_mode(ks_pcie);
+	ks_pcie_set_dbi_mode(ks_pcie);
 
 	/* Enable BAR0 */
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
 	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
 
-	ks_dw_pcie_clear_dbi_mode(ks_pcie);
+	ks_pcie_clear_dbi_mode(ks_pcie);
 
 	/*
 	 * For BAR0, just setting bus address for inbound writes (MSI) should
@@ -453,9 +454,9 @@ static void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
 }
 
 /**
- * ks_dw_pcie_link_up() - Check if link up
+ * ks_pcie_link_up() - Check if link up
  */
-static int ks_dw_pcie_link_up(struct dw_pcie *pci)
+static int ks_pcie_link_up(struct dw_pcie *pci)
 {
 	u32 val;
 
@@ -463,28 +464,28 @@ static int ks_dw_pcie_link_up(struct dw_pcie *pci)
 	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
 }
 
-static void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
 {
 	u32 val;
 
 	/* Disable Link training */
-	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
 	val &= ~LTSSM_EN_VAL;
-	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
 
 	/* Initiate Link Training */
-	val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
-	ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+	val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+	ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
 }
 
 /**
- * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
+ * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
  *
  * Ioremap the register resources, initialize legacy irq domain
  * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
 * PCI host controller.
 */
-static int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie)
+static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
 {
 	struct dw_pcie *pci = ks_pcie->pci;
 	struct pcie_port *pp = &pci->pp;
@@ -517,7 +518,7 @@ static int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie)
 	ks_pcie->legacy_irq_domain =
 			irq_domain_add_linear(ks_pcie->legacy_intc_np,
 					      PCI_NUM_INTX,
-					      &ks_dw_pcie_legacy_irq_domain_ops,
+					      &ks_pcie_legacy_irq_domain_ops,
 					      NULL);
 	if (!ks_pcie->legacy_irq_domain) {
 		dev_err(dev, "Failed to add irq domain for legacy irqs\n");
@@ -527,7 +528,7 @@ static int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie)
 	return dw_pcie_host_init(pp);
 }
 
-static void quirk_limit_mrrs(struct pci_dev *dev)
+static void ks_pcie_quirk(struct pci_dev *dev)
 {
 	struct pci_bus *bus = dev->bus;
 	struct pci_dev *bridge;
@@ -568,7 +569,7 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
 		}
 	}
 }
-DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
 
 static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
 {
@@ -580,7 +581,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
 		return 0;
 	}
 
-	ks_dw_pcie_initiate_link_train(ks_pcie);
+	ks_pcie_initiate_link_train(ks_pcie);
 
 	/* check if the link is up or not */
 	if (!dw_pcie_wait_for_link(pci))
@@ -607,7 +608,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
 	 * ack operation.
 	 */
 	chained_irq_enter(chip, desc);
-	ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
+	ks_pcie_handle_msi_irq(ks_pcie, offset);
 	chained_irq_exit(chip, desc);
 }
 
@@ -636,7 +637,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
 	 * ack operation.
 	 */
 	chained_irq_enter(chip, desc);
-	ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+	ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
 	chained_irq_exit(chip, desc);
 }
 
@@ -708,7 +709,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
 						 ks_pcie_legacy_irq_handler,
 						 ks_pcie);
 	}
-	ks_dw_pcie_enable_legacy_irqs(ks_pcie);
+	ks_pcie_enable_legacy_irqs(ks_pcie);
 
 	/* MSI IRQ */
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -720,7 +721,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
 	}
 
 	if (ks_pcie->error_irq > 0)
-		ks_dw_pcie_enable_error_irq(ks_pcie);
+		ks_pcie_enable_error_irq(ks_pcie);
 }
 
 /*
@@ -728,8 +729,8 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
 * bus error instead of returning 0xffffffff. This handler always returns 0
 * for this kind of faults.
 */
-static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
-			       struct pt_regs *regs)
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+			 struct pt_regs *regs)
 {
 	unsigned long instr = *(unsigned long *) instruction_pointer(regs);
 
@@ -751,7 +752,7 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
 	dw_pcie_setup_rc(pp);
 
 	ks_pcie_establish_link(ks_pcie);
-	ks_dw_pcie_setup_rc_app_regs(ks_pcie);
+	ks_pcie_setup_rc_app_regs(ks_pcie);
 	ks_pcie_setup_interrupts(ks_pcie);
 	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
 	       pci->dbi_base + PCI_IO_BASE);
@@ -763,33 +764,33 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
 	 * PCIe access errors that result into OCP errors are caught by ARM as
 	 * "External aborts"
 	 */
-	hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+	hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
 			"Asynchronous external abort");
 
 	return 0;
 }
 
-static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
-	.rd_other_conf = ks_dw_pcie_rd_other_conf,
-	.wr_other_conf = ks_dw_pcie_wr_other_conf,
+static const struct dw_pcie_host_ops ks_pcie_host_ops = {
+	.rd_other_conf = ks_pcie_rd_other_conf,
+	.wr_other_conf = ks_pcie_wr_other_conf,
 	.host_init = ks_pcie_host_init,
-	.msi_set_irq = ks_dw_pcie_msi_set_irq,
-	.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
-	.get_msi_addr = ks_dw_pcie_get_msi_addr,
-	.msi_host_init = ks_dw_pcie_msi_host_init,
-	.msi_irq_ack = ks_dw_pcie_msi_irq_ack,
-	.scan_bus = ks_dw_pcie_v3_65_scan_bus,
+	.msi_set_irq = ks_pcie_msi_set_irq,
+	.msi_clear_irq = ks_pcie_msi_clear_irq,
+	.get_msi_addr = ks_pcie_get_msi_addr,
+	.msi_host_init = ks_pcie_msi_host_init,
+	.msi_irq_ack = ks_pcie_msi_irq_ack,
+	.scan_bus = ks_pcie_v3_65_scan_bus,
 };
 
-static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
+static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
 {
 	struct keystone_pcie *ks_pcie = priv;
 
-	return ks_dw_pcie_handle_error_irq(ks_pcie);
+	return ks_pcie_handle_error_irq(ks_pcie);
 }
 
-static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
+static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
 				   struct platform_device *pdev)
 {
 	struct dw_pcie *pci = ks_pcie->pci;
 	struct pcie_port *pp = &pci->pp;
@@ -818,7 +819,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
 	if (ks_pcie->error_irq <= 0)
 		dev_info(dev, "no error IRQ defined\n");
 	else {
-		ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
+		ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler,
 				  IRQF_SHARED, "pcie-error-irq", ks_pcie);
 		if (ret < 0) {
 			dev_err(dev, "failed to request error IRQ %d\n",
@@ -827,8 +828,8 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
 		}
 	}
 
-	pp->ops = &keystone_pcie_host_ops;
-	ret = ks_dw_pcie_host_init(ks_pcie);
+	pp->ops = &ks_pcie_host_ops;
+	ret = ks_pcie_dw_host_init(ks_pcie);
 	if (ret) {
 		dev_err(dev, "failed to initialize host\n");
 		return ret;
@@ -845,8 +846,8 @@ static const struct of_device_id ks_pcie_of_match[] = {
 	{ },
 };
 
-static const struct dw_pcie_ops dw_pcie_ops = {
-	.link_up = ks_dw_pcie_link_up,
+static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+	.link_up = ks_pcie_link_up,
 };
 
 static int __exit ks_pcie_remove(struct platform_device *pdev)
@@ -877,7 +878,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	pci->dev = dev;
-	pci->ops = &dw_pcie_ops;
+	pci->ops = &ks_pcie_dw_pcie_ops;
 
 	ks_pcie->pci = pci;
 
@@ -912,7 +913,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
-	ret = ks_add_pcie_port(ks_pcie, pdev);
+	ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
 	if (ret < 0)
 		goto fail_clk;
 