author     Linas Vepstas <linas@linas.org>        2005-11-03 19:53:07 -0500
committer  Paul Mackerras <paulus@samba.org>      2006-01-09 23:29:04 -0500
commit     5d5a0936b3ad9e3d3f6eaf61f1a06c62ea0e7a59 (patch)
tree       b663cd4b65c378161afef4d27e579af883b31457 /arch/powerpc/platforms/pseries/eeh.c
parent     77bd741561016134d1761d6101c4f0361025062f (diff)
[PATCH] powerpc: Split out PCI address cache to its own file
25-pci-address-cache.patch
The core EEH file is rather large. This patch splits out a self-contained
chunk of it into its own file: the chunk that performs the caching and
lookup of PCI devices based on the I/O addresses of their resources.
This code is almost architecture-independent and could be used by any
system that wants to find a PCI device based only on the I/O address
used by the device (a standalone sketch of the idea follows below).
Signed-off-by: Linas Vepstas <linas@austin.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
(cherry picked from b0b291d59906d4a9a89ed9e34d9fd684c7188924 commit)
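
The range-lookup idea the commit message describes generalizes readily. Below is a minimal standalone C sketch of the same lookup, under stated simplifications: a plain (unbalanced) binary search tree stands in for the kernel's red-black tree, a string stands in for struct pci_dev, and every name here (addr_range, find_slot, range_insert, range_lookup) is hypothetical, invented for illustration only.

/* Toy model of the address cache: non-overlapping [lo, hi] ranges in
 * an ordered binary tree; one root-to-leaf walk resolves an address
 * to its owning range, mirroring __pci_get_device_by_addr() below. */
#include <stdio.h>
#include <stdlib.h>

struct addr_range {
	unsigned long lo, hi;		/* inclusive range owned by one device */
	const char *owner;		/* stand-in for struct pci_dev * */
	struct addr_range *left, *right;
};

/* Find the empty slot for [lo, hi]; NULL if it overlaps an existing
 * range (the kernel code warns and reuses the entry in that case). */
static struct addr_range **find_slot(struct addr_range **p,
				     unsigned long lo, unsigned long hi)
{
	while (*p) {
		if (hi < (*p)->lo)
			p = &(*p)->left;
		else if (lo > (*p)->hi)
			p = &(*p)->right;
		else
			return NULL;	/* overlapping range */
	}
	return p;
}

static int range_insert(struct addr_range **root, unsigned long lo,
			unsigned long hi, const char *owner)
{
	struct addr_range **slot = find_slot(root, lo, hi);
	struct addr_range *r;

	if (!slot)
		return -1;
	r = calloc(1, sizeof(*r));
	if (!r)
		return -1;
	r->lo = lo;
	r->hi = hi;
	r->owner = owner;
	*slot = r;
	return 0;
}

/* The lookup: same shape as the kernel's while (n) { ... } walk. */
static const char *range_lookup(struct addr_range *n, unsigned long addr)
{
	while (n) {
		if (addr < n->lo)
			n = n->left;
		else if (addr > n->hi)
			n = n->right;
		else
			return n->owner;
	}
	return NULL;
}

int main(void)
{
	struct addr_range *root = NULL;
	const char *hit;

	range_insert(&root, 0x1000, 0x1fff, "dev-a");
	range_insert(&root, 0x4000, 0x7fff, "dev-b");

	printf("%s\n", range_lookup(root, 0x4100));	/* prints dev-b */
	hit = range_lookup(root, 0x3000);
	printf("%s\n", hit ? hit : "miss");		/* prints miss */
	return 0;
}

The real code keeps the tree balanced (rb-tree) so the walk stays O(log n), and performs it under a spinlock with interrupts disabled, which is what makes lookup safe from interrupt context.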
Diffstat (limited to 'arch/powerpc/platforms/pseries/eeh.c')
 -rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 295
 1 files changed, 2 insertions(+), 293 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index d6560c45637b..57bef2c2f325 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -76,9 +76,6 @@
  */
 #define EEH_MAX_FAILS 100000
 
-/* Misc forward declaraions */
-static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn);
-
 /* RTAS tokens */
 static int ibm_set_eeh_option;
 static int ibm_set_slot_reset;
@@ -107,296 +104,8 @@ static DEFINE_PER_CPU(unsigned long, false_positives);
 static DEFINE_PER_CPU(unsigned long, ignored_failures);
 static DEFINE_PER_CPU(unsigned long, slot_resets);
 
-/**
- * The pci address cache subsystem. This subsystem places
- * PCI device address resources into a red-black tree, sorted
- * according to the address range, so that given only an i/o
- * address, the corresponding PCI device can be **quickly**
- * found. It is safe to perform an address lookup in an interrupt
- * context; this ability is an important feature.
- *
- * Currently, the only customer of this code is the EEH subsystem;
- * thus, this code has been somewhat tailored to suit EEH better.
- * In particular, the cache does *not* hold the addresses of devices
- * for which EEH is not enabled.
- *
- * (Implementation Note: The RB tree seems to be better/faster
- * than any hash algo I could think of for this problem, even
- * with the penalty of slow pointer chases for d-cache misses).
- */
-struct pci_io_addr_range
-{
-	struct rb_node rb_node;
-	unsigned long addr_lo;
-	unsigned long addr_hi;
-	struct pci_dev *pcidev;
-	unsigned int flags;
-};
-
-static struct pci_io_addr_cache
-{
-	struct rb_root rb_root;
-	spinlock_t piar_lock;
-} pci_io_addr_cache_root;
-
-static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
-{
-	struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
-
-	while (n) {
-		struct pci_io_addr_range *piar;
-		piar = rb_entry(n, struct pci_io_addr_range, rb_node);
-
-		if (addr < piar->addr_lo) {
-			n = n->rb_left;
-		} else {
-			if (addr > piar->addr_hi) {
-				n = n->rb_right;
-			} else {
-				pci_dev_get(piar->pcidev);
-				return piar->pcidev;
-			}
-		}
-	}
-
-	return NULL;
-}
-
-/**
- * pci_get_device_by_addr - Get device, given only address
- * @addr: mmio (PIO) phys address or i/o port number
- *
- * Given an mmio phys address, or a port number, find a pci device
- * that implements this address. Be sure to pci_dev_put the device
- * when finished. I/O port numbers are assumed to be offset
- * from zero (that is, they do *not* have pci_io_addr added in).
- * It is safe to call this function within an interrupt.
- */
-static struct pci_dev *pci_get_device_by_addr(unsigned long addr)
-{
-	struct pci_dev *dev;
-	unsigned long flags;
-
-	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
-	dev = __pci_get_device_by_addr(addr);
-	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
-	return dev;
-}
-
-#ifdef DEBUG
-/*
- * Handy-dandy debug print routine, does nothing more
- * than print out the contents of our addr cache.
- */
-static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
-{
-	struct rb_node *n;
-	int cnt = 0;
-
-	n = rb_first(&cache->rb_root);
-	while (n) {
-		struct pci_io_addr_range *piar;
-		piar = rb_entry(n, struct pci_io_addr_range, rb_node);
-		printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n",
-		       (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", cnt,
-		       piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
-		cnt++;
-		n = rb_next(n);
-	}
-}
-#endif
-
-/* Insert address range into the rb tree. */
-static struct pci_io_addr_range *
-pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
-		      unsigned long ahi, unsigned int flags)
-{
-	struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node;
-	struct rb_node *parent = NULL;
-	struct pci_io_addr_range *piar;
-
-	/* Walk tree, find a place to insert into tree */
-	while (*p) {
-		parent = *p;
-		piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
-		if (ahi < piar->addr_lo) {
-			p = &parent->rb_left;
-		} else if (alo > piar->addr_hi) {
-			p = &parent->rb_right;
-		} else {
-			if (dev != piar->pcidev ||
-			    alo != piar->addr_lo || ahi != piar->addr_hi) {
-				printk(KERN_WARNING "PIAR: overlapping address range\n");
-			}
-			return piar;
-		}
-	}
-	piar = (struct pci_io_addr_range *)kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC);
-	if (!piar)
-		return NULL;
-
-	piar->addr_lo = alo;
-	piar->addr_hi = ahi;
-	piar->pcidev = dev;
-	piar->flags = flags;
-
-#ifdef DEBUG
-	printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
-	       alo, ahi, pci_name (dev));
-#endif
-
-	rb_link_node(&piar->rb_node, parent, p);
-	rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
-
-	return piar;
-}
-
-static void __pci_addr_cache_insert_device(struct pci_dev *dev)
-{
-	struct device_node *dn;
-	struct pci_dn *pdn;
-	int i;
-	int inserted = 0;
-
-	dn = pci_device_to_OF_node(dev);
-	if (!dn) {
-		printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
-		return;
-	}
-
-	/* Skip any devices for which EEH is not enabled. */
-	pdn = PCI_DN(dn);
-	if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
-	    pdn->eeh_mode & EEH_MODE_NOCHECK) {
-#ifdef DEBUG
-		printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
-		       pci_name(dev), pdn->node->full_name);
-#endif
-		return;
-	}
-
-	/* The cache holds a reference to the device... */
-	pci_dev_get(dev);
-
-	/* Walk resources on this device, poke them into the tree */
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-		unsigned long start = pci_resource_start(dev,i);
-		unsigned long end = pci_resource_end(dev,i);
-		unsigned int flags = pci_resource_flags(dev,i);
-
-		/* We are interested only bus addresses, not dma or other stuff */
-		if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
-			continue;
-		if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
-			continue;
-		pci_addr_cache_insert(dev, start, end, flags);
-		inserted = 1;
-	}
-
-	/* If there was nothing to add, the cache has no reference... */
-	if (!inserted)
-		pci_dev_put(dev);
-}
-
-/**
- * pci_addr_cache_insert_device - Add a device to the address cache
- * @dev: PCI device whose I/O addresses we are interested in.
- *
- * In order to support the fast lookup of devices based on addresses,
- * we maintain a cache of devices that can be quickly searched.
- * This routine adds a device to that cache.
- */
-static void pci_addr_cache_insert_device(struct pci_dev *dev)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
-	__pci_addr_cache_insert_device(dev);
-	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
-}
-
-static inline void __pci_addr_cache_remove_device(struct pci_dev *dev)
-{
-	struct rb_node *n;
-	int removed = 0;
-
-restart:
-	n = rb_first(&pci_io_addr_cache_root.rb_root);
-	while (n) {
-		struct pci_io_addr_range *piar;
-		piar = rb_entry(n, struct pci_io_addr_range, rb_node);
-
-		if (piar->pcidev == dev) {
-			rb_erase(n, &pci_io_addr_cache_root.rb_root);
-			removed = 1;
-			kfree(piar);
-			goto restart;
-		}
-		n = rb_next(n);
-	}
-
-	/* The cache no longer holds its reference to this device... */
-	if (removed)
-		pci_dev_put(dev);
-}
-
-/**
- * pci_addr_cache_remove_device - remove pci device from addr cache
- * @dev: device to remove
- *
- * Remove a device from the addr-cache tree.
- * This is potentially expensive, since it will walk
- * the tree multiple times (once per resource).
- * But so what; device removal doesn't need to be that fast.
- */
-static void pci_addr_cache_remove_device(struct pci_dev *dev)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
-	__pci_addr_cache_remove_device(dev);
-	spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
-}
-
-/**
- * pci_addr_cache_build - Build a cache of I/O addresses
- *
- * Build a cache of pci i/o addresses. This cache will be used to
- * find the pci device that corresponds to a given address.
- * This routine scans all pci busses to build the cache.
- * Must be run late in boot process, after the pci controllers
- * have been scaned for devices (after all device resources are known).
- */
-void __init pci_addr_cache_build(void)
-{
-	struct device_node *dn;
-	struct pci_dev *dev = NULL;
-
-	if (!eeh_subsystem_enabled)
-		return;
-
-	spin_lock_init(&pci_io_addr_cache_root.piar_lock);
-
-	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
-		/* Ignore PCI bridges ( XXX why ??) */
-		if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
-			continue;
-		}
-		pci_addr_cache_insert_device(dev);
-
-		/* Save the BAR's; firmware doesn't restore these after EEH reset */
-		dn = pci_device_to_OF_node(dev);
-		eeh_save_bars(dev, PCI_DN(dn));
-	}
-
-#ifdef DEBUG
-	/* Verify tree built up above, echo back the list of addrs. */
-	pci_addr_cache_print(&pci_io_addr_cache_root);
-#endif
-}
-
 /* --------------------------------------------------------------- */
-/* Above lies the PCI Address Cache. Below lies the EEH event infrastructure */
+/* Below lies the EEH event infrastructure */
 
 void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
 {
@@ -880,7 +589,7 @@ void eeh_restore_bars(struct pci_dn *pdn)
  * PCI devices are added individuallly; but, for the restore,
  * an entire slot is reset at a time.
  */
-static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn)
+void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn)
 {
 	int i;
 
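
The split-out cache also fixes a calling convention for its consumers. The following kernel-context fragment (not a standalone program) sketches that convention; eeh_check_addr() is a hypothetical name invented here for illustration, while pci_get_device_by_addr() and the reference-counting rule come straight from the diff above: a successful lookup takes a reference via pci_dev_get(), so every hit must be balanced with pci_dev_put().

/* Hypothetical consumer of the address cache; sketch only. */
static int eeh_check_addr(unsigned long addr)
{
	struct pci_dev *dev;

	/* Safe in interrupt context: the lookup only takes a spinlock
	 * with interrupts disabled and walks the rb tree. */
	dev = pci_get_device_by_addr(addr);
	if (!dev)
		return 0;	/* address not owned by an EEH-enabled device */

	/* ... examine the device / kick off EEH recovery here ... */

	pci_dev_put(dev);	/* balance the reference the lookup took */
	return 1;
}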