diff options
author | Myron Stowe <myron.stowe@hp.com> | 2010-10-21 16:24:14 -0400 |
---|---|---|
committer | Len Brown <len.brown@intel.com> | 2010-10-24 23:31:43 -0400 |
commit | 4a3cba5e72a5232842ff7c1ca691ec3450af64b9 (patch) | |
tree | bcf2659d20f6b8383f3c42d066595832097ca46e /drivers/acpi/osl.c | |
parent | 78cdb3ed4053798c894899b15d2255fb880edad4 (diff) |
ACPI: Page based coalescing of I/O remappings optimization
This patch optimizes ACPI MMIO remappings by keeping track of the
remappings at PAGE_SIZE granularity.
When an ioremap() occurs, the underlying infrastructure works on a 'page'
based granularity. As such, an ioremap() request for 1 byte for example,
will end up mapping in an entire (PAGE_SIZE) page. Huang Ying took
advantage of this in commit 15651291a2f8c11e7e6a42d8bfde7a213ff13262 by
checking if subsequent ioremap() requests reside within any of the list's
existing remappings still in place, and if so, incrementing a reference
count on the existing mapping as opposed to performing another ioremap().
Signed-off-by: Myron Stowe <myron.stowe@hp.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/acpi/osl.c')
-rw-r--r-- | drivers/acpi/osl.c | 62 |
1 file changed, 51 insertions, 11 deletions
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 32826893c2e6..885e222bcabd 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -104,6 +104,7 @@ struct acpi_ioremap { | |||
104 | void __iomem *virt; | 104 | void __iomem *virt; |
105 | acpi_physical_address phys; | 105 | acpi_physical_address phys; |
106 | acpi_size size; | 106 | acpi_size size; |
107 | struct kref ref; | ||
107 | }; | 108 | }; |
108 | 109 | ||
109 | static LIST_HEAD(acpi_ioremaps); | 110 | static LIST_HEAD(acpi_ioremaps); |
@@ -245,15 +246,28 @@ acpi_physical_address __init acpi_os_get_root_pointer(void) | |||
245 | } | 246 | } |
246 | 247 | ||
247 | /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ | 248 | /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ |
248 | static void __iomem * | 249 | static struct acpi_ioremap * |
249 | acpi_map_vaddr_lookup(acpi_physical_address phys, acpi_size size) | 250 | acpi_map_lookup(acpi_physical_address phys, acpi_size size) |
250 | { | 251 | { |
251 | struct acpi_ioremap *map; | 252 | struct acpi_ioremap *map; |
252 | 253 | ||
253 | list_for_each_entry_rcu(map, &acpi_ioremaps, list) | 254 | list_for_each_entry_rcu(map, &acpi_ioremaps, list) |
254 | if (map->phys <= phys && | 255 | if (map->phys <= phys && |
255 | phys + size <= map->phys + map->size) | 256 | phys + size <= map->phys + map->size) |
256 | return map->virt + (phys - map->phys); | 257 | return map; |
258 | |||
259 | return NULL; | ||
260 | } | ||
261 | |||
262 | /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ | ||
263 | static void __iomem * | ||
264 | acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size) | ||
265 | { | ||
266 | struct acpi_ioremap *map; | ||
267 | |||
268 | map = acpi_map_lookup(phys, size); | ||
269 | if (map) | ||
270 | return map->virt + (phys - map->phys); | ||
257 | 271 | ||
258 | return NULL; | 272 | return NULL; |
259 | } | 273 | } |
@@ -265,7 +279,8 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size) | |||
265 | struct acpi_ioremap *map; | 279 | struct acpi_ioremap *map; |
266 | 280 | ||
267 | list_for_each_entry_rcu(map, &acpi_ioremaps, list) | 281 | list_for_each_entry_rcu(map, &acpi_ioremaps, list) |
268 | if (map->virt == virt && map->size == size) | 282 | if (map->virt <= virt && |
283 | virt + size <= map->virt + map->size) | ||
269 | return map; | 284 | return map; |
270 | 285 | ||
271 | return NULL; | 286 | return NULL; |
@@ -274,9 +289,10 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size) | |||
274 | void __iomem *__init_refok | 289 | void __iomem *__init_refok |
275 | acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | 290 | acpi_os_map_memory(acpi_physical_address phys, acpi_size size) |
276 | { | 291 | { |
277 | struct acpi_ioremap *map; | 292 | struct acpi_ioremap *map, *tmp_map; |
278 | unsigned long flags; | 293 | unsigned long flags, pg_sz; |
279 | void __iomem *virt; | 294 | void __iomem *virt; |
295 | phys_addr_t pg_off; | ||
280 | 296 | ||
281 | if (phys > ULONG_MAX) { | 297 | if (phys > ULONG_MAX) { |
282 | printk(KERN_ERR PREFIX "Cannot map memory that high\n"); | 298 | printk(KERN_ERR PREFIX "Cannot map memory that high\n"); |
@@ -290,7 +306,9 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | |||
290 | if (!map) | 306 | if (!map) |
291 | return NULL; | 307 | return NULL; |
292 | 308 | ||
293 | virt = ioremap(phys, size); | 309 | pg_off = round_down(phys, PAGE_SIZE); |
310 | pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; | ||
311 | virt = ioremap(pg_off, pg_sz); | ||
294 | if (!virt) { | 312 | if (!virt) { |
295 | kfree(map); | 313 | kfree(map); |
296 | return NULL; | 314 | return NULL; |
@@ -298,21 +316,40 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size) | |||
298 | 316 | ||
299 | INIT_LIST_HEAD(&map->list); | 317 | INIT_LIST_HEAD(&map->list); |
300 | map->virt = virt; | 318 | map->virt = virt; |
301 | map->phys = phys; | 319 | map->phys = pg_off; |
302 | map->size = size; | 320 | map->size = pg_sz; |
321 | kref_init(&map->ref); | ||
303 | 322 | ||
304 | spin_lock_irqsave(&acpi_ioremap_lock, flags); | 323 | spin_lock_irqsave(&acpi_ioremap_lock, flags); |
324 | /* Check if page has already been mapped. */ | ||
325 | tmp_map = acpi_map_lookup(phys, size); | ||
326 | if (tmp_map) { | ||
327 | kref_get(&tmp_map->ref); | ||
328 | spin_unlock_irqrestore(&acpi_ioremap_lock, flags); | ||
329 | iounmap(map->virt); | ||
330 | kfree(map); | ||
331 | return tmp_map->virt + (phys - tmp_map->phys); | ||
332 | } | ||
305 | list_add_tail_rcu(&map->list, &acpi_ioremaps); | 333 | list_add_tail_rcu(&map->list, &acpi_ioremaps); |
306 | spin_unlock_irqrestore(&acpi_ioremap_lock, flags); | 334 | spin_unlock_irqrestore(&acpi_ioremap_lock, flags); |
307 | 335 | ||
308 | return virt; | 336 | return map->virt + (phys - map->phys); |
309 | } | 337 | } |
310 | EXPORT_SYMBOL_GPL(acpi_os_map_memory); | 338 | EXPORT_SYMBOL_GPL(acpi_os_map_memory); |
311 | 339 | ||
340 | static void acpi_kref_del_iomap(struct kref *ref) | ||
341 | { | ||
342 | struct acpi_ioremap *map; | ||
343 | |||
344 | map = container_of(ref, struct acpi_ioremap, ref); | ||
345 | list_del_rcu(&map->list); | ||
346 | } | ||
347 | |||
312 | void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) | 348 | void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) |
313 | { | 349 | { |
314 | struct acpi_ioremap *map; | 350 | struct acpi_ioremap *map; |
315 | unsigned long flags; | 351 | unsigned long flags; |
352 | int del; | ||
316 | 353 | ||
317 | if (!acpi_gbl_permanent_mmap) { | 354 | if (!acpi_gbl_permanent_mmap) { |
318 | __acpi_unmap_table(virt, size); | 355 | __acpi_unmap_table(virt, size); |
@@ -328,9 +365,12 @@ void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size) | |||
328 | return; | 365 | return; |
329 | } | 366 | } |
330 | 367 | ||
331 | list_del_rcu(&map->list); | 368 | del = kref_put(&map->ref, acpi_kref_del_iomap); |
332 | spin_unlock_irqrestore(&acpi_ioremap_lock, flags); | 369 | spin_unlock_irqrestore(&acpi_ioremap_lock, flags); |
333 | 370 | ||
371 | if (!del) | ||
372 | return; | ||
373 | |||
334 | synchronize_rcu(); | 374 | synchronize_rcu(); |
335 | iounmap(map->virt); | 375 | iounmap(map->virt); |
336 | kfree(map); | 376 | kfree(map); |