| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-28 14:59:52 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-28 14:59:52 -0400 |
| commit | e9f29c9a56ca06d0effa557823a737cbe7ec09f7 (patch) | |
| tree | c331c4aa741a8f384ee13d0b08bd340c23164b16 /kernel | |
| parent | 800416f799e0723635ac2d720ad4449917a1481c (diff) | |
| parent | 1af3c2e45e7a641e774bbb84fa428f2f0bf2d9c9 (diff) | |
Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6
* 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/jbarnes/pci-2.6: (27 commits)
x86: allocate space within a region top-down
x86: update iomem_resource end based on CPU physical address capabilities
x86/PCI: allocate space from the end of a region, not the beginning
PCI: allocate bus resources from the top down
resources: support allocating space within a region from the top down
resources: handle overflow when aligning start of available area
resources: ensure callback doesn't allocate outside available space
resources: factor out resource_clip() to simplify find_resource()
resources: add a default alignf to simplify find_resource()
x86/PCI: MMCONFIG: fix region end calculation
PCI: Add support for polling PME state on suspended legacy PCI devices
PCI: Export some PCI PM functionality
PCI: fix message typo
PCI: log vendor/device ID always
PCI: update Intel chipset names and defines
PCI: use new ccflags variable in Makefile
PCI: add PCI_MSIX_TABLE/PBA defines
PCI: add PCI vendor id for STmicroelectronics
x86/PCI: irq and pci_ids patch for Intel Patsburg DeviceIDs
PCI: OLPC: Only enable PCI configuration type override on XO-1
...
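
One of the commits in this pull, "resources: handle overflow when aligning start of available area", adds the `avail.start >= tmp.start` check that appears in the diff below. As a rough standalone illustration (a user-space sketch, not code from this series; the `ALIGN()` definition and addresses are made up for the demo), aligning a start address near the top of the address space can wrap around to a value below the original start, which is exactly the case the check rejects:

```c
/*
 * Standalone sketch of the wraparound guarded against in find_resource()
 * and find_resource_from_top() below.  Illustrative only: ALIGN() here
 * mirrors the kernel macro, and the addresses are invented.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIGN(x, a)	(((x) + ((uint64_t)(a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t start = 0xfffffffffffff800ULL;		/* free space near the top */
	uint64_t aligned = ALIGN(start, 0x1000);	/* wraps around to 0 */

	/* The kernel-side test is effectively "aligned >= start". */
	printf("start=%#llx aligned=%#llx usable=%s\n",
	       (unsigned long long)start, (unsigned long long)aligned,
	       aligned >= start ? "yes" : "no");
	return 0;
}
```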
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/resource.c | 151
1 file changed, 135 insertions, 16 deletions
diff --git a/kernel/resource.c b/kernel/resource.c
index 9c9841cb6902..9fad33efd0db 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -40,6 +40,23 @@ EXPORT_SYMBOL(iomem_resource);
 
 static DEFINE_RWLOCK(resource_lock);
 
+/*
+ * By default, we allocate free space bottom-up. The architecture can request
+ * top-down by clearing this flag. The user can override the architecture's
+ * choice with the "resource_alloc_from_bottom" kernel boot option, but that
+ * should only be a debugging tool.
+ */
+int resource_alloc_from_bottom = 1;
+
+static __init int setup_alloc_from_bottom(char *s)
+{
+	printk(KERN_INFO
+	       "resource: allocating from bottom-up; please report a bug\n");
+	resource_alloc_from_bottom = 1;
+	return 0;
+}
+early_param("resource_alloc_from_bottom", setup_alloc_from_bottom);
+
 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct resource *p = v;
@@ -357,8 +374,97 @@ int __weak page_is_ram(unsigned long pfn)
 	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
 }
 
+static resource_size_t simple_align_resource(void *data,
+					     const struct resource *avail,
+					     resource_size_t size,
+					     resource_size_t align)
+{
+	return avail->start;
+}
+
+static void resource_clip(struct resource *res, resource_size_t min,
+			  resource_size_t max)
+{
+	if (res->start < min)
+		res->start = min;
+	if (res->end > max)
+		res->end = max;
+}
+
+static bool resource_contains(struct resource *res1, struct resource *res2)
+{
+	return res1->start <= res2->start && res1->end >= res2->end;
+}
+
+/*
+ * Find the resource before "child" in the sibling list of "root" children.
+ */
+static struct resource *find_sibling_prev(struct resource *root, struct resource *child)
+{
+	struct resource *this;
+
+	for (this = root->child; this; this = this->sibling)
+		if (this->sibling == child)
+			return this;
+
+	return NULL;
+}
+
 /*
  * Find empty slot in the resource tree given range and alignment.
+ * This version allocates from the end of the root resource first.
+ */
+static int find_resource_from_top(struct resource *root, struct resource *new,
+				  resource_size_t size, resource_size_t min,
+				  resource_size_t max, resource_size_t align,
+				  resource_size_t (*alignf)(void *,
+							    const struct resource *,
+							    resource_size_t,
+							    resource_size_t),
+				  void *alignf_data)
+{
+	struct resource *this;
+	struct resource tmp, avail, alloc;
+
+	tmp.start = root->end;
+	tmp.end = root->end;
+
+	this = find_sibling_prev(root, NULL);
+	for (;;) {
+		if (this) {
+			if (this->end < root->end)
+				tmp.start = this->end + 1;
+		} else
+			tmp.start = root->start;
+
+		resource_clip(&tmp, min, max);
+
+		/* Check for overflow after ALIGN() */
+		avail = *new;
+		avail.start = ALIGN(tmp.start, align);
+		avail.end = tmp.end;
+		if (avail.start >= tmp.start) {
+			alloc.start = alignf(alignf_data, &avail, size, align);
+			alloc.end = alloc.start + size - 1;
+			if (resource_contains(&avail, &alloc)) {
+				new->start = alloc.start;
+				new->end = alloc.end;
+				return 0;
+			}
+		}
+
+		if (!this || this->start == root->start)
+			break;
+
+		tmp.end = this->start - 1;
+		this = find_sibling_prev(root, this);
+	}
+	return -EBUSY;
+}
+
+/*
+ * Find empty slot in the resource tree given range and alignment.
+ * This version allocates from the beginning of the root resource first.
  */
 static int find_resource(struct resource *root, struct resource *new,
 			 resource_size_t size, resource_size_t min,
@@ -370,36 +476,43 @@ static int find_resource(struct resource *root, struct resource *new,
 			 void *alignf_data)
 {
 	struct resource *this = root->child;
-	struct resource tmp = *new;
+	struct resource tmp = *new, avail, alloc;
 
 	tmp.start = root->start;
 	/*
-	 * Skip past an allocated resource that starts at 0, since the assignment
-	 * of this->start - 1 to tmp->end below would cause an underflow.
+	 * Skip past an allocated resource that starts at 0, since the
+	 * assignment of this->start - 1 to tmp->end below would cause an
+	 * underflow.
 	 */
 	if (this && this->start == 0) {
 		tmp.start = this->end + 1;
 		this = this->sibling;
 	}
-	for(;;) {
+	for (;;) {
 		if (this)
 			tmp.end = this->start - 1;
 		else
 			tmp.end = root->end;
-		if (tmp.start < min)
-			tmp.start = min;
-		if (tmp.end > max)
-			tmp.end = max;
-		tmp.start = ALIGN(tmp.start, align);
-		if (alignf)
-			tmp.start = alignf(alignf_data, &tmp, size, align);
-		if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) {
-			new->start = tmp.start;
-			new->end = tmp.start + size - 1;
-			return 0;
+
+		resource_clip(&tmp, min, max);
+
+		/* Check for overflow after ALIGN() */
+		avail = *new;
+		avail.start = ALIGN(tmp.start, align);
+		avail.end = tmp.end;
+		if (avail.start >= tmp.start) {
+			alloc.start = alignf(alignf_data, &avail, size, align);
+			alloc.end = alloc.start + size - 1;
+			if (resource_contains(&avail, &alloc)) {
+				new->start = alloc.start;
+				new->end = alloc.end;
+				return 0;
+			}
 		}
+
 		if (!this)
 			break;
+
 		tmp.start = this->end + 1;
 		this = this->sibling;
 	}
@@ -428,8 +541,14 @@ int allocate_resource(struct resource *root, struct resource *new,
 {
 	int err;
 
+	if (!alignf)
+		alignf = simple_align_resource;
+
 	write_lock(&resource_lock);
-	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+	if (resource_alloc_from_bottom)
+		err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
+	else
+		err = find_resource_from_top(root, new, size, min, max, align, alignf, alignf_data);
 	if (err >= 0 && __request_resource(root, new))
 		err = -EBUSY;
 	write_unlock(&resource_lock);
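
For context, a hedged sketch of what a typical allocate_resource() caller looks like after this merge (the function name example_claim_region, the resource name, and the size/range values below are invented for illustration). Passing NULL for alignf keeps working because allocate_resource() now substitutes simple_align_resource() itself, and whether free space is searched bottom-up or top-down is decided by resource_alloc_from_bottom rather than by the caller:

```c
/*
 * Illustrative caller, not code from this merge.  Claims 64 KB of MMIO
 * space, 64 KB aligned, anywhere below 4 GB, letting the resource core
 * pick the search direction and the default alignment callback.
 */
#include <linux/ioport.h>

static struct resource example_res = {
	.name  = "example region",
	.flags = IORESOURCE_MEM,
};

static int example_claim_region(void)
{
	return allocate_resource(&iomem_resource, &example_res,
				 0x10000,	/* size  */
				 0,		/* min   */
				 0xffffffff,	/* max   */
				 0x10000,	/* align */
				 NULL,		/* alignf: use the default */
				 NULL);		/* alignf_data */
}
```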