diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-10-08 15:05:50 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-10-08 15:05:50 -0400 |
commit | b924f9599dfd4a604761e84b1e920e480fb57f66 (patch) | |
tree | a1456ef8aea8beb8415d8258a978e072467d8ff6 /mm | |
parent | b9d40b7b1e349bdc5c174b4ef1a333e62f7d749c (diff) | |
parent | 2dca6999eed58d44b67e9de7d6ec230f6250553d (diff) |
Merge branch 'sparc-perf-events-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sparc-perf-events-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
mm, perf_event: Make vmalloc_user() align base kernel virtual address to SHMLBA
perf_event: Provide vmalloc() based mmap() backing
Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmalloc.c | 48 |
1 file changed, 26 insertions(+), 22 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 2f7c9d75c55..5e7aed0802b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/atomic.h> | 28 | #include <asm/atomic.h> |
29 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
30 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
31 | #include <asm/shmparam.h> | ||
31 | 32 | ||
32 | 33 | ||
33 | /*** Page table manipulation functions ***/ | 34 | /*** Page table manipulation functions ***/ |
@@ -1155,12 +1156,11 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, | |||
1155 | } | 1156 | } |
1156 | 1157 | ||
1157 | static struct vm_struct *__get_vm_area_node(unsigned long size, | 1158 | static struct vm_struct *__get_vm_area_node(unsigned long size, |
1158 | unsigned long flags, unsigned long start, unsigned long end, | 1159 | unsigned long align, unsigned long flags, unsigned long start, |
1159 | int node, gfp_t gfp_mask, void *caller) | 1160 | unsigned long end, int node, gfp_t gfp_mask, void *caller) |
1160 | { | 1161 | { |
1161 | static struct vmap_area *va; | 1162 | static struct vmap_area *va; |
1162 | struct vm_struct *area; | 1163 | struct vm_struct *area; |
1163 | unsigned long align = 1; | ||
1164 | 1164 | ||
1165 | BUG_ON(in_interrupt()); | 1165 | BUG_ON(in_interrupt()); |
1166 | if (flags & VM_IOREMAP) { | 1166 | if (flags & VM_IOREMAP) { |
@@ -1200,7 +1200,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, | |||
1200 | struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, | 1200 | struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags, |
1201 | unsigned long start, unsigned long end) | 1201 | unsigned long start, unsigned long end) |
1202 | { | 1202 | { |
1203 | return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL, | 1203 | return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, |
1204 | __builtin_return_address(0)); | 1204 | __builtin_return_address(0)); |
1205 | } | 1205 | } |
1206 | EXPORT_SYMBOL_GPL(__get_vm_area); | 1206 | EXPORT_SYMBOL_GPL(__get_vm_area); |
@@ -1209,7 +1209,7 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, | |||
1209 | unsigned long start, unsigned long end, | 1209 | unsigned long start, unsigned long end, |
1210 | void *caller) | 1210 | void *caller) |
1211 | { | 1211 | { |
1212 | return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL, | 1212 | return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL, |
1213 | caller); | 1213 | caller); |
1214 | } | 1214 | } |
1215 | 1215 | ||
@@ -1224,22 +1224,22 @@ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags, | |||
1224 | */ | 1224 | */ |
1225 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) | 1225 | struct vm_struct *get_vm_area(unsigned long size, unsigned long flags) |
1226 | { | 1226 | { |
1227 | return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, | 1227 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, |
1228 | -1, GFP_KERNEL, __builtin_return_address(0)); | 1228 | -1, GFP_KERNEL, __builtin_return_address(0)); |
1229 | } | 1229 | } |
1230 | 1230 | ||
1231 | struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, | 1231 | struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags, |
1232 | void *caller) | 1232 | void *caller) |
1233 | { | 1233 | { |
1234 | return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, | 1234 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, |
1235 | -1, GFP_KERNEL, caller); | 1235 | -1, GFP_KERNEL, caller); |
1236 | } | 1236 | } |
1237 | 1237 | ||
1238 | struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, | 1238 | struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, |
1239 | int node, gfp_t gfp_mask) | 1239 | int node, gfp_t gfp_mask) |
1240 | { | 1240 | { |
1241 | return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node, | 1241 | return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END, |
1242 | gfp_mask, __builtin_return_address(0)); | 1242 | node, gfp_mask, __builtin_return_address(0)); |
1243 | } | 1243 | } |
1244 | 1244 | ||
1245 | static struct vm_struct *find_vm_area(const void *addr) | 1245 | static struct vm_struct *find_vm_area(const void *addr) |
@@ -1402,7 +1402,8 @@ void *vmap(struct page **pages, unsigned int count, | |||
1402 | } | 1402 | } |
1403 | EXPORT_SYMBOL(vmap); | 1403 | EXPORT_SYMBOL(vmap); |
1404 | 1404 | ||
1405 | static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | 1405 | static void *__vmalloc_node(unsigned long size, unsigned long align, |
1406 | gfp_t gfp_mask, pgprot_t prot, | ||
1406 | int node, void *caller); | 1407 | int node, void *caller); |
1407 | static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | 1408 | static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, |
1408 | pgprot_t prot, int node, void *caller) | 1409 | pgprot_t prot, int node, void *caller) |
@@ -1416,7 +1417,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | |||
1416 | area->nr_pages = nr_pages; | 1417 | area->nr_pages = nr_pages; |
1417 | /* Please note that the recursion is strictly bounded. */ | 1418 | /* Please note that the recursion is strictly bounded. */ |
1418 | if (array_size > PAGE_SIZE) { | 1419 | if (array_size > PAGE_SIZE) { |
1419 | pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO, | 1420 | pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO, |
1420 | PAGE_KERNEL, node, caller); | 1421 | PAGE_KERNEL, node, caller); |
1421 | area->flags |= VM_VPAGES; | 1422 | area->flags |= VM_VPAGES; |
1422 | } else { | 1423 | } else { |
@@ -1475,6 +1476,7 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) | |||
1475 | /** | 1476 | /** |
1476 | * __vmalloc_node - allocate virtually contiguous memory | 1477 | * __vmalloc_node - allocate virtually contiguous memory |
1477 | * @size: allocation size | 1478 | * @size: allocation size |
1479 | * @align: desired alignment | ||
1478 | * @gfp_mask: flags for the page level allocator | 1480 | * @gfp_mask: flags for the page level allocator |
1479 | * @prot: protection mask for the allocated pages | 1481 | * @prot: protection mask for the allocated pages |
1480 | * @node: node to use for allocation or -1 | 1482 | * @node: node to use for allocation or -1 |
@@ -1484,8 +1486,9 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) | |||
1484 | * allocator with @gfp_mask flags. Map them into contiguous | 1486 | * allocator with @gfp_mask flags. Map them into contiguous |
1485 | * kernel virtual space, using a pagetable protection of @prot. | 1487 | * kernel virtual space, using a pagetable protection of @prot. |
1486 | */ | 1488 | */ |
1487 | static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | 1489 | static void *__vmalloc_node(unsigned long size, unsigned long align, |
1488 | int node, void *caller) | 1490 | gfp_t gfp_mask, pgprot_t prot, |
1491 | int node, void *caller) | ||
1489 | { | 1492 | { |
1490 | struct vm_struct *area; | 1493 | struct vm_struct *area; |
1491 | void *addr; | 1494 | void *addr; |
@@ -1495,8 +1498,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | |||
1495 | if (!size || (size >> PAGE_SHIFT) > totalram_pages) | 1498 | if (!size || (size >> PAGE_SHIFT) > totalram_pages) |
1496 | return NULL; | 1499 | return NULL; |
1497 | 1500 | ||
1498 | area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END, | 1501 | area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START, |
1499 | node, gfp_mask, caller); | 1502 | VMALLOC_END, node, gfp_mask, caller); |
1500 | 1503 | ||
1501 | if (!area) | 1504 | if (!area) |
1502 | return NULL; | 1505 | return NULL; |
@@ -1515,7 +1518,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, | |||
1515 | 1518 | ||
1516 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) | 1519 | void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) |
1517 | { | 1520 | { |
1518 | return __vmalloc_node(size, gfp_mask, prot, -1, | 1521 | return __vmalloc_node(size, 1, gfp_mask, prot, -1, |
1519 | __builtin_return_address(0)); | 1522 | __builtin_return_address(0)); |
1520 | } | 1523 | } |
1521 | EXPORT_SYMBOL(__vmalloc); | 1524 | EXPORT_SYMBOL(__vmalloc); |
@@ -1531,7 +1534,7 @@ EXPORT_SYMBOL(__vmalloc); | |||
1531 | */ | 1534 | */ |
1532 | void *vmalloc(unsigned long size) | 1535 | void *vmalloc(unsigned long size) |
1533 | { | 1536 | { |
1534 | return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, | 1537 | return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, |
1535 | -1, __builtin_return_address(0)); | 1538 | -1, __builtin_return_address(0)); |
1536 | } | 1539 | } |
1537 | EXPORT_SYMBOL(vmalloc); | 1540 | EXPORT_SYMBOL(vmalloc); |
@@ -1548,7 +1551,8 @@ void *vmalloc_user(unsigned long size) | |||
1548 | struct vm_struct *area; | 1551 | struct vm_struct *area; |
1549 | void *ret; | 1552 | void *ret; |
1550 | 1553 | ||
1551 | ret = __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | 1554 | ret = __vmalloc_node(size, SHMLBA, |
1555 | GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, | ||
1552 | PAGE_KERNEL, -1, __builtin_return_address(0)); | 1556 | PAGE_KERNEL, -1, __builtin_return_address(0)); |
1553 | if (ret) { | 1557 | if (ret) { |
1554 | area = find_vm_area(ret); | 1558 | area = find_vm_area(ret); |
@@ -1571,7 +1575,7 @@ EXPORT_SYMBOL(vmalloc_user); | |||
1571 | */ | 1575 | */ |
1572 | void *vmalloc_node(unsigned long size, int node) | 1576 | void *vmalloc_node(unsigned long size, int node) |
1573 | { | 1577 | { |
1574 | return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, | 1578 | return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, |
1575 | node, __builtin_return_address(0)); | 1579 | node, __builtin_return_address(0)); |
1576 | } | 1580 | } |
1577 | EXPORT_SYMBOL(vmalloc_node); | 1581 | EXPORT_SYMBOL(vmalloc_node); |
@@ -1594,7 +1598,7 @@ EXPORT_SYMBOL(vmalloc_node); | |||
1594 | 1598 | ||
1595 | void *vmalloc_exec(unsigned long size) | 1599 | void *vmalloc_exec(unsigned long size) |
1596 | { | 1600 | { |
1597 | return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, | 1601 | return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, |
1598 | -1, __builtin_return_address(0)); | 1602 | -1, __builtin_return_address(0)); |
1599 | } | 1603 | } |
1600 | 1604 | ||
@@ -1615,7 +1619,7 @@ void *vmalloc_exec(unsigned long size) | |||
1615 | */ | 1619 | */ |
1616 | void *vmalloc_32(unsigned long size) | 1620 | void *vmalloc_32(unsigned long size) |
1617 | { | 1621 | { |
1618 | return __vmalloc_node(size, GFP_VMALLOC32, PAGE_KERNEL, | 1622 | return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, |
1619 | -1, __builtin_return_address(0)); | 1623 | -1, __builtin_return_address(0)); |
1620 | } | 1624 | } |
1621 | EXPORT_SYMBOL(vmalloc_32); | 1625 | EXPORT_SYMBOL(vmalloc_32); |
@@ -1632,7 +1636,7 @@ void *vmalloc_32_user(unsigned long size) | |||
1632 | struct vm_struct *area; | 1636 | struct vm_struct *area; |
1633 | void *ret; | 1637 | void *ret; |
1634 | 1638 | ||
1635 | ret = __vmalloc_node(size, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, | 1639 | ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, |
1636 | -1, __builtin_return_address(0)); | 1640 | -1, __builtin_return_address(0)); |
1637 | if (ret) { | 1641 | if (ret) { |
1638 | area = find_vm_area(ret); | 1642 | area = find_vm_area(ret); |