Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c | 246
1 file changed, 125 insertions(+), 121 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3e873f0101fb..05bc9af4fca9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -41,6 +41,131 @@ static int hugetlb_next_nid;
 static DEFINE_SPINLOCK(hugetlb_lock);
 
 /*
+ * Region tracking -- allows tracking of reservations and instantiated pages
+ * across the pages in a mapping.
+ */
+struct file_region {
+        struct list_head link;
+        long from;
+        long to;
+};
+
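An aside on the invariant these helpers maintain: the list holds half-open [from, to) spans of huge-page offsets into the mapping, kept sorted and disjoint. A file with huge pages 0-3 and 10-11 reserved is thus represented by exactly two nodes, {from = 0, to = 4} and {from = 10, to = 12}.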
+static long region_add(struct list_head *head, long f, long t)
+{
+        struct file_region *rg, *nrg, *trg;
+
+        /* Locate the region we are either in or before. */
+        list_for_each_entry(rg, head, link)
+                if (f <= rg->to)
+                        break;
+
+        /* Round our left edge to the current segment if it encloses us. */
+        if (f > rg->from)
+                f = rg->from;
+
+        /* Check for and consume any regions we now overlap with. */
+        nrg = rg;
+        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+                if (&rg->link == head)
+                        break;
+                if (rg->from > t)
+                        break;
+
+                /* If this area reaches higher then extend our area to
+                 * include it completely.  If this is not the first area
+                 * which we intend to reuse, free it. */
+                if (rg->to > t)
+                        t = rg->to;
+                if (rg != nrg) {
+                        list_del(&rg->link);
+                        kfree(rg);
+                }
+        }
+        nrg->from = f;
+        nrg->to = t;
+        return 0;
+}
+
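To make the merge concrete, here is a rough userspace model of what region_add() computes, using a flat array in place of the kernel list; struct span, span_add() and the driver below are invented for illustration. Like the kernel helper, it assumes some existing span already overlaps [f, t) (region_chg() guarantees that with its zero-size placeholder), so overwriting the first touched slot is safe:

        /* Userspace sketch of region_add()'s merge semantics; not kernel code. */
        #include <stdio.h>
        #include <string.h>

        struct span { long from, to; };

        /* Merge [f, t) into a sorted, disjoint table of *n spans.  Assumes an
         * existing span overlaps [f, t), as region_chg() arranges in the kernel. */
        static void span_add(struct span *s, int *n, long f, long t)
        {
                int i = 0, j;

                while (i < *n && s[i].to < f)           /* first span we touch */
                        i++;
                if (s[i].from < f)                      /* enclosed: widen left edge */
                        f = s[i].from;
                for (j = i; j < *n && s[j].from <= t; j++)
                        if (s[j].to > t)                /* swallow overlapped spans */
                                t = s[j].to;
                s[i] = (struct span){ f, t };           /* reuse first slot */
                memmove(&s[i + 1], &s[j], (size_t)(*n - j) * sizeof(*s));
                *n -= j - i - 1;
        }

        int main(void)
        {
                struct span s[8] = { { 0, 4 }, { 10, 12 } };
                int n = 2;

                span_add(s, &n, 3, 11);                 /* bridges both spans */
                for (int i = 0; i < n; i++)
                        printf("[%ld,%ld) ", s[i].from, s[i].to);
                printf("\n");                           /* prints: [0,12) */
                return 0;
        }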
+static long region_chg(struct list_head *head, long f, long t)
+{
+        struct file_region *rg, *nrg;
+        long chg = 0;
+
+        /* Locate the region we are before or in. */
+        list_for_each_entry(rg, head, link)
+                if (f <= rg->to)
+                        break;
+
+        /* If we are below the current region then a new region is required.
+         * Subtle, allocate a new region at the position but make it zero
+         * size such that we can guarantee to record the reservation. */
+        if (&rg->link == head || t < rg->from) {
+                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
+                if (!nrg)
+                        return -ENOMEM;
+                nrg->from = f;
+                nrg->to = f;
+                INIT_LIST_HEAD(&nrg->link);
+                list_add(&nrg->link, rg->link.prev);
+
+                return t - f;
+        }
+
+        /* Round our left edge to the current segment if it encloses us. */
+        if (f > rg->from)
+                f = rg->from;
+        chg = t - f;
+
+        /* Check for and consume any regions we now overlap with. */
+        list_for_each_entry(rg, rg->link.prev, link) {
+                if (&rg->link == head)
+                        break;
+                if (rg->from > t)
+                        return chg;
+
+                /* We overlap with this area, if it extends further than
+                 * us then we must extend ourselves.  Account for its
+                 * existing reservation. */
+                if (rg->to > t) {
+                        chg += rg->to - t;
+                        t = rg->to;
+                }
+                chg -= rg->to - rg->from;
+        }
+        return chg;
+}
+
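A worked example of the accounting: with existing reservations [0,4) and [10,12), region_chg(head, 3, 11) first widens f to 0 (the enclosing segment) and starts from chg = 11, then walks the overlapped regions: [0,4) is already reserved (chg -= 4, giving 7), and [10,12) extends past t, so chg += 1 and then chg -= 2, returning 6. That matches the union [0,12) being 12 pages of which 6 are already reserved. The zero-size node kmalloc'ed in the non-overlapping case is also why region_add() above has no allocation path: the charge step pre-plants the list node, so the later commit cannot fail.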
+static long region_truncate(struct list_head *head, long end)
+{
+        struct file_region *rg, *trg;
+        long chg = 0;
+
+        /* Locate the region we are either in or before. */
+        list_for_each_entry(rg, head, link)
+                if (end <= rg->to)
+                        break;
+        if (&rg->link == head)
+                return 0;
+
+        /* If we are in the middle of a region then adjust it. */
+        if (end > rg->from) {
+                chg = rg->to - end;
+                rg->to = end;
+                rg = list_entry(rg->link.next, typeof(*rg), link);
+        }
+
+        /* Drop any remaining regions. */
+        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
+                if (&rg->link == head)
+                        break;
+                chg += rg->to - rg->from;
+                list_del(&rg->link);
+                kfree(rg);
+        }
+        return chg;
+}
+
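And for truncation: starting again from [0,4) and [10,12), region_truncate(head, 2) trims the first region to [0,2) (chg = 4 - 2 = 2), then walks forward freeing everything that remains, picking up [10,12)'s two pages on the way, and returns 4, the number of reserved pages released.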
+/*
  * Convert the address within this vma to the page offset within
  * the mapping, in base page units.
  */
@@ -1429,127 +1554,6 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
         flush_tlb_range(vma, start, end);
 }
 
-struct file_region {
-        struct list_head link;
-        long from;
-        long to;
-};
-
-static long region_add(struct list_head *head, long f, long t)
-{
-        struct file_region *rg, *nrg, *trg;
-
-        /* Locate the region we are either in or before. */
-        list_for_each_entry(rg, head, link)
-                if (f <= rg->to)
-                        break;
-
-        /* Round our left edge to the current segment if it encloses us. */
-        if (f > rg->from)
-                f = rg->from;
-
-        /* Check for and consume any regions we now overlap with. */
-        nrg = rg;
-        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-                if (&rg->link == head)
-                        break;
-                if (rg->from > t)
-                        break;
-
-                /* If this area reaches higher then extend our area to
-                 * include it completely.  If this is not the first area
-                 * which we intend to reuse, free it. */
-                if (rg->to > t)
-                        t = rg->to;
-                if (rg != nrg) {
-                        list_del(&rg->link);
-                        kfree(rg);
-                }
-        }
-        nrg->from = f;
-        nrg->to = t;
-        return 0;
-}
-
-static long region_chg(struct list_head *head, long f, long t)
-{
-        struct file_region *rg, *nrg;
-        long chg = 0;
-
-        /* Locate the region we are before or in. */
-        list_for_each_entry(rg, head, link)
-                if (f <= rg->to)
-                        break;
-
-        /* If we are below the current region then a new region is required.
-         * Subtle, allocate a new region at the position but make it zero
-         * size such that we can guarantee to record the reservation. */
-        if (&rg->link == head || t < rg->from) {
-                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
-                if (!nrg)
-                        return -ENOMEM;
-                nrg->from = f;
-                nrg->to = f;
-                INIT_LIST_HEAD(&nrg->link);
-                list_add(&nrg->link, rg->link.prev);
-
-                return t - f;
-        }
-
-        /* Round our left edge to the current segment if it encloses us. */
-        if (f > rg->from)
-                f = rg->from;
-        chg = t - f;
-
-        /* Check for and consume any regions we now overlap with. */
-        list_for_each_entry(rg, rg->link.prev, link) {
-                if (&rg->link == head)
-                        break;
-                if (rg->from > t)
-                        return chg;
-
-                /* We overlap with this area, if it extends further than
-                 * us then we must extend ourselves.  Account for its
-                 * existing reservation. */
-                if (rg->to > t) {
-                        chg += rg->to - t;
-                        t = rg->to;
-                }
-                chg -= rg->to - rg->from;
-        }
-        return chg;
-}
-
-static long region_truncate(struct list_head *head, long end)
-{
-        struct file_region *rg, *trg;
-        long chg = 0;
-
-        /* Locate the region we are either in or before. */
-        list_for_each_entry(rg, head, link)
-                if (end <= rg->to)
-                        break;
-        if (&rg->link == head)
-                return 0;
-
-        /* If we are in the middle of a region then adjust it. */
-        if (end > rg->from) {
-                chg = rg->to - end;
-                rg->to = end;
-                rg = list_entry(rg->link.next, typeof(*rg), link);
-        }
-
-        /* Drop any remaining regions. */
-        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
-                if (&rg->link == head)
-                        break;
-                chg += rg->to - rg->from;
-                list_del(&rg->link);
-                kfree(rg);
-        }
-        return chg;
-}
-
 int hugetlb_reserve_pages(struct inode *inode,
                         long from, long to,
                         struct vm_area_struct *vma)
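The point of the chg/add split shows up in this function: the caller first asks region_chg() what a reservation of [from, to) would cost, charges for it in a step that is still allowed to fail, and only then commits with region_add(). A sketch of that pattern follows, with the charging reduced to a hypothetical try_charge() stand-in (the real function does quota and memory accounting here; this is not kernel code):

        /* Illustrative only -- reserve-then-commit around the region helpers;
         * try_charge() is a made-up placeholder, locking and cleanup elided. */
        static long reserve_range(struct list_head *regions, long from, long to)
        {
                long chg;

                chg = region_chg(regions, from, to);    /* may kmalloc a zero-size
                                                           placeholder node */
                if (chg < 0)
                        return chg;                     /* -ENOMEM */
                if (try_charge(chg))                    /* hypothetical accounting;
                                                           a leftover zero-size node
                                                           is harmless on failure */
                        return -ENOSPC;
                region_add(regions, from, to);          /* commit: no allocation,
                                                           cannot fail */
                return 0;
        }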