Diffstat (limited to 'mm/swap.c')

 mm/swap.c | 76 ++++++++++++++++++++++++-------------
 1 file changed, 47 insertions(+), 29 deletions(-)
@@ -34,6 +34,25 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs. But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
+
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		VM_BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
+		del_page_from_lru(zone, page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
+}
+
 static void put_compound_page(struct page *page)
 {
 	page = (struct page *)page_private(page);
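
Since __page_cache_release() is now static, the hunk above only makes sense if its remaining caller lives in this file. A minimal sketch of the presumed non-compound release path in put_page() (the function body is outside the diff context, so this is an assumption, not part of the patch):

	/* Sketch (assumed, not shown by this diff): put_page() frees a
	 * non-compound page through the now-static __page_cache_release(). */
	void fastcall put_page(struct page *page)
	{
		if (unlikely(PageCompound(page)))
			put_compound_page(page);
		else if (put_page_testzero(page))
			__page_cache_release(page);
	}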
@@ -54,6 +73,26 @@ void put_page(struct page *page)
 }
 EXPORT_SYMBOL(put_page);
 
+/**
+ * put_pages_list(): release a list of pages
+ *
+ * Release a list of pages which are strung together on page.lru. Currently
+ * used by read_cache_pages() and related error recovery code.
+ *
+ * @pages: list of pages threaded on page->lru
+ */
+void put_pages_list(struct list_head *pages)
+{
+	while (!list_empty(pages)) {
+		struct page *victim;
+
+		victim = list_entry(pages->prev, struct page, lru);
+		list_del(&victim->lru);
+		page_cache_release(victim);
+	}
+}
+EXPORT_SYMBOL(put_pages_list);
+
 /*
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim. If it still appears to be reclaimable, move it to the tail of the
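
The kerneldoc above already names the intended callers; a minimal usage sketch (the caller is hypothetical, only put_pages_list() and the list API are real):

	LIST_HEAD(pages);

	/* A read_cache_pages()-style caller queues each page it owns with
	 *	list_add(&page->lru, &pages);
	 * On its error path it can then drop every remaining reference in
	 * one call; the list is empty when this returns. */
	put_pages_list(&pages);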
@@ -86,9 +125,8 @@ int rotate_reclaimable_page(struct page *page)
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lru_lock, flags);
 	if (PageLRU(page) && !PageActive(page)) {
-		list_del(&page->lru);
-		list_add_tail(&page->lru, &zone->inactive_list);
-		inc_page_state(pgrotated);
+		list_move_tail(&page->lru, &zone->inactive_list);
+		__count_vm_event(PGROTATED);
 	}
 	if (!test_clear_page_writeback(page))
 		BUG();
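
list_move_tail() is simply the fused form of the list_del()/list_add_tail() pair it replaces here; from include/linux/list.h it reads roughly:

	/* include/linux/list.h: unlink from the current position, then
	 * splice onto the tail of @head - identical effect, one call. */
	static inline void list_move_tail(struct list_head *list,
					  struct list_head *head)
	{
		__list_del(list->prev, list->next);
		list_add_tail(list, head);
	}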
@@ -108,7 +146,7 @@ void fastcall activate_page(struct page *page)
 		del_page_from_inactive_list(zone, page);
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
-		inc_page_state(pgactivate);
+		__count_vm_event(PGACTIVATE);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
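
Both converted call sites hold zone->lru_lock with interrupts disabled, so the preempt-unsafe __count_vm_event() variant is sufficient. Its definition in include/linux/vmstat.h is roughly a bare per-cpu increment:

	/* include/linux/vmstat.h (roughly): no preemption guard; the
	 * caller must already be pinned to one CPU, as under lru_lock. */
	static inline void __count_vm_event(enum vm_event_item item)
	{
		__get_cpu_var(vm_event_states).event[item]++;
	}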
@@ -204,26 +242,6 @@ int lru_add_drain_all(void)
 #endif
 
 /*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs. But it gets used by networking.
- */
-void fastcall __page_cache_release(struct page *page)
-{
-	if (PageLRU(page)) {
-		unsigned long flags;
-		struct zone *zone = page_zone(page);
-
-		spin_lock_irqsave(&zone->lru_lock, flags);
-		BUG_ON(!PageLRU(page));
-		__ClearPageLRU(page);
-		del_page_from_lru(zone, page);
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
-	}
-	free_hot_page(page);
-}
-EXPORT_SYMBOL(__page_cache_release);
-
-/*
  * Batched page_cache_release(). Decrement the reference count on all the
  * passed pages. If it fell to zero then remove the page from the LRU and
  * free it.
@@ -265,7 +283,7 @@ void release_pages(struct page **pages, int nr, int cold)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		VM_BUG_ON(!PageLRU(page));
+		VM_BUG_ON(!PageLRU(page));
 		__ClearPageLRU(page);
 		del_page_from_lru(zone, page);
 	}
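
This hunk and the three below make the same substitution: the LRU sanity checks become VM_BUG_ON(), which compiles to BUG_ON() only under CONFIG_DEBUG_VM and to nothing otherwise, taking the checks out of production fast paths:

	/* include/linux/mm.h */
	#ifdef CONFIG_DEBUG_VM
	#define VM_BUG_ON(cond) BUG_ON(cond)
	#else
	#define VM_BUG_ON(cond) do { } while (0)
	#endif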
@@ -318,7 +336,7 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		if (put_page_testzero(page))
 			pagevec_add(&pages_to_free, page);
 	}
@@ -345,7 +363,7 @@ void __pagevec_lru_add(struct pagevec *pvec)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 		add_page_to_inactive_list(zone, page);
 	}
@@ -372,9 +390,9 @@ void __pagevec_lru_add_active(struct pagevec *pvec)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		BUG_ON(PageActive(page));
+		VM_BUG_ON(PageActive(page));
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
 	}
