Diffstat (limited to 'kernel/power')
 kernel/power/swap.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 12cd989dadf6..160e1006640d 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -37,6 +37,14 @@
 #define HIBERNATE_SIG	"S1SUSPEND"
 
 /*
+ * When reading an {un,}compressed image, we may restore pages in place,
+ * in which case some architectures need these pages cleaning before they
+ * can be executed. We don't know which pages these may be, so clean the lot.
+ */
+static bool clean_pages_on_read;
+static bool clean_pages_on_decompress;
+
+/*
  * The swap map is a data structure used for keeping track of each page
  * written to a swap partition.  It consists of many swap_map_page
  * structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
@@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio)
 
 	if (bio_data_dir(bio) == WRITE)
 		put_page(page);
+	else if (clean_pages_on_read)
+		flush_icache_range((unsigned long)page_address(page),
+				   (unsigned long)page_address(page) + PAGE_SIZE);
 
 	if (bio->bi_error && !hb->error)
 		hb->error = bio->bi_error;
@@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle,
 
 	hib_init_batch(&hb);
 
+	clean_pages_on_read = true;
 	printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
 		nr_to_read);
 	m = nr_to_read / 10;
@@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data)
 		d->unc_len = LZO_UNC_SIZE;
 		d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
 		                               d->unc, &d->unc_len);
+		if (clean_pages_on_decompress)
+			flush_icache_range((unsigned long)d->unc,
+					   (unsigned long)d->unc + d->unc_len);
+
 		atomic_set(&d->stop, 1);
 		wake_up(&d->done);
 	}
@@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
 	}
 	memset(crc, 0, offsetof(struct crc_data, go));
 
+	clean_pages_on_decompress = true;
+
 	/*
 	 * Start the decompression threads.
 	 */
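
For context, the pattern this patch applies is the usual one for making freshly written pages safe to execute on architectures whose instruction and data caches are not coherent: write the bytes, then flush the icache for the affected range before anything can jump into it. The sketch below is illustrative only and is not part of the patch; the helper name make_restored_page_coherent is hypothetical, while flush_icache_range() and page_address() are the real kernel interfaces the patch uses.

/*
 * Illustrative sketch (not part of the patch above). On architectures
 * with incoherent I/D caches, bytes written by the CPU or by DMA are
 * not guaranteed to be visible to instruction fetch until the range
 * has been cleaned and the icache invalidated, which is what
 * flush_icache_range() does. Hibernation may restore kernel text in
 * place, so every restored page gets this treatment.
 */
#include <linux/mm.h>		/* struct page, page_address(), PAGE_SIZE */
#include <asm/cacheflush.h>	/* flush_icache_range() */

/* Hypothetical helper: make one restored page safe to execute. */
static void make_restored_page_coherent(struct page *page)
{
	unsigned long start = (unsigned long)page_address(page);

	/*
	 * Clean the dcache and invalidate the icache for this page so
	 * the CPU cannot fetch stale instructions after resume jumps
	 * into image data that was restored here.
	 */
	flush_icache_range(start, start + PAGE_SIZE);
}

Since the restore path cannot tell which pages will hold executable text, the patch flushes every page as it is read (in hib_end_io() for uncompressed images) or decompressed (in lzo_decompress_threadfn() for LZO images), gated by the two flags so the write path pays no cost.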