author     Nitin Gupta <ngupta@vflare.org>        2010-06-01 04:01:25 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>    2010-06-18 15:49:48 -0400
commit     f1e3cfff4d58767a76fe71e18bffdeed10318b4e
tree       93bff0d4b079898446f1a1efa54bd68d7e4f5bcf /drivers/staging/zram
parent     16a4bfb9e9dfefbd28ee170fa3e259bc88d81eb5
Staging: zram: Rename ramzswap to zram in code
Automated renames in code:
 - rzs* -> zram*
 - RZS* -> ZRAM*
 - ramzswap* -> zram*

Manual changes:
 - Edited comments/messages mentioning "swap"

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/staging/zram')
-rw-r--r--  drivers/staging/zram/zram_drv.c   | 431
-rw-r--r--  drivers/staging/zram/zram_drv.h   |  66
-rw-r--r--  drivers/staging/zram/zram_ioctl.h |  21
3 files changed, 256 insertions, 262 deletions
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index e9b064c2148..3f778434dc9 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -1,5 +1,5 @@
 /*
- * Compressed RAM based swap device
+ * Compressed RAM block device
  *
  * Copyright (C) 2008, 2009, 2010 Nitin Gupta
  *
@@ -12,7 +12,7 @@
  * Project home: http://compcache.googlecode.com
  */
 
-#define KMSG_COMPONENT "ramzswap"
+#define KMSG_COMPONENT "zram"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/module.h>
@@ -26,35 +26,33 @@
 #include <linux/slab.h>
 #include <linux/lzo.h>
 #include <linux/string.h>
-#include <linux/swap.h>
-#include <linux/swapops.h>
 #include <linux/vmalloc.h>
 
 #include "zram_drv.h"
 
 /* Globals */
-static int ramzswap_major;
-static struct ramzswap *devices;
+static int zram_major;
+static struct zram *devices;
 
 /* Module params (documentation at end) */
 static unsigned int num_devices;
 
-static int rzs_test_flag(struct ramzswap *rzs, u32 index,
-                        enum rzs_pageflags flag)
+static int zram_test_flag(struct zram *zram, u32 index,
+                        enum zram_pageflags flag)
 {
-        return rzs->table[index].flags & BIT(flag);
+        return zram->table[index].flags & BIT(flag);
 }
 
-static void rzs_set_flag(struct ramzswap *rzs, u32 index,
-                        enum rzs_pageflags flag)
+static void zram_set_flag(struct zram *zram, u32 index,
+                        enum zram_pageflags flag)
 {
-        rzs->table[index].flags |= BIT(flag);
+        zram->table[index].flags |= BIT(flag);
 }
 
-static void rzs_clear_flag(struct ramzswap *rzs, u32 index,
-                        enum rzs_pageflags flag)
+static void zram_clear_flag(struct zram *zram, u32 index,
+                        enum zram_pageflags flag)
 {
-        rzs->table[index].flags &= ~BIT(flag);
+        zram->table[index].flags &= ~BIT(flag);
 }
 
 static int page_zero_filled(void *ptr)
@@ -72,50 +70,50 @@ static int page_zero_filled(void *ptr)
         return 1;
 }
 
-static void ramzswap_set_disksize(struct ramzswap *rzs, size_t totalram_bytes)
+static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
 {
-        if (!rzs->disksize) {
+        if (!zram->disksize) {
                 pr_info(
                 "disk size not provided. You can use disksize_kb module "
                 "param to specify size.\nUsing default: (%u%% of RAM).\n",
                 default_disksize_perc_ram
                 );
-                rzs->disksize = default_disksize_perc_ram *
+                zram->disksize = default_disksize_perc_ram *
                                         (totalram_bytes / 100);
         }
 
-        if (rzs->disksize > 2 * (totalram_bytes)) {
+        if (zram->disksize > 2 * (totalram_bytes)) {
                 pr_info(
-                "There is little point creating a ramzswap of greater than "
+                "There is little point creating a zram of greater than "
                 "twice the size of memory since we expect a 2:1 compression "
-                "ratio. Note that ramzswap uses about 0.1%% of the size of "
-                "the swap device when not in use so a huge ramzswap is "
+                "ratio. Note that zram uses about 0.1%% of the size of "
+                "the disk when not in use so a huge zram is "
                 "wasteful.\n"
                 "\tMemory Size: %zu kB\n"
                 "\tSize you selected: %zu kB\n"
                 "Continuing anyway ...\n",
-                totalram_bytes >> 10, rzs->disksize
+                totalram_bytes >> 10, zram->disksize
                 );
         }
 
-        rzs->disksize &= PAGE_MASK;
+        zram->disksize &= PAGE_MASK;
 }
 
-static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
-                struct ramzswap_ioctl_stats *s)
+static void zram_ioctl_get_stats(struct zram *zram,
+                struct zram_ioctl_stats *s)
 {
-        s->disksize = rzs->disksize;
+        s->disksize = zram->disksize;
 
-#if defined(CONFIG_RAMZSWAP_STATS)
+#if defined(CONFIG_ZRAM_STATS)
         {
-        struct ramzswap_stats *rs = &rzs->stats;
+        struct zram_stats *rs = &zram->stats;
         size_t succ_writes, mem_used;
         unsigned int good_compress_perc = 0, no_compress_perc = 0;
 
-        mem_used = xv_get_total_size_bytes(rzs->mem_pool)
+        mem_used = xv_get_total_size_bytes(zram->mem_pool)
                         + (rs->pages_expand << PAGE_SHIFT);
-        succ_writes = rzs_stat64_read(rzs, &rs->num_writes) -
-                        rzs_stat64_read(rzs, &rs->failed_writes);
+        succ_writes = zram_stat64_read(zram, &rs->num_writes) -
+                        zram_stat64_read(zram, &rs->failed_writes);
 
         if (succ_writes && rs->pages_stored) {
                 good_compress_perc = rs->good_compress * 100
@@ -124,12 +122,12 @@ static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
                                 / rs->pages_stored;
         }
 
-        s->num_reads = rzs_stat64_read(rzs, &rs->num_reads);
-        s->num_writes = rzs_stat64_read(rzs, &rs->num_writes);
-        s->failed_reads = rzs_stat64_read(rzs, &rs->failed_reads);
-        s->failed_writes = rzs_stat64_read(rzs, &rs->failed_writes);
-        s->invalid_io = rzs_stat64_read(rzs, &rs->invalid_io);
-        s->notify_free = rzs_stat64_read(rzs, &rs->notify_free);
+        s->num_reads = zram_stat64_read(zram, &rs->num_reads);
+        s->num_writes = zram_stat64_read(zram, &rs->num_writes);
+        s->failed_reads = zram_stat64_read(zram, &rs->failed_reads);
+        s->failed_writes = zram_stat64_read(zram, &rs->failed_writes);
+        s->invalid_io = zram_stat64_read(zram, &rs->invalid_io);
+        s->notify_free = zram_stat64_read(zram, &rs->notify_free);
         s->pages_zero = rs->pages_zero;
 
         s->good_compress_pct = good_compress_perc;
@@ -141,34 +139,34 @@ static void ramzswap_ioctl_get_stats(struct ramzswap *rzs,
         s->compr_data_size = rs->compr_size;
         s->mem_used_total = mem_used;
         }
-#endif /* CONFIG_RAMZSWAP_STATS */
+#endif /* CONFIG_ZRAM_STATS */
 }
 
-static void ramzswap_free_page(struct ramzswap *rzs, size_t index)
+static void zram_free_page(struct zram *zram, size_t index)
 {
         u32 clen;
         void *obj;
 
-        struct page *page = rzs->table[index].page;
-        u32 offset = rzs->table[index].offset;
+        struct page *page = zram->table[index].page;
+        u32 offset = zram->table[index].offset;
 
         if (unlikely(!page)) {
                 /*
                  * No memory is allocated for zero filled pages.
                  * Simply clear zero page flag.
                  */
-                if (rzs_test_flag(rzs, index, RZS_ZERO)) {
-                        rzs_clear_flag(rzs, index, RZS_ZERO);
-                        rzs_stat_dec(&rzs->stats.pages_zero);
+                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
+                        zram_clear_flag(zram, index, ZRAM_ZERO);
+                        zram_stat_dec(&zram->stats.pages_zero);
                 }
                 return;
         }
 
-        if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED))) {
+        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                 clen = PAGE_SIZE;
                 __free_page(page);
-                rzs_clear_flag(rzs, index, RZS_UNCOMPRESSED);
-                rzs_stat_dec(&rzs->stats.pages_expand);
+                zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
+                zram_stat_dec(&zram->stats.pages_expand);
                 goto out;
         }
 
@@ -176,16 +174,16 @@ static void ramzswap_free_page(struct ramzswap *rzs, size_t index)
         clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
         kunmap_atomic(obj, KM_USER0);
 
-        xv_free(rzs->mem_pool, page, offset);
+        xv_free(zram->mem_pool, page, offset);
         if (clen <= PAGE_SIZE / 2)
-                rzs_stat_dec(&rzs->stats.good_compress);
+                zram_stat_dec(&zram->stats.good_compress);
 
 out:
-        rzs->stats.compr_size -= clen;
-        rzs_stat_dec(&rzs->stats.pages_stored);
+        zram->stats.compr_size -= clen;
+        zram_stat_dec(&zram->stats.pages_stored);
 
-        rzs->table[index].page = NULL;
-        rzs->table[index].offset = 0;
+        zram->table[index].page = NULL;
+        zram->table[index].offset = 0;
 }
 
 static void handle_zero_page(struct page *page)
@@ -199,14 +197,14 @@ static void handle_zero_page(struct page *page)
         flush_dcache_page(page);
 }
 
-static void handle_uncompressed_page(struct ramzswap *rzs,
+static void handle_uncompressed_page(struct zram *zram,
                                 struct page *page, u32 index)
 {
         unsigned char *user_mem, *cmem;
 
         user_mem = kmap_atomic(page, KM_USER0);
-        cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
-                rzs->table[index].offset;
+        cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+                zram->table[index].offset;
 
         memcpy(user_mem, cmem, PAGE_SIZE);
         kunmap_atomic(user_mem, KM_USER0);
@@ -215,14 +213,14 @@ static void handle_uncompressed_page(struct ramzswap *rzs,
         flush_dcache_page(page);
 }
 
-static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
+static int zram_read(struct zram *zram, struct bio *bio)
 {
 
         int i;
         u32 index;
         struct bio_vec *bvec;
 
-        rzs_stat64_inc(rzs, &rzs->stats.num_reads);
+        zram_stat64_inc(zram, &zram->stats.num_reads);
 
         index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
         bio_for_each_segment(bvec, bio, i) {
@@ -234,13 +232,13 @@ static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
 
                 page = bvec->bv_page;
 
-                if (rzs_test_flag(rzs, index, RZS_ZERO)) {
+                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                         handle_zero_page(page);
                         continue;
                 }
 
                 /* Requested page is not present in compressed area */
-                if (unlikely(!rzs->table[index].page)) {
+                if (unlikely(!zram->table[index].page)) {
                         pr_debug("Read before write: sector=%lu, size=%u",
                                 (ulong)(bio->bi_sector), bio->bi_size);
                         /* Do nothing */
@@ -248,16 +246,16 @@ static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
                 }
 
                 /* Page is stored uncompressed since it's incompressible */
-                if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED))) {
-                        handle_uncompressed_page(rzs, page, index);
+                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
+                        handle_uncompressed_page(zram, page, index);
                         continue;
                 }
 
                 user_mem = kmap_atomic(page, KM_USER0);
                 clen = PAGE_SIZE;
 
-                cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
-                        rzs->table[index].offset;
+                cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+                        zram->table[index].offset;
 
                 ret = lzo1x_decompress_safe(
                         cmem + sizeof(*zheader),
@@ -271,7 +269,7 @@ static int ramzswap_read(struct ramzswap *rzs, struct bio *bio)
                 if (unlikely(ret != LZO_E_OK)) {
                         pr_err("Decompression failed! err=%d, page=%u\n",
                                 ret, index);
-                        rzs_stat64_inc(rzs, &rzs->stats.failed_reads);
+                        zram_stat64_inc(zram, &zram->stats.failed_reads);
                         goto out;
                 }
 
@@ -288,13 +286,13 @@ out:
         return 0;
 }
 
-static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
+static int zram_write(struct zram *zram, struct bio *bio)
 {
         int i;
         u32 index;
         struct bio_vec *bvec;
 
-        rzs_stat64_inc(rzs, &rzs->stats.num_writes);
+        zram_stat64_inc(zram, &zram->stats.num_writes);
 
         index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
 
@@ -307,82 +305,83 @@ static int ramzswap_write(struct ramzswap *rzs, struct bio *bio)
                 unsigned char *user_mem, *cmem, *src;
 
                 page = bvec->bv_page;
-                src = rzs->compress_buffer;
+                src = zram->compress_buffer;
 
                 /*
                  * System overwrites unused sectors. Free memory associated
                  * with this sector now.
                  */
-                if (rzs->table[index].page ||
-                                rzs_test_flag(rzs, index, RZS_ZERO))
-                        ramzswap_free_page(rzs, index);
+                if (zram->table[index].page ||
+                                zram_test_flag(zram, index, ZRAM_ZERO))
+                        zram_free_page(zram, index);
 
-                mutex_lock(&rzs->lock);
+                mutex_lock(&zram->lock);
 
                 user_mem = kmap_atomic(page, KM_USER0);
                 if (page_zero_filled(user_mem)) {
                         kunmap_atomic(user_mem, KM_USER0);
-                        mutex_unlock(&rzs->lock);
-                        rzs_stat_inc(&rzs->stats.pages_zero);
-                        rzs_set_flag(rzs, index, RZS_ZERO);
+                        mutex_unlock(&zram->lock);
+                        zram_stat_inc(&zram->stats.pages_zero);
+                        zram_set_flag(zram, index, ZRAM_ZERO);
                         continue;
                 }
 
                 ret = lzo1x_1_compress(user_mem, PAGE_SIZE, src, &clen,
-                                        rzs->compress_workmem);
+                                        zram->compress_workmem);
 
                 kunmap_atomic(user_mem, KM_USER0);
 
                 if (unlikely(ret != LZO_E_OK)) {
-                        mutex_unlock(&rzs->lock);
+                        mutex_unlock(&zram->lock);
                         pr_err("Compression failed! err=%d\n", ret);
-                        rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
+                        zram_stat64_inc(zram, &zram->stats.failed_writes);
                         goto out;
                 }
 
                 /*
                  * Page is incompressible. Store it as-is (uncompressed)
-                 * since we do not want to return too many swap write
+                 * since we do not want to return too many disk write
                  * errors which has side effect of hanging the system.
                  */
                 if (unlikely(clen > max_zpage_size)) {
                         clen = PAGE_SIZE;
                         page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
                         if (unlikely(!page_store)) {
-                                mutex_unlock(&rzs->lock);
+                                mutex_unlock(&zram->lock);
                                 pr_info("Error allocating memory for "
                                         "incompressible page: %u\n", index);
-                                rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
+                                zram_stat64_inc(zram,
+                                        &zram->stats.failed_writes);
                                 goto out;
                         }
 
                         offset = 0;
-                        rzs_set_flag(rzs, index, RZS_UNCOMPRESSED);
-                        rzs_stat_inc(&rzs->stats.pages_expand);
-                        rzs->table[index].page = page_store;
+                        zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
+                        zram_stat_inc(&zram->stats.pages_expand);
+                        zram->table[index].page = page_store;
                         src = kmap_atomic(page, KM_USER0);
                         goto memstore;
                 }
 
-                if (xv_malloc(rzs->mem_pool, clen + sizeof(*zheader),
-                                &rzs->table[index].page, &offset,
+                if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
+                                &zram->table[index].page, &offset,
                                 GFP_NOIO | __GFP_HIGHMEM)) {
-                        mutex_unlock(&rzs->lock);
+                        mutex_unlock(&zram->lock);
                         pr_info("Error allocating memory for compressed "
                                 "page: %u, size=%zu\n", index, clen);
-                        rzs_stat64_inc(rzs, &rzs->stats.failed_writes);
+                        zram_stat64_inc(zram, &zram->stats.failed_writes);
                         goto out;
                 }
 
 memstore:
-                rzs->table[index].offset = offset;
+                zram->table[index].offset = offset;
 
-                cmem = kmap_atomic(rzs->table[index].page, KM_USER1) +
-                        rzs->table[index].offset;
+                cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+                        zram->table[index].offset;
 
 #if 0
                 /* Back-reference needed for memory defragmentation */
-                if (!rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)) {
+                if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
                         zheader = (struct zobj_header *)cmem;
                         zheader->table_idx = index;
                         cmem += sizeof(*zheader);
@@ -392,16 +391,16 @@ memstore:
                 memcpy(cmem, src, clen);
 
                 kunmap_atomic(cmem, KM_USER1);
-                if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
+                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
                         kunmap_atomic(src, KM_USER0);
 
                 /* Update stats */
-                rzs->stats.compr_size += clen;
-                rzs_stat_inc(&rzs->stats.pages_stored);
+                zram->stats.compr_size += clen;
+                zram_stat_inc(&zram->stats.pages_stored);
                 if (clen <= PAGE_SIZE / 2)
-                        rzs_stat_inc(&rzs->stats.good_compress);
+                        zram_stat_inc(&zram->stats.good_compress);
 
-                mutex_unlock(&rzs->lock);
+                mutex_unlock(&zram->lock);
                 index++;
         }
 
@@ -417,10 +416,10 @@ out:
 /*
  * Check if request is within bounds and page aligned.
  */
-static inline int valid_io_request(struct ramzswap *rzs, struct bio *bio)
+static inline int valid_io_request(struct zram *zram, struct bio *bio)
 {
         if (unlikely(
-                (bio->bi_sector >= (rzs->disksize >> SECTOR_SHIFT)) ||
+                (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
                 (bio->bi_sector & (SECTORS_PER_PAGE - 1)) ||
                 (bio->bi_size & (PAGE_SIZE - 1)))) {
 
@@ -432,160 +431,160 @@ static inline int valid_io_request(struct ramzswap *rzs, struct bio *bio)
 }
 
 /*
- * Handler function for all ramzswap I/O requests.
+ * Handler function for all zram I/O requests.
  */
-static int ramzswap_make_request(struct request_queue *queue, struct bio *bio)
+static int zram_make_request(struct request_queue *queue, struct bio *bio)
 {
         int ret = 0;
-        struct ramzswap *rzs = queue->queuedata;
+        struct zram *zram = queue->queuedata;
 
-        if (unlikely(!rzs->init_done)) {
+        if (unlikely(!zram->init_done)) {
                 bio_io_error(bio);
                 return 0;
         }
 
-        if (!valid_io_request(rzs, bio)) {
-                rzs_stat64_inc(rzs, &rzs->stats.invalid_io);
+        if (!valid_io_request(zram, bio)) {
+                zram_stat64_inc(zram, &zram->stats.invalid_io);
                 bio_io_error(bio);
                 return 0;
         }
 
         switch (bio_data_dir(bio)) {
         case READ:
-                ret = ramzswap_read(rzs, bio);
+                ret = zram_read(zram, bio);
                 break;
 
         case WRITE:
-                ret = ramzswap_write(rzs, bio);
+                ret = zram_write(zram, bio);
                 break;
         }
 
         return ret;
 }
 
-static void reset_device(struct ramzswap *rzs)
+static void reset_device(struct zram *zram)
 {
         size_t index;
 
         /* Do not accept any new I/O request */
-        rzs->init_done = 0;
+        zram->init_done = 0;
 
         /* Free various per-device buffers */
-        kfree(rzs->compress_workmem);
-        free_pages((unsigned long)rzs->compress_buffer, 1);
+        kfree(zram->compress_workmem);
+        free_pages((unsigned long)zram->compress_buffer, 1);
 
-        rzs->compress_workmem = NULL;
-        rzs->compress_buffer = NULL;
+        zram->compress_workmem = NULL;
+        zram->compress_buffer = NULL;
 
-        /* Free all pages that are still in this ramzswap device */
-        for (index = 0; index < rzs->disksize >> PAGE_SHIFT; index++) {
+        /* Free all pages that are still in this zram device */
+        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                 struct page *page;
                 u16 offset;
 
-                page = rzs->table[index].page;
-                offset = rzs->table[index].offset;
+                page = zram->table[index].page;
+                offset = zram->table[index].offset;
 
                 if (!page)
                         continue;
 
-                if (unlikely(rzs_test_flag(rzs, index, RZS_UNCOMPRESSED)))
+                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
                         __free_page(page);
                 else
-                        xv_free(rzs->mem_pool, page, offset);
+                        xv_free(zram->mem_pool, page, offset);
         }
 
-        vfree(rzs->table);
-        rzs->table = NULL;
+        vfree(zram->table);
+        zram->table = NULL;
 
-        xv_destroy_pool(rzs->mem_pool);
-        rzs->mem_pool = NULL;
+        xv_destroy_pool(zram->mem_pool);
+        zram->mem_pool = NULL;
 
         /* Reset stats */
-        memset(&rzs->stats, 0, sizeof(rzs->stats));
+        memset(&zram->stats, 0, sizeof(zram->stats));
 
-        rzs->disksize = 0;
+        zram->disksize = 0;
 }
 
-static int ramzswap_ioctl_init_device(struct ramzswap *rzs)
+static int zram_ioctl_init_device(struct zram *zram)
 {
         int ret;
         size_t num_pages;
 
-        if (rzs->init_done) {
+        if (zram->init_done) {
                 pr_info("Device already initialized!\n");
                 return -EBUSY;
         }
 
-        ramzswap_set_disksize(rzs, totalram_pages << PAGE_SHIFT);
+        zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
 
-        rzs->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
-        if (!rzs->compress_workmem) {
+        zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+        if (!zram->compress_workmem) {
                 pr_err("Error allocating compressor working memory!\n");
                 ret = -ENOMEM;
                 goto fail;
         }
 
-        rzs->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
-        if (!rzs->compress_buffer) {
+        zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
+        if (!zram->compress_buffer) {
                 pr_err("Error allocating compressor buffer space\n");
                 ret = -ENOMEM;
                 goto fail;
         }
 
-        num_pages = rzs->disksize >> PAGE_SHIFT;
-        rzs->table = vmalloc(num_pages * sizeof(*rzs->table));
-        if (!rzs->table) {
-                pr_err("Error allocating ramzswap address table\n");
+        num_pages = zram->disksize >> PAGE_SHIFT;
+        zram->table = vmalloc(num_pages * sizeof(*zram->table));
+        if (!zram->table) {
+                pr_err("Error allocating zram address table\n");
                 /* To prevent accessing table entries during cleanup */
-                rzs->disksize = 0;
+                zram->disksize = 0;
                 ret = -ENOMEM;
                 goto fail;
         }
-        memset(rzs->table, 0, num_pages * sizeof(*rzs->table));
+        memset(zram->table, 0, num_pages * sizeof(*zram->table));
 
-        set_capacity(rzs->disk, rzs->disksize >> SECTOR_SHIFT);
+        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
 
-        /* ramzswap devices sort of resembles non-rotational disks */
-        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rzs->disk->queue);
+        /* zram devices sort of resembles non-rotational disks */
+        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
 
-        rzs->mem_pool = xv_create_pool();
-        if (!rzs->mem_pool) {
+        zram->mem_pool = xv_create_pool();
+        if (!zram->mem_pool) {
                 pr_err("Error creating memory pool\n");
                 ret = -ENOMEM;
                 goto fail;
         }
 
-        rzs->init_done = 1;
+        zram->init_done = 1;
 
         pr_debug("Initialization done!\n");
         return 0;
 
 fail:
-        reset_device(rzs);
+        reset_device(zram);
 
         pr_err("Initialization failed: err=%d\n", ret);
         return ret;
 }
 
-static int ramzswap_ioctl_reset_device(struct ramzswap *rzs)
+static int zram_ioctl_reset_device(struct zram *zram)
 {
-        if (rzs->init_done)
-                reset_device(rzs);
+        if (zram->init_done)
+                reset_device(zram);
 
         return 0;
 }
 
-static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
+static int zram_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, unsigned long arg)
 {
         int ret = 0;
         size_t disksize_kb;
 
-        struct ramzswap *rzs = bdev->bd_disk->private_data;
+        struct zram *zram = bdev->bd_disk->private_data;
 
         switch (cmd) {
-        case RZSIO_SET_DISKSIZE_KB:
-                if (rzs->init_done) {
+        case ZRAMIO_SET_DISKSIZE_KB:
+                if (zram->init_done) {
                         ret = -EBUSY;
                         goto out;
                 }
@@ -594,14 +593,14 @@ static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
                         ret = -EFAULT;
                         goto out;
                 }
-                rzs->disksize = disksize_kb << 10;
+                zram->disksize = disksize_kb << 10;
                 pr_info("Disk size set to %zu kB\n", disksize_kb);
                 break;
 
-        case RZSIO_GET_STATS:
+        case ZRAMIO_GET_STATS:
         {
-                struct ramzswap_ioctl_stats *stats;
-                if (!rzs->init_done) {
+                struct zram_ioctl_stats *stats;
+                if (!zram->init_done) {
                         ret = -ENOTTY;
                         goto out;
                 }
@@ -610,7 +609,7 @@ static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
                         ret = -ENOMEM;
                         goto out;
                 }
-                ramzswap_ioctl_get_stats(rzs, stats);
+                zram_ioctl_get_stats(zram, stats);
                 if (copy_to_user((void *)arg, stats, sizeof(*stats))) {
                         kfree(stats);
                         ret = -EFAULT;
@@ -619,11 +618,11 @@ static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
                 kfree(stats);
                 break;
         }
-        case RZSIO_INIT:
-                ret = ramzswap_ioctl_init_device(rzs);
+        case ZRAMIO_INIT:
+                ret = zram_ioctl_init_device(zram);
                 break;
 
-        case RZSIO_RESET:
+        case ZRAMIO_RESET:
                 /* Do not reset an active device! */
                 if (bdev->bd_holders) {
                         ret = -EBUSY;
@@ -634,7 +633,7 @@ static int ramzswap_ioctl(struct block_device *bdev, fmode_t mode,
                 if (bdev)
                         fsync_bdev(bdev);
 
-                ret = ramzswap_ioctl_reset_device(rzs);
+                ret = zram_ioctl_reset_device(zram);
                 break;
 
         default:
@@ -646,88 +645,88 @@ out:
         return ret;
 }
 
-void ramzswap_slot_free_notify(struct block_device *bdev, unsigned long index)
+void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
 {
-        struct ramzswap *rzs;
+        struct zram *zram;
 
-        rzs = bdev->bd_disk->private_data;
-        ramzswap_free_page(rzs, index);
-        rzs_stat64_inc(rzs, &rzs->stats.notify_free);
+        zram = bdev->bd_disk->private_data;
+        zram_free_page(zram, index);
+        zram_stat64_inc(zram, &zram->stats.notify_free);
 }
 
-static const struct block_device_operations ramzswap_devops = {
-        .ioctl = ramzswap_ioctl,
-        .swap_slot_free_notify = ramzswap_slot_free_notify,
+static const struct block_device_operations zram_devops = {
+        .ioctl = zram_ioctl,
+        .swap_slot_free_notify = zram_slot_free_notify,
         .owner = THIS_MODULE
 };
 
-static int create_device(struct ramzswap *rzs, int device_id)
+static int create_device(struct zram *zram, int device_id)
 {
         int ret = 0;
 
-        mutex_init(&rzs->lock);
-        spin_lock_init(&rzs->stat64_lock);
+        mutex_init(&zram->lock);
+        spin_lock_init(&zram->stat64_lock);
 
-        rzs->queue = blk_alloc_queue(GFP_KERNEL);
-        if (!rzs->queue) {
+        zram->queue = blk_alloc_queue(GFP_KERNEL);
+        if (!zram->queue) {
                 pr_err("Error allocating disk queue for device %d\n",
                         device_id);
                 ret = -ENOMEM;
                 goto out;
         }
 
-        blk_queue_make_request(rzs->queue, ramzswap_make_request);
-        rzs->queue->queuedata = rzs;
+        blk_queue_make_request(zram->queue, zram_make_request);
+        zram->queue->queuedata = zram;
 
         /* gendisk structure */
-        rzs->disk = alloc_disk(1);
-        if (!rzs->disk) {
-                blk_cleanup_queue(rzs->queue);
+        zram->disk = alloc_disk(1);
+        if (!zram->disk) {
+                blk_cleanup_queue(zram->queue);
                 pr_warning("Error allocating disk structure for device %d\n",
                         device_id);
                 ret = -ENOMEM;
                 goto out;
         }
 
-        rzs->disk->major = ramzswap_major;
-        rzs->disk->first_minor = device_id;
-        rzs->disk->fops = &ramzswap_devops;
-        rzs->disk->queue = rzs->queue;
-        rzs->disk->private_data = rzs;
-        snprintf(rzs->disk->disk_name, 16, "ramzswap%d", device_id);
+        zram->disk->major = zram_major;
+        zram->disk->first_minor = device_id;
+        zram->disk->fops = &zram_devops;
+        zram->disk->queue = zram->queue;
+        zram->disk->private_data = zram;
+        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
 
-        /* Actual capacity set using RZSIO_SET_DISKSIZE_KB ioctl */
-        set_capacity(rzs->disk, 0);
+        /* Actual capacity set using ZRAMIO_SET_DISKSIZE_KB ioctl */
+        set_capacity(zram->disk, 0);
 
         /*
          * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
         */
-        blk_queue_physical_block_size(rzs->disk->queue, PAGE_SIZE);
-        blk_queue_logical_block_size(rzs->disk->queue, PAGE_SIZE);
-        blk_queue_io_min(rzs->disk->queue, PAGE_SIZE);
-        blk_queue_io_opt(rzs->disk->queue, PAGE_SIZE);
+        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
+        blk_queue_logical_block_size(zram->disk->queue, PAGE_SIZE);
+        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
+        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 
-        add_disk(rzs->disk);
+        add_disk(zram->disk);
 
-        rzs->init_done = 0;
+        zram->init_done = 0;
 
 out:
         return ret;
 }
 
-static void destroy_device(struct ramzswap *rzs)
+static void destroy_device(struct zram *zram)
 {
-        if (rzs->disk) {
-                del_gendisk(rzs->disk);
-                put_disk(rzs->disk);
+        if (zram->disk) {
+                del_gendisk(zram->disk);
+                put_disk(zram->disk);
         }
 
-        if (rzs->queue)
-                blk_cleanup_queue(rzs->queue);
+        if (zram->queue)
+                blk_cleanup_queue(zram->queue);
 }
 
-static int __init ramzswap_init(void)
+static int __init zram_init(void)
 {
         int ret, dev_id;
 
@@ -738,8 +737,8 @@ static int __init ramzswap_init(void)
                 goto out;
         }
 
-        ramzswap_major = register_blkdev(0, "ramzswap");
-        if (ramzswap_major <= 0) {
+        zram_major = register_blkdev(0, "zram");
+        if (zram_major <= 0) {
                 pr_warning("Unable to get major number\n");
                 ret = -EBUSY;
                 goto out;
@@ -752,7 +751,7 @@ static int __init ramzswap_init(void)
 
         /* Allocate the device array and initialize each one */
         pr_info("Creating %u devices ...\n", num_devices);
-        devices = kzalloc(num_devices * sizeof(struct ramzswap), GFP_KERNEL);
+        devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
         if (!devices) {
                 ret = -ENOMEM;
                 goto unregister;
@@ -770,36 +769,36 @@ free_devices:
         while (dev_id)
                 destroy_device(&devices[--dev_id]);
 unregister:
-        unregister_blkdev(ramzswap_major, "ramzswap");
+        unregister_blkdev(zram_major, "zram");
out:
         return ret;
 }
 
-static void __exit ramzswap_exit(void)
+static void __exit zram_exit(void)
 {
         int i;
-        struct ramzswap *rzs;
+        struct zram *zram;
 
         for (i = 0; i < num_devices; i++) {
-                rzs = &devices[i];
+                zram = &devices[i];
 
-                destroy_device(rzs);
-                if (rzs->init_done)
-                        reset_device(rzs);
+                destroy_device(zram);
+                if (zram->init_done)
+                        reset_device(zram);
         }
 
-        unregister_blkdev(ramzswap_major, "ramzswap");
+        unregister_blkdev(zram_major, "zram");
 
         kfree(devices);
         pr_debug("Cleanup done!\n");
 }
 
 module_param(num_devices, uint, 0);
-MODULE_PARM_DESC(num_devices, "Number of ramzswap devices");
+MODULE_PARM_DESC(num_devices, "Number of zram devices");
 
-module_init(ramzswap_init);
-module_exit(ramzswap_exit);
+module_init(zram_init);
+module_exit(zram_exit);
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
-MODULE_DESCRIPTION("Compressed RAM Based Swap Device");
+MODULE_DESCRIPTION("Compressed RAM Block Device");
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index 4d2e48a2347..945f9740442 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -1,5 +1,5 @@
 /*
- * Compressed RAM based swap device
+ * Compressed RAM block device
  *
  * Copyright (C) 2008, 2009, 2010 Nitin Gupta
  *
@@ -12,8 +12,8 @@
  * Project home: http://compcache.googlecode.com
  */
 
-#ifndef _RAMZSWAP_DRV_H_
-#define _RAMZSWAP_DRV_H_
+#ifndef _ZRAM_DRV_H_
+#define _ZRAM_DRV_H_
 
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
@@ -41,7 +41,7 @@ struct zobj_header {
 
 /*-- Configurable parameters */
 
-/* Default ramzswap disk size: 25% of total RAM */
+/* Default zram disk size: 25% of total RAM */
 static const unsigned default_disksize_perc_ram = 25;
 
 /*
@@ -63,23 +63,20 @@ static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3;
 #define SECTORS_PER_PAGE_SHIFT  (PAGE_SHIFT - SECTOR_SHIFT)
 #define SECTORS_PER_PAGE        (1 << SECTORS_PER_PAGE_SHIFT)
 
-/* Flags for ramzswap pages (table[page_no].flags) */
-enum rzs_pageflags {
+/* Flags for zram pages (table[page_no].flags) */
+enum zram_pageflags {
         /* Page is stored uncompressed */
-        RZS_UNCOMPRESSED,
+        ZRAM_UNCOMPRESSED,
 
         /* Page consists entirely of zeros */
-        RZS_ZERO,
+        ZRAM_ZERO,
 
-        __NR_RZS_PAGEFLAGS,
+        __NR_ZRAM_PAGEFLAGS,
 };
 
 /*-- Data structures */
 
-/*
- * Allocated for each swap slot, indexed by page no.
- * These table entries must fit exactly in a page.
- */
+/* Allocated for each disk page */
 struct table {
         struct page *page;
         u16 offset;
@@ -87,17 +84,17 @@ struct table {
         u8 flags;
 } __attribute__((aligned(4)));
 
-struct ramzswap_stats {
+struct zram_stats {
         /* basic stats */
         size_t compr_size;      /* compressed size of pages stored -
                                  * needed to enforce memlimit */
         /* more stats */
-#if defined(CONFIG_RAMZSWAP_STATS)
+#if defined(CONFIG_ZRAM_STATS)
         u64 num_reads;          /* failed + successful */
         u64 num_writes;         /* --do-- */
         u64 failed_reads;       /* should NEVER! happen */
         u64 failed_writes;      /* can happen when memory is too low */
-        u64 invalid_io;         /* non-swap I/O requests */
+        u64 invalid_io;         /* non-page-aligned I/O requests */
         u64 notify_free;        /* no. of swap slot free notifications */
         u32 pages_zero;         /* no. of zero filled pages */
         u32 pages_stored;       /* no. of pages currently stored */
@@ -106,7 +103,7 @@ struct ramzswap_stats {
 #endif
 };
 
-struct ramzswap {
+struct zram {
         struct xv_pool *mem_pool;
         void *compress_workmem;
         void *compress_buffer;
@@ -118,51 +115,50 @@ struct ramzswap {
         struct gendisk *disk;
         int init_done;
         /*
-         * This is limit on amount of *uncompressed* worth of data
-         * we can hold. When backing swap device is provided, it is
-         * set equal to device size.
+         * This is the limit on amount of *uncompressed* worth of data
+         * we can store in a disk.
          */
         size_t disksize;        /* bytes */
 
-        struct ramzswap_stats stats;
+        struct zram_stats stats;
 };
 
 /*-- */
 
 /* Debugging and Stats */
-#if defined(CONFIG_RAMZSWAP_STATS)
-static void rzs_stat_inc(u32 *v)
+#if defined(CONFIG_ZRAM_STATS)
+static void zram_stat_inc(u32 *v)
 {
         *v = *v + 1;
 }
 
-static void rzs_stat_dec(u32 *v)
+static void zram_stat_dec(u32 *v)
 {
         *v = *v - 1;
 }
 
-static void rzs_stat64_inc(struct ramzswap *rzs, u64 *v)
+static void zram_stat64_inc(struct zram *zram, u64 *v)
 {
-        spin_lock(&rzs->stat64_lock);
+        spin_lock(&zram->stat64_lock);
         *v = *v + 1;
-        spin_unlock(&rzs->stat64_lock);
+        spin_unlock(&zram->stat64_lock);
 }
 
-static u64 rzs_stat64_read(struct ramzswap *rzs, u64 *v)
+static u64 zram_stat64_read(struct zram *zram, u64 *v)
 {
         u64 val;
 
-        spin_lock(&rzs->stat64_lock);
+        spin_lock(&zram->stat64_lock);
         val = *v;
-        spin_unlock(&rzs->stat64_lock);
+        spin_unlock(&zram->stat64_lock);
 
         return val;
 }
 #else
-#define rzs_stat_inc(v)
-#define rzs_stat_dec(v)
-#define rzs_stat64_inc(r, v)
-#define rzs_stat64_read(r, v)
-#endif /* CONFIG_RAMZSWAP_STATS */
+#define zram_stat_inc(v)
+#define zram_stat_dec(v)
+#define zram_stat64_inc(r, v)
+#define zram_stat64_read(r, v)
+#endif /* CONFIG_ZRAM_STATS */
 
 #endif
diff --git a/drivers/staging/zram/zram_ioctl.h b/drivers/staging/zram/zram_ioctl.h
index db94bcb4296..5c415fa4f17 100644
--- a/drivers/staging/zram/zram_ioctl.h
+++ b/drivers/staging/zram/zram_ioctl.h
@@ -1,5 +1,5 @@
 /*
- * Compressed RAM based swap device
+ * Compressed RAM block device
  *
  * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
@@ -12,17 +12,16 @@
  * Project home: http://compcache.googlecode.com
  */
 
-#ifndef _RAMZSWAP_IOCTL_H_
-#define _RAMZSWAP_IOCTL_H_
+#ifndef _ZRAM_IOCTL_H_
+#define _ZRAM_IOCTL_H_
 
-struct ramzswap_ioctl_stats {
-        u64 disksize;           /* user specified or equal to backing swap
-                                 * size (if present) */
+struct zram_ioctl_stats {
+        u64 disksize;           /* disksize in bytes (user specifies in KB) */
         u64 num_reads;          /* failed + successful */
         u64 num_writes;         /* --do-- */
         u64 failed_reads;       /* should NEVER! happen */
         u64 failed_writes;      /* can happen when memory is too low */
-        u64 invalid_io;         /* non-swap I/O requests */
+        u64 invalid_io;         /* non-page-aligned I/O requests */
         u64 notify_free;        /* no. of swap slot free notifications */
         u32 pages_zero;         /* no. of zero filled pages */
         u32 good_compress_pct;  /* no. of pages with compression ratio<=50% */
@@ -34,9 +33,9 @@ struct ramzswap_ioctl_stats {
         u64 mem_used_total;
 } __attribute__ ((packed, aligned(4)));
 
-#define RZSIO_SET_DISKSIZE_KB   _IOW('z', 0, size_t)
-#define RZSIO_GET_STATS         _IOR('z', 1, struct ramzswap_ioctl_stats)
-#define RZSIO_INIT              _IO('z', 2)
-#define RZSIO_RESET             _IO('z', 3)
+#define ZRAMIO_SET_DISKSIZE_KB  _IOW('z', 0, size_t)
+#define ZRAMIO_GET_STATS        _IOR('z', 1, struct zram_ioctl_stats)
+#define ZRAMIO_INIT             _IO('z', 2)
+#define ZRAMIO_RESET            _IO('z', 3)
 
 #endif
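
For reference, a minimal userspace sketch of how the renamed ioctl interface could be driven. This is illustrative only and not part of the commit: it assumes a /dev/zram0 node exists and that the u64/u32 type names expected by zram_ioctl.h are typedef'd from <linux/types.h> before the header is included.

/* Illustrative userspace sketch, not part of this commit: size,
 * initialize and query /dev/zram0 via the ZRAMIO_* ioctls above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>

typedef __u64 u64;      /* zram_ioctl.h uses kernel-style type names */
typedef __u32 u32;
#include "zram_ioctl.h"

int main(void)
{
        size_t disksize_kb = 262144;    /* 256 MiB, expressed in kB */
        struct zram_ioctl_stats stats;
        int fd = open("/dev/zram0", O_RDWR);

        if (fd < 0) {
                perror("open /dev/zram0");
                return 1;
        }

        /* Size must be set before init; the driver returns EBUSY after. */
        if (ioctl(fd, ZRAMIO_SET_DISKSIZE_KB, &disksize_kb) < 0)
                perror("ZRAMIO_SET_DISKSIZE_KB");

        if (ioctl(fd, ZRAMIO_INIT) < 0)
                perror("ZRAMIO_INIT");

        /* Stats are only available once the device is initialized. */
        if (ioctl(fd, ZRAMIO_GET_STATS, &stats) == 0)
                printf("disksize=%llu bytes reads=%llu writes=%llu\n",
                       (unsigned long long)stats.disksize,
                       (unsigned long long)stats.num_reads,
                       (unsigned long long)stats.num_writes);

        close(fd);
        return 0;
}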