drivers/staging/zram/zram_drv.c
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010 Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices;

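/*
 * Statistics helpers: 32-bit counters are updated with plain stores,
 * while 64-bit counters are serialized by zram->stat64_lock.
 */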
static void zram_stat_inc(u32 *v)
{
        *v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
        *v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
        spin_lock(&zram->stat64_lock);
        *v = *v + inc;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
        spin_lock(&zram->stat64_lock);
        *v = *v - dec;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
        zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags &= ~BIT(flag);
}

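/* Return 1 if the page contains only zero bytes, 0 otherwise. */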
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}

static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
        if (!zram->disksize) {
                pr_info(
                "disk size not provided. You can use disksize_kb module "
                "param to specify size.\nUsing default: (%u%% of RAM).\n",
                default_disksize_perc_ram
                );
                zram->disksize = default_disksize_perc_ram *
                                        (totalram_bytes / 100);
        }

        if (zram->disksize > 2 * (totalram_bytes)) {
                pr_info(
                "There is little point creating a zram of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that zram uses about 0.1%% of the size of "
                "the disk when not in use so a huge zram is "
                "wasteful.\n"
                "\tMemory Size: %zu kB\n"
                "\tSize you selected: %llu kB\n"
                "Continuing anyway ...\n",
                totalram_bytes >> 10, zram->disksize
                );
        }

        zram->disksize &= PAGE_MASK;
}

static void zram_free_page(struct zram *zram, size_t index)
{
        unsigned long handle = zram->table[index].handle;
        u16 size = zram->table[index].size;

        if (unlikely(!handle)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                        zram_clear_flag(zram, index, ZRAM_ZERO);
                        zram_stat_dec(&zram->stats.pages_zero);
                }
                return;
        }

        if (unlikely(size > max_zpage_size))
                zram_stat_dec(&zram->stats.bad_compress);

        zs_free(zram->mem_pool, handle);

        if (size <= PAGE_SIZE / 2)
                zram_stat_dec(&zram->stats.good_compress);

        zram_stat64_sub(zram, &zram->stats.compr_size,
                        zram->table[index].size);
        zram_stat_dec(&zram->stats.pages_stored);

        zram->table[index].handle = 0;
        zram->table[index].size = 0;
}

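/* Zero the byte range covered by @bvec in its page. */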
static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page);
        memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        kunmap_atomic(user_mem);

        flush_dcache_page(page);
}

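/* A partial I/O covers less than a full zram page and needs read-modify-write handling. */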
static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}

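/*
 * Decompress the page at @index into the bio page. Zero-filled and
 * never-written pages are satisfied without touching the memory pool.
 */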
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        size_t clen;
        struct page *page;
        unsigned char *user_mem, *cmem, *uncmem = NULL;

        page = bvec->bv_page;

        if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                handle_zero_page(bvec);
                return 0;
        }

        /* Requested page is not present in compressed area */
        if (unlikely(!zram->table[index].handle)) {
                pr_debug("Read before write: sector=%lu, size=%u",
                        (ulong)(bio->bi_sector), bio->bi_size);
                handle_zero_page(bvec);
                return 0;
        }

        if (is_partial_io(bvec)) {
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!uncmem) {
                        pr_info("Error allocating temp memory!\n");
                        return -ENOMEM;
                }
        }

        user_mem = kmap_atomic(page);
        if (!is_partial_io(bvec))
                uncmem = user_mem;
        clen = PAGE_SIZE;

        cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);

        ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
                                uncmem, &clen);

        if (is_partial_io(bvec)) {
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                        bvec->bv_len);
                kfree(uncmem);
        }

        zs_unmap_object(zram->mem_pool, zram->table[index].handle);
        kunmap_atomic(user_mem);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                zram_stat64_inc(zram, &zram->stats.failed_reads);
                return ret;
        }

        flush_dcache_page(page);

        return 0;
}

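/*
 * Read the full (decompressed) page at @index into @mem. Used by the
 * write path to pre-fill the page before applying a partial update.
 */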
static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
        int ret;
        size_t clen = PAGE_SIZE;
        unsigned char *cmem;
        unsigned long handle = zram->table[index].handle;

        if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
                memset(mem, 0, PAGE_SIZE);
                return 0;
        }

        cmem = zs_map_object(zram->mem_pool, handle);
        ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
                                mem, &clen);
        zs_unmap_object(zram->mem_pool, handle);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                zram_stat64_inc(zram, &zram->stats.failed_reads);
                return ret;
        }

        return 0;
}

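/*
 * Compress the (possibly partial) page described by @bvec and store the
 * result in the memory pool at @index; all-zero pages only set a flag.
 */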
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset)
{
        int ret;
        size_t clen;
        unsigned long handle;
        struct page *page;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

        page = bvec->bv_page;
        src = zram->compress_buffer;

        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!uncmem) {
                        pr_info("Error allocating temp memory!\n");
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_read_before_write(zram, uncmem, index);
                if (ret) {
                        kfree(uncmem);
                        goto out;
                }
        }

        /*
         * System overwrites unused sectors. Free memory associated
         * with this sector now.
         */
        if (zram->table[index].handle ||
                zram_test_flag(zram, index, ZRAM_ZERO))
                zram_free_page(zram, index);

        user_mem = kmap_atomic(page);

        if (is_partial_io(bvec))
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                        bvec->bv_len);
        else
                uncmem = user_mem;

        if (page_zero_filled(uncmem)) {
                kunmap_atomic(user_mem);
                if (is_partial_io(bvec))
                        kfree(uncmem);
                zram_stat_inc(&zram->stats.pages_zero);
                zram_set_flag(zram, index, ZRAM_ZERO);
                ret = 0;
                goto out;
        }

        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                        zram->compress_workmem);

        kunmap_atomic(user_mem);
        if (is_partial_io(bvec))
                kfree(uncmem);

        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        if (unlikely(clen > max_zpage_size))
                zram_stat_inc(&zram->stats.bad_compress);

        handle = zs_malloc(zram->mem_pool, clen);
        if (!handle) {
                pr_info("Error allocating memory for compressed "
                        "page: %u, size=%zu\n", index, clen);
                ret = -ENOMEM;
                goto out;
        }
        cmem = zs_map_object(zram->mem_pool, handle);

        memcpy(cmem, src, clen);

        zs_unmap_object(zram->mem_pool, handle);

        zram->table[index].handle = handle;
        zram->table[index].size = clen;

        /* Update stats */
        zram_stat64_add(zram, &zram->stats.compr_size, clen);
        zram_stat_inc(&zram->stats.pages_stored);
        if (clen <= PAGE_SIZE / 2)
                zram_stat_inc(&zram->stats.good_compress);

        return 0;

out:
        if (ret)
                zram_stat64_inc(zram, &zram->stats.failed_writes);
        return ret;
}

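/* Dispatch a single-page bvec to the read or write path under zram->lock. */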
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio, int rw)
{
        int ret;

        if (rw == READ) {
                down_read(&zram->lock);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                up_read(&zram->lock);
        } else {
                down_write(&zram->lock);
                ret = zram_bvec_write(zram, bvec, index, offset);
                up_write(&zram->lock);
        }

        return ret;
}

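/* Advance the page index/offset past the bytes covered by @bvec. */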
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
        int i, offset;
        u32 index;
        struct bio_vec *bvec;

        switch (rw) {
        case READ:
                zram_stat64_inc(zram, &zram->stats.num_reads);
                break;
        case WRITE:
                zram_stat64_inc(zram, &zram->stats.num_writes);
                break;
        }

        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec->bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec->bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec->bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;

                        bv.bv_len = bvec->bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
                                < 0)
                                goto out;

                update_position(&index, &offset, bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        if (unlikely(
                (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
                (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
                (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

                return 0;
        }

        /* I/O request is valid */
        return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (unlikely(!zram->init_done) && zram_init_device(zram))
                goto error;

        down_read(&zram->init_lock);
        if (unlikely(!zram->init_done))
                goto error_unlock;

        if (!valid_io_request(zram, bio)) {
                zram_stat64_inc(zram, &zram->stats.invalid_io);
                goto error_unlock;
        }

        __zram_make_request(zram, bio, bio_data_dir(bio));
        up_read(&zram->init_lock);

        return;

error_unlock:
        up_read(&zram->init_lock);
error:
        bio_io_error(bio);
}

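/* Free all per-device resources; caller must hold zram->init_lock for writing. */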
void __zram_reset_device(struct zram *zram)
{
        size_t index;

        zram->init_done = 0;

        /* Free various per-device buffers */
        kfree(zram->compress_workmem);
        free_pages((unsigned long)zram->compress_buffer, 1);

        zram->compress_workmem = NULL;
        zram->compress_buffer = NULL;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                unsigned long handle = zram->table[index].handle;
                if (!handle)
                        continue;

                zs_free(zram->mem_pool, handle);
        }

        vfree(zram->table);
        zram->table = NULL;

        zs_destroy_pool(zram->mem_pool);
        zram->mem_pool = NULL;

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
}

void zram_reset_device(struct zram *zram)
{
        down_write(&zram->init_lock);
        __zram_reset_device(zram);
        up_write(&zram->init_lock);
}

int zram_init_device(struct zram *zram)
{
        int ret;
        size_t num_pages;

        down_write(&zram->init_lock);

        if (zram->init_done) {
                up_write(&zram->init_lock);
                return 0;
        }

        zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

        zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!zram->compress_workmem) {
                pr_err("Error allocating compressor working memory!\n");
                ret = -ENOMEM;
                goto fail_no_table;
        }

        zram->compress_buffer =
                (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
        if (!zram->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                ret = -ENOMEM;
                goto fail_no_table;
        }

        num_pages = zram->disksize >> PAGE_SHIFT;
        zram->table = vzalloc(num_pages * sizeof(*zram->table));
        if (!zram->table) {
                pr_err("Error allocating zram address table\n");
                ret = -ENOMEM;
                goto fail_no_table;
        }

        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

        zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
        if (!zram->mem_pool) {
                pr_err("Error creating memory pool\n");
                ret = -ENOMEM;
                goto fail;
        }

        zram->init_done = 1;
        up_write(&zram->init_lock);

        pr_debug("Initialization done!\n");
        return 0;

fail_no_table:
        /* To prevent accessing table entries during cleanup */
        zram->disksize = 0;
fail:
        __zram_reset_device(zram);
        up_write(&zram->init_lock);
        pr_err("Initialization failed: err=%d\n", ret);
        return ret;
}

static void zram_slot_free_notify(struct block_device *bdev,
                                unsigned long index)
{
        struct zram *zram;

        zram = bdev->bd_disk->private_data;
        zram_free_page(zram, index);
        zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};

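/* Allocate the request queue and gendisk for one zram device and register it. */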
static int create_device(struct zram *zram, int device_id)
{
        int ret = 0;

        init_rwsem(&zram->lock);
        init_rwsem(&zram->init_lock);
        spin_lock_init(&zram->stat64_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                blk_cleanup_queue(zram->queue);
                pr_warning("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZE sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);
        if (ret < 0) {
                pr_warning("Error creating sysfs group\n");
                goto out;
        }

        zram->init_done = 0;

out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        if (zram->disk) {
                del_gendisk(zram->disk);
                put_disk(zram->disk);
        }

        if (zram->queue)
                blk_cleanup_queue(zram->queue);
}

unsigned int zram_get_num_devices(void)
{
        return num_devices;
}

static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warning("Invalid value for num_devices: %u\n",
                        num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warning("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        if (!num_devices) {
                pr_info("num_devices not specified. Using default: 1\n");
                num_devices = 1;
        }

        /* Allocate the device array and initialize each one */
        pr_info("Creating %u devices ...\n", num_devices);
        zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!zram_devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&zram_devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&zram_devices[--dev_id]);
        kfree(zram_devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &zram_devices[i];

                destroy_device(zram);
                if (zram->init_done)
                        zram_reset_device(zram);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(zram_devices);
        pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");