zram: do not pass rw argument to __zram_make_request()
drivers/block/zram/zram_drv.c
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", init_done(zram));
}

static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}

/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		(ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

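/*
 * Allocate per-device metadata: LZO scratch memory, a two-page
 * compression buffer (LZO output can exceed PAGE_SIZE in the worst
 * case), the per-page handle table, and the zsmalloc pool that backs
 * the compressed data.
 */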
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	rwlock_init(&meta->tb_lock);
	mutex_init(&meta->buffer_lock);
	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

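/*
 * Advance the (index, offset) cursor past the bytes covered by @bvec;
 * crossing a page boundary moves @index to the next zram page.
 */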
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

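/* Return 1 if the page at @ptr contains only zero bytes. */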
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

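/*
 * Serve a read of a zero-filled page by clearing the caller's buffer;
 * no compressed data is stored for such pages.
 */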
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/* NOTE: caller should hold the write side of meta->tb_lock */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		atomic_dec(&zram->stats.bad_compress);

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		atomic_dec(&zram->stats.good_compress);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
	atomic_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

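/*
 * Decompress the page at @index into @mem (PAGE_SIZE bytes).
 * Unallocated and ZRAM_ZERO slots read back as a cleared page, and
 * pages stored uncompressed (size == PAGE_SIZE) are simply copied.
 */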
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	u16 size;

	read_lock(&meta->tb_lock);
	handle = meta->table[index].handle;
	size = meta->table[index].size;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);
	read_unlock(&meta->tb_lock);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

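/*
 * Read handler for a single bio_vec. A full-page read decompresses
 * directly into the target page; a partial read goes through a bounce
 * buffer so that only the requested byte range is copied out.
 */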
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	read_lock(&meta->tb_lock);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		handle_zero_page(bvec);
		return 0;
	}
	read_unlock(&meta->tb_lock);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

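/*
 * Write handler for a single bio_vec. A partial write first
 * decompresses the existing page (read-modify-write). Zero-filled
 * pages are only flagged ZRAM_ZERO and consume no pool memory, while
 * pages that compress poorly (clen > max_zpage_size) are stored
 * uncompressed.
 */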
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	bool locked = false;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	mutex_lock(&meta->buffer_lock);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
			bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		write_unlock(&zram->meta->tb_lock);

		atomic_inc(&zram->stats.pages_zero);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			meta->compress_workmem);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		atomic_inc(&zram->stats.bad_compress);
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	write_lock(&zram->meta->tb_lock);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;
	write_unlock(&zram->meta->tb_lock);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
	atomic_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		atomic_inc(&zram->stats.good_compress);

out:
	if (locked)
		mutex_unlock(&meta->buffer_lock);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

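/*
 * The transfer direction is derived from the bio itself via
 * bio_data_dir(), so callers no longer need to pass an rw argument
 * down the stack.
 */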
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	return ret;
}

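/*
 * Release everything the device holds: stored objects, metadata and
 * stats. @reset_capacity is false only on module exit, where the
 * gendisk has already been released by destroy_device().
 */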
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	pr_debug("Initialization done!\n");
}

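/*
 * Writing /sys/block/zram<id>/disksize sizes the device. The value is
 * parsed with memparse() (suffixes such as "512M" work), page-aligned,
 * and may only be set while the device is uninitialized.
 */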
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

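/*
 * Walk the bio segment by segment. A bio_vec may straddle a zram page
 * boundary, in which case it is split so that zram_bvec_rw() only ever
 * operates within a single page.
 */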
static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

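/*
 * Called by the swap layer when a swap slot is freed, so the
 * compressed copy can be released immediately instead of lingering
 * until the slot is overwritten.
 */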
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	write_lock(&meta->tb_lock);
	zram_free_page(zram, index);
	write_unlock(&meta->tb_lock);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}

	zram->meta = NULL;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

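/*
 * Typical userspace setup (a sketch; device names depend on
 * num_devices and module load order):
 *
 *	modprobe zram num_devices=4
 *	echo 512M > /sys/block/zram0/disksize
 *	mkswap /dev/zram0
 *	swapon /dev/zram0
 */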
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");