mmc: card: block.c cleanup for host claim/release.
drivers/mmc/card/block.c
/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author: Andrew Christian
 *         28 May 2002
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/system.h>
#include <asm/uaccess.h>

#include "queue.h"

MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define REL_WRITES_SUPPORTED(card) (mmc_card_mmc((card)) && \
        (((card)->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || \
         ((card)->ext_csd.rel_sectors)))

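/*
 * Editor's note (not in the original source): REL_WRITES_SUPPORTED() is
 * true only for MMC cards that either advertise the enhanced reliable
 * write mode in EXT_CSD (the WR_REL_PARAM enable bit) or report a legacy
 * reliable write sector count. A hypothetical probe-time check could
 * look like this:
 */
#if 0   /* usage sketch only, assuming a valid struct mmc_card *card */
        if (REL_WRITES_SUPPORTED(card))
                pr_info("%s: reliable writes supported (rel_sectors=%u)\n",
                        mmc_hostname(card->host), card->ext_csd.rel_sectors);
#endif
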
static DEFINE_MUTEX(block_mutex);

/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;

/*
 * We've only got one major, so the number of mmcblk devices is
 * limited to 256 / number of minors per device.
 */
static int max_devices;

/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
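
/*
 * Worked example (editor's note): with the default CONFIG_MMC_BLOCK_MINORS
 * of 8, max_devices = 256 / 8 = 32, and device index i owns the minor
 * range [i * 8, i * 8 + 7]; mmcblk0 gets minors 0-7, mmcblk1 gets 8-15,
 * and so on.
 */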

/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
        spinlock_t      lock;
        struct gendisk  *disk;
        struct mmc_queue queue;

        unsigned int    usage;
        unsigned int    read_only;
};

static DEFINE_MUTEX(open_lock);

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
        struct mmc_blk_data *md;

        mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
        mutex_unlock(&open_lock);

        return md;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
        mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                int devmaj = MAJOR(disk_devt(md->disk));
                int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;

                if (!devmaj)
                        devidx = md->disk->first_minor / perdev_minors;

                blk_cleanup_queue(md->queue.queue);

                __clear_bit(devidx, dev_use);

                put_disk(md->disk);
                kfree(md);
        }
        mutex_unlock(&open_lock);
}
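
/*
 * Usage sketch (editor's addition): mmc_blk_get()/mmc_blk_put() implement
 * a plain reference count under open_lock, and the disk's private data is
 * only valid while a reference is held:
 */
#if 0   /* illustrative only */
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);

        if (md) {
                /* ... safe to touch md->disk and md->queue here ... */
                mmc_blk_put(md);        /* frees md once usage reaches 0 */
        }
#endif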

static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
        int ret = -ENXIO;

        mutex_lock(&block_mutex);
        if (md) {
                if (md->usage == 2)
                        check_disk_change(bdev);
                ret = 0;

                if ((mode & FMODE_WRITE) && md->read_only) {
                        mmc_blk_put(md);
                        ret = -EROFS;
                }
        }
        mutex_unlock(&block_mutex);

        return ret;
}

static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
        struct mmc_blk_data *md = disk->private_data;

        mutex_lock(&block_mutex);
        mmc_blk_put(md);
        mutex_unlock(&block_mutex);
        return 0;
}

static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
        geo->heads = 4;
        geo->sectors = 16;
        return 0;
}
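
/*
 * Worked example (editor's note): the geometry is synthetic, since flash
 * cards have no real cylinder/head/sector layout. A card exposing 7744512
 * sectors reports heads = 4, sectors = 16 and cylinders = 7744512 / 64 =
 * 121008, so cylinders * heads * sectors reproduces the capacity exactly;
 * partitioning tools only need the numbers to be self-consistent.
 */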

static const struct block_device_operations mmc_bdops = {
        .open           = mmc_blk_open,
        .release        = mmc_blk_release,
        .getgeo         = mmc_blk_getgeo,
        .owner          = THIS_MODULE,
};

struct mmc_blk_request {
        struct mmc_request      mrq;
        struct mmc_command      cmd;
        struct mmc_command      stop;
        struct mmc_data         data;
};

static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
        int err;
        u32 result;
        __be32 *blocks;

        struct mmc_request mrq;
        struct mmc_command cmd;
        struct mmc_data data;
        unsigned int timeout_us;

        struct scatterlist sg;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = MMC_APP_CMD;
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                return (u32)-1;
        if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
                return (u32)-1;

        memset(&cmd, 0, sizeof(struct mmc_command));

        cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

        memset(&data, 0, sizeof(struct mmc_data));

        data.timeout_ns = card->csd.tacc_ns * 100;
        data.timeout_clks = card->csd.tacc_clks * 100;

        timeout_us = data.timeout_ns / 1000;
        timeout_us += data.timeout_clks * 1000 /
                (card->host->ios.clock / 1000);

        if (timeout_us > 100000) {
                data.timeout_ns = 100000000;
                data.timeout_clks = 0;
        }
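
        /*
         * Worked example (editor's note): with tacc_ns = 1000000 (1 ms)
         * and tacc_clks = 0, the scaled timeout is 100 * 1 ms = 100 ms,
         * i.e. timeout_us = 100000, exactly at the threshold; anything
         * slower is capped by the 100 ms ceiling above, a conventional
         * upper bound for SD data timeouts.
         */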

        data.blksz = 4;
        data.blocks = 1;
        data.flags = MMC_DATA_READ;
        data.sg = &sg;
        data.sg_len = 1;

        memset(&mrq, 0, sizeof(struct mmc_request));

        mrq.cmd = &cmd;
        mrq.data = &data;

        blocks = kmalloc(4, GFP_KERNEL);
        if (!blocks)
                return (u32)-1;

        sg_init_one(&sg, blocks, 4);

        mmc_wait_for_req(card->host, &mrq);

        result = ntohl(*blocks);
        kfree(blocks);

        if (cmd.error || data.error)
                result = (u32)-1;

        return result;
}
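
/*
 * Editor's note: the function above issues ACMD22 (SEND_NUM_WR_BLOCKS),
 * which returns the number of well-written blocks from the preceding
 * multiple-block write as a 32-bit big-endian value; hence the __be32
 * buffer and the ntohl() conversion before the result is used.
 */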

static u32 get_card_status(struct mmc_card *card, struct request *req)
{
        struct mmc_command cmd;
        int err;

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = MMC_SEND_STATUS;
        if (!mmc_host_is_spi(card->host))
                cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err)
                printk(KERN_ERR "%s: error %d sending status command\n",
                       req->rq_disk->disk_name, err);
        return cmd.resp[0];
}
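
/*
 * Illustrative sketch (editor's addition): the R1 status returned above
 * packs the card state into bits 12:9, which callers decode roughly as:
 */
#if 0   /* sketch only */
        u32 status = get_card_status(card, req);
        u32 state = (status >> 9) & 0xF;        /* R1_CURRENT_STATE() */

        if (state == 7)                         /* 7 == programming */
                /* the card is still busy committing a write */;
#endif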

static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        if (!mmc_can_erase(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card))
                arg = MMC_TRIM_ARG;
        else
                arg = MMC_ERASE_ARG;

        err = mmc_erase(card, from, nr, arg);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err ? 0 : 1;
}

static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
                                       struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        unsigned int from, nr, arg;
        int err = 0;

        if (!mmc_can_secure_erase_trim(card)) {
                err = -EOPNOTSUPP;
                goto out;
        }

        from = blk_rq_pos(req);
        nr = blk_rq_sectors(req);

        if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
                arg = MMC_SECURE_TRIM1_ARG;
        else
                arg = MMC_SECURE_ERASE_ARG;

        err = mmc_erase(card, from, nr, arg);
        if (!err && arg == MMC_SECURE_TRIM1_ARG)
                err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
out:
        spin_lock_irq(&md->lock);
        __blk_end_request(req, err, blk_rq_bytes(req));
        spin_unlock_irq(&md->lock);

        return err ? 0 : 1;
}
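
/*
 * Editor's note: secure trim is a two-phase operation; SECURE_TRIM1 marks
 * the candidate range and SECURE_TRIM2 then purges everything marked,
 * which is why a successful TRIM1 above is immediately followed by a
 * TRIM2 over the same range. Fully erase-group-aligned ranges take the
 * single-step secure erase path instead.
 */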

static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;

        /*
         * No-op, only service this because we need REQ_FUA for reliable
         * writes.
         */
        spin_lock_irq(&md->lock);
        __blk_end_request_all(req, 0);
        spin_unlock_irq(&md->lock);

        return 1;
}

/*
 * Reformat the current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finishing the request in
 * partial completions.
 */
static inline int mmc_apply_rel_rw(struct mmc_blk_request *brq,
                                   struct mmc_card *card,
                                   struct request *req)
{
        int err;
        struct mmc_command set_count;

        if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
                /* Legacy mode imposes restrictions on transfers. */
                if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
                        brq->data.blocks = 1;

                if (brq->data.blocks > card->ext_csd.rel_sectors)
                        brq->data.blocks = card->ext_csd.rel_sectors;
                else if (brq->data.blocks < card->ext_csd.rel_sectors)
                        brq->data.blocks = 1;
        }

        memset(&set_count, 0, sizeof(struct mmc_command));
        set_count.opcode = MMC_SET_BLOCK_COUNT;
        set_count.arg = brq->data.blocks | (1 << 31);
        set_count.flags = MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &set_count, 0);
        if (err)
                printk(KERN_ERR "%s: error %d SET_BLOCK_COUNT\n",
                       req->rq_disk->disk_name, err);
        return err;
}
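
/*
 * Worked example (editor's note): on a legacy card with rel_sectors = 8,
 * a 13-block write starting at an 8-aligned address is clamped to exactly
 * 8 blocks (the remainder is retried on the next pass of the issue loop),
 * while any write smaller than 8 blocks, or one starting at an unaligned
 * address, is shrunk to a single block. Enhanced-mode cards skip the
 * clamping entirely and only need the SET_BLOCK_COUNT with bit 31 set to
 * flag the transfer as reliable.
 */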

static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
{
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;
        struct mmc_blk_request brq;
        int ret = 1, disable_multi = 0;

        /*
         * Reliable writes are used to implement Forced Unit Access and
         * REQ_META accesses, and are supported only on MMCs.
         */
        bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
                          (req->cmd_flags & REQ_META)) &&
                         (rq_data_dir(req) == WRITE) &&
                         REL_WRITES_SUPPORTED(card);

        do {
                struct mmc_command cmd;
                u32 readcmd, writecmd, status = 0;

                memset(&brq, 0, sizeof(struct mmc_blk_request));
                brq.mrq.cmd = &brq.cmd;
                brq.mrq.data = &brq.data;

                brq.cmd.arg = blk_rq_pos(req);
                if (!mmc_card_blockaddr(card))
                        brq.cmd.arg <<= 9;
                brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
                brq.data.blksz = 512;
                brq.stop.opcode = MMC_STOP_TRANSMISSION;
                brq.stop.arg = 0;
                brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
                brq.data.blocks = blk_rq_sectors(req);

                /*
                 * The block layer doesn't support all sector count
                 * restrictions, so we need to be prepared for too big
                 * requests.
                 */
                if (brq.data.blocks > card->host->max_blk_count)
                        brq.data.blocks = card->host->max_blk_count;

                /*
                 * After a read error, we redo the request one sector at a
                 * time in order to accurately determine which sectors can
                 * be read successfully.
                 */
                if (disable_multi && brq.data.blocks > 1)
                        brq.data.blocks = 1;

                if (brq.data.blocks > 1 || do_rel_wr) {
                        /* SPI multiblock writes terminate using a special
                         * token, not a STOP_TRANSMISSION request. Reliable
                         * writes use SET_BLOCK_COUNT and do not use a
                         * STOP_TRANSMISSION request either.
                         */
                        if ((!mmc_host_is_spi(card->host) && !do_rel_wr) ||
                            rq_data_dir(req) == READ)
                                brq.mrq.stop = &brq.stop;
                        readcmd = MMC_READ_MULTIPLE_BLOCK;
                        writecmd = MMC_WRITE_MULTIPLE_BLOCK;
                } else {
                        brq.mrq.stop = NULL;
                        readcmd = MMC_READ_SINGLE_BLOCK;
                        writecmd = MMC_WRITE_BLOCK;
                }
                if (rq_data_dir(req) == READ) {
                        brq.cmd.opcode = readcmd;
                        brq.data.flags |= MMC_DATA_READ;
                } else {
                        brq.cmd.opcode = writecmd;
                        brq.data.flags |= MMC_DATA_WRITE;
                }

                if (do_rel_wr && mmc_apply_rel_rw(&brq, card, req))
                        goto cmd_err;

                mmc_set_data_timeout(&brq.data, card);

                brq.data.sg = mq->sg;
                brq.data.sg_len = mmc_queue_map_sg(mq);

                /*
                 * Adjust the sg list so it is the same size as the
                 * request.
                 */
                if (brq.data.blocks != blk_rq_sectors(req)) {
                        int i, data_size = brq.data.blocks << 9;
                        struct scatterlist *sg;

                        for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
                                data_size -= sg->length;
                                if (data_size <= 0) {
                                        sg->length += data_size;
                                        i++;
                                        break;
                                }
                        }
                        brq.data.sg_len = i;
                }
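
                /*
                 * Worked example (editor's note): if the request covers 24
                 * sectors but blocks was clamped to 8, data_size starts at
                 * 8 << 9 = 4096 bytes. Walking sg entries of 3072 and 3072
                 * bytes: after the first, data_size = 1024; after the
                 * second, data_size = -2048, so that entry is trimmed to
                 * 3072 - 2048 = 1024 bytes and sg_len becomes 2.
                 */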

                mmc_queue_bounce_pre(mq);

                mmc_wait_for_req(card->host, &brq.mrq);

                mmc_queue_bounce_post(mq);

                /*
                 * Check for errors here, but don't jump to cmd_err
                 * until later as we need to wait for the card to leave
                 * programming mode even when things go wrong.
                 */
                if (brq.cmd.error || brq.data.error || brq.stop.error) {
                        if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
                                /* Redo read one sector at a time */
                                printk(KERN_WARNING "%s: retrying using single "
                                       "block read\n", req->rq_disk->disk_name);
                                disable_multi = 1;
                                continue;
                        }
                        status = get_card_status(card, req);
                }

                if (brq.cmd.error) {
                        printk(KERN_ERR "%s: error %d sending read/write "
                               "command, response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.cmd.error,
                               brq.cmd.resp[0], status);
                }

                if (brq.data.error) {
                        if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
                                /* 'Stop' response contains card status */
                                status = brq.mrq.stop->resp[0];
                        printk(KERN_ERR "%s: error %d transferring data,"
                               " sector %u, nr %u, card status %#x\n",
                               req->rq_disk->disk_name, brq.data.error,
                               (unsigned)blk_rq_pos(req),
                               (unsigned)blk_rq_sectors(req), status);
                }

                if (brq.stop.error) {
                        printk(KERN_ERR "%s: error %d sending stop command, "
                               "response %#x, card status %#x\n",
                               req->rq_disk->disk_name, brq.stop.error,
                               brq.stop.resp[0], status);
                }

                if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
                        do {
                                int err;

                                cmd.opcode = MMC_SEND_STATUS;
                                cmd.arg = card->rca << 16;
                                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                                err = mmc_wait_for_cmd(card->host, &cmd, 5);
                                if (err) {
                                        printk(KERN_ERR "%s: error %d requesting status\n",
                                               req->rq_disk->disk_name, err);
                                        goto cmd_err;
                                }
                                /*
                                 * Some cards mishandle the status bits,
                                 * so make sure to check both the busy
                                 * indication and the card state.
                                 */
                        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                                 (R1_CURRENT_STATE(cmd.resp[0]) == 7));

#if 0
                        if (cmd.resp[0] & ~0x00000900)
                                printk(KERN_ERR "%s: status = %08x\n",
                                       req->rq_disk->disk_name, cmd.resp[0]);
                        if (mmc_decode_status(cmd.resp))
                                goto cmd_err;
#endif
                }

                if (brq.cmd.error || brq.stop.error || brq.data.error) {
                        if (rq_data_dir(req) == READ) {
                                /*
                                 * After an error, we redo I/O one sector at a
                                 * time, so we only reach here after trying to
                                 * read a single sector.
                                 */
                                spin_lock_irq(&md->lock);
                                ret = __blk_end_request(req, -EIO, brq.data.blksz);
                                spin_unlock_irq(&md->lock);
                                continue;
                        }
                        goto cmd_err;
                }

                /*
                 * A block was successfully transferred.
                 */
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        } while (ret);

        return 1;

 cmd_err:
        /*
         * If this is an SD card and we're writing, we can first
         * mark the known good sectors as ok.
         *
         * If the card is not SD, we can still ok written sectors
         * as reported by the controller (which might be less than
         * the real number of written sectors, but never more).
         */
        if (mmc_card_sd(card)) {
                u32 blocks;

                blocks = mmc_sd_num_wr_blocks(card);
                if (blocks != (u32)-1) {
                        spin_lock_irq(&md->lock);
                        ret = __blk_end_request(req, 0, blocks << 9);
                        spin_unlock_irq(&md->lock);
                }
        } else {
                spin_lock_irq(&md->lock);
                ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
                spin_unlock_irq(&md->lock);
        }

        spin_lock_irq(&md->lock);
        while (ret)
                ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
        spin_unlock_irq(&md->lock);

        return 0;
}

static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
        int ret;
        struct mmc_blk_data *md = mq->data;
        struct mmc_card *card = md->queue.card;

        mmc_claim_host(card->host);

        if (req->cmd_flags & REQ_DISCARD) {
                if (req->cmd_flags & REQ_SECURE)
                        ret = mmc_blk_issue_secdiscard_rq(mq, req);
                else
                        ret = mmc_blk_issue_discard_rq(mq, req);
        } else if (req->cmd_flags & REQ_FLUSH) {
                ret = mmc_blk_issue_flush(mq, req);
        } else {
                ret = mmc_blk_issue_rw_rq(mq, req);
        }

        mmc_release_host(card->host);
        return ret;
}
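
/*
 * Editor's note: this dispatcher is the point of the claim/release
 * cleanup named in the commit title. The host is claimed once here and
 * released once on every path, so the per-request helpers above can
 * assume exclusive bus ownership and none of them needs its own
 * mmc_claim_host()/mmc_release_host() pair. A minimal sketch of the
 * bracketing pattern:
 */
#if 0   /* illustrative only */
        mmc_claim_host(card->host);     /* may sleep until the bus is free */
        /* ... issue one or more commands atomically ... */
        mmc_release_host(card->host);   /* wakes the next waiter */
#endif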

static inline int mmc_blk_readonly(struct mmc_card *card)
{
        return mmc_card_readonly(card) ||
               !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}

static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
        struct mmc_blk_data *md;
        int devidx, ret;

        devidx = find_first_zero_bit(dev_use, max_devices);
        if (devidx >= max_devices)
                return ERR_PTR(-ENOSPC);
        __set_bit(devidx, dev_use);

        md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
        if (!md) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * Set the read-only status based on the supported commands
         * and the write protect switch.
         */
        md->read_only = mmc_blk_readonly(card);

        md->disk = alloc_disk(perdev_minors);
        if (md->disk == NULL) {
                ret = -ENOMEM;
                goto err_kfree;
        }

        spin_lock_init(&md->lock);
        md->usage = 1;

        ret = mmc_init_queue(&md->queue, card, &md->lock);
        if (ret)
                goto err_putdisk;

        md->queue.issue_fn = mmc_blk_issue_rq;
        md->queue.data = md;

        md->disk->major = MMC_BLOCK_MAJOR;
        md->disk->first_minor = devidx * perdev_minors;
        md->disk->fops = &mmc_bdops;
        md->disk->private_data = md;
        md->disk->queue = md->queue.queue;
        md->disk->driverfs_dev = &card->dev;
        set_disk_ro(md->disk, md->read_only);
        if (REL_WRITES_SUPPORTED(card))
                blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);

        /*
         * As discussed on lkml, GENHD_FL_REMOVABLE should:
         *
         * - be set for removable media with permanent block devices
         * - be unset for removable block devices with permanent media
         *
         * Since MMC block devices clearly fall under the second
         * case, we do not set GENHD_FL_REMOVABLE. Userspace
         * should use the block device creation/destruction hotplug
         * messages to tell when the card is present.
         */

        snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
                 "mmcblk%d", devidx);

        blk_queue_logical_block_size(md->queue.queue, 512);

        if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
                /*
                 * The EXT_CSD sector count is in number of 512 byte
                 * sectors.
                 */
                set_capacity(md->disk, card->ext_csd.sectors);
        } else {
                /*
                 * The CSD capacity field is in units of read_blkbits.
                 * set_capacity takes units of 512 bytes.
                 */
                set_capacity(md->disk,
                        card->csd.capacity << (card->csd.read_blkbits - 9));
        }
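
        /*
         * Worked example (editor's note): a CSD reporting
         * read_blkbits = 10 (1 KiB blocks) and capacity = 1000448
         * yields 1000448 << (10 - 9) = 2000896 sectors of 512 bytes,
         * i.e. exactly 977 MiB exposed to the block layer.
         */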
        return md;

 err_putdisk:
        put_disk(md->disk);
 err_kfree:
        kfree(md);
 out:
        return ERR_PTR(ret);
}

static int
mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
{
        int err;

        mmc_claim_host(card->host);
        err = mmc_set_blocklen(card, 512);
        mmc_release_host(card->host);

        if (err) {
                printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
                       md->disk->disk_name, err);
                return -EINVAL;
        }

        return 0;
}

static int mmc_blk_probe(struct mmc_card *card)
{
        struct mmc_blk_data *md;
        int err;
        char cap_str[10];

        /*
         * Check that the card supports the command class(es) we need.
         */
        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
                return -ENODEV;

        md = mmc_blk_alloc(card);
        if (IS_ERR(md))
                return PTR_ERR(md);

        err = mmc_blk_set_blksize(md, card);
        if (err)
                goto out;

        string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        printk(KERN_INFO "%s: %s %s %s %s\n",
               md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
               cap_str, md->read_only ? "(ro)" : "");

        mmc_set_drvdata(card, md);
        add_disk(md->disk);
        return 0;

 out:
        mmc_cleanup_queue(&md->queue);
        mmc_blk_put(md);

        return err;
}

static void mmc_blk_remove(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                /* Stop new requests from getting into the queue */
                del_gendisk(md->disk);

                /* Then flush out any already in there */
                mmc_cleanup_queue(&md->queue);

                mmc_blk_put(md);
        }
        mmc_set_drvdata(card, NULL);
}

#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_queue_suspend(&md->queue);
        }
        return 0;
}

static int mmc_blk_resume(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                mmc_blk_set_blksize(md, card);
                mmc_queue_resume(&md->queue);
        }
        return 0;
}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume  NULL
#endif
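
/*
 * Editor's note: mmc_blk_resume() repeats mmc_blk_set_blksize() because
 * the card may have been powered off while suspended, and a freshly
 * reinitialized card comes back with its own default block length; the
 * driver always transfers in 512-byte blocks, so it reasserts that here
 * before unblocking the queue.
 */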

static struct mmc_driver mmc_driver = {
        .drv            = {
                .name   = "mmcblk",
        },
        .probe          = mmc_blk_probe,
        .remove         = mmc_blk_remove,
        .suspend        = mmc_blk_suspend,
        .resume         = mmc_blk_resume,
};

static int __init mmc_blk_init(void)
{
        int res;

        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
                pr_info("mmcblk: using %d minors per device\n", perdev_minors);

        max_devices = 256 / perdev_minors;

        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
        if (res)
                goto out;

        res = mmc_register_driver(&mmc_driver);
        if (res)
                goto out2;

        return 0;
 out2:
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
        return res;
}

static void __exit mmc_blk_exit(void)
{
        mmc_unregister_driver(&mmc_driver);
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");