/*
 * Block driver for media (i.e., flash cards)
 *
 * Copyright 2002 Hewlett-Packard Company
 * Copyright 2005-2008 Pierre Ossman
 *
 * Use consistent with the GNU GPL is permitted,
 * provided that this copyright notice is
 * preserved in its entirety in all copies and derived works.
 *
 * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
 * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
 * FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 * Many thanks to Alessandro Rubini and Jonathan Corbet!
 *
 * Author:  Andrew Christian
 */
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/init.h>

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>

#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>

#include <asm/uaccess.h>
MODULE_ALIAS("mmc:block");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."

#define INAND_CMD38_ARG_EXT_CSD  113
#define INAND_CMD38_ARG_ERASE    0x00
#define INAND_CMD38_ARG_TRIM     0x01
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
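
/*
 * Note: in a CMD6 (MMC_SWITCH) argument the EXT_CSD byte index lives in
 * bits [23:16]; MMC_EXTRACT_INDEX_FROM_ARG() above recovers it so that
 * the ioctl path can recognize an EXT_CSD_SANITIZE_START request.
 */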
#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
				 (rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02
static DEFINE_MUTEX(block_mutex);
/*
 * The defaults come from config options but can be overridden by module
 * or bootarg options.
 */
static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
/*
 * We've only got one major, so number of mmcblk devices is
 * limited to (1 << 20) / number of minors per device.  It is also
 * currently limited by the size of the static bitmaps below.
 */
static int max_devices;

#define MAX_DEVICES 256
/* TODO: Replace these with struct ida */
static DECLARE_BITMAP(dev_use, MAX_DEVICES);
static DECLARE_BITMAP(name_use, MAX_DEVICES);
/*
 * There is one mmc_blk_data per slot.
 */
struct mmc_blk_data {
	spinlock_t	lock;
	struct gendisk	*disk;
	struct mmc_queue queue;
	struct list_head part;

	unsigned int	flags;
#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */

	unsigned int	usage;
	unsigned int	read_only;
	unsigned int	part_type;
	unsigned int	name_idx;
	unsigned int	reset_done;
#define MMC_BLK_READ		BIT(0)
#define MMC_BLK_WRITE		BIT(1)
#define MMC_BLK_DISCARD		BIT(2)
#define MMC_BLK_SECDISCARD	BIT(3)

	/*
	 * Only set in main mmc_blk_data associated
	 * with mmc_card with dev_set_drvdata, and keeps
	 * track of the current selected device partition.
	 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};
static DEFINE_MUTEX(open_lock);
enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	MMC_PACKED_NR_SINGLE,
};
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}
static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devidx = disk->first_minor / perdev_minors;
	return devidx;
}
static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}
static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}
static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_get_card(card);

	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
				card->ext_csd.boot_ro_lock |
				EXT_CSD_BOOT_WP_B_PWR_WP_EN,
				card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;

	mmc_put_card(card);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}
static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret = count;
out:
	mmc_blk_put(md);
	return ret;
}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
	int ret = -ENXIO;

	mutex_lock(&block_mutex);
	if (md) {
		if (md->usage == 2)
			check_disk_change(bdev);
		ret = 0;

		if ((mode & FMODE_WRITE) && md->read_only) {
			mmc_blk_put(md);
			ret = -EROFS;
		}
	}
	mutex_unlock(&block_mutex);

	return ret;
}
static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
	struct mmc_blk_data *md = disk->private_data;

	mutex_lock(&block_mutex);
	mmc_blk_put(md);
	mutex_unlock(&block_mutex);
}
static int
mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
	geo->heads = 4;
	geo->sectors = 16;
	return 0;
}
struct mmc_blk_ioc_data {
	struct mmc_ioc_cmd ic;
	unsigned char *buf;
	u64 buf_bytes;
};
static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
	struct mmc_ioc_cmd __user *user)
{
	struct mmc_blk_ioc_data *idata;
	int err;

	idata = kmalloc(sizeof(*idata), GFP_KERNEL);
	if (!idata) {
		err = -ENOMEM;
		goto out;
	}

	if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
		err = -EFAULT;
		goto idata_err;
	}

	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
		err = -EOVERFLOW;
		goto idata_err;
	}

	if (!idata->buf_bytes)
		return idata;

	idata->buf = kmalloc(idata->buf_bytes, GFP_KERNEL);
	if (!idata->buf) {
		err = -ENOMEM;
		goto idata_err;
	}

	if (copy_from_user(idata->buf, (void __user *)(unsigned long)
					idata->ic.data_ptr, idata->buf_bytes)) {
		err = -EFAULT;
		goto copy_err;
	}

	return idata;

copy_err:
	kfree(idata->buf);
idata_err:
	kfree(idata);
out:
	return ERR_PTR(err);
}
static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
				      struct mmc_blk_ioc_data *idata)
{
	struct mmc_ioc_cmd *ic = &idata->ic;

	if (copy_to_user(&(ic_ptr->response), ic->response,
			 sizeof(ic->response)))
		return -EFAULT;

	if (!idata->ic.write_flag) {
		if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
				 idata->buf, idata->buf_bytes))
			return -EFAULT;
	}

	return 0;
}
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
				       u32 retries_max)
{
	int err;
	u32 retry_count = 0;

	if (!status || !retries_max)
		return -EINVAL;

	do {
		err = get_card_status(card, status, 5);
		if (err)
			break;

		if (!R1_STATUS(*status) &&
				(R1_CURRENT_STATE(*status) != R1_STATE_PRG))
			break; /* RPMB programming operation complete */

		/*
		 * Reschedule to give the MMC device a chance to continue
		 * processing the previous command without being polled too
		 * frequently.
		 */
		usleep_range(1000, 5000);
	} while (++retry_count < retries_max);

	if (retry_count == retries_max)
		err = -EPERM;

	return err;
}
static int ioctl_do_sanitize(struct mmc_card *card)
{
	int err;

	if (!mmc_can_sanitize(card)) {
		pr_warn("%s: %s - SANITIZE is not supported\n",
			mmc_hostname(card->host), __func__);
		err = -EOPNOTSUPP;
		goto out;
	}

	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
		 mmc_hostname(card->host), __func__);

	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_SANITIZE_START, 1,
			 MMC_SANITIZE_REQ_TIMEOUT);

	if (err)
		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
		       mmc_hostname(card->host), __func__, err);

	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
		 __func__);
out:
	return err;
}
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
			       struct mmc_blk_ioc_data *idata)
{
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct mmc_request mrq = {NULL};
	struct scatterlist sg;
	int err;
	int is_rpmb = false;
	u32 status = 0;

	if (!card || !md || !idata)
		return -EINVAL;

	if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
		is_rpmb = true;

	cmd.opcode = idata->ic.opcode;
	cmd.arg = idata->ic.arg;
	cmd.flags = idata->ic.flags;

	if (idata->buf_bytes) {
		data.sg = &sg;
		data.sg_len = 1;
		data.blksz = idata->ic.blksz;
		data.blocks = idata->ic.blocks;

		sg_init_one(data.sg, idata->buf, idata->buf_bytes);

		if (idata->ic.write_flag)
			data.flags = MMC_DATA_WRITE;
		else
			data.flags = MMC_DATA_READ;

		/* data.flags must already be set before doing this. */
		mmc_set_data_timeout(&data, card);

		/* Allow overriding the timeout_ns for empirical tuning. */
		if (idata->ic.data_timeout_ns)
			data.timeout_ns = idata->ic.data_timeout_ns;

		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
			/*
			 * Pretend this is a data transfer and rely on the
			 * host driver to compute timeout.  When all host
			 * drivers support cmd.cmd_timeout for R1B, this
			 * can be changed to:
			 *
			 *     mrq.data = NULL;
			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
			 */
			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
		}

		mrq.data = &data;
	}

	mrq.cmd = &cmd;

	err = mmc_blk_part_switch(card, md);
	if (err)
		return err;

	if (idata->ic.is_acmd) {
		err = mmc_app_cmd(card->host, card);
		if (err)
			return err;
	}

	if (is_rpmb) {
		err = mmc_set_blockcount(card, data.blocks,
					 idata->ic.write_flag & (1 << 31));
		if (err)
			return err;
	}

	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
	    (cmd.opcode == MMC_SWITCH)) {
		err = ioctl_do_sanitize(card);

		if (err)
			pr_err("%s: ioctl_do_sanitize() failed. err = %d",
			       __func__, err);

		return err;
	}

	mmc_wait_for_req(card->host, &mrq);

	if (cmd.error) {
		dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
			__func__, cmd.error);
		return cmd.error;
	}
	if (data.error) {
		dev_err(mmc_dev(card->host), "%s: data error %d\n",
			__func__, data.error);
		return data.error;
	}

	/*
	 * According to the SD specs, some commands require a delay after
	 * issuing the command.
	 */
	if (idata->ic.postsleep_min_us)
		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);

	memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));

	if (is_rpmb) {
		/*
		 * Ensure RPMB command has completed by polling CMD13
		 * "Send Status".
		 */
		err = ioctl_rpmb_card_status_poll(card, &status, 5);
		if (err)
			dev_err(mmc_dev(card->host),
				"%s: Card Status=0x%08X, error %d\n",
				__func__, status, err);
	}

	return err;
}
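
/*
 * For reference, a minimal sketch of how user space reaches the helper
 * above through MMC_IOC_CMD (illustrative only; the CMD13 opcode, the
 * rca value and the exact response flags are assumptions of this example,
 * not something this driver mandates):
 *
 *	struct mmc_ioc_cmd ic = { 0 };
 *
 *	ic.opcode = 13;			// MMC_SEND_STATUS
 *	ic.arg = rca << 16;
 *	ic.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	ioctl(fd, MMC_IOC_CMD, &ic);	// fd: the whole device, e.g. /dev/mmcblk0
 */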
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
			     struct mmc_ioc_cmd __user *ic_ptr)
{
	struct mmc_blk_ioc_data *idata;
	struct mmc_blk_data *md;
	struct mmc_card *card;
	int err = 0, ioc_err = 0;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
	if (IS_ERR(idata))
		return PTR_ERR(idata);

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);

	mmc_put_card(card);

	err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	kfree(idata->buf);
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
				   struct mmc_ioc_multi_cmd __user *user)
{
	struct mmc_blk_ioc_data **idata = NULL;
	struct mmc_ioc_cmd __user *cmds = user->cmds;
	struct mmc_card *card;
	struct mmc_blk_data *md;
	int i, err = 0, ioc_err = 0;
	__u64 num_of_cmds;

	/*
	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the
	 * whole block device, not on a partition.  This prevents overspray
	 * between sibling partitions.
	 */
	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
		return -EPERM;

	if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
			   sizeof(num_of_cmds)))
		return -EFAULT;

	if (num_of_cmds > MMC_IOC_MAX_CMDS)
		return -EINVAL;

	idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
	if (!idata)
		return -ENOMEM;

	for (i = 0; i < num_of_cmds; i++) {
		idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
		if (IS_ERR(idata[i])) {
			err = PTR_ERR(idata[i]);
			num_of_cmds = i;
			goto cmd_err;
		}
	}

	md = mmc_blk_get(bdev->bd_disk);
	if (!md) {
		err = -EINVAL;
		goto cmd_err;
	}

	card = md->queue.card;
	if (IS_ERR(card)) {
		err = PTR_ERR(card);
		goto cmd_done;
	}

	mmc_get_card(card);

	for (i = 0; i < num_of_cmds && !ioc_err; i++)
		ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);

	mmc_put_card(card);

	/* copy to user if data and response */
	for (i = 0; i < num_of_cmds && !err; i++)
		err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);

cmd_done:
	mmc_blk_put(md);
cmd_err:
	for (i = 0; i < num_of_cmds; i++) {
		kfree(idata[i]->buf);
		kfree(idata[i]);
	}
	kfree(idata);
	return ioc_err ? ioc_err : err;
}
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case MMC_IOC_CMD:
		return mmc_blk_ioctl_cmd(bdev,
				(struct mmc_ioc_cmd __user *)arg);
	case MMC_IOC_MULTI_CMD:
		return mmc_blk_ioctl_multi_cmd(bdev,
				(struct mmc_ioc_multi_cmd __user *)arg);
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_COMPAT
static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
}
#endif
static const struct block_device_operations mmc_bdops = {
	.open			= mmc_blk_open,
	.release		= mmc_blk_release,
	.getgeo			= mmc_blk_getgeo,
	.owner			= THIS_MODULE,
	.ioctl			= mmc_blk_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= mmc_blk_compat_ioctl,
#endif
};
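
/*
 * md->part_type below holds the EXT_CSD_PART_CONFIG access value
 * (PARTITION_ACCESS, bits [2:0]): 0 selects the user area, 1/2 the boot
 * partitions, 3 the RPMB partition and 4-7 the general purpose ones.
 */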
static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md)
{
	int ret;
	struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);

	if (main_md->part_curr == md->part_type)
		return 0;

	if (mmc_card_mmc(card)) {
		u8 part_config = card->ext_csd.part_config;

		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		part_config |= md->part_type;

		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONFIG, part_config,
				 card->ext_csd.part_time);
		if (ret)
			return ret;

		card->ext_csd.part_config = part_config;
	}

	main_md->part_curr = md->part_type;
	return 0;
}
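
/*
 * Ask an SD card how many blocks of a failed write actually made it to
 * the medium: CMD55 (APP_CMD) followed by ACMD22 (SEND_NUM_WR_BLKS),
 * which returns a single big-endian 32-bit count read as a 4-byte data
 * transfer (hence the ntohl() below).
 */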
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
	int err;
	u32 result;
	__be32 *blocks;

	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};

	struct scatterlist sg;

	cmd.opcode = MMC_APP_CMD;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err)
		return (u32)-1;
	if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
		return (u32)-1;

	memset(&cmd, 0, sizeof(struct mmc_command));

	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 4;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);

	mrq.cmd = &cmd;
	mrq.data = &data;

	blocks = kmalloc(4, GFP_KERNEL);
	if (!blocks)
		return (u32)-1;

	sg_init_one(&sg, blocks, 4);

	mmc_wait_for_req(card->host, &mrq);

	result = ntohl(*blocks);
	kfree(blocks);

	if (cmd.error || data.error)
		result = (u32)-1;

	return result;
}
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
	struct mmc_command cmd = {0};
	int err;

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err == 0)
		*status = cmd.resp[0];
	return err;
}
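
/*
 * CMD13 returns the card status register; R1_CURRENT_STATE() (bits
 * [12:9]) is what distinguishes the programming state from the transfer
 * state in the polling loop below.
 */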
static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
			    bool hw_busy_detect, struct request *req, int *gen_err)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
	int err = 0;
	u32 status;

	do {
		err = get_card_status(card, &status, 5);
		if (err) {
			pr_err("%s: error %d requesting status\n",
			       req->rq_disk->disk_name, err);
			return err;
		}

		if (status & R1_ERROR) {
			pr_err("%s: %s: error sending status cmd, status %#x\n",
			       req->rq_disk->disk_name, __func__, status);
			*gen_err = 1;
		}

		/* We may rely on the host hw to handle busy detection.*/
		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
		    hw_busy_detect)
			break;

		/*
		 * Timeout if the device never becomes ready for data and never
		 * leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
			       mmc_hostname(card->host),
			       req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

	return err;
}
static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		     struct request *req, int *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {0};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/*
	 * Normally we use R1B responses for WRITE, but in cases where the host
	 * has specified a max_busy_timeout we need to validate it. A failure
	 * means we need to prevent the host from doing hw busy detection, which
	 * is done by converting to a R1 response instead.
	 */
	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;

	*stop_status = cmd.resp[0];

	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;

	if (!mmc_host_is_spi(host) &&
	    (*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
		       req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = 1;
	}

	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}
#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0
static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
		       req->rq_disk->disk_name, "response CRC error",
		       name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
		       req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid)
			return ERR_RETRY;

		/*
		 * If it was a r/w cmd crc error, or illegal command
		 * (eg, issued in wrong state) then retry - we should
		 * have corrected the state problem above.
		 */
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
			return ERR_RETRY;

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}
/*
 * Initial r/w and stop cmd error recovery.
 * We don't know whether the card received the r/w cmd or not, so try to
 * restore things back to a sane state.  Essentially, we do this as follows:
 * - Obtain card status.  If the first attempt to obtain card status fails,
 *   the status word will reflect the failed status cmd, not the failed
 *   r/w cmd.  If we fail to obtain card status, it suggests we can no
 *   longer communicate with the card.
 * - Check the card state.  If the card received the cmd but there was a
 *   transient problem with the response, it might still be in a data transfer
 *   mode.  Try to send it a stop command.  If this fails, we can't recover.
 * - If the r/w cmd failed due to a response CRC error, it was probably
 *   transient, so retry the cmd.
 * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
 * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
 *   illegal cmd, retry.
 * Otherwise we don't understand what happened, so abort.
 */
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
	bool prev_cmd_status_valid = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response.  If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

		prev_cmd_status_valid = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card.  Give up. */
	if (err) {
		/* Check if the card is removed */
		if (mmc_detect_card_removed(card->host))
			return ERR_NOMEDIUM;
		return ERR_ABORT;
	}

	/* Flag ECC errors */
	if ((status & R1_CARD_ECC_FAILED) ||
	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
		*ecc_err = 1;

	/* Flag General errors */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
		if ((status & R1_ERROR) ||
		    (brq->stop.resp[0] & R1_ERROR)) {
			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0], status);
			*gen_err = 1;
		}

	/*
	 * Check the current card state.  If it is in some data transfer
	 * mode, tell it to stop (and hopefully transition back to TRAN.)
	 */
	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
		err = send_stop(card,
			DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
			req, gen_err, &stop_status);
		if (err) {
			pr_err("%s: error %d sending stop command\n",
			       req->rq_disk->disk_name, err);
			/*
			 * If the stop cmd also timed out, the card is probably
			 * not present, so abort. Other errors are bad news too.
			 */
			return ERR_ABORT;
		}

		if (stop_status & R1_CARD_ECC_FAILED)
			*ecc_err = 1;
	}

	/* Check for set block count errors */
	if (brq->sbc.error)
		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
				prev_cmd_status_valid, status);

	/* Check for r/w command errors */
	if (brq->cmd.error)
		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
				prev_cmd_status_valid, status);

	/* Data errors */
	if (!brq->stop.error)
		return ERR_CONTINUE;

	/* Now for stop errors.  These aren't fatal to the transfer. */
	pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
		req->rq_disk->disk_name, brq->stop.error,
		brq->cmd.resp[0], status);

	/*
	 * Substitute in our own stop status as this will give the error
	 * state which happened during the execution of the r/w command.
	 */
	brq->stop.resp[0] = stop_status;
	brq->stop.error = 0;

	return ERR_CONTINUE;
}
static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
			 int type)
{
	int err;

	if (md->reset_done & type)
		return -EEXIST;

	md->reset_done |= type;
	err = mmc_hw_reset(host);
	/* Ensure we switch back to the correct partition */
	if (err != -EOPNOTSUPP) {
		struct mmc_blk_data *main_md =
			dev_get_drvdata(&host->card->dev);
		int part_err;

		main_md->part_curr = main_md->part_type;
		part_err = mmc_blk_part_switch(host->card, md);
		if (part_err) {
			/*
			 * We have failed to get back into the correct
			 * partition, so we need to abort the whole request.
			 */
			return -ENODEV;
		}
	}
	return err;
}
static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
{
	md->reset_done &= ~type;
}
int mmc_access_rpmb(struct mmc_queue *mq)
{
	struct mmc_blk_data *md = mq->data;
	/*
	 * If this is a RPMB partition access, return true
	 */
	if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
		return true;

	return false;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_DISCARD;

	if (!mmc_can_erase(card)) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_discard(card))
		arg = MMC_DISCARD_ARG;
	else if (mmc_can_trim(card))
		arg = MMC_TRIM_ARG;
	else
		arg = MMC_ERASE_ARG;
retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_TRIM_ARG ?
				 INAND_CMD38_ARG_TRIM :
				 INAND_CMD38_ARG_ERASE,
				 0);
		if (err)
			goto out;
	}
	err = mmc_erase(card, from, nr, arg);
out:
	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
				       struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	unsigned int from, nr, arg;
	int err = 0, type = MMC_BLK_SECDISCARD;

	if (!(mmc_can_secure_erase_trim(card))) {
		err = -EOPNOTSUPP;
		goto out;
	}

	from = blk_rq_pos(req);
	nr = blk_rq_sectors(req);

	if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
		arg = MMC_SECURE_TRIM1_ARG;
	else
		arg = MMC_SECURE_ERASE_ARG;

retry:
	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 INAND_CMD38_ARG_EXT_CSD,
				 arg == MMC_SECURE_TRIM1_ARG ?
				 INAND_CMD38_ARG_SECTRIM1 :
				 INAND_CMD38_ARG_SECERASE,
				 0);
		if (err)
			goto out_retry;
	}

	err = mmc_erase(card, from, nr, arg);
	if (err == -EIO)
		goto out_retry;
	if (err)
		goto out;

	if (arg == MMC_SECURE_TRIM1_ARG) {
		if (card->quirks & MMC_QUIRK_INAND_CMD38) {
			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
					 INAND_CMD38_ARG_EXT_CSD,
					 INAND_CMD38_ARG_SECTRIM2,
					 0);
			if (err)
				goto out_retry;
		}

		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
		if (err == -EIO)
			goto out_retry;
		if (err)
			goto out;
	}

out_retry:
	if (err && !mmc_blk_reset(md, card->host, type))
		goto retry;
	if (!err)
		mmc_blk_reset_success(md, type);
out:
	blk_end_request(req, err, blk_rq_bytes(req));

	return err ? 0 : 1;
}
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	int ret = 0;

	ret = mmc_flush_cache(card);
	if (ret)
		ret = -EIO;

	blk_end_request_all(req, ret);

	return ret ? 0 : 1;
}
/*
 * Reformat current write as a reliable write, supporting
 * both legacy and the enhanced reliable write MMC cards.
 * In each transfer we'll handle only as much as a single
 * reliable write can handle, thus finish the request in
 * partial completions.
 */
static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
				    struct mmc_card *card,
				    struct request *req)
{
	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
		/* Legacy mode imposes restrictions on transfers. */
		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
			brq->data.blocks = 1;

		if (brq->data.blocks > card->ext_csd.rel_sectors)
			brq->data.blocks = card->ext_csd.rel_sectors;
		else if (brq->data.blocks < card->ext_csd.rel_sectors)
			brq->data.blocks = 1;
	}
}
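
/*
 * Example: with rel_sectors == 8, an aligned 13-sector legacy reliable
 * write above is trimmed to one 8-sector transfer; the remaining 5
 * sectors (now fewer than rel_sectors) then complete as single-sector
 * transfers on subsequent passes through the request.
 */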
#define CMD_ERRORS							\
	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */
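
/*
 * mmc_blk_err_check() condenses the outcome of an asynchronous transfer
 * into an mmc_blk_status code: SUCCESS/PARTIAL for forward progress,
 * RETRY for transient failures, and ABORT/DATA_ERR/ECC_ERR/CMD_ERR/
 * NOMEDIUM for the fatal cases handled in mmc_blk_issue_rw_rq().
 */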
static int mmc_blk_err_check(struct mmc_card *card,
			     struct mmc_async_req *areq)
{
	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
						    mmc_active);
	struct mmc_blk_request *brq = &mq_mrq->brq;
	struct request *req = mq_mrq->req;
	int need_retune = card->host->need_retune;
	int ecc_err = 0, gen_err = 0;

	/*
	 * sbc.error indicates a problem with the set block count
	 * command.  No data will have been transferred.
	 *
	 * cmd.error indicates a problem with the r/w command.  No
	 * data will have been transferred.
	 *
	 * stop.error indicates a problem with the stop command.  Data
	 * may have been transferred, or may still be transferring.
	 */
	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
	    brq->data.error) {
		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
		case ERR_RETRY:
			return MMC_BLK_RETRY;
		case ERR_ABORT:
			return MMC_BLK_ABORT;
		case ERR_NOMEDIUM:
			return MMC_BLK_NOMEDIUM;
		case ERR_CONTINUE:
			break;
		}
	}

	/*
	 * Check for errors relating to the execution of the
	 * initial command - such as address errors.  No data
	 * has been transferred.
	 */
	if (brq->cmd.resp[0] & CMD_ERRORS) {
		pr_err("%s: r/w command failed, status = %#x\n",
		       req->rq_disk->disk_name, brq->cmd.resp[0]);
		return MMC_BLK_ABORT;
	}

	/*
	 * Everything else is either success, or a data error of some
	 * kind.  If it was a write, we may have transitioned to
	 * program mode, which we have to wait for it to complete.
	 */
	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
		int err;

		/* Check stop command response */
		if (brq->stop.resp[0] & R1_ERROR) {
			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
			       req->rq_disk->disk_name, __func__,
			       brq->stop.resp[0]);
			gen_err = 1;
		}

		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
				       &gen_err);
		if (err)
			return MMC_BLK_CMD_ERR;
	}

	/* if general error occurs, retry the write operation. */
	if (gen_err) {
		pr_warn("%s: retrying write for general error\n",
			req->rq_disk->disk_name);
		return MMC_BLK_RETRY;
	}

	if (brq->data.error) {
		if (need_retune && !brq->retune_retry_done) {
			pr_debug("%s: retrying because a re-tune was needed\n",
				 req->rq_disk->disk_name);
			brq->retune_retry_done = 1;
			return MMC_BLK_RETRY;
		}
		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
		       req->rq_disk->disk_name, brq->data.error,
		       (unsigned)blk_rq_pos(req),
		       (unsigned)blk_rq_sectors(req),
		       brq->cmd.resp[0], brq->stop.resp[0]);

		if (rq_data_dir(req) == READ) {
			if (ecc_err)
				return MMC_BLK_ECC_ERR;
			return MMC_BLK_DATA_ERR;
		} else {
			return MMC_BLK_CMD_ERR;
		}
	}

	if (!brq->data.bytes_xfered)
		return MMC_BLK_RETRY;

	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
			return MMC_BLK_PARTIAL;
		else
			return MMC_BLK_SUCCESS;
	}

	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
		return MMC_BLK_PARTIAL;

	return MMC_BLK_SUCCESS;
}
*card
,
1410 struct mmc_async_req
*areq
)
1412 struct mmc_queue_req
*mq_rq
= container_of(areq
, struct mmc_queue_req
,
1414 struct request
*req
= mq_rq
->req
;
1415 struct mmc_packed
*packed
= mq_rq
->packed
;
1416 int err
, check
, status
;
1422 check
= mmc_blk_err_check(card
, areq
);
1423 err
= get_card_status(card
, &status
, 0);
1425 pr_err("%s: error %d sending status command\n",
1426 req
->rq_disk
->disk_name
, err
);
1427 return MMC_BLK_ABORT
;
1430 if (status
& R1_EXCEPTION_EVENT
) {
1431 err
= mmc_get_ext_csd(card
, &ext_csd
);
1433 pr_err("%s: error %d sending ext_csd\n",
1434 req
->rq_disk
->disk_name
, err
);
1435 return MMC_BLK_ABORT
;
1438 if ((ext_csd
[EXT_CSD_EXP_EVENTS_STATUS
] &
1439 EXT_CSD_PACKED_FAILURE
) &&
1440 (ext_csd
[EXT_CSD_PACKED_CMD_STATUS
] &
1441 EXT_CSD_PACKED_GENERIC_ERROR
)) {
1442 if (ext_csd
[EXT_CSD_PACKED_CMD_STATUS
] &
1443 EXT_CSD_PACKED_INDEXED_ERROR
) {
1444 packed
->idx_failure
=
1445 ext_csd
[EXT_CSD_PACKED_FAILURE_INDEX
] - 1;
1446 check
= MMC_BLK_PARTIAL
;
1448 pr_err("%s: packed cmd failed, nr %u, sectors %u, "
1449 "failure index: %d\n",
1450 req
->rq_disk
->disk_name
, packed
->nr_entries
,
1451 packed
->blocks
, packed
->idx_failure
);
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
			       struct mmc_queue *mq)
{
	u32 readcmd, writecmd;
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct mmc_blk_data *md = mq->data;
	bool do_data_tag;

	/*
	 * Reliable writes are used to implement Forced Unit Access and
	 * are supported only on MMCs.
	 */
	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
		(rq_data_dir(req) == WRITE) &&
		(md->flags & MMC_BLK_REL_WR);

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;

	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
	brq->data.blksz = 512;
	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->data.blocks = blk_rq_sectors(req);

	/*
	 * The block layer doesn't support all sector count
	 * restrictions, so we need to be prepared for too big
	 * requests.
	 */
	if (brq->data.blocks > card->host->max_blk_count)
		brq->data.blocks = card->host->max_blk_count;

	if (brq->data.blocks > 1) {
		/*
		 * After a read error, we redo the request one sector
		 * at a time in order to accurately determine which
		 * sectors can be read successfully.
		 */
		if (disable_multi)
			brq->data.blocks = 1;

		/*
		 * Some controllers have HW issues while operating
		 * in multiple I/O mode
		 */
		if (card->host->ops->multi_io_quirk)
			brq->data.blocks = card->host->ops->multi_io_quirk(card,
						(rq_data_dir(req) == READ) ?
						MMC_DATA_READ : MMC_DATA_WRITE,
						brq->data.blocks);
	}

	if (brq->data.blocks > 1 || do_rel_wr) {
		/* SPI multiblock writes terminate using a special
		 * token, not a STOP_TRANSMISSION request.
		 */
		if (!mmc_host_is_spi(card->host) ||
		    rq_data_dir(req) == READ)
			brq->mrq.stop = &brq->stop;
		readcmd = MMC_READ_MULTIPLE_BLOCK;
		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
	} else {
		brq->mrq.stop = NULL;
		readcmd = MMC_READ_SINGLE_BLOCK;
		writecmd = MMC_WRITE_BLOCK;
	}
	if (rq_data_dir(req) == READ) {
		brq->cmd.opcode = readcmd;
		brq->data.flags = MMC_DATA_READ;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
					MMC_CMD_AC;
	} else {
		brq->cmd.opcode = writecmd;
		brq->data.flags = MMC_DATA_WRITE;
		if (brq->mrq.stop)
			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
					MMC_CMD_AC;
	}

	if (do_rel_wr)
		mmc_apply_rel_rw(brq, card, req);

	/*
	 * Data tag is used only during writing meta data to speed
	 * up write and any subsequent read of this meta data
	 */
	do_data_tag = (card->ext_csd.data_tag_unit_size) &&
		(req->cmd_flags & REQ_META) &&
		(rq_data_dir(req) == WRITE) &&
		((brq->data.blocks * brq->data.blksz) >=
		 card->ext_csd.data_tag_unit_size);

	/*
	 * Pre-defined multi-block transfers are preferable to
	 * open ended-ones (and necessary for reliable writes).
	 * However, it is not sufficient to just send CMD23,
	 * and avoid the final CMD12, as on an error condition
	 * CMD12 (stop) needs to be sent anyway. This, coupled
	 * with Auto-CMD23 enhancements provided by some
	 * hosts, means that the complexity of dealing
	 * with this is best left to the host. If CMD23 is
	 * supported by card and host, we'll fill sbc in and let
	 * the host deal with handling it correctly. This means
	 * that for hosts that don't expose MMC_CAP_CMD23, no
	 * change of behavior will be observed.
	 *
	 * N.B: Some MMC cards experience perf degradation.
	 * We'll avoid using CMD23-bounded multiblock writes for
	 * these, while retaining features like reliable writes.
	 */
	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
	     do_data_tag)) {
		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
		brq->sbc.arg = brq->data.blocks |
			(do_rel_wr ? (1 << 31) : 0) |
			(do_data_tag ? (1 << 29) : 0);
		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
		brq->mrq.sbc = &brq->sbc;
	}
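
	/*
	 * CMD23 argument layout, as built above: bits [15:0] carry the
	 * block count, bit 31 requests a reliable write and bit 29 tags
	 * the data (bit 30, packed, is used by the packed-command path).
	 */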
	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	/*
	 * Adjust the sg list so it is the same size as the
	 * request.
	 */
	if (brq->data.blocks != blk_rq_sectors(req)) {
		int i, data_size = brq->data.blocks << 9;
		struct scatterlist *sg;

		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
			data_size -= sg->length;
			if (data_size <= 0) {
				sg->length += data_size;
				i++;
				break;
			}
		}
		brq->data.sg_len = i;
	}

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_err_check;

	mmc_queue_bounce_pre(mqrq);
}
static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
					  struct mmc_card *card)
{
	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
	unsigned int max_seg_sz = queue_max_segment_size(q);
	unsigned int len, nr_segs = 0;

	do {
		len = min(hdr_sz, max_seg_sz);
		hdr_sz -= len;
		nr_segs++;
	} while (hdr_sz);

	return nr_segs;
}
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
	struct request_queue *q = mq->queue;
	struct mmc_card *card = mq->card;
	struct request *cur = req, *next = NULL;
	struct mmc_blk_data *md = mq->data;
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
	unsigned int req_sectors = 0, phys_segments = 0;
	unsigned int max_blk_count, max_phys_segs;
	bool put_back = true;
	u8 max_packed_rw = 0;
	u8 reqs = 0;

	if (!(md->flags & MMC_BLK_PACKED_CMD))
		goto no_packed;

	if ((rq_data_dir(cur) == WRITE) &&
	    mmc_host_packed_wr(card->host))
		max_packed_rw = card->ext_csd.max_packed_writes;

	if (max_packed_rw == 0)
		goto no_packed;

	if (mmc_req_rel_wr(cur) &&
	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
		goto no_packed;

	if (mmc_large_sector(card) &&
	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
		goto no_packed;

	mmc_blk_clear_packed(mqrq);

	max_blk_count = min(card->host->max_blk_count,
			    card->host->max_req_size >> 9);
	if (unlikely(max_blk_count > 0xffff))
		max_blk_count = 0xffff;

	max_phys_segs = queue_max_segments(q);
	req_sectors += blk_rq_sectors(cur);
	phys_segments += cur->nr_phys_segments;

	if (rq_data_dir(cur) == WRITE) {
		req_sectors += mmc_large_sector(card) ? 8 : 1;
		phys_segments += mmc_calc_packed_hdr_segs(q, card);
	}

	do {
		if (reqs >= max_packed_rw - 1) {
			put_back = false;
			break;
		}

		spin_lock_irq(q->queue_lock);
		next = blk_fetch_request(q);
		spin_unlock_irq(q->queue_lock);
		if (!next) {
			put_back = false;
			break;
		}

		if (mmc_large_sector(card) &&
		    !IS_ALIGNED(blk_rq_sectors(next), 8))
			break;

		if (next->cmd_flags & REQ_DISCARD ||
		    next->cmd_flags & REQ_FLUSH)
			break;

		if (rq_data_dir(cur) != rq_data_dir(next))
			break;

		if (mmc_req_rel_wr(next) &&
		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
			break;

		req_sectors += blk_rq_sectors(next);
		if (req_sectors > max_blk_count)
			break;

		phys_segments += next->nr_phys_segments;
		if (phys_segments > max_phys_segs)
			break;

		list_add_tail(&next->queuelist, &mqrq->packed->list);
		cur = next;
		reqs++;
	} while (1);

	if (put_back) {
		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, next);
		spin_unlock_irq(q->queue_lock);
	}

	if (reqs > 0) {
		list_add(&req->queuelist, &mqrq->packed->list);
		mqrq->packed->nr_entries = ++reqs;
		mqrq->packed->retries = reqs;
		return reqs;
	}

no_packed:
	mqrq->cmd_type = MMC_PACKED_NONE;
	return 0;
}
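
/*
 * Packed WRITE header layout, as built below: word 0 holds
 * (nr_entries << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER, and each
 * packed entry then contributes a pair of words: its CMD23 argument
 * followed by its CMD25 start address.
 */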
static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
					struct mmc_card *card,
					struct mmc_queue *mq)
{
	struct mmc_blk_request *brq = &mqrq->brq;
	struct request *req = mqrq->req;
	struct request *prq;
	struct mmc_blk_data *md = mq->data;
	struct mmc_packed *packed = mqrq->packed;
	bool do_rel_wr, do_data_tag;
	u32 *packed_cmd_hdr;
	u8 hdr_blocks;
	u8 i = 1;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_WRITE;
	packed->blocks = 0;
	packed->idx_failure = MMC_PACKED_NR_IDX;

	packed_cmd_hdr = packed->cmd_hdr;
	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
	hdr_blocks = mmc_large_sector(card) ? 8 : 1;

	/*
	 * Argument for each entry of packed group
	 */
	list_for_each_entry(prq, &packed->list, queuelist) {
		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
			(prq->cmd_flags & REQ_META) &&
			(rq_data_dir(prq) == WRITE) &&
			((brq->data.blocks * brq->data.blksz) >=
			 card->ext_csd.data_tag_unit_size);
		/* Argument of CMD23 */
		packed_cmd_hdr[(i * 2)] =
			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
			blk_rq_sectors(prq);
		/* Argument of CMD18 or CMD25 */
		packed_cmd_hdr[((i * 2)) + 1] =
			mmc_card_blockaddr(card) ?
			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
		packed->blocks += blk_rq_sectors(prq);
		i++;
	}

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	brq->mrq.sbc = &brq->sbc;
	brq->mrq.stop = &brq->stop;

	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;

	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
	brq->cmd.arg = blk_rq_pos(req);
	if (!mmc_card_blockaddr(card))
		brq->cmd.arg <<= 9;
	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	brq->data.blksz = 512;
	brq->data.blocks = packed->blocks + hdr_blocks;
	brq->data.flags = MMC_DATA_WRITE;

	brq->stop.opcode = MMC_STOP_TRANSMISSION;
	brq->stop.arg = 0;
	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;

	mmc_set_data_timeout(&brq->data, card);

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	mqrq->mmc_active.mrq = &brq->mrq;
	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;

	mmc_queue_bounce_pre(mqrq);
}
static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
			   struct mmc_blk_request *brq, struct request *req,
			   int ret)
{
	struct mmc_queue_req *mq_rq;
	mq_rq = container_of(brq, struct mmc_queue_req, brq);

	/*
	 * If this is an SD card and we're writing, we can first
	 * mark the known good sectors as ok.
	 *
	 * If the card is not SD, we can still ok written sectors
	 * as reported by the controller (which might be less than
	 * the real number of written sectors, but never more).
	 */
	if (mmc_card_sd(card)) {
		u32 blocks;

		blocks = mmc_sd_num_wr_blocks(card);
		if (blocks != (u32)-1)
			ret = blk_end_request(req, 0, blocks << 9);
	} else {
		if (!mmc_packed_cmd(mq_rq->cmd_type))
			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
	}
	return ret;
}
*mq_rq
)
1858 struct request
*prq
;
1859 struct mmc_packed
*packed
= mq_rq
->packed
;
1860 int idx
= packed
->idx_failure
, i
= 0;
1865 while (!list_empty(&packed
->list
)) {
1866 prq
= list_entry_rq(packed
->list
.next
);
1868 /* retry from error index */
1869 packed
->nr_entries
-= idx
;
1873 if (packed
->nr_entries
== MMC_PACKED_NR_SINGLE
) {
1874 list_del_init(&prq
->queuelist
);
1875 mmc_blk_clear_packed(mq_rq
);
1879 list_del_init(&prq
->queuelist
);
1880 blk_end_request(prq
, 0, blk_rq_bytes(prq
));
1884 mmc_blk_clear_packed(mq_rq
);
static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct mmc_packed *packed = mq_rq->packed;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.next);
		list_del_init(&prq->queuelist);
		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
	}

	mmc_blk_clear_packed(mq_rq);
}
static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
				      struct mmc_queue_req *mq_rq)
{
	struct request *prq;
	struct request_queue *q = mq->queue;
	struct mmc_packed *packed = mq_rq->packed;

	while (!list_empty(&packed->list)) {
		prq = list_entry_rq(packed->list.prev);
		if (prq->queuelist.prev != &packed->list) {
			list_del_init(&prq->queuelist);
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(mq->queue, prq);
			spin_unlock_irq(q->queue_lock);
		} else {
			list_del_init(&prq->queuelist);
		}
	}

	mmc_blk_clear_packed(mq_rq);
}
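
/*
 * The r/w issue path below runs the two-slot asynchronous pipeline:
 * mmc_start_req() hands the freshly prepared request to the host and
 * returns the *previous* one for post-processing, so the next transfer
 * is already in flight while its predecessor is being error-checked.
 */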
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
	int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0;
	enum mmc_blk_status status;
	struct mmc_queue_req *mq_rq;
	struct request *req = rqc;
	struct mmc_async_req *areq;
	const u8 packed_nr = 2;
	u8 reqs = 0;

	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	if (rqc)
		reqs = mmc_blk_prep_packed_list(mq, rqc);

	do {
		if (rqc) {
			/*
			 * When 4KB native sector is enabled, only 8 blocks
			 * multiple read or write is allowed
			 */
			if ((brq->data.blocks & 0x07) &&
			    (card->ext_csd.data_sector_size == 4096)) {
				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
				       req->rq_disk->disk_name);
				mq_rq = mq->mqrq_cur;
				goto cmd_abort;
			}

			if (reqs >= packed_nr)
				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
							    card, mq);
			else
				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			areq = &mq->mqrq_cur->mmc_active;
		} else
			areq = NULL;
		areq = mmc_start_req(card->host, areq, (int *) &status);
		if (!areq) {
			if (status == MMC_BLK_NEW_REQUEST)
				mq->flags |= MMC_QUEUE_NEW_REQUEST;
			return 0;
		}

		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
		brq = &mq_rq->brq;
		req = mq_rq->req;
		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
		mmc_queue_bounce_post(mq_rq);

		switch (status) {
		case MMC_BLK_SUCCESS:
		case MMC_BLK_PARTIAL:
			/*
			 * A block was successfully transferred.
			 */
			mmc_blk_reset_success(md, type);

			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				ret = mmc_blk_end_packed_req(mq_rq);
				break;
			} else {
				ret = blk_end_request(req, 0,
						brq->data.bytes_xfered);
			}

			/*
			 * If the blk_end_request function returns non-zero even
			 * though all data has been transferred and no errors
			 * were returned by the host controller, it's a bug.
			 */
			if (status == MMC_BLK_SUCCESS && ret) {
				pr_err("%s BUG rq_tot %d d_xfer %d\n",
				       __func__, blk_rq_bytes(req),
				       brq->data.bytes_xfered);
				rqc = NULL;
				goto cmd_abort;
			}
			break;
		case MMC_BLK_CMD_ERR:
			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
			if (mmc_blk_reset(md, card->host, type))
				goto cmd_abort;
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_RETRY:
			retune_retry_done = brq->retune_retry_done;
			if (retry++ < 5)
				break;
			/* Fall through */
		case MMC_BLK_ABORT:
			if (!mmc_blk_reset(md, card->host, type))
				break;
			goto cmd_abort;
		case MMC_BLK_DATA_ERR: {
			int err;

			err = mmc_blk_reset(md, card->host, type);
			if (!err)
				break;
			if (err == -ENODEV ||
			    mmc_packed_cmd(mq_rq->cmd_type))
				goto cmd_abort;
			/* Fall through */
		}
		case MMC_BLK_ECC_ERR:
			if (brq->data.blocks > 1) {
				/* Redo read one sector at a time */
				pr_warn("%s: retrying using single block read\n",
					req->rq_disk->disk_name);
				disable_multi = 1;
				break;
			}
			/*
			 * After an error, we redo I/O one sector at a
			 * time, so we only reach here after trying to
			 * read a single sector.
			 */
			ret = blk_end_request(req, -EIO,
					      brq->data.blksz);
			if (!ret)
				goto start_new_req;
			break;
		case MMC_BLK_NOMEDIUM:
			goto cmd_abort;
		default:
			pr_err("%s: Unhandled return value (%d)",
			       req->rq_disk->disk_name, status);
			goto cmd_abort;
		}

		if (ret) {
			if (mmc_packed_cmd(mq_rq->cmd_type)) {
				if (!mq_rq->packed->retries)
					goto cmd_abort;
				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
				mmc_start_req(card->host,
					      &mq_rq->mmc_active, NULL);
			} else {

				/*
				 * In case of an incomplete request
				 * prepare it again and resend.
				 */
				mmc_blk_rw_rq_prep(mq_rq, card,
						disable_multi, mq);
				mmc_start_req(card->host,
						&mq_rq->mmc_active, NULL);
			}
			mq_rq->brq.retune_retry_done = retune_retry_done;
		}
	} while (ret);

	return 1;

 cmd_abort:
	if (mmc_packed_cmd(mq_rq->cmd_type)) {
		mmc_blk_abort_packed_req(mq_rq);
	} else {
		if (mmc_card_removed(card))
			req->cmd_flags |= REQ_QUIET;
		while (ret)
			ret = blk_end_request(req, -EIO,
					blk_rq_cur_bytes(req));
	}

 start_new_req:
	if (rqc) {
		if (mmc_card_removed(card)) {
			rqc->cmd_flags |= REQ_QUIET;
			blk_end_request_all(rqc, -EIO);
		} else {
			/*
			 * If current request is packed, it needs to put back.
			 */
			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);

			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
			mmc_start_req(card->host,
				      &mq->mqrq_cur->mmc_active, NULL);
		}
	}

	return 0;
}
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
	int ret;
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	unsigned int cmd_flags = req ? req->cmd_flags : 0;

	if (req && !mq->mqrq_prev->req)
		/* claim host only for the first request */
		mmc_get_card(card);

	ret = mmc_blk_part_switch(card, md);
	if (ret) {
		if (req) {
			blk_end_request_all(req, -EIO);
		}
		ret = 0;
		goto out;
	}

	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
	if (cmd_flags & REQ_DISCARD) {
		/* complete ongoing async transfer before issuing discard */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		if (req->cmd_flags & REQ_SECURE)
			ret = mmc_blk_issue_secdiscard_rq(mq, req);
		else
			ret = mmc_blk_issue_discard_rq(mq, req);
	} else if (cmd_flags & REQ_FLUSH) {
		/* complete ongoing async transfer before issuing flush */
		if (card->host->areq)
			mmc_blk_issue_rw_rq(mq, NULL);
		ret = mmc_blk_issue_flush(mq, req);
	} else {
		if (!req && host->areq) {
			spin_lock_irqsave(&host->context_info.lock, flags);
			host->context_info.is_waiting_last_req = true;
			spin_unlock_irqrestore(&host->context_info.lock, flags);
		}
		ret = mmc_blk_issue_rw_rq(mq, req);
	}

out:
	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
	    (cmd_flags & MMC_REQ_SPECIAL_MASK))
		/*
		 * Release host when there are no more requests
		 * and after a special request (discard, flush) is done.
		 * In case of a special request, there is no reentry to
		 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
		 */
		mmc_put_card(card);
	return ret;
}
static inline int mmc_blk_readonly(struct mmc_card *card)
{
	return mmc_card_readonly(card) ||
	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
					      struct device *parent,
					      sector_t size,
					      bool default_ro,
					      const char *subname,
					      int area_type)
{
	struct mmc_blk_data *md;
	int devidx, ret;

	devidx = find_first_zero_bit(dev_use, max_devices);
	if (devidx >= max_devices)
		return ERR_PTR(-ENOSPC);
	__set_bit(devidx, dev_use);

	md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
	if (!md) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * !subname implies we are creating main mmc_blk_data that will be
	 * associated with mmc_card with dev_set_drvdata. Due to device
	 * partitions, devidx will not coincide with a per-physical card
	 * index anymore so we keep track of a name index.
	 */
	if (!subname) {
		md->name_idx = find_first_zero_bit(name_use, max_devices);
		__set_bit(md->name_idx, name_use);
	} else
		md->name_idx = ((struct mmc_blk_data *)
				dev_to_disk(parent)->private_data)->name_idx;

	md->area_type = area_type;

	/*
	 * Set the read-only status based on the supported commands
	 * and the write protect switch.
	 */
	md->read_only = mmc_blk_readonly(card);

	md->disk = alloc_disk(perdev_minors);
	if (md->disk == NULL) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	spin_lock_init(&md->lock);
	INIT_LIST_HEAD(&md->part);
	md->usage = 1;

	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
	if (ret)
		goto err_putdisk;

	md->queue.issue_fn = mmc_blk_issue_rq;
	md->queue.data = md;

	md->disk->major	= MMC_BLOCK_MAJOR;
	md->disk->first_minor = devidx * perdev_minors;
	md->disk->fops = &mmc_bdops;
	md->disk->private_data = md;
	md->disk->queue = md->queue.queue;
	md->disk->driverfs_dev = parent;
	set_disk_ro(md->disk, md->read_only || default_ro);
	md->disk->flags = GENHD_FL_EXT_DEVT;
	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
		md->disk->flags |= GENHD_FL_NO_PART_SCAN;

	/*
	 * As discussed on lkml, GENHD_FL_REMOVABLE should:
	 *
	 * - be set for removable media with permanent block devices
	 * - be unset for removable block devices with permanent media
	 *
	 * Since MMC block devices clearly fall under the second
	 * case, we do not set GENHD_FL_REMOVABLE.  Userspace
	 * should use the block device creation/destruction hotplug
	 * messages to tell when the card is present.
	 */

	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
		 "mmcblk%u%s", md->name_idx, subname ? subname : "");

	if (mmc_card_mmc(card))
		blk_queue_logical_block_size(md->queue.queue,
					     card->ext_csd.data_sector_size);
	else
		blk_queue_logical_block_size(md->queue.queue, 512);

	set_capacity(md->disk, size);

	if (mmc_host_cmd23(card->host)) {
		if (mmc_card_mmc(card) ||
		    (mmc_card_sd(card) &&
		     card->scr.cmds & SD_SCR_CMD23_SUPPORT))
			md->flags |= MMC_BLK_CMD23;
	}

	if (mmc_card_mmc(card) &&
	    md->flags & MMC_BLK_CMD23 &&
	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
	     card->ext_csd.rel_sectors)) {
		md->flags |= MMC_BLK_REL_WR;
		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
	}

	if (mmc_card_mmc(card) &&
	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
	    (md->flags & MMC_BLK_CMD23) &&
	    card->ext_csd.packed_event_en) {
		if (!mmc_packed_init(&md->queue, card))
			md->flags |= MMC_BLK_PACKED_CMD;
	}

	return md;

 err_putdisk:
	put_disk(md->disk);
 err_kfree:
	kfree(md);
 out:
	__clear_bit(devidx, dev_use);
	return ERR_PTR(ret);
}
static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
{
	sector_t size;

	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
		/*
		 * The EXT_CSD sector count is in number of 512 byte
		 * sectors.
		 */
		size = card->ext_csd.sectors;
	} else {
		/*
		 * The CSD capacity field is in units of read_blkbits.
		 * set_capacity takes units of 512 bytes.
		 */
		size = (typeof(sector_t))card->csd.capacity
			<< (card->csd.read_blkbits - 9);
	}

	return mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
					MMC_BLK_DATA_AREA_MAIN);
}
*card
,
2334 struct mmc_blk_data
*md
,
2335 unsigned int part_type
,
2338 const char *subname
,
2342 struct mmc_blk_data
*part_md
;
2344 part_md
= mmc_blk_alloc_req(card
, disk_to_dev(md
->disk
), size
, default_ro
,
2345 subname
, area_type
);
2346 if (IS_ERR(part_md
))
2347 return PTR_ERR(part_md
);
2348 part_md
->part_type
= part_type
;
2349 list_add(&part_md
->part
, &md
->part
);
2351 string_get_size((u64
)get_capacity(part_md
->disk
), 512, STRING_UNITS_2
,
2352 cap_str
, sizeof(cap_str
));
2353 pr_info("%s: %s %s partition %u %s\n",
2354 part_md
->disk
->disk_name
, mmc_card_id(card
),
2355 mmc_card_name(card
), part_md
->part_type
, cap_str
);
/* MMC Physical partitions consist of two boot partitions and
 * up to four general purpose partitions.
 * For each partition enabled in EXT_CSD a block device will be allocated
 * to provide access to the partition.
 */

static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
	int idx, ret = 0;

	if (!mmc_card_mmc(card))
		return 0;

	for (idx = 0; idx < card->nr_parts; idx++) {
		if (card->part[idx].size) {
			ret = mmc_blk_alloc_part(card, md,
				card->part[idx].part_cfg,
				card->part[idx].size >> 9,
				card->part[idx].force_ro,
				card->part[idx].name,
				card->part[idx].area_type);
			if (ret)
				return ret;
		}
	}

	return ret;
}
*md
)
2390 struct mmc_card
*card
;
2394 * Flush remaining requests and free queues. It
2395 * is freeing the queue that stops new requests
2396 * from being accepted.
2398 card
= md
->queue
.card
;
2399 mmc_cleanup_queue(&md
->queue
);
2400 if (md
->flags
& MMC_BLK_PACKED_CMD
)
2401 mmc_packed_clean(&md
->queue
);
2402 if (md
->disk
->flags
& GENHD_FL_UP
) {
2403 device_remove_file(disk_to_dev(md
->disk
), &md
->force_ro
);
2404 if ((md
->area_type
& MMC_BLK_DATA_AREA_BOOT
) &&
2405 card
->ext_csd
.boot_ro_lockable
)
2406 device_remove_file(disk_to_dev(md
->disk
),
2407 &md
->power_ro_lock
);
2409 del_gendisk(md
->disk
);
2415 static void mmc_blk_remove_parts(struct mmc_card
*card
,
2416 struct mmc_blk_data
*md
)
2418 struct list_head
*pos
, *q
;
2419 struct mmc_blk_data
*part_md
;
2421 __clear_bit(md
->name_idx
, name_use
);
2422 list_for_each_safe(pos
, q
, &md
->part
) {
2423 part_md
= list_entry(pos
, struct mmc_blk_data
, part
);
2425 mmc_blk_remove_req(part_md
);
static int mmc_add_disk(struct mmc_blk_data *md)
{
	int ret;
	struct mmc_card *card = md->queue.card;

	add_disk(md->disk);
	md->force_ro.show = force_ro_show;
	md->force_ro.store = force_ro_store;
	sysfs_attr_init(&md->force_ro.attr);
	md->force_ro.attr.name = "force_ro";
	md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
	if (ret)
		goto force_ro_fail;

	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
	     card->ext_csd.boot_ro_lockable) {
		umode_t mode;

		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
			mode = S_IRUGO;
		else
			mode = S_IRUGO | S_IWUSR;

		md->power_ro_lock.show = power_ro_lock_show;
		md->power_ro_lock.store = power_ro_lock_store;
		sysfs_attr_init(&md->power_ro_lock.attr);
		md->power_ro_lock.attr.mode = mode;
		md->power_ro_lock.attr.name =
					"ro_lock_until_next_power_on";
		ret = device_create_file(disk_to_dev(md->disk),
				&md->power_ro_lock);
		if (ret)
			goto power_ro_lock_fail;
	}
	return ret;

power_ro_lock_fail:
	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
	del_gendisk(md->disk);

	return ret;
}
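/*
 * Usage sketch (illustrative, not from the original source): the
 * attributes registered above appear in sysfs, e.g. for the first card:
 *
 *	# report the whole device as read-only to the block layer
 *	echo 1 > /sys/block/mmcblk0/force_ro
 *
 *	# on a lockable boot area, write-protect it until the next
 *	# power cycle
 *	echo 1 > /sys/block/mmcblk0boot0/ro_lock_until_next_power_on
 */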
#define CID_MANFID_SANDISK	0x2
#define CID_MANFID_TOSHIBA	0x11
#define CID_MANFID_MICRON	0x13
#define CID_MANFID_SAMSUNG	0x15
#define CID_MANFID_KINGSTON	0x70
static const struct mmc_fixup blk_fixups[] =
{
	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),
	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
		  MMC_QUIRK_INAND_CMD38),

	/*
	 * Some MMC cards experience performance degradation with CMD23
	 * instead of CMD12-bounded multiblock transfers. For now we'll
	 * blacklist what's known to be bad...
	 * - Certain Toshiba cards.
	 *
	 * N.B. This doesn't affect SD cards.
	 */
	MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),

	/*
	 * Some Micron MMC cards need a longer data read timeout than
	 * indicated in CSD.
	 */
	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
		  MMC_QUIRK_LONG_READ_TIME),

	/*
	 * On these Samsung MoviNAND parts, performing secure erase or
	 * secure trim can result in unrecoverable corruption due to a
	 * firmware bug.
	 */
	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),

	/*
	 * On some Kingston eMMCs, performing trim can occasionally result
	 * in unrecoverable data corruption due to a firmware bug.
	 */
	MMC_FIXUP("V10008", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_TRIM_BROKEN),
	MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_TRIM_BROKEN),

	END_FIXUP
};
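/*
 * Illustrative sketch (hypothetical, compiled out): a new quirk for a
 * misbehaving part would be added to blk_fixups[] above in the same
 * style, matching on the CID product name and manufacturer ID.
 * "XYZ123" below is a made-up product name used purely as an example.
 */
#if 0
	MMC_FIXUP("XYZ123", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_TRIM_BROKEN),
#endif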
static int mmc_blk_probe(struct mmc_card *card)
{
	struct mmc_blk_data *md, *part_md;
	char cap_str[10];

	/*
	 * Check that the card supports the command class(es) we need.
	 */
	if (!(card->csd.cmdclass & CCC_BLOCK_READ))
		return -ENODEV;

	mmc_fixup_device(card, blk_fixups);

	md = mmc_blk_alloc(card);
	if (IS_ERR(md))
		return PTR_ERR(md);

	string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2,
			cap_str, sizeof(cap_str));
	pr_info("%s: %s %s %s %s\n",
		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
		cap_str, md->read_only ? "(ro)" : "");

	if (mmc_blk_alloc_parts(card, md))
		goto out;

	dev_set_drvdata(&card->dev, md);

	if (mmc_add_disk(md))
		goto out;

	list_for_each_entry(part_md, &md->part, part) {
		if (mmc_add_disk(part_md))
			goto out;
	}

	pm_runtime_set_autosuspend_delay(&card->dev, 3000);
	pm_runtime_use_autosuspend(&card->dev);

	/*
	 * Don't enable runtime PM for SD-combo cards here. Leave that
	 * decision to be taken during the SDIO init sequence instead.
	 */
	if (card->type != MMC_TYPE_SD_COMBO) {
		pm_runtime_set_active(&card->dev);
		pm_runtime_enable(&card->dev);
	}

	return 0;

 out:
	mmc_blk_remove_parts(card, md);
	mmc_blk_remove_req(md);
	return 0;
}
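/*
 * Illustrative note (not from the original source): the 3000 ms
 * autosuspend delay set in mmc_blk_probe() means an idle card becomes
 * eligible for runtime suspend roughly three seconds after its last
 * request; the request path holds a runtime-PM reference for the
 * duration of each transfer (via the core's mmc_get_card()/
 * mmc_put_card() helpers), so a suspended card is woken on demand.
 */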
static void mmc_blk_remove(struct mmc_card *card)
{
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	mmc_blk_remove_parts(card, md);
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
	mmc_blk_part_switch(card, md);
	mmc_release_host(card->host);
	if (card->type != MMC_TYPE_SD_COMBO)
		pm_runtime_disable(&card->dev);
	pm_runtime_put_noidle(&card->dev);
	mmc_blk_remove_req(md);
	dev_set_drvdata(&card->dev, NULL);
}
static int _mmc_blk_suspend(struct mmc_card *card)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(&card->dev);

	if (md) {
		mmc_queue_suspend(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_suspend(&part_md->queue);
		}
	}
	return 0;
}
static void mmc_blk_shutdown(struct mmc_card *card)
{
	_mmc_blk_suspend(card);
}
#ifdef CONFIG_PM_SLEEP
static int mmc_blk_suspend(struct device *dev)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	return _mmc_blk_suspend(card);
}
static int mmc_blk_resume(struct device *dev)
{
	struct mmc_blk_data *part_md;
	struct mmc_blk_data *md = dev_get_drvdata(dev);

	if (md) {
		/*
		 * Resume involves the card going into idle state,
		 * so current partition is always the main one.
		 */
		md->part_curr = md->part_type;
		mmc_queue_resume(&md->queue);
		list_for_each_entry(part_md, &md->part, part) {
			mmc_queue_resume(&part_md->queue);
		}
	}
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume);
static struct mmc_driver mmc_driver = {
	.drv		= {
		.name	= "mmcblk",
		.pm	= &mmc_blk_pm_ops,
	},
	.probe		= mmc_blk_probe,
	.remove		= mmc_blk_remove,
	.shutdown	= mmc_blk_shutdown,
};
static int __init mmc_blk_init(void)
{
	int res;

	if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
		pr_info("mmcblk: using %d minors per device\n",
			perdev_minors);

	max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors);

	res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
	if (res)
		goto out;

	res = mmc_register_driver(&mmc_driver);
	if (res)
		goto out2;

	return 0;
 out2:
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
	return res;
}
static void __exit mmc_blk_exit(void)
{
	mmc_unregister_driver(&mmc_driver);
	unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}
module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");