1 /*
2 * linux/drivers/mmc/core/mmc.c
3 *
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
6 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #include <linux/err.h>
14 #include <linux/slab.h>
15 #include <linux/stat.h>
16
17 #include <linux/mmc/host.h>
18 #include <linux/mmc/card.h>
19 #include <linux/mmc/mmc.h>
20
21 #include "core.h"
22 #include "bus.h"
23 #include "mmc_ops.h"
24 #include "sd_ops.h"
25
26 static const unsigned int tran_exp[] = {
27 10000, 100000, 1000000, 10000000,
28 0, 0, 0, 0
29 };
30
31 static const unsigned char tran_mant[] = {
32 0, 10, 12, 13, 15, 20, 25, 30,
33 35, 40, 45, 50, 55, 60, 70, 80,
34 };
35
36 static const unsigned int tacc_exp[] = {
37 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000,
38 };
39
40 static const unsigned int tacc_mant[] = {
41 0, 10, 12, 13, 15, 20, 25, 30,
42 35, 40, 45, 50, 55, 60, 70, 80,
43 };
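/*
 * A note on the four tables above: they mirror the CSD TAAC and TRAN_SPEED
 * encodings, where a 3-bit exponent selects the unit and a 4-bit index
 * selects a mantissa the spec defines as 1.0 .. 8.0, stored here multiplied
 * by ten so the decode below can stay in integer math (the extra factor of
 * ten is divided back out in tacc_ns, or folded into tran_exp).  For
 * example, a TRAN_SPEED byte of 0x32 (exponent 2, mantissa index 6) decodes
 * to tran_exp[2] * tran_mant[6] = 1000000 * 25 = 25000000, i.e. a 25 MHz
 * maximum transfer rate.
 */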
44
45 #define UNSTUFF_BITS(resp,start,size) \
46 ({ \
47 const int __size = size; \
48 const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
49 const int __off = 3 - ((start) / 32); \
50 const int __shft = (start) & 31; \
51 u32 __res; \
52 \
53 __res = resp[__off] >> __shft; \
54 if (__size + __shft > 32) \
55 __res |= resp[__off-1] << ((32 - __shft) % 32); \
56 __res & __mask; \
57 })
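/*
 * UNSTUFF_BITS() pulls a bit field out of the 128-bit CID/CSD response,
 * which is stored as four u32 words with resp[0] holding bits 127:96 and
 * resp[3] holding bits 31:0 -- hence __off = 3 - (start / 32).  Fields that
 * straddle a word boundary are stitched together from two adjacent words.
 * For example, UNSTUFF_BITS(resp, 104, 24) reads bits 127:104 (the CID
 * manufacturer ID in the v1 layout below) as the upper 24 bits of resp[0]
 * shifted right by 8.
 */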
58
59 /*
60 * Given the decoded CSD structure, decode the raw CID to our CID structure.
61 */
62 static int mmc_decode_cid(struct mmc_card *card)
63 {
64 u32 *resp = card->raw_cid;
65
66 /*
67 * The selection of the format here is based upon published
68	 * specs from SanDisk and from what people have reported.
69 */
70 switch (card->csd.mmca_vsn) {
71 case 0: /* MMC v1.0 - v1.2 */
72 case 1: /* MMC v1.4 */
73 card->cid.manfid = UNSTUFF_BITS(resp, 104, 24);
74 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
75 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
76 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
77 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
78 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
79 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
80 card->cid.prod_name[6] = UNSTUFF_BITS(resp, 48, 8);
81 card->cid.hwrev = UNSTUFF_BITS(resp, 44, 4);
82 card->cid.fwrev = UNSTUFF_BITS(resp, 40, 4);
83 card->cid.serial = UNSTUFF_BITS(resp, 16, 24);
84 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
85 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
86 break;
87
88 case 2: /* MMC v2.0 - v2.2 */
89 case 3: /* MMC v3.1 - v3.3 */
90 case 4: /* MMC v4 */
91 card->cid.manfid = UNSTUFF_BITS(resp, 120, 8);
92 card->cid.oemid = UNSTUFF_BITS(resp, 104, 16);
93 card->cid.prod_name[0] = UNSTUFF_BITS(resp, 96, 8);
94 card->cid.prod_name[1] = UNSTUFF_BITS(resp, 88, 8);
95 card->cid.prod_name[2] = UNSTUFF_BITS(resp, 80, 8);
96 card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8);
97 card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8);
98 card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8);
99 card->cid.prv = UNSTUFF_BITS(resp, 48, 8);
100 card->cid.serial = UNSTUFF_BITS(resp, 16, 32);
101 card->cid.month = UNSTUFF_BITS(resp, 12, 4);
102 card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997;
103 break;
104
105 default:
106 pr_err("%s: card has unknown MMCA version %d\n",
107 mmc_hostname(card->host), card->csd.mmca_vsn);
108 return -EINVAL;
109 }
110
111 return 0;
112 }
113
114 static void mmc_set_erase_size(struct mmc_card *card)
115 {
116 if (card->ext_csd.erase_group_def & 1)
117 card->erase_size = card->ext_csd.hc_erase_size;
118 else
119 card->erase_size = card->csd.erase_size;
120
121 mmc_init_erase(card);
122 }
123
124 /*
125 * Given a 128-bit response, decode to our card CSD structure.
126 */
127 static int mmc_decode_csd(struct mmc_card *card)
128 {
129 struct mmc_csd *csd = &card->csd;
130 unsigned int e, m, a, b;
131 u32 *resp = card->raw_csd;
132
133 /*
134 * We only understand CSD structure v1.1 and v1.2.
135 * v1.2 has extra information in bits 15, 11 and 10.
136 * We also support eMMC v4.4 & v4.41.
137 */
138 csd->structure = UNSTUFF_BITS(resp, 126, 2);
139 if (csd->structure == 0) {
140 pr_err("%s: unrecognised CSD structure version %d\n",
141 mmc_hostname(card->host), csd->structure);
142 return -EINVAL;
143 }
144
145 csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
146 m = UNSTUFF_BITS(resp, 115, 4);
147 e = UNSTUFF_BITS(resp, 112, 3);
148 csd->tacc_ns = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
149 csd->tacc_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
150
151 m = UNSTUFF_BITS(resp, 99, 4);
152 e = UNSTUFF_BITS(resp, 96, 3);
153 csd->max_dtr = tran_exp[e] * tran_mant[m];
154 csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
155
156 e = UNSTUFF_BITS(resp, 47, 3);
157 m = UNSTUFF_BITS(resp, 62, 12);
158 csd->capacity = (1 + m) << (e + 2);
159
160 csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
161 csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
162 csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
163 csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
164 csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
165 csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
166 csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
167
168 if (csd->write_blkbits >= 9) {
169 a = UNSTUFF_BITS(resp, 42, 5);
170 b = UNSTUFF_BITS(resp, 37, 5);
171 csd->erase_size = (a + 1) * (b + 1);
172 csd->erase_size <<= csd->write_blkbits - 9;
173 }
174
175 return 0;
176 }
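/*
 * A worked example of the capacity decode above, assuming a legacy
 * byte-addressed part: with C_SIZE = 0xFFF and C_SIZE_MULT = 7 the code
 * computes capacity = (4095 + 1) << (7 + 2) = 4096 * 512 read blocks,
 * which is exactly the "magic" value that mmc_get_ext_csd() checks for
 * when the EXT_CSD of a possible high capacity card cannot be read.
 */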
177
178 /*
179 * Read extended CSD.
180 */
181 static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
182 {
183 int err;
184 u8 *ext_csd;
185
186 BUG_ON(!card);
187 BUG_ON(!new_ext_csd);
188
189 *new_ext_csd = NULL;
190
191 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
192 return 0;
193
194 /*
195 * As the ext_csd is so large and mostly unused, we don't store the
196 * raw block in mmc_card.
197 */
198 ext_csd = kmalloc(512, GFP_KERNEL);
199 if (!ext_csd) {
200 pr_err("%s: could not allocate a buffer to "
201 "receive the ext_csd.\n", mmc_hostname(card->host));
202 return -ENOMEM;
203 }
204
205 err = mmc_send_ext_csd(card, ext_csd);
206 if (err) {
207 kfree(ext_csd);
208 *new_ext_csd = NULL;
209
210 /* If the host or the card can't do the switch,
211 * fail more gracefully. */
212 if ((err != -EINVAL)
213 && (err != -ENOSYS)
214 && (err != -EFAULT))
215 return err;
216
217 /*
218 * High capacity cards should have this "magic" size
219 * stored in their CSD.
220 */
221 if (card->csd.capacity == (4096 * 512)) {
222 pr_err("%s: unable to read EXT_CSD "
223 "on a possible high capacity card. "
224 "Card will be ignored.\n",
225 mmc_hostname(card->host));
226 } else {
227 pr_warning("%s: unable to read "
228 "EXT_CSD, performance might "
229 "suffer.\n",
230 mmc_hostname(card->host));
231 err = 0;
232 }
233 } else
234 *new_ext_csd = ext_csd;
235
236 return err;
237 }
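/*
 * The fixed 512-byte allocation above reflects the size of the EXT_CSD
 * register itself: mmc_send_ext_csd() transfers it as a single 512-byte
 * data block, so no length negotiation is needed here.
 */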
238
239 static void mmc_select_card_type(struct mmc_card *card)
240 {
241 struct mmc_host *host = card->host;
242 u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
243 u32 caps = host->caps, caps2 = host->caps2;
244 unsigned int hs_max_dtr = 0;
245
246 if (card_type & EXT_CSD_CARD_TYPE_26)
247 hs_max_dtr = MMC_HIGH_26_MAX_DTR;
248
249 if (caps & MMC_CAP_MMC_HIGHSPEED &&
250 card_type & EXT_CSD_CARD_TYPE_52)
251 hs_max_dtr = MMC_HIGH_52_MAX_DTR;
252
253 if ((caps & MMC_CAP_1_8V_DDR &&
254 card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
255 (caps & MMC_CAP_1_2V_DDR &&
256 card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
257 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
258
259 if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
260 card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
261 (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
262 card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
263 hs_max_dtr = MMC_HS200_MAX_DTR;
264
265 card->ext_csd.hs_max_dtr = hs_max_dtr;
266 card->ext_csd.card_type = card_type;
267 }
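/*
 * The hs_max_dtr chosen above acts as the clock ceiling used later in
 * mmc_init_card(): 26 MHz or 52 MHz for classic high-speed, 52 MHz for
 * DDR, and 200 MHz when one of the HS200 SDR modes is supported by both
 * the card and the host (MMC_HIGH_26_MAX_DTR .. MMC_HS200_MAX_DTR).
 */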
268
269 /*
270 * Decode extended CSD.
271 */
272 static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
273 {
274 int err = 0, idx;
275 unsigned int part_size;
276 u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0;
277
278 BUG_ON(!card);
279
280 if (!ext_csd)
281 return 0;
282
283 /* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
284 card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
285 if (card->csd.structure == 3) {
286 if (card->ext_csd.raw_ext_csd_structure > 2) {
287 pr_err("%s: unrecognised EXT_CSD structure "
288 "version %d\n", mmc_hostname(card->host),
289 card->ext_csd.raw_ext_csd_structure);
290 err = -EINVAL;
291 goto out;
292 }
293 }
294
295 card->ext_csd.rev = ext_csd[EXT_CSD_REV];
296 if (card->ext_csd.rev > 7) {
297 pr_err("%s: unrecognised EXT_CSD revision %d\n",
298 mmc_hostname(card->host), card->ext_csd.rev);
299 err = -EINVAL;
300 goto out;
301 }
302
303 card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
304 card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
305 card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
306 card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
307 if (card->ext_csd.rev >= 2) {
308 card->ext_csd.sectors =
309 ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
310 ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
311 ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
312 ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
313
314 /* Cards with density > 2GiB are sector addressed */
315 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
316 mmc_card_set_blockaddr(card);
317 }
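	/*
	 * The threshold above is (2 GiB / 512) = 4194304 sectors.  A 16 GiB
	 * eMMC, for instance, reports SEC_CNT = 33554432 and is therefore
	 * switched to sector (block) addressing, matching the OCR bit 30
	 * handling in mmc_init_card().
	 */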
318
319 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
320 mmc_select_card_type(card);
321
322 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
323 card->ext_csd.raw_erase_timeout_mult =
324 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
325 card->ext_csd.raw_hc_erase_grp_size =
326 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
327 if (card->ext_csd.rev >= 3) {
328 u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
329 card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
330
331 /* EXT_CSD value is in units of 10ms, but we store in ms */
332 card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
333
334 /* Sleep / awake timeout in 100ns units */
335 if (sa_shift > 0 && sa_shift <= 0x17)
336 card->ext_csd.sa_timeout =
337 1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
338 card->ext_csd.erase_group_def =
339 ext_csd[EXT_CSD_ERASE_GROUP_DEF];
340 card->ext_csd.hc_erase_timeout = 300 *
341 ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
342 card->ext_csd.hc_erase_size =
343 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;
344
345 card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];
346
347 /*
348 * There are two boot regions of equal size, defined in
349 * multiples of 128K.
350 */
351 if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
352 for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
353 part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
354 mmc_part_add(card, part_size,
355 EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
356 "boot%d", idx, true,
357 MMC_BLK_DATA_AREA_BOOT);
358 }
359 }
360 }
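	/*
	 * Unit check for the rev >= 3 fields above: HC_ERASE_GRP_SIZE is
	 * given in 512 KiB units, so shifting it left by 10 turns it into a
	 * count of 512-byte sectors (a raw value of 1 becomes 1024 sectors,
	 * i.e. 512 KiB), and the boot size multiplier is in 128 KiB units,
	 * hence the << 17 when registering the two boot partitions.
	 */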
361
362 card->ext_csd.raw_hc_erase_gap_size =
363 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
364 card->ext_csd.raw_sec_trim_mult =
365 ext_csd[EXT_CSD_SEC_TRIM_MULT];
366 card->ext_csd.raw_sec_erase_mult =
367 ext_csd[EXT_CSD_SEC_ERASE_MULT];
368 card->ext_csd.raw_sec_feature_support =
369 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
370 card->ext_csd.raw_trim_mult =
371 ext_csd[EXT_CSD_TRIM_MULT];
372 card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
373 if (card->ext_csd.rev >= 4) {
374 /*
375 * Enhanced area feature support -- check whether the eMMC
376		 * card has the enhanced area enabled. If so, export the enhanced
377		 * area offset and size to user space via a sysfs interface.
378 */
379 if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
380 (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
381 hc_erase_grp_sz =
382 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
383 hc_wp_grp_sz =
384 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
385
386 card->ext_csd.enhanced_area_en = 1;
387 /*
388 * calculate the enhanced data area offset, in bytes
389 */
390 card->ext_csd.enhanced_area_offset =
391 (ext_csd[139] << 24) + (ext_csd[138] << 16) +
392 (ext_csd[137] << 8) + ext_csd[136];
393 if (mmc_card_blockaddr(card))
394 card->ext_csd.enhanced_area_offset <<= 9;
395 /*
396 * calculate the enhanced data area size, in kilobytes
397 */
398 card->ext_csd.enhanced_area_size =
399 (ext_csd[142] << 16) + (ext_csd[141] << 8) +
400 ext_csd[140];
401 card->ext_csd.enhanced_area_size *=
402 (size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
403 card->ext_csd.enhanced_area_size <<= 9;
404 } else {
405 /*
406 * If the enhanced area is not enabled, disable these
407 * device attributes.
408 */
409 card->ext_csd.enhanced_area_offset = -EINVAL;
410 card->ext_csd.enhanced_area_size = -EINVAL;
411 }
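		/*
		 * A worked example of the enhanced area math above, using
		 * assumed raw values: ENH_SIZE_MULT (ext_csd[142..140]) = 2,
		 * HC_ERASE_GRP_SIZE = 1 and HC_WP_GRP_SIZE = 16 give
		 * 2 * 1 * 16 = 32 units, and the final << 9 scales those
		 * 512 KiB units to kilobytes, so enhanced_area_size ends up
		 * as 16384 KiB (16 MiB).  The offset is shifted by 9 only on
		 * block-addressed cards, where ext_csd[139..136] counts
		 * sectors rather than bytes.
		 */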
412
413 /*
414 * General purpose partition feature support --
415		 * If the ext_csd reports sizes for the general purpose partitions,
416		 * record the size, part_cfg and partition name in mmc_part.
417 */
418 if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
419 EXT_CSD_PART_SUPPORT_PART_EN) {
420 if (card->ext_csd.enhanced_area_en != 1) {
421 hc_erase_grp_sz =
422 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
423 hc_wp_grp_sz =
424 ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
425
426 card->ext_csd.enhanced_area_en = 1;
427 }
428
429 for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
430 if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
431 !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
432 !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
433 continue;
434 part_size =
435 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
436 << 16) +
437 (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
438 << 8) +
439 ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
440 part_size *= (size_t)(hc_erase_grp_sz *
441 hc_wp_grp_sz);
442 mmc_part_add(card, part_size << 19,
443 EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
444 "gp%d", idx, false,
445 MMC_BLK_DATA_AREA_GP);
446 }
447 }
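		/*
		 * The general purpose partition sizes follow the same unit
		 * convention: the three GP_SIZE_MULT bytes times
		 * hc_erase_grp_sz * hc_wp_grp_sz yield a count of 512 KiB
		 * units, and the << 19 in the mmc_part_add() call converts
		 * that to bytes (512 * 1024 == 1 << 19).
		 */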
448 card->ext_csd.sec_trim_mult =
449 ext_csd[EXT_CSD_SEC_TRIM_MULT];
450 card->ext_csd.sec_erase_mult =
451 ext_csd[EXT_CSD_SEC_ERASE_MULT];
452 card->ext_csd.sec_feature_support =
453 ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
454 card->ext_csd.trim_timeout = 300 *
455 ext_csd[EXT_CSD_TRIM_MULT];
456
457 /*
458 * Note that the call to mmc_part_add above defaults to read
459 * only. If this default assumption is changed, the call must
460 * take into account the value of boot_locked below.
461 */
462 card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
463 card->ext_csd.boot_ro_lockable = true;
464
465 /* Save power class values */
466 card->ext_csd.raw_pwr_cl_52_195 =
467 ext_csd[EXT_CSD_PWR_CL_52_195];
468 card->ext_csd.raw_pwr_cl_26_195 =
469 ext_csd[EXT_CSD_PWR_CL_26_195];
470 card->ext_csd.raw_pwr_cl_52_360 =
471 ext_csd[EXT_CSD_PWR_CL_52_360];
472 card->ext_csd.raw_pwr_cl_26_360 =
473 ext_csd[EXT_CSD_PWR_CL_26_360];
474 card->ext_csd.raw_pwr_cl_200_195 =
475 ext_csd[EXT_CSD_PWR_CL_200_195];
476 card->ext_csd.raw_pwr_cl_200_360 =
477 ext_csd[EXT_CSD_PWR_CL_200_360];
478 card->ext_csd.raw_pwr_cl_ddr_52_195 =
479 ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
480 card->ext_csd.raw_pwr_cl_ddr_52_360 =
481 ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
482 }
483
484 if (card->ext_csd.rev >= 5) {
485 /* Adjust production date as per JEDEC JESD84-B451 */
486 if (card->cid.year < 2010)
487 card->cid.year += 16;
488
489 /* check whether the eMMC card supports BKOPS */
490 if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
491 card->ext_csd.bkops = 1;
492 card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
493 card->ext_csd.raw_bkops_status =
494 ext_csd[EXT_CSD_BKOPS_STATUS];
495 if (!card->ext_csd.bkops_en)
496 pr_info("%s: BKOPS_EN bit is not set\n",
497 mmc_hostname(card->host));
498 }
499
500 /* check whether the eMMC card supports HPI */
501 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
502 card->ext_csd.hpi = 1;
503 if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
504 card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
505 else
506 card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
507 /*
508 * Indicate the maximum timeout to close
509 * a command interrupted by HPI
510 */
511 card->ext_csd.out_of_int_time =
512 ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
513 }
514
515 card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
516 card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
517
518 /*
519 * RPMB regions are defined in multiples of 128K.
520 */
521 card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
522 if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
523 mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
524 EXT_CSD_PART_CONFIG_ACC_RPMB,
525 "rpmb", 0, false,
526 MMC_BLK_DATA_AREA_RPMB);
527 }
528 }
529
530 card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
531 if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
532 card->erased_byte = 0xFF;
533 else
534 card->erased_byte = 0x0;
535
536 /* eMMC v4.5 or later */
537 if (card->ext_csd.rev >= 6) {
538 card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
539
540 card->ext_csd.generic_cmd6_time = 10 *
541 ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
542 card->ext_csd.power_off_longtime = 10 *
543 ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
544
545 card->ext_csd.cache_size =
546 ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
547 ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
548 ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
549 ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
550
551 if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
552 card->ext_csd.data_sector_size = 4096;
553 else
554 card->ext_csd.data_sector_size = 512;
555
556 if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
557 (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
558 card->ext_csd.data_tag_unit_size =
559 ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
560 (card->ext_csd.data_sector_size);
561 } else {
562 card->ext_csd.data_tag_unit_size = 0;
563 }
564
565 card->ext_csd.max_packed_writes =
566 ext_csd[EXT_CSD_MAX_PACKED_WRITES];
567 card->ext_csd.max_packed_reads =
568 ext_csd[EXT_CSD_MAX_PACKED_READS];
569 } else {
570 card->ext_csd.data_sector_size = 512;
571 }
572
573 out:
574 return err;
575 }
576
577 static inline void mmc_free_ext_csd(u8 *ext_csd)
578 {
579 kfree(ext_csd);
580 }
581
582
583 static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
584 {
585 u8 *bw_ext_csd;
586 int err;
587
588 if (bus_width == MMC_BUS_WIDTH_1)
589 return 0;
590
591 err = mmc_get_ext_csd(card, &bw_ext_csd);
592
593 if (err || bw_ext_csd == NULL) {
594 err = -EINVAL;
595 goto out;
596 }
597
598 /* only compare read only fields */
599 err = !((card->ext_csd.raw_partition_support ==
600 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
601 (card->ext_csd.raw_erased_mem_count ==
602 bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
603 (card->ext_csd.rev ==
604 bw_ext_csd[EXT_CSD_REV]) &&
605 (card->ext_csd.raw_ext_csd_structure ==
606 bw_ext_csd[EXT_CSD_STRUCTURE]) &&
607 (card->ext_csd.raw_card_type ==
608 bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
609 (card->ext_csd.raw_s_a_timeout ==
610 bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
611 (card->ext_csd.raw_hc_erase_gap_size ==
612 bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
613 (card->ext_csd.raw_erase_timeout_mult ==
614 bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
615 (card->ext_csd.raw_hc_erase_grp_size ==
616 bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
617 (card->ext_csd.raw_sec_trim_mult ==
618 bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
619 (card->ext_csd.raw_sec_erase_mult ==
620 bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
621 (card->ext_csd.raw_sec_feature_support ==
622 bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
623 (card->ext_csd.raw_trim_mult ==
624 bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
625 (card->ext_csd.raw_sectors[0] ==
626 bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
627 (card->ext_csd.raw_sectors[1] ==
628 bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
629 (card->ext_csd.raw_sectors[2] ==
630 bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
631 (card->ext_csd.raw_sectors[3] ==
632 bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
633 (card->ext_csd.raw_pwr_cl_52_195 ==
634 bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
635 (card->ext_csd.raw_pwr_cl_26_195 ==
636 bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
637 (card->ext_csd.raw_pwr_cl_52_360 ==
638 bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
639 (card->ext_csd.raw_pwr_cl_26_360 ==
640 bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
641 (card->ext_csd.raw_pwr_cl_200_195 ==
642 bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
643 (card->ext_csd.raw_pwr_cl_200_360 ==
644 bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
645 (card->ext_csd.raw_pwr_cl_ddr_52_195 ==
646 bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
647 (card->ext_csd.raw_pwr_cl_ddr_52_360 ==
648 bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]));
649 if (err)
650 err = -EINVAL;
651
652 out:
653 mmc_free_ext_csd(bw_ext_csd);
654 return err;
655 }
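/*
 * The comparison above is the fallback sanity check for hosts without
 * MMC_CAP_BUS_WIDTH_TEST: the EXT_CSD is re-read at the newly selected bus
 * width and its read-only fields are compared against the copy captured
 * earlier in 1-bit mode; any mismatch is taken as evidence that data does
 * not transfer cleanly at that width, and -EINVAL is returned.
 */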
656
657 MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
658 card->raw_cid[2], card->raw_cid[3]);
659 MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
660 card->raw_csd[2], card->raw_csd[3]);
661 MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
662 MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
663 MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
664 MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
665 MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
666 MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
667 MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
668 MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
669 MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
670 MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
671 MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
672 card->ext_csd.enhanced_area_offset);
673 MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
674 MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
675 MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
676
677 static struct attribute *mmc_std_attrs[] = {
678 &dev_attr_cid.attr,
679 &dev_attr_csd.attr,
680 &dev_attr_date.attr,
681 &dev_attr_erase_size.attr,
682 &dev_attr_preferred_erase_size.attr,
683 &dev_attr_fwrev.attr,
684 &dev_attr_hwrev.attr,
685 &dev_attr_manfid.attr,
686 &dev_attr_name.attr,
687 &dev_attr_oemid.attr,
688 &dev_attr_prv.attr,
689 &dev_attr_serial.attr,
690 &dev_attr_enhanced_area_offset.attr,
691 &dev_attr_enhanced_area_size.attr,
692 &dev_attr_raw_rpmb_size_mult.attr,
693 &dev_attr_rel_sectors.attr,
694 NULL,
695 };
696
697 static struct attribute_group mmc_std_attr_group = {
698 .attrs = mmc_std_attrs,
699 };
700
701 static const struct attribute_group *mmc_attr_groups[] = {
702 &mmc_std_attr_group,
703 NULL,
704 };
705
706 static struct device_type mmc_type = {
707 .groups = mmc_attr_groups,
708 };
709
710 /*
711  * Select the power class for the current bus width.
712  * If a power class is defined for the 4/8-bit bus in the
713  * extended CSD register, select it by issuing the
714  * mmc_switch command.
715 */
716 static int mmc_select_powerclass(struct mmc_card *card,
717 unsigned int bus_width)
718 {
719 int err = 0;
720 unsigned int pwrclass_val = 0;
721 struct mmc_host *host;
722
723 BUG_ON(!card);
724
725 host = card->host;
726 BUG_ON(!host);
727
728 /* Power class selection is supported for versions >= 4.0 */
729 if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
730 return 0;
731
732 /* Power class values are defined only for 4/8 bit bus */
733 if (bus_width == EXT_CSD_BUS_WIDTH_1)
734 return 0;
735
736 switch (1 << host->ios.vdd) {
737 case MMC_VDD_165_195:
738 if (host->ios.clock <= 26000000)
739 pwrclass_val = card->ext_csd.raw_pwr_cl_26_195;
740 else if (host->ios.clock <= 52000000)
741 pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
742 card->ext_csd.raw_pwr_cl_52_195 :
743 card->ext_csd.raw_pwr_cl_ddr_52_195;
744 else if (host->ios.clock <= 200000000)
745 pwrclass_val = card->ext_csd.raw_pwr_cl_200_195;
746 break;
747 case MMC_VDD_27_28:
748 case MMC_VDD_28_29:
749 case MMC_VDD_29_30:
750 case MMC_VDD_30_31:
751 case MMC_VDD_31_32:
752 case MMC_VDD_32_33:
753 case MMC_VDD_33_34:
754 case MMC_VDD_34_35:
755 case MMC_VDD_35_36:
756 if (host->ios.clock <= 26000000)
757 pwrclass_val = card->ext_csd.raw_pwr_cl_26_360;
758 else if (host->ios.clock <= 52000000)
759 pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
760 card->ext_csd.raw_pwr_cl_52_360 :
761 card->ext_csd.raw_pwr_cl_ddr_52_360;
762 else if (host->ios.clock <= 200000000)
763 pwrclass_val = card->ext_csd.raw_pwr_cl_200_360;
764 break;
765 default:
766 pr_warning("%s: Voltage range not supported "
767 "for power class.\n", mmc_hostname(host));
768 return -EINVAL;
769 }
770
771 if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
772 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
773 EXT_CSD_PWR_CL_8BIT_SHIFT;
774 else
775 pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
776 EXT_CSD_PWR_CL_4BIT_SHIFT;
777
778 /* If the power class is different from the default value */
779 if (pwrclass_val > 0) {
780 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
781 EXT_CSD_POWER_CLASS,
782 pwrclass_val,
783 card->ext_csd.generic_cmd6_time);
784 }
785
786 return err;
787 }
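/*
 * A note on the nibble split above: each EXT_CSD power class byte packs the
 * class for an 8-bit bus in its upper four bits and the class for a 4-bit
 * bus in its lower four bits, which the EXT_CSD_PWR_CL_8BIT/4BIT mask and
 * shift pairs separate.  Class 0 is the default, so no CMD6 switch is
 * issued in that case.
 */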
788
789 /*
790  * Select the desired bus width and switch to HS200 mode
791  * if the bus width was set without error.
792 */
793 static int mmc_select_hs200(struct mmc_card *card)
794 {
795 int idx, err = -EINVAL;
796 struct mmc_host *host;
797 static unsigned ext_csd_bits[] = {
798 EXT_CSD_BUS_WIDTH_4,
799 EXT_CSD_BUS_WIDTH_8,
800 };
801 static unsigned bus_widths[] = {
802 MMC_BUS_WIDTH_4,
803 MMC_BUS_WIDTH_8,
804 };
805
806 BUG_ON(!card);
807
808 host = card->host;
809
810 if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
811 host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
812 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
813
814 if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
815 host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
816 err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
817
818	/* If this fails, try again during the next card power cycle */
819 if (err)
820 goto err;
821
822 idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0;
823
824 /*
825	 * Unlike SD, MMC cards don't have a configuration register that reports
826	 * the supported bus width, so either run the bus test command to find
827	 * the supported width, or compare the EXT_CSD values at the current bus
828	 * width against the EXT_CSD values read earlier in 1-bit mode.
829 */
830 for (; idx >= 0; idx--) {
831
832 /*
833		 * If the host is capable of 8-bit transfer, switch the
834		 * device to 8-bit transfer mode. If the mmc switch
835		 * command returns an error, fall back to 4-bit transfer
836		 * mode. On success, set the corresponding bus width
837		 * on the host.
838 */
839 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
840 EXT_CSD_BUS_WIDTH,
841 ext_csd_bits[idx],
842 card->ext_csd.generic_cmd6_time);
843 if (err)
844 continue;
845
846 mmc_set_bus_width(card->host, bus_widths[idx]);
847
848 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
849 err = mmc_compare_ext_csds(card, bus_widths[idx]);
850 else
851 err = mmc_bus_test(card, bus_widths[idx]);
852 if (!err)
853 break;
854 }
855
856 /* switch to HS200 mode if bus width set successfully */
857 if (!err)
858 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
859 EXT_CSD_HS_TIMING, 2, 0);
860 err:
861 return err;
862 }
863
864 /*
865 * Handle the detection and initialisation of a card.
866 *
867 * In the case of a resume, "oldcard" will contain the card
868 * we're trying to reinitialise.
869 */
870 static int mmc_init_card(struct mmc_host *host, u32 ocr,
871 struct mmc_card *oldcard)
872 {
873 struct mmc_card *card;
874 int err, ddr = 0;
875 u32 cid[4];
876 unsigned int max_dtr;
877 u32 rocr;
878 u8 *ext_csd = NULL;
879
880 BUG_ON(!host);
881 WARN_ON(!host->claimed);
882
883 /* Set correct bus mode for MMC before attempting init */
884 if (!mmc_host_is_spi(host))
885 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
886
887 /*
888 * Since we're changing the OCR value, we seem to
889 * need to tell some cards to go back to the idle
890 * state. We wait 1ms to give cards time to
891 * respond.
892	 * mmc_go_idle is needed for eMMC devices that are asleep
893 */
894 mmc_go_idle(host);
895
896 /* The extra bit indicates that we support high capacity */
897 err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
898 if (err)
899 goto err;
900
901 /*
902 * For SPI, enable CRC as appropriate.
903 */
904 if (mmc_host_is_spi(host)) {
905 err = mmc_spi_set_crc(host, use_spi_crc);
906 if (err)
907 goto err;
908 }
909
910 /*
911 * Fetch CID from card.
912 */
913 if (mmc_host_is_spi(host))
914 err = mmc_send_cid(host, cid);
915 else
916 err = mmc_all_send_cid(host, cid);
917 if (err)
918 goto err;
919
920 if (oldcard) {
921 if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
922 err = -ENOENT;
923 goto err;
924 }
925
926 card = oldcard;
927 } else {
928 /*
929 * Allocate card structure.
930 */
931 card = mmc_alloc_card(host, &mmc_type);
932 if (IS_ERR(card)) {
933 err = PTR_ERR(card);
934 goto err;
935 }
936
937 card->type = MMC_TYPE_MMC;
938 card->rca = 1;
939 memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
940 }
941
942 /*
943 * For native busses: set card RCA and quit open drain mode.
944 */
945 if (!mmc_host_is_spi(host)) {
946 err = mmc_set_relative_addr(card);
947 if (err)
948 goto free_card;
949
950 mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
951 }
952
953 if (!oldcard) {
954 /*
955 * Fetch CSD from card.
956 */
957 err = mmc_send_csd(card, card->raw_csd);
958 if (err)
959 goto free_card;
960
961 err = mmc_decode_csd(card);
962 if (err)
963 goto free_card;
964 err = mmc_decode_cid(card);
965 if (err)
966 goto free_card;
967 }
968
969 /*
970 * Select card, as all following commands rely on that.
971 */
972 if (!mmc_host_is_spi(host)) {
973 err = mmc_select_card(card);
974 if (err)
975 goto free_card;
976 }
977
978 if (!oldcard) {
979 /*
980 * Fetch and process extended CSD.
981 */
982
983 err = mmc_get_ext_csd(card, &ext_csd);
984 if (err)
985 goto free_card;
986 err = mmc_read_ext_csd(card, ext_csd);
987 if (err)
988 goto free_card;
989
990		/* If doing byte addressing, check whether sector addressing is
991		 * required instead. Handle the case of <2GB cards needing sector
992		 * addressing. See section 8.1 of JEDEC Standard JESD84-A441;
993		 * the OCR register has bit 30 set for sector addressing.
994 */
995 if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
996 mmc_card_set_blockaddr(card);
997
998 /* Erase size depends on CSD and Extended CSD */
999 mmc_set_erase_size(card);
1000 }
1001
1002 /*
1003	 * If enhanced_area_en is TRUE, the host needs to enable the
1004	 * ERASE_GRP_DEF bit. This bit is lost after every reset or power off.
1005 */
1006 if (card->ext_csd.enhanced_area_en ||
1007 (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
1008 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1009 EXT_CSD_ERASE_GROUP_DEF, 1,
1010 card->ext_csd.generic_cmd6_time);
1011
1012 if (err && err != -EBADMSG)
1013 goto free_card;
1014
1015 if (err) {
1016 err = 0;
1017 /*
1018			 * Just disable the enhanced area offset and size;
1019			 * we will try to enable ERASE_GROUP_DEF again
1020			 * during the next reinitialisation.
1021 */
1022 card->ext_csd.enhanced_area_offset = -EINVAL;
1023 card->ext_csd.enhanced_area_size = -EINVAL;
1024 } else {
1025 card->ext_csd.erase_group_def = 1;
1026 /*
1027			 * ERASE_GRP_DEF was enabled successfully.
1028			 * This affects the erase size, so the erase
1029			 * size needs to be recomputed here.
1030 */
1031 mmc_set_erase_size(card);
1032 }
1033 }
1034
1035 /*
1036 * Ensure eMMC user default partition is enabled
1037 */
1038 if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
1039 card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
1040 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
1041 card->ext_csd.part_config,
1042 card->ext_csd.part_time);
1043 if (err && err != -EBADMSG)
1044 goto free_card;
1045 }
1046
1047 /*
1048 * Enable power_off_notification byte in the ext_csd register
1049 */
1050 if (card->ext_csd.rev >= 6) {
1051 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1052 EXT_CSD_POWER_OFF_NOTIFICATION,
1053 EXT_CSD_POWER_ON,
1054 card->ext_csd.generic_cmd6_time);
1055 if (err && err != -EBADMSG)
1056 goto free_card;
1057
1058 /*
1059 * The err can be -EBADMSG or 0,
1060 * so check for success and update the flag
1061 */
1062 if (!err)
1063 card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
1064 }
1065
1066 /*
1067 * Activate high speed (if supported)
1068 */
1069 if (card->ext_csd.hs_max_dtr != 0) {
1070 err = 0;
1071 if (card->ext_csd.hs_max_dtr > 52000000 &&
1072 host->caps2 & MMC_CAP2_HS200)
1073 err = mmc_select_hs200(card);
1074 else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
1075 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1076 EXT_CSD_HS_TIMING, 1,
1077 card->ext_csd.generic_cmd6_time);
1078
1079 if (err && err != -EBADMSG)
1080 goto free_card;
1081
1082 if (err) {
1083 pr_warning("%s: switch to highspeed failed\n",
1084 mmc_hostname(card->host));
1085 err = 0;
1086 } else {
1087 if (card->ext_csd.hs_max_dtr > 52000000 &&
1088 host->caps2 & MMC_CAP2_HS200) {
1089 mmc_card_set_hs200(card);
1090 mmc_set_timing(card->host,
1091 MMC_TIMING_MMC_HS200);
1092 } else {
1093 mmc_card_set_highspeed(card);
1094 mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
1095 }
1096 }
1097 }
1098
1099 /*
1100 * Compute bus speed.
1101 */
1102 max_dtr = (unsigned int)-1;
1103
1104 if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
1105 if (max_dtr > card->ext_csd.hs_max_dtr)
1106 max_dtr = card->ext_csd.hs_max_dtr;
1107 if (mmc_card_highspeed(card) && (max_dtr > 52000000))
1108 max_dtr = 52000000;
1109 } else if (max_dtr > card->csd.max_dtr) {
1110 max_dtr = card->csd.max_dtr;
1111 }
1112
1113 mmc_set_clock(host, max_dtr);
1114
1115 /*
1116 * Indicate DDR mode (if supported).
1117 */
1118 if (mmc_card_highspeed(card)) {
1119 if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V)
1120 && ((host->caps & (MMC_CAP_1_8V_DDR |
1121 MMC_CAP_UHS_DDR50))
1122 == (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50)))
1123 ddr = MMC_1_8V_DDR_MODE;
1124 else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
1125 && ((host->caps & (MMC_CAP_1_2V_DDR |
1126 MMC_CAP_UHS_DDR50))
1127 == (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50)))
1128 ddr = MMC_1_2V_DDR_MODE;
1129 }
1130
1131 /*
1132 * Indicate HS200 SDR mode (if supported).
1133 */
1134 if (mmc_card_hs200(card)) {
1135 u32 ext_csd_bits;
1136 u32 bus_width = card->host->ios.bus_width;
1137
1138 /*
1139		 * For devices supporting HS200 mode, the bus width has
1140		 * to be set before executing the tuning function. If it
1141		 * is not set first, the device will respond with CRC errors
1142		 * for responses on the CMD line. So for HS200 the
1143		 * sequence is:
1144		 * 1. set the bus width to 4-bit / 8-bit (1-bit is not supported)
1145		 * 2. switch to HS200 mode
1146		 * 3. set the clock to > 52 MHz and <= 200 MHz, and
1147 * 4. execute tuning for HS200
1148 */
1149 if ((host->caps2 & MMC_CAP2_HS200) &&
1150 card->host->ops->execute_tuning) {
1151 mmc_host_clk_hold(card->host);
1152 err = card->host->ops->execute_tuning(card->host,
1153 MMC_SEND_TUNING_BLOCK_HS200);
1154 mmc_host_clk_release(card->host);
1155 }
1156 if (err) {
1157 pr_warning("%s: tuning execution failed\n",
1158 mmc_hostname(card->host));
1159 goto err;
1160 }
1161
1162 ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
1163 EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
1164 err = mmc_select_powerclass(card, ext_csd_bits);
1165 if (err)
1166 pr_warning("%s: power class selection to bus width %d"
1167 " failed\n", mmc_hostname(card->host),
1168 1 << bus_width);
1169 }
1170
1171 /*
1172 * Activate wide bus and DDR (if supported).
1173 */
1174 if (!mmc_card_hs200(card) &&
1175 (card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
1176 (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
1177 static unsigned ext_csd_bits[][2] = {
1178 { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
1179 { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
1180 { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
1181 };
1182 static unsigned bus_widths[] = {
1183 MMC_BUS_WIDTH_8,
1184 MMC_BUS_WIDTH_4,
1185 MMC_BUS_WIDTH_1
1186 };
1187 unsigned idx, bus_width = 0;
1188
1189 if (host->caps & MMC_CAP_8_BIT_DATA)
1190 idx = 0;
1191 else
1192 idx = 1;
1193 for (; idx < ARRAY_SIZE(bus_widths); idx++) {
1194 bus_width = bus_widths[idx];
1195 if (bus_width == MMC_BUS_WIDTH_1)
1196 ddr = 0; /* no DDR for 1-bit width */
1197 err = mmc_select_powerclass(card, ext_csd_bits[idx][0]);
1198 if (err)
1199 pr_warning("%s: power class selection to "
1200 "bus width %d failed\n",
1201 mmc_hostname(card->host),
1202 1 << bus_width);
1203
1204 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1205 EXT_CSD_BUS_WIDTH,
1206 ext_csd_bits[idx][0],
1207 card->ext_csd.generic_cmd6_time);
1208 if (!err) {
1209 mmc_set_bus_width(card->host, bus_width);
1210
1211 /*
1212				 * If the controller can't run the bus width test,
1213				 * compare the ext_csd previously read in 1-bit mode
1214				 * against the ext_csd read at the new bus width.
1215 */
1216 if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
1217 err = mmc_compare_ext_csds(card,
1218 bus_width);
1219 else
1220 err = mmc_bus_test(card, bus_width);
1221 if (!err)
1222 break;
1223 }
1224 }
1225
1226 if (!err && ddr) {
1227 err = mmc_select_powerclass(card, ext_csd_bits[idx][1]);
1228 if (err)
1229 pr_warning("%s: power class selection to "
1230 "bus width %d ddr %d failed\n",
1231 mmc_hostname(card->host),
1232 1 << bus_width, ddr);
1233
1234 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1235 EXT_CSD_BUS_WIDTH,
1236 ext_csd_bits[idx][1],
1237 card->ext_csd.generic_cmd6_time);
1238 }
1239 if (err) {
1240 pr_warning("%s: switch to bus width %d ddr %d "
1241 "failed\n", mmc_hostname(card->host),
1242 1 << bus_width, ddr);
1243 goto free_card;
1244 } else if (ddr) {
1245 /*
1246 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
1247 * signaling.
1248 *
1249 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
1250 *
1251 * 1.8V vccq at 3.3V core voltage (vcc) is not required
1252 * in the JEDEC spec for DDR.
1253 *
1254 * Do not force change in vccq since we are obviously
1255 * working and no change to vccq is needed.
1256 *
1257 * WARNING: eMMC rules are NOT the same as SD DDR
1258 */
1259 if (ddr == MMC_1_2V_DDR_MODE) {
1260 err = __mmc_set_signal_voltage(host,
1261 MMC_SIGNAL_VOLTAGE_120);
1262 if (err)
1263 goto err;
1264 }
1265 mmc_card_set_ddr_mode(card);
1266 mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50);
1267 mmc_set_bus_width(card->host, bus_width);
1268 }
1269 }
1270
1271 /*
1272 * Enable HPI feature (if supported)
1273 */
1274 if (card->ext_csd.hpi) {
1275 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1276 EXT_CSD_HPI_MGMT, 1,
1277 card->ext_csd.generic_cmd6_time);
1278 if (err && err != -EBADMSG)
1279 goto free_card;
1280 if (err) {
1281 pr_warning("%s: Enabling HPI failed\n",
1282 mmc_hostname(card->host));
1283 err = 0;
1284 } else
1285 card->ext_csd.hpi_en = 1;
1286 }
1287
1288 /*
1289	 * If the cache size is greater than 0, the card has
1290	 * a cache that can be turned on.
1291 */
1292 if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
1293 card->ext_csd.cache_size > 0) {
1294 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1295 EXT_CSD_CACHE_CTRL, 1,
1296 card->ext_csd.generic_cmd6_time);
1297 if (err && err != -EBADMSG)
1298 goto free_card;
1299
1300 /*
1301		 * The cache was turned on successfully only if there was no error.
1302 */
1303 if (err) {
1304 pr_warning("%s: Cache is supported, "
1305 "but failed to turn on (%d)\n",
1306 mmc_hostname(card->host), err);
1307 card->ext_csd.cache_ctrl = 0;
1308 err = 0;
1309 } else {
1310 card->ext_csd.cache_ctrl = 1;
1311 }
1312 }
1313
1314 /*
1315	 * The mandatory minimum values for packed commands are
1316	 * defined as read: 5, write: 3.
1317 */
1318 if (card->ext_csd.max_packed_writes >= 3 &&
1319 card->ext_csd.max_packed_reads >= 5 &&
1320 host->caps2 & MMC_CAP2_PACKED_CMD) {
1321 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1322 EXT_CSD_EXP_EVENTS_CTRL,
1323 EXT_CSD_PACKED_EVENT_EN,
1324 card->ext_csd.generic_cmd6_time);
1325 if (err && err != -EBADMSG)
1326 goto free_card;
1327 if (err) {
1328 pr_warn("%s: Enabling packed event failed\n",
1329 mmc_hostname(card->host));
1330 card->ext_csd.packed_event_en = 0;
1331 err = 0;
1332 } else {
1333 card->ext_csd.packed_event_en = 1;
1334 }
1335 }
1336
1337 if (!oldcard)
1338 host->card = card;
1339
1340 mmc_free_ext_csd(ext_csd);
1341 return 0;
1342
1343 free_card:
1344 if (!oldcard)
1345 mmc_remove_card(card);
1346 err:
1347 mmc_free_ext_csd(ext_csd);
1348
1349 return err;
1350 }
1351
1352 static int mmc_can_sleep(struct mmc_card *card)
1353 {
1354 return (card && card->ext_csd.rev >= 3);
1355 }
1356
1357 static int mmc_sleep(struct mmc_host *host)
1358 {
1359 struct mmc_command cmd = {0};
1360 struct mmc_card *card = host->card;
1361 int err;
1362
1363 if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
1364 return 0;
1365
1366 err = mmc_deselect_cards(host);
1367 if (err)
1368 return err;
1369
1370 cmd.opcode = MMC_SLEEP_AWAKE;
1371 cmd.arg = card->rca << 16;
1372 cmd.arg |= 1 << 15;
1373
1374 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1375 err = mmc_wait_for_cmd(host, &cmd, 0);
1376 if (err)
1377 return err;
1378
1379 /*
1380	 * If the host does not wait while the card signals busy, then we will
1381	 * have to wait out the sleep/awake timeout. Note, we cannot use the
1382 * SEND_STATUS command to poll the status because that command (and most
1383 * others) is invalid while the card sleeps.
1384 */
1385 if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
1386 mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000));
1387
1388 return err;
1389 }
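/*
 * The fallback delay above converts sa_timeout from the EXT_CSD's 100 ns
 * units to milliseconds, hence the divide by 10000 (rounded up).  For
 * example, an S_A_TIMEOUT of 0x11 decodes to sa_timeout = 1 << 0x11 =
 * 131072, roughly 13.1 ms, so mmc_delay() waits 14 ms.
 */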
1390
1391 static int mmc_can_poweroff_notify(const struct mmc_card *card)
1392 {
1393 return card &&
1394 mmc_card_mmc(card) &&
1395 (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
1396 }
1397
1398 static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
1399 {
1400 unsigned int timeout = card->ext_csd.generic_cmd6_time;
1401 int err;
1402
1403 /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
1404 if (notify_type == EXT_CSD_POWER_OFF_LONG)
1405 timeout = card->ext_csd.power_off_longtime;
1406
1407 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1408 EXT_CSD_POWER_OFF_NOTIFICATION,
1409 notify_type, timeout);
1410 if (err)
1411 pr_err("%s: Power Off Notification timed out, %u\n",
1412 mmc_hostname(card->host), timeout);
1413
1414 /* Disable the power off notification after the switch operation. */
1415 card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
1416
1417 return err;
1418 }
1419
1420 /*
1421 * Host is being removed. Free up the current card.
1422 */
1423 static void mmc_remove(struct mmc_host *host)
1424 {
1425 BUG_ON(!host);
1426 BUG_ON(!host->card);
1427
1428 mmc_remove_card(host->card);
1429 host->card = NULL;
1430 }
1431
1432 /*
1433 * Card detection - card is alive.
1434 */
1435 static int mmc_alive(struct mmc_host *host)
1436 {
1437 return mmc_send_status(host->card, NULL);
1438 }
1439
1440 /*
1441 * Card detection callback from host.
1442 */
1443 static void mmc_detect(struct mmc_host *host)
1444 {
1445 int err;
1446
1447 BUG_ON(!host);
1448 BUG_ON(!host->card);
1449
1450 mmc_get_card(host->card);
1451
1452 /*
1453 * Just check if our card has been removed.
1454 */
1455 err = _mmc_detect_card_removed(host);
1456
1457 mmc_put_card(host->card);
1458
1459 if (err) {
1460 mmc_remove(host);
1461
1462 mmc_claim_host(host);
1463 mmc_detach_bus(host);
1464 mmc_power_off(host);
1465 mmc_release_host(host);
1466 }
1467 }
1468
1469 static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
1470 {
1471 int err = 0;
1472 unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
1473 EXT_CSD_POWER_OFF_LONG;
1474
1475 BUG_ON(!host);
1476 BUG_ON(!host->card);
1477
1478 mmc_claim_host(host);
1479
1480 if (mmc_card_doing_bkops(host->card)) {
1481 err = mmc_stop_bkops(host->card);
1482 if (err)
1483 goto out;
1484 }
1485
1486 err = mmc_cache_ctrl(host, 0);
1487 if (err)
1488 goto out;
1489
1490 if (mmc_can_poweroff_notify(host->card) &&
1491 ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
1492 err = mmc_poweroff_notify(host->card, notify_type);
1493 else if (mmc_can_sleep(host->card))
1494 err = mmc_sleep(host);
1495 else if (!mmc_host_is_spi(host))
1496 err = mmc_deselect_cards(host);
1497 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
1498
1499 if (!err)
1500 mmc_power_off(host);
1501 out:
1502 mmc_release_host(host);
1503 return err;
1504 }
1505
1506 /*
1507 * Suspend callback from host.
1508 */
1509 static int mmc_suspend(struct mmc_host *host)
1510 {
1511 return _mmc_suspend(host, true);
1512 }
1513
1514 /*
1515 * Shutdown callback
1516 */
1517 static int mmc_shutdown(struct mmc_host *host)
1518 {
1519 return _mmc_suspend(host, false);
1520 }
1521
1522 /*
1523 * Resume callback from host.
1524 *
1525 * This function tries to determine if the same card is still present
1526 * and, if so, restore all state to it.
1527 */
1528 static int mmc_resume(struct mmc_host *host)
1529 {
1530 int err;
1531
1532 BUG_ON(!host);
1533 BUG_ON(!host->card);
1534
1535 mmc_claim_host(host);
1536 mmc_power_up(host);
1537 mmc_select_voltage(host, host->ocr);
1538 err = mmc_init_card(host, host->ocr, host->card);
1539 mmc_release_host(host);
1540
1541 return err;
1542 }
1543
1544
1545 /*
1546 * Callback for runtime_suspend.
1547 */
1548 static int mmc_runtime_suspend(struct mmc_host *host)
1549 {
1550 int err;
1551
1552 if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
1553 return 0;
1554
1555 mmc_claim_host(host);
1556
1557 err = mmc_suspend(host);
1558 if (err) {
1559		pr_err("%s: error %d doing aggressive suspend\n",
1560 mmc_hostname(host), err);
1561 goto out;
1562 }
1563 mmc_power_off(host);
1564
1565 out:
1566 mmc_release_host(host);
1567 return err;
1568 }
1569
1570 /*
1571 * Callback for runtime_resume.
1572 */
1573 static int mmc_runtime_resume(struct mmc_host *host)
1574 {
1575 int err;
1576
1577 if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
1578 return 0;
1579
1580 mmc_claim_host(host);
1581
1582 mmc_power_up(host);
1583 err = mmc_resume(host);
1584 if (err)
1585		pr_err("%s: error %d doing aggressive resume\n",
1586 mmc_hostname(host), err);
1587
1588 mmc_release_host(host);
1589 return 0;
1590 }
1591
1592 static int mmc_power_restore(struct mmc_host *host)
1593 {
1594 int ret;
1595
1596 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
1597 mmc_claim_host(host);
1598 ret = mmc_init_card(host, host->ocr, host->card);
1599 mmc_release_host(host);
1600
1601 return ret;
1602 }
1603
1604 static const struct mmc_bus_ops mmc_ops = {
1605 .remove = mmc_remove,
1606 .detect = mmc_detect,
1607 .suspend = NULL,
1608 .resume = NULL,
1609 .power_restore = mmc_power_restore,
1610 .alive = mmc_alive,
1611 .shutdown = mmc_shutdown,
1612 };
1613
1614 static const struct mmc_bus_ops mmc_ops_unsafe = {
1615 .remove = mmc_remove,
1616 .detect = mmc_detect,
1617 .suspend = mmc_suspend,
1618 .resume = mmc_resume,
1619 .runtime_suspend = mmc_runtime_suspend,
1620 .runtime_resume = mmc_runtime_resume,
1621 .power_restore = mmc_power_restore,
1622 .alive = mmc_alive,
1623 .shutdown = mmc_shutdown,
1624 };
1625
1626 static void mmc_attach_bus_ops(struct mmc_host *host)
1627 {
1628 const struct mmc_bus_ops *bus_ops;
1629
1630 if (!mmc_card_is_removable(host))
1631 bus_ops = &mmc_ops_unsafe;
1632 else
1633 bus_ops = &mmc_ops;
1634 mmc_attach_bus(host, bus_ops);
1635 }
1636
1637 /*
1638 * Starting point for MMC card init.
1639 */
1640 int mmc_attach_mmc(struct mmc_host *host)
1641 {
1642 int err;
1643 u32 ocr;
1644
1645 BUG_ON(!host);
1646 WARN_ON(!host->claimed);
1647
1648 /* Set correct bus mode for MMC before attempting attach */
1649 if (!mmc_host_is_spi(host))
1650 mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
1651
1652 err = mmc_send_op_cond(host, 0, &ocr);
1653 if (err)
1654 return err;
1655
1656 mmc_attach_bus_ops(host);
1657 if (host->ocr_avail_mmc)
1658 host->ocr_avail = host->ocr_avail_mmc;
1659
1660 /*
1661 * We need to get OCR a different way for SPI.
1662 */
1663 if (mmc_host_is_spi(host)) {
1664 err = mmc_spi_read_ocr(host, 1, &ocr);
1665 if (err)
1666 goto err;
1667 }
1668
1669 /*
1670 * Sanity check the voltages that the card claims to
1671 * support.
1672 */
1673 if (ocr & 0x7F) {
1674 pr_warning("%s: card claims to support voltages "
1675 "below the defined range. These will be ignored.\n",
1676 mmc_hostname(host));
1677 ocr &= ~0x7F;
1678 }
1679
1680 host->ocr = mmc_select_voltage(host, ocr);
1681
1682 /*
1683 * Can we support the voltage of the card?
1684 */
1685 if (!host->ocr) {
1686 err = -EINVAL;
1687 goto err;
1688 }
1689
1690 /*
1691 * Detect and init the card.
1692 */
1693 err = mmc_init_card(host, host->ocr, NULL);
1694 if (err)
1695 goto err;
1696
1697 mmc_release_host(host);
1698 err = mmc_add_card(host->card);
1699 mmc_claim_host(host);
1700 if (err)
1701 goto remove_card;
1702
1703 return 0;
1704
1705 remove_card:
1706 mmc_release_host(host);
1707 mmc_remove_card(host->card);
1708 mmc_claim_host(host);
1709 host->card = NULL;
1710 err:
1711 mmc_detach_bus(host);
1712
1713 pr_err("%s: error %d whilst initialising MMC card\n",
1714 mmc_hostname(host), err);
1715
1716 return err;
1717 }