mmc: core: Initial support for MMC power sequences
1 /*
2 * linux/drivers/mmc/core/core.c
3 *
4 * Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 * SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
6 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
7 * MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/interrupt.h>
16 #include <linux/completion.h>
17 #include <linux/device.h>
18 #include <linux/delay.h>
19 #include <linux/pagemap.h>
20 #include <linux/err.h>
21 #include <linux/leds.h>
22 #include <linux/scatterlist.h>
23 #include <linux/log2.h>
24 #include <linux/regulator/consumer.h>
25 #include <linux/pm_runtime.h>
26 #include <linux/pm_wakeup.h>
27 #include <linux/suspend.h>
28 #include <linux/fault-inject.h>
29 #include <linux/random.h>
30 #include <linux/slab.h>
31 #include <linux/of.h>
32
33 #include <linux/mmc/card.h>
34 #include <linux/mmc/host.h>
35 #include <linux/mmc/mmc.h>
36 #include <linux/mmc/sd.h>
37 #include <linux/mmc/slot-gpio.h>
38
39 #include "core.h"
40 #include "bus.h"
41 #include "host.h"
42 #include "sdio_bus.h"
43 #include "pwrseq.h"
44
45 #include "mmc_ops.h"
46 #include "sd_ops.h"
47 #include "sdio_ops.h"
48
49 /* If the device is not responding */
50 #define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
51
52 /*
53 * Background operations can take a long time, depending on the housekeeping
54 * operations the card has to perform.
55 */
56 #define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */
57
58 static struct workqueue_struct *workqueue;
59 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
60
61 /*
62 * Enabling software CRCs on the data blocks can be a significant (30%)
63 * performance cost, and for other reasons may not always be desired.
64 * So we allow it to be disabled.
65 */
66 bool use_spi_crc = 1;
67 module_param(use_spi_crc, bool, 0);
68
69 /*
70 * Internal function. Schedule delayed work in the MMC work queue.
71 */
72 static int mmc_schedule_delayed_work(struct delayed_work *work,
73 unsigned long delay)
74 {
75 return queue_delayed_work(workqueue, work, delay);
76 }
77
78 /*
79 * Internal function. Flush all scheduled work from the MMC work queue.
80 */
81 static void mmc_flush_scheduled_work(void)
82 {
83 flush_workqueue(workqueue);
84 }
85
86 #ifdef CONFIG_FAIL_MMC_REQUEST
87
88 /*
89 * Internal function. Inject random data errors.
90 * If mmc_data is NULL no errors are injected.
91 */
92 static void mmc_should_fail_request(struct mmc_host *host,
93 struct mmc_request *mrq)
94 {
95 struct mmc_command *cmd = mrq->cmd;
96 struct mmc_data *data = mrq->data;
97 static const int data_errors[] = {
98 -ETIMEDOUT,
99 -EILSEQ,
100 -EIO,
101 };
102
103 if (!data)
104 return;
105
106 if (cmd->error || data->error ||
107 !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
108 return;
109
110 data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
111 data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
112 }
113
114 #else /* CONFIG_FAIL_MMC_REQUEST */
115
116 static inline void mmc_should_fail_request(struct mmc_host *host,
117 struct mmc_request *mrq)
118 {
119 }
120
121 #endif /* CONFIG_FAIL_MMC_REQUEST */
122
123 /**
124 * mmc_request_done - finish processing an MMC request
125 * @host: MMC host which completed request
126 * @mrq: MMC request which completed
127 *
128 * MMC drivers should call this function when they have completed
129 * their processing of a request.
130 */
131 void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
132 {
133 struct mmc_command *cmd = mrq->cmd;
134 int err = cmd->error;
135
136 if (err && cmd->retries && mmc_host_is_spi(host)) {
137 if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
138 cmd->retries = 0;
139 }
140
141 if (err && cmd->retries && !mmc_card_removed(host->card)) {
142 /*
143 * Request starter must handle retries - see
144 * mmc_wait_for_req_done().
145 */
146 if (mrq->done)
147 mrq->done(mrq);
148 } else {
149 mmc_should_fail_request(host, mrq);
150
151 led_trigger_event(host->led, LED_OFF);
152
153 if (mrq->sbc) {
154 pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
155 mmc_hostname(host), mrq->sbc->opcode,
156 mrq->sbc->error,
157 mrq->sbc->resp[0], mrq->sbc->resp[1],
158 mrq->sbc->resp[2], mrq->sbc->resp[3]);
159 }
160
161 pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
162 mmc_hostname(host), cmd->opcode, err,
163 cmd->resp[0], cmd->resp[1],
164 cmd->resp[2], cmd->resp[3]);
165
166 if (mrq->data) {
167 pr_debug("%s: %d bytes transferred: %d\n",
168 mmc_hostname(host),
169 mrq->data->bytes_xfered, mrq->data->error);
170 }
171
172 if (mrq->stop) {
173 pr_debug("%s: (CMD%u): %d: %08x %08x %08x %08x\n",
174 mmc_hostname(host), mrq->stop->opcode,
175 mrq->stop->error,
176 mrq->stop->resp[0], mrq->stop->resp[1],
177 mrq->stop->resp[2], mrq->stop->resp[3]);
178 }
179
180 if (mrq->done)
181 mrq->done(mrq);
182
183 mmc_host_clk_release(host);
184 }
185 }
186
187 EXPORT_SYMBOL(mmc_request_done);
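/*
 * Usage sketch (not part of this file): a host controller driver typically
 * calls mmc_request_done() from its interrupt handler or completion tasklet
 * once the controller has finished the transfer. "foo_host" and
 * foo_decode_error() below are hypothetical driver glue:
 *
 *	static void foo_finish_request(struct foo_host *fh)
 *	{
 *		struct mmc_request *mrq = fh->mrq;
 *
 *		fh->mrq = NULL;
 *		mrq->cmd->error = foo_decode_error(fh);
 *		mmc_request_done(fh->mmc, mrq);
 *	}
 */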
188
189 static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
190 {
191 #ifdef CONFIG_MMC_DEBUG
192 unsigned int i, sz;
193 struct scatterlist *sg;
194 #endif
195 if (mmc_card_removed(host->card))
196 return -ENOMEDIUM;
197
198 if (mrq->sbc) {
199 pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
200 mmc_hostname(host), mrq->sbc->opcode,
201 mrq->sbc->arg, mrq->sbc->flags);
202 }
203
204 pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
205 mmc_hostname(host), mrq->cmd->opcode,
206 mrq->cmd->arg, mrq->cmd->flags);
207
208 if (mrq->data) {
209 pr_debug("%s: blksz %d blocks %d flags %08x "
210 "tsac %d ms nsac %d\n",
211 mmc_hostname(host), mrq->data->blksz,
212 mrq->data->blocks, mrq->data->flags,
213 mrq->data->timeout_ns / 1000000,
214 mrq->data->timeout_clks);
215 }
216
217 if (mrq->stop) {
218 pr_debug("%s: CMD%u arg %08x flags %08x\n",
219 mmc_hostname(host), mrq->stop->opcode,
220 mrq->stop->arg, mrq->stop->flags);
221 }
222
223 WARN_ON(!host->claimed);
224
225 mrq->cmd->error = 0;
226 mrq->cmd->mrq = mrq;
227 if (mrq->sbc) {
228 mrq->sbc->error = 0;
229 mrq->sbc->mrq = mrq;
230 }
231 if (mrq->data) {
232 BUG_ON(mrq->data->blksz > host->max_blk_size);
233 BUG_ON(mrq->data->blocks > host->max_blk_count);
234 BUG_ON(mrq->data->blocks * mrq->data->blksz >
235 host->max_req_size);
236
237 #ifdef CONFIG_MMC_DEBUG
238 sz = 0;
239 for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
240 sz += sg->length;
241 BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
242 #endif
243
244 mrq->cmd->data = mrq->data;
245 mrq->data->error = 0;
246 mrq->data->mrq = mrq;
247 if (mrq->stop) {
248 mrq->data->stop = mrq->stop;
249 mrq->stop->error = 0;
250 mrq->stop->mrq = mrq;
251 }
252 }
253 mmc_host_clk_hold(host);
254 led_trigger_event(host->led, LED_FULL);
255 host->ops->request(host, mrq);
256
257 return 0;
258 }
259
260 /**
261 * mmc_start_bkops - start BKOPS for supported cards
262 * @card: MMC card to start BKOPS
263 * @from_exception: A flag to indicate if this function was
264 * called due to an exception raised by the card
265 *
266 * Start background operations whenever requested.
267 * When the urgent BKOPS bit is set in an R1 command response,
268 * background operations should be started immediately.
269 */
270 void mmc_start_bkops(struct mmc_card *card, bool from_exception)
271 {
272 int err;
273 int timeout;
274 bool use_busy_signal;
275
276 BUG_ON(!card);
277
278 if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card))
279 return;
280
281 err = mmc_read_bkops_status(card);
282 if (err) {
283 pr_err("%s: Failed to read bkops status: %d\n",
284 mmc_hostname(card->host), err);
285 return;
286 }
287
288 if (!card->ext_csd.raw_bkops_status)
289 return;
290
291 if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
292 from_exception)
293 return;
294
295 mmc_claim_host(card->host);
296 if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
297 timeout = MMC_BKOPS_MAX_TIMEOUT;
298 use_busy_signal = true;
299 } else {
300 timeout = 0;
301 use_busy_signal = false;
302 }
303
304 err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
305 EXT_CSD_BKOPS_START, 1, timeout,
306 use_busy_signal, true, false);
307 if (err) {
308 pr_warn("%s: Error %d starting bkops\n",
309 mmc_hostname(card->host), err);
310 goto out;
311 }
312
313 /*
314 * For urgent BKOPS status (LEVEL_2 and higher), BKOPS is
315 * executed synchronously; otherwise the operation is left
316 * in progress
317 */
318 if (!use_busy_signal)
319 mmc_card_set_doing_bkops(card);
320 out:
321 mmc_release_host(card->host);
322 }
323 EXPORT_SYMBOL(mmc_start_bkops);
324
325 /*
326 * mmc_wait_data_done() - done callback for data request
327 * @mrq: done data request
328 *
329 * Wakes up the mmc context; passed as the done callback to the host controller driver
330 */
331 static void mmc_wait_data_done(struct mmc_request *mrq)
332 {
333 mrq->host->context_info.is_done_rcv = true;
334 wake_up_interruptible(&mrq->host->context_info.wait);
335 }
336
337 static void mmc_wait_done(struct mmc_request *mrq)
338 {
339 complete(&mrq->completion);
340 }
341
342 /*
343 * __mmc_start_data_req() - start a data request
344 * @host: MMC host to start the request
345 * @mrq: data request to start
346 *
347 * Sets the done callback to be called when the request is completed by the
348 * card, then starts the data request execution.
349 */
350 static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
351 {
352 int err;
353
354 mrq->done = mmc_wait_data_done;
355 mrq->host = host;
356
357 err = mmc_start_request(host, mrq);
358 if (err) {
359 mrq->cmd->error = err;
360 mmc_wait_data_done(mrq);
361 }
362
363 return err;
364 }
365
366 static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
367 {
368 int err;
369
370 init_completion(&mrq->completion);
371 mrq->done = mmc_wait_done;
372
373 err = mmc_start_request(host, mrq);
374 if (err) {
375 mrq->cmd->error = err;
376 complete(&mrq->completion);
377 }
378
379 return err;
380 }
381
382 /*
383 * mmc_wait_for_data_req_done() - wait for the request to complete
384 * @host: MMC host to prepare the command.
385 * @mrq: MMC request to wait for
386 *
387 * Blocks the MMC context until the host controller acks the end of data
388 * request execution, or a new request notification arrives from the block layer.
389 * Handles command retries.
390 *
391 * Returns enum mmc_blk_status after checking errors.
392 */
393 static int mmc_wait_for_data_req_done(struct mmc_host *host,
394 struct mmc_request *mrq,
395 struct mmc_async_req *next_req)
396 {
397 struct mmc_command *cmd;
398 struct mmc_context_info *context_info = &host->context_info;
399 int err;
400 unsigned long flags;
401
402 while (1) {
403 wait_event_interruptible(context_info->wait,
404 (context_info->is_done_rcv ||
405 context_info->is_new_req));
406 spin_lock_irqsave(&context_info->lock, flags);
407 context_info->is_waiting_last_req = false;
408 spin_unlock_irqrestore(&context_info->lock, flags);
409 if (context_info->is_done_rcv) {
410 context_info->is_done_rcv = false;
411 context_info->is_new_req = false;
412 cmd = mrq->cmd;
413
414 if (!cmd->error || !cmd->retries ||
415 mmc_card_removed(host->card)) {
416 err = host->areq->err_check(host->card,
417 host->areq);
418 break; /* return err */
419 } else {
420 pr_info("%s: req failed (CMD%u): %d, retrying...\n",
421 mmc_hostname(host),
422 cmd->opcode, cmd->error);
423 cmd->retries--;
424 cmd->error = 0;
425 host->ops->request(host, mrq);
426 continue; /* wait for done/new event again */
427 }
428 } else if (context_info->is_new_req) {
429 context_info->is_new_req = false;
430 if (!next_req) {
431 err = MMC_BLK_NEW_REQUEST;
432 break; /* return err */
433 }
434 }
435 }
436 return err;
437 }
438
439 static void mmc_wait_for_req_done(struct mmc_host *host,
440 struct mmc_request *mrq)
441 {
442 struct mmc_command *cmd;
443
444 while (1) {
445 wait_for_completion(&mrq->completion);
446
447 cmd = mrq->cmd;
448
449 /*
450 * If host has timed out waiting for the sanitize
451 * to complete, the card might still be in the programming state,
452 * so let's try to bring the card out of programming
453 * state.
454 */
455 if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
456 if (!mmc_interrupt_hpi(host->card)) {
457 pr_warn("%s: %s: Interrupted sanitize\n",
458 mmc_hostname(host), __func__);
459 cmd->error = 0;
460 break;
461 } else {
462 pr_err("%s: %s: Failed to interrupt sanitize\n",
463 mmc_hostname(host), __func__);
464 }
465 }
466 if (!cmd->error || !cmd->retries ||
467 mmc_card_removed(host->card))
468 break;
469
470 pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
471 mmc_hostname(host), cmd->opcode, cmd->error);
472 cmd->retries--;
473 cmd->error = 0;
474 host->ops->request(host, mrq);
475 }
476 }
477
478 /**
479 * mmc_pre_req - Prepare for a new request
480 * @host: MMC host to prepare command
481 * @mrq: MMC request to prepare for
482 * @is_first_req: true if there is no previously started request
483 * that may run in parallel to this call, otherwise false
484 *
485 * mmc_pre_req() is called prior to mmc_start_req() to let the
486 * host prepare for the new request. Preparation of a request may be
487 * performed while another request is running on the host.
488 */
489 static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
490 bool is_first_req)
491 {
492 if (host->ops->pre_req) {
493 mmc_host_clk_hold(host);
494 host->ops->pre_req(host, mrq, is_first_req);
495 mmc_host_clk_release(host);
496 }
497 }
498
499 /**
500 * mmc_post_req - Post process a completed request
501 * @host: MMC host to post process command
502 * @mrq: MMC request to post process for
503 * @err: Error, if non zero, clean up any resources made in pre_req
504 *
505 * Let the host post process a completed request. Post processing of
506 * a request may be performed while another request is running.
507 */
508 static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
509 int err)
510 {
511 if (host->ops->post_req) {
512 mmc_host_clk_hold(host);
513 host->ops->post_req(host, mrq, err);
514 mmc_host_clk_release(host);
515 }
516 }
517
518 /**
519 * mmc_start_req - start a non-blocking request
520 * @host: MMC host to start command
521 * @areq: async request to start
522 * @error: out parameter returns 0 for success, otherwise non zero
523 *
524 * Start a new MMC custom command request for a host.
525 * If there is an ongoing async request, wait for completion
526 * of that request, then start the new one and return.
527 * Does not wait for the new request to complete.
528 *
529 * Returns the completed request, or NULL if none has completed.
530 * Waits for the previously started, ongoing request to complete and
531 * returns the completed request. If there is no ongoing request, NULL
532 * is returned without waiting. NULL is not an error condition.
533 */
534 struct mmc_async_req *mmc_start_req(struct mmc_host *host,
535 struct mmc_async_req *areq, int *error)
536 {
537 int err = 0;
538 int start_err = 0;
539 struct mmc_async_req *data = host->areq;
540
541 /* Prepare a new request */
542 if (areq)
543 mmc_pre_req(host, areq->mrq, !host->areq);
544
545 if (host->areq) {
546 err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
547 if (err == MMC_BLK_NEW_REQUEST) {
548 if (error)
549 *error = err;
550 /*
551 * The previous request was not completed,
552 * nothing to return
553 */
554 return NULL;
555 }
556 /*
557 * Check BKOPS urgency for each R1 response
558 */
559 if (host->card && mmc_card_mmc(host->card) &&
560 ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
561 (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
562 (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {
563
564 /* Cancel the prepared request */
565 if (areq)
566 mmc_post_req(host, areq->mrq, -EINVAL);
567
568 mmc_start_bkops(host->card, true);
569
570 /* prepare the request again */
571 if (areq)
572 mmc_pre_req(host, areq->mrq, !host->areq);
573 }
574 }
575
576 if (!err && areq)
577 start_err = __mmc_start_data_req(host, areq->mrq);
578
579 if (host->areq)
580 mmc_post_req(host, host->areq->mrq, 0);
581
582 /* Cancel a prepared request if it was not started. */
583 if ((err || start_err) && areq)
584 mmc_post_req(host, areq->mrq, -EINVAL);
585
586 if (err)
587 host->areq = NULL;
588 else
589 host->areq = areq;
590
591 if (error)
592 *error = err;
593 return data;
594 }
595 EXPORT_SYMBOL(mmc_start_req);
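/*
 * Usage sketch of the asynchronous request pipeline (a simplified version
 * of what the block driver does; process_completed() is hypothetical):
 *
 *	struct mmc_async_req *prev;
 *	int status;
 *
 *	prev = mmc_start_req(host, next_areq, &status);
 *	if (prev)
 *		process_completed(prev, status);
 *
 * Passing a NULL areq reaps the last outstanding request without starting
 * a new one, which flushes the pipeline:
 *
 *	prev = mmc_start_req(host, NULL, &status);
 */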
596
597 /**
598 * mmc_wait_for_req - start a request and wait for completion
599 * @host: MMC host to start command
600 * @mrq: MMC request to start
601 *
602 * Start a new MMC custom command request for a host, and wait
603 * for the command to complete. Does not attempt to parse the
604 * response.
605 */
606 void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
607 {
608 __mmc_start_req(host, mrq);
609 mmc_wait_for_req_done(host, mrq);
610 }
611 EXPORT_SYMBOL(mmc_wait_for_req);
612
613 /**
614 * mmc_interrupt_hpi - issue a High Priority Interrupt
615 * @card: the MMC card associated with the HPI transfer
616 *
617 * Issues a High Priority Interrupt, then polls the card status
618 * until the card is out of the programming state.
619 */
620 int mmc_interrupt_hpi(struct mmc_card *card)
621 {
622 int err;
623 u32 status;
624 unsigned long prg_wait;
625
626 BUG_ON(!card);
627
628 if (!card->ext_csd.hpi_en) {
629 pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
630 return 1;
631 }
632
633 mmc_claim_host(card->host);
634 err = mmc_send_status(card, &status);
635 if (err) {
636 pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
637 goto out;
638 }
639
640 switch (R1_CURRENT_STATE(status)) {
641 case R1_STATE_IDLE:
642 case R1_STATE_READY:
643 case R1_STATE_STBY:
644 case R1_STATE_TRAN:
645 /*
646 * In idle and transfer states, HPI is not needed and the caller
647 * can issue the next intended command immediately
648 */
649 goto out;
650 case R1_STATE_PRG:
651 break;
652 default:
653 /* In all other states, it's illegal to issue HPI */
654 pr_debug("%s: HPI cannot be sent. Card state=%d\n",
655 mmc_hostname(card->host), R1_CURRENT_STATE(status));
656 err = -EINVAL;
657 goto out;
658 }
659
660 err = mmc_send_hpi_cmd(card, &status);
661 if (err)
662 goto out;
663
664 prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
665 do {
666 err = mmc_send_status(card, &status);
667
668 if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
669 break;
670 if (time_after(jiffies, prg_wait))
671 err = -ETIMEDOUT;
672 } while (!err);
673
674 out:
675 mmc_release_host(card->host);
676 return err;
677 }
678 EXPORT_SYMBOL(mmc_interrupt_hpi);
679
680 /**
681 * mmc_wait_for_cmd - start a command and wait for completion
682 * @host: MMC host to start command
683 * @cmd: MMC command to start
684 * @retries: maximum number of retries
685 *
686 * Start a new MMC command for a host, and wait for the command
687 * to complete. Return any error that occurred while the command
688 * was executing. Do not attempt to parse the response.
689 */
690 int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
691 {
692 struct mmc_request mrq = {NULL};
693
694 WARN_ON(!host->claimed);
695
696 memset(cmd->resp, 0, sizeof(cmd->resp));
697 cmd->retries = retries;
698
699 mrq.cmd = cmd;
700 cmd->data = NULL;
701
702 mmc_wait_for_req(host, &mrq);
703
704 return cmd->error;
705 }
706
707 EXPORT_SYMBOL(mmc_wait_for_cmd);
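/*
 * Usage sketch (a minimal example, assuming the caller already holds the
 * host via mmc_claim_host()): query the card status with CMD13.
 *
 *	struct mmc_command cmd = {0};
 *	u32 status;
 *	int err;
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	if (!err)
 *		status = cmd.resp[0];
 */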
708
709 /**
710 * mmc_stop_bkops - stop ongoing BKOPS
711 * @card: MMC card to check BKOPS
712 *
713 * Send HPI command to stop ongoing background operations to
714 * allow rapid servicing of foreground operations, e.g. read/
715 * writes. Wait until the card comes out of the programming state
716 * to avoid errors in servicing read/write requests.
717 */
718 int mmc_stop_bkops(struct mmc_card *card)
719 {
720 int err = 0;
721
722 BUG_ON(!card);
723 err = mmc_interrupt_hpi(card);
724
725 /*
726 * If err is -EINVAL, an HPI cannot be issued in the current
727 * card state; assume the card will complete the BKOPS on its own.
728 */
729 if (!err || (err == -EINVAL)) {
730 mmc_card_clr_doing_bkops(card);
731 err = 0;
732 }
733
734 return err;
735 }
736 EXPORT_SYMBOL(mmc_stop_bkops);
737
738 int mmc_read_bkops_status(struct mmc_card *card)
739 {
740 int err;
741 u8 *ext_csd;
742
743 mmc_claim_host(card->host);
744 err = mmc_get_ext_csd(card, &ext_csd);
745 mmc_release_host(card->host);
746 if (err)
747 return err;
748
749 card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
750 card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
751 kfree(ext_csd);
752 return 0;
753 }
754 EXPORT_SYMBOL(mmc_read_bkops_status);
755
756 /**
757 * mmc_set_data_timeout - set the timeout for a data command
758 * @data: data phase for command
759 * @card: the MMC card associated with the data transfer
760 *
761 * Computes the data timeout parameters according to the
762 * correct algorithm given the card type.
763 */
764 void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
765 {
766 unsigned int mult;
767
768 /*
769 * SDIO cards only define an upper 1 s limit on access.
770 */
771 if (mmc_card_sdio(card)) {
772 data->timeout_ns = 1000000000;
773 data->timeout_clks = 0;
774 return;
775 }
776
777 /*
778 * SD cards use a 100 multiplier rather than 10
779 */
780 mult = mmc_card_sd(card) ? 100 : 10;
781
782 /*
783 * Scale up the multiplier (and therefore the timeout) by
784 * the r2w factor for writes.
785 */
786 if (data->flags & MMC_DATA_WRITE)
787 mult <<= card->csd.r2w_factor;
788
789 data->timeout_ns = card->csd.tacc_ns * mult;
790 data->timeout_clks = card->csd.tacc_clks * mult;
791
792 /*
793 * SD cards also have an upper limit on the timeout.
794 */
795 if (mmc_card_sd(card)) {
796 unsigned int timeout_us, limit_us;
797
798 timeout_us = data->timeout_ns / 1000;
799 if (mmc_host_clk_rate(card->host))
800 timeout_us += data->timeout_clks * 1000 /
801 (mmc_host_clk_rate(card->host) / 1000);
802
803 if (data->flags & MMC_DATA_WRITE)
804 /*
805 * The MMC spec says: "It is strongly recommended
806 * for hosts to implement more than 500ms
807 * timeout value even if the card indicates
808 * the 250ms maximum busy length." Even the
809 * previous value of 300ms is known to be
810 * insufficient for some cards.
811 */
812 limit_us = 3000000;
813 else
814 limit_us = 100000;
815
816 /*
817 * SDHC cards always use these fixed values.
818 */
819 if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
820 data->timeout_ns = limit_us * 1000;
821 data->timeout_clks = 0;
822 }
823
824 /* assign limit value if invalid */
825 if (timeout_us == 0)
826 data->timeout_ns = limit_us * 1000;
827 }
828
829 /*
830 * Some cards require longer data read timeout than indicated in CSD.
831 * Address this by setting the read timeout to a "reasonably high"
832 * value. For the cards tested, 300ms has proven enough. If necessary,
833 * this value can be increased if other problematic cards require this.
834 */
835 if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
836 data->timeout_ns = 300000000;
837 data->timeout_clks = 0;
838 }
839
840 /*
841 * Some cards need very high timeouts if driven in SPI mode.
842 * The worst observed timeout was 900ms after writing a
843 * continuous stream of data until the internal logic
844 * overflowed.
845 */
846 if (mmc_host_is_spi(card->host)) {
847 if (data->flags & MMC_DATA_WRITE) {
848 if (data->timeout_ns < 1000000000)
849 data->timeout_ns = 1000000000; /* 1s */
850 } else {
851 if (data->timeout_ns < 100000000)
852 data->timeout_ns = 100000000; /* 100ms */
853 }
854 }
855 }
856 EXPORT_SYMBOL(mmc_set_data_timeout);
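/*
 * Usage sketch (a minimal single-block read, error handling omitted;
 * "buf" and "blk_addr" are assumed to be set up by the caller):
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, 512);
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = blk_addr;
 *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	mmc_set_data_timeout(&data, card);
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *	mmc_wait_for_req(card->host, &mrq);
 */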
857
858 /**
859 * mmc_align_data_size - pads a transfer size to a more optimal value
860 * @card: the MMC card associated with the data transfer
861 * @sz: original transfer size
862 *
863 * Pads the original data size with a number of extra bytes in
864 * order to avoid controller bugs and/or performance hits
865 * (e.g. some controllers revert to PIO for certain sizes).
866 *
867 * Returns the improved size, which might be unmodified.
868 *
869 * Note that this function is only relevant when issuing a
870 * single scatter gather entry.
871 */
872 unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
873 {
874 /*
875 * FIXME: We don't have a system for the controller to tell
876 * the core about its problems yet, so for now we just 32-bit
877 * align the size.
878 */
879 sz = ((sz + 3) / 4) * 4;
880
881 return sz;
882 }
883 EXPORT_SYMBOL(mmc_align_data_size);
884
885 /**
886 * __mmc_claim_host - exclusively claim a host
887 * @host: mmc host to claim
888 * @abort: whether or not the operation should be aborted
889 *
890 * Claim a host for a set of operations. If @abort is non-NULL and
891 * dereferences to a non-zero value, this will return prematurely with
892 * that non-zero value without acquiring the lock. Otherwise it returns
893 * zero with the lock held.
894 */
895 int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
896 {
897 DECLARE_WAITQUEUE(wait, current);
898 unsigned long flags;
899 int stop;
900
901 might_sleep();
902
903 add_wait_queue(&host->wq, &wait);
904 spin_lock_irqsave(&host->lock, flags);
905 while (1) {
906 set_current_state(TASK_UNINTERRUPTIBLE);
907 stop = abort ? atomic_read(abort) : 0;
908 if (stop || !host->claimed || host->claimer == current)
909 break;
910 spin_unlock_irqrestore(&host->lock, flags);
911 schedule();
912 spin_lock_irqsave(&host->lock, flags);
913 }
914 set_current_state(TASK_RUNNING);
915 if (!stop) {
916 host->claimed = 1;
917 host->claimer = current;
918 host->claim_cnt += 1;
919 } else
920 wake_up(&host->wq);
921 spin_unlock_irqrestore(&host->lock, flags);
922 remove_wait_queue(&host->wq, &wait);
923 if (host->ops->enable && !stop && host->claim_cnt == 1)
924 host->ops->enable(host);
925 return stop;
926 }
927
928 EXPORT_SYMBOL(__mmc_claim_host);
929
930 /**
931 * mmc_release_host - release a host
932 * @host: mmc host to release
933 *
934 * Release a MMC host, allowing others to claim the host
935 * for their operations.
936 */
937 void mmc_release_host(struct mmc_host *host)
938 {
939 unsigned long flags;
940
941 WARN_ON(!host->claimed);
942
943 if (host->ops->disable && host->claim_cnt == 1)
944 host->ops->disable(host);
945
946 spin_lock_irqsave(&host->lock, flags);
947 if (--host->claim_cnt) {
948 /* Release for nested claim */
949 spin_unlock_irqrestore(&host->lock, flags);
950 } else {
951 host->claimed = 0;
952 host->claimer = NULL;
953 spin_unlock_irqrestore(&host->lock, flags);
954 wake_up(&host->wq);
955 }
956 }
957 EXPORT_SYMBOL(mmc_release_host);
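/*
 * Usage sketch: claim/release always bracket card access, and claims nest
 * safely within one task because the claimer is tracked (host->claim_cnt):
 *
 *	u32 status;
 *	int err;
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_send_status(card, &status);
 *	mmc_release_host(card->host);
 */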
958
959 /*
960 * This is a helper function, which fetches a runtime pm reference for the
961 * card device and also claims the host.
962 */
963 void mmc_get_card(struct mmc_card *card)
964 {
965 pm_runtime_get_sync(&card->dev);
966 mmc_claim_host(card->host);
967 }
968 EXPORT_SYMBOL(mmc_get_card);
969
970 /*
971 * This is a helper function, which releases the host and drops the runtime
972 * pm reference for the card device.
973 */
974 void mmc_put_card(struct mmc_card *card)
975 {
976 mmc_release_host(card->host);
977 pm_runtime_mark_last_busy(&card->dev);
978 pm_runtime_put_autosuspend(&card->dev);
979 }
980 EXPORT_SYMBOL(mmc_put_card);
981
982 /*
983 * Internal function that does the actual ios call to the host driver,
984 * optionally printing some debug output.
985 */
986 static inline void mmc_set_ios(struct mmc_host *host)
987 {
988 struct mmc_ios *ios = &host->ios;
989
990 pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
991 "width %u timing %u\n",
992 mmc_hostname(host), ios->clock, ios->bus_mode,
993 ios->power_mode, ios->chip_select, ios->vdd,
994 ios->bus_width, ios->timing);
995
996 if (ios->clock > 0)
997 mmc_set_ungated(host);
998 host->ops->set_ios(host, ios);
999 }
1000
1001 /*
1002 * Control chip select pin on a host.
1003 */
1004 void mmc_set_chip_select(struct mmc_host *host, int mode)
1005 {
1006 mmc_host_clk_hold(host);
1007 host->ios.chip_select = mode;
1008 mmc_set_ios(host);
1009 mmc_host_clk_release(host);
1010 }
1011
1012 /*
1013 * Sets the host clock to the highest possible frequency that
1014 * is below "hz".
1015 */
1016 static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
1017 {
1018 WARN_ON(hz && hz < host->f_min);
1019
1020 if (hz > host->f_max)
1021 hz = host->f_max;
1022
1023 host->ios.clock = hz;
1024 mmc_set_ios(host);
1025 }
1026
1027 void mmc_set_clock(struct mmc_host *host, unsigned int hz)
1028 {
1029 mmc_host_clk_hold(host);
1030 __mmc_set_clock(host, hz);
1031 mmc_host_clk_release(host);
1032 }
1033
1034 #ifdef CONFIG_MMC_CLKGATE
1035 /*
1036 * This gates the clock by setting it to 0 Hz.
1037 */
1038 void mmc_gate_clock(struct mmc_host *host)
1039 {
1040 unsigned long flags;
1041
1042 spin_lock_irqsave(&host->clk_lock, flags);
1043 host->clk_old = host->ios.clock;
1044 host->ios.clock = 0;
1045 host->clk_gated = true;
1046 spin_unlock_irqrestore(&host->clk_lock, flags);
1047 mmc_set_ios(host);
1048 }
1049
1050 /*
1051 * This restores the clock from gating by using the cached
1052 * clock value.
1053 */
1054 void mmc_ungate_clock(struct mmc_host *host)
1055 {
1056 /*
1057 * We should previously have gated the clock, so the clock shall
1058 * be 0 here! The clock may however be 0 during initialization,
1059 * when some request operations are performed before setting
1060 * the frequency. When ungate is requested in that situation
1061 * we just ignore the call.
1062 */
1063 if (host->clk_old) {
1064 BUG_ON(host->ios.clock);
1065 /* This call will also set host->clk_gated to false */
1066 __mmc_set_clock(host, host->clk_old);
1067 }
1068 }
1069
1070 void mmc_set_ungated(struct mmc_host *host)
1071 {
1072 unsigned long flags;
1073
1074 /*
1075 * We've been given a new frequency while the clock is gated,
1076 * so make sure we regard this as ungating it.
1077 */
1078 spin_lock_irqsave(&host->clk_lock, flags);
1079 host->clk_gated = false;
1080 spin_unlock_irqrestore(&host->clk_lock, flags);
1081 }
1082
1083 #else
1084 void mmc_set_ungated(struct mmc_host *host)
1085 {
1086 }
1087 #endif
1088
1089 int mmc_execute_tuning(struct mmc_card *card)
1090 {
1091 struct mmc_host *host = card->host;
1092 u32 opcode;
1093 int err;
1094
1095 if (!host->ops->execute_tuning)
1096 return 0;
1097
1098 if (mmc_card_mmc(card))
1099 opcode = MMC_SEND_TUNING_BLOCK_HS200;
1100 else
1101 opcode = MMC_SEND_TUNING_BLOCK;
1102
1103 mmc_host_clk_hold(host);
1104 err = host->ops->execute_tuning(host, opcode);
1105 mmc_host_clk_release(host);
1106
1107 if (err)
1108 pr_err("%s: tuning execution failed\n", mmc_hostname(host));
1109
1110 return err;
1111 }
1112
1113 /*
1114 * Change the bus mode (open drain/push-pull) of a host.
1115 */
1116 void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
1117 {
1118 mmc_host_clk_hold(host);
1119 host->ios.bus_mode = mode;
1120 mmc_set_ios(host);
1121 mmc_host_clk_release(host);
1122 }
1123
1124 /*
1125 * Change the data bus width of a host.
1126 */
1127 void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
1128 {
1129 mmc_host_clk_hold(host);
1130 host->ios.bus_width = width;
1131 mmc_set_ios(host);
1132 mmc_host_clk_release(host);
1133 }
1134
1135 /*
1136 * Set initial state after a power cycle or a hw_reset.
1137 */
1138 void mmc_set_initial_state(struct mmc_host *host)
1139 {
1140 if (mmc_host_is_spi(host))
1141 host->ios.chip_select = MMC_CS_HIGH;
1142 else
1143 host->ios.chip_select = MMC_CS_DONTCARE;
1144 host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
1145 host->ios.bus_width = MMC_BUS_WIDTH_1;
1146 host->ios.timing = MMC_TIMING_LEGACY;
1147
1148 mmc_set_ios(host);
1149 }
1150
1151 /**
1152 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
1153 * @vdd: voltage (mV)
1154 * @low_bits: prefer low bits in boundary cases
1155 *
1156 * This function returns the OCR bit number according to the provided @vdd
1157 * value. If conversion is not possible, a negative errno value is returned.
1158 *
1159 * Depending on the @low_bits flag the function prefers low or high OCR bits
1160 * on boundary voltages. For example,
1161 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
1162 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
1163 *
1164 * Any value in the [1951:1999] range translates to ilog2(MMC_VDD_20_21).
1165 */
1166 static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
1167 {
1168 const int max_bit = ilog2(MMC_VDD_35_36);
1169 int bit;
1170
1171 if (vdd < 1650 || vdd > 3600)
1172 return -EINVAL;
1173
1174 if (vdd >= 1650 && vdd <= 1950)
1175 return ilog2(MMC_VDD_165_195);
1176
1177 if (low_bits)
1178 vdd -= 1;
1179
1180 /* Base 2000 mV, step 100 mV, bit's base 8. */
1181 bit = (vdd - 2000) / 100 + 8;
1182 if (bit > max_bit)
1183 return max_bit;
1184 return bit;
1185 }
1186
1187 /**
1188 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
1189 * @vdd_min: minimum voltage value (mV)
1190 * @vdd_max: maximum voltage value (mV)
1191 *
1192 * This function returns the OCR mask bits according to the provided @vdd_min
1193 * and @vdd_max values. If conversion is not possible the function returns 0.
1194 *
1195 * Notes wrt boundary cases:
1196 * This function sets the OCR bits for all boundary voltages, for example
1197 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
1198 * MMC_VDD_34_35 mask.
1199 */
1200 u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
1201 {
1202 u32 mask = 0;
1203
1204 if (vdd_max < vdd_min)
1205 return 0;
1206
1207 /* Prefer high bits for the boundary vdd_max values. */
1208 vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
1209 if (vdd_max < 0)
1210 return 0;
1211
1212 /* Prefer low bits for the boundary vdd_min values. */
1213 vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
1214 if (vdd_min < 0)
1215 return 0;
1216
1217 /* Fill the mask, from max bit to min bit. */
1218 while (vdd_max >= vdd_min)
1219 mask |= 1 << vdd_max--;
1220
1221 return mask;
1222 }
1223 EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
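/*
 * Example (restating the boundary case from the kernel-doc above):
 *
 *	mmc_vddrange_to_ocrmask(3300, 3400)
 *		== MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35
 *
 * The boundary voltage 3300 mV sets the low bit (MMC_VDD_32_33) and the
 * boundary voltage 3400 mV sets the high bit (MMC_VDD_34_35).
 */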
1224
1225 #ifdef CONFIG_OF
1226
1227 /**
1228 * mmc_of_parse_voltage - return mask of supported voltages
1229 * @np: the device node to be parsed
1230 * @mask: mask of voltages available for MMC/SD/SDIO
1231 *
1232 * Returns zero on success, or a negative errno if the voltage-range
1233 * property is invalid.
1234 */
1235 int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
1236 {
1237 const u32 *voltage_ranges;
1238 int num_ranges = 0, i; /* stays 0 if the property is absent */
1239
1240 voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
1241 num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
1242 if (!voltage_ranges || !num_ranges) {
1243 pr_info("%s: voltage-ranges unspecified\n", np->full_name);
1244 return -EINVAL;
1245 }
1246
1247 for (i = 0; i < num_ranges; i++) {
1248 const int j = i * 2;
1249 u32 ocr_mask;
1250
1251 ocr_mask = mmc_vddrange_to_ocrmask(
1252 be32_to_cpu(voltage_ranges[j]),
1253 be32_to_cpu(voltage_ranges[j + 1]));
1254 if (!ocr_mask) {
1255 pr_err("%s: voltage-range #%d is invalid\n",
1256 np->full_name, i);
1257 return -EINVAL;
1258 }
1259 *mask |= ocr_mask;
1260 }
1261
1262 return 0;
1263 }
1264 EXPORT_SYMBOL(mmc_of_parse_voltage);
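/*
 * Usage sketch: with a device tree node along the lines of (node and
 * property values are hypothetical)
 *
 *	mmc {
 *		voltage-ranges = <3300 3400>, <1800 1800>;
 *	};
 *
 * a host driver can fill its OCR mask like this:
 *
 *	u32 ocr_mask = 0;
 *
 *	if (!mmc_of_parse_voltage(np, &ocr_mask))
 *		host->ocr_avail = ocr_mask;
 */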
1265
1266 #endif /* CONFIG_OF */
1267
1268 static int mmc_of_get_func_num(struct device_node *node)
1269 {
1270 u32 reg;
1271 int ret;
1272
1273 ret = of_property_read_u32(node, "reg", &reg);
1274 if (ret < 0)
1275 return ret;
1276
1277 return reg;
1278 }
1279
1280 struct device_node *mmc_of_find_child_device(struct mmc_host *host,
1281 unsigned func_num)
1282 {
1283 struct device_node *node;
1284
1285 if (!host->parent || !host->parent->of_node)
1286 return NULL;
1287
1288 for_each_child_of_node(host->parent->of_node, node) {
1289 if (mmc_of_get_func_num(node) == func_num)
1290 return node;
1291 }
1292
1293 return NULL;
1294 }
1295
1296 #ifdef CONFIG_REGULATOR
1297
1298 /**
1299 * mmc_regulator_get_ocrmask - return mask of supported voltages
1300 * @supply: regulator to use
1301 *
1302 * This returns either a negative errno, or a mask of voltages that
1303 * can be provided to MMC/SD/SDIO devices using the specified voltage
1304 * regulator. This would normally be called before registering the
1305 * MMC host adapter.
1306 */
1307 int mmc_regulator_get_ocrmask(struct regulator *supply)
1308 {
1309 int result = 0;
1310 int count;
1311 int i;
1312 int vdd_uV;
1313 int vdd_mV;
1314
1315 count = regulator_count_voltages(supply);
1316 if (count < 0)
1317 return count;
1318
1319 for (i = 0; i < count; i++) {
1320 vdd_uV = regulator_list_voltage(supply, i);
1321 if (vdd_uV <= 0)
1322 continue;
1323
1324 vdd_mV = vdd_uV / 1000;
1325 result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1326 }
1327
1328 if (!result) {
1329 vdd_uV = regulator_get_voltage(supply);
1330 if (vdd_uV <= 0)
1331 return vdd_uV;
1332
1333 vdd_mV = vdd_uV / 1000;
1334 result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
1335 }
1336
1337 return result;
1338 }
1339 EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
1340
1341 /**
1342 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
1343 * @mmc: the host to regulate
1344 * @supply: regulator to use
1345 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
1346 *
1347 * Returns zero on success, else negative errno.
1348 *
1349 * MMC host drivers may use this to enable or disable a regulator using
1350 * a particular supply voltage. This would normally be called from the
1351 * set_ios() method.
1352 */
1353 int mmc_regulator_set_ocr(struct mmc_host *mmc,
1354 struct regulator *supply,
1355 unsigned short vdd_bit)
1356 {
1357 int result = 0;
1358 int min_uV, max_uV;
1359
1360 if (vdd_bit) {
1361 int tmp;
1362
1363 /*
1364 * REVISIT mmc_vddrange_to_ocrmask() may have set some
1365 * bits this regulator doesn't quite support ... don't
1366 * be too picky, most cards and regulators are OK with
1367 * a 0.1V range goof (it's a small error percentage).
1368 */
1369 tmp = vdd_bit - ilog2(MMC_VDD_165_195);
1370 if (tmp == 0) {
1371 min_uV = 1650 * 1000;
1372 max_uV = 1950 * 1000;
1373 } else {
1374 min_uV = 1900 * 1000 + tmp * 100 * 1000;
1375 max_uV = min_uV + 100 * 1000;
1376 }
1377
1378 result = regulator_set_voltage(supply, min_uV, max_uV);
1379 if (result == 0 && !mmc->regulator_enabled) {
1380 result = regulator_enable(supply);
1381 if (!result)
1382 mmc->regulator_enabled = true;
1383 }
1384 } else if (mmc->regulator_enabled) {
1385 result = regulator_disable(supply);
1386 if (result == 0)
1387 mmc->regulator_enabled = false;
1388 }
1389
1390 if (result)
1391 dev_err(mmc_dev(mmc),
1392 "could not set regulator OCR (%d)\n", result);
1393 return result;
1394 }
1395 EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
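/*
 * Usage sketch (foo_set_ios() is a hypothetical driver callback): host
 * drivers normally call this from their set_ios() handler, passing
 * ios->vdd on power up and 0 on power off:
 *
 *	static void foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 *	{
 *		if (!IS_ERR(mmc->supply.vmmc))
 *			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
 *					      ios->power_mode == MMC_POWER_OFF ?
 *					      0 : ios->vdd);
 *		...
 *	}
 */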
1396
1397 #endif /* CONFIG_REGULATOR */
1398
1399 int mmc_regulator_get_supply(struct mmc_host *mmc)
1400 {
1401 struct device *dev = mmc_dev(mmc);
1402 int ret;
1403
1404 mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
1405 mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
1406
1407 if (IS_ERR(mmc->supply.vmmc)) {
1408 if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
1409 return -EPROBE_DEFER;
1410 dev_info(dev, "No vmmc regulator found\n");
1411 } else {
1412 ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
1413 if (ret > 0)
1414 mmc->ocr_avail = ret;
1415 else
1416 dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
1417 }
1418
1419 if (IS_ERR(mmc->supply.vqmmc)) {
1420 if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
1421 return -EPROBE_DEFER;
1422 dev_info(dev, "No vqmmc regulator found\n");
1423 }
1424
1425 return 0;
1426 }
1427 EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
1428
1429 /*
1430 * Mask off any voltages we don't support and select
1431 * the lowest voltage
1432 */
1433 u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
1434 {
1435 int bit;
1436
1437 /*
1438 * Sanity check the voltages that the card claims to
1439 * support.
1440 */
1441 if (ocr & 0x7F) {
1442 dev_warn(mmc_dev(host),
1443 "card claims to support voltages below defined range\n");
1444 ocr &= ~0x7F;
1445 }
1446
1447 ocr &= host->ocr_avail;
1448 if (!ocr) {
1449 dev_warn(mmc_dev(host), "no support for card's volts\n");
1450 return 0;
1451 }
1452
1453 if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
1454 bit = ffs(ocr) - 1;
1455 ocr &= 3 << bit;
1456 mmc_power_cycle(host, ocr);
1457 } else {
1458 bit = fls(ocr) - 1;
1459 ocr &= 3 << bit;
1460 if (bit != host->ios.vdd)
1461 dev_warn(mmc_dev(host), "exceeding card's volts\n");
1462 }
1463
1464 return ocr;
1465 }
1466
1467 int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
1468 {
1469 int err = 0;
1470 int old_signal_voltage = host->ios.signal_voltage;
1471
1472 host->ios.signal_voltage = signal_voltage;
1473 if (host->ops->start_signal_voltage_switch) {
1474 mmc_host_clk_hold(host);
1475 err = host->ops->start_signal_voltage_switch(host, &host->ios);
1476 mmc_host_clk_release(host);
1477 }
1478
1479 if (err)
1480 host->ios.signal_voltage = old_signal_voltage;
1481
1482 return err;
1483
1484 }
1485
1486 int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
1487 {
1488 struct mmc_command cmd = {0};
1489 int err = 0;
1490 u32 clock;
1491
1492 BUG_ON(!host);
1493
1494 /*
1495 * Send CMD11 only if the request is to switch the card to
1496 * 1.8V signalling.
1497 */
1498 if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1499 return __mmc_set_signal_voltage(host, signal_voltage);
1500
1501 /*
1502 * If we cannot switch voltages, return failure so the caller
1503 * can continue without UHS mode
1504 */
1505 if (!host->ops->start_signal_voltage_switch)
1506 return -EPERM;
1507 if (!host->ops->card_busy)
1508 pr_warn("%s: cannot verify signal voltage switch\n",
1509 mmc_hostname(host));
1510
1511 mmc_host_clk_hold(host);
1512
1513 cmd.opcode = SD_SWITCH_VOLTAGE;
1514 cmd.arg = 0;
1515 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1516
1517 err = mmc_wait_for_cmd(host, &cmd, 0);
1518 if (err)
1519 goto err_command;
1520
1521 if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
1522 err = -EIO;
1523 goto err_command;
1524 }
1525 /*
1526 * The card should drive cmd and dat[0:3] low immediately
1527 * after the response of cmd11, but wait 1 ms to be sure
1528 */
1529 mmc_delay(1);
1530 if (host->ops->card_busy && !host->ops->card_busy(host)) {
1531 err = -EAGAIN;
1532 goto power_cycle;
1533 }
1534 /*
1535 * During a signal voltage level switch, the clock must be gated
1536 * for 5 ms according to the SD spec
1537 */
1538 clock = host->ios.clock;
1539 host->ios.clock = 0;
1540 mmc_set_ios(host);
1541
1542 if (__mmc_set_signal_voltage(host, signal_voltage)) {
1543 /*
1544 * Voltages may not have been switched, but we've already
1545 * sent CMD11, so a power cycle is required anyway
1546 */
1547 err = -EAGAIN;
1548 goto power_cycle;
1549 }
1550
1551 /* Keep clock gated for at least 5 ms */
1552 mmc_delay(5);
1553 host->ios.clock = clock;
1554 mmc_set_ios(host);
1555
1556 /* Wait for at least 1 ms according to spec */
1557 mmc_delay(1);
1558
1559 /*
1560 * Failure to switch is indicated by the card holding
1561 * dat[0:3] low
1562 */
1563 if (host->ops->card_busy && host->ops->card_busy(host))
1564 err = -EAGAIN;
1565
1566 power_cycle:
1567 if (err) {
1568 pr_debug("%s: Signal voltage switch failed, "
1569 "power cycling card\n", mmc_hostname(host));
1570 mmc_power_cycle(host, ocr);
1571 }
1572
1573 err_command:
1574 mmc_host_clk_release(host);
1575
1576 return err;
1577 }
1578
1579 /*
1580 * Select timing parameters for host.
1581 */
1582 void mmc_set_timing(struct mmc_host *host, unsigned int timing)
1583 {
1584 mmc_host_clk_hold(host);
1585 host->ios.timing = timing;
1586 mmc_set_ios(host);
1587 mmc_host_clk_release(host);
1588 }
1589
1590 /*
1591 * Select appropriate driver type for host.
1592 */
1593 void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
1594 {
1595 mmc_host_clk_hold(host);
1596 host->ios.drv_type = drv_type;
1597 mmc_set_ios(host);
1598 mmc_host_clk_release(host);
1599 }
1600
1601 /*
1602 * Apply power to the MMC stack. This is a two-stage process.
1603 * First, we enable power to the card without the clock running.
1604 * We then wait a bit for the power to stabilise. Finally,
1605 * enable the bus drivers and clock to the card.
1606 *
1607 * We must _NOT_ enable the clock prior to power stabilising.
1608 *
1609 * If a host does all the power sequencing itself, ignore the
1610 * initial MMC_POWER_UP stage.
1611 */
1612 void mmc_power_up(struct mmc_host *host, u32 ocr)
1613 {
1614 if (host->ios.power_mode == MMC_POWER_ON)
1615 return;
1616
1617 mmc_host_clk_hold(host);
1618
1619 mmc_pwrseq_pre_power_on(host);
1620
1621 host->ios.vdd = fls(ocr) - 1;
1622 host->ios.power_mode = MMC_POWER_UP;
1623 /* Set initial state and call mmc_set_ios */
1624 mmc_set_initial_state(host);
1625
1626 /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
1627 if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
1628 dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
1629 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
1630 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
1631 else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
1632 dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");
1633
1634 /*
1635 * This delay should be sufficient to allow the power supply
1636 * to reach the minimum voltage.
1637 */
1638 mmc_delay(10);
1639
1640 host->ios.clock = host->f_init;
1641
1642 host->ios.power_mode = MMC_POWER_ON;
1643 mmc_set_ios(host);
1644
1645 /*
1646 * This delay must be at least 74 clock cycles, or 1 ms, or the
1647 * time required to reach a stable voltage.
1648 */
1649 mmc_delay(10);
1650
1651 mmc_pwrseq_post_power_on(host);
1652
1653 mmc_host_clk_release(host);
1654 }
1655
1656 void mmc_power_off(struct mmc_host *host)
1657 {
1658 if (host->ios.power_mode == MMC_POWER_OFF)
1659 return;
1660
1661 mmc_host_clk_hold(host);
1662
1663 mmc_pwrseq_power_off(host);
1664
1665 host->ios.clock = 0;
1666 host->ios.vdd = 0;
1667
1668 host->ios.power_mode = MMC_POWER_OFF;
1669 /* Set initial state and call mmc_set_ios */
1670 mmc_set_initial_state(host);
1671
1672 /*
1673 * Some configurations, such as the 802.11 SDIO card in the OLPC
1674 * XO-1.5, require a short delay after poweroff before the card
1675 * can be successfully turned on again.
1676 */
1677 mmc_delay(1);
1678
1679 mmc_host_clk_release(host);
1680 }
1681
1682 void mmc_power_cycle(struct mmc_host *host, u32 ocr)
1683 {
1684 mmc_power_off(host);
1685 /* Wait at least 1 ms according to SD spec */
1686 mmc_delay(1);
1687 mmc_power_up(host, ocr);
1688 }
1689
1690 /*
1691 * Cleanup when the last reference to the bus operator is dropped.
1692 */
1693 static void __mmc_release_bus(struct mmc_host *host)
1694 {
1695 BUG_ON(!host);
1696 BUG_ON(host->bus_refs);
1697 BUG_ON(!host->bus_dead);
1698
1699 host->bus_ops = NULL;
1700 }
1701
1702 /*
1703 * Increase reference count of bus operator
1704 */
1705 static inline void mmc_bus_get(struct mmc_host *host)
1706 {
1707 unsigned long flags;
1708
1709 spin_lock_irqsave(&host->lock, flags);
1710 host->bus_refs++;
1711 spin_unlock_irqrestore(&host->lock, flags);
1712 }
1713
1714 /*
1715 * Decrease reference count of bus operator and free it if
1716 * it is the last reference.
1717 */
1718 static inline void mmc_bus_put(struct mmc_host *host)
1719 {
1720 unsigned long flags;
1721
1722 spin_lock_irqsave(&host->lock, flags);
1723 host->bus_refs--;
1724 if ((host->bus_refs == 0) && host->bus_ops)
1725 __mmc_release_bus(host);
1726 spin_unlock_irqrestore(&host->lock, flags);
1727 }
1728
1729 /*
1730 * Assign a mmc bus handler to a host. Only one bus handler may control a
1731 * host at any given time.
1732 */
1733 void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
1734 {
1735 unsigned long flags;
1736
1737 BUG_ON(!host);
1738 BUG_ON(!ops);
1739
1740 WARN_ON(!host->claimed);
1741
1742 spin_lock_irqsave(&host->lock, flags);
1743
1744 BUG_ON(host->bus_ops);
1745 BUG_ON(host->bus_refs);
1746
1747 host->bus_ops = ops;
1748 host->bus_refs = 1;
1749 host->bus_dead = 0;
1750
1751 spin_unlock_irqrestore(&host->lock, flags);
1752 }
1753
1754 /*
1755 * Remove the current bus handler from a host.
1756 */
1757 void mmc_detach_bus(struct mmc_host *host)
1758 {
1759 unsigned long flags;
1760
1761 BUG_ON(!host);
1762
1763 WARN_ON(!host->claimed);
1764 WARN_ON(!host->bus_ops);
1765
1766 spin_lock_irqsave(&host->lock, flags);
1767
1768 host->bus_dead = 1;
1769
1770 spin_unlock_irqrestore(&host->lock, flags);
1771
1772 mmc_bus_put(host);
1773 }
1774
1775 static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
1776 bool cd_irq)
1777 {
1778 #ifdef CONFIG_MMC_DEBUG
1779 unsigned long flags;
1780 spin_lock_irqsave(&host->lock, flags);
1781 WARN_ON(host->removed);
1782 spin_unlock_irqrestore(&host->lock, flags);
1783 #endif
1784
1785 /*
1786 * If the device is configured as a wakeup source, prevent a new sleep
1787 * for 5 s to give user space a chance to consume the event.
1788 */
1789 if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
1790 device_can_wakeup(mmc_dev(host)))
1791 pm_wakeup_event(mmc_dev(host), 5000);
1792
1793 host->detect_change = 1;
1794 mmc_schedule_delayed_work(&host->detect, delay);
1795 }
1796
1797 /**
1798 * mmc_detect_change - process change of state on a MMC socket
1799 * @host: host which changed state.
1800 * @delay: optional delay to wait before detection (jiffies)
1801 *
1802 * MMC drivers should call this when they detect a card has been
1803 * inserted or removed. The MMC layer will confirm that any
1804 * present card is still functional, and initialize any newly
1805 * inserted card.
1806 */
1807 void mmc_detect_change(struct mmc_host *host, unsigned long delay)
1808 {
1809 _mmc_detect_change(host, delay, true);
1810 }
1811 EXPORT_SYMBOL(mmc_detect_change);
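/*
 * Usage sketch (foo_cd_irq() is a hypothetical card-detect interrupt
 * handler): host drivers call mmc_detect_change() with a small debounce
 * delay when the CD line toggles:
 *
 *	static irqreturn_t foo_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */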
1812
1813 void mmc_init_erase(struct mmc_card *card)
1814 {
1815 unsigned int sz;
1816
1817 if (is_power_of_2(card->erase_size))
1818 card->erase_shift = ffs(card->erase_size) - 1;
1819 else
1820 card->erase_shift = 0;
1821
1822 /*
1823 * It is possible to erase an arbitrarily large area of an SD or MMC
1824 * card. That is not desirable because it can take a long time
1825 * (minutes) potentially delaying more important I/O, and also the
1826 * timeout calculations become increasingly over-estimated.
1827 * Consequently, 'pref_erase' is defined as a guide to limit erases
1828 * to that size and alignment.
1829 *
1830 * For SD cards that define Allocation Unit size, limit erases to one
1831 * Allocation Unit at a time. For MMC cards that define High Capacity
1832 * Erase Size, whether it is switched on or not, limit to that size.
1833 * Otherwise just have a stab at a good value. For modern cards it
1834 * will end up being 4MiB. Note that if the value is too small, it
1835 * can end up taking longer to erase.
1836 */
1837 if (mmc_card_sd(card) && card->ssr.au) {
1838 card->pref_erase = card->ssr.au;
1839 card->erase_shift = ffs(card->ssr.au) - 1;
1840 } else if (card->ext_csd.hc_erase_size) {
1841 card->pref_erase = card->ext_csd.hc_erase_size;
1842 } else if (card->erase_size) {
1843 sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
1844 if (sz < 128)
1845 card->pref_erase = 512 * 1024 / 512;
1846 else if (sz < 512)
1847 card->pref_erase = 1024 * 1024 / 512;
1848 else if (sz < 1024)
1849 card->pref_erase = 2 * 1024 * 1024 / 512;
1850 else
1851 card->pref_erase = 4 * 1024 * 1024 / 512;
1852 if (card->pref_erase < card->erase_size)
1853 card->pref_erase = card->erase_size;
1854 else {
1855 sz = card->pref_erase % card->erase_size;
1856 if (sz)
1857 card->pref_erase += card->erase_size - sz;
1858 }
1859 } else
1860 card->pref_erase = 0;
1861 }
1862
1863 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
1864 unsigned int arg, unsigned int qty)
1865 {
1866 unsigned int erase_timeout;
1867
1868 if (arg == MMC_DISCARD_ARG ||
1869 (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
1870 erase_timeout = card->ext_csd.trim_timeout;
1871 } else if (card->ext_csd.erase_group_def & 1) {
1872 /* High Capacity Erase Group Size uses HC timeouts */
1873 if (arg == MMC_TRIM_ARG)
1874 erase_timeout = card->ext_csd.trim_timeout;
1875 else
1876 erase_timeout = card->ext_csd.hc_erase_timeout;
1877 } else {
1878 /* CSD Erase Group Size uses write timeout */
1879 unsigned int mult = (10 << card->csd.r2w_factor);
1880 unsigned int timeout_clks = card->csd.tacc_clks * mult;
1881 unsigned int timeout_us;
1882
1883 /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
1884 if (card->csd.tacc_ns < 1000000)
1885 timeout_us = (card->csd.tacc_ns * mult) / 1000;
1886 else
1887 timeout_us = (card->csd.tacc_ns / 1000) * mult;
1888
1889 /*
1890 * ios.clock is only a target. The real clock rate might be
1891 * less but not that much less, so fudge it by multiplying by 2.
1892 */
1893 timeout_clks <<= 1;
1894 timeout_us += (timeout_clks * 1000) /
1895 (mmc_host_clk_rate(card->host) / 1000);
1896
1897 erase_timeout = timeout_us / 1000;
1898
1899 /*
1900 * Theoretically, the calculation could underflow so round up
1901 * to 1ms in that case.
1902 */
1903 if (!erase_timeout)
1904 erase_timeout = 1;
1905 }
1906
1907 /* Multiplier for secure operations */
1908 if (arg & MMC_SECURE_ARGS) {
1909 if (arg == MMC_SECURE_ERASE_ARG)
1910 erase_timeout *= card->ext_csd.sec_erase_mult;
1911 else
1912 erase_timeout *= card->ext_csd.sec_trim_mult;
1913 }
1914
1915 erase_timeout *= qty;
1916
1917 /*
1918 * Ensure at least a 1 second timeout for SPI as per
1919 * 'mmc_set_data_timeout()'
1920 */
1921 if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
1922 erase_timeout = 1000;
1923
1924 return erase_timeout;
1925 }
1926
1927 static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
1928 unsigned int arg,
1929 unsigned int qty)
1930 {
1931 unsigned int erase_timeout;
1932
1933 if (card->ssr.erase_timeout) {
1934 /* Erase timeout specified in SD Status Register (SSR) */
1935 erase_timeout = card->ssr.erase_timeout * qty +
1936 card->ssr.erase_offset;
1937 } else {
1938 /*
1939 * Erase timeout not specified in SD Status Register (SSR) so
1940 * use 250ms per write block.
1941 */
1942 erase_timeout = 250 * qty;
1943 }
1944
1945 /* Must not be less than 1 second */
1946 if (erase_timeout < 1000)
1947 erase_timeout = 1000;
1948
1949 return erase_timeout;
1950 }
1951
1952 static unsigned int mmc_erase_timeout(struct mmc_card *card,
1953 unsigned int arg,
1954 unsigned int qty)
1955 {
1956 if (mmc_card_sd(card))
1957 return mmc_sd_erase_timeout(card, arg, qty);
1958 else
1959 return mmc_mmc_erase_timeout(card, arg, qty);
1960 }
1961
1962 static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1963 unsigned int to, unsigned int arg)
1964 {
1965 struct mmc_command cmd = {0};
1966 unsigned int qty = 0;
1967 unsigned long timeout;
1968 int err;
1969
1970 /*
1971 * qty is used to calculate the erase timeout which depends on how many
1972 * erase groups (or allocation units in SD terminology) are affected.
1973 * We count erasing part of an erase group as one erase group.
1974 * For SD, the allocation units are always a power of 2. For MMC, the
1975 * erase group size is almost certainly also a power of 2, but the JEDEC
1976 * standard does not seem to insist on that, so we fall back to
1977 * division in that case. SD may not specify an allocation unit size,
1978 * in which case the timeout is based on the number of write blocks.
1979 *
1980 * Note that the timeout for secure trim 2 will only be correct if the
1981 * number of erase groups specified is the same as the total of all
1982 * preceding secure trim 1 commands. Since the power may have been
1983 * lost since the secure trim 1 commands occurred, it is generally
1984 * impossible to calculate the secure trim 2 timeout correctly.
1985 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
	cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
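		/*
		 * NB: 0xFDF92000 is assumed here to be the mask of the error
		 * bits in the R1 card status (not spelled out in this file);
		 * any of them set means the erase failed.
		 */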
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
			       err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/*
		 * Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
			       mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}

	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG) {
		rem = from % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from += rem;
			if (nr > rem)
				nr -= rem;
			else
				return 0;
		}
		rem = nr % card->erase_size;
		if (rem)
			nr -= rem;
	}

	if (nr == 0)
		return 0;

	to = from + nr;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);
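
/*
 * Hypothetical usage sketch (illustration only, not part of this driver):
 * discard the first erase group of a card. The host must be claimed around
 * the call, as the kernel-doc above requires.
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_erase(card, 0, card->erase_size, MMC_ERASE_ARG);
 *	mmc_release_host(card->host);
 */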

int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

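/*
 * The helper below searches for the largest discard quantity whose erase
 * timeout still fits within the host's maximum busy timeout: it grows a
 * candidate in power-of-two steps, banks the largest step that still fits,
 * and repeats from there, so it converges without trying every qty.
 */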
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
	unsigned int last_timeout = 0;

	if (card->erase_shift)
		max_qty = UINT_MAX >> card->erase_shift;
	else if (mmc_card_sd(card))
		max_qty = UINT_MAX;
	else
		max_qty = UINT_MAX / card->erase_size;

	/* Find the largest qty with an OK timeout */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);
			if (timeout > host->max_busy_timeout)
				break;
			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	if (qty == 1)
		return 1;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = --qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty;
	else
		max_discard = --qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	if (!host->max_busy_timeout)
		return UINT_MAX;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_busy_timeout);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
		       bool is_rel_write)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);
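
/*
 * Hypothetical usage sketch (illustration only): pre-define the length of
 * the next multi-block transfer with CMD23 before issuing it, e.g.
 *
 *	err = mmc_set_blockcount(card, nr_blocks, false);
 *
 * Passing true instead sets bit 31 of the argument, which is assumed here
 * to mark the upcoming write as a reliable write.
 */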

static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	mmc_host_clk_hold(host);
	host->ops->hw_reset(host);
	mmc_host_clk_release(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	int ret;

	if (!host->card)
		return -EINVAL;

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
		mmc_bus_put(host);
		return -EOPNOTSUPP;
	}

	ret = host->bus_ops->reset(host);
	mmc_bus_put(host);

	pr_warn("%s: tried to reset card\n", mmc_hostname(host));

	return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host, host->ocr_avail);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset the card. Since we do not know
	 * if the card is being re-initialized, just send it. CMD52
	 * should be ignored by SD/eMMC cards.
	 */
	sdio_reset(host);
	mmc_go_idle(host);

	mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!mmc_attach_sdio(host))
		return 0;
	if (!mmc_attach_sd(host))
		return 0;
	if (!mmc_attach_mmc(host))
		return 0;

	mmc_power_off(host);
	return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (host->caps & MMC_CAP_NONREMOVABLE)
		return 0;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or host requires polling to provide card detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->trigger_card_event && host->ops->card_event) {
		host->ops->card_event(host);
		host->trigger_card_event = false;
	}

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	mmc_bus_get(host);

	/*
	 * If there is a _removable_ card registered, check whether it is
	 * still present.
	 */
	if (host->bus_ops && !host->bus_dead
	    && !(host->caps & MMC_CAP_NONREMOVABLE))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
	    host->ops->get_cd(host) == 0) {
		mmc_claim_host(host);
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	mmc_claim_host(host);
	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	host->ios.power_mode = MMC_POWER_UNDEFINED;
	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
		mmc_power_off(host);
	else
		mmc_power_up(host, host->ocr_avail);
	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	if (host->slot.cd_irq >= 0)
		disable_irq(host->slot.cd_irq);

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);
	mmc_flush_scheduled_work();

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host, host->card->ocr);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

#ifdef CONFIG_PM

/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
		  unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:

		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		_mmc_detect_change(host, 0, false);

	}

	return 0;
}
#endif

/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init the struct context_info needed to implement the asynchronous
 * request mechanism, used by the mmc core, host drivers and mmc request
 * suppliers.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	spin_lock_init(&host->context_info.lock);
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}

static int __init mmc_init(void)
{
	int ret;

	workqueue = alloc_ordered_workqueue("kmmcd", 0);
	if (!workqueue)
		return -ENOMEM;

	ret = mmc_register_bus();
	if (ret)
		goto destroy_workqueue;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
destroy_workqueue:
	destroy_workqueue(workqueue);

	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
	destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");