/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"
/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */

/* The max erase timeout, used when host->max_busy_timeout isn't specified */
#define MMC_ERASE_TIMEOUT_MS	(60 * 1000) /* 60 s */

static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
static int mmc_schedule_delayed_work(struct delayed_work *work,
				     unsigned long delay)
{
	/*
	 * We use the system_freezable_wq, because of two reasons.
	 * First, it allows several works (not the same work item) to be
	 * executed simultaneously. Second, the queue becomes frozen when
	 * userspace becomes frozen during system PM.
	 */
	return queue_delayed_work(system_freezable_wq, work, delay);
}
#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	struct mmc_data *data = mrq->data;
	static const int data_errors[] = {
		-ETIMEDOUT,
		-EILSEQ,
		-EIO,
	};

	if (!data)
		return;

	if (cmd->error || data->error ||
	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
		return;

	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
					   struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */
static inline void mmc_complete_cmd(struct mmc_request *mrq)
{
	if (mrq->cap_cmd_during_tfr && !completion_done(&mrq->cmd_completion))
		complete_all(&mrq->cmd_completion);
}

void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (!mrq->cap_cmd_during_tfr)
		return;

	mmc_complete_cmd(mrq);

	pr_debug("%s: cmd done, tfr ongoing (CMD%u)\n",
		 mmc_hostname(host), mrq->cmd->opcode);
}
EXPORT_SYMBOL(mmc_command_done);
/**
 *	mmc_request_done - finish processing an MMC request
 *	@host: MMC host which completed request
 *	@mrq: MMC request which completed
 *
 *	MMC drivers should call this function when they have completed
 *	their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd = mrq->cmd;
	int err = cmd->error;

	/* Flag re-tuning needed on CRC errors */
	if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
	    cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
	    (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
	    (mrq->data && mrq->data->error == -EILSEQ) ||
	    (mrq->stop && mrq->stop->error == -EILSEQ)))
		mmc_retune_needed(host);

	if (err && cmd->retries && mmc_host_is_spi(host)) {
		if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
			cmd->retries = 0;
	}

	if (host->ongoing_mrq == mrq)
		host->ongoing_mrq = NULL;

	mmc_complete_cmd(mrq);

	trace_mmc_request_done(host, mrq);

	if (err && cmd->retries && !mmc_card_removed(host->card)) {
		/*
		 * Request starter must handle retries - see
		 * mmc_wait_for_req_done().
		 */
		if (mrq->done)
			mrq->done(mrq);
	} else {
		mmc_should_fail_request(host, mrq);

		if (!host->ongoing_mrq)
			led_trigger_event(host->led, LED_OFF);

		if (mrq->sbc) {
			pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->sbc->opcode,
				mrq->sbc->error,
				mrq->sbc->resp[0], mrq->sbc->resp[1],
				mrq->sbc->resp[2], mrq->sbc->resp[3]);
		}

		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
			mmc_hostname(host), cmd->opcode, err,
			cmd->resp[0], cmd->resp[1],
			cmd->resp[2], cmd->resp[3]);

		if (mrq->data) {
			pr_debug("%s:     %d bytes transferred: %d\n",
				mmc_hostname(host),
				mrq->data->bytes_xfered, mrq->data->error);
		}

		if (mrq->stop) {
			pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
				mmc_hostname(host), mrq->stop->opcode,
				mrq->stop->error,
				mrq->stop->resp[0], mrq->stop->resp[1],
				mrq->stop->resp[2], mrq->stop->resp[3]);
		}

		if (mrq->done)
			mrq->done(mrq);
	}
}
EXPORT_SYMBOL(mmc_request_done);
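
/*
 * Illustrative example (not part of this file; the foo_* names are
 * hypothetical): a host controller driver typically calls
 * mmc_request_done() from its interrupt handler once the hardware has
 * finished the transfer:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct foo_host *foo = dev_id;
 *		struct mmc_request *mrq = foo->mrq;
 *
 *		if (foo_xfer_complete(foo)) {
 *			foo->mrq = NULL;
 *			mmc_request_done(foo->mmc, mrq);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */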
static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	/* Assumes host controller has been runtime resumed by mmc_claim_host */
	err = mmc_retune(host);
	if (err) {
		mrq->cmd->error = err;
		mmc_request_done(host, mrq);
		return;
	}

	/*
	 * For sdio rw commands we must wait for card busy otherwise some
	 * sdio devices won't work properly.
	 */
	if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
		int tries = 500; /* Wait approx 500ms at maximum */

		while (host->ops->card_busy(host) && --tries)
			mmc_delay(1);

		if (!tries) {
			mrq->cmd->error = -EBUSY;
			mmc_request_done(host, mrq);
			return;
		}
	}

	if (mrq->cap_cmd_during_tfr) {
		host->ongoing_mrq = mrq;
		/*
		 * Retry path could come through here without having waited on
		 * cmd_completion, so ensure it is reinitialised.
		 */
		reinit_completion(&mrq->cmd_completion);
	}

	trace_mmc_request_start(host, mrq);

	host->ops->request(host, mrq);
}
static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned int i, sz;
	struct scatterlist *sg;
#endif
	mmc_retune_hold(host);

	if (mmc_card_removed(host->card))
		return -ENOMEDIUM;

	if (mrq->sbc) {
		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
			 mmc_hostname(host), mrq->sbc->opcode,
			 mrq->sbc->arg, mrq->sbc->flags);
	}

	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
		 mmc_hostname(host), mrq->cmd->opcode,
		 mrq->cmd->arg, mrq->cmd->flags);

	if (mrq->data) {
		pr_debug("%s:     blksz %d blocks %d flags %08x "
			"tsac %d ms nsac %d\n",
			mmc_hostname(host), mrq->data->blksz,
			mrq->data->blocks, mrq->data->flags,
			mrq->data->timeout_ns / 1000000,
			mrq->data->timeout_clks);
	}

	if (mrq->stop) {
		pr_debug("%s:     CMD%u arg %08x flags %08x\n",
			 mmc_hostname(host), mrq->stop->opcode,
			 mrq->stop->arg, mrq->stop->flags);
	}

	WARN_ON(!host->claimed);

	mrq->cmd->error = 0;
	mrq->cmd->mrq = mrq;
	if (mrq->sbc) {
		mrq->sbc->error = 0;
		mrq->sbc->mrq = mrq;
	}
	if (mrq->data) {
		BUG_ON(mrq->data->blksz > host->max_blk_size);
		BUG_ON(mrq->data->blocks > host->max_blk_count);
		BUG_ON(mrq->data->blocks * mrq->data->blksz >
			host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
		sz = 0;
		for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
			sz += sg->length;
		BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

		mrq->cmd->data = mrq->data;
		mrq->data->error = 0;
		mrq->data->mrq = mrq;
		if (mrq->stop) {
			mrq->data->stop = mrq->stop;
			mrq->stop->error = 0;
			mrq->stop->mrq = mrq;
		}
	}
	led_trigger_event(host->led, LED_FULL);
	__mmc_start_request(host, mrq);

	return 0;
}
/**
 *	mmc_start_bkops - start BKOPS for supported cards
 *	@card: MMC card to start BKOPS
 *	@from_exception: A flag to indicate if this function was
 *			 called due to an exception raised by the card
 *
 *	Start background operations whenever requested.
 *	When the urgent BKOPS bit is set in a R1 command response
 *	then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_BKOPS_MAX_TIMEOUT;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BKOPS_START, 1, timeout,
			use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher), BKOPS is executed
	 * synchronously; otherwise the operation is in progress.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);
/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
	struct mmc_context_info *context_info = &mrq->host->context_info;

	context_info->is_done_rcv = true;
	wake_up_interruptible(&context_info->wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
	complete(&mrq->completion);
}
static inline void mmc_wait_ongoing_tfr_cmd(struct mmc_host *host)
{
	struct mmc_request *ongoing_mrq = READ_ONCE(host->ongoing_mrq);

	/*
	 * If there is an ongoing transfer, wait for the command line to become
	 * available.
	 */
	if (ongoing_mrq && !completion_done(&ongoing_mrq->cmd_completion))
		wait_for_completion(&ongoing_mrq->cmd_completion);
}
/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when request is completed by the card.
 * Starts data mmc request execution.
 * If an ongoing transfer is already in progress, wait for the command line
 * to become available before sending another command.
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	mrq->done = mmc_wait_data_done;
	mrq->host = host;

	init_completion(&mrq->cmd_completion);

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		mmc_wait_data_done(mrq);
	}

	return err;
}
static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
	int err;

	mmc_wait_ongoing_tfr_cmd(host);

	init_completion(&mrq->completion);
	mrq->done = mmc_wait_done;

	init_completion(&mrq->cmd_completion);

	err = mmc_start_request(host, mrq);
	if (err) {
		mrq->cmd->error = err;
		mmc_complete_cmd(mrq);
		complete(&mrq->completion);
	}

	return err;
}
/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks MMC context till host controller will ack end of data request
 * execution or new request notification arrives from the block layer.
 * Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
				      struct mmc_request *mrq,
				      struct mmc_async_req *next_req)
{
	struct mmc_command *cmd;
	struct mmc_context_info *context_info = &host->context_info;
	int err;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(context_info->wait,
				(context_info->is_done_rcv ||
				 context_info->is_new_req));
		spin_lock_irqsave(&context_info->lock, flags);
		context_info->is_waiting_last_req = false;
		spin_unlock_irqrestore(&context_info->lock, flags);
		if (context_info->is_done_rcv) {
			context_info->is_done_rcv = false;
			context_info->is_new_req = false;
			cmd = mrq->cmd;

			if (!cmd->error || !cmd->retries ||
			    mmc_card_removed(host->card)) {
				err = host->areq->err_check(host->card,
							    host->areq);
				break; /* return err */
			} else {
				mmc_retune_recheck(host);
				pr_info("%s: req failed (CMD%u): %d, retrying...\n",
					mmc_hostname(host),
					cmd->opcode, cmd->error);
				cmd->retries--;
				cmd->error = 0;
				__mmc_start_request(host, mrq);
				continue; /* wait for done/new event again */
			}
		} else if (context_info->is_new_req) {
			context_info->is_new_req = false;
			if (!next_req)
				return MMC_BLK_NEW_REQUEST;
		}
	}
	mmc_retune_release(host);
	return err;
}
void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	struct mmc_command *cmd;

	while (1) {
		wait_for_completion(&mrq->completion);

		cmd = mrq->cmd;

		/*
		 * If host has timed out waiting for the sanitize
		 * to complete, card might be still in programming state
		 * so let's try to bring the card out of programming
		 * state.
		 */
		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
			if (!mmc_interrupt_hpi(host->card)) {
				pr_warn("%s: %s: Interrupted sanitize\n",
					mmc_hostname(host), __func__);
				cmd->error = 0;
				break;
			} else {
				pr_err("%s: %s: Failed to interrupt sanitize\n",
				       mmc_hostname(host), __func__);
			}
		}
		if (!cmd->error || !cmd->retries ||
		    mmc_card_removed(host->card))
			break;

		mmc_retune_recheck(host);

		pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
			 mmc_hostname(host), cmd->opcode, cmd->error);
		cmd->retries--;
		cmd->error = 0;
		__mmc_start_request(host, mrq);
	}

	mmc_retune_release(host);
}
EXPORT_SYMBOL(mmc_wait_for_req_done);
/**
 *	mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done
 *	@host: MMC host
 *	@mrq: MMC request
 *
 *	mmc_is_req_done() is used with requests that have
 *	mrq->cap_cmd_during_tfr = true. mmc_is_req_done() must be called after
 *	starting a request and before waiting for it to complete. That is,
 *	either in between calls to mmc_start_req(), or after mmc_wait_for_req()
 *	and before mmc_wait_for_req_done(). If it is called at other times the
 *	result is not meaningful.
 */
bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq)
{
	if (host->areq)
		return host->context_info.is_done_rcv;
	else
		return completion_done(&mrq->completion);
}
EXPORT_SYMBOL(mmc_is_req_done);
/**
 *	mmc_pre_req - Prepare for a new request
 *	@host: MMC host to prepare command
 *	@mrq: MMC request to prepare for
 *	@is_first_req: true if there is no previous started request
 *		that may run in parallel to this call, otherwise false
 *
 *	mmc_pre_req() is called prior to mmc_start_req() to let
 *	host prepare for the new request. Preparation of a request may be
 *	performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
			bool is_first_req)
{
	if (host->ops->pre_req)
		host->ops->pre_req(host, mrq, is_first_req);
}

/**
 *	mmc_post_req - Post process a completed request
 *	@host: MMC host to post process command
 *	@mrq: MMC request to post process for
 *	@err: Error, if non zero, clean up any resources made in pre_req
 *
 *	Let the host post process a completed request. Post processing of
 *	a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
			 int err)
{
	if (host->ops->post_req)
		host->ops->post_req(host, mrq, err);
}
/**
 *	mmc_start_req - start a non-blocking request
 *	@host: MMC host to start command
 *	@areq: async request to start
 *	@error: out parameter returns 0 for success, otherwise non zero
 *
 *	Start a new MMC custom command request for a host.
 *	If there is an ongoing async request, wait for completion
 *	of that request, then start the new one and return.
 *	Does not wait for the new request to complete.
 *
 *	Returns the completed request, NULL in case of none completed.
 *	Wait for an ongoing request (previously started) to complete and
 *	return the completed request. If there is no ongoing request, NULL
 *	is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
				    struct mmc_async_req *areq, int *error)
{
	int err = 0;
	int start_err = 0;
	struct mmc_async_req *data = host->areq;

	/* Prepare a new request */
	if (areq)
		mmc_pre_req(host, areq->mrq, !host->areq);

	if (host->areq) {
		err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
		if (err == MMC_BLK_NEW_REQUEST) {
			if (error)
				*error = err;
			/*
			 * The previous request was not completed,
			 * nothing to return.
			 */
			return NULL;
		}
		/*
		 * Check BKOPS urgency for each R1 response
		 */
		if (host->card && mmc_card_mmc(host->card) &&
		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {

			/* Cancel the prepared request */
			if (areq)
				mmc_post_req(host, areq->mrq, -EINVAL);

			mmc_start_bkops(host->card, true);

			/* prepare the request again */
			if (areq)
				mmc_pre_req(host, areq->mrq, !host->areq);
		}
	}

	if (!err && areq)
		start_err = __mmc_start_data_req(host, areq->mrq);

	if (host->areq)
		mmc_post_req(host, host->areq->mrq, 0);

	/* Cancel a prepared request if it was not started. */
	if ((err || start_err) && areq)
		mmc_post_req(host, areq->mrq, -EINVAL);

	if (err)
		host->areq = NULL;
	else
		host->areq = areq;

	if (error)
		*error = err;
	return data;
}
EXPORT_SYMBOL(mmc_start_req);
/**
 *	mmc_wait_for_req - start a request and wait for completion
 *	@host: MMC host to start command
 *	@mrq: MMC request to start
 *
 *	Start a new MMC custom command request for a host, and wait
 *	for the command to complete. In the case of 'cap_cmd_during_tfr'
 *	requests, the transfer is ongoing and the caller can issue further
 *	commands that do not use the data lines, and then wait by calling
 *	mmc_wait_for_req_done().
 *	Does not attempt to parse the response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
	__mmc_start_req(host, mrq);

	if (!mrq->cap_cmd_during_tfr)
		mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);
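
/*
 * Illustrative example (not part of this file): issuing a single-block
 * read with mmc_wait_for_req(). The 'sector', 'buf', 'card' and 'host'
 * variables are assumed to exist in the caller:
 *
 *	struct mmc_request mrq = {NULL};
 *	struct mmc_command cmd = {0};
 *	struct mmc_data data = {0};
 *	struct scatterlist sg;
 *
 *	cmd.opcode = MMC_READ_SINGLE_BLOCK;
 *	cmd.arg = sector;
 *	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
 *
 *	data.blksz = 512;
 *	data.blocks = 1;
 *	data.flags = MMC_DATA_READ;
 *	data.sg = &sg;
 *	data.sg_len = 1;
 *	sg_init_one(&sg, buf, 512);
 *
 *	mrq.cmd = &cmd;
 *	mrq.data = &data;
 *
 *	mmc_set_data_timeout(&data, card);
 *	mmc_wait_for_req(host, &mrq);
 */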
/**
 *	mmc_interrupt_hpi - Issue a High Priority Interrupt
 *	@card: the MMC card associated with the HPI transfer
 *
 *	Issue a High Priority Interrupt, then poll the card status
 *	until it leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
 *	mmc_wait_for_cmd - start a command and wait for completion
 *	@host: MMC host to start command
 *	@cmd: MMC command to start
 *	@retries: maximum number of retries
 *
 *	Start a new MMC command for a host, and wait for the command
 *	to complete. Return any error that occurred while the command
 *	was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
	struct mmc_request mrq = {NULL};

	WARN_ON(!host->claimed);

	memset(cmd->resp, 0, sizeof(cmd->resp));
	cmd->retries = retries;

	mrq.cmd = cmd;
	cmd->data = NULL;

	mmc_wait_for_req(host, &mrq);

	return cmd->error;
}
EXPORT_SYMBOL(mmc_wait_for_cmd);
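
/*
 * Illustrative example (not part of this file): reading the card status
 * with mmc_wait_for_cmd(), retrying up to three times. 'card' is assumed
 * to be a valid card whose host is already claimed:
 *
 *	struct mmc_command cmd = {0};
 *	u32 status;
 *	int err;
 *
 *	cmd.opcode = MMC_SEND_STATUS;
 *	cmd.arg = card->rca << 16;
 *	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *	err = mmc_wait_for_cmd(card->host, &cmd, 3);
 *	if (!err)
 *		status = cmd.resp[0];
 */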
/**
 *	mmc_stop_bkops - stop ongoing BKOPS
 *	@card: MMC card to check BKOPS
 *
 *	Send HPI command to stop ongoing background operations to
 *	allow rapid servicing of foreground operations, e.g. read/
 *	writes. Wait until the card comes out of the programming state
 *	to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, we can't issue an HPI, so let the
	 * BKOPS complete on their own.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_retune_release(card->host);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);
int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}
EXPORT_SYMBOL(mmc_read_bkops_status);
/**
 *	mmc_set_data_timeout - set the timeout for a data command
 *	@data: data phase for command
 *	@card: the MMC card associated with the data transfer
 *
 *	Computes the data timeout parameters according to the
 *	correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
	unsigned int mult;

	/*
	 * SDIO cards only define an upper 1 s limit on access.
	 */
	if (mmc_card_sdio(card)) {
		data->timeout_ns = 1000000000;
		data->timeout_clks = 0;
		return;
	}

	/*
	 * SD cards use a 100 multiplier rather than 10
	 */
	mult = mmc_card_sd(card) ? 100 : 10;

	/*
	 * Scale up the multiplier (and therefore the timeout) by
	 * the r2w factor for writes.
	 */
	if (data->flags & MMC_DATA_WRITE)
		mult <<= card->csd.r2w_factor;

	data->timeout_ns = card->csd.tacc_ns * mult;
	data->timeout_clks = card->csd.tacc_clks * mult;

	/*
	 * SD cards also have an upper limit on the timeout.
	 */
	if (mmc_card_sd(card)) {
		unsigned int timeout_us, limit_us;

		timeout_us = data->timeout_ns / 1000;
		if (card->host->ios.clock)
			timeout_us += data->timeout_clks * 1000 /
				(card->host->ios.clock / 1000);

		if (data->flags & MMC_DATA_WRITE)
			/*
			 * The MMC spec says "It is strongly recommended
			 * for hosts to implement more than 500ms
			 * timeout value even if the card indicates
			 * the 250ms maximum busy length."  Even the
			 * previous value of 300ms is known to be
			 * insufficient for some cards.
			 */
			limit_us = 3000000;
		else
			limit_us = 100000;

		/*
		 * SDHC cards always use these fixed values.
		 */
		if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
			data->timeout_ns = limit_us * 1000;
			data->timeout_clks = 0;
		}

		/* assign limit value if invalid */
		if (timeout_us == 0)
			data->timeout_ns = limit_us * 1000;
	}

	/*
	 * Some cards require longer data read timeout than indicated in CSD.
	 * Address this by setting the read timeout to a "reasonably high"
	 * value. For the cards tested, 600ms has proven enough. If necessary,
	 * this value can be increased if other problematic cards require this.
	 */
	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
		data->timeout_ns = 600000000;
		data->timeout_clks = 0;
	}

	/*
	 * Some cards need very high timeouts if driven in SPI mode.
	 * The worst observed timeout was 900ms after writing a
	 * continuous stream of data until the internal logic
	 * overflowed.
	 */
	if (mmc_host_is_spi(card->host)) {
		if (data->flags & MMC_DATA_WRITE) {
			if (data->timeout_ns < 1000000000)
				data->timeout_ns = 1000000000;	/* 1s */
		} else {
			if (data->timeout_ns < 100000000)
				data->timeout_ns = 100000000;	/* 100ms */
		}
	}
}
EXPORT_SYMBOL(mmc_set_data_timeout);
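
/*
 * Worked example (numbers are illustrative): for an SD card write with
 * tacc_ns = 1000000 (1 ms) and r2w_factor = 2, the multiplier is
 * 100 << 2 = 400, giving timeout_ns = 400 ms. That is below the 3 s
 * write limit_us, so the computed value is kept; a block-addressed
 * (SDHC) card would instead be clamped to the fixed 3 s limit.
 */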
/**
 *	mmc_align_data_size - pads a transfer size to a more optimal value
 *	@card: the MMC card associated with the data transfer
 *	@sz: original transfer size
 *
 *	Pads the original data size with a number of extra bytes in
 *	order to avoid controller bugs and/or performance hits
 *	(e.g. some controllers revert to PIO for certain sizes).
 *
 *	Returns the improved size, which might be unmodified.
 *
 *	Note that this function is only relevant when issuing a
 *	single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
	/*
	 * FIXME: We don't have a system for the controller to tell
	 * the core about its problems yet, so for now we just 32-bit
	 * align the size.
	 */
	sz = ((sz + 3) / 4) * 4;

	return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);
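
/*
 * Example: mmc_align_data_size() rounds up to the next 32-bit boundary,
 * so sz = 13 becomes 16, while sz = 16 is returned unmodified.
 */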
/**
 *	__mmc_claim_host - exclusively claim a host
 *	@host: mmc host to claim
 *	@abort: whether or not the operation should be aborted
 *
 *	Claim a host for a set of operations.  If @abort is non null and
 *	dereferences a non-zero value then this will return prematurely with
 *	that non-zero value without acquiring the lock.  Returns zero
 *	with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int stop;
	bool pm = false;

	might_sleep();

	add_wait_queue(&host->wq, &wait);
	spin_lock_irqsave(&host->lock, flags);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		stop = abort ? atomic_read(abort) : 0;
		if (stop || !host->claimed || host->claimer == current)
			break;
		spin_unlock_irqrestore(&host->lock, flags);
		schedule();
		spin_lock_irqsave(&host->lock, flags);
	}
	set_current_state(TASK_RUNNING);
	if (!stop) {
		host->claimed = 1;
		host->claimer = current;
		host->claim_cnt += 1;
		if (host->claim_cnt == 1)
			pm = true;
	} else
		wake_up(&host->wq);
	spin_unlock_irqrestore(&host->lock, flags);
	remove_wait_queue(&host->wq, &wait);

	if (pm)
		pm_runtime_get_sync(mmc_dev(host));

	return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
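
/*
 * Illustrative example (not part of this file): the usual claim/release
 * pattern around a sequence of commands, via the mmc_claim_host() wrapper:
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_wait_for_cmd(card->host, &cmd, 0);
 *	mmc_release_host(card->host);
 */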
/**
 *	mmc_release_host - release a host
 *	@host: mmc host to release
 *
 *	Release a MMC host, allowing others to claim the host
 *	for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);
	if (--host->claim_cnt) {
		/* Release for nested claim */
		spin_unlock_irqrestore(&host->lock, flags);
	} else {
		host->claimed = 0;
		host->claimer = NULL;
		spin_unlock_irqrestore(&host->lock, flags);
		wake_up(&host->wq);
		pm_runtime_mark_last_busy(mmc_dev(host));
		pm_runtime_put_autosuspend(mmc_dev(host));
	}
}
EXPORT_SYMBOL(mmc_release_host);
/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
	pm_runtime_get_sync(&card->dev);
	mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
	mmc_release_host(card->host);
	pm_runtime_mark_last_busy(&card->dev);
	pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);
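
/*
 * Illustrative example (not part of this file): code that touches a
 * possibly runtime-suspended card brackets the access with mmc_get_card()
 * and mmc_put_card(), pairing the runtime PM reference with the host claim:
 *
 *	mmc_get_card(card);
 *	err = mmc_send_status(card, &status);
 *	mmc_put_card(card);
 */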
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
	struct mmc_ios *ios = &host->ios;

	pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
		"width %u timing %u\n",
		 mmc_hostname(host), ios->clock, ios->bus_mode,
		 ios->power_mode, ios->chip_select, ios->vdd,
		 1 << ios->bus_width, ios->timing);

	host->ops->set_ios(host, ios);
}
/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
	host->ios.chip_select = mode;
	mmc_set_ios(host);
}
/*
 * Sets the host clock to the highest possible frequency that
 * is below "hz".
 */
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
	WARN_ON(hz && hz < host->f_min);

	if (hz > host->f_max)
		hz = host->f_max;

	host->ios.clock = hz;
	mmc_set_ios(host);
}
int mmc_execute_tuning(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 opcode;
	int err;

	if (!host->ops->execute_tuning)
		return 0;

	if (mmc_card_mmc(card))
		opcode = MMC_SEND_TUNING_BLOCK_HS200;
	else
		opcode = MMC_SEND_TUNING_BLOCK;

	err = host->ops->execute_tuning(host, opcode);

	if (err)
		pr_err("%s: tuning execution failed: %d\n",
			mmc_hostname(host), err);
	else
		mmc_retune_enable(host);

	return err;
}
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
	host->ios.bus_mode = mode;
	mmc_set_ios(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
	host->ios.bus_width = width;
	mmc_set_ios(host);
}
/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
	mmc_retune_disable(host);

	if (mmc_host_is_spi(host))
		host->ios.chip_select = MMC_CS_HIGH;
	else
		host->ios.chip_select = MMC_CS_DONTCARE;
	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
	host->ios.bus_width = MMC_BUS_WIDTH_1;
	host->ios.timing = MMC_TIMING_LEGACY;
	host->ios.drv_type = 0;
	host->ios.enhanced_strobe = false;

	/*
	 * Make sure we are in non-enhanced strobe mode before we
	 * actually enable it in ext_csd.
	 */
	if ((host->caps2 & MMC_CAP2_HS400_ES) &&
	     host->ops->hs400_enhanced_strobe)
		host->ops->hs400_enhanced_strobe(host, &host->ios);

	mmc_set_ios(host);
}
/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd:	voltage (mV)
 * @low_bits:	prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
	const int max_bit = ilog2(MMC_VDD_35_36);
	int bit;

	if (vdd < 1650 || vdd > 3600)
		return -EINVAL;

	if (vdd >= 1650 && vdd <= 1950)
		return ilog2(MMC_VDD_165_195);

	if (low_bits)
		vdd -= 1;

	/* Base 2000 mV, step 100 mV, bit's base 8. */
	bit = (vdd - 2000) / 100 + 8;
	if (bit > max_bit)
		return max_bit;
	return bit;
}
/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min:	minimum voltage value (mV)
 * @vdd_max:	maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
	u32 mask = 0;

	if (vdd_max < vdd_min)
		return 0;

	/* Prefer high bits for the boundary vdd_max values. */
	vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
	if (vdd_max < 0)
		return 0;

	/* Prefer low bits for the boundary vdd_min values. */
	vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
	if (vdd_min < 0)
		return 0;

	/* Fill the mask, from max bit to min bit. */
	while (vdd_max >= vdd_min)
		mask |= 1 << vdd_max--;

	return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
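
/*
 * Worked example: mmc_vddrange_to_ocrmask(3300, 3400) sets all boundary
 * bits, i.e. MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_34_35, because
 * vdd_min = 3300 prefers the low bit (32_33) and vdd_max = 3400 prefers
 * the high bit (34_35).
 */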
#ifdef CONFIG_OF

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node need to be parsed.
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * Parse the "voltage-ranges" DT property, returning zero if it is not
 * found, negative errno if the voltage-range specification is invalid,
 * or one if the voltage-range is specified and successfully parsed.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
	const u32 *voltage_ranges;
	int num_ranges, i;

	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
	if (!voltage_ranges) {
		pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
		return 0;
	}
	if (!num_ranges) {
		pr_err("%s: voltage-ranges empty\n", np->full_name);
		return -EINVAL;
	}

	for (i = 0; i < num_ranges; i++) {
		const int j = i * 2;
		u32 ocr_mask;

		ocr_mask = mmc_vddrange_to_ocrmask(
				be32_to_cpu(voltage_ranges[j]),
				be32_to_cpu(voltage_ranges[j + 1]));
		if (!ocr_mask) {
			pr_err("%s: voltage-range #%d is invalid\n",
				np->full_name, i);
			return -EINVAL;
		}
		*mask |= ocr_mask;
	}

	return 1;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);

#endif /* CONFIG_OF */
static int mmc_of_get_func_num(struct device_node *node)
{
	u32 reg;
	int ret;

	ret = of_property_read_u32(node, "reg", &reg);
	if (ret < 0)
		return ret;

	return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
		unsigned func_num)
{
	struct device_node *node;

	if (!host->parent || !host->parent->of_node)
		return NULL;

	for_each_child_of_node(host->parent->of_node, node) {
		if (mmc_of_get_func_num(node) == func_num)
			return node;
	}

	return NULL;
}
#ifdef CONFIG_REGULATOR

/**
 * mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
 * @vdd_bit:	OCR bit number
 * @min_uV:	minimum voltage value (uV)
 * @max_uV:	maximum voltage value (uV)
 *
 * This function returns the voltage range according to the provided OCR
 * bit number. If conversion is not possible a negative errno value is
 * returned.
 */
static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
{
	int tmp;

	if (!vdd_bit)
		return -EINVAL;

	/*
	 * REVISIT mmc_vddrange_to_ocrmask() may have set some
	 * bits this regulator doesn't quite support ... don't
	 * be too picky, most cards and regulators are OK with
	 * a 0.1V range goof (it's a small error percentage).
	 */
	tmp = vdd_bit - ilog2(MMC_VDD_165_195);
	if (tmp == 0) {
		*min_uV = 1650 * 1000;
		*max_uV = 1950 * 1000;
	} else {
		*min_uV = 1900 * 1000 + tmp * 100 * 1000;
		*max_uV = *min_uV + 100 * 1000;
	}

	return 0;
}
/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator.  This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
	int result = 0;
	int count;
	int i;
	int vdd_uV;
	int vdd_mV;

	count = regulator_count_voltages(supply);
	if (count < 0)
		return count;

	for (i = 0; i < count; i++) {
		vdd_uV = regulator_list_voltage(supply, i);
		if (vdd_uV <= 0)
			continue;

		vdd_mV = vdd_uV / 1000;
		result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	if (!result) {
		vdd_uV = regulator_get_voltage(supply);
		if (vdd_uV <= 0)
			return vdd_uV;

		vdd_mV = vdd_uV / 1000;
		result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
	}

	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);
/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage.  This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
			struct regulator *supply,
			unsigned short vdd_bit)
{
	int result = 0;
	int min_uV, max_uV;

	if (vdd_bit) {
		mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);

		result = regulator_set_voltage(supply, min_uV, max_uV);
		if (result == 0 && !mmc->regulator_enabled) {
			result = regulator_enable(supply);
			if (!result)
				mmc->regulator_enabled = true;
		}
	} else if (mmc->regulator_enabled) {
		result = regulator_disable(supply);
		if (result == 0)
			mmc->regulator_enabled = false;
	}

	if (result)
		dev_err(mmc_dev(mmc),
			"could not set regulator OCR (%d)\n", result);
	return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
						  int min_uV, int target_uV,
						  int max_uV)
{
	/*
	 * Check if supported first to avoid errors since we may try several
	 * signal levels during power up and don't want to show errors.
	 */
	if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
		return -EINVAL;

	return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
					     max_uV);
}
/**
 * mmc_regulator_set_vqmmc - Set VQMMC as per the ios
 *
 * For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
 * That will match the behavior of old boards where VQMMC and VMMC were supplied
 * by the same supply.  The Bus Operating conditions for 3.3V signaling in the
 * SD card spec also define VQMMC in terms of VMMC.
 * If this is not possible we'll try the full 2.7-3.6V of the spec.
 *
 * For 1.2V and 1.8V signaling we'll try to get as close as possible to the
 * requested voltage.  This is definitely a good idea for UHS where there's a
 * separate regulator on the card that's trying to make 1.8V and it's best if
 * we match.
 *
 * This function is expected to be used by a controller's
 * start_signal_voltage_switch() function.
 */
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct device *dev = mmc_dev(mmc);
	int ret, volt, min_uV, max_uV;

	/* If no vqmmc supply then we can't change the voltage */
	if (IS_ERR(mmc->supply.vqmmc))
		return -EINVAL;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_120:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1100000, 1200000, 1300000);
	case MMC_SIGNAL_VOLTAGE_180:
		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						1700000, 1800000, 1950000);
	case MMC_SIGNAL_VOLTAGE_330:
		ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
		if (ret < 0)
			return ret;

		dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
			__func__, volt, max_uV);

		min_uV = max(volt - 300000, 2700000);
		max_uV = min(max_uV + 200000, 3600000);

		/*
		 * Due to a limitation in the current implementation of
		 * regulator_set_voltage_triplet() which is taking the lowest
		 * voltage possible if below the target, search for a suitable
		 * voltage in two steps and try to stay close to vmmc
		 * with a 0.3V tolerance at first.
		 */
		if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						min_uV, volt, max_uV))
			return 0;

		return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
						2700000, volt, 3600000);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);

#endif /* CONFIG_REGULATOR */
int mmc_regulator_get_supply(struct mmc_host *mmc)
{
	struct device *dev = mmc_dev(mmc);
	int ret;

	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(mmc->supply.vmmc)) {
		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vmmc regulator found\n");
	} else {
		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
		if (ret > 0)
			mmc->ocr_avail = ret;
		else
			dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
	}

	if (IS_ERR(mmc->supply.vqmmc)) {
		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
			return -EPROBE_DEFER;
		dev_dbg(dev, "No vqmmc regulator found\n");
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);
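
/*
 * Illustrative example (not part of this file): a host driver would
 * typically call mmc_regulator_get_supply() from its probe routine,
 * propagating -EPROBE_DEFER and falling back to another way of setting
 * ocr_avail when no vmmc regulator is described:
 *
 *	ret = mmc_regulator_get_supply(mmc);
 *	if (ret == -EPROBE_DEFER)
 *		return ret;
 *	if (!mmc->ocr_avail)
 *		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
 */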
/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
	int bit;

	/*
	 * Sanity check the voltages that the card claims to
	 * support.
	 */
	if (ocr & 0x7F) {
		dev_warn(mmc_dev(host),
		"card claims to support voltages below defined range\n");
		ocr &= ~0x7F;
	}

	ocr &= host->ocr_avail;
	if (!ocr) {
		dev_warn(mmc_dev(host), "no support for card's volts\n");
		return 0;
	}

	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
		bit = ffs(ocr) - 1;
		ocr &= 3 << bit;
		mmc_power_cycle(host, ocr);
	} else {
		bit = fls(ocr) - 1;
		ocr &= 3 << bit;
		if (bit != host->ios.vdd)
			dev_warn(mmc_dev(host), "exceeding card's volts\n");
	}

	return ocr;
}
int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
	int err = 0;
	int old_signal_voltage = host->ios.signal_voltage;

	host->ios.signal_voltage = signal_voltage;
	if (host->ops->start_signal_voltage_switch)
		err = host->ops->start_signal_voltage_switch(host, &host->ios);

	if (err)
		host->ios.signal_voltage = old_signal_voltage;

	return err;
}
int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
{
	struct mmc_command cmd = {0};
	int err = 0;
	u32 clock;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return __mmc_set_signal_voltage(host, signal_voltage);

	/*
	 * If we cannot switch voltages, return failure so the caller
	 * can continue without UHS mode
	 */
	if (!host->ops->start_signal_voltage_switch)
		return -EPERM;
	if (!host->ops->card_busy)
		pr_warn("%s: cannot verify signal voltage switch\n",
			mmc_hostname(host));

	cmd.opcode = SD_SWITCH_VOLTAGE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (err)
		return err;

	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 1 ms to be sure
	 */
	mmc_delay(1);
	if (host->ops->card_busy && !host->ops->card_busy(host)) {
		err = -EAGAIN;
		goto power_cycle;
	}
	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	clock = host->ios.clock;
	host->ios.clock = 0;
	mmc_set_ios(host);

	if (__mmc_set_signal_voltage(host, signal_voltage)) {
		/*
		 * Voltages may not have been switched, but we've already
		 * sent CMD11, so a power cycle is required anyway
		 */
		err = -EAGAIN;
		goto power_cycle;
	}

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mmc_delay(10);
	host->ios.clock = clock;
	mmc_set_ios(host);

	/* Wait for at least 1 ms according to spec */
	mmc_delay(1);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low
	 */
	if (host->ops->card_busy && host->ops->card_busy(host))
		err = -EAGAIN;

power_cycle:
	if (err) {
		pr_debug("%s: Signal voltage switch failed, "
			"power cycling card\n", mmc_hostname(host));
		mmc_power_cycle(host, ocr);
	}

	return err;
}
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
	host->ios.timing = timing;
	mmc_set_ios(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
	host->ios.drv_type = drv_type;
	mmc_set_ios(host);
}
int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
			      int card_drv_type, int *drv_type)
{
	struct mmc_host *host = card->host;
	int host_drv_type = SD_DRIVER_TYPE_B;

	*drv_type = 0;

	if (!host->ops->select_drive_strength)
		return 0;

	/* Use SD definition of driver strength for hosts */
	if (host->caps & MMC_CAP_DRIVER_TYPE_A)
		host_drv_type |= SD_DRIVER_TYPE_A;

	if (host->caps & MMC_CAP_DRIVER_TYPE_C)
		host_drv_type |= SD_DRIVER_TYPE_C;

	if (host->caps & MMC_CAP_DRIVER_TYPE_D)
		host_drv_type |= SD_DRIVER_TYPE_D;

	/*
	 * The drive strength that the hardware can support
	 * depends on the board design.  Pass the appropriate
	 * information and let the hardware specific code
	 * return what is possible given the options
	 */
	return host->ops->select_drive_strength(card, max_dtr,
						host_drv_type,
						card_drv_type,
						drv_type);
}
/*
 * Apply power to the MMC stack.  This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise.  Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
	if (host->ios.power_mode == MMC_POWER_ON)
		return;

	mmc_pwrseq_pre_power_on(host);

	host->ios.vdd = fls(ocr) - 1;
	host->ios.power_mode = MMC_POWER_UP;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");

	/*
	 * This delay should be sufficient to allow the power supply
	 * to reach the minimum voltage.
	 */
	mmc_delay(10);

	mmc_pwrseq_post_power_on(host);

	host->ios.clock = host->f_init;

	host->ios.power_mode = MMC_POWER_ON;
	mmc_set_ios(host);

	/*
	 * This delay must be at least 74 clock cycles, or 1 ms, or the
	 * time required to reach a stable voltage.
	 */
	mmc_delay(10);
}
void mmc_power_off(struct mmc_host *host)
{
	if (host->ios.power_mode == MMC_POWER_OFF)
		return;

	mmc_pwrseq_power_off(host);

	host->ios.clock = 0;
	host->ios.vdd = 0;

	host->ios.power_mode = MMC_POWER_OFF;
	/* Set initial state and call mmc_set_ios */
	mmc_set_initial_state(host);

	/*
	 * Some configurations, such as the 802.11 SDIO card in the OLPC
	 * XO-1.5, require a short delay after poweroff before the card
	 * can be successfully turned on again.
	 */
	mmc_delay(1);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
	mmc_power_off(host);
	/* Wait at least 1 ms according to SD spec */
	mmc_delay(1);
	mmc_power_up(host, ocr);
}
/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
	BUG_ON(host->bus_refs);
	BUG_ON(!host->bus_dead);

	host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs++;
	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	host->bus_refs--;
	if ((host->bus_refs == 0) && host->bus_ops)
		__mmc_release_bus(host);
	spin_unlock_irqrestore(&host->lock, flags);
}
/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
	unsigned long flags;

	WARN_ON(!host->claimed);

	spin_lock_irqsave(&host->lock, flags);

	BUG_ON(host->bus_ops);
	BUG_ON(host->bus_refs);

	host->bus_ops = ops;
	host->bus_refs = 1;
	host->bus_dead = 0;

	spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
	unsigned long flags;

	WARN_ON(!host->claimed);
	WARN_ON(!host->bus_ops);

	spin_lock_irqsave(&host->lock, flags);

	host->bus_dead = 1;

	spin_unlock_irqrestore(&host->lock, flags);

	mmc_bus_put(host);
}
static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
				bool cd_irq)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	WARN_ON(host->removed);
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	/*
	 * If the device is configured as wakeup, we prevent a new sleep for
	 * 5 s to give provision for user space to consume the event.
	 */
	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
		device_can_wakeup(mmc_dev(host)))
		pm_wakeup_event(mmc_dev(host), 5000);

	host->detect_change = 1;
	mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 *	mmc_detect_change - process change of state on a MMC socket
 *	@host: host which changed state.
 *	@delay: optional delay to wait before detection (jiffies)
 *
 *	MMC drivers should call this when they detect a card has been
 *	inserted or removed. The MMC layer will confirm that any
 *	present card is still functional, and initialize any newly
 *	inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
	_mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);
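
/*
 * Illustrative example (not part of this file; the foo_* names are
 * hypothetical): a card-detect interrupt handler would typically notify
 * the core with a debounce delay:
 *
 *	static irqreturn_t foo_cd_irq(int irq, void *dev_id)
 *	{
 *		struct mmc_host *mmc = dev_id;
 *
 *		mmc_detect_change(mmc, msecs_to_jiffies(200));
 *		return IRQ_HANDLED;
 *	}
 */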
void mmc_init_erase(struct mmc_card *card)
{
	unsigned int sz;

	if (is_power_of_2(card->erase_size))
		card->erase_shift = ffs(card->erase_size) - 1;
	else
		card->erase_shift = 0;

	/*
	 * It is possible to erase an arbitrarily large area of an SD or MMC
	 * card.  That is not desirable because it can take a long time
	 * (minutes) potentially delaying more important I/O, and also the
	 * timeout calculations become increasingly hugely over-estimated.
	 * Consequently, 'pref_erase' is defined as a guide to limit erases
	 * to that size and alignment.
	 *
	 * For SD cards that define Allocation Unit size, limit erases to one
	 * Allocation Unit at a time.
	 * For MMC, have a stab at a good value and for modern cards it will
	 * end up being 4MiB. Note that if the value is too small, it can end
	 * up taking longer to erase. Also note, erase_size is already set to
	 * High Capacity Erase Size if available when this function is called.
	 */
	if (mmc_card_sd(card) && card->ssr.au) {
		card->pref_erase = card->ssr.au;
		card->erase_shift = ffs(card->ssr.au) - 1;
	} else if (card->erase_size) {
		sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
		if (sz < 128)
			card->pref_erase = 512 * 1024 / 512;
		else if (sz < 512)
			card->pref_erase = 1024 * 1024 / 512;
		else if (sz < 1024)
			card->pref_erase = 2 * 1024 * 1024 / 512;
		else
			card->pref_erase = 4 * 1024 * 1024 / 512;
		if (card->pref_erase < card->erase_size)
			card->pref_erase = card->erase_size;
		else {
			sz = card->pref_erase % card->erase_size;
			if (sz)
				card->pref_erase += card->erase_size - sz;
		}
	} else
		card->pref_erase = 0;
}
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
					  unsigned int arg, unsigned int qty)
{
	unsigned int erase_timeout;

	if (arg == MMC_DISCARD_ARG ||
	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
		erase_timeout = card->ext_csd.trim_timeout;
	} else if (card->ext_csd.erase_group_def & 1) {
		/* High Capacity Erase Group Size uses HC timeouts */
		if (arg == MMC_TRIM_ARG)
			erase_timeout = card->ext_csd.trim_timeout;
		else
			erase_timeout = card->ext_csd.hc_erase_timeout;
	} else {
		/* CSD Erase Group Size uses write timeout */
		unsigned int mult = (10 << card->csd.r2w_factor);
		unsigned int timeout_clks = card->csd.tacc_clks * mult;
		unsigned int timeout_us;

		/* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
		if (card->csd.tacc_ns < 1000000)
			timeout_us = (card->csd.tacc_ns * mult) / 1000;
		else
			timeout_us = (card->csd.tacc_ns / 1000) * mult;

		/*
		 * ios.clock is only a target.  The real clock rate might be
		 * less but not that much less, so fudge it by multiplying by 2.
		 */
		timeout_clks <<= 1;
		timeout_us += (timeout_clks * 1000) /
			      (card->host->ios.clock / 1000);

		erase_timeout = timeout_us / 1000;

		/*
		 * Theoretically, the calculation could underflow so round up
		 * to 1ms in that case.
		 */
		if (!erase_timeout)
			erase_timeout = 1;
	}

	/* Multiplier for secure operations */
	if (arg & MMC_SECURE_ARGS) {
		if (arg == MMC_SECURE_ERASE_ARG)
			erase_timeout *= card->ext_csd.sec_erase_mult;
		else
			erase_timeout *= card->ext_csd.sec_trim_mult;
	}

	erase_timeout *= qty;

	/*
	 * Ensure at least a 1 second timeout for SPI as per
	 * 'mmc_set_data_timeout()'
	 */
	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
					 unsigned int arg,
					 unsigned int qty)
{
	unsigned int erase_timeout;

	if (card->ssr.erase_timeout) {
		/* Erase timeout specified in SD Status Register (SSR) */
		erase_timeout = card->ssr.erase_timeout * qty +
				card->ssr.erase_offset;
	} else {
		/*
		 * Erase timeout not specified in SD Status Register (SSR) so
		 * use 250ms per write block.
		 */
		erase_timeout = 250 * qty;
	}

	/* Must not be less than 1 second */
	if (erase_timeout < 1000)
		erase_timeout = 1000;

	return erase_timeout;
}
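
/*
 * Worked example: for an SD card without an SSR-specified erase timeout,
 * erasing qty = 3 allocation-unit-sized chunks gives 250 ms * 3 = 750 ms,
 * which the function above then raises to its 1000 ms floor.
 */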
static unsigned int mmc_erase_timeout(struct mmc_card *card,
				      unsigned int arg,
				      unsigned int qty)
{
	if (mmc_card_sd(card))
		return mmc_sd_erase_timeout(card, arg, qty);
	else
		return mmc_mmc_erase_timeout(card, arg, qty);
}
static int mmc_do_erase(struct mmc_card *card, unsigned int from,
			unsigned int to, unsigned int arg)
{
	struct mmc_command cmd = {0};
	unsigned int qty = 0, busy_timeout = 0;
	bool use_r1b_resp = false;
	unsigned long timeout;
	int err;

	mmc_retune_hold(card->host);

	/*
	 * qty is used to calculate the erase timeout which depends on how many
	 * erase groups (or allocation units in SD terminology) are affected.
	 * We count erasing part of an erase group as one erase group.
	 * For SD, the allocation units are always a power of 2.  For MMC, the
	 * erase group size is almost certainly also power of 2, but it does not
	 * seem to insist on that in the JEDEC standard, so we fall back to
	 * division in that case.  SD may not specify an allocation unit size,
	 * in which case the timeout is based on the number of write blocks.
	 *
	 * Note that the timeout for secure trim 2 will only be correct if the
	 * number of erase groups specified is the same as the total of all
	 * preceding secure trim 1 commands.  Since the power may have been
	 * lost since the secure trim 1 commands occurred, it is generally
	 * impossible to calculate the secure trim 2 timeout correctly.
	 */
	if (card->erase_shift)
		qty += ((to >> card->erase_shift) -
			(from >> card->erase_shift)) + 1;
	else if (mmc_card_sd(card))
		qty += to - from + 1;
	else
		qty += ((to / card->erase_size) -
			(from / card->erase_size)) + 1;

	if (!mmc_card_blockaddr(card)) {
		from <<= 9;
		to <<= 9;
	}

	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_START;
	else
		cmd.opcode = MMC_ERASE_GROUP_START;
	cmd.arg = from;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group start error %d, "
		       "status %#x\n", err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	if (mmc_card_sd(card))
		cmd.opcode = SD_ERASE_WR_BLK_END;
	else
		cmd.opcode = MMC_ERASE_GROUP_END;
	cmd.arg = to;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: group end error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	memset(&cmd, 0, sizeof(struct mmc_command));
	cmd.opcode = MMC_ERASE;
	cmd.arg = arg;
	busy_timeout = mmc_erase_timeout(card, arg, qty);
	/*
	 * If the host controller supports busy signalling and the timeout for
	 * the erase operation does not exceed the max_busy_timeout, we should
	 * use R1B response. Or we need to prevent the host from doing hw busy
	 * detection, which is done by converting to a R1 response instead.
	 */
	if (card->host->max_busy_timeout &&
	    busy_timeout > card->host->max_busy_timeout) {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = busy_timeout;
		use_r1b_resp = true;
	}

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_err("mmc_erase: erase error %d, status %#x\n",
		       err, cmd.resp[0]);
		err = -EIO;
		goto out;
	}

	if (mmc_host_is_spi(card->host))
		goto out;

	/*
	 * When R1B + MMC_CAP_WAIT_WHILE_BUSY is used, the polling
	 * shall be avoided.
	 */
	if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
		goto out;

	timeout = jiffies + msecs_to_jiffies(busy_timeout);
	do {
		memset(&cmd, 0, sizeof(struct mmc_command));
		cmd.opcode = MMC_SEND_STATUS;
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
		/* Do not retry else we can't see errors */
		err = mmc_wait_for_cmd(card->host, &cmd, 0);
		if (err || (cmd.resp[0] & 0xFDF92000)) {
			pr_err("error %d requesting status %#x\n",
				err, cmd.resp[0]);
			err = -EIO;
			goto out;
		}

		/* Timeout if the device never becomes ready for data and
		 * never leaves the program state.
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s\n",
				mmc_hostname(card->host), __func__);
			err = -EIO;
			goto out;
		}

	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
	mmc_retune_release(card->host);
	return err;
}
static unsigned int mmc_align_erase_size(struct mmc_card *card,
					 unsigned int *from,
					 unsigned int *to,
					 unsigned int nr)
{
	unsigned int from_new = *from, nr_new = nr, rem;

	/*
	 * When the 'card->erase_size' is power of 2, we can use round_up/down()
	 * to align the erase size efficiently.
	 */
	if (is_power_of_2(card->erase_size)) {
		unsigned int temp = from_new;

		from_new = round_up(temp, card->erase_size);
		rem = from_new - temp;

		if (nr_new > rem)
			nr_new -= rem;
		else
			return 0;

		nr_new = round_down(nr_new, card->erase_size);
	} else {
		rem = from_new % card->erase_size;
		if (rem) {
			rem = card->erase_size - rem;
			from_new += rem;
			if (nr_new > rem)
				nr_new -= rem;
			else
				return 0;
		}

		rem = nr_new % card->erase_size;
		if (rem)
			nr_new -= rem;
	}

	if (nr_new == 0)
		return 0;

	*to = from_new + nr_new;
	*from = from_new;

	return nr_new;
}
/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
	      unsigned int arg)
{
	unsigned int rem, to = from + nr;
	int err;

	if (!(card->host->caps & MMC_CAP_ERASE) ||
	    !(card->csd.cmdclass & CCC_ERASE))
		return -EOPNOTSUPP;

	if (!card->erase_size)
		return -EOPNOTSUPP;

	if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
		return -EOPNOTSUPP;

	if ((arg & MMC_SECURE_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
		return -EOPNOTSUPP;

	if ((arg & MMC_TRIM_ARGS) &&
	    !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
		return -EOPNOTSUPP;

	if (arg == MMC_SECURE_ERASE_ARG) {
		if (from % card->erase_size || nr % card->erase_size)
			return -EINVAL;
	}

	if (arg == MMC_ERASE_ARG)
		nr = mmc_align_erase_size(card, &from, &to, nr);

	if (nr == 0)
		return 0;

	if (to <= from)
		return -EINVAL;

	/* 'from' and 'to' are inclusive */
	to -= 1;

	/*
	 * Special case where only one erase-group fits in the timeout budget:
	 * if the region crosses an erase-group boundary in this particular
	 * case, we will be trimming more than one erase-group, which does not
	 * fit in the timeout budget of the controller, so we need to split it
	 * and call mmc_do_erase() twice if necessary. This special case is
	 * identified by the card->eg_boundary flag.
	 */
	rem = card->erase_size - (from % card->erase_size);
	if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
		err = mmc_do_erase(card, from, from + rem - 1, arg);
		from += rem;
		if ((err) || (to <= from))
			return err;
	}

	return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

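/*
 * Usage sketch (hypothetical caller, not part of this file): discard a
 * region on a claimed host, preferring trim over plain erase. The
 * surrounding driver would supply 'card', 'start' and 'count'.
 *
 *	unsigned int arg = mmc_can_trim(card) ? MMC_TRIM_ARG : MMC_ERASE_ARG;
 *	int err;
 *
 *	mmc_claim_host(card->host);
 *	err = mmc_erase(card, start, count, arg);
 *	mmc_release_host(card->host);
 *	if (err)
 *		pr_warn("discard of %u+%u failed: %d\n", start, count, err);
 */
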
int mmc_can_erase(struct mmc_card *card)
{
	if ((card->host->caps & MMC_CAP_ERASE) &&
	    (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) &&
	    (!(card->quirks & MMC_QUIRK_TRIM_BROKEN)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
	/*
	 * As there's no way to detect the discard support bit at v4.5,
	 * use the s/w feature support field.
	 */
	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
	if (!mmc_can_trim(card) && !mmc_can_erase(card))
		return 0;
	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
	if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return 1;
	return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

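/*
 * Illustrative sketch (assumed caller, not part of this file): picking an
 * erase argument based on the mmc_can_*() capability helpers, in the same
 * spirit as the block driver does when mapping a discard request.
 *
 *	unsigned int arg;
 *
 *	if (mmc_can_discard(card))
 *		arg = MMC_DISCARD_ARG;
 *	else if (mmc_can_trim(card))
 *		arg = MMC_TRIM_ARG;
 *	else
 *		arg = MMC_ERASE_ARG;
 */
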
int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
			    unsigned int nr)
{
	if (!card->erase_size)
		return 0;
	if (from % card->erase_size || nr % card->erase_size)
		return 0;
	return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

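/*
 * Example (assumed numbers): with card->erase_size = 1024, the range
 * from = 2048, nr = 4096 is erase-group aligned (both are multiples of
 * 1024), while from = 2048, nr = 4100 is not. Secure erase requires the
 * former; see the MMC_SECURE_ERASE_ARG check in mmc_erase().
 */
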
static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
					    unsigned int arg)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, x, y, qty = 0, max_qty, min_qty, timeout;
	unsigned int last_timeout = 0;
	unsigned int max_busy_timeout = host->max_busy_timeout ?
			host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS;

	if (card->erase_shift) {
		max_qty = UINT_MAX >> card->erase_shift;
		min_qty = card->pref_erase >> card->erase_shift;
	} else if (mmc_card_sd(card)) {
		max_qty = UINT_MAX;
		min_qty = card->pref_erase;
	} else {
		max_qty = UINT_MAX / card->erase_size;
		min_qty = card->pref_erase / card->erase_size;
	}

	/*
	 * We should not only use 'host->max_busy_timeout' as the limitation
	 * when deciding the max discard sectors. We should set a balanced
	 * value to improve the erase speed, while not letting the timeout
	 * grow too long at the same time.
	 *
	 * Here we set 'card->pref_erase' as the minimal discard sectors no
	 * matter what size 'host->max_busy_timeout' is, but if
	 * 'host->max_busy_timeout' is large enough for more discard sectors,
	 * then we can continue to increase the max discard sectors until we
	 * get a balanced value. In cases when 'host->max_busy_timeout'
	 * isn't specified, use the default max erase timeout.
	 */
	do {
		y = 0;
		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
			timeout = mmc_erase_timeout(card, arg, qty + x);

			if (qty + x > min_qty && timeout > max_busy_timeout)
				break;

			if (timeout < last_timeout)
				break;
			last_timeout = timeout;
			y = x;
		}
		qty += y;
	} while (y);

	if (!qty)
		return 0;

	/*
	 * When specifying a sector range to trim, chances are we might cross
	 * an erase-group boundary even if the amount of sectors is less than
	 * one erase-group.
	 * If we can only fit one erase-group in the controller timeout budget,
	 * we have to care that erase-group boundaries are not crossed by a
	 * single trim operation. We flag that special case with "eg_boundary".
	 * In all other cases we can just decrement qty and pretend that we
	 * always touch (qty + 1) erase-groups as a simple optimization.
	 */
	if (qty == 1)
		card->eg_boundary = 1;
	else
		qty--;

	/* Convert qty to sectors */
	if (card->erase_shift)
		max_discard = qty << card->erase_shift;
	else if (mmc_card_sd(card))
		max_discard = qty + 1;
	else
		max_discard = qty * card->erase_size;

	return max_discard;
}

unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	unsigned int max_discard, max_trim;

	/*
	 * Without erase_group_def set, MMC erase timeout depends on clock
	 * frequency, which can change. In that case, the best choice is
	 * just the preferred erase size.
	 */
	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
		return card->pref_erase;

	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
	if (mmc_can_trim(card)) {
		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
		if (max_trim < max_discard)
			max_discard = max_trim;
	} else if (max_discard < card->erase_size) {
		max_discard = 0;
	}
	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
		 mmc_hostname(host), max_discard, host->max_busy_timeout ?
		 host->max_busy_timeout : MMC_ERASE_TIMEOUT_MS);
	return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

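/*
 * Usage sketch (hypothetical, modelled on what a block-layer consumer
 * might do): advertise the computed limit as the request queue's maximum
 * discard size. 'q' is an assumed struct request_queue pointer.
 *
 *	unsigned int max_discard = mmc_calc_max_discard(card);
 *
 *	if (max_discard)
 *		blk_queue_max_discard_sectors(q, max_discard);
 */
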
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
	struct mmc_command cmd = {0};

	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
		return 0;

	cmd.opcode = MMC_SET_BLOCKLEN;
	cmd.arg = blocklen;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

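/*
 * Example (illustrative): legacy byte-addressed cards need CMD16 before
 * 512-byte block I/O; for block-addressed (high-capacity) or DDR52 cards
 * the call is a no-op and returns 0.
 *
 *	err = mmc_set_blocklen(card, 512);
 *	if (err)
 *		return err;
 */
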
int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
		       bool is_rel_write)
{
	struct mmc_command cmd = {0};

	cmd.opcode = MMC_SET_BLOCK_COUNT;
	cmd.arg = blockcount & 0x0000FFFF;
	if (is_rel_write)
		cmd.arg |= 1 << 31;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);

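/*
 * Usage sketch (assumed caller): issue CMD23 to pre-define the length of
 * the following multi-block transfer, optionally tagging it as a reliable
 * write. 'nr_blocks' is an assumed variable.
 *
 *	err = mmc_set_blockcount(card, nr_blocks, false);
 *	if (err)
 *		return err;
 *	// ...now issue the multi-block read/write request...
 */
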
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
		return;
	host->ops->hw_reset(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
	int ret;

	if (!host->card)
		return -EINVAL;

	mmc_bus_get(host);
	if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
		mmc_bus_put(host);
		return -EOPNOTSUPP;
	}

	ret = host->bus_ops->reset(host);
	mmc_bus_put(host);

	if (ret)
		pr_warn("%s: tried to reset card, got error %d\n",
			mmc_hostname(host), ret);

	return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);

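/*
 * Usage sketch (hypothetical error-recovery path): after repeated I/O
 * failures, a driver holding the host may attempt a full power cycle and
 * re-initialization of the card.
 *
 *	mmc_claim_host(host);
 *	err = mmc_hw_reset(host);
 *	mmc_release_host(host);
 *	if (err == -EOPNOTSUPP)
 *		pr_debug("%s: no reset support\n", mmc_hostname(host));
 */
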
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
	host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: trying to init card at %u Hz\n",
		mmc_hostname(host), __func__, host->f_init);
#endif
	mmc_power_up(host, host->ocr_avail);

	/*
	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so
	 * do a hardware reset if possible.
	 */
	mmc_hw_reset_for_init(host);

	/*
	 * sdio_reset sends CMD52 to reset the card. Since we do not know
	 * if the card is being re-initialized, just send it. CMD52
	 * should be ignored by SD/eMMC cards.
	 * Skip it if we already know that we do not support SDIO commands.
	 */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		sdio_reset(host);

	mmc_go_idle(host);

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		mmc_send_if_cond(host, host->ocr_avail);

	/* Order's important: probe SDIO, then SD, then MMC */
	if (!(host->caps2 & MMC_CAP2_NO_SDIO))
		if (!mmc_attach_sdio(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_SD))
		if (!mmc_attach_sd(host))
			return 0;

	if (!(host->caps2 & MMC_CAP2_NO_MMC))
		if (!mmc_attach_mmc(host))
			return 0;

	mmc_power_off(host);
	return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
	int ret;

	if (!host->card || mmc_card_removed(host->card))
		return 1;

	ret = host->bus_ops->alive(host);

	/*
	 * Card detect status and alive check may be out of sync if card is
	 * removed slowly, when card detect switch changes while card/slot
	 * pads are still contacted in hardware (refer to "SD Card Mechanical
	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a
	 * detect work 200ms later for this case.
	 */
	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
		mmc_detect_change(host, msecs_to_jiffies(200));
		pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
	}

	if (ret) {
		mmc_card_set_removed(host->card);
		pr_debug("%s: card remove detected\n", mmc_hostname(host));
	}

	return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
	struct mmc_card *card = host->card;
	int ret;

	WARN_ON(!host->claimed);

	if (!card)
		return 1;

	if (!mmc_card_is_removable(host))
		return 0;

	ret = mmc_card_removed(card);
	/*
	 * The card will be considered unchanged unless we have been asked to
	 * detect a change or the host requires polling to provide card
	 * detection.
	 */
	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
		return ret;

	host->detect_change = 0;
	if (!ret) {
		ret = _mmc_detect_card_removed(host);
		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
			/*
			 * Schedule a detect work as soon as possible to let a
			 * rescan handle the card removal.
			 */
			cancel_delayed_work(&host->detect);
			_mmc_detect_change(host, 0, false);
		}
	}

	return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

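/*
 * Usage sketch (assumed request path): a block driver can bail out early
 * on a request when the card is already known to be gone.
 *
 *	mmc_claim_host(card->host);
 *	if (mmc_detect_card_removed(card->host)) {
 *		mmc_release_host(card->host);
 *		return -ENOMEDIUM;
 *	}
 */
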
void mmc_rescan(struct work_struct *work)
{
	struct mmc_host *host =
		container_of(work, struct mmc_host, detect.work);
	int i;

	if (host->rescan_disable)
		return;

	/* If there is a non-removable card registered, only scan once */
	if (!mmc_card_is_removable(host) && host->rescan_entered)
		return;
	host->rescan_entered = 1;

	if (host->trigger_card_event && host->ops->card_event) {
		mmc_claim_host(host);
		host->ops->card_event(host);
		mmc_release_host(host);
		host->trigger_card_event = false;
	}

	mmc_bus_get(host);

	/*
	 * If there is a _removable_ card registered, check whether it is
	 * still present.
	 */
	if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
		host->bus_ops->detect(host);

	host->detect_change = 0;

	/*
	 * Let mmc_bus_put() free the bus/bus_ops if we've found that
	 * the card is no longer present.
	 */
	mmc_bus_put(host);
	mmc_bus_get(host);

	/* if there still is a card present, stop here */
	if (host->bus_ops != NULL) {
		mmc_bus_put(host);
		goto out;
	}

	/*
	 * Only we can add a new handler, so it's safe to
	 * release the lock here.
	 */
	mmc_bus_put(host);

	mmc_claim_host(host);
	if (mmc_card_is_removable(host) && host->ops->get_cd &&
	    host->ops->get_cd(host) == 0) {
		mmc_power_off(host);
		mmc_release_host(host);
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(freqs); i++) {
		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
			break;
		if (freqs[i] <= host->f_min)
			break;
	}
	mmc_release_host(host);

 out:
	if (host->caps & MMC_CAP_NEEDS_POLL)
		mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
	host->f_init = max(freqs[0], host->f_min);
	host->rescan_disable = 0;
	host->ios.power_mode = MMC_POWER_UNDEFINED;

	mmc_claim_host(host);
	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
		mmc_power_off(host);
	else
		mmc_power_up(host, host->ocr_avail);
	mmc_release_host(host);

	mmc_gpiod_request_cd_irq(host);
	_mmc_detect_change(host, 0, false);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
	unsigned long flags;
	spin_lock_irqsave(&host->lock, flags);
	host->removed = 1;
	spin_unlock_irqrestore(&host->lock, flags);
#endif
	if (host->slot.cd_irq >= 0)
		disable_irq(host->slot.cd_irq);

	host->rescan_disable = 1;
	cancel_delayed_work_sync(&host->detect);

	/* clear pm flags now and let card drivers set them as needed */
	host->pm_flags = 0;

	mmc_bus_get(host);
	if (host->bus_ops && !host->bus_dead) {
		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		mmc_bus_put(host);
		return;
	}
	mmc_bus_put(host);

	BUG_ON(host->card);

	mmc_claim_host(host);
	mmc_power_off(host);
	mmc_release_host(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
	int ret = 0;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	if (host->bus_ops->power_save)
		ret = host->bus_ops->power_save(host);

	mmc_bus_put(host);

	mmc_power_off(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
	int ret;

#ifdef CONFIG_MMC_DEBUG
	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

	mmc_bus_get(host);

	if (!host->bus_ops || host->bus_dead) {
		mmc_bus_put(host);
		return -EINVAL;
	}

	mmc_power_up(host, host->card->ocr);
	ret = host->bus_ops->power_restore(host);

	mmc_bus_put(host);

	return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

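/*
 * Usage sketch (assumed suspend path): flush the card's volatile cache
 * before powering down so queued writes reach non-volatile storage.
 *
 *	err = mmc_flush_cache(card);
 *	if (err)
 *		return err;	// abort the power-down on flush failure
 */
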
#ifdef CONFIG_PM_SLEEP
/*
 * Do the card removal on suspend if the card is assumed removable.
 * Do that in the pm notifier while userspace isn't yet frozen, so we
 * will be able to sync the card.
 */
static int mmc_pm_notify(struct notifier_block *notify_block,
			 unsigned long mode, void *unused)
{
	struct mmc_host *host = container_of(
		notify_block, struct mmc_host, pm_notify);
	unsigned long flags;
	int err = 0;

	switch (mode) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
	case PM_RESTORE_PREPARE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 1;
		spin_unlock_irqrestore(&host->lock, flags);
		cancel_delayed_work_sync(&host->detect);

		if (!host->bus_ops)
			break;

		/* Validate prerequisites for suspend */
		if (host->bus_ops->pre_suspend)
			err = host->bus_ops->pre_suspend(host);
		if (!err)
			break;

		/* Calling bus_ops->remove() with a claimed host can deadlock */
		host->bus_ops->remove(host);
		mmc_claim_host(host);
		mmc_detach_bus(host);
		mmc_power_off(host);
		mmc_release_host(host);
		host->pm_flags = 0;
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		spin_lock_irqsave(&host->lock, flags);
		host->rescan_disable = 0;
		spin_unlock_irqrestore(&host->lock, flags);
		_mmc_detect_change(host, 0, false);
		break;
	}

	return 0;
}

void mmc_register_pm_notifier(struct mmc_host *host)
{
	host->pm_notify.notifier_call = mmc_pm_notify;
	register_pm_notifier(&host->pm_notify);
}

void mmc_unregister_pm_notifier(struct mmc_host *host)
{
	unregister_pm_notifier(&host->pm_notify);
}
#endif

/**
 * mmc_init_context_info() - init synchronization context
 * @host: The host to init
 *
 * Init struct context_info needed to implement the asynchronous
 * request mechanism, used by the mmc core, host drivers and mmc request
 * suppliers.
 */
void mmc_init_context_info(struct mmc_host *host)
{
	spin_lock_init(&host->context_info.lock);
	host->context_info.is_new_req = false;
	host->context_info.is_done_rcv = false;
	host->context_info.is_waiting_last_req = false;
	init_waitqueue_head(&host->context_info.wait);
}

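/*
 * Illustrative sketch (simplified from the async request machinery this
 * context backs): a consumer waits on context_info.wait until either the
 * current transfer completes or a new request arrives.
 *
 *	struct mmc_context_info *context_info = &host->context_info;
 *
 *	wait_event_interruptible(context_info->wait,
 *				 (context_info->is_done_rcv ||
 *				  context_info->is_new_req));
 */
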
static int __init mmc_init(void)
{
	int ret;

	ret = mmc_register_bus();
	if (ret)
		return ret;

	ret = mmc_register_host_class();
	if (ret)
		goto unregister_bus;

	ret = sdio_register_bus();
	if (ret)
		goto unregister_host_class;

	return 0;

unregister_host_class:
	mmc_unregister_host_class();
unregister_bus:
	mmc_unregister_bus();
	return ret;
}

static void __exit mmc_exit(void)
{
	sdio_unregister_bus();
	mmc_unregister_host_class();
	mmc_unregister_bus();
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");