2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright (c) 2016 BayLibre, SAS.
8 * Author: Kevin Hilman <khilman@baylibre.com>
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, see <http://www.gnu.org/licenses/>.
21 * The full GNU General Public License is included in this distribution
22 * in the file called COPYING.
26 * Copyright (c) 2016 BayLibre, SAS.
27 * Author: Kevin Hilman <khilman@baylibre.com>
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
55 #include <linux/kernel.h>
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/init.h>
59 #include <linux/device.h>
60 #include <linux/of_device.h>
61 #include <linux/platform_device.h>
62 #include <linux/ioport.h>
63 #include <linux/regmap.h>
64 #include <linux/spinlock.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/mmc/host.h>
67 #include <linux/mmc/mmc.h>
68 #include <linux/mmc/sdio.h>
69 #include <linux/mmc/slot-gpio.h>
71 #include <linux/clk.h>
72 #include <linux/clk-provider.h>
#define DRIVER_NAME "meson-gxbb-mmc"

/* SD_EMMC controller register map and bit-field definitions. */

#define SD_EMMC_CLOCK 0x0
#define CLK_DIV_SHIFT 0
#define CLK_DIV_WIDTH 6
#define CLK_DIV_MASK 0x3f
#define CLK_DIV_MAX 63
#define CLK_SRC_SHIFT 6
#define CLK_SRC_WIDTH 2
#define CLK_SRC_MASK 0x3
#define CLK_SRC_XTAL 0 /* external crystal */
#define CLK_SRC_XTAL_RATE 24000000
#define CLK_SRC_PLL 1 /* FCLK_DIV2 */
#define CLK_SRC_PLL_RATE 1000000000
#define CLK_PHASE_SHIFT 8
#define CLK_PHASE_MASK 0x3
#define CLK_PHASE_0 0 /* NOTE(review): reconstructed; line lost in source */
#define CLK_PHASE_90 1
#define CLK_PHASE_180 2
#define CLK_PHASE_270 3
#define CLK_ALWAYS_ON BIT(24)

/* NOTE(review): "DElAY" (lowercase l) is a typo, kept because code outside
 * this view may reference the name; rename file-wide in a follow-up. */
#define SD_EMMC_DElAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_CALOUT 0x10

#define SD_EMMC_START 0x40
#define START_DESC_INIT BIT(0)
#define START_DESC_BUSY BIT(1)
#define START_DESC_ADDR_SHIFT 2
#define START_DESC_ADDR_MASK (~0x3)

#define SD_EMMC_CFG 0x44
#define CFG_BUS_WIDTH_SHIFT 0
#define CFG_BUS_WIDTH_MASK 0x3
#define CFG_BUS_WIDTH_1 0x0
#define CFG_BUS_WIDTH_4 0x1
#define CFG_BUS_WIDTH_8 0x2
#define CFG_DDR BIT(2)
#define CFG_BLK_LEN_SHIFT 4
#define CFG_BLK_LEN_MASK 0xf
#define CFG_RESP_TIMEOUT_SHIFT 8
#define CFG_RESP_TIMEOUT_MASK 0xf
#define CFG_RC_CC_SHIFT 12
#define CFG_RC_CC_MASK 0xf
#define CFG_STOP_CLOCK BIT(22)
#define CFG_CLK_ALWAYS_ON BIT(18)
#define CFG_AUTO_CLK BIT(23)

#define SD_EMMC_STATUS 0x48
#define STATUS_BUSY BIT(31)

#define SD_EMMC_IRQ_EN 0x4c
#define IRQ_EN_MASK 0x3fff
#define IRQ_RXD_ERR_SHIFT 0
#define IRQ_RXD_ERR_MASK 0xff
#define IRQ_TXD_ERR BIT(8)
#define IRQ_DESC_ERR BIT(9)
#define IRQ_RESP_ERR BIT(10)
#define IRQ_RESP_TIMEOUT BIT(11)
#define IRQ_DESC_TIMEOUT BIT(12)
#define IRQ_END_OF_CHAIN BIT(13)
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)

#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68

#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define MUX_CLK_NUM_PARENTS 2
157 struct mmc_host
*mmc
;
158 struct mmc_request
*mrq
;
159 struct mmc_command
*cmd
;
164 struct regmap
*regmap
;
168 struct clk
*core_clk
;
171 struct clk
*mux_parent
[MUX_CLK_NUM_PARENTS
];
172 unsigned long mux_parent_rate
[MUX_CLK_NUM_PARENTS
];
174 struct clk_divider cfg_div
;
175 struct clk
*cfg_div_clk
;
177 unsigned int bounce_buf_size
;
179 dma_addr_t bounce_dma_addr
;
181 unsigned long clk_rate
;
182 unsigned long clk_src_rate
;
183 unsigned short clk_src_div
;
/*
 * MMIO accessors; 'offset' is a byte offset from the SD_EMMC register base.
 * Arguments are parenthesized to be safe under macro expansion.
 */
#define reg_read(host, offset) readl((host)->regs + (offset))
#define reg_write(host, offset, val) writel((val), (host)->regs + (offset))
196 u32 timeout
:4; /* 2^timeout msec */
208 #define CFG_OWNER_CPU 1
209 #define CFG_OWNER_MMC 0
215 struct sd_emmc_desc
{
216 struct cmd_cfg cmd_cfg
;
221 #define CMD_DATA_MASK (~0x3)
222 #define CMD_DATA_BIG_ENDIAN BIT(1)
223 #define CMD_DATA_SRAM BIT(0)
224 #define CMD_RESP_MASK (~0x1)
225 #define CMD_RESP_SRAM BIT(0)
227 static int meson_mmc_clk_set(struct meson_host
*host
, unsigned long clk_rate
)
229 struct mmc_host
*mmc
= host
->mmc
;
234 if (WARN_ON(clk_rate
> mmc
->f_max
))
235 clk_rate
= mmc
->f_max
;
236 else if (WARN_ON(clk_rate
< mmc
->f_min
))
237 clk_rate
= mmc
->f_min
;
240 if (clk_rate
== host
->clk_rate
)
244 cfg
= reg_read(host
, SD_EMMC_CFG
);
245 if (!(cfg
& CFG_STOP_CLOCK
)) {
246 cfg
|= CFG_STOP_CLOCK
;
247 reg_write(host
, SD_EMMC_CFG
, cfg
);
250 dev_dbg(host
->dev
, "change clock rate %lu -> %lu\n",
251 host
->clk_rate
, clk_rate
);
252 ret
= clk_set_rate(host
->cfg_div_clk
, clk_rate
);
253 if (clk_rate
&& clk_rate
!= clk_get_rate(host
->cfg_div_clk
))
254 dev_warn(host
->dev
, "divider requested rate %lu != actual rate %lu: ret=%d\n",
255 clk_rate
, clk_get_rate(host
->cfg_div_clk
), ret
);
257 host
->clk_rate
= clk_rate
;
259 /* (re)start clock, if non-zero */
261 cfg
= reg_read(host
, SD_EMMC_CFG
);
262 cfg
&= ~CFG_STOP_CLOCK
;
263 reg_write(host
, SD_EMMC_CFG
, cfg
);
269 static int meson_mmc_clk_init(struct meson_host
*host
)
271 struct clk_init_data init
;
274 const char *mux_parent_names
[MUX_CLK_NUM_PARENTS
];
275 unsigned int mux_parent_count
= 0;
276 const char *clk_div_parents
[1];
277 unsigned int f_min
= UINT_MAX
;
280 /* get the mux parents from DT */
281 for (i
= 0; i
< MUX_CLK_NUM_PARENTS
; i
++) {
284 snprintf(name
, sizeof(name
), "clkin%d", i
);
285 host
->mux_parent
[i
] = devm_clk_get(host
->dev
, name
);
286 if (IS_ERR(host
->mux_parent
[i
])) {
287 ret
= PTR_ERR(host
->mux_parent
[i
]);
288 if (PTR_ERR(host
->mux_parent
[i
]) != -EPROBE_DEFER
)
289 dev_err(host
->dev
, "Missing clock %s\n", name
);
290 host
->mux_parent
[i
] = NULL
;
294 host
->mux_parent_rate
[i
] = clk_get_rate(host
->mux_parent
[i
]);
295 mux_parent_names
[i
] = __clk_get_name(host
->mux_parent
[i
]);
297 if (host
->mux_parent_rate
[i
] < f_min
)
298 f_min
= host
->mux_parent_rate
[i
];
301 /* cacluate f_min based on input clocks, and max divider value */
302 if (f_min
!= UINT_MAX
)
303 f_min
= DIV_ROUND_UP(CLK_SRC_XTAL_RATE
, CLK_DIV_MAX
);
305 f_min
= 4000000; /* default min: 400 MHz */
306 host
->mmc
->f_min
= f_min
;
309 snprintf(clk_name
, sizeof(clk_name
), "%s#mux", dev_name(host
->dev
));
310 init
.name
= clk_name
;
311 init
.ops
= &clk_mux_ops
;
312 init
.flags
= CLK_IS_BASIC
;
313 init
.parent_names
= mux_parent_names
;
314 init
.num_parents
= mux_parent_count
;
316 host
->mux
.reg
= host
->regs
+ SD_EMMC_CLOCK
;
317 host
->mux
.shift
= CLK_SRC_SHIFT
;
318 host
->mux
.mask
= CLK_SRC_MASK
;
320 host
->mux
.table
= NULL
;
321 host
->mux
.hw
.init
= &init
;
323 host
->mux_clk
= devm_clk_register(host
->dev
, &host
->mux
.hw
);
324 if (WARN_ON(PTR_ERR_OR_ZERO(host
->mux_clk
)))
325 return PTR_ERR(host
->mux_clk
);
327 /* create the divider */
328 snprintf(clk_name
, sizeof(clk_name
), "%s#div", dev_name(host
->dev
));
329 init
.name
= devm_kstrdup(host
->dev
, clk_name
, GFP_KERNEL
);
330 init
.ops
= &clk_divider_ops
;
331 init
.flags
= CLK_IS_BASIC
| CLK_SET_RATE_PARENT
;
332 clk_div_parents
[0] = __clk_get_name(host
->mux_clk
);
333 init
.parent_names
= clk_div_parents
;
334 init
.num_parents
= ARRAY_SIZE(clk_div_parents
);
336 host
->cfg_div
.reg
= host
->regs
+ SD_EMMC_CLOCK
;
337 host
->cfg_div
.shift
= CLK_DIV_SHIFT
;
338 host
->cfg_div
.width
= CLK_DIV_WIDTH
;
339 host
->cfg_div
.hw
.init
= &init
;
340 host
->cfg_div
.flags
= CLK_DIVIDER_ONE_BASED
|
341 CLK_DIVIDER_ROUND_CLOSEST
| CLK_DIVIDER_ALLOW_ZERO
;
343 host
->cfg_div_clk
= devm_clk_register(host
->dev
, &host
->cfg_div
.hw
);
344 if (WARN_ON(PTR_ERR_OR_ZERO(host
->cfg_div_clk
)))
345 return PTR_ERR(host
->cfg_div_clk
);
347 /* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
349 clk_reg
|= CLK_PHASE_180
<< CLK_PHASE_SHIFT
;
350 clk_reg
|= CLK_SRC_XTAL
<< CLK_SRC_SHIFT
;
351 clk_reg
|= CLK_DIV_MAX
<< CLK_DIV_SHIFT
;
352 clk_reg
&= ~CLK_ALWAYS_ON
;
353 reg_write(host
, SD_EMMC_CLOCK
, clk_reg
);
355 clk_prepare_enable(host
->cfg_div_clk
);
357 /* Ensure clock starts in "auto" mode, not "always on" */
358 cfg
= reg_read(host
, SD_EMMC_CFG
);
359 cfg
&= ~CFG_CLK_ALWAYS_ON
;
361 reg_write(host
, SD_EMMC_CFG
, cfg
);
363 meson_mmc_clk_set(host
, f_min
);
368 static void meson_mmc_set_ios(struct mmc_host
*mmc
, struct mmc_ios
*ios
)
370 struct meson_host
*host
= mmc_priv(mmc
);
375 * GPIO regulator, only controls switching between 1v8 and
376 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
378 switch (ios
->power_mode
) {
380 if (!IS_ERR(mmc
->supply
.vmmc
))
381 mmc_regulator_set_ocr(mmc
, mmc
->supply
.vmmc
, ios
->vdd
);
384 meson_mmc_clk_set(host
, ios
->clock
);
387 val
= reg_read(host
, SD_EMMC_CFG
);
388 switch (ios
->bus_width
) {
389 case MMC_BUS_WIDTH_1
:
390 bus_width
= CFG_BUS_WIDTH_1
;
392 case MMC_BUS_WIDTH_4
:
393 bus_width
= CFG_BUS_WIDTH_4
;
395 case MMC_BUS_WIDTH_8
:
396 bus_width
= CFG_BUS_WIDTH_8
;
399 dev_err(host
->dev
, "Invalid ios->bus_width: %u. Setting to 4.\n",
401 bus_width
= CFG_BUS_WIDTH_4
;
405 val
= reg_read(host
, SD_EMMC_CFG
);
408 val
&= ~(CFG_BUS_WIDTH_MASK
<< CFG_BUS_WIDTH_SHIFT
);
409 val
|= bus_width
<< CFG_BUS_WIDTH_SHIFT
;
411 val
&= ~(CFG_BLK_LEN_MASK
<< CFG_BLK_LEN_SHIFT
);
412 val
|= ilog2(SD_EMMC_CFG_BLK_SIZE
) << CFG_BLK_LEN_SHIFT
;
414 val
&= ~(CFG_RESP_TIMEOUT_MASK
<< CFG_RESP_TIMEOUT_SHIFT
);
415 val
|= ilog2(SD_EMMC_CFG_RESP_TIMEOUT
) << CFG_RESP_TIMEOUT_SHIFT
;
417 val
&= ~(CFG_RC_CC_MASK
<< CFG_RC_CC_SHIFT
);
418 val
|= ilog2(SD_EMMC_CFG_CMD_GAP
) << CFG_RC_CC_SHIFT
;
420 reg_write(host
, SD_EMMC_CFG
, val
);
423 dev_dbg(host
->dev
, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
424 __func__
, orig
, val
);
427 static int meson_mmc_wait_busy(struct mmc_host
*mmc
, unsigned int timeout
)
429 struct meson_host
*host
= mmc_priv(mmc
);
433 for (i
= timeout
; i
> 0; i
--) {
434 status
= reg_read(host
, SD_EMMC_STATUS
);
435 if (!(status
& STATUS_BUSY
))
439 return (timeout
== 0);
442 static int meson_mmc_request_done(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
444 struct meson_host
*host
= mmc_priv(mmc
);
445 struct mmc_command
*cmd
= host
->cmd
;
446 unsigned int loops
= 0xfffff;
448 WARN_ON(host
->mrq
!= mrq
);
449 if (cmd
&& !cmd
->error
)
450 if (meson_mmc_wait_busy(mmc
, loops
))
451 dev_warn(host
->dev
, "%s: timeout busy.\n", __func__
);
455 mmc_request_done(host
->mmc
, mrq
);
460 static int meson_mmc_cmd_invalid(struct mmc_host
*mmc
, struct mmc_command
*cmd
)
462 cmd
->error
= -EINVAL
;
463 meson_mmc_request_done(mmc
, cmd
->mrq
);
468 static int meson_mmc_check_cmd(struct mmc_host
*mmc
, struct mmc_command
*cmd
)
472 /* FIXME: needs update for SDIO support */
473 if (cmd
->opcode
== SD_IO_SEND_OP_COND
474 || cmd
->opcode
== SD_IO_RW_DIRECT
475 || cmd
->opcode
== SD_IO_RW_EXTENDED
) {
476 ret
= meson_mmc_cmd_invalid(mmc
, cmd
);
482 static void meson_mmc_start_cmd(struct mmc_host
*mmc
, struct mmc_command
*cmd
)
484 struct meson_host
*host
= mmc_priv(mmc
);
485 struct sd_emmc_desc
*desc
, desc_tmp
;
488 unsigned int xfer_bytes
= 0;
490 /* Setup descriptors */
493 memset(desc
, 0, sizeof(struct sd_emmc_desc
));
495 desc
->cmd_cfg
.cmd_index
= cmd
->opcode
;
496 desc
->cmd_cfg
.owner
= CFG_OWNER_CPU
;
497 desc
->cmd_arg
= cmd
->arg
;
500 if (cmd
->flags
& MMC_RSP_PRESENT
) {
501 desc
->cmd_cfg
.no_resp
= 0;
502 if (cmd
->flags
& MMC_RSP_136
)
503 desc
->cmd_cfg
.resp_128
= 1;
504 desc
->cmd_cfg
.resp_num
= 1;
507 if (!(cmd
->flags
& MMC_RSP_CRC
))
508 desc
->cmd_cfg
.resp_nocrc
= 1;
510 if (cmd
->flags
& MMC_RSP_BUSY
)
511 desc
->cmd_cfg
.r1b
= 1;
513 desc
->cmd_cfg
.no_resp
= 1;
518 desc
->cmd_cfg
.data_io
= 1;
519 if (cmd
->data
->blocks
> 1) {
520 desc
->cmd_cfg
.block_mode
= 1;
521 desc
->cmd_cfg
.length
= cmd
->data
->blocks
;
523 /* check if block-size matches, if not update */
524 cfg
= reg_read(host
, SD_EMMC_CFG
);
525 blk_len
= cfg
& (CFG_BLK_LEN_MASK
<< CFG_BLK_LEN_SHIFT
);
526 blk_len
>>= CFG_BLK_LEN_SHIFT
;
527 if (blk_len
!= ilog2(cmd
->data
->blksz
)) {
528 dev_warn(host
->dev
, "%s: update blk_len %d -> %d\n",
530 ilog2(cmd
->data
->blksz
));
531 blk_len
= ilog2(cmd
->data
->blksz
);
532 cfg
&= ~(CFG_BLK_LEN_MASK
<< CFG_BLK_LEN_SHIFT
);
533 cfg
|= blk_len
<< CFG_BLK_LEN_SHIFT
;
534 reg_write(host
, SD_EMMC_CFG
, cfg
);
537 desc
->cmd_cfg
.block_mode
= 0;
538 desc
->cmd_cfg
.length
= cmd
->data
->blksz
;
541 cmd
->data
->bytes_xfered
= 0;
542 xfer_bytes
= cmd
->data
->blksz
* cmd
->data
->blocks
;
543 if (cmd
->data
->flags
& MMC_DATA_WRITE
) {
544 desc
->cmd_cfg
.data_wr
= 1;
545 WARN_ON(xfer_bytes
> host
->bounce_buf_size
);
546 sg_copy_to_buffer(cmd
->data
->sg
, cmd
->data
->sg_len
,
547 host
->bounce_buf
, xfer_bytes
);
548 cmd
->data
->bytes_xfered
= xfer_bytes
;
551 desc
->cmd_cfg
.data_wr
= 0;
554 if (xfer_bytes
> 0) {
555 desc
->cmd_cfg
.data_num
= 0;
556 desc
->cmd_data
= host
->bounce_dma_addr
& CMD_DATA_MASK
;
558 /* write data to data_addr */
559 desc
->cmd_cfg
.data_num
= 1;
563 desc
->cmd_cfg
.timeout
= 12; /* 2^x msecs */
565 desc
->cmd_cfg
.data_io
= 0;
566 desc
->cmd_cfg
.timeout
= 10; /* 2^x msecs */
571 /* Last descriptor */
572 desc
->cmd_cfg
.end_of_chain
= 1;
573 reg_write(host
, SD_EMMC_CMD_CFG
, desc
->cmd_cfg
.val
);
574 reg_write(host
, SD_EMMC_CMD_DAT
, desc
->cmd_data
);
575 reg_write(host
, SD_EMMC_CMD_RSP
, desc
->cmd_resp
);
576 wmb(); /* ensure descriptor is written before kicked */
577 reg_write(host
, SD_EMMC_CMD_ARG
, desc
->cmd_arg
);
580 static void meson_mmc_request(struct mmc_host
*mmc
, struct mmc_request
*mrq
)
582 struct meson_host
*host
= mmc_priv(mmc
);
584 WARN_ON(host
->mrq
!= NULL
);
587 reg_write(host
, SD_EMMC_START
, 0);
589 /* clear, ack, enable all interrupts */
590 reg_write(host
, SD_EMMC_IRQ_EN
, 0);
591 reg_write(host
, SD_EMMC_STATUS
, IRQ_EN_MASK
);
592 reg_write(host
, SD_EMMC_IRQ_EN
, IRQ_EN_MASK
);
596 if (meson_mmc_check_cmd(mmc
, mrq
->cmd
))
600 meson_mmc_start_cmd(mmc
, mrq
->sbc
);
602 meson_mmc_start_cmd(mmc
, mrq
->cmd
);
605 static int meson_mmc_read_resp(struct mmc_host
*mmc
, struct mmc_command
*cmd
)
607 struct meson_host
*host
= mmc_priv(mmc
);
610 dev_dbg(host
->dev
, "%s: NULL command.\n", __func__
);
614 if (cmd
->flags
& MMC_RSP_136
) {
615 cmd
->resp
[0] = reg_read(host
, SD_EMMC_CMD_RSP3
);
616 cmd
->resp
[1] = reg_read(host
, SD_EMMC_CMD_RSP2
);
617 cmd
->resp
[2] = reg_read(host
, SD_EMMC_CMD_RSP1
);
618 cmd
->resp
[3] = reg_read(host
, SD_EMMC_CMD_RSP
);
619 } else if (cmd
->flags
& MMC_RSP_PRESENT
) {
620 cmd
->resp
[0] = reg_read(host
, SD_EMMC_CMD_RSP
);
626 static irqreturn_t
meson_mmc_irq(int irq
, void *dev_id
)
628 struct meson_host
*host
= dev_id
;
629 struct mmc_request
*mrq
;
630 struct mmc_command
*cmd
= host
->cmd
;
631 u32 irq_en
, status
, raw_status
;
632 irqreturn_t ret
= IRQ_HANDLED
;
645 spin_lock(&host
->lock
);
646 irq_en
= reg_read(host
, SD_EMMC_IRQ_EN
);
647 raw_status
= reg_read(host
, SD_EMMC_STATUS
);
648 status
= raw_status
& irq_en
;
651 dev_warn(host
->dev
, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
658 if (status
& IRQ_RXD_ERR_MASK
) {
659 dev_dbg(host
->dev
, "Unhandled IRQ: RXD error\n");
660 cmd
->error
= -EILSEQ
;
662 if (status
& IRQ_TXD_ERR
) {
663 dev_dbg(host
->dev
, "Unhandled IRQ: TXD error\n");
664 cmd
->error
= -EILSEQ
;
666 if (status
& IRQ_DESC_ERR
)
667 dev_dbg(host
->dev
, "Unhandled IRQ: Descriptor error\n");
668 if (status
& IRQ_RESP_ERR
) {
669 dev_dbg(host
->dev
, "Unhandled IRQ: Response error\n");
670 cmd
->error
= -EILSEQ
;
672 if (status
& IRQ_RESP_TIMEOUT
) {
673 dev_dbg(host
->dev
, "Unhandled IRQ: Response timeout\n");
674 cmd
->error
= -ETIMEDOUT
;
676 if (status
& IRQ_DESC_TIMEOUT
) {
677 dev_dbg(host
->dev
, "Unhandled IRQ: Descriptor timeout\n");
678 cmd
->error
= -ETIMEDOUT
;
680 if (status
& IRQ_SDIO
)
681 dev_dbg(host
->dev
, "Unhandled IRQ: SDIO.\n");
683 if (status
& (IRQ_END_OF_CHAIN
| IRQ_RESP_STATUS
))
684 ret
= IRQ_WAKE_THREAD
;
686 dev_warn(host
->dev
, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
687 status
, cmd
->opcode
, cmd
->arg
,
688 cmd
->flags
, mrq
->stop
? 1 : 0);
690 struct mmc_data
*data
= cmd
->data
;
692 dev_warn(host
->dev
, "\tblksz %u blocks %u flags 0x%08x (%s%s)",
693 data
->blksz
, data
->blocks
, data
->flags
,
694 data
->flags
& MMC_DATA_WRITE
? "write" : "",
695 data
->flags
& MMC_DATA_READ
? "read" : "");
700 /* ack all (enabled) interrupts */
701 reg_write(host
, SD_EMMC_STATUS
, status
);
703 if (ret
== IRQ_HANDLED
) {
704 meson_mmc_read_resp(host
->mmc
, cmd
);
705 meson_mmc_request_done(host
->mmc
, cmd
->mrq
);
708 spin_unlock(&host
->lock
);
712 static irqreturn_t
meson_mmc_irq_thread(int irq
, void *dev_id
)
714 struct meson_host
*host
= dev_id
;
715 struct mmc_request
*mrq
= host
->mrq
;
716 struct mmc_command
*cmd
= host
->cmd
;
717 struct mmc_data
*data
;
719 unsigned int xfer_bytes
;
720 int ret
= IRQ_HANDLED
;
730 xfer_bytes
= data
->blksz
* data
->blocks
;
731 if (data
->flags
& MMC_DATA_READ
) {
732 WARN_ON(xfer_bytes
> host
->bounce_buf_size
);
733 sg_copy_from_buffer(data
->sg
, data
->sg_len
,
734 host
->bounce_buf
, xfer_bytes
);
735 data
->bytes_xfered
= xfer_bytes
;
739 meson_mmc_read_resp(host
->mmc
, cmd
);
740 if (!data
|| !data
->stop
|| mrq
->sbc
)
741 meson_mmc_request_done(host
->mmc
, mrq
);
743 meson_mmc_start_cmd(host
->mmc
, data
->stop
);
749 * NOTE: we only need this until the GPIO/pinctrl driver can handle
750 * interrupts. For now, the MMC core will use this for polling.
752 static int meson_mmc_get_cd(struct mmc_host
*mmc
)
754 int status
= mmc_gpio_get_cd(mmc
);
756 if (status
== -ENOSYS
)
757 return 1; /* assume present */
762 static struct mmc_host_ops meson_mmc_ops
= {
763 .request
= meson_mmc_request
,
764 .set_ios
= meson_mmc_set_ios
,
765 .get_cd
= meson_mmc_get_cd
,
768 static int meson_mmc_probe(struct platform_device
*pdev
)
770 struct device_node
*np
= pdev
->dev
.of_node
;
771 struct resource
*res
;
772 struct meson_host
*host
;
773 struct mmc_host
*mmc
;
776 mmc
= mmc_alloc_host(sizeof(struct meson_host
), &pdev
->dev
);
779 host
= mmc_priv(mmc
);
781 host
->dev
= &pdev
->dev
;
782 dev_set_drvdata(&pdev
->dev
, host
);
784 spin_lock_init(&host
->lock
);
786 host
->core_clk
= devm_clk_get(&pdev
->dev
, "core");
787 if (IS_ERR(host
->core_clk
)) {
788 ret
= PTR_ERR(host
->core_clk
);
789 if (ret
== -EPROBE_DEFER
)
791 "Missing core clock. EPROBE_DEFER\n");
794 "Unable to get core clk (ret=%d).\n", ret
);
799 mmc_of_parse_voltage(np
, &host
->ocr_mask
);
800 ret
= mmc_regulator_get_supply(mmc
);
801 if (ret
== -EPROBE_DEFER
) {
802 dev_dbg(&pdev
->dev
, "Missing regulator: EPROBE_DEFER");
807 mmc
->ocr_avail
= host
->ocr_mask
;
809 ret
= mmc_of_parse(mmc
);
811 dev_warn(&pdev
->dev
, "error parsing DT: %d\n", ret
);
815 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
816 host
->regs
= devm_ioremap_resource(&pdev
->dev
, res
);
817 if (IS_ERR(host
->regs
)) {
818 ret
= PTR_ERR(host
->regs
);
822 host
->irq
= platform_get_irq(pdev
, 0);
823 if (host
->irq
== 0) {
824 dev_err(&pdev
->dev
, "failed to get interrupt resource.\n");
829 ret
= devm_request_threaded_irq(&pdev
->dev
, host
->irq
,
830 meson_mmc_irq
, meson_mmc_irq_thread
,
831 IRQF_SHARED
, DRIVER_NAME
, host
);
835 /* data bounce buffer */
836 host
->bounce_buf_size
= SZ_512K
;
838 dma_alloc_coherent(host
->dev
, host
->bounce_buf_size
,
839 &host
->bounce_dma_addr
, GFP_KERNEL
);
840 if (host
->bounce_buf
== NULL
) {
841 dev_err(host
->dev
, "Unable to map allocate DMA bounce buffer.\n");
846 clk_prepare_enable(host
->core_clk
);
848 ret
= meson_mmc_clk_init(host
);
853 reg_write(host
, SD_EMMC_START
, 0);
855 /* clear, ack, enable all interrupts */
856 reg_write(host
, SD_EMMC_IRQ_EN
, 0);
857 reg_write(host
, SD_EMMC_STATUS
, IRQ_EN_MASK
);
859 mmc
->ops
= &meson_mmc_ops
;
865 dev_dbg(host
->dev
, "Failed to probe: ret=%d\n", ret
);
867 clk_disable_unprepare(host
->core_clk
);
872 static int meson_mmc_remove(struct platform_device
*pdev
)
874 struct meson_host
*host
= dev_get_drvdata(&pdev
->dev
);
879 if (host
->bounce_buf
)
880 dma_free_coherent(host
->dev
, host
->bounce_buf_size
,
881 host
->bounce_buf
, host
->bounce_dma_addr
);
883 if (host
->cfg_div_clk
)
884 clk_disable_unprepare(host
->cfg_div_clk
);
887 clk_disable_unprepare(host
->core_clk
);
889 mmc_free_host(host
->mmc
);
893 static const struct of_device_id meson_mmc_of_match
[] = {
895 .compatible
= "amlogic,meson-gxbb-mmc",
899 MODULE_DEVICE_TABLE(of
, meson_mmc_of_match
);
901 static struct platform_driver meson_mmc_driver
= {
902 .probe
= meson_mmc_probe
,
903 .remove
= meson_mmc_remove
,
906 .of_match_table
= of_match_ptr(meson_mmc_of_match
),
910 module_platform_driver(meson_mmc_driver
);
912 MODULE_ALIAS("platform:" DRIVER_NAME
);
913 MODULE_DESCRIPTION("Amlogic S905/GXBB SD/eMMC driver");
914 MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
915 MODULE_LICENSE("Dual BSD/GPL");