/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * BSD LICENSE
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/regmap.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>

#define DRIVER_NAME "meson-gxbb-mmc"

#define SD_EMMC_CLOCK 0x0
#define CLK_DIV_SHIFT 0
#define CLK_DIV_WIDTH 6
#define CLK_DIV_MASK 0x3f
#define CLK_DIV_MAX 63
#define CLK_SRC_SHIFT 6
#define CLK_SRC_WIDTH 2
#define CLK_SRC_MASK 0x3
#define CLK_SRC_XTAL 0 /* external crystal */
#define CLK_SRC_XTAL_RATE 24000000
#define CLK_SRC_PLL 1 /* FCLK_DIV2 */
#define CLK_SRC_PLL_RATE 1000000000
#define CLK_PHASE_SHIFT 8
#define CLK_PHASE_MASK 0x3
#define CLK_PHASE_0 0
#define CLK_PHASE_90 1
#define CLK_PHASE_180 2
#define CLK_PHASE_270 3
#define CLK_ALWAYS_ON BIT(24)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_CALOUT 0x10
#define SD_EMMC_START 0x40
#define START_DESC_INIT BIT(0)
#define START_DESC_BUSY BIT(1)
#define START_DESC_ADDR_SHIFT 2
#define START_DESC_ADDR_MASK (~0x3)

#define SD_EMMC_CFG 0x44
#define CFG_BUS_WIDTH_SHIFT 0
#define CFG_BUS_WIDTH_MASK 0x3
#define CFG_BUS_WIDTH_1 0x0
#define CFG_BUS_WIDTH_4 0x1
#define CFG_BUS_WIDTH_8 0x2
#define CFG_DDR BIT(2)
#define CFG_BLK_LEN_SHIFT 4
#define CFG_BLK_LEN_MASK 0xf
#define CFG_RESP_TIMEOUT_SHIFT 8
#define CFG_RESP_TIMEOUT_MASK 0xf
#define CFG_RC_CC_SHIFT 12
#define CFG_RC_CC_MASK 0xf
#define CFG_STOP_CLOCK BIT(22)
#define CFG_CLK_ALWAYS_ON BIT(18)
#define CFG_AUTO_CLK BIT(23)

#define SD_EMMC_STATUS 0x48
#define STATUS_BUSY BIT(31)

#define SD_EMMC_IRQ_EN 0x4c
#define IRQ_EN_MASK 0x3fff
#define IRQ_RXD_ERR_SHIFT 0
#define IRQ_RXD_ERR_MASK 0xff
#define IRQ_TXD_ERR BIT(8)
#define IRQ_DESC_ERR BIT(9)
#define IRQ_RESP_ERR BIT(10)
#define IRQ_RESP_TIMEOUT BIT(11)
#define IRQ_DESC_TIMEOUT BIT(12)
#define IRQ_END_OF_CHAIN BIT(13)
#define IRQ_RESP_STATUS BIT(14)
#define IRQ_SDIO BIT(15)

#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68

#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define MUX_CLK_NUM_PARENTS 2

struct meson_host {
	struct device *dev;
	struct mmc_host *mmc;
	struct mmc_request *mrq;
	struct mmc_command *cmd;

	spinlock_t lock;
	void __iomem *regs;
#ifdef USE_REGMAP
	struct regmap *regmap;
#endif
	int irq;
	u32 ocr_mask;
	struct clk *core_clk;
	struct clk_mux mux;
	struct clk *mux_clk;
	struct clk *mux_parent[MUX_CLK_NUM_PARENTS];
	unsigned long mux_parent_rate[MUX_CLK_NUM_PARENTS];

	struct clk_divider cfg_div;
	struct clk *cfg_div_clk;

	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;

	unsigned long clk_rate;
	unsigned long clk_src_rate;
	unsigned short clk_src_div;
};

#define reg_read(host, offset) readl((host)->regs + (offset))
#define reg_write(host, offset, val) writel((val), (host)->regs + (offset))

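/*
 * Layout of the command configuration word of a hardware descriptor.
 * The bitfields below correspond to the SD_EMMC_CMD_CFG register, so a
 * descriptor built in memory can also be written word-by-word into the
 * command registers, which is what meson_mmc_start_cmd() does instead
 * of fetching a descriptor chain from memory.
 */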
struct cmd_cfg {
	union {
		struct {
			u32 length:9;
			u32 block_mode:1;
			u32 r1b:1;
			u32 end_of_chain:1;
			u32 timeout:4; /* 2^timeout msec */
			u32 no_resp:1;
			u32 no_cmd:1;
			u32 data_io:1;
			u32 data_wr:1;
			u32 resp_nocrc:1;
			u32 resp_128:1;
			u32 resp_num:1;
			u32 data_num:1;
			u32 cmd_index:6;
			u32 error:1;
			u32 owner:1;
#define CFG_OWNER_CPU 1
#define CFG_OWNER_MMC 0
		};
		u32 val;
	};
};

struct sd_emmc_desc {
	struct cmd_cfg cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};
#define CMD_DATA_MASK (~0x3)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK (~0x1)
#define CMD_RESP_SRAM BIT(0)

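/*
 * Change the bus clock rate: gate the clock via CFG_STOP_CLOCK, ask the
 * composed mux+divider clock for the new rate, then ungate. A rate of
 * zero leaves the clock stopped.
 */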
static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;
	u32 cfg;

	if (clk_rate) {
		if (WARN_ON(clk_rate > mmc->f_max))
			clk_rate = mmc->f_max;
		else if (WARN_ON(clk_rate < mmc->f_min))
			clk_rate = mmc->f_min;
	}

	if (clk_rate == host->clk_rate)
		return 0;

	/* stop clock */
	cfg = reg_read(host, SD_EMMC_CFG);
	if (!(cfg & CFG_STOP_CLOCK)) {
		cfg |= CFG_STOP_CLOCK;
		reg_write(host, SD_EMMC_CFG, cfg);
	}

	dev_dbg(host->dev, "change clock rate %lu -> %lu\n",
		host->clk_rate, clk_rate);
	ret = clk_set_rate(host->cfg_div_clk, clk_rate);
	if (clk_rate && clk_rate != clk_get_rate(host->cfg_div_clk))
		dev_warn(host->dev, "divider requested rate %lu != actual rate %lu: ret=%d\n",
			 clk_rate, clk_get_rate(host->cfg_div_clk), ret);
	else
		host->clk_rate = clk_rate;

	/* (re)start clock, if non-zero */
	if (clk_rate) {
		cfg = reg_read(host, SD_EMMC_CFG);
		cfg &= ~CFG_STOP_CLOCK;
		reg_write(host, SD_EMMC_CFG, cfg);
	}

	return ret;
}

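/*
 * Register the clock tree that feeds the controller: a mux selecting
 * between the "clkinN" parents from DT and a divider, both living in
 * the SD_EMMC_CLOCK register, are exposed through the common clock
 * framework. The register is then programmed to a conservative
 * (slowest) setting before the clock is first enabled.
 */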
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	unsigned int mux_parent_count = 0;
	const char *clk_div_parents[1];
	unsigned int f_min = UINT_MAX;
	u32 clk_reg, cfg;

	/* get the mux parents from DT */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		host->mux_parent[i] = devm_clk_get(host->dev, name);
		if (IS_ERR(host->mux_parent[i])) {
			ret = PTR_ERR(host->mux_parent[i]);
			if (PTR_ERR(host->mux_parent[i]) != -EPROBE_DEFER)
				dev_err(host->dev, "Missing clock %s\n", name);
			host->mux_parent[i] = NULL;
			return ret;
		}

		host->mux_parent_rate[i] = clk_get_rate(host->mux_parent[i]);
		mux_parent_names[i] = __clk_get_name(host->mux_parent[i]);
		mux_parent_count++;
		if (host->mux_parent_rate[i] < f_min)
			f_min = host->mux_parent_rate[i];
	}

	/* calculate f_min based on input clocks, and max divider value */
	if (f_min != UINT_MAX)
		f_min = DIV_ROUND_UP(CLK_SRC_XTAL_RATE, CLK_DIV_MAX);
	else
		f_min = 4000000; /* default min: 4 MHz */
	host->mmc->f_min = f_min;

	/* create the mux */
	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = CLK_IS_BASIC;
	init.parent_names = mux_parent_names;
	init.num_parents = mux_parent_count;

	host->mux.reg = host->regs + SD_EMMC_CLOCK;
	host->mux.shift = CLK_SRC_SHIFT;
	host->mux.mask = CLK_SRC_MASK;
	host->mux.flags = 0;
	host->mux.table = NULL;
	host->mux.hw.init = &init;

	host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->mux_clk)))
		return PTR_ERR(host->mux_clk);

	/* create the divider */
	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = devm_kstrdup(host->dev, clk_name, GFP_KERNEL);
	init.ops = &clk_divider_ops;
	init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT;
	clk_div_parents[0] = __clk_get_name(host->mux_clk);
	init.parent_names = clk_div_parents;
	init.num_parents = ARRAY_SIZE(clk_div_parents);

	host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
	host->cfg_div.shift = CLK_DIV_SHIFT;
	host->cfg_div.width = CLK_DIV_WIDTH;
	host->cfg_div.hw.init = &init;
	host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
		CLK_DIVIDER_ROUND_CLOSEST | CLK_DIVIDER_ALLOW_ZERO;

	host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk)))
		return PTR_ERR(host->cfg_div_clk);

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = 0;
	clk_reg |= CLK_PHASE_180 << CLK_PHASE_SHIFT;
	clk_reg |= CLK_SRC_XTAL << CLK_SRC_SHIFT;
	clk_reg |= CLK_DIV_MAX << CLK_DIV_SHIFT;
	clk_reg &= ~CLK_ALWAYS_ON;
	reg_write(host, SD_EMMC_CLOCK, clk_reg);

	clk_prepare_enable(host->cfg_div_clk);

	/* Ensure clock starts in "auto" mode, not "always on" */
	cfg = reg_read(host, SD_EMMC_CFG);
	cfg &= ~CFG_CLK_ALWAYS_ON;
	cfg |= CFG_AUTO_CLK;
	reg_write(host, SD_EMMC_CFG, cfg);

	meson_mmc_clk_set(host, f_min);

	return ret;
}

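/*
 * Apply core-requested ios settings: power (via the vmmc regulator, if
 * present), bus clock rate, bus width, and the block length / timeout
 * fields of SD_EMMC_CFG.
 */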
static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 bus_width;
	u32 val, orig;

	/*
	 * GPIO regulator, only controls switching between 1v8 and
	 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
	 */
	switch (ios->power_mode) {
	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
	}

	meson_mmc_clk_set(host, ios->clock);

	/* Bus width */
	val = reg_read(host, SD_EMMC_CFG);
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = CFG_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = CFG_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = CFG_BUS_WIDTH_8;
		break;
	default:
		dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
			ios->bus_width);
		bus_width = CFG_BUS_WIDTH_4;
	}

	val = reg_read(host, SD_EMMC_CFG);
	orig = val;

	val &= ~(CFG_BUS_WIDTH_MASK << CFG_BUS_WIDTH_SHIFT);
	val |= bus_width << CFG_BUS_WIDTH_SHIFT;

	val &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
	val |= ilog2(SD_EMMC_CFG_BLK_SIZE) << CFG_BLK_LEN_SHIFT;

	val &= ~(CFG_RESP_TIMEOUT_MASK << CFG_RESP_TIMEOUT_SHIFT);
	val |= ilog2(SD_EMMC_CFG_RESP_TIMEOUT) << CFG_RESP_TIMEOUT_SHIFT;

	val &= ~(CFG_RC_CC_MASK << CFG_RC_CC_SHIFT);
	val |= ilog2(SD_EMMC_CFG_CMD_GAP) << CFG_RC_CC_SHIFT;

	reg_write(host, SD_EMMC_CFG, val);

	if (val != orig)
		dev_dbg(host->dev, "%s: SD_EMMC_CFG: 0x%08x -> 0x%08x\n",
			__func__, orig, val);
}

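/*
 * Spin until the controller clears STATUS_BUSY or the loop budget is
 * exhausted. Returns non-zero on timeout.
 */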
static int meson_mmc_wait_busy(struct mmc_host *mmc, unsigned int timeout)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 status;
	int i;

	for (i = timeout; i > 0; i--) {
		status = reg_read(host, SD_EMMC_STATUS);
		if (!(status & STATUS_BUSY))
			break;
	}

	return (i == 0);
}

static int meson_mmc_request_done(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = host->cmd;
	unsigned int loops = 0xfffff;

	WARN_ON(host->mrq != mrq);
	if (cmd && !cmd->error)
		if (meson_mmc_wait_busy(mmc, loops))
			dev_warn(host->dev, "%s: timeout busy.\n", __func__);

	host->mrq = NULL;
	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);

	return 0;
}

static int meson_mmc_cmd_invalid(struct mmc_host *mmc, struct mmc_command *cmd)
{
	cmd->error = -EINVAL;
	meson_mmc_request_done(mmc, cmd->mrq);

	return -EINVAL;
}

static int meson_mmc_check_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	int ret = 0;

	/* FIXME: needs update for SDIO support */
	if (cmd->opcode == SD_IO_SEND_OP_COND
	    || cmd->opcode == SD_IO_RW_DIRECT
	    || cmd->opcode == SD_IO_RW_EXTENDED) {
		ret = meson_mmc_cmd_invalid(mmc, cmd);
	}

	return ret;
}

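/*
 * Build a single descriptor for the command and write it directly to
 * the SD_EMMC_CMD_* registers. Data transfers go through the bounce
 * buffer: writes are copied in here, reads are copied out in the IRQ
 * thread. Writing SD_EMMC_CMD_ARG last kicks off execution.
 */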
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc, desc_tmp;
	u32 cfg;
	u8 blk_len;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();
	desc = &desc_tmp;
	memset(desc, 0, sizeof(struct sd_emmc_desc));

	desc->cmd_cfg.cmd_index = cmd->opcode;
	desc->cmd_cfg.owner = CFG_OWNER_CPU;
	desc->cmd_arg = cmd->arg;

	/* Response */
	if (cmd->flags & MMC_RSP_PRESENT) {
		desc->cmd_cfg.no_resp = 0;
		if (cmd->flags & MMC_RSP_136)
			desc->cmd_cfg.resp_128 = 1;
		desc->cmd_cfg.resp_num = 1;
		desc->cmd_resp = 0;

		if (!(cmd->flags & MMC_RSP_CRC))
			desc->cmd_cfg.resp_nocrc = 1;

		if (cmd->flags & MMC_RSP_BUSY)
			desc->cmd_cfg.r1b = 1;
	} else {
		desc->cmd_cfg.no_resp = 1;
	}

	/* data? */
	if (cmd->data) {
		desc->cmd_cfg.data_io = 1;
		if (cmd->data->blocks > 1) {
			desc->cmd_cfg.block_mode = 1;
			desc->cmd_cfg.length = cmd->data->blocks;

			/* check if block-size matches, if not update */
			cfg = reg_read(host, SD_EMMC_CFG);
			blk_len = cfg & (CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
			blk_len >>= CFG_BLK_LEN_SHIFT;
			if (blk_len != ilog2(cmd->data->blksz)) {
				dev_warn(host->dev, "%s: update blk_len %d -> %d\n",
					 __func__, blk_len,
					 ilog2(cmd->data->blksz));
				blk_len = ilog2(cmd->data->blksz);
				cfg &= ~(CFG_BLK_LEN_MASK << CFG_BLK_LEN_SHIFT);
				cfg |= blk_len << CFG_BLK_LEN_SHIFT;
				reg_write(host, SD_EMMC_CFG, cfg);
			}
		} else {
			desc->cmd_cfg.block_mode = 0;
			desc->cmd_cfg.length = cmd->data->blksz;
		}

		cmd->data->bytes_xfered = 0;
		xfer_bytes = cmd->data->blksz * cmd->data->blocks;
		if (cmd->data->flags & MMC_DATA_WRITE) {
			desc->cmd_cfg.data_wr = 1;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			sg_copy_to_buffer(cmd->data->sg, cmd->data->sg_len,
					  host->bounce_buf, xfer_bytes);
			cmd->data->bytes_xfered = xfer_bytes;
			dma_wmb();
		} else {
			desc->cmd_cfg.data_wr = 0;
		}

		if (xfer_bytes > 0) {
			desc->cmd_cfg.data_num = 0;
			desc->cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
		} else {
			/* write data to data_addr */
			desc->cmd_cfg.data_num = 1;
			desc->cmd_data = 0;
		}

		desc->cmd_cfg.timeout = 12; /* 2^x msecs */
	} else {
		desc->cmd_cfg.data_io = 0;
		desc->cmd_cfg.timeout = 10; /* 2^x msecs */
	}

	host->cmd = cmd;

	/* Last descriptor */
	desc->cmd_cfg.end_of_chain = 1;
	reg_write(host, SD_EMMC_CMD_CFG, desc->cmd_cfg.val);
	reg_write(host, SD_EMMC_CMD_DAT, desc->cmd_data);
	reg_write(host, SD_EMMC_CMD_RSP, desc->cmd_resp);
	wmb(); /* ensure descriptor is written before kicked */
	reg_write(host, SD_EMMC_CMD_ARG, desc->cmd_arg);
}

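/*
 * Issue a request from the core: reset the descriptor engine, re-arm
 * the interrupts and start either the set-block-count (sbc)
 * pre-command or the command itself.
 */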
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	WARN_ON(host->mrq != NULL);

	/* Stop execution */
	reg_write(host, SD_EMMC_START, 0);

	/* clear, ack, enable all interrupts */
	reg_write(host, SD_EMMC_IRQ_EN, 0);
	reg_write(host, SD_EMMC_STATUS, IRQ_EN_MASK);
	reg_write(host, SD_EMMC_IRQ_EN, IRQ_EN_MASK);

	host->mrq = mrq;

	if (meson_mmc_check_cmd(mmc, mrq->cmd))
		return;

	if (mrq->sbc)
		meson_mmc_start_cmd(mmc, mrq->sbc);
	else
		meson_mmc_start_cmd(mmc, mrq->cmd);
}

static int meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (!cmd) {
		dev_dbg(host->dev, "%s: NULL command.\n", __func__);
		return -EINVAL;
	}

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = reg_read(host, SD_EMMC_CMD_RSP3);
		cmd->resp[1] = reg_read(host, SD_EMMC_CMD_RSP2);
		cmd->resp[2] = reg_read(host, SD_EMMC_CMD_RSP1);
		cmd->resp[3] = reg_read(host, SD_EMMC_CMD_RSP);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		cmd->resp[0] = reg_read(host, SD_EMMC_CMD_RSP);
	}

	return 0;
}

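/*
 * Hard IRQ handler: decode and acknowledge the status bits, record any
 * command error, and defer end-of-chain / response-status completion
 * to the threaded handler.
 */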
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_request *mrq;
	struct mmc_command *cmd = host->cmd;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_HANDLED;

	if (WARN_ON(!host))
		return IRQ_NONE;

	mrq = host->mrq;

	if (WARN_ON(!mrq))
		return IRQ_NONE;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	spin_lock(&host->lock);
	irq_en = reg_read(host, SD_EMMC_IRQ_EN);
	raw_status = reg_read(host, SD_EMMC_STATUS);
	status = raw_status & irq_en;

	if (!status) {
		dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
			 raw_status, irq_en);
		ret = IRQ_NONE;
		goto out;
	}

	cmd->error = 0;
	if (status & IRQ_RXD_ERR_MASK) {
		dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_TXD_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: TXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_DESC_ERR)
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n");
	if (status & IRQ_RESP_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: Response error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_RESP_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_DESC_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_SDIO)
		dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS))
		ret = IRQ_WAKE_THREAD;
	else {
		dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
			 status, cmd->opcode, cmd->arg,
			 cmd->flags, mrq->stop ? 1 : 0);
		if (cmd->data) {
			struct mmc_data *data = cmd->data;

			dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)",
				 data->blksz, data->blocks, data->flags,
				 data->flags & MMC_DATA_WRITE ? "write" : "",
				 data->flags & MMC_DATA_READ ? "read" : "");
		}
	}

out:
	/* ack all (enabled) interrupts */
	reg_write(host, SD_EMMC_STATUS, status);

	if (ret == IRQ_HANDLED) {
		meson_mmc_read_resp(host->mmc, cmd);
		meson_mmc_request_done(host->mmc, cmd->mrq);
	}

	spin_unlock(&host->lock);
	return ret;
}

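/*
 * Threaded IRQ handler: for reads, copy the received data from the
 * bounce buffer back into the request's scatterlist, then either send
 * the stop command or complete the request.
 */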
static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_request *mrq = host->mrq;
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data;

	unsigned int xfer_bytes;
	int ret = IRQ_HANDLED;

	if (WARN_ON(!mrq))
		return IRQ_NONE;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;
	if (data) {
		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_READ) {
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			sg_copy_from_buffer(data->sg, data->sg_len,
					    host->bounce_buf, xfer_bytes);
			data->bytes_xfered = xfer_bytes;
		}
	}

	meson_mmc_read_resp(host->mmc, cmd);
	if (!data || !data->stop || mrq->sbc)
		meson_mmc_request_done(host->mmc, mrq);
	else
		meson_mmc_start_cmd(host->mmc, data->stop);

	return ret;
}

/*
 * NOTE: we only need this until the GPIO/pinctrl driver can handle
 * interrupts. For now, the MMC core will use this for polling.
 */
static int meson_mmc_get_cd(struct mmc_host *mmc)
{
	int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS)
		return 1; /* assume present */

	return status;
}

static struct mmc_host_ops meson_mmc_ops = {
	.request = meson_mmc_request,
	.set_ios = meson_mmc_set_ios,
	.get_cd = meson_mmc_get_cd,
};

static int meson_mmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct meson_host *host;
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	spin_lock_init(&host->lock);

	host->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(host->core_clk)) {
		ret = PTR_ERR(host->core_clk);
		if (ret == -EPROBE_DEFER)
			dev_dbg(&pdev->dev,
				"Missing core clock. EPROBE_DEFER\n");
		else
			dev_err(&pdev->dev,
				"Unable to get core clk (ret=%d).\n", ret);
		goto free_host;
	}

	/* Voltage supply */
	mmc_of_parse_voltage(np, &host->ocr_mask);
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER) {
		dev_dbg(&pdev->dev, "Missing regulator: EPROBE_DEFER");
		goto free_host;
	}

	if (!mmc->ocr_avail)
		mmc->ocr_avail = host->ocr_mask;

	ret = mmc_of_parse(mmc);
	if (ret) {
		dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
		goto free_host;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->regs)) {
		ret = PTR_ERR(host->regs);
		goto free_host;
	}

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq <= 0) {
		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
		ret = -EINVAL;
		goto free_host;
	}

	ret = devm_request_threaded_irq(&pdev->dev, host->irq,
					meson_mmc_irq, meson_mmc_irq_thread,
					IRQF_SHARED, DRIVER_NAME, host);
	if (ret)
		goto free_host;

	/* data bounce buffer */
	host->bounce_buf_size = SZ_512K;
	host->bounce_buf =
		dma_alloc_coherent(host->dev, host->bounce_buf_size,
				   &host->bounce_dma_addr, GFP_KERNEL);
	if (host->bounce_buf == NULL) {
		dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
		ret = -ENOMEM;
		goto free_host;
	}

	clk_prepare_enable(host->core_clk);

	ret = meson_mmc_clk_init(host);
	if (ret)
		goto free_host;

	/* Stop execution */
	reg_write(host, SD_EMMC_START, 0);

	/* clear, ack, enable all interrupts */
	reg_write(host, SD_EMMC_IRQ_EN, 0);
	reg_write(host, SD_EMMC_STATUS, IRQ_EN_MASK);

	mmc->ops = &meson_mmc_ops;
	mmc_add_host(mmc);

	return 0;

free_host:
	dev_dbg(host->dev, "Failed to probe: ret=%d\n", ret);
	if (host->core_clk)
		clk_disable_unprepare(host->core_clk);
	mmc_free_host(mmc);
	return ret;
}

static int meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	if (WARN_ON(!host))
		return 0;

	mmc_remove_host(host->mmc);

	if (host->bounce_buf)
		dma_free_coherent(host->dev, host->bounce_buf_size,
				  host->bounce_buf, host->bounce_dma_addr);

	if (host->cfg_div_clk)
		clk_disable_unprepare(host->cfg_div_clk);

	if (host->core_clk)
		clk_disable_unprepare(host->core_clk);

	mmc_free_host(host->mmc);
	return 0;
}

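/*
 * Illustrative (not authoritative) device tree node; the clock-names
 * match the lookups in this driver ("core" plus the "clkin0"/"clkin1"
 * mux parents), all other values are board/SoC-specific placeholders:
 *
 *	mmc@... {
 *		compatible = "amlogic,meson-gxbb-mmc";
 *		reg = <...>;
 *		interrupts = <...>;
 *		clocks = <&core_clk>, <&xtal>, <&pll_clk>;
 *		clock-names = "core", "clkin0", "clkin1";
 *	};
 */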
static const struct of_device_id meson_mmc_of_match[] = {
	{
		.compatible = "amlogic,meson-gxbb-mmc",
	},
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);

static struct platform_driver meson_mmc_driver = {
	.probe = meson_mmc_probe,
	.remove = meson_mmc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(meson_mmc_of_match),
	},
};

module_platform_driver(meson_mmc_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("Amlogic S905/GXBB SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("Dual BSD/GPL");