drm/msm/mdp5: use irqdomains
[deliverable/linux.git] / drivers / gpu / drm / msm / mdp / mdp5 / mdp5_kms.c
CommitLineData
06c0dd96
RC
1/*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19#include "msm_drv.h"
20#include "msm_mmu.h"
21#include "mdp5_kms.h"
22
87e956e9
SV
/* IOMMU context-bank port name(s) the display controller scans out
 * through; passed to the mmu attach/detach hooks below. */
static const char *iommu_ports[] = {
		"mdp_0",
};
26
06c0dd96
RC
27static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
28
/* Points at the hw config selected at runtime by mdp5_select_hw_cfg();
 * NULL until a supported MDP revision has been probed. */
const struct mdp5_config *mdp5_cfg;

/* Block layout for msm8x74-class MDP5 (minor rev 0/2): per-block-type
 * instance counts and register base offsets. */
static const struct mdp5_config msm8x74_config = {
	.name = "msm8x74",
	.ctl = {
		.count = 5,
		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
	},
	.pipe_vig = {
		.count = 3,
		.base = { 0x01200, 0x01600, 0x01a00 },
	},
	.pipe_rgb = {
		.count = 3,
		.base = { 0x01e00, 0x02200, 0x02600 },
	},
	.pipe_dma = {
		.count = 2,
		.base = { 0x02a00, 0x02e00 },
	},
	.lm = {
		.count = 5,
		.base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
	},
	.dspp = {
		.count = 3,
		.base = { 0x04600, 0x04a00, 0x04e00 },
	},
	.ad = {
		.count = 2,
		.base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
	},
	.intf = {
		.count = 4,
		.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
	},
};
66
/* Block layout for apq8084-class MDP5 (minor rev 3): more pipes, layer
 * mixers and interfaces than msm8x74. */
static const struct mdp5_config apq8084_config = {
	.name = "apq8084",
	.ctl = {
		.count = 5,
		.base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
	},
	.pipe_vig = {
		.count = 4,
		.base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
	},
	.pipe_rgb = {
		.count = 4,
		.base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
	},
	.pipe_dma = {
		.count = 2,
		.base = { 0x03200, 0x03600 },
	},
	.lm = {
		.count = 6,
		.base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
	},
	.dspp = {
		.count = 4,
		.base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
	},
	.ad = {
		.count = 3,
		.base = { 0x13500, 0x13700, 0x13900 },
	},
	.intf = {
		.count = 5,
		.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
	},
};
103
/* Maps an MDP5 minor hw revision (read from REG_MDP5_MDP_VERSION) to the
 * config describing that SoC's block layout. */
struct mdp5_config_entry {
	int revision;                     /* MDP5 minor revision number */
	const struct mdp5_config *config; /* layout for that revision */
};

/* Lookup table scanned by mdp5_select_hw_cfg(); multiple revisions may
 * share one config (v1.0 and v1.2 are both msm8x74-layout). */
static const struct mdp5_config_entry mdp5_configs[] = {
	{ .revision = 0, .config = &msm8x74_config },
	{ .revision = 2, .config = &msm8x74_config },
	{ .revision = 3, .config = &apq8084_config },
};
114
115static int mdp5_select_hw_cfg(struct msm_kms *kms)
06c0dd96
RC
116{
117 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
118 struct drm_device *dev = mdp5_kms->dev;
119 uint32_t version, major, minor;
3d47fd47 120 int i, ret = 0;
06c0dd96
RC
121
122 mdp5_enable(mdp5_kms);
123 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
124 mdp5_disable(mdp5_kms);
125
126 major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
127 minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
128
129 DBG("found MDP5 version v%d.%d", major, minor);
130
3d47fd47
SV
131 if (major != 1) {
132 dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
06c0dd96
RC
133 major, minor);
134 ret = -ENXIO;
135 goto out;
136 }
137
138 mdp5_kms->rev = minor;
139
3d47fd47
SV
140 /* only after mdp5_cfg global pointer's init can we access the hw */
141 for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
142 if (mdp5_configs[i].revision != minor)
143 continue;
144 mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
145 break;
146 }
147 if (unlikely(!mdp5_kms->hw_cfg)) {
148 dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
149 major, minor);
150 ret = -ENXIO;
151 goto out;
152 }
153
154 DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
155
156 return 0;
157out:
158 return ret;
159}
160
/*
 * One-time hw init: quiesce the CTL paths and interface-select mux so we
 * start from a known-clean state.  Runs with a runtime-PM reference held
 * for the duration of the register writes.  Always returns 0.
 */
static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp5_kms->dev;
	int i;

	pm_runtime_get_sync(dev->dev);

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0    (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000    (mdss_mdp.c:839)
	 *
	 * Downstream fbdev driver gets these register offsets/values
	 * from DT.. not really sure what these registers are or if
	 * different values for different boards/SoC's, etc.  I guess
	 * they are the golden registers.
	 *
	 * Not setting these does not seem to cause any problem.  But
	 * we may be getting lucky with the bootloader initializing
	 * them for us.  OTOH, if we can always count on the bootloader
	 * setting the golden registers, then perhaps we don't need to
	 * care.
	 */

	/* route no interfaces until modeset configures them */
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);

	/* clear the operation mode of every CTL instance */
	for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++)
		mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);

	pm_runtime_put_sync(dev->dev);

	return 0;
}
202
/* Pixel-clock rounding hook: no adjustment is applied here, the requested
 * rate is returned unchanged. */
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
208
209static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
210{
211 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
212 struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
213 unsigned i;
214
215 for (i = 0; i < priv->num_crtcs; i++)
216 mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
217}
218
219static void mdp5_destroy(struct msm_kms *kms)
220{
221 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
87e956e9
SV
222 struct msm_mmu *mmu = mdp5_kms->mmu;
223
f6a8eaca
RC
224 mdp5_irq_domain_fini(mdp5_kms);
225
87e956e9
SV
226 if (mmu) {
227 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
228 mmu->funcs->destroy(mmu);
229 }
f6a8eaca 230
06c0dd96
RC
231 kfree(mdp5_kms);
232}
233
/* vtable wiring the mdp5 implementations into the common mdp/msm kms
 * interfaces (irq handling, vblank, modeset lifecycle). */
static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp5_hw_init,
		.irq_preinstall  = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall   = mdp5_irq_uninstall,
		.irq             = mdp5_irq,
		.enable_vblank   = mdp5_enable_vblank,
		.disable_vblank  = mdp5_disable_vblank,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp5_round_pixclk,
		.preclose        = mdp5_preclose,
		.destroy         = mdp5_destroy,
	},
	.set_irqmask         = mdp5_set_irqmask,
};
250
/*
 * Gate the clocks needed to access the MDP5 block.  Counterpart of
 * mdp5_enable(); must not be called while register access is in
 * progress.  Always returns 0.
 */
int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}
262
/*
 * Ungate the clocks needed to access the MDP5 block (bus interface, axi,
 * core and LUT clocks).  Must be balanced with mdp5_disable().  Always
 * returns 0; clk_prepare_enable() results are not checked here.
 */
int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	clk_prepare_enable(mdp5_kms->lut_clk);

	return 0;
}
274
275static int modeset_init(struct mdp5_kms *mdp5_kms)
276{
277 static const enum mdp5_pipe crtcs[] = {
3d47fd47 278 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
06c0dd96
RC
279 };
280 struct drm_device *dev = mdp5_kms->dev;
281 struct msm_drm_private *priv = dev->dev_private;
282 struct drm_encoder *encoder;
283 int i, ret;
284
f6a8eaca
RC
285 /* register our interrupt-controller for hdmi/eDP/dsi/etc
286 * to use for irqs routed through mdp:
287 */
288 ret = mdp5_irq_domain_init(mdp5_kms);
289 if (ret)
290 goto fail;
291
06c0dd96 292 /* construct CRTCs: */
3d47fd47 293 for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) {
06c0dd96
RC
294 struct drm_plane *plane;
295 struct drm_crtc *crtc;
296
297 plane = mdp5_plane_init(dev, crtcs[i], true);
298 if (IS_ERR(plane)) {
299 ret = PTR_ERR(plane);
300 dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
301 pipe2name(crtcs[i]), ret);
302 goto fail;
303 }
304
305 crtc = mdp5_crtc_init(dev, plane, i);
306 if (IS_ERR(crtc)) {
307 ret = PTR_ERR(crtc);
308 dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
309 pipe2name(crtcs[i]), ret);
310 goto fail;
311 }
312 priv->crtcs[priv->num_crtcs++] = crtc;
313 }
314
315 /* Construct encoder for HDMI: */
316 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
317 if (IS_ERR(encoder)) {
318 dev_err(dev->dev, "failed to construct encoder\n");
319 ret = PTR_ERR(encoder);
320 goto fail;
321 }
322
323 /* NOTE: the vsync and error irq's are actually associated with
324 * the INTF/encoder.. the easiest way to deal with this (ie. what
325 * we do now) is assume a fixed relationship between crtc's and
326 * encoders. I'm not sure if there is ever a need to more freely
327 * assign crtcs to encoders, but if there is then we need to take
328 * care of error and vblank irq's that the crtc has registered,
329 * and also update user-requested vblank_mask.
330 */
331 encoder->possible_crtcs = BIT(0);
332 mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
333
334 priv->encoders[priv->num_encoders++] = encoder;
335
336 /* Construct bridge/connector for HDMI: */
067fef37
RC
337 if (priv->hdmi) {
338 ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
339 if (ret) {
340 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
341 goto fail;
342 }
06c0dd96
RC
343 }
344
345 return 0;
346
347fail:
348 return ret;
349}
350
06c0dd96
RC
351static int get_clk(struct platform_device *pdev, struct clk **clkp,
352 const char *name)
353{
354 struct device *dev = &pdev->dev;
355 struct clk *clk = devm_clk_get(dev, name);
356 if (IS_ERR(clk)) {
357 dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
358 return PTR_ERR(clk);
359 }
360 *clkp = clk;
361 return 0;
362}
363
364struct msm_kms *mdp5_kms_init(struct drm_device *dev)
365{
366 struct platform_device *pdev = dev->platformdev;
367 struct mdp5_platform_config *config = mdp5_get_config(pdev);
368 struct mdp5_kms *mdp5_kms;
369 struct msm_kms *kms = NULL;
370 struct msm_mmu *mmu;
3d47fd47 371 int i, ret;
06c0dd96
RC
372
373 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
374 if (!mdp5_kms) {
375 dev_err(dev->dev, "failed to allocate kms\n");
376 ret = -ENOMEM;
377 goto fail;
378 }
379
380 mdp_kms_init(&mdp5_kms->base, &kms_funcs);
381
382 kms = &mdp5_kms->base.base;
383
384 mdp5_kms->dev = dev;
385 mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
386
387 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
388 if (IS_ERR(mdp5_kms->mmio)) {
389 ret = PTR_ERR(mdp5_kms->mmio);
390 goto fail;
391 }
392
393 mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
394 if (IS_ERR(mdp5_kms->vbif)) {
395 ret = PTR_ERR(mdp5_kms->vbif);
396 goto fail;
397 }
398
399 mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
400 if (IS_ERR(mdp5_kms->vdd)) {
401 ret = PTR_ERR(mdp5_kms->vdd);
402 goto fail;
403 }
404
405 ret = regulator_enable(mdp5_kms->vdd);
406 if (ret) {
407 dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
408 goto fail;
409 }
410
a0906a02
RC
411 ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk");
412 if (ret)
413 goto fail;
414 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk");
415 if (ret)
416 goto fail;
417 ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src");
418 if (ret)
419 goto fail;
420 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk");
421 if (ret)
422 goto fail;
423 ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
424 if (ret)
425 goto fail;
426 ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
06c0dd96
RC
427 if (ret)
428 goto fail;
429
430 ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk);
431
3d47fd47
SV
432 ret = mdp5_select_hw_cfg(kms);
433 if (ret)
434 goto fail;
435
06c0dd96
RC
436 /* make sure things are off before attaching iommu (bootloader could
437 * have left things on, in which case we'll start getting faults if
438 * we don't disable):
439 */
440 mdp5_enable(mdp5_kms);
3d47fd47
SV
441 for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++)
442 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
06c0dd96
RC
443 mdp5_disable(mdp5_kms);
444 mdelay(16);
445
446 if (config->iommu) {
944fc36c 447 mmu = msm_iommu_new(&pdev->dev, config->iommu);
06c0dd96
RC
448 if (IS_ERR(mmu)) {
449 ret = PTR_ERR(mmu);
87e956e9 450 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
06c0dd96
RC
451 goto fail;
452 }
87e956e9 453
06c0dd96
RC
454 ret = mmu->funcs->attach(mmu, iommu_ports,
455 ARRAY_SIZE(iommu_ports));
87e956e9
SV
456 if (ret) {
457 dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
458 mmu->funcs->destroy(mmu);
06c0dd96 459 goto fail;
87e956e9 460 }
06c0dd96
RC
461 } else {
462 dev_info(dev->dev, "no iommu, fallback to phys "
463 "contig buffers for scanout\n");
464 mmu = NULL;
465 }
87e956e9 466 mdp5_kms->mmu = mmu;
06c0dd96
RC
467
468 mdp5_kms->id = msm_register_mmu(dev, mmu);
469 if (mdp5_kms->id < 0) {
470 ret = mdp5_kms->id;
471 dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
472 goto fail;
473 }
474
475 ret = modeset_init(mdp5_kms);
476 if (ret) {
477 dev_err(dev->dev, "modeset_init failed: %d\n", ret);
478 goto fail;
479 }
480
481 return kms;
482
483fail:
484 if (kms)
485 mdp5_destroy(kms);
486 return ERR_PTR(ret);
487}
488
/* Build the (static, single-instance) platform config.  Values are
 * currently hard-coded; the TODOs note what should come from DT. */
static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
{
	static struct mdp5_platform_config config = {};
#ifdef CONFIG_OF
	/* TODO */
#endif
	/* may be NULL when no IOMMU is available; mdp5_kms_init() then
	 * falls back to physically contiguous scanout buffers */
	config.iommu = iommu_domain_alloc(&platform_bus_type);
	/* TODO hard-coded in downstream mdss, but should it be? */
	config.max_clk = 200000000;
	/* TODO get from DT: */
	config.smp_blk_cnt = 22;

	return &config;
}
This page took 0.117712 seconds and 5 git commands to generate.