drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */


#include "msm_drv.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"

static const char *iommu_ports[] = {
		"mdp_0",
};

static int mdp5_hw_init(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct drm_device *dev = mdp5_kms->dev;
	unsigned long flags;

	pm_runtime_get_sync(dev->dev);

	/* Magic unknown register writes:
	 *
	 *    W VBIF:0x004 00000001      (mdss_mdp.c:839)
	 *    W MDP5:0x2e0 0xe9          (mdss_mdp.c:839)
	 *    W MDP5:0x2e4 0x55          (mdss_mdp.c:839)
	 *    W MDP5:0x3ac 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3b4 0xc0000ccc    (mdss_mdp.c:839)
	 *    W MDP5:0x3bc 0xcccccc      (mdss_mdp.c:839)
	 *    W MDP5:0x4a8 0xcccc0c0     (mdss_mdp.c:839)
	 *    W MDP5:0x4b0 0xccccc0c0   (mdss_mdp.c:839)
	 *    W MDP5:0x4b8 0xccccc000    (mdss_mdp.c:839)
	 *
	 * Downstream fbdev driver gets these register offsets/values
	 * from DT.. not really sure what these registers are or if
	 * different values for different boards/SoC's, etc.  I guess
	 * they are the golden registers.
	 *
	 * Not setting these does not seem to cause any problem.  But
	 * we may be getting lucky with the bootloader initializing
	 * them for us.  OTOH, if we can always count on the bootloader
	 * setting the golden registers, then perhaps we don't need to
	 * care.
	 */

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	mdp5_ctlm_hw_reset(mdp5_kms->ctlm);

	pm_runtime_put_sync(dev->dev);

	return 0;
}
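
/*
 * Illustration only, not part of the driver: if the "golden" register
 * offset/value pairs described in mdp5_hw_init() were ever parsed out of DT
 * into a table, replaying them could look roughly like the sketch below.
 * 'struct mdp5_golden_reg' and mdp5_write_golden_regs() are assumed names
 * used purely for the example; only mdp5_write() exists in this driver.
 */
#if 0
struct mdp5_golden_reg {
	u32 offset;
	u32 value;
};

static void mdp5_write_golden_regs(struct mdp5_kms *mdp5_kms,
		const struct mdp5_golden_reg *regs, unsigned count)
{
	unsigned i;

	/* replay each DT-provided offset/value pair into the MDP5 block */
	for (i = 0; i < count; i++)
		mdp5_write(mdp5_kms, regs[i].offset, regs[i].value);
}
#endif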

static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}

static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
	unsigned i;

	for (i = 0; i < priv->num_crtcs; i++)
		mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
}

static void mdp5_destroy(struct msm_kms *kms)
{
	struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
	struct msm_mmu *mmu = mdp5_kms->mmu;

	mdp5_irq_domain_fini(mdp5_kms);

	if (mmu) {
		mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
		mmu->funcs->destroy(mmu);
	}

	if (mdp5_kms->ctlm)
		mdp5_ctlm_destroy(mdp5_kms->ctlm);
	if (mdp5_kms->smp)
		mdp5_smp_destroy(mdp5_kms->smp);
	if (mdp5_kms->cfg)
		mdp5_cfg_destroy(mdp5_kms->cfg);

	kfree(mdp5_kms);
}

static const struct mdp_kms_funcs kms_funcs = {
	.base = {
		.hw_init         = mdp5_hw_init,
		.irq_preinstall  = mdp5_irq_preinstall,
		.irq_postinstall = mdp5_irq_postinstall,
		.irq_uninstall   = mdp5_irq_uninstall,
		.irq             = mdp5_irq,
		.enable_vblank   = mdp5_enable_vblank,
		.disable_vblank  = mdp5_disable_vblank,
		.get_format      = mdp_get_format,
		.round_pixclk    = mdp5_round_pixclk,
		.preclose        = mdp5_preclose,
		.destroy         = mdp5_destroy,
	},
	.set_irqmask = mdp5_set_irqmask,
};
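
/*
 * Note for readers (a sketch of the assumed calling convention, not code from
 * this file): the hooks above are reached by the core msm driver through the
 * base msm_kms_funcs vtable, roughly along the lines of:
 *
 *	struct msm_drm_private *priv = dev->dev_private;
 *	struct msm_kms *kms = priv->kms;
 *
 *	ret = kms->funcs->hw_init(kms);
 */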

int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	clk_disable_unprepare(mdp5_kms->ahb_clk);
	clk_disable_unprepare(mdp5_kms->axi_clk);
	clk_disable_unprepare(mdp5_kms->core_clk);
	clk_disable_unprepare(mdp5_kms->lut_clk);

	return 0;
}

int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
	DBG("");

	clk_prepare_enable(mdp5_kms->ahb_clk);
	clk_prepare_enable(mdp5_kms->axi_clk);
	clk_prepare_enable(mdp5_kms->core_clk);
	clk_prepare_enable(mdp5_kms->lut_clk);

	return 0;
}

static int modeset_init(struct mdp5_kms *mdp5_kms)
{
	static const enum mdp5_pipe crtcs[] = {
			SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
	};
	static const enum mdp5_pipe pub_planes[] = {
			SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
	};
	struct drm_device *dev = mdp5_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_encoder *encoder;
	const struct mdp5_cfg_hw *hw_cfg;
	int i, ret;

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	/* register our interrupt-controller for hdmi/eDP/dsi/etc
	 * to use for irqs routed through mdp:
	 */
	ret = mdp5_irq_domain_init(mdp5_kms);
	if (ret)
		goto fail;

	/* construct CRTCs and their private planes: */
	for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
		struct drm_plane *plane;
		struct drm_crtc *crtc;

		plane = mdp5_plane_init(dev, crtcs[i], true,
				hw_cfg->pipe_rgb.base[i]);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
					pipe2name(crtcs[i]), ret);
			goto fail;
		}

		crtc = mdp5_crtc_init(dev, plane, i);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			dev_err(dev->dev, "failed to construct crtc for %s (%d)\n",
					pipe2name(crtcs[i]), ret);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	/* Construct public planes: */
	for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
		struct drm_plane *plane;

		plane = mdp5_plane_init(dev, pub_planes[i], false,
				hw_cfg->pipe_vig.base[i]);
		if (IS_ERR(plane)) {
			ret = PTR_ERR(plane);
			dev_err(dev->dev, "failed to construct %s plane: %d\n",
					pipe2name(pub_planes[i]), ret);
			goto fail;
		}
	}

	/* Construct encoder for HDMI: */
	encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
	if (IS_ERR(encoder)) {
		dev_err(dev->dev, "failed to construct encoder\n");
		ret = PTR_ERR(encoder);
		goto fail;
	}

	/* NOTE: the vsync and error irqs are actually associated with
	 * the INTF/encoder.  The easiest way to deal with this (i.e. what
	 * we do now) is to assume a fixed relationship between crtcs and
	 * encoders.  I'm not sure if there is ever a need to assign crtcs
	 * to encoders more freely, but if there is then we need to take
	 * care of the error and vblank irqs that the crtc has registered,
	 * and also update the user-requested vblank_mask.
	 */
	encoder->possible_crtcs = BIT(0);
	mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);

	priv->encoders[priv->num_encoders++] = encoder;

	/* Construct bridge/connector for HDMI: */
	if (priv->hdmi) {
		ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
		if (ret) {
			dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
			goto fail;
		}
	}

	return 0;

fail:
	return ret;
}

static void read_hw_revision(struct mdp5_kms *mdp5_kms,
		uint32_t *major, uint32_t *minor)
{
	uint32_t version;

	mdp5_enable(mdp5_kms);
	version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
	mdp5_disable(mdp5_kms);

	*major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
	*minor = FIELD(version, MDP5_MDP_VERSION_MINOR);

	DBG("MDP5 version v%d.%d", *major, *minor);
}

static int get_clk(struct platform_device *pdev, struct clk **clkp,
		const char *name)
{
	struct device *dev = &pdev->dev;
	struct clk *clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
		return PTR_ERR(clk);
	}
	*clkp = clk;
	return 0;
}

struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = dev->platformdev;
	struct mdp5_cfg *config;
	struct mdp5_kms *mdp5_kms;
	struct msm_kms *kms = NULL;
	struct msm_mmu *mmu;
	uint32_t major, minor;
	int i, ret;

	mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
	if (!mdp5_kms) {
		dev_err(dev->dev, "failed to allocate kms\n");
		ret = -ENOMEM;
		goto fail;
	}

	spin_lock_init(&mdp5_kms->resource_lock);

	mdp_kms_init(&mdp5_kms->base, &kms_funcs);

	kms = &mdp5_kms->base.base;

	mdp5_kms->dev = dev;

	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
	if (IS_ERR(mdp5_kms->mmio)) {
		ret = PTR_ERR(mdp5_kms->mmio);
		goto fail;
	}

	mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
	if (IS_ERR(mdp5_kms->vbif)) {
		ret = PTR_ERR(mdp5_kms->vbif);
		goto fail;
	}

	mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(mdp5_kms->vdd)) {
		ret = PTR_ERR(mdp5_kms->vdd);
		goto fail;
	}

	ret = regulator_enable(mdp5_kms->vdd);
	if (ret) {
		dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
		goto fail;
	}

	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk");
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk");
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src");
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk");
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk");
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk");
	if (ret)
		goto fail;

	/* we need to set a default rate before enabling.  Set a safe
	 * rate first, then figure out hw revision, and then set a
	 * more optimal rate:
	 */
	clk_set_rate(mdp5_kms->src_clk, 200000000);

	read_hw_revision(mdp5_kms, &major, &minor);

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg)) {
		ret = PTR_ERR(mdp5_kms->cfg);
		mdp5_kms->cfg = NULL;
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);

	/* TODO: compute core clock rate at runtime */
	clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);

	mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
	if (IS_ERR(mdp5_kms->smp)) {
		ret = PTR_ERR(mdp5_kms->smp);
		mdp5_kms->smp = NULL;
		goto fail;
	}

	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
	if (IS_ERR(mdp5_kms->ctlm)) {
		ret = PTR_ERR(mdp5_kms->ctlm);
		mdp5_kms->ctlm = NULL;
		goto fail;
	}

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp5_enable(mdp5_kms);
	for (i = 0; i < config->hw->intf.count; i++)
		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
	mdp5_disable(mdp5_kms);
	mdelay(16);

	if (config->platform.iommu) {
		mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			dev_err(dev->dev, "failed to init iommu: %d\n", ret);
			goto fail;
		}

		ret = mmu->funcs->attach(mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret) {
			dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
			mmu->funcs->destroy(mmu);
			goto fail;
		}
	} else {
		dev_info(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		mmu = NULL;
	}
	mdp5_kms->mmu = mmu;

	mdp5_kms->id = msm_register_mmu(dev, mmu);
	if (mdp5_kms->id < 0) {
		ret = mdp5_kms->id;
		dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
		goto fail;
	}

	ret = modeset_init(mdp5_kms);
	if (ret) {
		dev_err(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	return kms;

fail:
	if (kms)
		mdp5_destroy(kms);
	return ERR_PTR(ret);
}
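
/*
 * Usage note (a sketch of the assumed probe flow, not code quoted from the
 * core driver): mdp5_kms_init() is the MDP5 backend entry point picked by the
 * core msm driver at load time, roughly along the lines of:
 *
 *	switch (get_mdp_ver(pdev)) {
 *	case 4:
 *		kms = mdp4_kms_init(dev);
 *		break;
 *	case 5:
 *		kms = mdp5_kms_init(dev);
 *		break;
 *	}
 *
 * On failure it returns an ERR_PTR() and has already torn down any partially
 * constructed state via mdp5_destroy() above.
 */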