Commit | Line | Data |
---|---|---|
d480ace0 PM |
1 | /* drivers/video/msm_fb/mdp.c |
2 | * | |
3 | * MSM MDP Interface (used by framebuffer core) | |
4 | * | |
5 | * Copyright (C) 2007 QUALCOMM Incorporated | |
6 | * Copyright (C) 2007 Google Incorporated | |
7 | * | |
8 | * This software is licensed under the terms of the GNU General Public | |
9 | * License version 2, as published by the Free Software Foundation, and | |
10 | * may be copied, distributed, and modified under those terms. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | */ | |
17 | ||
18 | #include <linux/kernel.h> | |
19 | #include <linux/fb.h> | |
20 | #include <linux/msm_mdp.h> | |
21 | #include <linux/interrupt.h> | |
22 | #include <linux/wait.h> | |
23 | #include <linux/clk.h> | |
24 | #include <linux/file.h> | |
d480ace0 | 25 | #include <linux/major.h> |
5a0e3ad6 | 26 | #include <linux/slab.h> |
d480ace0 PM |
27 | |
28 | #include <mach/msm_iomap.h> | |
29 | #include <mach/msm_fb.h> | |
30 | #include <linux/platform_device.h> | |
31 | ||
32 | #include "mdp_hw.h" | |
33 | ||
/* sysfs class under which mdp devices are registered (see mdp_init) */
struct class *mdp_class;

#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)

/* Default color-conversion coefficients: nine 3x3 matrix entries
 * followed by three bias values (presumably a YCbCr->RGB setup --
 * TODO confirm against the MDP register documentation). */
static uint16_t mdp_default_ccs[] = {
	0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
	0x010, 0x080, 0x080
};

static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);
static struct msmfb_callback *dma_callback;	/* DMA2-done completion hook */
static struct clk *clk;				/* mdp clock; may be NULL (checked before use) */
static unsigned int mdp_irq_mask;		/* currently-enabled irq sources */
static DEFINE_SPINLOCK(mdp_lock);		/* guards mdp_irq_mask */
DEFINE_MUTEX(mdp_mutex);			/* serializes mdp_blit() */
50 | ||
51 | static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask) | |
52 | { | |
53 | unsigned long irq_flags; | |
54 | int ret = 0; | |
55 | ||
56 | BUG_ON(!mask); | |
57 | ||
58 | spin_lock_irqsave(&mdp_lock, irq_flags); | |
59 | /* if the mask bits are already set return an error, this interrupt | |
60 | * is already enabled */ | |
61 | if (mdp_irq_mask & mask) { | |
62 | printk(KERN_ERR "mdp irq already on already on %x %x\n", | |
63 | mdp_irq_mask, mask); | |
64 | ret = -1; | |
65 | } | |
66 | /* if the mdp irq is not already enabled enable it */ | |
67 | if (!mdp_irq_mask) { | |
68 | if (clk) | |
69 | clk_enable(clk); | |
70 | enable_irq(mdp->irq); | |
71 | } | |
72 | ||
73 | /* update the irq mask to reflect the fact that the interrupt is | |
74 | * enabled */ | |
75 | mdp_irq_mask |= mask; | |
76 | spin_unlock_irqrestore(&mdp_lock, irq_flags); | |
77 | return ret; | |
78 | } | |
79 | ||
80 | static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask) | |
81 | { | |
82 | /* this interrupt is already disabled! */ | |
83 | if (!(mdp_irq_mask & mask)) { | |
84 | printk(KERN_ERR "mdp irq already off %x %x\n", | |
85 | mdp_irq_mask, mask); | |
86 | return -1; | |
87 | } | |
88 | /* update the irq mask to reflect the fact that the interrupt is | |
89 | * disabled */ | |
90 | mdp_irq_mask &= ~(mask); | |
91 | /* if no one is waiting on the interrupt, disable it */ | |
92 | if (!mdp_irq_mask) { | |
5ad43ff9 | 93 | disable_irq_nosync(mdp->irq); |
d480ace0 PM |
94 | if (clk) |
95 | clk_disable(clk); | |
96 | } | |
97 | return 0; | |
98 | } | |
99 | ||
100 | static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask) | |
101 | { | |
102 | unsigned long irq_flags; | |
103 | int ret; | |
104 | ||
105 | spin_lock_irqsave(&mdp_lock, irq_flags); | |
106 | ret = locked_disable_mdp_irq(mdp, mask); | |
107 | spin_unlock_irqrestore(&mdp_lock, irq_flags); | |
108 | return ret; | |
109 | } | |
110 | ||
111 | static irqreturn_t mdp_isr(int irq, void *data) | |
112 | { | |
113 | uint32_t status; | |
114 | unsigned long irq_flags; | |
115 | struct mdp_info *mdp = data; | |
116 | ||
117 | spin_lock_irqsave(&mdp_lock, irq_flags); | |
118 | ||
119 | status = mdp_readl(mdp, MDP_INTR_STATUS); | |
120 | mdp_writel(mdp, status, MDP_INTR_CLEAR); | |
121 | ||
122 | status &= mdp_irq_mask; | |
123 | if (status & DL0_DMA2_TERM_DONE) { | |
124 | if (dma_callback) { | |
125 | dma_callback->func(dma_callback); | |
126 | dma_callback = NULL; | |
127 | } | |
128 | wake_up(&mdp_dma2_waitqueue); | |
129 | } | |
130 | ||
131 | if (status & DL0_ROI_DONE) | |
132 | wake_up(&mdp_ppp_waitqueue); | |
133 | ||
134 | if (status) | |
135 | locked_disable_mdp_irq(mdp, status); | |
136 | ||
137 | spin_unlock_irqrestore(&mdp_lock, irq_flags); | |
138 | return IRQ_HANDLED; | |
139 | } | |
140 | ||
141 | static uint32_t mdp_check_mask(uint32_t mask) | |
142 | { | |
143 | uint32_t ret; | |
144 | unsigned long irq_flags; | |
145 | ||
146 | spin_lock_irqsave(&mdp_lock, irq_flags); | |
147 | ret = mdp_irq_mask & mask; | |
148 | spin_unlock_irqrestore(&mdp_lock, irq_flags); | |
149 | return ret; | |
150 | } | |
151 | ||
152 | static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq) | |
153 | { | |
154 | int ret = 0; | |
155 | unsigned long irq_flags; | |
156 | ||
157 | wait_event_timeout(*wq, !mdp_check_mask(mask), HZ); | |
158 | ||
159 | spin_lock_irqsave(&mdp_lock, irq_flags); | |
160 | if (mdp_irq_mask & mask) { | |
161 | locked_disable_mdp_irq(mdp, mask); | |
162 | printk(KERN_WARNING "timeout waiting for mdp to complete %x\n", | |
163 | mask); | |
164 | ret = -ETIMEDOUT; | |
165 | } | |
166 | spin_unlock_irqrestore(&mdp_lock, irq_flags); | |
167 | ||
168 | return ret; | |
169 | } | |
170 | ||
171 | void mdp_dma_wait(struct mdp_device *mdp_dev) | |
172 | { | |
173 | #define MDP_MAX_TIMEOUTS 20 | |
174 | static int timeout_count; | |
175 | struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); | |
176 | ||
177 | if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT) | |
178 | timeout_count++; | |
179 | else | |
180 | timeout_count = 0; | |
181 | ||
182 | if (timeout_count > MDP_MAX_TIMEOUTS) { | |
183 | printk(KERN_ERR "mdp: dma failed %d times, somethings wrong!\n", | |
184 | MDP_MAX_TIMEOUTS); | |
185 | BUG(); | |
186 | } | |
187 | } | |
188 | ||
189 | static int mdp_ppp_wait(struct mdp_info *mdp) | |
190 | { | |
191 | return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue); | |
192 | } | |
193 | ||
194 | void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride, | |
195 | uint32_t width, uint32_t height, uint32_t x, uint32_t y, | |
196 | struct msmfb_callback *callback) | |
197 | { | |
198 | uint32_t dma2_cfg; | |
199 | uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */ | |
200 | ||
201 | if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) { | |
202 | printk(KERN_ERR "mdp_dma_to_mddi: busy\n"); | |
203 | return; | |
204 | } | |
205 | ||
206 | dma_callback = callback; | |
207 | ||
208 | dma2_cfg = DMA_PACK_TIGHT | | |
209 | DMA_PACK_ALIGN_LSB | | |
210 | DMA_PACK_PATTERN_RGB | | |
211 | DMA_OUT_SEL_AHB | | |
212 | DMA_IBUF_NONCONTIGUOUS; | |
213 | ||
214 | dma2_cfg |= DMA_IBUF_FORMAT_RGB565; | |
215 | ||
216 | dma2_cfg |= DMA_OUT_SEL_MDDI; | |
217 | ||
218 | dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY; | |
219 | ||
220 | dma2_cfg |= DMA_DITHER_EN; | |
221 | ||
222 | /* setup size, address, and stride */ | |
223 | mdp_writel(mdp, (height << 16) | (width), | |
224 | MDP_CMD_DEBUG_ACCESS_BASE + 0x0184); | |
225 | mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188); | |
226 | mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C); | |
227 | ||
228 | /* 666 18BPP */ | |
229 | dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS; | |
230 | ||
231 | /* set y & x offset and MDDI transaction parameters */ | |
232 | mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194); | |
233 | mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0); | |
234 | mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM, | |
235 | MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4); | |
236 | ||
237 | mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180); | |
238 | ||
239 | /* start DMA2 */ | |
240 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044); | |
241 | } | |
242 | ||
243 | void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride, | |
244 | uint32_t width, uint32_t height, uint32_t x, uint32_t y, | |
245 | struct msmfb_callback *callback, int interface) | |
246 | { | |
247 | struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); | |
248 | ||
249 | if (interface == MSM_MDDI_PMDH_INTERFACE) { | |
250 | mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y, | |
251 | callback); | |
252 | } | |
253 | } | |
254 | ||
255 | int get_img(struct mdp_img *img, struct fb_info *info, | |
256 | unsigned long *start, unsigned long *len, | |
257 | struct file **filep) | |
258 | { | |
259 | int put_needed, ret = 0; | |
260 | struct file *file; | |
d480ace0 | 261 | |
d480ace0 PM |
262 | file = fget_light(img->memory_id, &put_needed); |
263 | if (file == NULL) | |
264 | return -1; | |
265 | ||
266 | if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) { | |
267 | *start = info->fix.smem_start; | |
268 | *len = info->fix.smem_len; | |
269 | } else | |
270 | ret = -1; | |
271 | fput_light(file, put_needed); | |
272 | ||
273 | return ret; | |
274 | } | |
275 | ||
/* Counterpart of get_img().  Intentionally empty: get_img() already
 * drops its fd reference via fput_light(), so there is nothing left to
 * release here; kept so mdp_blit()'s paired get/put structure reads
 * symmetrically. */
void put_img(struct file *src_file, struct file *dst_file)
{
}
279 | ||
/* Blit req->src to req->dst using the PPP engine.  Both images must be
 * framebuffer-backed fds (validated by get_img()).  Returns 0 on
 * success or a negative errno.  Serialized by mdp_mutex. */
int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
	     struct mdp_blit_req *req)
{
	int ret;
	unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
	struct file *src_file = 0, *dst_file = 0;

	/* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
	if (unlikely(req->src_rect.h == 0 ||
		     req->src_rect.w == 0)) {
		printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
		return -EINVAL;
	}
	if (unlikely(req->dst_rect.h == 0 ||
		     req->dst_rect.w == 0))
		return -EINVAL;

	/* do this first so that if this fails, the caller can always
	 * safely call put_img */
	if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve src image from "
				"memory\n");
		return -EINVAL;
	}

	if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
				"memory\n");
		return -EINVAL;
	}
	mutex_lock(&mdp_mutex);

	/* transp_masking unimplemented */
	req->transp_mask = MDP_TRANSP_NOP;
	/* Hardware workaround: a rotated, blended blit into a narrow
	 * (w <= 16) but tall (h >= 16) destination is split into
	 * 16-pixel-high tiles, each blitted and waited on separately.
	 * NOTE(review): since transp_mask was just forced to
	 * MDP_TRANSP_NOP, this path can only trigger via req->alpha or
	 * an alpha-capable source format. */
	if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
		      req->alpha != MDP_ALPHA_NOP ||
		      HAS_ALPHA(req->src.format)) &&
		     (req->flags & MDP_ROT_90 &&
		      req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
		int i;
		unsigned int tiles = req->dst_rect.h / 16;
		unsigned int remainder = req->dst_rect.h % 16;
		/* scale src width so each tile maps onto 16 dst rows */
		req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = 16;
		for (i = 0; i < tiles; i++) {
			enable_mdp_irq(mdp, DL0_ROI_DONE);
			ret = mdp_ppp_blit(mdp, req, src_file, src_start,
					   src_len, dst_file, dst_start,
					   dst_len);
			if (ret)
				goto err_bad_blit;
			ret = mdp_ppp_wait(mdp);
			if (ret)
				goto err_wait_failed;
			/* advance to the next tile */
			req->dst_rect.y += 16;
			req->src_rect.x += req->src_rect.w;
		}
		if (!remainder)
			goto end;
		/* set up the final, partial tile for the common path below */
		req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = remainder;
	}
	enable_mdp_irq(mdp, DL0_ROI_DONE);
	ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
			   dst_start,
			   dst_len);
	if (ret)
		goto err_bad_blit;
	ret = mdp_ppp_wait(mdp);
	if (ret)
		goto err_wait_failed;
end:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return 0;
err_bad_blit:
	/* blit never started, so the isr will not disable this for us */
	disable_mdp_irq(mdp, DL0_ROI_DONE);
err_wait_failed:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return ret;
}
363 | ||
364 | void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id) | |
365 | { | |
366 | struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev); | |
367 | ||
368 | disp_id &= 0xf; | |
369 | mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43); | |
370 | } | |
371 | ||
372 | int register_mdp_client(struct class_interface *cint) | |
373 | { | |
374 | if (!mdp_class) { | |
375 | pr_err("mdp: no mdp_class when registering mdp client\n"); | |
376 | return -ENODEV; | |
377 | } | |
378 | cint->class = mdp_class; | |
379 | return class_interface_register(cint); | |
380 | } | |
381 | ||
382 | #include "mdp_csc_table.h" | |
383 | #include "mdp_scale_tables.h" | |
384 | ||
385 | int mdp_probe(struct platform_device *pdev) | |
386 | { | |
387 | struct resource *resource; | |
388 | int ret; | |
389 | int n; | |
390 | struct mdp_info *mdp; | |
391 | ||
392 | resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
393 | if (!resource) { | |
394 | pr_err("mdp: can not get mdp mem resource!\n"); | |
395 | return -ENOMEM; | |
396 | } | |
397 | ||
398 | mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL); | |
399 | if (!mdp) | |
400 | return -ENOMEM; | |
401 | ||
402 | mdp->irq = platform_get_irq(pdev, 0); | |
403 | if (mdp->irq < 0) { | |
404 | pr_err("mdp: can not get mdp irq\n"); | |
405 | ret = mdp->irq; | |
406 | goto error_get_irq; | |
407 | } | |
408 | ||
409 | mdp->base = ioremap(resource->start, | |
410 | resource->end - resource->start); | |
411 | if (mdp->base == 0) { | |
412 | printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n"); | |
413 | ret = -ENOMEM; | |
414 | goto error_ioremap; | |
415 | } | |
416 | ||
417 | mdp->mdp_dev.dma = mdp_dma; | |
418 | mdp->mdp_dev.dma_wait = mdp_dma_wait; | |
419 | mdp->mdp_dev.blit = mdp_blit; | |
420 | mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp; | |
421 | ||
422 | clk = clk_get(&pdev->dev, "mdp_clk"); | |
423 | if (IS_ERR(clk)) { | |
424 | printk(KERN_INFO "mdp: failed to get mdp clk"); | |
425 | return PTR_ERR(clk); | |
426 | } | |
427 | ||
428 | ret = request_irq(mdp->irq, mdp_isr, IRQF_DISABLED, "msm_mdp", mdp); | |
429 | if (ret) | |
430 | goto error_request_irq; | |
431 | disable_irq(mdp->irq); | |
432 | mdp_irq_mask = 0; | |
433 | ||
434 | /* debug interface write access */ | |
435 | mdp_writel(mdp, 1, 0x60); | |
436 | ||
437 | mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE); | |
438 | mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE); | |
439 | ||
440 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8); | |
441 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc); | |
442 | ||
443 | for (n = 0; n < ARRAY_SIZE(csc_table); n++) | |
444 | mdp_writel(mdp, csc_table[n].val, csc_table[n].reg); | |
445 | ||
446 | /* clear up unused fg/main registers */ | |
447 | /* comp.plane 2&3 ystride */ | |
448 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120); | |
449 | ||
450 | /* unpacked pattern */ | |
451 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c); | |
452 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130); | |
453 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134); | |
454 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158); | |
455 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c); | |
456 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160); | |
457 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170); | |
458 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174); | |
459 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c); | |
460 | ||
461 | /* comp.plane 2 & 3 */ | |
462 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114); | |
463 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118); | |
464 | ||
465 | /* clear unused bg registers */ | |
466 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8); | |
467 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0); | |
468 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc); | |
469 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0); | |
470 | mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4); | |
471 | ||
472 | for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++) | |
473 | mdp_writel(mdp, mdp_upscale_table[n].val, | |
474 | mdp_upscale_table[n].reg); | |
475 | ||
476 | for (n = 0; n < 9; n++) | |
477 | mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n); | |
478 | mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0); | |
479 | mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0); | |
480 | mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0); | |
481 | ||
482 | /* register mdp device */ | |
483 | mdp->mdp_dev.dev.parent = &pdev->dev; | |
484 | mdp->mdp_dev.dev.class = mdp_class; | |
d601c795 | 485 | dev_set_name(&mdp->mdp_dev.dev, "mdp%d", pdev->id); |
d480ace0 PM |
486 | |
487 | /* if you can remove the platform device you'd have to implement | |
488 | * this: | |
489 | mdp_dev.release = mdp_class; */ | |
490 | ||
491 | ret = device_register(&mdp->mdp_dev.dev); | |
492 | if (ret) | |
493 | goto error_device_register; | |
494 | return 0; | |
495 | ||
496 | error_device_register: | |
497 | free_irq(mdp->irq, mdp); | |
498 | error_request_irq: | |
499 | iounmap(mdp->base); | |
500 | error_get_irq: | |
501 | error_ioremap: | |
502 | kfree(mdp); | |
503 | return ret; | |
504 | } | |
505 | ||
506 | static struct platform_driver msm_mdp_driver = { | |
507 | .probe = mdp_probe, | |
508 | .driver = {.name = "msm_mdp"}, | |
509 | }; | |
510 | ||
511 | static int __init mdp_init(void) | |
512 | { | |
513 | mdp_class = class_create(THIS_MODULE, "msm_mdp"); | |
514 | if (IS_ERR(mdp_class)) { | |
515 | printk(KERN_ERR "Error creating mdp class\n"); | |
516 | return PTR_ERR(mdp_class); | |
517 | } | |
518 | return platform_driver_register(&msm_mdp_driver); | |
519 | } | |
520 | ||
521 | subsys_initcall(mdp_init); |