/*
 * Copyright (C) 2014 Free Electrons
 * Copyright (C) 2014 Atmel
 *
 * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

#include "atmel_hlcdc_dc.h"

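/*
 * Flip-work callback: drop the framebuffer reference held by a completed
 * flip and free the flip object.
 */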
static void
atmel_hlcdc_layer_fb_flip_release(struct drm_flip_work *work, void *val)
{
	struct atmel_hlcdc_layer_fb_flip *flip = val;

	if (flip->fb)
		drm_framebuffer_unreference(flip->fb);
	kfree(flip);
}

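/*
 * Destroy a flip that was never queued on the flip-work: release the
 * framebuffer reference and free both the flip-work task and the flip.
 */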
static void
atmel_hlcdc_layer_fb_flip_destroy(struct atmel_hlcdc_layer_fb_flip *flip)
{
	if (flip->fb)
		drm_framebuffer_unreference(flip->fb);
	kfree(flip->task);
	kfree(flip);
}

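/*
 * Release the DMA descriptors attached to a flip and queue the flip on the
 * garbage collector flip-work so it gets freed outside of atomic context.
 */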
static void
atmel_hlcdc_layer_fb_flip_release_queue(struct atmel_hlcdc_layer *layer,
					struct atmel_hlcdc_layer_fb_flip *flip)
{
	int i;

	if (!flip)
		return;

	for (i = 0; i < layer->max_planes; i++) {
		if (!flip->dscrs[i])
			break;

		flip->dscrs[i]->status = 0;
		flip->dscrs[i] = NULL;
	}

	drm_flip_work_queue_task(&layer->gc, flip->task);
	drm_flip_work_commit(&layer->gc, layer->wq);
}

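/*
 * Reset an update slot: clear the updated-configs bitmap and the config
 * values, and release the framebuffer flip attached to the slot, if any.
 */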
static void atmel_hlcdc_layer_update_reset(struct atmel_hlcdc_layer *layer,
					   int id)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_update_slot *slot;

	if (id < 0 || id > 1)
		return;

	slot = &upd->slots[id];
	bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs);
	memset(slot->configs, 0,
	       sizeof(*slot->configs) * layer->desc->nconfigs);

	if (slot->fb_flip) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, slot->fb_flip);
		slot->fb_flip = NULL;
	}
}

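/*
 * Apply the pending update slot: write the updated config registers, program
 * the DMA descriptors for the new framebuffer (starting the channel if it
 * was disabled, or chaining an add-to-queue operation otherwise), trigger
 * the collected channel actions and reset the slot.
 */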
static void atmel_hlcdc_layer_update_apply(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_update_slot *slot;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_dma_channel_dscr *dscr;
	unsigned int cfg;
	u32 action = 0;
	int i = 0;

	if (upd->pending < 0 || upd->pending > 1)
		return;

	slot = &upd->slots[upd->pending];

	for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) {
		regmap_write(regmap,
			     desc->regs_offset +
			     ATMEL_HLCDC_LAYER_CFG(layer, cfg),
			     slot->configs[cfg]);
		action |= ATMEL_HLCDC_LAYER_UPDATE;
	}

	fb_flip = slot->fb_flip;

	if (!fb_flip->fb)
		goto apply;

	if (dma->status == ATMEL_HLCDC_LAYER_DISABLED) {
		for (i = 0; i < fb_flip->ngems; i++) {
			dscr = fb_flip->dscrs[i];
			dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
				     ATMEL_HLCDC_LAYER_DMA_IRQ |
				     ATMEL_HLCDC_LAYER_ADD_IRQ |
				     ATMEL_HLCDC_LAYER_DONE_IRQ;

			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_ADDR(i),
				     dscr->addr);
			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_CTRL(i),
				     dscr->ctrl);
			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_NEXT(i),
				     dscr->next);
		}

		action |= ATMEL_HLCDC_LAYER_DMA_CHAN;
		dma->status = ATMEL_HLCDC_LAYER_ENABLED;
	} else {
		for (i = 0; i < fb_flip->ngems; i++) {
			dscr = fb_flip->dscrs[i];
			dscr->ctrl = ATMEL_HLCDC_LAYER_DFETCH |
				     ATMEL_HLCDC_LAYER_DMA_IRQ |
				     ATMEL_HLCDC_LAYER_DSCR_IRQ |
				     ATMEL_HLCDC_LAYER_DONE_IRQ;

			regmap_write(regmap,
				     desc->regs_offset +
				     ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
				     dscr->next);
		}

		action |= ATMEL_HLCDC_LAYER_A2Q;
	}

	/* Release unneeded descriptors */
	for (i = fb_flip->ngems; i < layer->max_planes; i++) {
		fb_flip->dscrs[i]->status = 0;
		fb_flip->dscrs[i] = NULL;
	}

	dma->queue = fb_flip;
	slot->fb_flip = NULL;

apply:
	if (action)
		regmap_write(regmap,
			     desc->regs_offset + ATMEL_HLCDC_LAYER_CHER,
			     action);

	atmel_hlcdc_layer_update_reset(layer, upd->pending);

	upd->pending = -1;
}

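/*
 * Per-layer interrupt handler: collect the LOADED/DONE/OVERRUN status of
 * each memory plane, release flips that have been loaded or completed, reset
 * the channel on overrun, and apply the pending update once the queue slot
 * is free.
 */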
void atmel_hlcdc_layer_irq(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_fb_flip *flip;
	unsigned long flags;
	unsigned int isr, imr;
	unsigned int status;
	unsigned int plane_status;
	u32 flip_status;
	int i;

	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr);
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);
	status = imr & isr;
	if (!status)
		return;

	spin_lock_irqsave(&layer->lock, flags);

	flip = dma->queue ? dma->queue : dma->cur;

	if (!flip) {
		spin_unlock_irqrestore(&layer->lock, flags);
		return;
	}

	/*
	 * Set LOADED and DONE flags: they'll be cleared if at least one
	 * memory plane is not LOADED or DONE.
	 */
	flip_status = ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED |
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
	for (i = 0; i < flip->ngems; i++) {
		plane_status = (status >> (8 * i));

		if (plane_status &
		    (ATMEL_HLCDC_LAYER_ADD_IRQ |
		     ATMEL_HLCDC_LAYER_DSCR_IRQ) &
		    ~flip->dscrs[i]->ctrl) {
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;
			flip->dscrs[i]->ctrl |=
					ATMEL_HLCDC_LAYER_ADD_IRQ |
					ATMEL_HLCDC_LAYER_DSCR_IRQ;
		}

		if (plane_status &
		    ATMEL_HLCDC_LAYER_DONE_IRQ &
		    ~flip->dscrs[i]->ctrl) {
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;
			flip->dscrs[i]->ctrl |=
					ATMEL_HLCDC_LAYER_DONE_IRQ;
		}

		if (plane_status & ATMEL_HLCDC_LAYER_OVR_IRQ)
			flip->dscrs[i]->status |=
					ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;

		/*
		 * Clear LOADED and DONE flags if the memory plane is either
		 * not LOADED or not DONE.
		 */
		if (!(flip->dscrs[i]->status &
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED))
			flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED;

		if (!(flip->dscrs[i]->status &
		      ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE))
			flip_status &= ~ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE;

		/*
		 * An overrun on one memory plane impacts the whole
		 * framebuffer transfer, hence we set the OVERRUN flag as
		 * soon as there's one memory plane reporting such an
		 * overrun.
		 */
		flip_status |= flip->dscrs[i]->status &
			       ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN;
	}

	/* Get changed bits */
	flip_status ^= flip->status;
	flip->status |= flip_status;

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_LOADED) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = dma->queue;
		dma->queue = NULL;
	}

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_DONE) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = NULL;
	}

	if (flip_status & ATMEL_HLCDC_DMA_CHANNEL_DSCR_OVERRUN) {
		regmap_write(regmap,
			     desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
			     ATMEL_HLCDC_LAYER_RST);
		if (dma->queue)
			atmel_hlcdc_layer_fb_flip_release_queue(layer,
								dma->queue);

		if (dma->cur)
			atmel_hlcdc_layer_fb_flip_release_queue(layer,
								dma->cur);

		dma->cur = NULL;
		dma->queue = NULL;
	}

	if (!dma->queue) {
		atmel_hlcdc_layer_update_apply(layer);

		if (!dma->cur)
			dma->status = ATMEL_HLCDC_LAYER_DISABLED;
	}

	spin_unlock_irqrestore(&layer->lock, flags);
}

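/*
 * Disable the layer: reset the DMA channel, clear pending interrupts and
 * discard the current, queued and pending framebuffer flips.
 */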
void atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	unsigned long flags;
	unsigned int isr;

	spin_lock_irqsave(&layer->lock, flags);

	/* Disable the layer */
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST);

	/* Clear all pending interrupts */
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr);

	/* Discard current and queued framebuffer transfers. */
	if (dma->cur) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->cur);
		dma->cur = NULL;
	}

	if (dma->queue) {
		atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue);
		dma->queue = NULL;
	}

	/*
	 * Then discard the pending update request (if any) to prevent the
	 * DMA irq handler from restarting the DMA channel after it has been
	 * disabled.
	 */
	if (upd->pending >= 0) {
		atmel_hlcdc_layer_update_reset(layer, upd->pending);
		upd->pending = -1;
	}

	dma->status = ATMEL_HLCDC_LAYER_DISABLED;

	spin_unlock_irqrestore(&layer->lock, flags);
}

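/*
 * Start a new update request on the layer: allocate a framebuffer flip,
 * reserve one DMA descriptor per plane and prepare the next update slot,
 * inheriting the pending slot's configs, or the current hardware configs
 * when nothing is pending. Returns 0 on success, -ENOMEM or -EBUSY on
 * failure.
 *
 * A typical update sequence driven by a plane update would look like this
 * (sketch; error handling omitted):
 *
 *	if (!atmel_hlcdc_layer_update_start(layer)) {
 *		atmel_hlcdc_layer_update_cfg(layer, cfg, mask, val);
 *		atmel_hlcdc_layer_update_set_fb(layer, fb, offsets);
 *		atmel_hlcdc_layer_update_commit(layer);
 *	}
 */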
int atmel_hlcdc_layer_update_start(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct regmap *regmap = layer->hlcdc->regmap;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_layer_update_slot *slot;
	unsigned long flags;
	int i, j = 0;

	fb_flip = kzalloc(sizeof(*fb_flip), GFP_KERNEL);
	if (!fb_flip)
		return -ENOMEM;

	fb_flip->task = drm_flip_work_allocate_task(fb_flip, GFP_KERNEL);
	if (!fb_flip->task) {
		kfree(fb_flip);
		return -ENOMEM;
	}

	spin_lock_irqsave(&layer->lock, flags);

	upd->next = upd->pending ? 0 : 1;

	slot = &upd->slots[upd->next];

	for (i = 0; i < layer->max_planes * 4; i++) {
		if (!dma->dscrs[i].status) {
			fb_flip->dscrs[j++] = &dma->dscrs[i];
			dma->dscrs[i].status =
				ATMEL_HLCDC_DMA_CHANNEL_DSCR_RESERVED;
			if (j == layer->max_planes)
				break;
		}
	}

	if (j < layer->max_planes) {
		/* Not enough free descriptors: release the reserved ones. */
		for (i = 0; i < j; i++)
			fb_flip->dscrs[i]->status = 0;

		spin_unlock_irqrestore(&layer->lock, flags);
		atmel_hlcdc_layer_fb_flip_destroy(fb_flip);
		return -EBUSY;
	}

	slot->fb_flip = fb_flip;

	if (upd->pending >= 0) {
		memcpy(slot->configs,
		       upd->slots[upd->pending].configs,
		       layer->desc->nconfigs * sizeof(u32));
		memcpy(slot->updated_configs,
		       upd->slots[upd->pending].updated_configs,
		       DIV_ROUND_UP(layer->desc->nconfigs,
				    BITS_PER_BYTE * sizeof(unsigned long)) *
		       sizeof(unsigned long));
		if (upd->slots[upd->pending].fb_flip->fb) {
			slot->fb_flip->fb =
				upd->slots[upd->pending].fb_flip->fb;
			slot->fb_flip->ngems =
				upd->slots[upd->pending].fb_flip->ngems;
			drm_framebuffer_reference(slot->fb_flip->fb);
		}
	} else {
		regmap_bulk_read(regmap,
				 layer->desc->regs_offset +
				 ATMEL_HLCDC_LAYER_CFG(layer, 0),
				 upd->slots[upd->next].configs,
				 layer->desc->nconfigs);
	}

	spin_unlock_irqrestore(&layer->lock, flags);

	return 0;
}

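/*
 * Roll back an update started with atmel_hlcdc_layer_update_start() without
 * committing it to the hardware.
 */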
void atmel_hlcdc_layer_update_rollback(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;

	atmel_hlcdc_layer_update_reset(layer, upd->next);
	upd->next = -1;
}

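/*
 * Attach a framebuffer to the next update slot: fill the reserved DMA
 * descriptors with the GEM buffer addresses (plus per-plane offsets), take
 * a reference on the new framebuffer and drop the reference on the old one.
 */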
void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer,
				     struct drm_framebuffer *fb,
				     unsigned int *offsets)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_fb_flip *fb_flip;
	struct atmel_hlcdc_layer_update_slot *slot;
	struct atmel_hlcdc_dma_channel_dscr *dscr;
	struct drm_framebuffer *old_fb;
	int nplanes = 0;
	int i;

	if (upd->next < 0 || upd->next > 1)
		return;

	if (fb)
		nplanes = drm_format_num_planes(fb->pixel_format);

	if (nplanes > layer->max_planes)
		return;

	slot = &upd->slots[upd->next];

	fb_flip = slot->fb_flip;
	old_fb = slot->fb_flip->fb;

	for (i = 0; i < nplanes; i++) {
		struct drm_gem_cma_object *gem;

		dscr = slot->fb_flip->dscrs[i];
		gem = drm_fb_cma_get_gem_obj(fb, i);
		dscr->addr = gem->paddr + offsets[i];
	}

	fb_flip->ngems = nplanes;
	fb_flip->fb = fb;

	if (fb)
		drm_framebuffer_reference(fb);

	if (old_fb)
		drm_framebuffer_unreference(old_fb);
}

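/*
 * Update one config register in the next update slot: clear the masked
 * bits, set the new value and mark the config as updated so it gets written
 * to the hardware on commit.
 */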
void atmel_hlcdc_layer_update_cfg(struct atmel_hlcdc_layer *layer, int cfg,
				  u32 mask, u32 val)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_update_slot *slot;

	if (upd->next < 0 || upd->next > 1)
		return;

	if (cfg >= layer->desc->nconfigs)
		return;

	slot = &upd->slots[upd->next];
	slot->configs[cfg] &= ~mask;
	slot->configs[cfg] |= (val & mask);
	set_bit(cfg, slot->updated_configs);
}

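/*
 * Commit the next update slot: it replaces any pending update request, and
 * is applied immediately when no framebuffer transfer is queued on the DMA
 * channel (otherwise the IRQ handler applies it once the queue slot frees
 * up).
 */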
void atmel_hlcdc_layer_update_commit(struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	struct atmel_hlcdc_layer_update_slot *slot;
	unsigned long flags;

	if (upd->next < 0 || upd->next > 1)
		return;

	slot = &upd->slots[upd->next];

	spin_lock_irqsave(&layer->lock, flags);

	/*
	 * Release the pending update request and replace it with the new
	 * one.
	 */
	if (upd->pending >= 0)
		atmel_hlcdc_layer_update_reset(layer, upd->pending);

	upd->pending = upd->next;
	upd->next = -1;

	if (!dma->queue)
		atmel_hlcdc_layer_update_apply(layer);

	spin_unlock_irqrestore(&layer->lock, flags);
}

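/*
 * Allocate the layer DMA descriptors from coherent memory (4 per plane) and
 * pre-fill each descriptor's next field with its own DMA address.
 */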
static int atmel_hlcdc_layer_dma_init(struct drm_device *dev,
				      struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	dma_addr_t dma_addr;
	int i;

	dma->dscrs = dma_alloc_coherent(dev->dev,
					layer->max_planes * 4 *
					sizeof(*dma->dscrs),
					&dma_addr, GFP_KERNEL);
	if (!dma->dscrs)
		return -ENOMEM;

	for (i = 0; i < layer->max_planes * 4; i++) {
		struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

		dscr->next = dma_addr + (i * sizeof(*dscr));
	}

	return 0;
}

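/*
 * Release the coherent descriptor pool allocated by
 * atmel_hlcdc_layer_dma_init(). The first descriptor's next field still
 * holds the base DMA address of the pool.
 */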
static void atmel_hlcdc_layer_dma_cleanup(struct drm_device *dev,
					  struct atmel_hlcdc_layer *layer)
{
	struct atmel_hlcdc_layer_dma_channel *dma = &layer->dma;
	int i;

	for (i = 0; i < layer->max_planes * 4; i++) {
		struct atmel_hlcdc_dma_channel_dscr *dscr = &dma->dscrs[i];

		dscr->status = 0;
	}

	dma_free_coherent(dev->dev, layer->max_planes * 4 *
			  sizeof(*dma->dscrs), dma->dscrs,
			  dma->dscrs[0].next);
}

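/*
 * Allocate the two update slots from a single buffer: each slot gets an
 * updated-configs bitmap followed by an array of config values.
 */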
static int atmel_hlcdc_layer_update_init(struct drm_device *dev,
				struct atmel_hlcdc_layer *layer,
				const struct atmel_hlcdc_layer_desc *desc)
{
	struct atmel_hlcdc_layer_update *upd = &layer->update;
	int updated_size;
	void *buffer;
	int i;

	updated_size = DIV_ROUND_UP(desc->nconfigs,
				    BITS_PER_BYTE *
				    sizeof(unsigned long));

	buffer = devm_kzalloc(dev->dev,
			      ((desc->nconfigs * sizeof(u32)) +
			       (updated_size * sizeof(unsigned long))) * 2,
			      GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	for (i = 0; i < 2; i++) {
		upd->slots[i].updated_configs = buffer;
		buffer += updated_size * sizeof(unsigned long);
		upd->slots[i].configs = buffer;
		buffer += desc->nconfigs * sizeof(u32);
	}

	upd->pending = -1;
	upd->next = -1;

	return 0;
}

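/*
 * Initialize a layer: reset the channel, compute max_planes from the
 * supported formats, set up the descriptor pool and update slots, then
 * clear and enable the per-plane DMA/DSCR/ADD/DONE/OVR interrupts.
 */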
int atmel_hlcdc_layer_init(struct drm_device *dev,
			   struct atmel_hlcdc_layer *layer,
			   const struct atmel_hlcdc_layer_desc *desc)
{
	struct atmel_hlcdc_dc *dc = dev->dev_private;
	struct regmap *regmap = dc->hlcdc->regmap;
	unsigned int tmp;
	int ret;
	int i;

	layer->hlcdc = dc->hlcdc;
	layer->wq = dc->wq;
	layer->desc = desc;

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST);

	for (i = 0; i < desc->formats->nformats; i++) {
		int nplanes = drm_format_num_planes(desc->formats->formats[i]);

		if (nplanes > layer->max_planes)
			layer->max_planes = nplanes;
	}

	spin_lock_init(&layer->lock);
	drm_flip_work_init(&layer->gc, desc->name,
			   atmel_hlcdc_layer_fb_flip_release);

	ret = atmel_hlcdc_layer_dma_init(dev, layer);
	if (ret)
		return ret;

	ret = atmel_hlcdc_layer_update_init(dev, layer, desc);
	if (ret)
		return ret;

	/* Flush Status Register */
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
		     0xffffffff);
	regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR,
		    &tmp);

	tmp = 0;
	for (i = 0; i < layer->max_planes; i++)
		tmp |= (ATMEL_HLCDC_LAYER_DMA_IRQ |
			ATMEL_HLCDC_LAYER_DSCR_IRQ |
			ATMEL_HLCDC_LAYER_ADD_IRQ |
			ATMEL_HLCDC_LAYER_DONE_IRQ |
			ATMEL_HLCDC_LAYER_OVR_IRQ) << (8 * i);

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp);

	return 0;
}

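/*
 * Cleanup a layer: mask all interrupts, reset the channel and release the
 * DMA descriptors and flip-work resources.
 */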
void atmel_hlcdc_layer_cleanup(struct drm_device *dev,
			       struct atmel_hlcdc_layer *layer)
{
	const struct atmel_hlcdc_layer_desc *desc = layer->desc;
	struct regmap *regmap = layer->hlcdc->regmap;

	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR,
		     0xffffffff);
	regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR,
		     ATMEL_HLCDC_LAYER_RST);

	atmel_hlcdc_layer_dma_cleanup(dev, layer);
	drm_flip_work_cleanup(&layer->gc);
}