/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2011 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"

/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */

struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

static const char * const mmr_names[] = {
  "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
  "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
  "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
};
#define mmr_name(off) mmr_names[(off) / 4]
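
/* Illustrative sketch (not part of the model): how an MMR access address
   is decoded into a slot of the struct above.  The 0xFFC00C00 base below
   is only an example; real bases come from the "reg" property at attach
   time, and the example_* function is hypothetical.  */
#if 0
static void
example_mmr_decode (void)
{
  bu32 base = 0xFFC00C00;       /* hypothetical channel MMR base */
  bu32 addr = base + 0x08;      /* a 16- or 32-bit access lands here */
  bu32 off  = addr % base;      /* == 0x08, since addr < 2 * base */

  /* The struct mirrors the hardware layout, so offsetof() arithmetic
     recovers both the register slot and its trace name:
       off == mmr_offset (config)
       mmr_name (off) is "CONFIG"  */
}
#endif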

static bool
bfin_dma_enabled (struct bfin_dma *dma)
{
  return (dma->config & DMAEN);
}

static bool
bfin_dma_running (struct bfin_dma *dma)
{
  return (dma->irq_status & DMA_RUN);
}

static struct hw *
bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
{
  if (dma->hw_peer)
    return dma->hw_peer;
  return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
}

static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }

  /* Address has to be multiple of transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
	      dma->config, dma->x_modify);

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
	hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
	hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
	hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
	hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }

  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
	&dma->sal,
	&dma->sah,
	&dma->config,
	&dma->x_count,
	(void *) &dma->x_modify,
	&dma->y_count,
	(void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
	{
	case DMAFLOW_LARGE:
	  dma->ndph = _flows[1];
	  --ndsize;
	  ++flows;
	case DMAFLOW_SMALL:
	  dma->ndpl = _flows[0];
	  --ndsize;
	  ++flows;
	  break;
	}

      for (idx = 0; idx < ndsize; ++idx)
	*stores[idx] = flows[idx];
    }

  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}
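
/* Illustrative sketch (drawn from the loader above, not from the hardware
   manual): the descriptor words fetched by bfin_dma_process_desc land in
   the MMRs in order.  With DMAFLOW_LARGE and NDSIZE == 7, a descriptor in
   memory would look like the hypothetical array below.  */
#if 0
static const bu16 example_large_descriptor[7] =
{
  0x1000,	/* word 0 -> NDPL (lower next descriptor pointer) */
  0x0000,	/* word 1 -> NDPH (upper next descriptor pointer) */
  0x8000,	/* word 2 -> SAL  (lower start address) */
  0x00FF,	/* word 3 -> SAH  (upper start address) */
  0x0000,	/* word 4 -> CONFIG */
  0x0100,	/* word 5 -> X_COUNT */
  0x0002,	/* word 6 -> X_MODIFY */
};
/* DMAFLOW_SMALL skips NDPH (its word 0 is NDPL), while DMAFLOW_ARRAY
   starts directly at SAL and is fetched from CURR_DESC_PTR rather than
   NEXT_DESC_PTR.  */
#endif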

static int
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return 1;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return 0;
    default:
      bfin_dma_process_desc (me, dma);
      return 1;
    }
}
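
/* Worked example for the 2D path above (illustrative, values arbitrary):
   with X_COUNT = 4, X_MODIFY = 2, Y_COUNT = 2, Y_MODIFY = 2 and
   START_ADDR = 0x1000, the first row touches 0x1000/0x1002/0x1004/0x1006.
   The transfer pump has already added X_MODIFY after the last element, so
   bfin_dma_finish_x backs that out and applies Y_MODIFY instead, leaving
   CURR_ADDR at 0x1006 + 2 = 0x1008 for the start of the second row.  */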

static void bfin_dma_hw_event_callback (struct hw *, void *);

static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
					  bfin_dma_hw_event_callback, dma);
}

/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);
  ret = 0;
  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = MIN (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
		 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled ?  abort for now.  */
      if (ret == 0)
	goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
	goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
		 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
	goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
	goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
	goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}
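
/* Worked example of one callback pass (illustrative values only): with
   WDSIZE_32 (ele_size == 4), CURR_X_COUNT == 10 and a positive X_MODIFY,
   nr_bytes is MIN (4096, 10 * 4) == 40.  If the peer accepts all 40 bytes,
   ele_count is 10, CURR_ADDR advances by 10 * X_MODIFY and CURR_X_COUNT
   drops to 0, so bfin_dma_finish_x decides whether to stop, raise the
   interrupt, or load the next descriptor.  A negative X_MODIFY instead
   degrades to one element per pass, hence the performance note above.  */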

static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
			  address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ...  */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
	{
	  if (nr_bytes == 4)
	    *value32p = value;
	  else
	    *value16p = value;
	}
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
	*value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
	{
	  *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
	  /* Clear peripheral peer so it gets looked up again.  */
	  dma->hw_peer = NULL;
	}
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
	*value32p = value;
      else
	*value16p = value;

      if (bfin_dma_enabled (dma))
	{
	  dma->irq_status |= DMA_RUN;
	  bfin_dma_process_desc (me, dma);
	  /* The writer is the master.  */
	  if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
	    bfin_dma_reschedule (me, 1);
	}
      else
	{
	  dma->irq_status &= ~DMA_RUN;
	  bfin_dma_reschedule (me, 0);
	}
      break;
    case mmr_offset(irq_status):
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
	*value16p = value;
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      break;
    }

  return nr_bytes;
}
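
/* Illustrative sketch of the IRQ_STATUS handling above.  Assumption: the
   dv_w1c_2 helper implements plain write-1-to-clear restricted to the
   given mask, which is modelled below with open-coded bit operations
   (example_* is hypothetical, values arbitrary).  */
#if 0
static void
example_irq_status_ack (void)
{
  bu16 irq_status = DMA_DONE | DMA_ERR;   /* both events pending */
  bu16 write_val  = DMA_DONE;             /* guest acks the completion IRQ */

  /* Only the written-as-1 bits inside the W1C mask get cleared.  */
  irq_status &= ~(write_val & (DMA_DONE | DMA_ERR));

  /* irq_status is now DMA_ERR: acking DONE does not lose the error.  */
}
#endif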

static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
			 address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}

static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
			  unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
			   int space, unsigned_word addr,
			   unsigned nr_bytes,
			   int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static const struct hw_port_descriptor bfin_dma_ports[] = {
  { "di", 0, 0, output_port, }, /* DMA Interrupt */
  { NULL, 0, 0, 0, },
};

static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
				     &reg.address,
				     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
		     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}
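
/* Illustrative sketch (assumption, not taken from this file): a machine
   model is expected to instantiate and place a channel through the hw
   device tree, roughly along these lines -- the path and numbers are
   placeholders only, and the length must match BFIN_MMR_DMA_SIZE:

     sim_hw_parse (sd, "/core/bfin_dmac@0/bfin_dma@0/reg 0xffc00c00 0x40");

   attach_bfin_dma_regs above then reads the "reg" property back and maps
   the MMR block at that base via hw_attach_address.  */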

static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}

const struct hw_descriptor dv_bfin_dma_descriptor[] = {
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};