sim: hw: rework code to avoid gcc warnings
[deliverable/binutils-gdb.git] sim/bfin/dv-bfin_dma.c
/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2021 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"

#include <stdlib.h>

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"

/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */

struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
#define mmr_base() offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

static const char * const mmr_names[] =
{
75 "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
76 "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
77 "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
78};
79#define mmr_name(off) mmr_names[(off) / 4]
80
81static bool
82bfin_dma_enabled (struct bfin_dma *dma)
83{
84 return (dma->config & DMAEN);
85}
86
87static bool
88bfin_dma_running (struct bfin_dma *dma)
89{
90 return (dma->irq_status & DMA_RUN);
91}
92
93static struct hw *
94bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
95{
96 if (dma->hw_peer)
97 return dma->hw_peer;
98 return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
99}
100
101static void
102bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
103{
104 bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
105 bu16 _flows[9], *flows = _flows;
106
107 HW_TRACE ((me, "dma starting up %#x", dma->config));
108
109 switch (dma->config & WDSIZE)
110 {
111 case WDSIZE_32:
112 dma->ele_size = 4;
113 break;
114 case WDSIZE_16:
115 dma->ele_size = 2;
116 break;
117 default:
118 dma->ele_size = 1;
119 break;
120 }
121
  /* Address has to be a multiple of the transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
              dma->config, dma->x_modify);

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
        hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
        hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
        hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
        hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }

  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
        &dma->sal,
        &dma->sah,
        &dma->config,
        &dma->x_count,
        (void *) &dma->x_modify,
        &dma->y_count,
        (void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
        {
        case DMAFLOW_LARGE:
          dma->ndph = _flows[1];
          --ndsize;
          ++flows;
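          /* Fall through -- large descriptors also load NDPL.  */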
        case DMAFLOW_SMALL:
          dma->ndpl = _flows[0];
          --ndsize;
          ++flows;
          break;
        }

      for (idx = 0; idx < ndsize; ++idx)
        *stores[idx] = flows[idx];
    }

  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
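  /* An X/Y count of zero is treated as the maximum count (0xffff here).  */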
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}

static int
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return 1;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return 0;
    default:
      bfin_dma_process_desc (me, dma);
      return 1;
    }
}

static void bfin_dma_hw_event_callback (struct hw *, void *);

static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
                                          bfin_dma_hw_event_callback, dma);
}

/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);
  ret = 0;
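  /* Backwards strides go one element at a time; forward strides are pumped
     in chunks of up to sizeof (buf) bytes.  */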
  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = min (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled ?  abort for now.  */
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
        goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}

static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
                          address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, true))
    return 0;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;
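  /* value16p/value32p alias the channel's MMR backing store at this offset,
     so the switch below can store in whichever access size was used.  */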

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ...  */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
        {
          if (nr_bytes == 4)
            *value32p = value;
          else
            *value16p = value;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
        *value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
        {
          *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
          /* Clear peripheral peer so it gets looked up again.  */
          dma->hw_peer = NULL;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
        *value32p = value;
      else
        *value16p = value;

      if (bfin_dma_enabled (dma))
        {
          dma->irq_status |= DMA_RUN;
          bfin_dma_process_desc (me, dma);
          /* The writer is the master.  */
          if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
            bfin_dma_reschedule (me, 1);
        }
      else
        {
          dma->irq_status &= ~DMA_RUN;
          bfin_dma_reschedule (me, 0);
        }
      break;
    case mmr_offset(irq_status):
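      /* DMA_DONE and DMA_ERR are W1C (write-one-to-clear) bits.  */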
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
        *value16p = value;
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      return 0;
    }

  return nr_bytes;
}

static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
                         address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, false))
    return 0;

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}

static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
                          unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

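  /* When the row drains, advance the 2D state or the descriptor chain.  */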
  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
                           int space, unsigned_word addr,
                           unsigned nr_bytes,
                           int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static const struct hw_port_descriptor bfin_dma_ports[] =
{
  { "di", 0, 0, output_port, }, /* DMA Interrupt */
  { NULL, 0, 0, 0, },
};

static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
                                     &reg.address,
                                     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
                     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}

static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}

const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};