/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

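/* Derive the DMA address width advertised to dmaengine from a DMA mask */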
#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

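/* Return every DMA command on @list to the command cache */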
static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

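/* Free the descriptors on @list along with any commands still attached */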
static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

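/* dmaengine device_free_chan_resources callback: release everything the
 * channel still holds on its complete, active and pending lists
 */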
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);
}

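/*
 * Walk the list in reverse and free any descriptor that the client
 * has already acknowledged.
 */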
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

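/* Tasklet handler: reap acknowledged descriptors from the complete list */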
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

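/*
 * Move the first pending command of a descriptor to its active list
 * and submit it to the CCP; -EINPROGRESS and -EBUSY both count as a
 * successful submission.
 */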
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

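/* Free the command, if any, at the head of the descriptor's active list */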
static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

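/*
 * Retire @desc to the complete list and return the next descriptor on
 * the active list. Caller must hold chan->lock.
 */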
static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

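/*
 * Handle completion of the current command on @desc: free it, mark the
 * descriptor complete once no commands remain (or an error occurred),
 * run the client callback if one was requested, and return the next
 * descriptor that still has commands to issue.
 */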
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			if (tx_desc->callback &&
			    (tx_desc->flags & DMA_PREP_INTERRUPT))
				tx_desc->callback(tx_desc->callback_param);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

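/*
 * Splice the pending list onto the active list. Returns the first
 * spliced descriptor when the active list was empty (processing needs
 * to be kicked off), otherwise NULL. Caller must hold chan->lock.
 */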
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

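/*
 * Command completion callback: record any error, retire finished
 * descriptors, and keep issuing commands until the channel runs dry
 * or is paused.
 */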
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

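/* dmaengine tx_submit callback: assign a cookie and queue the
 * descriptor on the channel's pending list
 */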
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

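/* Allocate a zeroed DMA command from the channel's command cache */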
static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

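/* Allocate and initialize a DMA descriptor ready for submission */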
static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_alloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

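/*
 * Build a descriptor that copies @src_sg to @dst_sg as a chain of CCP
 * passthru (no-op transform) commands, one per contiguous stretch of
 * the two scatterlists, and queue it on the channel's pending list.
 */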
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

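/* dmaengine memcpy prep: wrap src/dst in single-entry scatterlists and
 * build a descriptor
 */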
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

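/* dmaengine scatter-gather prep: build a descriptor straight from the
 * supplied scatterlists
 */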
static struct dma_async_tx_descriptor *ccp_prep_dma_sg(
	struct dma_chan *dma_chan, struct scatterlist *dst_sg,
	unsigned int dst_nents, struct scatterlist *src_sg,
	unsigned int src_nents, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	dev_dbg(chan->ccp->dev,
		"%s - src=%p, src_nents=%u dst=%p, dst_nents=%u, flags=%#lx\n",
		__func__, src_sg, src_nents, dst_sg, dst_nents, flags);

	desc = ccp_create_desc(dma_chan, dst_sg, dst_nents, src_sg, src_nents,
			       flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

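/* dmaengine interrupt prep: an empty descriptor used only for its
 * completion callback
 */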
static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

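/* dmaengine issue_pending callback: activate queued descriptors and
 * kick off processing if the channel was idle
 */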
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

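/*
 * dmaengine tx_status callback: report DMA_PAUSED while paused;
 * otherwise take the cookie status, refined by the per-descriptor
 * status if the descriptor is still on the complete list.
 */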
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

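/* dmaengine pause callback: stop issuing new commands to the CCP */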
static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/* TODO: Wait for active DMA to complete before returning? */

	return 0;
}

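/* dmaengine resume callback: mark the channel running and restart the
 * active descriptor, if any
 */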
static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

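/* dmaengine terminate_all callback: discard all active and pending
 * descriptors
 */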
static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/* TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/* TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

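/*
 * Register one DMA channel per CCP command queue with the dmaengine
 * core, backed by slab caches for DMA commands and descriptors.
 */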
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SG, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_sg = ccp_prep_dma_sg;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

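/* Tear down the dmaengine registration and its backing caches */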
void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}