/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>
#include <linux/list.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
struct drv_object {
	struct list_head dev_list;
	struct list_head dev_node_string;
};

/*
 * This is the Device Extension. Named with the prefix
 * DRV_ since it lives in this module.
 */
struct drv_ext {
	struct list_head link;
	char sz_string[MAXREGPATHLENGTH];
};

/* ----------------------------------- Globals */
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
	u32 phys_mem_base;
	u32 phys_mem_size;
	u32 virt_mem_base;
	u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;

/* ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);


/* GPP PROCESS CLEANUP CODE */

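/*
 * GPP = general-purpose processor, i.e. the ARM/MPU side. The routines
 * below tear down the per-process DSP resources (nodes, DMM mappings,
 * streams) recorded in a process context when a GPP process exits.
 */
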
static int drv_proc_free_node_res(int id, void *p, void *data);

/* Allocate and add a node resource element.
 * This function is called from node_allocate(). */
int drv_insert_node_res_element(void *hnode, void *node_resource,
				void *process_ctxt)
{
	struct node_res_object **node_res_obj =
	    (struct node_res_object **)node_resource;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int retval;

	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
	if (!*node_res_obj)
		return -ENOMEM;

	(*node_res_obj)->node = hnode;
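	/*
	 * idr_alloc() with end == 0 means "no upper bound": it returns the
	 * lowest free ID >= start on success, or a negative errno
	 * (-ENOSPC when the ID space is exhausted, -ENOMEM on allocation
	 * failure).
	 */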
	retval = idr_alloc(ctxt->node_id, *node_res_obj, 0, 0, GFP_KERNEL);
	if (retval >= 0) {
		(*node_res_obj)->id = retval;
		return 0;
	}

	kfree(*node_res_obj);

	if (retval == -ENOSPC) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		return -EFAULT;
	} else {
		pr_err("%s: OUT OF MEMORY\n", __func__);
		return -ENOMEM;
	}
}

/* Release all node resources and their context.
 * This performs the actual node deallocation. */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
	struct process_context *ctxt = data;
	int status;
	struct node_res_object *node_res_obj = p;
	u32 node_state;

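	/*
	 * Note: this assumes the node state values are ordered so that
	 * every state at or below NODE_DELETING still owns resources; a
	 * running, paused, or terminating node must be terminated before
	 * it can be deleted.
	 */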
	if (node_res_obj->node_allocated) {
		node_state = node_get_state(node_res_obj->node);
		if (node_state <= NODE_DELETING) {
			if ((node_state == NODE_RUNNING) ||
			    (node_state == NODE_PAUSED) ||
			    (node_state == NODE_TERMINATING))
				node_terminate
				    (node_res_obj->node, &status);

			node_delete(node_res_obj, ctxt);
		}
	}

	return 0;
}

/* Release all mapped and reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	struct dmm_map_object *temp_map, *map_obj;
	struct dmm_rsv_object *temp_rsv, *rsv_obj;

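	/*
	 * The _safe list iterators are used because proc_un_map() and
	 * proc_un_reserve_memory() are expected to unlink the current
	 * entry from the list while we walk it.
	 */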
	/* Free DMM mapped memory resources */
	list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
		status = proc_un_map(ctxt->processor,
				     (void *)map_obj->dsp_addr, ctxt);
		if (status)
			pr_err("%s: proc_un_map failed!"
			       " status = 0x%x\n", __func__, status);
	}

	/* Free DMM reserved memory resources */
	list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
		status = proc_un_reserve_memory(ctxt->processor, (void *)
						rsv_obj->dsp_reserved_addr,
						ctxt);
		if (status)
			pr_err("%s: proc_un_reserve_memory failed!"
			       " status = 0x%x\n", __func__, status);
	}
	return status;
}

/* Update node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	node_res_obj->node_allocated = status;
}

/* Update node heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	node_res_obj->heap_allocated = status;
}

/* Release all node resources and their context.
 * This is called from bridge_release().
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

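	/*
	 * idr_for_each() calls drv_proc_free_node_res() once for every
	 * allocated ID (iteration would stop early on a nonzero return);
	 * idr_destroy() then frees the IDR's internal structures.
	 */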
	idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
	idr_destroy(ctxt->node_id);

	return 0;
}

/* Allocate the STRM resource element.
 * This is called after the actual resource is allocated.
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
				     void *strm_res, void *process_ctxt)
{
	struct strm_res_object **pstrm_res =
	    (struct strm_res_object **)strm_res;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int retval;

	*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
	if (*pstrm_res == NULL)
		return -EFAULT;

	(*pstrm_res)->stream = stream_obj;
	retval = idr_alloc(ctxt->stream_id, *pstrm_res, 0, 0, GFP_KERNEL);
	if (retval >= 0) {
		(*pstrm_res)->id = retval;
		return 0;
	}

	/* Free the element on failure, mirroring the node variant above */
	kfree(*pstrm_res);

	if (retval == -ENOSPC) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		return -EPERM;
	} else {
		pr_err("%s: OUT OF MEMORY\n", __func__);
		return -ENOMEM;
	}
}

static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;
	struct strm_res_object *strm_res = p;
	struct stream_info strm_info;
	struct dsp_streaminfo user;
	u8 **ap_buffer = NULL;
	u8 *buf_ptr;
	u32 ul_bytes;
	u32 dw_arg;
	s32 ul_buf_size;

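	/*
	 * Teardown order: free the buffers allocated to the stream,
	 * reclaim any buffers still queued in the stream, then close it.
	 */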
	if (strm_res->num_bufs) {
		ap_buffer = kmalloc((strm_res->num_bufs *
				     sizeof(u8 *)), GFP_KERNEL);
		if (ap_buffer) {
			strm_free_buffer(strm_res,
					 ap_buffer,
					 strm_res->num_bufs,
					 ctxt);
			kfree(ap_buffer);
		}
	}
	strm_info.user_strm = &user;
	user.number_bufs_in_stream = 0;
	strm_get_info(strm_res->stream, &strm_info, sizeof(strm_info));
	while (user.number_bufs_in_stream--)
		strm_reclaim(strm_res->stream, &buf_ptr, &ul_bytes,
			     (u32 *) &ul_buf_size, &dw_arg);
	strm_close(strm_res, ctxt);
	return 0;
}

/* Release all stream resources and their context.
 * This is called from bridge_release().
 */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
	idr_destroy(ctxt->stream_id);

	return 0;
}

/* Update the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
	int status = 0;
	struct strm_res_object **strm_res =
	    (struct strm_res_object **)strm_resources;

	(*strm_res)->num_bufs = num_bufs;
	return status;
}

/* GPP PROCESS CLEANUP CODE END */

/*
 * ======== drv_create ========
 * Purpose:
 *      DRV object gets created only once during driver loading.
 */
int drv_create(struct drv_object **drv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
	if (pdrv_object) {
		/* Create and initialize the list of device objects */
		INIT_LIST_HEAD(&pdrv_object->dev_list);
		INIT_LIST_HEAD(&pdrv_object->dev_node_string);
	} else {
		status = -ENOMEM;
	}
	/* Store the DRV object in the driver data */
	if (!status) {
		if (drv_datap) {
			drv_datap->drv_object = (void *)pdrv_object;
		} else {
			status = -EPERM;
			pr_err("%s: Failed to store DRV object\n", __func__);
		}
	}

	if (!status) {
		*drv_obj = pdrv_object;
	} else {
		/* Free the DRV object */
		kfree(pdrv_object);
	}

	return status;
}

/*
 * ======== drv_destroy ========
 * Purpose:
 *      Invoked during bridge de-initialization.
 */
int drv_destroy(struct drv_object *driver_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	kfree(pdrv_object);
	/* Clear the DRV object in the driver data */
	if (drv_datap) {
		drv_datap->drv_object = NULL;
	} else {
		status = -EPERM;
		pr_err("%s: Failed to clear DRV object\n", __func__);
	}

	return status;
}

/*
 * ======== drv_get_dev_object ========
 * Purpose:
 *      Given an index, return a handle to the DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
		       struct dev_object **device_obj)
{
	int status = 0;
	struct dev_object *dev_obj;
	u32 i;

	dev_obj = (struct dev_object *)drv_get_first_dev_object();
	for (i = 0; i < index; i++) {
		dev_obj =
		    (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
	}
	if (dev_obj) {
		*device_obj = (struct dev_object *)dev_obj;
	} else {
		*device_obj = NULL;
		status = -EPERM;
	}

	return status;
}

/*
 * ======== drv_get_first_dev_object ========
 * Purpose:
 *      Retrieve the first device object handle from an internal linked
 *      list of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
	u32 dw_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list))
			dw_dev_object = (u32) pdrv_obj->dev_list.next;
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_object;
}

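/*
 * Note: the "handle" returned here (and consumed by the _next variants
 * below) is simply the address of the embedded struct list_head cast to
 * u32, which is why callers can hand it straight back to
 * drv_get_next_dev_object() / drv_get_next_dev_extension().
 */
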
/*
 * ======== drv_get_first_dev_extension ========
 * Purpose:
 *      Retrieve the first device extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			dw_dev_extension =
			    (u32) pdrv_obj->dev_node_string.next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}

/*
 * ======== drv_get_next_dev_object ========
 * Purpose:
 *      Retrieve the next device object handle from an internal linked
 *      list of DEV_OBJECTs maintained by DRV, after having previously
 *      called drv_get_first_dev_object() and zero or more
 *      drv_get_next_dev_object() calls.
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
	u32 dw_next_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_list)) {
			curr = (struct list_head *)hdev_obj;
			if (list_is_last(curr, &pdrv_obj->dev_list))
				return 0;
			dw_next_dev_object = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_next_dev_object;
}

/*
 * ======== drv_get_next_dev_extension ========
 * Purpose:
 *      Retrieve the next device extension from an internal linked list
 *      of pointers to dev_node strings maintained by DRV, after having
 *      previously called drv_get_first_dev_extension() and zero or more
 *      drv_get_next_dev_extension() calls.
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);
	struct list_head *curr;

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if (!list_empty(&pdrv_obj->dev_node_string)) {
			curr = (struct list_head *)dev_extension;
			if (list_is_last(curr, &pdrv_obj->dev_node_string))
				return 0;
			dw_dev_extension = (u32) curr->next;
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}

/*
 * ======== drv_insert_dev_object ========
 * Purpose:
 *      Insert a DevObject into the list maintained by the DRV object.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

	list_add_tail((struct list_head *)hdev_obj, &pdrv_object->dev_list);

	return 0;
}

/*
 * ======== drv_remove_dev_object ========
 * Purpose:
 *      Search for and remove a DeviceObject from the given list of DRV
 *      objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct list_head *cur_elem;

	/* Search the list for hdev_obj: */
	list_for_each(cur_elem, &pdrv_object->dev_list) {
		/* If found, remove it. */
		if ((struct dev_object *)cur_elem == hdev_obj) {
			list_del(cur_elem);
			status = 0;
			break;
		}
	}

	return status;
}

/*
 * ======== drv_request_resources ========
 * Purpose:
 *      Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
	int status = 0;
	struct drv_object *pdrv_object;
	struct drv_ext *pszdev_node;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	/*
	 * Allocate memory to hold the string. It lives until it is freed
	 * in drv_release_resources(). Update the driver object list.
	 */

	if (!drv_datap || !drv_datap->drv_object)
		status = -ENODATA;
	else
		pdrv_object = drv_datap->drv_object;

	if (!status) {
		pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
		if (pszdev_node) {
			strncpy(pszdev_node->sz_string,
				(char *)dw_context, MAXREGPATHLENGTH - 1);
			pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
			/* Update the driver object list */
			*dev_node_strg = (u32) pszdev_node->sz_string;
			list_add_tail(&pszdev_node->link,
				      &pdrv_object->dev_node_string);
		} else {
			status = -ENOMEM;
			*dev_node_strg = 0;
		}
	} else {
		dev_dbg(bridge, "%s: Failed to get Driver Object from Registry",
			__func__);
		*dev_node_strg = 0;
	}

	return status;
}

/*
 * ======== drv_release_resources ========
 * Purpose:
 *      Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
	int status = 0;
	struct drv_ext *pszdev_node;

	/*
	 * Irrespective of the incoming status, go ahead and clean up.
	 */
	for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
	     pszdev_node != NULL; pszdev_node = (struct drv_ext *)
	     drv_get_next_dev_extension((u32) pszdev_node)) {
		if ((u32) pszdev_node == dw_context) {
			/* Found it */
			/* Delete from the driver object list */
			list_del(&pszdev_node->link);
			kfree(pszdev_node);
			break;
		}
	}
	return status;
}

/*
 * ======== request_bridge_resources ========
 * Purpose:
 *      Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
	struct cfg_hostres *host_res = res;

	/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
	host_res->num_mem_windows = 2;

	/* First window is for DSP internal memory */
	dev_dbg(bridge, "mem_base[0] 0x%x\n", host_res->mem_base[0]);
	dev_dbg(bridge, "mem_base[3] 0x%x\n", host_res->mem_base[3]);
	dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

	/* For 24xx, the base port does not map the memory for DSP
	 * internal memory. TODO: do an ioremap here. */
	/* Second window is for DSP external memory shared with MPU */

	/* These are hard-coded values */
	host_res->birq_registers = 0;
	host_res->birq_attrib = 0;
	host_res->offset_for_monitor = 0;
	host_res->chnl_offset = 0;
	/* CHNL_MAXCHANNELS */
	host_res->num_chnls = CHNL_MAXCHANNELS;
	host_res->chnl_buf_size = 0x400;

	return 0;
}

/*
 * ======== drv_request_bridge_res_dsp ========
 * Purpose:
 *      Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
	int status = 0;
	struct cfg_hostres *host_res;
	u32 dw_buff_size;
	u32 dma_addr;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	dw_buff_size = sizeof(struct cfg_hostres);

	host_res = kzalloc(dw_buff_size, GFP_KERNEL);

	if (host_res != NULL) {
		request_bridge_resources(host_res);
		/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
		host_res->num_mem_windows = 4;

		host_res->mem_base[0] = 0;
		host_res->mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
						      OMAP_DSP_MEM1_SIZE);
		host_res->mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
						      OMAP_DSP_MEM2_SIZE);
		host_res->mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
						      OMAP_DSP_MEM3_SIZE);
		host_res->per_base = ioremap(OMAP_PER_CM_BASE,
					     OMAP_PER_CM_SIZE);
		host_res->per_pm_base = ioremap(OMAP_PER_PRM_BASE,
						OMAP_PER_PRM_SIZE);
		host_res->core_pm_base = ioremap(OMAP_CORE_PRM_BASE,
						 OMAP_CORE_PRM_SIZE);
		host_res->dmmu_base = ioremap(OMAP_DMMU_BASE,
					      OMAP_DMMU_SIZE);

		dev_dbg(bridge, "mem_base[0] 0x%x\n",
			host_res->mem_base[0]);
		dev_dbg(bridge, "mem_base[1] 0x%x\n",
			host_res->mem_base[1]);
		dev_dbg(bridge, "mem_base[2] 0x%x\n",
			host_res->mem_base[2]);
		dev_dbg(bridge, "mem_base[3] 0x%x\n",
			host_res->mem_base[3]);
		dev_dbg(bridge, "mem_base[4] 0x%x\n",
			host_res->mem_base[4]);
		dev_dbg(bridge, "dmmu_base %p\n", host_res->dmmu_base);

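		/*
		 * Note: 0x10000 (64 KB) appears to be the minimum usable
		 * SHM size, and 0x100000 (1 MB) the alignment required of
		 * the physically contiguous block; both values are taken
		 * as-is from the call below.
		 */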
		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			/* Allocate physically contiguous,
			 * non-cacheable memory */
			host_res->mem_base[1] =
			    (u32) mem_alloc_phys_mem(shm_size, 0x100000,
						     &dma_addr);
			if (host_res->mem_base[1] == 0) {
				status = -ENOMEM;
				pr_err("shm reservation Failed\n");
			} else {
				host_res->mem_length[1] = shm_size;
				host_res->mem_phys[1] = dma_addr;

				dev_dbg(bridge, "%s: Bridge shm address 0x%x "
					"dma_addr %x size %x\n", __func__,
					host_res->mem_base[1],
					dma_addr, shm_size);
			}
		}
		if (!status) {
			/* These are hard-coded values */
			host_res->birq_registers = 0;
			host_res->birq_attrib = 0;
			host_res->offset_for_monitor = 0;
			host_res->chnl_offset = 0;
			/* CHNL_MAXCHANNELS */
			host_res->num_chnls = CHNL_MAXCHANNELS;
			host_res->chnl_buf_size = 0x400;
			dw_buff_size = sizeof(struct cfg_hostres);
		}
		*phost_resources = host_res;
	}
	/* End Mem alloc */
	return status;
}

void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
	u32 pool_virt_base;

	/* Get the virtual address for the physical memory pool passed */
	pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

	if (!pool_virt_base) {
		pr_err("%s: external physical memory map failed\n", __func__);
		ext_phys_mem_pool_enabled = false;
	} else {
		ext_mem_pool.phys_mem_base = pool_phys_base;
		ext_mem_pool.phys_mem_size = pool_size;
		ext_mem_pool.virt_mem_base = pool_virt_base;
		ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
		ext_phys_mem_pool_enabled = true;
	}
}

void mem_ext_phys_pool_release(void)
{
	if (ext_phys_mem_pool_enabled) {
		iounmap((void *)(ext_mem_pool.virt_mem_base));
		ext_phys_mem_pool_enabled = false;
	}
}

/*
 * ======== mem_ext_phys_mem_alloc ========
 * Purpose:
 *      Allocate physically contiguous, uncached memory from the external
 *      memory pool.
 */
static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
	u32 new_alloc_ptr;
	u32 offset;
	u32 virt_addr;

	if (align == 0)
		align = 1;

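	/*
	 * Bump allocator: round next_phys_alloc_ptr up to the requested
	 * (power-of-two) alignment, then advance it past the allocation.
	 * For example, with next_phys_alloc_ptr = 0x1003 and align = 4,
	 * offset = 0x1003 & 3 = 3, so the pointer is rounded up to
	 * 0x1003 + (4 - 3) = 0x1004.
	 */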
	if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
		     - ext_mem_pool.next_phys_alloc_ptr)) {
		*phys_addr = 0;
		return NULL;
	} else {
		offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
		if (offset == 0)
			new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
		else
			new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
					(align - offset);
		if ((new_alloc_ptr + bytes) <=
		    (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
			/* We can allocate */
			*phys_addr = new_alloc_ptr;
			ext_mem_pool.next_phys_alloc_ptr =
			    new_alloc_ptr + bytes;
			virt_addr =
			    ext_mem_pool.virt_mem_base + (new_alloc_ptr -
							  ext_mem_pool.
							  phys_mem_base);
			return (void *)virt_addr;
		} else {
			*phys_addr = 0;
			return NULL;
		}
	}
}

/*
 * ======== mem_alloc_phys_mem ========
 * Purpose:
 *      Allocate physically contiguous, uncached memory.
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
			 u32 *physical_address)
{
	void *va_mem = NULL;
	dma_addr_t pa_mem;

	if (byte_size > 0) {
		if (ext_phys_mem_pool_enabled) {
			va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
							(u32 *) &pa_mem);
		} else
			va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
						    GFP_KERNEL);
		if (va_mem == NULL)
			*physical_address = 0;
		else
			*physical_address = pa_mem;
	}
	return va_mem;
}

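/*
 * Note: when the external pool is enabled, allocations are carved out of
 * one big ioremap()ed region by the bump allocator above; they are never
 * freed individually, only unmapped wholesale by
 * mem_ext_phys_pool_release(). That is why mem_free_phys_mem() below is
 * a no-op in that case.
 */
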
/*
 * ======== mem_free_phys_mem ========
 * Purpose:
 *      Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
		       u32 byte_size)
{
	if (!ext_phys_mem_pool_enabled)
		dma_free_coherent(NULL, byte_size, virtual_address,
				  physical_address);
}