staging: tidspbridge: Remove cfg_set_dev_object()
[deliverable/linux.git] / drivers / staging / tidspbridge / rmgr / drv.c
/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/cfg.h>
#include <dspbridge/list.h>

/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
struct drv_object {
        struct lst_list *dev_list;
        struct lst_list *dev_node_string;
};

/*
 * This is the Device Extension. Named with the prefix
 * DRV_ since it lives in this module.
 */
struct drv_ext {
        struct list_head link;
        char sz_string[MAXREGPATHLENGTH];
};

/* ----------------------------------- Globals */
static s32 refs;
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
        u32 phys_mem_base;
        u32 phys_mem_size;
        u32 virt_mem_base;
        u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;

/* ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);


/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);

/* Allocate and add a node resource element.
 * This function is called from node_allocate(). */
int drv_insert_node_res_element(void *hnode, void *node_resource,
                                void *process_ctxt)
{
        struct node_res_object **node_res_obj =
            (struct node_res_object **)node_resource;
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int status = 0;
        int retval;

        *node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
        if (!*node_res_obj) {
                status = -ENOMEM;
                goto func_end;
        }

        (*node_res_obj)->hnode = hnode;
        retval = idr_get_new(ctxt->node_id, *node_res_obj,
                             &(*node_res_obj)->id);
        if (retval == -EAGAIN) {
                if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
                        pr_err("%s: OUT OF MEMORY\n", __func__);
                        status = -ENOMEM;
                        goto func_end;
                }

                retval = idr_get_new(ctxt->node_id, *node_res_obj,
                                     &(*node_res_obj)->id);
        }
        if (retval) {
                pr_err("%s: FAILED, IDR is FULL\n", __func__);
                status = -EFAULT;
        }
func_end:
        if (status)
                kfree(*node_res_obj);

        return status;
}
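
/*
 * Illustrative sketch, not part of this driver: the idr_get_new() /
 * idr_pre_get() pairing used above follows the pre-3.9 kernel IDR API,
 * where the caller preloads the tree and retries on -EAGAIN. A minimal
 * helper expressing that retry loop could look like this; the name
 * example_idr_add() is hypothetical.
 */
#if 0
static int example_idr_add(struct idr *idr, void *obj, int *out_id)
{
        int ret;

        do {
                /* Preallocate so idr_get_new() has a node to consume */
                if (!idr_pre_get(idr, GFP_KERNEL))
                        return -ENOMEM;
                ret = idr_get_new(idr, obj, out_id);
        } while (ret == -EAGAIN);

        return ret;     /* 0 on success; *out_id holds the new id */
}
#endif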

/* Release all node resources and their context.
 * Actual node deallocation. */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
        struct process_context *ctxt = data;
        int status;
        struct node_res_object *node_res_obj = p;
        u32 node_state;

        if (node_res_obj->node_allocated) {
                node_state = node_get_state(node_res_obj->hnode);
                if (node_state <= NODE_DELETING) {
                        if ((node_state == NODE_RUNNING) ||
                            (node_state == NODE_PAUSED) ||
                            (node_state == NODE_TERMINATING))
                                node_terminate
                                    (node_res_obj->hnode, &status);

                        node_delete(node_res_obj, ctxt);
                }
        }

        return 0;
}

/* Release all mapped and reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int status = 0;
        struct dmm_map_object *temp_map, *map_obj;
        struct dmm_rsv_object *temp_rsv, *rsv_obj;

        /* Free DMM mapped memory resources */
        list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
                status = proc_un_map(ctxt->hprocessor,
                                     (void *)map_obj->dsp_addr, ctxt);
                if (status)
                        pr_err("%s: proc_un_map failed! status = 0x%x\n",
                               __func__, status);
        }

        /* Free DMM reserved memory resources */
        list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
                status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
                                                rsv_obj->dsp_reserved_addr,
                                                ctxt);
                if (status)
                        pr_err("%s: proc_un_reserve_memory failed! status = 0x%x\n",
                               __func__, status);
        }
        return status;
}
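
/*
 * Illustrative sketch, not part of this driver: list_for_each_entry_safe()
 * is used above because the proc_un_* calls can unlink the current element;
 * the "safe" variant caches the next node before the body runs. Hypothetical
 * example with an assumed struct item { struct list_head link; }:
 */
#if 0
struct item {
        struct list_head link;
};

static void example_drain(struct list_head *head)
{
        struct item *pos, *tmp;

        /* tmp keeps the walk valid even though pos is freed below */
        list_for_each_entry_safe(pos, tmp, head, link) {
                list_del(&pos->link);
                kfree(pos);
        }
}
#endif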

/* Update node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
        struct node_res_object *node_res_obj =
            (struct node_res_object *)node_resource;
        DBC_ASSERT(node_resource != NULL);
        node_res_obj->node_allocated = status;
}

/* Update node heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
        struct node_res_object *node_res_obj =
            (struct node_res_object *)node_resource;
        DBC_ASSERT(node_resource != NULL);
        node_res_obj->heap_allocated = status;
}

/* Release all node resources and their context.
 * This is called from bridge_release().
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;

        idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
        idr_destroy(ctxt->node_id);

        return 0;
}
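
/*
 * Illustrative sketch, not part of this driver: idr_for_each() invokes a
 * callback for every (id, pointer) pair — exactly how
 * drv_proc_free_node_res() is driven above — and idr_destroy() then
 * releases the tree itself. The callback name below is hypothetical.
 */
#if 0
static int example_print_entry(int id, void *p, void *data)
{
        pr_info("id %d -> %p\n", id, p);
        return 0;       /* a non-zero return would abort the walk */
}

static void example_teardown(struct idr *idr)
{
        idr_for_each(idr, example_print_entry, NULL);
        idr_destroy(idr);       /* frees IDR internals, not the objects */
}
#endif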

/* Allocate the STRM resource element.
 * This is called after the actual resource is allocated.
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
                                     void *strm_res, void *process_ctxt)
{
        struct strm_res_object **pstrm_res =
            (struct strm_res_object **)strm_res;
        struct process_context *ctxt = (struct process_context *)process_ctxt;
        int status = 0;
        int retval;

        *pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
        if (*pstrm_res == NULL) {
                status = -ENOMEM;
                goto func_end;
        }

        (*pstrm_res)->hstream = stream_obj;
        retval = idr_get_new(ctxt->stream_id, *pstrm_res,
                             &(*pstrm_res)->id);
        if (retval == -EAGAIN) {
                if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
                        pr_err("%s: OUT OF MEMORY\n", __func__);
                        status = -ENOMEM;
                        goto func_end;
                }

                retval = idr_get_new(ctxt->stream_id, *pstrm_res,
                                     &(*pstrm_res)->id);
        }
        if (retval) {
                pr_err("%s: FAILED, IDR is FULL\n", __func__);
                status = -EPERM;
        }

func_end:
        return status;
}

static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;
        struct strm_res_object *strm_res = p;
        struct stream_info strm_info;
        struct dsp_streaminfo user;
        u8 **ap_buffer = NULL;
        u8 *buf_ptr;
        u32 ul_bytes;
        u32 dw_arg;
        s32 ul_buf_size;

        if (strm_res->num_bufs) {
                ap_buffer = kmalloc((strm_res->num_bufs *
                                     sizeof(u8 *)), GFP_KERNEL);
                if (ap_buffer) {
                        strm_free_buffer(strm_res,
                                         ap_buffer,
                                         strm_res->num_bufs,
                                         ctxt);
                        kfree(ap_buffer);
                }
        }
        strm_info.user_strm = &user;
        user.number_bufs_in_stream = 0;
        strm_get_info(strm_res->hstream, &strm_info, sizeof(strm_info));
        while (user.number_bufs_in_stream--)
                strm_reclaim(strm_res->hstream, &buf_ptr, &ul_bytes,
                             (u32 *) &ul_buf_size, &dw_arg);
        strm_close(strm_res, ctxt);
        return 0;
}

/* Release all stream resources and their context.
 * This is called from bridge_release().
 */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
        struct process_context *ctxt = process_ctxt;

        idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
        idr_destroy(ctxt->stream_id);

        return 0;
}

/* Update the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
        int status = 0;
        struct strm_res_object **strm_res =
            (struct strm_res_object **)strm_resources;

        (*strm_res)->num_bufs = num_bufs;
        return status;
}

/* GPP PROCESS CLEANUP CODE END */

/*
 * ======== drv_create ========
 * Purpose:
 * DRV object gets created only once during driver loading.
 */
int drv_create(struct drv_object **drv_obj)
{
        int status = 0;
        struct drv_object *pdrv_object = NULL;

        DBC_REQUIRE(drv_obj != NULL);
        DBC_REQUIRE(refs > 0);

        pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
        if (pdrv_object) {
                /* Create and initialize the list of device objects */
                pdrv_object->dev_list = kzalloc(sizeof(struct lst_list),
                                                GFP_KERNEL);
                if (pdrv_object->dev_list) {
                        /* Create and initialize the list of device extensions */
                        pdrv_object->dev_node_string =
                            kzalloc(sizeof(struct lst_list), GFP_KERNEL);
                        if (!(pdrv_object->dev_node_string)) {
                                status = -EPERM;
                        } else {
                                INIT_LIST_HEAD(&pdrv_object->
                                               dev_node_string->head);
                                INIT_LIST_HEAD(&pdrv_object->dev_list->head);
                        }
                } else {
                        status = -ENOMEM;
                }
        } else {
                status = -ENOMEM;
        }
        /* Store the DRV object in the registry */
        if (!status)
                status = cfg_set_object((u32) pdrv_object, REG_DRV_OBJECT);
        if (!status) {
                *drv_obj = pdrv_object;
        } else if (pdrv_object) {
                kfree(pdrv_object->dev_list);
                kfree(pdrv_object->dev_node_string);
                /* Free the DRV object */
                kfree(pdrv_object);
        }

        DBC_ENSURE(status || pdrv_object);
        return status;
}

/*
 * ======== drv_exit ========
 * Purpose:
 * Discontinue usage of the DRV module.
 */
void drv_exit(void)
{
        DBC_REQUIRE(refs > 0);

        refs--;

        DBC_ENSURE(refs >= 0);
}

/*
 * ======== drv_destroy ========
 * Purpose:
 * Invoked during bridge de-initialization.
 */
int drv_destroy(struct drv_object *driver_obj)
{
        int status = 0;
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(pdrv_object);

        /*
         * Delete the lists if they exist. We should not get here,
         * as drv_remove_dev_object and the last drv_request_resources
         * remove the lists once they are empty.
         */
        kfree(pdrv_object->dev_list);
        kfree(pdrv_object->dev_node_string);
        kfree(pdrv_object);
        /* Update the DRV object in the registry to be 0 */
        (void)cfg_set_object(0, REG_DRV_OBJECT);

        return status;
}

/*
 * ======== drv_get_dev_object ========
 * Purpose:
 * Given an index, return a handle to the DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
                       struct dev_object **device_obj)
{
        int status = 0;
#ifdef CONFIG_TIDSPBRIDGE_DEBUG
        /* used only for assertions and debug messages */
        struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
#endif
        struct dev_object *dev_obj;
        u32 i;
        DBC_REQUIRE(pdrv_obj);
        DBC_REQUIRE(device_obj != NULL);
        DBC_REQUIRE(index >= 0);
        DBC_REQUIRE(refs > 0);
        DBC_ASSERT(!(LST_IS_EMPTY(pdrv_obj->dev_list)));

        dev_obj = (struct dev_object *)drv_get_first_dev_object();
        for (i = 0; i < index; i++) {
                dev_obj =
                    (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
        }
        if (dev_obj) {
                *device_obj = (struct dev_object *)dev_obj;
        } else {
                *device_obj = NULL;
                status = -EPERM;
        }

        return status;
}
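
/*
 * Illustrative sketch, not part of this driver: drv_get_dev_object() walks
 * the device list by composing drv_get_first_dev_object() with repeated
 * drv_get_next_dev_object() calls. The same pattern, written as a plain
 * loop over all devices (hypothetical helper):
 */
#if 0
static void example_visit_all_devices(void)
{
        u32 obj;

        for (obj = drv_get_first_dev_object(); obj;
             obj = drv_get_next_dev_object(obj))
                pr_info("dev_object at 0x%x\n", obj);
}
#endif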

/*
 * ======== drv_get_first_dev_object ========
 * Purpose:
 * Retrieve the first Device Object handle from an internal linked list
 * of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
        u32 dw_dev_object = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if ((pdrv_obj->dev_list != NULL) &&
                    !LST_IS_EMPTY(pdrv_obj->dev_list))
                        dw_dev_object = (u32) lst_first(pdrv_obj->dev_list);
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_object;
}

/*
 * ======== drv_get_first_dev_extension ========
 * Purpose:
 * Retrieve the first Device Extension from an internal linked list
 * of pointers to dev_node strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
        u32 dw_dev_extension = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if ((pdrv_obj->dev_node_string != NULL) &&
                    !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
                        dw_dev_extension =
                            (u32) lst_first(pdrv_obj->dev_node_string);
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_extension;
}

/*
 * ======== drv_get_next_dev_object ========
 * Purpose:
 * Retrieve the next Device Object handle from an internal linked list
 * of DEV_OBJECTs maintained by DRV, after having previously called
 * drv_get_first_dev_object() and zero or more drv_get_next_dev_object().
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
        u32 dw_next_dev_object = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        DBC_REQUIRE(hdev_obj != 0);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if ((pdrv_obj->dev_list != NULL) &&
                    !LST_IS_EMPTY(pdrv_obj->dev_list)) {
                        dw_next_dev_object = (u32) lst_next(pdrv_obj->dev_list,
                                                            (struct list_head *)
                                                            hdev_obj);
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_next_dev_object;
}

/*
 * ======== drv_get_next_dev_extension ========
 * Purpose:
 * Retrieve the next Device Extension from an internal linked list
 * of pointers to dev_node strings maintained by DRV, after having
 * previously called drv_get_first_dev_extension() and zero or more
 * drv_get_next_dev_extension().
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
        u32 dw_dev_extension = 0;
        struct drv_object *pdrv_obj;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        DBC_REQUIRE(dev_extension != 0);

        if (drv_datap && drv_datap->drv_object) {
                pdrv_obj = drv_datap->drv_object;
                if ((pdrv_obj->dev_node_string != NULL) &&
                    !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
                        dw_dev_extension =
                            (u32) lst_next(pdrv_obj->dev_node_string,
                                           (struct list_head *)dev_extension);
                }
        } else {
                pr_err("%s: Failed to retrieve the object handle\n", __func__);
        }

        return dw_dev_extension;
}

/*
 * ======== drv_init ========
 * Purpose:
 * Initialize DRV module private state.
 */
int drv_init(void)
{
        s32 ret = 1;            /* function return value */

        DBC_REQUIRE(refs >= 0);

        if (ret)
                refs++;

        DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

        return ret;
}

/*
 * ======== drv_insert_dev_object ========
 * Purpose:
 * Insert a DevObject into the DRV object's list of device objects.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
                          struct dev_object *hdev_obj)
{
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(hdev_obj != NULL);
        DBC_REQUIRE(pdrv_object);
        DBC_ASSERT(pdrv_object->dev_list);

        lst_put_tail(pdrv_object->dev_list, (struct list_head *)hdev_obj);

        DBC_ENSURE(!LST_IS_EMPTY(pdrv_object->dev_list));

        return 0;
}
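
/*
 * Illustrative sketch, not part of this driver: judging by the
 * INIT_LIST_HEAD(&...->head) calls in drv_create(), struct lst_list is
 * assumed to be a thin wrapper around a standard list_head, so a
 * lst_put_tail()-style helper would reduce to list_add_tail() on that
 * embedded head. Both names below are hypothetical.
 */
#if 0
struct lst_list_sketch {
        struct list_head head;
};

static inline void lst_put_tail_sketch(struct lst_list_sketch *list,
                                       struct list_head *elem)
{
        list_add_tail(elem, &list->head);
}
#endif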

/*
 * ======== drv_remove_dev_object ========
 * Purpose:
 * Search for and remove a DeviceObject from the given list of DRV
 * objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
                          struct dev_object *hdev_obj)
{
        int status = -EPERM;
        struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
        struct list_head *cur_elem;

        DBC_REQUIRE(refs > 0);
        DBC_REQUIRE(pdrv_object);
        DBC_REQUIRE(hdev_obj != NULL);

        DBC_REQUIRE(pdrv_object->dev_list != NULL);
        DBC_REQUIRE(!LST_IS_EMPTY(pdrv_object->dev_list));

        /* Search the list for hdev_obj: */
        for (cur_elem = lst_first(pdrv_object->dev_list); cur_elem != NULL;
             cur_elem = lst_next(pdrv_object->dev_list, cur_elem)) {
                /* If found, remove it. */
                if ((struct dev_object *)cur_elem == hdev_obj) {
                        lst_remove_elem(pdrv_object->dev_list, cur_elem);
                        status = 0;
                        break;
                }
        }
        /* Remove the list if empty. */
        if (LST_IS_EMPTY(pdrv_object->dev_list)) {
                kfree(pdrv_object->dev_list);
                pdrv_object->dev_list = NULL;
        }
        DBC_ENSURE((pdrv_object->dev_list == NULL) ||
                   !LST_IS_EMPTY(pdrv_object->dev_list));

        return status;
}

/*
 * ======== drv_request_resources ========
 * Purpose:
 * Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
        int status = 0;
        struct drv_object *pdrv_object;
        struct drv_ext *pszdev_node;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        DBC_REQUIRE(dw_context != 0);
        DBC_REQUIRE(dev_node_strg != NULL);

        /*
         * Allocate memory to hold the string. This will live until
         * it is freed in release resources. Update the driver object
         * list.
         */

        if (!drv_datap || !drv_datap->drv_object)
                status = -ENODATA;
        else
                pdrv_object = drv_datap->drv_object;

        if (!status) {
                pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
                if (pszdev_node) {
                        lst_init_elem(&pszdev_node->link);
                        strncpy(pszdev_node->sz_string,
                                (char *)dw_context, MAXREGPATHLENGTH - 1);
                        pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
                        /* Update the driver object list */
                        *dev_node_strg = (u32) pszdev_node->sz_string;
                        lst_put_tail(pdrv_object->dev_node_string,
                                     (struct list_head *)pszdev_node);
                } else {
                        status = -ENOMEM;
                        *dev_node_strg = 0;
                }
        } else {
                dev_dbg(bridge, "%s: Failed to get Driver Object from Registry\n",
                        __func__);
                *dev_node_strg = 0;
        }

        DBC_ENSURE((!status && dev_node_strg != NULL &&
                    !LST_IS_EMPTY(pdrv_object->dev_node_string)) ||
                   (status && *dev_node_strg == 0));

        return status;
}

/*
 * ======== drv_release_resources ========
 * Purpose:
 * Releases resources from the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
        int status = 0;
        struct drv_object *pdrv_object = (struct drv_object *)hdrv_obj;
        struct drv_ext *pszdev_node;

        /*
         * Irrespective of the status, go ahead and clean up.
         * The following may overwrite the status.
         */
        for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
             pszdev_node != NULL; pszdev_node = (struct drv_ext *)
             drv_get_next_dev_extension((u32) pszdev_node)) {
                if (!pdrv_object->dev_node_string) {
                        /* When could this happen? */
                        continue;
                }
                if ((u32) pszdev_node == dw_context) {
                        /* Found it */
                        /* Delete it from the driver object list */
                        lst_remove_elem(pdrv_object->dev_node_string,
                                        (struct list_head *)pszdev_node);
                        kfree((void *)pszdev_node);
                        break;
                }
                /* Delete the list if it is empty */
                if (LST_IS_EMPTY(pdrv_object->dev_node_string)) {
                        kfree(pdrv_object->dev_node_string);
                        pdrv_object->dev_node_string = NULL;
                }
        }
        return status;
}

/*
 * ======== request_bridge_resources ========
 * Purpose:
 * Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
        struct cfg_hostres *host_res = res;

        /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
        host_res->num_mem_windows = 2;

        /* First window is for DSP internal memory */
        host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
        dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
        dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
        dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);

        /* The 24xx base port does not map the memory for DSP
         * internal memory. TODO: do an ioremap here. */
        /* Second window is for DSP external memory shared with MPU */

        /* These are hard-coded values */
        host_res->birq_registers = 0;
        host_res->birq_attrib = 0;
        host_res->dw_offset_for_monitor = 0;
        host_res->dw_chnl_offset = 0;
        /* CHNL_MAXCHANNELS */
        host_res->dw_num_chnls = CHNL_MAXCHANNELS;
        host_res->dw_chnl_buf_size = 0x400;

        return 0;
}

/*
 * ======== drv_request_bridge_res_dsp ========
 * Purpose:
 * Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
        int status = 0;
        struct cfg_hostres *host_res;
        u32 dw_buff_size;
        u32 dma_addr;
        u32 shm_size;
        struct drv_data *drv_datap = dev_get_drvdata(bridge);

        dw_buff_size = sizeof(struct cfg_hostres);

        host_res = kzalloc(dw_buff_size, GFP_KERNEL);

        if (host_res != NULL) {
                request_bridge_resources(host_res);
                /* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
                host_res->num_mem_windows = 4;

                host_res->dw_mem_base[0] = 0;
                host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
                                                         OMAP_DSP_MEM1_SIZE);
                host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
                                                         OMAP_DSP_MEM2_SIZE);
                host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
                                                         OMAP_DSP_MEM3_SIZE);
                host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE,
                                                OMAP_PER_CM_SIZE);
                host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
                                                         OMAP_PER_PRM_SIZE);
                host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
                                                          OMAP_CORE_PRM_SIZE);
                host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
                                                 OMAP_DMMU_SIZE);

                dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
                        host_res->dw_mem_base[0]);
                dev_dbg(bridge, "dw_mem_base[1] 0x%x\n",
                        host_res->dw_mem_base[1]);
                dev_dbg(bridge, "dw_mem_base[2] 0x%x\n",
                        host_res->dw_mem_base[2]);
                dev_dbg(bridge, "dw_mem_base[3] 0x%x\n",
                        host_res->dw_mem_base[3]);
                dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
                        host_res->dw_mem_base[4]);
                dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);

                shm_size = drv_datap->shm_size;
                if (shm_size >= 0x10000) {
                        /* Allocate physically contiguous,
                         * non-cacheable memory */
                        host_res->dw_mem_base[1] =
                            (u32) mem_alloc_phys_mem(shm_size, 0x100000,
                                                     &dma_addr);
                        if (host_res->dw_mem_base[1] == 0) {
                                status = -ENOMEM;
                                pr_err("shm reservation Failed\n");
                        } else {
                                host_res->dw_mem_length[1] = shm_size;
                                host_res->dw_mem_phys[1] = dma_addr;

                                dev_dbg(bridge, "%s: Bridge shm address 0x%x "
                                        "dma_addr %x size %x\n", __func__,
                                        host_res->dw_mem_base[1],
                                        dma_addr, shm_size);
                        }
                }
                if (!status) {
                        /* These are hard-coded values */
                        host_res->birq_registers = 0;
                        host_res->birq_attrib = 0;
                        host_res->dw_offset_for_monitor = 0;
                        host_res->dw_chnl_offset = 0;
                        /* CHNL_MAXCHANNELS */
                        host_res->dw_num_chnls = CHNL_MAXCHANNELS;
                        host_res->dw_chnl_buf_size = 0x400;
                        dw_buff_size = sizeof(struct cfg_hostres);
                }
                *phost_resources = host_res;
        }
        /* End mem alloc */
        return status;
}
838
fb6aabb7 839void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
7d55524d
ORL
840{
841 u32 pool_virt_base;
842
843 /* get the virtual address for the physical memory pool passed */
fb6aabb7 844 pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);
7d55524d
ORL
845
846 if ((void **)pool_virt_base == NULL) {
847 pr_err("%s: external physical memory map failed\n", __func__);
848 ext_phys_mem_pool_enabled = false;
849 } else {
fb6aabb7
RS
850 ext_mem_pool.phys_mem_base = pool_phys_base;
851 ext_mem_pool.phys_mem_size = pool_size;
7d55524d 852 ext_mem_pool.virt_mem_base = pool_virt_base;
fb6aabb7 853 ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
7d55524d
ORL
854 ext_phys_mem_pool_enabled = true;
855 }
856}

void mem_ext_phys_pool_release(void)
{
        if (ext_phys_mem_pool_enabled) {
                iounmap((void *)(ext_mem_pool.virt_mem_base));
                ext_phys_mem_pool_enabled = false;
        }
}
866/*
867 * ======== mem_ext_phys_mem_alloc ========
868 * Purpose:
869 * Allocate physically contiguous, uncached memory from external memory pool
870 */
871
e6bf74f0 872static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 * phys_addr)
7d55524d
ORL
873{
874 u32 new_alloc_ptr;
875 u32 offset;
876 u32 virt_addr;
877
878 if (align == 0)
879 align = 1;
880
881 if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
882 - ext_mem_pool.next_phys_alloc_ptr)) {
13b18c29 883 phys_addr = NULL;
7d55524d
ORL
884 return NULL;
885 } else {
886 offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
887 if (offset == 0)
888 new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
889 else
890 new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
891 (align - offset);
892 if ((new_alloc_ptr + bytes) <=
893 (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
894 /* we can allocate */
13b18c29 895 *phys_addr = new_alloc_ptr;
7d55524d
ORL
896 ext_mem_pool.next_phys_alloc_ptr =
897 new_alloc_ptr + bytes;
898 virt_addr =
899 ext_mem_pool.virt_mem_base + (new_alloc_ptr -
900 ext_mem_pool.
901 phys_mem_base);
902 return (void *)virt_addr;
903 } else {
13b18c29 904 *phys_addr = 0;
7d55524d
ORL
905 return NULL;
906 }
907 }
908}
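
/*
 * Illustrative sketch, not part of this driver: the bump allocator above
 * rounds next_phys_alloc_ptr up to the requested alignment with mask
 * arithmetic, which only works when align is a power of two. The same
 * round-up in isolation (hypothetical helper):
 */
#if 0
static u32 example_align_up(u32 ptr, u32 align /* power of two */)
{
        u32 offset = ptr & (align - 1);

        /* equivalently: (ptr + align - 1) & ~(align - 1) */
        return offset ? ptr + (align - offset) : ptr;
}
#endif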

/*
 * ======== mem_alloc_phys_mem ========
 * Purpose:
 * Allocate physically contiguous, uncached memory.
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
                         u32 *physical_address)
{
        void *va_mem = NULL;
        dma_addr_t pa_mem;

        if (byte_size > 0) {
                if (ext_phys_mem_pool_enabled) {
                        va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
                                                        (u32 *) &pa_mem);
                } else
                        va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
                                                    GFP_KERNEL);
                if (va_mem == NULL)
                        *physical_address = 0;
                else
                        *physical_address = pa_mem;
        }
        return va_mem;
}
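
/*
 * Illustrative sketch, not part of this driver: when the external pool is
 * disabled, mem_alloc_phys_mem() falls back to the coherent DMA API, and
 * mem_free_phys_mem() must release with the matching call and the same
 * size/handle pair. A minimal round trip (hypothetical helper):
 */
#if 0
static void example_coherent_roundtrip(struct device *dev, size_t size)
{
        dma_addr_t handle;
        void *cpu = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);

        if (cpu)
                dma_free_coherent(dev, size, cpu, handle);
}
#endif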

/*
 * ======== mem_free_phys_mem ========
 * Purpose:
 * Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
                       u32 byte_size)
{
        DBC_REQUIRE(virtual_address != NULL);

        if (!ext_phys_mem_pool_enabled)
                dma_free_coherent(NULL, byte_size, virtual_address,
                                  physical_address);
}