/*
 * drv.c
 *
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * DSP/BIOS Bridge resource allocation module.
 *
 * Copyright (C) 2005-2006 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include <linux/types.h>

/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>

/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>

/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>

/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/list.h>

/* ----------------------------------- This */
#include <dspbridge/drv.h>
#include <dspbridge/dev.h>

#include <dspbridge/node.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/nodepriv.h>
#include <dspbridge/dspchnl.h>
#include <dspbridge/resourcecleanup.h>

/* ----------------------------------- Defines, Data Structures, Typedefs */
struct drv_object {
	struct lst_list *dev_list;
	struct lst_list *dev_node_string;
};

/*
 * This is the Device Extension. Named with the prefix
 * DRV_ since it lives in this module.
 */
struct drv_ext {
	struct list_head link;
	char sz_string[MAXREGPATHLENGTH];
};

/* ----------------------------------- Globals */
static s32 refs;
static bool ext_phys_mem_pool_enabled;
struct ext_phys_mem_pool {
	u32 phys_mem_base;
	u32 phys_mem_size;
	u32 virt_mem_base;
	u32 next_phys_alloc_ptr;
};
static struct ext_phys_mem_pool ext_mem_pool;
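
/*
 * The external pool above is managed as a simple bump allocator:
 * next_phys_alloc_ptr only ever moves forward (see
 * mem_ext_phys_mem_alloc() below), individual allocations are never
 * handed back, and the whole window is unmapped in one go by
 * mem_ext_phys_pool_release().
 */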

/* ----------------------------------- Function Prototypes */
static int request_bridge_resources(struct cfg_hostres *res);


/* GPP PROCESS CLEANUP CODE */

static int drv_proc_free_node_res(int id, void *p, void *data);

/* Allocate and add a node resource element.
 * This function is called from node_allocate(). */
int drv_insert_node_res_element(void *hnode, void *node_resource,
				void *process_ctxt)
{
	struct node_res_object **node_res_obj =
	    (struct node_res_object **)node_resource;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*node_res_obj = kzalloc(sizeof(struct node_res_object), GFP_KERNEL);
	if (!*node_res_obj) {
		status = -ENOMEM;
		goto func_end;
	}

	(*node_res_obj)->hnode = hnode;
	retval = idr_get_new(ctxt->node_id, *node_res_obj,
			     &(*node_res_obj)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->node_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->node_id, *node_res_obj,
				     &(*node_res_obj)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EFAULT;
	}
func_end:
	if (status)
		kfree(*node_res_obj);

	return status;
}
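
/*
 * The allocation above follows the legacy idr pattern (the pre-3.9
 * kernel API used throughout this driver): idr_get_new() returns
 * -EAGAIN when no preallocated idr layer is available, idr_pre_get()
 * replenishes the cache, and the call is retried once. A minimal
 * sketch of the pattern, with illustrative names:
 *
 *	int alloc_handle(struct idr *idr, void *ptr, int *id)
 *	{
 *		int ret = idr_get_new(idr, ptr, id);
 *
 *		if (ret == -EAGAIN) {
 *			if (!idr_pre_get(idr, GFP_KERNEL))
 *				return -ENOMEM;
 *			ret = idr_get_new(idr, ptr, id);
 *		}
 *		return ret;
 *	}
 */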

/* Release a single node's resources and delete the node.
 * Actual node de-allocation; called for each id by idr_for_each(). */
static int drv_proc_free_node_res(int id, void *p, void *data)
{
	struct process_context *ctxt = data;
	int status;
	struct node_res_object *node_res_obj = p;
	u32 node_state;

	if (node_res_obj->node_allocated) {
		node_state = node_get_state(node_res_obj->hnode);
		if (node_state <= NODE_DELETING) {
			if ((node_state == NODE_RUNNING) ||
			    (node_state == NODE_PAUSED) ||
			    (node_state == NODE_TERMINATING))
				node_terminate
				    (node_res_obj->hnode, &status);

			node_delete(node_res_obj, ctxt);
		}
	}

	return 0;
}

/* Release all Mapped and Reserved DMM resources */
int drv_remove_all_dmm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	struct dmm_map_object *temp_map, *map_obj;

	/* Free DMM mapped memory resources */
	list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
		status = proc_un_map(ctxt->hprocessor,
				     (void *)map_obj->dsp_addr, ctxt);
		if (status)
			pr_err("%s: proc_un_map failed! status = 0x%x\n",
			       __func__, status);
	}
	return status;
}

/* Update Node allocation status */
void drv_proc_node_update_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	DBC_ASSERT(node_resource != NULL);
	node_res_obj->node_allocated = status;
}

/* Update Node Heap status */
void drv_proc_node_update_heap_status(void *node_resource, s32 status)
{
	struct node_res_object *node_res_obj =
	    (struct node_res_object *)node_resource;
	DBC_ASSERT(node_resource != NULL);
	node_res_obj->heap_allocated = status;
}

/* Release all node resources and their context.
 * This is called from bridge_release.
 */
int drv_remove_all_node_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->node_id, drv_proc_free_node_res, ctxt);
	idr_destroy(ctxt->node_id);

	return 0;
}
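
/*
 * A note on the idr_for_each() contract used above: the callback is
 * invoked once per allocated id, in increasing id order, and iteration
 * stops early if the callback returns non-zero, which is why
 * drv_proc_free_node_res() always returns 0. idr_destroy() then
 * releases the idr's internal layers; ctxt->node_id must not be used
 * afterwards.
 */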

/* Allocate the STRM resource element.
 * This is called after the actual resource is allocated.
 */
int drv_proc_insert_strm_res_element(void *stream_obj,
				     void *strm_res, void *process_ctxt)
{
	struct strm_res_object **pstrm_res =
	    (struct strm_res_object **)strm_res;
	struct process_context *ctxt = (struct process_context *)process_ctxt;
	int status = 0;
	int retval;

	*pstrm_res = kzalloc(sizeof(struct strm_res_object), GFP_KERNEL);
	if (*pstrm_res == NULL) {
		status = -EFAULT;
		goto func_end;
	}

	(*pstrm_res)->hstream = stream_obj;
	retval = idr_get_new(ctxt->stream_id, *pstrm_res,
			     &(*pstrm_res)->id);
	if (retval == -EAGAIN) {
		if (!idr_pre_get(ctxt->stream_id, GFP_KERNEL)) {
			pr_err("%s: OUT OF MEMORY\n", __func__);
			status = -ENOMEM;
			goto func_end;
		}

		retval = idr_get_new(ctxt->stream_id, *pstrm_res,
				     &(*pstrm_res)->id);
	}
	if (retval) {
		pr_err("%s: FAILED, IDR is FULL\n", __func__);
		status = -EPERM;
	}

func_end:
	return status;
}

static int drv_proc_free_strm_res(int id, void *p, void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;
	struct strm_res_object *strm_res = p;
	struct stream_info strm_info;
	struct dsp_streaminfo user;
	u8 **ap_buffer = NULL;
	u8 *buf_ptr;
	u32 ul_bytes;
	u32 dw_arg;
	s32 ul_buf_size;

	if (strm_res->num_bufs) {
		ap_buffer = kmalloc((strm_res->num_bufs *
				     sizeof(u8 *)), GFP_KERNEL);
		if (ap_buffer) {
			strm_free_buffer(strm_res,
					 ap_buffer,
					 strm_res->num_bufs,
					 ctxt);
			kfree(ap_buffer);
		}
	}
	strm_info.user_strm = &user;
	user.number_bufs_in_stream = 0;
	strm_get_info(strm_res->hstream, &strm_info, sizeof(strm_info));
	while (user.number_bufs_in_stream--)
		strm_reclaim(strm_res->hstream, &buf_ptr, &ul_bytes,
			     (u32 *) &ul_buf_size, &dw_arg);
	strm_close(strm_res, ctxt);
	return 0;
}
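
/*
 * Teardown order in drv_proc_free_strm_res(), for reference: buffers
 * still allocated to the stream are freed first via strm_free_buffer(),
 * then strm_get_info() reports how many buffers remain queued on the
 * stream and each one is drained with strm_reclaim() before
 * strm_close() is called. user.number_bufs_in_stream is pre-set to 0,
 * presumably so the drain loop is a no-op if strm_get_info() fails
 * without filling it in.
 */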

/* Release all stream resources and their context.
 * This is called from bridge_release.
 */
int drv_remove_all_strm_res_elements(void *process_ctxt)
{
	struct process_context *ctxt = process_ctxt;

	idr_for_each(ctxt->stream_id, drv_proc_free_strm_res, ctxt);
	idr_destroy(ctxt->stream_id);

	return 0;
}

/* Update the stream resource element */
int drv_proc_update_strm_res(u32 num_bufs, void *strm_resources)
{
	int status = 0;
	struct strm_res_object **strm_res =
	    (struct strm_res_object **)strm_resources;

	(*strm_res)->num_bufs = num_bufs;
	return status;
}

/* GPP PROCESS CLEANUP CODE END */

/*
 * ======== drv_create ========
 * Purpose:
 * DRV Object gets created only once during Driver Loading.
 */
int drv_create(struct drv_object **drv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = NULL;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(drv_obj != NULL);
	DBC_REQUIRE(refs > 0);

	pdrv_object = kzalloc(sizeof(struct drv_object), GFP_KERNEL);
	if (pdrv_object) {
		/* Create and initialize list of device objects */
		pdrv_object->dev_list = kzalloc(sizeof(struct lst_list),
						GFP_KERNEL);
		if (pdrv_object->dev_list) {
			/* Create and initialize list of device extensions */
			pdrv_object->dev_node_string =
			    kzalloc(sizeof(struct lst_list), GFP_KERNEL);
			if (!(pdrv_object->dev_node_string)) {
				status = -EPERM;
			} else {
				INIT_LIST_HEAD(&pdrv_object->
					       dev_node_string->head);
				INIT_LIST_HEAD(&pdrv_object->dev_list->head);
			}
		} else {
			status = -ENOMEM;
		}
	} else {
		status = -ENOMEM;
	}
	/* Store the DRV object in the driver data */
	if (!status) {
		if (drv_datap) {
			drv_datap->drv_object = (void *)pdrv_object;
		} else {
			status = -EPERM;
			pr_err("%s: Failed to store DRV object\n", __func__);
		}
	}

	if (!status) {
		*drv_obj = pdrv_object;
	} else if (pdrv_object) {
		/* pdrv_object is NULL here if its own allocation failed,
		 * in which case there is nothing to free */
		kfree(pdrv_object->dev_list);
		kfree(pdrv_object->dev_node_string);
		/* Free the DRV object */
		kfree(pdrv_object);
	}

	DBC_ENSURE(status || pdrv_object);
	return status;
}
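
/*
 * Module lifecycle, as implied by the DBC contracts in this file (a
 * sketch, not a documented guarantee): drv_init() must raise the
 * reference count before drv_create() runs, and teardown reverses the
 * two steps:
 *
 *	struct drv_object *drv;
 *
 *	if (drv_init()) {
 *		if (!drv_create(&drv)) {
 *			...
 *			drv_destroy(drv);
 *		}
 *		drv_exit();
 *	}
 */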

/*
 * ======== drv_exit ========
 * Purpose:
 * Discontinue usage of the DRV module.
 */
void drv_exit(void)
{
	DBC_REQUIRE(refs > 0);

	refs--;

	DBC_ENSURE(refs >= 0);
}

/*
 * ======== drv_destroy ========
 * Purpose:
 * Invoked during bridge de-initialization.
 */
int drv_destroy(struct drv_object *driver_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pdrv_object);

	/*
	 * Delete the lists if they exist. We should normally not get here,
	 * as drv_remove_dev_object() and the last drv_request_resources()
	 * remove the lists once they are empty.
	 */
	kfree(pdrv_object->dev_list);
	kfree(pdrv_object->dev_node_string);
	kfree(pdrv_object);
	/* Update the DRV object in the driver data */
	if (drv_datap) {
		drv_datap->drv_object = NULL;
	} else {
		status = -EPERM;
		pr_err("%s: Failed to store DRV object\n", __func__);
	}

	return status;
}

/*
 * ======== drv_get_dev_object ========
 * Purpose:
 * Given an index, return a handle to the DevObject from the list.
 */
int drv_get_dev_object(u32 index, struct drv_object *hdrv_obj,
		       struct dev_object **device_obj)
{
	int status = 0;
#ifdef CONFIG_TIDSPBRIDGE_DEBUG
	/* used only for assertions and debug messages */
	struct drv_object *pdrv_obj = (struct drv_object *)hdrv_obj;
#endif
	struct dev_object *dev_obj;
	u32 i;
	DBC_REQUIRE(pdrv_obj);
	DBC_REQUIRE(device_obj != NULL);
	DBC_REQUIRE(index >= 0);
	DBC_REQUIRE(refs > 0);
	DBC_ASSERT(!(LST_IS_EMPTY(pdrv_obj->dev_list)));

	dev_obj = (struct dev_object *)drv_get_first_dev_object();
	for (i = 0; i < index; i++) {
		dev_obj =
		    (struct dev_object *)drv_get_next_dev_object((u32) dev_obj);
	}
	if (dev_obj) {
		*device_obj = (struct dev_object *)dev_obj;
	} else {
		*device_obj = NULL;
		status = -EPERM;
	}

	return status;
}

/*
 * ======== drv_get_first_dev_object ========
 * Purpose:
 * Retrieve the first Device Object handle from an internal linked list
 * of DEV_OBJECTs maintained by DRV.
 */
u32 drv_get_first_dev_object(void)
{
	u32 dw_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if ((pdrv_obj->dev_list != NULL) &&
		    !LST_IS_EMPTY(pdrv_obj->dev_list))
			dw_dev_object = (u32) lst_first(pdrv_obj->dev_list);
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_object;
}

/*
 * ======== drv_get_first_dev_extension ========
 * Purpose:
 * Retrieve the first Device Extension from an internal linked list
 * of pointers to dev_node strings maintained by DRV.
 */
u32 drv_get_first_dev_extension(void)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if ((pdrv_obj->dev_node_string != NULL) &&
		    !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
			dw_dev_extension =
			    (u32) lst_first(pdrv_obj->dev_node_string);
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}

/*
 * ======== drv_get_next_dev_object ========
 * Purpose:
 * Retrieve the next Device Object handle from an internal linked list
 * of DEV_OBJECTs maintained by DRV, after having previously called
 * drv_get_first_dev_object() and zero or more drv_get_next_dev_object().
 */
u32 drv_get_next_dev_object(u32 hdev_obj)
{
	u32 dw_next_dev_object = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(hdev_obj != 0);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if ((pdrv_obj->dev_list != NULL) &&
		    !LST_IS_EMPTY(pdrv_obj->dev_list)) {
			dw_next_dev_object = (u32) lst_next(pdrv_obj->dev_list,
							    (struct list_head *)
							    hdev_obj);
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_next_dev_object;
}

/*
 * ======== drv_get_next_dev_extension ========
 * Purpose:
 * Retrieve the next Device Extension from an internal linked list
 * of pointers to dev_node strings maintained by DRV, after having
 * previously called drv_get_first_dev_extension() and zero or more
 * drv_get_next_dev_extension().
 */
u32 drv_get_next_dev_extension(u32 dev_extension)
{
	u32 dw_dev_extension = 0;
	struct drv_object *pdrv_obj;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(dev_extension != 0);

	if (drv_datap && drv_datap->drv_object) {
		pdrv_obj = drv_datap->drv_object;
		if ((pdrv_obj->dev_node_string != NULL) &&
		    !LST_IS_EMPTY(pdrv_obj->dev_node_string)) {
			dw_dev_extension =
			    (u32) lst_next(pdrv_obj->dev_node_string,
					   (struct list_head *)dev_extension);
		}
	} else {
		pr_err("%s: Failed to retrieve the object handle\n", __func__);
	}

	return dw_dev_extension;
}

/*
 * ======== drv_init ========
 * Purpose:
 * Initialize DRV module private state.
 */
int drv_init(void)
{
	s32 ret = 1;		/* function return value */

	DBC_REQUIRE(refs >= 0);

	if (ret)
		refs++;

	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));

	return ret;
}

/*
 * ======== drv_insert_dev_object ========
 * Purpose:
 * Insert a DevObject into the list of the Manager object.
 */
int drv_insert_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hdev_obj != NULL);
	DBC_REQUIRE(pdrv_object);
	DBC_ASSERT(pdrv_object->dev_list);

	lst_put_tail(pdrv_object->dev_list, (struct list_head *)hdev_obj);

	DBC_ENSURE(!LST_IS_EMPTY(pdrv_object->dev_list));

	return 0;
}

/*
 * ======== drv_remove_dev_object ========
 * Purpose:
 * Search for and remove a DeviceObject from the given list of DRV
 * objects.
 */
int drv_remove_dev_object(struct drv_object *driver_obj,
			  struct dev_object *hdev_obj)
{
	int status = -EPERM;
	struct drv_object *pdrv_object = (struct drv_object *)driver_obj;
	struct list_head *cur_elem;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pdrv_object);
	DBC_REQUIRE(hdev_obj != NULL);

	DBC_REQUIRE(pdrv_object->dev_list != NULL);
	DBC_REQUIRE(!LST_IS_EMPTY(pdrv_object->dev_list));

	/* Search list for hdev_obj: */
	for (cur_elem = lst_first(pdrv_object->dev_list); cur_elem != NULL;
	     cur_elem = lst_next(pdrv_object->dev_list, cur_elem)) {
		/* If found, remove it. */
		if ((struct dev_object *)cur_elem == hdev_obj) {
			lst_remove_elem(pdrv_object->dev_list, cur_elem);
			status = 0;
			break;
		}
	}
	/* Remove list if empty. */
	if (LST_IS_EMPTY(pdrv_object->dev_list)) {
		kfree(pdrv_object->dev_list);
		pdrv_object->dev_list = NULL;
	}
	DBC_ENSURE((pdrv_object->dev_list == NULL) ||
		   !LST_IS_EMPTY(pdrv_object->dev_list));

	return status;
}

/*
 * ======== drv_request_resources ========
 * Purpose:
 * Requests resources from the OS.
 */
int drv_request_resources(u32 dw_context, u32 *dev_node_strg)
{
	int status = 0;
	struct drv_object *pdrv_object;
	struct drv_ext *pszdev_node;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	DBC_REQUIRE(dw_context != 0);
	DBC_REQUIRE(dev_node_strg != NULL);

	/*
	 * Allocate memory to hold the string. This will live until
	 * it is freed in the release resources. Update the driver object
	 * list.
	 */

	if (!drv_datap || !drv_datap->drv_object)
		status = -ENODATA;
	else
		pdrv_object = drv_datap->drv_object;

	if (!status) {
		pszdev_node = kzalloc(sizeof(struct drv_ext), GFP_KERNEL);
		if (pszdev_node) {
			lst_init_elem(&pszdev_node->link);
			strncpy(pszdev_node->sz_string,
				(char *)dw_context, MAXREGPATHLENGTH - 1);
			pszdev_node->sz_string[MAXREGPATHLENGTH - 1] = '\0';
			/* Update the Driver Object List */
			*dev_node_strg = (u32) pszdev_node->sz_string;
			lst_put_tail(pdrv_object->dev_node_string,
				     (struct list_head *)pszdev_node);
		} else {
			status = -ENOMEM;
			*dev_node_strg = 0;
		}
	} else {
		dev_dbg(bridge, "%s: Failed to get Driver Object from Registry\n",
			__func__);
		*dev_node_strg = 0;
	}

	DBC_ENSURE((!status && dev_node_strg != NULL &&
		    !LST_IS_EMPTY(pdrv_object->dev_node_string)) ||
		   (status && *dev_node_strg == 0));

	return status;
}

/*
 * ======== drv_release_resources ========
 * Purpose:
 * Releases resources back to the OS.
 */
int drv_release_resources(u32 dw_context, struct drv_object *hdrv_obj)
{
	int status = 0;
	struct drv_object *pdrv_object = (struct drv_object *)hdrv_obj;
	struct drv_ext *pszdev_node;

	/*
	 * Irrespective of the status, go ahead and clean up.
	 * The following will overwrite the status.
	 */
	for (pszdev_node = (struct drv_ext *)drv_get_first_dev_extension();
	     pszdev_node != NULL; pszdev_node = (struct drv_ext *)
	     drv_get_next_dev_extension((u32) pszdev_node)) {
		if (!pdrv_object->dev_node_string) {
			/* When could this happen? */
			continue;
		}
		if ((u32) pszdev_node == dw_context) {
			/* Found it: delete it from the driver object list */
			lst_remove_elem(pdrv_object->dev_node_string,
					(struct list_head *)pszdev_node);
			kfree((void *)pszdev_node);
			break;
		}
		/* Delete the list if it is empty */
		if (LST_IS_EMPTY(pdrv_object->dev_node_string)) {
			kfree(pdrv_object->dev_node_string);
			pdrv_object->dev_node_string = NULL;
		}
	}
	return status;
}

/*
 * ======== request_bridge_resources ========
 * Purpose:
 * Reserves shared memory for bridge.
 */
static int request_bridge_resources(struct cfg_hostres *res)
{
	struct cfg_hostres *host_res = res;

	/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
	host_res->num_mem_windows = 2;

	/* First window is for DSP internal memory */
	host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
	dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
	dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);

	/* for 24xx, the base port does not map the memory for DSP
	 * internal memory. TODO: do an ioremap here */
	/* Second window is for DSP external memory shared with MPU */

	/* These are hard-coded values */
	host_res->birq_registers = 0;
	host_res->birq_attrib = 0;
	host_res->dw_offset_for_monitor = 0;
	host_res->dw_chnl_offset = 0;
	/* CHNL_MAXCHANNELS */
	host_res->dw_num_chnls = CHNL_MAXCHANNELS;
	host_res->dw_chnl_buf_size = 0x400;

	return 0;
}

/*
 * ======== drv_request_bridge_res_dsp ========
 * Purpose:
 * Reserves shared memory for bridge.
 */
int drv_request_bridge_res_dsp(void **phost_resources)
{
	int status = 0;
	struct cfg_hostres *host_res;
	u32 dw_buff_size;
	u32 dma_addr;
	u32 shm_size;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

	dw_buff_size = sizeof(struct cfg_hostres);

	host_res = kzalloc(dw_buff_size, GFP_KERNEL);

	if (host_res != NULL) {
		request_bridge_resources(host_res);
		/* num_mem_windows must not be more than CFG_MAXMEMREGISTERS */
		host_res->num_mem_windows = 4;

		host_res->dw_mem_base[0] = 0;
		host_res->dw_mem_base[2] = (u32) ioremap(OMAP_DSP_MEM1_BASE,
							 OMAP_DSP_MEM1_SIZE);
		host_res->dw_mem_base[3] = (u32) ioremap(OMAP_DSP_MEM2_BASE,
							 OMAP_DSP_MEM2_SIZE);
		host_res->dw_mem_base[4] = (u32) ioremap(OMAP_DSP_MEM3_BASE,
							 OMAP_DSP_MEM3_SIZE);
		host_res->dw_per_base = ioremap(OMAP_PER_CM_BASE,
						OMAP_PER_CM_SIZE);
		host_res->dw_per_pm_base = (u32) ioremap(OMAP_PER_PRM_BASE,
							 OMAP_PER_PRM_SIZE);
		host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
							  OMAP_CORE_PRM_SIZE);

		dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
			host_res->dw_mem_base[0]);
		dev_dbg(bridge, "dw_mem_base[1] 0x%x\n",
			host_res->dw_mem_base[1]);
		dev_dbg(bridge, "dw_mem_base[2] 0x%x\n",
			host_res->dw_mem_base[2]);
		dev_dbg(bridge, "dw_mem_base[3] 0x%x\n",
			host_res->dw_mem_base[3]);
		dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
			host_res->dw_mem_base[4]);

		shm_size = drv_datap->shm_size;
		if (shm_size >= 0x10000) {
			/* Allocate physically contiguous,
			 * non-cacheable memory */
			host_res->dw_mem_base[1] =
			    (u32) mem_alloc_phys_mem(shm_size, 0x100000,
						     &dma_addr);
			if (host_res->dw_mem_base[1] == 0) {
				status = -ENOMEM;
				pr_err("shm reservation Failed\n");
			} else {
				host_res->dw_mem_length[1] = shm_size;
				host_res->dw_mem_phys[1] = dma_addr;

				dev_dbg(bridge, "%s: Bridge shm address 0x%x "
					"dma_addr %x size %x\n", __func__,
					host_res->dw_mem_base[1],
					dma_addr, shm_size);
			}
		}
		if (!status) {
			/* These are hard-coded values */
			host_res->birq_registers = 0;
			host_res->birq_attrib = 0;
			host_res->dw_offset_for_monitor = 0;
			host_res->dw_chnl_offset = 0;
			/* CHNL_MAXCHANNELS */
			host_res->dw_num_chnls = CHNL_MAXCHANNELS;
			host_res->dw_chnl_buf_size = 0x400;
			dw_buff_size = sizeof(struct cfg_hostres);
		}
		*phost_resources = host_res;
	}
	/* End mem alloc */
	return status;
}

void mem_ext_phys_pool_init(u32 pool_phys_base, u32 pool_size)
{
	u32 pool_virt_base;

	/* get the virtual address for the physical memory pool passed */
	pool_virt_base = (u32) ioremap(pool_phys_base, pool_size);

	if ((void *)pool_virt_base == NULL) {
		pr_err("%s: external physical memory map failed\n", __func__);
		ext_phys_mem_pool_enabled = false;
	} else {
		ext_mem_pool.phys_mem_base = pool_phys_base;
		ext_mem_pool.phys_mem_size = pool_size;
		ext_mem_pool.virt_mem_base = pool_virt_base;
		ext_mem_pool.next_phys_alloc_ptr = pool_phys_base;
		ext_phys_mem_pool_enabled = true;
	}
}

void mem_ext_phys_pool_release(void)
{
	if (ext_phys_mem_pool_enabled) {
		iounmap((void *)(ext_mem_pool.virt_mem_base));
		ext_phys_mem_pool_enabled = false;
	}
}

/*
 * ======== mem_ext_phys_mem_alloc ========
 * Purpose:
 * Allocate physically contiguous, uncached memory from the external
 * memory pool.
 */
static void *mem_ext_phys_mem_alloc(u32 bytes, u32 align, u32 *phys_addr)
{
	u32 new_alloc_ptr;
	u32 offset;
	u32 virt_addr;

	if (align == 0)
		align = 1;

	if (bytes > ((ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)
		     - ext_mem_pool.next_phys_alloc_ptr)) {
		/* report failure through the out parameter, not by
		 * reassigning the local pointer */
		*phys_addr = 0;
		return NULL;
	} else {
		offset = (ext_mem_pool.next_phys_alloc_ptr & (align - 1));
		if (offset == 0)
			new_alloc_ptr = ext_mem_pool.next_phys_alloc_ptr;
		else
			new_alloc_ptr = (ext_mem_pool.next_phys_alloc_ptr) +
			    (align - offset);
		if ((new_alloc_ptr + bytes) <=
		    (ext_mem_pool.phys_mem_base + ext_mem_pool.phys_mem_size)) {
			/* we can allocate */
			*phys_addr = new_alloc_ptr;
			ext_mem_pool.next_phys_alloc_ptr =
			    new_alloc_ptr + bytes;
			virt_addr =
			    ext_mem_pool.virt_mem_base + (new_alloc_ptr -
							  ext_mem_pool.
							  phys_mem_base);
			return (void *)virt_addr;
		} else {
			*phys_addr = 0;
			return NULL;
		}
	}
}
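
/*
 * Worked example of the alignment arithmetic above (the numbers are
 * illustrative): with next_phys_alloc_ptr = 0x87001004 and
 * align = 0x1000, offset = 0x87001004 & 0xfff = 0x4, so the allocation
 * is bumped to 0x87001004 + (0x1000 - 0x4) = 0x87002000, the next
 * 4 KiB boundary. This assumes align is a power of two; the
 * "& (align - 1)" mask would not compute a remainder otherwise.
 */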

/*
 * ======== mem_alloc_phys_mem ========
 * Purpose:
 * Allocate physically contiguous, uncached memory.
 */
void *mem_alloc_phys_mem(u32 byte_size, u32 align_mask,
			 u32 *physical_address)
{
	void *va_mem = NULL;
	dma_addr_t pa_mem;

	if (byte_size > 0) {
		if (ext_phys_mem_pool_enabled) {
			va_mem = mem_ext_phys_mem_alloc(byte_size, align_mask,
							(u32 *) &pa_mem);
		} else
			va_mem = dma_alloc_coherent(NULL, byte_size, &pa_mem,
						    GFP_KERNEL);
		if (va_mem == NULL)
			*physical_address = 0;
		else
			*physical_address = pa_mem;
	}
	return va_mem;
}
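
/*
 * Usage sketch, mirroring the shm reservation in
 * drv_request_bridge_res_dsp() (the size and alignment values are
 * illustrative): the returned virtual address, the physical address
 * and the size must all be passed back to mem_free_phys_mem().
 *
 *	u32 pa;
 *	void *va = mem_alloc_phys_mem(0x100000, 0x100000, &pa);
 *
 *	if (va) {
 *		...
 *		mem_free_phys_mem(va, pa, 0x100000);
 *	}
 */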

/*
 * ======== mem_free_phys_mem ========
 * Purpose:
 * Free the given block of physically contiguous memory.
 */
void mem_free_phys_mem(void *virtual_address, u32 physical_address,
		       u32 byte_size)
{
	DBC_REQUIRE(virtual_address != NULL);

	if (!ext_phys_mem_pool_enabled)
		dma_free_coherent(NULL, byte_size, virtual_address,
				  physical_address);
}