Merge branch 'serge-next-1' of git://git.kernel.org/pub/scm/linux/kernel/git/sergeh...
[deliverable/linux.git] / drivers / staging / tidspbridge / rmgr / node.c
1 /*
2 * node.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP/BIOS Bridge Node Manager.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19 #include <linux/types.h>
20 #include <linux/bitmap.h>
21 #include <linux/list.h>
22
23 /* ----------------------------------- Host OS */
24 #include <dspbridge/host_os.h>
25
26 /* ----------------------------------- DSP/BIOS Bridge */
27 #include <dspbridge/dbdefs.h>
28
29 /* ----------------------------------- OS Adaptation Layer */
30 #include <dspbridge/memdefs.h>
31 #include <dspbridge/proc.h>
32 #include <dspbridge/strm.h>
33 #include <dspbridge/sync.h>
34 #include <dspbridge/ntfy.h>
35
36 /* ----------------------------------- Platform Manager */
37 #include <dspbridge/cmm.h>
38 #include <dspbridge/cod.h>
39 #include <dspbridge/dev.h>
40 #include <dspbridge/msg.h>
41
42 /* ----------------------------------- Resource Manager */
43 #include <dspbridge/dbdcd.h>
44 #include <dspbridge/disp.h>
45 #include <dspbridge/rms_sh.h>
46
47 /* ----------------------------------- Link Driver */
48 #include <dspbridge/dspdefs.h>
49 #include <dspbridge/dspioctl.h>
50
51 /* ----------------------------------- Others */
52 #include <dspbridge/uuidutil.h>
53
54 /* ----------------------------------- This */
55 #include <dspbridge/nodepriv.h>
56 #include <dspbridge/node.h>
57 #include <dspbridge/dmm.h>
58
59 /* Static/Dynamic Loader includes */
60 #include <dspbridge/dbll.h>
61 #include <dspbridge/nldr.h>
62
63 #include <dspbridge/drv.h>
64 #include <dspbridge/resourcecleanup.h>
65 #include <_tiomap.h>
66
67 #include <dspbridge/dspdeh.h>
68
69 #define HOSTPREFIX "/host"
70 #define PIPEPREFIX "/dbpipe"
71
72 #define MAX_INPUTS(h) \
73 ((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
74 #define MAX_OUTPUTS(h) \
75 ((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
76
77 #define NODE_GET_PRIORITY(h) ((h)->prio)
78 #define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
79 #define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
80
81 #define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */
82 #define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
83
84 #define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
85 #define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
86
87 #define MAXDEVNAMELEN 32 /* dsp_ndbprops.ac_name size */
88 #define CREATEPHASE 1
89 #define EXECUTEPHASE 2
90 #define DELETEPHASE 3
91
92 /* Define default STRM parameters */
93 /*
94 * TBD: Put in header file, make global DSP_STRMATTRS with defaults,
95 * or make defaults configurable.
96 */
97 #define DEFAULTBUFSIZE 32
98 #define DEFAULTNBUFS 2
99 #define DEFAULTSEGID 0
100 #define DEFAULTALIGNMENT 0
101 #define DEFAULTTIMEOUT 10000
102
103 #define RMSQUERYSERVER 0
104 #define RMSCONFIGURESERVER 1
105 #define RMSCREATENODE 2
106 #define RMSEXECUTENODE 3
107 #define RMSDELETENODE 4
108 #define RMSCHANGENODEPRIORITY 5
109 #define RMSREADMEMORY 6
110 #define RMSWRITEMEMORY 7
111 #define RMSCOPY 8
112 #define MAXTIMEOUT 2000
113
114 #define NUMRMSFXNS 9
115
116 #define PWR_TIMEOUT 500 /* default PWR timeout in msec */
117
118 #define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Addr */
119
/*
 *  ======== node_mgr ========
 *  Per-device node manager: tracks every node allocated on one DSP,
 *  the pipe/channel resources connecting them, and the dispatcher,
 *  loader and messaging objects needed to run them.
 */
struct node_mgr {
	struct dev_object *dev_obj;	/* Device object */
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	struct dcd_manager *dcd_mgr;	/* Proc/Node data manager */
	struct disp_object *disp_obj;	/* Node dispatcher */
	struct list_head node_list;	/* List of all allocated nodes */
	u32 num_nodes;		/* Number of nodes in node_list */
	u32 num_created;	/* Number of nodes *created* on DSP */
	DECLARE_BITMAP(pipe_map, MAXPIPES);	/* Pipe connection bitmap */
	DECLARE_BITMAP(pipe_done_map, MAXPIPES); /* Pipes that are half free */
	/* Channel allocation bitmap (proc-copy transport) */
	DECLARE_BITMAP(chnl_map, CHNL_MAXCHANNELS);
	/* DMA Channel allocation bitmap */
	DECLARE_BITMAP(dma_chnl_map, CHNL_MAXCHANNELS);
	/* Zero-Copy Channel alloc bitmap */
	DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS);
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	struct mutex node_mgr_lock;	/* For critical sections */
	u32 fxn_addrs[NUMRMSFXNS];	/* RMS function addresses */
	struct msg_mgr *msg_mgr_obj;

	/* Processor properties needed by Node Dispatcher */
	u32 num_chnls;		/* Total number of channels */
	u32 chnl_offset;	/* Offset of chnl ids rsvd for RMS */
	u32 chnl_buf_size;	/* Buffer size for data to RMS */
	int proc_family;	/* eg, 5000 */
	int proc_type;		/* eg, 5510 */
	u32 dsp_word_size;	/* Size of DSP word on host bytes */
	u32 dsp_data_mau_size;	/* Size of DSP data MAU */
	u32 dsp_mau_size;	/* Size of MAU */
	s32 min_pri;		/* Minimum runtime priority for node */
	s32 max_pri;		/* Maximum runtime priority for node */

	struct strm_mgr *strm_mgr_obj;	/* STRM manager */

	/* Loader properties */
	struct nldr_object *nldr_obj;	/* Handle to loader */
	struct node_ldr_fxns nldr_fxns;	/* Handle to loader functions */
};
163
/*
 *  ======== connecttype ========
 *  How one end of a node stream is wired up (assigned in node_connect()).
 */
enum connecttype {
	NOTCONNECTED = 0,	/* Stream end not yet connected */
	NODECONNECT,		/* Connected to another DSP node via a pipe */
	HOSTCONNECT,		/* Connected to the GPP via a channel */
	DEVICECONNECT,		/* Connected to a device node */
};
173
/*
 *  ======== stream_chnl ========
 *  One endpoint of a stream connection plus the pipe/channel id backing it.
 */
struct stream_chnl {
	enum connecttype type;	/* Type of stream connection */
	u32 dev_id;		/* pipe or channel id */
};
181
/*
 *  ======== node_object ========
 *  GPP-side state for a single node allocated on the DSP.
 */
struct node_object {
	struct list_head list_elem;	/* Entry in node_mgr's node_list */
	struct node_mgr *node_mgr;	/* The manager of this node */
	struct proc_object *processor;	/* Back pointer to processor */
	struct dsp_uuid node_uuid;	/* Node's ID */
	s32 prio;		/* Node's current priority */
	u32 timeout;		/* Timeout for blocking NODE calls */
	u32 heap_size;		/* Heap Size */
	u32 dsp_heap_virt_addr;	/* Node heap: DSP-side virtual address */
	u32 gpp_heap_virt_addr;	/* Node heap: GPP-side virtual address */
	enum node_type ntype;	/* Type of node: message, task, etc */
	enum node_state node_state;	/* NODE_ALLOCATED, NODE_CREATED, ... */
	u32 num_inputs;		/* Current number of inputs */
	u32 num_outputs;	/* Current number of outputs */
	u32 max_input_index;	/* Current max input stream index */
	u32 max_output_index;	/* Current max output stream index */
	struct stream_chnl *inputs;	/* Node's input streams */
	struct stream_chnl *outputs;	/* Node's output streams */
	struct node_createargs create_args;	/* Args for node create func */
	nodeenv node_env;	/* Environment returned by RMS */
	struct dcd_genericobj dcd_props;	/* Node properties from DCD */
	struct dsp_cbdata *args;	/* Optional args to pass to node */
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	char *str_dev_name;	/* device name, if device node */
	struct sync_object *sync_done;	/* Synchronize node_terminate */
	s32 exit_status;	/* execute function return status */

	/* Information needed for node_get_attr() */
	void *device_owner;	/* If dev node, task that owns it */
	u32 num_gpp_inputs;	/* Current # of from GPP streams */
	u32 num_gpp_outputs;	/* Current # of to GPP streams */
	/* Current stream connections */
	struct dsp_streamconnect *stream_connect;

	/* Message queue */
	struct msg_queue *msg_queue_obj;

	/* These fields used for SM messaging */
	struct cmm_xlatorobject *xlator;	/* Node's SM addr translator */

	/* Handle to pass to dynamic loader */
	struct nldr_nodeobject *nldr_node_obj;
	bool loaded;		/* Code is (dynamically) loaded */
	bool phase_split;	/* Phases split in many libs or ovly */

};
231
/* Default buffer attributes, used by node_alloc_msg_buf() when the caller
 * passes NULL attrs: single SM segment (id 1), no alignment constraint. */
static struct dsp_bufferattr node_dfltbufattrs = {
	.cb_struct = 0,
	.segment_id = 1,
	.buf_alignment = 0,
};
238
239 static void delete_node(struct node_object *hnode,
240 struct process_context *pr_ctxt);
241 static void delete_node_mgr(struct node_mgr *hnode_mgr);
242 static void fill_stream_connect(struct node_object *node1,
243 struct node_object *node2, u32 stream1,
244 u32 stream2);
245 static void fill_stream_def(struct node_object *hnode,
246 struct node_strmdef *pstrm_def,
247 struct dsp_strmattr *pattrs);
248 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
249 static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr,
250 u32 phase);
251 static int get_node_props(struct dcd_manager *hdcd_mgr,
252 struct node_object *hnode,
253 const struct dsp_uuid *node_uuid,
254 struct dcd_genericobj *dcd_prop);
255 static int get_proc_props(struct node_mgr *hnode_mgr,
256 struct dev_object *hdev_obj);
257 static int get_rms_fxns(struct node_mgr *hnode_mgr);
258 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
259 u32 ul_num_bytes, u32 mem_space);
260 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
261 u32 ul_num_bytes, u32 mem_space);
262
263 /* Dynamic loader functions. */
264 static struct node_ldr_fxns nldr_fxns = {
265 nldr_allocate,
266 nldr_create,
267 nldr_delete,
268 nldr_get_fxn_addr,
269 nldr_load,
270 nldr_unload,
271 };
272
273 enum node_state node_get_state(void *hnode)
274 {
275 struct node_object *pnode = (struct node_object *)hnode;
276 if (!pnode)
277 return -1;
278 return pnode->node_state;
279 }
280
/*
 *  ======== node_allocate ========
 *  Purpose:
 *      Allocate GPP resources to manage a node on the DSP.
 *  Parameters:
 *      hprocessor: Processor the node will run on.
 *      node_uuid:  UUID identifying the node in the DCD database.
 *      pargs:      Optional create-phase argument blob, copied for the node.
 *      attr_in:    Optional attributes: priority/timeout overrides and an
 *                  optional user-supplied heap (must be 4K-size-aligned).
 *      noderes:    Out: resource object wrapping the new node (NULL on fail).
 *      pr_ctxt:    Process context used for resource tracking/cleanup.
 *  Returns:
 *      0 on success, negative errno on failure; on failure the partially
 *      built node is torn down via delete_node().
 */
int node_allocate(struct proc_object *hprocessor,
		  const struct dsp_uuid *node_uuid,
		  const struct dsp_cbdata *pargs,
		  const struct dsp_nodeattrin *attr_in,
		  struct node_res_object **noderes,
		  struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_val;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif

	void *node_res;

	*noderes = NULL;

	status = proc_get_processor_id(hprocessor, &proc_id);

	/* NOTE(review): proc_id is tested without checking 'status' first;
	 * if proc_get_processor_id() failed without writing proc_id, this
	 * reads an uninitialized variable — confirm and add a status check. */
	if (proc_id != DSP_UNIT)
		goto func_end;

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (!status) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;

	}

	if (status)
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt
	   to send the message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address */
	if (hnode_mgr->fxn_addrs[0] == 0) {
		/* No RMS on target - we currently can't handle this */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if attr_in->prio is within range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}
	/* Allocate node object and fill in */
	if (status)
		goto func_end;

	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->node_mgr = hnode_mgr;
	/* This critical section protects get_node_props */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid,
				&(pnode->dcd_props));
	if (status)
		goto func_cont;

	pnode->node_uuid = *node_uuid;
	pnode->processor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->timeout = pnode->dcd_props.obj_data.node_obj.ndb_props.timeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only C64 DSP builds support Node Dynamic * heaps */
	/* Allocate memory for node heap */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.dsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.dsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.gpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user allocated node heap */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* check for page aligned Heap size */
	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x\n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.gpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (status)
		goto func_cont;

	/* Reserve DSP virtual space for the heap plus one guard page */
	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
						task_arg_obj.dsp_heap_res_addr),
				     pr_ctxt);
	if (status) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	/* Map the user heap into the reserved DSP virtual range */
	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  dsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (status)
		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.dsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of NBD properties */
		pnode->timeout = attr_in->timeout;
		pnode->prio = attr_in->prio;
	}
	/* Create object to manage notifications */
	if (!status) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					  GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		node_type = node_get_type(pnode);
		/* Allocate dsp_streamconnect array for device, task, and
		 * dais socket nodes. */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			/* kzalloc(0, ...) may return a non-NULL token, so
			 * only treat NULL as failure when streams exist */
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;

		}
		if (!status && (node_type == NODE_TASK ||
				node_type == NODE_DAISSOCKET)) {
			/* Allocate arrays for maintainig stream connections */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct node_strmdef),
					GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct node_strmdef),
					GFP_KERNEL);
			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
						       ptask_args->strm_in_def
						       == NULL))
			    || (MAX_OUTPUTS(pnode) > 0
				&& (pnode->outputs == NULL
				    || ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (!status && (node_type != NODE_DEVICE)) {
		/* Create an event that will be posted when RMS_EXIT is
		 * received. */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
					   GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (!status) {
			/*Get the shared mem mgr for this nodes dev object */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (!status) {
				/* Allocate a SM addr translator for this node
				 * w/ deflt attr */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (!status) {
			/* Fill in message args */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
							   GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (!status && node_type != NODE_DEVICE) {
		/* Create a message queue for this node */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->msg_create_queue) (hnode_mgr->msg_mgr_obj,
						    &pnode->msg_queue_obj,
						    0,
						    pnode->create_args.asa.
						    node_msg_args.max_msgs,
						    pnode);
	}

	if (!status) {
		/* Create object for dynamic loading */

		status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj,
						       (void *)pnode,
						       &pnode->dcd_props.
						       obj_data.node_obj,
						       &pnode->
						       nldr_node_obj,
						       &pnode->phase_split);
	}

	/* Compare value read from Node Properties and check if it is same as
	 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
	 * GPP Address, Read the value in that address and override the
	 * stack_seg value in task args */
	if (!status &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			void __iomem *stack_seg;
			u32 stack_seg_pa;

			/* DYNEXT_BEG / L1DSRAM_HEAP are DSP symbols resolved
			 * via the dynamic loader; their difference gives the
			 * heap's offset into the mem_phys[1] region */
			status =
			    hnode_mgr->nldr_fxns.
			    get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					 &dynext_base);
			if (status)
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    get_fxn_addr(pnode->nldr_node_obj,
					 "L1DSRAM_HEAP", &pul_value);

			if (status)
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			if (status) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			/* NOTE(review): this goto func_end path skips the
			 * delete_node() cleanup below, so the node allocated
			 * above appears to leak on this error — confirm. */

			off_set = pul_value - dynext_base;
			stack_seg_pa = host_res->mem_phys[1] + off_set;
			stack_seg = ioremap(stack_seg_pa, SZ_32);
			if (!stack_seg) {
				status = -ENOMEM;
				goto func_end;
			}

			/* Read the stack segment value stored on the DSP */
			ul_stack_seg_val = readl(stack_seg);

			iounmap(stack_seg);

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				host_res->mem_base[1] + off_set);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;

		}
	}

	if (!status) {
		/* Add the node to the node manager's list of allocated
		 * nodes. */
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);

		list_add_tail(&pnode->list_elem, &hnode_mgr->node_list);
		++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* Preset this to assume phases are split
		 * (for overlay and dll) */
		pnode->phase_split = true;

		/* Notify all clients registered for DSP_NODESTATECHANGE. */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup */
		if (pnode)
			delete_node(pnode, pr_ctxt);

	}

	if (!status) {
		/* Register the node with the process-context resource
		 * tracker; undo everything if that fails */
		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
		if (status) {
			delete_node(pnode, pr_ctxt);
			goto func_end;
		}

		*noderes = (struct node_res_object *)node_res;
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
		"node_res: %p status: 0x%x\n", __func__, hprocessor,
		node_uuid, pargs, attr_in, noderes, status);
	return status;
}
678
679 /*
680 * ======== node_alloc_msg_buf ========
681 * Purpose:
682 * Allocates buffer for zero copy messaging.
683 */
684 DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
685 struct dsp_bufferattr *pattr,
686 u8 **pbuffer)
687 {
688 struct node_object *pnode = (struct node_object *)hnode;
689 int status = 0;
690 bool va_flag = false;
691 bool set_info;
692 u32 proc_id;
693
694 if (!pnode)
695 status = -EFAULT;
696 else if (node_get_type(pnode) == NODE_DEVICE)
697 status = -EPERM;
698
699 if (status)
700 goto func_end;
701
702 if (pattr == NULL)
703 pattr = &node_dfltbufattrs; /* set defaults */
704
705 status = proc_get_processor_id(pnode->processor, &proc_id);
706 if (proc_id != DSP_UNIT)
707 goto func_end;
708
709 /* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
710 * virt address, so set this info in this node's translator
711 * object for future ref. If MEM_GETVIRTUALSEGID then retrieve
712 * virtual address from node's translator. */
713 if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
714 (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
715 va_flag = true;
716 set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
717 true : false;
718 /* Clear mask bits */
719 pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
720 /* Set/get this node's translators virtual address base/size */
721 status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
722 pattr->segment_id, set_info);
723 }
724 if (!status && (!va_flag)) {
725 if (pattr->segment_id != 1) {
726 /* Node supports single SM segment only. */
727 status = -EBADR;
728 }
729 /* Arbitrary SM buffer alignment not supported for host side
730 * allocs, but guaranteed for the following alignment
731 * values. */
732 switch (pattr->buf_alignment) {
733 case 0:
734 case 1:
735 case 2:
736 case 4:
737 break;
738 default:
739 /* alignment value not supportted */
740 status = -EPERM;
741 break;
742 }
743 if (!status) {
744 /* allocate physical buffer from seg_id in node's
745 * translator */
746 (void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
747 usize);
748 if (*pbuffer == NULL) {
749 pr_err("%s: error - Out of shared memory\n",
750 __func__);
751 status = -ENOMEM;
752 }
753 }
754 }
755 func_end:
756 return status;
757 }
758
759 /*
760 * ======== node_change_priority ========
761 * Purpose:
762 * Change the priority of a node in the allocated state, or that is
763 * currently running or paused on the target.
764 */
765 int node_change_priority(struct node_object *hnode, s32 prio)
766 {
767 struct node_object *pnode = (struct node_object *)hnode;
768 struct node_mgr *hnode_mgr = NULL;
769 enum node_type node_type;
770 enum node_state state;
771 int status = 0;
772 u32 proc_id;
773
774 if (!hnode || !hnode->node_mgr) {
775 status = -EFAULT;
776 } else {
777 hnode_mgr = hnode->node_mgr;
778 node_type = node_get_type(hnode);
779 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
780 status = -EPERM;
781 else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
782 status = -EDOM;
783 }
784 if (status)
785 goto func_end;
786
787 /* Enter critical section */
788 mutex_lock(&hnode_mgr->node_mgr_lock);
789
790 state = node_get_state(hnode);
791 if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
792 NODE_SET_PRIORITY(hnode, prio);
793 } else {
794 if (state != NODE_RUNNING) {
795 status = -EBADR;
796 goto func_cont;
797 }
798 status = proc_get_processor_id(pnode->processor, &proc_id);
799 if (proc_id == DSP_UNIT) {
800 status =
801 disp_node_change_priority(hnode_mgr->disp_obj,
802 hnode,
803 hnode_mgr->fxn_addrs
804 [RMSCHANGENODEPRIORITY],
805 hnode->node_env, prio);
806 }
807 if (status >= 0)
808 NODE_SET_PRIORITY(hnode, prio);
809
810 }
811 func_cont:
812 /* Leave critical section */
813 mutex_unlock(&hnode_mgr->node_mgr_lock);
814 func_end:
815 return status;
816 }
817
818 /*
819 * ======== node_connect ========
820 * Purpose:
821 * Connect two nodes on the DSP, or a node on the DSP to the GPP.
822 */
823 int node_connect(struct node_object *node1, u32 stream1,
824 struct node_object *node2,
825 u32 stream2, struct dsp_strmattr *pattrs,
826 struct dsp_cbdata *conn_param)
827 {
828 struct node_mgr *hnode_mgr;
829 char *pstr_dev_name = NULL;
830 enum node_type node1_type = NODE_TASK;
831 enum node_type node2_type = NODE_TASK;
832 enum dsp_strmmode strm_mode;
833 struct node_strmdef *pstrm_def;
834 struct node_strmdef *input = NULL;
835 struct node_strmdef *output = NULL;
836 struct node_object *dev_node_obj;
837 struct node_object *hnode;
838 struct stream_chnl *pstream;
839 u32 pipe_id;
840 u32 chnl_id;
841 s8 chnl_mode;
842 u32 dw_length;
843 int status = 0;
844
845 if (!node1 || !node2)
846 return -EFAULT;
847
848 /* The two nodes must be on the same processor */
849 if (node1 != (struct node_object *)DSP_HGPPNODE &&
850 node2 != (struct node_object *)DSP_HGPPNODE &&
851 node1->node_mgr != node2->node_mgr)
852 return -EPERM;
853
854 /* Cannot connect a node to itself */
855 if (node1 == node2)
856 return -EPERM;
857
858 /* node_get_type() will return NODE_GPP if hnode = DSP_HGPPNODE. */
859 node1_type = node_get_type(node1);
860 node2_type = node_get_type(node2);
861 /* Check stream indices ranges */
862 if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
863 stream1 >= MAX_OUTPUTS(node1)) ||
864 (node2_type != NODE_GPP && node2_type != NODE_DEVICE &&
865 stream2 >= MAX_INPUTS(node2)))
866 return -EINVAL;
867
868 /*
869 * Only the following types of connections are allowed:
870 * task/dais socket < == > task/dais socket
871 * task/dais socket < == > device
872 * task/dais socket < == > GPP
873 *
874 * ie, no message nodes, and at least one task or dais
875 * socket node.
876 */
877 if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
878 (node1_type != NODE_TASK &&
879 node1_type != NODE_DAISSOCKET &&
880 node2_type != NODE_TASK &&
881 node2_type != NODE_DAISSOCKET))
882 return -EPERM;
883 /*
884 * Check stream mode. Default is STRMMODE_PROCCOPY.
885 */
886 if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
887 return -EPERM; /* illegal stream mode */
888
889 if (node1_type != NODE_GPP)
890 hnode_mgr = node1->node_mgr;
891 else
892 hnode_mgr = node2->node_mgr;
893
894 /* Enter critical section */
895 mutex_lock(&hnode_mgr->node_mgr_lock);
896
897 /* Nodes must be in the allocated state */
898 if (node1_type != NODE_GPP &&
899 node_get_state(node1) != NODE_ALLOCATED) {
900 status = -EBADR;
901 goto out_unlock;
902 }
903
904 if (node2_type != NODE_GPP &&
905 node_get_state(node2) != NODE_ALLOCATED) {
906 status = -EBADR;
907 goto out_unlock;
908 }
909
910 /*
911 * Check that stream indices for task and dais socket nodes
912 * are not already be used. (Device nodes checked later)
913 */
914 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
915 output = &(node1->create_args.asa.
916 task_arg_obj.strm_out_def[stream1]);
917 if (output->sz_device) {
918 status = -EISCONN;
919 goto out_unlock;
920 }
921
922 }
923 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
924 input = &(node2->create_args.asa.
925 task_arg_obj.strm_in_def[stream2]);
926 if (input->sz_device) {
927 status = -EISCONN;
928 goto out_unlock;
929 }
930
931 }
932 /* Connecting two task nodes? */
933 if ((node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) &&
934 (node2_type == NODE_TASK ||
935 node2_type == NODE_DAISSOCKET)) {
936 /* Find available pipe */
937 pipe_id = find_first_zero_bit(hnode_mgr->pipe_map, MAXPIPES);
938 if (pipe_id == MAXPIPES) {
939 status = -ECONNREFUSED;
940 goto out_unlock;
941 }
942 set_bit(pipe_id, hnode_mgr->pipe_map);
943 node1->outputs[stream1].type = NODECONNECT;
944 node2->inputs[stream2].type = NODECONNECT;
945 node1->outputs[stream1].dev_id = pipe_id;
946 node2->inputs[stream2].dev_id = pipe_id;
947 output->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
948 input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
949 if (!output->sz_device || !input->sz_device) {
950 /* Undo the connection */
951 kfree(output->sz_device);
952 kfree(input->sz_device);
953 clear_bit(pipe_id, hnode_mgr->pipe_map);
954 status = -ENOMEM;
955 goto out_unlock;
956 }
957 /* Copy "/dbpipe<pipId>" name to device names */
958 sprintf(output->sz_device, "%s%d", PIPEPREFIX, pipe_id);
959 strcpy(input->sz_device, output->sz_device);
960 }
961 /* Connecting task node to host? */
962 if (node1_type == NODE_GPP || node2_type == NODE_GPP) {
963 pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
964 if (!pstr_dev_name) {
965 status = -ENOMEM;
966 goto out_unlock;
967 }
968
969 chnl_mode = (node1_type == NODE_GPP) ?
970 CHNL_MODETODSP : CHNL_MODEFROMDSP;
971
972 /*
973 * Reserve a channel id. We need to put the name "/host<id>"
974 * in the node's create_args, but the host
975 * side channel will not be opened until DSPStream_Open is
976 * called for this node.
977 */
978 strm_mode = pattrs ? pattrs->strm_mode : STRMMODE_PROCCOPY;
979 switch (strm_mode) {
980 case STRMMODE_RDMA:
981 chnl_id = find_first_zero_bit(hnode_mgr->dma_chnl_map,
982 CHNL_MAXCHANNELS);
983 if (chnl_id < CHNL_MAXCHANNELS) {
984 set_bit(chnl_id, hnode_mgr->dma_chnl_map);
985 /* dma chans are 2nd transport chnl set
986 * ids(e.g. 16-31) */
987 chnl_id = chnl_id + hnode_mgr->num_chnls;
988 }
989 break;
990 case STRMMODE_ZEROCOPY:
991 chnl_id = find_first_zero_bit(hnode_mgr->zc_chnl_map,
992 CHNL_MAXCHANNELS);
993 if (chnl_id < CHNL_MAXCHANNELS) {
994 set_bit(chnl_id, hnode_mgr->zc_chnl_map);
995 /* zero-copy chans are 3nd transport set
996 * (e.g. 32-47) */
997 chnl_id = chnl_id +
998 (2 * hnode_mgr->num_chnls);
999 }
1000 break;
1001 case STRMMODE_PROCCOPY:
1002 chnl_id = find_first_zero_bit(hnode_mgr->chnl_map,
1003 CHNL_MAXCHANNELS);
1004 if (chnl_id < CHNL_MAXCHANNELS)
1005 set_bit(chnl_id, hnode_mgr->chnl_map);
1006 break;
1007 default:
1008 status = -EINVAL;
1009 goto out_unlock;
1010 }
1011 if (chnl_id == CHNL_MAXCHANNELS) {
1012 status = -ECONNREFUSED;
1013 goto out_unlock;
1014 }
1015
1016 if (node1 == (struct node_object *)DSP_HGPPNODE) {
1017 node2->inputs[stream2].type = HOSTCONNECT;
1018 node2->inputs[stream2].dev_id = chnl_id;
1019 input->sz_device = pstr_dev_name;
1020 } else {
1021 node1->outputs[stream1].type = HOSTCONNECT;
1022 node1->outputs[stream1].dev_id = chnl_id;
1023 output->sz_device = pstr_dev_name;
1024 }
1025 sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
1026 }
1027 /* Connecting task node to device node? */
1028 if ((node1_type == NODE_DEVICE) || (node2_type == NODE_DEVICE)) {
1029 if (node2_type == NODE_DEVICE) {
1030 /* node1 == > device */
1031 dev_node_obj = node2;
1032 hnode = node1;
1033 pstream = &(node1->outputs[stream1]);
1034 pstrm_def = output;
1035 } else {
1036 /* device == > node2 */
1037 dev_node_obj = node1;
1038 hnode = node2;
1039 pstream = &(node2->inputs[stream2]);
1040 pstrm_def = input;
1041 }
1042 /* Set up create args */
1043 pstream->type = DEVICECONNECT;
1044 dw_length = strlen(dev_node_obj->str_dev_name);
1045 if (conn_param)
1046 pstrm_def->sz_device = kzalloc(dw_length + 1 +
1047 conn_param->cb_data,
1048 GFP_KERNEL);
1049 else
1050 pstrm_def->sz_device = kzalloc(dw_length + 1,
1051 GFP_KERNEL);
1052 if (!pstrm_def->sz_device) {
1053 status = -ENOMEM;
1054 goto out_unlock;
1055 }
1056 /* Copy device name */
1057 strncpy(pstrm_def->sz_device,
1058 dev_node_obj->str_dev_name, dw_length);
1059 if (conn_param)
1060 strncat(pstrm_def->sz_device,
1061 (char *)conn_param->node_data,
1062 (u32) conn_param->cb_data);
1063 dev_node_obj->device_owner = hnode;
1064 }
1065 /* Fill in create args */
1066 if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
1067 node1->create_args.asa.task_arg_obj.num_outputs++;
1068 fill_stream_def(node1, output, pattrs);
1069 }
1070 if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
1071 node2->create_args.asa.task_arg_obj.num_inputs++;
1072 fill_stream_def(node2, input, pattrs);
1073 }
1074 /* Update node1 and node2 stream_connect */
1075 if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
1076 node1->num_outputs++;
1077 if (stream1 > node1->max_output_index)
1078 node1->max_output_index = stream1;
1079
1080 }
1081 if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
1082 node2->num_inputs++;
1083 if (stream2 > node2->max_input_index)
1084 node2->max_input_index = stream2;
1085
1086 }
1087 fill_stream_connect(node1, node2, stream1, stream2);
1088 /* end of sync_enter_cs */
1089 /* Exit critical section */
1090 out_unlock:
1091 if (status && pstr_dev_name)
1092 kfree(pstr_dev_name);
1093 mutex_unlock(&hnode_mgr->node_mgr_lock);
1094 dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
1095 "pattrs: %p status: 0x%x\n", __func__, node1,
1096 stream1, node2, stream2, pattrs, status);
1097 return status;
1098 }
1099
1100 /*
1101 * ======== node_create ========
1102 * Purpose:
1103 * Create a node on the DSP by remotely calling the node's create function.
1104 */
/*
 * Dispatch the remote create-phase call for an allocated node.
 * Returns 0 on success or a negative errno: -EFAULT for a NULL node,
 * -EPERM if the processor is in the error state, -EBADR if the node is
 * not in NODE_ALLOCATED state, -ENOTCONN if the node's streams are not
 * fully connected; loader/dispatcher errors are propagated.
 * On success the node moves to NODE_CREATED and clients are notified.
 */
int node_create(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 ul_create_fxn;	/* DSP address of the node's create function */
	enum node_type node_type;
	int status = 0;
	int status1 = 0;	/* unload result: logged, never returned */
	struct dsp_cbdata cb_data;
	u32 proc_id = 255;	/* sentinel: not DSP_UNIT, not IVA_UNIT */
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to create
	   new node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR calls */
	/* NOTE(review): cb_data is initialized but not referenced again in
	 * this function — presumably leftover from removed PWR calls;
	 * confirm before deleting. */
	cb_data.cb_data = PWR_TIMEOUT;
	node_type = node_get_type(hnode);
	hnode_mgr = hnode->node_mgr;
	intf_fxns = hnode_mgr->intf_fxns;
	/* Get access to node dispatcher */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Check node state */
	if (node_get_state(hnode) != NODE_ALLOCATED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);

	if (status)
		goto func_cont2;

	/* Remote creation is only performed for the DSP; other targets
	 * fall through to the state update below. */
	if (proc_id != DSP_UNIT)
		goto func_cont2;

	/* Make sure streams are properly connected */
	if ((hnode->num_inputs && hnode->max_input_index >
	     hnode->num_inputs - 1) ||
	    (hnode->num_outputs && hnode->max_output_index >
	     hnode->num_outputs - 1))
		status = -ENOTCONN;

	if (!status) {
		/* If node's create function is not loaded, load it */
		/* Boost the OPP level to max level that DSP can be requested */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
#endif
		status = hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
						   NLDR_CREATE);
		/* Get address of node's create function */
		if (!status) {
			hnode->loaded = true;
			if (node_type != NODE_DEVICE) {
				status = get_fxn_address(hnode, &ul_create_fxn,
							 CREATEPHASE);
			}
		} else {
			pr_err("%s: failed to load create code: 0x%x\n",
			       __func__, status);
		}
		/* Request the lowest OPP level */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
		/* Get address of iAlg functions, if socket node */
		if (!status) {
			if (node_type == NODE_DAISSOCKET) {
				status = hnode_mgr->nldr_fxns.get_fxn_addr
				    (hnode->nldr_node_obj,
				     hnode->dcd_props.obj_data.node_obj.
				     str_i_alg_name,
				     &hnode->create_args.asa.
				     task_arg_obj.dais_arg);
			}
		}
	}
	if (!status) {
		if (node_type != NODE_DEVICE) {
			/* Remote call: run the node's create phase on the
			 * DSP; node_env receives the node's environment
			 * handle on success. */
			status = disp_node_create(hnode_mgr->disp_obj, hnode,
						  hnode_mgr->fxn_addrs
						  [RMSCREATENODE],
						  ul_create_fxn,
						  &(hnode->create_args),
						  &(hnode->node_env));
			if (status >= 0) {
				/* Set the message queue id to the node env
				 * pointer */
				intf_fxns = hnode_mgr->intf_fxns;
				(*intf_fxns->msg_set_queue_id) (hnode->
							msg_queue_obj,
							hnode->node_env);
			}
		}
	}
	/*  Phase II/Overlays: Create, execute, delete phases  possibly in
	 *  different files/sections. */
	if (hnode->loaded && hnode->phase_split) {
		/* If create code was dynamically loaded, we can now unload
		 * it. */
		status1 = hnode_mgr->nldr_fxns.unload(hnode->nldr_node_obj,
						      NLDR_CREATE);
		hnode->loaded = false;
	}
	if (status1)
		pr_err("%s: Failed to unload create code: 0x%x\n",
		       __func__, status1);
func_cont2:
	/* Update node state and node manager state */
	if (status >= 0) {
		NODE_SET_STATE(hnode, NODE_CREATED);
		hnode_mgr->num_created++;
		goto func_cont;
	}
	if (status != -EBADR) {
		/* Put back in NODE_ALLOCATED state if error occurred */
		NODE_SET_STATE(hnode, NODE_ALLOCATED);
	}
func_cont:
	/* Free access to node dispatcher */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	/* Notify clients only on success (status is negative on every
	 * error path reaching here). */
	if (status >= 0) {
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}

	dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
		hnode, status);
	return status;
}
1257
1258 /*
1259 * ======== node_create_mgr ========
1260 * Purpose:
1261 * Create a NODE Manager object.
1262 */
/*
 * Allocate and initialize a node manager bound to hdev_obj.
 * On success *node_man receives the new manager and 0 is returned;
 * on any failure all partially-created sub-objects are torn down via
 * delete_node_mgr() and a negative errno is returned (*node_man stays
 * NULL).
 */
int node_create_mgr(struct node_mgr **node_man,
			   struct dev_object *hdev_obj)
{
	u32 i;
	struct node_mgr *node_mgr_obj = NULL;
	struct disp_attr disp_attr_obj;
	char *sz_zl_file = "";	/* no base image for the DCD manager */
	struct nldr_attrs nldr_attrs_obj;
	int status = 0;
	u8 dev_type;

	*node_man = NULL;
	/* Allocate Node manager object */
	node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
	if (!node_mgr_obj)
		return -ENOMEM;

	node_mgr_obj->dev_obj = hdev_obj;

	node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
							GFP_KERNEL);
	if (!node_mgr_obj->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(node_mgr_obj->ntfy_obj);

	INIT_LIST_HEAD(&node_mgr_obj->node_list);

	dev_get_dev_type(hdev_obj, &dev_type);

	status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr);
	if (status)
		goto out_err;

	/* Fill chnl_offset/chnl_buf_size/proc_* fields used below */
	status = get_proc_props(node_mgr_obj, hdev_obj);
	if (status)
		goto out_err;

	/* Create NODE Dispatcher */
	disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset;
	disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size;
	disp_attr_obj.proc_family = node_mgr_obj->proc_family;
	disp_attr_obj.proc_type = node_mgr_obj->proc_type;

	status = disp_create(&node_mgr_obj->disp_obj, hdev_obj, &disp_attr_obj);
	if (status)
		goto out_err;

	/* Create a STRM Manager */
	status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
	if (status)
		goto out_err;

	dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
	/* Get msg_ctrl queue manager */
	dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
	mutex_init(&node_mgr_obj->node_mgr_lock);

	/* Block out reserved channels */
	for (i = 0; i < node_mgr_obj->chnl_offset; i++)
		set_bit(i, node_mgr_obj->chnl_map);

	/* Block out channels reserved for RMS */
	set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map);
	set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map);

	/* NO RM Server on the IVA */
	if (dev_type != IVA_UNIT) {
		/* Get addresses of any RMS functions loaded */
		status = get_rms_fxns(node_mgr_obj);
		if (status)
			goto out_err;
	}

	/* Get loader functions and create loader */
	node_mgr_obj->nldr_fxns = nldr_fxns;	/* Dyn loader funcs */

	nldr_attrs_obj.ovly = ovly;
	nldr_attrs_obj.write = mem_write;
	nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
	nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
	status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
						hdev_obj,
						&nldr_attrs_obj);
	if (status)
		goto out_err;

	*node_man = node_mgr_obj;

	return status;
out_err:
	/* delete_node_mgr() tolerates partially-initialized managers */
	delete_node_mgr(node_mgr_obj);
	return status;
}
1358
1359 /*
1360 * ======== node_delete ========
1361 * Purpose:
1362 * Delete a node on the DSP by remotely calling the node's delete function.
1363 * Loads the node's delete function if necessary. Free GPP side resources
1364 * after node's delete function returns.
1365 */
/*
 * Delete a node: run its remote delete-phase code when required, then
 * free all host-side resources (list entry, drv bookkeeping, node
 * object, idr slot, and the node_res wrapper itself) even if the
 * remote delete failed.  Clients of the processor are notified of the
 * state change.  Returns 0 or a negative errno; loader unload failures
 * (status1) are only logged.
 */
int node_delete(struct node_res_object *noderes,
		       struct process_context *pr_ctxt)
{
	struct node_object *pnode = noderes->node;
	struct node_mgr *hnode_mgr;
	struct proc_object *hprocessor;
	struct disp_object *disp_obj;
	u32 ul_delete_fxn;	/* DSP address of node's delete function */
	enum node_type node_type;
	enum node_state state;
	int status = 0;
	int status1 = 0;	/* unload result: logged, never returned */
	struct dsp_cbdata cb_data;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;

	void *node_res = noderes;

	struct dsp_processorstate proc_state;

	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR call */
	/* NOTE(review): cb_data is set but not referenced again in this
	 * function — presumably leftover from removed PWR calls. */
	cb_data.cb_data = PWR_TIMEOUT;
	hnode_mgr = pnode->node_mgr;
	hprocessor = pnode->processor;
	disp_obj = hnode_mgr->disp_obj;
	node_type = node_get_type(pnode);
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	state = node_get_state(pnode);
	/*  Execute delete phase code for non-device node in all cases
	 *  except when the node was only allocated. Delete phase must be
	 *  executed even if create phase was executed, but failed.
	 *  If the node environment pointer is non-NULL, the delete phase
	 *  code must be  executed. */
	if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
	    node_type != NODE_DEVICE) {
		status = proc_get_processor_id(pnode->processor, &proc_id);
		if (status)
			goto func_cont1;

		if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
			/*  If node has terminated, execute phase code will
			 *  have already been unloaded in node_on_exit(). If the
			 *  node is PAUSED, the execute phase is loaded, and it
			 *  is now ok to unload it. If the node is running, we
			 *  will unload the execute phase only after deleting
			 *  the node. */
			if (state == NODE_PAUSED && pnode->loaded &&
			    pnode->phase_split) {
				/* Ok to unload execute code as long as node
				 * is not * running */
				status1 =
				    hnode_mgr->nldr_fxns.
				    unload(pnode->nldr_node_obj,
					   NLDR_EXECUTE);
				pnode->loaded = false;
				NODE_SET_STATE(pnode, NODE_DONE);
			}
			/* Load delete phase code if not loaded or if haven't
			 * * unloaded EXECUTE phase */
			if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
			    pnode->phase_split) {
				status =
				    hnode_mgr->nldr_fxns.
				    load(pnode->nldr_node_obj, NLDR_DELETE);
				if (!status)
					pnode->loaded = true;
				else
					pr_err("%s: fail - load delete code:"
					       " 0x%x\n", __func__, status);
			}
		}
func_cont1:
		if (!status) {
			/* Unblock a thread trying to terminate the node */
			(void)sync_set_event(pnode->sync_done);
			if (proc_id == DSP_UNIT) {
				/* ul_delete_fxn = address of node's delete
				 * function */
				status = get_fxn_address(pnode, &ul_delete_fxn,
							 DELETEPHASE);
			} else if (proc_id == IVA_UNIT)
				ul_delete_fxn = (u32) pnode->node_env;
			/* NOTE(review): if proc_id were neither DSP_UNIT nor
			 * IVA_UNIT, ul_delete_fxn would be used below
			 * uninitialized — confirm those are the only
			 * possible ids here. */
			if (!status) {
				/* NOTE(review): proc_get_state()'s return
				 * value overwrites status but is not checked
				 * before proc_state is read — verify this is
				 * intentional. */
				status = proc_get_state(hprocessor,
						&proc_state,
						sizeof(struct
						       dsp_processorstate));
				if (proc_state.proc_state != PROC_ERROR) {
					status =
					    disp_node_delete(disp_obj, pnode,
							     hnode_mgr->
							     fxn_addrs
							     [RMSDELETENODE],
							     ul_delete_fxn,
							     pnode->node_env);
				} else
					NODE_SET_STATE(pnode, NODE_DONE);

				/* Unload execute, if not unloaded, and delete
				 * function */
				if (state == NODE_RUNNING &&
				    pnode->phase_split) {
					status1 =
					    hnode_mgr->nldr_fxns.
					    unload(pnode->nldr_node_obj,
						   NLDR_EXECUTE);
				}
				if (status1)
					pr_err("%s: fail - unload execute code:"
					       " 0x%x\n", __func__, status1);

				status1 =
				    hnode_mgr->nldr_fxns.unload(pnode->
							nldr_node_obj,
							NLDR_DELETE);
				pnode->loaded = false;
				if (status1)
					pr_err("%s: fail - unload delete code: "
					       "0x%x\n", __func__, status1);
			}
		}
	}
	/* Free host side resources even if a failure occurred */
	/* Remove node from hnode_mgr->node_list */
	list_del(&pnode->list_elem);
	hnode_mgr->num_nodes--;
	/* Decrement count of nodes created on DSP */
	if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
					  (pnode->node_env != (u32) NULL)))
		hnode_mgr->num_created--;
	/*  Free host-side resources allocated by node_create()
	 *  delete_node() fails if SM buffers not freed by client! */
	drv_proc_node_update_status(node_res, false);
	delete_node(pnode, pr_ctxt);

	/*
	 * Release all Node resources and its context
	 */
	idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
	kfree(node_res);

	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
func_end:
	dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
	return status;
}
1521
1522 /*
1523 * ======== node_delete_mgr ========
1524 * Purpose:
1525 * Delete the NODE Manager.
1526 */
1527 int node_delete_mgr(struct node_mgr *hnode_mgr)
1528 {
1529 if (!hnode_mgr)
1530 return -EFAULT;
1531
1532 delete_node_mgr(hnode_mgr);
1533
1534 return 0;
1535 }
1536
1537 /*
1538 * ======== node_enum_nodes ========
1539 * Purpose:
1540 * Enumerate currently allocated nodes.
1541 */
1542 int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
1543 u32 node_tab_size, u32 *pu_num_nodes,
1544 u32 *pu_allocated)
1545 {
1546 struct node_object *hnode;
1547 u32 i = 0;
1548 int status = 0;
1549
1550 if (!hnode_mgr) {
1551 status = -EFAULT;
1552 goto func_end;
1553 }
1554 /* Enter critical section */
1555 mutex_lock(&hnode_mgr->node_mgr_lock);
1556
1557 if (hnode_mgr->num_nodes > node_tab_size) {
1558 *pu_allocated = hnode_mgr->num_nodes;
1559 *pu_num_nodes = 0;
1560 status = -EINVAL;
1561 } else {
1562 list_for_each_entry(hnode, &hnode_mgr->node_list, list_elem)
1563 node_tab[i++] = hnode;
1564 *pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
1565 }
1566 /* end of sync_enter_cs */
1567 /* Exit critical section */
1568 mutex_unlock(&hnode_mgr->node_mgr_lock);
1569 func_end:
1570 return status;
1571 }
1572
1573 /*
1574 * ======== node_free_msg_buf ========
1575 * Purpose:
1576 * Frees the message buffer.
1577 */
1578 int node_free_msg_buf(struct node_object *hnode, u8 *pbuffer,
1579 struct dsp_bufferattr *pattr)
1580 {
1581 struct node_object *pnode = (struct node_object *)hnode;
1582 int status = 0;
1583 u32 proc_id;
1584
1585 if (!hnode) {
1586 status = -EFAULT;
1587 goto func_end;
1588 }
1589 status = proc_get_processor_id(pnode->processor, &proc_id);
1590 if (proc_id == DSP_UNIT) {
1591 if (!status) {
1592 if (pattr == NULL) {
1593 /* set defaults */
1594 pattr = &node_dfltbufattrs;
1595 }
1596 /* Node supports single SM segment only */
1597 if (pattr->segment_id != 1)
1598 status = -EBADR;
1599
1600 /* pbuffer is clients Va. */
1601 status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
1602 }
1603 } else {
1604 }
1605 func_end:
1606 return status;
1607 }
1608
1609 /*
1610 * ======== node_get_attr ========
1611 * Purpose:
1612 * Copy the current attributes of the specified node into a dsp_nodeattr
1613 * structure.
1614 */
1615 int node_get_attr(struct node_object *hnode,
1616 struct dsp_nodeattr *pattr, u32 attr_size)
1617 {
1618 struct node_mgr *hnode_mgr;
1619
1620 if (!hnode)
1621 return -EFAULT;
1622
1623 hnode_mgr = hnode->node_mgr;
1624 /* Enter hnode_mgr critical section since we're accessing
1625 * data that could be changed by node_change_priority() and
1626 * node_connect(). */
1627 mutex_lock(&hnode_mgr->node_mgr_lock);
1628 pattr->cb_struct = sizeof(struct dsp_nodeattr);
1629 /* dsp_nodeattrin */
1630 pattr->in_node_attr_in.cb_struct =
1631 sizeof(struct dsp_nodeattrin);
1632 pattr->in_node_attr_in.prio = hnode->prio;
1633 pattr->in_node_attr_in.timeout = hnode->timeout;
1634 pattr->in_node_attr_in.heap_size =
1635 hnode->create_args.asa.task_arg_obj.heap_size;
1636 pattr->in_node_attr_in.pgpp_virt_addr = (void *)
1637 hnode->create_args.asa.task_arg_obj.gpp_heap_addr;
1638 pattr->node_attr_inputs = hnode->num_gpp_inputs;
1639 pattr->node_attr_outputs = hnode->num_gpp_outputs;
1640 /* dsp_nodeinfo */
1641 get_node_info(hnode, &(pattr->node_info));
1642 /* end of sync_enter_cs */
1643 /* Exit critical section */
1644 mutex_unlock(&hnode_mgr->node_mgr_lock);
1645
1646 return 0;
1647 }
1648
1649 /*
1650 * ======== node_get_channel_id ========
1651 * Purpose:
1652 * Get the channel index reserved for a stream connection between the
1653 * host and a node.
1654 */
1655 int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
1656 u32 *chan_id)
1657 {
1658 enum node_type node_type;
1659 int status = -EINVAL;
1660
1661 if (!hnode) {
1662 status = -EFAULT;
1663 return status;
1664 }
1665 node_type = node_get_type(hnode);
1666 if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET) {
1667 status = -EPERM;
1668 return status;
1669 }
1670 if (dir == DSP_TONODE) {
1671 if (index < MAX_INPUTS(hnode)) {
1672 if (hnode->inputs[index].type == HOSTCONNECT) {
1673 *chan_id = hnode->inputs[index].dev_id;
1674 status = 0;
1675 }
1676 }
1677 } else {
1678 if (index < MAX_OUTPUTS(hnode)) {
1679 if (hnode->outputs[index].type == HOSTCONNECT) {
1680 *chan_id = hnode->outputs[index].dev_id;
1681 status = 0;
1682 }
1683 }
1684 }
1685 return status;
1686 }
1687
1688 /*
1689 * ======== node_get_message ========
1690 * Purpose:
1691 * Retrieve a message from a node on the DSP.
1692 */
/*
 * Retrieve one message from a node, blocking up to utimeout.
 * If the message carries a shared-memory buffer descriptor
 * (DSP_RMSBUFDESC set in cmd), arg1/arg2 are translated in place from
 * DSP word addresses/sizes to a GPP virtual address and byte size.
 * Returns -EFAULT for a NULL node, -EPERM for processor error state or
 * non-message-capable node types, -ESRCH when SM translation fails,
 * otherwise the msg_get result.
 */
int node_get_message(struct node_object *hnode,
			    struct dsp_msg *message, u32 utimeout)
{
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	void *tmp_buf;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to get the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	/* Only message, task and XDAIS socket nodes have message queues */
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET) {
		status = -EPERM;
		goto func_end;
	}
	/*  This function will block unless a message is available. Since
	 *  DSPNode_RegisterNotify() allows notification when a message
	 *  is available, the system can be designed so that
	 *  DSPNode_GetMessage() is only called when a message is
	 *  available. */
	intf_fxns = hnode_mgr->intf_fxns;
	status =
	    (*intf_fxns->msg_get) (hnode->msg_queue_obj, message, utimeout);
	/* Check if message contains SM descriptor */
	if (status || !(message->cmd & DSP_RMSBUFDESC))
		goto func_end;

	/* Translate DSP byte addr to GPP Va. */
	tmp_buf = cmm_xlator_translate(hnode->xlator,
				       (void *)(message->arg1 *
						hnode->node_mgr->
						dsp_word_size), CMM_DSPPA2PA);
	if (tmp_buf != NULL) {
		/* now convert this GPP Pa to Va */
		tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
					       CMM_PA2VA);
		if (tmp_buf != NULL) {
			/* Adjust SM size in msg: arg1 becomes the GPP
			 * virtual address, arg2 is scaled from DSP words
			 * to bytes. */
			message->arg1 = (u32) tmp_buf;
			message->arg2 *= hnode->node_mgr->dsp_word_size;
		} else {
			status = -ESRCH;
		}
	} else {
		status = -ESRCH;
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
		hnode, message, utimeout);
	return status;
}
1762
1763 /*
1764 * ======== node_get_nldr_obj ========
1765 */
1766 int node_get_nldr_obj(struct node_mgr *hnode_mgr,
1767 struct nldr_object **nldr_ovlyobj)
1768 {
1769 int status = 0;
1770 struct node_mgr *node_mgr_obj = hnode_mgr;
1771
1772 if (!hnode_mgr)
1773 status = -EFAULT;
1774 else
1775 *nldr_ovlyobj = node_mgr_obj->nldr_obj;
1776
1777 return status;
1778 }
1779
1780 /*
1781 * ======== node_get_strm_mgr ========
1782 * Purpose:
1783 * Returns the Stream manager.
1784 */
1785 int node_get_strm_mgr(struct node_object *hnode,
1786 struct strm_mgr **strm_man)
1787 {
1788 int status = 0;
1789
1790 if (!hnode)
1791 status = -EFAULT;
1792 else
1793 *strm_man = hnode->node_mgr->strm_mgr_obj;
1794
1795 return status;
1796 }
1797
1798 /*
1799 * ======== node_get_load_type ========
1800 */
1801 enum nldr_loadtype node_get_load_type(struct node_object *hnode)
1802 {
1803 if (!hnode) {
1804 dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
1805 return -1;
1806 } else {
1807 return hnode->dcd_props.obj_data.node_obj.load_type;
1808 }
1809 }
1810
1811 /*
1812 * ======== node_get_timeout ========
1813 * Purpose:
1814 * Returns the timeout value for this node.
1815 */
1816 u32 node_get_timeout(struct node_object *hnode)
1817 {
1818 if (!hnode) {
1819 dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
1820 return 0;
1821 } else {
1822 return hnode->timeout;
1823 }
1824 }
1825
1826 /*
1827 * ======== node_get_type ========
1828 * Purpose:
1829 * Returns the node type.
1830 */
1831 enum node_type node_get_type(struct node_object *hnode)
1832 {
1833 enum node_type node_type;
1834
1835 if (hnode == (struct node_object *)DSP_HGPPNODE)
1836 node_type = NODE_GPP;
1837 else {
1838 if (!hnode)
1839 node_type = -1;
1840 else
1841 node_type = hnode->ntype;
1842 }
1843 return node_type;
1844 }
1845
1846 /*
1847 * ======== node_on_exit ========
1848 * Purpose:
1849 * Gets called when RMS_EXIT is received for a node.
1850 */
/*
 * Handle RMS_EXIT from a node: mark it NODE_DONE, record its exit
 * status, drop the execute-phase code if still resident, release any
 * waiter in node_terminate(), and notify clients.  No-op for NULL.
 */
void node_on_exit(struct node_object *hnode, s32 node_status)
{
	if (!hnode)
		return;

	/* Set node state to done */
	NODE_SET_STATE(hnode, NODE_DONE);
	hnode->exit_status = node_status;
	/* Execute phase can be unloaded now the node has exited;
	 * unload failures are deliberately ignored. */
	if (hnode->loaded && hnode->phase_split) {
		(void)hnode->node_mgr->nldr_fxns.unload(hnode->
							nldr_node_obj,
							NLDR_EXECUTE);
		hnode->loaded = false;
	}
	/* Unblock call to node_terminate */
	(void)sync_set_event(hnode->sync_done);
	/* Notify clients */
	proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
	ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
1871
1872 /*
1873 * ======== node_pause ========
1874 * Purpose:
1875 * Suspend execution of a node currently running on the DSP.
1876 */
/*
 * Suspend a running node by remotely dropping its priority to
 * NODE_SUSPENDEDPRI.  Returns -EFAULT for a NULL node, -EPERM for
 * non-task node types or processor error state, -ENOSYS on the IVA,
 * -EBADR if the node is not NODE_RUNNING, otherwise the dispatcher
 * result.  On success the node moves to NODE_PAUSED and clients are
 * notified.
 */
int node_pause(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	enum node_type node_type;
	enum node_state state;
	struct node_mgr *hnode_mgr;
	int status = 0;
	u32 proc_id;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
	} else {
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (status)
		goto func_end;

	/* NOTE(review): if proc_get_processor_id() fails, proc_id is read
	 * uninitialized below and its error status is overwritten —
	 * confirm the call cannot fail here. */
	status = proc_get_processor_id(pnode->processor, &proc_id);

	if (proc_id == IVA_UNIT)
		status = -ENOSYS;

	if (!status) {
		hnode_mgr = hnode->node_mgr;

		/* Enter critical section */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		/* Check node state */
		if (state != NODE_RUNNING)
			status = -EBADR;

		if (status)
			goto func_cont;
		hprocessor = hnode->processor;
		status = proc_get_state(hprocessor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt
		   to send the message */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		/* Remote call: lower the node's priority on the DSP */
		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
				hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY],
				hnode->node_env, NODE_SUSPENDEDPRI);

		/* Update state */
		if (status >= 0)
			NODE_SET_STATE(hnode, NODE_PAUSED);

func_cont:
		/* End of sync_enter_cs */
		/* Leave critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
		if (status >= 0) {
			proc_notify_clients(hnode->processor,
					    DSP_NODESTATECHANGE);
			ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
		}
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
1949
1950 /*
1951 * ======== node_put_message ========
1952 * Purpose:
1953 * Send a message to a message node, task node, or XDAIS socket node. This
1954 * function will block until the message stream can accommodate the
1955 * message, or a timeout occurs.
1956 */
/*
 * Send a message to a message/task/XDAIS-socket node, blocking up to
 * utimeout until the message stream can accept it.  When the message
 * carries a shared-memory buffer descriptor (DSP_RMSBUFDESC), arg1 is
 * translated from a GPP virtual address to a DSP physical address in
 * MAUs and arg2 scaled from bytes to DSP words before sending.
 * Returns -EFAULT for a NULL node, -EPERM for bad node type, processor
 * error state, or zero dsp_word_size, -EBADR once the node is
 * terminating/done, -ESRCH when address translation fails, otherwise
 * the msg_put result.
 */
int node_put_message(struct node_object *hnode,
			    const struct dsp_msg *pmsg, u32 utimeout)
{
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	int status = 0;
	void *tmp_buf;
	struct dsp_msg new_msg;	/* local copy; caller's pmsg is untouched */
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in bad state then don't attempt sending the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET)
		status = -EPERM;

	if (!status) {
		/*  Check node state. Can't send messages to a node after
		 *  we've sent the RMS_EXIT command. There is still the
		 *  possibility that node_terminate can be called after we've
		 *  checked the state. Could add another SYNC object to
		 *  prevent this (can't use node_mgr_lock, since we don't
		 *  want to block other NODE functions). However, the node may
		 *  still exit on its own, before this message is sent. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state == NODE_TERMINATING || state == NODE_DONE)
			status = -EBADR;

		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (status)
		goto func_end;

	/* assign pmsg values to new msg */
	new_msg = *pmsg;
	/* Now, check if message contains a SM buffer descriptor */
	if (pmsg->cmd & DSP_RMSBUFDESC) {
		/* Translate GPP Va to DSP physical buf Ptr. */
		tmp_buf = cmm_xlator_translate(hnode->xlator,
					       (void *)new_msg.arg1,
					       CMM_VA2DSPPA);
		if (tmp_buf != NULL) {
			/* got translation, convert to MAUs in msg */
			if (hnode->node_mgr->dsp_word_size != 0) {
				new_msg.arg1 =
				    (u32) tmp_buf /
				    hnode->node_mgr->dsp_word_size;
				/* MAUs */
				new_msg.arg2 /= hnode->node_mgr->
				    dsp_word_size;
			} else {
				pr_err("%s: dsp_word_size is zero!\n",
				       __func__);
				status = -EPERM;	/* bad DSPWordSize */
			}
		} else {	/* failed to translate buffer address */
			status = -ESRCH;
		}
	}
	if (!status) {
		intf_fxns = hnode_mgr->intf_fxns;
		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj,
						&new_msg, utimeout);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
		"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
	return status;
}
2046
2047 /*
2048 * ======== node_register_notify ========
2049 * Purpose:
2050 * Register to be notified on specific events for this node.
2051 */
2052 int node_register_notify(struct node_object *hnode, u32 event_mask,
2053 u32 notify_type,
2054 struct dsp_notification *hnotification)
2055 {
2056 struct bridge_drv_interface *intf_fxns;
2057 int status = 0;
2058
2059 if (!hnode) {
2060 status = -EFAULT;
2061 } else {
2062 /* Check if event mask is a valid node related event */
2063 if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2064 status = -EINVAL;
2065
2066 /* Check if notify type is valid */
2067 if (notify_type != DSP_SIGNALEVENT)
2068 status = -EINVAL;
2069
2070 /* Only one Notification can be registered at a
2071 * time - Limitation */
2072 if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
2073 status = -EINVAL;
2074 }
2075 if (!status) {
2076 if (event_mask == DSP_NODESTATECHANGE) {
2077 status = ntfy_register(hnode->ntfy_obj, hnotification,
2078 event_mask & DSP_NODESTATECHANGE,
2079 notify_type);
2080 } else {
2081 /* Send Message part of event mask to msg_ctrl */
2082 intf_fxns = hnode->node_mgr->intf_fxns;
2083 status = (*intf_fxns->msg_register_notify)
2084 (hnode->msg_queue_obj,
2085 event_mask & DSP_NODEMESSAGEREADY, notify_type,
2086 hnotification);
2087 }
2088
2089 }
2090 dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
2091 "hnotification: %p status 0x%x\n", __func__, hnode,
2092 event_mask, notify_type, hnotification, status);
2093 return status;
2094 }
2095
/*
 * ======== node_run ========
 * Purpose:
 *      Start execution of a node's execute phase, or resume execution of a node
 *      that has been suspended (via NODE_NodePause()) on the DSP. Load the
 *      node's execute function if necessary.
 * Parameters:
 *      hnode:  node to (re)start; must be in NODE_CREATED or NODE_PAUSED state.
 * Returns:
 *      0 on success; -EFAULT on NULL node/manager; -EPERM for device nodes or
 *      a processor in error state; -EBADR for a node in the wrong state; or
 *      the error from loading/dispatching the execute phase.
 */
int node_run(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	enum node_state state;
	u32 ul_execute_fxn;
	u32 ul_fxn_addr;
	int status = 0;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to run the node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* Device nodes have no execute phase that could be run */
	node_type = node_get_type(hnode);
	if (node_type == NODE_DEVICE)
		status = -EPERM;
	if (status)
		goto func_end;

	hnode_mgr = hnode->node_mgr;
	if (!hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Only a freshly created or a paused node may be (re)started */
	state = node_get_state(hnode);
	if (state != NODE_CREATED && state != NODE_PAUSED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);

	if (status)
		goto func_cont1;

	if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
		goto func_cont1;

	if (state == NODE_CREATED) {
		/* If node's execute function is not loaded, load it */
		if (!(hnode->loaded) && hnode->phase_split) {
			status =
			    hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
						      NLDR_EXECUTE);
			if (!status) {
				hnode->loaded = true;
			} else {
				pr_err("%s: fail - load execute code: 0x%x\n",
				       __func__, status);
			}
		}
		if (!status) {
			/* Get address of node's execute function */
			if (proc_id == IVA_UNIT)
				ul_execute_fxn = (u32) hnode->node_env;
			else {
				status = get_fxn_address(hnode, &ul_execute_fxn,
							 EXECUTEPHASE);
			}
		}
		if (!status) {
			/* Dispatch RMS_executeNode on the DSP side */
			ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE];
			status =
			    disp_node_run(hnode_mgr->disp_obj, hnode,
					  ul_fxn_addr, ul_execute_fxn,
					  hnode->node_env);
		}
	} else if (state == NODE_PAUSED) {
		/* Resume a paused node by restoring its priority on the DSP */
		ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY];
		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
						   ul_fxn_addr, hnode->node_env,
						   NODE_GET_PRIORITY(hnode));
	} else {
		/* We should never get here */
	}
func_cont1:
	/* Update node state. */
	if (status >= 0)
		NODE_SET_STATE(hnode, NODE_RUNNING);
	else			/* Set state back to previous value */
		NODE_SET_STATE(hnode, state);
	/*End of sync_enter_cs */
	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (status >= 0) {
		/* Inform GPP clients and registered listeners of the change */
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
2213
/*
 * ======== node_terminate ========
 * Purpose:
 *      Signal a node running on the DSP that it should exit its execute phase
 *      function.
 *      On success *pstatus receives the node's exit status. Only TASK and
 *      DAIS socket nodes in the RUNNING state may be terminated.
 */
int node_terminate(struct node_object *hnode, int *pstatus)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	struct dsp_msg msg, killmsg;
	int status = 0;
	u32 proc_id, kill_time_out;
	struct deh_mgr *hdeh_mgr;
	struct dsp_processorstate proc_state;

	if (!hnode || !hnode->node_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	if (pnode->processor == NULL) {
		status = -EFAULT;
		goto func_end;
	}
	status = proc_get_processor_id(pnode->processor, &proc_id);

	if (!status) {
		hnode_mgr = hnode->node_mgr;
		node_type = node_get_type(hnode);
		/* Only task and DAIS socket nodes have an execute phase */
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (!status) {
		/* Check node state */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state != NODE_RUNNING) {
			status = -EBADR;
			/* Set the exit status if node terminated on
			 * its own. */
			if (state == NODE_DONE)
				*pstatus = hnode->exit_status;

		} else {
			NODE_SET_STATE(hnode, NODE_TERMINATING);
		}
		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (!status) {
		/*
		 * Send exit message. Do not change state to NODE_DONE
		 * here. That will be done in callback.
		 */
		status = proc_get_state(pnode->processor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt to send
		 * A kill task command */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		/* Polite exit request first; forceful kill as a fallback */
		msg.cmd = RMS_EXIT;
		msg.arg1 = hnode->node_env;
		killmsg.cmd = RMS_KILLTASK;
		killmsg.arg1 = hnode->node_env;
		intf_fxns = hnode_mgr->intf_fxns;

		/* Allow up to twice the node's message timeout (capped) */
		if (hnode->timeout > MAXTIMEOUT)
			kill_time_out = MAXTIMEOUT;
		else
			kill_time_out = (hnode->timeout) * 2;

		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, &msg,
						hnode->timeout);
		if (status)
			goto func_cont;

		/*
		 * Wait on synchronization object that will be
		 * posted in the callback on receiving RMS_EXIT
		 * message, or by node_delete. Check for valid hnode,
		 * in case posted by node_delete().
		 */
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		/* NOTE(review): if sync_wait_on_event() reports a timeout as
		 * -ETIME (the usual kernel errno convention) this comparison
		 * with positive ETIME never matches, making the RMS_KILLTASK
		 * fallback below unreachable -- verify against sync.h. */
		if (status != ETIME)
			goto func_cont;

		/* Exit request timed out: force-kill the DSP task */
		status = (*intf_fxns->msg_put)(hnode->msg_queue_obj,
					       &killmsg, hnode->timeout);
		if (status)
			goto func_cont;
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		if (status) {
			/*
			 * Here it goes the part of the simulation of
			 * the DSP exception.
			 */
			dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr);
			if (!hdeh_mgr)
				goto func_cont;

			bridge_deh_notify(hdeh_mgr, DSP_SYSERROR,
					  DSP_EXCEPTIONABORT);
		}
	}
func_cont:
	if (!status) {
		/* Enter CS before getting exit status, in case node was
		 * deleted. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		/* Make sure node wasn't deleted while we blocked */
		/* NOTE(review): hnode is a local copy of the caller's pointer
		 * and cannot become NULL here even if the node was deleted,
		 * so this check looks ineffective -- confirm the intended
		 * protection. */
		if (!hnode) {
			status = -EPERM;
		} else {
			*pstatus = hnode->exit_status;
			dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
				__func__, hnode, hnode->node_env, status);
		}
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}			/*End of sync_enter_cs */
func_end:
	return status;
}
2346
/*
 * ======== delete_node ========
 * Purpose:
 *      Free GPP resources allocated in node_allocate() or node_connect().
 *      Releases the message queue, stream bookkeeping, task argument arrays,
 *      DSP heap mappings, notification object and DCD strings, then frees
 *      the node object itself.
 * Parameters:
 *      hnode:    node to tear down (may be NULL; then this is a no-op).
 *      pr_ctxt:  process context used to unmap/unreserve the DSP heap.
 */
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 i;
	enum node_type node_type;
	struct stream_chnl stream;
	struct node_msgargs node_msg_args;
	struct node_taskargs task_arg_obj;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object =
	    (struct proc_object *)hnode->processor;
#endif
	int status;
	if (!hnode)
		goto func_end;
	hnode_mgr = hnode->node_mgr;
	if (!hnode_mgr)
		goto func_end;

	node_type = node_get_type(hnode);
	/* Device nodes have no messaging/stream resources to release */
	if (node_type != NODE_DEVICE) {
		/* Message payload allocated at node creation */
		node_msg_args = hnode->create_args.asa.node_msg_args;
		kfree(node_msg_args.pdata);

		/* Free msg_ctrl queue */
		if (hnode->msg_queue_obj) {
			intf_fxns = hnode_mgr->intf_fxns;
			(*intf_fxns->msg_delete_queue) (hnode->
							msg_queue_obj);
			hnode->msg_queue_obj = NULL;
		}

		kfree(hnode->sync_done);

		/* Free all stream info */
		if (hnode->inputs) {
			/* Release each input's channel/pipe allocation */
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				stream = hnode->inputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->inputs);
			hnode->inputs = NULL;
		}
		if (hnode->outputs) {
			/* Release each output's channel/pipe allocation */
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				stream = hnode->outputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->outputs);
			hnode->outputs = NULL;
		}
		/* Free per-stream definitions (device name strings first) */
		task_arg_obj = hnode->create_args.asa.task_arg_obj;
		if (task_arg_obj.strm_in_def) {
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_in_def[i].sz_device);
				task_arg_obj.strm_in_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_in_def);
			task_arg_obj.strm_in_def = NULL;
		}
		if (task_arg_obj.strm_out_def) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_out_def[i].sz_device);
				task_arg_obj.strm_out_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_out_def);
			task_arg_obj.strm_out_def = NULL;
		}
		if (task_arg_obj.dsp_heap_res_addr) {
			/* Unmap and unreserve the node's DSP heap; teardown is
			 * best-effort, so the statuses are not propagated */
			status = proc_un_map(hnode->processor, (void *)
					     task_arg_obj.dsp_heap_addr,
					     pr_ctxt);

			status = proc_un_reserve_memory(hnode->processor,
							(void *)
							task_arg_obj.
							dsp_heap_res_addr,
							pr_ctxt);
#ifdef DSP_DMM_DEBUG
			status = dmm_get_handle(p_proc_object, &dmm_mgr);
			if (dmm_mgr)
				dmm_mem_map_dump(dmm_mgr);
			else
				status = DSP_EHANDLE;
#endif
		}
	}
	if (node_type != NODE_MESSAGE) {
		kfree(hnode->stream_connect);
		hnode->stream_connect = NULL;
	}
	kfree(hnode->str_dev_name);
	hnode->str_dev_name = NULL;

	if (hnode->ntfy_obj) {
		ntfy_delete(hnode->ntfy_obj);
		kfree(hnode->ntfy_obj);
		hnode->ntfy_obj = NULL;
	}

	/* These were allocated in dcd_get_object_def (via node_allocate) */
	kfree(hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn = NULL;

	kfree(hnode->dcd_props.obj_data.node_obj.str_i_alg_name);
	hnode->dcd_props.obj_data.node_obj.str_i_alg_name = NULL;

	/* Free all SM address translator resources */
	kfree(hnode->xlator);
	kfree(hnode->nldr_node_obj);
	hnode->nldr_node_obj = NULL;
	hnode->node_mgr = NULL;
	kfree(hnode);
	hnode = NULL;
func_end:
	return;
}
2478
2479 /*
2480 * ======== delete_node_mgr ========
2481 * Purpose:
2482 * Frees the node manager.
2483 */
2484 static void delete_node_mgr(struct node_mgr *hnode_mgr)
2485 {
2486 struct node_object *hnode, *tmp;
2487
2488 if (hnode_mgr) {
2489 /* Free resources */
2490 if (hnode_mgr->dcd_mgr)
2491 dcd_destroy_manager(hnode_mgr->dcd_mgr);
2492
2493 /* Remove any elements remaining in lists */
2494 list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list,
2495 list_elem) {
2496 list_del(&hnode->list_elem);
2497 delete_node(hnode, NULL);
2498 }
2499 mutex_destroy(&hnode_mgr->node_mgr_lock);
2500 if (hnode_mgr->ntfy_obj) {
2501 ntfy_delete(hnode_mgr->ntfy_obj);
2502 kfree(hnode_mgr->ntfy_obj);
2503 }
2504
2505 if (hnode_mgr->disp_obj)
2506 disp_delete(hnode_mgr->disp_obj);
2507
2508 if (hnode_mgr->strm_mgr_obj)
2509 strm_delete(hnode_mgr->strm_mgr_obj);
2510
2511 /* Delete the loader */
2512 if (hnode_mgr->nldr_obj)
2513 hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
2514
2515 kfree(hnode_mgr);
2516 }
2517 }
2518
/*
 * ======== fill_stream_connect ========
 * Purpose:
 *      Fills stream information.
 *      Records a new connection in the dsp_streamconnect tables of both
 *      endpoints. Either endpoint may be the GPP (DSP_HGPPNODE); device
 *      nodes keep no stream_connect table and are skipped.
 *      The new entry lives at index num_inputs + num_outputs - 1, which
 *      assumes the counters were already bumped for this connection --
 *      NOTE(review): confirm against node_connect().
 */
static void fill_stream_connect(struct node_object *node1,
				struct node_object *node2,
				u32 stream1, u32 stream2)
{
	u32 strm_index;
	struct dsp_streamconnect *strm1 = NULL;
	struct dsp_streamconnect *strm2 = NULL;
	enum node_type node1_type = NODE_TASK;
	enum node_type node2_type = NODE_TASK;

	node1_type = node_get_type(node1);
	node2_type = node_get_type(node2);
	if (node1 != (struct node_object *)DSP_HGPPNODE) {

		if (node1_type != NODE_DEVICE) {
			/* Newest connection slot on the source node */
			strm_index = node1->num_inputs +
			    node1->num_outputs - 1;
			strm1 = &(node1->stream_connect[strm_index]);
			strm1->cb_struct = sizeof(struct dsp_streamconnect);
			strm1->this_node_stream_index = stream1;
		}

		if (node2 != (struct node_object *)DSP_HGPPNODE) {
			/* NODE == > NODE */
			if (node1_type != NODE_DEVICE) {
				strm1->connected_node = node2;
				strm1->ui_connected_node_id = node2->node_uuid;
				strm1->connected_node_stream_index = stream2;
				strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
			}
			if (node2_type != NODE_DEVICE) {
				/* Mirror the connection on the sink node */
				strm_index = node2->num_inputs +
				    node2->num_outputs - 1;
				strm2 = &(node2->stream_connect[strm_index]);
				strm2->cb_struct =
				    sizeof(struct dsp_streamconnect);
				strm2->this_node_stream_index = stream2;
				strm2->connected_node = node1;
				strm2->ui_connected_node_id = node1->node_uuid;
				strm2->connected_node_stream_index = stream1;
				strm2->connect_type = CONNECTTYPE_NODEINPUT;
			}
		} else if (node1_type != NODE_DEVICE)
			/* NODE == > GPP */
			strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
	} else {
		/* GPP == > NODE */
		strm_index = node2->num_inputs + node2->num_outputs - 1;
		strm2 = &(node2->stream_connect[strm_index]);
		strm2->cb_struct = sizeof(struct dsp_streamconnect);
		strm2->this_node_stream_index = stream2;
		strm2->connect_type = CONNECTTYPE_GPPINPUT;
	}
}
2577
2578 /*
2579 * ======== fill_stream_def ========
2580 * Purpose:
2581 * Fills Stream attributes.
2582 */
2583 static void fill_stream_def(struct node_object *hnode,
2584 struct node_strmdef *pstrm_def,
2585 struct dsp_strmattr *pattrs)
2586 {
2587 struct node_mgr *hnode_mgr = hnode->node_mgr;
2588
2589 if (pattrs != NULL) {
2590 pstrm_def->num_bufs = pattrs->num_bufs;
2591 pstrm_def->buf_size =
2592 pattrs->buf_size / hnode_mgr->dsp_data_mau_size;
2593 pstrm_def->seg_id = pattrs->seg_id;
2594 pstrm_def->buf_alignment = pattrs->buf_alignment;
2595 pstrm_def->timeout = pattrs->timeout;
2596 } else {
2597 pstrm_def->num_bufs = DEFAULTNBUFS;
2598 pstrm_def->buf_size =
2599 DEFAULTBUFSIZE / hnode_mgr->dsp_data_mau_size;
2600 pstrm_def->seg_id = DEFAULTSEGID;
2601 pstrm_def->buf_alignment = DEFAULTALIGNMENT;
2602 pstrm_def->timeout = DEFAULTTIMEOUT;
2603 }
2604 }
2605
2606 /*
2607 * ======== free_stream ========
2608 * Purpose:
2609 * Updates the channel mask and frees the pipe id.
2610 */
2611 static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
2612 {
2613 /* Free up the pipe id unless other node has not yet been deleted. */
2614 if (stream.type == NODECONNECT) {
2615 if (test_bit(stream.dev_id, hnode_mgr->pipe_done_map)) {
2616 /* The other node has already been deleted */
2617 clear_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2618 clear_bit(stream.dev_id, hnode_mgr->pipe_map);
2619 } else {
2620 /* The other node has not been deleted yet */
2621 set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
2622 }
2623 } else if (stream.type == HOSTCONNECT) {
2624 if (stream.dev_id < hnode_mgr->num_chnls) {
2625 clear_bit(stream.dev_id, hnode_mgr->chnl_map);
2626 } else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
2627 /* dsp-dma */
2628 clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
2629 hnode_mgr->dma_chnl_map);
2630 } else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
2631 /* zero-copy */
2632 clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
2633 hnode_mgr->zc_chnl_map);
2634 }
2635 }
2636 }
2637
2638 /*
2639 * ======== get_fxn_address ========
2640 * Purpose:
2641 * Retrieves the address for create, execute or delete phase for a node.
2642 */
2643 static int get_fxn_address(struct node_object *hnode, u32 *fxn_addr,
2644 u32 phase)
2645 {
2646 char *pstr_fxn_name = NULL;
2647 struct node_mgr *hnode_mgr = hnode->node_mgr;
2648 int status = 0;
2649
2650 switch (phase) {
2651 case CREATEPHASE:
2652 pstr_fxn_name =
2653 hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn;
2654 break;
2655 case EXECUTEPHASE:
2656 pstr_fxn_name =
2657 hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn;
2658 break;
2659 case DELETEPHASE:
2660 pstr_fxn_name =
2661 hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn;
2662 break;
2663 default:
2664 /* Should never get here */
2665 break;
2666 }
2667
2668 status =
2669 hnode_mgr->nldr_fxns.get_fxn_addr(hnode->nldr_node_obj,
2670 pstr_fxn_name, fxn_addr);
2671
2672 return status;
2673 }
2674
2675 /*
2676 * ======== get_node_info ========
2677 * Purpose:
2678 * Retrieves the node information.
2679 */
2680 void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
2681 {
2682 u32 i;
2683
2684 node_info->cb_struct = sizeof(struct dsp_nodeinfo);
2685 node_info->nb_node_database_props =
2686 hnode->dcd_props.obj_data.node_obj.ndb_props;
2687 node_info->execution_priority = hnode->prio;
2688 node_info->device_owner = hnode->device_owner;
2689 node_info->number_streams = hnode->num_inputs + hnode->num_outputs;
2690 node_info->node_env = hnode->node_env;
2691
2692 node_info->ns_execution_state = node_get_state(hnode);
2693
2694 /* Copy stream connect data */
2695 for (i = 0; i < hnode->num_inputs + hnode->num_outputs; i++)
2696 node_info->sc_stream_connection[i] = hnode->stream_connect[i];
2697
2698 }
2699
2700 /*
2701 * ======== get_node_props ========
2702 * Purpose:
2703 * Retrieve node properties.
2704 */
2705 static int get_node_props(struct dcd_manager *hdcd_mgr,
2706 struct node_object *hnode,
2707 const struct dsp_uuid *node_uuid,
2708 struct dcd_genericobj *dcd_prop)
2709 {
2710 u32 len;
2711 struct node_msgargs *pmsg_args;
2712 struct node_taskargs *task_arg_obj;
2713 enum node_type node_type = NODE_TASK;
2714 struct dsp_ndbprops *pndb_props =
2715 &(dcd_prop->obj_data.node_obj.ndb_props);
2716 int status = 0;
2717 char sz_uuid[MAXUUIDLEN];
2718
2719 status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
2720 DSP_DCDNODETYPE, dcd_prop);
2721
2722 if (!status) {
2723 hnode->ntype = node_type = pndb_props->ntype;
2724
2725 /* Create UUID value to set in registry. */
2726 snprintf(sz_uuid, MAXUUIDLEN, "%pUL", node_uuid);
2727 dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);
2728
2729 /* Fill in message args that come from NDB */
2730 if (node_type != NODE_DEVICE) {
2731 pmsg_args = &(hnode->create_args.asa.node_msg_args);
2732 pmsg_args->seg_id =
2733 dcd_prop->obj_data.node_obj.msg_segid;
2734 pmsg_args->notify_type =
2735 dcd_prop->obj_data.node_obj.msg_notify_type;
2736 pmsg_args->max_msgs = pndb_props->message_depth;
2737 dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
2738 pmsg_args->max_msgs);
2739 } else {
2740 /* Copy device name */
2741 len = strlen(pndb_props->ac_name);
2742 hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
2743 if (hnode->str_dev_name == NULL) {
2744 status = -ENOMEM;
2745 } else {
2746 strncpy(hnode->str_dev_name,
2747 pndb_props->ac_name, len);
2748 }
2749 }
2750 }
2751 if (!status) {
2752 /* Fill in create args that come from NDB */
2753 if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
2754 task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
2755 task_arg_obj->prio = pndb_props->prio;
2756 task_arg_obj->stack_size = pndb_props->stack_size;
2757 task_arg_obj->sys_stack_size =
2758 pndb_props->sys_stack_size;
2759 task_arg_obj->stack_seg = pndb_props->stack_seg;
2760 dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
2761 "0x%x words System Stack Size: 0x%x words "
2762 "Stack Segment: 0x%x profile count : 0x%x\n",
2763 task_arg_obj->prio, task_arg_obj->stack_size,
2764 task_arg_obj->sys_stack_size,
2765 task_arg_obj->stack_seg,
2766 pndb_props->count_profiles);
2767 }
2768 }
2769
2770 return status;
2771 }
2772
2773 /*
2774 * ======== get_proc_props ========
2775 * Purpose:
2776 * Retrieve the processor properties.
2777 */
2778 static int get_proc_props(struct node_mgr *hnode_mgr,
2779 struct dev_object *hdev_obj)
2780 {
2781 struct cfg_hostres *host_res;
2782 struct bridge_dev_context *pbridge_context;
2783 int status = 0;
2784
2785 status = dev_get_bridge_context(hdev_obj, &pbridge_context);
2786 if (!pbridge_context)
2787 status = -EFAULT;
2788
2789 if (!status) {
2790 host_res = pbridge_context->resources;
2791 if (!host_res)
2792 return -EPERM;
2793 hnode_mgr->chnl_offset = host_res->chnl_offset;
2794 hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
2795 hnode_mgr->num_chnls = host_res->num_chnls;
2796
2797 /*
2798 * PROC will add an API to get dsp_processorinfo.
2799 * Fill in default values for now.
2800 */
2801 /* TODO -- Instead of hard coding, take from registry */
2802 hnode_mgr->proc_family = 6000;
2803 hnode_mgr->proc_type = 6410;
2804 hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
2805 hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
2806 hnode_mgr->dsp_word_size = DSPWORDSIZE;
2807 hnode_mgr->dsp_data_mau_size = DSPWORDSIZE;
2808 hnode_mgr->dsp_mau_size = 1;
2809
2810 }
2811 return status;
2812 }
2813
2814 /*
2815 * ======== node_get_uuid_props ========
2816 * Purpose:
2817 * Fetch Node UUID properties from DCD/DOF file.
2818 */
2819 int node_get_uuid_props(void *hprocessor,
2820 const struct dsp_uuid *node_uuid,
2821 struct dsp_ndbprops *node_props)
2822 {
2823 struct node_mgr *hnode_mgr = NULL;
2824 struct dev_object *hdev_obj;
2825 int status = 0;
2826 struct dcd_nodeprops dcd_node_props;
2827 struct dsp_processorstate proc_state;
2828
2829 if (hprocessor == NULL || node_uuid == NULL) {
2830 status = -EFAULT;
2831 goto func_end;
2832 }
2833 status = proc_get_state(hprocessor, &proc_state,
2834 sizeof(struct dsp_processorstate));
2835 if (status)
2836 goto func_end;
2837 /* If processor is in error state then don't attempt
2838 to send the message */
2839 if (proc_state.proc_state == PROC_ERROR) {
2840 status = -EPERM;
2841 goto func_end;
2842 }
2843
2844 status = proc_get_dev_object(hprocessor, &hdev_obj);
2845 if (hdev_obj) {
2846 status = dev_get_node_manager(hdev_obj, &hnode_mgr);
2847 if (hnode_mgr == NULL) {
2848 status = -EFAULT;
2849 goto func_end;
2850 }
2851 }
2852
2853 /*
2854 * Enter the critical section. This is needed because
2855 * dcd_get_object_def will ultimately end up calling dbll_open/close,
2856 * which needs to be protected in order to not corrupt the zlib manager
2857 * (COD).
2858 */
2859 mutex_lock(&hnode_mgr->node_mgr_lock);
2860
2861 dcd_node_props.str_create_phase_fxn = NULL;
2862 dcd_node_props.str_execute_phase_fxn = NULL;
2863 dcd_node_props.str_delete_phase_fxn = NULL;
2864 dcd_node_props.str_i_alg_name = NULL;
2865
2866 status = dcd_get_object_def(hnode_mgr->dcd_mgr,
2867 (struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
2868 (struct dcd_genericobj *)&dcd_node_props);
2869
2870 if (!status) {
2871 *node_props = dcd_node_props.ndb_props;
2872 kfree(dcd_node_props.str_create_phase_fxn);
2873
2874 kfree(dcd_node_props.str_execute_phase_fxn);
2875
2876 kfree(dcd_node_props.str_delete_phase_fxn);
2877
2878 kfree(dcd_node_props.str_i_alg_name);
2879 }
2880 /* Leave the critical section, we're done. */
2881 mutex_unlock(&hnode_mgr->node_mgr_lock);
2882 func_end:
2883 return status;
2884 }
2885
/*
 * ======== get_rms_fxns ========
 * Purpose:
 *      Retrieve the RMS functions.
 *      Resolves the DSP-side addresses of the RMS server entry points and
 *      stores them in hnode_mgr->fxn_addrs[], indexed by the RMS* constants.
 */
static int get_rms_fxns(struct node_mgr *hnode_mgr)
{
	s32 i;
	struct dev_object *dev_obj = hnode_mgr->dev_obj;
	int status = 0;

	/* Symbol names, ordered to match the RMS* index constants */
	static char *psz_fxns[NUMRMSFXNS] = {
		"RMS_queryServer",	/* RMSQUERYSERVER */
		"RMS_configureServer",	/* RMSCONFIGURESERVER */
		"RMS_createNode",	/* RMSCREATENODE */
		"RMS_executeNode",	/* RMSEXECUTENODE */
		"RMS_deleteNode",	/* RMSDELETENODE */
		"RMS_changeNodePriority",	/* RMSCHANGENODEPRIORITY */
		"RMS_readMemory",	/* RMSREADMEMORY */
		"RMS_writeMemory",	/* RMSWRITEMEMORY */
		"RMS_copy",	/* RMSCOPY */
	};

	for (i = 0; i < NUMRMSFXNS; i++) {
		status = dev_get_symbol(dev_obj, psz_fxns[i],
					&(hnode_mgr->fxn_addrs[i]));
		if (status) {
			if (status == -ESPIPE) {
				/*
				 * May be loaded dynamically (in the future),
				 * but return an error for now.
				 */
				/* NOTE(review): the loop continues here, so a
				 * later successful lookup overwrites this
				 * -ESPIPE and the missing symbol can go
				 * unreported -- confirm this is intended. */
				dev_dbg(bridge, "%s: RMS function: %s currently"
					" not loaded\n", __func__, psz_fxns[i]);
			} else {
				/* Hard failure: stop and report it */
				dev_dbg(bridge, "%s: Symbol not found: %s "
					"status = 0x%x\n", __func__,
					psz_fxns[i], status);
				break;
			}
		}
	}

	return status;
}
2931
2932 /*
2933 * ======== ovly ========
2934 * Purpose:
2935 * Called during overlay.Sends command to RMS to copy a block of data.
2936 */
2937 static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
2938 u32 ul_num_bytes, u32 mem_space)
2939 {
2940 struct node_object *hnode = (struct node_object *)priv_ref;
2941 struct node_mgr *hnode_mgr;
2942 u32 ul_bytes = 0;
2943 u32 ul_size;
2944 u32 ul_timeout;
2945 int status = 0;
2946 struct bridge_dev_context *hbridge_context;
2947 /* Function interface to Bridge driver*/
2948 struct bridge_drv_interface *intf_fxns;
2949
2950 hnode_mgr = hnode->node_mgr;
2951
2952 ul_size = ul_num_bytes / hnode_mgr->dsp_word_size;
2953 ul_timeout = hnode->timeout;
2954
2955 /* Call new MemCopy function */
2956 intf_fxns = hnode_mgr->intf_fxns;
2957 status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
2958 if (!status) {
2959 status =
2960 (*intf_fxns->brd_mem_copy) (hbridge_context,
2961 dsp_run_addr, dsp_load_addr,
2962 ul_num_bytes, (u32) mem_space);
2963 if (!status)
2964 ul_bytes = ul_num_bytes;
2965 else
2966 pr_debug("%s: failed to copy brd memory, status 0x%x\n",
2967 __func__, status);
2968 } else {
2969 pr_debug("%s: failed to get Bridge context, status 0x%x\n",
2970 __func__, status);
2971 }
2972
2973 return ul_bytes;
2974 }
2975
2976 /*
2977 * ======== mem_write ========
2978 */
2979 static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
2980 u32 ul_num_bytes, u32 mem_space)
2981 {
2982 struct node_object *hnode = (struct node_object *)priv_ref;
2983 struct node_mgr *hnode_mgr;
2984 u16 mem_sect_type;
2985 u32 ul_timeout;
2986 int status = 0;
2987 struct bridge_dev_context *hbridge_context;
2988 /* Function interface to Bridge driver */
2989 struct bridge_drv_interface *intf_fxns;
2990
2991 hnode_mgr = hnode->node_mgr;
2992
2993 ul_timeout = hnode->timeout;
2994 mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
2995
2996 /* Call new MemWrite function */
2997 intf_fxns = hnode_mgr->intf_fxns;
2998 status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
2999 status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
3000 dsp_add, ul_num_bytes, mem_sect_type);
3001
3002 return ul_num_bytes;
3003 }
3004
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
/*
 * ======== node_find_addr ========
 * Purpose:
 *      Scan every node managed by node_mgr for a symbol containing sym_addr
 *      (within offset_range). Returns 0 when found, otherwise the status of
 *      the last lookup (-ENOENT when the list is empty).
 */
int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
		   u32 offset_range, void *sym_addr_output, char *sym_name)
{
	struct node_object *node;
	int status = -ENOENT;

	list_for_each_entry(node, &node_mgr->node_list, list_elem) {
		status = nldr_find_addr(node->nldr_node_obj, sym_addr,
					offset_range, sym_addr_output,
					sym_name);
		if (status)
			continue;

		pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x, %s)\n", __func__,
			 (unsigned int) node_mgr,
			 sym_addr, offset_range,
			 (unsigned int) sym_addr_output, sym_name);
		break;
	}

	return status;
}
#endif
This page took 0.090347 seconds and 6 git commands to generate.