drivers/staging/msm/mdp_ppp_dq.c
/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
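
/*
 * Display Queue (DQ) handling for the MDP PPP block.
 *
 * Register writes issued through mdp_ppp_outdw() are collected into the
 * current display job (djob) as <reg, val> ROI commands.  Jobs are queued
 * on mdp_ppp_dq and programmed into the PPP engine one at a time; finished
 * jobs are moved to mdp_ppp_djob_clnrq, where a worker reclaims them.
 */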

#include "mdp.h"

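/* Bit 0 of these flags tracks whether INT_MDP is currently enabled
 * (intr_flag) and whether the PPP engine is busy (busy_flag); both are
 * manipulated with atomic bitops.
 */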
static boolean mdp_ppp_intr_flag = FALSE;
static boolean mdp_ppp_busy_flag = FALSE;

/* Queue to keep track of the completed jobs for cleaning */
static LIST_HEAD(mdp_ppp_djob_clnrq);
static DEFINE_SPINLOCK(mdp_ppp_djob_clnrq_lock);

/* Worker to clean up Display Jobs */
static struct workqueue_struct *mdp_ppp_djob_clnr;

/* Display Queue (DQ) for MDP PPP Block */
static LIST_HEAD(mdp_ppp_dq);
static DEFINE_SPINLOCK(mdp_ppp_dq_lock);

/* Current Display Job for MDP PPP */
static struct mdp_ppp_djob *curr_djob;

/* Track the return code of the last operation */
static int mdp_ppp_ret_code;

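/* Report any error recorded while queueing ROI commands in mdp_ppp_outdw() */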
inline int mdp_ppp_get_ret_code(void)
{
        return mdp_ppp_ret_code;
}

/* Push a <Reg, Val> pair into the DQ (if a display job is active) to
 * program the MDP PPP Block later */
inline void mdp_ppp_outdw(uint32_t addr, uint32_t data)
{
        if (curr_djob) {

                /* get the last node of the list. */
                struct mdp_ppp_roi_cmd_set *node =
                        list_entry(curr_djob->roi_cmd_list.prev,
                                struct mdp_ppp_roi_cmd_set, node);

                /* If this node is already full, create a new one and add it
                 * to the list (roi_cmd_list).
                 */
                if (node->ncmds == MDP_PPP_ROI_NODE_SIZE) {
                        node = kmalloc(sizeof(struct mdp_ppp_roi_cmd_set),
                                GFP_KERNEL);
                        if (!node) {
                                printk(KERN_ERR
                                        "MDP_PPP: not enough memory.\n");
                                mdp_ppp_ret_code = -ENOMEM;
                                return;
                        }

                        /* no ROI commands initially */
                        node->ncmds = 0;

                        /* add one node to roi_cmd_list. */
                        list_add_tail(&node->node, &curr_djob->roi_cmd_list);
                }

                /* register ROI commands */
                node->cmd[node->ncmds].reg = addr;
                node->cmd[node->ncmds].val = data;
                node->ncmds++;
        } else
                /* program the MDP PPP block now */
                outpdw(addr, data);
}

/* Initialize DQ */
inline void mdp_ppp_dq_init(void)
{
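        /* single-threaded workqueue: display jobs are cleaned up one at a
         * time, in submission order */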
        mdp_ppp_djob_clnr = create_singlethread_workqueue("MDPDJobClnrThrd");
}

/* Release the resources of a display job (DJob). */
static void mdp_ppp_del_djob(struct mdp_ppp_djob *job)
{
        struct mdp_ppp_roi_cmd_set *node, *tmp;

        /* release mem */
        mdp_ppp_put_img(job->p_src_file, job->p_dst_file);

        /* release roi_cmd_list */
        list_for_each_entry_safe(node, tmp, &job->roi_cmd_list, node) {
                list_del(&node->node);
                kfree(node);
        }

        /* release job struct */
        kfree(job);
}

/* Work function to reclaim resources once a display job is done */
static void mdp_ppp_djob_cleaner(struct work_struct *work)
{
        struct mdp_ppp_djob *job;

        MDP_PPP_DEBUG_MSG("mdp ppp display job cleaner started\n");

        /* cleanup display job */
        job = container_of(work, struct mdp_ppp_djob, cleaner.work);
        if (likely(work && job))
                mdp_ppp_del_djob(job);
}

/* Create a new Display Job (DJob) */
inline struct mdp_ppp_djob *mdp_ppp_new_djob(void)
{
        struct mdp_ppp_djob *job;
        struct mdp_ppp_roi_cmd_set *node;

        /* create a new djob */
        job = kmalloc(sizeof(struct mdp_ppp_djob), GFP_KERNEL);
        if (!job)
                return NULL;

        /* add the first node to curr_djob->roi_cmd_list */
        node = kmalloc(sizeof(struct mdp_ppp_roi_cmd_set), GFP_KERNEL);
        if (!node) {
                kfree(job);
                return NULL;
        }

        /* Make this the current djob so we can keep track of it.  curr_djob
         * is not used in the async path, i.e. no synchronization is needed.
         *
         * It must not carry any references from a previous djob.
         */
        BUG_ON(curr_djob);
        curr_djob = job;
        INIT_LIST_HEAD(&curr_djob->roi_cmd_list);

        /* no ROI commands initially */
        node->ncmds = 0;
        INIT_LIST_HEAD(&node->node);
        list_add_tail(&node->node, &curr_djob->roi_cmd_list);

        /* register this djob with the djob cleaner;
         * initializes the 'work' data struct
         */
        INIT_DELAYED_WORK(&curr_djob->cleaner, mdp_ppp_djob_cleaner);
        INIT_LIST_HEAD(&curr_djob->entry);

        curr_djob->p_src_file = NULL;
        curr_djob->p_dst_file = NULL;

        return job;
}

/* Undo the effect of mdp_ppp_new_djob() */
inline void mdp_ppp_clear_curr_djob(void)
{
        if (likely(curr_djob)) {
                mdp_ppp_del_djob(curr_djob);
                curr_djob = NULL;
        }
}

/* Clean up dirty djobs */
static void mdp_ppp_flush_dirty_djobs(void *cond)
{
        unsigned long flags;
        struct mdp_ppp_djob *job;

        /* Flush the jobs from the djob cleaner queue */
        while (cond && test_bit(0, (unsigned long *)cond)) {

                /* Until we are done with the cleanup queue */
                spin_lock_irqsave(&mdp_ppp_djob_clnrq_lock, flags);
                if (list_empty(&mdp_ppp_djob_clnrq)) {
                        spin_unlock_irqrestore(&mdp_ppp_djob_clnrq_lock,
                                flags);
                        break;
                }

                MDP_PPP_DEBUG_MSG("flushing djobs ... loop\n");

                /* Retrieve the job that needs to be cleaned */
                job = list_entry(mdp_ppp_djob_clnrq.next,
                        struct mdp_ppp_djob, entry);
                list_del_init(&job->entry);
                spin_unlock_irqrestore(&mdp_ppp_djob_clnrq_lock, flags);

                /* Keep mem state coherent */
                msm_fb_ensure_mem_coherency_after_dma(job->info, &job->req, 1);

                /* Schedule the job for cleanup;
                 * a separate worker thread does this */
                queue_delayed_work(mdp_ppp_djob_clnr, &job->cleaner,
                        mdp_timer_duration);
        }
}

/* If the MDP PPP engine is busy, wait until it is available again */
void mdp_ppp_wait(void)
{
        unsigned long flags;
        int cond = 1;
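        /* cond stays set, so the final flush below drains the cleanup
         * queue completely before returning */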

        /* keep flushing dirty djobs as long as the MDP PPP engine is busy */
        mdp_ppp_flush_dirty_djobs(&mdp_ppp_busy_flag);

        /* block if the MDP PPP engine is still busy */
        spin_lock_irqsave(&mdp_ppp_dq_lock, flags);
        if (test_bit(0, (unsigned long *)&mdp_ppp_busy_flag)) {

                /* prepare for the wakeup event */
                test_and_set_bit(0, (unsigned long *)&mdp_ppp_waiting);
                INIT_COMPLETION(mdp_ppp_comp);
                spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);

                /* block (killable) until the engine is available */
                MDP_PPP_DEBUG_MSG("waiting for mdp...\n");
                wait_for_completion_killable(&mdp_ppp_comp);

                /* if the MDP PPP engine is now free,
                 * disable INT_MDP if it is enabled
                 */
                spin_lock_irqsave(&mdp_ppp_dq_lock, flags);
                if (!test_bit(0, (unsigned long *)&mdp_ppp_busy_flag) &&
                    test_and_clear_bit(0,
                        (unsigned long *)&mdp_ppp_intr_flag))
                        mdp_disable_irq(MDP_PPP_TERM);
        }
        spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);

        /* flush remaining dirty djobs, if any */
        mdp_ppp_flush_dirty_djobs(&cond);
}

/* Program the MDP PPP block to process this ROI */
static void mdp_ppp_process_roi(struct list_head *roi_cmd_list)
{
        struct mdp_ppp_roi_cmd_set *node;

        /* program the PPP engine with the registered ROI commands */
        list_for_each_entry(node, roi_cmd_list, node) {
                int i;

                for (i = 0; i < node->ncmds; i++) {
                        MDP_PPP_DEBUG_MSG("%d: reg: 0x%x val: 0x%x\n",
                                i, node->cmd[i].reg, node->cmd[i].val);
                        outpdw(node->cmd[i].reg, node->cmd[i].val);
                }
        }

        /* kick off the MDP PPP engine */
        MDP_PPP_DEBUG_MSG("kicking off mdp\n");
        outpdw(MDP_BASE + 0x30, 0x1000);
}

/* Submit this display job to the MDP PPP engine */
static void mdp_ppp_dispatch_djob(struct mdp_ppp_djob *job)
{
        /* enable INT_MDP if disabled */
        if (!test_and_set_bit(0, (unsigned long *)&mdp_ppp_intr_flag))
                mdp_enable_irq(MDP_PPP_TERM);

        /* turn on the PPP and CMD blocks */
        mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
        mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_ON, FALSE);

        /* process this ROI */
        mdp_ppp_process_roi(&job->roi_cmd_list);
}

/* Enqueue this display job to be cleaned up later in mdp_ppp_djob_done() */
static inline void mdp_ppp_enqueue_djob(struct mdp_ppp_djob *job)
{
        unsigned long flags;

        spin_lock_irqsave(&mdp_ppp_dq_lock, flags);
        list_add_tail(&job->entry, &mdp_ppp_dq);
        spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);
}

/* Enqueue the display job for later cleanup, then dispatch it immediately
 * if the MDP PPP engine is free */
void mdp_ppp_process_curr_djob(void)
{
        /* enqueue djob */
        mdp_ppp_enqueue_djob(curr_djob);

        /* dispatch now if the MDP PPP engine is free */
        if (!test_and_set_bit(0, (unsigned long *)&mdp_ppp_busy_flag))
                mdp_ppp_dispatch_djob(curr_djob);

        /* done with the current djob */
        curr_djob = NULL;
}

/* Called from mdp_isr: clean up the finished job and start the next one
 * if available, otherwise mark the MDP PPP engine free */
void mdp_ppp_djob_done(void)
{
        struct mdp_ppp_djob *curr, *next;
        unsigned long flags;

        /* dequeue current */
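        /* (the DQ should be non-empty here - the PPP completion interrupt
         *  only fires after a djob has been dispatched) */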
        spin_lock_irqsave(&mdp_ppp_dq_lock, flags);
        curr = list_entry(mdp_ppp_dq.next, struct mdp_ppp_djob, entry);
        list_del_init(&curr->entry);
        spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);

        /* cleanup current - enqueue in the djob clnr queue */
        spin_lock_irqsave(&mdp_ppp_djob_clnrq_lock, flags);
        list_add_tail(&curr->entry, &mdp_ppp_djob_clnrq);
        spin_unlock_irqrestore(&mdp_ppp_djob_clnrq_lock, flags);

        /* grab next pending */
        spin_lock_irqsave(&mdp_ppp_dq_lock, flags);
        if (!list_empty(&mdp_ppp_dq)) {
                next = list_entry(mdp_ppp_dq.next, struct mdp_ppp_djob,
                        entry);
                spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);

                /* process next in the queue */
                mdp_ppp_process_roi(&next->roi_cmd_list);
        } else {
                /* no pending display job */
                spin_unlock_irqrestore(&mdp_ppp_dq_lock, flags);

                /* turn off PPP and CMD blocks - "in_isr" is TRUE */
                mdp_pipe_ctrl(MDP_PPP_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);
                mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, TRUE);

                /* notify if waiting */
                if (test_and_clear_bit(0, (unsigned long *)&mdp_ppp_waiting))
                        complete(&mdp_ppp_comp);

                /* set free */
                test_and_clear_bit(0, (unsigned long *)&mdp_ppp_busy_flag);
        }
}