/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

#define OZ_MAX_TX_POOL_SIZE	6

static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);

/*
 * Counts the uncompleted isoc frames submitted to netcard.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);

/* Application handler functions.
 */
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
	{oz_usb_init,
	 oz_usb_term,
	 oz_usb_start,
	 oz_usb_stop,
	 oz_usb_rx,
	 oz_usb_heartbeat,
	 oz_usb_farewell,
	 OZ_APPID_USB},

	{oz_def_app_init,
	 oz_def_app_term,
	 oz_def_app_start,
	 oz_def_app_stop,
	 oz_def_app_rx,
	 NULL,
	 NULL,
	 OZ_APPID_UNUSED1},

	{oz_def_app_init,
	 oz_def_app_term,
	 oz_def_app_start,
	 oz_def_app_stop,
	 oz_def_app_rx,
	 NULL,
	 NULL,
	 OZ_APPID_UNUSED2},

	{oz_cdev_init,
	 oz_cdev_term,
	 oz_cdev_start,
	 oz_cdev_stop,
	 oz_cdev_rx,
	 NULL,
	 NULL,
	 OZ_APPID_SERIAL},
};
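
/*
 * Dispatch note (inferred from oz_handle_app_elt() below): application
 * ids are 1-based, so g_app_if[app_id - 1] handles elements tagged with
 * app_id.  The UNUSED slots keep the table dense so that lookup stays a
 * simple index rather than a search.
 */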

/*
 * Context: process
 */
static int oz_def_app_init(void)
{
	return 0;
}

/*
 * Context: process
 */
static void oz_def_app_term(void)
{
}

/*
 * Context: softirq
 */
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
	return 0;
}

/*
 * Context: softirq
 */
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}

/*
 * Context: softirq
 */
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}

/*
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
	switch (state) {
	case OZ_PD_S_IDLE:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
		break;
	case OZ_PD_S_CONNECTED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
		break;
	case OZ_PD_S_STOPPED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
		break;
	case OZ_PD_S_SLEEP:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
		break;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}

/*
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);
}
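
/*
 * Refcount lifecycle, as far as this file shows: oz_pd_alloc() starts
 * the count at 2, apparently covering the protocol layer's PD list and
 * the caller's reference; oz_pd_stop() drops the list reference when
 * the PD is removed from that list.
 */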

/*
 * Context: softirq-serialized
 */
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
	struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);

	if (pd) {
		int i;

		atomic_set(&pd->ref_count, 2);
		for (i = 0; i < OZ_APPID_MAX; i++)
			spin_lock_init(&pd->app_lock[i]);
		pd->last_rx_pkt_num = 0xffffffff;
		oz_pd_set_state(pd, OZ_PD_S_IDLE);
		pd->max_tx_size = OZ_MAX_TX_SIZE;
		memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
		if (oz_elt_buf_init(&pd->elt_buff)) {
			kfree(pd);
			return NULL;
		}
		spin_lock_init(&pd->tx_frame_lock);
		INIT_LIST_HEAD(&pd->tx_queue);
		INIT_LIST_HEAD(&pd->farewell_list);
		pd->last_sent_frame = &pd->tx_queue;
		spin_lock_init(&pd->stream_lock);
		INIT_LIST_HEAD(&pd->stream_list);
		tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
			     (unsigned long)pd);
		tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
			     (unsigned long)pd);
		hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		pd->heartbeat.function = oz_pd_heartbeat_event;
		pd->timeout.function = oz_pd_timeout_event;
	}
	return pd;
}

/*
 * Context: softirq or process
 */
static void oz_pd_free(struct work_struct *work)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct oz_isoc_stream *st;
	struct oz_farewell *fwell;
	struct oz_pd *pd;

	pd = container_of(work, struct oz_pd, workitem);
	oz_pd_dbg(pd, ON, "Destroying PD\n");
	/* Disable timer tasklets. */
	tasklet_kill(&pd->heartbeat_tasklet);
	tasklet_kill(&pd->timeout_tasklet);
	/* Delete any streams.
	 */
	e = pd->stream_list.next;
	while (e != &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		e = e->next;
		oz_isoc_stream_free(st);
	}
	/* Free any queued tx frames.
	 */
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		e = e->next;
		if (f->skb != NULL)
			kfree_skb(f->skb);
		oz_retire_frame(pd, f);
	}
	oz_elt_buf_term(&pd->elt_buff);
	/* Free any farewells.
	 */
	e = pd->farewell_list.next;
	while (e != &pd->farewell_list) {
		fwell = container_of(e, struct oz_farewell, link);
		e = e->next;
		kfree(fwell);
	}
	/* Deallocate all frames in tx pool.
	 */
	while (pd->tx_pool) {
		e = pd->tx_pool;
		pd->tx_pool = e->next;
		kfree(container_of(e, struct oz_tx_frame, link));
	}
	if (pd->net_dev)
		dev_put(pd->net_dev);
	kfree(pd);
}

/*
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
	if (hrtimer_active(&pd->timeout))
		hrtimer_cancel(&pd->timeout);
	if (hrtimer_active(&pd->heartbeat))
		hrtimer_cancel(&pd->heartbeat);

	INIT_WORK(&pd->workitem, oz_pd_free);
	if (!schedule_work(&pd->workitem))
		oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
}

/*
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
	const struct oz_app_if *ai;
	int rc = 0;

	oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			if (ai->start(pd, resume)) {
				rc = -1;
				oz_pd_dbg(pd, ON,
					  "Unable to start service %d\n",
					  ai->app_id);
				break;
			}
			oz_polling_lock_bh();
			pd->total_apps |= (1<<ai->app_id);
			if (resume)
				pd->paused_apps &= ~(1<<ai->app_id);
			oz_polling_unlock_bh();
		}
	}
	return rc;
}

/*
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
	const struct oz_app_if *ai;

	oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			oz_polling_lock_bh();
			if (pause) {
				pd->paused_apps |= (1<<ai->app_id);
			} else {
				pd->total_apps &= ~(1<<ai->app_id);
				pd->paused_apps &= ~(1<<ai->app_id);
			}
			oz_polling_unlock_bh();
			ai->stop(pd, pause);
		}
	}
}

/*
 * Context: softirq
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
	const struct oz_app_if *ai;
	int more = 0;

	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (ai->heartbeat && (apps & (1<<ai->app_id))) {
			if (ai->heartbeat(pd))
				more = 1;
		}
	}
	if ((!more) && (hrtimer_active(&pd->heartbeat)))
		hrtimer_cancel(&pd->heartbeat);
	if (pd->mode & OZ_F_ISOC_ANYTIME) {
		int count = 8;

		while (count-- && (oz_send_isoc_frame(pd) >= 0))
			;
	}
}
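
/*
 * Heartbeat note: in ANYTIME mode each beat flushes at most eight
 * pending isoc frames, and the heartbeat timer itself is cancelled
 * once no application asks to be polled again.
 */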

/*
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps;

	oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
	oz_pd_indicate_farewells(pd);
	oz_polling_lock_bh();
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	oz_polling_unlock_bh();
	oz_services_stop(pd, stop_apps, 0);
	oz_polling_lock_bh();
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from PD list. */
	list_del(&pd->link);
	oz_polling_unlock_bh();
	oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
	oz_pd_put(pd);
}

/*
 * Context: softirq
 */
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps;

	oz_polling_lock_bh();
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		oz_polling_unlock_bh();
		return 0;
	}
	if (pd->keep_alive && pd->session_id)
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
	else
		do_stop = 1;

	stop_apps = pd->total_apps;
	oz_polling_unlock_bh();
	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
	}
	return do_stop;
}

/*
 * Context: softirq
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
	struct oz_tx_frame *f = NULL;

	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool) {
		f = container_of(pd->tx_pool, struct oz_tx_frame, link);
		pd->tx_pool = pd->tx_pool->next;
		pd->tx_pool_count--;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	if (f == NULL)
		f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
	if (f) {
		f->total_size = sizeof(struct oz_hdr);
		INIT_LIST_HEAD(&f->link);
		INIT_LIST_HEAD(&f->elt_list);
	}
	return f;
}

/*
 * Context: softirq or process
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	pd->nb_queued_isoc_frames--;
	list_del_init(&f->link);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
	} else {
		kfree(f);
	}
	oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
	       pd->nb_queued_isoc_frames);
}

/*
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
		f = NULL;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	kfree(f);
}
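
/*
 * The tx pool above is a singly linked free list threaded through each
 * frame's own link.next pointer, so pooled frames need no extra
 * storage.  The pool is capped at OZ_MAX_TX_POOL_SIZE entries; beyond
 * that, frames are simply freed.
 */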

/*
 * Context: softirq-serialized
 */
static void oz_set_more_bit(struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->control |= OZ_F_MORE_DATA;
}

/*
 * Context: softirq-serialized
 */
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}

/*
 * Context: softirq
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;

	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;
	f = oz_tx_frame_alloc(pd);
	if (f == NULL)
		return -1;
	f->skb = NULL;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	++pd->last_tx_pkt_num;
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
	if (empty == 0) {
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
				      pd->max_tx_size, &f->elt_list);
	}
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}
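
/*
 * An "empty" frame still carries a valid header and packet number;
 * oz_send_queued_frames() appears to use it as a bare acknowledgement
 * frame when there is nothing else to transmit.
 */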

/*
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;

	/* Allocate skb with enough space for the lower layers as well
	 * as the space we need.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	/* Reserve the head room for lower layers.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			    dev->dev_addr, skb->len) < 0)
		goto fail;
	/* Push the tail to the end of the area we are going to copy to.
	 */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
	/* Copy the elements into the frame body.
	 */
	elt = (struct oz_elt *)(oz_hdr+1);
	for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
		struct oz_elt_info *ei;

		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;
fail:
	kfree_skb(skb);
	return NULL;
}

/*
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct list_head *e;
	struct oz_elt_info *ei;

	e = f->elt_list.next;
	while (e != &f->elt_list) {
		ei = container_of(e, struct oz_elt_info, link);
		e = e->next;
		list_del_init(&ei->link);
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
	if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
		oz_trim_elt_pool(&pd->elt_buff);
}

/*
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
	struct sk_buff *skb;
	struct oz_tx_frame *f;
	struct list_head *e;

	spin_lock(&pd->tx_frame_lock);
	e = pd->last_sent_frame->next;
	if (e == &pd->tx_queue) {
		spin_unlock(&pd->tx_frame_lock);
		return -1;
	}
	f = container_of(e, struct oz_tx_frame, link);

	if (f->skb != NULL) {
		skb = f->skb;
		oz_tx_isoc_free(pd, f);
		spin_unlock(&pd->tx_frame_lock);
		if (more_data)
			oz_set_more_bit(skb);
		oz_set_last_pkt_nb(pd, skb);
		if ((int)atomic_read(&g_submitted_isoc) <
		    OZ_MAX_SUBMITTED_ISOC) {
			/* Count the frame before handing it to the stack so
			 * the destructor's decrement is always balanced.
			 */
			atomic_inc(&g_submitted_isoc);
			if (dev_queue_xmit(skb) < 0) {
				oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
				return -1;
			}
			oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
			       pd->nb_queued_isoc_frames);
			return 0;
		} else {
			/* Never submitted, so don't let the destructor
			 * decrement g_submitted_isoc.
			 */
			skb->destructor = NULL;
			kfree_skb(skb);
			oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
			return -1;
		}
	}

	pd->last_sent_frame = e;
	skb = oz_build_frame(pd, f);
	spin_unlock(&pd->tx_frame_lock);
	if (!skb)
		return -1;
	if (more_data)
		oz_set_more_bit(skb);
	oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
	if (dev_queue_xmit(skb) < 0)
		return -1;

	return 0;
}
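
/*
 * Queued entries come in two flavours: isoc entries arrive with a
 * prebuilt skb (f->skb != NULL) and are consumed here, while element
 * frames keep their element list and are rebuilt by oz_build_frame()
 * on each pass until oz_retire_tx_frames() releases them after the
 * peer acknowledges.
 */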

/*
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	while (oz_prepare_frame(pd, 0) >= 0)
		backlog++;

	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {

	case OZ_F_ISOC_NO_ELTS: {
		backlog += pd->nb_queued_isoc_frames;
		if (backlog <= 0)
			goto out;
		if (backlog > OZ_MAX_SUBMITTED_ISOC)
			backlog = OZ_MAX_SUBMITTED_ISOC;
		break;
	}
	case OZ_NO_ELTS_ANYTIME: {
		if ((backlog <= 0) && (pd->isoc_sent == 0))
			goto out;
		break;
	}
	default: {
		if (backlog <= 0)
			goto out;
		break;
	}
	}
	while (backlog--) {
		if (oz_send_next_queued_frame(pd, backlog) < 0)
			break;
	}
	return;

out:
	oz_prepare_frame(pd, 1);
	oz_send_next_queued_frame(pd, 0);
}

/*
 * Context: softirq
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	struct list_head list;
	int total_size = sizeof(struct oz_hdr);

	INIT_LIST_HEAD(&list);

	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
			      pd->max_tx_size, &list);
	if (list.next == &list)
		return 0;
	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		oz_dbg(ON, "Cannot alloc skb\n");
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr+1);

	for (e = list.next; e != &list; e = e->next) {
		struct oz_elt_info *ei;

		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}

/*
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct list_head *first = NULL;
	struct list_head *last = NULL;
	u8 diff;
	u32 pkt_num;

	spin_lock(&pd->tx_frame_lock);
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
			break;
		oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
		       pkt_num, pd->nb_queued_frames);
		if (first == NULL)
			first = e;
		last = e;
		e = e->next;
		pd->nb_queued_frames--;
	}
	if (first) {
		last->next->prev = &pd->tx_queue;
		pd->tx_queue.next = last->next;
		last->next = NULL;
	}
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);
	while (first) {
		f = container_of(first, struct oz_tx_frame, link);
		first = first->next;
		oz_retire_frame(pd, f);
	}
}
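
/*
 * Worked example of the ack window above, assuming OZ_LAST_PN_MASK is
 * 0x1f and OZ_LAST_PN_HALF_CYCLE is 16 (both defined in ozprotocol.h,
 * not shown here): with lpn == 4, a queued frame with pkt_num 0x22
 * gives diff = (4 - 2) & 0x1f = 2 <= 16 and is retired, while pkt_num
 * 5 gives diff = (4 - 5) & 0x1f = 31 > 16, so it and everything queued
 * after it remain outstanding.
 */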

/*
 * Precondition: stream_lock must be held.
 * Context: softirq
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
	struct list_head *e;
	struct oz_isoc_stream *st;

	list_for_each(e, &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		if (st->ep_num == ep_num)
			return st;
	}
	return NULL;
}

/*
 * Context: softirq
 */
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st =
		kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);

	if (!st)
		return -ENOMEM;
	st->ep_num = ep_num;
	spin_lock_bh(&pd->stream_lock);
	if (!pd_stream_find(pd, ep_num)) {
		list_add(&st->link, &pd->stream_list);
		st = NULL;
	}
	spin_unlock_bh(&pd->stream_lock);
	kfree(st);
	return 0;
}
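
/*
 * The stream is allocated before stream_lock is taken so no allocation
 * happens under the spinlock.  If an entry for ep_num already exists,
 * the new one is discarded; when the insert succeeds, st is NULL and
 * kfree(NULL) is a no-op.
 */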

/*
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
	kfree_skb(st->skb);
	kfree(st);
}

/*
 * Context: softirq
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st)
		list_del(&st->link);
	spin_unlock_bh(&pd->stream_lock);
	if (st)
		oz_isoc_stream_free(st);
	return 0;
}

/*
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
}
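
/*
 * The destructor runs when the network layer frees the skb, balancing
 * the atomic_inc() done at submission time; this is how
 * g_submitted_isoc tracks isoc frames still owned by the netcard.
 */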

/*
 * Context: softirq
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = NULL;
	struct oz_hdr *oz_hdr = NULL;
	int size = 0;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = NULL;
		nb_units = st->nb_units;
		st->nb_units = 0;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;
	if (!skb) {
		/* Allocate enough space for max size frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
				GFP_ATOMIC);
		if (skb == NULL)
			return 0;
		/* Reserve the head room for lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* For audio packet set priority to AC_VO */
		skb->priority = 0x7;
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;
	if (++nb_units < pd->ms_per_isoc) {
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;

		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);
		oz.control =
			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr+1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
				    dev->dev_addr, skb->len) < 0)
			goto out;

		skb->destructor = oz_isoc_destructor;
		/* Queue for Xmit if mode is not ANYTIME. */
		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
			struct oz_tx_frame *isoc_unit = NULL;
			int nb = pd->nb_queued_isoc_frames;

			if (nb >= pd->isoc_latency) {
				struct list_head *e;
				struct oz_tx_frame *f;

				oz_dbg(TX_FRAMES,
				       "Dropping ISOC Unit nb= %d\n", nb);
				spin_lock(&pd->tx_frame_lock);
				list_for_each(e, &pd->tx_queue) {
					f = container_of(e, struct oz_tx_frame,
							 link);
					if (f->skb != NULL) {
						oz_tx_isoc_free(pd, f);
						break;
					}
				}
				spin_unlock(&pd->tx_frame_lock);
			}
			isoc_unit = oz_tx_frame_alloc(pd);
			if (isoc_unit == NULL)
				goto out;
			isoc_unit->hdr = oz;
			isoc_unit->skb = skb;
			spin_lock_bh(&pd->tx_frame_lock);
			list_add_tail(&isoc_unit->link, &pd->tx_queue);
			pd->nb_queued_isoc_frames++;
			spin_unlock_bh(&pd->tx_frame_lock);
			oz_dbg(TX_FRAMES,
			       "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
			       pd->nb_queued_isoc_frames, pd->nb_queued_frames);
			return 0;
		}

		/* In ANYTIME mode Xmit unit immediately. */
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			if (dev_queue_xmit(skb) < 0)
				return -1;
			else
				return 0;
		}

out:
		kfree_skb(skb);
		return -1;
	}
	return 0;
}
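
/*
 * Isoc units accumulate one per call in the per-stream skb; only every
 * pd->ms_per_isoc calls is the frame stamped with the running
 * frame_number and then either queued behind the tx queue (triggered
 * mode) or handed straight to the netcard (ANYTIME mode).
 */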

/*
 * Context: process
 */
void oz_apps_init(void)
{
	int i;

	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].init)
			g_app_if[i].init();
}

/*
 * Context: process
 */
void oz_apps_term(void)
{
	int i;

	/* Terminate all the apps. */
	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].term)
			g_app_if[i].term();
}

/*
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
	const struct oz_app_if *ai;

	if (app_id == 0 || app_id > OZ_APPID_MAX)
		return;
	ai = &g_app_if[app_id-1];
	ai->rx(pd, elt);
}
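
/*
 * A note inferred from the loop below: the polling lock is dropped
 * before each farewell callback so the handler never runs under a
 * bottom-half spinlock, and the list is re-checked from the head on
 * every iteration.
 */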

/*
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
	struct oz_farewell *f;
	const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];

	while (1) {
		oz_polling_lock_bh();
		if (list_empty(&pd->farewell_list)) {
			oz_polling_unlock_bh();
			break;
		}
		f = list_first_entry(&pd->farewell_list,
				     struct oz_farewell, link);
		list_del(&f->link);
		oz_polling_unlock_bh();
		if (ai->farewell)
			ai->farewell(pd, f->ep_num, f->report, f->len);
		kfree(f);
	}
}