/* -----------------------------------------------------------------------------
 * Copyright (c) 2011 Ozmo Inc
 * Released under the GNU General Public License Version 2 (GPLv2).
 * -----------------------------------------------------------------------------
 */

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

#define OZ_MAX_TX_POOL_SIZE	6

static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);

/*
 * Counts the uncompleted isoc frames submitted to netcard.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);

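/* Services are addressed by application id. Functions that take a u16
 * 'apps' argument treat it as a bitmask with bit (1<<app_id) set for each
 * service of interest; pd->total_apps and pd->paused_apps use the same
 * encoding.
 */
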
/* Application handler functions.
 */
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
	{oz_usb_init,
	oz_usb_term,
	oz_usb_start,
	oz_usb_stop,
	oz_usb_rx,
	oz_usb_heartbeat,
	oz_usb_farewell,
	OZ_APPID_USB},

	{oz_def_app_init,
	oz_def_app_term,
	oz_def_app_start,
	oz_def_app_stop,
	oz_def_app_rx,
	NULL,
	NULL,
	OZ_APPID_UNUSED1},

	{oz_def_app_init,
	oz_def_app_term,
	oz_def_app_start,
	oz_def_app_stop,
	oz_def_app_rx,
	NULL,
	NULL,
	OZ_APPID_UNUSED2},

	{oz_cdev_init,
	oz_cdev_term,
	oz_cdev_start,
	oz_cdev_stop,
	oz_cdev_rx,
	NULL,
	NULL,
	OZ_APPID_SERIAL},
};

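/* Default (no-op) handlers used to populate table slots for application
 * ids that have no real service behind them.
 */
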
static int oz_def_app_init(void)
{
	return 0;
}

static void oz_def_app_term(void)
{
}

static int oz_def_app_start(struct oz_pd *pd, int resume)
{
	return 0;
}

static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}

static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}

/*
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
	switch (state) {
	case OZ_PD_S_IDLE:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
		break;
	case OZ_PD_S_CONNECTED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
		break;
	case OZ_PD_S_STOPPED:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
		break;
	case OZ_PD_S_SLEEP:
		oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
		break;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}

/*
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);
}

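/* Reference counting note: oz_pd_alloc() starts ref_count at 2, so the
 * caller and the protocol's PD list each effectively hold one reference;
 * the PD is only destroyed once both have dropped theirs via oz_pd_put().
 */
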
/*
 * Context: softirq-serialized
 */
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
	struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);

	if (pd) {
		int i;

		atomic_set(&pd->ref_count, 2);
		for (i = 0; i < OZ_APPID_MAX; i++)
			spin_lock_init(&pd->app_lock[i]);
		pd->last_rx_pkt_num = 0xffffffff;
		oz_pd_set_state(pd, OZ_PD_S_IDLE);
		pd->max_tx_size = OZ_MAX_TX_SIZE;
		memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
		if (0 != oz_elt_buf_init(&pd->elt_buff)) {
			kfree(pd);
			return NULL;
		}
		spin_lock_init(&pd->tx_frame_lock);
		INIT_LIST_HEAD(&pd->tx_queue);
		INIT_LIST_HEAD(&pd->farewell_list);
		pd->last_sent_frame = &pd->tx_queue;
		spin_lock_init(&pd->stream_lock);
		INIT_LIST_HEAD(&pd->stream_list);
		tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
			     (unsigned long)pd);
		tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
			     (unsigned long)pd);
		hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
		pd->heartbeat.function = oz_pd_heartbeat_event;
		pd->timeout.function = oz_pd_timeout_event;
	}
	return pd;
}

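/* Teardown order below matters: the timer tasklets are killed first so
 * that no heartbeat/timeout handler can run while the stream list, tx
 * queue, farewell list and tx pool are being freed.
 */
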
/*
 * Context: softirq or process
 */
static void oz_pd_free(struct work_struct *work)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct oz_isoc_stream *st;
	struct oz_farewell *fwell;
	struct oz_pd *pd = container_of(work, struct oz_pd, workitem);

	oz_pd_dbg(pd, ON, "Destroying PD\n");
	/* Disable timer tasklets. */
	tasklet_kill(&pd->heartbeat_tasklet);
	tasklet_kill(&pd->timeout_tasklet);
	/* Delete any streams.
	 */
	e = pd->stream_list.next;
	while (e != &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		e = e->next;
		oz_isoc_stream_free(st);
	}
	/* Free any queued tx frames.
	 */
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		e = e->next;
		if (f->skb)
			kfree_skb(f->skb);
		oz_retire_frame(pd, f);
	}
	oz_elt_buf_term(&pd->elt_buff);
	/* Free any farewells.
	 */
	e = pd->farewell_list.next;
	while (e != &pd->farewell_list) {
		fwell = container_of(e, struct oz_farewell, link);
		e = e->next;
		kfree(fwell);
	}
	/* Deallocate all frames in tx pool.
	 */
	while (pd->tx_pool) {
		e = pd->tx_pool;
		pd->tx_pool = e->next;
		kfree(container_of(e, struct oz_tx_frame, link));
	}
	if (pd->net_dev)
		dev_put(pd->net_dev);
	kfree(pd);
}

/*
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
	if (hrtimer_active(&pd->timeout))
		hrtimer_cancel(&pd->timeout);
	if (hrtimer_active(&pd->heartbeat))
		hrtimer_cancel(&pd->heartbeat);

	INIT_WORK(&pd->workitem, oz_pd_free);
	if (!schedule_work(&pd->workitem))
		oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
}

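/* Starting a service stops at the first start() handler that fails; the
 * bits in pd->total_apps and pd->paused_apps are only updated, under the
 * polling lock, for services that started successfully.
 */
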
/*
 * Context: softirq-serialized
 */
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
	const struct oz_app_if *ai;
	int rc = 0;

	oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			if (ai->start(pd, resume)) {
				rc = -1;
				oz_pd_dbg(pd, ON,
					  "Unable to start service %d\n",
					  ai->app_id);
				break;
			}
			oz_polling_lock_bh();
			pd->total_apps |= (1<<ai->app_id);
			if (resume)
				pd->paused_apps &= ~(1<<ai->app_id);
			oz_polling_unlock_bh();
		}
	}
	return rc;
}

/*
 * Context: softirq or process
 */
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
	const struct oz_app_if *ai;

	oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			oz_polling_lock_bh();
			if (pause) {
				pd->paused_apps |= (1<<ai->app_id);
			} else {
				pd->total_apps &= ~(1<<ai->app_id);
				pd->paused_apps &= ~(1<<ai->app_id);
			}
			oz_polling_unlock_bh();
			ai->stop(pd, pause);
		}
	}
}

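/* The heartbeat gives each running service a periodic chance to do
 * deferred work; a heartbeat handler returns non-zero to ask for another
 * beat, and the hrtimer is cancelled once no service wants more.
 */
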
/*
 * Context: softirq
 */
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
	const struct oz_app_if *ai;
	int more = 0;

	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (ai->heartbeat && (apps & (1<<ai->app_id))) {
			if (ai->heartbeat(pd))
				more = 1;
		}
	}
	if ((!more) && (hrtimer_active(&pd->heartbeat)))
		hrtimer_cancel(&pd->heartbeat);
	if (pd->mode & OZ_F_ISOC_ANYTIME) {
		int count = 8;

		while (count-- && (oz_send_isoc_frame(pd) >= 0))
			;
	}
}

/*
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps;

	oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
	oz_pd_indicate_farewells(pd);
	oz_polling_lock_bh();
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	oz_polling_unlock_bh();
	oz_services_stop(pd, stop_apps, 0);
	oz_polling_lock_bh();
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from PD list. */
	list_del(&pd->link);
	oz_polling_unlock_bh();
	oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
	oz_pd_put(pd);
}

/*
 * Context: softirq
 */
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps;

	oz_polling_lock_bh();
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		oz_polling_unlock_bh();
		return 0;
	}
	if (pd->keep_alive && pd->session_id)
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
	else
		do_stop = 1;

	stop_apps = pd->total_apps;
	oz_polling_unlock_bh();
	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
	}
	return do_stop;
}

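/* Tx frames are recycled through a small free pool (at most
 * OZ_MAX_TX_POOL_SIZE entries) chained through link.next, saving a
 * kmalloc/kfree pair per frame on the fast path.
 */
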
/*
 * Context: softirq
 */
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
	struct oz_tx_frame *f = NULL;

	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool) {
		f = container_of(pd->tx_pool, struct oz_tx_frame, link);
		pd->tx_pool = pd->tx_pool->next;
		pd->tx_pool_count--;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	if (f == NULL)
		f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
	if (f) {
		f->total_size = sizeof(struct oz_hdr);
		INIT_LIST_HEAD(&f->link);
		INIT_LIST_HEAD(&f->elt_list);
	}
	return f;
}

/*
 * Context: softirq or process
 */
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	pd->nb_queued_isoc_frames--;
	list_del_init(&f->link);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
	} else {
		kfree(f);
	}
	oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
	       pd->nb_queued_isoc_frames);
}

/*
 * Context: softirq or process
 */
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
	} else {
		kfree(f);
	}
	spin_unlock_bh(&pd->tx_frame_lock);
}

/*
 * Context: softirq-serialized
 */
static void oz_set_more_bit(struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->control |= OZ_F_MORE_DATA;
}

/*
 * Context: softirq-serialized
 */
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);

	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}

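/* Frame construction happens in two steps: oz_prepare_frame() pulls
 * buffered elements into an oz_tx_frame and queues it, and
 * oz_build_frame() later turns that frame into an sk_buff for the net
 * device.
 */
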
/*
 * Context: softirq
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;

	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;
	f = oz_tx_frame_alloc(pd);
	if (f == NULL)
		return -1;
	f->skb = NULL;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	++pd->last_tx_pkt_num;
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
	if (empty == 0) {
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
				      pd->max_tx_size, &f->elt_list);
	}
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}

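/* The skb is sized as f->total_size (oz header plus elements) plus
 * OZ_ALLOCATED_SPACE(dev) so the lower layers can add their link-layer
 * header and trailer without reallocating.
 */
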
/*
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;

	/* Allocate skb with enough space for the lower layers as well
	 * as the space we need.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	/* Reserve the head room for lower layers.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			    dev->dev_addr, skb->len) < 0)
		goto fail;
	/* Push the tail to the end of the area we are going to copy to.
	 */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
	/* Copy the elements into the frame body.
	 */
	elt = (struct oz_elt *)(oz_hdr+1);
	for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
		struct oz_elt_info *ei;

		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;
fail:
	kfree_skb(skb);
	return NULL;
}

/*
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct list_head *e;
	struct oz_elt_info *ei;

	e = f->elt_list.next;
	while (e != &f->elt_list) {
		ei = container_of(e, struct oz_elt_info, link);
		e = e->next;
		list_del_init(&ei->link);
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
	if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
		oz_trim_elt_pool(&pd->elt_buff);
}

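/* pd->last_sent_frame marks how far down the tx queue transmission has
 * progressed; sent frames stay on the queue until an acknowledgement
 * retires them (see oz_retire_tx_frames()), so unacknowledged frames can
 * be sent again.
 */
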
/*
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
	struct sk_buff *skb;
	struct oz_tx_frame *f;
	struct list_head *e;

	spin_lock(&pd->tx_frame_lock);
	e = pd->last_sent_frame->next;
	if (e == &pd->tx_queue) {
		spin_unlock(&pd->tx_frame_lock);
		return -1;
	}
	f = container_of(e, struct oz_tx_frame, link);

	if (f->skb != NULL) {
		skb = f->skb;
		oz_tx_isoc_free(pd, f);
		spin_unlock(&pd->tx_frame_lock);
		if (more_data)
			oz_set_more_bit(skb);
		oz_set_last_pkt_nb(pd, skb);
		if ((int)atomic_read(&g_submitted_isoc) <
							OZ_MAX_SUBMITTED_ISOC) {
			if (dev_queue_xmit(skb) < 0) {
				oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
				return -1;
			}
			atomic_inc(&g_submitted_isoc);
			oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
			       pd->nb_queued_isoc_frames);
			return 0;
		}
		kfree_skb(skb);
		oz_dbg(TX_FRAMES, "Dropping ISOC Frame>\n");
		return -1;
	}

	pd->last_sent_frame = e;
	skb = oz_build_frame(pd, f);
	spin_unlock(&pd->tx_frame_lock);
	if (!skb)
		return -1;
	if (more_data)
		oz_set_more_bit(skb);
	oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
	if (dev_queue_xmit(skb) < 0)
		return -1;

	return 0;
}

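/* 'backlog' approximates how many frames can be sent in this burst; the
 * isoc modes adjust it below before the send loop drains the queue one
 * frame at a time.
 */
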
/*
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	while (oz_prepare_frame(pd, 0) >= 0)
		backlog++;

	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {
	case OZ_F_ISOC_NO_ELTS: {
		backlog += pd->nb_queued_isoc_frames;
		if (backlog <= 0)
			goto out;
		if (backlog > OZ_MAX_SUBMITTED_ISOC)
			backlog = OZ_MAX_SUBMITTED_ISOC;
		break;
	}
	case OZ_NO_ELTS_ANYTIME: {
		if ((backlog <= 0) && (pd->isoc_sent == 0))
			goto out;
		break;
	}
	default: {
		if (backlog <= 0)
			goto out;
		break;
	}
	}
	while (backlog--) {
		if (oz_send_next_queued_frame(pd, backlog) < 0)
			break;
	}
	return;

out:	oz_prepare_frame(pd, 1);
	oz_send_next_queued_frame(pd, 0);
}

/*
 * Context: softirq
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	struct list_head list;
	int total_size = sizeof(struct oz_hdr);

	INIT_LIST_HEAD(&list);

	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
			      pd->max_tx_size, &list);
	if (list.next == &list)
		return 0;
	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		oz_dbg(ON, "Cannot alloc skb\n");
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			    dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr+1);

	for (e = list.next; e != &list; e = e->next) {
		struct oz_elt_info *ei;

		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}

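/* Packet numbers are compared modulo the OZ_LAST_PN_MASK cycle: the
 * difference (lpn - pkt_num) & OZ_LAST_PN_MASK is at most
 * OZ_LAST_PN_HALF_CYCLE exactly when pkt_num is at or before lpn in the
 * cyclic order, so the loop below retires frames up to and including the
 * last packet number acknowledged by the peer.
 */
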
/*
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct list_head *first = NULL;
	struct list_head *last = NULL;
	u8 diff;
	u32 pkt_num;

	spin_lock(&pd->tx_frame_lock);
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
			break;
		oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
		       pkt_num, pd->nb_queued_frames);
		if (first == NULL)
			first = e;
		last = e;
		e = e->next;
		pd->nb_queued_frames--;
	}
	if (first) {
		last->next->prev = &pd->tx_queue;
		pd->tx_queue.next = last->next;
		last->next = NULL;
	}
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);
	while (first) {
		f = container_of(first, struct oz_tx_frame, link);
		first = first->next;
		oz_retire_frame(pd, f);
	}
}

/*
 * Precondition: stream_lock must be held.
 * Context: softirq
 */
static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
	struct list_head *e;
	struct oz_isoc_stream *st;

	list_for_each(e, &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		if (st->ep_num == ep_num)
			return st;
	}
	return NULL;
}

/*
 * Context: softirq
 */
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st =
		kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);

	if (!st)
		return -ENOMEM;
	st->ep_num = ep_num;
	spin_lock_bh(&pd->stream_lock);
	if (!pd_stream_find(pd, ep_num)) {
		list_add(&st->link, &pd->stream_list);
		st = NULL;
	}
	spin_unlock_bh(&pd->stream_lock);
	kfree(st);
	return 0;
}

/*
 * Context: softirq or process
 */
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
	kfree_skb(st->skb);
	kfree(st);
}

/*
 * Context: softirq
 */
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st)
		list_del(&st->link);
	spin_unlock_bh(&pd->stream_lock);
	if (st)
		oz_isoc_stream_free(st);
	return 0;
}

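/* g_submitted_isoc is incremented when an isoc skb is handed to
 * dev_queue_xmit() and decremented by this destructor when the network
 * driver frees the skb, bounding the number of isoc frames in flight.
 */
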
/*
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
}

/*
 * Context: softirq
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = NULL;
	struct oz_hdr *oz_hdr = NULL;
	int size = 0;

	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = NULL;
		nb_units = st->nb_units;
		st->nb_units = 0;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;
	if (!skb) {
		/* Allocate enough space for max size frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
				GFP_ATOMIC);
		if (skb == NULL)
			return 0;
		/* Reserve the head room for lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* For audio packet set priority to AC_VO */
		skb->priority = 0x7;
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;
	if (++nb_units < pd->ms_per_isoc) {
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;

		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);
		oz.control =
			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr+1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
				    dev->dev_addr, skb->len) < 0)
			goto out;

		skb->destructor = oz_isoc_destructor;
		/* Queue for Xmit if mode is not ANYTIME */
		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
			struct oz_tx_frame *isoc_unit = NULL;
			int nb = pd->nb_queued_isoc_frames;

			if (nb >= pd->isoc_latency) {
				struct list_head *e;
				struct oz_tx_frame *f;

				oz_dbg(TX_FRAMES,
				       "Dropping ISOC Unit nb= %d\n", nb);
				spin_lock(&pd->tx_frame_lock);
				list_for_each(e, &pd->tx_queue) {
					f = container_of(e, struct oz_tx_frame,
							 link);
					if (f->skb != NULL) {
						oz_tx_isoc_free(pd, f);
						break;
					}
				}
				spin_unlock(&pd->tx_frame_lock);
			}
			isoc_unit = oz_tx_frame_alloc(pd);
			if (isoc_unit == NULL)
				goto out;
			isoc_unit->hdr = oz;
			isoc_unit->skb = skb;
			spin_lock_bh(&pd->tx_frame_lock);
			list_add_tail(&isoc_unit->link, &pd->tx_queue);
			pd->nb_queued_isoc_frames++;
			spin_unlock_bh(&pd->tx_frame_lock);
			oz_dbg(TX_FRAMES,
			       "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
			       pd->nb_queued_isoc_frames, pd->nb_queued_frames);
			return 0;
		}

		/* In ANYTIME mode Xmit unit immediately */
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			if (dev_queue_xmit(skb) < 0)
				return -1;
			return 0;
		}

out:
		kfree_skb(skb);
		return -1;
	}
	return 0;
}

/*
 * Context: process
 */
void oz_apps_init(void)
{
	int i;

	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].init)
			g_app_if[i].init();
}

/*
 * Context: process
 */
void oz_apps_term(void)
{
	int i;

	/* Terminate all the apps. */
	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].term)
			g_app_if[i].term();
}

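/* Received elements are dispatched by application id; ids are 1-based on
 * the wire, hence the g_app_if[app_id-1] lookup below.
 */
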
/*
 * Context: softirq-serialized
 */
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
	const struct oz_app_if *ai;

	if (app_id == 0 || app_id > OZ_APPID_MAX)
		return;
	ai = &g_app_if[app_id-1];
	ai->rx(pd, elt);
}

/*
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
	struct oz_farewell *f;
	const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];

	while (1) {
		oz_polling_lock_bh();
		if (list_empty(&pd->farewell_list)) {
			oz_polling_unlock_bh();
			break;
		}
		f = list_first_entry(&pd->farewell_list,
				     struct oz_farewell, link);
		list_del_init(&f->link);
		oz_polling_unlock_bh();
		if (ai->farewell)
			ai->farewell(pd, f->ep_num, f->report, f->len);
		kfree(f);
	}
}