/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Author:  Daniel Martensson / daniel.martensson@stericsson.com
 *	    Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
 * License terms: GNU General Public License (GPL) version 2.
 */
#define pr_fmt(fmt) KBUILD_MODNAME fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/if_arp.h>
#include <linux/timer.h>
#include <net/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_hsi.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
MODULE_DESCRIPTION("CAIF HSI driver");
/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
				(((pow)-((x)&((pow)-1)))))
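/*
 * Worked example (illustrative): with a power-of-two alignment of 4,
 * PAD_POW2(5, 4) evaluates to (4 - (5 & 3)) = 3 padding bytes, rounding
 * 5 up to the next multiple of 4, while PAD_POW2(8, 4) yields 0 because
 * 8 is already aligned.
 */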
static const struct cfhsi_config hsi_default_config = {

	/* Inactivity timeout on HSI, ms */
	.inactivity_timeout = HZ,

	/* Aggregation timeout (ms) of zero means no aggregation is done*/
	.aggregation_timeout = 1,

	/*
	 * HSI link layer flow-control thresholds.
	 * Threshold values for the HSI packet queue. Flow-control will be
	 * asserted when the number of packets exceeds q_high_mark. It will
	 * not be de-asserted before the number of packets drops below
	 * q_low_mark.
	 * Warning: A high threshold value might increase throughput but it
	 * will at the same time prevent channel prioritization and increase
	 * the risk of flooding the modem. The high threshold should be above
	 * the low.
	 */
	.q_high_mark = 100,
	.q_low_mark = 50,

	/*
	 * HSI padding options.
	 * Warning: must be a base of 2 (& operation used) and can not be zero !
	 */
	.head_align = 4,
	.tail_align = 4,
};
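/*
 * Illustrative flow-control scenario, assuming the default thresholds
 * above: once more than q_high_mark packets sit in the TX queues,
 * cfhsi_xmit() signals flow-off towards the CAIF stack; flow-on is not
 * signalled again until cfhsi_tx_done() sees the queues drain to
 * q_low_mark or less, giving the hysteresis described above.
 */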
#define ON 1
#define OFF 0

static LIST_HEAD(cfhsi_list);
static void cfhsi_inactivity_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Schedule power down work queue. */
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_down_work);
}
static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
					   const struct sk_buff *skb,
					   int direction)
{
	struct caif_payload_info *info;
	int hpad, tpad, len;

	info = (struct caif_payload_info *)&skb->cb;
	hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
	tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
	len = skb->len + hpad + tpad;

	if (direction > 0)
		cfhsi->aggregation_len += len;
	else if (direction < 0)
		cfhsi->aggregation_len -= len;
}
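/*
 * Worked example (illustrative): with head_align = 4, tail_align = 4,
 * hdr_len = 3 and skb->len = 10, hpad = 1 + PAD_POW2(4, 4) = 1 and
 * tpad = PAD_POW2(11, 4) = 1, so this frame accounts for
 * 10 + 1 + 1 = 12 bytes of aggregated length.
 */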
static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
{
	int i;

	if (cfhsi->cfg.aggregation_timeout == 0)
		return true;

	for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
		if (cfhsi->qhead[i].qlen)
			return true;
	}

	/* TODO: Use aggregation_len instead */
	if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
		return true;

	return false;
}
static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
		skb = skb_dequeue(&cfhsi->qhead[i]);
		if (skb)
			break;
	}

	return skb;
}
static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
{
	int i, len = 0;

	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		len += skb_queue_len(&cfhsi->qhead[i]);

	return len;
}
static void cfhsi_abort_tx(struct cfhsi *cfhsi)
{
	struct sk_buff *skb;

	for (;;) {
		spin_lock_bh(&cfhsi->lock);
		skb = cfhsi_dequeue(cfhsi);
		if (!skb)
			break;

		cfhsi->ndev->stats.tx_errors++;
		cfhsi->ndev->stats.tx_dropped++;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);
		kfree_skb(skb);
	}
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		mod_timer(&cfhsi->inactivity_timer,
			jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);
}
static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
{
	char buffer[32]; /* Any reasonable value */
	size_t fifo_occupancy;
	int ret;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	do {
		ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
				&fifo_occupancy);
		if (ret) {
			netdev_warn(cfhsi->ndev,
				"%s: can't get FIFO occupancy: %d.\n",
				__func__, ret);
			break;
		} else if (!fifo_occupancy)
			/* No more data, exiting normally */
			break;

		fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
		set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
		ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
				cfhsi->ops);
		if (ret) {
			clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
			netdev_warn(cfhsi->ndev,
				"%s: can't read data: %d.\n",
				__func__, ret);
			break;
		}

		ret = 5 * HZ;
		ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
			 !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);

		if (ret < 0) {
			netdev_warn(cfhsi->ndev,
				"%s: can't wait for flush complete: %d.\n",
				__func__, ret);
			break;
		} else if (!ret) {
			ret = -ETIMEDOUT;
			netdev_warn(cfhsi->ndev,
				"%s: timeout waiting for flush complete.\n",
				__func__);
			break;
		}
	} while (1);

	return ret;
}
static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int nfrms = 0;
	int pld_len = 0;
	struct sk_buff *skb;
	u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;

	skb = cfhsi_dequeue(cfhsi);
	if (!skb)
		return 0;

	/* Clear offset. */
	desc->offset = 0;

	/* Check if we can embed a CAIF frame. */
	if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Check if frame still fits with added alignment. */
		if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
			u8 *pemb = desc->emb_frm;
			desc->offset = CFHSI_DESC_SHORT_SZ;
			*pemb = (u8)(hpad - 1);
			pemb += hpad;

			/* Update network statistics. */
			spin_lock_bh(&cfhsi->lock);
			cfhsi->ndev->stats.tx_packets++;
			cfhsi->ndev->stats.tx_bytes += skb->len;
			cfhsi_update_aggregation_stats(cfhsi, skb, -1);
			spin_unlock_bh(&cfhsi->lock);

			/* Copy in embedded CAIF frame. */
			skb_copy_bits(skb, 0, pemb, skb->len);

			/* Consume the SKB */
			consume_skb(skb);
			skb = NULL;
		}
	}

	/* Create payload CAIF frames. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	while (nfrms < CFHSI_MAX_PKTS) {
		struct caif_payload_info *info;
		int hpad;
		int tpad;

		if (!skb)
			skb = cfhsi_dequeue(cfhsi);

		if (!skb)
			break;

		/* Calculate needed head alignment and tail alignment. */
		info = (struct caif_payload_info *)&skb->cb;

		hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
		tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);

		/* Fill in CAIF frame length in descriptor. */
		desc->cffrm_len[nfrms] = hpad + skb->len + tpad;

		/* Fill head padding information. */
		*pfrm = (u8)(hpad - 1);
		pfrm += hpad;

		/* Update network statistics. */
		spin_lock_bh(&cfhsi->lock);
		cfhsi->ndev->stats.tx_packets++;
		cfhsi->ndev->stats.tx_bytes += skb->len;
		cfhsi_update_aggregation_stats(cfhsi, skb, -1);
		spin_unlock_bh(&cfhsi->lock);

		/* Copy in CAIF frame. */
		skb_copy_bits(skb, 0, pfrm, skb->len);

		/* Update payload length. */
		pld_len += desc->cffrm_len[nfrms];

		/* Update frame pointer. */
		pfrm += skb->len + tpad;

		/* Consume the SKB */
		consume_skb(skb);
		skb = NULL;

		/* Update number of frames. */
		nfrms++;
	}

	/* Unused length fields should be zero-filled (according to SPEC). */
	while (nfrms < CFHSI_MAX_PKTS) {
		desc->cffrm_len[nfrms] = 0x0000;
		nfrms++;
	}

	/* Check if we can piggy-back another descriptor. */
	if (cfhsi_can_send_aggregate(cfhsi))
		desc->header |= CFHSI_PIGGY_DESC;
	else
		desc->header &= ~CFHSI_PIGGY_DESC;

	return CFHSI_DESC_SZ + pld_len;
}
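/*
 * Resulting buffer layout (sketch): an HSI frame starts with the
 * descriptor (header, offset, cffrm_len[] and the embedded-frame area),
 * optionally carrying one small CAIF frame embedded at desc->offset,
 * followed by up to CFHSI_MAX_PKTS padded payload CAIF frames. Each
 * frame is preceded by one byte holding (hpad - 1) so the receive path
 * can strip the head padding again.
 */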
static void cfhsi_start_tx(struct cfhsi *cfhsi)
{
	struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
	int len, res;

	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	do {
		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		if (!len) {
			spin_lock_bh(&cfhsi->lock);
			if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
				spin_unlock_bh(&cfhsi->lock);
				res = -EAGAIN;
				continue;
			}
			cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
			/* Start inactivity timer. */
			mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
			spin_unlock_bh(&cfhsi->lock);
			break;
		}

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0))
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
	} while (res < 0);
}
static void cfhsi_tx_done(struct cfhsi *cfhsi)
{
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/*
	 * Send flow on if flow off has been previously signalled
	 * and number of packets is below low water mark.
	 */
	spin_lock_bh(&cfhsi->lock);
	if (cfhsi->flow_off_sent &&
			cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
			cfhsi->cfdev.flowctrl) {

		cfhsi->flow_off_sent = 0;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
	}

	if (cfhsi_can_send_aggregate(cfhsi)) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_start_tx(cfhsi);
	} else {
		mod_timer(&cfhsi->aggregation_timer,
			jiffies + cfhsi->cfg.aggregation_timeout);
		spin_unlock_bh(&cfhsi->lock);
	}
}
static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;
	cfhsi_tx_done(cfhsi);
}
static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Check for embedded CAIF frame. */
	if (desc->offset) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		int len = 0;
		pfrm = ((u8 *)desc) + desc->offset;

		/* Remove offset padding. */
		pfrm += *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pfrm;
		len |= ((*(pfrm+1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frame. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pfrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We are in a callback handler and
		 * unfortunately we don't know what context we're
		 * running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Check for piggy-backed descriptor. */
	if (desc->header & CFHSI_PIGGY_DESC)
		xfer_sz += CFHSI_DESC_SZ;

	if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
		netdev_err(cfhsi->ndev,
				"%s: Invalid payload len: %d, ignored.\n",
			__func__, xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}
static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
{
	int xfer_sz = 0;
	int nfrms = 0;
	u16 *plen;

	if ((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {

		pr_err("Invalid descriptor. %x %x\n", desc->header,
				desc->offset);
		return -EPROTO;
	}

	/* Calculate transfer length. */
	plen = desc->cffrm_len;
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		xfer_sz += *plen;
		plen++;
		nfrms++;
	}

	if (xfer_sz % 4) {
		pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
		return -EPROTO;
	}
	return xfer_sz;
}
static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
{
	int rx_sz = 0;
	int nfrms = 0;
	u16 *plen = NULL;
	u8 *pfrm = NULL;

	/* Sanity check header and offset. */
	if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
			(desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
		netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
			__func__);
		return -EPROTO;
	}

	/* Set frame pointer to start of payload. */
	pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
	plen = desc->cffrm_len;

	/* Skip already processed frames. */
	while (nfrms < cfhsi->rx_state.nfrms) {
		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	/* Parse payload. */
	while (nfrms < CFHSI_MAX_PKTS && *plen) {
		struct sk_buff *skb;
		u8 *dst = NULL;
		u8 *pcffrm = NULL;
		int len;

		/* CAIF frame starts after head padding. */
		pcffrm = pfrm + *pfrm + 1;

		/* Read length of CAIF frame (little endian). */
		len = *pcffrm;
		len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
		len += 2;	/* Add FCS fields. */

		/* Sanity check length of CAIF frames. */
		if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
			netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
				__func__);
			return -EPROTO;
		}

		/* Allocate SKB (OK even in IRQ context). */
		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
				__func__);
			cfhsi->rx_state.nfrms = nfrms;
			return -ENOMEM;
		}
		caif_assert(skb != NULL);

		dst = skb_put(skb, len);
		memcpy(dst, pcffrm, len);

		skb->protocol = htons(ETH_P_CAIF);
		skb_reset_mac_header(skb);
		skb->dev = cfhsi->ndev;

		/*
		 * We're called in callback from HSI
		 * and don't know the context we're running in.
		 */
		if (in_interrupt())
			netif_rx(skb);
		else
			netif_rx_ni(skb);

		/* Update network statistics. */
		cfhsi->ndev->stats.rx_packets++;
		cfhsi->ndev->stats.rx_bytes += len;

		pfrm += *plen;
		rx_sz += *plen;
		plen++;
		nfrms++;
	}

	return rx_sz;
}
static void cfhsi_rx_done(struct cfhsi *cfhsi)
{
	int res;
	int desc_pld_len = 0, rx_len, rx_state;
	struct cfhsi_desc *desc = NULL;
	u8 *rx_ptr, *rx_buf;
	struct cfhsi_desc *piggy_desc = NULL;

	desc = (struct cfhsi_desc *)cfhsi->rx_buf;

	netdev_dbg(cfhsi->ndev, "%s\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Update inactivity timer if pending. */
	spin_lock_bh(&cfhsi->lock);
	mod_timer_pending(&cfhsi->inactivity_timer,
			jiffies + cfhsi->cfg.inactivity_timeout);
	spin_unlock_bh(&cfhsi->lock);

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		desc_pld_len = cfhsi_rx_desc_len(desc);

		if (desc_pld_len < 0)
			goto out_of_sync;

		rx_buf = cfhsi->rx_buf;
		rx_len = desc_pld_len;
		if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
			rx_len += CFHSI_DESC_SZ;
		if (desc_pld_len == 0)
			rx_buf = cfhsi->rx_flip_buf;
	} else {
		rx_buf = cfhsi->rx_flip_buf;

		rx_len = CFHSI_DESC_SZ;
		if (cfhsi->rx_state.pld_len > 0 &&
				(desc->header & CFHSI_PIGGY_DESC)) {

			piggy_desc = (struct cfhsi_desc *)
				(desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
						cfhsi->rx_state.pld_len);

			cfhsi->rx_state.piggy_desc = true;

			/* Extract payload len from piggy-backed descriptor. */
			desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
			if (desc_pld_len < 0)
				goto out_of_sync;

			if (desc_pld_len > 0) {
				rx_len = desc_pld_len;
				if (piggy_desc->header & CFHSI_PIGGY_DESC)
					rx_len += CFHSI_DESC_SZ;
			}

			/*
			 * Copy needed information from the piggy-backed
			 * descriptor to the descriptor in the start.
			 */
			memcpy(rx_buf, (u8 *)piggy_desc,
					CFHSI_DESC_SHORT_SZ);
		}
	}

	if (desc_pld_len) {
		rx_state = CFHSI_RX_STATE_PAYLOAD;
		rx_ptr = rx_buf + CFHSI_DESC_SZ;
	} else {
		rx_state = CFHSI_RX_STATE_DESC;
		rx_ptr = rx_buf;
		rx_len = CFHSI_DESC_SZ;
	}

	/* Initiate next read */
	if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
		/* Set up new transfer. */
		netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
				__func__);

		res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
				cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
				__func__, res);
			cfhsi->ndev->stats.rx_errors++;
			cfhsi->ndev->stats.rx_dropped++;
		}
	}

	if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
		/* Extract payload from descriptor */
		if (cfhsi_rx_desc(desc, cfhsi) < 0)
			goto out_of_sync;
	} else {
		/* Extract payload */
		if (cfhsi_rx_pld(desc, cfhsi) < 0)
			goto out_of_sync;
		if (piggy_desc) {
			/* Extract any payload in piggyback descriptor. */
			if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
				goto out_of_sync;
			/* Mark no embedded frame after extracting it */
			piggy_desc->offset = 0;
		}
	}

	/* Update state info */
	memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
	cfhsi->rx_state.state = rx_state;
	cfhsi->rx_ptr = rx_ptr;
	cfhsi->rx_len = rx_len;
	cfhsi->rx_state.pld_len = desc_pld_len;
	cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;

	if (rx_buf != cfhsi->rx_buf)
		swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
	return;

out_of_sync:
	netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
	print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
			cfhsi->rx_buf, CFHSI_DESC_SZ);
	schedule_work(&cfhsi->out_of_sync_work);
}
static void cfhsi_rx_slowpath(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_rx_done(cfhsi);
}
static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
		wake_up_interruptible(&cfhsi->flush_fifo_wait);
	else
		cfhsi_rx_done(cfhsi);
}
static void cfhsi_wake_up(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;
	int res;
	int len;
	long ret;

	cfhsi = container_of(work, struct cfhsi, wake_up_work);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
		/* This happens when a wakeup is requested by
		 * both ends at the same time. */
		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
		return;
	}

	/* Activate wake line. */
	cfhsi->ops->cfhsi_wake_up(cfhsi->ops);

	netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
		__func__);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
					test_and_clear_bit(CFHSI_WAKE_UP_ACK,
							&cfhsi->bits), ret);
	if (unlikely(ret < 0)) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	} else if (!ret) {
		bool ca_wake = false;
		size_t fifo_occupancy = 0;

		/* Wakeup timeout */
		netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
			__func__);

		/* Check FIFO to check if modem has sent something. */
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
					&fifo_occupancy));

		netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
				__func__, (unsigned) fifo_occupancy);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
							&ca_wake));

		if (ca_wake) {
			netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
				__func__);

			/* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
			clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

			/* Continue execution. */
			goto wake_ack;
		}

		clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
		cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
		return;
	}
wake_ack:
	netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
		__func__);

	/* Set the power-up bit and clear the pending wake-up request. */
	set_bit(CFHSI_AWAKE, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);

	/* Resume read operation. */
	netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
	res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);

	if (WARN_ON(res < 0))
		netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);

	/* Clear power up acknowledgment. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);

	spin_lock_bh(&cfhsi->lock);

	/* Resume transmit if queues are not empty. */
	if (!cfhsi_tx_queue_len(cfhsi)) {
		netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
			__func__);
		/* Start inactivity timer. */
		mod_timer(&cfhsi->inactivity_timer,
				jiffies + cfhsi->cfg.inactivity_timeout);
		spin_unlock_bh(&cfhsi->lock);
		return;
	}

	netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
		__func__);

	spin_unlock_bh(&cfhsi->lock);

	/* Create HSI frame. */
	len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);

	if (likely(len > 0)) {
		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		netdev_err(cfhsi->ndev,
			"%s: Failed to create HSI frame: %d.\n",
			__func__, len);
	}
}
static void cfhsi_wake_down(struct work_struct *work)
{
	long ret;
	struct cfhsi *cfhsi = NULL;
	size_t fifo_occupancy = 0;
	int retry = CFHSI_WAKE_TOUT;

	cfhsi = container_of(work, struct cfhsi, wake_down_work);
	netdev_dbg(cfhsi->ndev, "%s.\n", __func__);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Deactivate wake line. */
	cfhsi->ops->cfhsi_wake_down(cfhsi->ops);

	/* Wait for acknowledge. */
	ret = CFHSI_WAKE_TOUT;
	ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
					test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
							&cfhsi->bits), ret);
	if (ret < 0) {
		/* Interrupted by signal. */
		netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
			__func__, ret);
		return;
	} else if (!ret) {
		bool ca_wake = true;

		/* Timeout */
		netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);

		/* Check if we missed the interrupt. */
		WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
					&ca_wake));
		if (!ca_wake)
			netdev_err(cfhsi->ndev, "%s: CA Wake missed!\n",
				__func__);
	}

	/* Check FIFO occupancy. */
	while (retry) {
		WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
							&fifo_occupancy));

		if (!fifo_occupancy)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
		retry--;
	}

	if (!retry)
		netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);

	/* Clear AWAKE condition. */
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Cancel pending RX requests. */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
}
static void cfhsi_out_of_sync(struct work_struct *work)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(work, struct cfhsi, out_of_sync_work);

	rtnl_lock();
	dev_close(cfhsi->ndev);
	rtnl_unlock();
}
static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_up_wait);

	if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
		return;

	/* Schedule wake up work queue if the peer initiates. */
	if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
		queue_work(cfhsi->wq, &cfhsi->wake_up_work);
}
static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
{
	struct cfhsi *cfhsi = NULL;

	cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	/* Initiating low power is only permitted by the host (us). */
	set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	wake_up_interruptible(&cfhsi->wake_down_wait);
}
static void cfhsi_aggregation_tout(unsigned long arg)
{
	struct cfhsi *cfhsi = (struct cfhsi *)arg;

	netdev_dbg(cfhsi->ndev, "%s.\n",
		__func__);

	cfhsi_start_tx(cfhsi);
}
static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cfhsi *cfhsi = NULL;
	int start_xfer = 0;
	int timer_active;
	int prio;

	if (!dev)
		return -EINVAL;

	cfhsi = netdev_priv(dev);

	switch (skb->priority) {
	case TC_PRIO_BESTEFFORT:
	case TC_PRIO_FILLER:
	case TC_PRIO_BULK:
		prio = CFHSI_PRIO_BEBK;
		break;
	case TC_PRIO_INTERACTIVE_BULK:
		prio = CFHSI_PRIO_VI;
		break;
	case TC_PRIO_INTERACTIVE:
		prio = CFHSI_PRIO_VO;
		break;
	case TC_PRIO_CONTROL:
	default:
		prio = CFHSI_PRIO_CTL;
		break;
	}

	spin_lock_bh(&cfhsi->lock);

	/* Update aggregation statistics */
	cfhsi_update_aggregation_stats(cfhsi, skb, 1);

	/* Queue the SKB */
	skb_queue_tail(&cfhsi->qhead[prio], skb);

	/* Sanity check; xmit should not be called after unregister_netdev */
	if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
		spin_unlock_bh(&cfhsi->lock);
		cfhsi_abort_tx(cfhsi);
		return -EINVAL;
	}

	/* Send flow off if number of packets is above high water mark. */
	if (!cfhsi->flow_off_sent &&
		cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
		cfhsi->cfdev.flowctrl) {
		cfhsi->flow_off_sent = 1;
		cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
	}

	if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
		cfhsi->tx_state = CFHSI_TX_STATE_XFER;
		start_xfer = 1;
	}

	if (!start_xfer) {
		/* Send aggregate if it is possible */
		bool aggregate_ready =
			cfhsi_can_send_aggregate(cfhsi) &&
			del_timer(&cfhsi->aggregation_timer) > 0;
		spin_unlock_bh(&cfhsi->lock);
		if (aggregate_ready)
			cfhsi_start_tx(cfhsi);
		return 0;
	}

	/* Delete inactivity timer if started. */
	timer_active = del_timer_sync(&cfhsi->inactivity_timer);

	spin_unlock_bh(&cfhsi->lock);

	if (timer_active) {
		struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
		int len;
		int res;

		/* Create HSI frame. */
		len = cfhsi_tx_frm(desc, cfhsi);
		WARN_ON(!len);

		/* Set up new transfer. */
		res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
		if (WARN_ON(res < 0)) {
			netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
				__func__, res);
			cfhsi_abort_tx(cfhsi);
		}
	} else {
		/* Schedule wake up work queue if we initiate. */
		if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
			queue_work(cfhsi->wq, &cfhsi->wake_up_work);
	}

	return 0;
}
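/*
 * Note on the priority mapping above (illustrative): skb->priority is
 * typically set per-socket (e.g. via SO_PRIORITY) and the TC_PRIO_*
 * constants from <linux/pkt_sched.h> are folded into the four
 * CFHSI_PRIO_* queues, with unknown values defaulting to the control
 * queue. The queues are drained strictly in priority order by
 * cfhsi_dequeue().
 */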
static const struct net_device_ops cfhsi_netdevops;
static void cfhsi_setup(struct net_device *dev)
{
	int i;
	struct cfhsi *cfhsi = netdev_priv(dev);
	dev->features = 0;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;
	dev->netdev_ops = &cfhsi_netdevops;
	for (i = 0; i < CFHSI_PRIO_LAST; ++i)
		skb_queue_head_init(&cfhsi->qhead[i]);
	cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
	cfhsi->cfdev.use_frag = false;
	cfhsi->cfdev.use_stx = false;
	cfhsi->cfdev.use_fcs = false;
	cfhsi->ndev = dev;
	cfhsi->cfg = hsi_default_config;
}
static int cfhsi_open(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	int res;

	clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Initialize state variables. */
	cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
	cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;

	/* Set flow info. */
	cfhsi->flow_off_sent = 0;

	/*
	 * Allocate a TX buffer with the size of an HSI packet descriptor
	 * and the necessary room for CAIF payload frames.
	 */
	cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
	if (!cfhsi->tx_buf) {
		res = -ENODEV;
		goto err_alloc_tx;
	}

	/*
	 * Allocate a RX buffer with the size of two HSI packet descriptors and
	 * the necessary room for CAIF payload frames.
	 */
	cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_buf) {
		res = -ENODEV;
		goto err_alloc_rx;
	}

	cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
	if (!cfhsi->rx_flip_buf) {
		res = -ENODEV;
		goto err_alloc_rx_flip;
	}

	/* Initialize aggregation timeout */
	cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;

	/* Initialize receive variables. */
	cfhsi->rx_ptr = cfhsi->rx_buf;
	cfhsi->rx_len = CFHSI_DESC_SZ;

	/* Initialize spin locks. */
	spin_lock_init(&cfhsi->lock);

	/* Set up the driver. */
	cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
	cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
	cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
	cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;

	/* Initialize the work queues. */
	INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
	INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
	INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);

	/* Clear all bit fields. */
	clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
	clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
	clear_bit(CFHSI_AWAKE, &cfhsi->bits);

	/* Create work thread. */
	cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name);
	if (!cfhsi->wq) {
		netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
			__func__);
		res = -ENODEV;
		goto err_create_wq;
	}

	/* Initialize wait queues. */
	init_waitqueue_head(&cfhsi->wake_up_wait);
	init_waitqueue_head(&cfhsi->wake_down_wait);
	init_waitqueue_head(&cfhsi->flush_fifo_wait);

	/* Setup the inactivity timer. */
	init_timer(&cfhsi->inactivity_timer);
	cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
	cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
	/* Setup the slowpath RX timer. */
	init_timer(&cfhsi->rx_slowpath_timer);
	cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
	cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
	/* Setup the aggregation timer. */
	init_timer(&cfhsi->aggregation_timer);
	cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
	cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;

	/* Activate HSI interface. */
	res = cfhsi->ops->cfhsi_up(cfhsi->ops);
	if (res) {
		netdev_err(cfhsi->ndev,
			"%s: can't activate HSI interface: %d.\n",
			__func__, res);
		goto err_activate;
	}

	/* Flush FIFO */
	res = cfhsi_flush_fifo(cfhsi);
	if (res) {
		netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
			__func__, res);
		goto err_net_reg;
	}
	return res;

 err_net_reg:
	cfhsi->ops->cfhsi_down(cfhsi->ops);
 err_activate:
	destroy_workqueue(cfhsi->wq);
 err_create_wq:
	kfree(cfhsi->rx_flip_buf);
 err_alloc_rx_flip:
	kfree(cfhsi->rx_buf);
 err_alloc_rx:
	kfree(cfhsi->tx_buf);
 err_alloc_tx:
	return res;
}
static int cfhsi_close(struct net_device *ndev)
{
	struct cfhsi *cfhsi = netdev_priv(ndev);
	u8 *tx_buf, *rx_buf, *flip_buf;

	/* going to shutdown driver */
	set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);

	/* Flush workqueue */
	flush_workqueue(cfhsi->wq);

	/* Delete timers if pending */
	del_timer_sync(&cfhsi->inactivity_timer);
	del_timer_sync(&cfhsi->rx_slowpath_timer);
	del_timer_sync(&cfhsi->aggregation_timer);

	/* Cancel pending RX request (if any) */
	cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);

	/* Destroy workqueue */
	destroy_workqueue(cfhsi->wq);

	/* Store buffers: will be freed later. */
	tx_buf = cfhsi->tx_buf;
	rx_buf = cfhsi->rx_buf;
	flip_buf = cfhsi->rx_flip_buf;
	/* Flush transmit queues. */
	cfhsi_abort_tx(cfhsi);

	/* Deactivate interface */
	cfhsi->ops->cfhsi_down(cfhsi->ops);

	/* Free buffers. */
	kfree(tx_buf);
	kfree(rx_buf);
	kfree(flip_buf);
	return 0;
}
static void cfhsi_uninit(struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);
	ASSERT_RTNL();
	symbol_put(cfhsi_get_device);
	list_del(&cfhsi->list);
}
static const struct net_device_ops cfhsi_netdevops = {
	.ndo_uninit = cfhsi_uninit,
	.ndo_open = cfhsi_open,
	.ndo_stop = cfhsi_close,
	.ndo_start_xmit = cfhsi_xmit,
};
static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
{
	int i;

	if (!data) {
		pr_debug("no params data found\n");
		return;
	}

	i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
	/*
	 * Inactivity timeout in millisecs. Lowest possible value is 1,
	 * and highest possible is NEXT_TIMER_MAX_DELTA.
	 */
	if (data[i]) {
		u32 inactivity_timeout = nla_get_u32(data[i]);
		/* Pre-calculate inactivity timeout. */
		cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
		if (cfhsi->cfg.inactivity_timeout == 0)
			cfhsi->cfg.inactivity_timeout = 1;
		else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
			cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
	}

	i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
	if (data[i])
		cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_HEAD_ALIGN;
	if (data[i])
		cfhsi->cfg.head_align = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_TAIL_ALIGN;
	if (data[i])
		cfhsi->cfg.tail_align = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
	if (data[i])
		cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);

	i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
	if (data[i])
		cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
}
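/*
 * Worked example (illustrative): with HZ = 1000, a netlink value of
 * 250 ms becomes 250 * 1000 / 1000 = 250 jiffies; a value small enough
 * to truncate to 0 is clamped to 1 jiffy, and anything above
 * NEXT_TIMER_MAX_DELTA is clamped down to that bound.
 */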
static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
				struct nlattr *data[])
{
	cfhsi_netlink_parms(data, netdev_priv(dev));
	netdev_state_change(dev);
	return 0;
}
[__IFLA_CAIF_HSI_MAX
+ 1] = {
1371 [__IFLA_CAIF_HSI_INACTIVITY_TOUT
] = { .type
= NLA_U32
, .len
= 4 },
1372 [__IFLA_CAIF_HSI_AGGREGATION_TOUT
] = { .type
= NLA_U32
, .len
= 4 },
1373 [__IFLA_CAIF_HSI_HEAD_ALIGN
] = { .type
= NLA_U32
, .len
= 4 },
1374 [__IFLA_CAIF_HSI_TAIL_ALIGN
] = { .type
= NLA_U32
, .len
= 4 },
1375 [__IFLA_CAIF_HSI_QHIGH_WATERMARK
] = { .type
= NLA_U32
, .len
= 4 },
1376 [__IFLA_CAIF_HSI_QLOW_WATERMARK
] = { .type
= NLA_U32
, .len
= 4 },
static size_t caif_hsi_get_size(const struct net_device *dev)
{
	int i;
	size_t s = 0;
	for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
		s += nla_total_size(caif_hsi_policy[i].len);
	return s;
}
static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct cfhsi *cfhsi = netdev_priv(dev);

	if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
			cfhsi->cfg.inactivity_timeout) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
			cfhsi->cfg.aggregation_timeout) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
			cfhsi->cfg.head_align) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
			cfhsi->cfg.tail_align) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
			cfhsi->cfg.q_high_mark) ||
	    nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
			cfhsi->cfg.q_low_mark))
		return -EMSGSIZE;

	return 0;
}
static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
			    struct nlattr *tb[], struct nlattr *data[])
{
	struct cfhsi *cfhsi = NULL;
	struct cfhsi_ops *(*get_ops)(void);

	ASSERT_RTNL();

	cfhsi = netdev_priv(dev);
	cfhsi_netlink_parms(data, cfhsi);
	dev_net_set(cfhsi->ndev, src_net);

	get_ops = symbol_get(cfhsi_get_ops);
	if (!get_ops) {
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		return -ENODEV;
	}

	/* Assign the HSI device. */
	cfhsi->ops = (*get_ops)();
	if (!cfhsi->ops) {
		pr_err("%s: failed to get the cfhsi_ops\n", __func__);
		goto err;
	}

	/* Assign the driver to this HSI device. */
	cfhsi->ops->cb_ops = &cfhsi->cb_ops;
	if (register_netdevice(dev)) {
		pr_warn("%s: caif_hsi device registration failed\n", __func__);
		goto err;
	}
	/* Add CAIF HSI device to list. */
	list_add_tail(&cfhsi->list, &cfhsi_list);

	return 0;
err:
	symbol_put(cfhsi_get_ops);
	return -ENODEV;
}
static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
	.kind		= "cfhsi",
	.priv_size	= sizeof(struct cfhsi),
	.setup		= cfhsi_setup,
	.maxtype	= __IFLA_CAIF_HSI_MAX,
	.policy		= caif_hsi_policy,
	.newlink	= caif_hsi_newlink,
	.changelink	= caif_hsi_changelink,
	.get_size	= caif_hsi_get_size,
	.fill_info	= caif_hsi_fill_info,
};
static void __exit cfhsi_exit_module(void)
{
	struct list_head *list_node;
	struct list_head *n;
	struct cfhsi *cfhsi;

	rtnl_link_unregister(&caif_hsi_link_ops);

	list_for_each_safe(list_node, n, &cfhsi_list) {
		cfhsi = list_entry(list_node, struct cfhsi, list);
		unregister_netdev(cfhsi->ndev);
	}
}

static int __init cfhsi_init_module(void)
{
	return rtnl_link_register(&caif_hsi_link_ops);
}

module_init(cfhsi_init_module);
module_exit(cfhsi_exit_module);