/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Authors: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
 *          Daniel Martensson / daniel.martensson@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>

#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>

#define NR_TX_BUF 6
#define NR_RX_BUF 6
#define TX_BUF_SZ 0x2000
#define RX_BUF_SZ 0x2000

#define CAIF_NEEDED_HEADROOM 32

#define CAIF_FLOW_ON 1
#define CAIF_FLOW_OFF 0

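/*
 * Flow control watermarks, counted in free TX buffers: flow is
 * switched off when the count drops below LOW_WATERMARK and switched
 * back on when it rises above HIGH_WATERMARK.
 */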
#define LOW_WATERMARK 3
#define HIGH_WATERMARK 4

/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF 10

/*
 * Size in bytes of the descriptor area
 * (With end of descriptor signalling)
 */
#define SHM_CAIF_DESC_SIZE ((SHM_MAX_FRMS_PER_BUF + 1) * \
				sizeof(struct shm_pck_desc))

/*
 * Offset to the first CAIF frame within a shared memory buffer.
 * Aligned on 32 bytes.
 */
#define SHM_CAIF_FRM_OFS ALIGN(SHM_CAIF_DESC_SIZE, 32)

/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN 1

/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN 4

#define CAIF_MAX_MTU 4096

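/*
 * Mailbox message encoding: the low nibble carries the index (plus
 * one) of a buffer that has been filled, the high nibble the index
 * (plus one) of a buffer that has been emptied. A zero nibble means
 * no indication.
 */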
#define SHM_SET_FULL(x)	(((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x)	(((x >> 0) & 0x0F) - 1)

#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)

#define SHM_FULL_MASK		(0x0F << 0)
#define SHM_EMPTY_MASK		(0x0F << 4)

struct shm_pck_desc {
	/*
	 * Offset from start of shared memory area to start of
	 * shared memory CAIF frame.
	 */
	u32 frm_ofs;
	/* Length of the CAIF frame, including the shared memory header
	 * and any tail padding.
	 */
	u32 frm_len;
};

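/*
 * Bookkeeping for one shared memory buffer: desc_vptr is the kernel
 * mapping of the buffer, phy_addr its physical address, index its
 * identity in the mailbox protocol, len the buffer size, frames the
 * number of CAIF frames currently stored and frm_ofs the offset at
 * which the next frame will be written.
 */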
struct buf_list {
	unsigned char *desc_vptr;
	u32 phy_addr;
	u32 index;
	u32 len;
	u32 frames;
	u32 frm_ofs;
	struct list_head list;
};

struct shm_caif_frm {
	/* Number of bytes of padding before the CAIF frame. */
	u8 hdr_ofs;
};

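/*
 * Driver instance. RX buffers cycle rx_empty_list -> rx_full_list
 * (filled by the modem) -> rx_pend_list (processed, waiting to be
 * reported empty) -> rx_empty_list; TX buffers cycle tx_empty_list ->
 * tx_full_list (handed to the modem) -> tx_empty_list.
 */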
struct shmdrv_layer {
	/* caif_dev_common must always be first in the structure. */
	struct caif_dev_common cfdev;

	u32 shm_tx_addr;
	u32 shm_rx_addr;
	u32 shm_base_addr;
	u32 tx_empty_available;
	spinlock_t lock;

	struct list_head tx_empty_list;
	struct list_head tx_pend_list;
	struct list_head tx_full_list;
	struct list_head rx_empty_list;
	struct list_head rx_pend_list;
	struct list_head rx_full_list;

	struct workqueue_struct *pshm_tx_workqueue;
	struct workqueue_struct *pshm_rx_workqueue;

	struct work_struct shm_tx_work;
	struct work_struct shm_rx_work;

	struct sk_buff_head sk_qhead;
	struct shmdev_layer *pshm_dev;
};

static int shm_netdev_open(struct net_device *shm_netdev)
{
	netif_wake_queue(shm_netdev);
	return 0;
}

static int shm_netdev_close(struct net_device *shm_netdev)
{
	netif_stop_queue(shm_netdev);
	return 0;
}

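/*
 * Mailbox message callback, invoked when the modem signals new buffer
 * state (possibly from IRQ context). Decodes the "full" and "empty"
 * indications, moves the affected buffers between lists and kicks the
 * RX/TX work queues.
 */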
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv;
	struct list_head *pos;
	u32 avail_emptybuff = 0;
	unsigned long flags = 0;

	pshm_drv = (struct shmdrv_layer *)priv;

	/* Check for received buffers. */
	if (mbx_msg & SHM_FULL_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->rx_empty_list)) {

			/* Release spin lock. */
			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("No empty Rx buffers to fill: "
					"mbx_msg:%x\n", mbx_msg);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_FULL(mbx_msg)) {

			/* We print even in IRQ context... */
			pr_warn("caif_shmdrv_rx_cb: RX full out of sync: "
					"idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
					idx, mbx_msg, SHM_GET_FULL(mbx_msg));

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		list_del_init(&pbuf->list);
		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		/* Schedule RX work queue. */
		if (!work_pending(&pshm_drv->shm_rx_work))
			queue_work(pshm_drv->pshm_rx_workqueue,
					&pshm_drv->shm_rx_work);
	}

	/* Check for emptied buffers. */
	if (mbx_msg & SHM_EMPTY_MASK) {
		int idx;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check whether we have any outstanding buffers. */
		if (list_empty(&pshm_drv->tx_full_list)) {

			/* We print even in IRQ context... */
			pr_warn("No TX to empty: msg:%x\n", mbx_msg);

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Bail out. */
			goto err_sync;
		}

		pbuf =
			list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		idx = pbuf->index;

		/* Check buffer synchronization. */
		if (idx != SHM_GET_EMPTY(mbx_msg)) {

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* We print even in IRQ context... */
			pr_warn("TX empty "
					"out of sync: idx:%d, msg:%x\n", idx, mbx_msg);

			/* Bail out. */
			goto err_sync;
		}
		list_del_init(&pbuf->list);

		/* Reset buffer parameters. */
		pbuf->frames = 0;
		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;

		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);

		/* Check the available number of buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		/* Check whether we have to wake up the transmitter. */
		if ((avail_emptybuff > HIGH_WATERMARK) &&
				(!pshm_drv->tx_empty_available)) {
			pshm_drv->tx_empty_available = 1;
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					CAIF_FLOW_ON);

			spin_unlock_irqrestore(&pshm_drv->lock, flags);

			/* Schedule the work queue if required. */
			if (!work_pending(&pshm_drv->shm_tx_work))
				queue_work(pshm_drv->pshm_tx_workqueue,
						&pshm_drv->shm_tx_work);
		} else
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
	}

	return 0;

err_sync:
	return -EIO;
}

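/*
 * RX work function: for each received buffer, walk the descriptor
 * area, copy every CAIF frame into a fresh skb and push it up the
 * stack, then park the buffer on rx_pend_list so the TX work function
 * can report it empty to the modem.
 */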
static void shm_rx_work_func(struct work_struct *rx_work)
{
	struct shmdrv_layer *pshm_drv;
	struct buf_list *pbuf;
	unsigned long flags = 0;
	struct sk_buff *skb;
	char *p;
	int ret;

	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);

	while (1) {

		struct shm_pck_desc *pck_desc;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for received buffers. */
		if (list_empty(&pshm_drv->rx_full_list)) {
			spin_unlock_irqrestore(&pshm_drv->lock, flags);
			break;
		}

		pbuf =
			list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del_init(&pbuf->list);

		/* Retrieve pointer to start of the packet descriptor area. */
		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;

		/*
		 * Check whether descriptor contains a CAIF shared memory
		 * frame.
		 */
		while (pck_desc->frm_ofs) {
			unsigned int frm_buf_ofs;
			unsigned int frm_pck_ofs;
			unsigned int frm_pck_len;
			/*
			 * Check whether offset is within buffer limits
			 * (lower).
			 */
			if (pck_desc->frm_ofs <
					(pbuf->phy_addr - pshm_drv->shm_base_addr))
				break;
			/*
			 * Check whether offset is within buffer limits
			 * (higher).
			 */
			if (pck_desc->frm_ofs >
					((pbuf->phy_addr - pshm_drv->shm_base_addr) +
						pbuf->len))
				break;

			/* Calculate offset from start of buffer. */
			frm_buf_ofs =
				pck_desc->frm_ofs - (pbuf->phy_addr -
						pshm_drv->shm_base_addr);

			/*
			 * Calculate offset and length of CAIF packet while
			 * taking care of the shared memory header.
			 */
			frm_pck_ofs =
				frm_buf_ofs + SHM_HDR_LEN +
				(*(pbuf->desc_vptr + frm_buf_ofs));
			frm_pck_len =
				(pck_desc->frm_len - SHM_HDR_LEN -
				(*(pbuf->desc_vptr + frm_buf_ofs)));

			/* Check whether CAIF packet is within buffer limits. */
			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
				break;

			/* Get a suitable CAIF packet and copy in data. */
			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
						frm_pck_len + 1);
			if (skb == NULL) {
				/* Drop the frame on allocation failure. */
				pshm_drv->pshm_dev->pshm_netdev->stats.rx_dropped++;
				pck_desc++;
				continue;
			}

			p = skb_put(skb, frm_pck_len);
			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);

			skb->protocol = htons(ETH_P_CAIF);
			skb_reset_mac_header(skb);
			skb->dev = pshm_drv->pshm_dev->pshm_netdev;

			/* Push received packet up the stack. */
			ret = netif_rx_ni(skb);

			if (!ret) {
				pshm_drv->pshm_dev->pshm_netdev->stats.rx_packets++;
				pshm_drv->pshm_dev->pshm_netdev->stats.rx_bytes +=
						pck_desc->frm_len;
			} else
				pshm_drv->pshm_dev->pshm_netdev->stats.rx_dropped++;
			/* Move to next packet descriptor. */
			pck_desc++;
		}


		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);

		spin_unlock_irqrestore(&pshm_drv->lock, flags);

	}

	/* Schedule the TX work queue if required. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

}

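/*
 * TX work function: report processed RX buffers back to the modem,
 * pack queued skbs into free TX buffers (up to SHM_MAX_FRMS_PER_BUF
 * frames per buffer) and signal each filled buffer via the mailbox.
 * Also switches flow control off when free TX buffers run low.
 */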
static void shm_tx_work_func(struct work_struct *tx_work)
{
	u32 mbox_msg;
	unsigned int frmlen, avail_emptybuff, append = 0;
	unsigned long flags = 0;
	struct buf_list *pbuf = NULL;
	struct shmdrv_layer *pshm_drv;
	struct shm_caif_frm *frm;
	struct sk_buff *skb;
	struct shm_pck_desc *pck_desc;
	struct list_head *pos;

	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);

	do {
		/* Initialize mailbox message. */
		mbox_msg = 0x00;
		avail_emptybuff = 0;

		spin_lock_irqsave(&pshm_drv->lock, flags);

		/* Check for pending receive buffers. */
		if (!list_empty(&pshm_drv->rx_pend_list)) {

			pbuf = list_entry(pshm_drv->rx_pend_list.next,
						struct buf_list, list);

			list_del_init(&pbuf->list);
			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
			/*
			 * Value index is never changed,
			 * so read access should be safe.
			 */
			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
		}

		skb = skb_peek(&pshm_drv->sk_qhead);

		if (skb == NULL)
			goto send_msg;

		/* Check the available number of buffers in the empty list. */
		list_for_each(pos, &pshm_drv->tx_empty_list)
			avail_emptybuff++;

		if ((avail_emptybuff < LOW_WATERMARK) &&
				pshm_drv->tx_empty_available) {
			/* Update blocking condition. */
			pshm_drv->tx_empty_available = 0;
			pshm_drv->cfdev.flowctrl
					(pshm_drv->pshm_dev->pshm_netdev,
					CAIF_FLOW_OFF);
		}
		/*
		 * Simply return to the caller if there is no space left
		 * in the TX empty list. The received skb is held on the
		 * skb queue, waiting to be transmitted once TX buffers
		 * become available.
		 */
		if (list_empty(&pshm_drv->tx_empty_list))
			goto send_msg;

		/* Get the first free TX buffer. */
		pbuf = list_entry(pshm_drv->tx_empty_list.next,
					struct buf_list, list);
		do {
			if (append) {
				skb = skb_peek(&pshm_drv->sk_qhead);
				if (skb == NULL)
					break;
			}

			frm = (struct shm_caif_frm *)
					(pbuf->desc_vptr + pbuf->frm_ofs);

			frm->hdr_ofs = 0;
			frmlen = SHM_HDR_LEN + frm->hdr_ofs + skb->len;

			/* Add tail padding if needed. */
			if (frmlen % SHM_FRM_PAD_LEN)
				frmlen += SHM_FRM_PAD_LEN -
						(frmlen % SHM_FRM_PAD_LEN);

			/*
			 * Verify that packet, header and additional padding
			 * can fit within the buffer frame area.
			 */
			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
				break;

			if (!append) {
				list_del_init(&pbuf->list);
				append = 1;
			}

			skb = skb_dequeue(&pshm_drv->sk_qhead);
			/* Copy in CAIF frame. */
			skb_copy_bits(skb, 0, pbuf->desc_vptr +
					pbuf->frm_ofs + SHM_HDR_LEN +
					frm->hdr_ofs, skb->len);

			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
					frmlen;
			dev_kfree_skb(skb);

			/* Fill in the shared memory packet descriptor area. */
			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
			/* Forward to current frame. */
			pck_desc += pbuf->frames;
			pck_desc->frm_ofs = (pbuf->phy_addr -
						pshm_drv->shm_base_addr) +
						pbuf->frm_ofs;
			pck_desc->frm_len = frmlen;
			/* Terminate packet descriptor area. */
			pck_desc++;
			pck_desc->frm_ofs = 0;
			/* Update buffer parameters. */
			pbuf->frames++;
			/* Advance to the next 32-byte aligned frame offset. */
			pbuf->frm_ofs += ALIGN(frmlen, 32);

		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);

		/* Assign buffer as full. */
		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
		append = 0;
		mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
		spin_unlock_irqrestore(&pshm_drv->lock, flags);

		if (mbox_msg)
			pshm_drv->pshm_dev->pshmdev_mbxsend
					(pshm_drv->pshm_dev->shm_id, mbox_msg);
	} while (mbox_msg);
}

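/*
 * Transmit entry point: queue the skb and defer the copy into shared
 * memory to the TX work queue.
 */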
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
	struct shmdrv_layer *pshm_drv;
	unsigned long flags = 0;

	pshm_drv = netdev_priv(shm_netdev);

	spin_lock_irqsave(&pshm_drv->lock, flags);

	skb_queue_tail(&pshm_drv->sk_qhead, skb);

	spin_unlock_irqrestore(&pshm_drv->lock, flags);

	/* Schedule the TX work queue for deferred processing of skbs. */
	if (!work_pending(&pshm_drv->shm_tx_work))
		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

	return 0;
}

static const struct net_device_ops netdev_ops = {
	.ndo_open = shm_netdev_open,
	.ndo_stop = shm_netdev_close,
	.ndo_start_xmit = shm_netdev_tx,
};

static void shm_netdev_setup(struct net_device *pshm_netdev)
{
	struct shmdrv_layer *pshm_drv;

	pshm_netdev->netdev_ops = &netdev_ops;
	pshm_netdev->mtu = CAIF_MAX_MTU;
	pshm_netdev->type = ARPHRD_CAIF;
	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
	pshm_netdev->tx_queue_len = 0;
	pshm_netdev->destructor = free_netdev;

	pshm_drv = netdev_priv(pshm_netdev);

	/* Initialize structures in a clean state. */
	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));

	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}

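/*
 * Probe function: allocates the network device, verifies that the
 * shared memory area can hold the configured buffers, carves the area
 * into TX and RX buffers and registers the device.
 */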
int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
	int result, j;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
						"cfshm%d", shm_netdev_setup);
	if (!pshm_dev->pshm_netdev)
		return -ENOMEM;

	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
	pshm_drv->pshm_dev = pshm_dev;

	/*
	 * Initialization starts with verifying the availability of the
	 * mailbox driver by calling its setup function. The mailbox
	 * driver must be available by this time for the SHM driver to
	 * function properly.
	 */
	if ((pshm_dev->pshmdev_mbxsetup
			(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
		pr_warn("Could not configure SHM mailbox, bailing out ...\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENODEV;
	}

	skb_queue_head_init(&pshm_drv->sk_qhead);

	pr_info("SHM device[%d] probed, new SHM driver instance at 0x%p\n",
			pshm_drv->pshm_dev->shm_id, pshm_drv);

	if (pshm_dev->shm_total_sz <
			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {

		pr_warn("ERROR: available physical SHM cannot accommodate "
				"the current SHM driver configuration, bailing out ...\n");
		free_netdev(pshm_dev->pshm_netdev);
		return -ENOMEM;
	}

	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;

	if (pshm_dev->shm_loopback)
		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
	else
		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
				(NR_TX_BUF * TX_BUF_SZ);

	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->tx_full_list);

	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
	INIT_LIST_HEAD(&pshm_drv->rx_full_list);

	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);

	pshm_drv->pshm_tx_workqueue =
			create_singlethread_workqueue("shm_tx_work");
	pshm_drv->pshm_rx_workqueue =
			create_singlethread_workqueue("shm_rx_work");
	if (pshm_drv->pshm_tx_workqueue == NULL ||
			pshm_drv->pshm_rx_workqueue == NULL) {
		pr_warn("ERROR: could not create work queues, bailing out ...\n");
		if (pshm_drv->pshm_tx_workqueue)
			destroy_workqueue(pshm_drv->pshm_tx_workqueue);
		if (pshm_drv->pshm_rx_workqueue)
			destroy_workqueue(pshm_drv->pshm_rx_workqueue);
		free_netdev(pshm_dev->pshm_netdev);
		return -ENOMEM;
	}

	for (j = 0; j < NR_TX_BUF; j++) {
		struct buf_list *tx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (tx_buf == NULL) {
			pr_warn("ERROR: could not allocate memory for tx_buf, "
					"bailing out ...\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		tx_buf->index = j;
		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
		tx_buf->len = TX_BUF_SZ;
		tx_buf->frames = 0;
		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;

		if (pshm_dev->shm_loopback)
			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
		else
			tx_buf->desc_vptr =
					ioremap(tx_buf->phy_addr, TX_BUF_SZ);

		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
	}

	for (j = 0; j < NR_RX_BUF; j++) {
		struct buf_list *rx_buf =
				kmalloc(sizeof(struct buf_list), GFP_KERNEL);

		if (rx_buf == NULL) {
			pr_warn("ERROR: could not allocate memory for rx_buf, "
					"bailing out ...\n");
			free_netdev(pshm_dev->pshm_netdev);
			return -ENOMEM;
		}
		rx_buf->index = j;
		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
		rx_buf->len = RX_BUF_SZ;

		if (pshm_dev->shm_loopback)
			rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
		else
			rx_buf->desc_vptr =
					ioremap(rx_buf->phy_addr, RX_BUF_SZ);
		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
	}

	pshm_drv->tx_empty_available = 1;
	result = register_netdev(pshm_dev->pshm_netdev);
	if (result)
		pr_warn("ERROR[%d]: SHM could not register with the network "
				"framework, bailing out ...\n", result);

	return result;
}

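/*
 * Remove function: frees all buffer bookkeeping, destroys the work
 * queues and unregisters the network device.
 */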
void caif_shmcore_remove(struct net_device *pshm_netdev)
{
	struct buf_list *pbuf;
	struct shmdrv_layer *pshm_drv = NULL;

	pshm_drv = netdev_priv(pshm_netdev);

	while (!(list_empty(&pshm_drv->tx_pend_list))) {
		pbuf =
			list_entry(pshm_drv->tx_pend_list.next,
					struct buf_list, list);

		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_full_list))) {
		pbuf =
			list_entry(pshm_drv->tx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->tx_empty_list))) {
		pbuf =
			list_entry(pshm_drv->tx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_full_list))) {
		pbuf =
			list_entry(pshm_drv->rx_full_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_pend_list))) {
		pbuf =
			list_entry(pshm_drv->rx_pend_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	while (!(list_empty(&pshm_drv->rx_empty_list))) {
		pbuf =
			list_entry(pshm_drv->rx_empty_list.next,
					struct buf_list, list);
		list_del(&pbuf->list);
		kfree(pbuf);
	}

	/* Destroy work queues. */
	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
	destroy_workqueue(pshm_drv->pshm_rx_workqueue);

	unregister_netdev(pshm_netdev);
}