add mt7601u driver
deliverable/linux.git: drivers/net/wireless/mediatek/mt7601u/dma.c
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}

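/* Build an skb for one RX segment by copying the 802.11 frame out of the
 * URB buffer. When the L2PAD flag is set in the RXWI, the header and the
 * payload are separated by a 2-byte pad which is dropped while copying.
 */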
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			u8 *data, u32 seg_len)
{
	struct sk_buff *skb;
	u32 true_len;

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD))
		seg_len -= 2;

	skb = alloc_skb(seg_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);

		memcpy(skb_put(skb, hdr_len), data, hdr_len);
		data += hdr_len + 2;
		seg_len -= hdr_len;
	}

	memcpy(skb_put(skb, seg_len), data, seg_len);

	true_len = mt76_mac_process_rx(dev, skb, skb->data, rxwi);
	skb_trim(skb, true_len);

	return skb;
}

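/* Build an skb for one RX segment without copying the whole frame: only the
 * 802.11 header (plus a small head of data) goes into the skb's linear area,
 * the remainder is attached as a page fragment referencing the URB buffer.
 */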
static struct sk_buff *
mt7601u_rx_skb_from_seg_paged(struct mt7601u_dev *dev,
			      struct mt7601u_rxwi *rxwi, void *data,
			      u32 seg_len, u32 truesize, struct page *p)
{
	unsigned int hdr_len = ieee80211_get_hdrlen_from_buf(data, seg_len);
	unsigned int true_len, copy, frag;
	struct sk_buff *skb;

	skb = alloc_skb(128, GFP_ATOMIC);
	if (!skb)
		return NULL;

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		memcpy(skb_put(skb, hdr_len), data, hdr_len);
		data += hdr_len + 2;
		true_len -= hdr_len;
		hdr_len = 0;
	}

	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	memcpy(skb_put(skb, copy), data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);
	}

	return skb;
}

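/* Process a single RX segment: strip the DMA header and RXWI in front of the
 * frame, read the FCE descriptor from the end of the segment, build an skb
 * and hand it to mac80211.
 */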
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p, bool paged)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some of
	 * the information, we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	if (paged)
		skb = mt7601u_rx_skb_from_seg_paged(dev, rxwi, data, seg_len,
						    truesize, p);
	else
		skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len);
	if (!skb)
		return;

	ieee80211_rx_ni(dev->hw, skb);
}

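/* Return the total length of the next segment in an aggregated RX buffer
 * (DMA headers included), or 0 if the remaining data cannot hold a valid
 * segment. The length reported by the hardware must be non-zero, 4-byte
 * aligned and must fit within the buffer.
 */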
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON(!dma_len) ||
	    WARN_ON(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON(dma_len & 0x3))
		return 0;

	return MT_DMA_HDRS + dma_len;
}

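/* Process all segments of a completed RX URB. Large transfers are handled
 * in paged mode: frames reference the current page and a freshly allocated
 * page is swapped in for the next transfer; small transfers are simply
 * copied.
 */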
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	bool paged = true;
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* Copy if there is very little data in the buffer. */
	if (data_len < 512) {
		paged = false;
	} else {
		new_p = dev_alloc_pages(MT_RX_ORDER);
		if (!new_p)
			paged = false;
	}

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len, e->p, paged);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, paged);

	if (paged) {
		/* we have one extra ref from the allocator */
		__free_pages(e->p, MT_RX_ORDER);

		e->p = new_p;
	}
}

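/* Pop the oldest completed entry off the RX queue, or return NULL if no URB
 * has completed since the last call. Runs under dev->rx_lock so it is safe
 * against the URB completion handler.
 */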
static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (!q->pending)
		goto out;

	buf = &q->e[q->start];
	q->pending--;
	q->start = (q->start + 1) % q->entries;
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	return buf;
}

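/* RX URB completion handler: mark the entry as pending and kick the RX
 * tasklet, which does the actual processing outside of completion context.
 */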
static void mt7601u_complete_rx(struct urb *urb)
{
	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: RX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
		goto out;

	q->end = (q->end + 1) % q->entries;
	q->pending++;
	tasklet_schedule(&dev->rx_tasklet);
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

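/* RX bottom half: process every pending entry and resubmit its URB.
 * Entries whose URB completed with an error are skipped.
 */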
static void mt7601u_rx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {
		if (e->urb->status)
			continue;

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
	}
}

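/* TX URB completion handler: report TX status for the finished skb, free up
 * the queue slot, wake the mac80211 queue once enough slots are available
 * again and schedule the delayed statistics read.
 */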
static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (mt7601u_urb_has_error(urb))
		dev_err(dev->dev, "Error: TX urb failed:%d\n", urb->status);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	trace_mt_tx_dma_done(dev, skb);

	mt7601u_tx_status(dev, skb);

	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;

	if (urb->status)
		goto out;

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

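/* Queue an skb on the TX queue of the given OUT endpoint and submit its bulk
 * URB. Stops the corresponding mac80211 queue when the queue becomes full.
 */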
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	e->skb = skb;
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after device is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
	if (ep == 5)
		return MT_QSEL_MGMT;
	return MT_QSEL_EDCA;
}

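/* Entry point for the TX path: wrap the frame with the DMA descriptor for
 * the target queue and submit it on the matching OUT endpoint. The skb is
 * freed here if submission fails.
 */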
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)
{
	u8 ep = q2ep(hw_q);
	u32 dma_flags;
	int ret;

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
	if (ret)
		return ret;

	ret = mt7601u_dma_submit_tx(dev, skb, ep);
	if (ret) {
		ieee80211_free_txskb(dev->hw, skb);
		return ret;
	}

	return 0;
}

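/* Poison all RX URBs so no further completions are delivered. The lock is
 * dropped around usb_poison_urb() because poisoning waits for an in-flight
 * completion handler, which takes dev->rx_lock itself.
 */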
static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	for (i = 0; i < dev->rx_q.entries; i++) {
		int next = dev->rx_q.end;

		spin_unlock_irqrestore(&dev->rx_lock, flags);
		usb_poison_urb(dev->rx_q.e[next].urb);
		spin_lock_irqsave(&dev->rx_lock, flags);
	}

	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

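/* (Re)submit the bulk URB of one RX entry, pointing it at the entry's page
 * buffer.
 */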
static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);
	unsigned pipe;
	int ret;

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);
	if (ret)
		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

	return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
	int i, ret;

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++) {
		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);
	}
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
	int i;

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
	dev->rx_q.dev = dev;
	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
			return -ENOMEM;
	}

	return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	WARN_ON(q->used);

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);
		usb_free_urb(q->e[i].urb);
	}
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}

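/* Set up the USB "DMA" machinery: allocate the TX and RX queues, initialise
 * the RX tasklet and submit the initial RX URBs. On failure everything that
 * was set up is torn down again via mt7601u_dma_cleanup().
 */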
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
	int ret = -ENOMEM;

	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);

	ret = mt7601u_alloc_tx(dev);
	if (ret)
		goto err;
	ret = mt7601u_alloc_rx(dev);
	if (ret)
		goto err;

	ret = mt7601u_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	mt7601u_dma_cleanup(dev);
	return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);
}