/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

#include "cvmx-wqe.h"
#include "cvmx-fau.h"
#include "cvmx-pko.h"
#include "cvmx-helper.h"

#include "cvmx-gmxx-defs.h"

#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))

/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif

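/**
 * cvm_oct_adjust_skb_to_free - work out how many tx skbs may be freed
 * @skb_to_free: value read from the FAU counter by the caller's
 *		 speculative fetch-and-add of MAX_SKB_TO_FREE
 * @fau: address of the FAU register that was modified
 *
 * The hardware decrements the per-queue FAU register as it finishes
 * with each skb, so its (negative) value is the number of skbs that can
 * be freed.  The caller speculatively added MAX_SKB_TO_FREE to claim
 * them; undo whatever part of that add was not backed by completed skbs
 * and return how many skbs may actually be freed (at most
 * MAX_SKB_TO_FREE).
 */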
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
	int32_t undo;
	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
	if (undo > 0)
		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
	return skb_to_free;
}

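/**
 * cvm_oct_free_tx_skbs - free skbs the hardware has finished sending
 * @priv: per-port private data
 *
 * For each output queue of the port, read how many queued skbs the PKO
 * hardware has completed and release them from the tx free list.
 */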
void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv)
{
	int32_t skb_to_free;
	int qos, queues_per_port;
	queues_per_port = cvmx_pko_get_num_queues(priv->port);
	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, MAX_SKB_TO_FREE);
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);

		while (skb_to_free > 0) {
			dev_kfree_skb_any(skb_dequeue(&priv->tx_free_list[qos]));
			skb_to_free--;
		}
	}
}

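/**
 * cvm_oct_restart_tx - hrtimer callback used to restart a stopped queue
 * @timer: the tx_restart_timer embedded in struct octeon_ethernet
 *
 * Free any skbs the hardware has finished with and wake the netdev
 * queue if it was stopped because too many packets were queued.
 */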
enum hrtimer_restart cvm_oct_restart_tx(struct hrtimer *timer)
{
	struct octeon_ethernet *priv = container_of(timer, struct octeon_ethernet, tx_restart_timer);
	struct net_device *dev = cvm_oct_device[priv->port];

	cvm_oct_free_tx_skbs(priv);

	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);

	return HRTIMER_NORESTART;
}

/**
 * Packet transmit
 *
 * @skb: Packet to send
 * @dev: Device info structure
 * Returns: Always returns NETDEV_TX_OK (zero).
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	uint64_t old_scratch;
	uint64_t old_scratch2;
	int qos;
	int i;
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sk_buff *to_free_list;
	int32_t skb_to_free;
	int32_t buffers_to_free;
	unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif

	/*
	 * Prefetch the private data structure. It is larger than one
	 * cache line.
	 */
	prefetch(priv);

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Fetch and increment the number of packets to be
		 * freed.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4,
					       MAX_SKB_TO_FREE);
	}

	/*
	 * We have space for 6 segment pointers. If there will be more
	 * than that, we must linearize.
	 */
	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
		if (unlikely(__skb_linearize(skb))) {
			queue_type = QUEUE_DROP;
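			/*
			 * Linearizing failed so the packet will be
			 * dropped, but still sample the FAU counter so
			 * the skip_xmit path below can reap any skbs
			 * the hardware has already finished with.
			 */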
			if (USE_ASYNC_IOBDMA) {
				/* Get the number of skbuffs in use by the hardware */
				CVMX_SYNCIOBDMA;
				skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
			} else {
				/* Get the number of skbuffs in use by the hardware */
				skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
								       MAX_SKB_TO_FREE);
			}
			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			goto skip_xmit;
		}
	}

	/*
	 * The CN3XXX series of parts has an errata (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet. As a workaround for this, we
	 * pad packets to be 68 bytes whenever we are in half duplex
	 * mode. We don't handle the case of having a small packet but
	 * no room to add the padding. The kernel should always give
	 * us at least a cache line.
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;
				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					memset(__skb_put(skb, add_bytes), 0,
					       add_bytes);
			}
		}
	}

	/* Build the PKO command */
	pko_command.u64 = 0;
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

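	/*
	 * Do not let PKO free the skb's data; instead have it subtract
	 * one (subone0/size0) from the per-queue FAU register named in
	 * reg0 when the packet has gone out, so the core can tell when
	 * the hardware is done with the buffer.
	 */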
	pko_command.s.dontfree = 1;
	pko_command.s.reg0 = priv->fau + qos * 4;

	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb->len;
	} else {
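		/*
		 * The data is scattered across multiple fragments, so
		 * build a gather list in skb->cb: entry 0 describes
		 * the linear part of the skb and each following entry
		 * describes one page fragment.  hw_buffer is then
		 * pointed at the list itself and PKO is told to
		 * gather.
		 */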
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb_headlen(skb);
		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
			hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)(page_address(fs->page) + fs->page_offset));
			hw_buffer.s.size = fs->size;
			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
		}
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.gather = 1;
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bad things may happen. If in doubt and
	 * you don't need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
	 * shown a 25% increase in performance under some loads.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/*
		 * printk("TX buffer beginning can't meet FPA
		 * alignment constraints\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/*
		   printk("TX buffer isn't large enough for the FPA\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/*
		   printk("TX buffer sharing data with someone else\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/*
		   printk("TX buffer has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/*
		   printk("TX buffer header has been cloned\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/*
		   printk("TX buffer has a destructor\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/*
		   printk("TX buffer has fragments\n");
		 */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    (skb->truesize !=
	     sizeof(*skb) + skb_end_pointer(skb) - skb->head)) {
		/*
		   printk("TX buffer truesize has been changed\n");
		 */
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * We can use this buffer in the FPA. We don't need the FAU
	 * update anymore
	 */
	pko_command.s.reg0 = 0;
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
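	/*
	 * Stash the skb pointer in the eight bytes just in front of
	 * fpa_head so the skb can be found again when this buffer
	 * later comes back out of the packet pool.
	 */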
	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;

	/*
	 * The skbuff will be reused without ever being freed. We must
	 * clean up a bunch of core things.
	 */
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

dont_put_skbuff_in_hw:

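	/*
	 * The hardware can insert the TCP/UDP checksum only for simple
	 * IPv4 packets: no IP options (ihl == 5) and not fragmented
	 * (at most the DF bit set).  ipoffp1 is the offset of the IP
	 * header within the packet, plus one.
	 */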
	/* Check if we can use the hardware checksumming */
	if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) && (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) || (ip_hdr(skb)->frag_off == 1 << 14))
	    && ((ip_hdr(skb)->protocol == IPPROTO_TCP)
		|| (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = sizeof(struct ethhdr) + 1;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau + qos * 4);

	/*
	 * If we're sending faster than the receive side can free them,
	 * don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree) {
		pko_command.s.dontfree = 1;
		pko_command.s.reg0 = priv->fau + qos * 4;
	}

	if (pko_command.s.dontfree)
		queue_type = QUEUE_CORE;
	else
		queue_type = QUEUE_HW;

	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
		if (dev->tx_queue_len != 0) {
			/* Drop the lock when notifying the core. */
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
			netif_stop_queue(dev);
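			/*
			 * Arm the restart timer; cvm_oct_restart_tx()
			 * will reap completed skbs and wake the queue
			 * once the hardware has caught up.
			 */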
			hrtimer_start(&priv->tx_restart_timer,
				      priv->tx_restart_interval, HRTIMER_MODE_REL);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

		} else {
			/* If not using normal queueing. */
			queue_type = QUEUE_DROP;
			goto skip_xmit;
		}
	}

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_NONE);

	/* Send the packet to the output queue */
	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
						 priv->queue + qos,
						 pko_command, hw_buffer,
						 CVMX_PKO_LOCK_NONE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
		queue_type = QUEUE_DROP;
	}
skip_xmit:
	to_free_list = NULL;

	switch (queue_type) {
	case QUEUE_DROP:
		skb->next = to_free_list;
		to_free_list = skb;
		priv->stats.tx_dropped++;
		break;
	case QUEUE_HW:
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
		break;
	case QUEUE_CORE:
		__skb_queue_tail(&priv->tx_free_list[qos], skb);
		break;
	default:
		BUG();
	}

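	/*
	 * Pull the skbs the hardware has finished with off the per-qos
	 * free list while still holding the lock; they are actually
	 * released further down, after the lock has been dropped.
	 */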
	while (skb_to_free > 0) {
		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
		t->next = to_free_list;
		to_free_list = t;
		skb_to_free--;
	}

	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

	/* Do the actual freeing outside of the lock. */
	while (to_free_list) {
		struct sk_buff *t = to_free_list;
		to_free_list = to_free_list->next;
		dev_kfree_skb_any(t);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	}

	return NETDEV_TX_OK;
}

/**
 * Packet transmit to the POW
 *
 * @skb: Packet to send
 * @dev: Device info structure
 * Returns: Always returns zero.
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
	if (unlikely(work == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a work queue entry\n",
			   dev->name);
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(packet_buffer == NULL)) {
		DEBUGPRINT("%s: Failed to allocate a packet buffer\n",
			   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
		priv->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return 0;
	}

	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configured skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * hurt.
	 */
	copy_location = packet_buffer + sizeof(uint64_t);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;

	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in
	 * cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);

	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	work->hw_chksum = skb->csum;
	work->len = skb->len;
	work->ipprt = priv->port;
	work->qos = priv->port & 0x7;
	work->grp = pow_send_group;
	work->tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->tag = pow_send_group;	/* FIXME */
	/* Default to zero; the explicit zero assignments below are commented out. */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
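	/*
	 * "back" is measured in 128-byte cache lines and records how
	 * far before the data pointer the underlying buffer starts, so
	 * the buffer can later be freed back to the pool correctly.
	 */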
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;

	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;	/* FIXME */
		work->word2.s.vlan_cfi = 0;	/* FIXME */
		work->word2.s.vlan_id = 0;	/* FIXME */
		work->word2.s.dec_ipcomp = 0;	/* FIXME */
#endif
		work->word2.s.tcp_or_udp =
		    (ip_hdr(skb)->protocol == IPPROTO_TCP)
		    || (ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
		/* FIXME */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0)
					  || (ip_hdr(skb)->frag_off ==
					      1 << 14));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;	/* FIXME */
		work->word2.snoip.vlan_cfi = 0;	/* FIXME */
		work->word2.snoip.vlan_id = 0;	/* FIXME */
		work->word2.snoip.software = 0;	/* Hardware would set to zero */
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
		    (skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
		    (skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}

	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->tag, work->tag_type, work->qos,
			     work->grp);
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	return 0;
}

/**
 * This function frees all skbs that are currently queued for TX.
 *
 * @dev: Device being shutdown
 */
void cvm_oct_tx_shutdown(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}