/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>      /* printk() */
#include <linux/slab.h>        /* kmalloc() */
#include <linux/errno.h>       /* error codes */
#include <linux/types.h>       /* size_t */
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/netdevice.h>   /* struct device, and other headers */
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/hugetlb.h>
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <asm/checksum.h>
#include <asm/homecache.h>

#include <hv/drv_xgbe_intf.h>
#include <hv/drv_xgbe_impl.h>
#include <hv/hypervisor.h>
#include <hv/netio_intf.h>

/* For TSO */
#include <linux/ip.h>
#include <linux/tcp.h>


/* There is no singlethread_cpu, so schedule work on the current cpu. */
#define singlethread_cpu -1


/*
 * First, "tile_net_init_module()" initializes all four "devices" which
 * can be used by linux.
 *
 * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
 * the network cpus, then uses "tile_net_open_aux()" to initialize
 * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
 * the tiles, provide buffers to LIPP, allow ingress to start, and
 * turn on hypervisor interrupt handling (and NAPI) on all tiles.
 *
 * If registration fails due to the link being down, then "retry_work"
 * is used to keep calling "tile_net_open_inner()" until it succeeds.
 *
 * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
 * stop egress, drain the LIPP buffers, unregister all the tiles, stop
 * LIPP/LEPP, and wipe the LEPP queue.
 *
 * We start out with the ingress interrupt enabled on each CPU.  When
 * this interrupt fires, we disable it, and call "napi_schedule()".
 * This will cause "tile_net_poll()" to be called, which will pull
 * packets from the netio queue, filtering them out, or passing them
 * to "netif_receive_skb()".  If our budget is exhausted, we will
 * return, knowing we will be called again later.  Otherwise, we
 * reenable the ingress interrupt, and call "napi_complete()".
 *
 *
 * NOTE: The use of "native_driver" ensures that EPP exists, and that
 * "epp_sendv" is legal, and that "LIPP" is being used.
 *
 * NOTE: Failing to free completions for an arbitrarily long time
 * (which is defined to be illegal) does in fact cause bizarre
 * problems.  The "egress_timer" helps prevent this from happening.
 *
 * NOTE: The egress code can be interrupted by the interrupt handler.
 */


/* HACK: Allow use of "jumbo" packets. */
/* This should be 1500 if "jumbo" is not set in LIPP. */
/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
/* ISSUE: This has not been thoroughly tested (except at 1500). */
#define TILE_NET_MTU 1500

/* HACK: Define to support GSO. */
/* ISSUE: This may actually hurt performance of the TCP blaster. */
/* #define TILE_NET_GSO */

/* Define this to collapse "duplicate" acks. */
/* #define IGNORE_DUP_ACKS */

/* HACK: Define this to verify incoming packets. */
/* #define TILE_NET_VERIFY_INGRESS */

/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
#define TILE_NET_TX_QUEUE_LEN 0

/* Define to dump packets (prints out the whole packet on tx and rx). */
/* #define TILE_NET_DUMP_PACKETS */

/* Define to enable debug spew (all PDEBUG's are enabled). */
/* #define TILE_NET_DEBUG */


/* Define to activate paranoia checks. */
/* #define TILE_NET_PARANOIA */

/* Default transmit lockup timeout period, in jiffies. */
#define TILE_NET_TIMEOUT (5 * HZ)

/* Default retry interval for bringing up the NetIO interface, in jiffies. */
#define TILE_NET_RETRY_INTERVAL (5 * HZ)

/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
#define TILE_NET_DEVS 4



/* Paranoia. */
#if NET_IP_ALIGN != LIPP_PACKET_PADDING
#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
#endif


/* Debug print. */
#ifdef TILE_NET_DEBUG
#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
#else
#define PDEBUG(fmt, args...)
#endif


MODULE_AUTHOR("Tilera");
MODULE_LICENSE("GPL");

/*
 * Queue of incoming packets for a specific cpu and device.
 *
 * Includes a pointer to the "system" data, and the actual "user" data.
 */
struct tile_netio_queue {
	netio_queue_impl_t *__system_part;
	netio_queue_user_impl_t __user_part;

};


/*
 * Statistics counters for a specific cpu and device.
 */
struct tile_net_stats_t {
	u32 rx_packets;
	u32 rx_bytes;
	u32 tx_packets;
	u32 tx_bytes;
};


/*
 * Info for a specific cpu and device.
 *
 * ISSUE: There is a "dev" pointer in "napi" as well.
 */
struct tile_net_cpu {
	/* The NAPI struct. */
	struct napi_struct napi;
	/* Packet queue. */
	struct tile_netio_queue queue;
	/* Statistics. */
	struct tile_net_stats_t stats;
	/* ISSUE: Is this needed? */
	bool napi_enabled;
	/* True if this tile has successfully registered with the IPP. */
	bool registered;
	/* True if the link was down last time we tried to register. */
	bool link_down;
	/* True if "egress_timer" is scheduled. */
	bool egress_timer_scheduled;
	/* Number of small sk_buffs which must still be provided. */
	unsigned int num_needed_small_buffers;
	/* Number of large sk_buffs which must still be provided. */
	unsigned int num_needed_large_buffers;
	/* A timer for handling egress completions. */
	struct timer_list egress_timer;
};


/*
 * Info for a specific device.
 */
struct tile_net_priv {
	/* Our network device. */
	struct net_device *dev;
	/* The actual egress queue. */
	lepp_queue_t *epp_queue;
	/* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */
	spinlock_t cmd_lock;
	/* Protects "epp_queue->comp_head". */
	spinlock_t comp_lock;
	/* The hypervisor handle for this interface. */
	int hv_devhdl;
	/* The intr bit mask that IDs this device. */
	u32 intr_id;
	/* True iff "tile_net_open_aux()" has succeeded. */
	int partly_opened;
	/* True iff "tile_net_open_inner()" has succeeded. */
	int fully_opened;
	/* Effective network cpus. */
	struct cpumask network_cpus_map;
	/* Number of network cpus. */
	int network_cpus_count;
	/* Credits per network cpu. */
	int network_cpus_credits;
	/* Network stats. */
	struct net_device_stats stats;
	/* For NetIO bringup retries. */
	struct delayed_work retry_work;
	/* Quick access to per cpu data. */
	struct tile_net_cpu *cpu[NR_CPUS];
};


/*
 * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
 */
static struct net_device *tile_net_devs[TILE_NET_DEVS];

/*
 * The "tile_net_cpu" structures for each device.
 */
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);


/*
 * True if "network_cpus" was specified.
 */
static bool network_cpus_used;

/*
 * The actual cpus in "network_cpus".
 */
static struct cpumask network_cpus_map;



#ifdef TILE_NET_DEBUG
/*
 * printk with extra stuff.
 *
 * We print the CPU we're running in brackets.
 */
static void net_printk(char *fmt, ...)
{
	int i;
	int len;
	va_list args;
	static char buf[256];

	len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
	va_start(args, fmt);
	i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
	va_end(args);
	buf[255] = '\0';
	pr_notice("%s", buf);
}
#endif


#ifdef TILE_NET_DUMP_PACKETS
/*
 * Dump a packet.
 */
static void dump_packet(unsigned char *data, unsigned long length, char *s)
{
	unsigned long i;
	static unsigned int count;
	char buf[128];  /* Formats one 16-byte line of hex output. */

	pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
		data, length, s, count++);

	pr_info("\n");

	for (i = 0; i < length; i++) {
		if ((i & 0xf) == 0)
			sprintf(buf, "%8.8lx:", i);
		sprintf(buf + strlen(buf), " %2.2x", data[i]);
		if ((i & 0xf) == 0xf || i == length - 1)
			pr_info("%s\n", buf);
	}
}
#endif


/*
 * Provide support for the __netio_fastio1() swint
 * (see <hv/drv_xgbe_intf.h> for how it is used).
 *
 * The fastio swint2 call may clobber all the caller-saved registers.
 * It rarely clobbers memory, but we allow for the possibility in
 * the signature just to be on the safe side.
 *
 * Also, gcc doesn't seem to allow an input operand to be
 * clobbered, so we fake it with dummy outputs.
 *
 * This function can't be static because of the way it is declared
 * in the netio header.
 */
inline int __netio_fastio1(u32 fastio_index, u32 arg0)
{
	long result, clobber_r1, clobber_r10;
	asm volatile("swint2"
		     : "=R00" (result),
		       "=R01" (clobber_r1), "=R10" (clobber_r10)
		     : "R10" (fastio_index), "R01" (arg0)
		     : "memory", "r2", "r3", "r4",
		       "r5", "r6", "r7", "r8", "r9",
		       "r11", "r12", "r13", "r14",
		       "r15", "r16", "r17", "r18", "r19",
		       "r20", "r21", "r22", "r23", "r24",
		       "r25", "r26", "r27", "r28", "r29");
	return result;
}

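/*
 * NOTE: A "linux_buffer_t" packs the high 31 bits of a 38-bit client
 * physical address (whose low 7 bits must be zero, i.e. the buffer is
 * 128-byte aligned) into bits 1..31, with bit 0 distinguishing
 * "small" from "large" buffers.  As an illustration, a small buffer
 * at CPA 0x10000080 would be encoded as (0x10000080 >> 7 << 1) + 1;
 * decoding reverses the shifts, as in "tile_net_poll_aux()" below.
 */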
/*
 * Provide a linux buffer to LIPP.
 */
static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
					  void *va, bool small)
{
	struct tile_netio_queue *queue = &info->queue;

	/* Convert "va" and "small" to "linux_buffer_t". */
	unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;

	__netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
}


/*
 * Provide a linux buffer for LIPP.
 */
static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
					   bool small)
{
	/* ISSUE: What should we use here? */
	unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;

	/* Round up to avoid "false sharing" with the last cache line. */
	unsigned int buffer_size =
		(((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
		  CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());

	/*
	 * ISSUE: Since CPAs are 38 bits, and we can only encode the
	 * high 31 bits in a "linux_buffer_t", the low 7 bits must be
	 * zero, and thus, we must align the actual "va" mod 128.
	 */
	const unsigned long align = 128;

	struct sk_buff *skb;
	void *va;

	struct sk_buff **skb_ptr;

	/* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */
	/* and also "reserves" that many bytes. */
	/* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */
	int len = sizeof(*skb_ptr) + align + buffer_size;

	while (1) {

		/* Allocate (or fail). */
		skb = dev_alloc_skb(len);
		if (skb == NULL)
			return false;

		/* Make room for a back-pointer to 'skb'. */
		skb_reserve(skb, sizeof(*skb_ptr));

		/* Make sure we are aligned. */
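		/*
		 * NOTE: "-(long)skb->data & (align - 1)" is the distance
		 * from "skb->data" up to the next 128-byte boundary
		 * (zero if already aligned), since "align" is a power
		 * of two; e.g. a pointer ending in 0x30 reserves 0x50
		 * more bytes.
		 */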
		skb_reserve(skb, -(long)skb->data & (align - 1));

		/* This address is given to IPP. */
		va = skb->data;

		if (small)
			break;

		/* ISSUE: This has never been observed! */
		/* Large buffers must not span a huge page. */
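		/*
		 * That is, the offset of "va" within its huge page,
		 * plus 1535, must not carry into the HPAGE_MASK bits.
		 * NOTE: The 1535 apparently assumes a 1536 byte buffer,
		 * but "buffer_size" can be somewhat larger than that.
		 */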
		if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0)
			break;
		pr_err("Leaking unaligned linux buffer at %p.\n", va);
	}

	/* Skip two bytes to satisfy LIPP assumptions. */
	/* Note that this aligns IP on a 16 byte boundary. */
	/* ISSUE: Do this when the packet arrives? */
	skb_reserve(skb, NET_IP_ALIGN);

	/* Save a back-pointer to 'skb'. */
	skb_ptr = va - sizeof(*skb_ptr);
	*skb_ptr = skb;

	/* Invalidate the packet buffer. */
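	/*
	 * NOTE: Unless the buffer is hash-for-home (in which case the
	 * cache system presumably keeps it coherent for us), the shim
	 * will write packet data to this memory behind our back, so we
	 * invalidate any locally cached lines now to ensure that later
	 * reads of the packet see the shim's data.
	 */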
	if (!hash_default)
		__inv_buffer(skb->data, buffer_size);

	/* Make sure "skb_ptr" has been flushed. */
	__insn_mf();

#ifdef TILE_NET_PARANOIA
#if CHIP_HAS_CBOX_HOME_MAP()
	if (hash_default) {
		HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
		if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
			panic("Non-coherent ingress buffer!");
	}
#endif
#endif

	/* Provide the new buffer. */
	tile_net_provide_linux_buffer(info, va, small);

	return true;
}


/*
 * Provide linux buffers for LIPP.
 */
static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
{
	while (info->num_needed_small_buffers != 0) {
		if (!tile_net_provide_needed_buffer(info, true))
			goto oops;
		info->num_needed_small_buffers--;
	}

	while (info->num_needed_large_buffers != 0) {
		if (!tile_net_provide_needed_buffer(info, false))
			goto oops;
		info->num_needed_large_buffers--;
	}

	return;

oops:

	/* Add a description to the page allocation failure dump. */
	pr_notice("Could not provide a linux buffer to LIPP.\n");
}

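/*
 * NOTE: The LEPP completion ring appears to use three indices:
 * entries in [comp_head, comp_busy) are completed commands whose
 * skbs may now be freed, entries in [comp_busy, comp_tail) are still
 * being processed by the EPP, and new commands are added at
 * "comp_tail".  The function below only consumes the first region.
 */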
/*
 * Grab some LEPP completions, and store them in "comps", of size
 * "comps_size", and return the number of completions which were
 * stored, so the caller can free them.
 *
 * If "pending" is not NULL, it will be set to true if there might
 * still be some pending completions caused by this tile, else false.
 */
static unsigned int tile_net_lepp_grab_comps(struct net_device *dev,
					     struct sk_buff *comps[],
					     unsigned int comps_size,
					     bool *pending)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	lepp_queue_t *eq = priv->epp_queue;

	unsigned int n = 0;

	unsigned int comp_head;
	unsigned int comp_busy;
	unsigned int comp_tail;

	spin_lock(&priv->comp_lock);

	comp_head = eq->comp_head;
	comp_busy = eq->comp_busy;
	comp_tail = eq->comp_tail;

	while (comp_head != comp_busy && n < comps_size) {
		comps[n++] = eq->comps[comp_head];
		LEPP_QINC(comp_head);
	}

	if (pending != NULL)
		*pending = (comp_head != comp_tail);

	eq->comp_head = comp_head;

	spin_unlock(&priv->comp_lock);

	return n;
}


/*
 * Make sure the egress timer is scheduled.
 *
 * Note that we use "schedule if not scheduled" logic instead of the more
 * obvious "reschedule" logic, because "reschedule" is fairly expensive.
 */
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
	if (!info->egress_timer_scheduled) {
		mod_timer_pinned(&info->egress_timer, jiffies + 1);
		info->egress_timer_scheduled = true;
	}
}


/*
 * The "function" for "info->egress_timer".
 *
 * This timer will reschedule itself as long as there are any pending
 * completions expected (on behalf of any tile).
 *
 * ISSUE: Realistically, will the timer ever stop scheduling itself?
 *
 * ISSUE: This timer is almost never actually needed, so just use a global
 * timer that can run on any tile.
 *
 * ISSUE: Maybe instead track number of expected completions, and free
 * only that many, resetting to zero if "pending" is ever false.
 */
static void tile_net_handle_egress_timer(unsigned long arg)
{
	struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
	struct net_device *dev = info->napi.dev;

	struct sk_buff *olds[32];
	unsigned int wanted = 32;
	unsigned int i, nolds = 0;
	bool pending;

	/* The timer is no longer scheduled. */
	info->egress_timer_scheduled = false;

	nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending);

	for (i = 0; i < nolds; i++)
		kfree_skb(olds[i]);

	/* Reschedule timer if needed. */
	if (pending)
		tile_net_schedule_egress_timer(info);
}

#ifdef IGNORE_DUP_ACKS

/*
 * Help detect "duplicate" ACKs.  These are sequential packets (for a
 * given flow) which are exactly 66 bytes long, sharing everything but
 * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
 * Tstamps=10@0x38.  The ID's are +1, the Hsum's are -1, the Ack's are
 * +N, and the Tstamps are usually identical.
 *
 * NOTE: Apparently truly duplicate acks (with identical "ack" values),
 * should not be collapsed, as they are used for some kind of flow control.
 */
static bool is_dup_ack(char *s1, char *s2, unsigned int len)
{
	int i;

	unsigned long long ignorable = 0;

	/* Identification. */
	ignorable |= (1ULL << 0x12);
	ignorable |= (1ULL << 0x13);

	/* Header checksum. */
	ignorable |= (1ULL << 0x18);
	ignorable |= (1ULL << 0x19);

	/* ACK. */
	ignorable |= (1ULL << 0x2a);
	ignorable |= (1ULL << 0x2b);
	ignorable |= (1ULL << 0x2c);
	ignorable |= (1ULL << 0x2d);

	/* WinSize. */
	ignorable |= (1ULL << 0x30);
	ignorable |= (1ULL << 0x31);

	/* Checksum. */
	ignorable |= (1ULL << 0x32);
	ignorable |= (1ULL << 0x33);

	for (i = 0; i < len; i++, ignorable >>= 1) {

		if ((ignorable & 1) || (s1[i] == s2[i]))
			continue;

#ifdef TILE_NET_DEBUG
		/* HACK: Mention non-timestamp diffs. */
		if (i < 0x38 && i != 0x2f &&
		    net_ratelimit())
			pr_info("Diff at 0x%x\n", i);
#endif

		return false;
	}

#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
	/* HACK: Do not suppress truly duplicate ACKs. */
	/* ISSUE: Is this actually necessary or helpful? */
	if (s1[0x2a] == s2[0x2a] &&
	    s1[0x2b] == s2[0x2b] &&
	    s1[0x2c] == s2[0x2c] &&
	    s1[0x2d] == s2[0x2d]) {
		return false;
	}
#endif

	return true;
}

#endif


/*
 * Like "tile_net_handle_packets()", but just discard packets.
 */
static void tile_net_discard_packets(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	while (qup->__packet_receive_read !=
	       qsp->__packet_receive_queue.__packet_write) {

		int index = qup->__packet_receive_read;

		int index2_aux = index + sizeof(netio_pkt_t);
		int index2 =
			((index2_aux ==
			  qsp->__packet_receive_queue.__last_packet_plus_one) ?
			 0 : index2_aux);

		netio_pkt_t *pkt = (netio_pkt_t *)
			((unsigned long) &qsp[1] + index);

		/* Extract the "linux_buffer_t". */
		unsigned int buffer = pkt->__packet.word;

		/* Convert "linux_buffer_t" to "va". */
		void *va = __va((phys_addr_t)(buffer >> 1) << 7);

		/* Acquire the associated "skb". */
		struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
		struct sk_buff *skb = *skb_ptr;

		kfree_skb(skb);

		/* Consume this packet. */
		qup->__packet_receive_read = index2;
	}
}


/*
 * Handle the next packet.  Return true if "processed", false if "filtered".
 */
static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
{
	struct net_device *dev = info->napi.dev;

	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;
	struct tile_net_stats_t *stats = &info->stats;

	int filter;

	int index2_aux = index + sizeof(netio_pkt_t);
	int index2 =
		((index2_aux ==
		  qsp->__packet_receive_queue.__last_packet_plus_one) ?
		 0 : index2_aux);

	netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);

	netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);

	/* Extract the packet size. */
	unsigned long len =
		(NETIO_PKT_CUSTOM_LENGTH(pkt) +
		 NET_IP_ALIGN - NETIO_PACKET_PADDING);

	/* Extract the "linux_buffer_t". */
	unsigned int buffer = pkt->__packet.word;

	/* Extract "small" (vs "large"). */
	bool small = ((buffer & 1) != 0);

	/* Convert "linux_buffer_t" to "va". */
	void *va = __va((phys_addr_t)(buffer >> 1) << 7);

	/* Extract the packet data pointer. */
	/* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
	unsigned char *buf = va + NET_IP_ALIGN;

#ifdef IGNORE_DUP_ACKS

	static int other;
	static int final;
	static int keep;
	static int skip;

#endif

	/* Invalidate the packet buffer. */
	if (!hash_default)
		__inv_buffer(buf, len);

	/* ISSUE: Is this needed? */
	dev->last_rx = jiffies;

#ifdef TILE_NET_DUMP_PACKETS
	dump_packet(buf, len, "rx");
#endif /* TILE_NET_DUMP_PACKETS */

#ifdef TILE_NET_VERIFY_INGRESS
	if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
	    NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
		/*
		 * FIXME: This complains about UDP packets
		 * with a "zero" checksum (bug 6624).
		 */
#ifdef TILE_NET_PANIC_ON_BAD
		dump_packet(buf, len, "rx");
		panic("Bad L4 checksum.");
#else
		pr_warning("Bad L4 checksum on %ld byte packet.\n", len);
#endif
	}
	if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
	    NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
		dump_packet(buf, len, "rx");
		panic("Bad L3 checksum.");
	}
	switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
	case NETIO_PKT_STATUS_OVERSIZE:
		if (len >= 64) {
			dump_packet(buf, len, "rx");
			panic("Unexpected OVERSIZE.");
		}
		break;
	case NETIO_PKT_STATUS_BAD:
#ifdef TILE_NET_PANIC_ON_BAD
		dump_packet(buf, len, "rx");
		panic("Unexpected BAD packet.");
#else
		pr_warning("Unexpected BAD %ld byte packet.\n", len);
#endif
	}
#endif

	filter = 0;

	if (!(dev->flags & IFF_UP)) {
		/* Filter packets received before we're up. */
		filter = 1;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/*
		 * FIXME: Implement HW multicast filter.
		 */
		if (is_unicast_ether_addr(buf)) {
			/* Filter packets not for our address. */
			const u8 *mine = dev->dev_addr;
			filter = compare_ether_addr(mine, buf);
		}
	}

#ifdef IGNORE_DUP_ACKS

	if (len != 66) {
		/* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */

		other++;

	} else if (index2 ==
		   qsp->__packet_receive_queue.__packet_write) {

		final++;

	} else {

		netio_pkt_t *pkt2 = (netio_pkt_t *)
			((unsigned long) &qsp[1] + index2);

		netio_pkt_metadata_t *metadata2 =
			NETIO_PKT_METADATA(pkt2);

		/* Extract the packet size. */
		unsigned long len2 =
			(NETIO_PKT_CUSTOM_LENGTH(pkt2) +
			 NET_IP_ALIGN - NETIO_PACKET_PADDING);

		if (len2 == 66 &&
		    NETIO_PKT_FLOW_HASH_M(metadata, pkt) ==
		    NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) {

			/* Extract the "linux_buffer_t". */
			unsigned int buffer2 = pkt2->__packet.word;

			/* Convert "linux_buffer_t" to "va". */
			void *va2 =
				__va((phys_addr_t)(buffer2 >> 1) << 7);

			/* Extract the packet data pointer. */
			/* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
			unsigned char *buf2 = va2 + NET_IP_ALIGN;

			/* Invalidate the packet buffer. */
			if (!hash_default)
				__inv_buffer(buf2, len2);

			if (is_dup_ack(buf, buf2, len)) {
				skip++;
				filter = 1;
			} else {
				keep++;
			}
		}
	}

	if (net_ratelimit())
		pr_info("Other %d Final %d Keep %d Skip %d.\n",
			other, final, keep, skip);

#endif

	if (filter) {

		/* ISSUE: Update "drop" statistics? */

		tile_net_provide_linux_buffer(info, va, small);

	} else {

		/* Acquire the associated "skb". */
		struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
		struct sk_buff *skb = *skb_ptr;

		/* Paranoia. */
		if (skb->data != buf)
			panic("Corrupt linux buffer from LIPP! "
			      "VA=%p, skb=%p, skb->data=%p\n",
			      va, skb, skb->data);

		/* Encode the actual packet length. */
		skb_put(skb, len);

		/* NOTE: This call also sets "skb->dev = dev". */
		skb->protocol = eth_type_trans(skb, dev);

		/* ISSUE: Discard corrupt packets? */
		/* ISSUE: Discard packets with bad checksums? */

		/* Avoid recomputing TCP/UDP checksums. */
		if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		netif_receive_skb(skb);

		stats->rx_packets++;
		stats->rx_bytes += len;

		if (small)
			info->num_needed_small_buffers++;
		else
			info->num_needed_large_buffers++;
	}

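	/*
	 * NOTE: Each delivered packet consumes one "credit", and the
	 * IPP presumably delivers no more packets than we have credits,
	 * so batching the returns (every "interval" packets, where the
	 * interval is set to 4 at registration time) trades a little
	 * latency for fewer fastio calls.
	 */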
	/* Return four credits after every fourth packet. */
	if (--qup->__receive_credit_remaining == 0) {
		u32 interval = qup->__receive_credit_interval;
		qup->__receive_credit_remaining = interval;
		__netio_fastio_return_credits(qup->__fastio_index, interval);
	}

	/* Consume this packet. */
	qup->__packet_receive_read = index2;

	return !filter;
}

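/*
 * NOTE: Per the usual NAPI contract, when the budget is exhausted we
 * return without calling napi_complete(), and we will be polled
 * again; only when the queue drains first do we call napi_complete()
 * and re-enable the hypervisor interrupt (followed by the "rotting
 * packet" re-check described below).
 */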
/*
 * Handle some packets for the given device on the current CPU.
 *
 * ISSUE: The "rotting packet" race condition occurs if a packet
 * arrives after the queue appears to be empty, and before the
 * hypervisor interrupt is re-enabled.
 */
static int tile_net_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	unsigned int work = 0;

	while (1) {
		int index = qup->__packet_receive_read;
		if (index == qsp->__packet_receive_queue.__packet_write)
			break;

		if (tile_net_poll_aux(info, index)) {
			if (++work >= budget)
				goto done;
		}
	}

	napi_complete(&info->napi);

	/* Re-enable hypervisor interrupts. */
	enable_percpu_irq(priv->intr_id);

	/* HACK: Avoid the "rotting packet" problem. */
	if (qup->__packet_receive_read !=
	    qsp->__packet_receive_queue.__packet_write)
		napi_schedule(&info->napi);

	/* ISSUE: Handle completions? */

done:

	tile_net_provide_needed_buffers(info);

	return work;
}


/*
 * Handle an ingress interrupt for the given device on the current cpu.
 */
static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable hypervisor interrupt. */
	disable_percpu_irq(priv->intr_id);

	napi_schedule(&info->napi);

	return IRQ_HANDLED;
}


/*
 * One time initialization per interface.
 */
static int tile_net_open_aux(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	int ret;
	int dummy;
	unsigned int epp_lotar;

	/*
	 * Find out where EPP memory should be homed.
	 */
	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
			   NETIO_EPP_SHM_OFF);
	if (ret < 0) {
		pr_err("could not read epp_shm_queue lotar.\n");
		return -EIO;
	}

	/*
	 * Home the page on the EPP.
	 */
	{
		int epp_home = hv_lotar_to_cpu(epp_lotar);
		struct page *page = virt_to_page(priv->epp_queue);
		homecache_change_page_home(page, 0, epp_home);
	}

	/*
	 * Register the EPP shared memory queue.
	 */
	{
		netio_ipp_address_t ea = {
			.va = 0,
			.pa = __pa(priv->epp_queue),
			.pte = hv_pte(0),
			.size = PAGE_SIZE,
		};
		ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
		ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
		ret = hv_dev_pwrite(priv->hv_devhdl, 0,
				    (HV_VirtAddr)&ea,
				    sizeof(ea),
				    NETIO_EPP_SHM_OFF);
		if (ret < 0)
			return -EIO;
	}

	/*
	 * Start LIPP/LEPP.
	 */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
		pr_warning("Failed to start LIPP/LEPP.\n");
		return -EIO;
	}

	return 0;
}


/*
 * Register with hypervisor on each CPU.
 *
 * Strangely, this function does important things even if it "fails",
 * which is especially common if the link is not up yet.  Hopefully
 * these things are all "harmless" if done twice!
 */
static void tile_net_register(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info;

	struct tile_netio_queue *queue;

	/* Only network cpus can receive packets. */
	int queue_id =
		cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;

	netio_input_config_t config = {
		.flags = 0,
		.num_receive_packets = priv->network_cpus_credits,
		.queue_id = queue_id
	};

	int ret = 0;
	netio_queue_impl_t *queuep;

	PDEBUG("tile_net_register(queue_id %d)\n", queue_id);

	if (!strcmp(dev->name, "xgbe0"))
		info = &__get_cpu_var(hv_xgbe0);
	else if (!strcmp(dev->name, "xgbe1"))
		info = &__get_cpu_var(hv_xgbe1);
	else if (!strcmp(dev->name, "gbe0"))
		info = &__get_cpu_var(hv_gbe0);
	else if (!strcmp(dev->name, "gbe1"))
		info = &__get_cpu_var(hv_gbe1);
	else
		BUG();

	/* Initialize the egress timer. */
	init_timer(&info->egress_timer);
	info->egress_timer.data = (long)info;
	info->egress_timer.function = tile_net_handle_egress_timer;

	priv->cpu[my_cpu] = info;

	/*
	 * Register ourselves with the IPP.
	 */
	ret = hv_dev_pwrite(priv->hv_devhdl, 0,
			    (HV_VirtAddr)&config,
			    sizeof(netio_input_config_t),
			    NETIO_IPP_INPUT_REGISTER_OFF);
	PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
	       ret);
	if (ret < 0) {
		printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF"
		       " failure %d\n", ret);
		info->link_down = (ret == NETIO_LINK_DOWN);
		return;
	}

	/*
	 * Get the pointer to our queue's system part.
	 */

	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&queuep,
			   sizeof(netio_queue_impl_t *),
			   NETIO_IPP_INPUT_REGISTER_OFF);
	PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
	       ret);
	PDEBUG("queuep %p\n", queuep);
	if (ret <= 0) {
		/* ISSUE: Shouldn't this be a fatal error? */
		pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
		return;
	}

	queue = &info->queue;

	queue->__system_part = queuep;

	memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));

	/* This is traditionally "config.num_receive_packets / 2". */
	queue->__user_part.__receive_credit_interval = 4;
	queue->__user_part.__receive_credit_remaining =
		queue->__user_part.__receive_credit_interval;

	/*
	 * Get a fastio index from the hypervisor.
	 * ISSUE: Shouldn't this check the result?
	 */
	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&queue->__user_part.__fastio_index,
			   sizeof(queue->__user_part.__fastio_index),
			   NETIO_IPP_GET_FASTIO_OFF);
	PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);

	netif_napi_add(dev, &info->napi, tile_net_poll, 64);

	/* Now we are registered. */
	info->registered = true;
}


/*
 * Unregister with hypervisor on each CPU.
 */
static void tile_net_unregister(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	int ret = 0;
	int dummy = 0;

	/* Do nothing if never registered. */
	if (info == NULL)
		return;

	/* Do nothing if already unregistered. */
	if (!info->registered)
		return;

	/*
	 * Unregister ourselves with LIPP.
	 */
	ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			    sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
	PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n",
	       ret);
	if (ret < 0) {
		/* FIXME: Just panic? */
		pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF"
		       " failure %d\n", ret);
	}

	/*
	 * Discard all packets still in our NetIO queue.  Hopefully,
	 * once the unregister call is complete, there will be no
	 * packets still in flight on the IDN.
	 */
	tile_net_discard_packets(dev);

	/* Reset state. */
	info->num_needed_small_buffers = 0;
	info->num_needed_large_buffers = 0;

	/* Cancel egress timer. */
	del_timer(&info->egress_timer);
	info->egress_timer_scheduled = false;

	netif_napi_del(&info->napi);

	/* Now we are unregistered. */
	info->registered = false;
}


/*
 * Helper function for "tile_net_stop()".
 *
 * Also used to handle registration failure in "tile_net_open_inner()",
 * when "fully_opened" is known to be false, and the various extra
 * steps in "tile_net_stop()" are not necessary.  ISSUE: It might be
 * simpler if we could just call "tile_net_stop()" anyway.
 */
static void tile_net_stop_aux(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	int dummy = 0;

	/* Unregister all tiles, so LIPP will stop delivering packets. */
	on_each_cpu(tile_net_unregister, (void *)dev, 1);

	/* Stop LIPP/LEPP. */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
		panic("Failed to stop LIPP/LEPP!\n");

	priv->partly_opened = 0;
}


/*
 * Disable ingress interrupts for the given device on the current cpu.
 */
static void tile_net_disable_intr(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable hypervisor interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Disable NAPI if needed. */
	if (info != NULL && info->napi_enabled) {
		napi_disable(&info->napi);
		info->napi_enabled = false;
	}
}


/*
 * Enable ingress interrupts for the given device on the current cpu.
 */
static void tile_net_enable_intr(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Enable hypervisor interrupt. */
	enable_percpu_irq(priv->intr_id);

	/* Enable NAPI. */
	napi_enable(&info->napi);
	info->napi_enabled = true;
}


/*
 * tile_net_open_inner does most of the work of bringing up the interface.
 * It's called from tile_net_open(), and also from tile_net_open_retry().
 * The return value is 0 if the interface was brought up, < 0 if
 * tile_net_open() should return the return value as an error, and > 0 if
 * tile_net_open() should return success and schedule a work item to
 * periodically retry the bringup.
 */
static int tile_net_open_inner(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info;
	struct tile_netio_queue *queue;
	unsigned int irq;
	int i;

	/*
	 * First try to register just on the local CPU, and handle any
	 * semi-expected "link down" failure specially.  Note that we
	 * do NOT call "tile_net_stop_aux()", unlike below.
	 */
	tile_net_register(dev);
	info = priv->cpu[my_cpu];
	if (!info->registered) {
		if (info->link_down)
			return 1;
		return -EAGAIN;
	}

	/*
	 * Now register everywhere else.  If any registration fails,
	 * even for "link down" (which might not be possible), we
	 * clean up using "tile_net_stop_aux()".
	 */
	smp_call_function(tile_net_register, (void *)dev, 1);
	for_each_online_cpu(i) {
		if (!priv->cpu[i]->registered) {
			tile_net_stop_aux(dev);
			return -EAGAIN;
		}
	}

	queue = &info->queue;

	/*
	 * Set the device intr bit mask.
	 * The tile_net_register above sets per tile __intr_id.
	 */
	priv->intr_id = queue->__system_part->__intr_id;
	BUG_ON(!priv->intr_id);

	/*
	 * Register the device interrupt handler.
	 * The __ffs() function returns the index into the interrupt handler
	 * table from the interrupt bit mask which should have one bit
	 * and one bit only set.
	 */
	irq = __ffs(priv->intr_id);
	tile_irq_activate(irq, TILE_IRQ_PERCPU);
	BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
			   0, dev->name, (void *)dev) != 0);

	/* ISSUE: How could "priv->fully_opened" ever be "true" here? */

	if (!priv->fully_opened) {

		int dummy = 0;

		/* Allocate initial buffers. */

		int max_buffers =
			priv->network_cpus_count * priv->network_cpus_credits;

		info->num_needed_small_buffers =
			min(LIPP_SMALL_BUFFERS, max_buffers);

		info->num_needed_large_buffers =
			min(LIPP_LARGE_BUFFERS, max_buffers);

		tile_net_provide_needed_buffers(info);

		if (info->num_needed_small_buffers != 0 ||
		    info->num_needed_large_buffers != 0)
			panic("Insufficient memory for buffer stack!");

		/* Start LIPP/LEPP and activate "ingress" at the shim. */
		if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
				  sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
			panic("Failed to activate the LIPP Shim!\n");

		priv->fully_opened = 1;
	}

	/* On each tile, enable the hypervisor to trigger interrupts. */
	/* ISSUE: Do this before starting LIPP/LEPP? */
	on_each_cpu(tile_net_enable_intr, (void *)dev, 1);

	/* Start our transmit queue. */
	netif_start_queue(dev);

	return 0;
}


/*
 * Called periodically to retry bringing up the NetIO interface,
 * if it doesn't come up cleanly during tile_net_open().
 */
static void tile_net_open_retry(struct work_struct *w)
{
	struct delayed_work *dw =
		container_of(w, struct delayed_work, work);

	struct tile_net_priv *priv =
		container_of(dw, struct tile_net_priv, retry_work);

	/*
	 * Try to bring the NetIO interface up.  If it fails, reschedule
	 * ourselves to try again later; otherwise, tell Linux we now have
	 * a working link.  ISSUE: What if the return value is negative?
	 */
	if (tile_net_open_inner(priv->dev))
		schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
					 TILE_NET_RETRY_INTERVAL);
	else
		netif_carrier_on(priv->dev);
}


/*
 * Called when a network interface is made active.
 *
 * Returns 0 on success, negative value on failure.
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 *
 * If the actual link is not available yet, then we tell Linux that
 * we have no carrier, and we keep checking until the link comes up.
 */
static int tile_net_open(struct net_device *dev)
{
	int ret = 0;
	struct tile_net_priv *priv = netdev_priv(dev);

	/*
	 * We rely on priv->partly_opened to tell us if this is the
	 * first time this interface is being brought up.  If it is
	 * set, the IPP was already initialized and should not be
	 * initialized again.
	 */
	if (!priv->partly_opened) {

		int count;
		int credits;

		/* Initialize LIPP/LEPP, and start the Shim. */
		ret = tile_net_open_aux(dev);
		if (ret < 0) {
			pr_err("tile_net_open_aux failed: %d\n", ret);
			return ret;
		}

		/* Analyze the network cpus. */

		if (network_cpus_used)
			cpumask_copy(&priv->network_cpus_map,
				     &network_cpus_map);
		else
			cpumask_copy(&priv->network_cpus_map, cpu_online_mask);


		count = cpumask_weight(&priv->network_cpus_map);

		/* Limit credits to available buffers, and apply min. */
		credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);

		/* Apply "GBE" max limit. */
		/* ISSUE: Use higher limit for XGBE? */
		credits = min(NETIO_MAX_RECEIVE_PKTS, credits);

		priv->network_cpus_count = count;
		priv->network_cpus_credits = credits;

#ifdef TILE_NET_DEBUG
		pr_info("Using %d network cpus, with %d credits each\n",
			priv->network_cpus_count, priv->network_cpus_credits);
#endif

		priv->partly_opened = 1;
	}

	/*
	 * Attempt to bring up the link.
	 */
	ret = tile_net_open_inner(dev);
	if (ret <= 0) {
		if (ret == 0)
			netif_carrier_on(dev);
		return ret;
	}

	/*
	 * We were unable to bring up the NetIO interface, but we want to
	 * try again in a little bit.  Tell Linux that we have no carrier
	 * so it doesn't try to use the interface before the link comes up
	 * and then remember to try again later.
	 */
	netif_carrier_off(dev);
	schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
				 TILE_NET_RETRY_INTERVAL);

	return 0;
}


/*
 * Disables a network interface.
 *
 * Returns 0, this is not allowed to fail.
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 *
 * ISSUE: Can this be called while "tile_net_poll()" is running?
 */
static int tile_net_stop(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	bool pending = true;

	PDEBUG("tile_net_stop()\n");

	/* ISSUE: Only needed if not yet fully open. */
	cancel_delayed_work_sync(&priv->retry_work);

	/* Can't transmit any more. */
	netif_stop_queue(dev);

	/*
	 * Disable hypervisor interrupts on each tile.
	 */
	on_each_cpu(tile_net_disable_intr, (void *)dev, 1);

	/*
	 * Unregister the interrupt handler.
	 * The __ffs() function returns the index into the interrupt handler
	 * table from the interrupt bit mask which should have one bit
	 * and one bit only set.
	 */
	if (priv->intr_id)
		free_irq(__ffs(priv->intr_id), dev);

	/*
	 * Drain all the LIPP buffers.
	 */

	while (true) {
		int buffer;

		/* NOTE: This should never fail. */
		if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
				 sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
			break;

		/* Stop when done. */
		if (buffer == 0)
			break;

		{
			/* Convert "linux_buffer_t" to "va". */
			void *va = __va((phys_addr_t)(buffer >> 1) << 7);

			/* Acquire the associated "skb". */
			struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
			struct sk_buff *skb = *skb_ptr;

			kfree_skb(skb);
		}
	}

	/* Stop LIPP/LEPP. */
	tile_net_stop_aux(dev);


	priv->fully_opened = 0;


	/*
	 * XXX: ISSUE: It appears that, in practice anyway, by the
	 * time we get here, there are no pending completions.
	 */
	while (pending) {

		struct sk_buff *olds[32];
		unsigned int wanted = 32;
		unsigned int i, nolds = 0;

		nolds = tile_net_lepp_grab_comps(dev, olds,
						 wanted, &pending);

		/* ISSUE: We have never actually seen this debug spew. */
		if (nolds != 0)
			pr_info("During tile_net_stop(), grabbed %d comps.\n",
				nolds);

		for (i = 0; i < nolds; i++)
			kfree_skb(olds[i]);
	}


	/* Wipe the EPP queue. */
	memset(priv->epp_queue, 0, sizeof(lepp_queue_t));

	/* Evict the EPP queue. */
	finv_buffer(priv->epp_queue, PAGE_SIZE);

	return 0;
}

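/*
 * NOTE: Each LEPP fragment apparently carries a 64-bit client
 * physical address split across the 32-bit "cpa_lo" and "cpa_hi"
 * fields, plus a length, and a "hash_for_home" flag which tells the
 * EPP how the memory is cached.
 */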
1605 | /* | |
1606 | * Prepare the "frags" info for the resulting LEPP command. | |
1607 | * | |
1608 | * If needed, flush the memory used by the frags. | |
1609 | */ | |
1610 | static unsigned int tile_net_tx_frags(lepp_frag_t *frags, | |
1611 | struct sk_buff *skb, | |
1612 | void *b_data, unsigned int b_len) | |
1613 | { | |
1614 | unsigned int i, n = 0; | |
1615 | ||
1616 | struct skb_shared_info *sh = skb_shinfo(skb); | |
1617 | ||
1618 | phys_addr_t cpa; | |
1619 | ||
1620 | if (b_len != 0) { | |
1621 | ||
1622 | if (!hash_default) | |
1623 | finv_buffer_remote(b_data, b_len); | |
1624 | ||
1625 | cpa = __pa(b_data); | |
1626 | frags[n].cpa_lo = cpa; | |
1627 | frags[n].cpa_hi = cpa >> 32; | |
1628 | frags[n].length = b_len; | |
1629 | frags[n].hash_for_home = hash_default; | |
1630 | n++; | |
1631 | } | |
1632 | ||
1633 | for (i = 0; i < sh->nr_frags; i++) { | |
1634 | ||
1635 | skb_frag_t *f = &sh->frags[i]; | |
1636 | unsigned long pfn = page_to_pfn(f->page); | |
1637 | ||
1638 | /* FIXME: Compute "hash_for_home" properly. */ | |
1639 | /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */ | |
1640 | int hash_for_home = hash_default; | |
1641 | ||
1642 | /* FIXME: Hmmm. */ | |
1643 | if (!hash_default) { | |
1644 | void *va = pfn_to_kaddr(pfn) + f->page_offset; | |
1645 | BUG_ON(PageHighMem(f->page)); | |
1646 | finv_buffer_remote(va, f->size); | |
1647 | } | |
1648 | ||
1649 | cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; | |
1650 | frags[n].cpa_lo = cpa; | |
1651 | frags[n].cpa_hi = cpa >> 32; | |
1652 | frags[n].length = f->size; | |
1653 | frags[n].hash_for_home = hash_for_home; | |
1654 | n++; | |
1655 | } | |
1656 | ||
1657 | return n; | |
1658 | } | |
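| ||
| ||
| /* | |
|  * A minimal sketch of the per-fragment setup repeated twice above, | |
|  * assuming only the "lepp_frag_t" fields visible in this file (the | |
|  * 64-bit "cpa" split into 32-bit halves); the helper is hypothetical. | |
|  */ | |
| static inline void lepp_frag_init(lepp_frag_t *frag, phys_addr_t cpa, | |
| 				  unsigned int length, bool hash_for_home) | |
| { | |
| 	frag->cpa_lo = cpa;		/* Low 32 bits of the address. */ | |
| 	frag->cpa_hi = cpa >> 32;	/* High 32 bits of the address. */ | |
| 	frag->length = length; | |
| 	frag->hash_for_home = hash_for_home; | |
| } | |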
1659 | ||
1660 | ||
1661 | /* | |
1662 | * This function takes "skb", consisting of a header template and a | |
1663 | * payload, and hands it to LEPP, to emit as one or more segments, | |
1664 | * each consisting of a possibly modified header, plus a piece of the | |
1665 | * payload, via a process known as "tcp segmentation offload". | |
1666 | * | |
1667 | * Usually, "data" will contain the header template, of size "sh_len", | |
1668 | * and "sh->frags" will contain "skb->data_len" bytes of payload, | |
1669 | * yielding "sh->gso_segs" segments. | |
1670 | * | |
1671 | * Sometimes, if "sendfile()" requires copying, we will be called with | |
1672 | * "data" containing the header and payload, and "frags" empty. | |
1673 | * | |
1674 | * In theory, "sh->nr_frags" could be 3, but in practice, it seems | |
1675 | * that this will never actually happen. | |
1676 | * | |
1677 | * See "emulate_large_send_offload()" for some reference code, which | |
1678 | * does not handle checksumming. | |
1679 | * | |
1680 | * ISSUE: How do we make sure that high memory DMA does not migrate? | |
1681 | */ | |
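| ||
| /* | |
|  * Worked example (a typical TCP/IPv4 frame with no options): with | |
|  * "eh_len" = 14, "ih_len" = 20, and "th_len" = 20, the header template | |
|  * is "sh_len" = 54 bytes, and a 9000 byte payload with "gso_size" 1400 | |
|  * yields "gso_segs" = 7 (illustrative values). | |
|  */ | |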
1682 | static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev) | |
1683 | { | |
1684 | struct tile_net_priv *priv = netdev_priv(dev); | |
1685 | int my_cpu = smp_processor_id(); | |
1686 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | |
1687 | struct tile_net_stats_t *stats = &info->stats; | |
1688 | ||
1689 | struct skb_shared_info *sh = skb_shinfo(skb); | |
1690 | ||
1691 | unsigned char *data = skb->data; | |
1692 | ||
1693 | /* The ip header follows the ethernet header. */ | |
1694 | struct iphdr *ih = ip_hdr(skb); | |
1695 | unsigned int ih_len = ih->ihl * 4; | |
1696 | ||
1697 | /* Note that "nh == ih", by definition. */ | |
1698 | unsigned char *nh = skb_network_header(skb); | |
1699 | unsigned int eh_len = nh - data; | |
1700 | ||
1701 | /* The tcp header follows the ip header. */ | |
1702 | struct tcphdr *th = (struct tcphdr *)(nh + ih_len); | |
1703 | unsigned int th_len = th->doff * 4; | |
1704 | ||
1705 | /* The total number of header bytes. */ | |
1706 | /* NOTE: This may be less than skb_headlen(skb). */ | |
1707 | unsigned int sh_len = eh_len + ih_len + th_len; | |
1708 | ||
1709 | /* The number of payload bytes at "skb->data + sh_len". */ | |
1710 | /* This is non-zero for sendfile() without HIGHDMA. */ | |
1711 | unsigned int b_len = skb_headlen(skb) - sh_len; | |
1712 | ||
1713 | /* The total number of payload bytes. */ | |
1714 | unsigned int d_len = b_len + skb->data_len; | |
1715 | ||
1716 | /* The maximum payload size. */ | |
1717 | /* The maximum payload size per segment. */ | |
1718 | ||
1719 | /* The total number of segments. */ | |
1720 | unsigned int num_segs = sh->gso_segs; | |
1721 | ||
1722 | /* The temporary copy of the command. */ | |
1723 | u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4]; | |
1724 | lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body; | |
1725 | ||
1726 | /* Analyze the "frags". */ | |
1727 | unsigned int num_frags = | |
1728 | tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len); | |
1729 | ||
1730 | /* The size of the command, including frags and header. */ | |
1731 | size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len); | |
1732 | ||
1733 | /* The command header. */ | |
1734 | lepp_tso_cmd_t cmd_init = { | |
1735 | .tso = true, | |
1736 | .header_size = sh_len, | |
1737 | .ip_offset = eh_len, | |
1738 | .tcp_offset = eh_len + ih_len, | |
1739 | .payload_size = p_len, | |
1740 | .num_frags = num_frags, | |
1741 | }; | |
1742 | ||
1743 | unsigned long irqflags; | |
1744 | ||
1745 | lepp_queue_t *eq = priv->epp_queue; | |
1746 | ||
1747 | struct sk_buff *olds[4]; | |
1748 | unsigned int wanted = 4; | |
1749 | unsigned int i, nolds = 0; | |
1750 | ||
1751 | unsigned int cmd_head, cmd_tail, cmd_next; | |
1752 | unsigned int comp_tail; | |
1753 | ||
1754 | unsigned int free_slots; | |
1755 | ||
1756 | ||
1757 | /* Paranoia. */ | |
1758 | BUG_ON(skb->protocol != htons(ETH_P_IP)); | |
1759 | BUG_ON(ih->protocol != IPPROTO_TCP); | |
1760 | BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL); | |
1761 | BUG_ON(num_frags > LEPP_MAX_FRAGS); | |
1762 | /* Disabled: BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */ | |
1763 | BUG_ON(num_segs <= 1); | |
1764 | ||
1765 | ||
1766 | /* Finish preparing the command. */ | |
1767 | ||
1768 | /* Copy the command header. */ | |
1769 | *cmd = cmd_init; | |
1770 | ||
1771 | /* Copy the "header". */ | |
1772 | memcpy(&cmd->frags[num_frags], data, sh_len); | |
1773 | ||
1774 | ||
1775 | /* Prefetch and wait, to minimize time spent holding the spinlock. */ | |
1776 | prefetch_L1(&eq->comp_tail); | |
1777 | prefetch_L1(&eq->cmd_tail); | |
1778 | mb(); | |
1779 | ||
1780 | ||
1781 | /* Enqueue the command. */ | |
1782 | ||
1783 | spin_lock_irqsave(&priv->cmd_lock, irqflags); | |
1784 | ||
1785 | /* | |
1786 | * Handle completions if needed to make room. | |
1787 | * HACK: Spin until there is sufficient room. | |
1788 | */ | |
1789 | free_slots = lepp_num_free_comp_slots(eq); | |
1790 | if (free_slots < 1) { | |
1791 | spin: | |
1792 | nolds += tile_net_lepp_grab_comps(dev, olds + nolds, | |
1793 | wanted - nolds, NULL); | |
1794 | if (lepp_num_free_comp_slots(eq) < 1) | |
1795 | goto spin; | |
1796 | } | |
1797 | ||
1798 | cmd_head = eq->cmd_head; | |
1799 | cmd_tail = eq->cmd_tail; | |
1800 | ||
1801 | /* NOTE: The "gotos" below are untested. */ | |
1802 | ||
1803 | /* Prepare to advance, detecting full queue. */ | |
1804 | cmd_next = cmd_tail + cmd_size; | |
1805 | if (cmd_tail < cmd_head && cmd_next >= cmd_head) | |
1806 | goto spin; | |
1807 | if (cmd_next > LEPP_CMD_LIMIT) { | |
1808 | cmd_next = 0; | |
1809 | if (cmd_next == cmd_head) | |
1810 | goto spin; | |
1811 | } | |
1812 | ||
1813 | /* Copy the command. */ | |
1814 | memcpy(&eq->cmds[cmd_tail], cmd, cmd_size); | |
1815 | ||
1816 | /* Advance. */ | |
1817 | cmd_tail = cmd_next; | |
1818 | ||
1819 | /* Record "skb" for eventual freeing. */ | |
1820 | comp_tail = eq->comp_tail; | |
1821 | eq->comps[comp_tail] = skb; | |
1822 | LEPP_QINC(comp_tail); | |
1823 | eq->comp_tail = comp_tail; | |
1824 | ||
1825 | /* Flush before allowing LEPP to handle the command. */ | |
1826 | __insn_mf(); | |
1827 | ||
1828 | eq->cmd_tail = cmd_tail; | |
1829 | ||
1830 | spin_unlock_irqrestore(&priv->cmd_lock, irqflags); | |
1831 | ||
1832 | if (nolds == 0) | |
1833 | nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); | |
1834 | ||
1835 | /* Handle completions. */ | |
1836 | for (i = 0; i < nolds; i++) | |
1837 | kfree_skb(olds[i]); | |
1838 | ||
1839 | /* Update stats. */ | |
1840 | stats->tx_packets += num_segs; | |
1841 | stats->tx_bytes += (num_segs * sh_len) + d_len; | |
1842 | ||
1843 | /* Make sure the egress timer is scheduled. */ | |
1844 | tile_net_schedule_egress_timer(info); | |
1845 | ||
1846 | return NETDEV_TX_OK; | |
1847 | } | |
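| ||
| ||
| /* | |
|  * For clarity, a sketch of the command-ring bounds check performed | |
|  * under "cmd_lock" in tile_net_tx_tso() above and tile_net_tx() below, | |
|  * extracted into a helper with a hypothetical name.  It returns the | |
|  * next tail offset, or -1 if writing "size" bytes at "tail" would | |
|  * collide with the unread head. | |
|  */ | |
| static inline int lepp_cmd_next_tail(unsigned int head, unsigned int tail, | |
| 				     unsigned int size) | |
| { | |
| 	unsigned int next = tail + size; | |
| ||
| 	/* The new command must not reach the head from below... */ | |
| 	if (tail < head && next >= head) | |
| 		return -1; | |
| ||
| 	/* ...and wrapping past the limit must not land on the head. */ | |
| 	if (next > LEPP_CMD_LIMIT) { | |
| 		next = 0; | |
| 		if (next == head) | |
| 			return -1; | |
| 	} | |
| ||
| 	return next; | |
| } | |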
1848 | ||
1849 | ||
1850 | /* | |
1851 | * Transmit a packet (called by the kernel via "hard_start_xmit" hook). | |
1852 | */ | |
1853 | static int tile_net_tx(struct sk_buff *skb, struct net_device *dev) | |
1854 | { | |
1855 | struct tile_net_priv *priv = netdev_priv(dev); | |
1856 | int my_cpu = smp_processor_id(); | |
1857 | struct tile_net_cpu *info = priv->cpu[my_cpu]; | |
1858 | struct tile_net_stats_t *stats = &info->stats; | |
1859 | ||
1860 | unsigned long irqflags; | |
1861 | ||
1862 | struct skb_shared_info *sh = skb_shinfo(skb); | |
1863 | ||
1864 | unsigned int len = skb->len; | |
1865 | unsigned char *data = skb->data; | |
1866 | ||
1867 | unsigned int csum_start = skb->csum_start - skb_headroom(skb); | |
1868 | ||
1869 | lepp_frag_t frags[LEPP_MAX_FRAGS]; | |
1870 | ||
1871 | unsigned int num_frags; | |
1872 | ||
1873 | lepp_queue_t *eq = priv->epp_queue; | |
1874 | ||
1875 | struct sk_buff *olds[4]; | |
1876 | unsigned int wanted = 4; | |
1877 | unsigned int i, nolds = 0; | |
1878 | ||
1879 | unsigned int cmd_size = sizeof(lepp_cmd_t); | |
1880 | ||
1881 | unsigned int cmd_head, cmd_tail, cmd_next; | |
1882 | unsigned int comp_tail; | |
1883 | ||
1884 | lepp_cmd_t cmds[LEPP_MAX_FRAGS]; | |
1885 | ||
1886 | unsigned int free_slots; | |
1887 | ||
1888 | ||
1889 | /* | |
1890 | * This is paranoia, since we think that if the link doesn't come | |
1891 | * up, telling Linux we have no carrier will keep it from trying | |
1892 | * to transmit. If it does, though, we can't execute this routine, | |
1893 | * since data structures we depend on aren't set up yet. | |
1894 | */ | |
1895 | if (!info->registered) | |
1896 | return NETDEV_TX_BUSY; | |
1897 | ||
1898 | ||
1899 | /* Save the timestamp. */ | |
1900 | dev->trans_start = jiffies; | |
1901 | ||
1902 | ||
1903 | #ifdef TILE_NET_PARANOIA | |
1904 | #if CHIP_HAS_CBOX_HOME_MAP() | |
1905 | if (hash_default) { | |
1906 | HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data); | |
1907 | if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3) | |
1908 | panic("Non-coherent egress buffer!"); | |
1909 | } | |
1910 | #endif | |
1911 | #endif | |
1912 | ||
1913 | ||
1914 | #ifdef TILE_NET_DUMP_PACKETS | |
1915 | /* ISSUE: Does not dump the "frags". */ | |
1916 | dump_packet(data, skb_headlen(skb), "tx"); | |
1917 | #endif /* TILE_NET_DUMP_PACKETS */ | |
1918 | ||
1919 | ||
1920 | if (sh->gso_size != 0) | |
1921 | return tile_net_tx_tso(skb, dev); | |
1922 | ||
1923 | ||
1924 | /* Prepare the commands. */ | |
1925 | ||
1926 | num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb)); | |
1927 | ||
1928 | for (i = 0; i < num_frags; i++) { | |
1929 | ||
1930 | bool final = (i == num_frags - 1); | |
1931 | ||
1932 | lepp_cmd_t cmd = { | |
1933 | .cpa_lo = frags[i].cpa_lo, | |
1934 | .cpa_hi = frags[i].cpa_hi, | |
1935 | .length = frags[i].length, | |
1936 | .hash_for_home = frags[i].hash_for_home, | |
1937 | .send_completion = final, | |
1938 | .end_of_packet = final | |
1939 | }; | |
1940 | ||
1941 | if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) { | |
1942 | cmd.compute_checksum = 1; | |
1943 | cmd.checksum_data.bits.start_byte = csum_start; | |
1944 | cmd.checksum_data.bits.count = len - csum_start; | |
1945 | cmd.checksum_data.bits.destination_byte = | |
1946 | csum_start + skb->csum_offset; | |
1947 | } | |
1948 | ||
1949 | cmds[i] = cmd; | |
1950 | } | |
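| ||
| 	/* | |
| 	 * NOTE: For a plain TCP/IPv4 frame, "csum_start" would be 34 | |
| 	 * (14 bytes of Ethernet header plus 20 of IP header) and | |
| 	 * "skb->csum_offset" 16, placing the checksum at byte 50 of | |
| 	 * the frame (illustrative values). | |
| 	 */ | |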
1951 | ||
1952 | ||
1953 | /* Prefetch and wait, to minimize time spent holding the spinlock. */ | |
1954 | prefetch_L1(&eq->comp_tail); | |
1955 | prefetch_L1(&eq->cmd_tail); | |
1956 | mb(); | |
1957 | ||
1958 | ||
1959 | /* Enqueue the commands. */ | |
1960 | ||
1961 | spin_lock_irqsave(&priv->cmd_lock, irqflags); | |
1962 | ||
1963 | /* | |
1964 | * Handle completions if needed to make room. | |
1965 | * HACK: Spin until there is sufficient room. | |
1966 | */ | |
1967 | free_slots = lepp_num_free_comp_slots(eq); | |
1968 | if (free_slots < 1) { | |
1969 | spin: | |
1970 | nolds += tile_net_lepp_grab_comps(dev, olds + nolds, | |
1971 | wanted - nolds, NULL); | |
1972 | if (lepp_num_free_comp_slots(eq) < 1) | |
1973 | goto spin; | |
1974 | } | |
1975 | ||
1976 | cmd_head = eq->cmd_head; | |
1977 | cmd_tail = eq->cmd_tail; | |
1978 | ||
1979 | /* NOTE: The "gotos" below are untested. */ | |
1980 | ||
1981 | /* Copy the commands, or fail. */ | |
1982 | for (i = 0; i < num_frags; i++) { | |
1983 | ||
1984 | /* Prepare to advance, detecting full queue. */ | |
1985 | cmd_next = cmd_tail + cmd_size; | |
1986 | if (cmd_tail < cmd_head && cmd_next >= cmd_head) | |
1987 | goto spin; | |
1988 | if (cmd_next > LEPP_CMD_LIMIT) { | |
1989 | cmd_next = 0; | |
1990 | if (cmd_next == cmd_head) | |
1991 | goto spin; | |
1992 | } | |
1993 | ||
1994 | /* Copy the command. */ | |
1995 | *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i]; | |
1996 | ||
1997 | /* Advance. */ | |
1998 | cmd_tail = cmd_next; | |
1999 | } | |
2000 | ||
2001 | /* Record "skb" for eventual freeing. */ | |
2002 | comp_tail = eq->comp_tail; | |
2003 | eq->comps[comp_tail] = skb; | |
2004 | LEPP_QINC(comp_tail); | |
2005 | eq->comp_tail = comp_tail; | |
2006 | ||
2007 | /* Flush before allowing LEPP to handle the command. */ | |
2008 | __insn_mf(); | |
2009 | ||
2010 | eq->cmd_tail = cmd_tail; | |
2011 | ||
2012 | spin_unlock_irqrestore(&priv->cmd_lock, irqflags); | |
2013 | ||
2014 | if (nolds == 0) | |
2015 | nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL); | |
2016 | ||
2017 | /* Handle completions. */ | |
2018 | for (i = 0; i < nolds; i++) | |
2019 | kfree_skb(olds[i]); | |
2020 | ||
2021 | /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */ | |
2022 | stats->tx_packets++; | |
2023 | stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN); | |
2024 | ||
2025 | /* Make sure the egress timer is scheduled. */ | |
2026 | tile_net_schedule_egress_timer(info); | |
2027 | ||
2028 | return NETDEV_TX_OK; | |
2029 | } | |
2030 | ||
2031 | ||
2032 | /* | |
2033 | * Deal with a transmit timeout. | |
2034 | */ | |
2035 | static void tile_net_tx_timeout(struct net_device *dev) | |
2036 | { | |
2037 | PDEBUG("tile_net_tx_timeout()\n"); | |
2038 | PDEBUG("Transmit timeout at %lu, latency %lu\n", jiffies, | |
2039 | jiffies - dev->trans_start); | |
2040 | ||
2041 | /* XXX: ISSUE: This doesn't seem useful for us. */ | |
2042 | netif_wake_queue(dev); | |
2043 | } | |
2044 | ||
2045 | ||
2046 | /* | |
2047 | * Ioctl commands. | |
2048 | */ | |
2049 | static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |
2050 | { | |
2051 | return -EOPNOTSUPP; | |
2052 | } | |
2053 | ||
2054 | ||
2055 | /* | |
2056 | * Get System Network Statistics. | |
2057 | * | |
2058 | * Returns the address of the device statistics structure. | |
2059 | */ | |
2060 | static struct net_device_stats *tile_net_get_stats(struct net_device *dev) | |
2061 | { | |
2062 | struct tile_net_priv *priv = netdev_priv(dev); | |
2063 | u32 rx_packets = 0; | |
2064 | u32 tx_packets = 0; | |
2065 | u32 rx_bytes = 0; | |
2066 | u32 tx_bytes = 0; | |
2067 | int i; | |
2068 | ||
2069 | for_each_online_cpu(i) { | |
2070 | if (priv->cpu[i]) { | |
2071 | rx_packets += priv->cpu[i]->stats.rx_packets; | |
2072 | rx_bytes += priv->cpu[i]->stats.rx_bytes; | |
2073 | tx_packets += priv->cpu[i]->stats.tx_packets; | |
2074 | tx_bytes += priv->cpu[i]->stats.tx_bytes; | |
2075 | } | |
2076 | } | |
2077 | ||
2078 | priv->stats.rx_packets = rx_packets; | |
2079 | priv->stats.rx_bytes = rx_bytes; | |
2080 | priv->stats.tx_packets = tx_packets; | |
2081 | priv->stats.tx_bytes = tx_bytes; | |
2082 | ||
2083 | return &priv->stats; | |
2084 | } | |
2085 | ||
2086 | ||
2087 | /* | |
2088 | * Change the "mtu". | |
2089 | * | |
2090 | * The "change_mtu" method is usually not needed; when provided, it | |
2091 | * must validate the new value and then store it in "dev->mtu". | |
2092 | */ | |
2093 | static int tile_net_change_mtu(struct net_device *dev, int new_mtu) | |
2094 | { | |
2095 | PDEBUG("tile_net_change_mtu()\n"); | |
2096 | ||
2097 | /* Check range (68 is the IPv4 minimum MTU; 1500 is standard Ethernet). */ | |
2098 | if ((new_mtu < 68) || (new_mtu > 1500)) | |
2099 | return -EINVAL; | |
2100 | ||
2101 | /* Accept the value. */ | |
2102 | dev->mtu = new_mtu; | |
2103 | ||
2104 | return 0; | |
2105 | } | |
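| ||
| ||
| /* | |
|  * For example, "ifconfig xgbe0 mtu 1400" would invoke the handler | |
|  * above with "new_mtu" equal to 1400 (an illustrative value). | |
|  */ | |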
2106 | ||
2107 | ||
2108 | /* | |
2109 | * Change the Ethernet Address of the NIC. | |
2110 | * | |
2111 | * The hypervisor driver does not support changing the MAC address. However, | |
2112 | * the IPP does not do anything with the MAC address, so the address which | |
2113 | * gets used on outgoing packets, and which is accepted on incoming packets, | |
2114 | * is completely up to the NetIO program or kernel driver which is actually | |
2115 | * handling them. | |
2116 | * | |
2117 | * Returns 0 on success, negative on failure. | |
2118 | */ | |
2119 | static int tile_net_set_mac_address(struct net_device *dev, void *p) | |
2120 | { | |
2121 | struct sockaddr *addr = p; | |
2122 | ||
2123 | if (!is_valid_ether_addr(addr->sa_data)) | |
2124 | return -EINVAL; | |
2125 | ||
2126 | /* ISSUE: Note that "dev_addr" is now a pointer. */ | |
2127 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | |
2128 | ||
2129 | return 0; | |
2130 | } | |
2131 | ||
2132 | ||
2133 | /* | |
2134 | * Obtain the MAC address from the hypervisor. | |
2135 | * This must be done before opening the device. | |
2136 | */ | |
2137 | static int tile_net_get_mac(struct net_device *dev) | |
2138 | { | |
2139 | struct tile_net_priv *priv = netdev_priv(dev); | |
2140 | ||
2141 | char hv_dev_name[32]; | |
2142 | int len; | |
2143 | ||
2144 | __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF }; | |
2145 | ||
2146 | int ret; | |
2147 | ||
2148 | /* For example, "xgbe0". */ | |
2149 | strcpy(hv_dev_name, dev->name); | |
2150 | len = strlen(hv_dev_name); | |
2151 | ||
2152 | /* For example, "xgbe/0". */ | |
2153 | hv_dev_name[len] = hv_dev_name[len - 1]; | |
2154 | hv_dev_name[len - 1] = '/'; | |
2155 | len++; | |
2156 | ||
2157 | /* For example, "xgbe/0/native_hash". */ | |
2158 | strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native"); | |
2159 | ||
2160 | /* Get the hypervisor handle for this device. */ | |
2161 | priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0); | |
2162 | PDEBUG("hv_dev_open(%s) returned %d %p\n", | |
2163 | hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl); | |
2164 | if (priv->hv_devhdl < 0) { | |
2165 | if (priv->hv_devhdl == HV_ENODEV) | |
2166 | printk(KERN_DEBUG "Ignoring unconfigured device %s\n", | |
2167 | hv_dev_name); | |
2168 | else | |
2169 | printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n", | |
2170 | hv_dev_name, priv->hv_devhdl); | |
2171 | return -1; | |
2172 | } | |
2173 | ||
2174 | /* | |
2175 | * Read the hardware address from the hypervisor. | |
2176 | * ISSUE: Note that "dev_addr" is now a pointer. | |
2177 | */ | |
2178 | offset.bits.class = NETIO_PARAM; | |
2179 | offset.bits.addr = NETIO_PARAM_MAC; | |
2180 | ret = hv_dev_pread(priv->hv_devhdl, 0, | |
2181 | (HV_VirtAddr)dev->dev_addr, dev->addr_len, | |
2182 | offset.word); | |
2183 | PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret); | |
2184 | if (ret <= 0) { | |
2185 | printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n", | |
2186 | dev->name); | |
2187 | /* | |
2188 | * Since the device is configured by the hypervisor but we | |
2189 | * can't get its MAC address, we are most likely running | |
2190 | * the simulator, so let's generate a random MAC address. | |
2191 | */ | |
2192 | random_ether_addr(dev->dev_addr); | |
2193 | } | |
2194 | ||
2195 | return 0; | |
2196 | } | |
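| ||
| ||
| /* | |
|  * For reference, a sketch of the name mangling done above, under the | |
|  * same assumption that the device name has the form "<base><digit>" | |
|  * (e.g. "xgbe0"); the helper name is hypothetical. | |
|  */ | |
| static inline void tile_net_hv_name(char *buf, size_t size, | |
| 				    const char *name, bool hash) | |
| { | |
| 	size_t len = strlen(name); | |
| ||
| 	/* "xgbe0" becomes "xgbe/0", plus the NetIO suffix. */ | |
| 	snprintf(buf, size, "%.*s/%c%s", (int)(len - 1), name, | |
| 		 name[len - 1], hash ? "/native_hash" : "/native"); | |
| } | |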
2197 | ||
2198 | ||
2199 | static const struct net_device_ops tile_net_ops = { | |
2200 | .ndo_open = tile_net_open, | |
2201 | .ndo_stop = tile_net_stop, | |
2202 | .ndo_start_xmit = tile_net_tx, | |
2203 | .ndo_do_ioctl = tile_net_ioctl, | |
2204 | .ndo_get_stats = tile_net_get_stats, | |
2205 | .ndo_change_mtu = tile_net_change_mtu, | |
2206 | .ndo_tx_timeout = tile_net_tx_timeout, | |
2207 | .ndo_set_mac_address = tile_net_set_mac_address | |
2208 | }; | |
2209 | ||
2210 | ||
2211 | /* | |
2212 | * The setup function. | |
2213 | * | |
2214 | * This uses ether_setup() to assign various fields in dev, including | |
2215 | * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields. | |
2216 | */ | |
2217 | static void tile_net_setup(struct net_device *dev) | |
2218 | { | |
2219 | PDEBUG("tile_net_setup()\n"); | |
2220 | ||
2221 | ether_setup(dev); | |
2222 | ||
2223 | dev->netdev_ops = &tile_net_ops; | |
2224 | ||
2225 | dev->watchdog_timeo = TILE_NET_TIMEOUT; | |
2226 | ||
2227 | /* We want lockless xmit. */ | |
2228 | dev->features |= NETIF_F_LLTX; | |
2229 | ||
2230 | /* We support hardware tx checksums. */ | |
2231 | dev->features |= NETIF_F_HW_CSUM; | |
2232 | ||
2233 | /* We support scatter/gather. */ | |
2234 | dev->features |= NETIF_F_SG; | |
2235 | ||
2236 | /* We support TSO. */ | |
2237 | dev->features |= NETIF_F_TSO; | |
2238 | ||
2239 | #ifdef TILE_NET_GSO | |
2240 | /* We support GSO. */ | |
2241 | dev->features |= NETIF_F_GSO; | |
2242 | #endif | |
2243 | ||
2244 | if (hash_default) | |
2245 | dev->features |= NETIF_F_HIGHDMA; | |
2246 | ||
2247 | /* ISSUE: We should support NETIF_F_UFO. */ | |
2248 | ||
2249 | dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN; | |
2250 | ||
2251 | dev->mtu = TILE_NET_MTU; | |
2252 | } | |
2253 | ||
2254 | ||
2255 | /* | |
2256 | * Allocate the device structure, register the device, and obtain the | |
2257 | * MAC address from the hypervisor. | |
2258 | */ | |
2259 | static struct net_device *tile_net_dev_init(const char *name) | |
2260 | { | |
2261 | int ret; | |
2262 | struct net_device *dev; | |
2263 | struct tile_net_priv *priv; | |
2264 | struct page *page; | |
2265 | ||
2266 | /* | |
2267 | * Allocate the device structure. This allocates "priv", calls | |
2268 | * tile_net_setup(), and saves "name". Normally, "name" is a | |
2269 | * template, instantiated by register_netdev(), but not for us. | |
2270 | */ | |
2271 | dev = alloc_netdev(sizeof(*priv), name, tile_net_setup); | |
2272 | if (!dev) { | |
2273 | pr_err("alloc_netdev(%s) failed\n", name); | |
2274 | return NULL; | |
2275 | } | |
2276 | ||
2277 | priv = netdev_priv(dev); | |
2278 | ||
2279 | /* Initialize "priv". */ | |
2280 | ||
2281 | memset(priv, 0, sizeof(*priv)); | |
2282 | ||
2283 | /* Save "dev" for "tile_net_open_retry()". */ | |
2284 | priv->dev = dev; | |
2285 | ||
2286 | INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry); | |
2287 | ||
2288 | spin_lock_init(&priv->cmd_lock); | |
2289 | spin_lock_init(&priv->comp_lock); | |
2290 | ||
2291 | /* Allocate "epp_queue". */ | |
2292 | BUG_ON(get_order(sizeof(lepp_queue_t)) != 0); | |
2293 | page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0); | |
2294 | if (!page) { | |
2295 | free_netdev(dev); | |
2296 | return NULL; | |
2297 | } | |
2298 | priv->epp_queue = page_address(page); | |
2299 | ||
2300 | /* Register the network device. */ | |
2301 | ret = register_netdev(dev); | |
2302 | if (ret) { | |
2303 | pr_err("register_netdev %s failed %d\n", dev->name, ret); | |
2304 | free_page((unsigned long)priv->epp_queue); | |
2305 | free_netdev(dev); | |
2306 | return NULL; | |
2307 | } | |
2308 | ||
2309 | /* Get the MAC address. */ | |
2310 | ret = tile_net_get_mac(dev); | |
2311 | if (ret < 0) { | |
2312 | unregister_netdev(dev); | |
2313 | free_page((unsigned long)priv->epp_queue); | |
2314 | free_netdev(dev); | |
2315 | return NULL; | |
2316 | } | |
2317 | ||
2318 | return dev; | |
2319 | } | |
2320 | ||
2321 | ||
2322 | /* | |
2323 | * Module cleanup. | |
2324 | */ | |
2325 | static void tile_net_cleanup(void) | |
2326 | { | |
2327 | int i; | |
2328 | ||
2329 | for (i = 0; i < TILE_NET_DEVS; i++) { | |
2330 | if (tile_net_devs[i]) { | |
2331 | struct net_device *dev = tile_net_devs[i]; | |
2332 | struct tile_net_priv *priv = netdev_priv(dev); | |
2333 | unregister_netdev(dev); | |
2334 | finv_buffer(priv->epp_queue, PAGE_SIZE); | |
2335 | free_page((unsigned long)priv->epp_queue); | |
2336 | free_netdev(dev); | |
2337 | } | |
2338 | } | |
2339 | } | |
2340 | ||
2341 | ||
2342 | /* | |
2343 | * Module initialization. | |
2344 | */ | |
2345 | static int tile_net_init_module(void) | |
2346 | { | |
2347 | pr_info("Tilera IPP Net Driver\n"); | |
2348 | ||
2349 | tile_net_devs[0] = tile_net_dev_init("xgbe0"); | |
2350 | tile_net_devs[1] = tile_net_dev_init("xgbe1"); | |
2351 | tile_net_devs[2] = tile_net_dev_init("gbe0"); | |
2352 | tile_net_devs[3] = tile_net_dev_init("gbe1"); | |
2353 | ||
2354 | return 0; | |
2355 | } | |
2356 | ||
2357 | ||
2358 | #ifndef MODULE | |
2359 | /* | |
2360 | * The "network_cpus" boot argument specifies the cpus that are dedicated | |
2361 | * to handling ingress packets. | |
2362 | * | |
2363 | * The parameter should be in the form "network_cpus=m-n[,x-y]", where | |
2364 | * m, n, x, y are cpu numbers; the cpus listed may be neither | |
2365 | * dedicated cpus nor dataplane cpus. | |
2366 | */ | |
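| ||
| /* | |
|  * For example, booting with "network_cpus=4-15" would dedicate cpus | |
|  * 4 through 15 to ingress processing (an illustrative value). | |
|  */ | |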
2367 | static int __init network_cpus_setup(char *str) | |
2368 | { | |
2369 | int rc = cpulist_parse_crop(str, &network_cpus_map); | |
2370 | if (rc != 0) { | |
2371 | pr_warning("network_cpus=%s: malformed cpu list\n", str); | |
2373 | } else { | |
2374 | ||
2375 | /* Remove cpus that Linux cannot use (e.g. hypervisor-dedicated). */ | |
2376 | cpumask_and(&network_cpus_map, &network_cpus_map, | |
2377 | cpu_possible_mask); | |
2378 | ||
2379 | ||
2380 | if (cpumask_empty(&network_cpus_map)) { | |
2381 | pr_warning("Ignoring network_cpus='%s'.\n", str); | |
2383 | } else { | |
2384 | char buf[1024]; | |
2385 | cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map); | |
2386 | pr_info("Linux network CPUs: %s\n", buf); | |
2387 | network_cpus_used = true; | |
2388 | } | |
2389 | } | |
2390 | ||
2391 | return 0; | |
2392 | } | |
2393 | __setup("network_cpus=", network_cpus_setup); | |
2394 | #endif | |
2395 | ||
2396 | ||
2397 | module_init(tile_net_init_module); | |
2398 | module_exit(tile_net_cleanup); |