drivers/staging/unisys/visornic/visornic_main.c
1/* Copyright (c) 2012 - 2015 UNISYS CORPORATION
2 * All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for more
12 * details.
13 */
14
15/* This driver lives in a spar partition, and registers to ethernet IO
16 * channels from the visorbus driver. It creates netdev devices and
17 * forwards transmits to the IO channel and accepts receives from the
18 * IO Partition via the IO channel.
19 */
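/* A minimal sketch of the data path as implemented below: transmits
 * are wrapped in a struct uiscmdrsp and inserted into the
 * IOCHAN_TO_IOPART queue with visorchannel_signalinsert(); receives
 * and xmit completions come back on IOCHAN_FROM_IOPART and are
 * drained by the NAPI poll routine.
 */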
20
21#include <linux/debugfs.h>
22#include <linux/etherdevice.h>
23#include <linux/netdevice.h>
24#include <linux/kthread.h>
25#include <linux/skbuff.h>
26#include <linux/rtnetlink.h>
27
28#include "visorbus.h"
29#include "iochannel.h"
30
31#define VISORNIC_INFINITE_RSP_WAIT 0
32
33/* MAX_BUF = 64 lines x 32 MAXVNIC x 80 characters
34 * = 163840 bytes
35 */
36#define MAX_BUF 163840
37#define NAPI_WEIGHT 64
38
39static int visornic_probe(struct visor_device *dev);
40static void visornic_remove(struct visor_device *dev);
41static int visornic_pause(struct visor_device *dev,
42 visorbus_state_complete_func complete_func);
43static int visornic_resume(struct visor_device *dev,
44 visorbus_state_complete_func complete_func);
45
46/* DEBUGFS declarations */
47static ssize_t info_debugfs_read(struct file *file, char __user *buf,
48 size_t len, loff_t *offset);
49static ssize_t enable_ints_write(struct file *file, const char __user *buf,
50 size_t len, loff_t *ppos);
51static struct dentry *visornic_debugfs_dir;
52static const struct file_operations debugfs_info_fops = {
53 .read = info_debugfs_read,
54};
55
56static const struct file_operations debugfs_enable_ints_fops = {
57 .write = enable_ints_write,
58};
59
60/* GUIDs for the vnic channel type supported by this driver. */
61static struct visor_channeltype_descriptor visornic_channel_types[] = {
62 /* Note that the only channel type we expect to be reported by the
63 * bus driver is the SPAR_VNIC channel.
64 */
65 { SPAR_VNIC_CHANNEL_PROTOCOL_UUID, "ultravnic" },
66 { NULL_UUID_LE, NULL }
67};
68MODULE_DEVICE_TABLE(visorbus, visornic_channel_types);
69/*
70 * FIXME XXX: This next line of code must be fixed and removed before
71 * acceptance into the 'normal' part of the kernel. It is only here as a place
72 * holder to get module autoloading functionality working for visorbus. Code
73 * must be added to scripts/mod/file2alias.c, etc., to get this working
74 * properly.
75 */
76MODULE_ALIAS("visorbus:" SPAR_VNIC_CHANNEL_PROTOCOL_UUID_STR);
77
78/* This is used to tell the visor bus driver which types of visor devices
79 * we support, and what functions to call when a visor device that we support
80 * is attached or removed.
81 */
82static struct visor_driver visornic_driver = {
83 .name = "visornic",
84 .version = "1.0.0.0",
85 .vertag = NULL,
86 .owner = THIS_MODULE,
87 .channel_types = visornic_channel_types,
88 .probe = visornic_probe,
89 .remove = visornic_remove,
90 .pause = visornic_pause,
91 .resume = visornic_resume,
92 .channel_interrupt = NULL,
93};
94
95struct chanstat {
96 unsigned long got_rcv;
97 unsigned long got_enbdisack;
98 unsigned long got_xmit_done;
99 unsigned long xmit_fail;
100 unsigned long sent_enbdis;
101 unsigned long sent_promisc;
102 unsigned long sent_post;
103 unsigned long sent_post_failed;
104 unsigned long sent_xmit;
105 unsigned long reject_count;
106 unsigned long extra_rcvbufs_sent;
107};
108
109struct visornic_devdata {
110 /* 0 disabled 1 enabled to receive */
111 unsigned short enabled;
112 /* NET_RCV_ENABLE/DISABLE acked by IOPART */
113 unsigned short enab_dis_acked;
114
115 struct visor_device *dev;
116 struct net_device *netdev;
117 struct net_device_stats net_stats;
118 atomic_t interrupt_rcvd;
119 wait_queue_head_t rsp_queue;
120 struct sk_buff **rcvbuf;
121 /* incarnation_id lets IOPART know about re-birth */
122 u64 incarnation_id;
123 /* flags as they were prior to set_multicast_list */
124 unsigned short old_flags;
125 atomic_t usage; /* count of users */
126
127 /* number of rcv buffers the vnic will post */
128 int num_rcv_bufs;
129 int num_rcv_bufs_could_not_alloc;
130 atomic_t num_rcvbuf_in_iovm;
131 unsigned long alloc_failed_in_if_needed_cnt;
132 unsigned long alloc_failed_in_repost_rtn_cnt;
133
134 /* absolute max number of outstanding xmits - should never hit this */
135 unsigned long max_outstanding_net_xmits;
136 /* high water mark for calling netif_stop_queue() */
137 unsigned long upper_threshold_net_xmits;
138 /* high water mark for calling netif_wake_queue() */
139 unsigned long lower_threshold_net_xmits;
140 /* xmitbufhead - head of the xmit buffer list sent to the IOPART end */
141 struct sk_buff_head xmitbufhead;
142
143 visorbus_state_complete_func server_down_complete_func;
144 struct work_struct timeout_reset;
145 /* cmdrsp_rcv is used for posting/unposting rcv buffers */
146 struct uiscmdrsp *cmdrsp_rcv;
147 /* xmit_cmdrsp - issues NET_XMIT - only one active xmit at a time */
148 struct uiscmdrsp *xmit_cmdrsp;
149
150 bool server_down; /* IOPART is down */
151 bool server_change_state; /* Processing SERVER_CHANGESTATE msg */
152 bool going_away; /* device is being torn down */
153 struct dentry *eth_debugfs_dir;
154 u64 interrupts_rcvd;
155 u64 interrupts_notme;
156 u64 interrupts_disabled;
157 u64 busy_cnt;
158 spinlock_t priv_lock; /* spinlock to access devdata structures */
159
160 /* flow control counter */
161 u64 flow_control_upper_hits;
162 u64 flow_control_lower_hits;
163
164 /* debug counters */
165 unsigned long n_rcv0; /* # rcvs of 0 buffers */
166 unsigned long n_rcv1; /* # rcvs of 1 buffers */
167 unsigned long n_rcv2; /* # rcvs of 2 buffers */
168 unsigned long n_rcvx; /* # rcvs of >2 buffers */
169 unsigned long found_repost_rcvbuf_cnt; /* # repost_rcvbuf_cnt */
170 unsigned long repost_found_skb_cnt; /* # of found the skb */
171 unsigned long n_repost_deficit; /* # of lost rcv buffers */
172 unsigned long bad_rcv_buf; /* # of unknown rcv skb not freed */
173 unsigned long n_rcv_packets_not_accepted;/* # bogus rcv packets */
174
175 int queuefullmsg_logged;
176 struct chanstat chstat;
177 struct timer_list irq_poll_timer;
178 struct napi_struct napi;
179 struct uiscmdrsp cmdrsp[SIZEOF_CMDRSP];
180};
181
182static int visornic_poll(struct napi_struct *napi, int budget);
183static void poll_for_irq(unsigned long v);
184
185/**
186 * visor_copy_fragsinfo_from_skb - copy frags of an skb to a phys_info array
187 * @skb: skbuff that we are pulling the frags from
188 * @firstfraglen: length of first fragment in skb
189 * @frags_max: max len of frags array
190 * @frags: frags array filled in on output
191 *
192 * Copy the fragment list in the SKB to a phys_info
193 * array that the IOPART understands.
194 * Return value indicates number of entries filled in frags
195 * Negative values indicate an error.
196 */
197static int
198visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
199 unsigned int frags_max,
200 struct phys_info frags[])
201{
202 unsigned int count = 0, frag, size, offset = 0, numfrags;
203 unsigned int total_count;
204
205 numfrags = skb_shinfo(skb)->nr_frags;
206
207 /* Compute the number of fragments this skb has, and if it's more than
208 * the frag array can hold, linearize the skb
209 */
210 total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
211 if (firstfraglen % PI_PAGE_SIZE)
212 total_count++;
213
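 /* Worked example, assuming PI_PAGE_SIZE is 4096: an skb with
  * numfrags = 2 and firstfraglen = 9000 needs 9000 / 4096 = 2 full
  * pages plus one partial page for the head, so total_count is
  * 2 + 2 + 1 = 5 entries.
  */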
214 if (total_count > frags_max) {
215 if (skb_linearize(skb))
216 return -EINVAL;
217 numfrags = skb_shinfo(skb)->nr_frags;
218 firstfraglen = 0;
219 }
220
221 while (firstfraglen) {
222 if (count == frags_max)
223 return -EINVAL;
224
225 frags[count].pi_pfn =
226 page_to_pfn(virt_to_page(skb->data + offset));
227 frags[count].pi_off =
228 (unsigned long)(skb->data + offset) & PI_PAGE_MASK;
229 size = min_t(unsigned int, firstfraglen,
230 PI_PAGE_SIZE - frags[count].pi_off);
231
232 /* can take smallest of firstfraglen (what's left) OR
233 * bytes left in the page
234 */
235 frags[count].pi_len = size;
236 firstfraglen -= size;
237 offset += size;
238 count++;
239 }
240 if (numfrags) {
241 if ((count + numfrags) > frags_max)
242 return -EINVAL;
243
244 for (frag = 0; frag < numfrags; frag++) {
245 count = add_physinfo_entries(page_to_pfn(
246 skb_frag_page(&skb_shinfo(skb)->frags[frag])),
247 skb_shinfo(skb)->frags[frag].
248 page_offset,
249 skb_shinfo(skb)->frags[frag].
250 size, count, frags_max, frags);
251 /* add_physinfo_entries only returns
252 * zero if the frags array is out of room.
253 * That should never happen because we
254 * fail above if count + numfrags > frags_max.
255 */
256 if (!count)
257 return -EINVAL;
258 }
259 }
260 if (skb_shinfo(skb)->frag_list) {
261 struct sk_buff *skbinlist;
262 int c;
263
264 for (skbinlist = skb_shinfo(skb)->frag_list; skbinlist;
265 skbinlist = skbinlist->next) {
266 c = visor_copy_fragsinfo_from_skb(skbinlist,
267 skbinlist->len -
268 skbinlist->data_len,
269 frags_max - count,
270 &frags[count]);
271 if (c < 0)
272 return c;
273 count += c;
274 }
275 }
276 return count;
277}
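/* Usage sketch: a fully linear 1514-byte skb fills a single phys_info
 * entry, or two entries when skb->data happens to straddle a
 * PI_PAGE_SIZE boundary; chained skbs on frag_list recurse through
 * this same function with the remaining room in frags[].
 */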
278
279static ssize_t enable_ints_write(struct file *file,
280 const char __user *buffer,
281 size_t count, loff_t *ppos)
282{
283 /* Don't want to break ABI here by having a debugfs
284 * file that no longer exists or is writable, so
285 * let's just make this a vestigial function
286 */
287 return count;
288}
289
290/**
291 * visornic_serverdown_complete - IOPART went down, pause device
292 * @devdata: visornic device that is being paused
293 *
294 * The IO partition has gone down and we need to do some cleanup
295 * for when it comes back. Treat the IO partition as the link
296 * being down.
297 * Returns void.
298 */
299static void
300visornic_serverdown_complete(struct visornic_devdata *devdata)
301{
302 struct net_device *netdev;
303
304 netdev = devdata->netdev;
305
306 /* Stop polling for interrupts */
307 del_timer_sync(&devdata->irq_poll_timer);
308
309 rtnl_lock();
310 dev_close(netdev);
311 rtnl_unlock();
312
313 atomic_set(&devdata->num_rcvbuf_in_iovm, 0);
314 devdata->chstat.sent_xmit = 0;
315 devdata->chstat.got_xmit_done = 0;
316
317 if (devdata->server_down_complete_func)
318 (*devdata->server_down_complete_func)(devdata->dev, 0);
319
320 devdata->server_down = true;
321 devdata->server_change_state = false;
322 devdata->server_down_complete_func = NULL;
323}
324
325/**
326 * visornic_serverdown - Command has notified us that IOPART is down
327 * @devdata: device that is being managed by IOPART
328 *
329 * Schedule the work needed to handle the server down request. Make
330 * sure we haven't already handled the server change state event.
331 * Returns 0 if we scheduled the work, -EINVAL on error.
332 */
333static int
334visornic_serverdown(struct visornic_devdata *devdata,
335 visorbus_state_complete_func complete_func)
68905a14 336{
337 unsigned long flags;
338 int err;
339
340 spin_lock_irqsave(&devdata->priv_lock, flags);
341 if (devdata->server_change_state) {
342 dev_dbg(&devdata->dev->device, "%s changing state\n",
343 __func__);
344 err = -EINVAL;
345 goto err_unlock;
346 }
347 if (devdata->server_down) {
348 dev_dbg(&devdata->dev->device, "%s already down\n",
349 __func__);
350 err = -EINVAL;
351 goto err_unlock;
352 }
353 if (devdata->going_away) {
354 dev_dbg(&devdata->dev->device,
355 "%s aborting because device removal pending\n",
356 __func__);
357 err = -ENODEV;
358 goto err_unlock;
359 }
360 devdata->server_change_state = true;
361 devdata->server_down_complete_func = complete_func;
362 spin_unlock_irqrestore(&devdata->priv_lock, flags);
363
364 visornic_serverdown_complete(devdata);
68905a14 365 return 0;
366
367err_unlock:
368 spin_unlock_irqrestore(&devdata->priv_lock, flags);
369 return err;
370}
371
372/**
373 * alloc_rcv_buf - alloc rcv buffer to be given to the IO Partition.
374 * @netdev: network adapter the rcv bufs are attached to.
375 *
376 * Create an sk_buff (rcv_buf) that will be passed to the IO Partition
377 * so that it can write rcv data into our memory space.
378 * Return pointer to sk_buff
379 */
380static struct sk_buff *
381alloc_rcv_buf(struct net_device *netdev)
382{
383 struct sk_buff *skb;
384
385 /* NOTE: the first fragment in each rcv buffer is pointed to by
386 * rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
387 * in length, so the first frag is large enough to hold 1514.
388 */
389 skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
390 if (!skb)
391 return NULL;
392 skb->dev = netdev;
393 /* current value of mtu doesn't come into play here; large
394 * packets will just end up using multiple rcv buffers all of
395 * the same size.
396 */
397 skb->len = RCVPOST_BUF_SIZE;
398 /* alloc_skb already zeroes it out; this assignment is for clarity. */
399 skb->data_len = 0;
400 return skb;
401}
402
403/**
404 * post_skb - post a skb to the IO Partition.
405 * @cmdrsp: cmdrsp packet to be sent to the IO Partition
406 * @devdata: visornic_devdata to post the skb to
407 * @skb: skb to give to the IO partition
408 *
409 * Send the skb to the IO Partition.
410 * Returns void
411 */
412static inline void
413post_skb(struct uiscmdrsp *cmdrsp,
414 struct visornic_devdata *devdata, struct sk_buff *skb)
415{
416 cmdrsp->net.buf = skb;
417 cmdrsp->net.rcvpost.frag.pi_pfn = page_to_pfn(virt_to_page(skb->data));
418 cmdrsp->net.rcvpost.frag.pi_off =
419 (unsigned long)skb->data & PI_PAGE_MASK;
420 cmdrsp->net.rcvpost.frag.pi_len = skb->len;
421 cmdrsp->net.rcvpost.unique_num = devdata->incarnation_id;
422
423 if ((cmdrsp->net.rcvpost.frag.pi_off + skb->len) <= PI_PAGE_SIZE) {
424 cmdrsp->net.type = NET_RCV_POST;
425 cmdrsp->cmdtype = CMD_NET_TYPE;
426 if (visorchannel_signalinsert(devdata->dev->visorchannel,
427 IOCHAN_TO_IOPART,
428 cmdrsp)) {
429 atomic_inc(&devdata->num_rcvbuf_in_iovm);
430 devdata->chstat.sent_post++;
431 } else {
432 devdata->chstat.sent_post_failed++;
433 }
434 }
435}
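/* Note: when the buffer would cross a page boundary (pi_off + len >
 * PI_PAGE_SIZE), post_skb falls through without posting; the skb is
 * neither queued to the IOVM nor freed here, and neither sent_post
 * nor sent_post_failed is incremented.
 */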
436
437/**
438 * send_enbdis - send NET_RCV_ENBDIS to IO Partition
439 * @netdev: netdevice we are enabling/disabling, used as context
440 * return value
441 * @state: enable = 1/disable = 0
442 * @devdata: visornic device we are enabling/disabling
443 *
444 * Send the enable/disable message to the IO Partition.
445 * Returns void
446 */
447static void
448send_enbdis(struct net_device *netdev, int state,
449 struct visornic_devdata *devdata)
450{
451 devdata->cmdrsp_rcv->net.enbdis.enable = state;
452 devdata->cmdrsp_rcv->net.enbdis.context = netdev;
453 devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
454 devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
455 if (visorchannel_signalinsert(devdata->dev->visorchannel,
456 IOCHAN_TO_IOPART,
457 devdata->cmdrsp_rcv))
458 devdata->chstat.sent_enbdis++;
459}
460
461/**
462 * visornic_disable_with_timeout - Disable network adapter
463 * @netdev: netdevice to disable
464 * @timeout: timeout to wait for disable
465 *
466 * Disable the network adapter and inform the IO Partition that we
467 * are disabled, reclaim memory from rcv bufs.
468 * Returns 0 on success, negative for failure of IO Partition
469 * responding.
470 *
471 */
472static int
473visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
474{
475 struct visornic_devdata *devdata = netdev_priv(netdev);
476 int i;
477 unsigned long flags;
478 int wait = 0;
479
480 /* send a msg telling the other end we are stopping incoming pkts */
481 spin_lock_irqsave(&devdata->priv_lock, flags);
482 devdata->enabled = 0;
483 devdata->enab_dis_acked = 0; /* must wait for ack */
484 spin_unlock_irqrestore(&devdata->priv_lock, flags);
485
486 /* send disable and wait for ack -- don't hold lock when sending
487 * disable because if the queue is full, insert might sleep.
488 */
489 send_enbdis(netdev, 0, devdata);
490
491 /* wait for ack to arrive before we try to free rcv buffers
492 * NOTE: the other end automatically unposts the rcv buffers
493 * when it gets a disable.
494 */
495 spin_lock_irqsave(&devdata->priv_lock, flags);
496 while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
497 (wait < timeout)) {
498 if (devdata->enab_dis_acked)
499 break;
500 if (devdata->server_down || devdata->server_change_state) {
501 spin_unlock_irqrestore(&devdata->priv_lock, flags);
502 dev_dbg(&netdev->dev, "%s server went away\n",
503 __func__);
504 return -EIO;
505 }
506 set_current_state(TASK_INTERRUPTIBLE);
507 spin_unlock_irqrestore(&devdata->priv_lock, flags);
508 wait += schedule_timeout(msecs_to_jiffies(10));
509 spin_lock_irqsave(&devdata->priv_lock, flags);
510 }
511
512 /* Wait for usage to go to 1 (no other users) before freeing
513 * rcv buffers
514 */
515 if (atomic_read(&devdata->usage) > 1) {
516 while (1) {
517 set_current_state(TASK_INTERRUPTIBLE);
518 spin_unlock_irqrestore(&devdata->priv_lock, flags);
519 schedule_timeout(msecs_to_jiffies(10));
520 spin_lock_irqsave(&devdata->priv_lock, flags);
521 if (atomic_read(&devdata->usage))
522 break;
523 }
524 }
525 /* we've set enabled to 0, so we can give up the lock. */
526 spin_unlock_irqrestore(&devdata->priv_lock, flags);
527
528 /* stop the transmit queue so nothing more can be transmitted */
529 netif_stop_queue(netdev);
530
531 napi_disable(&devdata->napi);
532
533 skb_queue_purge(&devdata->xmitbufhead);
534
535 /* Free rcv buffers - other end has automatically unposted them on
536 * disable
537 */
538 for (i = 0; i < devdata->num_rcv_bufs; i++) {
539 if (devdata->rcvbuf[i]) {
540 kfree_skb(devdata->rcvbuf[i]);
541 devdata->rcvbuf[i] = NULL;
542 }
543 }
544
545 return 0;
546}
547
548/**
549 * init_rcv_bufs -- initialize receive bufs and send them to the IO Part
550 * @netdev: struct netdevice
551 * @devdata: visornic_devdata
552 *
553 * Allocate rcv buffers and post them to the IO Partition.
554 * Return 0 for success, and negative for failure.
555 */
556static int
557init_rcv_bufs(struct net_device *netdev, struct visornic_devdata *devdata)
558{
559 int i, count;
560
561 /* allocate fixed number of receive buffers to post to uisnic
562 * post receive buffers after we've allocated a required amount
563 */
564 for (i = 0; i < devdata->num_rcv_bufs; i++) {
565 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
566 if (!devdata->rcvbuf[i])
567 break; /* if we failed to allocate one let us stop */
568 }
569 if (i == 0) /* couldn't even allocate one -- bail out */
570 return -ENOMEM;
571 count = i;
572
573 /* Ensure we can alloc 2/3rds of the requested number of buffers.
574 * 2/3 is an arbitrary choice; used also in ndis init.c
575 */
576 if (count < ((2 * devdata->num_rcv_bufs) / 3)) {
577 /* free receive buffers we did alloc and then bail out */
578 for (i = 0; i < count; i++) {
579 kfree_skb(devdata->rcvbuf[i]);
580 devdata->rcvbuf[i] = NULL;
581 }
582 return -ENOMEM;
583 }
584
585 /* post receive buffers to receive incoming input - without holding
586 * lock - we've not enabled nor started the queue so there shouldn't
587 * be any rcv or xmit activity
588 */
589 for (i = 0; i < count; i++)
590 post_skb(devdata->cmdrsp_rcv, devdata, devdata->rcvbuf[i]);
591
592 return 0;
593}
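/* e.g. with num_rcv_bufs = 64 the allocation loop must produce at
 * least (2 * 64) / 3 = 42 buffers; anything less frees what was
 * allocated and returns -ENOMEM.
 */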
594
595/**
596 * visornic_enable_with_timeout - send enable to IO Part
597 * @netdev: struct net_device
598 * @timeout: Time to wait for the ACK from the enable
599 *
600 * Sends enable to IOVM, inits, and posts receive buffers to IOVM
601 * timeout is defined in msecs (timeout of 0 specifies infinite wait)
602 * Return 0 for success, negative for failure.
603 */
604static int
605visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
606{
607 int i;
608 struct visornic_devdata *devdata = netdev_priv(netdev);
609 unsigned long flags;
610 int wait = 0;
611
612 /* NOTE: the other end automatically unposts the rcv buffers when it
613 * gets a disable.
614 */
615 i = init_rcv_bufs(netdev, devdata);
616 if (i < 0) {
617 dev_err(&netdev->dev,
618 "%s failed to init rcv bufs (%d)\n", __func__, i);
619 return i;
620 }
621
622 spin_lock_irqsave(&devdata->priv_lock, flags);
623 devdata->enabled = 1;
624 devdata->enab_dis_acked = 0;
625
626 /* now we're ready, let's send an ENB to uisnic but until we get
627 * an ACK back from uisnic, we'll drop the packets
628 */
629 devdata->n_rcv_packets_not_accepted = 0;
630 spin_unlock_irqrestore(&devdata->priv_lock, flags);
631
632 /* send enable and wait for ack -- don't hold lock when sending enable
633 * because if the queue is full, insert might sleep.
634 */
635 napi_enable(&devdata->napi);
636 send_enbdis(netdev, 1, devdata);
637
638 spin_lock_irqsave(&devdata->priv_lock, flags);
639 while ((timeout == VISORNIC_INFINITE_RSP_WAIT) ||
640 (wait < timeout)) {
641 if (devdata->enab_dis_acked)
642 break;
643 if (devdata->server_down || devdata->server_change_state) {
644 spin_unlock_irqrestore(&devdata->priv_lock, flags);
645 dev_dbg(&netdev->dev, "%s server went away\n",
646 __func__);
647 return -EIO;
648 }
649 set_current_state(TASK_INTERRUPTIBLE);
650 spin_unlock_irqrestore(&devdata->priv_lock, flags);
651 wait += schedule_timeout(msecs_to_jiffies(10));
652 spin_lock_irqsave(&devdata->priv_lock, flags);
653 }
654
655 spin_unlock_irqrestore(&devdata->priv_lock, flags);
656
657 if (!devdata->enab_dis_acked) {
658 dev_err(&netdev->dev, "%s missing ACK\n", __func__);
659 return -EIO;
660 }
661
662 netif_start_queue(netdev);
663
664 return 0;
665}
666
667/**
668 * visornic_timeout_reset - handle xmit timeout resets
669 * @work: work item that scheduled the work
670 *
671 * Transmit timeouts are typically handled by resetting the
672 * device. For our virtual NIC we will send a Disable and Enable
673 * to the IOVM. If it doesn't respond, we will trigger a serverdown.
674 */
675static void
676visornic_timeout_reset(struct work_struct *work)
677{
678 struct visornic_devdata *devdata;
679 struct net_device *netdev;
680 int response = 0;
681
682 devdata = container_of(work, struct visornic_devdata, timeout_reset);
683 netdev = devdata->netdev;
684
685 rtnl_lock();
686 if (!netif_running(netdev)) {
687 rtnl_unlock();
688 return;
689 }
690
691 response = visornic_disable_with_timeout(netdev,
692 VISORNIC_INFINITE_RSP_WAIT);
693 if (response)
694 goto call_serverdown;
695
696 response = visornic_enable_with_timeout(netdev,
697 VISORNIC_INFINITE_RSP_WAIT);
698 if (response)
699 goto call_serverdown;
700
701 rtnl_unlock();
702
703 return;
704
705call_serverdown:
706 visornic_serverdown(devdata, NULL);
707 rtnl_unlock();
708}
709
710/**
711 * visornic_open - Enable the visornic device and mark the queue started
712 * @netdev: netdevice to start
713 *
714 * Enable the device and start the transmit queue.
715 * Return 0 for success
716 */
717static int
718visornic_open(struct net_device *netdev)
719{
720 visornic_enable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
721
722 return 0;
723}
724
725/**
726 * visornic_close - Disables the visornic device and stops the queues
727 * @netdev: netdevice to start
728 *
729 * Disable the device and stop the transmit queue.
730 * Return 0 for success
731 */
732static int
733visornic_close(struct net_device *netdev)
734{
735 visornic_disable_with_timeout(netdev, VISORNIC_INFINITE_RSP_WAIT);
736
737 return 0;
738}
739
740/**
741 * devdata_xmits_outstanding - compute outstanding xmits
742 * @devdata: visornic_devdata for device
743 *
744 * Return value is the number of outstanding xmits.
745 */
746static unsigned long devdata_xmits_outstanding(struct visornic_devdata *devdata)
747{
748 if (devdata->chstat.sent_xmit >= devdata->chstat.got_xmit_done)
749 return devdata->chstat.sent_xmit -
750 devdata->chstat.got_xmit_done;
751 return (ULONG_MAX - devdata->chstat.got_xmit_done
752 + devdata->chstat.sent_xmit + 1);
753}
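/* Wraparound example: if sent_xmit has wrapped to 5 while
 * got_xmit_done is still ULONG_MAX - 2, this returns
 * (ULONG_MAX - (ULONG_MAX - 2)) + 5 + 1 = 8, the true modular
 * difference between the two counters.
 */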
754
755/**
756 * vnic_hit_high_watermark
757 * @devdata: indicates visornic device we are checking
758 * @high_watermark: max num of unacked xmits we will tolerate,
759 * before we will start throttling
760 *
761 * Returns true iff the number of unacked xmits sent to
762 * the IO partition is >= high_watermark.
763 */
764static inline bool vnic_hit_high_watermark(struct visornic_devdata *devdata,
765 ulong high_watermark)
766{
767 return (devdata_xmits_outstanding(devdata) >= high_watermark);
768}
769
770/**
771 * vnic_hit_low_watermark
772 * @devdata: indicates visornic device we are checking
773 * @low_watermark: we will wait until the num of unacked xmits
774 * drops to this value or lower before we start
775 * transmitting again
776 *
777 * Returns true iff the number of unacked xmits sent to
778 * the IO partition is <= low_watermark.
779 */
780static inline bool vnic_hit_low_watermark(struct visornic_devdata *devdata,
781 ulong low_watermark)
782{
783 return (devdata_xmits_outstanding(devdata) <= low_watermark);
784}
785
786/**
787 * visornic_xmit - send a packet to the IO Partition
788 * @skb: Packet to be sent
789 * @netdev: net device the packet is being sent from
790 *
791 * Convert the skb to a cmdrsp so the IO Partition can understand it.
792 * Send the XMIT command to the IO Partition for processing. This
793 * function is protected from concurrent calls by a spinlock xmit_lock
794 * in the net_device struct, but as soon as the function returns it
795 * can be called again.
796 * Returns NETDEV_TX_OK.
797 */
798static int
799visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
800{
801 struct visornic_devdata *devdata;
802 int len, firstfraglen, padlen;
803 struct uiscmdrsp *cmdrsp = NULL;
804 unsigned long flags;
805
806 devdata = netdev_priv(netdev);
807 spin_lock_irqsave(&devdata->priv_lock, flags);
808
809 if (netif_queue_stopped(netdev) || devdata->server_down ||
810 devdata->server_change_state) {
811 spin_unlock_irqrestore(&devdata->priv_lock, flags);
812 devdata->busy_cnt++;
813 dev_dbg(&netdev->dev,
814 "%s busy - queue stopped\n", __func__);
815 kfree_skb(skb);
816 return NETDEV_TX_OK;
817 }
818
819 /* sk_buff struct is used to host network data throughout all the
820 * linux network subsystems
821 */
822 len = skb->len;
823
824 /* skb->len is the FULL length of data (including fragmentary portion)
825 * skb->data_len is the length of the fragment portion in frags
826 * skb->len - skb->data_len is size of the 1st fragment in skb->data
827 * calculate the length of the first fragment that skb->data is
828 * pointing to
829 */
830 firstfraglen = skb->len - skb->data_len;
831 if (firstfraglen < ETH_HEADER_SIZE) {
832 spin_unlock_irqrestore(&devdata->priv_lock, flags);
833 devdata->busy_cnt++;
834 dev_err(&netdev->dev,
835 "%s busy - first frag too small (%d)\n",
836 __func__, firstfraglen);
837 kfree_skb(skb);
838 return NETDEV_TX_OK;
839 }
840
841 if ((len < ETH_MIN_PACKET_SIZE) &&
842 ((skb_end_pointer(skb) - skb->data) >= ETH_MIN_PACKET_SIZE)) {
843 /* pad the packet out to minimum size */
844 padlen = ETH_MIN_PACKET_SIZE - len;
845 memset(&skb->data[len], 0, padlen);
846 skb->tail += padlen;
847 skb->len += padlen;
848 len += padlen;
849 firstfraglen += padlen;
850 }
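 /* e.g. a 42-byte ARP request (14-byte ethernet header plus 28-byte
  * ARP payload) is zero-padded out to ETH_MIN_PACKET_SIZE (assumed
  * here to be the usual 60) before being handed to the IOVM.
  */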
851
852 cmdrsp = devdata->xmit_cmdrsp;
853 /* clear cmdrsp */
854 memset(cmdrsp, 0, SIZEOF_CMDRSP);
855 cmdrsp->net.type = NET_XMIT;
856 cmdrsp->cmdtype = CMD_NET_TYPE;
857
858 /* save the pointer to skb -- we'll need it for completion */
859 cmdrsp->net.buf = skb;
860
861 if (vnic_hit_high_watermark(devdata,
862 devdata->max_outstanding_net_xmits)) {
863 /* extra NET_XMITs queued over to IOVM - need to wait */
864 devdata->chstat.reject_count++;
865 if (!devdata->queuefullmsg_logged &&
866 ((devdata->chstat.reject_count & 0x3ff) == 1))
867 devdata->queuefullmsg_logged = 1;
868 netif_stop_queue(netdev);
869 spin_unlock_irqrestore(&devdata->priv_lock, flags);
870 devdata->busy_cnt++;
871 dev_dbg(&netdev->dev,
872 "%s busy - waiting for iovm to catch up\n",
873 __func__);
874 kfree_skb(skb);
875 return NETDEV_TX_OK;
876 }
877 if (devdata->queuefullmsg_logged)
878 devdata->queuefullmsg_logged = 0;
879
880 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
881 cmdrsp->net.xmt.lincsum.valid = 1;
882 cmdrsp->net.xmt.lincsum.protocol = skb->protocol;
883 if (skb_transport_header(skb) > skb->data) {
884 cmdrsp->net.xmt.lincsum.hrawoff =
885 skb_transport_header(skb) - skb->data;
886 cmdrsp->net.xmt.lincsum.hrawoffv = 1;
887 }
888 if (skb_network_header(skb) > skb->data) {
889 cmdrsp->net.xmt.lincsum.nhrawoff =
890 skb_network_header(skb) - skb->data;
891 cmdrsp->net.xmt.lincsum.nhrawoffv = 1;
892 }
893 cmdrsp->net.xmt.lincsum.csum = skb->csum;
894 } else {
895 cmdrsp->net.xmt.lincsum.valid = 0;
896 }
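 /* For a typical IPv4/TCP frame the offsets sent here would be
  * nhrawoff = 14 (start of the IP header) and hrawoff = 34 (start of
  * the TCP header), both measured from skb->data.
  */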
897
898 /* save off the length of the entire data packet */
899 cmdrsp->net.xmt.len = len;
900
901 /* copy ethernet header from first frag into ocmdrsp
902 * - everything else will be pass in frags & DMA'ed
903 */
904 memcpy(cmdrsp->net.xmt.ethhdr, skb->data, ETH_HEADER_SIZE);
905 /* copy frags info - from skb->data we need to only provide access
906 * beyond eth header
907 */
908 cmdrsp->net.xmt.num_frags =
909 visor_copy_fragsinfo_from_skb(skb, firstfraglen,
910 MAX_PHYS_INFO,
911 cmdrsp->net.xmt.frags);
912 if (cmdrsp->net.xmt.num_frags < 0) {
913 spin_unlock_irqrestore(&devdata->priv_lock, flags);
914 devdata->busy_cnt++;
915 dev_err(&netdev->dev,
916 "%s busy - copy frags failed\n", __func__);
917 kfree_skb(skb);
918 return NETDEV_TX_OK;
919 }
920
921 if (!visorchannel_signalinsert(devdata->dev->visorchannel,
922 IOCHAN_TO_IOPART, cmdrsp)) {
923 netif_stop_queue(netdev);
924 spin_unlock_irqrestore(&devdata->priv_lock, flags);
925 devdata->busy_cnt++;
926 dev_dbg(&netdev->dev,
927 "%s busy - signalinsert failed\n", __func__);
928 kfree_skb(skb);
929 return NETDEV_TX_OK;
930 }
931
932 /* Track the skbs that have been sent to the IOVM for XMIT */
933 skb_queue_head(&devdata->xmitbufhead, skb);
934
935 /* update xmt stats */
936 devdata->net_stats.tx_packets++;
937 devdata->net_stats.tx_bytes += skb->len;
938 devdata->chstat.sent_xmit++;
939
940 /* check if we have hit the high watermark for netif_stop_queue() */
941 if (vnic_hit_high_watermark(devdata,
942 devdata->upper_threshold_net_xmits)) {
943 /* extra NET_XMITs queued over to IOVM - need to wait */
944 /* stop queue - call netif_wake_queue() after lower threshold */
945 netif_stop_queue(netdev);
946 dev_dbg(&netdev->dev,
947 "%s busy - invoking iovm flow control\n",
948 __func__);
949 devdata->flow_control_upper_hits++;
950 }
951 spin_unlock_irqrestore(&devdata->priv_lock, flags);
952
953 /* skb will be freed when we get back NET_XMIT_DONE */
954 return NETDEV_TX_OK;
955}
956
957/**
958 * visornic_get_stats - returns net_stats of the visornic device
959 * @netdev: netdevice
960 *
961 * Returns the net_device_stats for the device
962 */
963static struct net_device_stats *
964visornic_get_stats(struct net_device *netdev)
965{
966 struct visornic_devdata *devdata = netdev_priv(netdev);
967
968 return &devdata->net_stats;
969}
970
971/**
972 * visornic_change_mtu - changes mtu of device.
973 * @netdev: netdevice
974 * @new_mtu: value of new mtu
975 *
976 * MTU cannot be changed by system, must be changed via
977 * CONTROLVM message. All vnics and pnics in a switch have
978 * to have the same MTU for everything to work.
979 * Currently not supported.
980 * Returns -EINVAL
981 */
982static int
983visornic_change_mtu(struct net_device *netdev, int new_mtu)
984{
985 return -EINVAL;
986}
987
988/**
989 * visornic_set_multi - set promiscuous mode from the netdev flags.
990 * @netdev: netdevice
991 *
992 * Only flag we support currently is IFF_PROMISC
993 * Returns void
994 */
995static void
996visornic_set_multi(struct net_device *netdev)
997{
998 struct uiscmdrsp *cmdrsp;
999 struct visornic_devdata *devdata = netdev_priv(netdev);
1000
1001 if (devdata->old_flags == netdev->flags)
1002 return;
1003
1004 if ((netdev->flags & IFF_PROMISC) ==
1005 (devdata->old_flags & IFF_PROMISC))
1006 goto out_save_flags;
1007
1008 cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
1009 if (!cmdrsp)
1010 return;
1011 cmdrsp->cmdtype = CMD_NET_TYPE;
1012 cmdrsp->net.type = NET_RCV_PROMISC;
1013 cmdrsp->net.enbdis.context = netdev;
1014 cmdrsp->net.enbdis.enable =
1015 netdev->flags & IFF_PROMISC;
1016 visorchannel_signalinsert(devdata->dev->visorchannel,
1017 IOCHAN_TO_IOPART,
1018 cmdrsp);
1019 kfree(cmdrsp);
1020
1021out_save_flags:
1022 devdata->old_flags = netdev->flags;
1023}
1024
1025/**
1026 * visornic_xmit_timeout - request to timeout the xmit
1027 * @netdev: netdevice whose transmit timed out
1028 *
1029 * Queue the work and return. Make sure we have not already
1030 * been informed the IO Partition is gone, if it is gone
1031 * we will already timeout the xmits.
1032 */
1033static void
1034visornic_xmit_timeout(struct net_device *netdev)
1035{
1036 struct visornic_devdata *devdata = netdev_priv(netdev);
1037 unsigned long flags;
1038
1039 spin_lock_irqsave(&devdata->priv_lock, flags);
1040 if (devdata->going_away) {
1041 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1042 dev_dbg(&devdata->dev->device,
1043 "%s aborting because device removal pending\n",
1044 __func__);
1045 return;
1046 }
1047
1048 /* Ensure that a ServerDown message hasn't been received */
1049 if (!devdata->enabled ||
1050 (devdata->server_down && !devdata->server_change_state)) {
1051 dev_dbg(&netdev->dev, "%s no processing\n",
1052 __func__);
1053 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1054 return;
1055 }
1056 schedule_work(&devdata->timeout_reset);
1057 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1058}
1059
1060/**
1061 * repost_return - repost rcv bufs that have come back
1062 * @cmdrsp: io channel command struct to post
1063 * @devdata: visornic devdata for the device
1064 * @skb: skb
1065 * @netdev: netdevice
1066 *
1067 * Repost rcv buffers that have been returned to us when
1068 * we are finished with them.
1069 * Returns 0 for success, negative for error.
1070 */
1071static inline int
1072repost_return(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
1073 struct sk_buff *skb, struct net_device *netdev)
1074{
1075 struct net_pkt_rcv copy;
1076 int i = 0, cc, numreposted;
1077 int found_skb = 0;
1078 int status = 0;
1079
1080 copy = cmdrsp->net.rcv;
1081 switch (copy.numrcvbufs) {
1082 case 0:
1083 devdata->n_rcv0++;
1084 break;
1085 case 1:
1086 devdata->n_rcv1++;
1087 break;
1088 case 2:
1089 devdata->n_rcv2++;
1090 break;
1091 default:
1092 devdata->n_rcvx++;
1093 break;
1094 }
1095 for (cc = 0, numreposted = 0; cc < copy.numrcvbufs; cc++) {
1096 for (i = 0; i < devdata->num_rcv_bufs; i++) {
1097 if (devdata->rcvbuf[i] != copy.rcvbuf[cc])
1098 continue;
1099
1100 if ((skb) && devdata->rcvbuf[i] == skb) {
1101 devdata->found_repost_rcvbuf_cnt++;
1102 found_skb = 1;
1103 devdata->repost_found_skb_cnt++;
1104 }
1105 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
1106 if (!devdata->rcvbuf[i]) {
1107 devdata->num_rcv_bufs_could_not_alloc++;
1108 devdata->alloc_failed_in_repost_rtn_cnt++;
1109 status = -ENOMEM;
1110 break;
1111 }
1112 post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
1113 numreposted++;
1114 break;
1115 }
1116 }
1117 if (numreposted != copy.numrcvbufs) {
1118 devdata->n_repost_deficit++;
1119 status = -EINVAL;
1120 }
1121 if (skb) {
1122 if (found_skb) {
1123 kfree_skb(skb);
1124 } else {
1125 status = -EINVAL;
1126 devdata->bad_rcv_buf++;
1127 }
1128 }
1129 return status;
1130}
1131
1132/**
1133 * visornic_rx - Handle receive packets coming back from IO Part
1134 * @cmdrsp: Receive packet returned from IO Part
1135 *
1136 * Got a receive packet back from the IO Part, handle it and send
1137 * it up the stack.
1138 * Returns 1 iff an skb was received, otherwise 0
1139 */
1140static int
1141visornic_rx(struct uiscmdrsp *cmdrsp)
1142{
1143 struct visornic_devdata *devdata;
1144 struct sk_buff *skb, *prev, *curr;
1145 struct net_device *netdev;
1146 int cc, currsize, off;
1147 struct ethhdr *eth;
1148 unsigned long flags;
1149
1150 /* post new rcv buf to the other end using the cmdrsp we have at hand
1151 * post it without holding lock - but we'll use the signal lock to
1152 * synchronize the queue insert the cmdrsp that contains the net.rcv
1153 * is the one we are using to repost, so copy the info we need from it.
1154 */
1155 skb = cmdrsp->net.buf;
1156 netdev = skb->dev;
1157
1158 devdata = netdev_priv(netdev);
1159
1160 spin_lock_irqsave(&devdata->priv_lock, flags);
1161 atomic_dec(&devdata->num_rcvbuf_in_iovm);
1162
1163 /* set length to how much was ACTUALLY received -
1164 * NOTE: rcv_done_len includes actual length of data rcvd
1165 * including ethhdr
1166 */
1167 skb->len = cmdrsp->net.rcv.rcv_done_len;
1168
1169 /* update rcv stats - call it with priv_lock held */
1170 devdata->net_stats.rx_packets++;
1171 devdata->net_stats.rx_bytes += skb->len;
1172
1173 /* test enabled while holding lock */
1174 if (!(devdata->enabled && devdata->enab_dis_acked)) {
1175 /* don't process it unless we're in enable mode and until
1176 * we've gotten an ACK saying the other end got our RCV enable
1177 */
1178 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1179 repost_return(cmdrsp, devdata, skb, netdev);
1180 return 0;
1181 }
1182
1183 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1184
1185 /* when skb was allocated, skb->dev, skb->data, skb->len and
1186 * skb->data_len were setup. AND, data has already put into the
1187 * skb (both first frag and in frags pages)
1188 * NOTE: firstfragslen is the amount of data in skb->data and that
1189 * which is not in nr_frags or frag_list. This is now simply
1190 * RCVPOST_BUF_SIZE. bump tail to show how much data is in
1191 * firstfrag & set data_len to show rest see if we have to chain
1192 * frag_list.
1193 */
1194 if (skb->len > RCVPOST_BUF_SIZE) { /* do PRECAUTIONARY check */
1195 if (cmdrsp->net.rcv.numrcvbufs < 2) {
1196 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1197 dev_err(&devdata->netdev->dev,
1198 "repost_return failed");
1199 return 0;
1200 }
1201 /* length rcvd is greater than firstfrag in this skb rcv buf */
1202 skb->tail += RCVPOST_BUF_SIZE; /* amount in skb->data */
1203 skb->data_len = skb->len - RCVPOST_BUF_SIZE; /* amount that
1204 * will be in
1205 * frag_list
1206 */
1207 } else {
1208 /* data fits in this skb - no chaining - do
1209 * PRECAUTIONARY check
1210 */
1211 if (cmdrsp->net.rcv.numrcvbufs != 1) { /* should be 1 */
1212 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1213 dev_err(&devdata->netdev->dev,
1214 "repost_return failed");
1215 return 0;
1216 }
1217 skb->tail += skb->len;
1218 skb->data_len = 0; /* nothing rcvd in frag_list */
1219 }
1220 off = skb_tail_pointer(skb) - skb->data;
1221
1222 /* amount we bumped tail by in the head skb
1223 * it is used to calculate the size of each chained skb below
1224 * it is also used to index into bufline to continue the copy
1225 * (for chansocktwopc)
1226 * if necessary chain the rcv skbs together.
1227 * NOTE: index 0 has the same as cmdrsp->net.rcv.skb; we need to
1228 * chain the rest to that one.
1229 * - do PRECAUTIONARY check
1230 */
1231 if (cmdrsp->net.rcv.rcvbuf[0] != skb) {
1232 if (repost_return(cmdrsp, devdata, skb, netdev) < 0)
1233 dev_err(&devdata->netdev->dev, "repost_return failed");
1234 return 0;
1235 }
1236
1237 if (cmdrsp->net.rcv.numrcvbufs > 1) {
1238 /* chain the various rcv buffers into the skb's frag_list. */
1239 /* Note: off was initialized above */
1240 for (cc = 1, prev = NULL;
1241 cc < cmdrsp->net.rcv.numrcvbufs; cc++) {
1242 curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc];
1243 curr->next = NULL;
1244 if (!prev) /* start of list- set head */
1245 skb_shinfo(skb)->frag_list = curr;
1246 else
1247 prev->next = curr;
1248 prev = curr;
1249
1250 /* should we set skb->len and skb->data_len for each
1251 * buffer being chained??? can't hurt!
1252 */
1253 currsize = min(skb->len - off,
1254 (unsigned int)RCVPOST_BUF_SIZE);
1255 curr->len = currsize;
1256 curr->tail += currsize;
1257 curr->data_len = 0;
1258 off += currsize;
1259 }
1260 /* assert skb->len == off */
1261 if (skb->len != off) {
1262 netdev_err(devdata->netdev,
1263 "something wrong; skb->len:%d != off:%d\n",
1264 skb->len, off);
1265 }
1266 }
1267
1268 /* set up packet's protocol type using ethernet header - this
1269 * sets up skb->pkt_type & it also PULLS out the eth header
1270 */
1271 skb->protocol = eth_type_trans(skb, netdev);
1272
1273 eth = eth_hdr(skb);
1274
1275 skb->csum = 0;
1276 skb->ip_summed = CHECKSUM_NONE;
1277
1278 do {
1279 if (netdev->flags & IFF_PROMISC)
1280 break; /* accept all packets */
1281 if (skb->pkt_type == PACKET_BROADCAST) {
1282 if (netdev->flags & IFF_BROADCAST)
1283 break; /* accept all broadcast packets */
1284 } else if (skb->pkt_type == PACKET_MULTICAST) {
1285 if ((netdev->flags & IFF_MULTICAST) &&
1286 (netdev_mc_count(netdev))) {
1287 struct netdev_hw_addr *ha;
1288 int found_mc = 0;
1289
1290 /* only accept multicast packets that we can
1291 * find in our multicast address list
1292 */
1293 netdev_for_each_mc_addr(ha, netdev) {
1294 if (ether_addr_equal(eth->h_dest,
1295 ha->addr)) {
1296 found_mc = 1;
1297 break;
1298 }
1299 }
1300 /* accept pkt, dest matches a multicast addr */
1301 if (found_mc)
1302 break;
1303 }
1304 /* accept packet, h_dest must match vnic mac address */
1305 } else if (skb->pkt_type == PACKET_HOST) {
1306 break;
1307 } else if (skb->pkt_type == PACKET_OTHERHOST) {
1308 /* something is not right */
1309 dev_err(&devdata->netdev->dev,
1310 "**** FAILED to deliver rcv packet to OS; name:%s Dest:%pM VNIC:%pM\n",
1311 netdev->name, eth->h_dest, netdev->dev_addr);
1312 }
1313 /* drop packet - don't forward it up to OS */
1314 devdata->n_rcv_packets_not_accepted++;
1315 repost_return(cmdrsp, devdata, skb, netdev);
1316 return 0;
1317 } while (0);
1318
1319 netif_receive_skb(skb);
1320 /* netif_receive_skb returns various values, but "in practice most
1321 * drivers ignore the return value"
1322 */
1323
1324 skb = NULL;
1325 /*
1326 * whether the packet got dropped or handled, the skb is freed by
1327 * kernel code, so we shouldn't free it. but we should repost a
1328 * new rcv buffer.
1329 */
1330 repost_return(cmdrsp, devdata, skb, netdev);
1331 return 1;
1332}
1333
1334/**
1335 * devdata_initialize - Initialize devdata structure
1336 * @devdata: visornic_devdata structure to initialize
1337 * @dev: visor_device it belongs to
1338 *
1339 * Setup initial values for the visornic based on channel and default
1340 * values.
1341 * Returns a pointer to the devdata structure
1342 */
1343static struct visornic_devdata *
1344devdata_initialize(struct visornic_devdata *devdata, struct visor_device *dev)
1345{
1346 devdata->dev = dev;
1347 devdata->incarnation_id = get_jiffies_64();
1348 return devdata;
1349}
1350
1351/**
1352 * devdata_release - Frees up references in devdata
1353 * @devdata: struct to clean up
1354 *
1355 * Frees up references in devdata.
1356 * Returns void
1357 */
1358static void devdata_release(struct visornic_devdata *devdata)
1359{
1360 kfree(devdata->rcvbuf);
1361 kfree(devdata->cmdrsp_rcv);
1362 kfree(devdata->xmit_cmdrsp);
1363}
1364
1365static const struct net_device_ops visornic_dev_ops = {
1366 .ndo_open = visornic_open,
1367 .ndo_stop = visornic_close,
1368 .ndo_start_xmit = visornic_xmit,
1369 .ndo_get_stats = visornic_get_stats,
1370 .ndo_change_mtu = visornic_change_mtu,
1371 .ndo_tx_timeout = visornic_xmit_timeout,
1372 .ndo_set_rx_mode = visornic_set_multi,
1373};
1374
1375/* DebugFS code */
1376static ssize_t info_debugfs_read(struct file *file, char __user *buf,
1377 size_t len, loff_t *offset)
1378{
1379 ssize_t bytes_read = 0;
1380 int str_pos = 0;
1381 struct visornic_devdata *devdata;
1382 struct net_device *dev;
1383 char *vbuf;
1384
1385 if (len > MAX_BUF)
1386 len = MAX_BUF;
1387 vbuf = kzalloc(len, GFP_KERNEL);
1388 if (!vbuf)
1389 return -ENOMEM;
1390
1391 /* for each vnic channel dump out channel specific data */
1392 rcu_read_lock();
1393 for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
1394 /* Only consider netdevs that are visornic, and are open */
1395 if ((dev->netdev_ops != &visornic_dev_ops) ||
1396 (!netif_queue_stopped(dev)))
1397 continue;
1398
1399 devdata = netdev_priv(dev);
1400 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1401 "netdev = %s (0x%p), MAC Addr %pM\n",
1402 dev->name,
1403 dev,
1404 dev->dev_addr);
1405 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1406 "VisorNic Dev Info = 0x%p\n", devdata);
1407 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1408 " num_rcv_bufs = %d\n",
1409 devdata->num_rcv_bufs);
1410 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1411 " max_outstanding_net_xmits = %lu\n",
1412 devdata->max_outstanding_net_xmits);
1413 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1414 " upper_threshold_net_xmits = %lu\n",
1415 devdata->upper_threshold_net_xmits);
1416 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1417 " lower_threshold_net_xmits = %lu\n",
1418 devdata->lower_threshold_net_xmits);
1419 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1420 " queuefullmsg_logged = %d\n",
1421 devdata->queuefullmsg_logged);
1422 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1423 " chstat.got_rcv = %lu\n",
1424 devdata->chstat.got_rcv);
1425 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1426 " chstat.got_enbdisack = %lu\n",
1427 devdata->chstat.got_enbdisack);
1428 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1429 " chstat.got_xmit_done = %lu\n",
1430 devdata->chstat.got_xmit_done);
1431 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1432 " chstat.xmit_fail = %lu\n",
1433 devdata->chstat.xmit_fail);
1434 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1435 " chstat.sent_enbdis = %lu\n",
1436 devdata->chstat.sent_enbdis);
1437 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1438 " chstat.sent_promisc = %lu\n",
1439 devdata->chstat.sent_promisc);
1440 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1441 " chstat.sent_post = %lu\n",
1442 devdata->chstat.sent_post);
1443 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1444 " chstat.sent_post_failed = %lu\n",
1445 devdata->chstat.sent_post_failed);
1446 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1447 " chstat.sent_xmit = %lu\n",
1448 devdata->chstat.sent_xmit);
1449 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1450 " chstat.reject_count = %lu\n",
1451 devdata->chstat.reject_count);
1452 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1453 " chstat.extra_rcvbufs_sent = %lu\n",
1454 devdata->chstat.extra_rcvbufs_sent);
1455 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1456 " n_rcv0 = %lu\n", devdata->n_rcv0);
1457 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1458 " n_rcv1 = %lu\n", devdata->n_rcv1);
1459 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1460 " n_rcv2 = %lu\n", devdata->n_rcv2);
1461 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1462 " n_rcvx = %lu\n", devdata->n_rcvx);
1463 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1464 " num_rcvbuf_in_iovm = %d\n",
1465 atomic_read(&devdata->num_rcvbuf_in_iovm));
1466 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1467 " alloc_failed_in_if_needed_cnt = %lu\n",
1468 devdata->alloc_failed_in_if_needed_cnt);
1469 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1470 " alloc_failed_in_repost_rtn_cnt = %lu\n",
1471 devdata->alloc_failed_in_repost_rtn_cnt);
1472 /* str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1473 * " inner_loop_limit_reached_cnt = %lu\n",
1474 * devdata->inner_loop_limit_reached_cnt);
1475 */
1476 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1477 " found_repost_rcvbuf_cnt = %lu\n",
1478 devdata->found_repost_rcvbuf_cnt);
1479 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1480 " repost_found_skb_cnt = %lu\n",
1481 devdata->repost_found_skb_cnt);
1482 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1483 " n_repost_deficit = %lu\n",
1484 devdata->n_repost_deficit);
1485 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1486 " bad_rcv_buf = %lu\n",
1487 devdata->bad_rcv_buf);
1488 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1489 " n_rcv_packets_not_accepted = %lu\n",
1490 devdata->n_rcv_packets_not_accepted);
1491 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1492 " interrupts_rcvd = %llu\n",
1493 devdata->interrupts_rcvd);
1494 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1495 " interrupts_notme = %llu\n",
1496 devdata->interrupts_notme);
1497 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1498 " interrupts_disabled = %llu\n",
1499 devdata->interrupts_disabled);
1500 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1501 " busy_cnt = %llu\n",
1502 devdata->busy_cnt);
1503 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1504 " flow_control_upper_hits = %llu\n",
1505 devdata->flow_control_upper_hits);
1506 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1507 " flow_control_lower_hits = %llu\n",
1508 devdata->flow_control_lower_hits);
1509 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1510 " netif_queue = %s\n",
1511 netif_queue_stopped(devdata->netdev) ?
1512 "stopped" : "running");
1513 str_pos += scnprintf(vbuf + str_pos, len - str_pos,
1514 " xmits_outstanding = %lu\n",
1515 devdata_xmits_outstanding(devdata));
1516 }
1517 rcu_read_unlock();
1518 bytes_read = simple_read_from_buffer(buf, len, offset, vbuf, str_pos);
1519 kfree(vbuf);
1520 return bytes_read;
1521}
1522
1523/**
1524 * send_rcv_posts_if_needed
1525 * @devdata: visornic device
1526 *
1527 * Send receive buffers to the IO Partition.
1528 * Returns void
1529 */
1530static void
1531send_rcv_posts_if_needed(struct visornic_devdata *devdata)
1532{
1533 int i;
1534 struct net_device *netdev;
1535 struct uiscmdrsp *cmdrsp = devdata->cmdrsp_rcv;
1536 int cur_num_rcv_bufs_to_alloc, rcv_bufs_allocated;
1537
1538 /* don't do this until vnic is marked ready */
1539 if (!(devdata->enabled && devdata->enab_dis_acked))
1540 return;
1541
1542 netdev = devdata->netdev;
1543 rcv_bufs_allocated = 0;
1544 /* this code is trying to prevent getting stuck here forever,
1545 * but still retry it if you can't allocate them all this time.
1546 */
1547 cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
1548 while (cur_num_rcv_bufs_to_alloc > 0) {
1549 cur_num_rcv_bufs_to_alloc--;
1550 for (i = 0; i < devdata->num_rcv_bufs; i++) {
1551 if (devdata->rcvbuf[i])
1552 continue;
1553 devdata->rcvbuf[i] = alloc_rcv_buf(netdev);
1554 if (!devdata->rcvbuf[i]) {
1555 devdata->alloc_failed_in_if_needed_cnt++;
1556 break;
1557 }
1558 rcv_bufs_allocated++;
1559 post_skb(cmdrsp, devdata, devdata->rcvbuf[i]);
1560 devdata->chstat.extra_rcvbufs_sent++;
1561 }
1562 }
1563 devdata->num_rcv_bufs_could_not_alloc -= rcv_bufs_allocated;
1564}
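/* Design note: num_rcv_bufs_could_not_alloc acts as a retry budget --
 * each call tries to backfill at most that many buffers, so a
 * transient allocation failure heals over successive NAPI passes
 * rather than looping here indefinitely.
 */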
1565
1566/**
1567 * drain_resp_queue - drains and ignores all messages from the resp queue
1568 * @cmdrsp: io channel command response message
1569 * @devdata: visornic device to drain
1570 */
1571static void
1572drain_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata)
1573{
1574 while (visorchannel_signalremove(devdata->dev->visorchannel,
1575 IOCHAN_FROM_IOPART,
1576 cmdrsp))
1577 ;
1578}
1579
1580/**
1581 * service_resp_queue - drains the response queue
1582 * @cmdrsp: io channel command response message
1583 * @devdata: visornic device to drain
1584 *
1585 * Drain the response queue of any responses from the IO partition.
1586 * Process the responses as we get them.
1587 * Returns when the response queue is empty or the budget is exhausted.
1588 */
1589static void
1590service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
1591 int *rx_work_done, int budget)
1592{
1593 unsigned long flags;
1594 struct net_device *netdev;
1595
1596 while (*rx_work_done < budget) {
1597 /* TODO: CLIENT ACQUIRE -- Don't really need this at the
1598 * moment
1599 */
1600 if (!visorchannel_signalremove(devdata->dev->visorchannel,
1601 IOCHAN_FROM_IOPART,
1602 cmdrsp))
1603 break; /* queue empty */
1604
1605 switch (cmdrsp->net.type) {
1606 case NET_RCV:
1607 devdata->chstat.got_rcv++;
1608 /* process incoming packet */
1609 *rx_work_done += visornic_rx(cmdrsp);
1610 break;
1611 case NET_XMIT_DONE:
1612 spin_lock_irqsave(&devdata->priv_lock, flags);
1613 devdata->chstat.got_xmit_done++;
1614 if (cmdrsp->net.xmtdone.xmt_done_result)
1615 devdata->chstat.xmit_fail++;
1616 /* only call queue wake if we stopped it */
1617 netdev = ((struct sk_buff *)cmdrsp->net.buf)->dev;
1618 /* ASSERT netdev == vnicinfo->netdev; */
1619 if ((netdev == devdata->netdev) &&
1620 netif_queue_stopped(netdev)) {
1621 /* check if we have crossed the lower watermark
1622 * for netif_wake_queue()
1623 */
1624 if (vnic_hit_low_watermark
1625 (devdata,
1626 devdata->lower_threshold_net_xmits)) {
1627 /* enough NET_XMITs completed
1628 * so can restart netif queue
1629 */
1630 netif_wake_queue(netdev);
1631 devdata->flow_control_lower_hits++;
1632 }
1633 }
1634 skb_unlink(cmdrsp->net.buf, &devdata->xmitbufhead);
1635 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1636 kfree_skb(cmdrsp->net.buf);
1637 break;
1638 case NET_RCV_ENBDIS_ACK:
1639 devdata->chstat.got_enbdisack++;
1640 netdev = (struct net_device *)
1641 cmdrsp->net.enbdis.context;
1642 spin_lock_irqsave(&devdata->priv_lock, flags);
1643 devdata->enab_dis_acked = 1;
1644 spin_unlock_irqrestore(&devdata->priv_lock, flags);
1645
1646 if (devdata->server_down &&
1647 devdata->server_change_state) {
1648 /* Inform Linux that the link is up */
1649 devdata->server_down = false;
1650 devdata->server_change_state = false;
1651 netif_wake_queue(netdev);
1652 netif_carrier_on(netdev);
1653 }
1654 break;
1655 case NET_CONNECT_STATUS:
1656 netdev = devdata->netdev;
1657 if (cmdrsp->net.enbdis.enable == 1) {
1658 spin_lock_irqsave(&devdata->priv_lock, flags);
1659 devdata->enabled = cmdrsp->net.enbdis.enable;
1660 spin_unlock_irqrestore(&devdata->priv_lock,
1661 flags);
1662 netif_wake_queue(netdev);
1663 netif_carrier_on(netdev);
1664 } else {
1665 netif_stop_queue(netdev);
1666 netif_carrier_off(netdev);
1667 spin_lock_irqsave(&devdata->priv_lock, flags);
1668 devdata->enabled = cmdrsp->net.enbdis.enable;
1669 spin_unlock_irqrestore(&devdata->priv_lock,
1670 flags);
1671 }
1672 break;
1673 default:
1674 break;
87a9404e 1675 }
7c03621a 1676 /* cmdrsp is now available for reuse */
68905a14
DK
1677 }
1678}
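
/*
 * Flow-control pairing: the transmit path (earlier in this file) stops
 * the netif queue once outstanding NET_XMITs reach
 * upper_threshold_net_xmits; the NET_XMIT_DONE handling above re-wakes
 * it only after completions drain us down to lower_threshold_net_xmits.
 * Keeping the two thresholds apart prevents the queue from thrashing
 * on/off at a single boundary.
 */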

static int visornic_poll(struct napi_struct *napi, int budget)
{
	struct visornic_devdata *devdata = container_of(napi,
							struct visornic_devdata,
							napi);
	int rx_count = 0;

	send_rcv_posts_if_needed(devdata);
	service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);

	/* If there aren't any more packets to receive, stop the poll */
	if (rx_count < budget)
		napi_complete(napi);

	return rx_count;
}

/**
 * poll_for_irq - Checks the status of the response queue.
 * @v: void pointer to the visornic devdata
 *
 * Timer function that stands in for a real interrupt: it periodically
 * checks the response queue, schedules the napi poll if the queue is
 * not empty, and then re-arms itself.
 */
static void
poll_for_irq(unsigned long v)
{
	struct visornic_devdata *devdata = (struct visornic_devdata *)v;

	if (!visorchannel_signalempty(
				   devdata->dev->visorchannel,
				   IOCHAN_FROM_IOPART))
		napi_schedule(&devdata->napi);

	atomic_set(&devdata->interrupt_rcvd, 0);

	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));
}

/**
 * visornic_probe - probe function for visornic devices
 * @dev: The visor device discovered
 *
 * Called when visorbus discovers a visornic device on its
 * bus. It creates a new visornic ethernet adapter.
 * Returns 0 or negative for error.
 */
static int visornic_probe(struct visor_device *dev)
{
	struct visornic_devdata *devdata = NULL;
	struct net_device *netdev = NULL;
	int err;
	int channel_offset = 0;
	u64 features;

	netdev = alloc_etherdev(sizeof(struct visornic_devdata));
	if (!netdev) {
		dev_err(&dev->device,
			"%s alloc_etherdev failed\n", __func__);
		return -ENOMEM;
	}

	netdev->netdev_ops = &visornic_dev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	SET_NETDEV_DEV(netdev, &dev->device);

	/* Get MAC address from channel and read it into the device. */
	netdev->addr_len = ETH_ALEN;
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vnic.macaddr);
	err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
				    ETH_ALEN);
	if (err < 0) {
		dev_err(&dev->device,
			"%s failed to get mac addr from chan (%d)\n",
			__func__, err);
		goto cleanup_netdev;
	}
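
	/*
	 * All device configuration (MAC address above, receive-buffer
	 * count, MTU, and feature bits below) is read out of the shared
	 * IO channel with visorbus_read_channel() at offsetof() positions
	 * within struct spar_io_channel_protocol.
	 */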

	devdata = devdata_initialize(netdev_priv(netdev), dev);
	if (!devdata) {
		dev_err(&dev->device,
			"%s devdata_initialize failed\n", __func__);
		err = -ENOMEM;
		goto cleanup_netdev;
	}
	/* don't trust messages lying around in the channel */
	drain_resp_queue(devdata->cmdrsp, devdata);

	devdata->netdev = netdev;
	dev_set_drvdata(&dev->device, devdata);
	init_waitqueue_head(&devdata->rsp_queue);
	spin_lock_init(&devdata->priv_lock);
	devdata->enabled = 0; /* not yet */
	atomic_set(&devdata->usage, 1);

	/* Setup rcv bufs */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vnic.num_rcv_bufs);
	err = visorbus_read_channel(dev, channel_offset,
				    &devdata->num_rcv_bufs, 4);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get #rcv bufs from chan (%d)\n",
			__func__, err);
		goto cleanup_netdev;
	}

	devdata->rcvbuf = kcalloc(devdata->num_rcv_bufs,
				  sizeof(struct sk_buff *), GFP_KERNEL);
	if (!devdata->rcvbuf) {
		err = -ENOMEM;
		goto cleanup_netdev;
	}

	/* Set the net_xmit outstanding thresholds.
	 * Always leave two slots open, but you should have 3 at a minimum;
	 * note that max_outstanding_net_xmits must be > 0.
	 */
	devdata->max_outstanding_net_xmits =
		max_t(unsigned long, 3, ((devdata->num_rcv_bufs / 3) - 2));
	devdata->upper_threshold_net_xmits =
		max_t(unsigned long,
		      2, (devdata->max_outstanding_net_xmits - 1));
	devdata->lower_threshold_net_xmits =
		max_t(unsigned long,
		      1, (devdata->max_outstanding_net_xmits / 2));
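	/*
	 * Worked example with illustrative numbers: if num_rcv_bufs is 64,
	 * max_outstanding_net_xmits = max(3, 64/3 - 2) = 19, so the upper
	 * threshold is 18 and the lower threshold is 9; the transmit path
	 * stops the queue around 18 outstanding sends and
	 * service_resp_queue() re-wakes it once completions bring that
	 * count back down to the lower threshold.
	 */
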
	skb_queue_head_init(&devdata->xmitbufhead);

	/* create a cmdrsp we can use to post and unpost rcv buffers */
	devdata->cmdrsp_rcv = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
	if (!devdata->cmdrsp_rcv) {
		err = -ENOMEM;
		goto cleanup_rcvbuf;
	}
	devdata->xmit_cmdrsp = kmalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
	if (!devdata->xmit_cmdrsp) {
		err = -ENOMEM;
		goto cleanup_cmdrsp_rcv;
	}
	INIT_WORK(&devdata->timeout_reset, visornic_timeout_reset);
	devdata->server_down = false;
	devdata->server_change_state = false;

	/* set the default MTU */
	channel_offset = offsetof(struct spar_io_channel_protocol,
				  vnic.mtu);
	err = visorbus_read_channel(dev, channel_offset, &netdev->mtu, 4);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get mtu from chan (%d)\n",
			__func__, err);
		goto cleanup_xmit_cmdrsp;
	}

	/* TODO: Setup Interrupt information */
	/* Let's start our NAPI poll to get responses */
	netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);

	setup_timer(&devdata->irq_poll_timer, poll_for_irq,
		    (unsigned long)devdata);
	/* Note: This timer has to start running before the device can
	 * be opened because the napi poll routine is responsible for
	 * setting enab_dis_acked
	 */
	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));

	channel_offset = offsetof(struct spar_io_channel_protocol,
				  channel_header.features);
	err = visorbus_read_channel(dev, channel_offset, &features, 8);
	if (err) {
		dev_err(&dev->device,
			"%s failed to get features from chan (%d)\n",
			__func__, err);
		goto cleanup_napi_add;
	}

	features |= ULTRA_IO_CHANNEL_IS_POLLING;
	features |= ULTRA_IO_DRIVER_SUPPORTS_ENHANCED_RCVBUF_CHECKING;
	err = visorbus_write_channel(dev, channel_offset, &features, 8);
	if (err) {
		dev_err(&dev->device,
			"%s failed to set features in chan (%d)\n",
			__func__, err);
		goto cleanup_napi_add;
	}
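
	/*
	 * ULTRA_IO_CHANNEL_IS_POLLING tells the IO partition that this
	 * driver polls the channel (via irq_poll_timer) instead of taking
	 * interrupts; ULTRA_IO_DRIVER_SUPPORTS_ENHANCED_RCVBUF_CHECKING,
	 * as its name suggests, opts in to extra receive-buffer validation
	 * on the IO-partition side.
	 */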
	err = register_netdev(netdev);
	if (err) {
		dev_err(&dev->device,
			"%s register_netdev failed (%d)\n", __func__, err);
		goto cleanup_napi_add;
	}

	/* create debugfs directories */
	devdata->eth_debugfs_dir = debugfs_create_dir(netdev->name,
						      visornic_debugfs_dir);
	if (!devdata->eth_debugfs_dir) {
		dev_err(&dev->device,
			"%s debugfs_create_dir %s failed\n",
			__func__, netdev->name);
		err = -ENOMEM;
		goto cleanup_register_netdev;
	}

	dev_info(&dev->device, "%s success netdev=%s\n",
		 __func__, netdev->name);
	return 0;

cleanup_register_netdev:
	unregister_netdev(netdev);

cleanup_napi_add:
	del_timer_sync(&devdata->irq_poll_timer);
	netif_napi_del(&devdata->napi);

cleanup_xmit_cmdrsp:
	kfree(devdata->xmit_cmdrsp);

cleanup_cmdrsp_rcv:
	kfree(devdata->cmdrsp_rcv);

cleanup_rcvbuf:
	kfree(devdata->rcvbuf);

cleanup_netdev:
	free_netdev(netdev);
	return err;
}
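
/*
 * The cleanup labels above unwind in reverse order of setup, the
 * standard kernel goto-unwind pattern: a failure at any step in
 * visornic_probe() jumps to the label that releases only what had
 * already been allocated.
 */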

/**
 * host_side_disappeared - IO part is gone.
 * @devdata: device object
 *
 * The IO partition servicing this device is gone; do cleanup.
 * Returns void.
 */
static void host_side_disappeared(struct visornic_devdata *devdata)
{
	unsigned long flags;

	spin_lock_irqsave(&devdata->priv_lock, flags);
	devdata->dev = NULL; /* indicate device destroyed */
	spin_unlock_irqrestore(&devdata->priv_lock, flags);
}

/**
 * visornic_remove - Called when visornic dev goes away
 * @dev: visornic device that is being removed
 *
 * Called when DEVICE_DESTROY gets called to remove device.
 * Returns void.
 */
static void visornic_remove(struct visor_device *dev)
{
	struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);
	struct net_device *netdev;
	unsigned long flags;

	if (!devdata) {
		dev_err(&dev->device, "%s no devdata\n", __func__);
		return;
	}
	spin_lock_irqsave(&devdata->priv_lock, flags);
	if (devdata->going_away) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		dev_err(&dev->device, "%s already being removed\n", __func__);
		return;
	}
	devdata->going_away = true;
	spin_unlock_irqrestore(&devdata->priv_lock, flags);
	netdev = devdata->netdev;
	if (!netdev) {
		dev_err(&dev->device, "%s no net device\n", __func__);
		return;
	}

	/* going_away prevents new items being added to the workqueues */
	cancel_work_sync(&devdata->timeout_reset);

	debugfs_remove_recursive(devdata->eth_debugfs_dir);

	unregister_netdev(netdev); /* this will call visornic_close() */

	del_timer_sync(&devdata->irq_poll_timer);
	netif_napi_del(&devdata->napi);

	dev_set_drvdata(&dev->device, NULL);
	host_side_disappeared(devdata);
	devdata_release(devdata);
	free_netdev(netdev);
}
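
/*
 * Teardown ordering: the reset work and netdev are shut down before the
 * poll timer and NAPI context, and free_netdev() comes last because
 * devdata lives in the netdev's private area (netdev_priv()).
 */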

/**
 * visornic_pause - Called when IO Part disappears
 * @dev: visornic device that is being serviced
 * @complete_func: call when finished.
 *
 * Called when the IO Partition has gone down. Need to free
 * up resources and wait for the IO partition to come back. Mark
 * link as down and don't attempt any DMA. When we have freed
 * memory, call the complete_func so that Command knows we are
 * done. If we don't call complete_func, the IO part will never
 * come back.
 * Returns 0 for success.
 */
static int visornic_pause(struct visor_device *dev,
			  visorbus_state_complete_func complete_func)
{
	struct visornic_devdata *devdata = dev_get_drvdata(&dev->device);

	visornic_serverdown(devdata, complete_func);
	return 0;
}

/**
 * visornic_resume - Called when IO part has recovered
 * @dev: visornic device that is being serviced
 * @complete_func: call when finished
 *
 * Called when the IO partition has recovered. Reestablish
 * connection to the IO part and set the link up. Okay to do
 * DMA again.
 * Returns 0 for success.
 */
static int visornic_resume(struct visor_device *dev,
			   visorbus_state_complete_func complete_func)
{
	struct visornic_devdata *devdata;
	struct net_device *netdev;
	unsigned long flags;

	devdata = dev_get_drvdata(&dev->device);
	if (!devdata) {
		dev_err(&dev->device, "%s no devdata\n", __func__);
		return -EINVAL;
	}

	netdev = devdata->netdev;

	spin_lock_irqsave(&devdata->priv_lock, flags);
	if (devdata->server_change_state) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		dev_err(&dev->device, "%s server already changing state\n",
			__func__);
		return -EINVAL;
	}
	if (!devdata->server_down) {
		spin_unlock_irqrestore(&devdata->priv_lock, flags);
		dev_err(&dev->device, "%s server not down\n", __func__);
		complete_func(dev, 0);
		return 0;
	}
	devdata->server_change_state = true;
	spin_unlock_irqrestore(&devdata->priv_lock, flags);

	/* Must transition channel to ATTACHED state BEFORE
	 * we can start using the device again.
	 * TODO: State transitions
	 */
	mod_timer(&devdata->irq_poll_timer, jiffies + msecs_to_jiffies(2));

	init_rcv_bufs(netdev, devdata);

	rtnl_lock();
	dev_open(netdev);
	rtnl_unlock();

	complete_func(dev, 0);
	return 0;
}
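
/*
 * dev_open() must be called with the RTNL held, hence the
 * rtnl_lock()/rtnl_unlock() pair above; opening the device re-runs the
 * driver's ndo_open path, which re-enables the device against the
 * recovered IO partition.
 */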

/**
 * visornic_init - Init function
 *
 * Init function for the visornic driver. Do initial driver setup
 * and wait for devices.
 * Returns 0 for success, negative for error.
 */
static int visornic_init(void)
{
	struct dentry *ret;
	int err = -ENOMEM;

	visornic_debugfs_dir = debugfs_create_dir("visornic", NULL);
	if (!visornic_debugfs_dir)
		return err;

	ret = debugfs_create_file("info", S_IRUSR, visornic_debugfs_dir, NULL,
				  &debugfs_info_fops);
	if (!ret)
		goto cleanup_debugfs;
	ret = debugfs_create_file("enable_ints", S_IWUSR, visornic_debugfs_dir,
				  NULL, &debugfs_enable_ints_fops);
	if (!ret)
		goto cleanup_debugfs;

	err = visorbus_register_visor_driver(&visornic_driver);
	if (err)
		goto cleanup_debugfs;

	return 0;

cleanup_debugfs:
	debugfs_remove_recursive(visornic_debugfs_dir);

	return err;
}
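
/*
 * The debugfs directory is created before the driver is registered so
 * that visornic_probe(), which can run as soon as registration
 * completes, can create its per-device directory underneath
 * visornic_debugfs_dir.
 */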

/**
 * visornic_cleanup - driver exit routine
 *
 * Unregister driver from the bus and free up memory.
 */
static void visornic_cleanup(void)
{
	visorbus_unregister_visor_driver(&visornic_driver);

	debugfs_remove_recursive(visornic_debugfs_dir);
}

module_init(visornic_init);
module_exit(visornic_cleanup);

MODULE_AUTHOR("Unisys");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("sPAR nic driver for sparlinux: ver 1.0.0.0");
MODULE_VERSION("1.0.0.0");