/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.05"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, S_IRUGO);
MODULE_PARM_DESC(old_large_send,
	"Use old large send method on firmware that supports the new method");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

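/*
 * Flush the cache lines backing a receive buffer, one SMP_CACHE_BYTES
 * line at a time, so no stale cached data is handed up with a frame
 * (used when the rx_flush module parameter is set).
 */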
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/*
 * The final 8 bytes of the buffer list are a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

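/* remove the current rx queue entry's buffer from its pool and advance
 * the queue index, flipping the toggle on wrap */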
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

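/*
 * Undo everything ibmveth_open() set up: unmap and free the buffer and
 * filter list pages, the receive queue, any active buffer pools and the
 * bounce buffer.  Also used for cleanup on error paths.
 */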
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->bounce_buffer_dma,
					adapter->netdev->mtu + IBMVETH_BUFF_OH,
					DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

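/* pack a 6-byte MAC address into the low 48 bits of a u64, MSB first */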
static u64 ibmveth_encode_mac_addr(u8 *mac)
{
	int i;
	u64 encoded = 0;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];

	return encoded;
}

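/*
 * open: allocate the buffer and filter list pages and the receive queue,
 * register the logical LAN with the hypervisor, allocate the active
 * buffer pools and the bounce buffer, then request the interrupt and
 * start the queue.
 */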
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr) {
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	ibmveth_update_rx_no_buffer(adapter);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_CSUM_MASK;

	return features;
}

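/*
 * Toggle IPv4/IPv6 TCP checksum offload through the H_ILLAN_ATTRIBUTES
 * hcall, closing and reopening a running interface around the change
 * and backing the attribute out again on partial failure.
 */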
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

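/*
 * Enable or disable large send (TSO) offload.  Firmware that advertises
 * IBMVETH_ILLAN_LRG_SND_SUPPORT takes the MSS via h_send_logical_lan;
 * otherwise the old large send method is used, which does not support
 * IPv6, so TSO6 is cleared in that case.
 */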
static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;

		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* Older firmware version of large send offload does not
		 * support tcp6/ipv6
		 */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
	int rc1 = 0, rc2 = 0;

	if (rx_csum != adapter->rx_csum) {
		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
		if (rc1 && !adapter->rx_csum)
			dev->features =
				features & ~(NETIF_F_CSUM_MASK |
					     NETIF_F_RXCSUM);
	}

	if (large_send != adapter->large_send) {
		rc2 = ibmveth_set_tso(dev, large_send);
		if (rc2 && !adapter->large_send)
			dev->features =
				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

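/*
 * Hand up to six buffer descriptors to the hypervisor, retrying while
 * it returns H_BUSY.  Returns 0 on success, non-zero on failure.
 */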
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs, unsigned long mss)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator, mss,
					 adapter->fw_large_send_support);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

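/*
 * Transmit path: small linear packets are copied into the preallocated
 * bounce buffer to avoid TCE map/unmap overhead; larger packets are
 * DMA-mapped in place (header plus up to five frags) and unmapped once
 * the send hcall returns.
 */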
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;
	unsigned long mss = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb_is_gso(skb) && adapter->fw_large_send_support)
		desc_flags |= IBMVETH_BUF_LRG_SND;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs, 0)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (skb_is_gso(skb)) {
		if (adapter->fw_large_send_support) {
			mss = (unsigned long)skb_shinfo(skb)->gso_size;
			adapter->tx_large_packets++;
		} else if (!skb_is_gso_v6(skb)) {
			/* Put -1 in the IP checksum to tell phyp it
			 * is a largesend packet. Put the mss in
			 * the TCP checksum.
			 */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
	}

	if (ibmveth_send(adapter, descs, mss)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}

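/*
 * NAPI poll: process up to @budget frames from the receive queue,
 * recycling or harvesting each buffer, then replenish the pools and
 * re-enable interrupts once the queue is drained.
 */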
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded
					 * and if the packet is large send, the
					 * checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
						adapter->rx_large_packets++;
					}
				}
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

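/* interrupt handler: disable further device interrupts and kick NAPI */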
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			u64 mcast_addr;
			mcast_addr = ibmveth_encode_mac_addr(ha->addr);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

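/*
 * An MTU change is only accepted if some buffer pool can hold the new
 * frame size; pools are re-activated as needed and a running interface
 * is restarted to pick up the new configuration.
 */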
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size, tbl);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(
	    rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);

	return ret;
}

static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct sockaddr *addr = p;
	u64 mac_address;
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	mac_address = ibmveth_encode_mac_addr(addr->sa_data);
	rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
	if (rc) {
		netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
		return rc;
	}

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i, mac_len;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;
	long ret;
	unsigned long ret_attr;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							&mac_len);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}
	/* Workaround for old/broken pHyp */
	if (mac_len == 8)
		mac_addr_p += 2;
	else if (mac_len != 6) {
		dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
			mac_len);
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->features |= netdev->hw_features;

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	/* If running older firmware, TSO should not be enabled by default */
	if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		netdev->features |= netdev->hw_features;
	} else {
		netdev->hw_features |= NETIF_F_TSO;
	}

	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);

	if (firmware_has_feature(FW_FEATURE_CMO))
		memcpy(pool_count, pool_count_cmo, sizeof(pool_count));

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

static int ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

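/*
 * sysfs store for the per-pool active/num/size attributes.  Resizing or
 * (de)activating a pool on a running interface closes and reopens the
 * device so the new pool geometry is registered with firmware.
 */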
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}


#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};

static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);