/*
 * IBM eServer i/pSeries Virtual Ethernet Device Driver
 * Copyright (C) 2003 IBM Corp.
 * Originally written by Dave Larson (larson1@us.ibm.com)
 * Maintained by Santiago Leon (santil@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * This module contains the implementation of a virtual ethernet device
 * for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN
 * option of the RS/6000 Platform Architecture to interface with virtual
 * ethernet NICs that are presented to the partition by the hypervisor.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>

#include "ibmveth.h"
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};
#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
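
/*
 * Each entry below pairs an ethtool statistic name with the offset of the
 * matching u64 counter in struct ibmveth_adapter, so that
 * ibmveth_get_ethtool_stats() can read every counter generically through
 * IBMVETH_GET_STAT().
 */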
struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
};
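
/*
 * The flags_off field of an rx queue entry packs several values: a toggle
 * bit that flips each time the queue wraps (used to detect newly written
 * entries), a valid bit, a checksum-good bit, and the frame offset within
 * the buffer. The accessors below extract them with the IBMVETH_RXQ_*
 * masks.
 */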
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}
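
/*
 * Each posted buffer is tagged with a 64-bit correlator (pool index in the
 * upper 32 bits, buffer index in the lower 32) written into the first 8
 * bytes of the buffer; the hypervisor hands it back with the completed rx
 * queue entry so the buffer can be located again. free_map acts as a ring
 * of free buffer indices, consumed here and refilled when buffers are
 * returned to the pool.
 */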
/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
					  pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
					       adapter->netdev->mtu +
					       IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}
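
/*
 * The last 8 bytes of the 4K buffer list page appear to hold a
 * hypervisor-maintained count of frames dropped for lack of a buffer;
 * replenish_task snapshots it into adapter->rx_no_buffer (hence the
 * 4096 - 8 offset below).
 */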
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}
/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}
/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
			dma_unmap_single(dev,
					 adapter->rx_queue.queue_dma,
					 adapter->rx_queue.queue_len,
					 DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->netdev->mtu + IBMVETH_BUFF_OH,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
		union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}
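
/*
 * Open sequence: allocate the buffer list and filter list pages plus the
 * rx queue, DMA-map all three, register the logical LAN with the
 * hypervisor, allocate the active buffer pools and the tx bounce buffer,
 * then kick the interrupt handler once to perform the initial replenish.
 */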
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
						GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		netdev_err(netdev, "unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	dev = &adapter->vdev->dev;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENONET;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			napi_disable(&adapter->napi);
			return -ENOMEM;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return rc;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		netdev_err(netdev, "unable to allocate bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;
}
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}
static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version,
		sizeof(info->version) - 1);
}
static u32 netdev_get_link(struct net_device *dev)
{
	return 1;
}
static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	if (data) {
		adapter->rx_csum = 1;
	} else {
		/*
		 * Since the ibmveth firmware interface does not have the
		 * concept of separate tx/rx checksum offload enable, if rx
		 * checksum is disabled we also have to disable tx checksum
		 * offload. Once we disable rx checksum offload, we are no
		 * longer allowed to send tx buffers that are not properly
		 * checksummed.
		 */
		adapter->rx_csum = 0;
		dev->features &= ~NETIF_F_IP_CSUM;
		dev->features &= ~NETIF_F_IPV6_CSUM;
	}
}
static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	if (data) {
		if (adapter->fw_ipv4_csum_support)
			dev->features |= NETIF_F_IP_CSUM;
		if (adapter->fw_ipv6_csum_support)
			dev->features |= NETIF_F_IPV6_CSUM;
		adapter->rx_csum = 1;
	} else {
		dev->features &= ~NETIF_F_IP_CSUM;
		dev->features &= ~NETIF_F_IPV6_CSUM;
	}
}
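
/*
 * h_illan_attributes takes a clear mask and a set mask and returns the
 * resulting attribute word. Below, checksum offload is first requested for
 * IPv4 and rolled back if the hypervisor rejects it, then the same is done
 * independently for IPv6.
 */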
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
				    void (*done) (struct net_device *, u32))
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					 set_attr, &ret_attr);

		if (ret != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
				   "offload settings. %d rc=%ld\n",
				   data, ret);

			ret = h_illan_attributes(adapter->vdev->unit_address,
						 set_attr, clr_attr, &ret_attr);
		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
				   "offload settings. %d rc=%ld\n",
				   data, ret6);

			ret = h_illan_attributes(adapter->vdev->unit_address,
						 set_attr6, clr_attr6,
						 &ret_attr);
		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret == H_SUCCESS || ret6 == H_SUCCESS)
			done(dev, data);
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
			   " %d rc=%ld ret_attr=%lx\n", data, ret,
			   ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}
static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
		return 0;

	return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
}
static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rc = 0;

	if (data && (dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		return 0;
	if (!data && !(dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		return 0;

	if (data && !adapter->rx_csum)
		rc = ibmveth_set_csum_offload(dev, data,
					      ibmveth_set_tx_csum_flags);
	else
		ibmveth_set_tx_csum_flags(dev, data);

	return rc;
}
static u32 ibmveth_get_rx_csum(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	return adapter->rx_csum;
}
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}
static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= netdev_get_link,
	.set_tx_csum		= ibmveth_set_tx_csum,
	.get_rx_csum		= ibmveth_get_rx_csum,
	.set_rx_csum		= ibmveth_set_rx_csum,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
	.set_sg			= ethtool_op_set_sg,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
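
/*
 * A transmit uses up to six buffer descriptors, passed as individual
 * arguments to h_send_logical_lan. The call is retried while the
 * hypervisor returns H_BUSY, bounded by retry_count.
 */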
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}
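
/*
 * Transmit takes one of two paths: small linear skbs (under tx_copybreak)
 * are copied into the pre-mapped bounce buffer to avoid a TCE map/unmap,
 * while everything else is DMA-mapped in place, header first and then each
 * fragment. If mapping fails (e.g. under CMO entitlement pressure), the
 * skb is linearized and retried through the bounce path.
 */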
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
						 skb_headlen(skb),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long dma_addr;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
					frag->page_offset, frag->size,
					DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | frag->size;
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}
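
/*
 * NAPI poll loop. Invalid entries are recycled; valid frames are either
 * copied into a fresh skb (under rx_copybreak) so the large buffer can be
 * recycled, or passed up directly. After napi_complete() the queue is
 * re-checked with interrupts enabled to close the race with a frame that
 * arrived in between.
 */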
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				skb = new_skb;
				ibmveth_rxq_recycle_buffer(adapter);
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}
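
/*
 * An MTU change must land in some rx buffer pool: pools are activated in
 * order up to and including the first one large enough for the new MTU
 * plus overhead, and a running device is closed and reopened around the
 * change so the pools can be reallocated.
 */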
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif
/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	/* netdev inits at probe time along with the structures we need below*/
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));

	return ret;
}
static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_multicast_list	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};
static int __devinit ibmveth_probe(struct vio_dev *dev,
				   const struct vio_device_id *id)
{
	int rc, i;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/*
	 * Some older boxes running PHYP non-natively have an OF that returns
	 * a 8-byte local-mac-address field (and the first 2 bytes have to be
	 * ignored) while newer boxes' OF return a 6-byte field. Note that
	 * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
	 * The RPA doc specifies that the first byte must be 10b, so we'll
	 * just look for it to solve this 8 vs. 6 byte field issue
	 */
	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->features |= NETIF_F_SG;

	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_csum_offload(netdev, 1, ibmveth_set_tx_csum_flags);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}
static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
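
/*
 * Per-pool sysfs interface: each rx buffer pool is exposed as a pool%d
 * kobject with "active", "num" (buffer count) and "size" (buffer size)
 * attributes. Changing them closes and reopens a running device so the
 * pools can be reallocated.
 */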
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}
#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};
static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}
static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.driver		= {
		.name	= ibmveth_driver_name,
		.owner	= THIS_MODULE,
		.pm	= &ibmveth_pm_ops,
	}
};
static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);