/*
 *  FUJITSU Extended Socket Network Device driver
 *  Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
22 #include <linux/module.h>
23 #include <linux/types.h>
24 #include <linux/nls.h>
25 #include <linux/platform_device.h>
26 #include <linux/netdevice.h>
27 #include <linux/interrupt.h>
33 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
34 #define DRV_NAME "fjes"
35 char fjes_driver_name
[] = DRV_NAME
;
36 char fjes_driver_version
[] = DRV_VERSION
;
37 static const char fjes_driver_string
[] =
38 "FUJITSU Extended Socket Network Device Driver";
39 static const char fjes_copyright
[] =
40 "Copyright (c) 2015 FUJITSU LIMITED";
42 MODULE_AUTHOR("Taku Izumi <izumi.taku@jp.fujitsu.com>");
43 MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver");
44 MODULE_LICENSE("GPL");
45 MODULE_VERSION(DRV_VERSION
);
47 static int fjes_request_irq(struct fjes_adapter
*);
48 static void fjes_free_irq(struct fjes_adapter
*);
50 static int fjes_open(struct net_device
*);
51 static int fjes_close(struct net_device
*);
52 static int fjes_setup_resources(struct fjes_adapter
*);
53 static void fjes_free_resources(struct fjes_adapter
*);
54 static netdev_tx_t
fjes_xmit_frame(struct sk_buff
*, struct net_device
*);
55 static void fjes_raise_intr_rxdata_task(struct work_struct
*);
56 static void fjes_tx_stall_task(struct work_struct
*);
57 static void fjes_force_close_task(struct work_struct
*);
58 static irqreturn_t
fjes_intr(int, void*);
59 static struct rtnl_link_stats64
*
60 fjes_get_stats64(struct net_device
*, struct rtnl_link_stats64
*);
61 static int fjes_change_mtu(struct net_device
*, int);
62 static int fjes_vlan_rx_add_vid(struct net_device
*, __be16 proto
, u16
);
63 static int fjes_vlan_rx_kill_vid(struct net_device
*, __be16 proto
, u16
);
64 static void fjes_tx_retry(struct net_device
*);
66 static int fjes_acpi_add(struct acpi_device
*);
67 static int fjes_acpi_remove(struct acpi_device
*);
68 static acpi_status
fjes_get_acpi_resource(struct acpi_resource
*, void*);
70 static int fjes_probe(struct platform_device
*);
71 static int fjes_remove(struct platform_device
*);
73 static int fjes_sw_init(struct fjes_adapter
*);
74 static void fjes_netdev_setup(struct net_device
*);
75 static void fjes_irq_watch_task(struct work_struct
*);
76 static void fjes_watch_unshare_task(struct work_struct
*);
77 static void fjes_rx_irq(struct fjes_adapter
*, int);
78 static int fjes_poll(struct napi_struct
*, int);
80 static const struct acpi_device_id fjes_acpi_ids
[] = {
84 MODULE_DEVICE_TABLE(acpi
, fjes_acpi_ids
);
86 static struct acpi_driver fjes_acpi_driver
= {
93 .remove
= fjes_acpi_remove
,
97 static struct platform_driver fjes_driver
= {
100 .owner
= THIS_MODULE
,
103 .remove
= fjes_remove
,
106 static struct resource fjes_resource
[] = {
108 .flags
= IORESOURCE_MEM
,
113 .flags
= IORESOURCE_IRQ
,
119 static int fjes_acpi_add(struct acpi_device
*device
)
121 struct acpi_buffer buffer
= { ACPI_ALLOCATE_BUFFER
, NULL
};
122 char str_buf
[sizeof(FJES_ACPI_SYMBOL
) + 1];
123 struct platform_device
*plat_dev
;
124 union acpi_object
*str
;
128 status
= acpi_evaluate_object(device
->handle
, "_STR", NULL
, &buffer
);
129 if (ACPI_FAILURE(status
))
132 str
= buffer
.pointer
;
133 result
= utf16s_to_utf8s((wchar_t *)str
->string
.pointer
,
134 str
->string
.length
, UTF16_LITTLE_ENDIAN
,
135 str_buf
, sizeof(str_buf
) - 1);
138 if (strncmp(FJES_ACPI_SYMBOL
, str_buf
, strlen(FJES_ACPI_SYMBOL
)) != 0) {
139 kfree(buffer
.pointer
);
142 kfree(buffer
.pointer
);
144 status
= acpi_walk_resources(device
->handle
, METHOD_NAME__CRS
,
145 fjes_get_acpi_resource
, fjes_resource
);
146 if (ACPI_FAILURE(status
))
149 /* create platform_device */
150 plat_dev
= platform_device_register_simple(DRV_NAME
, 0, fjes_resource
,
151 ARRAY_SIZE(fjes_resource
));
152 device
->driver_data
= plat_dev
;
157 static int fjes_acpi_remove(struct acpi_device
*device
)
159 struct platform_device
*plat_dev
;
161 plat_dev
= (struct platform_device
*)acpi_driver_data(device
);
162 platform_device_unregister(plat_dev
);
168 fjes_get_acpi_resource(struct acpi_resource
*acpi_res
, void *data
)
170 struct acpi_resource_address32
*addr
;
171 struct acpi_resource_irq
*irq
;
172 struct resource
*res
= data
;
174 switch (acpi_res
->type
) {
175 case ACPI_RESOURCE_TYPE_ADDRESS32
:
176 addr
= &acpi_res
->data
.address32
;
177 res
[0].start
= addr
->address
.minimum
;
178 res
[0].end
= addr
->address
.minimum
+
179 addr
->address
.address_length
- 1;
182 case ACPI_RESOURCE_TYPE_IRQ
:
183 irq
= &acpi_res
->data
.irq
;
184 if (irq
->interrupt_count
!= 1)
186 res
[1].start
= irq
->interrupts
[0];
187 res
[1].end
= irq
->interrupts
[0];
197 static int fjes_request_irq(struct fjes_adapter
*adapter
)
199 struct net_device
*netdev
= adapter
->netdev
;
202 adapter
->interrupt_watch_enable
= true;
203 if (!delayed_work_pending(&adapter
->interrupt_watch_task
)) {
204 queue_delayed_work(adapter
->control_wq
,
205 &adapter
->interrupt_watch_task
,
206 FJES_IRQ_WATCH_DELAY
);
209 if (!adapter
->irq_registered
) {
210 result
= request_irq(adapter
->hw
.hw_res
.irq
, fjes_intr
,
211 IRQF_SHARED
, netdev
->name
, adapter
);
213 adapter
->irq_registered
= false;
215 adapter
->irq_registered
= true;
221 static void fjes_free_irq(struct fjes_adapter
*adapter
)
223 struct fjes_hw
*hw
= &adapter
->hw
;
225 adapter
->interrupt_watch_enable
= false;
226 cancel_delayed_work_sync(&adapter
->interrupt_watch_task
);
228 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_ALL
, true);
230 if (adapter
->irq_registered
) {
231 free_irq(adapter
->hw
.hw_res
.irq
, adapter
);
232 adapter
->irq_registered
= false;
236 static const struct net_device_ops fjes_netdev_ops
= {
237 .ndo_open
= fjes_open
,
238 .ndo_stop
= fjes_close
,
239 .ndo_start_xmit
= fjes_xmit_frame
,
240 .ndo_get_stats64
= fjes_get_stats64
,
241 .ndo_change_mtu
= fjes_change_mtu
,
242 .ndo_tx_timeout
= fjes_tx_retry
,
243 .ndo_vlan_rx_add_vid
= fjes_vlan_rx_add_vid
,
244 .ndo_vlan_rx_kill_vid
= fjes_vlan_rx_kill_vid
,
247 /* fjes_open - Called when a network interface is made active */
248 static int fjes_open(struct net_device
*netdev
)
250 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
251 struct fjes_hw
*hw
= &adapter
->hw
;
254 if (adapter
->open_guard
)
257 result
= fjes_setup_resources(adapter
);
261 hw
->txrx_stop_req_bit
= 0;
262 hw
->epstop_req_bit
= 0;
264 napi_enable(&adapter
->napi
);
266 fjes_hw_capture_interrupt_status(hw
);
268 result
= fjes_request_irq(adapter
);
272 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_ALL
, false);
274 netif_tx_start_all_queues(netdev
);
275 netif_carrier_on(netdev
);
280 fjes_free_irq(adapter
);
281 napi_disable(&adapter
->napi
);
284 fjes_free_resources(adapter
);
288 /* fjes_close - Disables a network interface */
289 static int fjes_close(struct net_device
*netdev
)
291 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
292 struct fjes_hw
*hw
= &adapter
->hw
;
296 netif_tx_stop_all_queues(netdev
);
297 netif_carrier_off(netdev
);
299 fjes_hw_raise_epstop(hw
);
301 napi_disable(&adapter
->napi
);
303 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
304 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
305 if (epidx
== hw
->my_epid
)
308 if (fjes_hw_get_partner_ep_status(hw
, epidx
) ==
310 adapter
->hw
.ep_shm_info
[epidx
]
311 .tx
.info
->v1i
.rx_status
&=
314 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
316 fjes_free_irq(adapter
);
318 cancel_delayed_work_sync(&adapter
->interrupt_watch_task
);
319 cancel_work_sync(&adapter
->unshare_watch_task
);
320 adapter
->unshare_watch_bitmask
= 0;
321 cancel_work_sync(&adapter
->raise_intr_rxdata_task
);
322 cancel_work_sync(&adapter
->tx_stall_task
);
324 cancel_work_sync(&hw
->update_zone_task
);
325 cancel_work_sync(&hw
->epstop_task
);
327 fjes_hw_wait_epstop(hw
);
329 fjes_free_resources(adapter
);
334 static int fjes_setup_resources(struct fjes_adapter
*adapter
)
336 struct net_device
*netdev
= adapter
->netdev
;
337 struct ep_share_mem_info
*buf_pair
;
338 struct fjes_hw
*hw
= &adapter
->hw
;
343 mutex_lock(&hw
->hw_info
.lock
);
344 result
= fjes_hw_request_info(hw
);
347 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
348 hw
->ep_shm_info
[epidx
].es_status
=
349 hw
->hw_info
.res_buf
->info
.info
[epidx
].es_status
;
350 hw
->ep_shm_info
[epidx
].zone
=
351 hw
->hw_info
.res_buf
->info
.info
[epidx
].zone
;
357 adapter
->force_reset
= true;
359 mutex_unlock(&hw
->hw_info
.lock
);
362 mutex_unlock(&hw
->hw_info
.lock
);
364 for (epidx
= 0; epidx
< (hw
->max_epid
); epidx
++) {
365 if ((epidx
!= hw
->my_epid
) &&
366 (hw
->ep_shm_info
[epidx
].es_status
==
367 FJES_ZONING_STATUS_ENABLE
)) {
368 fjes_hw_raise_interrupt(hw
, epidx
,
369 REG_ICTL_MASK_INFO_UPDATE
);
373 msleep(FJES_OPEN_ZONE_UPDATE_WAIT
* hw
->max_epid
);
375 for (epidx
= 0; epidx
< (hw
->max_epid
); epidx
++) {
376 if (epidx
== hw
->my_epid
)
379 buf_pair
= &hw
->ep_shm_info
[epidx
];
381 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
382 fjes_hw_setup_epbuf(&buf_pair
->tx
, netdev
->dev_addr
,
384 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
386 if (fjes_hw_epid_is_same_zone(hw
, epidx
)) {
387 mutex_lock(&hw
->hw_info
.lock
);
389 fjes_hw_register_buff_addr(hw
, epidx
, buf_pair
);
390 mutex_unlock(&hw
->hw_info
.lock
);
398 adapter
->force_reset
= true;
407 static void fjes_free_resources(struct fjes_adapter
*adapter
)
409 struct net_device
*netdev
= adapter
->netdev
;
410 struct fjes_device_command_param param
;
411 struct ep_share_mem_info
*buf_pair
;
412 struct fjes_hw
*hw
= &adapter
->hw
;
413 bool reset_flag
= false;
418 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
419 if (epidx
== hw
->my_epid
)
422 mutex_lock(&hw
->hw_info
.lock
);
423 result
= fjes_hw_unregister_buff_addr(hw
, epidx
);
424 mutex_unlock(&hw
->hw_info
.lock
);
429 buf_pair
= &hw
->ep_shm_info
[epidx
];
431 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
432 fjes_hw_setup_epbuf(&buf_pair
->tx
,
433 netdev
->dev_addr
, netdev
->mtu
);
434 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
436 clear_bit(epidx
, &hw
->txrx_stop_req_bit
);
439 if (reset_flag
|| adapter
->force_reset
) {
440 result
= fjes_hw_reset(hw
);
442 adapter
->force_reset
= false;
445 adapter
->open_guard
= true;
447 hw
->hw_info
.buffer_share_bit
= 0;
449 memset((void *)¶m
, 0, sizeof(param
));
451 param
.req_len
= hw
->hw_info
.req_buf_size
;
452 param
.req_start
= __pa(hw
->hw_info
.req_buf
);
453 param
.res_len
= hw
->hw_info
.res_buf_size
;
454 param
.res_start
= __pa(hw
->hw_info
.res_buf
);
455 param
.share_start
= __pa(hw
->hw_info
.share
->ep_status
);
457 fjes_hw_init_command_registers(hw
, ¶m
);
461 static void fjes_tx_stall_task(struct work_struct
*work
)
463 struct fjes_adapter
*adapter
= container_of(work
,
464 struct fjes_adapter
, tx_stall_task
);
465 struct net_device
*netdev
= adapter
->netdev
;
466 struct fjes_hw
*hw
= &adapter
->hw
;
467 int all_queue_available
, sendable
;
468 enum ep_partner_status pstatus
;
469 int max_epid
, my_epid
, epid
;
470 union ep_buffer_info
*info
;
474 dev_trans_start(netdev
)) > FJES_TX_TX_STALL_TIMEOUT
) {
475 netif_wake_queue(netdev
);
479 my_epid
= hw
->my_epid
;
480 max_epid
= hw
->max_epid
;
482 for (i
= 0; i
< 5; i
++) {
483 all_queue_available
= 1;
485 for (epid
= 0; epid
< max_epid
; epid
++) {
489 pstatus
= fjes_hw_get_partner_ep_status(hw
, epid
);
490 sendable
= (pstatus
== EP_PARTNER_SHARED
);
494 info
= adapter
->hw
.ep_shm_info
[epid
].tx
.info
;
496 if (!(info
->v1i
.rx_status
& FJES_RX_MTU_CHANGING_DONE
))
499 if (EP_RING_FULL(info
->v1i
.head
, info
->v1i
.tail
,
500 info
->v1i
.count_max
)) {
501 all_queue_available
= 0;
506 if (all_queue_available
) {
507 netif_wake_queue(netdev
);
512 usleep_range(50, 100);
514 queue_work(adapter
->txrx_wq
, &adapter
->tx_stall_task
);
517 static void fjes_force_close_task(struct work_struct
*work
)
519 struct fjes_adapter
*adapter
= container_of(work
,
520 struct fjes_adapter
, force_close_task
);
521 struct net_device
*netdev
= adapter
->netdev
;
528 static void fjes_raise_intr_rxdata_task(struct work_struct
*work
)
530 struct fjes_adapter
*adapter
= container_of(work
,
531 struct fjes_adapter
, raise_intr_rxdata_task
);
532 struct fjes_hw
*hw
= &adapter
->hw
;
533 enum ep_partner_status pstatus
;
534 int max_epid
, my_epid
, epid
;
536 my_epid
= hw
->my_epid
;
537 max_epid
= hw
->max_epid
;
539 for (epid
= 0; epid
< max_epid
; epid
++)
540 hw
->ep_shm_info
[epid
].tx_status_work
= 0;
542 for (epid
= 0; epid
< max_epid
; epid
++) {
546 pstatus
= fjes_hw_get_partner_ep_status(hw
, epid
);
547 if (pstatus
== EP_PARTNER_SHARED
) {
548 hw
->ep_shm_info
[epid
].tx_status_work
=
549 hw
->ep_shm_info
[epid
].tx
.info
->v1i
.tx_status
;
551 if (hw
->ep_shm_info
[epid
].tx_status_work
==
552 FJES_TX_DELAY_SEND_PENDING
) {
553 hw
->ep_shm_info
[epid
].tx
.info
->v1i
.tx_status
=
554 FJES_TX_DELAY_SEND_NONE
;
559 for (epid
= 0; epid
< max_epid
; epid
++) {
563 pstatus
= fjes_hw_get_partner_ep_status(hw
, epid
);
564 if ((hw
->ep_shm_info
[epid
].tx_status_work
==
565 FJES_TX_DELAY_SEND_PENDING
) &&
566 (pstatus
== EP_PARTNER_SHARED
) &&
567 !(hw
->ep_shm_info
[epid
].rx
.info
->v1i
.rx_status
&
568 FJES_RX_POLL_WORK
)) {
569 fjes_hw_raise_interrupt(hw
, epid
,
570 REG_ICTL_MASK_RX_DATA
);
574 usleep_range(500, 1000);
577 static int fjes_tx_send(struct fjes_adapter
*adapter
, int dest
,
578 void *data
, size_t len
)
582 retval
= fjes_hw_epbuf_tx_pkt_send(&adapter
->hw
.ep_shm_info
[dest
].tx
,
587 adapter
->hw
.ep_shm_info
[dest
].tx
.info
->v1i
.tx_status
=
588 FJES_TX_DELAY_SEND_PENDING
;
589 if (!work_pending(&adapter
->raise_intr_rxdata_task
))
590 queue_work(adapter
->txrx_wq
,
591 &adapter
->raise_intr_rxdata_task
);
598 fjes_xmit_frame(struct sk_buff
*skb
, struct net_device
*netdev
)
600 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
601 struct fjes_hw
*hw
= &adapter
->hw
;
603 int max_epid
, my_epid
, dest_epid
;
604 enum ep_partner_status pstatus
;
605 struct netdev_queue
*cur_queue
;
606 char shortpkt
[VLAN_ETH_HLEN
];
617 cur_queue
= netdev_get_tx_queue(netdev
, queue_no
);
619 eth
= (struct ethhdr
*)skb
->data
;
620 my_epid
= hw
->my_epid
;
622 vlan
= (vlan_get_tag(skb
, &vlan_id
) == 0) ? true : false;
627 if (is_multicast_ether_addr(eth
->h_dest
)) {
629 max_epid
= hw
->max_epid
;
631 } else if (is_local_ether_addr(eth
->h_dest
)) {
632 dest_epid
= eth
->h_dest
[ETH_ALEN
- 1];
633 max_epid
= dest_epid
+ 1;
635 if ((eth
->h_dest
[0] == 0x02) &&
636 (0x00 == (eth
->h_dest
[1] | eth
->h_dest
[2] |
637 eth
->h_dest
[3] | eth
->h_dest
[4])) &&
638 (dest_epid
< hw
->max_epid
)) {
645 adapter
->stats64
.tx_packets
+= 1;
646 hw
->ep_shm_info
[my_epid
].net_stats
.tx_packets
+= 1;
647 adapter
->stats64
.tx_bytes
+= len
;
648 hw
->ep_shm_info
[my_epid
].net_stats
.tx_bytes
+= len
;
655 adapter
->stats64
.tx_packets
+= 1;
656 hw
->ep_shm_info
[my_epid
].net_stats
.tx_packets
+= 1;
657 adapter
->stats64
.tx_bytes
+= len
;
658 hw
->ep_shm_info
[my_epid
].net_stats
.tx_bytes
+= len
;
661 for (; dest_epid
< max_epid
; dest_epid
++) {
662 if (my_epid
== dest_epid
)
665 pstatus
= fjes_hw_get_partner_ep_status(hw
, dest_epid
);
666 if (pstatus
!= EP_PARTNER_SHARED
) {
668 } else if (!fjes_hw_check_epbuf_version(
669 &adapter
->hw
.ep_shm_info
[dest_epid
].rx
, 0)) {
670 /* version is NOT 0 */
671 adapter
->stats64
.tx_carrier_errors
+= 1;
672 hw
->ep_shm_info
[dest_epid
].net_stats
673 .tx_carrier_errors
+= 1;
676 } else if (!fjes_hw_check_mtu(
677 &adapter
->hw
.ep_shm_info
[dest_epid
].rx
,
679 adapter
->stats64
.tx_dropped
+= 1;
680 hw
->ep_shm_info
[dest_epid
].net_stats
.tx_dropped
+= 1;
681 adapter
->stats64
.tx_errors
+= 1;
682 hw
->ep_shm_info
[dest_epid
].net_stats
.tx_errors
+= 1;
686 !fjes_hw_check_vlan_id(
687 &adapter
->hw
.ep_shm_info
[dest_epid
].rx
,
691 if (len
< VLAN_ETH_HLEN
) {
692 memset(shortpkt
, 0, VLAN_ETH_HLEN
);
693 memcpy(shortpkt
, skb
->data
, skb
->len
);
698 if (adapter
->tx_retry_count
== 0) {
699 adapter
->tx_start_jiffies
= jiffies
;
700 adapter
->tx_retry_count
= 1;
702 adapter
->tx_retry_count
++;
705 if (fjes_tx_send(adapter
, dest_epid
, data
, len
)) {
710 (long)adapter
->tx_start_jiffies
) >=
711 FJES_TX_RETRY_TIMEOUT
) {
712 adapter
->stats64
.tx_fifo_errors
+= 1;
713 hw
->ep_shm_info
[dest_epid
].net_stats
714 .tx_fifo_errors
+= 1;
715 adapter
->stats64
.tx_errors
+= 1;
716 hw
->ep_shm_info
[dest_epid
].net_stats
721 netif_trans_update(netdev
);
722 netif_tx_stop_queue(cur_queue
);
724 if (!work_pending(&adapter
->tx_stall_task
))
725 queue_work(adapter
->txrx_wq
,
726 &adapter
->tx_stall_task
);
728 ret
= NETDEV_TX_BUSY
;
732 adapter
->stats64
.tx_packets
+= 1;
733 hw
->ep_shm_info
[dest_epid
].net_stats
735 adapter
->stats64
.tx_bytes
+= len
;
736 hw
->ep_shm_info
[dest_epid
].net_stats
740 adapter
->tx_retry_count
= 0;
746 if (ret
== NETDEV_TX_OK
) {
749 adapter
->stats64
.tx_packets
+= 1;
750 hw
->ep_shm_info
[my_epid
].net_stats
.tx_packets
+= 1;
751 adapter
->stats64
.tx_bytes
+= 1;
752 hw
->ep_shm_info
[my_epid
].net_stats
.tx_bytes
+= len
;
759 static void fjes_tx_retry(struct net_device
*netdev
)
761 struct netdev_queue
*queue
= netdev_get_tx_queue(netdev
, 0);
763 netif_tx_wake_queue(queue
);
766 static struct rtnl_link_stats64
*
767 fjes_get_stats64(struct net_device
*netdev
, struct rtnl_link_stats64
*stats
)
769 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
771 memcpy(stats
, &adapter
->stats64
, sizeof(struct rtnl_link_stats64
));
776 static int fjes_change_mtu(struct net_device
*netdev
, int new_mtu
)
778 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
779 bool running
= netif_running(netdev
);
780 struct fjes_hw
*hw
= &adapter
->hw
;
785 for (idx
= 0; fjes_support_mtu
[idx
] != 0; idx
++) {
786 if (new_mtu
<= fjes_support_mtu
[idx
]) {
787 new_mtu
= fjes_support_mtu
[idx
];
788 if (new_mtu
== netdev
->mtu
)
800 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
801 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
802 if (epidx
== hw
->my_epid
)
804 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
&=
805 ~FJES_RX_MTU_CHANGING_DONE
;
807 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
809 netif_tx_stop_all_queues(netdev
);
810 netif_carrier_off(netdev
);
811 cancel_work_sync(&adapter
->tx_stall_task
);
812 napi_disable(&adapter
->napi
);
816 netif_tx_stop_all_queues(netdev
);
819 netdev
->mtu
= new_mtu
;
822 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
823 if (epidx
== hw
->my_epid
)
826 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
827 fjes_hw_setup_epbuf(&hw
->ep_shm_info
[epidx
].tx
,
831 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
|=
832 FJES_RX_MTU_CHANGING_DONE
;
833 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
836 netif_tx_wake_all_queues(netdev
);
837 netif_carrier_on(netdev
);
838 napi_enable(&adapter
->napi
);
839 napi_schedule(&adapter
->napi
);
845 static int fjes_vlan_rx_add_vid(struct net_device
*netdev
,
846 __be16 proto
, u16 vid
)
848 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
852 for (epid
= 0; epid
< adapter
->hw
.max_epid
; epid
++) {
853 if (epid
== adapter
->hw
.my_epid
)
856 if (!fjes_hw_check_vlan_id(
857 &adapter
->hw
.ep_shm_info
[epid
].tx
, vid
))
858 ret
= fjes_hw_set_vlan_id(
859 &adapter
->hw
.ep_shm_info
[epid
].tx
, vid
);
862 return ret
? 0 : -ENOSPC
;
865 static int fjes_vlan_rx_kill_vid(struct net_device
*netdev
,
866 __be16 proto
, u16 vid
)
868 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
871 for (epid
= 0; epid
< adapter
->hw
.max_epid
; epid
++) {
872 if (epid
== adapter
->hw
.my_epid
)
875 fjes_hw_del_vlan_id(&adapter
->hw
.ep_shm_info
[epid
].tx
, vid
);
881 static void fjes_txrx_stop_req_irq(struct fjes_adapter
*adapter
,
884 struct fjes_hw
*hw
= &adapter
->hw
;
885 enum ep_partner_status status
;
888 status
= fjes_hw_get_partner_ep_status(hw
, src_epid
);
890 case EP_PARTNER_UNSHARE
:
891 case EP_PARTNER_COMPLETE
:
894 case EP_PARTNER_WAITING
:
895 if (src_epid
< hw
->my_epid
) {
896 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
897 hw
->ep_shm_info
[src_epid
].tx
.info
->v1i
.rx_status
|=
898 FJES_RX_STOP_REQ_DONE
;
899 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
901 clear_bit(src_epid
, &hw
->txrx_stop_req_bit
);
902 set_bit(src_epid
, &adapter
->unshare_watch_bitmask
);
904 if (!work_pending(&adapter
->unshare_watch_task
))
905 queue_work(adapter
->control_wq
,
906 &adapter
->unshare_watch_task
);
909 case EP_PARTNER_SHARED
:
910 if (hw
->ep_shm_info
[src_epid
].rx
.info
->v1i
.rx_status
&
911 FJES_RX_STOP_REQ_REQUEST
) {
912 set_bit(src_epid
, &hw
->epstop_req_bit
);
913 if (!work_pending(&hw
->epstop_task
))
914 queue_work(adapter
->control_wq
,
921 static void fjes_stop_req_irq(struct fjes_adapter
*adapter
, int src_epid
)
923 struct fjes_hw
*hw
= &adapter
->hw
;
924 enum ep_partner_status status
;
927 set_bit(src_epid
, &hw
->hw_info
.buffer_unshare_reserve_bit
);
929 status
= fjes_hw_get_partner_ep_status(hw
, src_epid
);
931 case EP_PARTNER_WAITING
:
932 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
933 hw
->ep_shm_info
[src_epid
].tx
.info
->v1i
.rx_status
|=
934 FJES_RX_STOP_REQ_DONE
;
935 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
936 clear_bit(src_epid
, &hw
->txrx_stop_req_bit
);
938 case EP_PARTNER_UNSHARE
:
939 case EP_PARTNER_COMPLETE
:
941 set_bit(src_epid
, &adapter
->unshare_watch_bitmask
);
942 if (!work_pending(&adapter
->unshare_watch_task
))
943 queue_work(adapter
->control_wq
,
944 &adapter
->unshare_watch_task
);
946 case EP_PARTNER_SHARED
:
947 set_bit(src_epid
, &hw
->epstop_req_bit
);
949 if (!work_pending(&hw
->epstop_task
))
950 queue_work(adapter
->control_wq
, &hw
->epstop_task
);
955 static void fjes_update_zone_irq(struct fjes_adapter
*adapter
,
958 struct fjes_hw
*hw
= &adapter
->hw
;
960 if (!work_pending(&hw
->update_zone_task
))
961 queue_work(adapter
->control_wq
, &hw
->update_zone_task
);
964 static irqreturn_t
fjes_intr(int irq
, void *data
)
966 struct fjes_adapter
*adapter
= data
;
967 struct fjes_hw
*hw
= &adapter
->hw
;
971 icr
= fjes_hw_capture_interrupt_status(hw
);
973 if (icr
& REG_IS_MASK_IS_ASSERT
) {
974 if (icr
& REG_ICTL_MASK_RX_DATA
)
975 fjes_rx_irq(adapter
, icr
& REG_IS_MASK_EPID
);
977 if (icr
& REG_ICTL_MASK_DEV_STOP_REQ
)
978 fjes_stop_req_irq(adapter
, icr
& REG_IS_MASK_EPID
);
980 if (icr
& REG_ICTL_MASK_TXRX_STOP_REQ
)
981 fjes_txrx_stop_req_irq(adapter
, icr
& REG_IS_MASK_EPID
);
983 if (icr
& REG_ICTL_MASK_TXRX_STOP_DONE
)
984 fjes_hw_set_irqmask(hw
,
985 REG_ICTL_MASK_TXRX_STOP_DONE
, true);
987 if (icr
& REG_ICTL_MASK_INFO_UPDATE
)
988 fjes_update_zone_irq(adapter
, icr
& REG_IS_MASK_EPID
);
998 static int fjes_rxframe_search_exist(struct fjes_adapter
*adapter
,
1001 struct fjes_hw
*hw
= &adapter
->hw
;
1002 enum ep_partner_status pstatus
;
1003 int max_epid
, cur_epid
;
1006 max_epid
= hw
->max_epid
;
1007 start_epid
= (start_epid
+ 1 + max_epid
) % max_epid
;
1009 for (i
= 0; i
< max_epid
; i
++) {
1010 cur_epid
= (start_epid
+ i
) % max_epid
;
1011 if (cur_epid
== hw
->my_epid
)
1014 pstatus
= fjes_hw_get_partner_ep_status(hw
, cur_epid
);
1015 if (pstatus
== EP_PARTNER_SHARED
) {
1016 if (!fjes_hw_epbuf_rx_is_empty(
1017 &hw
->ep_shm_info
[cur_epid
].rx
))
1024 static void *fjes_rxframe_get(struct fjes_adapter
*adapter
, size_t *psize
,
1029 *cur_epid
= fjes_rxframe_search_exist(adapter
, *cur_epid
);
1034 fjes_hw_epbuf_rx_curpkt_get_addr(
1035 &adapter
->hw
.ep_shm_info
[*cur_epid
].rx
, psize
);
1040 static void fjes_rxframe_release(struct fjes_adapter
*adapter
, int cur_epid
)
1042 fjes_hw_epbuf_rx_curpkt_drop(&adapter
->hw
.ep_shm_info
[cur_epid
].rx
);
1045 static void fjes_rx_irq(struct fjes_adapter
*adapter
, int src_epid
)
1047 struct fjes_hw
*hw
= &adapter
->hw
;
1049 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_RX_DATA
, true);
1051 adapter
->unset_rx_last
= true;
1052 napi_schedule(&adapter
->napi
);
1055 static int fjes_poll(struct napi_struct
*napi
, int budget
)
1057 struct fjes_adapter
*adapter
=
1058 container_of(napi
, struct fjes_adapter
, napi
);
1059 struct net_device
*netdev
= napi
->dev
;
1060 struct fjes_hw
*hw
= &adapter
->hw
;
1061 struct sk_buff
*skb
;
1068 spin_lock(&hw
->rx_status_lock
);
1069 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1070 if (epidx
== hw
->my_epid
)
1073 if (fjes_hw_get_partner_ep_status(hw
, epidx
) ==
1075 adapter
->hw
.ep_shm_info
[epidx
]
1076 .tx
.info
->v1i
.rx_status
|= FJES_RX_POLL_WORK
;
1078 spin_unlock(&hw
->rx_status_lock
);
1080 while (work_done
< budget
) {
1081 prefetch(&adapter
->hw
);
1082 frame
= fjes_rxframe_get(adapter
, &frame_len
, &cur_epid
);
1085 skb
= napi_alloc_skb(napi
, frame_len
);
1087 adapter
->stats64
.rx_dropped
+= 1;
1088 hw
->ep_shm_info
[cur_epid
].net_stats
1090 adapter
->stats64
.rx_errors
+= 1;
1091 hw
->ep_shm_info
[cur_epid
].net_stats
1094 memcpy(skb_put(skb
, frame_len
),
1096 skb
->protocol
= eth_type_trans(skb
, netdev
);
1097 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1099 netif_receive_skb(skb
);
1103 adapter
->stats64
.rx_packets
+= 1;
1104 hw
->ep_shm_info
[cur_epid
].net_stats
1106 adapter
->stats64
.rx_bytes
+= frame_len
;
1107 hw
->ep_shm_info
[cur_epid
].net_stats
1108 .rx_bytes
+= frame_len
;
1110 if (is_multicast_ether_addr(
1111 ((struct ethhdr
*)frame
)->h_dest
)) {
1112 adapter
->stats64
.multicast
+= 1;
1113 hw
->ep_shm_info
[cur_epid
].net_stats
1118 fjes_rxframe_release(adapter
, cur_epid
);
1119 adapter
->unset_rx_last
= true;
1125 if (work_done
< budget
) {
1126 napi_complete(napi
);
1128 if (adapter
->unset_rx_last
) {
1129 adapter
->rx_last_jiffies
= jiffies
;
1130 adapter
->unset_rx_last
= false;
1133 if (((long)jiffies
- (long)adapter
->rx_last_jiffies
) < 3) {
1134 napi_reschedule(napi
);
1136 spin_lock(&hw
->rx_status_lock
);
1137 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1138 if (epidx
== hw
->my_epid
)
1140 if (fjes_hw_get_partner_ep_status(hw
, epidx
) ==
1142 adapter
->hw
.ep_shm_info
[epidx
].tx
1143 .info
->v1i
.rx_status
&=
1146 spin_unlock(&hw
->rx_status_lock
);
1148 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_RX_DATA
, false);
1155 /* fjes_probe - Device Initialization Routine */
1156 static int fjes_probe(struct platform_device
*plat_dev
)
1158 struct fjes_adapter
*adapter
;
1159 struct net_device
*netdev
;
1160 struct resource
*res
;
1165 netdev
= alloc_netdev_mq(sizeof(struct fjes_adapter
), "es%d",
1166 NET_NAME_UNKNOWN
, fjes_netdev_setup
,
1172 SET_NETDEV_DEV(netdev
, &plat_dev
->dev
);
1174 dev_set_drvdata(&plat_dev
->dev
, netdev
);
1175 adapter
= netdev_priv(netdev
);
1176 adapter
->netdev
= netdev
;
1177 adapter
->plat_dev
= plat_dev
;
1181 /* setup the private structure */
1182 err
= fjes_sw_init(adapter
);
1184 goto err_free_netdev
;
1186 INIT_WORK(&adapter
->force_close_task
, fjes_force_close_task
);
1187 adapter
->force_reset
= false;
1188 adapter
->open_guard
= false;
1190 adapter
->txrx_wq
= alloc_workqueue(DRV_NAME
"/txrx", WQ_MEM_RECLAIM
, 0);
1191 adapter
->control_wq
= alloc_workqueue(DRV_NAME
"/control",
1194 INIT_WORK(&adapter
->tx_stall_task
, fjes_tx_stall_task
);
1195 INIT_WORK(&adapter
->raise_intr_rxdata_task
,
1196 fjes_raise_intr_rxdata_task
);
1197 INIT_WORK(&adapter
->unshare_watch_task
, fjes_watch_unshare_task
);
1198 adapter
->unshare_watch_bitmask
= 0;
1200 INIT_DELAYED_WORK(&adapter
->interrupt_watch_task
, fjes_irq_watch_task
);
1201 adapter
->interrupt_watch_enable
= false;
1203 res
= platform_get_resource(plat_dev
, IORESOURCE_MEM
, 0);
1204 hw
->hw_res
.start
= res
->start
;
1205 hw
->hw_res
.size
= resource_size(res
);
1206 hw
->hw_res
.irq
= platform_get_irq(plat_dev
, 0);
1207 err
= fjes_hw_init(&adapter
->hw
);
1209 goto err_free_netdev
;
1211 /* setup MAC address (02:00:00:00:00:[epid])*/
1212 netdev
->dev_addr
[0] = 2;
1213 netdev
->dev_addr
[1] = 0;
1214 netdev
->dev_addr
[2] = 0;
1215 netdev
->dev_addr
[3] = 0;
1216 netdev
->dev_addr
[4] = 0;
1217 netdev
->dev_addr
[5] = hw
->my_epid
; /* EPID */
1219 err
= register_netdev(netdev
);
1223 netif_carrier_off(netdev
);
1228 fjes_hw_exit(&adapter
->hw
);
1230 free_netdev(netdev
);
1235 /* fjes_remove - Device Removal Routine */
1236 static int fjes_remove(struct platform_device
*plat_dev
)
1238 struct net_device
*netdev
= dev_get_drvdata(&plat_dev
->dev
);
1239 struct fjes_adapter
*adapter
= netdev_priv(netdev
);
1240 struct fjes_hw
*hw
= &adapter
->hw
;
1242 cancel_delayed_work_sync(&adapter
->interrupt_watch_task
);
1243 cancel_work_sync(&adapter
->unshare_watch_task
);
1244 cancel_work_sync(&adapter
->raise_intr_rxdata_task
);
1245 cancel_work_sync(&adapter
->tx_stall_task
);
1246 if (adapter
->control_wq
)
1247 destroy_workqueue(adapter
->control_wq
);
1248 if (adapter
->txrx_wq
)
1249 destroy_workqueue(adapter
->txrx_wq
);
1251 unregister_netdev(netdev
);
1255 netif_napi_del(&adapter
->napi
);
1257 free_netdev(netdev
);
1262 static int fjes_sw_init(struct fjes_adapter
*adapter
)
1264 struct net_device
*netdev
= adapter
->netdev
;
1266 netif_napi_add(netdev
, &adapter
->napi
, fjes_poll
, 64);
1271 /* fjes_netdev_setup - netdevice initialization routine */
1272 static void fjes_netdev_setup(struct net_device
*netdev
)
1274 ether_setup(netdev
);
1276 netdev
->watchdog_timeo
= FJES_TX_RETRY_INTERVAL
;
1277 netdev
->netdev_ops
= &fjes_netdev_ops
;
1278 fjes_set_ethtool_ops(netdev
);
1279 netdev
->mtu
= fjes_support_mtu
[3];
1280 netdev
->flags
|= IFF_BROADCAST
;
1281 netdev
->features
|= NETIF_F_HW_CSUM
| NETIF_F_HW_VLAN_CTAG_FILTER
;
1284 static void fjes_irq_watch_task(struct work_struct
*work
)
1286 struct fjes_adapter
*adapter
= container_of(to_delayed_work(work
),
1287 struct fjes_adapter
, interrupt_watch_task
);
1289 local_irq_disable();
1290 fjes_intr(adapter
->hw
.hw_res
.irq
, adapter
);
1293 if (fjes_rxframe_search_exist(adapter
, 0) >= 0)
1294 napi_schedule(&adapter
->napi
);
1296 if (adapter
->interrupt_watch_enable
) {
1297 if (!delayed_work_pending(&adapter
->interrupt_watch_task
))
1298 queue_delayed_work(adapter
->control_wq
,
1299 &adapter
->interrupt_watch_task
,
1300 FJES_IRQ_WATCH_DELAY
);
1304 static void fjes_watch_unshare_task(struct work_struct
*work
)
1306 struct fjes_adapter
*adapter
=
1307 container_of(work
, struct fjes_adapter
, unshare_watch_task
);
1309 struct net_device
*netdev
= adapter
->netdev
;
1310 struct fjes_hw
*hw
= &adapter
->hw
;
1312 int unshare_watch
, unshare_reserve
;
1313 int max_epid
, my_epid
, epidx
;
1314 int stop_req
, stop_req_done
;
1315 ulong unshare_watch_bitmask
;
1316 unsigned long flags
;
1321 my_epid
= hw
->my_epid
;
1322 max_epid
= hw
->max_epid
;
1324 unshare_watch_bitmask
= adapter
->unshare_watch_bitmask
;
1325 adapter
->unshare_watch_bitmask
= 0;
1327 while ((unshare_watch_bitmask
|| hw
->txrx_stop_req_bit
) &&
1328 (wait_time
< 3000)) {
1329 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1330 if (epidx
== hw
->my_epid
)
1333 is_shared
= fjes_hw_epid_is_shared(hw
->hw_info
.share
,
1336 stop_req
= test_bit(epidx
, &hw
->txrx_stop_req_bit
);
1338 stop_req_done
= hw
->ep_shm_info
[epidx
].rx
.info
->v1i
.rx_status
&
1339 FJES_RX_STOP_REQ_DONE
;
1341 unshare_watch
= test_bit(epidx
, &unshare_watch_bitmask
);
1343 unshare_reserve
= test_bit(epidx
,
1344 &hw
->hw_info
.buffer_unshare_reserve_bit
);
1347 (is_shared
&& (!is_shared
|| !stop_req_done
))) &&
1348 (is_shared
|| !unshare_watch
|| !unshare_reserve
))
1351 mutex_lock(&hw
->hw_info
.lock
);
1352 ret
= fjes_hw_unregister_buff_addr(hw
, epidx
);
1360 &adapter
->force_close_task
)) {
1361 adapter
->force_reset
= true;
1363 &adapter
->force_close_task
);
1367 mutex_unlock(&hw
->hw_info
.lock
);
1369 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1370 fjes_hw_setup_epbuf(&hw
->ep_shm_info
[epidx
].tx
,
1371 netdev
->dev_addr
, netdev
->mtu
);
1372 spin_unlock_irqrestore(&hw
->rx_status_lock
, flags
);
1374 clear_bit(epidx
, &hw
->txrx_stop_req_bit
);
1375 clear_bit(epidx
, &unshare_watch_bitmask
);
1377 &hw
->hw_info
.buffer_unshare_reserve_bit
);
1384 if (hw
->hw_info
.buffer_unshare_reserve_bit
) {
1385 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
1386 if (epidx
== hw
->my_epid
)
1390 &hw
->hw_info
.buffer_unshare_reserve_bit
)) {
1391 mutex_lock(&hw
->hw_info
.lock
);
1393 ret
= fjes_hw_unregister_buff_addr(hw
, epidx
);
1401 &adapter
->force_close_task
)) {
1402 adapter
->force_reset
= true;
1404 &adapter
->force_close_task
);
1408 mutex_unlock(&hw
->hw_info
.lock
);
1410 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1411 fjes_hw_setup_epbuf(
1412 &hw
->ep_shm_info
[epidx
].tx
,
1413 netdev
->dev_addr
, netdev
->mtu
);
1414 spin_unlock_irqrestore(&hw
->rx_status_lock
,
1417 clear_bit(epidx
, &hw
->txrx_stop_req_bit
);
1418 clear_bit(epidx
, &unshare_watch_bitmask
);
1419 clear_bit(epidx
, &hw
->hw_info
.buffer_unshare_reserve_bit
);
1422 if (test_bit(epidx
, &unshare_watch_bitmask
)) {
1423 spin_lock_irqsave(&hw
->rx_status_lock
, flags
);
1424 hw
->ep_shm_info
[epidx
].tx
.info
->v1i
.rx_status
&=
1425 ~FJES_RX_STOP_REQ_DONE
;
1426 spin_unlock_irqrestore(&hw
->rx_status_lock
,
1433 /* fjes_init_module - Driver Registration Routine */
1434 static int __init
fjes_init_module(void)
1438 pr_info("%s - version %s - %s\n",
1439 fjes_driver_string
, fjes_driver_version
, fjes_copyright
);
1441 result
= platform_driver_register(&fjes_driver
);
1445 result
= acpi_bus_register_driver(&fjes_acpi_driver
);
1447 goto fail_acpi_driver
;
1452 platform_driver_unregister(&fjes_driver
);
1456 module_init(fjes_init_module
);
1458 /* fjes_exit_module - Driver Exit Cleanup Routine */
1459 static void __exit
fjes_exit_module(void)
1461 acpi_bus_unregister_driver(&fjes_acpi_driver
);
1462 platform_driver_unregister(&fjes_driver
);
1465 module_exit(fjes_exit_module
);