/*
 * FUJITSU Extended Socket Network Device driver
 * Copyright (c) 2015 FUJITSU LIMITED
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */
25 /* supported MTU list */
26 const u32 fjes_support_mtu
[] = {
27 FJES_MTU_DEFINE(8 * 1024),
28 FJES_MTU_DEFINE(16 * 1024),
29 FJES_MTU_DEFINE(32 * 1024),
30 FJES_MTU_DEFINE(64 * 1024),
/*
 * fjes_hw_rd32 - read a 32-bit device register at byte offset @reg.
 *
 * NOTE(review): this extraction is line-mangled and missing lines: the
 * declaration of "base" (presumably hw->base, the ioremap'ed BAR -- confirm)
 * and the return of "value" are not visible.  Code left byte-identical;
 * recover the full body from version control.
 */
34 u32
fjes_hw_rd32(struct fjes_hw
*hw
, u32 reg
)
39 value
= readl(&base
[reg
]);
/*
 * fjes_hw_iomap - reserve the device's MMIO range and map it.
 *
 * Claims the region described by hw->hw_res (start/size) via
 * request_mem_region() and maps it with ioremap_nocache().
 *
 * NOTE(review): mangled/incomplete extraction -- the request_mem_region()
 * argument list is cut off mid-call, the failure return and the final
 * "return base;" are missing.  Code left byte-identical.
 * NOTE(review): ioremap_nocache() is deprecated in modern kernels in favor
 * of plain ioremap() -- verify against the target kernel version.
 */
44 static u8
*fjes_hw_iomap(struct fjes_hw
*hw
)
48 if (!request_mem_region(hw
->hw_res
.start
, hw
->hw_res
.size
,
50 pr_err("request_mem_region failed\n");
54 base
= (u8
*)ioremap_nocache(hw
->hw_res
.start
, hw
->hw_res
.size
);
/*
 * fjes_hw_iounmap - undo fjes_hw_iomap: release the reserved MMIO region.
 *
 * NOTE(review): the matching iounmap(hw->base) call is not visible in this
 * extraction (a line is missing before release_mem_region) -- without it the
 * mapping would leak; confirm against version control.  Code left
 * byte-identical.
 */
59 static void fjes_hw_iounmap(struct fjes_hw
*hw
)
62 release_mem_region(hw
->hw_res
.start
, hw
->hw_res
.size
);
/*
 * fjes_hw_reset - request a device reset and poll for completion.
 *
 * Writes the DCTL register, then polls DCTL until the reset bit clears or
 * the timeout (FJES_DEVICE_RESET_TIMEOUT * 1000, presumably milliseconds --
 * confirm) expires.  Returns 0 on success, -EIO on timeout.
 *
 * NOTE(review): incomplete extraction -- the declarations of dctl/timeout,
 * the line setting dctl.bits.reset before the first write, and the delay
 * inside the poll loop are missing.  Code left byte-identical.
 */
65 int fjes_hw_reset(struct fjes_hw
*hw
)
72 wr32(XSCT_DCTL
, dctl
.reg
);
74 timeout
= FJES_DEVICE_RESET_TIMEOUT
* 1000;
75 dctl
.reg
= rd32(XSCT_DCTL
);
76 while ((dctl
.bits
.reset
== 1) && (timeout
> 0)) {
78 dctl
.reg
= rd32(XSCT_DCTL
);
82 return timeout
> 0 ? 0 : -EIO
;
85 static int fjes_hw_get_max_epid(struct fjes_hw
*hw
)
87 union REG_MAX_EP info
;
89 info
.reg
= rd32(XSCT_MAX_EP
);
91 return info
.bits
.maxep
;
94 static int fjes_hw_get_my_epid(struct fjes_hw
*hw
)
96 union REG_OWNER_EPID info
;
98 info
.reg
= rd32(XSCT_OWNER_EPID
);
100 return info
.bits
.epid
;
/*
 * fjes_hw_alloc_shared_status_region - allocate the shared-status area.
 *
 * Sizes the buffer as the fixed fjes_device_shared_info header plus one u8
 * status slot per endpoint (hw->max_epid), kzalloc's it into
 * hw->hw_info.share and records the endpoint count in ->epnum.
 *
 * NOTE(review): incomplete extraction -- the size_t declaration, the
 * -ENOMEM return on allocation failure and the final "return 0;" are
 * missing.  Code left byte-identical.
 */
103 static int fjes_hw_alloc_shared_status_region(struct fjes_hw
*hw
)
107 size
= sizeof(struct fjes_device_shared_info
) +
108 (sizeof(u8
) * hw
->max_epid
);
109 hw
->hw_info
.share
= kzalloc(size
, GFP_KERNEL
);
110 if (!hw
->hw_info
.share
)
113 hw
->hw_info
.share
->epnum
= hw
->max_epid
;
118 static void fjes_hw_free_shared_status_region(struct fjes_hw
*hw
)
120 kfree(hw
->hw_info
.share
);
121 hw
->hw_info
.share
= NULL
;
/*
 * fjes_hw_alloc_epbuf - allocate one endpoint buffer (info header + ring).
 *
 * vzalloc's EP_BUFFER_SIZE bytes; the ep_buffer_info union sits at the
 * start and the data ring begins immediately after it.
 *
 * NOTE(review): incomplete extraction -- the "mem" declaration, the NULL
 * check / -ENOMEM return after vzalloc, and the final "return 0;" are
 * missing.  Code left byte-identical.
 */
124 static int fjes_hw_alloc_epbuf(struct epbuf_handler
*epbh
)
128 mem
= vzalloc(EP_BUFFER_SIZE
);
133 epbh
->size
= EP_BUFFER_SIZE
;
135 epbh
->info
= (union ep_buffer_info
*)mem
;
136 epbh
->ring
= (u8
*)(mem
+ sizeof(union ep_buffer_info
));
/*
 * fjes_hw_free_epbuf - release an endpoint buffer allocated by
 * fjes_hw_alloc_epbuf().
 *
 * NOTE(review): only the signature survived this extraction; the entire
 * body (presumably vfree + pointer/size reset -- confirm) is missing.
 * Recover from version control before building.
 */
141 static void fjes_hw_free_epbuf(struct epbuf_handler
*epbh
)
/*
 * fjes_hw_setup_epbuf - (re)initialize an endpoint buffer's shared info.
 *
 * Saves the per-buffer VLAN ID table, zeroes the whole ep_buffer_info,
 * then rebuilds the version-0 layout: MAC address, info/buffer sizes,
 * the maximum frame size derived from @mtu, the ring entry count, and
 * finally restores the saved VLAN IDs (so a re-setup keeps VLAN config).
 *
 * NOTE(review): incomplete extraction -- the "int i;" declaration is
 * missing, and the gaps in the embedded line numbers suggest further field
 * initializations were dropped (confirm against version control).  Code
 * left byte-identical.
 */
153 void fjes_hw_setup_epbuf(struct epbuf_handler
*epbh
, u8
*mac_addr
, u32 mtu
)
155 union ep_buffer_info
*info
= epbh
->info
;
156 u16 vlan_id
[EP_BUFFER_SUPPORT_VLAN_MAX
];
159 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++)
160 vlan_id
[i
] = info
->v1i
.vlan_id
[i
];
162 memset(info
, 0, sizeof(union ep_buffer_info
));
164 info
->v1i
.version
= 0; /* version 0 */
166 for (i
= 0; i
< ETH_ALEN
; i
++)
167 info
->v1i
.mac_addr
[i
] = mac_addr
[i
];
172 info
->v1i
.info_size
= sizeof(union ep_buffer_info
);
173 info
->v1i
.buffer_size
= epbh
->size
- info
->v1i
.info_size
;
175 info
->v1i
.frame_max
= FJES_MTU_TO_FRAME_SIZE(mtu
);
176 info
->v1i
.count_max
=
177 EP_RING_NUM(info
->v1i
.buffer_size
, info
->v1i
.frame_max
);
179 for (i
= 0; i
< EP_BUFFER_SUPPORT_VLAN_MAX
; i
++)
180 info
->v1i
.vlan_id
[i
] = vlan_id
[i
];
/*
 * fjes_hw_init_command_registers - program the command interface registers.
 *
 * Tells the device where the request/response buffers and the shared-status
 * area live: lengths first, then each 64-bit physical address split into a
 * low 32-bit half (GENMASK_ULL(31, 0)) and a high half (bits 63:32 shifted
 * down) written to separate registers.
 *
 * NOTE(review): incomplete extraction -- the return type line and the
 * wr32(<register>, ...) call heads for the six address writes are missing
 * (only the value expressions remain).  Code left byte-identical.
 */
184 fjes_hw_init_command_registers(struct fjes_hw
*hw
,
185 struct fjes_device_command_param
*param
)
187 /* Request Buffer length */
188 wr32(XSCT_REQBL
, (__le32
)(param
->req_len
));
189 /* Response Buffer Length */
190 wr32(XSCT_RESPBL
, (__le32
)(param
->res_len
));
192 /* Request Buffer Address */
194 (__le32
)(param
->req_start
& GENMASK_ULL(31, 0)));
196 (__le32
)((param
->req_start
& GENMASK_ULL(63, 32)) >> 32));
198 /* Response Buffer Address */
200 (__le32
)(param
->res_start
& GENMASK_ULL(31, 0)));
202 (__le32
)((param
->res_start
& GENMASK_ULL(63, 32)) >> 32));
204 /* Share status address */
206 (__le32
)(param
->share_start
& GENMASK_ULL(31, 0)));
208 (__le32
)((param
->share_start
& GENMASK_ULL(63, 32)) >> 32));
/*
 * fjes_hw_setup - allocate all per-endpoint resources and arm the command
 * interface.
 *
 * Allocates the ep_shm_info array (one tx/rx epbuf pair per endpoint other
 * than our own, each initialized with a zero MAC and the smallest supported
 * MTU), the command request/response buffers, and the shared status region;
 * then fills a fjes_device_command_param with the physical addresses
 * (__pa) of those buffers and hands it to fjes_hw_init_command_registers().
 *
 * NOTE(review): incomplete extraction -- local declarations (buf, mem_size,
 * result, epidx), allocation-failure returns, the kcalloc GFP argument,
 * loop-closing braces and the final return are missing.  Also "&param" was
 * mis-encoded as "¶m" (twice below); fix when restoring from version
 * control.  Code left byte-identical.
 */
211 static int fjes_hw_setup(struct fjes_hw
*hw
)
213 u8 mac
[ETH_ALEN
] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
214 struct fjes_device_command_param param
;
215 struct ep_share_mem_info
*buf_pair
;
221 hw
->hw_info
.max_epid
= &hw
->max_epid
;
222 hw
->hw_info
.my_epid
= &hw
->my_epid
;
224 buf
= kcalloc(hw
->max_epid
, sizeof(struct ep_share_mem_info
),
229 hw
->ep_shm_info
= (struct ep_share_mem_info
*)buf
;
231 mem_size
= FJES_DEV_REQ_BUF_SIZE(hw
->max_epid
);
232 hw
->hw_info
.req_buf
= kzalloc(mem_size
, GFP_KERNEL
);
233 if (!(hw
->hw_info
.req_buf
))
236 hw
->hw_info
.req_buf_size
= mem_size
;
238 mem_size
= FJES_DEV_RES_BUF_SIZE(hw
->max_epid
);
239 hw
->hw_info
.res_buf
= kzalloc(mem_size
, GFP_KERNEL
);
240 if (!(hw
->hw_info
.res_buf
))
243 hw
->hw_info
.res_buf_size
= mem_size
;
245 result
= fjes_hw_alloc_shared_status_region(hw
);
249 hw
->hw_info
.buffer_share_bit
= 0;
250 hw
->hw_info
.buffer_unshare_reserve_bit
= 0;
252 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
253 if (epidx
!= hw
->my_epid
) {
254 buf_pair
= &hw
->ep_shm_info
[epidx
];
256 result
= fjes_hw_alloc_epbuf(&buf_pair
->tx
);
260 result
= fjes_hw_alloc_epbuf(&buf_pair
->rx
);
264 fjes_hw_setup_epbuf(&buf_pair
->tx
, mac
,
265 fjes_support_mtu
[0]);
266 fjes_hw_setup_epbuf(&buf_pair
->rx
, mac
,
267 fjes_support_mtu
[0]);
271 memset(¶m
, 0, sizeof(param
));
273 param
.req_len
= hw
->hw_info
.req_buf_size
;
274 param
.req_start
= __pa(hw
->hw_info
.req_buf
);
275 param
.res_len
= hw
->hw_info
.res_buf_size
;
276 param
.res_start
= __pa(hw
->hw_info
.res_buf
);
278 param
.share_start
= __pa(hw
->hw_info
.share
->ep_status
);
280 fjes_hw_init_command_registers(hw
, ¶m
);
/*
 * fjes_hw_cleanup - free everything fjes_hw_setup() allocated.
 *
 * Bails out early when ep_shm_info was never allocated, then releases the
 * shared status region, the request/response buffers (NULLing the pointers
 * against double-free), every endpoint's tx/rx epbufs (skipping our own
 * epid, which never had buffers), and finally the ep_shm_info array itself.
 *
 * NOTE(review): incomplete extraction -- the epidx declaration, the early
 * "return;", the "continue;" under the my_epid check and closing braces
 * are missing.  Code left byte-identical.
 */
285 static void fjes_hw_cleanup(struct fjes_hw
*hw
)
289 if (!hw
->ep_shm_info
)
292 fjes_hw_free_shared_status_region(hw
);
294 kfree(hw
->hw_info
.req_buf
);
295 hw
->hw_info
.req_buf
= NULL
;
297 kfree(hw
->hw_info
.res_buf
);
298 hw
->hw_info
.res_buf
= NULL
;
300 for (epidx
= 0; epidx
< hw
->max_epid
; epidx
++) {
301 if (epidx
== hw
->my_epid
)
303 fjes_hw_free_epbuf(&hw
->ep_shm_info
[epidx
].tx
);
304 fjes_hw_free_epbuf(&hw
->ep_shm_info
[epidx
].rx
);
307 kfree(hw
->ep_shm_info
);
308 hw
->ep_shm_info
= NULL
;
/*
 * fjes_hw_init - bring the hardware up: map MMIO, reset, mask all
 * interrupts, read the endpoint topology and run fjes_hw_setup().
 *
 * Reads max_epid / my_epid from the device and sanity-checks them
 * (max_epid must be nonzero and my_epid must be in range).
 *
 * NOTE(review): incomplete extraction -- the "ret" declaration, the NULL
 * check on the iomap result, the error returns after reset and the range
 * check, and the final return are missing.  Code left byte-identical.
 */
311 int fjes_hw_init(struct fjes_hw
*hw
)
315 hw
->base
= fjes_hw_iomap(hw
);
319 ret
= fjes_hw_reset(hw
);
323 fjes_hw_set_irqmask(hw
, REG_ICTL_MASK_ALL
, true);
325 mutex_init(&hw
->hw_info
.lock
);
327 hw
->max_epid
= fjes_hw_get_max_epid(hw
);
328 hw
->my_epid
= fjes_hw_get_my_epid(hw
);
330 if ((hw
->max_epid
== 0) || (hw
->my_epid
>= hw
->max_epid
))
333 ret
= fjes_hw_setup(hw
);
/*
 * fjes_hw_exit - tear the hardware down; resets the device and logs on
 * failure.
 *
 * NOTE(review): incomplete extraction -- the "ret" declaration, the guard
 * around the base mapping, the iounmap/cleanup calls one would expect to
 * mirror fjes_hw_init() are missing (confirm against version control).
 * Code left byte-identical.
 */
338 void fjes_hw_exit(struct fjes_hw
*hw
)
343 ret
= fjes_hw_reset(hw
);
345 pr_err("%s: reset error", __func__
);
/*
 * fjes_hw_issue_request_command - submit one command to the device and
 * poll for completion.
 *
 * Sets req_start/req_code in the CR register, writes it, reads it back,
 * and if the device did not flag an immediate error, polls the CS register
 * until its "complete" bit is set or FJES_COMMAND_REQ_TIMEOUT expires.
 * On immediate error, CR's err_info field is translated to a
 * FJES_CMD_STATUS_* value; otherwise NORMAL / TIMEOUT is returned, with
 * UNKNOWN as the fallback.
 *
 * NOTE(review): incomplete extraction -- the cr/cs/timeout declarations,
 * the delay + timeout decrement inside the poll loop, the "} else {"
 * joining the two halves, case "break;"s and the final return are missing.
 * Code left byte-identical.
 */
354 static enum fjes_dev_command_response_e
355 fjes_hw_issue_request_command(struct fjes_hw
*hw
,
356 enum fjes_dev_command_request_type type
)
358 enum fjes_dev_command_response_e ret
= FJES_CMD_STATUS_UNKNOWN
;
364 cr
.bits
.req_start
= 1;
365 cr
.bits
.req_code
= type
;
366 wr32(XSCT_CR
, cr
.reg
);
367 cr
.reg
= rd32(XSCT_CR
);
369 if (cr
.bits
.error
== 0) {
370 timeout
= FJES_COMMAND_REQ_TIMEOUT
* 1000;
371 cs
.reg
= rd32(XSCT_CS
);
373 while ((cs
.bits
.complete
!= 1) && timeout
> 0) {
375 cs
.reg
= rd32(XSCT_CS
);
379 if (cs
.bits
.complete
== 1)
380 ret
= FJES_CMD_STATUS_NORMAL
;
381 else if (timeout
<= 0)
382 ret
= FJES_CMD_STATUS_TIMEOUT
;
385 switch (cr
.bits
.err_info
) {
386 case FJES_CMD_REQ_ERR_INFO_PARAM
:
387 ret
= FJES_CMD_STATUS_ERROR_PARAM
;
389 case FJES_CMD_REQ_ERR_INFO_STATUS
:
390 ret
= FJES_CMD_STATUS_ERROR_STATUS
;
393 ret
= FJES_CMD_STATUS_UNKNOWN
;
/*
 * fjes_hw_request_info - issue the INFO command and validate the reply.
 *
 * Zeroes both command buffers, fills in the INFO request length, clears the
 * response header, issues FJES_CMD_REQ_INFO, then checks that the response
 * length matches FJES_DEV_COMMAND_INFO_RES_LEN for the current max_epid
 * before dispatching on the response / command status codes.
 *
 * NOTE(review): incomplete extraction -- the "result" variable, the error
 * assignments inside each case, "break;"s, closing braces and the final
 * return are missing.  Note also that FJES_CMD_STATUS_* values appear as
 * cases of a switch on res_buf->info.code in this fragment; in context
 * there is presumably a second switch on "ret" (confirm).  Code left
 * byte-identical.
 */
401 int fjes_hw_request_info(struct fjes_hw
*hw
)
403 union fjes_device_command_req
*req_buf
= hw
->hw_info
.req_buf
;
404 union fjes_device_command_res
*res_buf
= hw
->hw_info
.res_buf
;
405 enum fjes_dev_command_response_e ret
;
408 memset(req_buf
, 0, hw
->hw_info
.req_buf_size
);
409 memset(res_buf
, 0, hw
->hw_info
.res_buf_size
);
411 req_buf
->info
.length
= FJES_DEV_COMMAND_INFO_REQ_LEN
;
413 res_buf
->info
.length
= 0;
414 res_buf
->info
.code
= 0;
416 ret
= fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_INFO
);
420 if (FJES_DEV_COMMAND_INFO_RES_LEN((*hw
->hw_info
.max_epid
)) !=
421 res_buf
->info
.length
) {
423 } else if (ret
== FJES_CMD_STATUS_NORMAL
) {
424 switch (res_buf
->info
.code
) {
425 case FJES_CMD_REQ_RES_CODE_NORMAL
:
434 case FJES_CMD_STATUS_UNKNOWN
:
437 case FJES_CMD_STATUS_TIMEOUT
:
440 case FJES_CMD_STATUS_ERROR_PARAM
:
443 case FJES_CMD_STATUS_ERROR_STATUS
:
/*
 * fjes_hw_register_buff_addr - share our tx/rx buffers with @dest_epid.
 *
 * Skips endpoints already marked in buffer_share_bit.  Builds a
 * SHARE_BUFFER request: for each of tx then rx, the buffer size followed by
 * the physical address of every EP_BUFFER_INFO_SIZE-sized page
 * (vmalloc_to_page + offset, since the epbufs are vzalloc'ed).  Issues the
 * command, and while the device answers BUSY, sleeps 200ms + 20ms * my_epid
 * (staggered per endpoint to avoid lock-step retries -- presumably; confirm)
 * and reissues until the timeout budget is spent.  On NORMAL completion,
 * marks dest_epid in buffer_share_bit.
 *
 * NOTE(review): incomplete extraction -- the idx/page_count/i/addr/timeout
 * declarations, parameter-validation guards, the REQ_LEN argument list,
 * the "(timeout > 0)" clause closing the retry-loop condition, case
 * "break;"s and the final return are missing.  Code left byte-identical.
 */
455 int fjes_hw_register_buff_addr(struct fjes_hw
*hw
, int dest_epid
,
456 struct ep_share_mem_info
*buf_pair
)
458 union fjes_device_command_req
*req_buf
= hw
->hw_info
.req_buf
;
459 union fjes_device_command_res
*res_buf
= hw
->hw_info
.res_buf
;
460 enum fjes_dev_command_response_e ret
;
467 if (test_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
))
470 memset(req_buf
, 0, hw
->hw_info
.req_buf_size
);
471 memset(res_buf
, 0, hw
->hw_info
.res_buf_size
);
473 req_buf
->share_buffer
.length
= FJES_DEV_COMMAND_SHARE_BUFFER_REQ_LEN(
476 req_buf
->share_buffer
.epid
= dest_epid
;
479 req_buf
->share_buffer
.buffer
[idx
++] = buf_pair
->tx
.size
;
480 page_count
= buf_pair
->tx
.size
/ EP_BUFFER_INFO_SIZE
;
481 for (i
= 0; i
< page_count
; i
++) {
482 addr
= ((u8
*)(buf_pair
->tx
.buffer
)) +
483 (i
* EP_BUFFER_INFO_SIZE
);
484 req_buf
->share_buffer
.buffer
[idx
++] =
485 (__le64
)(page_to_phys(vmalloc_to_page(addr
)) +
486 offset_in_page(addr
));
489 req_buf
->share_buffer
.buffer
[idx
++] = buf_pair
->rx
.size
;
490 page_count
= buf_pair
->rx
.size
/ EP_BUFFER_INFO_SIZE
;
491 for (i
= 0; i
< page_count
; i
++) {
492 addr
= ((u8
*)(buf_pair
->rx
.buffer
)) +
493 (i
* EP_BUFFER_INFO_SIZE
);
494 req_buf
->share_buffer
.buffer
[idx
++] =
495 (__le64
)(page_to_phys(vmalloc_to_page(addr
)) +
496 offset_in_page(addr
));
499 res_buf
->share_buffer
.length
= 0;
500 res_buf
->share_buffer
.code
= 0;
502 ret
= fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_SHARE_BUFFER
);
504 timeout
= FJES_COMMAND_REQ_BUFF_TIMEOUT
* 1000;
505 while ((ret
== FJES_CMD_STATUS_NORMAL
) &&
506 (res_buf
->share_buffer
.length
==
507 FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN
) &&
508 (res_buf
->share_buffer
.code
== FJES_CMD_REQ_RES_CODE_BUSY
) &&
510 msleep(200 + hw
->my_epid
* 20);
511 timeout
-= (200 + hw
->my_epid
* 20);
513 res_buf
->share_buffer
.length
= 0;
514 res_buf
->share_buffer
.code
= 0;
516 ret
= fjes_hw_issue_request_command(
517 hw
, FJES_CMD_REQ_SHARE_BUFFER
);
522 if (res_buf
->share_buffer
.length
!=
523 FJES_DEV_COMMAND_SHARE_BUFFER_RES_LEN
)
525 else if (ret
== FJES_CMD_STATUS_NORMAL
) {
526 switch (res_buf
->share_buffer
.code
) {
527 case FJES_CMD_REQ_RES_CODE_NORMAL
:
529 set_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
);
531 case FJES_CMD_REQ_RES_CODE_BUSY
:
540 case FJES_CMD_STATUS_UNKNOWN
:
543 case FJES_CMD_STATUS_TIMEOUT
:
546 case FJES_CMD_STATUS_ERROR_PARAM
:
547 case FJES_CMD_STATUS_ERROR_STATUS
:
/*
 * fjes_hw_unregister_buff_addr - revoke buffer sharing with @dest_epid.
 *
 * Validates that the command buffers and shared-status area exist and that
 * dest_epid is currently marked shared.  Builds an UNSHARE_BUFFER request,
 * issues it, and -- mirroring fjes_hw_register_buff_addr() -- retries with
 * the same staggered 200ms + 20ms * my_epid sleep while the device answers
 * BUSY, within the FJES_COMMAND_REQ_BUFF_TIMEOUT budget.  On NORMAL
 * completion, clears dest_epid from buffer_share_bit.
 *
 * NOTE(review): incomplete extraction -- the timeout declaration, the error
 * returns after both guards, the "(timeout > 0)" clause of the retry loop,
 * the "ret =" on the re-issued command, case "break;"s and the final
 * return are missing.  Code left byte-identical.
 */
557 int fjes_hw_unregister_buff_addr(struct fjes_hw
*hw
, int dest_epid
)
559 union fjes_device_command_req
*req_buf
= hw
->hw_info
.req_buf
;
560 union fjes_device_command_res
*res_buf
= hw
->hw_info
.res_buf
;
561 struct fjes_device_shared_info
*share
= hw
->hw_info
.share
;
562 enum fjes_dev_command_response_e ret
;
569 if (!req_buf
|| !res_buf
|| !share
)
572 if (!test_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
))
575 memset(req_buf
, 0, hw
->hw_info
.req_buf_size
);
576 memset(res_buf
, 0, hw
->hw_info
.res_buf_size
);
578 req_buf
->unshare_buffer
.length
=
579 FJES_DEV_COMMAND_UNSHARE_BUFFER_REQ_LEN
;
580 req_buf
->unshare_buffer
.epid
= dest_epid
;
582 res_buf
->unshare_buffer
.length
= 0;
583 res_buf
->unshare_buffer
.code
= 0;
585 ret
= fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_UNSHARE_BUFFER
);
587 timeout
= FJES_COMMAND_REQ_BUFF_TIMEOUT
* 1000;
588 while ((ret
== FJES_CMD_STATUS_NORMAL
) &&
589 (res_buf
->unshare_buffer
.length
==
590 FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN
) &&
591 (res_buf
->unshare_buffer
.code
==
592 FJES_CMD_REQ_RES_CODE_BUSY
) &&
594 msleep(200 + hw
->my_epid
* 20);
595 timeout
-= (200 + hw
->my_epid
* 20);
597 res_buf
->unshare_buffer
.length
= 0;
598 res_buf
->unshare_buffer
.code
= 0;
601 fjes_hw_issue_request_command(hw
, FJES_CMD_REQ_UNSHARE_BUFFER
);
606 if (res_buf
->unshare_buffer
.length
!=
607 FJES_DEV_COMMAND_UNSHARE_BUFFER_RES_LEN
) {
609 } else if (ret
== FJES_CMD_STATUS_NORMAL
) {
610 switch (res_buf
->unshare_buffer
.code
) {
611 case FJES_CMD_REQ_RES_CODE_NORMAL
:
613 clear_bit(dest_epid
, &hw
->hw_info
.buffer_share_bit
);
615 case FJES_CMD_REQ_RES_CODE_BUSY
:
624 case FJES_CMD_STATUS_UNKNOWN
:
627 case FJES_CMD_STATUS_TIMEOUT
:
630 case FJES_CMD_STATUS_ERROR_PARAM
:
631 case FJES_CMD_STATUS_ERROR_STATUS
:
641 void fjes_hw_set_irqmask(struct fjes_hw
*hw
,
642 enum REG_ICTL_MASK intr_mask
, bool mask
)
645 wr32(XSCT_IMS
, intr_mask
);
647 wr32(XSCT_IMC
, intr_mask
);