/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

enum {
        MLX5_PAGES_CANT_GIVE    = 0,
        MLX5_PAGES_GIVE         = 1,
        MLX5_PAGES_TAKE         = 2
};

enum {
        MLX5_BOOT_PAGES         = 1,
        MLX5_INIT_PAGES         = 2,
        MLX5_POST_INIT_PAGES    = 3
};

struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
        u16     func_id;
        s32     npages;
        struct work_struct work;
};

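/*
 * Tracks one host page handed to the firmware. Each host page is carved
 * into MLX5_NUM_4K_IN_PAGE 4K chunks; bitmask records which chunks are
 * still free (bit set = chunk free).
 */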
struct fw_page {
        struct rb_node          rb_node;
        u64                     addr;
        struct page            *page;
        u16                     func_id;
        unsigned long           bitmask;
        struct list_head        list;
        unsigned                free_count;
};

struct mlx5_query_pages_inbox {
        struct mlx5_inbox_hdr   hdr;
        u8                      rsvd[8];
};

struct mlx5_query_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
        __be16                  rsvd;
        __be16                  func_id;
        __be32                  num_pages;
};

struct mlx5_manage_pages_inbox {
        struct mlx5_inbox_hdr   hdr;
        __be16                  rsvd;
        __be16                  func_id;
        __be32                  num_entries;
        __be64                  pas[0];
};

struct mlx5_manage_pages_outbox {
        struct mlx5_outbox_hdr  hdr;
        __be32                  num_entries;
        u8                      rsvd[4];
        __be64                  pas[0];
};

enum {
        MAX_RECLAIM_TIME_MSECS          = 5000,
        MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
        MLX5_MAX_RECLAIM_TIME_MILI      = 5000,
        MLX5_NUM_4K_IN_PAGE             = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

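/*
 * Insert a newly allocated page into the page_root rb-tree, keyed by DMA
 * address, and put it on the free list with every 4K chunk marked free.
 * Returns -EEXIST if the address is already tracked.
 */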
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
        struct rb_root *root = &dev->priv.page_root;
        struct rb_node **new = &root->rb_node;
        struct rb_node *parent = NULL;
        struct fw_page *nfp;
        struct fw_page *tfp;
        int i;

        while (*new) {
                parent = *new;
                tfp = rb_entry(parent, struct fw_page, rb_node);
                if (tfp->addr < addr)
                        new = &parent->rb_left;
                else if (tfp->addr > addr)
                        new = &parent->rb_right;
                else
                        return -EEXIST;
        }

        nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
        if (!nfp)
                return -ENOMEM;

        nfp->addr = addr;
        nfp->page = page;
        nfp->func_id = func_id;
        nfp->free_count = MLX5_NUM_4K_IN_PAGE;
        for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
                set_bit(i, &nfp->bitmask);

        rb_link_node(&nfp->rb_node, parent, new);
        rb_insert_color(&nfp->rb_node, root);
        list_add(&nfp->list, &dev->priv.free_list);

        return 0;
}

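/*
 * Look up the fw_page tracking structure for a page-aligned DMA address.
 * The comparison order mirrors insert_page() so the tree is traversed
 * consistently. Returns NULL if the address is not tracked.
 */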
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
        struct rb_root *root = &dev->priv.page_root;
        struct rb_node *tmp = root->rb_node;
        struct fw_page *result = NULL;
        struct fw_page *tfp;

        while (tmp) {
                tfp = rb_entry(tmp, struct fw_page, rb_node);
                if (tfp->addr < addr) {
                        tmp = tmp->rb_left;
                } else if (tfp->addr > addr) {
                        tmp = tmp->rb_right;
                } else {
                        result = tfp;
                        break;
                }
        }

        return result;
}

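/*
 * Ask the firmware how many pages it needs at this stage (boot or init).
 * A negative *npages indicates the firmware wants to return pages.
 */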
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
                                s32 *npages, int boot)
{
        struct mlx5_query_pages_inbox  in;
        struct mlx5_query_pages_outbox out;
        int err;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
        in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);

        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
        if (err)
                return err;

        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);

        *npages = be32_to_cpu(out.num_pages);
        *func_id = be16_to_cpu(out.func_id);

        return err;
}

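/*
 * Hand out one 4K chunk from the first page on the free list. When the
 * last chunk of a page is taken, the page leaves the free list (it stays
 * in the rb-tree until every chunk is freed again).
 */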
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
        struct fw_page *fp;
        unsigned n;

        if (list_empty(&dev->priv.free_list))
                return -ENOMEM;

        fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
        n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
        if (n >= MLX5_NUM_4K_IN_PAGE) {
                mlx5_core_warn(dev, "alloc 4k bug\n");
                return -ENOENT;
        }
        clear_bit(n, &fp->bitmask);
        fp->free_count--;
        if (!fp->free_count)
                list_del(&fp->list);

        *addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

        return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

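/*
 * Return a 4K chunk to its owning page. When every chunk of the page is
 * free again the page is untracked, DMA-unmapped and released; when the
 * first chunk comes back the page rejoins the free list.
 */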
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
        struct fw_page *fwp;
        int n;

        fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
        if (!fwp) {
                mlx5_core_warn(dev, "page not found\n");
                return;
        }

        n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
        fwp->free_count++;
        set_bit(n, &fwp->bitmask);
        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
                rb_erase(&fwp->rb_node, &dev->priv.page_root);
                if (fwp->free_count != 1)
                        list_del(&fwp->list);
                dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                __free_page(fwp->page);
                kfree(fwp);
        } else if (fwp->free_count == 1) {
                list_add(&fwp->list, &dev->priv.free_list);
        }
}

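/*
 * Allocate a fresh host page near the device's NUMA node, DMA-map it and
 * start tracking it so its 4K chunks can be handed to the firmware.
 */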
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
        struct page *page;
        u64 addr;
        int err;
        int nid = dev_to_node(&dev->pdev->dev);

        page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
        if (!page) {
                mlx5_core_warn(dev, "failed to allocate page\n");
                return -ENOMEM;
        }
        addr = dma_map_page(&dev->pdev->dev, page, 0,
                            PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&dev->pdev->dev, addr)) {
                mlx5_core_warn(dev, "failed dma mapping page\n");
                err = -ENOMEM;
                goto out_alloc;
        }
        err = insert_page(dev, addr, page, func_id);
        if (err) {
                mlx5_core_err(dev, "failed to track allocated page\n");
                goto out_mapping;
        }

        return 0;

out_mapping:
        dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
        __free_page(page);

        return err;
}

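/*
 * Tell the firmware we could not satisfy its page request, using the
 * MLX5_PAGES_CANT_GIVE opmod, so it does not wait forever.
 */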
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
        struct mlx5_manage_pages_inbox *in;
        struct mlx5_manage_pages_outbox out;
        int err;

        in = kzalloc(sizeof(*in), GFP_KERNEL);
        if (!in)
                return;

        memset(&out, 0, sizeof(out));
        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_CANT_GIVE);
        in->func_id = cpu_to_be16(func_id);
        err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
        if (!err)
                err = mlx5_cmd_status_to_err(&out.hdr);

        if (err)
                mlx5_core_warn(dev, "page notify failed\n");

        kfree(in);
}

static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                      int notify_fail)
{
        struct mlx5_manage_pages_inbox *in;
        struct mlx5_manage_pages_outbox out;
        int inlen;
        u64 addr;
        int err;
        int i;

        inlen = sizeof(*in) + npages * sizeof(in->pas[0]);
        in = mlx5_vzalloc(inlen);
        if (!in) {
                err = -ENOMEM;
                mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
                goto out_free;
        }
        memset(&out, 0, sizeof(out));

        for (i = 0; i < npages; i++) {
retry:
                err = alloc_4k(dev, &addr);
                if (err) {
                        if (err == -ENOMEM)
                                err = alloc_system_page(dev, func_id);
                        if (err)
                                goto out_4k;

                        goto retry;
                }
                in->pas[i] = cpu_to_be64(addr);
        }

        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
        in->func_id = cpu_to_be16(func_id);
        in->num_entries = cpu_to_be32(npages);
        err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
        if (err) {
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
                               func_id, npages, err);
                goto out_4k;
        }

        err = mlx5_cmd_status_to_err(&out.hdr);
        if (err) {
                mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
                               func_id, npages, out.hdr.status);
                goto out_4k;
        }

        dev->priv.fw_pages += npages;
        if (func_id)
                dev->priv.vfs_pages += npages;

        mlx5_core_dbg(dev, "err %d\n", err);

        kvfree(in);
        return 0;

out_4k:
        for (i--; i >= 0; i--)
                free_4k(dev, be64_to_cpu(in->pas[i]));
out_free:
        kvfree(in);
        if (notify_fail)
                page_notify_fail(dev, func_id);
        return err;
}

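/*
 * Issue the MANAGE_PAGES(take) command. If the device is in internal
 * error state the firmware cannot answer, so the outbox is synthesized
 * from the driver's own rb-tree instead, letting teardown proceed.
 */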
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
                             struct mlx5_manage_pages_inbox *in, int in_size,
                             struct mlx5_manage_pages_outbox *out, int out_size)
{
        struct fw_page *fwp;
        struct rb_node *p;
        u32 npages;
        u32 i = 0;

        if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
                return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
                                                  (u32 *)out, out_size);

        npages = be32_to_cpu(in->num_entries);

        p = rb_first(&dev->priv.page_root);
        while (p && i < npages) {
                fwp = rb_entry(p, struct fw_page, rb_node);
                out->pas[i] = cpu_to_be64(fwp->addr);
                p = rb_next(p);
                i++;
        }

        out->num_entries = cpu_to_be32(i);
        return 0;
}

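/*
 * Ask the firmware to return up to npages 4K chunks for func_id and free
 * every chunk it reports back. The number actually reclaimed is returned
 * through nclaimed when the caller asks for it.
 */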
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
                         int *nclaimed)
{
        struct mlx5_manage_pages_inbox   in;
        struct mlx5_manage_pages_outbox *out;
        int num_claimed;
        int outlen;
        u64 addr;
        int err;
        int i;

        if (nclaimed)
                *nclaimed = 0;

        memset(&in, 0, sizeof(in));
        outlen = sizeof(*out) + npages * sizeof(out->pas[0]);
        out = mlx5_vzalloc(outlen);
        if (!out)
                return -ENOMEM;

        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
        in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
        in.func_id = cpu_to_be16(func_id);
        in.num_entries = cpu_to_be32(npages);
        mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
        err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
        if (err) {
                mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
                goto out_free;
        }

        num_claimed = be32_to_cpu(out->num_entries);
        if (num_claimed > npages) {
                mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
                               num_claimed, npages);
                err = -EINVAL;
                goto out_free;
        }

        for (i = 0; i < num_claimed; i++) {
                addr = be64_to_cpu(out->pas[i]);
                free_4k(dev, addr);
        }

        if (nclaimed)
                *nclaimed = num_claimed;

        dev->priv.fw_pages -= num_claimed;
        if (func_id)
                dev->priv.vfs_pages -= num_claimed;

out_free:
        kvfree(out);
        return err;
}

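/*
 * Workqueue handler for firmware page requests: a negative npages is a
 * reclaim request, a positive one a request for more pages.
 */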
static void pages_work_handler(struct work_struct *work)
{
        struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
        struct mlx5_core_dev *dev = req->dev;
        int err = 0;

        if (req->npages < 0)
                err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
        else if (req->npages > 0)
                err = give_pages(dev, req->func_id, req->npages, 1);

        if (err)
                mlx5_core_warn(dev, "%s fail %d\n",
                               req->npages < 0 ? "reclaim" : "give", err);

        kfree(req);
}

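/*
 * Called when the firmware raises a page request event. This runs in
 * atomic context, hence GFP_ATOMIC; the real work is deferred to the
 * pg_wq workqueue.
 */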
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
                                 s32 npages)
{
        struct mlx5_pages_req *req;

        req = kzalloc(sizeof(*req), GFP_ATOMIC);
        if (!req) {
                mlx5_core_warn(dev, "failed to allocate pages request\n");
                return;
        }

        req->dev = dev;
        req->func_id = func_id;
        req->npages = npages;
        INIT_WORK(&req->work, pages_work_handler);
        queue_work(dev->priv.pg_wq, &req->work);
}

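/*
 * Satisfy the firmware's boot- or init-time page requirements by querying
 * how many pages it wants and giving them in one shot, without notifying
 * failure back to the firmware.
 */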
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
        u16 uninitialized_var(func_id);
        s32 uninitialized_var(npages);
        int err;

        err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
        if (err)
                return err;

        mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
                      npages, boot ? "boot" : "init", func_id);

        return give_pages(dev, func_id, npages, 0);
}

enum {
        MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

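/*
 * Number of 4K page addresses that fit in one reclaim command outbox,
 * given the command layout and MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks.
 */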
static int optimal_reclaimed_pages(void)
{
        struct mlx5_cmd_prot_block *block;
        struct mlx5_cmd_layout *lay;
        int ret;

        ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
               sizeof(struct mlx5_manage_pages_outbox)) /
               FIELD_SIZEOF(struct mlx5_manage_pages_outbox, pas[0]);

        return ret;
}

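/*
 * Reclaim every page the firmware holds, in optimal_reclaimed_pages()
 * sized batches. The deadline is pushed out whenever progress is made;
 * we give up once MAX_RECLAIM_TIME_MSECS pass without any.
 */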
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
        unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
        struct fw_page *fwp;
        struct rb_node *p;
        int nclaimed = 0;
        int err = 0;

        do {
                p = rb_first(&dev->priv.page_root);
                if (p) {
                        fwp = rb_entry(p, struct fw_page, rb_node);
                        err = reclaim_pages(dev, fwp->func_id,
                                            optimal_reclaimed_pages(),
                                            &nclaimed);
                        if (err) {
                                mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
                                               err);
                                return err;
                        }
                        if (nclaimed)
                                end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
                }
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
                        break;
                }
        } while (p);

        WARN(dev->priv.fw_pages,
             "FW pages counter is %d after reclaiming all pages\n",
             dev->priv.fw_pages);
        WARN(dev->priv.vfs_pages,
             "VFs FW pages counter is %d after reclaiming all pages\n",
             dev->priv.vfs_pages);

        return 0;
}

void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
        dev->priv.page_root = RB_ROOT;
        INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
        dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
        if (!dev->priv.pg_wq)
                return -ENOMEM;

        return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
        destroy_workqueue(dev->priv.pg_wq);
}

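/*
 * Wait for all VF pages to be returned after SR-IOV teardown. Like the
 * reclaim above, the timeout is restarted whenever the outstanding page
 * count drops; we abort after MAX_RECLAIM_VFS_PAGES_TIME_MSECS with no
 * progress.
 */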
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
{
        unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
        int prev_vfs_pages = dev->priv.vfs_pages;

        mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
                      dev->priv.name);
        while (dev->priv.vfs_pages) {
                if (time_after(jiffies, end)) {
                        mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
                        return -ETIMEDOUT;
                }
                if (dev->priv.vfs_pages < prev_vfs_pages) {
                        end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
                        prev_vfs_pages = dev->priv.vfs_pages;
                }
                msleep(50);
        }

        mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
        return 0;
}