/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
 * to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 *
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be used by
 *     mlx5_fc_stats_work(). addlist is protected by a spinlock.
 *   - spawn the work to do the actual add
 *
 * - destroy (user context)
 *   - mark a counter as deleted
 *   - spawn the work to do the actual del
 *
 * - dump (user context)
 *   - user should not call dump after destroy
 *
 * - query (single threaded workqueue context)
 *   - destroy/dump - no conflict (see destroy)
 *   - query/dump - packets and bytes might be inconsistent (since the update
 *     is not atomic)
 *   - query/create - no conflict (see create)
 *   - since every create/destroy spawns the work, the thread only queries
 *     the hardware after the necessary time has elapsed
 */
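/*
 * Hedged usage sketch (not an in-tree consumer; error handling and the
 * caller-side serialization, e.g. RTNL in en_tc.c, are assumed):
 *
 *	struct mlx5_fc *fc;
 *	u64 bytes, packets, lastuse;
 *
 *	fc = mlx5_fc_create(dev, true);    // aging: polled by the stats work
 *	if (IS_ERR(fc))
 *		return PTR_ERR(fc);
 *	...
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);  // "dump"
 *	...
 *	mlx5_fc_destroy(dev, fc);          // no dump may follow this call
 */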
static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;

	/* Walk down to the insertion point, ordered by hardware counter id. */
	while (*new) {
		struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
		int result = counter->id - this->id;

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&counter->node, parent, new);
	rb_insert_color(&counter->node, root);
}
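/*
 * Design note: the tree is keyed by the hardware counter id, so an in-order
 * walk visits counters in ascending id order. mlx5_fc_stats_query() below
 * relies on this to cover a contiguous id range with a single bulk command.
 */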
static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
					   struct mlx5_fc *first,
					   u16 last_id)
{
	struct mlx5_cmd_fc_bulk *b;
	struct rb_node *node = NULL;
	u16 afirst_id;
	int num;
	int err;
	int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);

	/* first id must be aligned to 4 when using bulk query */
	afirst_id = first->id & ~0x3;

	/* number of counters to query inc. the last counter */
	num = ALIGN(last_id - afirst_id + 1, 4);
	if (num > max_bulk) {
		num = max_bulk;
		last_id = afirst_id + num - 1;
	}
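	/*
	 * Worked example of the arithmetic above (illustrative numbers): for
	 * first->id = 6 and last_id = 13, afirst_id = 6 & ~0x3 = 4 and
	 * num = ALIGN(13 - 4 + 1, 4) = 12, i.e. ids 4..15 in one bulk
	 * command. Had num exceeded max_bulk, say max_bulk = 8, the range
	 * would be clamped to last_id = 4 + 8 - 1 = 11 and the work loop
	 * would call here again for the remaining ids.
	 */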
	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
	if (!b) {
		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
		return NULL;
	}

	err = mlx5_cmd_fc_bulk_query(dev, b);
	if (err) {
		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
		goto out;
	}

	/* Scatter the bulk result into each counter's cache. */
	for (node = &first->node; node; node = rb_next(node)) {
		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
		struct mlx5_fc_cache *c = &counter->cache;

		if (counter->id > last_id)
			break;

		mlx5_cmd_fc_bulk_get(dev, b,
				     counter->id, &c->packets, &c->bytes);
	}

out:
	mlx5_cmd_fc_bulk_free(b);

	return node;
}
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long now = jiffies;
	struct mlx5_fc *counter = NULL;
	struct mlx5_fc *last = NULL;
	struct rb_node *node;
	LIST_HEAD(tmplist);

	spin_lock(&fc_stats->addlist_lock);

	list_splice_tail_init(&fc_stats->addlist, &tmplist);

	/* Rearm the work as long as there is anything left to track. */
	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   MLX5_FC_STATS_PERIOD);

	spin_unlock(&fc_stats->addlist_lock);

	list_for_each_entry(counter, &tmplist, list)
		mlx5_fc_stats_insert(&fc_stats->counters, counter);

	/* Reap counters marked deleted and find the highest live id. */
	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		if (counter->deleted) {
			rb_erase(&counter->node, &fc_stats->counters);

			mlx5_cmd_fc_free(dev, counter->id);

			kfree(counter);
			continue;
		}

		last = counter;
	}

	if (time_before(now, fc_stats->next_query) || !last)
		return;

	/* Query the whole tree in bulk chunks of ascending ids. */
	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = mlx5_fc_stats_query(dev, counter, last->id);
	}

	fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
}
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto err_out;

	if (aging) {
		counter->aging = true;

		spin_lock(&fc_stats->addlist_lock);
		list_add(&counter->list, &fc_stats->addlist);
		spin_unlock(&fc_stats->addlist_lock);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out:
	kfree(counter);

	return ERR_PTR(err);
}
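/*
 * Note on the aging flag: only a counter created with aging == true enters
 * the addlist and is polled by mlx5_fc_stats_work(); a non-aging counter is
 * just a hardware handle, and mlx5_fc_destroy() frees it synchronously.
 */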
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		/* Defer the free to the work so a query in flight never
		 * touches a freed counter.
		 */
		counter->deleted = true;
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->counters = RB_ROOT;
	INIT_LIST_HEAD(&fc_stats->addlist);
	spin_lock_init(&fc_stats->addlist_lock);

	/* A single threaded workqueue serializes all queries and deferred
	 * frees.
	 */
	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
}
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;
	struct rb_node *node;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
		list_del(&counter->list);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		rb_erase(&counter->node, &fc_stats->counters);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}
}
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}
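/*
 * Semantics sketch (illustrative numbers): each call reports the delta
 * accumulated since the previous call. If the cache holds bytes = 1000 on
 * the first call and bytes = 1500 on the second, the two calls return 1000
 * and then 500, because lastbytes/lastpackets are advanced to the cached
 * totals on every call.
 */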
;