/* drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c */
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)

/* locking scheme:
 *
 * It is the responsibility of the user to prevent concurrent calls or bad
 * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
 * to struct mlx5_fc.
 * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
 * call a dump (access to struct mlx5_fc) after a counter is destroyed.
 *
 * access to counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds to an addlist to be used by
 *     mlx5_fc_stats_work(). addlist is protected by a spinlock.
 *   - spawn the work to do the actual add
 *
 * - destroy (user context)
 *   - mark a counter as deleted
 *   - spawn the work to do the actual del
 *
 * - dump (user context)
 *   user should not call dump after destroy
 *
 * - query (single threaded workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (since update is not
 *                atomic)
 *   query/create - no conflict (see create)
 *   since every create/destroy spawns the work, the work only queries the
 *   hardware after the necessary time has elapsed.
 */

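/* A minimal usage sketch of the API below (hypothetical caller, shown for
 * illustration only):
 *
 *	struct mlx5_fc *fc;
 *	u64 bytes, packets, lastuse;
 *
 *	fc = mlx5_fc_create(dev, true);
 *	if (IS_ERR(fc))
 *		return PTR_ERR(fc);
 *	...
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);
 *	...
 *	mlx5_fc_destroy(dev, fc);
 */
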
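/* Insert a counter into the stats rb-tree, which is kept sorted by counter
 * id so that the periodic work can walk counters in id order and batch
 * adjacent ids into a single bulk query.
 */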
static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
		int result = counter->id - this->id;

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&counter->node, parent, new);
	rb_insert_color(&counter->node, root);
}

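/* Bulk-query the device starting at first->id (rounded down to the required
 * alignment of 4) and refresh the cache of every tree node covered by the
 * reply.  Returns the first node whose id falls beyond this query, or NULL
 * when the remaining range was fully covered or an error occurred.
 */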
static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
					   struct mlx5_fc *first,
					   u16 last_id)
{
	struct mlx5_cmd_fc_bulk *b;
	struct rb_node *node = NULL;
	u16 afirst_id;
	int num;
	int err;
	int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);

	/* first id must be aligned to 4 when using bulk query */
	afirst_id = first->id & ~0x3;

	/* number of counters to query inc. the last counter */
	num = ALIGN(last_id - afirst_id + 1, 4);
	if (num > max_bulk) {
		num = max_bulk;
		last_id = afirst_id + num - 1;
	}

	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
	if (!b) {
		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
		return NULL;
	}

	err = mlx5_cmd_fc_bulk_query(dev, b);
	if (err) {
		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
		goto out;
	}

	for (node = &first->node; node; node = rb_next(node)) {
		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
		struct mlx5_fc_cache *c = &counter->cache;
		u64 packets;
		u64 bytes;

		if (counter->id > last_id)
			break;

		mlx5_cmd_fc_bulk_get(dev, b,
				     counter->id, &packets, &bytes);

		if (c->packets == packets)
			continue;

		c->packets = packets;
		c->bytes = bytes;
		c->lastuse = jiffies;
	}

out:
	mlx5_cmd_fc_bulk_free(b);

	return node;
}

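/* Periodic work: splice newly created counters into the tree, free the
 * counters marked as deleted, and, once MLX5_FC_STATS_PERIOD has elapsed
 * since the last query, refresh all cached stats via bulk queries.
 */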
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long now = jiffies;
	struct mlx5_fc *counter = NULL;
	struct mlx5_fc *last = NULL;
	struct rb_node *node;
	LIST_HEAD(tmplist);

	spin_lock(&fc_stats->addlist_lock);

	list_splice_tail_init(&fc_stats->addlist, &tmplist);

	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   MLX5_FC_STATS_PERIOD);

	spin_unlock(&fc_stats->addlist_lock);

	list_for_each_entry(counter, &tmplist, list)
		mlx5_fc_stats_insert(&fc_stats->counters, counter);

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		if (counter->deleted) {
			rb_erase(&counter->node, &fc_stats->counters);

			mlx5_cmd_fc_free(dev, counter->id);

			kfree(counter);
			continue;
		}

		last = counter;
	}

	if (time_before(now, fc_stats->next_query) || !last)
		return;

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = mlx5_fc_stats_query(dev, counter, last->id);
	}

	fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
}

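/* Allocate a hardware flow counter.  When @aging is true the counter is
 * handed to the stats work as well, so its packets/bytes/lastuse cache is
 * refreshed periodically.
 */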
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto err_out;

	if (aging) {
		counter->aging = true;

		spin_lock(&fc_stats->addlist_lock);
		list_add(&counter->list, &fc_stats->addlist);
		spin_unlock(&fc_stats->addlist_lock);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out:
	kfree(counter);

	return ERR_PTR(err);
}

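/* Free a flow counter.  An aging counter is only marked as deleted here;
 * the stats work performs the actual tree removal and hardware free, which
 * keeps destruction serialized with the periodic query.
 */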
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		counter->deleted = true;
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

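/* Set up the flow counter stats infrastructure: an empty counters tree and
 * addlist, and a single threaded workqueue so queries never run
 * concurrently with each other.
 */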
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->counters = RB_ROOT;
	INIT_LIST_HEAD(&fc_stats->addlist);
	spin_lock_init(&fc_stats->addlist_lock);

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
}

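/* Tear down the stats infrastructure: stop the work, then release every
 * counter still pending on the addlist or present in the tree.
 */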
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;
	struct rb_node *node;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
		list_del(&counter->list);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		rb_erase(&counter->node, &fc_stats->counters);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}
}

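/* Report bytes/packets accumulated since the previous call, based solely on
 * the cached values (no device roundtrip).  As the locking scheme above
 * notes, a concurrent cache update may leave packets and bytes momentarily
 * inconsistent with each other.
 */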
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}