drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/rbtree.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"

#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)

/* Locking scheme:
 *
 * It is the user's responsibility to prevent concurrent or badly ordered
 * calls to mlx5_fc_create() and mlx5_fc_destroy(), and accesses through a
 * struct mlx5_fc reference.
 * e.g. en_tc.c is protected by the RTNL lock of its caller and will never
 * dump (access a struct mlx5_fc) after the counter has been destroyed.
 *
 * Access to the counter list:
 * - create (user context)
 *   - mlx5_fc_create() only adds the counter to an addlist that is consumed
 *     by mlx5_fc_stats_work(); the addlist is protected by a spinlock
 *   - the work is scheduled to do the actual insertion into the counter tree
 *
 * - destroy (user context)
 *   - mark the counter as deleted
 *   - the work is scheduled to do the actual removal and free
 *
 * - dump (user context)
 *   the user must not call dump after destroy
 *
 * - query (single-threaded workqueue context)
 *   destroy/dump - no conflict (see destroy)
 *   query/dump - packets and bytes might be inconsistent (the update is not
 *                atomic)
 *   query/create - no conflict (see create)
 *   although every create/destroy schedules the work immediately, the work
 *   only queries the hardware once the polling period has elapsed
 */

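/* Insert a counter into the rbtree, keyed and ordered by counter id, so that
 * the stats work can walk counters in id order and batch contiguous ranges
 * into bulk firmware queries.
 */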
static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
		int result = counter->id - this->id;

		parent = *new;
		if (result < 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&counter->node, parent, new);
	rb_insert_color(&counter->node, root);
}

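/* Query a range of counters, starting at @first and capped by @last_id and by
 * the firmware's maximal bulk size, with a single bulk command, and refresh
 * the cached packets/bytes of every counter in that range. Returns the first
 * node not covered by this query, or NULL if the remaining tree was covered
 * or an error occurred.
 */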
static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
					   struct mlx5_fc *first,
					   u16 last_id)
{
	struct mlx5_cmd_fc_bulk *b;
	struct rb_node *node = NULL;
	u16 afirst_id;
	int num;
	int err;
	int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);

	/* first id must be aligned to 4 when using bulk query */
	afirst_id = first->id & ~0x3;

	/* number of counters to query inc. the last counter */
	num = ALIGN(last_id - afirst_id + 1, 4);
	if (num > max_bulk) {
		num = max_bulk;
		last_id = afirst_id + num - 1;
	}

	b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
	if (!b) {
		mlx5_core_err(dev, "Error allocating resources for bulk query\n");
		return NULL;
	}

	err = mlx5_cmd_fc_bulk_query(dev, b);
	if (err) {
		mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
		goto out;
	}

	for (node = &first->node; node; node = rb_next(node)) {
		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
		struct mlx5_fc_cache *c = &counter->cache;

		if (counter->id > last_id)
			break;

		mlx5_cmd_fc_bulk_get(dev, b,
				     counter->id, &c->packets, &c->bytes);
	}

out:
	mlx5_cmd_fc_bulk_free(b);

	return node;
}

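/* Periodic work: splice newly created counters from the addlist into the
 * rbtree, free counters that were marked as deleted, and, once the polling
 * period has elapsed, refresh the cached stats of the remaining counters via
 * bulk queries.
 */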
static void mlx5_fc_stats_work(struct work_struct *work)
{
	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
						 priv.fc_stats.work.work);
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	unsigned long now = jiffies;
	struct mlx5_fc *counter = NULL;
	struct mlx5_fc *last = NULL;
	struct rb_node *node;
	LIST_HEAD(tmplist);

	spin_lock(&fc_stats->addlist_lock);

	list_splice_tail_init(&fc_stats->addlist, &tmplist);

	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
		queue_delayed_work(fc_stats->wq, &fc_stats->work,
				   MLX5_FC_STATS_PERIOD);

	spin_unlock(&fc_stats->addlist_lock);

	list_for_each_entry(counter, &tmplist, list)
		mlx5_fc_stats_insert(&fc_stats->counters, counter);

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		if (counter->deleted) {
			rb_erase(&counter->node, &fc_stats->counters);

			mlx5_cmd_fc_free(dev, counter->id);

			kfree(counter);
			continue;
		}

		last = counter;
	}

	if (time_before(now, fc_stats->next_query) || !last)
		return;

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = mlx5_fc_stats_query(dev, counter, last->id);
	}

	fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
}

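/* Allocate a flow counter in hardware. When @aging is set, the counter is
 * also handed to the stats work, which periodically caches its packets/bytes
 * so they can later be read with mlx5_fc_query_cached(). Returns the counter
 * or an ERR_PTR() on failure.
 */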
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	int err;

	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
	if (!counter)
		return ERR_PTR(-ENOMEM);

	err = mlx5_cmd_fc_alloc(dev, &counter->id);
	if (err)
		goto err_out;

	if (aging) {
		counter->aging = true;

		spin_lock(&fc_stats->addlist_lock);
		list_add(&counter->list, &fc_stats->addlist);
		spin_unlock(&fc_stats->addlist_lock);

		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
	}

	return counter;

err_out:
	kfree(counter);

	return ERR_PTR(err);
}

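/* Release a flow counter. Aging counters are only marked as deleted here;
 * the stats work later removes them from the rbtree, frees the hardware
 * counter and then frees the memory. Non-aging counters are freed
 * immediately.
 */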
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	if (!counter)
		return;

	if (counter->aging) {
		counter->deleted = true;
		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
		return;
	}

	mlx5_cmd_fc_free(dev, counter->id);
	kfree(counter);
}

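/* Initialize the flow counter statistics machinery: the counter rbtree, the
 * addlist and its lock, and the single-threaded workqueue that runs
 * mlx5_fc_stats_work().
 */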
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;

	fc_stats->counters = RB_ROOT;
	INIT_LIST_HEAD(&fc_stats->addlist);
	spin_lock_init(&fc_stats->addlist_lock);

	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
	if (!fc_stats->wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);

	return 0;
}

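/* Tear down the statistics machinery: stop the work and its workqueue, then
 * free any counters still pending on the addlist or left in the rbtree.
 */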
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
{
	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
	struct mlx5_fc *counter;
	struct mlx5_fc *tmp;
	struct rb_node *node;

	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
	destroy_workqueue(dev->priv.fc_stats.wq);
	dev->priv.fc_stats.wq = NULL;

	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
		list_del(&counter->list);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}

	node = rb_first(&fc_stats->counters);
	while (node) {
		counter = rb_entry(node, struct mlx5_fc, node);

		node = rb_next(node);

		rb_erase(&counter->node, &fc_stats->counters);

		mlx5_cmd_fc_free(dev, counter->id);

		kfree(counter);
	}
}

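/* Report the bytes and packets accumulated since the previous call, based on
 * the values cached by the stats work, along with the cached last-use
 * timestamp. Must not be called after the counter has been destroyed (see
 * the locking scheme above).
 */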
void mlx5_fc_query_cached(struct mlx5_fc *counter,
			  u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc_cache c;

	c = counter->cache;

	*bytes = c.bytes - counter->lastbytes;
	*packets = c.packets - counter->lastpackets;
	*lastuse = c.lastuse;

	counter->lastbytes = c.bytes;
	counter->lastpackets = c.packets;
}
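
/* Example usage (illustrative sketch only, not part of this file): a consumer
 * such as en_tc.c is expected to drive this API roughly as follows, with all
 * calls serialized by its own locking (e.g. RTNL). The variable names and
 * surrounding error handling here are hypothetical.
 *
 *	struct mlx5_fc *fc;
 *	u64 bytes, packets, lastuse;
 *
 *	fc = mlx5_fc_create(dev, true);	// aging counter, polled by the work
 *	if (IS_ERR(fc))
 *		return PTR_ERR(fc);
 *
 *	// ... attach the counter to a flow rule, let traffic run ...
 *
 *	mlx5_fc_query_cached(fc, &bytes, &packets, &lastuse);	// cached delta
 *
 *	mlx5_fc_destroy(dev, fc);	// deferred cleanup via the stats work
 */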