/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"

/* Spinlock must be held by caller */
static int t4_sched_class_fw_cmd(struct port_info *pi,
				 struct ch_sched_params *p,
				 enum sched_fw_ops op)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	int err = 0;

	e = &s->tab[p->u.params.class];
	switch (op) {
	case SCHED_FW_OP_ADD:
		err = t4_sched_params(adap, p->type,
				      p->u.params.level, p->u.params.mode,
				      p->u.params.rateunit,
				      p->u.params.ratemode,
				      p->u.params.channel, e->idx,
				      p->u.params.minrate, p->u.params.maxrate,
				      p->u.params.weight, p->u.params.pktsize);
		break;
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

/* Spinlock must be held by caller */
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
				   enum sched_bind_type type, bool bind)
{
	struct adapter *adap = pi->adapter;
	u32 fw_mnem, fw_class, fw_param;
	unsigned int pf = adap->pf;
	unsigned int vf = 0;
	int err = 0;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe;

		qe = (struct sched_queue_entry *)arg;

		/* Create a template for the FW_PARAMS_CMD mnemonic and
		 * value (TX Scheduling Class in this case).
		 */
		fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			   FW_PARAMS_PARAM_X_V(
				   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
		fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
		fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

		pf = adap->pf;
		vf = 0;
		break;
	}
	default:
		err = -ENOTSUPP;
		goto out;
	}

	err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);

out:
	return err;
}

static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
						 const unsigned int qid,
						 int *index)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	struct sched_class *found = NULL;
	int i;

	/* Look for a class with matching bound queue parameters */
	end = &s->tab[s->sched_size];
	for (e = &s->tab[0]; e != end; ++e) {
		struct sched_queue_entry *qe;

		i = 0;
		if (e->state == SCHED_STATE_UNUSED)
			continue;

		list_for_each_entry(qe, &e->queue_list, list) {
			if (qe->cntxt_id == qid) {
				found = e;
				if (index)
					*index = i;
				break;
			}
			i++;
		}

		if (found)
			break;
	}

	return found;
}

static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	struct sched_queue_entry *qe = NULL;
	struct sge_eth_txq *txq;
	unsigned int qid;
	int index = -1;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Find the existing class that the queue is bound to */
	e = t4_sched_queue_lookup(pi, qid, &index);
	if (e && index >= 0) {
		int i = 0;

		spin_lock(&e->lock);
		list_for_each_entry(qe, &e->queue_list, list) {
			if (i == index)
				break;
			i++;
		}
		err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
					      false);
		if (err) {
			spin_unlock(&e->lock);
			goto out;
		}

		list_del(&qe->list);
		t4_free_mem(qe);
		if (atomic_dec_and_test(&e->refcnt)) {
			e->state = SCHED_STATE_UNUSED;
			memset(&e->info, 0, sizeof(e->info));
		}
		spin_unlock(&e->lock);
	}
out:
	return err;
}

static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
	struct adapter *adap = pi->adapter;
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	struct sched_queue_entry *qe = NULL;
	struct sge_eth_txq *txq;
	unsigned int qid;
	int err = 0;

	if (p->queue < 0 || p->queue >= pi->nqsets)
		return -ERANGE;

	qe = t4_alloc_mem(sizeof(struct sched_queue_entry));
	if (!qe)
		return -ENOMEM;

	txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
	qid = txq->q.cntxt_id;

	/* Unbind queue from any existing class */
	err = t4_sched_queue_unbind(pi, p);
	if (err) {
		/* Free the entry allocated above; nothing owns it yet */
		t4_free_mem(qe);
		goto out;
	}

	/* Bind queue to specified class */
	memset(qe, 0, sizeof(*qe));
	qe->cntxt_id = qid;
	memcpy(&qe->param, p, sizeof(qe->param));

	e = &s->tab[qe->param.class];
	spin_lock(&e->lock);
	err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
	if (err) {
		t4_free_mem(qe);
		spin_unlock(&e->lock);
		goto out;
	}

	list_add_tail(&qe->list, &e->queue_list);
	atomic_inc(&e->refcnt);
	spin_unlock(&e->lock);
out:
	return err;
}

static void t4_sched_class_unbind_all(struct port_info *pi,
				      struct sched_class *e,
				      enum sched_bind_type type)
{
	if (!e)
		return;

	switch (type) {
	case SCHED_QUEUE: {
		struct sched_queue_entry *qe, *tmp;

		/* Use the _safe variant: t4_sched_queue_unbind() removes and
		 * frees the current entry from the list.
		 */
		list_for_each_entry_safe(qe, tmp, &e->queue_list, list)
			t4_sched_queue_unbind(pi, &qe->param);
		break;
	}
	default:
		break;
	}
}

static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
					 enum sched_bind_type type, bool bind)
{
	int err = 0;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		if (bind)
			err = t4_sched_queue_bind(pi, qe);
		else
			err = t4_sched_queue_unbind(pi, qe);
		break;
	}
	default:
		err = -ENOTSUPP;
		break;
	}

	return err;
}

/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (Queue)
 *
 * Binds an entity (queue) to a scheduling class. If the entity
 * is bound to another class, it will be unbound from the other class
 * and bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
			   enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s;
	int err = 0;
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	if (class_id == SCHED_CLS_NONE)
		return -ENOTSUPP;

	s = pi->sched_tbl;
	write_lock(&s->rw_lock);
	err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
	write_unlock(&s->rw_lock);

	return err;
}
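
/* Illustrative use of cxgb4_sched_class_bind() - a sketch, not part of the
 * driver.  A caller that already holds a scheduling class (for example one
 * returned by cxgb4_sched_class_alloc()) fills in a struct ch_sched_queue
 * with the per-port queue set index and the class index, then binds the
 * queue.  The names "dev", "qset" and "e" below are assumed to come from
 * the caller's context.
 *
 *	struct ch_sched_queue qe;
 *	int err;
 *
 *	memset(&qe, 0, sizeof(qe));
 *	qe.queue = qset;
 *	qe.class = e->idx;
 *	err = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 *	if (err)
 *		return err;
 */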

/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (Queue)
 *
 * Unbinds an entity (queue) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
			     enum sched_bind_type type)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct sched_table *s;
	int err = 0;
	u8 class_id;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (!arg)
		return -EINVAL;

	switch (type) {
	case SCHED_QUEUE: {
		struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

		class_id = qe->class;
		break;
	}
	default:
		return -ENOTSUPP;
	}

	if (!valid_class_id(dev, class_id))
		return -EINVAL;

	s = pi->sched_tbl;
	write_lock(&s->rw_lock);
	err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
	write_unlock(&s->rw_lock);

	return err;
}
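
/* Illustrative unbind - a sketch under the same assumptions as the bind
 * example above.  The class field is only checked for validity here; the
 * class the queue is actually bound to is looked up from the queue's
 * context id, so a caller detaching a queue can pass SCHED_CLS_NONE.
 *
 *	memset(&qe, 0, sizeof(qe));
 *	qe.queue = qset;
 *	qe.class = SCHED_CLS_NONE;
 *	err = cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
 */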

/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
						 const struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e, *end;
	struct sched_class *found = NULL;

	if (!p) {
		/* Get any available unused class */
		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED) {
				found = e;
				break;
			}
		}
	} else {
		/* Look for a class with matching scheduling parameters */
		struct ch_sched_params info;
		struct ch_sched_params tp;

		memset(&info, 0, sizeof(info));
		memset(&tp, 0, sizeof(tp));

		memcpy(&tp, p, sizeof(tp));
		/* Don't try to match class parameter */
		tp.u.params.class = SCHED_CLS_NONE;

		end = &s->tab[s->sched_size];
		for (e = &s->tab[0]; e != end; ++e) {
			if (e->state == SCHED_STATE_UNUSED)
				continue;

			memset(&info, 0, sizeof(info));
			memcpy(&info, &e->info, sizeof(info));
			/* Don't try to match class parameter */
			info.u.params.class = SCHED_CLS_NONE;

			if ((info.type == tp.type) &&
			    (!memcmp(&info.u.params, &tp.u.params,
				     sizeof(info.u.params)))) {
				found = e;
				break;
			}
		}
	}

	return found;
}

static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
						struct ch_sched_params *p)
{
	struct sched_table *s = pi->sched_tbl;
	struct sched_class *e;
	u8 class_id;
	int err;

	if (!p)
		return NULL;

	class_id = p->u.params.class;

	/* Only accept search for existing class with matching params
	 * or allocation of new class with specified params
	 */
	if (class_id != SCHED_CLS_NONE)
		return NULL;

	write_lock(&s->rw_lock);
	/* See if there's an existing class with same
	 * requested sched params
	 */
	e = t4_sched_class_lookup(pi, p);
	if (!e) {
		struct ch_sched_params np;

		/* Fetch any available unused class */
		e = t4_sched_class_lookup(pi, NULL);
		if (!e)
			goto out;

		memset(&np, 0, sizeof(np));
		memcpy(&np, p, sizeof(np));
		np.u.params.class = e->idx;

		spin_lock(&e->lock);
		/* New class */
		err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
		if (err) {
			spin_unlock(&e->lock);
			e = NULL;
			goto out;
		}
		memcpy(&e->info, &np, sizeof(e->info));
		atomic_set(&e->refcnt, 0);
		e->state = SCHED_STATE_ACTIVE;
		spin_unlock(&e->lock);
	}

out:
	write_unlock(&s->rw_lock);
	return e;
}

/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create.
 *
 * Returns pointer to the scheduling class created. If @p is NULL, then
 * it allocates and returns any available unused scheduling class. If a
 * scheduling class with matching @p is found, then the matching class is
 * returned.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
					    struct ch_sched_params *p)
{
	struct port_info *pi = netdev2pinfo(dev);
	u8 class_id;

	/* @p is documented as possibly NULL, so guard the dereference below */
	if (!p || !can_sched(dev))
		return NULL;

	class_id = p->u.params.class;
	if (!valid_class_id(dev, class_id))
		return NULL;

	return t4_sched_class_alloc(pi, p);
}
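
/* Illustrative allocation - a sketch, not part of the driver.  A caller
 * such as a tx rate-limiting path would build a class rate-limit request
 * and let t4_sched_class_alloc() either reuse an existing class with
 * matching parameters or claim an unused one (which is why the class field
 * must be SCHED_CLS_NONE).  The SCHED_CLASS_* values are assumed to come
 * from sched.h; "dev", "pi" and "maxrate_kbps" are assumed from the
 * caller's context.
 *
 *	struct ch_sched_params p;
 *	struct sched_class *e;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.type = SCHED_CLASS_TYPE_PACKET;
 *	p.u.params.level = SCHED_CLASS_LEVEL_CL_RL;
 *	p.u.params.mode = SCHED_CLASS_MODE_CLASS;
 *	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
 *	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
 *	p.u.params.channel = pi->tx_chan;
 *	p.u.params.class = SCHED_CLS_NONE;
 *	p.u.params.maxrate = maxrate_kbps;
 *	e = cxgb4_sched_class_alloc(dev, &p);
 *	if (!e)
 *		return -ENOMEM;
 */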

static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
{
	t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
}

struct sched_table *t4_init_sched(unsigned int sched_size)
{
	struct sched_table *s;
	unsigned int i;

	s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class));
	if (!s)
		return NULL;

	s->sched_size = sched_size;
	rwlock_init(&s->rw_lock);

	for (i = 0; i < s->sched_size; i++) {
		memset(&s->tab[i], 0, sizeof(struct sched_class));
		s->tab[i].idx = i;
		s->tab[i].state = SCHED_STATE_UNUSED;
		INIT_LIST_HEAD(&s->tab[i].queue_list);
		spin_lock_init(&s->tab[i].lock);
		atomic_set(&s->tab[i].refcnt, 0);
	}
	return s;
}

void t4_cleanup_sched(struct adapter *adap)
{
	struct sched_table *s;
	unsigned int i, j;

	/* Use separate indices for the port and class loops so the inner
	 * loop does not clobber the port iterator.
	 */
	for_each_port(adap, j) {
		struct port_info *pi = netdev2pinfo(adap->port[j]);

		s = pi->sched_tbl;
		for (i = 0; i < s->sched_size; i++) {
			struct sched_class *e;

			write_lock(&s->rw_lock);
			e = &s->tab[i];
			if (e->state == SCHED_STATE_ACTIVE)
				t4_sched_class_free(pi, e);
			write_unlock(&s->rw_lock);
		}
		t4_free_mem(s);
	}
}