/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"

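/* The driver keeps a shadow copy of the shared buffer configuration in
 * mlxsw_sp->sb so that devlink queries and occupancy readouts can be served
 * from memory instead of issuing register reads. The helpers below look up
 * the cached entry for a pool (PR), a per-port PG/TC quota (CM) or a
 * per-port per-pool quota (PM), keyed by direction (ingress/egress).
 */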
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb.prs[dir][pool];
}

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
}

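/* Each *_write helper packs the corresponding register payload (SBPR, SBCM
 * or SBPM), issues the write and, on success, mirrors the new values into
 * the cache above so the two never diverge.
 */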
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
				enum mlxsw_reg_sbxx_dir dir,
				enum mlxsw_reg_sbpr_mode mode, u32 size)
{
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
	pr->mode = mode;
	pr->size = size;
	return 0;
}

static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff, u8 pool)
{
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
			    min_buff, max_buff, pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;
	if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
		struct mlxsw_sp_sb_cm *cm;

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool = pool;
	}
	return 0;
}

static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pool, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}

static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}

static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}

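/* Default port buffer (headroom) sizes, in cells: buffer 0 holds two
 * maximum-size Ethernet frames, buffer 9 two frames at the maximum
 * supported MTU. Buffer 8 is unused on this ASIC and is skipped during
 * initialization.
 */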
static const u16 mlxsw_sp_pbs[] = {
	[0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
	[9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
#define MLXSW_SP_PB_UNUSED 8

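/* Apply the default headroom configuration: size each lossy port buffer and
 * explicitly zero the port shared buffer. Per the PBMC register definition,
 * the two trailing arguments of mlxsw_reg_pbmc_pack() are the xoff timer
 * value and refresh interval.
 */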
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
			       MLXSW_REG(pbmc), pbmc_pl);
}

static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}

static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}

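/* Default shared buffer pool sizes: the ingress and egress pools start from
 * a fixed byte budget and subtract a per-port reservation (2 * 20000 and
 * 8 * 1500 bytes per port, respectively). Ingress pool 3 (MNG) is a small
 * pool to which PG 9 is bound in the CM table below.
 */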
#define MLXSW_SP_SB_PR_INGRESS_SIZE				\
	(15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE				\
	(14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))

#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
};

#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
};

#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)

static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_pr *prs,
				  size_t prs_len)
{
	int i;
	int err;

	for (i = 0; i < prs_len; i++) {
		const struct mlxsw_sp_sb_pr *pr;

		pr = &prs[i];
		err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
					   pr->mode, pr->size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_prs_ingress,
				     MLXSW_SP_SB_PRS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_prs_egress,
				      MLXSW_SP_SB_PRS_EGRESS_LEN);
}

#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool = _pool,				\
	}

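/* Per-port PG/TC to pool bindings (CM). Since all default pools are in
 * dynamic mode, the max_buff values here are alpha indexes (e.g.
 * MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN) rather than cell counts. On ingress,
 * PG 9 is bound to the management pool 3, and the PG 8 slot is only a
 * placeholder.
 */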
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
};

#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(1, 0xff, 0),
};

#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)

#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};

#define MLXSW_SP_CPU_PORT_SB_CMS_LEN \
	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)

static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
					   cm->min_buff, cm->max_buff,
					   cm->pool);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				     mlxsw_sp_port->local_port,
				     MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_cms_ingress,
				     MLXSW_SP_SB_CMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				      mlxsw_sp_port->local_port,
				      MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_cms_egress,
				      MLXSW_SP_SB_CMS_EGRESS_LEN);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_cpu_port_sb_cms,
				      MLXSW_SP_CPU_PORT_SB_CMS_LEN);
}

#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

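/* Per-port, per-pool quota (PM) defaults. All default pools are created in
 * dynamic mode, so these max_buff values are alpha indexes as well.
 */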
static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};

#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)

static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				       enum mlxsw_reg_sbxx_dir dir,
				       const struct mlxsw_sp_sb_pm *pms,
				       size_t pms_len)
{
	int i;
	int err;

	for (i = 0; i < pms_len; i++) {
		const struct mlxsw_sp_sb_pm *pm;

		pm = &pms[i];
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
					   pm->min_buff, pm->max_buff);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					  mlxsw_sp_port->local_port,
					  MLXSW_REG_SBXX_DIR_INGRESS,
					  mlxsw_sp_sb_pms_ingress,
					  MLXSW_SP_SB_PMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					   mlxsw_sp_port->local_port,
					   MLXSW_REG_SBXX_DIR_EGRESS,
					   mlxsw_sp_sb_pms_egress,
					   MLXSW_SP_SB_PMS_EGRESS_LEN);
}

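/* SBMM entries configure the shared buffer quota used for multicast
 * packets, one entry per switch priority.
 */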
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool = _pool,				\
	}

static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
	MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)

static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
		const struct mlxsw_sp_sb_mm *mc;

		mc = &mlxsw_sp_sb_mms[i];
		mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
				    mc->max_buff, mc->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}

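/* Total shared buffer size advertised to devlink for shared buffer index 0;
 * the Spectrum ASIC has a 16 MB packet buffer.
 */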
#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)

int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = mlxsw_sp_sb_prs_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		return err;
	return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				   MLXSW_SP_SB_SIZE,
				   MLXSW_SP_SB_POOL_COUNT,
				   MLXSW_SP_SB_POOL_COUNT,
				   MLXSW_SP_SB_TC_COUNT,
				   MLXSW_SP_SB_TC_COUNT);
}

void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
}

int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
}

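/* devlink uses a flat pool index space: ingress pools occupy indexes
 * 0..MLXSW_SP_SB_POOL_COUNT - 1, egress pools the next
 * MLXSW_SP_SB_POOL_COUNT indexes. The helpers below convert between the
 * flat index and the (pool, direction) pair used by the registers.
 */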
static u8 pool_get(u16 pool_index)
{
	return pool_index % MLXSW_SP_SB_POOL_COUNT;
}

static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
{
	u16 pool_index;

	pool_index = pool;
	if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
		pool_index += MLXSW_SP_SB_POOL_COUNT;
	return pool_index;
}

static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
{
	return pool_index < MLXSW_SP_SB_POOL_COUNT ?
	       MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
}

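/* The functions below implement the devlink shared buffer ops for this
 * driver. As an illustrative example (exact syntax depends on the iproute2
 * version), they are what runs behind commands such as:
 *
 *   devlink sb pool show pci/0000:03:00.0
 *   devlink sb pool set pci/0000:03:00.0 sb 0 pool 0 \
 *	size 12000000 thtype dynamic
 *   devlink sb occupancy snapshot pci/0000:03:00.0
 *
 * Note that devlink pool/threshold type values are assigned directly to the
 * corresponding mlxsw register enums below, so the two sets of constants
 * must stay in sync.
 */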
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	pool_info->pool_type = dir;
	pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
	pool_info->threshold_type = pr->mode;
	return 0;
}

int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	enum mlxsw_reg_sbpr_mode mode = threshold_type;
	u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);

	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}

#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

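/* For dynamic pools, devlink thresholds 3..16 map to the hardware alpha
 * indexes 1..14 via the offset above; for static pools, the threshold is a
 * byte count converted to and from cells.
 */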
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
				     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return MLXSW_SP_CELLS_TO_BYTES(max_buff);
}

static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
				    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
				    u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
	}
	return 0;
}

int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
						 pm->max_buff);
	return 0;
}

int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
				    0, max_buff);
}

int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
						 cm->max_buff);
	*p_pool_index = pool_index_get(cm->pool, pool_type);
	return 0;
}

int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = pool_type;
	u8 pool = pool_get(pool_index);
	u32 max_buff;
	int err;

	if (dir != dir_get(pool_index))
		return -EINVAL;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
				    0, max_buff, pool);
}

#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))

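/* An occupancy snapshot walks all ports, but a single SBSR query returns at
 * most MLXSW_REG_SBSR_REC_MAX_COUNT records (MLXSW_SP_SB_TC_COUNT ingress
 * plus as many egress records per port), so ports are handled in batches of
 * MASKED_COUNT_MAX. The batch description is packed into the unsigned long
 * cb_priv that is handed to the completion callback.
 */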
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;
	u8 local_port_1;
};

static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}

int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < MLXSW_PORT_MAX_PORTS)
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < MLXSW_PORT_MAX_PORTS)
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

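/* The two readout callbacks below only return cached occupancy values; the
 * cache is filled by mlxsw_sp_sb_occ_snapshot() above, so a snapshot must
 * be taken first for the numbers to be meaningful.
 */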
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
	*p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
	return 0;
}

int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
	*p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
	return 0;
}