drivers/net/ixgbe/ixgbe_dcb_82599.c
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
 * @hw: pointer to hardware structure
 * @rx_pba: method to distribute packet buffer
 *
 * Configure packet buffers for DCB mode.
 */
static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
{
	s32 ret_val = 0;
	u32 value = IXGBE_RXPBSIZE_64KB;
	u8 i = 0;

	/* Setup Rx packet buffer sizes */
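	/*
	 * Both layouts add up to the same 512KB of Rx packet buffer:
	 * either 8 x 64KB, or 4 x 80KB plus 4 x 48KB.
	 */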
	switch (rx_pba) {
	case pba_80_48:
		/* Setup the first four at 80KB */
		value = IXGBE_RXPBSIZE_80KB;
		for (; i < 4; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
		/* Setup the last four at 48KB...don't re-init i */
		value = IXGBE_RXPBSIZE_48KB;
		/* Fall Through */
	case pba_equal:
	default:
		for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
			IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);

		/* Setup Tx packet buffer sizes */
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
					IXGBE_TXPBSIZE_20KB);
			IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i),
					IXGBE_TXPBTHRESH_DCB);
		}
		break;
	}

	return ret_val;
}

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 * @prio_tc: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw,
				      u16 *refill,
				      u16 *max,
				      u8 *bwg_id,
				      u8 *prio_type,
				      u8 *prio_tc)
{
	u32 reg = 0;
	u32 credit_refill = 0;
	u32 credit_max = 0;
	u8 i = 0;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; WSP)
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	/* Map each user priority to its traffic class */
	reg = 0;
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		credit_refill = refill[i];
		credit_max = max[i];
		reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

		reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_RTRPT4C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
	}

	/*
	 * Configure Rx packet plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

	return 0;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw,
					   u16 *refill,
					   u16 *max,
					   u8 *bwg_id,
					   u8 *prio_type)
{
	u32 reg, max_credits;
	u8 i;

	/* Clear the per-Tx queue credits; we use per-TC instead */
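	/*
	 * RTTDQSEL selects which of the 128 Tx queues the following
	 * RTTDT1C write applies to (indirect per-queue access).
	 */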
	for (i = 0; i < 128; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
		IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
	}

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		max_credits = max[i];
		reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
		reg |= refill[i];
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

		if (prio_type[i] == prio_group)
			reg |= IXGBE_RTTDT2C_GSP;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_RTTDT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
	}

	/*
	 * Configure Tx descriptor plane (recycle mode; WSP) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	return 0;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 * @prio_tc: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
					   u16 *refill,
					   u16 *max,
					   u8 *bwg_id,
					   u8 *prio_type,
					   u8 *prio_tc)
{
	u32 reg;
	u8 i;

	/*
	 * Disable the arbiter before changing parameters
	 * (always enable recycle mode; SP; arb delay)
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
	      IXGBE_RTTPCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	/* Map each user priority to its traffic class */
	reg = 0;
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
		reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));
	IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

	/* Configure traffic class credits and priority */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		reg = refill[i];
		reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
		reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

		if (prio_type[i] == prio_group)
			reg |= IXGBE_RTTPT2C_GSP;

		if (prio_type[i] == prio_link)
			reg |= IXGBE_RTTPT2C_LSP;

		IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
	}

	/*
	 * Configure Tx packet plane (recycle mode; SP; arb delay) and
	 * enable arbiter
	 */
	reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
	      (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
	IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

	return 0;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
{
	u32 i, reg, rx_pba_size;

	/* Configure PFC Tx thresholds per TC */
	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		int enabled = pfc_en & (1 << i);
		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
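		/*
		 * rx_pba_size and the fc low/high watermarks are kept in
		 * the same kilobyte units here; shifting left by 10
		 * converts the XON/XOFF thresholds back to bytes for the
		 * FCRTL/FCRTH registers.
		 */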
		reg = (rx_pba_size - hw->fc.low_water) << 10;

		if (enabled)
			reg |= IXGBE_FCRTL_XONE;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);

		reg = (rx_pba_size - hw->fc.high_water) << 10;
		if (enabled)
			reg |= IXGBE_FCRTH_FCEN;
		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
	}

	if (pfc_en) {
		/* Configure pause time (2 TCs per register) */
		reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
		for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
			IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

		/* Configure flow control refresh threshold value */
		IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

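		/* Enable priority-based (per-TC) Tx flow control */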
		reg = IXGBE_FCCFG_TFCE_PRIORITY;
		IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
		/*
		 * Enable Receive PFC
		 * We will always honor XOFF frames we receive when
		 * we are in PFC mode.
		 */
		reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
		reg &= ~IXGBE_MFLCN_RFCE;
		reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
		IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

	} else {
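		/*
		 * No PFC priorities are enabled; re-apply the standard
		 * (non-PFC) flow control configuration for each packet
		 * buffer.
		 */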
		for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
			hw->mac.ops.fc_enable(hw, i);
	}

	return 0;
}

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
{
	u32 reg = 0;
	u8 i = 0;

	/*
	 * Receive Queues stats setting
	 * 32 RQSMR registers, each configuring 4 queues.
	 * Set all 16 queues of each TC to the same stat
	 * with TC 'n' going to stat 'n'.
	 */
	for (i = 0; i < 32; i++) {
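		/*
		 * 0x01010101 * (i / 4) replicates the stat index into each
		 * of the four per-queue fields of RQSMR, so queues 0-15 use
		 * stat set 0, queues 16-31 use set 1, and so on.
		 */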
		reg = 0x01010101 * (i / 4);
		IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
	}
	/*
	 * Transmit Queues stats setting
	 * 32 TQSM registers, each controlling 4 queues.
	 * Set all queues of each TC to the same stat
	 * with TC 'n' going to stat 'n'.
	 * Tx queues are allocated non-uniformly to TCs:
	 * 32, 32, 16, 16, 8, 8, 8, 8.
	 */
	for (i = 0; i < 32; i++) {
		if (i < 8)
			reg = 0x00000000;
		else if (i < 16)
			reg = 0x01010101;
		else if (i < 20)
			reg = 0x02020202;
		else if (i < 24)
			reg = 0x03030303;
		else if (i < 26)
			reg = 0x04040404;
		else if (i < 28)
			reg = 0x05050505;
		else if (i < 30)
			reg = 0x06060606;
		else
			reg = 0x07070707;
		IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
	}

	return 0;
}

/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 *
 * Configure general DCB parameters.
 */
static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
{
	u32 reg;
	u32 q;

	/* Disable the Tx desc arbiter so that MTQC can be changed */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg |= IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable DCB for Rx with 8 TCs */
	reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
	switch (reg & IXGBE_MRQC_MRQE_MASK) {
	case 0:
	case IXGBE_MRQC_RT4TCEN:
		/* RSS disabled cases */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
		break;
	case IXGBE_MRQC_RSSEN:
	case IXGBE_MRQC_RTRSS4TCEN:
		/* RSS enabled cases */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN;
		break;
	default:
		/*
		 * Unsupported value; assume stale data and fall back
		 * to DCB without RSS.
		 */
		reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
	}
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

	/* Enable DCB for Tx with 8 TCs */
	reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
	IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

	/* Disable drop for all queues */
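	/*
	 * Only the queue index is written below; the drop-enable bit is
	 * left clear, so per-queue packet drop stays disabled.
	 */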
	for (q = 0; q < 128; q++)
		IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT);

	/* Enable the Tx desc arbiter */
	reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
	reg &= ~IXGBE_RTTDCS_ARBDIS;
	IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

	/* Enable Security TX Buffer IFG for DCB */
	reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	reg |= IXGBE_SECTX_DCB;
	IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

	return 0;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @rx_pba: method to distribute packet buffer
 * @pfc_en: enabled pfc bitmask
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @prio_type: priority type indexed by traffic class
 * @prio_tc: priority to tc assignments indexed by priority
 *
 * Configure DCB settings and enable DCB mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
			      u8 rx_pba, u8 pfc_en, u16 *refill,
			      u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
	ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
	ixgbe_dcb_config_82599(hw);
	ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
					  prio_type, prio_tc);
	ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
					       bwg_id, prio_type);
	ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max,
					       bwg_id, prio_type, prio_tc);
	ixgbe_dcb_config_pfc_82599(hw, pfc_en);
	ixgbe_dcb_config_tc_stats_82599(hw);

	return 0;
}
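
/*
 * Illustrative usage sketch (not part of the driver): the generic DCB layer
 * is expected to derive per-TC credit tables from the user's DCB
 * configuration and hand them to ixgbe_dcb_hw_config_82599().  The values
 * below are hypothetical and only show the calling convention.
 *
 *	u16 refill[MAX_TRAFFIC_CLASS]    = { 32, 32, 32, 32, 32, 32, 32, 32 };
 *	u16 max[MAX_TRAFFIC_CLASS]       = { 511, 511, 511, 511,
 *					     511, 511, 511, 511 };
 *	u8  bwg_id[MAX_TRAFFIC_CLASS]    = { 0, 0, 0, 0, 1, 1, 1, 1 };
 *	u8  prio_type[MAX_TRAFFIC_CLASS] = { prio_group, prio_group,
 *					     prio_group, prio_group,
 *					     prio_group, prio_group,
 *					     prio_group, prio_link };
 *	u8  prio_tc[MAX_TRAFFIC_CLASS]   = { 0, 1, 2, 3, 4, 5, 6, 7 };
 *
 *	ixgbe_dcb_hw_config_82599(hw, pba_equal, 0xff, refill, max,
 *				  bwg_id, prio_type, prio_tc);
 */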